async 1.23.0 → 1.24.0
- checksums.yaml +4 -4
- data/.github/workflows/tests.yml +0 -1
- data/README.md +18 -6
- data/lib/async.rb +2 -2
- data/lib/async/barrier.rb +5 -3
- data/lib/async/debug/monitor.rb +4 -4
- data/lib/async/debug/selector.rb +2 -2
- data/lib/async/node.rb +4 -0
- data/lib/async/queue.rb +17 -11
- data/lib/async/reactor.rb +83 -69
- data/lib/async/semaphore.rb +5 -3
- data/lib/async/task.rb +7 -6
- data/lib/async/version.rb +1 -1
- data/lib/async/wrapper.rb +4 -4
- data/lib/kernel/async.rb +2 -2
- data/spec/async/barrier_spec.rb +4 -0
- data/spec/async/chainable_async_examples.rb +12 -0
- data/spec/async/queue_spec.rb +37 -0
- data/spec/async/reactor_spec.rb +34 -1
- data/spec/async/semaphore_spec.rb +4 -0
- metadata +5 -4
- data/.github/FUNDING.yml +0 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 2edec7ff8d1bdb38814250485c36cfe4128a627c81a6976124cb689f9adf144a
+  data.tar.gz: 885288ad542ef4758f3a2b1fe2d5ce2bcd13ff1c71992262927ecca7ee21dbb0
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6919a8b9bb8d54dce220024d0c8418a303cb2bb43dbeb6878491a9491c64ab45d953ca83831b6cccfcf6ee12d0db19a47509a981f3841591061e1d3240a75467
+  data.tar.gz: 41f251a4cf7fa2982f974964e6c7cf46d323364c53c34cce367475fe92e252433633967b4cf22a2a2edac40819ab8ec1b03207fe0e5d22febc2d96c1fdf795a8
data/.github/workflows/tests.yml
CHANGED
data/README.md
CHANGED
@@ -5,7 +5,7 @@ Async is a composable asynchronous I/O framework for Ruby based on [nio4r] and [
 [timers]: https://github.com/socketry/timers
 [nio4r]: https://github.com/socketry/nio4r
 
-[![Build Status](https://secure.travis-ci.
+[![Build Status](https://secure.travis-ci.com/socketry/async.svg)](http://travis-ci.com/socketry/async)
 [![Code Climate](https://codeclimate.com/github/socketry/async.svg)](https://codeclimate.com/github/socketry/async)
 [![Coverage Status](https://coveralls.io/repos/socketry/async/badge.svg)](https://coveralls.io/r/socketry/async)
 [![Gitter](https://badges.gitter.im/join.svg)](https://gitter.im/socketry/async)
@@ -192,9 +192,9 @@ Async do |task|
 end
 ```
 
-#### 
+#### Embedding Reactors
 
-`Async::Reactor#run` will run until the reactor runs out of work to do
+`Async::Reactor#run` will run until the reactor runs out of work to do. To run a single iteration of the reactor, use `Async::Reactor#run_once`
 
 ```ruby
 require 'async'
@@ -203,13 +203,25 @@ Async.logger.debug!
 reactor = Async::Reactor.new
 
 # Run the reactor for 1 second:
-reactor.
+reactor.async do |task|
 	task.sleep 1
-
+	puts "Finished!"
+end
+
+while reactor.run_once
+	# Round and round we go!
 end
 ```
 
-You can use this approach to embed the reactor in another event loop.
+You can use this approach to embed the reactor in another event loop.
+
+#### Stopping Reactors
+
+`Async::Reactor#stop` will stop the current reactor and all children tasks.
+
+#### Interrupting Reactors
+
+`Async::Reactor#interrupt` can be called safely from a different thread (or signal handler) and will cause the reactor to invoke `#stop`.
 
 ### Resource Management
 
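The new "Interrupting Reactors" section above describes `Async::Reactor#interrupt` but does not include a snippet. A minimal sketch of interrupting a long-running reactor from another thread; the one-second delay and the printed messages are illustrative choices, not from the README:

```ruby
require 'async'

reactor = Async::Reactor.new

# From another thread, ask the reactor to stop after roughly one second.
interrupter = Thread.new do
	sleep 1
	reactor.interrupt
end

reactor.async do |task|
	loop do
		task.sleep 0.1
		puts "Still running..."
	end
end

# Blocks until the interrupt causes the reactor to stop.
reactor.run
interrupter.join
```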
data/lib/async.rb
CHANGED
data/lib/async/barrier.rb
CHANGED
@@ -23,8 +23,10 @@ require_relative 'task'
 module Async
 	# A semaphore is used to control access to a common resource in a concurrent system. A useful way to think of a semaphore as used in the real-world systems is as a record of how many units of a particular resource are available, coupled with operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the resource becomes available.
 	class Barrier
-		def initialize
+		def initialize(parent: nil)
 			@tasks = []
+			
+			@parent = parent
 		end
 		
 		# All tasks which have been invoked into the barrier.
@@ -34,8 +36,8 @@ module Async
 			@tasks.size
 		end
 		
-		def async(*
-			task = parent.async(*
+		def async(*arguments, parent: (@parent or Task.current), **options, &block)
+			task = parent.async(*arguments, **options, &block)
 			
 			@tasks << task
 			
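With the new `parent:` option, a barrier can route the tasks it creates through another scheduler such as a semaphore, and still wait for all of them. A small usage sketch; the two-task limit and the five iterations are arbitrary choices for illustration:

```ruby
require 'async'
require 'async/barrier'
require 'async/semaphore'

Async do
	# At most two tasks run concurrently; the barrier tracks all of them.
	semaphore = Async::Semaphore.new(2)
	barrier = Async::Barrier.new(parent: semaphore)
	
	5.times do |i|
		barrier.async do |task|
			task.sleep(0.01)
			puts "Finished #{i}"
		end
	end
	
	# Wait for every task started through the barrier:
	barrier.wait
end
```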
data/lib/async/debug/monitor.rb
CHANGED
@@ -31,12 +31,12 @@ module Async
 			@monitor.close
 		end
 		
-		def method_missing(*
-			@monitor.send(*
+		def method_missing(*arguments, &block)
+			@monitor.send(*arguments)
 		end
 		
-		def respond_to?(*
-			@monitor.respond_to?(*
+		def respond_to?(*arguments)
+			@monitor.respond_to?(*arguments)
 		end
 		
 		def inspect
data/lib/async/debug/selector.rb
CHANGED
data/lib/async/node.rb
CHANGED
data/lib/async/queue.rb
CHANGED
@@ -23,10 +23,11 @@ require_relative 'notification'
 module Async
 	# A queue which allows items to be processed in order.
 	class Queue < Notification
-		def initialize
-			super
+		def initialize(parent: nil)
+			super()
 			
 			@items = []
+			@parent = parent
 		end
 		
 		attr :items
@@ -47,21 +48,26 @@ module Async
 			@items.shift
 		end
 		
-		def async(&block)
-			parent = Task.current
-			
+		def async(parent: (@parent or Task.current), &block)
 			while item = self.dequeue
 				parent.async(item, &block)
 			end
 		end
+		
+		def each
+			while item = self.dequeue
+				yield item
+			end
+		end
 	end
 	
 	class LimitedQueue < Queue
-		def initialize(limit = 1)
-			super()
+		def initialize(limit = 1, **options)
+			super(**options)
 			
 			@limit = limit
-			
+			
+			@full = Notification.new
 		end
 		
 		attr :limit
@@ -72,8 +78,8 @@ module Async
 		end
 		
 		def enqueue item
-			
-			@full.
+			while limited?
+				@full.wait
 			end
 			
 			super
@@ -82,7 +88,7 @@ module Async
 		def dequeue
 			item = super
 			
-			@full.
+			@full.signal
 			
 			return item
 		end
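Alongside the new `parent:` option, `Async::Queue` gains a plain `#each` iterator that dequeues until a falsey item is returned. A rough producer/consumer sketch; the item values and the `nil` sentinel follow the convention used in the specs further down:

```ruby
require 'async'
require 'async/queue'

Async do |task|
	queue = Async::Queue.new
	
	# Producer: enqueue a few items, then nil to stop the consumer.
	task.async do
		3.times {|i| queue.enqueue(i)}
		queue.enqueue(nil)
	end
	
	# Consumer: #each dequeues items in order until nil is seen.
	queue.each do |item|
		puts "Got #{item}"
	end
end
```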
data/lib/async/reactor.rb
CHANGED
@@ -42,16 +42,16 @@ module Async
 		# - When invoked at the top level, will create and run a reactor, and invoke
 		# the block as an asynchronous task. Will block until the reactor finishes
 		# running.
-		def self.run(*
+		def self.run(*arguments, **options, &block)
 			if current = Task.current?
 				reactor = current.reactor
 				
-				return reactor.async(*
+				return reactor.async(*arguments, **options, &block)
 			else
 				reactor = self.new(**options)
 				
 				begin
-					return reactor.run(*
+					return reactor.run(*arguments, &block)
 				ensure
 					reactor.close
 				end
@@ -80,7 +80,8 @@ module Async
 			@ready = []
 			@running = []
 			
-			@
+			@interrupted = false
+			@guard = Mutex.new
 		end
 		
 		def logger
@@ -88,14 +89,11 @@ module Async
 		end
 		
 		def to_s
-			"\#<#{self.description}
+			"\#<#{self.description} #{@children&.size || 0} children #{stopped? ? 'stopped' : 'running'}>"
 		end
 		
-		# @attr stopped [Boolean]
-		attr :stopped
-		
 		def stopped?
-			@
+			@children.nil? || @children.empty?
 		end
 		
 		# TODO Remove these in next major release. They are too confusing to use correctly.
@@ -109,7 +107,7 @@ module Async
 		#
 		# @yield [Task] Executed within the task.
 		# @return [Task] The task that was scheduled into the reactor.
-		def async(*
+		def async(*arguments, **options, &block)
 			task = Task.new(self, **options, &block)
 			
 			# I want to take a moment to explain the logic of this.
@@ -119,7 +117,7 @@ module Async
 			# - Fail at the point of the method call where possible.
 			# - Execute determinstically where possible.
 			# - Avoid scheduler overhead if no blocking operation is performed.
-			task.run(*
+			task.run(*arguments)
 			
 			# logger.debug "Initial execution of task #{fiber} complete (#{result} -> #{fiber.alive?})..."
 			return task
@@ -132,12 +130,13 @@ module Async
 			return monitor
 		end
 		
-		#
-		
-		
-		
-		
-		
+		# Interrupt the reactor at the earliest convenience. Can be called from a different thread safely.
+		def interrupt
+			@guard.synchronize do
+				unless @interrupted
+					@interrupted = true
+					@selector.wakeup
+				end
 			end
 		end
 		
@@ -155,73 +154,95 @@ module Async
 		end
 		
 		def finished?
-			# I'm not sure if checking `@running.empty?` is really required.
+			# TODO I'm not sure if checking `@running.empty?` is really required.
 			super && @ready.empty? && @running.empty?
 		end
 		
-		# Run
-		#
-		
-		
+		# Run one iteration of the event loop.
+		# @param timeout [Float | nil] the maximum timeout, or if nil, indefinite.
+		# @return [Boolean] whether there is more work to do.
+		def run_once(timeout = nil)
+			logger.debug(self) {"@ready = #{@ready} @running = #{@running}"}
 			
-			@
-			
-			
-			
-			until @stopped
-				logger.debug(self) {"@ready = #{@ready} @running = #{@running}"}
+			if @ready.any?
+				# running used to correctly answer on `finished?`, and to reuse Array object.
+				@running, @ready = @ready, @running
 				
-				
-				
-				@running, @ready = @ready, @running
-				
-				@running.each do |fiber|
-					fiber.resume if fiber.alive?
-				end
-				
-				@running.clear
+				@running.each do |fiber|
+					fiber.resume if fiber.alive?
 				end
 				
-				
-				
-				
-				
-				
+				@running.clear
+			end
+			
+			if @ready.empty?
+				interval = @timers.wait_interval
+			else
+				# if there are tasks ready to execute, don't sleep:
+				interval = 0
+			end
+			
+			# If there is no interval to wait (thus no timers), and no tasks, we could be done:
+			if interval.nil?
+				if self.finished?
+					# If there is nothing to do, then finish:
+					return false
 				end
 				
-				#
-				
-				
-				
-				
-				
-				
-				
-				
+				# Allow the user to specify a maximum interval if we would otherwise be sleeping indefinitely:
+				interval = timeout
+			elsif interval < 0
+				# We have timers ready to fire, don't sleep in the selctor:
+				interval = 0
+			elsif timeout and interval > timeout
+				interval = timeout
+			end
+			
+			logger.debug(self) {"Selecting with #{@children&.size} children with interval = #{interval ? interval.round(2) : 'infinite'}..."}
+			if monitors = @selector.select(interval)
+				monitors.each do |monitor|
+					monitor.value.resume
 				end
-				
-				
-				
-				
-				
-				
+			end
+			
+			@timers.fire
+			
+			# We check and clear the interrupted flag here:
+			if @interrupted
+				@guard.synchronize do
+					@interrupted = false
 				end
 				
-				
+				self.stop
+				
+				return false
+			end
+			
+			return true
+		end
+		
+		# Run the reactor until either all tasks complete or {#pause} or {#stop} is
+		# invoked. Proxies arguments to {#async} immediately before entering the
+		# loop, if a block is provided.
+		def run(*arguments, &block)
+			raise RuntimeError, 'Reactor has been closed' if @selector.nil?
+			
+			initial_task = self.async(*arguments, &block) if block_given?
+			
+			while self.run_once
+				# Round and round we go!
 			end
 			
 			return initial_task
 		ensure
 			logger.debug(self) {"Exiting run-loop because #{$! ? $! : 'finished'}."}
-			
-			@stopped = true
 		end
-		
+		
 		# Stop each of the children tasks and close the selector.
 		#
 		# @return [void]
 		def close
-			
+			self.stop
 			
 			# TODO Should we also clear all timers?
 			@selector.close
@@ -267,12 +288,5 @@ module Async
 		ensure
 			timer.cancel if timer
 		end
-		
-		# TODO remove
-		def timeout(*args, &block)
-			warn "#{self.class}\#timeout(...) is deprecated, use #{self.class}\#with_timeout(...) instead."
-			
-			with_timeout(*args, &block)
-		end
 	end
 end
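The rewritten `#run` is now just a loop over the new `#run_once(timeout)`, which caps how long the selector may sleep and returns whether more work remains. That makes it straightforward to drive the reactor manually and interleave it with other periodic work. A rough sketch; the 0.1-second cap and the housekeeping comment are placeholders:

```ruby
require 'async'

reactor = Async::Reactor.new

reactor.async do |task|
	task.sleep 0.5
	puts "Async work finished."
end

# Drive the reactor by hand, waking at least every 0.1 seconds.
while reactor.run_once(0.1)
	# ... poll another event source, update a UI, etc.
end

reactor.close
```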
data/lib/async/semaphore.rb
CHANGED
@@ -21,10 +21,12 @@
 module Async
 	# A semaphore is used to control access to a common resource in a concurrent system. A useful way to think of a semaphore as used in the real-world systems is as a record of how many units of a particular resource are available, coupled with operations to adjust that record safely (i.e. to avoid race conditions) as units are required or become free, and, if necessary, wait until a unit of the resource becomes available.
 	class Semaphore
-		def initialize(limit = 1)
+		def initialize(limit = 1, parent: nil)
 			@count = 0
 			@limit = limit
 			@waiting = []
+			
+			@parent = parent
 		end
 		
 		# The current number of tasks that have acquired the semaphore.
@@ -47,14 +49,14 @@ module Async
 		end
 		
 		# Run an async task. Will wait until the semaphore is ready until spawning and running the task.
-		def async(*
+		def async(*arguments, parent: (@parent or Task.current), **options)
 			wait
 			
 			parent.async(**options) do |task|
 				@count += 1
 				
 				begin
-					yield task, *
+					yield task, *arguments
 				ensure
 					self.release
 				end
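`Semaphore#async` keeps forwarding positional arguments to the block (after the task) and now also accepts a `parent:`. A small sketch that limits concurrent processing of a list; the item names and the sleep are placeholders:

```ruby
require 'async'
require 'async/semaphore'

Async do
	# At most two of these tasks run at the same time.
	semaphore = Async::Semaphore.new(2)
	
	%w[a b c d e].each do |name|
		semaphore.async(name) do |task, item|
			task.sleep(0.01)
			puts "Processed #{item}"
		end
	end
end
```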
data/lib/async/task.rb
CHANGED
@@ -112,19 +112,20 @@ module Async
 		attr :status
 		
 		# Begin the execution of the task.
-		def run(*
+		def run(*arguments)
 			if @status == :initialized
 				@status = :running
-				
+				
+				@fiber.resume(*arguments)
 			else
 				raise RuntimeError, "Task already running!"
 			end
 		end
 		
-		def async(*
+		def async(*arguments, **options, &block)
 			task = Task.new(@reactor, self, **options, &block)
 			
-			task.run(*
+			task.run(*arguments)
 			
 			return task
 		end
@@ -248,11 +249,11 @@ module Async
 		end
 		
 		def make_fiber(&block)
-			Fiber.new do |*
+			Fiber.new do |*arguments|
 				set!
 				
 				begin
-					@result = yield(self, *
+					@result = yield(self, *arguments)
 					@status = :complete
 					# logger.debug(self) {"Task was completed with #{@children.size} children!"}
 				rescue Stop
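`Task#run` now resumes the fiber with the arguments passed to `#async`, and the fiber yields them to the block after the task itself. A tiny sketch; the `:payload` and `42` values are arbitrary:

```ruby
require 'async'

Async do |task|
	# Positional arguments to #async are yielded to the block after the child task:
	child = task.async(:payload, 42) do |child_task, symbol, number|
		puts "Received #{symbol.inspect} and #{number}"
	end
	
	child.wait
end
```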
data/lib/async/version.rb
CHANGED
data/lib/async/wrapper.rb
CHANGED
@@ -73,22 +73,22 @@ module Async
 			self.class.new(@io.dup, @reactor)
 		end
 		
-		def resume(*
+		def resume(*arguments)
 			# It's possible that the monitor was closed before calling resume.
 			return unless @monitor
 			
 			readiness = @monitor.readiness
 			
 			if @readable and (readiness == :r or readiness == :rw)
-				@readable.resume(*
+				@readable.resume(*arguments)
 			end
 			
 			if @writable and (readiness == :w or readiness == :rw)
-				@writable.resume(*
+				@writable.resume(*arguments)
 			end
 			
 			if @any
-				@any.resume(*
+				@any.resume(*arguments)
 			end
 		end
 		
data/lib/kernel/async.rb
CHANGED
@@ -22,7 +22,7 @@ require_relative "../async/reactor"
 
 module Kernel
 	# Run the given block of code in a task, asynchronously, creating a reactor if necessary.
-	def Async(*
-		::Async::Reactor.run(*
+	def Async(*arguments, &block)
+		::Async::Reactor.run(*arguments, &block)
 	end
 end
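`Kernel#Async` delegates to `Async::Reactor.run`, so its behaviour depends on context: at the top level it creates a reactor and blocks until it finishes, while inside an existing task it schedules a child task and returns it immediately. A short sketch of both cases:

```ruby
require 'async'

# At the top level, Kernel#Async creates a reactor and blocks until it finishes.
Async do
	# Inside a task, Kernel#Async reuses the current reactor and returns a child task.
	child = Async do |task|
		task.sleep 0.1
		:done
	end
	
	puts child.wait # prints "done"
end

puts "All tasks finished."
```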
data/spec/async/barrier_spec.rb
CHANGED
@@ -24,6 +24,8 @@ require 'async/rspec'
 
 require 'async/semaphore'
 
+require_relative 'chainable_async_examples'
+
 RSpec.describe Async::Barrier do
 	include_context Async::RSpec::Reactor
 	
@@ -107,4 +109,6 @@ RSpec.describe Async::Barrier do
 			subject.wait
 		end
 	end
+	
+	it_behaves_like 'chainable async'
 end
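The new shared-example file `spec/async/chainable_async_examples.rb` (+12 lines) is not expanded in this diff. Based only on how it is included by the barrier, queue and semaphore specs, where each `subject` exposes `#async(parent:)`, a plausible minimal version might look like the following; this is a guess for illustration, not the gem's actual spec code:

```ruby
RSpec.shared_examples 'chainable async' do
	let(:parent) {double}
	
	it 'should schedule tasks via the given parent' do
		expect(parent).to receive(:async).and_return(nil)
		
		subject.async(parent: parent) do
		end
	end
end
```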
data/spec/async/queue_spec.rb
CHANGED
@@ -21,8 +21,10 @@
 require 'async'
 require 'async/queue'
 require 'async/rspec'
+require 'async/semaphore'
 
 require_relative 'condition_examples'
+require_relative 'chainable_async_examples'
 
 RSpec.shared_context Async::Queue do
 	it 'should process items in order' do
@@ -48,6 +50,41 @@ RSpec.shared_context Async::Queue do
 			expect(item).to be 1
 		end
 	end
+	
+	context 'with semaphore' do
+		let(:capacity) {2}
+		let(:semaphore) {Async::Semaphore.new(capacity)}
+		let(:repeats) {capacity * 2}
+		
+		it 'should process several items limited by a semaphore' do
+			count = 0
+			
+			Async do
+				repeats.times do
+					subject.enqueue :item
+				end
+				
+				subject.enqueue nil
+			end
+			
+			subject.async(parent: semaphore) do |task|
+				count += 1
+			end
+			
+			expect(count).to be == repeats
+		end
+	end
+	
+	it_behaves_like 'chainable async' do
+		before do
+			subject.enqueue(:item)
+			
+			# The limited queue may block.
+			Async do
+				subject.enqueue(nil)
+			end
+		end
+	end
 end
 
 RSpec.describe Async::Queue do
data/spec/async/reactor_spec.rb
CHANGED
@@ -19,6 +19,8 @@
 # THE SOFTWARE.
 
 require 'async'
+require 'async/rspec/reactor'
+
 require 'benchmark/ips'
 
 RSpec.describe Async::Reactor do
@@ -45,8 +47,39 @@ RSpec.describe Async::Reactor do
 		end
 	end
 	
+	describe '#run_once' do
+		it "can run the reactor" do
+			# Run the reactor for 1 second:
+			task = subject.async do |task|
+				task.yield
+			end
+			
+			expect(task).to be_running
+			
+			# This will resume the task, and then the reactor will be finished.
+			expect(subject.run_once).to be false
+			
+			expect(task).to be_finished
+		end
+		
+		it "can run one iteration" do
+			state = nil
+			
+			subject.async do |task|
+				state = :started
+				task.yield
+				state = :finished
+			end
+			
+			expect(state).to be :started
+			
+			subject.run_once
+			expect(state).to be :finished
+		end
+	end
+	
 	describe '#stop' do
-		it "can
+		it "can stop the reactor" do
 			state = nil
 			
 			subject.async do |task|
data/spec/async/semaphore_spec.rb
CHANGED
@@ -22,6 +22,8 @@ require 'async/semaphore'
 require 'async/barrier'
 require 'async/rspec'
 
+require_relative 'chainable_async_examples'
+
 RSpec.describe Async::Semaphore do
 	include_context Async::RSpec::Reactor
 	
@@ -160,4 +162,6 @@ RSpec.describe Async::Semaphore do
 			barrier.wait
 		end
 	end
+	
+	it_behaves_like 'chainable async'
 end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: async
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.24.0
 platform: ruby
 authors:
 - Samuel Williams
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2019-
+date: 2019-12-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: nio4r
@@ -132,7 +132,6 @@ extensions: []
 extra_rdoc_files: []
 files:
 - ".editorconfig"
-- ".github/FUNDING.yml"
 - ".github/workflows/tests.yml"
 - ".gitignore"
 - ".rspec"
@@ -181,6 +180,7 @@ files:
 - papers/1982 Grossman.pdf
 - papers/1987 ODell.pdf
 - spec/async/barrier_spec.rb
+- spec/async/chainable_async_examples.rb
 - spec/async/clock_spec.rb
 - spec/async/condition_examples.rb
 - spec/async/condition_spec.rb
@@ -218,12 +218,13 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.0.
+rubygems_version: 3.0.6
 signing_key:
 specification_version: 4
 summary: Async is an asynchronous I/O framework based on nio4r.
 test_files:
 - spec/async/barrier_spec.rb
+- spec/async/chainable_async_examples.rb
 - spec/async/clock_spec.rb
 - spec/async/condition_examples.rb
 - spec/async/condition_spec.rb
data/.github/FUNDING.yml
DELETED