async 2.28.1 → 2.29.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/lib/async/condition.rb +14 -32
- data/lib/async/notification.rb +7 -5
- data/lib/async/priority_queue.rb +237 -0
- data/lib/async/promise.rb +188 -0
- data/lib/async/queue.rb +29 -109
- data/lib/async/task.rb +62 -37
- data/lib/async/variable.rb +5 -0
- data/lib/async/version.rb +1 -1
- data/lib/async/waiter.rb +2 -0
- data/lib/kernel/sync.rb +6 -2
- data/readme.md +10 -6
- data/releases.md +78 -0
- data.tar.gz.sig +0 -0
- metadata +4 -3
- metadata.gz.sig +0 -0
- data/agent.md +0 -63
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f3ecf839913862d1e1638fd14c067e57b4e68a469bc8805d6dd4d568829ab02c
+  data.tar.gz: 10a3624e0c564f7d895f77738e88b8a2e0b89ca58fd285488b32bf57509ff3de
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4b3ad1290e807941399e29efe94faf04bbc721a4d59b06292abaac482acc27fac25834629a2869a343439594cd8f0576c01af3ccf9bc9f5661885f8fdbf3bcf1
+  data.tar.gz: 89c92f9cf32ab403100ea4c00e3d93a2257aac2dd036473ecde04efb5182cffb2d91e76185b7d48f24f9ccceaf3e8dec6609549e96b72225012a964d5d93c207
checksums.yaml.gz.sig
CHANGED
Binary file
data/lib/async/condition.rb
CHANGED
@@ -13,65 +13,47 @@ module Async
 	class Condition
 		# Create a new condition.
 		def initialize
-			@
+			@ready = ::Thread::Queue.new
 		end

-		class FiberNode < List::Node
-			def initialize(fiber)
-				@fiber = fiber
-			end
-
-			def transfer(*arguments)
-				@fiber.transfer(*arguments)
-			end
-
-			def alive?
-				@fiber.alive?
-			end
-		end
-
-		private_constant :FiberNode
-
 		# Queue up the current fiber and wait on yielding the task.
 		# @returns [Object]
 		def wait
-			@
-				Fiber.scheduler.transfer
-			end
+			@ready.pop
 		end

-		# @
+		# @returns [Boolean] If there are no fibers waiting on this condition.
 		def empty?
-
-
-			@waiting.empty?
+			@ready.num_waiting.zero?
 		end

 		# @returns [Boolean] Is any fiber waiting on this notification?
 		def waiting?
-
+			!self.empty?
 		end

 		# Signal to a given task that it should resume operations.
 		# @parameter value [Object | Nil] The value to return to the waiting fibers.
 		def signal(value = nil)
-			return if
+			return if empty?

-
+			ready = self.exchange

-
-
+			ready.num_waiting.times do
+				ready.push(value)
 			end

+			ready.close
+
 			return nil
 		end

 		protected

 		def exchange
-
-			@
-			return
+			ready = @ready
+			@ready = ::Thread::Queue.new
+			return ready
 		end
 	end
 end
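The reworked `Condition` keeps its public API but replaces the intrusive `FiberNode` list with a `::Thread::Queue`: waiters block on `pop`, and `signal` swaps in a fresh queue, pushes the value once per waiter, then closes the old queue. A minimal usage sketch (illustrative only, not part of the diff; the wait/signal interface is unchanged):

``` ruby
require "async"

Async do |task|
	condition = Async::Condition.new
	
	waiter = task.async do
		# Blocks on the condition's internal Thread::Queue until signalled:
		value = condition.wait
		puts "Received: #{value}"
	end
	
	# Wakes every fiber currently waiting, handing each the value:
	condition.signal(:ready)
	
	waiter.wait
end
```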
data/lib/async/notification.rb
CHANGED
@@ -12,23 +12,25 @@ module Async
 		# Signal to a given task that it should resume operations.
 		#
 		# @returns [Boolean] if a task was signalled.
-		def signal(value = nil
-			return false if
+		def signal(value = nil)
+			return false if empty?

 			Fiber.scheduler.push Signal.new(self.exchange, value)

 			return true
 		end

-		Signal = Struct.new(:
+		Signal = Struct.new(:ready, :value) do
 			def alive?
 				true
 			end

 			def transfer
-
-
+				ready.num_waiting.times do
+					ready.push(value)
 				end
+
+				ready.close
 			end
 		end

data/lib/async/priority_queue.rb
ADDED
@@ -0,0 +1,237 @@
+# frozen_string_literal: true
+
+# Released under the MIT License.
+# Copyright, 2025, by Shopify Inc.
+# Copyright, 2025, by Samuel Williams.
+
+require "io/event/priority_heap"
+require "thread"
+
+require_relative "queue"
+
+module Async
+	# A queue which allows items to be processed in priority order of consumers.
+	#
+	# Unlike a traditional priority queue where items have priorities, this queue
+	# assigns priorities to consumers (fibers waiting to dequeue). Higher priority
+	# consumers are served first when items become available.
+	#
+	# @public Since *Async v2*.
+	class PriorityQueue
+		ClosedError = Queue::ClosedError
+
+		# A waiter represents a fiber waiting to dequeue with a given priority.
+		Waiter = Struct.new(:fiber, :priority, :sequence, :condition, :value) do
+			include Comparable
+
+			def <=>(other)
+				# Higher priority comes first, then FIFO for equal priorities:
+				if priority == other.priority
+					# Use sequence for FIFO behavior (lower sequence = earlier):
+					sequence <=> other.sequence
+				else
+					other.priority <=> priority # Reverse for max-heap behavior
+				end
+			end
+
+			def signal(value)
+				self.value = value
+				condition.signal
+			end
+
+			def wait_for_value(mutex)
+				condition.wait(mutex)
+				return self.value
+			end
+
+			# Invalidate this waiter, making it unusable and detectable as abandoned.
+			def invalidate!
+				self.fiber = nil
+			end
+
+			# Check if this waiter has been invalidated.
+			def valid?
+				self.fiber&.alive?
+			end
+		end
+
+		# Create a new priority queue.
+		#
+		# @parameter parent [Interface(:async) | Nil] The parent task to use for async operations.
+		def initialize(parent: nil)
+			@items = []
+			@closed = false
+			@parent = parent
+			@waiting = IO::Event::PriorityHeap.new
+			@sequence = 0
+
+			@mutex = Mutex.new
+		end
+
+		# Close the queue, causing all waiting tasks to return `nil`.
+		# Any subsequent calls to {enqueue} will raise an exception.
+		def close
+			@mutex.synchronize do
+				@closed = true
+
+				# Signal all waiting fibers with nil, skipping dead/invalid ones:
+				while waiter = @waiting.pop
+					waiter.signal(nil)
+				end
+			end
+		end
+
+		# @attribute [Array] The items in the queue.
+		attr :items
+
+		# @returns [Integer] The number of items in the queue.
+		def size
+			@items.size
+		end
+
+		# @returns [Boolean] Whether the queue is empty.
+		def empty?
+			@items.empty?
+		end
+
+		# @returns [Integer] The number of fibers waiting to dequeue.
+		def waiting
+			@mutex.synchronize do
+				@waiting.size
+			end
+		end
+
+		# Add an item to the queue.
+		#
+		# @parameter item [Object] The item to add to the queue.
+		def push(item)
+			@mutex.synchronize do
+				if @closed
+					raise ClosedError, "Cannot push items to a closed queue."
+				end
+
+				@items << item
+
+				# Wake up the highest priority waiter if any, skipping dead/invalid waiters:
+				while waiter = @waiting.pop
+					if waiter.valid?
+						value = @items.shift
+						waiter.signal(value)
+						break
+					end
+					# Dead/invalid waiter discarded, try next one.
+				end
+			end
+		end
+
+		# Compatibility with {::Queue#push}.
+		def <<(item)
+			self.push(item)
+		end
+
+		# Add multiple items to the queue.
+		#
+		# @parameter items [Array] The items to add to the queue.
+		def enqueue(*items)
+			@mutex.synchronize do
+				if @closed
+					raise ClosedError, "Cannot enqueue items to a closed queue."
+				end
+
+				@items.concat(items)
+
+				# Wake up waiting fibers in priority order, skipping dead/invalid waiters:
+				while !@items.empty? && (waiter = @waiting.pop)
+					if waiter.valid?
+						value = @items.shift
+						waiter.signal(value)
+					end
+					# Dead/invalid waiter discarded, continue to next one.
+				end
+			end
+		end
+
+		# Remove and return the next item from the queue.
+		#
+		# If the queue is empty, this method will block until an item is available.
+		# Fibers are served in priority order, with higher priority fibers receiving
+		# items first.
+		#
+		# @parameter priority [Numeric] The priority of this consumer (higher = served first).
+		# @returns [Object] The next item in the queue.
+		def dequeue(priority: 0)
+			@mutex.synchronize do
+				# If queue is closed and empty, return nil immediately:
+				if @closed && @items.empty?
+					return nil
+				end
+
+				# Fast path: if items available and either no waiters or we have higher priority:
+				unless @items.empty?
+					head = @waiting.peek
+					if head.nil? or priority > head.priority
+						return @items.shift
+					end
+				end
+
+				# Need to wait - create our own condition variable and add to waiting queue:
+				sequence = @sequence
+				@sequence += 1
+
+				condition = ConditionVariable.new
+
+				begin
+					waiter = Waiter.new(Fiber.current, priority, sequence, condition, nil)
+					@waiting.push(waiter)
+
+					# Wait for our specific condition variable to be signaled:
+					return waiter.wait_for_value(@mutex)
+				ensure
+					waiter&.invalidate!
+				end
+			end
+		end
+
+		# Compatibility with {::Queue#pop}.
+		#
+		# @parameter priority [Numeric] The priority of this consumer.
+		def pop(priority: 0)
+			self.dequeue(priority: priority)
+		end
+
+		# Process each item in the queue.
+		#
+		# @asynchronous Executes the given block concurrently for each item.
+		#
+		# @parameter priority [Numeric] The priority for processing items.
+		# @parameter parent [Interface(:async) | Nil] The parent task to use for async operations.
+		# @parameter options [Hash] The options to pass to the task.
+		# @yields {|task| ...} When the system is idle, the block will be executed in a new task.
+		def async(priority: 0, parent: (@parent or Task.current), **options, &block)
+			while item = self.dequeue(priority: priority)
+				parent.async(item, **options, &block)
+			end
+		end
+
+		# Enumerate each item in the queue.
+		#
+		# @parameter priority [Numeric] The priority for dequeuing items.
+		def each(priority: 0)
+			while item = self.dequeue(priority: priority)
+				yield item
+			end
+		end
+
+		# Signal the queue with a value, the same as {#enqueue}.
+		def signal(value = nil)
+			self.enqueue(value)
+		end
+
+		# Wait for an item to be available, the same as {#dequeue}.
+		#
+		# @parameter priority [Numeric] The priority of this consumer.
+		def wait(priority: 0)
+			self.dequeue(priority: priority)
+		end
+	end
+end
data/lib/async/promise.rb
ADDED
@@ -0,0 +1,188 @@
+# frozen_string_literal: true
+
+# Released under the MIT License.
+# Copyright, 2025, by Shopify Inc.
+# Copyright, 2025, by Samuel Williams.
+
+module Async
+	# A promise represents a value that will be available in the future.
+	# Unlike Condition, once resolved (or rejected), all future waits return immediately
+	# with the stored value or raise the stored exception.
+	#
+	# This is thread-safe and integrates with the fiber scheduler.
+	#
+	# @public Since *Async v2*.
+	class Promise
+		# Create a new promise.
+		def initialize
+			# nil = pending, :completed = success, :failed = failure, :cancelled = cancelled:
+			@resolved = nil
+
+			# Stores either the result value or the exception:
+			@value = nil
+
+			# Track how many fibers are currently waiting:
+			@waiting = 0
+
+			@mutex = Mutex.new
+			@condition = ConditionVariable.new
+		end
+
+		# @returns [Boolean] Whether the promise has been resolved or rejected.
+		def resolved?
+			@mutex.synchronize {!!@resolved}
+		end
+
+		# @returns [Symbol | Nil] The internal resolved state (:completed, :failed, :cancelled, or nil if pending).
+		# @private For internal use by Task.
+		def resolved
+			@mutex.synchronize {@resolved}
+		end
+
+		# @returns [Boolean] Whether the promise has been cancelled.
+		def cancelled?
+			@mutex.synchronize {@resolved == :cancelled}
+		end
+
+		# @returns [Boolean] Whether the promise failed with an exception.
+		def failed?
+			@mutex.synchronize {@resolved == :failed}
+		end
+
+		# @returns [Boolean] Whether the promise has completed successfully.
+		def completed?
+			@mutex.synchronize {@resolved == :completed}
+		end
+
+		# @returns [Boolean] Whether any fibers are currently waiting for this promise.
+		def waiting?
+			@mutex.synchronize {@waiting > 0}
+		end
+
+		# Artificially mark that someone is waiting (useful for suppressing warnings).
+		# @private Internal use only.
+		def suppress_warnings!
+			@mutex.synchronize {@waiting += 1}
+		end
+
+		# Non-blocking access to the current value. Returns nil if not yet resolved.
+		# Does not raise exceptions even if the promise was rejected or cancelled.
+		# For resolved promises, returns the raw stored value (result, exception, or cancel exception).
+		#
+		# @returns [Object | Nil] The stored value, or nil if pending.
+		def value
+			@mutex.synchronize {@resolved ? @value : nil}
+		end
+
+		# Wait for the promise to be resolved and return the value.
+		# If already resolved, returns immediately. If rejected, raises the stored exception.
+		#
+		# @returns [Object] The resolved value.
+		# @raises [Exception] The rejected or cancelled exception.
+		def wait
+			@mutex.synchronize do
+				# Increment waiting count:
+				@waiting += 1
+
+				begin
+					# Wait for resolution if not already resolved:
+					@condition.wait(@mutex) unless @resolved
+
+					# Return value or raise exception based on resolution type:
+					if @resolved == :completed
+						return @value
+					else
+						# Both :failed and :cancelled store exceptions in @value
+						raise @value
+					end
+				ensure
+					# Decrement waiting count when done:
+					@waiting -= 1
+				end
+			end
+		end
+
+		# Resolve the promise with a value.
+		# All current and future waiters will receive this value.
+		# Can only be called once - subsequent calls are ignored.
+		#
+		# @parameter value [Object] The value to resolve the promise with.
+		def resolve(value)
+			@mutex.synchronize do
+				return if @resolved
+
+				@value = value
+				@resolved = :completed
+
+				# Wake up all waiting fibers:
+				@condition.broadcast
+			end
+
+			return value
+		end
+
+		# Reject the promise with an exception.
+		# All current and future waiters will receive this exception.
+		# Can only be called once - subsequent calls are ignored.
+		#
+		# @parameter exception [Exception] The exception to reject the promise with.
+		def reject(exception)
+			@mutex.synchronize do
+				return if @resolved
+
+				@value = exception
+				@resolved = :failed
+
+				# Wake up all waiting fibers:
+				@condition.broadcast
+			end
+
+			return nil
+		end
+
+		# Exception used to indicate cancellation.
+		class Cancel < Exception
+		end
+
+		# Cancel the promise, indicating cancellation.
+		# All current and future waiters will receive nil.
+		# Can only be called on pending promises - no-op if already resolved.
+		def cancel(exception = Cancel.new("Promise was cancelled!"))
+			@mutex.synchronize do
+				# No-op if already in any final state
+				return if @resolved
+
+				@value = exception
+				@resolved = :cancelled
+
+				# Wake up all waiting fibers:
+				@condition.broadcast
+			end
+
+			return nil
+		end
+
+		# Resolve the promise with the result of the block.
+		# If the block raises an exception, the promise will be rejected.
+		# If the promise was already resolved, the block will not be called.
+		# @yields {...} The block to call to resolve the promise.
+		# @returns [Object] The result of the block.
+		def fulfill(&block)
+			raise "Promise already resolved!" if @resolved
+
+			begin
+				return self.resolve(yield)
+			rescue Cancel => exception
+				return self.cancel(exception)
+			rescue => error
+				return self.reject(error)
+			rescue Exception => exception
+				self.reject(exception)
+				raise
+			ensure
+				# Handle non-local exits (throw, etc.) that bypass normal flow:
+				self.resolve(nil) unless @resolved
+			end
+		end
+	end
+end
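`Promise#fulfill` is the producer-side convenience: a normal return resolves the promise, a `StandardError` rejects it, and `Promise::Cancel` cancels it, while `wait` on the consumer side either returns the value or re-raises. A hedged sketch of that round trip (illustrative only, not taken from the diff):

``` ruby
require "async"
require "async/promise"

promise = Async::Promise.new

Async do |task|
	# Producer: route the block's outcome into the promise.
	task.async do
		promise.fulfill do
			21 * 2 # Raising here would reject the promise instead.
		end
	end
	
	# Consumer: returns the resolved value, or re-raises a rejection.
	puts promise.wait       # => 42
	puts promise.completed? # => true
end
```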
data/lib/async/queue.rb
CHANGED
@@ -10,10 +10,14 @@
 require_relative "notification"

 module Async
-	# A queue which allows items to be processed in order.
+	# A thread-safe queue which allows items to be processed in order.
+	#
+	# This implementation uses Thread::Queue internally for thread safety while
+	# maintaining compatibility with the fiber scheduler.
 	#
 	# It has a compatible interface with {Notification} and {Condition}, except that it's multi-value.
 	#
+	# @asynchronous This class is thread-safe.
 	# @public Since *Async v1*.
 	class Queue
 		# An error raised when trying to enqueue items to a closed queue.
@@ -21,53 +25,39 @@ module Async
 		class ClosedError < RuntimeError
 		end

-		# Create a new queue.
+		# Create a new thread-safe queue.
 		#
 		# @parameter parent [Interface(:async) | Nil] The parent task to use for async operations.
-
-
-			@items = []
-			@closed = false
+		def initialize(parent: nil, delegate: Thread::Queue.new)
+			@delegate = delegate
 			@parent = parent
-			@available = available
 		end

 		# @returns [Boolean] Whether the queue is closed.
 		def closed?
-			@closed
+			@delegate.closed?
 		end

 		# Close the queue, causing all waiting tasks to return `nil`. Any subsequent calls to {enqueue} will raise an exception.
 		def close
-			@
-
-			while @available.waiting?
-				@available.signal(nil)
-			end
+			@delegate.close
 		end

-		# @attribute [Array] The items in the queue.
-		attr :items
-
 		# @returns [Integer] The number of items in the queue.
 		def size
-			@
+			@delegate.size
 		end

 		# @returns [Boolean] Whether the queue is empty.
 		def empty?
-			@
+			@delegate.empty?
 		end

 		# Add an item to the queue.
 		def push(item)
-
-
-
-
-			@items << item
-
-			@available.signal unless self.empty?
+			@delegate.push(item)
+		rescue ClosedQueueError
+			raise ClosedError, "Cannot enqueue items to a closed queue!"
 		end

 		# Compatibility with {::Queue#push}.
@@ -77,31 +67,19 @@ module Async

 		# Add multiple items to the queue.
 		def enqueue(*items)
-
-
-
-
-			@items.concat(items)
-
-			@available.signal unless self.empty?
+			items.each {|item| @delegate.push(item)}
+		rescue ClosedQueueError
+			raise ClosedError, "Cannot enqueue items to a closed queue!"
 		end

 		# Remove and return the next item from the queue.
 		def dequeue
-
-			if @closed
-				return nil
-			end
-
-				@available.wait
-			end
-
-			@items.shift
+			@delegate.pop
 		end

 		# Compatibility with {::Queue#pop}.
-		def pop
-
+		def pop(...)
+			@delegate.pop(...)
 		end

 		# Process each item in the queue.
@@ -136,7 +114,8 @@ module Async
 			end
 		end

-	# A queue which limits the number of items that can be enqueued.
+	# A thread-safe queue which limits the number of items that can be enqueued.
+	#
 	# @public Since *Async v1*.
 	class LimitedQueue < Queue
 		# @private This exists purely for emitting a warning.
@@ -149,78 +128,19 @@ module Async
 		# Create a new limited queue.
 		#
 		# @parameter limit [Integer] The maximum number of items that can be enqueued.
-		# @parameter full [Notification] The notification to use for signaling when the queue is full.
-		def initialize(limit = 1,
-			super(**options)
-
-			@limit = limit
-			@full = full
+		# @parameter full [Notification] The notification to use for signaling when the queue is full. (ignored, for compatibility)
+		def initialize(limit = 1, **options)
+			super(**options, delegate: Thread::SizedQueue.new(limit))
 		end

 		# @attribute [Integer] The maximum number of items that can be enqueued.
-
-
-		# Close the queue, causing all waiting tasks to return `nil`. Any subsequent calls to {enqueue} will raise an exception.
-		# Also signals all tasks waiting for the queue to be full.
-		def close
-			super
-
-			while @full.waiting?
-				@full.signal(nil)
-			end
+		def limit
+			@delegate.max
 		end

 		# @returns [Boolean] Whether trying to enqueue an item would block.
 		def limited?
-			!@closed && @
-		end
-
-		# Add an item to the queue.
-		#
-		# If the queue is full, this method will block until there is space available.
-		#
-		# @parameter item [Object] The item to add to the queue.
-		def push(item)
-			while limited?
-				@full.wait
-			end
-
-			super
-		end
-
-		# Add multiple items to the queue.
-		#
-		# If the queue is full, this method will block until there is space available.
-		#
-		# @parameter items [Array] The items to add to the queue.
-		def enqueue(*items)
-			while !items.empty?
-				while limited?
-					@full.wait
-				end
-
-				if @closed
-					raise ClosedError, "Cannot enqueue items to a closed queue."
-				end
-
-				available = @limit - @items.size
-				@items.concat(items.shift(available))
-
-				@available.signal unless self.empty?
-			end
-		end
-
-		# Remove and return the next item from the queue.
-		#
-		# If the queue is empty, this method will block until an item is available.
-		#
-		# @returns [Object] The next item in the queue.
-		def dequeue
-			item = super
-
-			@full.signal
-
-			return item
+			!@delegate.closed? && @delegate.size >= @delegate.max
 		end
 	end
 end
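With the delegate-based implementation, `Async::Queue` and `Async::LimitedQueue` inherit their blocking, closing and error behaviour from `Thread::Queue` and `Thread::SizedQueue`, which are fiber-scheduler aware. A small sketch of the observable behaviour (assumed usage, not part of the diff):

``` ruby
require "async"

Async do |task|
	# LimitedQueue now delegates to Thread::SizedQueue, so a full queue
	# blocks the producing fiber until a consumer catches up.
	queue = Async::LimitedQueue.new(2)
	
	consumer = task.async do
		while item = queue.dequeue
			puts "Consumed: #{item}"
		end
	end
	
	3.times {|i| queue.push(i)} # The third push waits for the consumer.
	queue.close                 # A closed, empty queue makes dequeue return nil.
	
	consumer.wait
end
```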
data/lib/async/task.rb
CHANGED
@@ -7,12 +7,14 @@
 # Copyright, 2020, by Patrik Wenger.
 # Copyright, 2023, by Math Ieu.
 # Copyright, 2025, by Shigeru Nakajima.
+# Copyright, 2025, by Shopify Inc.

 require "fiber"
 require "console"

 require_relative "node"
 require_relative "condition"
+require_relative "promise"
 require_relative "stop"

 Fiber.attr_accessor :async_task
@@ -63,14 +65,24 @@ module Async

 			# These instance variables are critical to the state of the task.
 			# In the initialized state, the @block should be set, but the @fiber should be nil.
-			# In the running state, the @fiber should be set.
+			# In the running state, the @fiber should be set, and @block should be nil.
 			# In a finished state, the @block should be nil, and the @fiber should be nil.
 			@block = block
 			@fiber = nil

-			@
-
-
+			@promise = Promise.new
+
+			# Handle finished: parameter for backward compatibility:
+			case finished
+			when false
+				# `finished: false` suppresses warnings for expected task failures:
+				@promise.suppress_warnings!
+			when nil
+				# `finished: nil` is the default, no special handling:
+			else
+				# All other `finished:` values are deprecated:
+				warn("finished: argument with non-false value is deprecated and will be removed.", uplevel: 1, category: :deprecated) if $VERBOSE
+			end

 			@defer_stop = nil
 		end
@@ -109,7 +121,7 @@ module Async

 		# @returns [String] A description of the task and it's current status.
 		def to_s
-			"\#<#{self.description} (#{
+			"\#<#{self.description} (#{self.status})>"
 		end

 		# @deprecated Prefer {Kernel#sleep} except when compatibility with `stable-v1` is required.
@@ -146,22 +158,22 @@ module Async

 		# @returns [Boolean] Whether the task is running.
 		def running?
-
+			self.alive?
 		end

 		# @returns [Boolean] Whether the task failed with an exception.
 		def failed?
-			@
+			@promise.failed?
 		end

 		# @returns [Boolean] Whether the task has been stopped.
 		def stopped?
-			@
+			@promise.cancelled?
 		end

 		# @returns [Boolean] Whether the task has completed execution and generated a result.
 		def completed?
-			@
+			@promise.completed?
 		end

 		# Alias for {#completed?}.
@@ -170,20 +182,32 @@ module Async
 		end

 		# @attribute [Symbol] The status of the execution of the task, one of `:initialized`, `:running`, `:complete`, `:stopped` or `:failed`.
-
+		def status
+			case @promise.resolved
+			when :cancelled
+				:stopped
+			when :failed
+				:failed
+			when :completed
+				:completed
+			when nil
+				self.running? ? :running : :initialized
+			end
+		end

 		# Begin the execution of the task.
 		#
 		# @raises [RuntimeError] If the task is already running.
 		def run(*arguments)
-
-
+			# Move from initialized to running by clearing @block
+			if block = @block
+				@block = nil

 				schedule do
-
+					block.call(self, *arguments)
 				rescue => error
 					# I'm not completely happy with this overhead, but the alternative is to not log anything which makes debugging extremely difficult. Maybe we can introduce a debug wrapper which adds extra logging.
-
+					unless @promise.waiting?
 						warn(self, "Task may have ended with unhandled exception.", exception: error)
 					end

@@ -225,24 +249,29 @@ module Async
 		#
 		# @raises [RuntimeError] If the task's fiber is the current fiber.
 		# @returns [Object] The final expression/result of the task's block.
+		# @asynchronous This method is thread-safe.
 		def wait
 			raise "Cannot wait on own fiber!" if Fiber.current.equal?(@fiber)

-			#
-
-			@
-
-
-
-			if @status == :failed
-				raise @result
-			else
-				return @result
+			# Wait for the task to complete - Promise handles all the complexity:
+			begin
+				@promise.wait
+			rescue Promise::Cancel
+				# For backward compatibility, stopped tasks return nil
+				return nil
 			end
 		end

 		# Access the result of the task without waiting. May be nil if the task is not completed. Does not raise exceptions.
-
+		def result
+			value = @promise.value
+			# For backward compatibility, return nil for stopped tasks
+			if @promise.cancelled?
+				nil
+			else
+				value
+			end
+		end

 		# Stop the task and all of its children.
 		#
@@ -375,29 +404,25 @@ module Async

 			# Attempt to remove this node from the task tree.
 			consume
-
-			# If this task was being used as a future, signal completion here:
-			if @finished
-				@finished.signal(self)
-				@finished = nil
-			end
 		end

 		# State transition into the completed state.
 		def completed!(result)
-
-			@
+			# Resolve the promise with the result:
+			@promise&.resolve(result)
 		end

 		# State transition into the failed state.
 		def failed!(exception = false)
-
-			@
+			# Reject the promise with the exception:
+			@promise&.reject(exception)
 		end

 		def stopped!
 			# Console.info(self, status:) {"Task #{self} was stopped with #{@children&.size.inspect} children!"}
-
+
+			# Cancel the promise:
+			@promise&.cancel

 			stopped = false

@@ -442,7 +467,7 @@ module Async

 			@fiber.async_task = self

-			self.
+			(Fiber.scheduler || self.reactor).resume(@fiber)
 		end
 	end
 end
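Because the task's outcome now lives in an `Async::Promise`, `wait`, `result` and `status` are all derived from the promise's state: completed tasks return their value, failed tasks re-raise from `wait`, and stopped tasks keep returning `nil` for backwards compatibility. A sketch of the expected behaviour (illustrative, not part of the diff):

``` ruby
require "async"

Async do |task|
	child = task.async {:done}
	
	puts child.wait           # => :done
	puts child.status         # => :completed
	
	stopped = task.async {sleep(1)}
	stopped.stop
	
	puts stopped.wait.inspect # => nil — stopped tasks still return nil.
	puts stopped.status       # => :stopped
end
```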
data/lib/async/variable.rb
CHANGED
@@ -2,16 +2,21 @@

 # Released under the MIT License.
 # Copyright, 2021-2025, by Samuel Williams.
+# Copyright, 2025, by Shopify Inc.

 require_relative "condition"

 module Async
 	# A synchronization primitive that allows one task to wait for another task to resolve a value.
+	#
+	# @deprecated Use {Async::Promise} instead.
 	class Variable
 		# Create a new variable.
 		#
 		# @parameter condition [Condition] The condition to use for synchronization.
 		def initialize(condition = Condition.new)
+			warn("`Async::Variable` is deprecated, use `Async::Promise` instead.", category: :deprecated, uplevel: 1) if $VERBOSE
+
 			@condition = condition
 			@value = nil
 		end
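Since `Async::Variable` now emits a deprecation warning and `Async::Promise` covers the same resolve/wait pattern, migration is mostly a rename. A possible before/after sketch (assumed usage, not part of the diff):

``` ruby
require "async"
require "async/promise"

Async do |task|
	# Before: value = Async::Variable.new; value.resolve(:configured); value.wait
	promise = Async::Promise.new
	
	task.async do
		promise.resolve(:configured)
	end
	
	puts promise.wait # => :configured
end
```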
data/lib/async/version.rb
CHANGED
data/lib/async/waiter.rb
CHANGED
@@ -3,9 +3,11 @@
 # Released under the MIT License.
 # Copyright, 2022-2025, by Samuel Williams.
 # Copyright, 2024, by Patrik Wenger.
+# Copyright, 2025, by Shopify Inc.

 module Async
 	# A composable synchronization primitive, which allows one task to wait for a number of other tasks to complete. It can be used in conjunction with {Semaphore} and/or {Barrier}.
+	#
 	# @deprecated `Async::Waiter` is deprecated, use `Async::Barrier` instead.
 	class Waiter
 		# Create a waiter instance.
data/lib/kernel/sync.rb
CHANGED
@@ -1,9 +1,10 @@
 # frozen_string_literal: true

 # Released under the MIT License.
-# Copyright, 2019-
+# Copyright, 2019-2025, by Samuel Williams.
 # Copyright, 2020, by Brian Morearty.
 # Copyright, 2024, by Patrik Wenger.
+# Copyright, 2025, by Shopify Inc.

 require_relative "../async/reactor"

@@ -30,7 +31,10 @@ module Kernel
 		reactor = Async::Reactor.new

 		begin
-
+			# Use finished: false to suppress warnings since we're handling exceptions explicitly
+			task = reactor.async(annotation: annotation, finished: false, &block)
+			reactor.run
+			return task.wait
 		ensure
 			Fiber.set_scheduler(nil)
 		end
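The `Sync` change is internal: the bridging task is created with `finished: false`, so an exception propagated out of `task.wait` is not also reported as an unhandled task failure. The user-visible contract stays roughly as sketched below (assumed usage, not from the diff):

``` ruby
require "async"

# Runs the block inside a reactor when none is running and returns its value;
# exceptions raised inside the block propagate to the caller as usual.
result = Sync do |task|
	task.async {1}.wait + 1
end

puts result # => 2
```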
data/readme.md
CHANGED
@@ -35,6 +35,16 @@ Please see the [project documentation](https://socketry.github.io/async/) for mo

 Please see the [project releases](https://socketry.github.io/async/releases/index) for all releases.

+### v2.29.0
+
+This release introduces thread-safety as a core concept of Async. Many core classes now have thread-safe guarantees, allowing them to be used safely across multiple threads.
+
+- Thread-safe `Async::Condition` and `Async::Notification`, implemented using `Thread::Queue`.
+- Thread-safe `Async::Queue` and `Async::LimitedQueue`, implemented using `Thread::Queue` and `Thread::LimitedQueue` respectively.
+- `Async::Variable` is deprecated in favor of `Async::Promise`.
+- [Introduce `Async::Promise`](https://socketry.github.io/async/releases/index#introduce-async::promise)
+- [Introduce `Async::PriorityQueue`](https://socketry.github.io/async/releases/index#introduce-async::priorityqueue)
+
 ### v2.28.1

 - Fix race condition between `Async::Barrier#stop` and finish signalling.
@@ -81,12 +91,6 @@ Please see the [project releases](https://socketry.github.io/async/releases/inde
 - [Use `IO::Event::WorkerPool` for Blocking Operations](https://socketry.github.io/async/releases/index#use-io::event::workerpool-for-blocking-operations)
 - [Better handling of `IO#close` using `fiber_interrupt`](https://socketry.github.io/async/releases/index#better-handling-of-io#close-using-fiber_interrupt)

-### v2.24.0
-
-- Ruby v3.1 support is dropped.
-- `Async::Wrapper` which was previously deprecated, is now removed.
-- [Flexible Timeouts](https://socketry.github.io/async/releases/index#flexible-timeouts)
-
 ## See Also

 - [async-http](https://github.com/socketry/async-http) — Asynchronous HTTP client/server.
data/releases.md
CHANGED
@@ -1,5 +1,83 @@
 # Releases

+## v2.29.0
+
+This release introduces thread-safety as a core concept of Async. Many core classes now have thread-safe guarantees, allowing them to be used safely across multiple threads.
+
+- Thread-safe `Async::Condition` and `Async::Notification`, implemented using `Thread::Queue`.
+- Thread-safe `Async::Queue` and `Async::LimitedQueue`, implemented using `Thread::Queue` and `Thread::LimitedQueue` respectively.
+- `Async::Variable` is deprecated in favor of `Async::Promise`.
+
+### Introduce `Async::Promise`
+
+This release introduces the new `Async::Promise` class and refactors `Async::Task` to use promises for state management internally. This architectural improvement achieves the design goal that "a task should be a promise with attached computation and cancellation handling."
+
+- **Thread-safe promise implementation** with immutable state transitions.
+- **Consistent state management** using symbols: `:completed`, `:failed`, `:cancelled`.
+- **Promise cancellation** with `cancel()` method and `Cancel` exception class.
+- **Comprehensive test coverage** with 47 new test cases covering all edge cases.
+
+<!-- end list -->
+
+``` ruby
+require 'async/promise'
+
+# Basic promise usage - works independently of Async framework
+promise = Async::Promise.new
+
+# In another thread or fiber, resolve the promise
+Thread.new do
+	sleep(1) # Simulate some work
+	promise.resolve("Hello, World!")
+end
+
+# Wait for the result
+result = promise.wait
+puts result # => "Hello, World!"
+
+# Check promise state
+puts promise.resolved? # => true
+puts promise.completed? # => true
+```
+
+Promises bridge Thread and Fiber concurrency models - a promise resolved in one thread can be awaited in a fiber, and vice versa.
+
+### Introduce `Async::PriorityQueue`
+
+The new `Async::PriorityQueue` provides a thread-safe, fiber-aware queue where consumers can specify priority levels. Higher priority consumers are served first when items become available, with FIFO ordering maintained for equal priorities. This is useful for implementing priority-based task processing systems where critical operations need to be handled before lower priority work.
+
+``` ruby
+require 'async'
+require 'async/priority_queue'
+
+Async do
+	queue = Async::PriorityQueue.new
+
+	# Start consumers with different priorities
+	low_priority = async do
+		puts "Low priority consumer got: #{queue.dequeue(priority: 1)}"
+	end
+
+	medium_priority = async do
+		puts "Medium priority consumer got: #{queue.dequeue(priority: 5)}"
+	end
+
+	high_priority = async do
+		puts "High priority consumer got: #{queue.dequeue(priority: 10)}"
+	end
+
+	# Add items to the queue
+	queue.push("first item")
+	queue.push("second item")
+	queue.push("third item")
+
+	# Output:
+	# High priority consumer got: first item
+	# Medium priority consumer got: second item
+	# Low priority consumer got: third item
+end
+```
+
 ## v2.28.1

 - Fix race condition between `Async::Barrier#stop` and finish signalling.
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: async
 version: !ruby/object:Gem::Version
-  version: 2.
+  version: 2.29.1
 platform: ruby
 authors:
 - Samuel Williams
@@ -145,7 +145,6 @@ executables: []
 extensions: []
 extra_rdoc_files: []
 files:
-- agent.md
 - context/best-practices.md
 - context/debugging.md
 - context/getting-started.md
@@ -165,6 +164,8 @@ files:
 - lib/async/list.rb
 - lib/async/node.rb
 - lib/async/notification.rb
+- lib/async/priority_queue.rb
+- lib/async/promise.rb
 - lib/async/queue.rb
 - lib/async/reactor.rb
 - lib/async/scheduler.rb
@@ -208,7 +209,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.7.0.dev
 specification_version: 4
 summary: A concurrency framework for Ruby.
 test_files: []
metadata.gz.sig
CHANGED
Binary file
data/agent.md
DELETED
@@ -1,63 +0,0 @@
-# Agent
-
-## Context
-
-This section provides links to documentation from installed packages. It is automatically generated and may be updated by running `bake agent:context:install`.
-
-**Important:** Before performing any code, documentation, or analysis tasks, always read and apply the full content of any relevant documentation referenced in the following sections. These context files contain authoritative standards and best practices for documentation, code style, and project-specific workflows. **Do not proceed with any actions until you have read and incorporated the guidance from relevant context files.**
-
-### agent-context
-
-Install and manage context files from Ruby gems.
-
-#### [Usage Guide](.context/agent-context/usage.md)
-
-`agent-context` is a tool that helps you discover and install contextual information from Ruby gems for AI agents. Gems can provide additional documentation, examples, and guidance in a `context/` ...
-
-### decode
-
-Code analysis for documentation generation.
-
-#### [Getting Started with Decode](.context/decode/getting-started.md)
-
-The Decode gem provides programmatic access to Ruby code structure and metadata. It can parse Ruby files and extract definitions, comments, and documentation pragmas, enabling code analysis, docume...
-
-#### [Documentation Coverage](.context/decode/coverage.md)
-
-This guide explains how to test and monitor documentation coverage in your Ruby projects using the Decode gem's built-in bake tasks.
-
-#### [Ruby Documentation](.context/decode/ruby-documentation.md)
-
-This guide covers documentation practices and pragmas supported by the Decode gem for documenting Ruby code. These pragmas provide structured documentation that can be parsed and used to generate A...
-
-#### [Setting Up RBS Types and Steep Type Checking for Ruby Gems](.context/decode/types.md)
-
-This guide covers the process for establishing robust type checking in Ruby gems using RBS and Steep, focusing on automated generation from source documentation and proper validation.
-
-### sus
-
-A fast and scalable test runner.
-
-#### [Using Sus Testing Framework](.context/sus/usage.md)
-
-Sus is a modern Ruby testing framework that provides a clean, BDD-style syntax for writing tests. It's designed to be fast, simple, and expressive.
-
-#### [Mocking](.context/sus/mocking.md)
-
-There are two types of mocking in sus: `receive` and `mock`. The `receive` matcher is a subset of full mocking and is used to set expectations on method calls, while `mock` can be used to replace m...
-
-#### [Shared Test Behaviors and Fixtures](.context/sus/shared.md)
-
-Sus provides shared test contexts which can be used to define common behaviours or tests that can be reused across one or more test files.
-
-### sus-fixtures-agent-context
-
-Test fixtures for running in Async.
-
-#### [Getting Started](.context/sus-fixtures-agent-context/getting-started.md)
-
-This guide explains how to use the `sus-fixtures-agent-context` gem to test agent contexts.
-
-#### [GitHub Actions](.context/sus-fixtures-agent-context/github-actions.md)
-
-This guide explains how to integrate the `sus-fixtures-agent-context` gem with GitHub Actions for testing agent contexts.