ractor_queue 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/README.md +278 -0
- data/ext/ractor_queue/extconf.rb +10 -0
- data/ext/ractor_queue/ractor_queue.cpp +45 -0
- data/ext/ractor_queue/standard_queue.h +29 -0
- data/lib/ractor_queue/errors.rb +5 -0
- data/lib/ractor_queue/interface.rb +107 -0
- data/lib/ractor_queue/ractor_queue.rb +22 -0
- data/lib/ractor_queue/version.rb +3 -0
- data/lib/ractor_queue.rb +5 -0
- data/vendor/atomic_queue/include/atomic_queue/atomic_queue.h +677 -0
- data/vendor/atomic_queue/include/atomic_queue/defs.h +127 -0
- metadata +110 -0
checksums.yaml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
---
|
|
2
|
+
SHA256:
|
|
3
|
+
metadata.gz: 47a22f9a81a1d6ce08a5588f4d1324e0d15006a57062057ec2ac74d5f44b8888
|
|
4
|
+
data.tar.gz: c56c3feadd7d4bfa98c28f9b10705fd749ca51d5fb56ab9004530fcba6d6b984
|
|
5
|
+
SHA512:
|
|
6
|
+
metadata.gz: f381232cfc1aff09b17f0a41c35fdd22db4bbbbca1319135593c61c3f98dab892a811e59df1922e9eb0fcc55989b896460cf7587d911b6a3809306e28c7beb70
|
|
7
|
+
data.tar.gz: 335c0137bb03e242cd3fc63daad949e4a862fdf92eca3db068dbbf0728eec31e34210b833cd68376d9a9ac899c1b09f813bc871b6d083469336e3984419df705
|
data/README.md
ADDED
|
@@ -0,0 +1,278 @@
|
|
|
1
|
+
# ractor_queue
|
|
2
|
+
|
|
3
|
+
A lock-free, bounded, MPMC queue that can be shared across Ruby Ractors.
|
|
4
|
+
|
|
5
|
+
Ruby's built-in `Queue` uses a `Mutex` internally and cannot be passed as a shared reference across Ractor boundaries. `RactorQueue` has no mutex — it is always `Ractor.shareable?` and can be handed to any number of Ractors simultaneously.
|
|
6
|
+
|
|
7
|
+
```ruby
|
|
8
|
+
q = RactorQueue.new(capacity: 1024)
|
|
9
|
+
|
|
10
|
+
producer = Ractor.new(q) { |queue| 1000.times { |i| queue.push(i) } }
|
|
11
|
+
consumer = Ractor.new(q) { |queue| 1000.times { queue.pop } }
|
|
12
|
+
|
|
13
|
+
producer.value
|
|
14
|
+
consumer.value
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
Backed by the [max0x7ba/atomic_queue](https://github.com/max0x7ba/atomic_queue) C++14 header-only library via [Rice](https://github.com/jasonroelofs/rice) 4.x bindings.
|
|
18
|
+
|
|
19
|
+
---
|
|
20
|
+
|
|
21
|
+
## Installation
|
|
22
|
+
|
|
23
|
+
Add to your `Gemfile`:
|
|
24
|
+
|
|
25
|
+
```ruby
|
|
26
|
+
gem "ractor_queue"
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
Or install directly:
|
|
30
|
+
|
|
31
|
+
```sh
|
|
32
|
+
gem install ractor_queue
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
Requires MRI Ruby 3.2+ and a C++17 compiler. The native extension is built automatically on `gem install`.
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## Quick Start
|
|
40
|
+
|
|
41
|
+
```ruby
|
|
42
|
+
require "ractor_queue"
|
|
43
|
+
|
|
44
|
+
# Create a bounded queue (capacity rounds up to the next power of two, minimum 4096)
|
|
45
|
+
q = RactorQueue.new(capacity: 256)
|
|
46
|
+
|
|
47
|
+
# Non-blocking
|
|
48
|
+
q.try_push(42) # => true (enqueued)
|
|
49
|
+
q.try_push(:hello) # => true
|
|
50
|
+
q.try_pop # => 42
|
|
51
|
+
q.try_pop # => :hello
|
|
52
|
+
q.try_pop # => RactorQueue::EMPTY (queue was empty)
|
|
53
|
+
|
|
54
|
+
# Check for empty with identity comparison (never use ==)
|
|
55
|
+
v = q.try_pop
|
|
56
|
+
process(v) unless v.equal?(RactorQueue::EMPTY)
|
|
57
|
+
|
|
58
|
+
# Blocking — spin-waits until space / item is available
|
|
59
|
+
q.push(99) # => self (chainable)
|
|
60
|
+
q.pop # => 99
|
|
61
|
+
|
|
62
|
+
# Blocking with timeout
|
|
63
|
+
q.pop(timeout: 0.5) # raises RactorQueue::TimeoutError after 500 ms if still empty
|
|
64
|
+
|
|
65
|
+
# State (approximate under concurrency)
|
|
66
|
+
q.size # => Integer
|
|
67
|
+
q.empty? # => true / false
|
|
68
|
+
q.full? # => true / false
|
|
69
|
+
q.capacity # => Integer (exact)
|
|
70
|
+
|
|
71
|
+
# Always true — the queue itself is Ractor-shareable
|
|
72
|
+
Ractor.shareable?(q) # => true
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
---
|
|
76
|
+
|
|
77
|
+
## API
|
|
78
|
+
|
|
79
|
+
| Method | Returns | Notes |
|
|
80
|
+
|---|---|---|
|
|
81
|
+
| `RactorQueue.new(capacity:, validate_shareable: false)` | `RactorQueue` instance | Capacity rounds up to the next power of two (minimum 4096) |
|
|
82
|
+
| `try_push(obj)` | `true` / `false` | Non-blocking; `false` if full |
|
|
83
|
+
| `try_pop` | `obj` or `RactorQueue::EMPTY` | Non-blocking; `EMPTY` sentinel if queue was empty; `nil` if `nil` was pushed |
|
|
84
|
+
| `push(obj, timeout: nil)` | `self` | Blocks until space; raises `TimeoutError` if timeout expires |
|
|
85
|
+
| `pop(timeout: nil)` | `obj` | Blocks until item; raises `TimeoutError` if timeout expires |
|
|
86
|
+
| `size` | Integer | Approximate element count |
|
|
87
|
+
| `empty?` | Boolean | Approximate |
|
|
88
|
+
| `full?` | Boolean | Approximate |
|
|
89
|
+
| `capacity` | Integer | Exact allocated capacity |
|
|
90
|
+
|
|
91
|
+
### Errors
|
|
92
|
+
|
|
93
|
+
| Class / Constant | Meaning |
|
|
94
|
+
|---|---|
|
|
95
|
+
| `RactorQueue::EMPTY` | Sentinel returned by `try_pop` when the queue is empty. Check with `equal?`, never `==`. |
|
|
96
|
+
| `RactorQueue::TimeoutError` | Raised by `push` or `pop` when the `timeout:` deadline expires. |
|
|
97
|
+
| `RactorQueue::NotShareableError` | Raised by `push`/`try_push` when `validate_shareable: true` and the object is not Ractor-shareable. |
|
|
98
|
+
|
|
99
|
+
### `validate_shareable`
|
|
100
|
+
|
|
101
|
+
With `validate_shareable: true`, the queue raises `NotShareableError` at push time for any non-shareable object, catching mistakes before they reach a Ractor boundary:
|
|
102
|
+
|
|
103
|
+
```ruby
|
|
104
|
+
safe_q = RactorQueue.new(capacity: 64, validate_shareable: true)
|
|
105
|
+
|
|
106
|
+
safe_q.push(42) # ok — Integer is shareable
|
|
107
|
+
safe_q.push("hello".freeze) # ok — frozen String is shareable
|
|
108
|
+
safe_q.push([1, 2, 3]) # raises RactorQueue::NotShareableError
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
---
|
|
112
|
+
|
|
113
|
+
## Ractor Patterns
|
|
114
|
+
|
|
115
|
+
### 1. Single Producer / Single Consumer (1P1C)
|
|
116
|
+
|
|
117
|
+
The baseline pattern — one Ractor feeds another through a shared queue.
|
|
118
|
+
|
|
119
|
+
```ruby
|
|
120
|
+
q = RactorQueue.new(capacity: 1024)
|
|
121
|
+
|
|
122
|
+
producer = Ractor.new(q) do |queue|
|
|
123
|
+
100.times { |i| queue.push(i * i) }
|
|
124
|
+
queue.push(:done)
|
|
125
|
+
end
|
|
126
|
+
|
|
127
|
+
consumer = Ractor.new(q) do |queue|
|
|
128
|
+
results = []
|
|
129
|
+
loop do
|
|
130
|
+
v = queue.pop
|
|
131
|
+
break if v == :done
|
|
132
|
+
results << v
|
|
133
|
+
end
|
|
134
|
+
results
|
|
135
|
+
end
|
|
136
|
+
|
|
137
|
+
producer.value
|
|
138
|
+
puts consumer.value.inspect
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
### 2. Worker Pool (MPMC)
|
|
142
|
+
|
|
143
|
+
A shared job queue drained by N Ractor workers. Size the queues large enough to hold all in-flight items — chaining two small bounded queues risks deadlock (see [Concurrency Notes](#concurrency-notes)).
|
|
144
|
+
|
|
145
|
+
```ruby
|
|
146
|
+
WORKERS = 8
|
|
147
|
+
jobs = RactorQueue.new(capacity: 10_000)
|
|
148
|
+
results = RactorQueue.new(capacity: 10_000)
|
|
149
|
+
|
|
150
|
+
workers = WORKERS.times.map do
|
|
151
|
+
Ractor.new(jobs, results) do |jq, rq|
|
|
152
|
+
loop do
|
|
153
|
+
job = jq.pop(timeout: 30)
|
|
154
|
+
break if job == :stop
|
|
155
|
+
rq.push(job * job) # do work
|
|
156
|
+
end
|
|
157
|
+
end
|
|
158
|
+
end
|
|
159
|
+
|
|
160
|
+
1000.times { |i| jobs.push(i) }
|
|
161
|
+
WORKERS.times { jobs.push(:stop) }
|
|
162
|
+
|
|
163
|
+
results_list = 1000.times.map { results.pop }
|
|
164
|
+
workers.each(&:value)
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
### 3. Queue Pool (High Ractor Counts)
|
|
168
|
+
|
|
169
|
+
When many Ractors share a single bounded queue, the spin-wait backoff keeps things moving, but beyond ~2× core count you get diminishing returns from cache-line contention. Use one queue per producer/consumer pair — zero cross-pair contention, linear scaling to core count:
|
|
170
|
+
|
|
171
|
+
```ruby
|
|
172
|
+
PAIRS = 16 # 32 Ractors total
|
|
173
|
+
|
|
174
|
+
pairs = PAIRS.times.map do
|
|
175
|
+
q = RactorQueue.new(capacity: 1024)
|
|
176
|
+
p = Ractor.new(q) { |queue| 1000.times { |i| queue.push(i) } }
|
|
177
|
+
c = Ractor.new(q) { |queue| 1000.times { queue.pop } }
|
|
178
|
+
[p, c]
|
|
179
|
+
end
|
|
180
|
+
|
|
181
|
+
pairs.each { |p, c| p.value; c.value }
|
|
182
|
+
```
|
|
183
|
+
|
|
184
|
+
---
|
|
185
|
+
|
|
186
|
+
## Concurrency Notes
|
|
187
|
+
|
|
188
|
+
### Spin-wait backoff
|
|
189
|
+
|
|
190
|
+
`push` and `pop` use a two-phase backoff spin loop: the first 16 retries call `Thread.pass`; subsequent retries call `sleep(0.0001)`. The sleep actually suspends the OS thread, preventing scheduler thrashing when many Ractors are blocked on the same queue.
|
|
191
|
+
|
|
192
|
+
Because both `Thread.pass` and `sleep` execute at the Ruby level and check for pending interrupts, `Thread#raise` and `Ctrl-C` can interrupt a blocked `push` or `pop` at any point.
|
|
193
|
+
|
|
194
|
+
### Two-queue deadlock
|
|
195
|
+
|
|
196
|
+
Chaining two bounded queues in a pipeline can deadlock when both queues are full simultaneously:
|
|
197
|
+
|
|
198
|
+
```
|
|
199
|
+
main blocks pushing to jobs (full)
|
|
200
|
+
↓
|
|
201
|
+
workers block pushing to results (full)
|
|
202
|
+
↓
|
|
203
|
+
main cannot drain results (it is blocked)
|
|
204
|
+
↓ deadlock
|
|
205
|
+
```
|
|
206
|
+
|
|
207
|
+
**Fix:** size at least one queue large enough that its producer never blocks, or drain results asynchronously in a separate Ractor.
|
|
208
|
+
|
|
209
|
+
### Spin-wait storm
|
|
210
|
+
|
|
211
|
+
When more Ractors are actively spinning (blocked on `push`/`pop`) than there are idle cores, the OS scheduler can thrash. The sleep-based backoff mitigates this, but the practical ceiling for a single shared queue is roughly `2 × CPU cores` Ractors doing nothing but queue operations. For higher Ractor counts, use the queue pool pattern.
|
|
212
|
+
|
|
213
|
+
### `nil` as a payload
|
|
214
|
+
|
|
215
|
+
`try_pop` returns `RactorQueue::EMPTY` when the queue is empty and `nil` when `nil` was the pushed value — the two are unambiguous. Always check for empty with identity comparison:
|
|
216
|
+
|
|
217
|
+
```ruby
|
|
218
|
+
v = q.try_pop
|
|
219
|
+
return if v.equal?(RactorQueue::EMPTY)
|
|
220
|
+
process(v) # v may be nil — that's fine, it's a real payload
|
|
221
|
+
```
|
|
222
|
+
|
|
223
|
+
Do not use `==` to check for `EMPTY` — use `equal?`.
|
|
224
|
+
|
|
225
|
+
---
|
|
226
|
+
|
|
227
|
+
## Performance
|
|
228
|
+
|
|
229
|
+
Measured on Apple M2 Max (12 cores), Ruby 4.0.2:
|
|
230
|
+
|
|
231
|
+
| Configuration | Throughput |
|
|
232
|
+
|---|---|
|
|
233
|
+
| 1 producer / 1 consumer Ractor | ~470K ops/s |
|
|
234
|
+
| 2P / 2C shared queue | ~855K ops/s |
|
|
235
|
+
| 4P / 4C shared queue | ~1.25M ops/s |
|
|
236
|
+
| 8P / 8C shared queue | ~1.53M ops/s |
|
|
237
|
+
| 8P / 8C queue pool (8 queues) | ~1.66M ops/s |
|
|
238
|
+
| 50P / 50C queue pool | ~1.60M ops/s |
|
|
239
|
+
|
|
240
|
+
Ruby's built-in `Queue` is not included — it cannot participate in Ractor benchmarks.
|
|
241
|
+
|
|
242
|
+
Under MRI threads (no Ractors), Ruby's `Queue` is faster because the GVL makes lock-free atomics unnecessary. RactorQueue's advantage is exclusive to Ractor workloads.
|
|
243
|
+
|
|
244
|
+
---
|
|
245
|
+
|
|
246
|
+
## Running the Examples
|
|
247
|
+
|
|
248
|
+
```sh
|
|
249
|
+
bundle exec ruby examples/01_basic_usage.rb # Ractor usage patterns
|
|
250
|
+
bundle exec ruby examples/02_performance.rb # Throughput benchmarks
|
|
251
|
+
```
|
|
252
|
+
|
|
253
|
+
---
|
|
254
|
+
|
|
255
|
+
## Development
|
|
256
|
+
|
|
257
|
+
```sh
|
|
258
|
+
bundle install
|
|
259
|
+
bundle exec rake compile # build the native extension
|
|
260
|
+
bundle exec rake test # run the test suite
|
|
261
|
+
```
|
|
262
|
+
|
|
263
|
+
---
|
|
264
|
+
|
|
265
|
+
## Documentation
|
|
266
|
+
|
|
267
|
+
| Document | Description |
|
|
268
|
+
|---|---|
|
|
269
|
+
| [`examples/01_basic_usage.rb`](examples/01_basic_usage.rb) | Annotated Ractor usage patterns (1P1C, timeout, worker pool, pipeline, validate_shareable) |
|
|
270
|
+
| [`examples/02_performance.rb`](examples/02_performance.rb) | Throughput benchmarks across queue topologies and Ractor counts |
|
|
271
|
+
| [`docs/superpowers/specs/2026-04-10-atomic-queue-design.md`](docs/superpowers/specs/2026-04-10-atomic-queue-design.md) | Original design specification (C extension architecture, Rice bindings, API design decisions) |
|
|
272
|
+
| [`docs/superpowers/plans/`](docs/superpowers/plans/) | Implementation plans for each development phase |
|
|
273
|
+
|
|
274
|
+
---
|
|
275
|
+
|
|
276
|
+
## License
|
|
277
|
+
|
|
278
|
+
MIT. The vendored [max0x7ba/atomic_queue](https://github.com/max0x7ba/atomic_queue) C++ library is also MIT licensed.
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
require "mkmf-rice"

# The vendored atomic_queue headers live two levels above the extension dir.
$INCFLAGS << " -I$(srcdir)/../../vendor/atomic_queue/include"
# atomic_queue is header-only C++14+; the extension itself targets C++17.
$CPPFLAGS << " -std=c++17"

# Fail the build early, with a pointer to the likely cause, if the vendored
# headers are missing.
abort "Cannot find atomic_queue/atomic_queue.h — check vendor/ directory" unless find_header("atomic_queue/atomic_queue.h")

create_makefile "ractor_queue/ractor_queue"
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
#include <rice/rice.hpp>
#include <atomic_queue/atomic_queue.h>
#include <ruby.h>
#include "standard_queue.h"

using namespace Rice;

// Unique frozen Ruby object that c_try_pop hands back to mean "queue empty".
// Created and pinned as a permanent GC root in Init_ractor_queue so it is
// never collected.
VALUE g_empty_sentinel = Qnil;

extern "C" void Init_ractor_queue() {
  // Rice registers bound methods through rb_define_method; flagging the
  // extension Ractor-safe marks those registrations so cross-Ractor calls
  // do not raise Ractor::IsolationError. Verified with Rice 4.x on Ruby 4.0.
  rb_ext_ractor_safe(true);

  // Bind StandardQueue (the AtomicQueueB2<VALUE> wrapper) directly as the
  // RactorQueue class. Arg("v").setValue() / Return().setValue() tell
  // Rice 4.x to pass the VALUE through as a raw Ruby object pointer with
  // no type conversion.
  Data_Type<StandardQueue> rb_cRQ = define_class<StandardQueue>("RactorQueue");
  rb_cRQ.define_constructor(Constructor<StandardQueue, unsigned>());
  rb_cRQ.define_method("c_try_push", &StandardQueue::try_push, Arg("v").setValue());
  rb_cRQ.define_method("c_try_pop", &StandardQueue::try_pop, Return().setValue());
  rb_cRQ.define_method("capacity", &StandardQueue::capacity);
  rb_cRQ.define_method("was_size", &StandardQueue::was_size);
  rb_cRQ.define_method("was_empty", &StandardQueue::was_empty);
  rb_cRQ.define_method("was_full", &StandardQueue::was_full);

  // Allocate the EMPTY_SENTINEL object, freeze it, and pin it as a GC root
  // before exposing it as a constant on the class.
  g_empty_sentinel = rb_obj_alloc(rb_cObject);
  rb_obj_freeze(g_empty_sentinel);
  rb_gc_register_mark_object(g_empty_sentinel);
  rb_define_const(rb_cRQ, "EMPTY_SENTINEL", g_empty_sentinel);

  // Rice only sets RUBY_TYPED_FREE_IMMEDIATELY on its wrapper type; OR in
  // RUBY_TYPED_FROZEN_SHAREABLE so Ractor.make_shareable(instance) succeeds
  // after the Ruby-side freeze.
  Data_Type<StandardQueue>::ruby_data_type()->flags |= RUBY_TYPED_FROZEN_SHAREABLE;

  // Restore the default: methods defined later (by other code) are not
  // automatically marked Ractor-safe.
  rb_ext_ractor_safe(false);
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
#pragma once
#include <atomic_queue/atomic_queue.h>
#include <ruby.h>

// Sentinel created in Init_ractor_queue; try_pop returns it when the queue
// is empty. It is never a value a caller could have pushed, so the Ruby
// layer can detect "empty" with an identity comparison.
extern VALUE g_empty_sentinel;

// Thin bounded MPMC wrapper around atomic_queue::AtomicQueueB2<VALUE>.
class StandardQueue {
  atomic_queue::AtomicQueueB2<VALUE> queue_;

 public:
  explicit StandardQueue(unsigned capacity) : queue_(capacity) {}

  // Non-blocking enqueue. True when the element went in, false when full.
  bool try_push(VALUE v) { return queue_.try_push(v); }

  // Non-blocking dequeue. Yields the stored VALUE when one is available,
  // otherwise g_empty_sentinel.
  VALUE try_pop() {
    VALUE out;
    if (queue_.try_pop(out)) return out;
    return g_empty_sentinel;
  }

  unsigned capacity() const { return queue_.capacity(); }
  unsigned was_size() const { return queue_.was_size(); }  // approximate under concurrency
  bool was_empty() const { return queue_.was_empty(); }    // approximate
  bool was_full() const { return queue_.was_full(); }      // approximate
};
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
class RactorQueue
  # Ruby-level blocking/validation layer mixed into the native RactorQueue
  # class. The C extension supplies c_try_push, c_try_pop, capacity,
  # was_size, was_empty and was_full.
  module Interface
    # Number of Thread.pass retries before the wait loop falls back to sleeping.
    SPIN_THRESHOLD = 16
    # 100 µs — short enough to keep latency low once sleeping starts.
    SLEEP_INTERVAL = 0.0001

    # Non-blocking push. Returns true if enqueued, false if full.
    # Raises NotShareableError when the queue was built with
    # validate_shareable: true and obj is not Ractor-shareable.
    def try_push(obj)
      validate_shareable!(obj) if @validate_shareable
      c_try_push(obj)
    end

    # Non-blocking pop. Returns the object, or RactorQueue::EMPTY if the queue
    # is empty. EMPTY is a unique frozen sentinel distinct from nil, so nil is
    # an unambiguous payload value.
    #
    #   entry = q.try_pop
    #   if entry.equal?(RactorQueue::EMPTY)
    #     # queue was empty
    #   else
    #     process(entry) # entry may be nil if nil was pushed
    #   end
    def try_pop
      c_try_pop
    end

    # Blocking push. Spins until space is available. Returns self.
    # Raises RactorQueue::TimeoutError if timeout expires.
    # Ruby interrupt-aware via Thread.pass / sleep between retries.
    def push(obj, timeout: nil)
      validate_shareable!(obj) if @validate_shareable
      blocking_push(obj, timeout)
    end

    # Blocking pop. Spins until an element is available. Returns the object.
    # Raises RactorQueue::TimeoutError if timeout expires.
    def pop(timeout: nil)
      blocking_pop(timeout)
    end

    # Approximate current element count.
    def size = was_size

    # True if queue appears empty (approximate).
    def empty? = was_empty

    # True if queue appears full (approximate).
    def full? = was_full

    private

    def validate_shareable!(obj)
      return if Ractor.shareable?(obj)

      raise NotShareableError, "#{obj.inspect} is not Ractor-shareable"
    end

    # Ruby-level spin loop (two-phase backoff; the wait is NOT exponential).
    #
    # Phase 1 — Thread.pass (spins 0..SPIN_THRESHOLD-1):
    #   Cheap busy-wait. Fast when the queue clears quickly (light contention).
    #   Triggers Ruby interrupt checking, so Thread#raise / Ctrl-C can escape.
    #
    # Phase 2 — sleep(SLEEP_INTERVAL) after SPIN_THRESHOLD passes:
    #   Suspends the OS thread rather than calling sched_yield. Critical inside
    #   Ractors: each Ractor IS its own OS thread, so Thread.pass only calls
    #   sched_yield, which under high contention just rotates threads at the
    #   same priority without making progress. sleep() actually yields the core,
    #   preventing spin-wait storms when many Ractors share a full/empty queue.
    #
    # NOTE: timeout: 0 means "try once, raise if not immediately successful."
    # The operation is attempted before the deadline check on each iteration,
    # so a single try is always made (never a pure no-op raise).
    def blocking_push(obj, timeout)
      deadline = timeout && monotonic_now + timeout
      spins = 0
      loop do
        return self if c_try_push(obj)

        if deadline && monotonic_now >= deadline
          raise TimeoutError, "push timed out after #{timeout}s (queue still full)"
        end
        spins = backoff(spins)
      end
    end

    # Blocking pop loop (same two-phase backoff; timeout: 0 tries once before
    # checking the deadline). Only the sentinel (empty queue) continues the
    # loop; any actual value — including nil — is returned as-is, consistent
    # with try_pop behavior.
    def blocking_pop(timeout)
      deadline = timeout && monotonic_now + timeout
      spins = 0
      loop do
        result = c_try_pop
        return result unless result.equal?(RactorQueue::EMPTY_SENTINEL)

        if deadline && monotonic_now >= deadline
          raise TimeoutError, "pop timed out after #{timeout}s (queue still empty)"
        end
        spins = backoff(spins)
      end
    end

    # One backoff step shared by both blocking loops. Returns the updated
    # spin counter (saturates at SPIN_THRESHOLD, the sleep phase).
    def backoff(spins)
      if spins < SPIN_THRESHOLD
        Thread.pass
        spins + 1
      else
        sleep(SLEEP_INTERVAL)
        spins
      end
    end

    # Monotonic clock for deadlines — immune to wall-clock adjustments.
    def monotonic_now
      Process.clock_gettime(Process::CLOCK_MONOTONIC)
    end
  end
end
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
class RactorQueue
  include Interface

  # Public sentinel returned by try_pop when the queue is empty.
  # Use equal? (identity) to check — never == — so that nil is an
  # unambiguous payload.
  #
  #   entry = q.try_pop
  #   return if entry.equal?(RactorQueue::EMPTY)
  EMPTY = EMPTY_SENTINEL

  # Builds a queue, records the validation flag, then freezes the instance
  # so it is Ractor-shareable.
  #
  # @param capacity [Integer] Maximum number of elements the queue can hold.
  # @param validate_shareable [Boolean] Raise NotShareableError on non-shareable pushes.
  # @raise [ArgumentError] if capacity is not a positive Integer — caught here
  #   with a clear message instead of surfacing as a conversion error from the
  #   native constructor (which takes a C unsigned).
  def self.new(capacity:, validate_shareable: false)
    unless capacity.is_a?(Integer) && capacity.positive?
      raise ArgumentError, "capacity must be a positive Integer, got #{capacity.inspect}"
    end

    instance = super(capacity)
    instance.instance_variable_set(:@validate_shareable, validate_shareable)
    # Make the queue instance itself Ractor-shareable. This deep-freezes the Ruby
    # wrapper object. The C++ AtomicQueueB2 buffer is not affected by Ruby's freeze.
    Ractor.make_shareable(instance)
    instance
  end
end
|
data/lib/ractor_queue.rb
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
# Load order matters: the native C extension defines the RactorQueue class
# and its EMPTY_SENTINEL constant, which the Ruby files below reopen and
# build on.
require "ractor_queue/ractor_queue.#{RbConfig::CONFIG['DLEXT']}"

require_relative "ractor_queue/version"
require_relative "ractor_queue/errors"
require_relative "ractor_queue/interface"
require_relative "ractor_queue/ractor_queue" # Ruby layer (factory)
|