carbon_fiber 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +3 -0
- data/LICENSE +21 -0
- data/README.md +320 -0
- data/lib/carbon_fiber/async.rb +256 -0
- data/lib/carbon_fiber/native/fallback.rb +387 -0
- data/lib/carbon_fiber/native.rb +41 -0
- data/lib/carbon_fiber/scheduler.rb +396 -0
- data/lib/carbon_fiber/version.rb +6 -0
- data/lib/carbon_fiber.rb +4 -0
- metadata +155 -0
|
@@ -0,0 +1,387 @@
|
|
|
1
|
+
# frozen_string_literal: true

# Please note that this code is heavily AI-assisted.

module CarbonFiber
  module Native
    # Pure-Ruby fallback selector using threads and condition variables.
    #
    # Loaded automatically when the native Zig extension is unavailable.
    # Provides the same Selector API as the native implementation so the
    # Scheduler and Async adapter work unchanged.
    #
    # Threading model: one @mutex guards every shared structure (@ready,
    # @timers, @read_waits, @blocked_fibers); @cv wakes the event loop when
    # work is enqueued. Fiber control transfers always go through
    # @loop_fiber via Fiber#transfer.
    class Selector
      # @param loop_fiber [Fiber] the event loop fiber
      def initialize(loop_fiber)
        @loop_fiber = loop_fiber
        @mutex = Thread::Mutex.new
        @cv = Thread::ConditionVariable.new
        # Ready queue entries: [kind(:resume/:raise), fiber, payload, has_payload].
        @ready = []
        # token => [deadline, kind, fiber, payload]
        @timers = {}
        @next_timer = 1
        # fd => {token:, fiber:, io:} for in-flight background read waits.
        @read_waits = {}
        @next_wait_token = 1

        # Fibers voluntarily parked in block() or do_io_wait, mapped to the
        # sleep timer token (or nil). flush_ready consults this set to decide
        # whether a fiber whose transfer returned unexpectedly was interrupted
        # mid-execution (Ruby 4.0 Fiber#raise bypass) and needs re-queueing.
        @blocked_fibers = {}
      end

      # No-op; nothing to release.
      # @return [true]
      def destroy
        true
      end

      # @return [Boolean] whether there is pending work
      def pending?
        @mutex.synchronize { @ready.any? || @timers.any? || @read_waits.any? }
      end

      # Enqueue a fiber into the ready queue (no resume value).
      # @return [Fiber] the enqueued fiber
      def push(fiber)
        @mutex.synchronize do
          @ready << [:resume, fiber, nil, false]
          @cv.signal
        end
        fiber
      end

      # Enqueue a fiber with a return value.
      # @return [Fiber] the enqueued fiber
      def resume(fiber, value)
        @mutex.synchronize do
          @ready << [:resume, fiber, value, true]
          @cv.signal
        end
        fiber
      end

      # Enqueue an exception delivery to a fiber.
      # @return [Fiber] the target fiber
      def raise(fiber, exception)
        @mutex.synchronize do
          # Cancel any armed sleep timer for this fiber so its block() wakeup
          # doesn't spuriously fire after the raise. Zero the token but leave
          # the blocked_fibers entry—it's removed by cancel_block_timer when
          # the fiber's ensure runs, so flush_ready's re-queue check still
          # correctly treats the fiber as "parked" until it exits.
          if @blocked_fibers.key?(fiber)
            token = @blocked_fibers[fiber]
            @timers.delete(token) if token
            @blocked_fibers[fiber] = nil
          end
          @ready << [:raise, fiber, exception, true]
          @cv.signal
        end
        fiber
      end

      # Wake the event loop (signals the condition variable so a blocked
      # select() iteration re-checks its queues).
      # @return [true]
      def wakeup
        @mutex.synchronize { @cv.signal }
        true
      end

      # Transfer control to the event loop fiber.
      # Returns nil immediately when already running on the loop fiber.
      def transfer
        return nil if Fiber.current.equal?(@loop_fiber)

        @loop_fiber.transfer
      end

      # Transfer to the event loop fiber. flush_ready's re-queue logic puts
      # us back in the ready queue on the next pass—no explicit self-push
      # needed, and avoiding it prevents duplicate ready entries.
      def yield
        return nil if Fiber.current.equal?(@loop_fiber)

        @loop_fiber.transfer
      end

      # Run one event loop iteration: dispatch whatever is ready, then wait
      # (bounded by +timeout+ and/or the nearest timer deadline) for more
      # work, then dispatch expired timers and newly ready fibers.
      # @param timeout [Numeric, nil] maximum seconds to wait; nil waits
      #   until signaled (or until a timer deadline, if any exists)
      def select(timeout = nil)
        flush_ready
        return 0 unless pending?

        deadline = next_wait_deadline(timeout)

        @mutex.synchronize do
          until @ready.any?
            collect_expired_timers_locked
            break if @ready.any?

            if deadline
              remaining = deadline - monotonic_time
              break if remaining <= 0

              # May wake spuriously or via @cv.signal; the until loop
              # re-checks readiness either way.
              @cv.wait(@mutex, remaining)
            else
              @cv.wait(@mutex)
            end
          end
        end

        collect_expired_timers
        flush_ready
      end

      # Suspend the current fiber until unblocked or timed out.
      # @param fiber [Fiber] the fiber to park (expected to be the caller)
      # @param timeout [Numeric, nil] optional wakeup after this many seconds;
      #   the timed wakeup resumes the fiber with +false+
      # @return the value delivered by the matching unblock/resume
      def block(fiber, timeout = nil)
        token = nil
        token = resume_after(fiber, timeout, false) if timeout
        @mutex.synchronize { @blocked_fibers[fiber] = token }

        result = @loop_fiber.transfer

        # Normal wakeup path: drop the tracking entry and cancel any still-
        # armed sleep timer. If raise() ran first, it already cancelled the
        # timer and zeroed the token; the raise-unwind path then relies on
        # cancel_block_timer (invoked from fiber_done) to remove the entry.
        @mutex.synchronize do
          stored = @blocked_fibers.delete(fiber)
          @timers.delete(stored) if stored
        end
        result
      end

      # Resume a fiber previously suspended by {#block}.
      def unblock(fiber)
        resume(fiber, true)
      end

      # Schedule an exception to be raised on a fiber after +duration+ seconds.
      # @return [Integer] a timer token usable with {#cancel_timer}
      def raise_after(fiber, exception, duration)
        schedule_timer(duration, :raise, fiber, exception)
      end

      # Cancel a pending timer by token.
      # @return [Boolean] true if the timer was still armed
      def cancel_timer(token)
        @mutex.synchronize { !!@timers.delete(token) }
      end

      # Called from Scheduler#fiber_done's ensure block. Removes the fiber
      # from the blocked set and cancels any still-armed sleep timer. This is
      # the only cleanup path when a raise() unwinds the fiber past block()'s
      # normal return.
      def cancel_block_timer(fiber)
        @mutex.synchronize do
          stored = @blocked_fibers.delete(fiber)
          @timers.delete(stored) if stored
        end
      end

      # Wait for read readiness on a file descriptor via IO.select on
      # a background thread.
      # Returns nil for non-READABLE events (handled by the Scheduler fallback).
      def io_wait(fiber, fd, events)
        return nil unless events == IO::READABLE

        do_io_wait(fiber, fd, nil)
      end

      # Like {#io_wait} but with a timeout.
      def io_wait_with_timeout(fiber, fd, events, timeout)
        return nil unless events == IO::READABLE

        do_io_wait(fiber, fd, timeout)
      end

      # Cancel pending waiters on a closed descriptor by delivering
      # +exception+ to the waiting fiber.
      # @return [Boolean] whether a waiter was woken
      def io_close(fd, exception)
        woke = false

        @mutex.synchronize do
          wait = @read_waits.delete(fd)
          if wait
            @ready << [:raise, wait[:fiber], exception, true]
            woke = true
            @cv.signal
          end
        end

        woke
      end

      # Returns nil; the Scheduler handles process_wait via background thread.
      def process_wait(_fiber, _pid, _flags)
        nil
      end

      # Returns nil; the Scheduler handles io_read via background thread.
      def io_read(_fd, _buffer, _length, _offset)
        nil
      end

      # Returns nil; the Scheduler handles io_write via background thread.
      def io_write(_fd, _buffer, _length, _offset)
        nil
      end

      # Non-destructive check if a descriptor has data available to read.
      # autoclose: false keeps the underlying fd open when the wrapper IO
      # is closed in the ensure below.
      def poll_readable_now(fd)
        io = IO.new(fd, autoclose: false)
        ready = IO.select([io], nil, nil, 0)
        !!ready
      rescue IOError, SystemCallError
        false
      ensure
        io.close if io && !io.closed?
      end

      private

      # Park +fiber+ until +fd+ is readable (or +timeout+ elapses), using a
      # background thread blocked in IO.select. Returns nil without parking
      # when another waiter is already registered for the same fd.
      def do_io_wait(fiber, fd, timeout)
        wait = @mutex.synchronize do
          # Only one read-waiter per fd; `break nil` exits synchronize with nil.
          break nil if @read_waits.key?(fd)

          token = @next_wait_token
          @next_wait_token += 1
          io = IO.new(fd, autoclose: false)
          # Hash literal is the block's value, so `wait` becomes this entry.
          @read_waits[fd] = {token: token, fiber: fiber, io: io}
        end

        return nil unless wait

        # Register as blocked before launching the worker—if the IO.select
        # returns immediately, the worker's resume push must land while we're
        # still marked parked so flush_ready doesn't treat our return as an
        # interrupt.
        @mutex.synchronize { @blocked_fibers[fiber] = nil }

        Thread.new do
          Thread.current.report_on_exception = false

          begin
            ready = IO.select([wait[:io]], nil, nil, timeout)
            @mutex.synchronize do
              # Token check: only complete the wait we registered; io_close
              # may have already removed/replaced the entry.
              current = @read_waits[fd]
              if current && current[:token] == wait[:token]
                @read_waits.delete(fd)
                payload = ready ? IO::READABLE : false
                @ready << [:resume, wait[:fiber], payload, true]
                @cv.signal
              end
            end
          rescue IOError, SystemCallError
          ensure
            wait[:io].close unless wait[:io].closed?
          end
        end

        result = @loop_fiber.transfer
        @mutex.synchronize { @blocked_fibers.delete(fiber) }
        result
      end

      # Arm a timer that resumes +fiber+ with +value+ after +duration+ seconds.
      def resume_after(fiber, duration, value)
        schedule_timer(duration, :resume, fiber, value)
      end

      # Register a timer entry and wake the loop so it recomputes its deadline.
      # @return [Integer] the timer token
      def schedule_timer(duration, kind, fiber, payload)
        token = nil
        @mutex.synchronize do
          token = @next_timer
          @next_timer += 1
          @timers[token] = [monotonic_time + duration, kind, fiber, payload]
          @cv.signal
        end
        token
      end

      # Earliest absolute deadline among armed timers and the caller-supplied
      # timeout, or nil when neither bounds the wait.
      def next_wait_deadline(timeout)
        timer_deadline = @mutex.synchronize do
          @timers.values.map(&:first).min
        end

        timeout_deadline = timeout && (monotonic_time + timeout)

        if timer_deadline && timeout_deadline
          [timer_deadline, timeout_deadline].min
        else
          timer_deadline || timeout_deadline
        end
      end

      # Move expired timers into the ready queue (acquires @mutex itself).
      def collect_expired_timers
        now = monotonic_time
        expired = []

        @mutex.synchronize do
          collect_expired_timers_into(now, expired)
          @ready.concat(expired)
        end
      end

      # Same as collect_expired_timers but for callers already holding @mutex.
      def collect_expired_timers_locked
        expired = []
        collect_expired_timers_into(monotonic_time, expired)
        @ready.concat(expired)
      end

      # Caller must hold @mutex.
      def collect_expired_timers_into(now, expired)
        # Sort expired timers by deadline so fibers waking up in the same tick
        # resume in the order their sleeps were scheduled to fire. Without
        # this sort, @timers' Hash insertion order leaks into the wake order
        # whenever multiple timers expire before the event loop revisits
        # them — visible as non-deterministic ordering on fast runners.
        triggered = []
        @timers.delete_if do |_token, entry|
          next false if entry[0] > now

          triggered << entry
          true
        end
        triggered.sort_by! { |entry| entry[0] }
        triggered.each do |(_deadline, kind, fiber, payload)|
          expired << [kind, fiber, payload, true]
        end
      end

      # Dispatch one batch of ready entries, transferring control to each
      # fiber in turn. Must run on the loop fiber (transfer returns here).
      def flush_ready
        # Snapshot the batch boundary. Entries re-queued during dispatch (by
        # the Ruby 4.0 bypass handler below, or by fibers enqueueing new work
        # as they run) are deferred to the next flush_ready call—without
        # this cap, a yield-forever fiber would spin inside one call.
        batch_size = @mutex.synchronize { @ready.size }

        batch_size.times do
          kind, fiber, payload, has_payload = @mutex.synchronize { @ready.shift }
          break unless fiber
          next unless fiber.alive?

          case kind
          when :resume
            has_payload ? fiber.transfer(payload) : fiber.transfer

            # Ruby 4.0's Fiber#raise bypasses fiber_interrupt and returns
            # control to loop_fiber instead of the caller, stranding the
            # fiber that invoked raise. When flush_ready's transfer returns
            # with the fiber still alive AND not voluntarily parked, the
            # return was unexpected—re-queue so the fiber can finish.
            if fiber.alive?
              parked = @mutex.synchronize { @blocked_fibers.key?(fiber) }
              unless parked
                @mutex.synchronize { @ready << [:resume, fiber, nil, false] }
              end
            end
          when :raise
            fiber.raise(payload)
          end
        end
      end

      # Monotonic clock for deadlines (immune to wall-clock adjustments).
      def monotonic_time
        Process.clock_gettime(Process::CLOCK_MONOTONIC)
      end
    end

    module_function

    # The fallback backend is, by definition, not the native extension.
    def available?
      false
    end

    # @return [String] backend identifier
    def backend
      "ruby_fallback"
    end
  end
end
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# frozen_string_literal: true

# Loads the native Zig extension or falls back to a pure-Ruby implementation.
# Set +CARBON_FIBER_FORCE_FALLBACK=1+ to skip the native extension.

require "rbconfig"

# Candidate native-extension paths, probed in order: platform DLEXT first,
# then a plain .so, then an extensionless name.
ruby_abi = RbConfig::CONFIG["ruby_version"]
native_paths = [
  "#{ruby_abi}/carbon_fiber_native.#{RbConfig::CONFIG["DLEXT"]}",
  "#{ruby_abi}/carbon_fiber_native.so",
  "#{ruby_abi}/carbon_fiber_native"
].map { |candidate| File.expand_path(candidate, __dir__) }

native_loaded = false

unless ENV["CARBON_FIBER_FORCE_FALLBACK"] == "1"
  native_paths.each do |path|
    require path
    native_loaded = true
    break
  rescue LoadError
    # Try the next candidate path.
    nil
  end
end

# Only trust the native backend when it loaded, defined its module, and
# explicitly reports itself as usable.
native_available =
  native_loaded &&
  defined?(CarbonFiber::Native) &&
  CarbonFiber::Native.respond_to?(:available?) &&
  CarbonFiber::Native.available?

require_relative "native/fallback" unless native_available
|