io-event 1.10.2 → 1.12.1
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/agent.md +47 -0
- data/ext/extconf.rb +10 -0
- data/ext/io/event/event.c +5 -1
- data/ext/io/event/event.h +4 -0
- data/ext/io/event/fiber.c +1 -1
- data/ext/io/event/selector/epoll.c +4 -0
- data/ext/io/event/selector/kqueue.c +4 -0
- data/ext/io/event/selector/selector.c +1 -6
- data/ext/io/event/selector/uring.c +19 -1
- data/ext/io/event/worker_pool.c +477 -0
- data/ext/io/event/worker_pool.h +8 -0
- data/ext/io/event/worker_pool_test.c +199 -0
- data/ext/io/event/worker_pool_test.h +9 -0
- data/lib/io/event/debug/selector.rb +2 -2
- data/lib/io/event/priority_heap.rb +1 -1
- data/lib/io/event/selector/select.rb +18 -93
- data/lib/io/event/support.rb +11 -18
- data/lib/io/event/version.rb +1 -1
- data/license.md +1 -1
- data/readme.md +12 -0
- data/releases.md +34 -0
- data.tar.gz.sig +0 -0
- metadata +9 -4
- metadata.gz.sig +0 -0
checksums.yaml CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 15970f96a2af0de9aa23889ee6ff50217b53216334de39ad5b4692ebf945942b
+  data.tar.gz: 5f44f0dbcdf09106e342e3fda71f56f8a0208dc5a435e82f8c80b1fdb8826fae
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 0b72815ede642358071d66b9720935603493b6a09a34fcf2eec6930c8648a7d2b6498358f630554f40c6537f2bd0fb3cb6b58334ad58dfbaacc5d6c1f0e407cf
+  data.tar.gz: a5eb4e8396cd510aa71e8ee45f8ebf60829ca707a96bded1104ce0c3f74c47e9343225d826c58e4ec7bcfed6aed81051d7c14a10f304c42fa61fe930e1099fef
checksums.yaml.gz.sig CHANGED

Binary file
data/agent.md ADDED

@@ -0,0 +1,47 @@
+# Agent
+
+## Context
+
+This section provides links to documentation from installed packages. It is automatically generated and may be updated by running `bake agent:context:install`.
+
+**Important:** Before performing any code, documentation, or analysis tasks, always read and apply the full content of any relevant documentation referenced in the following sections. These context files contain authoritative standards and best practices for documentation, code style, and project-specific workflows. **Do not proceed with any actions until you have read and incorporated the guidance from relevant context files.**
+
+### agent-context
+
+Install and manage context files from Ruby gems.
+
+#### [Usage Guide](.context/agent-context/usage.md)
+
+`agent-context` is a tool that helps you discover and install contextual information from Ruby gems for AI agents. Gems can provide additional documentation, examples, and guidance in a `context/` ...
+
+### decode
+
+Code analysis for documentation generation.
+
+#### [Getting Started with Decode](.context/decode/getting-started.md)
+
+The Decode gem provides programmatic access to Ruby code structure and metadata. It can parse Ruby files and extract definitions, comments, and documentation pragmas, enabling code analysis, docume...
+
+#### [Documentation Coverage](.context/decode/coverage.md)
+
+This guide explains how to test and monitor documentation coverage in your Ruby projects using the Decode gem's built-in bake tasks.
+
+#### [Ruby Documentation](.context/decode/ruby-documentation.md)
+
+This guide covers documentation practices and pragmas supported by the Decode gem for documenting Ruby code. These pragmas provide structured documentation that can be parsed and used to generate A...
+
+### sus
+
+A fast and scalable test runner.
+
+#### [Using Sus Testing Framework](.context/sus/usage.md)
+
+Sus is a modern Ruby testing framework that provides a clean, BDD-style syntax for writing tests. It's designed to be fast, simple, and expressive.
+
+#### [Mocking](.context/sus/mocking.md)
+
+There are two types of mocking in sus: `receive` and `mock`. The `receive` matcher is a subset of full mocking and is used to set expectations on method calls, while `mock` can be used to replace m...
+
+#### [Shared Test Behaviors and Fixtures](.context/sus/shared.md)
+
+Sus provides shared test contexts which can be used to define common behaviours or tests that can be reused across one or more test files.
data/ext/extconf.rb CHANGED

@@ -58,6 +58,16 @@ have_func("epoll_pwait2")
 
 have_header("ruby/io/buffer.h")
 
+# Feature detection for blocking operation support
+if have_func("rb_fiber_scheduler_blocking_operation_extract")
+	# Feature detection for pthread support (needed for WorkerPool)
+	if have_header("pthread.h")
+		append_cflags(["-DHAVE_IO_EVENT_WORKER_POOL"])
+		$srcs << "io/event/worker_pool.c"
+		$srcs << "io/event/worker_pool_test.c"
+	end
+end
+
 if ENV.key?("RUBY_SANITIZE")
 	$stderr.puts "Enabling sanitizers..."
 
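Because the WorkerPool sources are only compiled when both checks pass (and `Init_IO_Event_WorkerPool` is only called under the same guard, per event.c below), a runtime feature check is the safest way for consumers to use it. A minimal sketch, assuming nothing beyond what this diff defines:

```ruby
require "io/event"

# IO::Event::WorkerPool is only defined when the extension was built with
# -DHAVE_IO_EVENT_WORKER_POOL (scheduler hook + pthread.h both detected):
if defined?(IO::Event::WorkerPool)
	pool = IO::Event::WorkerPool.new(maximum_worker_count: 2)
	# ... hand the pool to a fiber scheduler's blocking_operation_wait hook ...
	pool.close
end
```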
data/ext/io/event/event.c CHANGED

@@ -14,7 +14,11 @@ void Init_IO_Event(void)
 	VALUE IO_Event = rb_define_module_under(rb_cIO, "Event");
 	
 	Init_IO_Event_Fiber(IO_Event);
-	
+	
+	#ifdef HAVE_IO_EVENT_WORKER_POOL
+	Init_IO_Event_WorkerPool(IO_Event);
+	#endif
+	
 	VALUE IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");
 	Init_IO_Event_Selector(IO_Event_Selector);
 	
data/ext/io/event/event.h CHANGED
data/ext/io/event/fiber.c CHANGED

@@ -35,7 +35,7 @@ VALUE IO_Event_Fiber_raise(VALUE fiber, int argc, VALUE *argv) {
 #ifndef HAVE_RB_FIBER_CURRENT
 static ID id_current;
 
-
+VALUE IO_Event_Fiber_current(void) {
 	return rb_funcall(rb_cFiber, id_current, 0);
 }
 #endif
data/ext/io/event/selector/epoll.c CHANGED

@@ -38,6 +38,10 @@ struct IO_Event_Selector_EPoll
 {
 	struct IO_Event_Selector backend;
 	int descriptor;
+	
+	// Flag indicating whether the selector is currently blocked in a system call.
+	// Set to 1 when blocked in epoll_wait() without GVL, 0 otherwise.
+	// Used by wakeup() to determine if an interrupt signal is needed.
 	int blocked;
 	
 	struct timespec idle_duration;
data/ext/io/event/selector/kqueue.c CHANGED

@@ -47,6 +47,10 @@ struct IO_Event_Selector_KQueue
 {
 	struct IO_Event_Selector backend;
 	int descriptor;
+	
+	// Flag indicating whether the selector is currently blocked in a system call.
+	// Set to 1 when blocked in kevent() without GVL, 0 otherwise.
+	// Used by wakeup() to determine if an interrupt signal is needed.
 	int blocked;
 	
 	struct timespec idle_duration;
data/ext/io/event/selector/selector.c CHANGED

@@ -29,10 +29,7 @@ VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid, int flags)
 int IO_Event_Selector_nonblock_set(int file_descriptor)
 {
 #ifdef _WIN32
-
-	ioctlsocket(file_descriptor, FIONBIO, &nonblock);
-	// Windows does not provide any way to know this, so we always restore it back to unset:
-	return 0;
+	rb_w32_set_nonblock(file_descriptor);
 #else
 	// Get the current mode:
 	int flags = fcntl(file_descriptor, F_GETFL, 0);

@@ -50,8 +47,6 @@ void IO_Event_Selector_nonblock_restore(int file_descriptor, int flags)
 {
 #ifdef _WIN32
 	// Yolo...
-	u_long nonblock = flags;
-	ioctlsocket(file_descriptor, FIONBIO, &nonblock);
 #else
 	// The flags didn't have O_NONBLOCK set, so it would have been set, so we need to restore it:
 	if (!(flags & O_NONBLOCK)) {
data/ext/io/event/selector/uring.c CHANGED

@@ -30,6 +30,10 @@ struct IO_Event_Selector_URing
 	struct IO_Event_Selector backend;
 	struct io_uring ring;
 	size_t pending;
+	
+	// Flag indicating whether the selector is currently blocked in a system call.
+	// Set to 1 when blocked in io_uring_wait_cqe_timeout() without GVL, 0 otherwise.
+	// Used by wakeup() to determine if an interrupt signal is needed.
 	int blocked;
 	
 	struct timespec idle_duration;

@@ -707,6 +711,20 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 	off_t from = io_seekable(descriptor);
 	
 	size_t maximum_size = size - offset;
+	
+	// Are we performing a non-blocking read?
+	if (!length) {
+		// If the (maximum) length is zero, that indicates we just want to read whatever is available without blocking.
+		// If we schedule this read into the URing, it will block until data is available, rather than returning immediately.
+		int state = IO_Event_Selector_nonblock_set(descriptor);
+		
+		int result = read(descriptor, (char*)base+offset, maximum_size);
+		int error = errno;
+		
+		IO_Event_Selector_nonblock_restore(descriptor, state);
+		return rb_fiber_scheduler_io_result(result, error);
+	}
+	
 	while (maximum_size) {
 		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size, from);
 		

@@ -1093,7 +1111,7 @@ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
 		}
 	}
 	
-	if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions
+	if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions: completed=%d\n", completed);
 	
 	return completed;
 }
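This zero-length path is what the v1.11.1 release notes refer to: in the fiber scheduler protocol, `IO#read_nonblock` issues a read with a *minimum* length of zero, meaning "return whatever is available without blocking". A minimal sketch of the restored behaviour (the `async` gem is assumed here purely to provide a fiber scheduler; any scheduler using this selector behaves the same):

```ruby
require "async" # assumption: any fiber scheduler works; async is used for illustration

Async do
	r, w = IO.pipe
	
	# A zero-minimum-length read returns immediately via the new path above,
	# instead of being scheduled into the ring (which would block):
	r.read_nonblock(10, exception: false) # => :wait_readable (nothing buffered yet)
	
	w.write("hello")
	r.read_nonblock(10) # => "hello"
end
```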
data/ext/io/event/worker_pool.c ADDED

@@ -0,0 +1,477 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "worker_pool.h"
+#include "worker_pool_test.h"
+#include "fiber.h"
+
+#include <ruby/thread.h>
+#include <ruby/fiber/scheduler.h>
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+enum {
+	DEBUG = 0,
+};
+
+static VALUE IO_Event_WorkerPool;
+static ID id_maximum_worker_count;
+
+// Thread pool structure
+struct IO_Event_WorkerPool_Worker {
+	VALUE thread;
+	
+	// Flag to indicate this specific worker should exit:
+	bool interrupted;
+	
+	// Currently executing operation:
+	rb_fiber_scheduler_blocking_operation_t *current_blocking_operation;
+	
+	struct IO_Event_WorkerPool *pool;
+	struct IO_Event_WorkerPool_Worker *next;
+};
+
+// Work item structure
+struct IO_Event_WorkerPool_Work {
+	rb_fiber_scheduler_blocking_operation_t *blocking_operation;
+	
+	bool completed;
+	
+	VALUE scheduler;
+	VALUE blocker;
+	VALUE fiber;
+	
+	struct IO_Event_WorkerPool_Work *next;
+};
+
+// Worker pool structure
+struct IO_Event_WorkerPool {
+	pthread_mutex_t mutex;
+	pthread_cond_t work_available;
+	
+	struct IO_Event_WorkerPool_Work *work_queue;
+	struct IO_Event_WorkerPool_Work *work_queue_tail;
+	
+	struct IO_Event_WorkerPool_Worker *workers;
+	size_t current_worker_count;
+	size_t maximum_worker_count;
+	
+	size_t call_count;
+	size_t completed_count;
+	size_t cancelled_count;
+	
+	bool shutdown;
+};
+
+// Free functions for Ruby GC
+static void worker_pool_free(void *ptr) {
+	struct IO_Event_WorkerPool *pool = (struct IO_Event_WorkerPool *)ptr;
+	
+	if (pool) {
+		// Signal shutdown to all workers
+		if (!pool->shutdown) {
+			pthread_mutex_lock(&pool->mutex);
+			pool->shutdown = true;
+			pthread_cond_broadcast(&pool->work_available);
+			pthread_mutex_unlock(&pool->mutex);
+		}
+		
+		// Note: We don't free worker structures or wait for threads during GC
+		// as this can cause deadlocks. The Ruby GC will handle the thread objects.
+		// Workers will see the shutdown flag and exit cleanly.
+	}
+}
+
+// Size functions for Ruby GC
+static size_t worker_pool_size(const void *ptr) {
+	return sizeof(struct IO_Event_WorkerPool);
+}
+
+// Ruby TypedData structures
+static const rb_data_type_t IO_Event_WorkerPool_type = {
+	"IO::Event::WorkerPool",
+	{0, worker_pool_free, worker_pool_size,},
+	0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+};
+
+// Helper function to enqueue work (must be called with mutex held)
+static void enqueue_work(struct IO_Event_WorkerPool *pool, struct IO_Event_WorkerPool_Work *work) {
+	if (pool->work_queue_tail) {
+		pool->work_queue_tail->next = work;
+	} else {
+		pool->work_queue = work;
+	}
+	pool->work_queue_tail = work;
+}
+
+// Helper function to dequeue work (must be called with mutex held)
+static struct IO_Event_WorkerPool_Work *dequeue_work(struct IO_Event_WorkerPool *pool) {
+	struct IO_Event_WorkerPool_Work *work = pool->work_queue;
+	if (work) {
+		pool->work_queue = work->next;
+		if (!pool->work_queue) {
+			pool->work_queue_tail = NULL;
+		}
+		work->next = NULL; // Clear the next pointer for safety
+	}
+	return work;
+}
+
+// Unblock function to interrupt a specific worker.
+static void worker_unblock_func(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+	struct IO_Event_WorkerPool *pool = worker->pool;
+	
+	// Mark this specific worker as interrupted
+	pthread_mutex_lock(&pool->mutex);
+	worker->interrupted = true;
+	pthread_cond_broadcast(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+	
+	// If there's a currently executing blocking operation, cancel it
+	if (worker->current_blocking_operation) {
+		rb_fiber_scheduler_blocking_operation_cancel(worker->current_blocking_operation);
+	}
+}
+
+// Function to wait for work and execute it without GVL.
+static void *worker_wait_and_execute(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+	struct IO_Event_WorkerPool *pool = worker->pool;
+	
+	while (true) {
+		struct IO_Event_WorkerPool_Work *work = NULL;
+		
+		pthread_mutex_lock(&pool->mutex);
+		
+		// Wait for work, shutdown, or interruption
+		while (!pool->work_queue && !pool->shutdown && !worker->interrupted) {
+			pthread_cond_wait(&pool->work_available, &pool->mutex);
+		}
+		
+		if (pool->shutdown || worker->interrupted) {
+			pthread_mutex_unlock(&pool->mutex);
+			break;
+		}
+		
+		work = dequeue_work(pool);
+		
+		pthread_mutex_unlock(&pool->mutex);
+		
+		// Execute work WITHOUT GVL (this is the whole point!)
+		if (work) {
+			worker->current_blocking_operation = work->blocking_operation;
+			rb_fiber_scheduler_blocking_operation_execute(work->blocking_operation);
+			worker->current_blocking_operation = NULL;
+		}
+		
+		return work;
+	}
+	
+	return NULL; // Shutdown signal
+}
+
+static VALUE worker_thread_func(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+	
+	while (true) {
+		// Wait for work and execute it without holding GVL
+		struct IO_Event_WorkerPool_Work *work = (struct IO_Event_WorkerPool_Work *)rb_thread_call_without_gvl(worker_wait_and_execute, worker, worker_unblock_func, worker);
+		
+		if (!work) {
+			// Shutdown signal received
+			break;
+		}
+		
+		// Protected by GVL:
+		work->completed = true;
+		worker->pool->completed_count++;
+		
+		// Work was executed without GVL, now unblock the waiting fiber (we have GVL here)
+		rb_fiber_scheduler_unblock(work->scheduler, work->blocker, work->fiber);
+	}
+	
+	return Qnil;
+}
+
+// Create a new worker thread
+static int create_worker_thread(struct IO_Event_WorkerPool *pool) {
+	if (pool->current_worker_count >= pool->maximum_worker_count) {
+		return -1;
+	}
+	
+	struct IO_Event_WorkerPool_Worker *worker = malloc(sizeof(struct IO_Event_WorkerPool_Worker));
+	if (!worker) {
+		return -1;
+	}
+	
+	worker->pool = pool;
+	worker->interrupted = false;
+	worker->current_blocking_operation = NULL;
+	worker->next = pool->workers;
+	
+	worker->thread = rb_thread_create(worker_thread_func, worker);
+	if (NIL_P(worker->thread)) {
+		free(worker);
+		return -1;
+	}
+	
+	pool->workers = worker;
+	pool->current_worker_count++;
+	
+	return 0;
+}
+
+// Ruby constructor for WorkerPool
+static VALUE worker_pool_initialize(int argc, VALUE *argv, VALUE self) {
+	size_t maximum_worker_count = 1; // Default
+	
+	// Extract keyword arguments
+	VALUE kwargs = Qnil;
+	VALUE rb_maximum_worker_count = Qnil;
+	
+	rb_scan_args(argc, argv, "0:", &kwargs);
+	
+	if (!NIL_P(kwargs)) {
+		VALUE kwvals[1];
+		ID kwkeys[1] = {id_maximum_worker_count};
+		rb_get_kwargs(kwargs, kwkeys, 0, 1, kwvals);
+		rb_maximum_worker_count = kwvals[0];
+	}
+	
+	if (!NIL_P(rb_maximum_worker_count)) {
+		maximum_worker_count = NUM2SIZET(rb_maximum_worker_count);
+		if (maximum_worker_count == 0) {
+			rb_raise(rb_eArgError, "maximum_worker_count must be greater than 0!");
+		}
+	}
+	
+	// Get the pool that was allocated by worker_pool_allocate
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool allocation failed!");
+	}
+	
+	pthread_mutex_init(&pool->mutex, NULL);
+	pthread_cond_init(&pool->work_available, NULL);
+	
+	pool->work_queue = NULL;
+	pool->work_queue_tail = NULL;
+	pool->workers = NULL;
+	pool->current_worker_count = 0;
+	pool->maximum_worker_count = maximum_worker_count;
+	pool->call_count = 0;
+	pool->completed_count = 0;
+	pool->cancelled_count = 0;
+	pool->shutdown = false;
+	
+	// Create initial workers
+	for (size_t i = 0; i < maximum_worker_count; i++) {
+		if (create_worker_thread(pool) != 0) {
+			// Just set the maximum_worker_count for debugging, don't fail completely
+			// worker_pool_free(pool);
+			// rb_raise(rb_eRuntimeError, "Failed to create workers");
+			break;
+		}
+	}
+	
+	return self;
+}
+
+static VALUE worker_pool_work_begin(VALUE _work) {
+	struct IO_Event_WorkerPool_Work *work = (void*)_work;
+	
+	if (DEBUG) fprintf(stderr, "worker_pool_work_begin:rb_fiber_scheduler_block work=%p\n", work);
+	rb_fiber_scheduler_block(work->scheduler, work->blocker, Qnil);
+	
+	return Qnil;
+}
+
+// Ruby method to submit work and wait for completion
+static VALUE worker_pool_call(VALUE self, VALUE _blocking_operation) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	if (pool->shutdown) {
+		rb_raise(rb_eRuntimeError, "Worker pool is shut down!");
+	}
+	
+	// Increment call count (protected by GVL)
+	pool->call_count++;
+	
+	// Get current fiber and scheduler
+	VALUE fiber = rb_fiber_current();
+	VALUE scheduler = rb_fiber_scheduler_current();
+	if (NIL_P(scheduler)) {
+		rb_raise(rb_eRuntimeError, "WorkerPool requires a fiber scheduler!");
+	}
+	
+	// Extract blocking operation handle
+	rb_fiber_scheduler_blocking_operation_t *blocking_operation = rb_fiber_scheduler_blocking_operation_extract(_blocking_operation);
+	
+	if (!blocking_operation) {
+		rb_raise(rb_eArgError, "Invalid blocking operation!");
+	}
+	
+	// Create work item
+	struct IO_Event_WorkerPool_Work work = {
+		.blocking_operation = blocking_operation,
+		.completed = false,
+		.scheduler = scheduler,
+		.blocker = self,
+		.fiber = fiber,
+		.next = NULL
+	};
+	
+	// Enqueue work:
+	pthread_mutex_lock(&pool->mutex);
+	enqueue_work(pool, &work);
+	pthread_cond_signal(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+	
+	// Block the current fiber until work is completed:
+	int state = 0;
+	while (true) {
+		int current_state = 0;
+		rb_protect(worker_pool_work_begin, (VALUE)&work, &current_state);
+		if (DEBUG) fprintf(stderr, "-- worker_pool_call:work completed=%d, current_state=%d, state=%d\n", work.completed, current_state, state);
+		
+		// Store the first exception state:
+		if (!state) {
+			state = current_state;
+		}
+		
+		// If the work is still in the queue, we must wait for a worker to complete it (even if cancelled):
+		if (work.completed) {
+			// The work was completed, we can exit the loop:
+			break;
+		} else {
+			if (DEBUG) fprintf(stderr, "worker_pool_call:rb_fiber_scheduler_blocking_operation_cancel\n");
+			// Ensure the blocking operation is cancelled:
+			rb_fiber_scheduler_blocking_operation_cancel(blocking_operation);
+			
+			// The work was not completed, we need to wait for it to be completed, so we go around the loop again.
+		}
+	}
+	
+	if (DEBUG) fprintf(stderr, "<- worker_pool_call:work completed=%d, state=%d\n", work.completed, state);
+	
+	if (state) {
+		rb_jump_tag(state);
+	} else {
+		return Qtrue;
+	}
+}
+
+static VALUE worker_pool_allocate(VALUE klass) {
+	struct IO_Event_WorkerPool *pool;
+	VALUE self = TypedData_Make_Struct(klass, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	// Initialize to NULL/zero so we can detect uninitialized pools
+	memset(pool, 0, sizeof(struct IO_Event_WorkerPool));
+	
+	return self;
+}
+
+// Ruby method to close the worker pool
+static VALUE worker_pool_close(VALUE self) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool not initialized!");
+	}
+	
+	if (pool->shutdown) {
+		return Qnil; // Already closed
+	}
+	
+	// Signal shutdown to all workers
+	pthread_mutex_lock(&pool->mutex);
+	pool->shutdown = true;
+	pthread_cond_broadcast(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+	
+	// Wait for all worker threads to finish
+	struct IO_Event_WorkerPool_Worker *worker = pool->workers;
+	while (worker) {
+		if (!NIL_P(worker->thread)) {
+			rb_funcall(worker->thread, rb_intern("join"), 0);
+		}
+		worker = worker->next;
+	}
+	
+	// Clean up worker structures
+	worker = pool->workers;
+	while (worker) {
+		struct IO_Event_WorkerPool_Worker *next = worker->next;
+		free(worker);
+		worker = next;
+	}
+	pool->workers = NULL;
+	pool->current_worker_count = 0;
+	
+	// Clean up mutex and condition variable
+	pthread_mutex_destroy(&pool->mutex);
+	pthread_cond_destroy(&pool->work_available);
+	
+	return Qnil;
+}
+
+// Test helper: get pool statistics for debugging/testing
+static VALUE worker_pool_statistics(VALUE self) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool not initialized!");
+	}
+	
+	VALUE stats = rb_hash_new();
+	rb_hash_aset(stats, ID2SYM(rb_intern("current_worker_count")), SIZET2NUM(pool->current_worker_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("maximum_worker_count")), SIZET2NUM(pool->maximum_worker_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("call_count")), SIZET2NUM(pool->call_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("completed_count")), SIZET2NUM(pool->completed_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("cancelled_count")), SIZET2NUM(pool->cancelled_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("shutdown")), pool->shutdown ? Qtrue : Qfalse);
+	
+	// Count work items in queue (only if properly initialized)
+	if (pool->maximum_worker_count > 0) {
+		pthread_mutex_lock(&pool->mutex);
+		size_t current_queue_size = 0;
+		struct IO_Event_WorkerPool_Work *work = pool->work_queue;
+		while (work) {
+			current_queue_size++;
+			work = work->next;
+		}
+		pthread_mutex_unlock(&pool->mutex);
+		rb_hash_aset(stats, ID2SYM(rb_intern("current_queue_size")), SIZET2NUM(current_queue_size));
+	} else {
+		rb_hash_aset(stats, ID2SYM(rb_intern("current_queue_size")), SIZET2NUM(0));
+	}
+	
+	return stats;
+}
+
+void Init_IO_Event_WorkerPool(VALUE IO_Event) {
+	// Initialize symbols
+	id_maximum_worker_count = rb_intern("maximum_worker_count");
+	
+	IO_Event_WorkerPool = rb_define_class_under(IO_Event, "WorkerPool", rb_cObject);
+	rb_define_alloc_func(IO_Event_WorkerPool, worker_pool_allocate);
+	
+	rb_define_method(IO_Event_WorkerPool, "initialize", worker_pool_initialize, -1);
+	rb_define_method(IO_Event_WorkerPool, "call", worker_pool_call, 1);
+	rb_define_method(IO_Event_WorkerPool, "close", worker_pool_close, 0);
+	
+	rb_define_method(IO_Event_WorkerPool, "statistics", worker_pool_statistics, 0);
+	
+	// Initialize test functions
+	Init_IO_Event_WorkerPool_Test(IO_Event_WorkerPool);
+}
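`Init_IO_Event_WorkerPool` gives the pool a small Ruby surface: `initialize(maximum_worker_count:)` (default 1), `call(blocking_operation)`, `close`, and `statistics`; note that `call` raises unless a fiber scheduler is active. A minimal sketch of wiring it into a scheduler, following the `blocking_operation_wait` pattern shown in releases.md below (the class name is hypothetical, and a real scheduler must also implement `io_wait`, `block`, `unblock`, and friends):

```ruby
class PooledScheduler # hypothetical; other required scheduler hooks omitted
	def initialize
		@pool = IO::Event::WorkerPool.new(maximum_worker_count: 4)
	end
	
	# Ruby invokes this hook for rb_nogvl(..., RB_NOGVL_OFFLOAD_SAFE) calls;
	# the pool executes the operation on a pthread, off the event loop:
	def blocking_operation_wait(operation)
		@pool.call(operation)
	end
	
	def close
		# e.g. {current_worker_count: 4, call_count: 2, completed_count: 2, ...}
		p @pool.statistics
		@pool.close # joins workers, destroys the mutex and condition variable
	end
end
```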
data/ext/io/event/worker_pool_test.c ADDED

@@ -0,0 +1,199 @@
+// worker_pool_test.c - Test functions for WorkerPool cancellation
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "worker_pool_test.h"
+
+#include <ruby/thread.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <unistd.h>
+#include <errno.h>
+#include <time.h>
+
+static ID id_duration;
+
+struct BusyOperationData {
+	int read_fd;
+	int write_fd;
+	volatile int cancelled;
+	double duration; // How long to wait (for testing)
+	clock_t start_time;
+	clock_t end_time;
+	int operation_result;
+	VALUE exception;
+};
+
+// The actual blocking operation that can be cancelled
+static void* busy_blocking_operation(void *data) {
+	struct BusyOperationData *busy_data = (struct BusyOperationData*)data;
+	
+	// Use select() to wait for the pipe to become readable
+	fd_set read_fds;
+	struct timeval timeout;
+	
+	FD_ZERO(&read_fds);
+	FD_SET(busy_data->read_fd, &read_fds);
+	
+	// Set timeout based on duration
+	timeout.tv_sec = (long)busy_data->duration;
+	timeout.tv_usec = ((busy_data->duration - timeout.tv_sec) * 1000000);
+	
+	// This will block until:
+	// 1. The pipe becomes readable (cancellation)
+	// 2. The timeout expires
+	// 3. An error occurs
+	int result = select(busy_data->read_fd + 1, &read_fds, NULL, NULL, &timeout);
+	
+	if (result > 0 && FD_ISSET(busy_data->read_fd, &read_fds)) {
+		// Pipe became readable - we were cancelled
+		char buffer;
+		read(busy_data->read_fd, &buffer, 1); // Consume the byte
+		busy_data->cancelled = 1;
+		return (void*)-1; // Indicate cancellation
+	} else if (result == 0) {
+		// Timeout - operation completed normally
+		return (void*)0; // Indicate success
+	} else {
+		// Error occurred
+		return (void*)-2; // Indicate error
+	}
+}
+
+// Unblock function that writes to the pipe to cancel the operation
+static void busy_unblock_function(void *data) {
+	struct BusyOperationData *busy_data = (struct BusyOperationData*)data;
+	
+	busy_data->cancelled = 1;
+	
+	// Write a byte to the pipe to wake up the select()
+	char wake_byte = 1;
+	write(busy_data->write_fd, &wake_byte, 1);
+}
+
+// Function for the main operation execution (for rb_rescue)
+static VALUE busy_operation_execute(VALUE data_value) {
+	struct BusyOperationData *busy_data = (struct BusyOperationData*)data_value;
+	
+	// Record start time
+	busy_data->start_time = clock();
+	
+	// Execute the blocking operation
+	void *block_result = rb_nogvl(
+		busy_blocking_operation,
+		busy_data,
+		busy_unblock_function,
+		busy_data,
+		RB_NOGVL_UBF_ASYNC_SAFE | RB_NOGVL_OFFLOAD_SAFE
+	);
+	
+	// Record end time
+	busy_data->end_time = clock();
+	
+	// Store the operation result
+	busy_data->operation_result = (int)(intptr_t)block_result;
+	
+	return Qnil;
+}
+
+// Function for exception handling (for rb_rescue)
+static VALUE busy_operation_rescue(VALUE data_value, VALUE exception) {
+	struct BusyOperationData *busy_data = (struct BusyOperationData*)data_value;
+	
+	// Record end time even in case of exception
+	busy_data->end_time = clock();
+	
+	// Mark that an exception was caught
+	busy_data->exception = exception;
+	
+	return exception;
+}
+
+// Ruby method: IO::Event::WorkerPool.busy(duration: 1.0)
+// This creates a cancellable blocking operation for testing
+static VALUE worker_pool_test_busy(int argc, VALUE *argv, VALUE self) {
+	double duration = 1.0; // Default 1 second
+	
+	// Extract keyword arguments
+	VALUE kwargs = Qnil;
+	VALUE rb_duration = Qnil;
+	
+	rb_scan_args(argc, argv, "0:", &kwargs);
+	
+	if (!NIL_P(kwargs)) {
+		VALUE kwvals[1];
+		ID kwkeys[1] = {id_duration};
+		rb_get_kwargs(kwargs, kwkeys, 0, 1, kwvals);
+		rb_duration = kwvals[0];
+	}
+	
+	if (!NIL_P(rb_duration)) {
+		duration = NUM2DBL(rb_duration);
+	}
+	
+	// Create pipe for cancellation
+	int pipe_fds[2];
+	if (pipe(pipe_fds) != 0) {
+		rb_sys_fail("pipe creation failed");
+	}
+	
+	// Stack allocate operation data
+	struct BusyOperationData busy_data = {
+		.read_fd = pipe_fds[0],
+		.write_fd = pipe_fds[1],
+		.cancelled = 0,
+		.duration = duration,
+		.start_time = 0,
+		.end_time = 0,
+		.operation_result = 0,
+		.exception = Qnil
+	};
+	
+	// Execute the blocking operation with exception handling using function pointers
+	rb_rescue(
+		busy_operation_execute,
+		(VALUE)&busy_data,
+		busy_operation_rescue,
+		(VALUE)&busy_data
+	);
+	
+	// Calculate elapsed time from the state stored in busy_data
+	double elapsed = ((double)(busy_data.end_time - busy_data.start_time)) / CLOCKS_PER_SEC;
+	
+	// Create result hash using the state from busy_data
+	VALUE result = rb_hash_new();
+	rb_hash_aset(result, ID2SYM(rb_intern("duration")), DBL2NUM(duration));
+	rb_hash_aset(result, ID2SYM(rb_intern("elapsed")), DBL2NUM(elapsed));
+	
+	// Determine result based on operation outcome
+	if (busy_data.exception != Qnil) {
+		rb_hash_aset(result, ID2SYM(rb_intern("result")), ID2SYM(rb_intern("exception")));
+		rb_hash_aset(result, ID2SYM(rb_intern("cancelled")), Qtrue);
+		rb_hash_aset(result, ID2SYM(rb_intern("exception")), busy_data.exception);
+	} else if (busy_data.operation_result == -1) {
+		rb_hash_aset(result, ID2SYM(rb_intern("result")), ID2SYM(rb_intern("cancelled")));
+		rb_hash_aset(result, ID2SYM(rb_intern("cancelled")), Qtrue);
+	} else if (busy_data.operation_result == 0) {
+		rb_hash_aset(result, ID2SYM(rb_intern("result")), ID2SYM(rb_intern("completed")));
+		rb_hash_aset(result, ID2SYM(rb_intern("cancelled")), Qfalse);
+	} else {
+		rb_hash_aset(result, ID2SYM(rb_intern("result")), ID2SYM(rb_intern("error")));
+		rb_hash_aset(result, ID2SYM(rb_intern("cancelled")), Qfalse);
+	}
+	
+	// Clean up pipe file descriptors
+	close(pipe_fds[0]);
+	close(pipe_fds[1]);
+	
+	return result;
+}
+
+// Initialize the test functions
+void Init_IO_Event_WorkerPool_Test(VALUE IO_Event_WorkerPool) {
+	// Initialize symbols
+	id_duration = rb_intern("duration");
+	
+	// Add test methods to IO::Event::WorkerPool class
+	rb_define_singleton_method(IO_Event_WorkerPool, "busy", worker_pool_test_busy, -1);
+}
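The `busy` helper exercises the cancellation path end-to-end: it parks in `select(2)` under `rb_nogvl` and reports how the operation ended. A sketch of what a test might check (exact timings vary by machine):

```ruby
result = IO::Event::WorkerPool.busy(duration: 0.1)

result[:result]    # => :completed when the select() timeout expired,
                   #    :cancelled if the unblock function fired first
result[:cancelled] # => false in the normal completed case
result[:elapsed]   # CPU-clock estimate; compare against result[:duration]
```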
data/lib/io/event/debug/selector.rb CHANGED

@@ -116,9 +116,9 @@ module IO::Event
		#
		# @parameter fiber [Fiber] The fiber to raise the exception on.
		# @parameter arguments [Array] The arguments to use when raising the exception.
-		def raise(fiber, *arguments)
+		def raise(fiber, *arguments, **options)
			log("Raising exception on fiber #{fiber.inspect} with #{arguments.inspect}")
-			@selector.raise(fiber, *arguments)
+			@selector.raise(fiber, *arguments, **options)
		end
		
		# Check if the selector is ready.
data/lib/io/event/priority_heap.rb CHANGED

@@ -82,7 +82,7 @@ class IO
		# Validate the heap invariant. Every element except the root must not be smaller than its parent element. Note that it MAY be equal.
		def valid?
			# Notice we skip index 0 on purpose, because it has no parent
-			(1..(@contents.size - 1)).all? {
+			(1..(@contents.size - 1)).all? {|index| @contents[index] >= @contents[(index - 1) / 2]}
		end
		
		private
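The completed block spells out the heap invariant: in a flat-array binary heap with the root at index 0, the parent of `index` is `(index - 1) / 2`. A standalone illustration of the same check:

```ruby
contents = [1, 3, 2, 7, 4] # a valid min-heap

# Every non-root element must be >= its parent at (index - 1) / 2:
valid = (1..(contents.size - 1)).all? do |index|
	contents[index] >= contents[(index - 1) / 2]
end
# => true; e.g. contents[3] = 7 is checked against its parent contents[1] = 3.
```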
data/lib/io/event/selector/select.rb CHANGED

@@ -1,7 +1,7 @@
 # frozen_string_literal: true
 
 # Released under the MIT License.
-# Copyright, 2021-
+# Copyright, 2021-2025, by Samuel Williams.
 # Copyright, 2023, by Math Ieu.
 
 require_relative "../interrupt"

@@ -17,6 +17,9 @@ module IO::Event
			
			@waiting = Hash.new.compare_by_identity
			
+			# Flag indicating whether the selector is currently blocked in a system call.
+			# Set to true when blocked in ::IO.select, false otherwise.
+			# Used by wakeup() to determine if an interrupt signal is needed.
			@blocked = false
			
			@ready = Queue.new

@@ -95,11 +98,11 @@ module IO::Event
		end
		
		# Transfer to the given fiber and raise an exception. Put the current fiber into the ready list.
-		def raise(fiber, *arguments)
+		def raise(fiber, *arguments, **options)
			optional = Optional.new(Fiber.current)
			@ready.push(optional)
			
-			fiber.raise(*arguments)
+			fiber.raise(*arguments, **options)
		ensure
			optional.nullify
		end

@@ -196,7 +199,7 @@ module IO::Event
				result = Fiber.blocking{buffer.read(io, 0, offset)}
				
				if result < 0
-					if again?(result)
+					if length > 0 and again?(result)
						self.io_wait(fiber, io, IO::READABLE)
					else
						return result

@@ -226,7 +229,7 @@ module IO::Event
				result = Fiber.blocking{buffer.write(io, 0, offset)}
				
				if result < 0
-					if again?(result)
+					if length > 0 and again?(result)
						self.io_wait(fiber, io, IO::READABLE)
					else
						return result

@@ -302,96 +305,14 @@ module IO::Event
				
				return total
			end
-		elsif Support.fiber_scheduler_v1?
-			# Ruby <= 3.1, limited IO::Buffer support.
-			def io_read(fiber, _io, buffer, length, offset = 0)
-				# We need to avoid any internal buffering, so we use a duplicated IO object:
-				io = IO.for_fd(_io.fileno, autoclose: false)
-				
-				total = 0
-				
-				maximum_size = buffer.size - offset
-				while maximum_size > 0
-					case result = blocking{io.read_nonblock(maximum_size, exception: false)}
-					when :wait_readable
-						if length > 0
-							self.io_wait(fiber, io, IO::READABLE)
-						else
-							return EWOULDBLOCK
-						end
-					when :wait_writable
-						if length > 0
-							self.io_wait(fiber, io, IO::WRITABLE)
-						else
-							return EWOULDBLOCK
-						end
-					when nil
-						break
-					else
-						buffer.set_string(result, offset)
-						
-						size = result.bytesize
-						total += size
-						offset += size
-						break if size >= length
-						length -= size
-					end
-					
-					maximum_size = buffer.size - offset
-				end
-				
-				return total
-			rescue IOError => error
-				return -Errno::EBADF::Errno
-			rescue SystemCallError => error
-				return -error.errno
-			end
-			
-			def io_write(fiber, _io, buffer, length, offset = 0)
-				# We need to avoid any internal buffering, so we use a duplicated IO object:
-				io = IO.for_fd(_io.fileno, autoclose: false)
-				
-				total = 0
-				
-				maximum_size = buffer.size - offset
-				while maximum_size > 0
-					chunk = buffer.get_string(offset, maximum_size)
-					case result = blocking{io.write_nonblock(chunk, exception: false)}
-					when :wait_readable
-						if length > 0
-							self.io_wait(fiber, io, IO::READABLE)
-						else
-							return EWOULDBLOCK
-						end
-					when :wait_writable
-						if length > 0
-							self.io_wait(fiber, io, IO::WRITABLE)
-						else
-							return EWOULDBLOCK
-						end
-					else
-						total += result
-						offset += result
-						break if result >= length
-						length -= result
-					end
-					
-					maximum_size = buffer.size - offset
-				end
-				
-				return total
-			rescue IOError => error
-				return -Errno::EBADF::Errno
-			rescue SystemCallError => error
-				return -error.errno
-			end
-			
-			def blocking(&block)
-				fiber = Fiber.new(blocking: true, &block)
-				return fiber.resume(fiber)
-			end
		end
		
+		# Wait for a process to change state.
+		#
+		# @parameter fiber [Fiber] The fiber to resume after waiting.
+		# @parameter pid [Integer] The process ID to wait for.
+		# @parameter flags [Integer] Flags to pass to Process::Status.wait.
+		# @returns [Process::Status] The status of the waited process.
		def process_wait(fiber, pid, flags)
			Thread.new do
				Process::Status.wait(pid, flags)

@@ -411,6 +332,10 @@ module IO::Event
			end
		end
		
+		# Wait for IO events or a timeout.
+		#
+		# @parameter duration [Numeric | Nil] The maximum time to wait, or nil for no timeout.
+		# @returns [Integer] The number of ready IO objects.
		def select(duration = nil)
			if pop_ready
				# If we have popped items from the ready list, they may influence the duration calculation, so we don't delay the event loop:
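The new `@blocked` comment describes the same handshake documented for all the native selectors in this release: `wakeup` only needs to interrupt the event loop when it is actually parked in a blocking system call. A simplified, standalone sketch of the idea (not the gem's exact implementation, which uses its `Interrupt` object rather than a bare pipe):

```ruby
class MiniSelector
	def initialize
		@blocked = false
		@reader, @writer = IO.pipe # stand-in for the gem's Interrupt object
	end
	
	def select(duration)
		@blocked = true
		::IO.select([@reader], nil, nil, duration)
	ensure
		@blocked = false
	end
	
	# Only write to the pipe when the loop is actually inside ::IO.select;
	# otherwise the wakeup would be unnecessary work.
	def wakeup
		@writer.write(".") if @blocked
	end
end
```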
data/lib/io/event/support.rb CHANGED

@@ -14,35 +14,28 @@ class IO
			IO.const_defined?(:Buffer)
		end
		
-		# The basic fiber scheduler was introduced along side the IO::Buffer class.
-		#
-		# @returns [Boolean] Whether the IO::Buffer class is available.
-		#
-		# To be removed on 31 Mar 2025.
-		def self.fiber_scheduler_v1?
-			IO.const_defined?(:Buffer)
-		end
-		
		# More advanced read/write methods and blocking controls were introduced in Ruby 3.2.
		#
		# To be removed on 31 Mar 2026.
		def self.fiber_scheduler_v2?
-
-
-
-
-
-
-
+			if RUBY_VERSION >= "3.2"
+				return true if RUBY_VERSION >= "3.2.6"
+				
+				# Some interface changes were back-ported incorrectly and released in 3.2.5 <https://github.com/ruby/ruby/pull/10778> - Specifically "Improvements to IO::Buffer read/write/pread/pwrite." is missing correct size calculation.
+				return false if RUBY_VERSION >= "3.2.5"
+				
+				# Feature detection:
+				IO.const_defined?(:Buffer) and Fiber.respond_to?(:blocking) and IO::Buffer.instance_method(:read).arity == -1
+			end
		end
		
		# Updated inferfaces for read/write and IO::Buffer were introduced in Ruby 3.3, including pread/pwrite.
		#
		# To become the default 31 Mar 2026.
		def self.fiber_scheduler_v3?
+			return true if RUBY_VERSION >= "3.3"
+			
			if fiber_scheduler_v2?
-				return true if RUBY_VERSION >= "3.3"
-				
				# Feature detection if required:
				begin
					IO::Buffer.new.slice(0, 0).write(STDOUT)
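These predicates are consulted at load time to pick an implementation, as the select.rb changes above show (the removed branch was gated on the now-deleted `fiber_scheduler_v1?`). For example:

```ruby
require "io/event"

IO::Event::Support.fiber_scheduler_v2? # => true on Ruby >= 3.2.6 (false on 3.2.5 due to the bad back-port)
IO::Event::Support.fiber_scheduler_v3? # => true on Ruby >= 3.3
```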
data/lib/io/event/version.rb CHANGED
data/license.md CHANGED

@@ -9,7 +9,7 @@ Copyright, 2022, by Bruno Sutic.
 Copyright, 2023, by Math Ieu.
 Copyright, 2024, by Pavel Rosický.
 Copyright, 2024, by Anthony Ross.
-Copyright, 2024, by Shizuo Fujita.
+Copyright, 2024-2025, by Shizuo Fujita.
 Copyright, 2024, by Jean Boussier.
 Copyright, 2025, by Stanislav (Stas) Katkov.
 
data/readme.md CHANGED

@@ -18,6 +18,18 @@ Please see the [project documentation](https://socketry.github.io/io-event/) for
 
 Please see the [project releases](https://socketry.github.io/io-event/releases/index) for all releases.
 
+### v1.11.2
+
+- Fix Windows build.
+
+### v1.11.1
+
+- Fix `read_nonblock` when using the `URing` selector, which was not handling zero-length reads correctly. This allows reading available data without blocking.
+
+### v1.11.0
+
+- [Introduce `IO::Event::WorkerPool` for off-loading blocking operations.](https://socketry.github.io/io-event/releases/index#introduce-io::event::workerpool-for-off-loading-blocking-operations.)
+
 ### v1.10.2
 
 - Improved consistency of handling closed IO when invoking `#select`.
data/releases.md CHANGED

@@ -1,5 +1,39 @@
 # Releases
 
+## v1.11.2
+
+- Fix Windows build.
+
+## v1.11.1
+
+- Fix `read_nonblock` when using the `URing` selector, which was not handling zero-length reads correctly. This allows reading available data without blocking.
+
+## v1.11.0
+
+### Introduce `IO::Event::WorkerPool` for off-loading blocking operations.
+
+The {ruby IO::Event::WorkerPool} provides a mechanism for executing blocking operations on separate OS threads while properly integrating with Ruby's fiber scheduler and GVL (Global VM Lock) management. This enables true parallelism for CPU-intensive or blocking operations that would otherwise block the event loop.
+
+``` ruby
+# Fiber scheduler integration via blocking_operation_wait hook
+class MyScheduler
+	def initialize
+		@worker_pool = IO::Event::WorkerPool.new
+	end
+	
+	def blocking_operation_wait(operation)
+		@worker_pool.call(operation)
+	end
+end
+
+# Usage with automatic offloading
+Fiber.set_scheduler(MyScheduler.new)
+# Automatically offload `rb_nogvl(..., RB_NOGVL_OFFLOAD_SAFE)` to a background thread:
+result = some_blocking_operation()
+```
+
+The implementation uses one or more background threads and a list of pending blocking operations. Those operations either execute through to completion or may be cancelled, which executes the "unblock function" provided to `rb_nogvl`.
+
 ## v1.10.2
 
 - Improved consistency of handling closed IO when invoking `#select`.
data.tar.gz.sig CHANGED

Binary file
metadata CHANGED

@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.12.1
 platform: ruby
 authors:
 - Samuel Williams

@@ -10,11 +10,11 @@ authors:
 - Jean Boussier
 - Benoit Daloze
 - Bruno Sutic
+- Shizuo Fujita
 - Alex Matchneer
 - Anthony Ross
 - Delton Ding
 - Pavel Rosický
-- Shizuo Fujita
 - Stanislav (Stas) Katkov
 bindir: bin
 cert_chain:

@@ -54,6 +54,7 @@ extensions:
 - ext/extconf.rb
 extra_rdoc_files: []
 files:
+- agent.md
 - design.md
 - ext/extconf.rb
 - ext/io/event/array.h

@@ -75,6 +76,10 @@ files:
 - ext/io/event/selector/uring.h
 - ext/io/event/time.c
 - ext/io/event/time.h
+- ext/io/event/worker_pool.c
+- ext/io/event/worker_pool.h
+- ext/io/event/worker_pool_test.c
+- ext/io/event/worker_pool_test.h
 - lib/io/event.rb
 - lib/io/event/debug/selector.rb
 - lib/io/event/interrupt.rb

@@ -102,14 +107,14 @@ required_ruby_version: !ruby/object:Gem::Requirement
 requirements:
 - - ">="
 - !ruby/object:Gem::Version
-  version:
+  version: 3.2.6
 required_rubygems_version: !ruby/object:Gem::Requirement
 requirements:
 - - ">="
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.7
+rubygems_version: 3.6.7
 specification_version: 4
 summary: An event loop.
 test_files: []
metadata.gz.sig CHANGED

Binary file