io-event 1.9.0 → 1.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +18 -7
- data/ext/io/event/event.c +5 -4
- data/ext/io/event/event.h +4 -0
- data/ext/io/event/fiber.c +1 -1
- data/ext/io/event/selector/epoll.c +18 -0
- data/ext/io/event/selector/kqueue.c +19 -0
- data/ext/io/event/selector/uring.c +25 -3
- data/ext/io/event/worker_pool.c +464 -0
- data/ext/io/event/{profiler.h → worker_pool.h} +1 -1
- data/ext/io/event/worker_pool_test.c +200 -0
- data/ext/io/event/worker_pool_test.h +9 -0
- data/lib/io/event/selector/select.rb +18 -12
- data/lib/io/event/timers.rb +1 -2
- data/lib/io/event/version.rb +1 -1
- data/license.md +1 -0
- data/readme.md +16 -3
- data/releases.md +30 -39
- data.tar.gz.sig +0 -0
- metadata +8 -6
- metadata.gz.sig +0 -0
- data/ext/io/event/profiler.c +0 -505
- data/lib/io/event/profiler.rb +0 -18
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e5601614e57f23e24edb8f34dfdaf5b87ca33e902312c27fa51dc9af375ab76c
+  data.tar.gz: c791965fb7be4525b4894de46f1a14787a19ea68920d4009e89713e3aebc1001
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 6f662e6208b30d95aeb68d68ea9e3459bd51b6dc920d7bfcaa4794ffa455316443d2da3c6e954805bd8daadd0339341aab09c225054d9e81903866d63b39d9f0
+  data.tar.gz: 0016b73d68044784ec52d270725d7f4a8bde5064bfd54c5fd3d685f36771d235ae16719fa1ca0a3f9d4dd246894e84d74e8e5025605ebcfc86fb9b5d6be39696
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/extconf.rb
CHANGED
@@ -4,6 +4,7 @@
 # Released under the MIT License.
 # Copyright, 2021-2025, by Samuel Williams.
 # Copyright, 2023, by Math Ieu.
+# Copyright, 2025, by Stanislav (Stas) Katkov.
 
 return if RUBY_DESCRIPTION =~ /jruby/
 
@@ -14,15 +15,15 @@ extension_name = "IO_Event"
 
 # dir_config(extension_name)
 
-
+append_cflags(["-Wall", "-Wno-unknown-pragmas", "-std=c99"])
 
 if ENV.key?("RUBY_DEBUG")
 	$stderr.puts "Enabling debug mode..."
-
-
+
+	append_cflags(["-DRUBY_DEBUG", "-O0"])
 end
 
-$srcs = ["io/event/event.c", "io/event/time.c", "io/event/fiber.c", "io/event/
+$srcs = ["io/event/event.c", "io/event/time.c", "io/event/fiber.c", "io/event/selector/selector.c"]
 $VPATH << "$(srcdir)/io/event"
 $VPATH << "$(srcdir)/io/event/selector"
 
@@ -32,7 +33,7 @@ have_func("&rb_fiber_transfer")
 if have_library("uring") and have_header("liburing.h")
 	# We might want to consider using this in the future:
 	# have_func("io_uring_submit_and_wait_timeout", "liburing.h")
-
+
 	$srcs << "io/event/selector/uring.c"
 end
 
@@ -57,11 +58,21 @@ have_func("epoll_pwait2")
 
 have_header("ruby/io/buffer.h")
 
+# Feature detection for blocking operation support
+if have_func("rb_fiber_scheduler_blocking_operation_extract")
+	# Feature detection for pthread support (needed for WorkerPool)
+	if have_header("pthread.h")
+		append_cflags(["-DHAVE_IO_EVENT_WORKER_POOL"])
+		$srcs << "io/event/worker_pool.c"
+		$srcs << "io/event/worker_pool_test.c"
+	end
+end
+
 if ENV.key?("RUBY_SANITIZE")
 	$stderr.puts "Enabling sanitizers..."
-
+
 	# Add address and undefined behaviour sanitizers:
-
+	append_cflags(["-fsanitize=address", "-fsanitize=undefined", "-fno-omit-frame-pointer"])
 	$LDFLAGS << " -fsanitize=address -fsanitize=undefined"
 end
 
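Because the new extconf.rb block only compiles worker_pool.c when both the rb_fiber_scheduler_blocking_operation_extract check and the pthread.h check succeed, IO::Event::WorkerPool may or may not exist in a given build of the gem. A minimal sketch (not part of the diff) of how a caller might probe for it at run time:

```ruby
# Hedged sketch: the constant is only defined when the extension was built
# with -DHAVE_IO_EVENT_WORKER_POOL, per the extconf.rb logic above.
require "io/event"

if defined?(IO::Event::WorkerPool)
	# Blocking operations can be offloaded to native worker threads.
	pool = IO::Event::WorkerPool.new(maximum_worker_count: 2)
	pool.close
else
	# Older Ruby headers or no pthread.h at build time; fall back gracefully.
	warn "IO::Event::WorkerPool is not available in this build."
end
```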
data/ext/io/event/event.c
CHANGED
@@ -3,9 +3,7 @@
 
 #include "event.h"
 #include "fiber.h"
-#include "profiler.h"
 #include "selector/selector.h"
-#include <complex.h>
 
 void Init_IO_Event(void)
 {
@@ -16,8 +14,11 @@ void Init_IO_Event(void)
 	VALUE IO_Event = rb_define_module_under(rb_cIO, "Event");
 	
 	Init_IO_Event_Fiber(IO_Event);
-
-
+	
+	#ifdef HAVE_IO_EVENT_WORKER_POOL
+	Init_IO_Event_WorkerPool(IO_Event);
+	#endif
+	
 	VALUE IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");
 	Init_IO_Event_Selector(IO_Event_Selector);
 	
data/ext/io/event/event.h
CHANGED
data/ext/io/event/fiber.c
CHANGED
@@ -35,7 +35,7 @@ VALUE IO_Event_Fiber_raise(VALUE fiber, int argc, VALUE *argv) {
 #ifndef HAVE_RB_FIBER_CURRENT
 static ID id_current;
 
-
+VALUE IO_Event_Fiber_current(void) {
 	return rb_funcall(rb_cFiber, id_current, 0);
 }
 #endif
data/ext/io/event/selector/epoll.c
CHANGED
@@ -1035,7 +1035,25 @@ VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
 	return Qfalse;
 }
 
+static int IO_Event_Selector_EPoll_supported_p(void) {
+	int fd = epoll_create1(EPOLL_CLOEXEC);
+	
+	if (fd < 0) {
+		rb_warn("epoll_create1() was available at compile time but failed at run time: %s\n", strerror(errno));
+		
+		return 0;
+	}
+	
+	close(fd);
+	
+	return 1;
+}
+
 void Init_IO_Event_Selector_EPoll(VALUE IO_Event_Selector) {
+	if (!IO_Event_Selector_EPoll_supported_p()) {
+		return;
+	}
+	
 	VALUE IO_Event_Selector_EPoll = rb_define_class_under(IO_Event_Selector, "EPoll", rb_cObject);
 	
 	rb_define_alloc_func(IO_Event_Selector_EPoll, IO_Event_Selector_EPoll_allocate);
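The new supported_p guard means that if epoll_create1() fails at run time (for example, under an old kernel or a restrictive sandbox), initialization returns early and the EPoll class is never defined; the KQueue and URing hunks below apply the same pattern. A minimal sketch (not from the diff) of checking backend availability from Ruby:

```ruby
# Hedged sketch: a selector constant only exists when its Init_ function ran to
# completion, so a simple defined? check reflects run-time support.
require "io/event"

if defined?(IO::Event::Selector::EPoll)
	puts "epoll is usable on this system"
else
	# The pure-Ruby selector is always present as a fallback.
	puts "falling back to IO::Event::Selector::Select"
end
```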
data/ext/io/event/selector/kqueue.c
CHANGED
@@ -1045,7 +1045,26 @@ VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
 	return Qfalse;
 }
 
+
+static int IO_Event_Selector_KQueue_supported_p(void) {
+	int fd = kqueue();
+	
+	if (fd < 0) {
+		rb_warn("kqueue() was available at compile time but failed at run time: %s\n", strerror(errno));
+		
+		return 0;
+	}
+	
+	close(fd);
+	
+	return 1;
+}
+
 void Init_IO_Event_Selector_KQueue(VALUE IO_Event_Selector) {
+	if (!IO_Event_Selector_KQueue_supported_p()) {
+		return;
+	}
+	
 	VALUE IO_Event_Selector_KQueue = rb_define_class_under(IO_Event_Selector, "KQueue", rb_cObject);
 	
 	rb_define_alloc_func(IO_Event_Selector_KQueue, IO_Event_Selector_KQueue_allocate);
data/ext/io/event/selector/uring.c
CHANGED
@@ -18,7 +18,7 @@
 enum {
 	DEBUG = 0,
 	DEBUG_COMPLETION = 0,
-
+	DEBUG_CQE = 0,
 };
 
 enum {URING_ENTRIES = 64};
@@ -552,7 +552,10 @@ VALUE io_wait_transfer(VALUE _arguments) {
 	
 	if (DEBUG) fprintf(stderr, "io_wait_transfer:waiting=%p, result=%d\n", (void*)arguments->waiting, arguments->waiting->result);
 	
-
+	int32_t result = arguments->waiting->result;
+	if (result < 0) {
+		rb_syserr_fail(-result, "io_wait_transfer:io_uring_poll_add");
+	} else if (result > 0) {
 		// We explicitly filter the resulting events based on the requested events.
 		// In some cases, poll will report events we didn't ask for.
 		return RB_INT2NUM(events_from_poll_flags(arguments->waiting->result & arguments->flags));
@@ -1059,7 +1062,7 @@ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
 	}
 	
 	io_uring_for_each_cqe(ring, head, cqe) {
-		if (
+		if (DEBUG_CQE) fprintf(stderr, "select_process_completions: cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
 		
 		++completed;
 		
@@ -1175,7 +1178,26 @@ VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
 
 #pragma mark - Native Methods
 
+static int IO_Event_Selector_URing_supported_p(void) {
+	struct io_uring ring;
+	int result = io_uring_queue_init(32, &ring, 0);
+	
+	if (result < 0) {
+		rb_warn("io_uring_queue_init() was available at compile time but failed at run time: %s\n", strerror(-result));
+		
+		return 0;
+	}
+	
+	io_uring_queue_exit(&ring);
+	
+	return 1;
+}
+
 void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
+	if (!IO_Event_Selector_URing_supported_p()) {
+		return;
+	}
+	
 	VALUE IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
 	
 	rb_define_alloc_func(IO_Event_Selector_URing, IO_Event_Selector_URing_allocate);
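Beyond the runtime support check, the io_wait_transfer hunk changes error handling: a negative io_uring poll result is now raised via rb_syserr_fail rather than being reported as zero ready events. A hedged illustration (not part of the diff) of what that looks like from the calling fiber, where the error surfaces as the matching Errno subclass of SystemCallError:

```ruby
require "io/wait"

reader, writer = IO.pipe

begin
	# Under the URing selector (inside a fiber scheduler), a failed poll
	# submission now raises instead of silently returning no events.
	reader.wait_readable(0.1)
rescue SystemCallError => error
	warn "io_uring poll failed: #{error.class}: #{error.message}"
ensure
	reader.close
	writer.close
end
```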
data/ext/io/event/worker_pool.c
ADDED
@@ -0,0 +1,464 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "worker_pool.h"
+#include "worker_pool_test.h"
+#include "fiber.h"
+
+#include <ruby/thread.h>
+#include <ruby/fiber/scheduler.h>
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+enum {
+	DEBUG = 0,
+};
+
+static VALUE IO_Event_WorkerPool;
+static ID id_maximum_worker_count;
+
+// Thread pool structure
+struct IO_Event_WorkerPool_Worker {
+	VALUE thread;
+	
+	// Flag to indicate this specific worker should exit:
+	bool interrupted;
+	
+	// Currently executing operation:
+	rb_fiber_scheduler_blocking_operation_t *current_blocking_operation;
+	
+	struct IO_Event_WorkerPool *pool;
+	struct IO_Event_WorkerPool_Worker *next;
+};
+
+// Work item structure
+struct IO_Event_WorkerPool_Work {
+	rb_fiber_scheduler_blocking_operation_t *blocking_operation;
+	
+	bool completed;
+	
+	VALUE scheduler;
+	VALUE blocker;
+	VALUE fiber;
+	
+	struct IO_Event_WorkerPool_Work *next;
+};
+
+// Worker pool structure
+struct IO_Event_WorkerPool {
+	pthread_mutex_t mutex;
+	pthread_cond_t work_available;
+	
+	struct IO_Event_WorkerPool_Work *work_queue;
+	struct IO_Event_WorkerPool_Work *work_queue_tail;
+	
+	struct IO_Event_WorkerPool_Worker *workers;
+	size_t current_worker_count;
+	size_t maximum_worker_count;
+	
+	size_t call_count;
+	size_t completed_count;
+	size_t cancelled_count;
+	
+	bool shutdown;
+};
+
+// Free functions for Ruby GC
+static void worker_pool_free(void *ptr) {
+	struct IO_Event_WorkerPool *pool = (struct IO_Event_WorkerPool *)ptr;
+	
+	if (pool) {
+		// Signal shutdown to all workers
+		if (!pool->shutdown) {
+			pthread_mutex_lock(&pool->mutex);
+			pool->shutdown = true;
+			pthread_cond_broadcast(&pool->work_available);
+			pthread_mutex_unlock(&pool->mutex);
+		}
+		
+		// Note: We don't free worker structures or wait for threads during GC
+		// as this can cause deadlocks. The Ruby GC will handle the thread objects.
+		// Workers will see the shutdown flag and exit cleanly.
+	}
+}
+
+// Size functions for Ruby GC
+static size_t worker_pool_size(const void *ptr) {
+	return sizeof(struct IO_Event_WorkerPool);
+}
+
+// Ruby TypedData structures
+static const rb_data_type_t IO_Event_WorkerPool_type = {
+	"IO::Event::WorkerPool",
+	{0, worker_pool_free, worker_pool_size,},
+	0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+};
+
+// Helper function to enqueue work (must be called with mutex held)
+static void enqueue_work(struct IO_Event_WorkerPool *pool, struct IO_Event_WorkerPool_Work *work) {
+	if (pool->work_queue_tail) {
+		pool->work_queue_tail->next = work;
+	} else {
+		pool->work_queue = work;
+	}
+	pool->work_queue_tail = work;
+}
+
+// Helper function to dequeue work (must be called with mutex held)
+static struct IO_Event_WorkerPool_Work *dequeue_work(struct IO_Event_WorkerPool *pool) {
+	struct IO_Event_WorkerPool_Work *work = pool->work_queue;
+	if (work) {
+		pool->work_queue = work->next;
+		if (!pool->work_queue) {
+			pool->work_queue_tail = NULL;
+		}
+		work->next = NULL; // Clear the next pointer for safety
+	}
+	return work;
+}
+
+// Unblock function to interrupt a specific worker.
+static void worker_unblock_func(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+	struct IO_Event_WorkerPool *pool = worker->pool;
+	
+	// Mark this specific worker as interrupted
+	pthread_mutex_lock(&pool->mutex);
+	worker->interrupted = true;
+	pthread_cond_broadcast(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+	
+	// If there's a currently executing blocking operation, cancel it
+	if (worker->current_blocking_operation) {
+		rb_fiber_scheduler_blocking_operation_cancel(worker->current_blocking_operation);
+	}
+}
+
+// Function to wait for work and execute it without GVL.
+static void *worker_wait_and_execute(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+	struct IO_Event_WorkerPool *pool = worker->pool;
+	
+	while (true) {
+		struct IO_Event_WorkerPool_Work *work = NULL;
+		
+		pthread_mutex_lock(&pool->mutex);
+		
+		// Wait for work, shutdown, or interruption
+		while (!pool->work_queue && !pool->shutdown && !worker->interrupted) {
+			pthread_cond_wait(&pool->work_available, &pool->mutex);
+		}
+		
+		if (pool->shutdown || worker->interrupted) {
+			pthread_mutex_unlock(&pool->mutex);
+			break;
+		}
+		
+		work = dequeue_work(pool);
+		
+		pthread_mutex_unlock(&pool->mutex);
+		
+		// Execute work WITHOUT GVL (this is the whole point!)
+		if (work) {
+			worker->current_blocking_operation = work->blocking_operation;
+			rb_fiber_scheduler_blocking_operation_execute(work->blocking_operation);
+			worker->current_blocking_operation = NULL;
+		}
+		
+		return work;
+	}
+	
+	return NULL; // Shutdown signal
+}
+
+static VALUE worker_thread_func(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+	
+	while (true) {
+		// Wait for work and execute it without holding GVL
+		struct IO_Event_WorkerPool_Work *work = (struct IO_Event_WorkerPool_Work *)rb_thread_call_without_gvl(worker_wait_and_execute, worker, worker_unblock_func, worker);
+		
+		if (!work) {
+			// Shutdown signal received
+			break;
+		}
+		
+		// Protected by GVL:
+		work->completed = true;
+		worker->pool->completed_count++;
+		
+		// Work was executed without GVL, now unblock the waiting fiber (we have GVL here)
+		rb_fiber_scheduler_unblock(work->scheduler, work->blocker, work->fiber);
+	}
+	
+	return Qnil;
+}
+
+// Create a new worker thread
+static int create_worker_thread(struct IO_Event_WorkerPool *pool) {
+	if (pool->current_worker_count >= pool->maximum_worker_count) {
+		return -1;
+	}
+	
+	struct IO_Event_WorkerPool_Worker *worker = malloc(sizeof(struct IO_Event_WorkerPool_Worker));
+	if (!worker) {
+		return -1;
+	}
+	
+	worker->pool = pool;
+	worker->interrupted = false;
+	worker->current_blocking_operation = NULL;
+	worker->next = pool->workers;
+	
+	worker->thread = rb_thread_create(worker_thread_func, worker);
+	if (NIL_P(worker->thread)) {
+		free(worker);
+		return -1;
+	}
+	
+	pool->workers = worker;
+	pool->current_worker_count++;
+	
+	return 0;
+}
+
+// Ruby constructor for WorkerPool
+static VALUE worker_pool_initialize(int argc, VALUE *argv, VALUE self) {
+	size_t maximum_worker_count = 1; // Default
+	
+	// Extract keyword arguments
+	VALUE kwargs = Qnil;
+	VALUE rb_maximum_worker_count = Qnil;
+	
+	rb_scan_args(argc, argv, "0:", &kwargs);
+	
+	if (!NIL_P(kwargs)) {
+		VALUE kwvals[1];
+		ID kwkeys[1] = {id_maximum_worker_count};
+		rb_get_kwargs(kwargs, kwkeys, 0, 1, kwvals);
+		rb_maximum_worker_count = kwvals[0];
+	}
+	
+	if (!NIL_P(rb_maximum_worker_count)) {
+		maximum_worker_count = NUM2SIZET(rb_maximum_worker_count);
+		if (maximum_worker_count == 0) {
+			rb_raise(rb_eArgError, "maximum_worker_count must be greater than 0!");
+		}
+	}
+	
+	// Get the pool that was allocated by worker_pool_allocate
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool allocation failed!");
+	}
+	
+	pthread_mutex_init(&pool->mutex, NULL);
+	pthread_cond_init(&pool->work_available, NULL);
+	
+	pool->work_queue = NULL;
+	pool->work_queue_tail = NULL;
+	pool->workers = NULL;
+	pool->current_worker_count = 0;
+	pool->maximum_worker_count = maximum_worker_count;
+	pool->call_count = 0;
+	pool->completed_count = 0;
+	pool->cancelled_count = 0;
+	pool->shutdown = false;
+	
+	// Create initial workers
+	for (size_t i = 0; i < maximum_worker_count; i++) {
+		if (create_worker_thread(pool) != 0) {
+			// Just set the maximum_worker_count for debugging, don't fail completely
+			// worker_pool_free(pool);
+			// rb_raise(rb_eRuntimeError, "Failed to create workers");
+			break;
+		}
+	}
+	
+	return self;
+}
+
+static VALUE worker_pool_work_begin(VALUE _work) {
+	struct IO_Event_WorkerPool_Work *work = (void*)_work;
+	
+	if (DEBUG) fprintf(stderr, "worker_pool_work_begin:rb_fiber_scheduler_block work=%p\n", work);
+	rb_fiber_scheduler_block(work->scheduler, work->blocker, Qnil);
+	
+	return Qnil;
+}
+
+// Ruby method to submit work and wait for completion
+static VALUE worker_pool_call(VALUE self, VALUE _blocking_operation) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	if (pool->shutdown) {
+		rb_raise(rb_eRuntimeError, "Worker pool is shut down!");
+	}
+	
+	// Increment call count (protected by GVL)
+	pool->call_count++;
+	
+	// Get current fiber and scheduler
+	VALUE fiber = rb_fiber_current();
+	VALUE scheduler = rb_fiber_scheduler_current();
+	if (NIL_P(scheduler)) {
+		rb_raise(rb_eRuntimeError, "WorkerPool requires a fiber scheduler!");
+	}
+	
+	// Extract blocking operation handle
+	rb_fiber_scheduler_blocking_operation_t *blocking_operation = rb_fiber_scheduler_blocking_operation_extract(_blocking_operation);
+	
+	if (!blocking_operation) {
+		rb_raise(rb_eArgError, "Invalid blocking operation!");
+	}
+	
+	// Create work item
+	struct IO_Event_WorkerPool_Work work = {
+		.blocking_operation = blocking_operation,
+		.completed = false,
+		.scheduler = scheduler,
+		.blocker = self,
+		.fiber = fiber,
+		.next = NULL
+	};
+	
+	// Enqueue work:
+	pthread_mutex_lock(&pool->mutex);
+	enqueue_work(pool, &work);
+	pthread_cond_signal(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+	
+	// Block the current fiber until work is completed:
+	int state;
+	while (true) {
+		rb_protect(worker_pool_work_begin, (VALUE)&work, &state);
+		
+		if (work.completed) {
+			break;
+		} else {
+			if (DEBUG) fprintf(stderr, "worker_pool_call:rb_fiber_scheduler_blocking_operation_cancel\n");
+			rb_fiber_scheduler_blocking_operation_cancel(blocking_operation);
+			// The work was not completed, we need to wait for it to be completed.
+		}
+	}
+	
+	if (state) {
+		rb_jump_tag(state);
+	} else {
+		return Qtrue;
+	}
+}
+
+static VALUE worker_pool_allocate(VALUE klass) {
+	struct IO_Event_WorkerPool *pool;
+	VALUE self = TypedData_Make_Struct(klass, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	// Initialize to NULL/zero so we can detect uninitialized pools
+	memset(pool, 0, sizeof(struct IO_Event_WorkerPool));
+	
+	return self;
+}
+
+// Ruby method to close the worker pool
+static VALUE worker_pool_close(VALUE self) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool not initialized!");
+	}
+	
+	if (pool->shutdown) {
+		return Qnil; // Already closed
+	}
+	
+	// Signal shutdown to all workers
+	pthread_mutex_lock(&pool->mutex);
+	pool->shutdown = true;
+	pthread_cond_broadcast(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+	
+	// Wait for all worker threads to finish
+	struct IO_Event_WorkerPool_Worker *worker = pool->workers;
+	while (worker) {
+		if (!NIL_P(worker->thread)) {
+			rb_funcall(worker->thread, rb_intern("join"), 0);
+		}
+		worker = worker->next;
+	}
+	
+	// Clean up worker structures
+	worker = pool->workers;
+	while (worker) {
+		struct IO_Event_WorkerPool_Worker *next = worker->next;
+		free(worker);
+		worker = next;
+	}
+	pool->workers = NULL;
+	pool->current_worker_count = 0;
+	
+	// Clean up mutex and condition variable
+	pthread_mutex_destroy(&pool->mutex);
+	pthread_cond_destroy(&pool->work_available);
+	
+	return Qnil;
+}
+
+// Test helper: get pool statistics for debugging/testing
+static VALUE worker_pool_statistics(VALUE self) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+	
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool not initialized!");
+	}
+	
+	VALUE stats = rb_hash_new();
+	rb_hash_aset(stats, ID2SYM(rb_intern("current_worker_count")), SIZET2NUM(pool->current_worker_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("maximum_worker_count")), SIZET2NUM(pool->maximum_worker_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("call_count")), SIZET2NUM(pool->call_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("completed_count")), SIZET2NUM(pool->completed_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("cancelled_count")), SIZET2NUM(pool->cancelled_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("shutdown")), pool->shutdown ? Qtrue : Qfalse);
+	
+	// Count work items in queue (only if properly initialized)
+	if (pool->maximum_worker_count > 0) {
+		pthread_mutex_lock(&pool->mutex);
+		size_t current_queue_size = 0;
+		struct IO_Event_WorkerPool_Work *work = pool->work_queue;
+		while (work) {
+			current_queue_size++;
+			work = work->next;
+		}
+		pthread_mutex_unlock(&pool->mutex);
+		rb_hash_aset(stats, ID2SYM(rb_intern("current_queue_size")), SIZET2NUM(current_queue_size));
+	} else {
+		rb_hash_aset(stats, ID2SYM(rb_intern("current_queue_size")), SIZET2NUM(0));
+	}
+	
+	return stats;
+}
+
+void Init_IO_Event_WorkerPool(VALUE IO_Event) {
+	// Initialize symbols
+	id_maximum_worker_count = rb_intern("maximum_worker_count");
+	
+	IO_Event_WorkerPool = rb_define_class_under(IO_Event, "WorkerPool", rb_cObject);
+	rb_define_alloc_func(IO_Event_WorkerPool, worker_pool_allocate);
+	
+	rb_define_method(IO_Event_WorkerPool, "initialize", worker_pool_initialize, -1);
+	rb_define_method(IO_Event_WorkerPool, "call", worker_pool_call, 1);
+	rb_define_method(IO_Event_WorkerPool, "close", worker_pool_close, 0);
+	
+	rb_define_method(IO_Event_WorkerPool, "statistics", worker_pool_statistics, 0);
+	
+	// Initialize test functions
+	Init_IO_Event_WorkerPool_Test(IO_Event_WorkerPool);
+}
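The flow in worker_pool.c is: `call` enqueues a work item and blocks the calling fiber via rb_fiber_scheduler_block; a pthread-backed Ruby thread dequeues it and runs rb_fiber_scheduler_blocking_operation_execute outside the GVL; when the operation finishes, the worker unblocks the fiber. A hedged sketch of how a fiber scheduler might wire this up; MyScheduler and the blocking_operation_wait hook are illustrative and depend on the Ruby version, while only IO::Event::WorkerPool#call, #close, and #statistics come from the code above:

```ruby
require "io/event"

class MyScheduler
	def initialize
		# One pool per scheduler; workers are plain Ruby threads that execute
		# the blocking operation without holding the GVL.
		@worker_pool = IO::Event::WorkerPool.new(maximum_worker_count: 4)
	end

	# Hypothetical hook: receives an opaque blocking-operation object and
	# delegates it to the pool, which blocks the current fiber until done.
	def blocking_operation_wait(operation)
		@worker_pool.call(operation)
	end

	def close
		@worker_pool.close
	end
end
```

For testing and tuning, `statistics` returns a hash with :current_worker_count, :maximum_worker_count, :call_count, :completed_count, :cancelled_count, :shutdown, and :current_queue_size, matching the counters maintained in the C structure.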