io-event 1.9.0 → 1.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,477 @@
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "worker_pool.h"
+#include "worker_pool_test.h"
+#include "fiber.h"
+
+#include <ruby/thread.h>
+#include <ruby/fiber/scheduler.h>
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+enum {
+	DEBUG = 0,
+};
+
+static VALUE IO_Event_WorkerPool;
+static ID id_maximum_worker_count;
+
+// Thread pool structure
+struct IO_Event_WorkerPool_Worker {
+	VALUE thread;
+
+	// Flag to indicate this specific worker should exit:
+	bool interrupted;
+
+	// Currently executing operation:
+	rb_fiber_scheduler_blocking_operation_t *current_blocking_operation;
+
+	struct IO_Event_WorkerPool *pool;
+	struct IO_Event_WorkerPool_Worker *next;
+};
+
+// Work item structure
+struct IO_Event_WorkerPool_Work {
+	rb_fiber_scheduler_blocking_operation_t *blocking_operation;
+
+	bool completed;
+
+	VALUE scheduler;
+	VALUE blocker;
+	VALUE fiber;
+
+	struct IO_Event_WorkerPool_Work *next;
+};
+
+// Worker pool structure
+struct IO_Event_WorkerPool {
+	pthread_mutex_t mutex;
+	pthread_cond_t work_available;
+
+	struct IO_Event_WorkerPool_Work *work_queue;
+	struct IO_Event_WorkerPool_Work *work_queue_tail;
+
+	struct IO_Event_WorkerPool_Worker *workers;
+	size_t current_worker_count;
+	size_t maximum_worker_count;
+
+	size_t call_count;
+	size_t completed_count;
+	size_t cancelled_count;
+
+	bool shutdown;
+};
+
+// Free functions for Ruby GC
+static void worker_pool_free(void *ptr) {
+	struct IO_Event_WorkerPool *pool = (struct IO_Event_WorkerPool *)ptr;
+
+	if (pool) {
+		// Signal shutdown to all workers
+		if (!pool->shutdown) {
+			pthread_mutex_lock(&pool->mutex);
+			pool->shutdown = true;
+			pthread_cond_broadcast(&pool->work_available);
+			pthread_mutex_unlock(&pool->mutex);
+		}
+
+		// Note: We don't free worker structures or wait for threads during GC
+		// as this can cause deadlocks. The Ruby GC will handle the thread objects.
+		// Workers will see the shutdown flag and exit cleanly.
+	}
+}
+
+// Size functions for Ruby GC
+static size_t worker_pool_size(const void *ptr) {
+	return sizeof(struct IO_Event_WorkerPool);
+}
+
+// Ruby TypedData structures
+static const rb_data_type_t IO_Event_WorkerPool_type = {
+	"IO::Event::WorkerPool",
+	{0, worker_pool_free, worker_pool_size,},
+	0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+};
+
+// Helper function to enqueue work (must be called with mutex held)
+static void enqueue_work(struct IO_Event_WorkerPool *pool, struct IO_Event_WorkerPool_Work *work) {
+	if (pool->work_queue_tail) {
+		pool->work_queue_tail->next = work;
+	} else {
+		pool->work_queue = work;
+	}
+	pool->work_queue_tail = work;
+}
+
+// Helper function to dequeue work (must be called with mutex held)
+static struct IO_Event_WorkerPool_Work *dequeue_work(struct IO_Event_WorkerPool *pool) {
+	struct IO_Event_WorkerPool_Work *work = pool->work_queue;
+	if (work) {
+		pool->work_queue = work->next;
+		if (!pool->work_queue) {
+			pool->work_queue_tail = NULL;
+		}
+		work->next = NULL; // Clear the next pointer for safety
+	}
+	return work;
+}
+
+// Unblock function to interrupt a specific worker.
+static void worker_unblock_func(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+	struct IO_Event_WorkerPool *pool = worker->pool;
+
+	// Mark this specific worker as interrupted
+	pthread_mutex_lock(&pool->mutex);
+	worker->interrupted = true;
+	pthread_cond_broadcast(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+
+	// If there's a currently executing blocking operation, cancel it
+	if (worker->current_blocking_operation) {
+		rb_fiber_scheduler_blocking_operation_cancel(worker->current_blocking_operation);
+	}
+}
+
+// Function to wait for work and execute it without GVL.
+static void *worker_wait_and_execute(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+	struct IO_Event_WorkerPool *pool = worker->pool;
+
+	while (true) {
+		struct IO_Event_WorkerPool_Work *work = NULL;
+
+		pthread_mutex_lock(&pool->mutex);
+
+		// Wait for work, shutdown, or interruption
+		while (!pool->work_queue && !pool->shutdown && !worker->interrupted) {
+			pthread_cond_wait(&pool->work_available, &pool->mutex);
+		}
+
+		if (pool->shutdown || worker->interrupted) {
+			pthread_mutex_unlock(&pool->mutex);
+			break;
+		}
+
+		work = dequeue_work(pool);
+
+		pthread_mutex_unlock(&pool->mutex);
+
+		// Execute work WITHOUT GVL (this is the whole point!)
+		if (work) {
+			worker->current_blocking_operation = work->blocking_operation;
+			rb_fiber_scheduler_blocking_operation_execute(work->blocking_operation);
+			worker->current_blocking_operation = NULL;
+		}
+
+		return work;
+	}
+
+	return NULL; // Shutdown signal
+}
+
+static VALUE worker_thread_func(void *_worker) {
+	struct IO_Event_WorkerPool_Worker *worker = (struct IO_Event_WorkerPool_Worker *)_worker;
+
+	while (true) {
+		// Wait for work and execute it without holding GVL
+		struct IO_Event_WorkerPool_Work *work = (struct IO_Event_WorkerPool_Work *)rb_thread_call_without_gvl(worker_wait_and_execute, worker, worker_unblock_func, worker);
+
+		if (!work) {
+			// Shutdown signal received
+			break;
+		}
+
+		// Protected by GVL:
+		work->completed = true;
+		worker->pool->completed_count++;
+
+		// Work was executed without GVL, now unblock the waiting fiber (we have GVL here)
+		rb_fiber_scheduler_unblock(work->scheduler, work->blocker, work->fiber);
+	}
+
+	return Qnil;
+}
+
+// Create a new worker thread
+static int create_worker_thread(struct IO_Event_WorkerPool *pool) {
+	if (pool->current_worker_count >= pool->maximum_worker_count) {
+		return -1;
+	}
+
+	struct IO_Event_WorkerPool_Worker *worker = malloc(sizeof(struct IO_Event_WorkerPool_Worker));
+	if (!worker) {
+		return -1;
+	}
+
+	worker->pool = pool;
+	worker->interrupted = false;
+	worker->current_blocking_operation = NULL;
+	worker->next = pool->workers;
+
+	worker->thread = rb_thread_create(worker_thread_func, worker);
+	if (NIL_P(worker->thread)) {
+		free(worker);
+		return -1;
+	}
+
+	pool->workers = worker;
+	pool->current_worker_count++;
+
+	return 0;
+}
+
+// Ruby constructor for WorkerPool
+static VALUE worker_pool_initialize(int argc, VALUE *argv, VALUE self) {
+	size_t maximum_worker_count = 1; // Default
+
+	// Extract keyword arguments
+	VALUE kwargs = Qnil;
+	VALUE rb_maximum_worker_count = Qnil;
+
+	rb_scan_args(argc, argv, "0:", &kwargs);
+
+	if (!NIL_P(kwargs)) {
+		VALUE kwvals[1];
+		ID kwkeys[1] = {id_maximum_worker_count};
+		rb_get_kwargs(kwargs, kwkeys, 0, 1, kwvals);
+		rb_maximum_worker_count = kwvals[0];
+	}
+
+	if (!NIL_P(rb_maximum_worker_count)) {
+		maximum_worker_count = NUM2SIZET(rb_maximum_worker_count);
+		if (maximum_worker_count == 0) {
+			rb_raise(rb_eArgError, "maximum_worker_count must be greater than 0!");
+		}
+	}
+
+	// Get the pool that was allocated by worker_pool_allocate
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool allocation failed!");
+	}
+
+	pthread_mutex_init(&pool->mutex, NULL);
+	pthread_cond_init(&pool->work_available, NULL);
+
+	pool->work_queue = NULL;
+	pool->work_queue_tail = NULL;
+	pool->workers = NULL;
+	pool->current_worker_count = 0;
+	pool->maximum_worker_count = maximum_worker_count;
+	pool->call_count = 0;
+	pool->completed_count = 0;
+	pool->cancelled_count = 0;
+	pool->shutdown = false;
+
+	// Create initial workers
+	for (size_t i = 0; i < maximum_worker_count; i++) {
+		if (create_worker_thread(pool) != 0) {
+			// Just set the maximum_worker_count for debugging, don't fail completely
+			// worker_pool_free(pool);
+			// rb_raise(rb_eRuntimeError, "Failed to create workers");
+			break;
+		}
+	}
+
+	return self;
+}
+
+static VALUE worker_pool_work_begin(VALUE _work) {
+	struct IO_Event_WorkerPool_Work *work = (void*)_work;
+
+	if (DEBUG) fprintf(stderr, "worker_pool_work_begin:rb_fiber_scheduler_block work=%p\n", work);
+	rb_fiber_scheduler_block(work->scheduler, work->blocker, Qnil);
+
+	return Qnil;
+}
+
+// Ruby method to submit work and wait for completion
+static VALUE worker_pool_call(VALUE self, VALUE _blocking_operation) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+
+	if (pool->shutdown) {
+		rb_raise(rb_eRuntimeError, "Worker pool is shut down!");
+	}
+
+	// Increment call count (protected by GVL)
+	pool->call_count++;
+
+	// Get current fiber and scheduler
+	VALUE fiber = rb_fiber_current();
+	VALUE scheduler = rb_fiber_scheduler_current();
+	if (NIL_P(scheduler)) {
+		rb_raise(rb_eRuntimeError, "WorkerPool requires a fiber scheduler!");
+	}
+
+	// Extract blocking operation handle
+	rb_fiber_scheduler_blocking_operation_t *blocking_operation = rb_fiber_scheduler_blocking_operation_extract(_blocking_operation);
+
+	if (!blocking_operation) {
+		rb_raise(rb_eArgError, "Invalid blocking operation!");
+	}
+
+	// Create work item
+	struct IO_Event_WorkerPool_Work work = {
+		.blocking_operation = blocking_operation,
+		.completed = false,
+		.scheduler = scheduler,
+		.blocker = self,
+		.fiber = fiber,
+		.next = NULL
+	};
+
+	// Enqueue work:
+	pthread_mutex_lock(&pool->mutex);
+	enqueue_work(pool, &work);
+	pthread_cond_signal(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+
+	// Block the current fiber until work is completed:
+	int state = 0;
+	while (true) {
+		int current_state = 0;
+		rb_protect(worker_pool_work_begin, (VALUE)&work, &current_state);
+		if (DEBUG) fprintf(stderr, "-- worker_pool_call:work completed=%d, current_state=%d, state=%d\n", work.completed, current_state, state);
+
+		// Store the first exception state:
+		if (!state) {
+			state = current_state;
+		}
+
+		// If the work is still in the queue, we must wait for a worker to complete it (even if cancelled):
+		if (work.completed) {
+			// The work was completed, we can exit the loop:
+			break;
+		} else {
+			if (DEBUG) fprintf(stderr, "worker_pool_call:rb_fiber_scheduler_blocking_operation_cancel\n");
+			// Ensure the blocking operation is cancelled:
+			rb_fiber_scheduler_blocking_operation_cancel(blocking_operation);
+
+			// The work was not completed, we need to wait for it to be completed, so we go around the loop again.
+		}
+	}
+
+	if (DEBUG) fprintf(stderr, "<- worker_pool_call:work completed=%d, state=%d\n", work.completed, state);
+
+	if (state) {
+		rb_jump_tag(state);
+	} else {
+		return Qtrue;
+	}
+}
+
+static VALUE worker_pool_allocate(VALUE klass) {
+	struct IO_Event_WorkerPool *pool;
+	VALUE self = TypedData_Make_Struct(klass, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+
+	// Initialize to NULL/zero so we can detect uninitialized pools
+	memset(pool, 0, sizeof(struct IO_Event_WorkerPool));
+
+	return self;
+}
+
+// Ruby method to close the worker pool
+static VALUE worker_pool_close(VALUE self) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool not initialized!");
+	}
+
+	if (pool->shutdown) {
+		return Qnil; // Already closed
+	}
+
+	// Signal shutdown to all workers
+	pthread_mutex_lock(&pool->mutex);
+	pool->shutdown = true;
+	pthread_cond_broadcast(&pool->work_available);
+	pthread_mutex_unlock(&pool->mutex);
+
+	// Wait for all worker threads to finish
+	struct IO_Event_WorkerPool_Worker *worker = pool->workers;
+	while (worker) {
+		if (!NIL_P(worker->thread)) {
+			rb_funcall(worker->thread, rb_intern("join"), 0);
+		}
+		worker = worker->next;
+	}
+
+	// Clean up worker structures
+	worker = pool->workers;
+	while (worker) {
+		struct IO_Event_WorkerPool_Worker *next = worker->next;
+		free(worker);
+		worker = next;
+	}
+	pool->workers = NULL;
+	pool->current_worker_count = 0;
+
+	// Clean up mutex and condition variable
+	pthread_mutex_destroy(&pool->mutex);
+	pthread_cond_destroy(&pool->work_available);
+
+	return Qnil;
+}
+
+// Test helper: get pool statistics for debugging/testing
+static VALUE worker_pool_statistics(VALUE self) {
+	struct IO_Event_WorkerPool *pool;
+	TypedData_Get_Struct(self, struct IO_Event_WorkerPool, &IO_Event_WorkerPool_type, pool);
+
+	if (!pool) {
+		rb_raise(rb_eRuntimeError, "WorkerPool not initialized!");
+	}
+
+	VALUE stats = rb_hash_new();
+	rb_hash_aset(stats, ID2SYM(rb_intern("current_worker_count")), SIZET2NUM(pool->current_worker_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("maximum_worker_count")), SIZET2NUM(pool->maximum_worker_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("call_count")), SIZET2NUM(pool->call_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("completed_count")), SIZET2NUM(pool->completed_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("cancelled_count")), SIZET2NUM(pool->cancelled_count));
+	rb_hash_aset(stats, ID2SYM(rb_intern("shutdown")), pool->shutdown ? Qtrue : Qfalse);
+
+	// Count work items in queue (only if properly initialized)
+	if (pool->maximum_worker_count > 0) {
+		pthread_mutex_lock(&pool->mutex);
+		size_t current_queue_size = 0;
+		struct IO_Event_WorkerPool_Work *work = pool->work_queue;
+		while (work) {
+			current_queue_size++;
+			work = work->next;
+		}
+		pthread_mutex_unlock(&pool->mutex);
+		rb_hash_aset(stats, ID2SYM(rb_intern("current_queue_size")), SIZET2NUM(current_queue_size));
+	} else {
+		rb_hash_aset(stats, ID2SYM(rb_intern("current_queue_size")), SIZET2NUM(0));
+	}
+
+	return stats;
+}
+
+void Init_IO_Event_WorkerPool(VALUE IO_Event) {
+	// Initialize symbols
+	id_maximum_worker_count = rb_intern("maximum_worker_count");
+
+	IO_Event_WorkerPool = rb_define_class_under(IO_Event, "WorkerPool", rb_cObject);
+	rb_define_alloc_func(IO_Event_WorkerPool, worker_pool_allocate);
+
+	rb_define_method(IO_Event_WorkerPool, "initialize", worker_pool_initialize, -1);
+	rb_define_method(IO_Event_WorkerPool, "call", worker_pool_call, 1);
+	rb_define_method(IO_Event_WorkerPool, "close", worker_pool_close, 0);
+
+	rb_define_method(IO_Event_WorkerPool, "statistics", worker_pool_statistics, 0);
+
+	// Initialize test functions
+	Init_IO_Event_WorkerPool_Test(IO_Event_WorkerPool);
+}
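The hunk above adds the WorkerPool itself: call extracts the blocking operation handle, enqueues it, parks the calling fiber with rb_fiber_scheduler_block, and a pthread-backed Ruby thread executes the operation via rb_fiber_scheduler_blocking_operation_execute without the GVL before unblocking the fiber. The Ruby sketch below is illustrative only and not part of this diff; it assumes the Fiber::Scheduler#blocking_operation_wait hook from Ruby 3.4+, shows only that hook, and uses ExampleScheduler as a hypothetical class name.

# Illustrative sketch, not part of the released package.
class ExampleScheduler
	def initialize
		# The pool defaults to a single worker; allow four for this example:
		@worker_pool = IO::Event::WorkerPool.new(maximum_worker_count: 4)
	end

	# Assumed hook (Ruby 3.4+): the VM offers GVL-releasing operations here.
	# WorkerPool#call blocks this fiber until a worker thread has run the operation.
	def blocking_operation_wait(operation)
		@worker_pool.call(operation)
	end

	def close
		@worker_pool.close
	end
end

The statistics method returns a hash of the counters maintained above (current_worker_count, maximum_worker_count, call_count, completed_count, cancelled_count, current_queue_size and shutdown), which can help when debugging offloading behaviour.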
@@ -5,4 +5,4 @@
 
 #include <ruby.h>
 
-void Init_IO_Event_Profiler(VALUE IO_Event);
+void Init_IO_Event_WorkerPool(VALUE IO_Event);
@@ -0,0 +1,199 @@
+// worker_pool_test.c - Test functions for WorkerPool cancellation
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#include "worker_pool_test.h"
+
+#include <ruby/thread.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <unistd.h>
+#include <errno.h>
+#include <time.h>
+
+static ID id_duration;
+
+struct BusyOperationData {
+	int read_fd;
+	int write_fd;
+	volatile int cancelled;
+	double duration; // How long to wait (for testing)
+	clock_t start_time;
+	clock_t end_time;
+	int operation_result;
+	VALUE exception;
+};
+
+// The actual blocking operation that can be cancelled
+static void* busy_blocking_operation(void *data) {
+	struct BusyOperationData *busy_data = (struct BusyOperationData*)data;
+
+	// Use select() to wait for the pipe to become readable
+	fd_set read_fds;
+	struct timeval timeout;
+
+	FD_ZERO(&read_fds);
+	FD_SET(busy_data->read_fd, &read_fds);
+
+	// Set timeout based on duration
+	timeout.tv_sec = (long)busy_data->duration;
+	timeout.tv_usec = ((busy_data->duration - timeout.tv_sec) * 1000000);
+
+	// This will block until:
+	// 1. The pipe becomes readable (cancellation)
+	// 2. The timeout expires
+	// 3. An error occurs
+	int result = select(busy_data->read_fd + 1, &read_fds, NULL, NULL, &timeout);
+
+	if (result > 0 && FD_ISSET(busy_data->read_fd, &read_fds)) {
+		// Pipe became readable - we were cancelled
+		char buffer;
+		read(busy_data->read_fd, &buffer, 1); // Consume the byte
+		busy_data->cancelled = 1;
+		return (void*)-1; // Indicate cancellation
+	} else if (result == 0) {
+		// Timeout - operation completed normally
+		return (void*)0; // Indicate success
+	} else {
+		// Error occurred
+		return (void*)-2; // Indicate error
+	}
+}
+
+// Unblock function that writes to the pipe to cancel the operation
+static void busy_unblock_function(void *data) {
+	struct BusyOperationData *busy_data = (struct BusyOperationData*)data;
+
+	busy_data->cancelled = 1;
+
+	// Write a byte to the pipe to wake up the select()
+	char wake_byte = 1;
+	write(busy_data->write_fd, &wake_byte, 1);
+}
+
+// Function for the main operation execution (for rb_rescue)
+static VALUE busy_operation_execute(VALUE data_value) {
+	struct BusyOperationData *busy_data = (struct BusyOperationData*)data_value;
+
+	// Record start time
+	busy_data->start_time = clock();
+
+	// Execute the blocking operation
+	void *block_result = rb_nogvl(
+		busy_blocking_operation,
+		busy_data,
+		busy_unblock_function,
+		busy_data,
+		RB_NOGVL_UBF_ASYNC_SAFE | RB_NOGVL_OFFLOAD_SAFE
+	);
+
+	// Record end time
+	busy_data->end_time = clock();
+
+	// Store the operation result
+	busy_data->operation_result = (int)(intptr_t)block_result;
+
+	return Qnil;
+}
+
+// Function for exception handling (for rb_rescue)
+static VALUE busy_operation_rescue(VALUE data_value, VALUE exception) {
+	struct BusyOperationData *busy_data = (struct BusyOperationData*)data_value;
+
+	// Record end time even in case of exception
+	busy_data->end_time = clock();
+
+	// Mark that an exception was caught
+	busy_data->exception = exception;
+
+	return exception;
+}
+
+// Ruby method: IO::Event::WorkerPool.busy(duration: 1.0)
+// This creates a cancellable blocking operation for testing
+static VALUE worker_pool_test_busy(int argc, VALUE *argv, VALUE self) {
+	double duration = 1.0; // Default 1 second
+
+	// Extract keyword arguments
+	VALUE kwargs = Qnil;
+	VALUE rb_duration = Qnil;
+
+	rb_scan_args(argc, argv, "0:", &kwargs);
+
+	if (!NIL_P(kwargs)) {
+		VALUE kwvals[1];
+		ID kwkeys[1] = {id_duration};
+		rb_get_kwargs(kwargs, kwkeys, 0, 1, kwvals);
+		rb_duration = kwvals[0];
+	}
+
+	if (!NIL_P(rb_duration)) {
+		duration = NUM2DBL(rb_duration);
+	}
+
+	// Create pipe for cancellation
+	int pipe_fds[2];
+	if (pipe(pipe_fds) != 0) {
+		rb_sys_fail("pipe creation failed");
+	}
+
+	// Stack allocate operation data
+	struct BusyOperationData busy_data = {
+		.read_fd = pipe_fds[0],
+		.write_fd = pipe_fds[1],
+		.cancelled = 0,
+		.duration = duration,
+		.start_time = 0,
+		.end_time = 0,
+		.operation_result = 0,
+		.exception = Qnil
+	};
+
+	// Execute the blocking operation with exception handling using function pointers
+	rb_rescue(
+		busy_operation_execute,
+		(VALUE)&busy_data,
+		busy_operation_rescue,
+		(VALUE)&busy_data
+	);
+
+	// Calculate elapsed time from the state stored in busy_data
+	double elapsed = ((double)(busy_data.end_time - busy_data.start_time)) / CLOCKS_PER_SEC;
+
+	// Create result hash using the state from busy_data
+	VALUE result = rb_hash_new();
+	rb_hash_aset(result, ID2SYM(rb_intern("duration")), DBL2NUM(duration));
+	rb_hash_aset(result, ID2SYM(rb_intern("elapsed")), DBL2NUM(elapsed));
+
+	// Determine result based on operation outcome
+	if (busy_data.exception != Qnil) {
+		rb_hash_aset(result, ID2SYM(rb_intern("result")), ID2SYM(rb_intern("exception")));
+		rb_hash_aset(result, ID2SYM(rb_intern("cancelled")), Qtrue);
+		rb_hash_aset(result, ID2SYM(rb_intern("exception")), busy_data.exception);
+	} else if (busy_data.operation_result == -1) {
+		rb_hash_aset(result, ID2SYM(rb_intern("result")), ID2SYM(rb_intern("cancelled")));
+		rb_hash_aset(result, ID2SYM(rb_intern("cancelled")), Qtrue);
+	} else if (busy_data.operation_result == 0) {
+		rb_hash_aset(result, ID2SYM(rb_intern("result")), ID2SYM(rb_intern("completed")));
+		rb_hash_aset(result, ID2SYM(rb_intern("cancelled")), Qfalse);
+	} else {
+		rb_hash_aset(result, ID2SYM(rb_intern("result")), ID2SYM(rb_intern("error")));
+		rb_hash_aset(result, ID2SYM(rb_intern("cancelled")), Qfalse);
+	}
+
+	// Clean up pipe file descriptors
+	close(pipe_fds[0]);
+	close(pipe_fds[1]);
+
+	return result;
+}
+
+// Initialize the test functions
+void Init_IO_Event_WorkerPool_Test(VALUE IO_Event_WorkerPool) {
+	// Initialize symbols
+	id_duration = rb_intern("duration");
+
+	// Add test methods to IO::Event::WorkerPool class
+	rb_define_singleton_method(IO_Event_WorkerPool, "busy", worker_pool_test_busy, -1);
+}
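The test helper added above exposes IO::Event::WorkerPool.busy(duration:), a cancellable blocking operation: it select()s on a pipe under rb_nogvl, the unblock function writes a byte to the pipe to wake it early, and the returned hash reports :completed, :cancelled, :error or :exception along with the CPU time measured by clock(). A rough usage sketch follows; it is illustrative only and not part of this diff.

# Illustrative sketch, not part of the released package.
# Normal completion: the select() timeout expires before anything cancels it.
result = IO::Event::WorkerPool.busy(duration: 0.01)
result[:result]    # => :completed
result[:cancelled] # => false

# Cancellation path: interrupting the thread invokes the unblock function,
# which writes to the pipe and wakes the select() before the timeout.
thread = Thread.new { IO::Event::WorkerPool.busy(duration: 10) }
sleep(0.1)
thread.kill
thread.join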
@@ -0,0 +1,9 @@
+// worker_pool_test.h - Header for WorkerPool test functions
+// Released under the MIT License.
+// Copyright, 2025, by Samuel Williams.
+
+#pragma once
+
+#include <ruby.h>
+
+void Init_IO_Event_WorkerPool_Test(VALUE IO_Event_WorkerPool);
@@ -82,7 +82,7 @@ class IO
 # Validate the heap invariant. Every element except the root must not be smaller than its parent element. Note that it MAY be equal.
 def valid?
 	# Notice we skip index 0 on purpose, because it has no parent
-	(1..(@contents.size - 1)).all? { |e| @contents[e] >= @contents[(e - 1) / 2] }
+	(1..(@contents.size - 1)).all? {|index| @contents[index] >= @contents[(index - 1) / 2]}
 end
 
 private
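The changed line checks the standard array-backed binary heap invariant: the element at index must not be smaller than its parent at (index - 1) / 2, skipping index 0 because the root has no parent. A tiny worked illustration, not taken from the gem:

# Illustrative sketch, not part of the released package.
contents = [1, 3, 2, 7, 5] # a valid min-heap laid out in an array
(1..(contents.size - 1)).all? do |index|
	contents[index] >= contents[(index - 1) / 2]
end
# => true; e.g. index 3 (value 7) has parent index (3 - 1) / 2 = 1 (value 3)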