polyphony 0.64 → 0.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +1 -1
  3. data/CHANGELOG.md +22 -0
  4. data/Gemfile.lock +1 -1
  5. data/TODO.md +10 -40
  6. data/bin/pdbg +112 -0
  7. data/examples/core/await.rb +9 -1
  8. data/ext/polyphony/backend_common.c +14 -1
  9. data/ext/polyphony/backend_common.h +3 -1
  10. data/ext/polyphony/backend_io_uring.c +85 -25
  11. data/ext/polyphony/backend_io_uring_context.c +42 -0
  12. data/ext/polyphony/backend_io_uring_context.h +6 -9
  13. data/ext/polyphony/backend_libev.c +85 -39
  14. data/ext/polyphony/fiber.c +20 -0
  15. data/ext/polyphony/polyphony.c +2 -0
  16. data/ext/polyphony/polyphony.h +5 -2
  17. data/ext/polyphony/queue.c +1 -1
  18. data/ext/polyphony/runqueue.c +7 -3
  19. data/ext/polyphony/runqueue.h +4 -3
  20. data/ext/polyphony/runqueue_ring_buffer.c +25 -14
  21. data/ext/polyphony/runqueue_ring_buffer.h +2 -0
  22. data/ext/polyphony/thread.c +2 -8
  23. data/lib/polyphony.rb +6 -0
  24. data/lib/polyphony/debugger.rb +225 -0
  25. data/lib/polyphony/extensions/debug.rb +1 -1
  26. data/lib/polyphony/extensions/fiber.rb +64 -71
  27. data/lib/polyphony/extensions/io.rb +4 -2
  28. data/lib/polyphony/extensions/openssl.rb +66 -0
  29. data/lib/polyphony/extensions/socket.rb +8 -2
  30. data/lib/polyphony/net.rb +1 -0
  31. data/lib/polyphony/version.rb +1 -1
  32. data/test/helper.rb +6 -5
  33. data/test/stress.rb +6 -2
  34. data/test/test_backend.rb +13 -4
  35. data/test/test_fiber.rb +35 -11
  36. data/test/test_global_api.rb +9 -4
  37. data/test/test_io.rb +2 -0
  38. data/test/test_socket.rb +14 -11
  39. data/test/test_supervise.rb +24 -24
  40. data/test/test_thread.rb +3 -0
  41. data/test/test_thread_pool.rb +1 -1
  42. data/test/test_throttler.rb +2 -2
  43. data/test/test_timer.rb +5 -3
  44. metadata +5 -3
data/ext/polyphony/backend_io_uring_context.c

@@ -50,6 +50,7 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_type type) {
   ctx->resume_value = Qnil;
   ctx->ref_count = 2;
   ctx->result = 0;
+  ctx->buffer_count = 0;
 
   store->taken_count++;
 
@@ -67,6 +68,8 @@ inline int context_store_release(op_context_store_t *store, op_context_t *ctx) {
   ctx->ref_count--;
   if (ctx->ref_count) return 0;
 
+  if (ctx->buffer_count > 1) free(ctx->buffers);
+
   store->taken_count--;
   store->available_count++;
 
@@ -93,3 +96,42 @@ void context_store_free(op_context_store_t *store) {
     store->taken = next;
   }
 }
+
+inline void context_store_mark_taken_buffers(op_context_store_t *store) {
+  op_context_t *ctx = store->taken;
+  while (ctx) {
+    for (unsigned int i = 0; i < ctx->buffer_count; i++)
+      rb_gc_mark(i == 0 ? ctx->buffer0 : ctx->buffers[i - 1]);
+    ctx = ctx->next;
+  }
+}
+
+inline void context_attach_buffers(op_context_t *ctx, unsigned int count, VALUE *buffers) {
+  // attaching buffers to the context is done in order to ensure that any GC
+  // pass done before the context is released will mark those buffers, even if
+  // the fiber has already been resumed and the buffers are not in use anymore.
+  // This is done in order to prevent a possible race condition where on the
+  // kernel side the buffers are still in use, but in userspace they have
+  // effectively been freed after a GC pass.
+  ctx->buffer_count = count;
+  if (count > 1)
+    ctx->buffers = malloc(sizeof(VALUE) * (count - 1));
+  for (unsigned int i = 0; i < count; i++)
+    if (!i) ctx->buffer0 = buffers[0];
+    else ctx->buffers[i - 1] = buffers[i];
+}
+
+inline void context_attach_buffers_v(op_context_t *ctx, unsigned int count, ...) {
+  va_list values;
+
+  va_start(values, count);
+
+  ctx->buffer_count = count;
+  if (count > 1)
+    ctx->buffers = malloc(sizeof(VALUE) * (count - 1));
+  for (unsigned int i = 0; i < count; i++)
+    if (!i) ctx->buffer0 = va_arg(values, VALUE);
+    else ctx->buffers[i - 1] = va_arg(values, VALUE);
+
+  va_end(values);
+}
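The buffer-attachment API above exists to keep Ruby string buffers alive while the kernel may still be reading or writing them. A minimal sketch of the intended lifecycle (the op type OP_WRITEV and the str1/str2 values are hypothetical stand-ins; SQE submission and completion details are elided):

    // acquire a context for the operation
    op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);

    // pin the Ruby strings to the context *before* the fiber suspends, so a
    // GC pass that runs while the kernel still owns the buffers marks them
    // via context_store_mark_taken_buffers (called from the backend's GC
    // mark callback) instead of collecting them
    context_attach_buffers_v(ctx, 2, str1, str2);

    // ... fill and submit the SQE, switch fibers, await the CQE ...

    // dropping the last reference returns the context to the store; at that
    // point context_store_release also frees the heap-allocated overflow
    // array used when more than one buffer is attached
    context_store_release(&backend->store, ctx);

Note that the first buffer is stored inline in ctx->buffer0, so single-buffer operations never touch malloc; only operations attaching two or more buffers allocate the ctx->buffers overflow array.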
data/ext/polyphony/backend_io_uring_context.h

@@ -27,6 +27,9 @@ typedef struct op_context {
   int result;
   VALUE fiber;
   VALUE resume_value;
+  unsigned int buffer_count;
+  VALUE buffer0;
+  VALUE *buffers;
 } op_context_t;
 
 typedef struct op_context_store {
@@ -43,14 +46,8 @@ void context_store_initialize(op_context_store_t *store);
 op_context_t *context_store_acquire(op_context_store_t *store, enum op_type type);
 int context_store_release(op_context_store_t *store, op_context_t *ctx);
 void context_store_free(op_context_store_t *store);
-
-inline unsigned int OP_CONTEXT_RELEASE(op_context_store_t *store, op_context_t *ctx) {
-  int completed = !ctx->ref_count;
-  if (ctx->ref_count)
-    ctx->ref_count -= 1;
-  else
-    context_store_release(store, ctx);
-  return completed;
-}
+void context_store_mark_taken_buffers(op_context_store_t *store);
+void context_attach_buffers(op_context_t *ctx, unsigned int count, VALUE *buffers);
+void context_attach_buffers_v(op_context_t *ctx, unsigned int count, ...);
 
 #endif /* BACKEND_IO_URING_CONTEXT_H */
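The header-inlined OP_CONTEXT_RELEASE helper is gone; its bookkeeping also differed subtly from context_store_release (it only recycled a context once the ref count had already reached zero on a previous call, rather than on the decrement that reaches zero). Call sites presumably now go through context_store_release directly, along the lines of this sketch (the interpretation of the return value is inferred from the .c hunk above, where a still-referenced context returns 0):

    // returns 0 while other references remain; once the count hits zero the
    // context (and any attached buffer overflow array) is recycled into the store
    int completed = context_store_release(&backend->store, ctx);
    if (completed) {
      // ctx is back on the store's available list and must not be reused here
    }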
data/ext/polyphony/backend_libev.c

@@ -941,9 +941,8 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
 error:
   return RAISE_EXCEPTION(switchpoint_result);
 }
-#endif
-
-VALUE Backend_fake_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
+#else
+VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   Backend_t *backend;
   struct libev_io watcher;
   VALUE switchpoint_result = Qnil;
@@ -1018,7 +1017,7 @@ error:
   return RAISE_EXCEPTION(switchpoint_result);
 }
 
-VALUE Backend_fake_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
+VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   Backend_t *backend;
   struct libev_io watcher;
   VALUE switchpoint_result = Qnil;
@@ -1097,6 +1096,7 @@ done:
 error:
   return RAISE_EXCEPTION(switchpoint_result);
 }
+#endif
 
 VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
   Backend_t *backend;
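Taken together, these three hunks change how the non-Linux fallback is selected: instead of compiling both a native pair and a "fake" pair of functions and choosing between them by name at registration time, the #ifdef now wraps two same-named implementations. Reduced to its skeleton (a sketch, with the bodies elided):

    #ifdef POLYPHONY_LINUX
    VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
      // native splice(2)-based implementation
    }
    #else
    VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
      // read/write emulation for non-Linux platforms
    }
    #endif

    // registration becomes unconditional (see the Init_Backend hunk below):
    rb_define_method(cBackend, "splice", Backend_splice, 3);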
@@ -1384,6 +1384,64 @@ inline int splice_chunks_write(Backend_t *backend, int fd, VALUE str, struct libev_rw_io *watcher, VALUE *result) {
   return 0;
 }
 
+static inline int splice_chunks_splice(Backend_t *backend, int src_fd, int dest_fd, int maxlen,
+    struct libev_rw_io *watcher, VALUE *result, int *chunk_len) {
+#ifdef POLYPHONY_LINUX
+  backend->base.op_count++;
+  while (1) {
+    *chunk_len = splice(src_fd, 0, dest_fd, 0, maxlen, 0);
+    if (*chunk_len >= 0) return 0;
+
+    int err = errno;
+    if (err != EWOULDBLOCK && err != EAGAIN) return err;
+
+    *result = libev_wait_rw_fd_with_watcher(backend, src_fd, dest_fd, watcher);
+    if (TEST_EXCEPTION(*result)) return -1;
+  }
+#else
+  char *buf = malloc(maxlen);
+  int ret;
+
+  backend->base.op_count++;
+  while (1) {
+    *chunk_len = read(src_fd, buf, maxlen);
+    if (*chunk_len >= 0) break;
+
+    ret = errno;
+    if ((ret != EWOULDBLOCK && ret != EAGAIN)) goto done;
+
+    *result = libev_wait_rw_fd_with_watcher(backend, src_fd, -1, watcher);
+    if (TEST_EXCEPTION(*result)) goto exception;
+  }
+
+  backend->base.op_count++;
+  char *ptr = buf;
+  int left = *chunk_len;
+  while (left > 0) {
+    ssize_t n = write(dest_fd, ptr, left);
+    if (n < 0) {
+      ret = errno;
+      if ((ret != EWOULDBLOCK && ret != EAGAIN)) goto done;
+
+      *result = libev_wait_rw_fd_with_watcher(backend, -1, dest_fd, watcher);
+
+      if (TEST_EXCEPTION(*result)) goto exception;
+    }
+    else {
+      ptr += n;
+      left -= n;
+    }
+  }
+  ret = 0;
+  goto done;
+exception:
+  ret = -1;
+done:
+  free(buf);
+  return ret;
+#endif
+}
+
 VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
   Backend_t *backend;
   GetBackend(self, backend);
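The new helper folds both splice directions (and the non-Linux read/write emulation) behind a single return-value convention, inferred from the code above and the call sites below:

    // 0   success: *chunk_len holds the number of bytes moved
    // -1  a Ruby exception is pending in *result
    // >0  an errno value from a failed syscall
    err = splice_chunks_splice(backend, src_fd, dest_fd, maxlen, &watcher, &result, &chunk_len);
    if (err == -1) goto error;        // re-raise the pending exception
    else if (err) goto syscallerror;  // raise from the returned errno

This is what lets Backend_splice_chunks below collapse its two hand-rolled retry loops into single calls.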
@@ -1421,26 +1479,13 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
   fcntl(pipefd[1], F_SETFL, O_NONBLOCK);
 
   if (prefix != Qnil) {
-    int err = splice_chunks_write(backend, dest_fptr->fd, prefix, &watcher, &result);
+    err = splice_chunks_write(backend, dest_fptr->fd, prefix, &watcher, &result);
     if (err == -1) goto error; else if (err) goto syscallerror;
   }
   while (1) {
-    int chunk_len;
-    // splice to pipe
-    while (1) {
-      backend->base.op_count++;
-      chunk_len = splice(src_fptr->fd, 0, pipefd[1], 0, maxlen, 0);
-      if (chunk_len < 0) {
-        err = errno;
-        if (err != EWOULDBLOCK && err != EAGAIN) goto syscallerror;
-
-        result = libev_wait_rw_fd_with_watcher(backend, src_fptr->fd, pipefd[1], &watcher);
-        if (TEST_EXCEPTION(result)) goto error;
-      }
-      else {
-        break;
-      }
-    }
+    int chunk_len = 0;
+    err = splice_chunks_splice(backend, src_fptr->fd, pipefd[1], maxlen, &watcher, &result, &chunk_len);
+    if (err == -1) goto error; else if (err) goto syscallerror;
     if (chunk_len == 0) break;
 
     total += chunk_len;
@@ -1453,20 +1498,12 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
   }
 
   int left = chunk_len;
-  while (1) {
-    backend->base.op_count++;
-    int n = splice(pipefd[0], 0, dest_fptr->fd, 0, left, 0);
-    if (n < 0) {
-      err = errno;
-      if (err != EWOULDBLOCK && err != EAGAIN) goto syscallerror;
+  while (left > 0) {
+    int len;
+    err = splice_chunks_splice(backend, pipefd[0], dest_fptr->fd, left, &watcher, &result, &len);
+    if (err == -1) goto error; else if (err) goto syscallerror;
 
-      result = libev_wait_rw_fd_with_watcher(backend, pipefd[0], dest_fptr->fd, &watcher);
-      if (TEST_EXCEPTION(result)) goto error;
-    }
-    else {
-      left -= n;
-      if (left == 0) break;
-    }
+    left -= len;
   }
 
   if (chunk_postfix != Qnil) {
@@ -1516,6 +1553,20 @@ VALUE Backend_trace_proc_set(VALUE self, VALUE block) {
   return self;
 }
 
+void Backend_park_fiber(VALUE self, VALUE fiber) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  backend_base_park_fiber(&backend->base, fiber);
+}
+
+void Backend_unpark_fiber(VALUE self, VALUE fiber) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  backend_base_unpark_fiber(&backend->base, fiber);
+}
+
 void Init_Backend() {
   ev_set_allocator(xrealloc);
 
@@ -1550,13 +1601,8 @@ void Init_Backend() {
   rb_define_method(cBackend, "sendv", Backend_sendv, 3);
   rb_define_method(cBackend, "sleep", Backend_sleep, 1);
 
-#ifdef POLYPHONY_LINUX
   rb_define_method(cBackend, "splice", Backend_splice, 3);
   rb_define_method(cBackend, "splice_to_eof", Backend_splice_to_eof, 3);
-#else
-  rb_define_method(cBackend, "splice", Backend_fake_splice, 3);
-  rb_define_method(cBackend, "splice_to_eof", Backend_fake_splice_to_eof, 3);
-#endif
 
   rb_define_method(cBackend, "timeout", Backend_timeout, -1);
   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
data/ext/polyphony/fiber.c

@@ -114,6 +114,22 @@ VALUE Fiber_receive_all_pending(VALUE self) {
   return (mailbox == Qnil) ? rb_ary_new() : Queue_shift_all(mailbox);
 }
 
+VALUE Fiber_park(VALUE self) {
+  rb_ivar_set(self, ID_ivar_parked, Qtrue);
+  Backend_park_fiber(BACKEND(), self);
+  return self;
+}
+
+VALUE Fiber_unpark(VALUE self) {
+  rb_ivar_set(self, ID_ivar_parked, Qnil);
+  Backend_unpark_fiber(BACKEND(), self);
+  return self;
+}
+
+VALUE Fiber_parked_p(VALUE self) {
+  return rb_ivar_get(self, ID_ivar_parked);
+}
+
 void Init_Fiber() {
   VALUE cFiber = rb_const_get(rb_cObject, rb_intern("Fiber"));
   rb_define_method(cFiber, "safe_transfer", Fiber_safe_transfer, -1);
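Parking marks a fiber with @parked and hands it to the backend, which keeps it off the regular runqueue until it is unparked (backend_base_park_fiber/backend_base_unpark_fiber live in backend_common.c, also changed in this release but not shown here). A plausible sketch of that side, assuming the backend base keeps a separate parked runqueue and uses the runqueue_migrate API added in this release (the field names here are assumptions):

    inline void backend_base_park_fiber(struct Backend_base *base, VALUE fiber) {
      // move the fiber, with its pending resume value, out of the active queue
      runqueue_migrate(&base->runqueue, &base->parked_runqueue, fiber);
    }

    inline void backend_base_unpark_fiber(struct Backend_base *base, VALUE fiber) {
      // make the fiber schedulable again
      runqueue_migrate(&base->parked_runqueue, &base->runqueue, fiber);
    }

The Ruby-visible methods registered in the Init_Fiber hunk below are deliberately underscored (__park__, __unpark__, __parked__?), signalling internal plumbing, presumably in support of the debugger added in this release (lib/polyphony/debugger.rb, bin/pdbg) rather than application code.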
@@ -128,6 +144,10 @@ void Init_Fiber() {
   rb_define_method(cFiber, "receive_all_pending", Fiber_receive_all_pending, 0);
   rb_define_method(cFiber, "mailbox", Fiber_mailbox, 0);
 
+  rb_define_method(cFiber, "__park__", Fiber_park, 0);
+  rb_define_method(cFiber, "__unpark__", Fiber_unpark, 0);
+  rb_define_method(cFiber, "__parked__?", Fiber_parked_p, 0);
+
   SYM_dead = ID2SYM(rb_intern("dead"));
   SYM_running = ID2SYM(rb_intern("running"));
   SYM_runnable = ID2SYM(rb_intern("runnable"));
data/ext/polyphony/polyphony.c

@@ -12,6 +12,7 @@ ID ID_invoke;
 ID ID_new;
 ID ID_ivar_blocking_mode;
 ID ID_ivar_io;
+ID ID_ivar_parked;
 ID ID_ivar_runnable;
 ID ID_ivar_running;
 ID ID_ivar_thread;
@@ -160,6 +161,7 @@ void Init_Polyphony() {
   ID_invoke = rb_intern("invoke");
   ID_ivar_blocking_mode = rb_intern("@blocking_mode");
   ID_ivar_io = rb_intern("@io");
+  ID_ivar_parked = rb_intern("@parked");
   ID_ivar_runnable = rb_intern("@runnable");
   ID_ivar_running = rb_intern("@running");
   ID_ivar_thread = rb_intern("@thread");
data/ext/polyphony/polyphony.h

@@ -44,6 +44,7 @@ extern ID ID_invoke;
 extern ID ID_ivar_backend;
 extern ID ID_ivar_blocking_mode;
 extern ID ID_ivar_io;
+extern ID ID_ivar_parked;
 extern ID ID_ivar_runnable;
 extern ID ID_ivar_running;
 extern ID ID_ivar_thread;
@@ -115,9 +116,11 @@ VALUE Backend_run_idle_tasks(VALUE self);
 VALUE Backend_switch_fiber(VALUE self);
 void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize);
 void Backend_unschedule_fiber(VALUE self, VALUE fiber);
+void Backend_park_fiber(VALUE self, VALUE fiber);
+void Backend_unpark_fiber(VALUE self, VALUE fiber);
 
-VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
-VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
+void Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
+void Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_switch_fiber(VALUE thread);
 
 VALUE Polyphony_snooze(VALUE self);
data/ext/polyphony/queue.c

@@ -78,7 +78,7 @@ inline void queue_schedule_blocked_fibers_to_capacity(Queue_t *queue) {
   }
 }
 
-inline void capped_queue_block_push(Queue_t *queue) {
+static inline void capped_queue_block_push(Queue_t *queue) {
   VALUE fiber = rb_fiber_current();
   VALUE backend = rb_ivar_get(rb_thread_current(), ID_ivar_backend);
   VALUE switchpoint_result;
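Adding static here gives the function internal linkage, which matters for inline definitions in a .c file. A brief reminder of the C99 rule this sidesteps (a generic illustration, not polyphony code):

    static inline int f(int x) { return x + 1; }  // internal linkage: always safe

    // whereas a plain `inline` definition in a .c file:
    //   inline int g(int x) { return x + 1; }
    // is only an *inline definition*; if the compiler declines to inline a
    // call (or the address of g is taken), the program needs an external
    // definition in some other translation unit, or linking fails with an
    // undefined reference.

The plain inline qualifiers added elsewhere in this release (runqueue.c, runqueue_ring_buffer.c below) take the other route and rely on the toolchain inlining those calls.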
data/ext/polyphony/runqueue.c

@@ -40,19 +40,23 @@ inline int runqueue_index_of(runqueue_t *runqueue, VALUE fiber) {
   return runqueue_ring_buffer_index_of(&runqueue->entries, fiber);
 }
 
+inline void runqueue_migrate(runqueue_t *src, runqueue_t *dest, VALUE fiber) {
+  runqueue_ring_buffer_migrate(&src->entries, &dest->entries, fiber);
+}
+
 inline void runqueue_clear(runqueue_t *runqueue) {
   runqueue_ring_buffer_clear(&runqueue->entries);
 }
 
-inline long runqueue_size(runqueue_t *runqueue) {
+inline unsigned int runqueue_size(runqueue_t *runqueue) {
   return runqueue->entries.size;
 }
 
-inline long runqueue_len(runqueue_t *runqueue) {
+inline unsigned int runqueue_len(runqueue_t *runqueue) {
   return runqueue->entries.count;
 }
 
-inline long runqueue_max_len(runqueue_t *runqueue) {
+inline unsigned int runqueue_max_len(runqueue_t *runqueue) {
   unsigned int max_len = runqueue->high_watermark;
   runqueue->high_watermark = 0;
   return max_len;
data/ext/polyphony/runqueue.h

@@ -18,10 +18,11 @@ void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
 runqueue_entry runqueue_shift(runqueue_t *runqueue);
 void runqueue_delete(runqueue_t *runqueue, VALUE fiber);
 int runqueue_index_of(runqueue_t *runqueue, VALUE fiber);
+void runqueue_migrate(runqueue_t *src, runqueue_t *dest, VALUE fiber);
 void runqueue_clear(runqueue_t *runqueue);
-long runqueue_size(runqueue_t *runqueue);
-long runqueue_len(runqueue_t *runqueue);
-long runqueue_max_len(runqueue_t *runqueue);
+unsigned int runqueue_size(runqueue_t *runqueue);
+unsigned int runqueue_len(runqueue_t *runqueue);
+unsigned int runqueue_max_len(runqueue_t *runqueue);
 int runqueue_empty_p(runqueue_t *runqueue);
 
 #endif /* RUNQUEUE_H */
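On the type change: the ring buffer's size and count fields are unsigned int, so returning long forced an implicit conversion at every call site; the accessors now mirror the underlying types. The naming is easy to misread, so for reference (an illustration, with a hypothetical queue q, based on the runqueue.c bodies above):

    runqueue_size(&q);     // capacity of the underlying ring buffer (entries.size)
    runqueue_len(&q);      // number of fibers currently queued (entries.count)
    runqueue_max_len(&q);  // high watermark since the last call; reading resets it to 0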
data/ext/polyphony/runqueue_ring_buffer.c

@@ -1,7 +1,7 @@
 #include "polyphony.h"
 #include "runqueue_ring_buffer.h"
 
-void runqueue_ring_buffer_init(runqueue_ring_buffer *buffer) {
+inline void runqueue_ring_buffer_init(runqueue_ring_buffer *buffer) {
   buffer->size = 1;
   buffer->count = 0;
   buffer->entries = malloc(buffer->size * sizeof(runqueue_entry));
@@ -9,17 +9,21 @@ void runqueue_ring_buffer_init(runqueue_ring_buffer *buffer) {
   buffer->tail = 0;
 }
 
-void runqueue_ring_buffer_free(runqueue_ring_buffer *buffer) {
+inline void runqueue_ring_buffer_free(runqueue_ring_buffer *buffer) {
   free(buffer->entries);
 }
 
-int runqueue_ring_buffer_empty_p(runqueue_ring_buffer *buffer) {
+inline int runqueue_ring_buffer_empty_p(runqueue_ring_buffer *buffer) {
   return buffer->count == 0;
 }
 
+inline void runqueue_ring_buffer_clear(runqueue_ring_buffer *buffer) {
+  buffer->count = buffer->head = buffer->tail = 0;
+}
+
 static runqueue_entry nil_runqueue_entry = {(Qnil), (Qnil)};
 
-runqueue_entry runqueue_ring_buffer_shift(runqueue_ring_buffer *buffer) {
+inline runqueue_entry runqueue_ring_buffer_shift(runqueue_ring_buffer *buffer) {
   if (buffer->count == 0) return nil_runqueue_entry;
 
   runqueue_entry value = buffer->entries[buffer->head];
@@ -28,7 +32,7 @@ runqueue_entry runqueue_ring_buffer_shift(runqueue_ring_buffer *buffer) {
   return value;
 }
 
-void runqueue_ring_buffer_resize(runqueue_ring_buffer *buffer) {
+inline void runqueue_ring_buffer_resize(runqueue_ring_buffer *buffer) {
   unsigned int old_size = buffer->size;
   buffer->size = old_size == 1 ? 4 : old_size * 2;
   buffer->entries = realloc(buffer->entries, buffer->size * sizeof(runqueue_entry));
@@ -37,7 +41,7 @@ void runqueue_ring_buffer_resize(runqueue_ring_buffer *buffer) {
   buffer->tail = buffer->head + buffer->count;
 }
 
-void runqueue_ring_buffer_unshift(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
+inline void runqueue_ring_buffer_unshift(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
   if (buffer->count == buffer->size) runqueue_ring_buffer_resize(buffer);
 
   buffer->head = (buffer->head - 1) % buffer->size;
@@ -46,7 +50,7 @@ void runqueue_ring_buffer_unshift(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
   buffer->count++;
 }
 
-void runqueue_ring_buffer_push(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
+inline void runqueue_ring_buffer_push(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
   if (buffer->count == buffer->size) runqueue_ring_buffer_resize(buffer);
 
   buffer->entries[buffer->tail].fiber = fiber;
@@ -55,14 +59,14 @@ void runqueue_ring_buffer_push(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
   buffer->count++;
 }
 
-void runqueue_ring_buffer_mark(runqueue_ring_buffer *buffer) {
+inline void runqueue_ring_buffer_mark(runqueue_ring_buffer *buffer) {
   for (unsigned int i = 0; i < buffer->count; i++) {
     rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size].fiber);
     rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size].value);
   }
 }
 
-void runqueue_ring_buffer_delete_at(runqueue_ring_buffer *buffer, unsigned int idx) {
+inline void runqueue_ring_buffer_delete_at(runqueue_ring_buffer *buffer, unsigned int idx) {
   for (unsigned int idx2 = idx; idx2 != buffer->tail; idx2 = (idx2 + 1) % buffer->size) {
     buffer->entries[idx2] = buffer->entries[(idx2 + 1) % buffer->size];
   }
@@ -70,7 +74,7 @@ void runqueue_ring_buffer_delete_at(runqueue_ring_buffer *buffer, unsigned int idx) {
   buffer->tail = (buffer->tail - 1) % buffer->size;
 }
 
-void runqueue_ring_buffer_delete(runqueue_ring_buffer *buffer, VALUE fiber) {
+inline void runqueue_ring_buffer_delete(runqueue_ring_buffer *buffer, VALUE fiber) {
   for (unsigned int i = 0; i < buffer->count; i++) {
     unsigned int idx = (buffer->head + i) % buffer->size;
     if (buffer->entries[idx].fiber == fiber) {
@@ -80,7 +84,7 @@ void runqueue_ring_buffer_delete(runqueue_ring_buffer *buffer, VALUE fiber) {
     }
   }
 }
 
-int runqueue_ring_buffer_index_of(runqueue_ring_buffer *buffer, VALUE fiber) {
+inline int runqueue_ring_buffer_index_of(runqueue_ring_buffer *buffer, VALUE fiber) {
   for (unsigned int i = 0; i < buffer->count; i++) {
     unsigned int idx = (buffer->head + i) % buffer->size;
     if (buffer->entries[idx].fiber == fiber)
@@ -89,6 +93,13 @@ int runqueue_ring_buffer_index_of(runqueue_ring_buffer *buffer, VALUE fiber) {
   return -1;
 }
 
-void runqueue_ring_buffer_clear(runqueue_ring_buffer *buffer) {
-  buffer->count = buffer->head = buffer->tail = 0;
-}
+inline void runqueue_ring_buffer_migrate(runqueue_ring_buffer *src, runqueue_ring_buffer *dest, VALUE fiber) {
+  for (unsigned int i = 0; i < src->count; i++) {
+    unsigned int idx = (src->head + i) % src->size;
+    if (src->entries[idx].fiber == fiber) {
+      runqueue_ring_buffer_push(dest, src->entries[idx].fiber, src->entries[idx].value);
+      runqueue_ring_buffer_delete_at(src, idx);
+      return;
+    }
+  }
+}
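A worked example of the index arithmetic migrate relies on, with hypothetical values: if size = 4, head = 3 and count = 3, the occupied slots in oldest-first order are

    i = 0  ->  idx = (3 + 0) % 4 = 3
    i = 1  ->  idx = (3 + 1) % 4 = 0
    i = 2  ->  idx = (3 + 2) % 4 = 1

If the target fiber sits at logical position 1 (physical slot 0), migrate pushes that entry onto dest, then runqueue_ring_buffer_delete_at(src, 0) shifts the entry in slot 1 down into slot 0 and pulls tail back by one, leaving src compacted with count = 2. Unlike delete, migrate re-enqueues the entry on dest together with its resume value, so a migrated (for example, parked and later unparked) fiber is eventually resumed with the value it was originally scheduled with.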