polyphony 0.45.1 → 0.46.1
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -0
- data/.gitmodules +0 -0
- data/CHANGELOG.md +35 -0
- data/Gemfile.lock +3 -3
- data/README.md +3 -3
- data/Rakefile +1 -1
- data/TODO.md +20 -14
- data/bin/test +4 -0
- data/examples/io/raw.rb +14 -0
- data/examples/io/reline.rb +18 -0
- data/examples/performance/fiber_transfer.rb +13 -4
- data/examples/performance/multi_snooze.rb +0 -1
- data/examples/performance/thread-vs-fiber/polyphony_server.rb +8 -20
- data/ext/liburing/liburing.h +585 -0
- data/ext/liburing/liburing/README.md +4 -0
- data/ext/liburing/liburing/barrier.h +73 -0
- data/ext/liburing/liburing/compat.h +15 -0
- data/ext/liburing/liburing/io_uring.h +343 -0
- data/ext/liburing/queue.c +333 -0
- data/ext/liburing/register.c +187 -0
- data/ext/liburing/setup.c +210 -0
- data/ext/liburing/syscall.c +54 -0
- data/ext/liburing/syscall.h +18 -0
- data/ext/polyphony/backend.h +1 -15
- data/ext/polyphony/backend_common.h +120 -0
- data/ext/polyphony/backend_io_uring.c +919 -0
- data/ext/polyphony/backend_io_uring_context.c +73 -0
- data/ext/polyphony/backend_io_uring_context.h +52 -0
- data/ext/polyphony/{libev_backend.c → backend_libev.c} +241 -297
- data/ext/polyphony/event.c +1 -1
- data/ext/polyphony/extconf.rb +31 -13
- data/ext/polyphony/fiber.c +107 -28
- data/ext/polyphony/libev.c +4 -0
- data/ext/polyphony/libev.h +8 -2
- data/ext/polyphony/liburing.c +8 -0
- data/ext/polyphony/playground.c +51 -0
- data/ext/polyphony/polyphony.c +6 -6
- data/ext/polyphony/polyphony.h +34 -14
- data/ext/polyphony/polyphony_ext.c +12 -4
- data/ext/polyphony/queue.c +1 -1
- data/ext/polyphony/runqueue.c +102 -0
- data/ext/polyphony/runqueue_ring_buffer.c +85 -0
- data/ext/polyphony/runqueue_ring_buffer.h +31 -0
- data/ext/polyphony/thread.c +42 -90
- data/lib/polyphony.rb +2 -2
- data/lib/polyphony/adapters/process.rb +0 -3
- data/lib/polyphony/adapters/trace.rb +2 -2
- data/lib/polyphony/core/exceptions.rb +0 -4
- data/lib/polyphony/core/global_api.rb +13 -11
- data/lib/polyphony/core/sync.rb +7 -5
- data/lib/polyphony/extensions/core.rb +14 -33
- data/lib/polyphony/extensions/debug.rb +13 -0
- data/lib/polyphony/extensions/fiber.rb +21 -44
- data/lib/polyphony/extensions/io.rb +15 -4
- data/lib/polyphony/extensions/openssl.rb +6 -0
- data/lib/polyphony/extensions/socket.rb +63 -10
- data/lib/polyphony/version.rb +1 -1
- data/polyphony.gemspec +1 -1
- data/test/helper.rb +36 -4
- data/test/io_uring_test.rb +55 -0
- data/test/stress.rb +4 -1
- data/test/test_backend.rb +15 -6
- data/test/test_ext.rb +1 -2
- data/test/test_fiber.rb +31 -24
- data/test/test_global_api.rb +71 -31
- data/test/test_io.rb +42 -0
- data/test/test_queue.rb +1 -1
- data/test/test_signal.rb +11 -8
- data/test/test_socket.rb +2 -2
- data/test/test_sync.rb +21 -0
- data/test/test_throttler.rb +3 -7
- data/test/test_trace.rb +7 -5
- metadata +31 -6
data/ext/polyphony/polyphony_ext.c
CHANGED
@@ -2,21 +2,29 @@
 
 void Init_Fiber();
 void Init_Polyphony();
-void
+void Init_Backend();
 void Init_Queue();
 void Init_Event();
+void Init_Runqueue();
 void Init_Thread();
 void Init_Tracing();
 
-
-
+#ifdef POLYPHONY_PLAYGROUND
+extern void playground();
+#endif
 
+void Init_polyphony_ext() {
   Init_Polyphony();
 
-
+  Init_Backend();
   Init_Queue();
   Init_Event();
+  Init_Runqueue();
   Init_Fiber();
   Init_Thread();
   Init_Tracing();
+
+#ifdef POLYPHONY_PLAYGROUND
+  playground();
+#endif
 }
data/ext/polyphony/queue.c
CHANGED
@@ -89,7 +89,7 @@ VALUE Queue_shift(VALUE self) {
   VALUE switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
   ring_buffer_delete(&queue->shift_queue, fiber);
 
-
+  RAISE_IF_EXCEPTION(switchpoint_result);
   RB_GC_GUARD(switchpoint_result);
 
   if (queue->values.count > 0)
data/ext/polyphony/runqueue.c
ADDED
@@ -0,0 +1,102 @@
+#include "polyphony.h"
+#include "runqueue_ring_buffer.h"
+
+typedef struct queue {
+  runqueue_ring_buffer entries;
+} Runqueue_t;
+
+VALUE cRunqueue = Qnil;
+
+static void Runqueue_mark(void *ptr) {
+  Runqueue_t *runqueue = ptr;
+  runqueue_ring_buffer_mark(&runqueue->entries);
+}
+
+static void Runqueue_free(void *ptr) {
+  Runqueue_t *runqueue = ptr;
+  runqueue_ring_buffer_free(&runqueue->entries);
+  xfree(ptr);
+}
+
+static size_t Runqueue_size(const void *ptr) {
+  return sizeof(Runqueue_t);
+}
+
+static const rb_data_type_t Runqueue_type = {
+  "Runqueue",
+  {Runqueue_mark, Runqueue_free, Runqueue_size,},
+  0, 0, 0
+};
+
+static VALUE Runqueue_allocate(VALUE klass) {
+  Runqueue_t *runqueue;
+
+  runqueue = ALLOC(Runqueue_t);
+  return TypedData_Wrap_Struct(klass, &Runqueue_type, runqueue);
+}
+
+#define GetRunqueue(obj, runqueue) \
+  TypedData_Get_Struct((obj), Runqueue_t, &Runqueue_type, (runqueue))
+
+static VALUE Runqueue_initialize(VALUE self) {
+  Runqueue_t *runqueue;
+  GetRunqueue(self, runqueue);
+
+  runqueue_ring_buffer_init(&runqueue->entries);
+
+  return self;
+}
+
+void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule) {
+  Runqueue_t *runqueue;
+  GetRunqueue(self, runqueue);
+
+  if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
+  runqueue_ring_buffer_push(&runqueue->entries, fiber, value);
+}
+
+void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule) {
+  Runqueue_t *runqueue;
+  GetRunqueue(self, runqueue);
+  if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
+  runqueue_ring_buffer_unshift(&runqueue->entries, fiber, value);
+}
+
+runqueue_entry Runqueue_shift(VALUE self) {
+  Runqueue_t *runqueue;
+  GetRunqueue(self, runqueue);
+  return runqueue_ring_buffer_shift(&runqueue->entries);
+}
+
+void Runqueue_delete(VALUE self, VALUE fiber) {
+  Runqueue_t *runqueue;
+  GetRunqueue(self, runqueue);
+  runqueue_ring_buffer_delete(&runqueue->entries, fiber);
+}
+
+void Runqueue_clear(VALUE self) {
+  Runqueue_t *runqueue;
+  GetRunqueue(self, runqueue);
+  runqueue_ring_buffer_clear(&runqueue->entries);
+}
+
+long Runqueue_len(VALUE self) {
+  Runqueue_t *runqueue;
+  GetRunqueue(self, runqueue);
+
+  return runqueue->entries.count;
+}
+
+int Runqueue_empty_p(VALUE self) {
+  Runqueue_t *runqueue;
+  GetRunqueue(self, runqueue);
+
+  return (runqueue->entries.count == 0);
+}
+
+void Init_Runqueue() {
+  cRunqueue = rb_define_class_under(mPolyphony, "Runqueue", rb_cData);
+  rb_define_alloc_func(cRunqueue, Runqueue_allocate);
+
+  rb_define_method(cRunqueue, "initialize", Runqueue_initialize, 0);
+}
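Note: Runqueue is exposed to Ruby only for allocation and initialize; scheduling goes through the C-level functions above. A minimal sketch of that calling convention (hypothetical illustration, not part of the diff; the function name demo_run_one is invented, and it assumes the file is compiled into the extension, where polyphony.h is assumed to declare cRunqueue and the Runqueue_* prototypes):

    #include "polyphony.h"

    // Schedule a fiber and immediately pop the next runnable entry, the way
    // thread.c drives the runqueue once Init_Runqueue() has registered cRunqueue.
    static VALUE demo_run_one(VALUE fiber, VALUE value) {
      VALUE runqueue = rb_funcall(cRunqueue, rb_intern("new"), 0);

      Runqueue_push(runqueue, fiber, value, 0);        // append; reschedule flag off
      runqueue_entry next = Runqueue_shift(runqueue);  // oldest entry, or {Qnil, Qnil} when empty
      return next.fiber == Qnil ? Qnil : next.value;
    }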
data/ext/polyphony/runqueue_ring_buffer.c
ADDED
@@ -0,0 +1,85 @@
+#include "polyphony.h"
+#include "runqueue_ring_buffer.h"
+
+void runqueue_ring_buffer_init(runqueue_ring_buffer *buffer) {
+  buffer->size = 1;
+  buffer->count = 0;
+  buffer->entries = malloc(buffer->size * sizeof(runqueue_entry));
+  buffer->head = 0;
+  buffer->tail = 0;
+}
+
+void runqueue_ring_buffer_free(runqueue_ring_buffer *buffer) {
+  free(buffer->entries);
+}
+
+int runqueue_ring_buffer_empty_p(runqueue_ring_buffer *buffer) {
+  return buffer->count == 0;
+}
+
+static runqueue_entry nil_runqueue_entry = {(Qnil), (Qnil)};
+
+runqueue_entry runqueue_ring_buffer_shift(runqueue_ring_buffer *buffer) {
+  if (buffer->count == 0) return nil_runqueue_entry;
+
+  runqueue_entry value = buffer->entries[buffer->head];
+  buffer->head = (buffer->head + 1) % buffer->size;
+  buffer->count--;
+  return value;
+}
+
+void runqueue_ring_buffer_resize(runqueue_ring_buffer *buffer) {
+  unsigned int old_size = buffer->size;
+  buffer->size = old_size == 1 ? 4 : old_size * 2;
+  buffer->entries = realloc(buffer->entries, buffer->size * sizeof(runqueue_entry));
+  for (unsigned int idx = 0; idx < buffer->head && idx < buffer->tail; idx++)
+    buffer->entries[old_size + idx] = buffer->entries[idx];
+  buffer->tail = buffer->head + buffer->count;
+}
+
+void runqueue_ring_buffer_unshift(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
+  if (buffer->count == buffer->size) runqueue_ring_buffer_resize(buffer);
+
+  buffer->head = (buffer->head - 1) % buffer->size;
+  buffer->entries[buffer->head].fiber = fiber;
+  buffer->entries[buffer->head].value = value;
+  buffer->count++;
+}
+
+void runqueue_ring_buffer_push(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
+  if (buffer->count == buffer->size) runqueue_ring_buffer_resize(buffer);
+
+  buffer->entries[buffer->tail].fiber = fiber;
+  buffer->entries[buffer->tail].value = value;
+  buffer->tail = (buffer->tail + 1) % buffer->size;
+  buffer->count++;
+}
+
+void runqueue_ring_buffer_mark(runqueue_ring_buffer *buffer) {
+  for (unsigned int i = 0; i < buffer->count; i++) {
+    rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size].fiber);
+    rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size].value);
+  }
+}
+
+void runqueue_ring_buffer_delete_at(runqueue_ring_buffer *buffer, unsigned int idx) {
+  for (unsigned int idx2 = idx; idx2 != buffer->tail; idx2 = (idx2 + 1) % buffer->size) {
+    buffer->entries[idx2] = buffer->entries[(idx2 + 1) % buffer->size];
+  }
+  buffer->count--;
+  buffer->tail = (buffer->tail - 1) % buffer->size;
+}
+
+void runqueue_ring_buffer_delete(runqueue_ring_buffer *buffer, VALUE fiber) {
+  for (unsigned int i = 0; i < buffer->count; i++) {
+    unsigned int idx = (buffer->head + i) % buffer->size;
+    if (buffer->entries[idx].fiber == fiber) {
+      runqueue_ring_buffer_delete_at(buffer, idx);
+      return;
+    }
+  }
+}
+
+void runqueue_ring_buffer_clear(runqueue_ring_buffer *buffer) {
+  buffer->count = buffer->head = buffer->tail = 0;
+}
data/ext/polyphony/runqueue_ring_buffer.h
ADDED
@@ -0,0 +1,31 @@
+#ifndef RUNQUEUE_RING_BUFFER_H
+#define RUNQUEUE_RING_BUFFER_H
+
+#include "ruby.h"
+
+typedef struct runqueue_entry {
+  VALUE fiber;
+  VALUE value;
+} runqueue_entry;
+
+typedef struct runqueue_ring_buffer {
+  runqueue_entry *entries;
+  unsigned int size;
+  unsigned int count;
+  unsigned int head;
+  unsigned int tail;
+} runqueue_ring_buffer;
+
+void runqueue_ring_buffer_init(runqueue_ring_buffer *buffer);
+void runqueue_ring_buffer_free(runqueue_ring_buffer *buffer);
+void runqueue_ring_buffer_mark(runqueue_ring_buffer *buffer);
+int runqueue_ring_buffer_empty_p(runqueue_ring_buffer *buffer);
+void runqueue_ring_buffer_clear(runqueue_ring_buffer *buffer);
+
+runqueue_entry runqueue_ring_buffer_shift(runqueue_ring_buffer *buffer);
+void runqueue_ring_buffer_unshift(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value);
+void runqueue_ring_buffer_push(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value);
+
+void runqueue_ring_buffer_delete(runqueue_ring_buffer *buffer, VALUE fiber);
+
+#endif /* RUNQUEUE_RING_BUFFER_H */
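The ring buffer starts at a single slot, grows geometrically (1 → 4 → 8 → …) and wraps head/tail indices modulo its size; unshift places an entry before the current head so it is shifted out first. A minimal sketch exercising it in isolation (hypothetical harness, not shipped with the gem; assumes it is compiled next to the two files above and linked against libruby so VALUE, Qnil and INT2NUM are available):

    #include <stdio.h>
    #include "ruby.h"
    #include "runqueue_ring_buffer.h"

    int main(void) {
      ruby_init();                                           // boot a minimal Ruby VM for VALUE handling

      runqueue_ring_buffer buf;
      runqueue_ring_buffer_init(&buf);                       // starts with size == 1

      runqueue_ring_buffer_push(&buf, Qnil, INT2NUM(1));
      runqueue_ring_buffer_push(&buf, Qnil, INT2NUM(2));     // second push triggers a resize to 4 slots
      runqueue_ring_buffer_unshift(&buf, Qnil, INT2NUM(0));  // placed at the head, ahead of 1 and 2

      while (!runqueue_ring_buffer_empty_p(&buf)) {
        runqueue_entry e = runqueue_ring_buffer_shift(&buf);
        printf("%ld\n", NUM2LONG(e.value));                  // prints 0, 1, 2
      }

      runqueue_ring_buffer_free(&buf);
      return ruby_cleanup(0);
    }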
data/ext/polyphony/thread.c
CHANGED
@@ -4,17 +4,15 @@ ID ID_deactivate_all_watchers_post_fork;
 ID ID_ivar_backend;
 ID ID_ivar_join_wait_queue;
 ID ID_ivar_main_fiber;
-ID ID_ivar_result;
 ID ID_ivar_terminated;
-ID
-ID ID_runnable_next;
+ID ID_ivar_runqueue;
 ID ID_stop;
 
 static VALUE Thread_setup_fiber_scheduling(VALUE self) {
-  VALUE
+  VALUE runqueue = rb_funcall(cRunqueue, ID_new, 0);
 
   rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
-  rb_ivar_set(self,
+  rb_ivar_set(self, ID_ivar_runqueue, runqueue);
 
   return self;
 }
@@ -35,10 +33,10 @@ static VALUE SYM_pending_watchers;
 static VALUE Thread_fiber_scheduling_stats(VALUE self) {
   VALUE backend = rb_ivar_get(self,ID_ivar_backend);
   VALUE stats = rb_hash_new();
-  VALUE
+  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
   long pending_count;
 
-  long scheduled_count =
+  long scheduled_count = Runqueue_len(runqueue);
   rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(scheduled_count));
 
   pending_count = __BACKEND__.pending_count(backend);
@@ -47,30 +45,18 @@ static VALUE Thread_fiber_scheduling_stats(VALUE self) {
   return stats;
 }
 
-
-VALUE
-
-  if (rb_fiber_alive_p(fiber) != Qtrue) return self;
-
-  int already_runnable = rb_ivar_get(fiber, ID_runnable) != Qnil;
+void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
+  VALUE runqueue;
+  int already_runnable;
 
-  if (
-
-
-    // If the fiber is already runnable and the runnable value is an exception,
-    // we don't update the value, in order to prevent a race condition where
-    // exceptions will be lost (see issue #33)
-    if (TEST_EXCEPTION(current_runnable_value)) return self;
-  }
+  if (rb_fiber_alive_p(fiber) != Qtrue) return;
+  already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;
 
-  rb_ivar_set(fiber, ID_runnable_value, value);
   COND_TRACE(3, SYM_fiber_schedule, fiber, value);
-
+  runqueue = rb_ivar_get(self, ID_ivar_runqueue);
+  (prioritize ? Runqueue_unshift : Runqueue_push)(runqueue, fiber, value, already_runnable);
   if (!already_runnable) {
-
-    Queue_push(queue, fiber);
-    rb_ivar_set(fiber, ID_runnable, Qtrue);
-
+    rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
     if (rb_thread_current() != self) {
       // If the fiber scheduling is done across threads, we need to make sure the
       // target thread is woken up in case it is in the middle of running its
@@ -81,46 +67,22 @@ VALUE Thread_schedule_fiber(VALUE self, VALUE fiber, VALUE value) {
       __BACKEND__.wakeup(backend);
     }
   }
+}
+
+VALUE Thread_schedule_fiber(VALUE self, VALUE fiber, VALUE value) {
+  schedule_fiber(self, fiber, value, 0);
   return self;
 }
 
 VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value) {
-
-
-  if (rb_fiber_alive_p(fiber) != Qtrue) return self;
-
-  COND_TRACE(3, SYM_fiber_schedule, fiber, value);
-  rb_ivar_set(fiber, ID_runnable_value, value);
-
-  queue = rb_ivar_get(self, ID_run_queue);
-
-  // if fiber is already scheduled, remove it from the run queue
-  if (rb_ivar_get(fiber, ID_runnable) != Qnil) {
-    Queue_delete(queue, fiber);
-  } else {
-    rb_ivar_set(fiber, ID_runnable, Qtrue);
-  }
-
-  // the fiber is given priority by putting it at the front of the run queue
-  Queue_unshift(queue, fiber);
-
-  if (rb_thread_current() != self) {
-    // if the fiber scheduling is done across threads, we need to make sure the
-    // target thread is woken up in case it is in the middle of running its
-    // event loop. Otherwise it's gonna be stuck waiting for an event to
-    // happen, not knowing that it there's already a fiber ready to run in its
-    // run queue.
-    VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-    __BACKEND__.wakeup(backend);
-  }
+  schedule_fiber(self, fiber, value, 1);
   return self;
 }
 
 VALUE Thread_switch_fiber(VALUE self) {
   VALUE current_fiber = rb_fiber_current();
-  VALUE
-
-  VALUE value;
+  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
+  runqueue_entry next;
   VALUE backend = rb_ivar_get(self, ID_ivar_backend);
   int ref_count;
   int backend_was_polled = 0;
@@ -130,47 +92,40 @@ VALUE Thread_switch_fiber(VALUE self) {
 
   ref_count = __BACKEND__.ref_count(backend);
   while (1) {
-
-    if (
+    next = Runqueue_shift(runqueue);
+    if (next.fiber != Qnil) {
       if (backend_was_polled == 0 && ref_count > 0) {
         // this prevents event starvation in case the run queue never empties
-        __BACKEND__.poll(backend, Qtrue, current_fiber,
+        __BACKEND__.poll(backend, Qtrue, current_fiber, runqueue);
       }
       break;
     }
     if (ref_count == 0) break;
 
-    __BACKEND__.poll(backend, Qnil, current_fiber,
+    __BACKEND__.poll(backend, Qnil, current_fiber, runqueue);
     backend_was_polled = 1;
   }
 
-  if (
+  if (next.fiber == Qnil) return Qnil;
 
   // run next fiber
-
-  COND_TRACE(3, SYM_fiber_run, next_fiber, value);
-
-  rb_ivar_set(next_fiber, ID_runnable, Qnil);
-  RB_GC_GUARD(next_fiber);
-  RB_GC_GUARD(value);
-  return (next_fiber == current_fiber) ?
-    value : rb_funcall(next_fiber, ID_transfer, 1, value);
-}
+  COND_TRACE(3, SYM_fiber_run, next.fiber, next.value);
 
-
-
-
-  return
+  rb_ivar_set(next.fiber, ID_ivar_runnable, Qnil);
+  RB_GC_GUARD(next.fiber);
+  RB_GC_GUARD(next.value);
+  return (next.fiber == current_fiber) ?
+    next.value : FIBER_TRANSFER(next.fiber, next.value);
 }
 
 VALUE Thread_reset_fiber_scheduling(VALUE self) {
-  VALUE queue = rb_ivar_get(self,
-
+  VALUE queue = rb_ivar_get(self, ID_ivar_runqueue);
+  Runqueue_clear(queue);
   Thread_fiber_reset_ref_count(self);
   return self;
 }
 
-VALUE
+VALUE Thread_fiber_schedule_and_wakeup(VALUE self, VALUE fiber, VALUE resume_obj) {
   VALUE backend = rb_ivar_get(self, ID_ivar_backend);
   if (fiber != Qnil) {
     Thread_schedule_fiber_with_priority(self, fiber, resume_obj);
@@ -193,25 +148,22 @@ void Init_Thread() {
   rb_define_method(rb_cThread, "setup_fiber_scheduling", Thread_setup_fiber_scheduling, 0);
   rb_define_method(rb_cThread, "reset_fiber_scheduling", Thread_reset_fiber_scheduling, 0);
   rb_define_method(rb_cThread, "fiber_scheduling_stats", Thread_fiber_scheduling_stats, 0);
-  rb_define_method(rb_cThread, "
+  rb_define_method(rb_cThread, "schedule_and_wakeup", Thread_fiber_schedule_and_wakeup, 2);
 
   rb_define_method(rb_cThread, "schedule_fiber", Thread_schedule_fiber, 2);
   rb_define_method(rb_cThread, "schedule_fiber_with_priority",
     Thread_schedule_fiber_with_priority, 2);
   rb_define_method(rb_cThread, "switch_fiber", Thread_switch_fiber, 0);
-  rb_define_method(rb_cThread, "run_queue_trace", Thread_run_queue_trace, 0);
 
   rb_define_method(rb_cThread, "debug!", Thread_debug, 0);
 
-  ID_deactivate_all_watchers_post_fork
-  ID_ivar_backend
-  ID_ivar_join_wait_queue
-  ID_ivar_main_fiber
-
-
-
-  ID_runnable_next = rb_intern("runnable_next");
-  ID_stop = rb_intern("stop");
+  ID_deactivate_all_watchers_post_fork = rb_intern("deactivate_all_watchers_post_fork");
+  ID_ivar_backend = rb_intern("@backend");
+  ID_ivar_join_wait_queue = rb_intern("@join_wait_queue");
+  ID_ivar_main_fiber = rb_intern("@main_fiber");
+  ID_ivar_terminated = rb_intern("@terminated");
+  ID_ivar_runqueue = rb_intern("@runqueue");
+  ID_stop = rb_intern("stop");
 
   SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));
   SYM_pending_watchers = ID2SYM(rb_intern("pending_watchers"));
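One behavioral detail worth noting: when a fiber that is already runnable is scheduled again, schedule_fiber passes already_runnable as the reschedule flag, so Runqueue_push/Runqueue_unshift first delete the existing entry. A fiber therefore never occupies two runqueue slots, and scheduling it with priority moves it to the front rather than duplicating it. A hypothetical sketch of that effect, using only functions added in this release (demo_reschedule is invented; assumes it is compiled into the extension, where polyphony.h is assumed to declare the Runqueue_* prototypes):

    #include "polyphony.h"

    // After these two calls the runqueue holds a single entry for `fiber`,
    // sitting at the front and carrying the latest value.
    static void demo_reschedule(VALUE runqueue, VALUE fiber) {
      Runqueue_push(runqueue, fiber, INT2NUM(1), 0);     // first schedule: appended
      Runqueue_unshift(runqueue, fiber, INT2NUM(2), 1);  // reschedule: old entry deleted, new one unshifted
    }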