polyphony 0.54.0 → 0.59
- checksums.yaml +4 -4
- data/.gitignore +3 -1
- data/CHANGELOG.md +54 -25
- data/Gemfile.lock +1 -1
- data/TODO.md +0 -3
- data/examples/core/idle_gc.rb +21 -0
- data/examples/io/pipe.rb +11 -0
- data/examples/io/splice_chunks.rb +29 -0
- data/examples/io/stdio.rb +8 -0
- data/ext/polyphony/backend_common.c +288 -0
- data/ext/polyphony/backend_common.h +49 -130
- data/ext/polyphony/backend_io_uring.c +439 -122
- data/ext/polyphony/backend_io_uring_context.c +14 -3
- data/ext/polyphony/backend_io_uring_context.h +11 -11
- data/ext/polyphony/backend_libev.c +463 -94
- data/ext/polyphony/fiber.c +0 -2
- data/ext/polyphony/polyphony.c +17 -22
- data/ext/polyphony/polyphony.h +9 -16
- data/ext/polyphony/polyphony_ext.c +0 -4
- data/ext/polyphony/runqueue.c +35 -72
- data/ext/polyphony/runqueue.h +27 -0
- data/ext/polyphony/thread.c +10 -84
- data/lib/polyphony/extensions/fiber.rb +2 -2
- data/lib/polyphony/extensions/socket.rb +6 -20
- data/lib/polyphony/extensions/thread.rb +8 -0
- data/lib/polyphony/version.rb +1 -1
- data/test/helper.rb +3 -3
- data/test/test_backend.rb +137 -2
- data/test/test_fiber.rb +0 -1
- data/test/test_io.rb +6 -3
- data/test/test_signal.rb +1 -1
- data/test/test_thread.rb +57 -11
- data/test/test_thread_pool.rb +1 -1
- data/test/test_timer.rb +16 -10
- data/test/test_trace.rb +27 -49
- metadata +8 -4
- data/ext/polyphony/tracing.c +0 -11
- data/lib/polyphony/adapters/trace.rb +0 -138
data/ext/polyphony/fiber.c
CHANGED
@@ -1,6 +1,5 @@
 #include "polyphony.h"

-ID ID_fiber_trace;
 ID ID_ivar_auto_watcher;
 ID ID_ivar_mailbox;
 ID ID_ivar_result;
@@ -169,7 +168,6 @@ void Init_Fiber() {
   rb_global_variable(&SYM_runnable);
   rb_global_variable(&SYM_waiting);

-  ID_fiber_trace = rb_intern("__fiber_trace__");
   ID_ivar_auto_watcher = rb_intern("@auto_watcher");
   ID_ivar_mailbox = rb_intern("@mailbox");
   ID_ivar_result = rb_intern("@result");
data/ext/polyphony/polyphony.c
CHANGED
@@ -10,6 +10,7 @@ ID ID_each;
 ID ID_inspect;
 ID ID_invoke;
 ID ID_new;
+ID ID_ivar_blocking_mode;
 ID ID_ivar_io;
 ID ID_ivar_runnable;
 ID ID_ivar_running;
@@ -41,11 +42,6 @@ static VALUE Polyphony_suspend(VALUE self) {
   return ret;
 }

-VALUE Polyphony_trace(VALUE self, VALUE enabled) {
-  __tracing_enabled__ = RTEST(enabled) ? 1 : 0;
-  return Qnil;
-}
-
 VALUE Polyphony_backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
   return Backend_accept(BACKEND(), server_socket, socket_class);
 }
@@ -129,8 +125,6 @@ VALUE Polyphony_backend_write(int argc, VALUE *argv, VALUE self) {
 void Init_Polyphony() {
   mPolyphony = rb_define_module("Polyphony");

-  rb_define_singleton_method(mPolyphony, "trace", Polyphony_trace, 1);
-
   // backend methods
   rb_define_singleton_method(mPolyphony, "backend_accept", Polyphony_backend_accept, 2);
   rb_define_singleton_method(mPolyphony, "backend_accept_loop", Polyphony_backend_accept_loop, 2);
@@ -158,19 +152,20 @@ void Init_Polyphony() {

   cTimeoutException = rb_define_class_under(mPolyphony, "TimeoutException", rb_eException);

-  ID_call          = rb_intern("call");
-  ID_caller        = rb_intern("caller");
-  ID_clear         = rb_intern("clear");
-  ID_each          = rb_intern("each");
-  ID_inspect       = rb_intern("inspect");
-  ID_invoke        = rb_intern("invoke");
-  ID_ivar_io       = rb_intern("@io");
-  ID_ivar_runnable = rb_intern("@runnable");
-  ID_ivar_running  = rb_intern("@running");
-  ID_ivar_thread   = rb_intern("@thread");
-  ID_new           = rb_intern("new");
-  ID_signal        = rb_intern("signal");
-  ID_size          = rb_intern("size");
-  ID_switch_fiber  = rb_intern("switch_fiber");
-  ID_transfer      = rb_intern("transfer");
+  ID_call = rb_intern("call");
+  ID_caller = rb_intern("caller");
+  ID_clear = rb_intern("clear");
+  ID_each = rb_intern("each");
+  ID_inspect = rb_intern("inspect");
+  ID_invoke = rb_intern("invoke");
+  ID_ivar_blocking_mode = rb_intern("@blocking_mode");
+  ID_ivar_io = rb_intern("@io");
+  ID_ivar_runnable = rb_intern("@runnable");
+  ID_ivar_running = rb_intern("@running");
+  ID_ivar_thread = rb_intern("@thread");
+  ID_new = rb_intern("new");
+  ID_signal = rb_intern("signal");
+  ID_size = rb_intern("size");
+  ID_switch_fiber = rb_intern("switch_fiber");
+  ID_transfer = rb_intern("transfer");
 }
data/ext/polyphony/polyphony.h
CHANGED
@@ -5,6 +5,7 @@

 #include "ruby.h"
 #include "runqueue_ring_buffer.h"
+#include "backend_common.h"

 // debugging
 #define OBJ_ID(obj) (NUM2LONG(rb_funcall(obj, rb_intern("object_id"), 0)))
@@ -18,10 +19,6 @@
   free(strings); \
 }

-// tracing
-#define TRACE(...) rb_funcall(rb_cObject, ID_fiber_trace, __VA_ARGS__)
-#define COND_TRACE(...) if (__tracing_enabled__) { TRACE(__VA_ARGS__); }
-
 // exceptions
 #define TEST_EXCEPTION(ret) (rb_obj_is_kind_of(ret, rb_eException) == Qtrue)
 #define RAISE_EXCEPTION(e) rb_funcall(e, ID_invoke, 0);
@@ -36,17 +33,16 @@
 extern VALUE mPolyphony;
 extern VALUE cQueue;
 extern VALUE cEvent;
-extern VALUE cRunqueue;
 extern VALUE cTimeoutException;

 extern ID ID_call;
 extern ID ID_caller;
 extern ID ID_clear;
 extern ID ID_each;
-extern ID ID_fiber_trace;
 extern ID ID_inspect;
 extern ID ID_invoke;
 extern ID ID_ivar_backend;
+extern ID ID_ivar_blocking_mode;
 extern ID ID_ivar_io;
 extern ID ID_ivar_runnable;
 extern ID ID_ivar_running;
@@ -66,14 +62,6 @@ extern VALUE SYM_fiber_schedule;
 extern VALUE SYM_fiber_switchpoint;
 extern VALUE SYM_fiber_terminate;

-extern int __tracing_enabled__;
-
-enum {
-  FIBER_STATE_NOT_SCHEDULED = 0,
-  FIBER_STATE_WAITING = 1,
-  FIBER_STATE_SCHEDULED = 2
-};
-
 VALUE Fiber_auto_watcher(VALUE self);
 void Fiber_make_runnable(VALUE fiber, VALUE value);

@@ -90,6 +78,7 @@ int Runqueue_index_of(VALUE self, VALUE fiber);
 void Runqueue_clear(VALUE self);
 long Runqueue_len(VALUE self);
 int Runqueue_empty_p(VALUE self);
+int Runqueue_should_poll_nonblocking(VALUE self);

 #ifdef POLYPHONY_BACKEND_LIBEV
 #define Backend_recv_loop Backend_read_loop
@@ -119,10 +108,14 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write);
 VALUE Backend_waitpid(VALUE self, VALUE pid);
 VALUE Backend_write_m(int argc, VALUE *argv, VALUE self);

-
-VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue);
+VALUE Backend_poll(VALUE self, VALUE blocking);
 VALUE Backend_wait_event(VALUE self, VALUE raise_on_exception);
 VALUE Backend_wakeup(VALUE self);
+VALUE Backend_run_idle_tasks(VALUE self);
+VALUE Backend_switch_fiber(VALUE self);
+void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize);
+struct backend_stats Backend_stats(VALUE self);
+void Backend_unschedule_fiber(VALUE self, VALUE fiber);

 VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
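Note on the reshaped backend API: Backend_poll drops the current_fiber and runqueue arguments in favor of a single blocking flag, and scheduling, switching, unscheduling and statistics are now reached through Backend_schedule_fiber, Backend_switch_fiber, Backend_unschedule_fiber and Backend_stats. Backend_stats returns a struct backend_stats by value; the scheduled_fibers and pending_ops fields are the ones read in thread.c further down. The following is a minimal, self-contained sketch of that return-by-value pattern only; the struct layout is illustrative and not copied from backend_common.h.

    #include <stdio.h>

    /* Illustrative stand-in for the backend_stats struct declared in
       backend_common.h; only the two fields used in thread.c are shown. */
    struct backend_stats {
      unsigned int scheduled_fibers;
      unsigned int pending_ops;
    };

    /* A backend-style accessor returning the whole struct by value, so a
       caller needs one call instead of separate runqueue/backend queries. */
    static struct backend_stats backend_stats_example(void) {
      struct backend_stats stats = { .scheduled_fibers = 3, .pending_ops = 7 };
      return stats;
    }

    int main(void) {
      struct backend_stats stats = backend_stats_example();
      printf("scheduled_fibers=%u pending_ops=%u\n",
             stats.scheduled_fibers, stats.pending_ops);
      return 0;
    }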
data/ext/polyphony/polyphony_ext.c
CHANGED
@@ -5,10 +5,8 @@ void Init_Polyphony();
 void Init_Backend();
 void Init_Queue();
 void Init_Event();
-void Init_Runqueue();
 void Init_SocketExtensions();
 void Init_Thread();
-void Init_Tracing();

 #ifdef POLYPHONY_PLAYGROUND
 extern void playground();
@@ -20,10 +18,8 @@ void Init_polyphony_ext() {
   Init_Backend();
   Init_Queue();
   Init_Event();
-  Init_Runqueue();
   Init_Fiber();
   Init_Thread();
-  Init_Tracing();

   Init_SocketExtensions();

data/ext/polyphony/runqueue.c
CHANGED
@@ -1,108 +1,71 @@
 #include "polyphony.h"
-#include "runqueue_ring_buffer.h"
+#include "runqueue.h"

-typedef struct queue {
-  runqueue_ring_buffer entries;
-} Runqueue_t;
-
-VALUE cRunqueue = Qnil;
-
-static void Runqueue_mark(void *ptr) {
-  Runqueue_t *runqueue = ptr;
-  runqueue_ring_buffer_mark(&runqueue->entries);
+inline void runqueue_initialize(runqueue_t *runqueue) {
+  runqueue_ring_buffer_init(&runqueue->entries);
+  runqueue->high_watermark = 0;
+  runqueue->switch_count = 0;
 }

-static void Runqueue_free(void *ptr) {
-  Runqueue_t *runqueue = ptr;
+inline void runqueue_finalize(runqueue_t *runqueue) {
   runqueue_ring_buffer_free(&runqueue->entries);
-  xfree(ptr);
-}
-
-static size_t Runqueue_size(const void *ptr) {
-  return sizeof(Runqueue_t);
-}
-
-static const rb_data_type_t Runqueue_type = {
-  "Runqueue",
-  {Runqueue_mark, Runqueue_free, Runqueue_size,},
-  0, 0, 0
-};
-
-static VALUE Runqueue_allocate(VALUE klass) {
-  Runqueue_t *runqueue;
-
-  runqueue = ALLOC(Runqueue_t);
-  return TypedData_Wrap_Struct(klass, &Runqueue_type, runqueue);
 }

-#define GetRunqueue(obj, runqueue) \
-  TypedData_Get_Struct((obj), Runqueue_t, &Runqueue_type, (runqueue))
-
-static VALUE Runqueue_initialize(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
-  runqueue_ring_buffer_init(&runqueue->entries);
-
-  return self;
+inline void runqueue_mark(runqueue_t *runqueue) {
+  runqueue_ring_buffer_mark(&runqueue->entries);
 }

-void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
+inline void runqueue_push(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule) {
   if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
   runqueue_ring_buffer_push(&runqueue->entries, fiber, value);
+  if (runqueue->entries.count > runqueue->high_watermark)
+    runqueue->high_watermark = runqueue->entries.count;
 }

-void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule) {
   if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
   runqueue_ring_buffer_unshift(&runqueue->entries, fiber, value);
+  if (runqueue->entries.count > runqueue->high_watermark)
+    runqueue->high_watermark = runqueue->entries.count;
 }

-runqueue_entry Runqueue_shift(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-  return runqueue_ring_buffer_shift(&runqueue->entries);
+inline runqueue_entry runqueue_shift(runqueue_t *runqueue) {
+  runqueue_entry entry = runqueue_ring_buffer_shift(&runqueue->entries);
+  if (entry.fiber == Qnil)
+    runqueue->high_watermark = 0;
+  else
+    runqueue->switch_count += 1;
+  return entry;
 }

-void Runqueue_delete(VALUE self, VALUE fiber) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_delete(runqueue_t *runqueue, VALUE fiber) {
   runqueue_ring_buffer_delete(&runqueue->entries, fiber);
 }

-int Runqueue_index_of(VALUE self, VALUE fiber) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline int runqueue_index_of(runqueue_t *runqueue, VALUE fiber) {
   return runqueue_ring_buffer_index_of(&runqueue->entries, fiber);
 }

-void Runqueue_clear(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_clear(runqueue_t *runqueue) {
   runqueue_ring_buffer_clear(&runqueue->entries);
 }

-long Runqueue_len(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
+inline long runqueue_len(runqueue_t *runqueue) {
   return runqueue->entries.count;
 }

-int Runqueue_empty_p(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
+inline int runqueue_empty_p(runqueue_t *runqueue) {
   return (runqueue->entries.count == 0);
 }

-void Init_Runqueue() {
-  cRunqueue = rb_define_class_under(mPolyphony, "Runqueue", rb_cObject);
-  rb_define_alloc_func(cRunqueue, Runqueue_allocate);
+static const unsigned int ANTI_STARVE_HIGH_WATERMARK_THRESHOLD = 128;
+static const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;
+
+inline int runqueue_should_poll_nonblocking(runqueue_t *runqueue) {
+  if (runqueue->high_watermark < ANTI_STARVE_HIGH_WATERMARK_THRESHOLD) return 0;
+  if (runqueue->switch_count < ANTI_STARVE_SWITCH_COUNT_THRESHOLD) return 0;

-  rb_define_method(cRunqueue, "initialize", Runqueue_initialize, 0);
+  // reset the switch count for the next anti-starvation cycle
+  runqueue->switch_count = 0;
+  return 1;
 }
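The rewritten runqueue keeps two counters in support of anti-starvation polling: high_watermark records the largest queue length seen since the queue last drained, and switch_count counts shifts since the last non-blocking poll. runqueue_should_poll_nonblocking returns 1 only once both counters reach their thresholds (128 and 64 above), and then resets switch_count. Below is a self-contained toy model of that accounting, using plain integers in place of the ring buffer and fibers; the field names and thresholds are taken from the diff, while the driver loop is invented for illustration.

    #include <stdio.h>

    /* Thresholds as defined in runqueue.c above. */
    static const unsigned int ANTI_STARVE_HIGH_WATERMARK_THRESHOLD = 128;
    static const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;

    /* Toy stand-in for runqueue_t: only the bookkeeping fields matter here. */
    typedef struct {
      unsigned int count;           /* entries.count in the real struct */
      unsigned int high_watermark;  /* largest count seen since last drain */
      unsigned int switch_count;    /* shifts since the last nonblocking poll */
    } toy_runqueue;

    static void toy_push(toy_runqueue *q) {
      q->count += 1;
      if (q->count > q->high_watermark) q->high_watermark = q->count;
    }

    static void toy_shift(toy_runqueue *q) {
      if (q->count == 0) {          /* an empty shift resets the high watermark */
        q->high_watermark = 0;
        return;
      }
      q->count -= 1;
      q->switch_count += 1;
    }

    /* Mirrors runqueue_should_poll_nonblocking: poll the event backend without
       blocking only when the queue has been busy (high watermark) and enough
       fiber switches have happened since the last such poll. */
    static int toy_should_poll_nonblocking(toy_runqueue *q) {
      if (q->high_watermark < ANTI_STARVE_HIGH_WATERMARK_THRESHOLD) return 0;
      if (q->switch_count < ANTI_STARVE_SWITCH_COUNT_THRESHOLD) return 0;
      q->switch_count = 0;
      return 1;
    }

    int main(void) {
      toy_runqueue q = {0, 0, 0};
      /* Simulate a busy scheduler: every iteration pushes two runnable fibers
         and shifts one, so the queue keeps growing and never drains. */
      for (int i = 1; i <= 200; i++) {
        toy_push(&q);
        toy_push(&q);
        toy_shift(&q);
        if (toy_should_poll_nonblocking(&q))
          printf("iteration %3d: nonblocking poll (len=%u, hwm=%u)\n",
                 i, q.count, q.high_watermark);
      }
      return 0;
    }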
data/ext/polyphony/runqueue.h
ADDED
@@ -0,0 +1,27 @@
+#ifndef RUNQUEUE_H
+#define RUNQUEUE_H
+
+#include "polyphony.h"
+#include "runqueue_ring_buffer.h"
+
+typedef struct runqueue {
+  runqueue_ring_buffer entries;
+  unsigned int high_watermark;
+  unsigned int switch_count;
+} runqueue_t;
+
+void runqueue_initialize(runqueue_t *runqueue);
+void runqueue_finalize(runqueue_t *runqueue);
+void runqueue_mark(runqueue_t *runqueue);
+
+void runqueue_push(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
+void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
+runqueue_entry runqueue_shift(runqueue_t *runqueue);
+void runqueue_delete(runqueue_t *runqueue, VALUE fiber);
+int runqueue_index_of(runqueue_t *runqueue, VALUE fiber);
+void runqueue_clear(runqueue_t *runqueue);
+long runqueue_len(runqueue_t *runqueue);
+int runqueue_empty_p(runqueue_t *runqueue);
+int runqueue_should_poll_nonblocking(runqueue_t *runqueue);
+
+#endif /* RUNQUEUE_H */
data/ext/polyphony/thread.c
CHANGED
@@ -1,19 +1,15 @@
 #include "polyphony.h"
+#include "backend_common.h"

 ID ID_deactivate_all_watchers_post_fork;
 ID ID_ivar_backend;
 ID ID_ivar_join_wait_queue;
 ID ID_ivar_main_fiber;
 ID ID_ivar_terminated;
-ID ID_ivar_runqueue;
 ID ID_stop;

 static VALUE Thread_setup_fiber_scheduling(VALUE self) {
-  VALUE runqueue = rb_funcall(cRunqueue, ID_new, 0);
-
   rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
-  rb_ivar_set(self, ID_ivar_runqueue, runqueue);
-
   return self;
 }

@@ -21,53 +17,20 @@ static VALUE SYM_scheduled_fibers;
 static VALUE SYM_pending_watchers;

 static VALUE Thread_fiber_scheduling_stats(VALUE self) {
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-  VALUE stats = rb_hash_new();
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  long pending_count;
-
-  long scheduled_count = Runqueue_len(runqueue);
-  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(scheduled_count));
-
-  pending_count = Backend_pending_count(backend);
-  rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(pending_count));
+  struct backend_stats backend_stats = Backend_stats(rb_ivar_get(self, ID_ivar_backend));

+  VALUE stats = rb_hash_new();
+  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(backend_stats.scheduled_fibers));
+  rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(backend_stats.pending_ops));
   return stats;
 }

-void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
-  VALUE runqueue;
-  int already_runnable;
-
-  if (rb_fiber_alive_p(fiber) != Qtrue) return;
-  already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;
-
-  COND_TRACE(3, SYM_fiber_schedule, fiber, value);
-  runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  (prioritize ? Runqueue_unshift : Runqueue_push)(runqueue, fiber, value, already_runnable);
-  if (!already_runnable) {
-    rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
-    if (rb_thread_current() != self) {
-      // If the fiber scheduling is done across threads, we need to make sure the
-      // target thread is woken up in case it is in the middle of running its
-      // event selector. Otherwise it's gonna be stuck waiting for an event to
-      // happen, not knowing that it there's already a fiber ready to run in its
-      // run queue.
-      VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-      Backend_wakeup(backend);
-    }
-  }
-}
-
-VALUE Thread_fiber_scheduling_index(VALUE self, VALUE fiber) {
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-
-  return INT2NUM(Runqueue_index_of(runqueue, fiber));
+inline void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
+  Backend_schedule_fiber(self, rb_ivar_get(self, ID_ivar_backend), fiber, value, prioritize);
 }

 VALUE Thread_fiber_unschedule(VALUE self, VALUE fiber) {
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  Runqueue_delete(runqueue, fiber);
+  Backend_unschedule_fiber(rb_ivar_get(self, ID_ivar_backend), fiber);
   return self;
 }

@@ -82,50 +45,15 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
 }

 VALUE Thread_switch_fiber(VALUE self) {
-  VALUE current_fiber = rb_fiber_current();
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  runqueue_entry next;
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-  unsigned int pending_count = Backend_pending_count(backend);
-  unsigned int backend_was_polled = 0;
-
-  if (__tracing_enabled__ && (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse))
-    TRACE(2, SYM_fiber_switchpoint, current_fiber);
-
-  while (1) {
-    next = Runqueue_shift(runqueue);
-    if (next.fiber != Qnil) {
-      if (!backend_was_polled && pending_count) {
-        // this prevents event starvation in case the run queue never empties
-        Backend_poll(backend, Qtrue, current_fiber, runqueue);
-      }
-      break;
-    }
-    if (pending_count == 0) break;
-
-    Backend_poll(backend, Qnil, current_fiber, runqueue);
-    backend_was_polled = 1;
-  }
-
-  if (next.fiber == Qnil) return Qnil;
-
-  // run next fiber
-  COND_TRACE(3, SYM_fiber_run, next.fiber, next.value);
-
-  rb_ivar_set(next.fiber, ID_ivar_runnable, Qnil);
-  RB_GC_GUARD(next.fiber);
-  RB_GC_GUARD(next.value);
-  return (next.fiber == current_fiber) ?
-    next.value : FIBER_TRANSFER(next.fiber, next.value);
+  return Backend_switch_fiber(rb_ivar_get(self, ID_ivar_backend));
 }

 VALUE Thread_fiber_schedule_and_wakeup(VALUE self, VALUE fiber, VALUE resume_obj) {
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
   if (fiber != Qnil) {
     Thread_schedule_fiber_with_priority(self, fiber, resume_obj);
   }

-  if (Backend_wakeup(backend) == Qnil) {
+  if (Backend_wakeup(rb_ivar_get(self, ID_ivar_backend)) == Qnil) {
     // we're not inside the ev_loop, so we just do a switchpoint
     Thread_switch_fiber(self);
   }
@@ -151,7 +79,6 @@ void Init_Thread() {
   rb_define_method(rb_cThread, "schedule_fiber_with_priority",
     Thread_schedule_fiber_with_priority, 2);
   rb_define_method(rb_cThread, "switch_fiber", Thread_switch_fiber, 0);
-  rb_define_method(rb_cThread, "fiber_scheduling_index", Thread_fiber_scheduling_index, 1);
   rb_define_method(rb_cThread, "fiber_unschedule", Thread_fiber_unschedule, 1);

   rb_define_singleton_method(rb_cThread, "backend", Thread_class_backend, 0);
@@ -163,7 +90,6 @@ void Init_Thread() {
   ID_ivar_join_wait_queue = rb_intern("@join_wait_queue");
   ID_ivar_main_fiber = rb_intern("@main_fiber");
   ID_ivar_terminated = rb_intern("@terminated");
-  ID_ivar_runqueue = rb_intern("@runqueue");
   ID_stop = rb_intern("stop");

   SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));