polyphony 0.58 → 0.59
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/Gemfile.lock +1 -1
- data/ext/polyphony/backend_common.c +95 -4
- data/ext/polyphony/backend_common.h +23 -2
- data/ext/polyphony/backend_io_uring.c +67 -21
- data/ext/polyphony/backend_libev.c +63 -17
- data/ext/polyphony/fiber.c +0 -2
- data/ext/polyphony/polyphony.c +0 -7
- data/ext/polyphony/polyphony.h +6 -16
- data/ext/polyphony/polyphony_ext.c +0 -4
- data/ext/polyphony/runqueue.c +17 -82
- data/ext/polyphony/runqueue.h +27 -0
- data/ext/polyphony/thread.c +10 -99
- data/lib/polyphony/extensions/fiber.rb +2 -2
- data/lib/polyphony/extensions/thread.rb +1 -1
- data/lib/polyphony/version.rb +1 -1
- data/test/test_backend.rb +3 -3
- data/test/test_thread.rb +5 -11
- data/test/test_trace.rb +27 -49
- metadata +3 -4
- data/ext/polyphony/tracing.c +0 -11
- data/lib/polyphony/adapters/trace.rb +0 -138
data/ext/polyphony/fiber.c
CHANGED
@@ -1,6 +1,5 @@
 #include "polyphony.h"

-ID ID_fiber_trace;
 ID ID_ivar_auto_watcher;
 ID ID_ivar_mailbox;
 ID ID_ivar_result;
@@ -169,7 +168,6 @@ void Init_Fiber() {
   rb_global_variable(&SYM_runnable);
   rb_global_variable(&SYM_waiting);

-  ID_fiber_trace = rb_intern("__fiber_trace__");
   ID_ivar_auto_watcher = rb_intern("@auto_watcher");
   ID_ivar_mailbox = rb_intern("@mailbox");
   ID_ivar_result = rb_intern("@result");
data/ext/polyphony/polyphony.c
CHANGED
@@ -42,11 +42,6 @@ static VALUE Polyphony_suspend(VALUE self) {
   return ret;
 }

-VALUE Polyphony_trace(VALUE self, VALUE enabled) {
-  __tracing_enabled__ = RTEST(enabled) ? 1 : 0;
-  return Qnil;
-}
-
 VALUE Polyphony_backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
   return Backend_accept(BACKEND(), server_socket, socket_class);
 }
@@ -130,8 +125,6 @@ VALUE Polyphony_backend_write(int argc, VALUE *argv, VALUE self) {
 void Init_Polyphony() {
   mPolyphony = rb_define_module("Polyphony");

-  rb_define_singleton_method(mPolyphony, "trace", Polyphony_trace, 1);
-
   // backend methods
   rb_define_singleton_method(mPolyphony, "backend_accept", Polyphony_backend_accept, 2);
   rb_define_singleton_method(mPolyphony, "backend_accept_loop", Polyphony_backend_accept_loop, 2);
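
These hunks, together with the polyphony.h and fiber.c changes, drop the extension's fiber-tracing switch (the pure-Ruby side, data/lib/polyphony/adapters/trace.rb, is deleted outright per the file list above). For orientation, a sketch of how the removed machinery fit together, reconstructed from the deleted lines; this is 0.58 code, not part of 0.59:

#include "ruby.h"

// Ruby code opted in with Polyphony.trace(true), which flipped a global
// flag inside the extension:
extern int __tracing_enabled__;   // removed from polyphony.h below
extern ID ID_fiber_trace;         // removed from fiber.c above

// The scheduler reported events by calling back into Ruby, e.g.
// COND_TRACE(3, SYM_fiber_schedule, fiber, value) invoked
// Object#__fiber_trace__, but only while tracing was enabled:
#define TRACE(...) rb_funcall(rb_cObject, ID_fiber_trace, __VA_ARGS__)
#define COND_TRACE(...) if (__tracing_enabled__) { TRACE(__VA_ARGS__); }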
data/ext/polyphony/polyphony.h
CHANGED
@@ -5,6 +5,7 @@

 #include "ruby.h"
 #include "runqueue_ring_buffer.h"
+#include "backend_common.h"

 // debugging
 #define OBJ_ID(obj) (NUM2LONG(rb_funcall(obj, rb_intern("object_id"), 0)))
@@ -18,10 +19,6 @@
   free(strings); \
 }

-// tracing
-#define TRACE(...) rb_funcall(rb_cObject, ID_fiber_trace, __VA_ARGS__)
-#define COND_TRACE(...) if (__tracing_enabled__) { TRACE(__VA_ARGS__); }
-
 // exceptions
 #define TEST_EXCEPTION(ret) (rb_obj_is_kind_of(ret, rb_eException) == Qtrue)
 #define RAISE_EXCEPTION(e) rb_funcall(e, ID_invoke, 0);
@@ -36,14 +33,12 @@
 extern VALUE mPolyphony;
 extern VALUE cQueue;
 extern VALUE cEvent;
-extern VALUE cRunqueue;
 extern VALUE cTimeoutException;

 extern ID ID_call;
 extern ID ID_caller;
 extern ID ID_clear;
 extern ID ID_each;
-extern ID ID_fiber_trace;
 extern ID ID_inspect;
 extern ID ID_invoke;
 extern ID ID_ivar_backend;
@@ -67,14 +62,6 @@ extern VALUE SYM_fiber_schedule;
 extern VALUE SYM_fiber_switchpoint;
 extern VALUE SYM_fiber_terminate;

-extern int __tracing_enabled__;
-
-enum {
-  FIBER_STATE_NOT_SCHEDULED = 0,
-  FIBER_STATE_WAITING = 1,
-  FIBER_STATE_SCHEDULED = 2
-};
-
 VALUE Fiber_auto_watcher(VALUE self);
 void Fiber_make_runnable(VALUE fiber, VALUE value);

@@ -121,11 +108,14 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write);
 VALUE Backend_waitpid(VALUE self, VALUE pid);
 VALUE Backend_write_m(int argc, VALUE *argv, VALUE self);

-
-VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue);
+VALUE Backend_poll(VALUE self, VALUE blocking);
 VALUE Backend_wait_event(VALUE self, VALUE raise_on_exception);
 VALUE Backend_wakeup(VALUE self);
 VALUE Backend_run_idle_tasks(VALUE self);
+VALUE Backend_switch_fiber(VALUE self);
+void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize);
+struct backend_stats Backend_stats(VALUE self);
+void Backend_unschedule_fiber(VALUE self, VALUE fiber);

 VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
data/ext/polyphony/polyphony_ext.c
CHANGED
@@ -5,10 +5,8 @@ void Init_Polyphony();
 void Init_Backend();
 void Init_Queue();
 void Init_Event();
-void Init_Runqueue();
 void Init_SocketExtensions();
 void Init_Thread();
-void Init_Tracing();

 #ifdef POLYPHONY_PLAYGROUND
 extern void playground();
@@ -20,10 +18,8 @@ void Init_polyphony_ext() {
   Init_Backend();
   Init_Queue();
   Init_Event();
-  Init_Runqueue();
   Init_Fiber();
   Init_Thread();
-  Init_Tracing();

   Init_SocketExtensions();

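
Note the Backend_poll signature change in polyphony.h above: callers no longer pass the current fiber and runqueue, because the backend now owns that scheduling state; the remaining argument only selects blocking or non-blocking polling. A hypothetical call site for illustration (poll_for_events is not a real function in the codebase, and the Qtrue-means-blocking mapping is an assumption; the real call sites are in backend_io_uring.c and backend_libev.c, whose hunks are not rendered in this section):

#include "polyphony.h"

// Hypothetical helper, for illustration only.
// 0.58: Backend_poll(backend, Qtrue /* nowait */, current_fiber, runqueue);
// 0.59: the backend consults its own runqueue, so callers just pick a mode
// (assumed here: Qtrue = block until events arrive, Qnil = poll and return).
static void poll_for_events(VALUE backend, int blocking) {
  Backend_poll(backend, blocking ? Qtrue : Qnil);
}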
data/ext/polyphony/runqueue.c
CHANGED
@@ -1,78 +1,35 @@
 #include "polyphony.h"
-#include "runqueue_ring_buffer.h"
-
-typedef struct queue {
-  runqueue_ring_buffer entries;
-  unsigned int high_watermark;
-  unsigned int switch_count;
-} Runqueue_t;
-
-VALUE cRunqueue = Qnil;
-
-static void Runqueue_mark(void *ptr) {
-  Runqueue_t *runqueue = ptr;
-  runqueue_ring_buffer_mark(&runqueue->entries);
-}
-
-static void Runqueue_free(void *ptr) {
-  Runqueue_t *runqueue = ptr;
-  runqueue_ring_buffer_free(&runqueue->entries);
-  xfree(ptr);
-}
-
-static size_t Runqueue_size(const void *ptr) {
-  return sizeof(Runqueue_t);
-}
-
-static const rb_data_type_t Runqueue_type = {
-  "Runqueue",
-  {Runqueue_mark, Runqueue_free, Runqueue_size,},
-  0, 0, 0
-};
-
-static VALUE Runqueue_allocate(VALUE klass) {
-  Runqueue_t *runqueue;
-
-  runqueue = ALLOC(Runqueue_t);
-  return TypedData_Wrap_Struct(klass, &Runqueue_type, runqueue);
-}
-
-#define GetRunqueue(obj, runqueue) \
-  TypedData_Get_Struct((obj), Runqueue_t, &Runqueue_type, (runqueue))
-
-static VALUE Runqueue_initialize(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+#include "runqueue.h"

+inline void runqueue_initialize(runqueue_t *runqueue) {
   runqueue_ring_buffer_init(&runqueue->entries);
   runqueue->high_watermark = 0;
   runqueue->switch_count = 0;
+}

-  return self;
+inline void runqueue_finalize(runqueue_t *runqueue) {
+  runqueue_ring_buffer_free(&runqueue->entries);
 }

-void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_mark(runqueue_t *runqueue) {
+  runqueue_ring_buffer_mark(&runqueue->entries);
+}

+inline void runqueue_push(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule) {
   if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
   runqueue_ring_buffer_push(&runqueue->entries, fiber, value);
   if (runqueue->entries.count > runqueue->high_watermark)
     runqueue->high_watermark = runqueue->entries.count;
 }

-void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule) {
   if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
   runqueue_ring_buffer_unshift(&runqueue->entries, fiber, value);
   if (runqueue->entries.count > runqueue->high_watermark)
     runqueue->high_watermark = runqueue->entries.count;
 }

-runqueue_entry Runqueue_shift(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline runqueue_entry runqueue_shift(runqueue_t *runqueue) {
   runqueue_entry entry = runqueue_ring_buffer_shift(&runqueue->entries);
   if (entry.fiber == Qnil)
     runqueue->high_watermark = 0;
@@ -81,45 +38,30 @@ runqueue_entry Runqueue_shift(VALUE self) {
   return entry;
 }

-void Runqueue_delete(VALUE self, VALUE fiber) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_delete(runqueue_t *runqueue, VALUE fiber) {
   runqueue_ring_buffer_delete(&runqueue->entries, fiber);
 }

-int Runqueue_index_of(VALUE self, VALUE fiber) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline int runqueue_index_of(runqueue_t *runqueue, VALUE fiber) {
   return runqueue_ring_buffer_index_of(&runqueue->entries, fiber);
 }

-void Runqueue_clear(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_clear(runqueue_t *runqueue) {
   runqueue_ring_buffer_clear(&runqueue->entries);
 }

-long Runqueue_len(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
+inline long runqueue_len(runqueue_t *runqueue) {
   return runqueue->entries.count;
 }

-int Runqueue_empty_p(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
+inline int runqueue_empty_p(runqueue_t *runqueue) {
   return (runqueue->entries.count == 0);
 }

 static const unsigned int ANTI_STARVE_HIGH_WATERMARK_THRESHOLD = 128;
 static const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;

-int Runqueue_should_poll_nonblocking(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
+inline int runqueue_should_poll_nonblocking(runqueue_t *runqueue) {
   if (runqueue->high_watermark < ANTI_STARVE_HIGH_WATERMARK_THRESHOLD) return 0;
   if (runqueue->switch_count < ANTI_STARVE_SWITCH_COUNT_THRESHOLD) return 0;

@@ -127,10 +69,3 @@ int Runqueue_should_poll_nonblocking(VALUE self) {
   runqueue->switch_count = 0;
   return 1;
 }
-
-void Init_Runqueue() {
-  cRunqueue = rb_define_class_under(mPolyphony, "Runqueue", rb_cObject);
-  rb_define_alloc_func(cRunqueue, Runqueue_allocate);
-
-  rb_define_method(cRunqueue, "initialize", Runqueue_initialize, 0);
-}
data/ext/polyphony/runqueue.h
ADDED
@@ -0,0 +1,27 @@
+#ifndef RUNQUEUE_H
+#define RUNQUEUE_H
+
+#include "polyphony.h"
+#include "runqueue_ring_buffer.h"
+
+typedef struct runqueue {
+  runqueue_ring_buffer entries;
+  unsigned int high_watermark;
+  unsigned int switch_count;
+} runqueue_t;
+
+void runqueue_initialize(runqueue_t *runqueue);
+void runqueue_finalize(runqueue_t *runqueue);
+void runqueue_mark(runqueue_t *runqueue);
+
+void runqueue_push(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
+void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
+runqueue_entry runqueue_shift(runqueue_t *runqueue);
+void runqueue_delete(runqueue_t *runqueue, VALUE fiber);
+int runqueue_index_of(runqueue_t *runqueue, VALUE fiber);
+void runqueue_clear(runqueue_t *runqueue);
+long runqueue_len(runqueue_t *runqueue);
+int runqueue_empty_p(runqueue_t *runqueue);
+int runqueue_should_poll_nonblocking(runqueue_t *runqueue);
+
+#endif /* RUNQUEUE_H */

The net effect of these two files: the run queue stops being a GC-managed Ruby object (Polyphony::Runqueue) and becomes a plain C struct that a backend can embed by value and drive directly. A minimal usage sketch of the new API, assuming it is compiled inside the extension tree; runqueue_roundtrip is a hypothetical function, and fiber/value stand in for real VALUEs:

#include "runqueue.h"

// Sketch: owning and driving a runqueue without a wrapped Ruby object.
void runqueue_roundtrip(VALUE fiber, VALUE value) {
  runqueue_t runqueue;
  runqueue_initialize(&runqueue);            // was Runqueue.new + #initialize

  runqueue_push(&runqueue, fiber, value, 0); // schedule a fiber
  runqueue_entry next = runqueue_shift(&runqueue);
  if (next.fiber != Qnil) {
    // run next.fiber with next.value ...
  }

  runqueue_finalize(&runqueue);              // was Runqueue_free, via GC
}
data/ext/polyphony/thread.c
CHANGED
@@ -1,19 +1,15 @@
 #include "polyphony.h"
+#include "backend_common.h"

 ID ID_deactivate_all_watchers_post_fork;
 ID ID_ivar_backend;
 ID ID_ivar_join_wait_queue;
 ID ID_ivar_main_fiber;
 ID ID_ivar_terminated;
-ID ID_ivar_runqueue;
 ID ID_stop;

 static VALUE Thread_setup_fiber_scheduling(VALUE self) {
-  VALUE runqueue = rb_funcall(cRunqueue, ID_new, 0);
-
   rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
-  rb_ivar_set(self, ID_ivar_runqueue, runqueue);
-
   return self;
 }

@@ -21,53 +17,20 @@ static VALUE SYM_scheduled_fibers;
 static VALUE SYM_pending_watchers;

 static VALUE Thread_fiber_scheduling_stats(VALUE self) {
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-  VALUE stats = rb_hash_new();
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  long pending_count;
-
-  long scheduled_count = Runqueue_len(runqueue);
-  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(scheduled_count));
-
-  pending_count = Backend_pending_count(backend);
-  rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(pending_count));
+  struct backend_stats backend_stats = Backend_stats(rb_ivar_get(self, ID_ivar_backend));

+  VALUE stats = rb_hash_new();
+  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(backend_stats.scheduled_fibers));
+  rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(backend_stats.pending_ops));
   return stats;
 }

-void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
-  VALUE runqueue;
-  int already_runnable;
-
-  if (rb_fiber_alive_p(fiber) != Qtrue) return;
-  already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;
-
-  COND_TRACE(3, SYM_fiber_schedule, fiber, value);
-  runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  (prioritize ? Runqueue_unshift : Runqueue_push)(runqueue, fiber, value, already_runnable);
-  if (!already_runnable) {
-    rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
-    if (rb_thread_current() != self) {
-      // If the fiber scheduling is done across threads, we need to make sure the
-      // target thread is woken up in case it is in the middle of running its
-      // event selector. Otherwise it's gonna be stuck waiting for an event to
-      // happen, not knowing that it there's already a fiber ready to run in its
-      // run queue.
-      VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-      Backend_wakeup(backend);
-    }
-  }
-}
-
-VALUE Thread_fiber_scheduling_index(VALUE self, VALUE fiber) {
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-
-  return INT2NUM(Runqueue_index_of(runqueue, fiber));
+inline void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
+  Backend_schedule_fiber(self, rb_ivar_get(self, ID_ivar_backend), fiber, value, prioritize);
 }

 VALUE Thread_fiber_unschedule(VALUE self, VALUE fiber) {
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  Runqueue_delete(runqueue, fiber);
+  Backend_unschedule_fiber(rb_ivar_get(self, ID_ivar_backend), fiber);
   return self;
 }

@@ -82,65 +45,15 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
 }

 VALUE Thread_switch_fiber(VALUE self) {
-  VALUE current_fiber = rb_fiber_current();
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  runqueue_entry next;
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-  unsigned int pending_ops_count = Backend_pending_count(backend);
-  unsigned int backend_was_polled = 0;
-  unsigned int idle_tasks_run_count = 0;
-
-  if (__tracing_enabled__ && (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse))
-    TRACE(2, SYM_fiber_switchpoint, current_fiber);
-
-  while (1) {
-    next = Runqueue_shift(runqueue);
-    if (next.fiber != Qnil) {
-      // Polling for I/O op completion is normally done when the run queue is
-      // empty, but if the runqueue never empties, we'll never get to process
-      // any event completions. In order to prevent this, an anti-starve
-      // mechanism is employed, under the following conditions:
-      // - a blocking poll was not yet performed
-      // - there are pending blocking operations
-      // - the runqueue has signalled that a non-blocking poll should be
-      //   performed
-      // - the run queue length high watermark has reached its threshold (currently 128)
-      // - the run queue switch counter has reached its threshold (currently 64)
-      if (!backend_was_polled && pending_ops_count && Runqueue_should_poll_nonblocking(runqueue)) {
-        // this prevents event starvation in case the run queue never empties
-        Backend_poll(backend, Qtrue, current_fiber, runqueue);
-      }
-      break;
-    }
-
-    if (!idle_tasks_run_count) {
-      idle_tasks_run_count++;
-      Backend_run_idle_tasks(backend);
-    }
-    if (pending_ops_count == 0) break;
-    Backend_poll(backend, Qnil, current_fiber, runqueue);
-    backend_was_polled = 1;
-  }
-
-  if (next.fiber == Qnil) return Qnil;
-
-  // run next fiber
-  COND_TRACE(3, SYM_fiber_run, next.fiber, next.value);
-
-  rb_ivar_set(next.fiber, ID_ivar_runnable, Qnil);
-  RB_GC_GUARD(next.fiber);
-  RB_GC_GUARD(next.value);
-  return (next.fiber == current_fiber) ?
-    next.value : FIBER_TRANSFER(next.fiber, next.value);
+  return Backend_switch_fiber(rb_ivar_get(self, ID_ivar_backend));
 }

 VALUE Thread_fiber_schedule_and_wakeup(VALUE self, VALUE fiber, VALUE resume_obj) {
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
   if (fiber != Qnil) {
     Thread_schedule_fiber_with_priority(self, fiber, resume_obj);
   }

-  if (Backend_wakeup(backend) == Qnil) {
+  if (Backend_wakeup(rb_ivar_get(self, ID_ivar_backend)) == Qnil) {
     // we're not inside the ev_loop, so we just do a switchpoint
     Thread_switch_fiber(self);
   }
@@ -166,7 +79,6 @@ void Init_Thread() {
   rb_define_method(rb_cThread, "schedule_fiber_with_priority",
     Thread_schedule_fiber_with_priority, 2);
   rb_define_method(rb_cThread, "switch_fiber", Thread_switch_fiber, 0);
-  rb_define_method(rb_cThread, "fiber_scheduling_index", Thread_fiber_scheduling_index, 1);
   rb_define_method(rb_cThread, "fiber_unschedule", Thread_fiber_unschedule, 1);

   rb_define_singleton_method(rb_cThread, "backend", Thread_class_backend, 0);
@@ -178,7 +90,6 @@ void Init_Thread() {
   ID_ivar_join_wait_queue = rb_intern("@join_wait_queue");
   ID_ivar_main_fiber = rb_intern("@main_fiber");
   ID_ivar_terminated = rb_intern("@terminated");
-  ID_ivar_runqueue = rb_intern("@runqueue");
   ID_stop = rb_intern("stop");

   SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));
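
With this change thread.c becomes a thin shim: the scheduling loop, the anti-starvation polling, and the runqueue itself all move behind the Backend_* functions declared in polyphony.h. The backends' side of the contract lives in backend_io_uring.c and backend_libev.c, whose hunks are not rendered in this section; the following is only a sketch of the shape those declarations imply, with the struct layout and field names assumed:

#include "polyphony.h"
#include "runqueue.h"

// Hypothetical backend layout (assumed; the real structs differ per backend).
typedef struct hypothetical_backend {
  runqueue_t runqueue;          // run queue now owned by the backend
  unsigned int pending_count;   // in-flight blocking operations
} hypothetical_backend_t;

// What Backend_stats plausibly aggregates, given how thread.c consumes
// struct backend_stats above (scheduled_fibers and pending_ops fields).
struct backend_stats hypothetical_backend_stats(hypothetical_backend_t *backend) {
  struct backend_stats stats;
  stats.scheduled_fibers = runqueue_len(&backend->runqueue);
  stats.pending_ops = backend->pending_count;
  return stats;
}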