polyphony 0.43.2 → 0.43.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -2
- data/CHANGELOG.md +43 -0
- data/Gemfile.lock +2 -2
- data/README.md +2 -0
- data/TODO.md +2 -3
- data/docs/_includes/head.html +40 -0
- data/docs/_includes/title.html +1 -0
- data/docs/_user-guide/web-server.md +11 -11
- data/docs/getting-started/overview.md +4 -4
- data/docs/index.md +4 -3
- data/docs/main-concepts/design-principles.md +23 -34
- data/docs/main-concepts/fiber-scheduling.md +1 -1
- data/docs/polyphony-logo.png +0 -0
- data/examples/adapters/concurrent-ruby.rb +9 -0
- data/examples/core/xx-daemon.rb +14 -0
- data/examples/io/xx-happy-eyeballs.rb +21 -22
- data/examples/io/xx-zip.rb +19 -0
- data/examples/performance/fiber_transfer.rb +47 -0
- data/examples/performance/mem-usage.rb +34 -28
- data/examples/performance/messaging.rb +29 -0
- data/examples/performance/multi_snooze.rb +11 -9
- data/examples/xx-spin.rb +32 -0
- data/ext/polyphony/libev_agent.c +181 -24
- data/ext/polyphony/polyphony.c +0 -2
- data/ext/polyphony/polyphony.h +14 -7
- data/ext/polyphony/polyphony_ext.c +2 -2
- data/ext/polyphony/queue.c +168 -0
- data/ext/polyphony/ring_buffer.c +96 -0
- data/ext/polyphony/ring_buffer.h +28 -0
- data/ext/polyphony/thread.c +16 -8
- data/lib/polyphony.rb +28 -12
- data/lib/polyphony/core/global_api.rb +5 -3
- data/lib/polyphony/core/resource_pool.rb +19 -9
- data/lib/polyphony/core/thread_pool.rb +1 -1
- data/lib/polyphony/event.rb +5 -15
- data/lib/polyphony/extensions/core.rb +40 -0
- data/lib/polyphony/extensions/fiber.rb +9 -14
- data/lib/polyphony/extensions/io.rb +17 -16
- data/lib/polyphony/extensions/openssl.rb +8 -0
- data/lib/polyphony/extensions/socket.rb +12 -0
- data/lib/polyphony/version.rb +1 -1
- data/test/helper.rb +1 -1
- data/test/q.rb +24 -0
- data/test/test_agent.rb +3 -3
- data/test/test_event.rb +11 -0
- data/test/test_fiber.rb +3 -3
- data/test/test_global_api.rb +48 -15
- data/test/test_io.rb +24 -2
- data/test/test_queue.rb +39 -1
- data/test/test_resource_pool.rb +12 -0
- data/test/test_throttler.rb +6 -5
- data/test/test_trace.rb +18 -17
- metadata +15 -4
- data/ext/polyphony/libev_queue.c +0 -217
data/ext/polyphony/polyphony.c
CHANGED
@@ -2,7 +2,6 @@
 
 VALUE mPolyphony;
 
-ID ID_await_no_raise;
 ID ID_call;
 ID ID_caller;
 ID ID_clear;
@@ -54,7 +53,6 @@ void Init_Polyphony() {
   rb_define_global_function("snooze", Polyphony_snooze, 0);
   rb_define_global_function("suspend", Polyphony_suspend, 0);
 
-  ID_await_no_raise = rb_intern("await_no_raise");
   ID_call = rb_intern("call");
   ID_caller = rb_intern("caller");
   ID_clear = rb_intern("clear");
data/ext/polyphony/polyphony.h
CHANGED
@@ -1,5 +1,5 @@
-#ifndef
-#define
+#ifndef POLYPHONY_H
+#define POLYPHONY_H
 
 #include "ruby.h"
 #include "ruby/io.h"
@@ -19,10 +19,9 @@
 }
 
 extern VALUE mPolyphony;
-extern VALUE
+extern VALUE cQueue;
 extern VALUE cEvent;
 
-extern ID ID_await_no_raise;
 extern ID ID_call;
 extern ID ID_caller;
 extern ID ID_clear;
@@ -75,10 +74,18 @@ VALUE LibevAgent_ref(VALUE self);
 VALUE LibevAgent_unref(VALUE self);
 int LibevAgent_ref_count(VALUE self);
 void LibevAgent_reset_ref_count(VALUE self);
+VALUE LibevAgent_wait_event(VALUE self, VALUE raise);
 
-VALUE
+VALUE Queue_push(VALUE self, VALUE value);
+VALUE Queue_unshift(VALUE self, VALUE value);
+VALUE Queue_shift(VALUE self);
+VALUE Queue_shift_no_wait(VALUE self);
+VALUE Queue_clear(VALUE self);
+VALUE Queue_delete(VALUE self, VALUE value);
+long Queue_len(VALUE self);
+void Queue_trace(VALUE self);
 
-VALUE
+VALUE Polyphony_snooze(VALUE self);
 
 VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_switch_fiber(VALUE thread);
@@ -87,4 +94,4 @@ int io_setstrbuf(VALUE *str, long len);
 void io_set_read_length(VALUE str, long n, int shrinkable);
 VALUE io_enc_str(VALUE str, rb_io_t *fptr);
 
-#endif /*
+#endif /* POLYPHONY_H */
data/ext/polyphony/polyphony_ext.c
CHANGED
@@ -3,7 +3,7 @@
 void Init_Fiber();
 void Init_Polyphony();
 void Init_LibevAgent();
-void
+void Init_Queue();
 void Init_Thread();
 void Init_Tracing();
 
@@ -12,7 +12,7 @@ void Init_polyphony_ext() {
 
   Init_Polyphony();
   Init_LibevAgent();
-
+  Init_Queue();
 
   Init_Fiber();
   Init_Thread();
data/ext/polyphony/queue.c
ADDED
@@ -0,0 +1,168 @@
+#include "polyphony.h"
+#include "ring_buffer.h"
+
+typedef struct queue {
+  ring_buffer values;
+  ring_buffer shift_queue;
+} Queue_t;
+
+VALUE cQueue = Qnil;
+
+static void Queue_mark(void *ptr) {
+  Queue_t *queue = ptr;
+  ring_buffer_mark(&queue->values);
+  ring_buffer_mark(&queue->shift_queue);
+}
+
+static void Queue_free(void *ptr) {
+  Queue_t *queue = ptr;
+  ring_buffer_free(&queue->values);
+  ring_buffer_free(&queue->shift_queue);
+  xfree(ptr);
+}
+
+static size_t Queue_size(const void *ptr) {
+  return sizeof(Queue_t);
+}
+
+static const rb_data_type_t Queue_type = {
+  "Queue",
+  {Queue_mark, Queue_free, Queue_size,},
+  0, 0, 0
+};
+
+static VALUE Queue_allocate(VALUE klass) {
+  Queue_t *queue;
+
+  queue = ALLOC(Queue_t);
+  return TypedData_Wrap_Struct(klass, &Queue_type, queue);
+}
+
+#define GetQueue(obj, queue) \
+  TypedData_Get_Struct((obj), Queue_t, &Queue_type, (queue))
+
+static VALUE Queue_initialize(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  ring_buffer_init(&queue->values);
+  ring_buffer_init(&queue->shift_queue);
+
+  return self;
+}
+
+VALUE Queue_push(VALUE self, VALUE value) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+  if (queue->shift_queue.count > 0) {
+    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+  ring_buffer_push(&queue->values, value);
+  return self;
+}
+
+VALUE Queue_unshift(VALUE self, VALUE value) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+  if (queue->shift_queue.count > 0) {
+    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+  ring_buffer_unshift(&queue->values, value);
+  return self;
+}
+
+VALUE Queue_shift(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  if (queue->values.count == 0) {
+    VALUE agent = rb_ivar_get(rb_thread_current(), ID_ivar_agent);
+    VALUE fiber = rb_fiber_current();
+    VALUE switchpoint_result = Qnil;
+    ring_buffer_push(&queue->shift_queue, fiber);
+    switchpoint_result = LibevAgent_wait_event(agent, Qnil);
+    if (RTEST(rb_obj_is_kind_of(switchpoint_result, rb_eException))) {
+      ring_buffer_delete(&queue->shift_queue, fiber);
+      return rb_funcall(rb_mKernel, ID_raise, 1, switchpoint_result);
+    }
+    RB_GC_GUARD(agent);
+    RB_GC_GUARD(switchpoint_result);
+  }
+
+  return ring_buffer_shift(&queue->values);
+}
+
+VALUE Queue_shift_no_wait(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return ring_buffer_shift(&queue->values);
+}
+
+VALUE Queue_delete(VALUE self, VALUE value) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  ring_buffer_delete(&queue->values, value);
+  return self;
+}
+
+VALUE Queue_clear(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  ring_buffer_clear(&queue->values);
+  return self;
+}
+
+long Queue_len(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return queue->values.count;
+}
+
+VALUE Queue_shift_each(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  ring_buffer_shift_each(&queue->values);
+  return self;
+}
+
+VALUE Queue_shift_all(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return ring_buffer_shift_all(&queue->values);
+}
+
+VALUE Queue_empty_p(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return (queue->values.count == 0) ? Qtrue : Qfalse;
+}
+
+void Init_Queue() {
+  cQueue = rb_define_class_under(mPolyphony, "Queue", rb_cData);
+  rb_define_alloc_func(cQueue, Queue_allocate);
+
+  rb_define_method(cQueue, "initialize", Queue_initialize, 0);
+  rb_define_method(cQueue, "push", Queue_push, 1);
+  rb_define_method(cQueue, "<<", Queue_push, 1);
+  rb_define_method(cQueue, "unshift", Queue_unshift, 1);
+
+  rb_define_method(cQueue, "shift", Queue_shift, 0);
+  rb_define_method(cQueue, "pop", Queue_shift, 0);
+  rb_define_method(cQueue, "shift_no_wait", Queue_shift_no_wait, 0);
+  rb_define_method(cQueue, "delete", Queue_delete, 1);
+
+  rb_define_method(cQueue, "shift_each", Queue_shift_each, 0);
+  rb_define_method(cQueue, "shift_all", Queue_shift_all, 0);
+  rb_define_method(cQueue, "empty?", Queue_empty_p, 0);
+}
+
+
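Note (not part of the diff): the new C-level queue is exposed to Ruby as a Queue class under the Polyphony module, with the methods registered in Init_Queue above; Queue#shift suspends the calling fiber until a value is pushed. A minimal usage sketch, assuming Polyphony's spin/await fiber API:

  require 'polyphony'

  queue = Polyphony::Queue.new

  # Consumer fiber: shift blocks the fiber while the queue is empty.
  consumer = spin do
    3.times { puts "got #{queue.shift}" }
  end

  # Producer: push (aliased as <<) wakes the first fiber waiting in shift.
  3.times { |i| queue << i }

  consumer.await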
data/ext/polyphony/ring_buffer.c
ADDED
@@ -0,0 +1,96 @@
+#include "polyphony.h"
+#include "ring_buffer.h"
+
+void ring_buffer_init(ring_buffer *buffer) {
+  buffer->size = 1;
+  buffer->count = 0;
+  buffer->entries = malloc(buffer->size * sizeof(VALUE));
+  buffer->head = 0;
+  buffer->tail = 0;
+}
+
+void ring_buffer_free(ring_buffer *buffer) {
+  free(buffer->entries);
+}
+
+int ring_buffer_empty_p(ring_buffer *buffer) {
+  return buffer->count == 0;
+}
+
+VALUE ring_buffer_shift(ring_buffer *buffer) {
+  VALUE value;
+  if (buffer->count == 0) return Qnil;
+
+  value = buffer->entries[buffer->head];
+  buffer->head = (buffer->head + 1) % buffer->size;
+  buffer->count--;
+  // INSPECT(value);
+  return value;
+}
+
+void ring_buffer_resize(ring_buffer *buffer) {
+  unsigned int old_size = buffer->size;
+  buffer->size = old_size == 1 ? 4 : old_size * 2;
+  buffer->entries = realloc(buffer->entries, buffer->size * sizeof(VALUE));
+  for (unsigned int idx = 0; idx < buffer->head && idx < buffer->tail; idx++)
+    buffer->entries[old_size + idx] = buffer->entries[idx];
+  buffer->tail = buffer->head + buffer->count;
+}
+
+void ring_buffer_unshift(ring_buffer *buffer, VALUE value) {
+  if (buffer->count == buffer->size) ring_buffer_resize(buffer);
+
+  buffer->head = (buffer->head - 1) % buffer->size;
+  buffer->entries[buffer->head] = value;
+  buffer->count++;
+}
+
+void ring_buffer_push(ring_buffer *buffer, VALUE value) {
+  if (buffer->count == buffer->size) ring_buffer_resize(buffer);
+
+  buffer->entries[buffer->tail] = value;
+  buffer->tail = (buffer->tail + 1) % buffer->size;
+  buffer->count++;
+}
+
+void ring_buffer_mark(ring_buffer *buffer) {
+  for (unsigned int i = 0; i < buffer->count; i++)
+    rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size]);
+}
+
+void ring_buffer_shift_each(ring_buffer *buffer) {
+  for (unsigned int i = 0; i < buffer->count; i++)
+    rb_yield(buffer->entries[(buffer->head + i) % buffer->size]);
+
+  buffer->count = buffer->head = buffer->tail = 0;
+}
+
+VALUE ring_buffer_shift_all(ring_buffer *buffer) {
+  VALUE array = rb_ary_new_capa(buffer->count);
+  for (unsigned int i = 0; i < buffer->count; i++)
+    rb_ary_push(array, buffer->entries[(buffer->head + i) % buffer->size]);
+  buffer->count = buffer->head = buffer->tail = 0;
+  return array;
+}
+
+void ring_buffer_delete_at(ring_buffer *buffer, unsigned int idx) {
+  for (unsigned int idx2 = idx; idx2 != buffer->tail; idx2 = (idx2 + 1) % buffer->size) {
+    buffer->entries[idx2] = buffer->entries[(idx2 + 1) % buffer->size];
+  }
+  buffer->count--;
+  buffer->tail = (buffer->tail - 1) % buffer->size;
+}
+
+void ring_buffer_delete(ring_buffer *buffer, VALUE value) {
+  for (unsigned int i = 0; i < buffer->count; i++) {
+    unsigned int idx = (buffer->head + i) % buffer->size;
+    if (buffer->entries[idx] == value) {
+      ring_buffer_delete_at(buffer, idx);
+      return;
+    }
+  }
+}
+
+void ring_buffer_clear(ring_buffer *buffer) {
+  buffer->count = buffer->head = buffer->tail = 0;
+}
data/ext/polyphony/ring_buffer.h
ADDED
@@ -0,0 +1,28 @@
+#ifndef RING_BUFFER_H
+#define RING_BUFFER_H
+
+#include "ruby.h"
+
+typedef struct ring_buffer {
+  VALUE *entries;
+  unsigned int size;
+  unsigned int count;
+  unsigned int head;
+  unsigned int tail;
+} ring_buffer;
+
+void ring_buffer_init(ring_buffer *buffer);
+void ring_buffer_free(ring_buffer *buffer);
+void ring_buffer_mark(ring_buffer *buffer);
+int ring_buffer_empty_p(ring_buffer *buffer);
+void ring_buffer_clear(ring_buffer *buffer);
+
+VALUE ring_buffer_shift(ring_buffer *buffer);
+void ring_buffer_unshift(ring_buffer *buffer, VALUE value);
+void ring_buffer_push(ring_buffer *buffer, VALUE value);
+
+void ring_buffer_shift_each(ring_buffer *buffer);
+VALUE ring_buffer_shift_all(ring_buffer *buffer);
+void ring_buffer_delete(ring_buffer *buffer, VALUE value);
+
+#endif /* RING_BUFFER_H */
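Note (not part of the diff): ring_buffer is a growable circular array of VALUEs, used above both for a queue's values and for the fibers blocked in shift. The sketch below is a rough Ruby model of its push/shift/unshift and doubling resize; the class name and code are illustrative only, not part of the gem:

  # Illustrative Ruby model of ext/polyphony/ring_buffer.c behavior.
  class RingBufferModel
    def initialize
      @size = 1
      @count = 0
      @head = 0
      @tail = 0
      @entries = Array.new(@size)
    end

    # Append at the tail, growing the backing array when full.
    def push(value)
      resize if @count == @size
      @entries[@tail] = value
      @tail = (@tail + 1) % @size
      @count += 1
    end

    # Prepend at the head (used to give a fiber priority).
    def unshift(value)
      resize if @count == @size
      @head = (@head - 1) % @size
      @entries[@head] = value
      @count += 1
    end

    # Remove and return the head element, or nil when empty.
    def shift
      return nil if @count == 0
      value = @entries[@head]
      @head = (@head + 1) % @size
      @count -= 1
      value
    end

    private

    # Doubling growth, mirroring ring_buffer_resize: 1 -> 4 -> 8 -> ...
    def resize
      old_size = @size
      @size = old_size == 1 ? 4 : old_size * 2
      @entries += Array.new(@size - old_size)
      # Move the wrapped-around prefix past the old end so head..tail is contiguous again.
      (0...@head).each { |i| @entries[old_size + i] = @entries[i] }
      @tail = @head + @count
    end
  end

  rb = RingBufferModel.new
  5.times { |i| rb.push(i) }
  p [rb.shift, rb.shift] # => [0, 1]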
data/ext/polyphony/thread.c
CHANGED
@@ -11,10 +11,9 @@ ID ID_runnable_next;
 ID ID_stop;
 
 static VALUE Thread_setup_fiber_scheduling(VALUE self) {
-  VALUE queue;
+  VALUE queue = rb_funcall(cQueue, ID_new, 0);
 
   rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
-  queue = rb_ary_new();
   rb_ivar_set(self, ID_run_queue, queue);
 
   return self;
@@ -61,7 +60,7 @@ VALUE Thread_schedule_fiber(VALUE self, VALUE fiber, VALUE value) {
   }
 
   queue = rb_ivar_get(self, ID_run_queue);
-
+  Queue_push(queue, fiber);
   rb_ivar_set(fiber, ID_runnable, Qtrue);
 
   if (rb_thread_current() != self) {
@@ -88,13 +87,13 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
 
   // if fiber is already scheduled, remove it from the run queue
   if (rb_ivar_get(fiber, ID_runnable) != Qnil) {
-
+    Queue_delete(queue, fiber);
   } else {
     rb_ivar_set(fiber, ID_runnable, Qtrue);
   }
 
   // the fiber is given priority by putting it at the front of the run queue
-
+  Queue_unshift(queue, fiber);
 
   if (rb_thread_current() != self) {
     // if the fiber scheduling is done across threads, we need to make sure the
@@ -115,6 +114,7 @@ VALUE Thread_switch_fiber(VALUE self) {
   VALUE value;
   VALUE agent = rb_ivar_get(self, ID_ivar_agent);
   int ref_count;
+  int agent_was_polled = 0;
 
   if (__tracing_enabled__) {
     if (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse) {
@@ -124,9 +124,9 @@ VALUE Thread_switch_fiber(VALUE self) {
 
   ref_count = LibevAgent_ref_count(agent);
   while (1) {
-    next_fiber =
+    next_fiber = Queue_shift_no_wait(queue);
     if (next_fiber != Qnil) {
-      if (ref_count > 0) {
+      if (agent_was_polled == 0 && ref_count > 0) {
        // this mechanism prevents event starvation in case the run queue never
        // empties
        LibevAgent_poll(agent, Qtrue, current_fiber, queue);
@@ -136,6 +136,7 @@ VALUE Thread_switch_fiber(VALUE self) {
     if (ref_count == 0) break;
 
     LibevAgent_poll(agent, Qnil, current_fiber, queue);
+    agent_was_polled = 1;
   }
 
   if (next_fiber == Qnil) return Qnil;
@@ -151,9 +152,15 @@ VALUE Thread_switch_fiber(VALUE self) {
     value : rb_funcall(next_fiber, ID_transfer, 1, value);
 }
 
+VALUE Thread_run_queue_trace(VALUE self) {
+  VALUE queue = rb_ivar_get(self, ID_run_queue);
+  Queue_trace(queue);
+  return self;
+}
+
 VALUE Thread_reset_fiber_scheduling(VALUE self) {
   VALUE queue = rb_ivar_get(self, ID_run_queue);
-
+  Queue_clear(queue);
   Thread_fiber_reset_ref_count(self);
   return self;
 }
@@ -182,6 +189,7 @@ void Init_Thread() {
   rb_define_method(rb_cThread, "schedule_fiber_with_priority",
    Thread_schedule_fiber_with_priority, 2);
   rb_define_method(rb_cThread, "switch_fiber", Thread_switch_fiber, 0);
+  rb_define_method(rb_cThread, "run_queue_trace", Thread_run_queue_trace, 0);
 
   ID_deactivate_all_watchers_post_fork = rb_intern("deactivate_all_watchers_post_fork");
   ID_ivar_agent = rb_intern("@agent");
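Note (not part of the diff): the agent_was_polled flag added to Thread_switch_fiber ensures the libev agent is polled at least once per scheduling pass, so pending I/O and timer events are not starved when the run queue never empties. A sketch of the behavior this guards, assuming Polyphony's spin/snooze/sleep fiber API and illustrative timings:

  require 'polyphony'

  # A fiber that is always runnable: snooze reschedules it on every pass,
  # so the run queue never empties.
  busy = spin { loop { snooze } }

  # A timer-based fiber; the starvation guard lets its event fire even while
  # the busy fiber keeps the run queue non-empty.
  timer = spin do
    sleep 0.1
    puts 'timer fired despite a busy run queue'
  end

  timer.await
  busy.stop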