polyphony 0.43.4 → 0.43.10
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +1 -1
- data/CHANGELOG.md +45 -0
- data/Gemfile.lock +1 -1
- data/README.md +21 -4
- data/TODO.md +1 -6
- data/bin/stress.rb +28 -0
- data/docs/_includes/head.html +40 -0
- data/docs/_includes/title.html +1 -0
- data/docs/_user-guide/web-server.md +11 -11
- data/docs/getting-started/overview.md +2 -2
- data/docs/index.md +4 -3
- data/docs/main-concepts/design-principles.md +23 -34
- data/docs/main-concepts/fiber-scheduling.md +1 -1
- data/docs/polyphony-logo.png +0 -0
- data/examples/core/xx-channels.rb +4 -2
- data/examples/core/xx-using-a-mutex.rb +2 -1
- data/examples/io/xx-happy-eyeballs.rb +21 -22
- data/examples/io/xx-zip.rb +19 -0
- data/examples/performance/fiber_transfer.rb +47 -0
- data/examples/performance/messaging.rb +29 -0
- data/examples/performance/multi_snooze.rb +11 -9
- data/examples/xx-spin.rb +32 -0
- data/ext/polyphony/agent.h +39 -0
- data/ext/polyphony/event.c +86 -0
- data/ext/polyphony/fiber.c +0 -5
- data/ext/polyphony/libev_agent.c +231 -79
- data/ext/polyphony/polyphony.c +2 -2
- data/ext/polyphony/polyphony.h +19 -16
- data/ext/polyphony/polyphony_ext.c +4 -2
- data/ext/polyphony/queue.c +194 -0
- data/ext/polyphony/ring_buffer.c +96 -0
- data/ext/polyphony/ring_buffer.h +28 -0
- data/ext/polyphony/thread.c +48 -31
- data/lib/polyphony.rb +5 -6
- data/lib/polyphony/core/channel.rb +3 -34
- data/lib/polyphony/core/resource_pool.rb +13 -75
- data/lib/polyphony/core/sync.rb +12 -9
- data/lib/polyphony/core/thread_pool.rb +1 -1
- data/lib/polyphony/extensions/core.rb +9 -0
- data/lib/polyphony/extensions/fiber.rb +9 -2
- data/lib/polyphony/extensions/io.rb +16 -15
- data/lib/polyphony/extensions/openssl.rb +8 -0
- data/lib/polyphony/extensions/socket.rb +13 -9
- data/lib/polyphony/extensions/thread.rb +1 -1
- data/lib/polyphony/version.rb +1 -1
- data/test/helper.rb +2 -2
- data/test/q.rb +24 -0
- data/test/test_agent.rb +2 -2
- data/test/test_event.rb +12 -0
- data/test/test_global_api.rb +2 -2
- data/test/test_io.rb +24 -2
- data/test/test_queue.rb +59 -1
- data/test/test_resource_pool.rb +0 -43
- data/test/test_trace.rb +18 -17
- metadata +16 -5
- data/ext/polyphony/libev_queue.c +0 -217
- data/lib/polyphony/event.rb +0 -27
data/ext/polyphony/polyphony.c
CHANGED
@@ -2,7 +2,6 @@
 
 VALUE mPolyphony;
 
-ID ID_await_no_raise;
 ID ID_call;
 ID ID_caller;
 ID ID_clear;
@@ -22,6 +21,8 @@ ID ID_R;
 ID ID_W;
 ID ID_RW;
 
+agent_interface_t agent_interface;
+
 VALUE Polyphony_snooze(VALUE self) {
   VALUE ret;
   VALUE fiber = rb_fiber_current();
@@ -54,7 +55,6 @@ void Init_Polyphony() {
   rb_define_global_function("snooze", Polyphony_snooze, 0);
   rb_define_global_function("suspend", Polyphony_suspend, 0);
 
-  ID_await_no_raise = rb_intern("await_no_raise");
   ID_call = rb_intern("call");
   ID_caller = rb_intern("caller");
   ID_clear = rb_intern("clear");
data/ext/polyphony/polyphony.h
CHANGED
@@ -1,9 +1,10 @@
-#ifndef
-#define
+#ifndef POLYPHONY_H
+#define POLYPHONY_H
 
 #include "ruby.h"
 #include "ruby/io.h"
 #include "libev.h"
+#include "agent.h"
 
 // debugging
 #define OBJ_ID(obj) (NUM2LONG(rb_funcall(obj, rb_intern("object_id"), 0)))
@@ -18,11 +19,16 @@
     return rb_funcall(rb_mKernel, ID_raise, 1, ret); \
   }
 
+extern agent_interface_t agent_interface;
+// #define __AGENT_PASTER__(call) (agent_interface ## . ## call)
+// #define __AGENT__(call) __AGENT_PASTER__(call)
+#define __AGENT__ (agent_interface)
+
+
 extern VALUE mPolyphony;
-extern VALUE
+extern VALUE cQueue;
 extern VALUE cEvent;
 
-extern ID ID_await_no_raise;
 extern ID ID_call;
 extern ID ID_caller;
 extern ID ID_clear;
@@ -66,20 +72,17 @@ enum {
 VALUE Fiber_auto_watcher(VALUE self);
 void Fiber_make_runnable(VALUE fiber, VALUE value);
 
-VALUE
-VALUE
-VALUE
-VALUE
-
-VALUE
-
-
-void LibevAgent_reset_ref_count(VALUE self);
+VALUE Queue_push(VALUE self, VALUE value);
+VALUE Queue_unshift(VALUE self, VALUE value);
+VALUE Queue_shift(VALUE self);
+VALUE Queue_shift_no_wait(VALUE self);
+VALUE Queue_clear(VALUE self);
+VALUE Queue_delete(VALUE self, VALUE value);
+long Queue_len(VALUE self);
+void Queue_trace(VALUE self);
 
 VALUE Polyphony_snooze(VALUE self);
 
-VALUE Polyphony_Queue_push(VALUE self, VALUE value);
-
 VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_switch_fiber(VALUE thread);
 
@@ -87,4 +90,4 @@ int io_setstrbuf(VALUE *str, long len);
 void io_set_read_length(VALUE str, long n, int shrinkable);
 VALUE io_enc_str(VALUE str, rb_io_t *fptr);
 
-#endif /*
+#endif /* POLYPHONY_H */
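Note that data/ext/polyphony/agent.h is listed in this release (+39 lines) but its contents are not shown in this diff. The following is only a sketch of what the agent_interface_t function table behind the __AGENT__ macro might look like, inferred from the __AGENT__ call sites in queue.c and thread.c further down; the field names, signatures, and any additional members are assumptions, not the actual header.

// Sketch only: inferred from call sites, not the real agent.h.
typedef struct agent_interface {
  VALUE (*ref_count)(VALUE agent);        // NUM2INT'ed in Thread_fiber_ref_count
  void  (*reset_ref_count)(VALUE agent);
  int   (*pending_count)(VALUE agent);    // return type assumed
  VALUE (*wait_event)(VALUE agent, VALUE raise_on_exception); // blocks the calling fiber
  VALUE (*poll)(VALUE agent, VALUE nowait, VALUE current_fiber, VALUE queue);
  VALUE (*wakeup)(VALUE agent);           // returns Qnil when not inside the event loop
} agent_interface_t;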
data/ext/polyphony/polyphony_ext.c
CHANGED
@@ -3,7 +3,8 @@
 void Init_Fiber();
 void Init_Polyphony();
 void Init_LibevAgent();
-void
+void Init_Queue();
+void Init_Event();
 void Init_Thread();
 void Init_Tracing();
 
@@ -12,7 +13,8 @@ void Init_polyphony_ext() {
 
   Init_Polyphony();
   Init_LibevAgent();
-
+  Init_Queue();
+  Init_Event();
 
   Init_Fiber();
   Init_Thread();
data/ext/polyphony/queue.c
ADDED
@@ -0,0 +1,194 @@
+#include "polyphony.h"
+#include "ring_buffer.h"
+
+typedef struct queue {
+  ring_buffer values;
+  ring_buffer shift_queue;
+} Queue_t;
+
+VALUE cQueue = Qnil;
+
+static void Queue_mark(void *ptr) {
+  Queue_t *queue = ptr;
+  ring_buffer_mark(&queue->values);
+  ring_buffer_mark(&queue->shift_queue);
+}
+
+static void Queue_free(void *ptr) {
+  Queue_t *queue = ptr;
+  ring_buffer_free(&queue->values);
+  ring_buffer_free(&queue->shift_queue);
+  xfree(ptr);
+}
+
+static size_t Queue_size(const void *ptr) {
+  return sizeof(Queue_t);
+}
+
+static const rb_data_type_t Queue_type = {
+  "Queue",
+  {Queue_mark, Queue_free, Queue_size,},
+  0, 0, 0
+};
+
+static VALUE Queue_allocate(VALUE klass) {
+  Queue_t *queue;
+
+  queue = ALLOC(Queue_t);
+  return TypedData_Wrap_Struct(klass, &Queue_type, queue);
+}
+
+#define GetQueue(obj, queue) \
+  TypedData_Get_Struct((obj), Queue_t, &Queue_type, (queue))
+
+static VALUE Queue_initialize(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  ring_buffer_init(&queue->values);
+  ring_buffer_init(&queue->shift_queue);
+
+  return self;
+}
+
+VALUE Queue_push(VALUE self, VALUE value) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+  if (queue->shift_queue.count > 0) {
+    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+  ring_buffer_push(&queue->values, value);
+  return self;
+}
+
+VALUE Queue_unshift(VALUE self, VALUE value) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+  if (queue->shift_queue.count > 0) {
+    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+  ring_buffer_unshift(&queue->values, value);
+  return self;
+}
+
+VALUE Queue_shift(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  if (queue->values.count == 0) {
+    VALUE agent = rb_ivar_get(rb_thread_current(), ID_ivar_agent);
+    VALUE fiber = rb_fiber_current();
+    VALUE switchpoint_result = Qnil;
+    ring_buffer_push(&queue->shift_queue, fiber);
+    switchpoint_result = __AGENT__.wait_event(agent, Qnil);
+    if (RTEST(rb_obj_is_kind_of(switchpoint_result, rb_eException))) {
+      ring_buffer_delete(&queue->shift_queue, fiber);
+      return rb_funcall(rb_mKernel, ID_raise, 1, switchpoint_result);
+    }
+    RB_GC_GUARD(agent);
+    RB_GC_GUARD(switchpoint_result);
+  }
+
+  return ring_buffer_shift(&queue->values);
+}
+
+VALUE Queue_shift_no_wait(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return ring_buffer_shift(&queue->values);
+}
+
+VALUE Queue_delete(VALUE self, VALUE value) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  ring_buffer_delete(&queue->values, value);
+  return self;
+}
+
+VALUE Queue_clear(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  ring_buffer_clear(&queue->values);
+  return self;
+}
+
+long Queue_len(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return queue->values.count;
+}
+
+VALUE Queue_shift_each(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  ring_buffer_shift_each(&queue->values);
+  return self;
+}
+
+VALUE Queue_shift_all(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return ring_buffer_shift_all(&queue->values);
+}
+
+VALUE Queue_flush_waiters(VALUE self, VALUE value) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  while(1) {
+    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
+    if (fiber == Qnil) return self;
+
+    Fiber_make_runnable(fiber, value);
+  }
+}
+
+VALUE Queue_empty_p(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return (queue->values.count == 0) ? Qtrue : Qfalse;
+}
+
+VALUE Queue_size_m(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return INT2NUM(queue->values.count);
+}
+
+void Queue_trace(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  printf("run queue size: %d count: %d\n", queue->values.size, queue->values.count);
+}
+
+void Init_Queue() {
+  cQueue = rb_define_class_under(mPolyphony, "Queue", rb_cData);
+  rb_define_alloc_func(cQueue, Queue_allocate);
+
+  rb_define_method(cQueue, "initialize", Queue_initialize, 0);
+  rb_define_method(cQueue, "push", Queue_push, 1);
+  rb_define_method(cQueue, "<<", Queue_push, 1);
+  rb_define_method(cQueue, "unshift", Queue_unshift, 1);
+
+  rb_define_method(cQueue, "shift", Queue_shift, 0);
+  rb_define_method(cQueue, "pop", Queue_shift, 0);
+  rb_define_method(cQueue, "shift_no_wait", Queue_shift_no_wait, 0);
+  rb_define_method(cQueue, "delete", Queue_delete, 1);
+
+  rb_define_method(cQueue, "shift_each", Queue_shift_each, 0);
+  rb_define_method(cQueue, "shift_all", Queue_shift_all, 0);
+  rb_define_method(cQueue, "flush_waiters", Queue_flush_waiters, 1);
+  rb_define_method(cQueue, "empty?", Queue_empty_p, 0);
+  rb_define_method(cQueue, "size", Queue_size_m, 0);
+}
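A minimal C-level usage sketch of the non-blocking part of this Queue API (declared in polyphony.h above, used by thread.c below to drive its run queue). The helper name is hypothetical, and the sketch assumes it runs inside the extension after Init_Queue() has been called.

// Hypothetical helper, for illustration only.
static VALUE queue_api_demo(void) {
  VALUE queue = rb_funcall(cQueue, rb_intern("new"), 0); // Polyphony::Queue.new
  Queue_push(queue, INT2FIX(1));      // appends; wakes one waiting fiber, if any
  Queue_unshift(queue, INT2FIX(0));   // prepends; used below for priority scheduling
  VALUE first = Queue_shift_no_wait(queue); // => 0; returns Qnil when the queue is empty
  printf("remaining: %ld\n", Queue_len(queue)); // => 1
  return first;
}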
data/ext/polyphony/ring_buffer.c
ADDED
@@ -0,0 +1,96 @@
+#include "polyphony.h"
+#include "ring_buffer.h"
+
+void ring_buffer_init(ring_buffer *buffer) {
+  buffer->size = 1;
+  buffer->count = 0;
+  buffer->entries = malloc(buffer->size * sizeof(VALUE));
+  buffer->head = 0;
+  buffer->tail = 0;
+}
+
+void ring_buffer_free(ring_buffer *buffer) {
+  free(buffer->entries);
+}
+
+int ring_buffer_empty_p(ring_buffer *buffer) {
+  return buffer->count == 0;
+}
+
+VALUE ring_buffer_shift(ring_buffer *buffer) {
+  VALUE value;
+  if (buffer->count == 0) return Qnil;
+
+  value = buffer->entries[buffer->head];
+  buffer->head = (buffer->head + 1) % buffer->size;
+  buffer->count--;
+  // INSPECT(value);
+  return value;
+}
+
+void ring_buffer_resize(ring_buffer *buffer) {
+  unsigned int old_size = buffer->size;
+  buffer->size = old_size == 1 ? 4 : old_size * 2;
+  buffer->entries = realloc(buffer->entries, buffer->size * sizeof(VALUE));
+  for (unsigned int idx = 0; idx < buffer->head && idx < buffer->tail; idx++)
+    buffer->entries[old_size + idx] = buffer->entries[idx];
+  buffer->tail = buffer->head + buffer->count;
+}
+
+void ring_buffer_unshift(ring_buffer *buffer, VALUE value) {
+  if (buffer->count == buffer->size) ring_buffer_resize(buffer);
+
+  buffer->head = (buffer->head - 1) % buffer->size;
+  buffer->entries[buffer->head] = value;
+  buffer->count++;
+}
+
+void ring_buffer_push(ring_buffer *buffer, VALUE value) {
+  if (buffer->count == buffer->size) ring_buffer_resize(buffer);
+
+  buffer->entries[buffer->tail] = value;
+  buffer->tail = (buffer->tail + 1) % buffer->size;
+  buffer->count++;
+}
+
+void ring_buffer_mark(ring_buffer *buffer) {
+  for (unsigned int i = 0; i < buffer->count; i++)
+    rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size]);
+}
+
+void ring_buffer_shift_each(ring_buffer *buffer) {
+  for (unsigned int i = 0; i < buffer->count; i++)
+    rb_yield(buffer->entries[(buffer->head + i) % buffer->size]);
+
+  buffer->count = buffer->head = buffer->tail = 0;
+}
+
+VALUE ring_buffer_shift_all(ring_buffer *buffer) {
+  VALUE array = rb_ary_new_capa(buffer->count);
+  for (unsigned int i = 0; i < buffer->count; i++)
+    rb_ary_push(array, buffer->entries[(buffer->head + i) % buffer->size]);
+  buffer->count = buffer->head = buffer->tail = 0;
+  return array;
+}
+
+void ring_buffer_delete_at(ring_buffer *buffer, unsigned int idx) {
+  for (unsigned int idx2 = idx; idx2 != buffer->tail; idx2 = (idx2 + 1) % buffer->size) {
+    buffer->entries[idx2] = buffer->entries[(idx2 + 1) % buffer->size];
+  }
+  buffer->count--;
+  buffer->tail = (buffer->tail - 1) % buffer->size;
+}
+
+void ring_buffer_delete(ring_buffer *buffer, VALUE value) {
+  for (unsigned int i = 0; i < buffer->count; i++) {
+    unsigned int idx = (buffer->head + i) % buffer->size;
+    if (buffer->entries[idx] == value) {
+      ring_buffer_delete_at(buffer, idx);
+      return;
+    }
+  }
+}
+
+void ring_buffer_clear(ring_buffer *buffer) {
+  buffer->count = buffer->head = buffer->tail = 0;
+}
data/ext/polyphony/ring_buffer.h
ADDED
@@ -0,0 +1,28 @@
+#ifndef RING_BUFFER_H
+#define RING_BUFFER_H
+
+#include "ruby.h"
+
+typedef struct ring_buffer {
+  VALUE *entries;
+  unsigned int size;
+  unsigned int count;
+  unsigned int head;
+  unsigned int tail;
+} ring_buffer;
+
+void ring_buffer_init(ring_buffer *buffer);
+void ring_buffer_free(ring_buffer *buffer);
+void ring_buffer_mark(ring_buffer *buffer);
+int ring_buffer_empty_p(ring_buffer *buffer);
+void ring_buffer_clear(ring_buffer *buffer);
+
+VALUE ring_buffer_shift(ring_buffer *buffer);
+void ring_buffer_unshift(ring_buffer *buffer, VALUE value);
+void ring_buffer_push(ring_buffer *buffer, VALUE value);
+
+void ring_buffer_shift_each(ring_buffer *buffer);
+VALUE ring_buffer_shift_all(ring_buffer *buffer);
+void ring_buffer_delete(ring_buffer *buffer, VALUE value);
+
+#endif /* RING_BUFFER_H */
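A small standalone sketch of the ring buffer's growth and wrap-around behavior, based only on the two ring_buffer files above. The demo function is hypothetical; VALUE and the fixnum macros come from ruby.h, which ring_buffer.h already includes, and immediate fixnums are used so no GC marking is needed here.

// Hypothetical demo, for illustration only.
#include <stdio.h>
#include "ring_buffer.h"

void ring_buffer_demo(void) {
  ring_buffer buf;
  ring_buffer_init(&buf);                 // initial capacity is 1
  ring_buffer_push(&buf, INT2FIX(1));     // fills the single slot
  ring_buffer_push(&buf, INT2FIX(2));     // buffer full, so ring_buffer_resize grows it to 4
  ring_buffer_unshift(&buf, INT2FIX(0));  // inserts at the head (wraps around to the last slot)
  while (!ring_buffer_empty_p(&buf))
    printf("%ld\n", FIX2LONG(ring_buffer_shift(&buf))); // prints 0, 1, 2 in FIFO order
  ring_buffer_free(&buf);
}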
data/ext/polyphony/thread.c
CHANGED
@@ -11,10 +11,9 @@ ID ID_runnable_next;
 ID ID_stop;
 
 static VALUE Thread_setup_fiber_scheduling(VALUE self) {
-  VALUE queue;
+  VALUE queue = rb_funcall(cQueue, ID_new, 0);
 
   rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
-  queue = rb_ary_new();
   rb_ivar_set(self, ID_run_queue, queue);
 
   return self;
@@ -22,12 +21,12 @@ static VALUE Thread_setup_fiber_scheduling(VALUE self) {
 
 int Thread_fiber_ref_count(VALUE self) {
   VALUE agent = rb_ivar_get(self, ID_ivar_agent);
-  return NUM2INT(
+  return NUM2INT(__AGENT__.ref_count(agent));
 }
 
 inline void Thread_fiber_reset_ref_count(VALUE self) {
   VALUE agent = rb_ivar_get(self, ID_ivar_agent);
-
+  __AGENT__.reset_ref_count(agent);
 }
 
 static VALUE SYM_scheduled_fibers;
@@ -42,7 +41,7 @@ static VALUE Thread_fiber_scheduling_stats(VALUE self) {
   long scheduled_count = RARRAY_LEN(queue);
   rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(scheduled_count));
 
-  pending_count =
+  pending_count = __AGENT__.pending_count(agent);
   rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(pending_count));
 
   return stats;
@@ -53,25 +52,34 @@ VALUE Thread_schedule_fiber(VALUE self, VALUE fiber, VALUE value) {
 
   if (rb_fiber_alive_p(fiber) != Qtrue) return self;
 
-
-
-
-
-
+  int already_runnable = rb_ivar_get(fiber, ID_runnable) != Qnil;
+
+  if (already_runnable) {
+    VALUE current_runnable_value = rb_ivar_get(fiber, ID_runnable_value);
+
+    // If the fiber is already runnable and the runnable value is an exception,
+    // we don't update the value, in order to prevent a race condition where
+    // exceptions will be lost (see issue #33)
+    if (TEST_EXCEPTION(current_runnable_value)) return self;
   }
 
-
-
-  rb_ivar_set(fiber, ID_runnable, Qtrue);
+  rb_ivar_set(fiber, ID_runnable_value, value);
+  FIBER_TRACE(3, SYM_fiber_schedule, fiber, value);
 
-  if (
-
-
-
-
-
-
-
+  if (!already_runnable) {
+    queue = rb_ivar_get(self, ID_run_queue);
+    Queue_push(queue, fiber);
+    rb_ivar_set(fiber, ID_runnable, Qtrue);
+
+    if (rb_thread_current() != self) {
+      // If the fiber scheduling is done across threads, we need to make sure the
+      // target thread is woken up in case it is in the middle of running its
+      // event selector. Otherwise it's gonna be stuck waiting for an event to
+      // happen, not knowing that it there's already a fiber ready to run in its
+      // run queue.
+      VALUE agent = rb_ivar_get(self,ID_ivar_agent);
+      __AGENT__.wakeup(agent);
+    }
   }
   return self;
 }
@@ -88,13 +96,13 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
 
   // if fiber is already scheduled, remove it from the run queue
   if (rb_ivar_get(fiber, ID_runnable) != Qnil) {
-
+    Queue_delete(queue, fiber);
   } else {
     rb_ivar_set(fiber, ID_runnable, Qtrue);
   }
 
   // the fiber is given priority by putting it at the front of the run queue
-
+  Queue_unshift(queue, fiber);
 
   if (rb_thread_current() != self) {
     // if the fiber scheduling is done across threads, we need to make sure the
@@ -103,7 +111,7 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
     // happen, not knowing that it there's already a fiber ready to run in its
     // run queue.
     VALUE agent = rb_ivar_get(self, ID_ivar_agent);
-
+    __AGENT__.wakeup(agent);
   }
   return self;
 }
@@ -115,6 +123,7 @@ VALUE Thread_switch_fiber(VALUE self) {
   VALUE value;
   VALUE agent = rb_ivar_get(self, ID_ivar_agent);
   int ref_count;
+  int agent_was_polled = 0;
 
   if (__tracing_enabled__) {
     if (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse) {
@@ -122,20 +131,21 @@ VALUE Thread_switch_fiber(VALUE self) {
     }
   }
 
-  ref_count =
+  ref_count = __AGENT__.ref_count(agent);
   while (1) {
-    next_fiber =
+    next_fiber = Queue_shift_no_wait(queue);
     if (next_fiber != Qnil) {
-      if (ref_count > 0) {
+      if (agent_was_polled == 0 && ref_count > 0) {
         // this mechanism prevents event starvation in case the run queue never
         // empties
-
+        __AGENT__.poll(agent, Qtrue, current_fiber, queue);
       }
       break;
     }
     if (ref_count == 0) break;
 
-
+    __AGENT__.poll(agent, Qnil, current_fiber, queue);
+    agent_was_polled = 1;
   }
 
   if (next_fiber == Qnil) return Qnil;
@@ -151,9 +161,15 @@ VALUE Thread_switch_fiber(VALUE self) {
     value : rb_funcall(next_fiber, ID_transfer, 1, value);
 }
 
+VALUE Thread_run_queue_trace(VALUE self) {
+  VALUE queue = rb_ivar_get(self, ID_run_queue);
+  Queue_trace(queue);
+  return self;
+}
+
 VALUE Thread_reset_fiber_scheduling(VALUE self) {
   VALUE queue = rb_ivar_get(self, ID_run_queue);
-
+  Queue_clear(queue);
   Thread_fiber_reset_ref_count(self);
   return self;
 }
@@ -164,7 +180,7 @@ VALUE Thread_fiber_break_out_of_ev_loop(VALUE self, VALUE fiber, VALUE resume_ob
     Thread_schedule_fiber_with_priority(self, fiber, resume_obj);
   }
 
-  if (
+  if (__AGENT__.wakeup(agent) == Qnil) {
     // we're not inside the ev_loop, so we just do a switchpoint
     Thread_switch_fiber(self);
   }
@@ -182,6 +198,7 @@ void Init_Thread() {
   rb_define_method(rb_cThread, "schedule_fiber_with_priority",
     Thread_schedule_fiber_with_priority, 2);
   rb_define_method(rb_cThread, "switch_fiber", Thread_switch_fiber, 0);
+  rb_define_method(rb_cThread, "run_queue_trace", Thread_run_queue_trace, 0);
 
   ID_deactivate_all_watchers_post_fork = rb_intern("deactivate_all_watchers_post_fork");
   ID_ivar_agent = rb_intern("@agent");