polyphony 0.43.5 → 0.43.11

Sign up to get free protection for your applications and to get access to all the features.
Files changed (49) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +45 -0
  3. data/Gemfile.lock +1 -1
  4. data/README.md +21 -4
  5. data/TODO.md +0 -7
  6. data/bin/stress.rb +28 -0
  7. data/docs/_includes/head.html +40 -0
  8. data/docs/_includes/title.html +1 -0
  9. data/docs/_user-guide/web-server.md +11 -11
  10. data/docs/getting-started/overview.md +2 -2
  11. data/docs/index.md +3 -1
  12. data/docs/polyphony-logo.png +0 -0
  13. data/examples/core/xx-channels.rb +4 -2
  14. data/examples/core/xx-using-a-mutex.rb +2 -1
  15. data/examples/io/xx-happy-eyeballs.rb +21 -22
  16. data/examples/io/xx-zip.rb +19 -0
  17. data/examples/performance/fiber_transfer.rb +47 -0
  18. data/examples/xx-spin.rb +32 -0
  19. data/ext/polyphony/agent.h +41 -0
  20. data/ext/polyphony/event.c +86 -0
  21. data/ext/polyphony/fiber.c +0 -5
  22. data/ext/polyphony/libev_agent.c +277 -135
  23. data/ext/polyphony/polyphony.c +2 -2
  24. data/ext/polyphony/polyphony.h +14 -21
  25. data/ext/polyphony/polyphony_ext.c +4 -2
  26. data/ext/polyphony/queue.c +208 -0
  27. data/ext/polyphony/ring_buffer.c +0 -24
  28. data/ext/polyphony/thread.c +42 -31
  29. data/lib/polyphony.rb +6 -7
  30. data/lib/polyphony/core/channel.rb +3 -34
  31. data/lib/polyphony/core/resource_pool.rb +13 -75
  32. data/lib/polyphony/core/sync.rb +12 -9
  33. data/lib/polyphony/extensions/fiber.rb +8 -8
  34. data/lib/polyphony/extensions/openssl.rb +8 -0
  35. data/lib/polyphony/extensions/socket.rb +11 -9
  36. data/lib/polyphony/extensions/thread.rb +1 -1
  37. data/lib/polyphony/net.rb +2 -1
  38. data/lib/polyphony/version.rb +1 -1
  39. data/test/helper.rb +2 -2
  40. data/test/test_agent.rb +2 -2
  41. data/test/test_event.rb +12 -0
  42. data/test/test_fiber.rb +1 -1
  43. data/test/test_io.rb +14 -0
  44. data/test/test_queue.rb +33 -0
  45. data/test/test_resource_pool.rb +24 -58
  46. data/test/test_trace.rb +18 -17
  47. metadata +12 -5
  48. data/ext/polyphony/libev_queue.c +0 -288
  49. data/lib/polyphony/event.rb +0 -27
@@ -2,7 +2,6 @@
2
2
 
3
3
  VALUE mPolyphony;
4
4
 
5
- ID ID_await_no_raise;
6
5
  ID ID_call;
7
6
  ID ID_caller;
8
7
  ID ID_clear;
@@ -22,6 +21,8 @@ ID ID_R;
22
21
  ID ID_W;
23
22
  ID ID_RW;
24
23
 
24
+ agent_interface_t agent_interface;
25
+
25
26
  VALUE Polyphony_snooze(VALUE self) {
26
27
  VALUE ret;
27
28
  VALUE fiber = rb_fiber_current();
@@ -54,7 +55,6 @@ void Init_Polyphony() {
54
55
  rb_define_global_function("snooze", Polyphony_snooze, 0);
55
56
  rb_define_global_function("suspend", Polyphony_suspend, 0);
56
57
 
57
- ID_await_no_raise = rb_intern("await_no_raise");
58
58
  ID_call = rb_intern("call");
59
59
  ID_caller = rb_intern("caller");
60
60
  ID_clear = rb_intern("clear");
@@ -4,10 +4,11 @@
4
4
  #include "ruby.h"
5
5
  #include "ruby/io.h"
6
6
  #include "libev.h"
7
+ #include "agent.h"
7
8
 
8
9
  // debugging
9
10
  #define OBJ_ID(obj) (NUM2LONG(rb_funcall(obj, rb_intern("object_id"), 0)))
10
- #define INSPECT(obj) { VALUE s = rb_funcall(obj, rb_intern("inspect"), 0); printf("%s\n", StringValueCStr(s));}
11
+ #define INSPECT(str, obj) { printf(str); VALUE s = rb_funcall(obj, rb_intern("inspect"), 0); printf("%s\n", StringValueCStr(s));}
11
12
  #define FIBER_TRACE(...) if (__tracing_enabled__) { \
12
13
  rb_funcall(rb_cObject, ID_fiber_trace, __VA_ARGS__); \
13
14
  }
@@ -18,11 +19,13 @@
18
19
  return rb_funcall(rb_mKernel, ID_raise, 1, ret); \
19
20
  }
20
21
 
22
+ extern agent_interface_t agent_interface;
23
+ #define __AGENT__ (agent_interface)
24
+
21
25
  extern VALUE mPolyphony;
22
- extern VALUE cLibevQueue;
26
+ extern VALUE cQueue;
23
27
  extern VALUE cEvent;
24
28
 
25
- extern ID ID_await_no_raise;
26
29
  extern ID ID_call;
27
30
  extern ID ID_caller;
28
31
  extern ID ID_clear;
@@ -66,24 +69,14 @@ enum {
66
69
  VALUE Fiber_auto_watcher(VALUE self);
67
70
  void Fiber_make_runnable(VALUE fiber, VALUE value);
68
71
 
69
- VALUE LibevAgent_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE queue);
70
- VALUE LibevAgent_break(VALUE self);
71
- VALUE LibevAgent_pending_count(VALUE self);
72
- VALUE LibevAgent_wait_io(VALUE self, VALUE io, VALUE write);
73
-
74
- VALUE LibevAgent_ref(VALUE self);
75
- VALUE LibevAgent_unref(VALUE self);
76
- int LibevAgent_ref_count(VALUE self);
77
- void LibevAgent_reset_ref_count(VALUE self);
78
-
79
- VALUE LibevQueue_push(VALUE self, VALUE value);
80
- VALUE LibevQueue_unshift(VALUE self, VALUE value);
81
- VALUE LibevQueue_shift(VALUE self);
82
- VALUE LibevQueue_shift_no_wait(VALUE self);
83
- VALUE LibevQueue_clear(VALUE self);
84
- VALUE LibevQueue_delete(VALUE self, VALUE value);
85
- long LibevQueue_len(VALUE self);
86
- void LibevQueue_trace(VALUE self);
72
+ VALUE Queue_push(VALUE self, VALUE value);
73
+ VALUE Queue_unshift(VALUE self, VALUE value);
74
+ VALUE Queue_shift(VALUE self);
75
+ VALUE Queue_shift_no_wait(VALUE self);
76
+ VALUE Queue_clear(VALUE self);
77
+ VALUE Queue_delete(VALUE self, VALUE value);
78
+ long Queue_len(VALUE self);
79
+ void Queue_trace(VALUE self);
87
80
 
88
81
  VALUE Polyphony_snooze(VALUE self);
89
82
 
@@ -3,7 +3,8 @@
3
3
  void Init_Fiber();
4
4
  void Init_Polyphony();
5
5
  void Init_LibevAgent();
6
- void Init_LibevQueue();
6
+ void Init_Queue();
7
+ void Init_Event();
7
8
  void Init_Thread();
8
9
  void Init_Tracing();
9
10
 
@@ -12,7 +13,8 @@ void Init_polyphony_ext() {
12
13
 
13
14
  Init_Polyphony();
14
15
  Init_LibevAgent();
15
- Init_LibevQueue();
16
+ Init_Queue();
17
+ Init_Event();
16
18
 
17
19
  Init_Fiber();
18
20
  Init_Thread();
@@ -0,0 +1,208 @@
1
+ #include "polyphony.h"
2
+ #include "ring_buffer.h"
3
+
4
+ typedef struct queue {
5
+ ring_buffer values;
6
+ ring_buffer shift_queue;
7
+ } Queue_t;
8
+
9
+ VALUE cQueue = Qnil;
10
+
11
+ static void Queue_mark(void *ptr) {
12
+ Queue_t *queue = ptr;
13
+ ring_buffer_mark(&queue->values);
14
+ ring_buffer_mark(&queue->shift_queue);
15
+ }
16
+
17
+ static void Queue_free(void *ptr) {
18
+ Queue_t *queue = ptr;
19
+ ring_buffer_free(&queue->values);
20
+ ring_buffer_free(&queue->shift_queue);
21
+ xfree(ptr);
22
+ }
23
+
24
+ static size_t Queue_size(const void *ptr) {
25
+ return sizeof(Queue_t);
26
+ }
27
+
28
+ static const rb_data_type_t Queue_type = {
29
+ "Queue",
30
+ {Queue_mark, Queue_free, Queue_size,},
31
+ 0, 0, 0
32
+ };
33
+
34
+ static VALUE Queue_allocate(VALUE klass) {
35
+ Queue_t *queue;
36
+
37
+ queue = ALLOC(Queue_t);
38
+ return TypedData_Wrap_Struct(klass, &Queue_type, queue);
39
+ }
40
+
41
+ #define GetQueue(obj, queue) \
42
+ TypedData_Get_Struct((obj), Queue_t, &Queue_type, (queue))
43
+
44
+ static VALUE Queue_initialize(VALUE self) {
45
+ Queue_t *queue;
46
+ GetQueue(self, queue);
47
+
48
+ ring_buffer_init(&queue->values);
49
+ ring_buffer_init(&queue->shift_queue);
50
+
51
+ return self;
52
+ }
53
+
54
+ VALUE Queue_push(VALUE self, VALUE value) {
55
+ Queue_t *queue;
56
+ GetQueue(self, queue);
57
+
58
+ if (queue->shift_queue.count > 0) {
59
+ VALUE fiber = ring_buffer_shift(&queue->shift_queue);
60
+ if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
61
+ }
62
+ ring_buffer_push(&queue->values, value);
63
+ return self;
64
+ }
65
+
66
+ VALUE Queue_unshift(VALUE self, VALUE value) {
67
+ Queue_t *queue;
68
+ GetQueue(self, queue);
69
+ if (queue->shift_queue.count > 0) {
70
+ VALUE fiber = ring_buffer_shift(&queue->shift_queue);
71
+ if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
72
+ }
73
+ ring_buffer_unshift(&queue->values, value);
74
+ return self;
75
+ }
76
+
77
+ VALUE Queue_shift(VALUE self) {
78
+ Queue_t *queue;
79
+ GetQueue(self, queue);
80
+
81
+ VALUE fiber = rb_fiber_current();
82
+ VALUE thread = rb_thread_current();
83
+ VALUE agent = rb_ivar_get(thread, ID_ivar_agent);
84
+
85
+ while (1) {
86
+ ring_buffer_push(&queue->shift_queue, fiber);
87
+ if (queue->values.count > 0) Fiber_make_runnable(fiber, Qnil);
88
+
89
+ VALUE switchpoint_result = __AGENT__.wait_event(agent, Qnil);
90
+ ring_buffer_delete(&queue->shift_queue, fiber);
91
+
92
+ if (RTEST(rb_obj_is_kind_of(switchpoint_result, rb_eException)))
93
+ return rb_funcall(rb_mKernel, ID_raise, 1, switchpoint_result);
94
+ RB_GC_GUARD(switchpoint_result);
95
+
96
+ if (queue->values.count > 0)
97
+ return ring_buffer_shift(&queue->values);
98
+ }
99
+
100
+ return Qnil;
101
+ }
102
+
103
+ VALUE Queue_shift_no_wait(VALUE self) {
104
+ Queue_t *queue;
105
+ GetQueue(self, queue);
106
+
107
+ return ring_buffer_shift(&queue->values);
108
+ }
109
+
110
+ VALUE Queue_delete(VALUE self, VALUE value) {
111
+ Queue_t *queue;
112
+ GetQueue(self, queue);
113
+
114
+ ring_buffer_delete(&queue->values, value);
115
+ return self;
116
+ }
117
+
118
+ VALUE Queue_clear(VALUE self) {
119
+ Queue_t *queue;
120
+ GetQueue(self, queue);
121
+
122
+ ring_buffer_clear(&queue->values);
123
+ return self;
124
+ }
125
+
126
+ long Queue_len(VALUE self) {
127
+ Queue_t *queue;
128
+ GetQueue(self, queue);
129
+
130
+ return queue->values.count;
131
+ }
132
+
133
+ VALUE Queue_shift_each(VALUE self) {
134
+ Queue_t *queue;
135
+ GetQueue(self, queue);
136
+
137
+ ring_buffer_shift_each(&queue->values);
138
+ return self;
139
+ }
140
+
141
+ VALUE Queue_shift_all(VALUE self) {
142
+ Queue_t *queue;
143
+ GetQueue(self, queue);
144
+
145
+ return ring_buffer_shift_all(&queue->values);
146
+ }
147
+
148
+ VALUE Queue_flush_waiters(VALUE self, VALUE value) {
149
+ Queue_t *queue;
150
+ GetQueue(self, queue);
151
+
152
+ while(1) {
153
+ VALUE fiber = ring_buffer_shift(&queue->shift_queue);
154
+ if (fiber == Qnil) return self;
155
+
156
+ Fiber_make_runnable(fiber, value);
157
+ }
158
+ }
159
+
160
+ VALUE Queue_empty_p(VALUE self) {
161
+ Queue_t *queue;
162
+ GetQueue(self, queue);
163
+
164
+ return (queue->values.count == 0) ? Qtrue : Qfalse;
165
+ }
166
+
167
+ VALUE Queue_pending_p(VALUE self) {
168
+ Queue_t *queue;
169
+ GetQueue(self, queue);
170
+
171
+ return (queue->shift_queue.count > 0) ? Qtrue : Qfalse;
172
+ }
173
+
174
+ VALUE Queue_size_m(VALUE self) {
175
+ Queue_t *queue;
176
+ GetQueue(self, queue);
177
+
178
+ return INT2NUM(queue->values.count);
179
+ }
180
+
181
+ void Queue_trace(VALUE self) {
182
+ Queue_t *queue;
183
+ GetQueue(self, queue);
184
+
185
+ printf("run queue size: %d count: %d\n", queue->values.size, queue->values.count);
186
+ }
187
+
188
+ void Init_Queue() {
189
+ cQueue = rb_define_class_under(mPolyphony, "Queue", rb_cData);
190
+ rb_define_alloc_func(cQueue, Queue_allocate);
191
+
192
+ rb_define_method(cQueue, "initialize", Queue_initialize, 0);
193
+ rb_define_method(cQueue, "push", Queue_push, 1);
194
+ rb_define_method(cQueue, "<<", Queue_push, 1);
195
+ rb_define_method(cQueue, "unshift", Queue_unshift, 1);
196
+
197
+ rb_define_method(cQueue, "shift", Queue_shift, 0);
198
+ rb_define_method(cQueue, "pop", Queue_shift, 0);
199
+ rb_define_method(cQueue, "shift_no_wait", Queue_shift_no_wait, 0);
200
+ rb_define_method(cQueue, "delete", Queue_delete, 1);
201
+
202
+ rb_define_method(cQueue, "shift_each", Queue_shift_each, 0);
203
+ rb_define_method(cQueue, "shift_all", Queue_shift_all, 0);
204
+ rb_define_method(cQueue, "flush_waiters", Queue_flush_waiters, 1);
205
+ rb_define_method(cQueue, "empty?", Queue_empty_p, 0);
206
+ rb_define_method(cQueue, "pending?", Queue_pending_p, 0);
207
+ rb_define_method(cQueue, "size", Queue_size_m, 0);
208
+ }
@@ -17,18 +17,7 @@ int ring_buffer_empty_p(ring_buffer *buffer) {
17
17
  return buffer->count == 0;
18
18
  }
19
19
 
20
- #define TRACE_RING_BUFFER(func, buffer) printf( \
21
- "%s size: %d count: %d head: %d tail: %d\n", \
22
- func, \
23
- buffer->size, \
24
- buffer->count, \
25
- buffer->head, \
26
- buffer->tail \
27
- )
28
-
29
20
  VALUE ring_buffer_shift(ring_buffer *buffer) {
30
- // TRACE_RING_BUFFER("ring_buffer_shift", buffer);
31
-
32
21
  VALUE value;
33
22
  if (buffer->count == 0) return Qnil;
34
23
 
@@ -40,11 +29,8 @@ VALUE ring_buffer_shift(ring_buffer *buffer) {
40
29
  }
41
30
 
42
31
  void ring_buffer_resize(ring_buffer *buffer) {
43
- // TRACE_RING_BUFFER("ring_buffer_resize", buffer);
44
-
45
32
  unsigned int old_size = buffer->size;
46
33
  buffer->size = old_size == 1 ? 4 : old_size * 2;
47
- // printf("new size: %d\n", buffer->size);
48
34
  buffer->entries = realloc(buffer->entries, buffer->size * sizeof(VALUE));
49
35
  for (unsigned int idx = 0; idx < buffer->head && idx < buffer->tail; idx++)
50
36
  buffer->entries[old_size + idx] = buffer->entries[idx];
@@ -52,9 +38,6 @@ void ring_buffer_resize(ring_buffer *buffer) {
52
38
  }
53
39
 
54
40
  void ring_buffer_unshift(ring_buffer *buffer, VALUE value) {
55
- // TRACE_RING_BUFFER("ring_buffer_unshift", buffer);
56
- // INSPECT(value);
57
-
58
41
  if (buffer->count == buffer->size) ring_buffer_resize(buffer);
59
42
 
60
43
  buffer->head = (buffer->head - 1) % buffer->size;
@@ -63,8 +46,6 @@ void ring_buffer_unshift(ring_buffer *buffer, VALUE value) {
63
46
  }
64
47
 
65
48
  void ring_buffer_push(ring_buffer *buffer, VALUE value) {
66
- // TRACE_RING_BUFFER("ring_buffer_push", buffer);
67
- // INSPECT(value);
68
49
  if (buffer->count == buffer->size) ring_buffer_resize(buffer);
69
50
 
70
51
  buffer->entries[buffer->tail] = value;
@@ -78,8 +59,6 @@ void ring_buffer_mark(ring_buffer *buffer) {
78
59
  }
79
60
 
80
61
  void ring_buffer_shift_each(ring_buffer *buffer) {
81
- // TRACE_RING_BUFFER("ring_buffer_shift_each", buffer);
82
-
83
62
  for (unsigned int i = 0; i < buffer->count; i++)
84
63
  rb_yield(buffer->entries[(buffer->head + i) % buffer->size]);
85
64
 
@@ -87,7 +66,6 @@ void ring_buffer_shift_each(ring_buffer *buffer) {
87
66
  }
88
67
 
89
68
  VALUE ring_buffer_shift_all(ring_buffer *buffer) {
90
- // TRACE_RING_BUFFER("ring_buffer_all", buffer);
91
69
  VALUE array = rb_ary_new_capa(buffer->count);
92
70
  for (unsigned int i = 0; i < buffer->count; i++)
93
71
  rb_ary_push(array, buffer->entries[(buffer->head + i) % buffer->size]);
@@ -104,7 +82,6 @@ void ring_buffer_delete_at(ring_buffer *buffer, unsigned int idx) {
104
82
  }
105
83
 
106
84
  void ring_buffer_delete(ring_buffer *buffer, VALUE value) {
107
- // TRACE_RING_BUFFER("ring_buffer_delete", buffer);
108
85
  for (unsigned int i = 0; i < buffer->count; i++) {
109
86
  unsigned int idx = (buffer->head + i) % buffer->size;
110
87
  if (buffer->entries[idx] == value) {
@@ -115,6 +92,5 @@ void ring_buffer_delete(ring_buffer *buffer, VALUE value) {
115
92
  }
116
93
 
117
94
  void ring_buffer_clear(ring_buffer *buffer) {
118
- // TRACE_RING_BUFFER("ring_buffer_clear", buffer);
119
95
  buffer->count = buffer->head = buffer->tail = 0;
120
96
  }
@@ -11,7 +11,7 @@ ID ID_runnable_next;
11
11
  ID ID_stop;
12
12
 
13
13
  static VALUE Thread_setup_fiber_scheduling(VALUE self) {
14
- VALUE queue = rb_funcall(cLibevQueue, ID_new, 0);
14
+ VALUE queue = rb_funcall(cQueue, ID_new, 0);
15
15
 
16
16
  rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
17
17
  rb_ivar_set(self, ID_run_queue, queue);
@@ -21,12 +21,12 @@ static VALUE Thread_setup_fiber_scheduling(VALUE self) {
21
21
 
22
22
  int Thread_fiber_ref_count(VALUE self) {
23
23
  VALUE agent = rb_ivar_get(self, ID_ivar_agent);
24
- return NUM2INT(LibevAgent_ref_count(agent));
24
+ return NUM2INT(__AGENT__.ref_count(agent));
25
25
  }
26
26
 
27
27
  inline void Thread_fiber_reset_ref_count(VALUE self) {
28
28
  VALUE agent = rb_ivar_get(self, ID_ivar_agent);
29
- LibevAgent_reset_ref_count(agent);
29
+ __AGENT__.reset_ref_count(agent);
30
30
  }
31
31
 
32
32
  static VALUE SYM_scheduled_fibers;
@@ -41,7 +41,7 @@ static VALUE Thread_fiber_scheduling_stats(VALUE self) {
41
41
  long scheduled_count = RARRAY_LEN(queue);
42
42
  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(scheduled_count));
43
43
 
44
- pending_count = LibevAgent_pending_count(agent);
44
+ pending_count = __AGENT__.pending_count(agent);
45
45
  rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(pending_count));
46
46
 
47
47
  return stats;
@@ -52,25 +52,34 @@ VALUE Thread_schedule_fiber(VALUE self, VALUE fiber, VALUE value) {
52
52
 
53
53
  if (rb_fiber_alive_p(fiber) != Qtrue) return self;
54
54
 
55
- FIBER_TRACE(3, SYM_fiber_schedule, fiber, value);
56
- // if fiber is already scheduled, just set the scheduled value, then return
57
- rb_ivar_set(fiber, ID_runnable_value, value);
58
- if (rb_ivar_get(fiber, ID_runnable) != Qnil) {
59
- return self;
55
+ int already_runnable = rb_ivar_get(fiber, ID_runnable) != Qnil;
56
+
57
+ if (already_runnable) {
58
+ VALUE current_runnable_value = rb_ivar_get(fiber, ID_runnable_value);
59
+
60
+ // If the fiber is already runnable and the runnable value is an exception,
61
+ // we don't update the value, in order to prevent a race condition where
62
+ // exceptions will be lost (see issue #33)
63
+ if (TEST_EXCEPTION(current_runnable_value)) return self;
60
64
  }
61
65
 
62
- queue = rb_ivar_get(self, ID_run_queue);
63
- LibevQueue_push(queue, fiber);
64
- rb_ivar_set(fiber, ID_runnable, Qtrue);
66
+ rb_ivar_set(fiber, ID_runnable_value, value);
67
+ FIBER_TRACE(3, SYM_fiber_schedule, fiber, value);
65
68
 
66
- if (rb_thread_current() != self) {
67
- // if the fiber scheduling is done across threads, we need to make sure the
68
- // target thread is woken up in case it is in the middle of running its
69
- // event selector. Otherwise it's gonna be stuck waiting for an event to
70
- // happen, not knowing that it there's already a fiber ready to run in its
71
- // run queue.
72
- VALUE agent = rb_ivar_get(self,ID_ivar_agent);
73
- LibevAgent_break(agent);
69
+ if (!already_runnable) {
70
+ queue = rb_ivar_get(self, ID_run_queue);
71
+ Queue_push(queue, fiber);
72
+ rb_ivar_set(fiber, ID_runnable, Qtrue);
73
+
74
+ if (rb_thread_current() != self) {
75
+ // If the fiber scheduling is done across threads, we need to make sure the
76
+ // target thread is woken up in case it is in the middle of running its
77
+ // event selector. Otherwise it's gonna be stuck waiting for an event to
78
+ // happen, not knowing that it there's already a fiber ready to run in its
79
+ // run queue.
80
+ VALUE agent = rb_ivar_get(self,ID_ivar_agent);
81
+ __AGENT__.wakeup(agent);
82
+ }
74
83
  }
75
84
  return self;
76
85
  }
@@ -87,13 +96,13 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
87
96
 
88
97
  // if fiber is already scheduled, remove it from the run queue
89
98
  if (rb_ivar_get(fiber, ID_runnable) != Qnil) {
90
- LibevQueue_delete(queue, fiber);
99
+ Queue_delete(queue, fiber);
91
100
  } else {
92
101
  rb_ivar_set(fiber, ID_runnable, Qtrue);
93
102
  }
94
103
 
95
104
  // the fiber is given priority by putting it at the front of the run queue
96
- LibevQueue_unshift(queue, fiber);
105
+ Queue_unshift(queue, fiber);
97
106
 
98
107
  if (rb_thread_current() != self) {
99
108
  // if the fiber scheduling is done across threads, we need to make sure the
@@ -102,7 +111,7 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
102
111
  // happen, not knowing that there's already a fiber ready to run in its
103
112
  // run queue.
104
113
  VALUE agent = rb_ivar_get(self, ID_ivar_agent);
105
- LibevAgent_break(agent);
114
+ __AGENT__.wakeup(agent);
106
115
  }
107
116
  return self;
108
117
  }
@@ -114,6 +123,7 @@ VALUE Thread_switch_fiber(VALUE self) {
114
123
  VALUE value;
115
124
  VALUE agent = rb_ivar_get(self, ID_ivar_agent);
116
125
  int ref_count;
126
+ int agent_was_polled = 0;
117
127
 
118
128
  if (__tracing_enabled__) {
119
129
  if (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse) {
@@ -121,20 +131,21 @@ VALUE Thread_switch_fiber(VALUE self) {
121
131
  }
122
132
  }
123
133
 
124
- ref_count = LibevAgent_ref_count(agent);
134
+ ref_count = __AGENT__.ref_count(agent);
125
135
  while (1) {
126
- next_fiber = LibevQueue_shift_no_wait(queue);
136
+ next_fiber = Queue_shift_no_wait(queue);
127
137
  if (next_fiber != Qnil) {
128
- if (ref_count > 0) {
138
+ if (agent_was_polled == 0 && ref_count > 0) {
129
139
  // this mechanism prevents event starvation in case the run queue never
130
140
  // empties
131
- LibevAgent_poll(agent, Qtrue, current_fiber, queue);
141
+ __AGENT__.poll(agent, Qtrue, current_fiber, queue);
132
142
  }
133
143
  break;
134
144
  }
135
145
  if (ref_count == 0) break;
136
146
 
137
- LibevAgent_poll(agent, Qnil, current_fiber, queue);
147
+ __AGENT__.poll(agent, Qnil, current_fiber, queue);
148
+ agent_was_polled = 1;
138
149
  }
139
150
 
140
151
  if (next_fiber == Qnil) return Qnil;
@@ -152,13 +163,13 @@ VALUE Thread_switch_fiber(VALUE self) {
152
163
 
153
164
  VALUE Thread_run_queue_trace(VALUE self) {
154
165
  VALUE queue = rb_ivar_get(self, ID_run_queue);
155
- LibevQueue_trace(queue);
166
+ Queue_trace(queue);
156
167
  return self;
157
168
  }
158
169
 
159
170
  VALUE Thread_reset_fiber_scheduling(VALUE self) {
160
171
  VALUE queue = rb_ivar_get(self, ID_run_queue);
161
- LibevQueue_clear(queue);
172
+ Queue_clear(queue);
162
173
  Thread_fiber_reset_ref_count(self);
163
174
  return self;
164
175
  }
@@ -169,7 +180,7 @@ VALUE Thread_fiber_break_out_of_ev_loop(VALUE self, VALUE fiber, VALUE resume_ob
169
180
  Thread_schedule_fiber_with_priority(self, fiber, resume_obj);
170
181
  }
171
182
 
172
- if (LibevAgent_break(agent) == Qnil) {
183
+ if (__AGENT__.wakeup(agent) == Qnil) {
173
184
  // we're not inside the ev_loop, so we just do a switchpoint
174
185
  Thread_switch_fiber(self);
175
186
  }