polyphony 0.57.0 → 0.60

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +27 -0
  3. data/Gemfile.lock +15 -29
  4. data/examples/core/message_based_supervision.rb +51 -0
  5. data/ext/polyphony/backend_common.c +108 -3
  6. data/ext/polyphony/backend_common.h +23 -0
  7. data/ext/polyphony/backend_io_uring.c +117 -39
  8. data/ext/polyphony/backend_io_uring_context.c +11 -3
  9. data/ext/polyphony/backend_io_uring_context.h +5 -3
  10. data/ext/polyphony/backend_libev.c +92 -30
  11. data/ext/polyphony/extconf.rb +2 -2
  12. data/ext/polyphony/fiber.c +1 -34
  13. data/ext/polyphony/polyphony.c +12 -19
  14. data/ext/polyphony/polyphony.h +10 -20
  15. data/ext/polyphony/polyphony_ext.c +0 -4
  16. data/ext/polyphony/queue.c +12 -12
  17. data/ext/polyphony/runqueue.c +17 -85
  18. data/ext/polyphony/runqueue.h +27 -0
  19. data/ext/polyphony/thread.c +10 -99
  20. data/lib/polyphony/core/timer.rb +2 -2
  21. data/lib/polyphony/extensions/fiber.rb +102 -82
  22. data/lib/polyphony/extensions/io.rb +10 -9
  23. data/lib/polyphony/extensions/openssl.rb +14 -4
  24. data/lib/polyphony/extensions/socket.rb +15 -15
  25. data/lib/polyphony/extensions/thread.rb +8 -0
  26. data/lib/polyphony/version.rb +1 -1
  27. data/polyphony.gemspec +0 -7
  28. data/test/test_backend.rb +71 -5
  29. data/test/test_ext.rb +1 -1
  30. data/test/test_fiber.rb +106 -18
  31. data/test/test_global_api.rb +1 -1
  32. data/test/test_io.rb +29 -0
  33. data/test/test_supervise.rb +100 -100
  34. data/test/test_thread.rb +57 -11
  35. data/test/test_thread_pool.rb +1 -1
  36. data/test/test_trace.rb +28 -49
  37. metadata +4 -108
  38. data/ext/polyphony/tracing.c +0 -11
  39. data/lib/polyphony/adapters/trace.rb +0 -138
data/ext/polyphony/polyphony.h

@@ -5,6 +5,7 @@
 
 #include "ruby.h"
 #include "runqueue_ring_buffer.h"
+#include "backend_common.h"
 
 // debugging
 #define OBJ_ID(obj) (NUM2LONG(rb_funcall(obj, rb_intern("object_id"), 0)))
@@ -18,10 +19,6 @@
   free(strings); \
 }
 
-// tracing
-#define TRACE(...) rb_funcall(rb_cObject, ID_fiber_trace, __VA_ARGS__)
-#define COND_TRACE(...) if (__tracing_enabled__) { TRACE(__VA_ARGS__); }
-
 // exceptions
 #define TEST_EXCEPTION(ret) (rb_obj_is_kind_of(ret, rb_eException) == Qtrue)
 #define RAISE_EXCEPTION(e) rb_funcall(e, ID_invoke, 0);
@@ -36,14 +33,12 @@
 extern VALUE mPolyphony;
 extern VALUE cQueue;
 extern VALUE cEvent;
-extern VALUE cRunqueue;
 extern VALUE cTimeoutException;
 
 extern ID ID_call;
 extern ID ID_caller;
 extern ID ID_clear;
 extern ID ID_each;
-extern ID ID_fiber_trace;
 extern ID ID_inspect;
 extern ID ID_invoke;
 extern ID ID_ivar_backend;
@@ -67,14 +62,6 @@ extern VALUE SYM_fiber_schedule;
 extern VALUE SYM_fiber_switchpoint;
 extern VALUE SYM_fiber_terminate;
 
-extern int __tracing_enabled__;
-
-enum {
-  FIBER_STATE_NOT_SCHEDULED = 0,
-  FIBER_STATE_WAITING = 1,
-  FIBER_STATE_SCHEDULED = 2
-};
-
 VALUE Fiber_auto_watcher(VALUE self);
 void Fiber_make_runnable(VALUE fiber, VALUE value);
 
@@ -104,10 +91,10 @@ VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class);
 VALUE Backend_accept_loop(VALUE self, VALUE server_socket, VALUE socket_class);
 VALUE Backend_connect(VALUE self, VALUE io, VALUE addr, VALUE port);
 VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method);
-VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof);
-VALUE Backend_read_loop(VALUE self, VALUE io);
-VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length);
-VALUE Backend_recv_loop(VALUE self, VALUE io);
+VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof, VALUE pos);
+VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen);
+VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos);
+VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen);
 VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method);
 VALUE Backend_send(VALUE self, VALUE io, VALUE msg, VALUE flags);
 VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags);
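Note the widened I/O signatures: Backend_read and Backend_recv now take a pos argument for the buffer position, and the loop variants take a maxlen. The backend implementations are not part of this excerpt; purely as a sketch, one plausible handling of pos (the helper name and the negative-offset convention are assumptions, not taken from the actual code) would be:

    /* Hypothetical helper: convert a Ruby buffer position into a byte
     * offset, counting negative values from the end of the buffer
     * (so -1 would mean append). Not taken from the actual backends. */
    static long buffer_pos_from_value(VALUE str, VALUE pos) {
      long len = RSTRING_LEN(str);
      long p = NUM2LONG(pos);
      if (p < 0) p = len + p + 1;
      if (p < 0) p = 0;
      if (p > len) p = len;
      return p;
    }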
@@ -121,11 +108,14 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write);
 VALUE Backend_waitpid(VALUE self, VALUE pid);
 VALUE Backend_write_m(int argc, VALUE *argv, VALUE self);
 
-unsigned int Backend_pending_count(VALUE self);
-VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue);
+VALUE Backend_poll(VALUE self, VALUE blocking);
 VALUE Backend_wait_event(VALUE self, VALUE raise_on_exception);
 VALUE Backend_wakeup(VALUE self);
 VALUE Backend_run_idle_tasks(VALUE self);
+VALUE Backend_switch_fiber(VALUE self);
+void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize);
+struct backend_stats Backend_stats(VALUE self);
+void Backend_unschedule_fiber(VALUE self, VALUE fiber);
 
 VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
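Backend_stats returns a struct backend_stats by value. Its definition lives in backend_common.h (+23 lines, not included in this excerpt); a plausible reconstruction, with field names inferred from their use in Thread_fiber_scheduling_stats in the thread.c hunks below, is:

    /* Sketch reconstructed from usage; the real definition is in
     * backend_common.h, which this excerpt does not show. */
    struct backend_stats {
      unsigned int scheduled_fibers; /* fibers waiting in the runqueue */
      unsigned int pending_ops;      /* in-flight blocking operations */
    };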
data/ext/polyphony/polyphony_ext.c

@@ -5,10 +5,8 @@ void Init_Polyphony();
 void Init_Backend();
 void Init_Queue();
 void Init_Event();
-void Init_Runqueue();
 void Init_SocketExtensions();
 void Init_Thread();
-void Init_Tracing();
 
 #ifdef POLYPHONY_PLAYGROUND
 extern void playground();
@@ -20,10 +18,8 @@ void Init_polyphony_ext() {
   Init_Backend();
   Init_Queue();
   Init_Event();
-  Init_Runqueue();
   Init_Fiber();
   Init_Thread();
-  Init_Tracing();
 
   Init_SocketExtensions();
 
data/ext/polyphony/queue.c

@@ -57,21 +57,21 @@ static VALUE Queue_initialize(int argc, VALUE *argv, VALUE self) {
   return self;
 }
 
-inline void queue_resume_first_blocked_fiber(ring_buffer *queue) {
+inline void queue_schedule_first_blocked_fiber(ring_buffer *queue) {
   if (queue->count) {
     VALUE fiber = ring_buffer_shift(queue);
     if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
   }
 }
 
-inline void queue_resume_all_blocked_fibers(ring_buffer *queue) {
+inline void queue_schedule_all_blocked_fibers(ring_buffer *queue) {
   while (queue->count) {
     VALUE fiber = ring_buffer_shift(queue);
     if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
   }
 }
 
-inline void queue_resume_blocked_fibers_to_capacity(Queue_t *queue) {
+inline void queue_schedule_blocked_fibers_to_capacity(Queue_t *queue) {
   for (unsigned int i = queue->values.count; (i < queue->capacity) && queue->push_queue.count; i++) {
     VALUE fiber = ring_buffer_shift(&queue->push_queue);
     if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
@@ -101,7 +101,7 @@ VALUE Queue_push(VALUE self, VALUE value) {
 
   if (queue->capacity) capped_queue_block_push(queue);
 
-  queue_resume_first_blocked_fiber(&queue->shift_queue);
+  queue_schedule_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_push(&queue->values, value);
 
   return self;
@@ -113,7 +113,7 @@ VALUE Queue_unshift(VALUE self, VALUE value) {
 
   if (queue->capacity) capped_queue_block_push(queue);
 
-  queue_resume_first_blocked_fiber(&queue->shift_queue);
+  queue_schedule_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_unshift(&queue->values, value);
 
   return self;
@@ -140,7 +140,7 @@ VALUE Queue_shift(VALUE self) {
   }
   VALUE value = ring_buffer_shift(&queue->values);
   if ((queue->capacity) && (queue->capacity > queue->values.count))
-    queue_resume_first_blocked_fiber(&queue->push_queue);
+    queue_schedule_first_blocked_fiber(&queue->push_queue);
   RB_GC_GUARD(value);
   return value;
 }
@@ -152,7 +152,7 @@ VALUE Queue_delete(VALUE self, VALUE value) {
   ring_buffer_delete(&queue->values, value);
 
   if (queue->capacity && (queue->capacity > queue->values.count))
-    queue_resume_first_blocked_fiber(&queue->push_queue);
+    queue_schedule_first_blocked_fiber(&queue->push_queue);
 
   return self;
 }
@@ -164,9 +164,9 @@ VALUE Queue_cap(VALUE self, VALUE cap) {
   queue->capacity = new_capacity;
 
   if (queue->capacity)
-    queue_resume_blocked_fibers_to_capacity(queue);
+    queue_schedule_blocked_fibers_to_capacity(queue);
   else
-    queue_resume_all_blocked_fibers(&queue->push_queue);
+    queue_schedule_all_blocked_fibers(&queue->push_queue);
 
   return self;
 }
@@ -183,7 +183,7 @@ VALUE Queue_clear(VALUE self) {
   GetQueue(self, queue);
 
   ring_buffer_clear(&queue->values);
-  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+  if (queue->capacity) queue_schedule_blocked_fibers_to_capacity(queue);
 
   return self;
 }
@@ -200,7 +200,7 @@ VALUE Queue_shift_each(VALUE self) {
   GetQueue(self, queue);
 
   ring_buffer_shift_each(&queue->values);
-  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+  if (queue->capacity) queue_schedule_blocked_fibers_to_capacity(queue);
   return self;
 }
 
@@ -209,7 +209,7 @@ VALUE Queue_shift_all(VALUE self) {
   GetQueue(self, queue);
 
   VALUE result = ring_buffer_shift_all(&queue->values);
-  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+  if (queue->capacity) queue_schedule_blocked_fibers_to_capacity(queue);
   return result;
 }
 
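The resume-to-schedule rename in these queue hunks changes no behavior; it corrects the terminology. The helpers never transfer control: they only mark a blocked fiber runnable via Fiber_make_runnable, and the fiber actually runs at the next switchpoint. The caller-side pattern, taking Queue_shift from above with comments added:

    VALUE value = ring_buffer_shift(&queue->values);
    if ((queue->capacity) && (queue->capacity > queue->values.count))
      queue_schedule_first_blocked_fiber(&queue->push_queue); /* enqueue only */
    /* No fiber runs here; the scheduled fiber is resumed when the current
     * fiber reaches its next switchpoint. */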
data/ext/polyphony/runqueue.c

@@ -1,78 +1,35 @@
 #include "polyphony.h"
-#include "runqueue_ring_buffer.h"
-
-typedef struct queue {
-  runqueue_ring_buffer entries;
-  unsigned int high_watermark;
-  unsigned int switch_count;
-} Runqueue_t;
-
-VALUE cRunqueue = Qnil;
-
-static void Runqueue_mark(void *ptr) {
-  Runqueue_t *runqueue = ptr;
-  runqueue_ring_buffer_mark(&runqueue->entries);
-}
-
-static void Runqueue_free(void *ptr) {
-  Runqueue_t *runqueue = ptr;
-  runqueue_ring_buffer_free(&runqueue->entries);
-  xfree(ptr);
-}
-
-static size_t Runqueue_size(const void *ptr) {
-  return sizeof(Runqueue_t);
-}
-
-static const rb_data_type_t Runqueue_type = {
-  "Runqueue",
-  {Runqueue_mark, Runqueue_free, Runqueue_size,},
-  0, 0, 0
-};
-
-static VALUE Runqueue_allocate(VALUE klass) {
-  Runqueue_t *runqueue;
-
-  runqueue = ALLOC(Runqueue_t);
-  return TypedData_Wrap_Struct(klass, &Runqueue_type, runqueue);
-}
-
-#define GetRunqueue(obj, runqueue) \
-  TypedData_Get_Struct((obj), Runqueue_t, &Runqueue_type, (runqueue))
-
-static VALUE Runqueue_initialize(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+#include "runqueue.h"
 
+inline void runqueue_initialize(runqueue_t *runqueue) {
   runqueue_ring_buffer_init(&runqueue->entries);
   runqueue->high_watermark = 0;
   runqueue->switch_count = 0;
+}
 
-  return self;
+inline void runqueue_finalize(runqueue_t *runqueue) {
+  runqueue_ring_buffer_free(&runqueue->entries);
 }
 
-void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_mark(runqueue_t *runqueue) {
+  runqueue_ring_buffer_mark(&runqueue->entries);
+}
 
+inline void runqueue_push(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule) {
   if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
   runqueue_ring_buffer_push(&runqueue->entries, fiber, value);
   if (runqueue->entries.count > runqueue->high_watermark)
     runqueue->high_watermark = runqueue->entries.count;
 }
 
-void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule) {
   if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
   runqueue_ring_buffer_unshift(&runqueue->entries, fiber, value);
   if (runqueue->entries.count > runqueue->high_watermark)
     runqueue->high_watermark = runqueue->entries.count;
 }
 
-runqueue_entry Runqueue_shift(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline runqueue_entry runqueue_shift(runqueue_t *runqueue) {
   runqueue_entry entry = runqueue_ring_buffer_shift(&runqueue->entries);
   if (entry.fiber == Qnil)
     runqueue->high_watermark = 0;
@@ -81,56 +38,31 @@ runqueue_entry Runqueue_shift(VALUE self) {
   return entry;
 }
 
-void Runqueue_delete(VALUE self, VALUE fiber) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_delete(runqueue_t *runqueue, VALUE fiber) {
   runqueue_ring_buffer_delete(&runqueue->entries, fiber);
 }
 
-int Runqueue_index_of(VALUE self, VALUE fiber) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline int runqueue_index_of(runqueue_t *runqueue, VALUE fiber) {
   return runqueue_ring_buffer_index_of(&runqueue->entries, fiber);
 }
 
-void Runqueue_clear(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
+inline void runqueue_clear(runqueue_t *runqueue) {
   runqueue_ring_buffer_clear(&runqueue->entries);
 }
 
-long Runqueue_len(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
+inline long runqueue_len(runqueue_t *runqueue) {
   return runqueue->entries.count;
 }
 
-int Runqueue_empty_p(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
+inline int runqueue_empty_p(runqueue_t *runqueue) {
   return (runqueue->entries.count == 0);
 }
 
-static const unsigned int ANTI_STARVE_HIGH_WATERMARK_THRESHOLD = 128;
 static const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;
 
-int Runqueue_should_poll_nonblocking(VALUE self) {
-  Runqueue_t *runqueue;
-  GetRunqueue(self, runqueue);
-
-  if (runqueue->high_watermark < ANTI_STARVE_HIGH_WATERMARK_THRESHOLD) return 0;
+inline int runqueue_should_poll_nonblocking(runqueue_t *runqueue) {
  if (runqueue->switch_count < ANTI_STARVE_SWITCH_COUNT_THRESHOLD) return 0;
 
-  // the
   runqueue->switch_count = 0;
   return 1;
 }
-
-void Init_Runqueue() {
-  cRunqueue = rb_define_class_under(mPolyphony, "Runqueue", rb_cObject);
-  rb_define_alloc_func(cRunqueue, Runqueue_allocate);
-
-  rb_define_method(cRunqueue, "initialize", Runqueue_initialize, 0);
-}
data/ext/polyphony/runqueue.h (new file)

@@ -0,0 +1,27 @@
+#ifndef RUNQUEUE_H
+#define RUNQUEUE_H
+
+#include "polyphony.h"
+#include "runqueue_ring_buffer.h"
+
+typedef struct runqueue {
+  runqueue_ring_buffer entries;
+  unsigned int high_watermark;
+  unsigned int switch_count;
+} runqueue_t;
+
+void runqueue_initialize(runqueue_t *runqueue);
+void runqueue_finalize(runqueue_t *runqueue);
+void runqueue_mark(runqueue_t *runqueue);
+
+void runqueue_push(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
+void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
+runqueue_entry runqueue_shift(runqueue_t *runqueue);
+void runqueue_delete(runqueue_t *runqueue, VALUE fiber);
+int runqueue_index_of(runqueue_t *runqueue, VALUE fiber);
+void runqueue_clear(runqueue_t *runqueue);
+long runqueue_len(runqueue_t *runqueue);
+int runqueue_empty_p(runqueue_t *runqueue);
+int runqueue_should_poll_nonblocking(runqueue_t *runqueue);
+
+#endif /* RUNQUEUE_H */
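Replacing the GC-managed Polyphony::Runqueue object with a plain runqueue_t removes a per-thread Ruby allocation and the GetRunqueue TypedData lookup on every operation, and lets each backend embed its run queue directly. A minimal sketch of the embedding (the Backend_t layout shown here is hypothetical; the real structs are in backend_io_uring.c and backend_libev.c, which this excerpt shows only partially):

    #include "runqueue.h"

    typedef struct Backend_t {      /* hypothetical layout */
      runqueue_t runqueue;          /* embedded, no Ruby wrapper object */
      unsigned int pending_count;   /* in-flight blocking operations */
    } Backend_t;

    static void backend_setup(Backend_t *backend) {
      runqueue_initialize(&backend->runqueue);
    }

Since the run queue is no longer a Ruby object, the embedding backend's GC mark callback is expected to forward to runqueue_mark(&backend->runqueue).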
data/ext/polyphony/thread.c

@@ -1,19 +1,15 @@
 #include "polyphony.h"
+#include "backend_common.h"
 
 ID ID_deactivate_all_watchers_post_fork;
 ID ID_ivar_backend;
 ID ID_ivar_join_wait_queue;
 ID ID_ivar_main_fiber;
 ID ID_ivar_terminated;
-ID ID_ivar_runqueue;
 ID ID_stop;
 
 static VALUE Thread_setup_fiber_scheduling(VALUE self) {
-  VALUE runqueue = rb_funcall(cRunqueue, ID_new, 0);
-
   rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
-  rb_ivar_set(self, ID_ivar_runqueue, runqueue);
-
   return self;
 }
 
@@ -21,53 +17,20 @@ static VALUE SYM_scheduled_fibers;
 static VALUE SYM_pending_watchers;
 
 static VALUE Thread_fiber_scheduling_stats(VALUE self) {
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-  VALUE stats = rb_hash_new();
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  long pending_count;
-
-  long scheduled_count = Runqueue_len(runqueue);
-  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(scheduled_count));
-
-  pending_count = Backend_pending_count(backend);
-  rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(pending_count));
+  struct backend_stats backend_stats = Backend_stats(rb_ivar_get(self, ID_ivar_backend));
 
+  VALUE stats = rb_hash_new();
+  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(backend_stats.scheduled_fibers));
+  rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(backend_stats.pending_ops));
   return stats;
 }
 
-void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
-  VALUE runqueue;
-  int already_runnable;
-
-  if (rb_fiber_alive_p(fiber) != Qtrue) return;
-  already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;
-
-  COND_TRACE(3, SYM_fiber_schedule, fiber, value);
-  runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  (prioritize ? Runqueue_unshift : Runqueue_push)(runqueue, fiber, value, already_runnable);
-  if (!already_runnable) {
-    rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
-    if (rb_thread_current() != self) {
-      // If the fiber scheduling is done across threads, we need to make sure the
-      // target thread is woken up in case it is in the middle of running its
-      // event selector. Otherwise it's gonna be stuck waiting for an event to
-      // happen, not knowing that it there's already a fiber ready to run in its
-      // run queue.
-      VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-      Backend_wakeup(backend);
-    }
-  }
-}
-
-VALUE Thread_fiber_scheduling_index(VALUE self, VALUE fiber) {
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-
-  return INT2NUM(Runqueue_index_of(runqueue, fiber));
+inline void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
+  Backend_schedule_fiber(self, rb_ivar_get(self, ID_ivar_backend), fiber, value, prioritize);
 }
 
 VALUE Thread_fiber_unschedule(VALUE self, VALUE fiber) {
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  Runqueue_delete(runqueue, fiber);
+  Backend_unschedule_fiber(rb_ivar_get(self, ID_ivar_backend), fiber);
   return self;
 }
 
@@ -82,65 +45,15 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
 }
 
 VALUE Thread_switch_fiber(VALUE self) {
-  VALUE current_fiber = rb_fiber_current();
-  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-  runqueue_entry next;
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-  unsigned int pending_ops_count = Backend_pending_count(backend);
-  unsigned int backend_was_polled = 0;
-  unsigned int idle_tasks_run_count = 0;
-
-  if (__tracing_enabled__ && (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse))
-    TRACE(2, SYM_fiber_switchpoint, current_fiber);
-
-  while (1) {
-    next = Runqueue_shift(runqueue);
-    if (next.fiber != Qnil) {
-      // Polling for I/O op completion is normally done when the run queue is
-      // empty, but if the runqueue never empties, we'll never get to process
-      // any event completions. In order to prevent this, an anti-starve
-      // mechanism is employed, under the following conditions:
-      // - a blocking poll was not yet performed
-      // - there are pending blocking operations
-      // - the runqueue has signalled that a non-blocking poll should be
-      //   performed
-      // - the run queue length high watermark has reached its threshold (currently 128)
-      // - the run queue switch counter has reached its threshold (currently 64)
-      if (!backend_was_polled && pending_ops_count && Runqueue_should_poll_nonblocking(runqueue)) {
-        // this prevents event starvation in case the run queue never empties
-        Backend_poll(backend, Qtrue, current_fiber, runqueue);
-      }
-      break;
-    }
-
-    if (!idle_tasks_run_count) {
-      idle_tasks_run_count++;
-      Backend_run_idle_tasks(backend);
-    }
-    if (pending_ops_count == 0) break;
-    Backend_poll(backend, Qnil, current_fiber, runqueue);
-    backend_was_polled = 1;
-  }
-
-  if (next.fiber == Qnil) return Qnil;
-
-  // run next fiber
-  COND_TRACE(3, SYM_fiber_run, next.fiber, next.value);
-
-  rb_ivar_set(next.fiber, ID_ivar_runnable, Qnil);
-  RB_GC_GUARD(next.fiber);
-  RB_GC_GUARD(next.value);
-  return (next.fiber == current_fiber) ?
-    next.value : FIBER_TRANSFER(next.fiber, next.value);
+  return Backend_switch_fiber(rb_ivar_get(self, ID_ivar_backend));
 }
 
 VALUE Thread_fiber_schedule_and_wakeup(VALUE self, VALUE fiber, VALUE resume_obj) {
-  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
   if (fiber != Qnil) {
     Thread_schedule_fiber_with_priority(self, fiber, resume_obj);
   }
 
-  if (Backend_wakeup(backend) == Qnil) {
+  if (Backend_wakeup(rb_ivar_get(self, ID_ivar_backend)) == Qnil) {
     // we're not inside the ev_loop, so we just do a switchpoint
     Thread_switch_fiber(self);
   }
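With Thread_switch_fiber reduced to a one-line delegation, the scheduling loop deleted above now lives behind Backend_switch_fiber in backend_common.c (+108 lines, not shown here). Assuming it preserves the shape of the deleted code, a condensed sketch looks like this (backend_poll_blocking and backend_poll_nonblocking are stand-in names for whatever polling helpers the backends define, and Backend_t is the hypothetical layout sketched earlier):

    /* Sketch only; mirrors the deleted Thread_switch_fiber loop. */
    VALUE Backend_switch_fiber_sketch(Backend_t *backend) {
      runqueue_entry next;
      int backend_was_polled = 0;

      while (1) {
        next = runqueue_shift(&backend->runqueue);
        if (next.fiber != Qnil) {
          /* anti-starvation: non-blocking poll when the runqueue never empties */
          if (!backend_was_polled && backend->pending_count &&
              runqueue_should_poll_nonblocking(&backend->runqueue))
            backend_poll_nonblocking(backend);
          break;
        }
        if (!backend->pending_count) break;
        backend_poll_blocking(backend); /* runqueue empty: wait for events */
        backend_was_polled = 1;
      }
      if (next.fiber == Qnil) return Qnil;
      return FIBER_TRANSFER(next.fiber, next.value); /* run the next fiber */
    }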
@@ -166,7 +79,6 @@ void Init_Thread() {
   rb_define_method(rb_cThread, "schedule_fiber_with_priority",
     Thread_schedule_fiber_with_priority, 2);
   rb_define_method(rb_cThread, "switch_fiber", Thread_switch_fiber, 0);
-  rb_define_method(rb_cThread, "fiber_scheduling_index", Thread_fiber_scheduling_index, 1);
   rb_define_method(rb_cThread, "fiber_unschedule", Thread_fiber_unschedule, 1);
 
   rb_define_singleton_method(rb_cThread, "backend", Thread_class_backend, 0);
@@ -178,7 +90,6 @@ void Init_Thread() {
   ID_ivar_join_wait_queue = rb_intern("@join_wait_queue");
   ID_ivar_main_fiber = rb_intern("@main_fiber");
   ID_ivar_terminated = rb_intern("@terminated");
-  ID_ivar_runqueue = rb_intern("@runqueue");
   ID_stop = rb_intern("stop");
 
   SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));