polyphony 0.55.0 → 0.59.1

This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
@@ -1,6 +1,5 @@
  #include "polyphony.h"

- ID ID_fiber_trace;
  ID ID_ivar_auto_watcher;
  ID ID_ivar_mailbox;
  ID ID_ivar_result;
@@ -169,7 +168,6 @@ void Init_Fiber() {
    rb_global_variable(&SYM_runnable);
    rb_global_variable(&SYM_waiting);

-   ID_fiber_trace = rb_intern("__fiber_trace__");
    ID_ivar_auto_watcher = rb_intern("@auto_watcher");
    ID_ivar_mailbox = rb_intern("@mailbox");
    ID_ivar_result = rb_intern("@result");
@@ -10,6 +10,7 @@ ID ID_each;
  ID ID_inspect;
  ID ID_invoke;
  ID ID_new;
+ ID ID_ivar_blocking_mode;
  ID ID_ivar_io;
  ID ID_ivar_runnable;
  ID ID_ivar_running;
@@ -41,11 +42,6 @@ static VALUE Polyphony_suspend(VALUE self) {
    return ret;
  }

- VALUE Polyphony_trace(VALUE self, VALUE enabled) {
-   __tracing_enabled__ = RTEST(enabled) ? 1 : 0;
-   return Qnil;
- }
-
  VALUE Polyphony_backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
    return Backend_accept(BACKEND(), server_socket, socket_class);
  }
@@ -129,8 +125,6 @@ VALUE Polyphony_backend_write(int argc, VALUE *argv, VALUE self) {
  void Init_Polyphony() {
    mPolyphony = rb_define_module("Polyphony");

-   rb_define_singleton_method(mPolyphony, "trace", Polyphony_trace, 1);
-
    // backend methods
    rb_define_singleton_method(mPolyphony, "backend_accept", Polyphony_backend_accept, 2);
    rb_define_singleton_method(mPolyphony, "backend_accept_loop", Polyphony_backend_accept_loop, 2);
@@ -158,19 +152,20 @@ void Init_Polyphony() {

    cTimeoutException = rb_define_class_under(mPolyphony, "TimeoutException", rb_eException);

-   ID_call = rb_intern("call");
-   ID_caller = rb_intern("caller");
-   ID_clear = rb_intern("clear");
-   ID_each = rb_intern("each");
-   ID_inspect = rb_intern("inspect");
-   ID_invoke = rb_intern("invoke");
-   ID_ivar_io = rb_intern("@io");
-   ID_ivar_runnable = rb_intern("@runnable");
-   ID_ivar_running = rb_intern("@running");
-   ID_ivar_thread = rb_intern("@thread");
-   ID_new = rb_intern("new");
-   ID_signal = rb_intern("signal");
-   ID_size = rb_intern("size");
-   ID_switch_fiber = rb_intern("switch_fiber");
-   ID_transfer = rb_intern("transfer");
+   ID_call               = rb_intern("call");
+   ID_caller             = rb_intern("caller");
+   ID_clear              = rb_intern("clear");
+   ID_each               = rb_intern("each");
+   ID_inspect            = rb_intern("inspect");
+   ID_invoke             = rb_intern("invoke");
+   ID_ivar_blocking_mode = rb_intern("@blocking_mode");
+   ID_ivar_io            = rb_intern("@io");
+   ID_ivar_runnable      = rb_intern("@runnable");
+   ID_ivar_running       = rb_intern("@running");
+   ID_ivar_thread        = rb_intern("@thread");
+   ID_new                = rb_intern("new");
+   ID_signal             = rb_intern("signal");
+   ID_size               = rb_intern("size");
+   ID_switch_fiber       = rb_intern("switch_fiber");
+   ID_transfer           = rb_intern("transfer");
  }
@@ -5,6 +5,7 @@

  #include "ruby.h"
  #include "runqueue_ring_buffer.h"
+ #include "backend_common.h"

  // debugging
  #define OBJ_ID(obj) (NUM2LONG(rb_funcall(obj, rb_intern("object_id"), 0)))
@@ -18,10 +19,6 @@
      free(strings); \
    }

- // tracing
- #define TRACE(...) rb_funcall(rb_cObject, ID_fiber_trace, __VA_ARGS__)
- #define COND_TRACE(...) if (__tracing_enabled__) { TRACE(__VA_ARGS__); }
-
  // exceptions
  #define TEST_EXCEPTION(ret) (rb_obj_is_kind_of(ret, rb_eException) == Qtrue)
  #define RAISE_EXCEPTION(e) rb_funcall(e, ID_invoke, 0);
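
The hunks above and below strip out Polyphony's fiber-tracing layer in one sweep: the ID_fiber_trace hook ID, the Polyphony.trace singleton method with its __tracing_enabled__ flag, the TRACE / COND_TRACE macros, and (further down) Init_Tracing. For reference, a call such as COND_TRACE(3, SYM_fiber_schedule, fiber, value) in the old code expanded to a guarded callback:

    /* Expansion of the deleted COND_TRACE/TRACE macros, reconstructed
       from the two #define lines removed above. */
    if (__tracing_enabled__) {
      rb_funcall(rb_cObject, ID_fiber_trace, 3, SYM_fiber_schedule, fiber, value);
    }

so every switchpoint paid at least a global-flag check, and a full Ruby method dispatch whenever tracing was on.
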
@@ -36,17 +33,16 @@
  extern VALUE mPolyphony;
  extern VALUE cQueue;
  extern VALUE cEvent;
- extern VALUE cRunqueue;
  extern VALUE cTimeoutException;

  extern ID ID_call;
  extern ID ID_caller;
  extern ID ID_clear;
  extern ID ID_each;
- extern ID ID_fiber_trace;
  extern ID ID_inspect;
  extern ID ID_invoke;
  extern ID ID_ivar_backend;
+ extern ID ID_ivar_blocking_mode;
  extern ID ID_ivar_io;
  extern ID ID_ivar_runnable;
  extern ID ID_ivar_running;
@@ -66,14 +62,6 @@ extern VALUE SYM_fiber_schedule;
  extern VALUE SYM_fiber_switchpoint;
  extern VALUE SYM_fiber_terminate;

- extern int __tracing_enabled__;
-
- enum {
-   FIBER_STATE_NOT_SCHEDULED = 0,
-   FIBER_STATE_WAITING = 1,
-   FIBER_STATE_SCHEDULED = 2
- };
-
  VALUE Fiber_auto_watcher(VALUE self);
  void Fiber_make_runnable(VALUE fiber, VALUE value);

@@ -120,10 +108,14 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write);
  VALUE Backend_waitpid(VALUE self, VALUE pid);
  VALUE Backend_write_m(int argc, VALUE *argv, VALUE self);

- unsigned int Backend_pending_count(VALUE self);
- VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue);
+ VALUE Backend_poll(VALUE self, VALUE blocking);
  VALUE Backend_wait_event(VALUE self, VALUE raise_on_exception);
  VALUE Backend_wakeup(VALUE self);
+ VALUE Backend_run_idle_tasks(VALUE self);
+ VALUE Backend_switch_fiber(VALUE self);
+ void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize);
+ struct backend_stats Backend_stats(VALUE self);
+ void Backend_unschedule_fiber(VALUE self, VALUE fiber);

  VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
  VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
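
Taken together, these prototype changes move ownership of the run queue and the scheduling loop into the backend: Backend_poll now takes a single blocking flag instead of the old (nowait, current_fiber, runqueue) triple, Backend_pending_count is folded into Backend_stats, and fibers are scheduled via Backend_schedule_fiber / Backend_unschedule_fiber. The diff never shows struct backend_stats itself; judging from the field accesses in the thread.c hunk further down, the definition that backend_common.h presumably provides looks roughly like this (field types are an assumption):

    /* Hypothetical sketch: the real definition lives in backend_common.h,
       which this diff does not include. The field names come from the
       thread.c hunk below; the types are assumed. */
    struct backend_stats {
      int scheduled_fibers; /* fibers waiting in the backend's run queue */
      int pending_ops;      /* in-flight blocking operations (watchers) */
    };
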
@@ -5,10 +5,8 @@ void Init_Polyphony();
  void Init_Backend();
  void Init_Queue();
  void Init_Event();
- void Init_Runqueue();
  void Init_SocketExtensions();
  void Init_Thread();
- void Init_Tracing();

  #ifdef POLYPHONY_PLAYGROUND
  extern void playground();
@@ -20,10 +18,8 @@ void Init_polyphony_ext() {
    Init_Backend();
    Init_Queue();
    Init_Event();
-   Init_Runqueue();
    Init_Fiber();
    Init_Thread();
-   Init_Tracing();

    Init_SocketExtensions();

@@ -1,78 +1,35 @@
  #include "polyphony.h"
- #include "runqueue_ring_buffer.h"
-
- typedef struct queue {
-   runqueue_ring_buffer entries;
-   unsigned int high_watermark;
-   unsigned int switch_count;
- } Runqueue_t;
-
- VALUE cRunqueue = Qnil;
-
- static void Runqueue_mark(void *ptr) {
-   Runqueue_t *runqueue = ptr;
-   runqueue_ring_buffer_mark(&runqueue->entries);
- }
-
- static void Runqueue_free(void *ptr) {
-   Runqueue_t *runqueue = ptr;
-   runqueue_ring_buffer_free(&runqueue->entries);
-   xfree(ptr);
- }
-
- static size_t Runqueue_size(const void *ptr) {
-   return sizeof(Runqueue_t);
- }
-
- static const rb_data_type_t Runqueue_type = {
-   "Runqueue",
-   {Runqueue_mark, Runqueue_free, Runqueue_size,},
-   0, 0, 0
- };
-
- static VALUE Runqueue_allocate(VALUE klass) {
-   Runqueue_t *runqueue;
-
-   runqueue = ALLOC(Runqueue_t);
-   return TypedData_Wrap_Struct(klass, &Runqueue_type, runqueue);
- }
-
- #define GetRunqueue(obj, runqueue) \
-   TypedData_Get_Struct((obj), Runqueue_t, &Runqueue_type, (runqueue))
-
- static VALUE Runqueue_initialize(VALUE self) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
+ #include "runqueue.h"

+ inline void runqueue_initialize(runqueue_t *runqueue) {
    runqueue_ring_buffer_init(&runqueue->entries);
    runqueue->high_watermark = 0;
    runqueue->switch_count = 0;
+ }

-   return self;
+ inline void runqueue_finalize(runqueue_t *runqueue) {
+   runqueue_ring_buffer_free(&runqueue->entries);
  }

- void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
+ inline void runqueue_mark(runqueue_t *runqueue) {
+   runqueue_ring_buffer_mark(&runqueue->entries);
+ }

+ inline void runqueue_push(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule) {
    if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
    runqueue_ring_buffer_push(&runqueue->entries, fiber, value);
    if (runqueue->entries.count > runqueue->high_watermark)
      runqueue->high_watermark = runqueue->entries.count;
  }

- void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
+ inline void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule) {
    if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
    runqueue_ring_buffer_unshift(&runqueue->entries, fiber, value);
    if (runqueue->entries.count > runqueue->high_watermark)
      runqueue->high_watermark = runqueue->entries.count;
  }

- runqueue_entry Runqueue_shift(VALUE self) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
+ inline runqueue_entry runqueue_shift(runqueue_t *runqueue) {
    runqueue_entry entry = runqueue_ring_buffer_shift(&runqueue->entries);
    if (entry.fiber == Qnil)
      runqueue->high_watermark = 0;
@@ -81,45 +38,30 @@ runqueue_entry Runqueue_shift(VALUE self) {
    return entry;
  }

- void Runqueue_delete(VALUE self, VALUE fiber) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
+ inline void runqueue_delete(runqueue_t *runqueue, VALUE fiber) {
    runqueue_ring_buffer_delete(&runqueue->entries, fiber);
  }

- int Runqueue_index_of(VALUE self, VALUE fiber) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
+ inline int runqueue_index_of(runqueue_t *runqueue, VALUE fiber) {
    return runqueue_ring_buffer_index_of(&runqueue->entries, fiber);
  }

- void Runqueue_clear(VALUE self) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
+ inline void runqueue_clear(runqueue_t *runqueue) {
    runqueue_ring_buffer_clear(&runqueue->entries);
  }

- long Runqueue_len(VALUE self) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
-
+ inline long runqueue_len(runqueue_t *runqueue) {
    return runqueue->entries.count;
  }

- int Runqueue_empty_p(VALUE self) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
-
+ inline int runqueue_empty_p(runqueue_t *runqueue) {
    return (runqueue->entries.count == 0);
  }

  static const unsigned int ANTI_STARVE_HIGH_WATERMARK_THRESHOLD = 128;
  static const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;

- int Runqueue_should_poll_nonblocking(VALUE self) {
-   Runqueue_t *runqueue;
-   GetRunqueue(self, runqueue);
-
+ inline int runqueue_should_poll_nonblocking(runqueue_t *runqueue) {
    if (runqueue->high_watermark < ANTI_STARVE_HIGH_WATERMARK_THRESHOLD) return 0;
    if (runqueue->switch_count < ANTI_STARVE_SWITCH_COUNT_THRESHOLD) return 0;

@@ -127,10 +69,3 @@ int Runqueue_should_poll_nonblocking(VALUE self) {
    runqueue->switch_count = 0;
    return 1;
  }
-
- void Init_Runqueue() {
-   cRunqueue = rb_define_class_under(mPolyphony, "Runqueue", rb_cObject);
-   rb_define_alloc_func(cRunqueue, Runqueue_allocate);
-
-   rb_define_method(cRunqueue, "initialize", Runqueue_initialize, 0);
- }
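
Note that runqueue_should_poll_nonblocking only fires once the queue has been both long (high watermark at or above 128) and busy (at least 64 switches since the last reset), after which the switch counter starts over. A minimal sketch of the kind of caller this is designed for, per the anti-starve logic deleted from Thread_switch_fiber below (the backend_t layout and helper shown here are assumptions, not part of this diff):

    /* Hypothetical caller: a backend that embeds the new plain-C runqueue_t. */
    typedef struct backend {
      runqueue_t runqueue;
      unsigned int pending_ops; /* outstanding blocking operations */
    } backend_t;

    static void poll_if_starving(backend_t *backend) {
      /* Even with runnable fibers queued, do one non-blocking poll when the
         queue has stayed long and busy, so I/O completions aren't starved. */
      if (backend->pending_ops && runqueue_should_poll_nonblocking(&backend->runqueue)) {
        /* ... poll the event selector without blocking ... */
      }
    }
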
@@ -0,0 +1,27 @@
+ #ifndef RUNQUEUE_H
+ #define RUNQUEUE_H
+
+ #include "polyphony.h"
+ #include "runqueue_ring_buffer.h"
+
+ typedef struct runqueue {
+   runqueue_ring_buffer entries;
+   unsigned int high_watermark;
+   unsigned int switch_count;
+ } runqueue_t;
+
+ void runqueue_initialize(runqueue_t *runqueue);
+ void runqueue_finalize(runqueue_t *runqueue);
+ void runqueue_mark(runqueue_t *runqueue);
+
+ void runqueue_push(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
+ void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int reschedule);
+ runqueue_entry runqueue_shift(runqueue_t *runqueue);
+ void runqueue_delete(runqueue_t *runqueue, VALUE fiber);
+ int runqueue_index_of(runqueue_t *runqueue, VALUE fiber);
+ void runqueue_clear(runqueue_t *runqueue);
+ long runqueue_len(runqueue_t *runqueue);
+ int runqueue_empty_p(runqueue_t *runqueue);
+ int runqueue_should_poll_nonblocking(runqueue_t *runqueue);
+
+ #endif /* RUNQUEUE_H */
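
The new runqueue.h makes explicit that the run queue is no longer a Ruby object: cRunqueue, the TypedData wrapper, and Init_Runqueue are gone, and an owner embeds runqueue_t directly, calling runqueue_mark from its own GC mark callback. A short self-contained usage sketch of the new API (the demo function and its fiber/value parameters are hypothetical):

    #include "runqueue.h"

    /* Exercise the new plain-C runqueue API; in the real extension the
       backend embeds the runqueue and feeds it live fiber VALUEs. */
    void runqueue_demo(VALUE fiber, VALUE value) {
      runqueue_t runqueue;
      runqueue_initialize(&runqueue);

      runqueue_push(&runqueue, fiber, value, 0);    /* append */
      runqueue_unshift(&runqueue, fiber, value, 1); /* move to front, rescheduling */

      while (!runqueue_empty_p(&runqueue)) {
        runqueue_entry entry = runqueue_shift(&runqueue);
        if (entry.fiber == Qnil) break;
        /* ... transfer control to entry.fiber with entry.value ... */
      }

      runqueue_finalize(&runqueue); /* frees the underlying ring buffer */
    }
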
@@ -1,19 +1,15 @@
  #include "polyphony.h"
+ #include "backend_common.h"

  ID ID_deactivate_all_watchers_post_fork;
  ID ID_ivar_backend;
  ID ID_ivar_join_wait_queue;
  ID ID_ivar_main_fiber;
  ID ID_ivar_terminated;
- ID ID_ivar_runqueue;
  ID ID_stop;

  static VALUE Thread_setup_fiber_scheduling(VALUE self) {
-   VALUE runqueue = rb_funcall(cRunqueue, ID_new, 0);
-
    rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
-   rb_ivar_set(self, ID_ivar_runqueue, runqueue);
-
    return self;
  }

@@ -21,53 +17,20 @@ static VALUE SYM_scheduled_fibers;
  static VALUE SYM_pending_watchers;

  static VALUE Thread_fiber_scheduling_stats(VALUE self) {
-   VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-   VALUE stats = rb_hash_new();
-   VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-   long pending_count;
-
-   long scheduled_count = Runqueue_len(runqueue);
-   rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(scheduled_count));
-
-   pending_count = Backend_pending_count(backend);
-   rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(pending_count));
+   struct backend_stats backend_stats = Backend_stats(rb_ivar_get(self, ID_ivar_backend));

+   VALUE stats = rb_hash_new();
+   rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(backend_stats.scheduled_fibers));
+   rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(backend_stats.pending_ops));
    return stats;
  }

- void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
-   VALUE runqueue;
-   int already_runnable;
-
-   if (rb_fiber_alive_p(fiber) != Qtrue) return;
-   already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;
-
-   COND_TRACE(3, SYM_fiber_schedule, fiber, value);
-   runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-   (prioritize ? Runqueue_unshift : Runqueue_push)(runqueue, fiber, value, already_runnable);
-   if (!already_runnable) {
-     rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
-     if (rb_thread_current() != self) {
-       // If fiber scheduling is done across threads, we need to make sure the
-       // target thread is woken up in case it is in the middle of running its
-       // event selector. Otherwise it's gonna be stuck waiting for an event to
-       // happen, not knowing that there's already a fiber ready to run in its
-       // run queue.
-       VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-       Backend_wakeup(backend);
-     }
-   }
- }
-
- VALUE Thread_fiber_scheduling_index(VALUE self, VALUE fiber) {
-   VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-
-   return INT2NUM(Runqueue_index_of(runqueue, fiber));
+ inline void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
+   Backend_schedule_fiber(self, rb_ivar_get(self, ID_ivar_backend), fiber, value, prioritize);
  }

  VALUE Thread_fiber_unschedule(VALUE self, VALUE fiber) {
-   VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-   Runqueue_delete(runqueue, fiber);
+   Backend_unschedule_fiber(rb_ivar_get(self, ID_ivar_backend), fiber);
    return self;
  }

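schedule_fiber previously managed the @runnable ivar, the @runqueue ivar, and the cross-thread wakeup itself; all of that now sits behind Backend_schedule_fiber. Only the signature is confirmed by this diff, but reassembling the deleted body against the new plain-C runqueue API suggests the backend side looks something like this (a reconstruction, not code from the diff):

    /* Hypothetical reconstruction of the logic that moved into the backend;
       mirrors the schedule_fiber body deleted above. */
    static void schedule_fiber_sketch(VALUE thread, VALUE backend, runqueue_t *runqueue,
      VALUE fiber, VALUE value, int prioritize)
    {
      if (rb_fiber_alive_p(fiber) != Qtrue) return;

      int already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;
      (prioritize ? runqueue_unshift : runqueue_push)(runqueue, fiber, value, already_runnable);
      if (already_runnable) return;

      rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
      /* Cross-thread scheduling: wake the target thread in case it is blocked
         inside its event selector, as the deleted comment above explains. */
      if (rb_thread_current() != thread) Backend_wakeup(backend);
    }
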
@@ -82,60 +45,15 @@ VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value)
  }

  VALUE Thread_switch_fiber(VALUE self) {
-   VALUE current_fiber = rb_fiber_current();
-   VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
-   runqueue_entry next;
-   VALUE backend = rb_ivar_get(self, ID_ivar_backend);
-   unsigned int pending_ops_count = Backend_pending_count(backend);
-   unsigned int backend_was_polled = 0;
-
-   if (__tracing_enabled__ && (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse))
-     TRACE(2, SYM_fiber_switchpoint, current_fiber);
-
-   while (1) {
-     next = Runqueue_shift(runqueue);
-     if (next.fiber != Qnil) {
-       // Polling for I/O op completion is normally done when the run queue is
-       // empty, but if the runqueue never empties, we'll never get to process
-       // any event completions. In order to prevent this, an anti-starve
-       // mechanism is employed, under the following conditions:
-       // - a blocking poll was not yet performed
-       // - there are pending blocking operations
-       // - the runqueue has signalled that a non-blocking poll should be
-       //   performed
-       // - the run queue length high watermark has reached its threshold (currently 128)
-       // - the run queue switch counter has reached its threshold (currently 64)
-       if (!backend_was_polled && pending_ops_count && Runqueue_should_poll_nonblocking(runqueue)) {
-         // this prevents event starvation in case the run queue never empties
-         Backend_poll(backend, Qtrue, current_fiber, runqueue);
-       }
-       break;
-     }
-
-     if (pending_ops_count == 0) break;
-     Backend_poll(backend, Qnil, current_fiber, runqueue);
-     backend_was_polled = 1;
-   }
-
-   if (next.fiber == Qnil) return Qnil;
-
-   // run next fiber
-   COND_TRACE(3, SYM_fiber_run, next.fiber, next.value);
-
-   rb_ivar_set(next.fiber, ID_ivar_runnable, Qnil);
-   RB_GC_GUARD(next.fiber);
-   RB_GC_GUARD(next.value);
-   return (next.fiber == current_fiber) ?
-     next.value : FIBER_TRANSFER(next.fiber, next.value);
+   return Backend_switch_fiber(rb_ivar_get(self, ID_ivar_backend));
  }

  VALUE Thread_fiber_schedule_and_wakeup(VALUE self, VALUE fiber, VALUE resume_obj) {
-   VALUE backend = rb_ivar_get(self, ID_ivar_backend);
    if (fiber != Qnil) {
      Thread_schedule_fiber_with_priority(self, fiber, resume_obj);
    }

-   if (Backend_wakeup(backend) == Qnil) {
+   if (Backend_wakeup(rb_ivar_get(self, ID_ivar_backend)) == Qnil) {
      // we're not inside the ev_loop, so we just do a switchpoint
      Thread_switch_fiber(self);
    }
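
Thread_switch_fiber thus collapses to a delegation: the whole select loop above, anti-starve poll included, now lives in Backend_switch_fiber next to the backend-owned run queue. Note the polarity flip in the poll API along the way: the old second parameter was nowait, the new one is blocking, so the two deleted call shapes presumably map onto the new signature as follows (a sketch; the new call sites inside the backend are not shown in this diff):

    /* Old: Backend_poll(backend, Qtrue, current_fiber, runqueue)  -- nowait */
    Backend_poll(backend, Qfalse); /* anti-starve pass: poll without blocking */
    /* Old: Backend_poll(backend, Qnil, current_fiber, runqueue)   -- blocking */
    Backend_poll(backend, Qtrue);  /* run queue empty: block for events */
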
@@ -161,7 +79,6 @@ void Init_Thread() {
    rb_define_method(rb_cThread, "schedule_fiber_with_priority",
      Thread_schedule_fiber_with_priority, 2);
    rb_define_method(rb_cThread, "switch_fiber", Thread_switch_fiber, 0);
-   rb_define_method(rb_cThread, "fiber_scheduling_index", Thread_fiber_scheduling_index, 1);
    rb_define_method(rb_cThread, "fiber_unschedule", Thread_fiber_unschedule, 1);

    rb_define_singleton_method(rb_cThread, "backend", Thread_class_backend, 0);
@@ -173,7 +90,6 @@ void Init_Thread() {
    ID_ivar_join_wait_queue = rb_intern("@join_wait_queue");
    ID_ivar_main_fiber = rb_intern("@main_fiber");
    ID_ivar_terminated = rb_intern("@terminated");
-   ID_ivar_runqueue = rb_intern("@runqueue");
    ID_stop = rb_intern("stop");

    SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));