polyphony 0.45.4 → 0.45.5

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 52f8ebf1104d921c9e3b0afa0b4605ee8f912eabe8b6264898f23905d2cb6c6f
- data.tar.gz: 38f0f7cd62997ba5681185c3c4859f57d86de875d8292faf67fffa53c7160181
+ metadata.gz: fe5d86b07fc6f29d01897a7790deac8f0544bb61dbd486fb8ea104dd315b0e80
+ data.tar.gz: 3e1086b9395d63835a09051c52b7f8496013f190149df35532333b6c27b74169
  SHA512:
- metadata.gz: 2429259a2e79757ec879c4c00db8fd8b87302b9e0a61c6ae2ddd5fdb6e1a3a06ac63d551ed33a4c77480e156eb1247a0fab4a263179a60fd2e9a2d3173831a58
- data.tar.gz: 04f58dafb5faf1e21b08fd85d508ceb6e9869b0a69d63c133661a834f169eb750b599639ffdbff474abf7f92bc6bfee3a43b9442c2885a6dd88328657b0e1d36
+ metadata.gz: d6c7a78a46b9084f60ee838e82b7cde6f287eea2c500f779e5014103a400da98dc066adef58631033591a8de5278cc4f7b95ba8dcdac3ad22ecd68668a7d68e4
+ data.tar.gz: d32e7def9aa63ecb5c53b46020079e3384809ff786478e0b87fce1b141abb85d3c69d1f490b1d0a3bcbfaf972a3779939ac42bec512b12132d3b46b159754de5
@@ -1,3 +1,12 @@
+ ## 0.45.5
+
+ * Fix compilation error (#43)
+ * Add support for resetting move_on_after, cancel_after timeouts
+ * Optimize anti-event starvation polling
+ * Implement optimized runqueue for better performance
+ * Schedule parent with priority on uncaught exception
+ * Fix race condition in `Mutex#synchronize` (#41)
+
  ## 0.45.4

  * Improve signal trapping mechanism
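The resettable-timeout entry above is the main user-facing addition in this release; it is exercised by the new `test_move_on_after_with_reset` and `test_cancel_after_with_reset` tests further down in this diff. A minimal sketch of the API as those tests use it (the block argument is the canceller fiber, which is assumed to respond to `reset`):

```ruby
require 'polyphony'

# move_on_after: resetting the canceller pushes the deadline forward, so two
# 0.7s sleeps complete inside a 1s timeout instead of triggering the move-on.
value = move_on_after(1, with_value: :timed_out) do |timeout|
  sleep 0.7
  timeout.reset # restart the 1s countdown
  sleep 0.7
  :completed
end
# value #=> :completed (without the reset it would be :timed_out)

# cancel_after exposes the same mechanism; here the reset prevents the
# cancellation exception from being raised inside the block.
cancel_after(1) do |canceller|
  sleep 0.7
  canceller.reset
  sleep 0.7
end
```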
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- polyphony (0.45.4)
+ polyphony (0.45.5)

  GEM
  remote: https://rubygems.org/
data/TODO.md CHANGED
@@ -1,11 +1,6 @@
- (
- io_uring: some work has been done on an io_uring based scheduler here:
- https://github.com/dsh0416/evt
-
- This can serve as a starting point for doing stuff with io_uring
- )
+ - io_uring

- 0.45.4
+ 0.46

  - Adapter for io/console (what does `IO#raw` do?)
  - Adapter for Pry and IRB (Which fixes #5 and #6)
@@ -14,7 +9,7 @@
  - Fix backtrace for `Timeout.timeout` API (see timeout example).
  - Check why worker-thread example doesn't work.

- 0.46.0
+ 0.47

  - Debugging
  - Eat your own dogfood: need a good tool to check what's going on when some
@@ -128,8 +123,6 @@
  - discuss using `snooze` for ensuring responsiveness when executing CPU-bound work


- ## 0.47
-
  ### Some more API work, more docs

  - sintra app with database access (postgresql)
@@ -141,14 +134,10 @@
  - proceed from there


- ## 0.48
-
  ### Sinatra / Sidekiq

  - Pull out redis/postgres code, put into new `polyphony-xxx` gems

- ## 0.49
-
  ### Testing && Docs

  - More tests
@@ -159,8 +148,6 @@
  - `IO.foreach`
  - `Process.waitpid`

- ## 0.50 DNS
-
  ### DNS client

  ```ruby
@@ -18,7 +18,6 @@ def bm(fibers, iterations)
  Fiber.current.await_all_children
  dt = Time.now - t0
  puts "#{[fibers, iterations].inspect} setup: #{t0 - t_pre}s count: #{count} #{count / dt.to_f}/s"
- Thread.current.run_queue_trace
  end

  GC.disable
@@ -18,7 +18,7 @@
  // VALUE LibevBackend_write(int argc, VALUE *argv, VALUE self);

  typedef VALUE (* backend_pending_count_t)(VALUE self);
- typedef VALUE (*backend_poll_t)(VALUE self, VALUE nowait, VALUE current_fiber, VALUE queue);
+ typedef VALUE (*backend_poll_t)(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue);
  typedef VALUE (* backend_ref_t)(VALUE self);
  typedef int (* backend_ref_count_t)(VALUE self);
  typedef void (* backend_reset_ref_count_t)(VALUE self);
@@ -3,7 +3,6 @@
  ID ID_fiber_trace;
  ID ID_ivar_auto_watcher;
  ID ID_ivar_mailbox;
- ID ID_ivar_result;
  ID ID_ivar_waiting_fibers;

  VALUE SYM_dead;
@@ -39,31 +38,49 @@ inline VALUE Fiber_auto_watcher(VALUE self) {
  return watcher;
  }

+ void Fiber_make_runnable(VALUE fiber, VALUE value) {
+ VALUE thread = rb_ivar_get(fiber, ID_ivar_thread);
+ if (thread == Qnil) {
+ // rb_raise(rb_eRuntimeError, "No thread set for fiber");
+ rb_warn("No thread set for fiber");
+ return;
+ }
+
+ Thread_schedule_fiber(thread, fiber, value);
+ }
+
+ void Fiber_make_runnable_with_priority(VALUE fiber, VALUE value) {
+ VALUE thread = rb_ivar_get(fiber, ID_ivar_thread);
+ if (thread == Qnil) {
+ // rb_raise(rb_eRuntimeError, "No thread set for fiber");
+ rb_warn("No thread set for fiber");
+ return;
+ }
+
+ Thread_schedule_fiber_with_priority(thread, fiber, value);
+ }
+
  static VALUE Fiber_schedule(int argc, VALUE *argv, VALUE self) {
  VALUE value = (argc == 0) ? Qnil : argv[0];
  Fiber_make_runnable(self, value);
  return self;
  }

+ static VALUE Fiber_schedule_with_priority(int argc, VALUE *argv, VALUE self) {
+ VALUE value = (argc == 0) ? Qnil : argv[0];
+ Fiber_make_runnable_with_priority(self, value);
+ return self;
+ }
+
  static VALUE Fiber_state(VALUE self) {
  if (!rb_fiber_alive_p(self) || (rb_ivar_get(self, ID_ivar_running) == Qfalse))
  return SYM_dead;
  if (rb_fiber_current() == self) return SYM_running;
- if (rb_ivar_get(self, ID_runnable) != Qnil) return SYM_runnable;
+ if (rb_ivar_get(self, ID_ivar_runnable) != Qnil) return SYM_runnable;

  return SYM_waiting;
  }

- void Fiber_make_runnable(VALUE fiber, VALUE value) {
- VALUE thread = rb_ivar_get(fiber, ID_ivar_thread);
- if (thread != Qnil) {
- Thread_schedule_fiber(thread, fiber, value);
- }
- else {
- rb_warn("No thread set for fiber (fiber, value, caller):");
- }
- }
-
  VALUE Fiber_await(VALUE self) {
  VALUE result;

@@ -119,6 +136,7 @@ void Init_Fiber() {
  VALUE cFiber = rb_const_get(rb_cObject, rb_intern("Fiber"));
  rb_define_method(cFiber, "safe_transfer", Fiber_safe_transfer, -1);
  rb_define_method(cFiber, "schedule", Fiber_schedule, -1);
+ rb_define_method(cFiber, "schedule_with_priority", Fiber_schedule_with_priority, -1);
  rb_define_method(cFiber, "state", Fiber_state, 0);
  rb_define_method(cFiber, "auto_watcher", Fiber_auto_watcher, 0);

@@ -143,7 +161,6 @@ void Init_Fiber() {
  ID_fiber_trace = rb_intern("__fiber_trace__");
  ID_ivar_auto_watcher = rb_intern("@auto_watcher");
  ID_ivar_mailbox = rb_intern("@mailbox");
- ID_ivar_result = rb_intern("@result");
  ID_ivar_waiting_fibers = rb_intern("@waiting_fibers");

  SYM_fiber_create = ID2SYM(rb_intern("fiber_create"));
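The fiber.c changes above add a `Fiber#schedule_with_priority` method, which goes through `Thread_schedule_fiber_with_priority` and places the fiber at the head of the runqueue instead of appending it; the library itself uses it further down in this diff to propagate an uncaught exception to the parent (`@parent&.schedule_with_priority(result)`). A small illustrative sketch, not taken from the gem's docs, showing the ordering the unshift implies:

```ruby
require 'polyphony'

order = []
f1 = spin { suspend; order << :f1 }
f2 = spin { suspend; order << :f2 }
snooze # let both fibers run until they suspend

f1.schedule               # appended to the back of the runqueue
f2.schedule_with_priority # unshifted to the front of the runqueue

snooze
order #=> expected [:f2, :f1] — f2 jumps ahead of f1
```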
@@ -126,16 +126,17 @@ VALUE LibevBackend_pending_count(VALUE self) {
  return INT2NUM(count);
  }

- VALUE LibevBackend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE queue) {
+ VALUE LibevBackend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue) {
  int is_nowait = nowait == Qtrue;
  LibevBackend_t *backend;
  GetLibevBackend(self, backend);

  if (is_nowait) {
- long runnable_count = Queue_len(queue);
  backend->run_no_wait_count++;
- if (backend->run_no_wait_count < runnable_count || backend->run_no_wait_count < 10)
- return self;
+ if (backend->run_no_wait_count < 10) return self;
+
+ long runnable_count = Runqueue_len(runqueue);
+ if (backend->run_no_wait_count < runnable_count) return self;
  }

  backend->run_no_wait_count = 0;
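This hunk is the "optimize anti-event starvation polling" entry from the changelog: the non-blocking poll is now skipped for the first few no-wait iterations before the runqueue length is even consulted, but the backend is still polled periodically while the runqueue stays busy. A rough Ruby-level sketch of the behaviour this protects (assumed behaviour, not a test from the gem): even when the runqueue never empties, timers and I/O events keep being processed.

```ruby
require 'polyphony'

# CPU-ish fibers that snooze in a tight loop, so the runqueue never empties.
3.times { spin { loop { snooze } } }

# Thanks to the periodic non-blocking poll, this timer still fires roughly
# on schedule instead of being starved by the busy fibers above.
ticker = spin do
  5.times { sleep 0.1; puts "tick #{Time.now}" }
end

ticker.await
```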
@@ -9,10 +9,10 @@ ID ID_each;
  ID ID_inspect;
  ID ID_invoke;
  ID ID_new;
+ ID ID_ivar_result;
+ ID ID_ivar_runnable;
  ID ID_ivar_running;
  ID ID_ivar_thread;
- ID ID_runnable;
- ID ID_runnable_value;
  ID ID_size;
  ID ID_signal;
  ID ID_switch_fiber;
@@ -61,11 +61,11 @@ void Init_Polyphony() {
  ID_each = rb_intern("each");
  ID_inspect = rb_intern("inspect");
  ID_invoke = rb_intern("invoke");
+ ID_ivar_result = rb_intern("@result");
+ ID_ivar_runnable = rb_intern("runnable");
  ID_ivar_running = rb_intern("@running");
  ID_ivar_thread = rb_intern("@thread");
  ID_new = rb_intern("new");
- ID_runnable = rb_intern("runnable");
- ID_runnable_value = rb_intern("runnable_value");
  ID_signal = rb_intern("signal");
  ID_size = rb_intern("size");
  ID_switch_fiber = rb_intern("switch_fiber");
@@ -5,6 +5,7 @@
  #include "ruby/io.h"
  #include "libev.h"
  #include "backend.h"
+ #include "runqueue_ring_buffer.h"

  // debugging
  #define OBJ_ID(obj) (NUM2LONG(rb_funcall(obj, rb_intern("object_id"), 0)))
@@ -30,6 +31,7 @@ extern backend_interface_t backend_interface;
  extern VALUE mPolyphony;
  extern VALUE cQueue;
  extern VALUE cEvent;
+ extern VALUE cRunqueue;

  extern ID ID_call;
  extern ID ID_caller;
@@ -39,12 +41,12 @@ extern ID ID_fiber_trace;
  extern ID ID_inspect;
  extern ID ID_invoke;
  extern ID ID_ivar_backend;
+ extern ID ID_ivar_result;
+ extern ID ID_ivar_runnable;
  extern ID ID_ivar_running;
  extern ID ID_ivar_thread;
  extern ID ID_new;
  extern ID ID_raise;
- extern ID ID_runnable;
- extern ID ID_runnable_value;
  extern ID ID_signal;
  extern ID ID_size;
  extern ID ID_switch_fiber;
@@ -79,7 +81,17 @@ VALUE Queue_delete(VALUE self, VALUE value);
  long Queue_len(VALUE self);
  void Queue_trace(VALUE self);

+
+ void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule);
+ void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule);
+ runqueue_entry Runqueue_shift(VALUE self);
+ void Runqueue_delete(VALUE self, VALUE fiber);
+ void Runqueue_clear(VALUE self);
+ long Runqueue_len(VALUE self);
+ int Runqueue_empty_p(VALUE self);
+
  VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
+ VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
  VALUE Thread_switch_fiber(VALUE thread);

  #endif /* POLYPHONY_H */
@@ -5,6 +5,7 @@ void Init_Polyphony();
  void Init_LibevBackend();
  void Init_Queue();
  void Init_Event();
+ void Init_Runqueue();
  void Init_Thread();
  void Init_Tracing();

@@ -16,6 +17,7 @@ void Init_polyphony_ext() {
  Init_LibevBackend();
  Init_Queue();
  Init_Event();
+ Init_Runqueue();
  Init_Fiber();
  Init_Thread();
  Init_Tracing();
@@ -0,0 +1,102 @@
+ #include "polyphony.h"
+ #include "runqueue_ring_buffer.h"
+
+ typedef struct queue {
+ runqueue_ring_buffer entries;
+ } Runqueue_t;
+
+ VALUE cRunqueue = Qnil;
+
+ static void Runqueue_mark(void *ptr) {
+ Runqueue_t *runqueue = ptr;
+ runqueue_ring_buffer_mark(&runqueue->entries);
+ }
+
+ static void Runqueue_free(void *ptr) {
+ Runqueue_t *runqueue = ptr;
+ runqueue_ring_buffer_free(&runqueue->entries);
+ xfree(ptr);
+ }
+
+ static size_t Runqueue_size(const void *ptr) {
+ return sizeof(Runqueue_t);
+ }
+
+ static const rb_data_type_t Runqueue_type = {
+ "Runqueue",
+ {Runqueue_mark, Runqueue_free, Runqueue_size,},
+ 0, 0, 0
+ };
+
+ static VALUE Runqueue_allocate(VALUE klass) {
+ Runqueue_t *runqueue;
+
+ runqueue = ALLOC(Runqueue_t);
+ return TypedData_Wrap_Struct(klass, &Runqueue_type, runqueue);
+ }
+
+ #define GetRunqueue(obj, runqueue) \
+ TypedData_Get_Struct((obj), Runqueue_t, &Runqueue_type, (runqueue))
+
+ static VALUE Runqueue_initialize(VALUE self) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+
+ runqueue_ring_buffer_init(&runqueue->entries);
+
+ return self;
+ }
+
+ void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+
+ if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
+ runqueue_ring_buffer_push(&runqueue->entries, fiber, value);
+ }
+
+ void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+ if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
+ runqueue_ring_buffer_unshift(&runqueue->entries, fiber, value);
+ }
+
+ runqueue_entry Runqueue_shift(VALUE self) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+ return runqueue_ring_buffer_shift(&runqueue->entries);
+ }
+
+ void Runqueue_delete(VALUE self, VALUE fiber) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+ runqueue_ring_buffer_delete(&runqueue->entries, fiber);
+ }
+
+ void Runqueue_clear(VALUE self) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+ runqueue_ring_buffer_clear(&runqueue->entries);
+ }
+
+ long Runqueue_len(VALUE self) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+
+ return runqueue->entries.count;
+ }
+
+ int Runqueue_empty_p(VALUE self) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+
+ return (runqueue->entries.count == 0);
+ }
+
+ void Init_Runqueue() {
+ cRunqueue = rb_define_class_under(mPolyphony, "Runqueue", rb_cData);
+ rb_define_alloc_func(cRunqueue, Runqueue_allocate);
+
+ rb_define_method(cRunqueue, "initialize", Runqueue_initialize, 0);
+ }
@@ -0,0 +1,85 @@
+ #include "polyphony.h"
+ #include "runqueue_ring_buffer.h"
+
+ void runqueue_ring_buffer_init(runqueue_ring_buffer *buffer) {
+ buffer->size = 1;
+ buffer->count = 0;
+ buffer->entries = malloc(buffer->size * sizeof(runqueue_entry));
+ buffer->head = 0;
+ buffer->tail = 0;
+ }
+
+ void runqueue_ring_buffer_free(runqueue_ring_buffer *buffer) {
+ free(buffer->entries);
+ }
+
+ int runqueue_ring_buffer_empty_p(runqueue_ring_buffer *buffer) {
+ return buffer->count == 0;
+ }
+
+ static runqueue_entry nil_runqueue_entry = {(Qnil), (Qnil)};
+
+ runqueue_entry runqueue_ring_buffer_shift(runqueue_ring_buffer *buffer) {
+ if (buffer->count == 0) return nil_runqueue_entry;
+
+ runqueue_entry value = buffer->entries[buffer->head];
+ buffer->head = (buffer->head + 1) % buffer->size;
+ buffer->count--;
+ return value;
+ }
+
+ void runqueue_ring_buffer_resize(runqueue_ring_buffer *buffer) {
+ unsigned int old_size = buffer->size;
+ buffer->size = old_size == 1 ? 4 : old_size * 2;
+ buffer->entries = realloc(buffer->entries, buffer->size * sizeof(runqueue_entry));
+ for (unsigned int idx = 0; idx < buffer->head && idx < buffer->tail; idx++)
+ buffer->entries[old_size + idx] = buffer->entries[idx];
+ buffer->tail = buffer->head + buffer->count;
+ }
+
+ void runqueue_ring_buffer_unshift(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
+ if (buffer->count == buffer->size) runqueue_ring_buffer_resize(buffer);
+
+ buffer->head = (buffer->head - 1) % buffer->size;
+ buffer->entries[buffer->head].fiber = fiber;
+ buffer->entries[buffer->head].value = value;
+ buffer->count++;
+ }
+
+ void runqueue_ring_buffer_push(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value) {
+ if (buffer->count == buffer->size) runqueue_ring_buffer_resize(buffer);
+
+ buffer->entries[buffer->tail].fiber = fiber;
+ buffer->entries[buffer->tail].value = value;
+ buffer->tail = (buffer->tail + 1) % buffer->size;
+ buffer->count++;
+ }
+
+ void runqueue_ring_buffer_mark(runqueue_ring_buffer *buffer) {
+ for (unsigned int i = 0; i < buffer->count; i++) {
+ rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size].fiber);
+ rb_gc_mark(buffer->entries[(buffer->head + i) % buffer->size].value);
+ }
+ }
+
+ void runqueue_ring_buffer_delete_at(runqueue_ring_buffer *buffer, unsigned int idx) {
+ for (unsigned int idx2 = idx; idx2 != buffer->tail; idx2 = (idx2 + 1) % buffer->size) {
+ buffer->entries[idx2] = buffer->entries[(idx2 + 1) % buffer->size];
+ }
+ buffer->count--;
+ buffer->tail = (buffer->tail - 1) % buffer->size;
+ }
+
+ void runqueue_ring_buffer_delete(runqueue_ring_buffer *buffer, VALUE fiber) {
+ for (unsigned int i = 0; i < buffer->count; i++) {
+ unsigned int idx = (buffer->head + i) % buffer->size;
+ if (buffer->entries[idx].fiber == fiber) {
+ runqueue_ring_buffer_delete_at(buffer, idx);
+ return;
+ }
+ }
+ }
+
+ void runqueue_ring_buffer_clear(runqueue_ring_buffer *buffer) {
+ buffer->count = buffer->head = buffer->tail = 0;
+ }
@@ -0,0 +1,31 @@
+ #ifndef RUNQUEUE_RING_BUFFER_H
+ #define RUNQUEUE_RING_BUFFER_H
+
+ #include "ruby.h"
+
+ typedef struct runqueue_entry {
+ VALUE fiber;
+ VALUE value;
+ } runqueue_entry;
+
+ typedef struct runqueue_ring_buffer {
+ runqueue_entry *entries;
+ unsigned int size;
+ unsigned int count;
+ unsigned int head;
+ unsigned int tail;
+ } runqueue_ring_buffer;
+
+ void runqueue_ring_buffer_init(runqueue_ring_buffer *buffer);
+ void runqueue_ring_buffer_free(runqueue_ring_buffer *buffer);
+ void runqueue_ring_buffer_mark(runqueue_ring_buffer *buffer);
+ int runqueue_ring_buffer_empty_p(runqueue_ring_buffer *buffer);
+ void runqueue_ring_buffer_clear(runqueue_ring_buffer *buffer);
+
+ runqueue_entry runqueue_ring_buffer_shift(runqueue_ring_buffer *buffer);
+ void runqueue_ring_buffer_unshift(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value);
+ void runqueue_ring_buffer_push(runqueue_ring_buffer *buffer, VALUE fiber, VALUE value);
+
+ void runqueue_ring_buffer_delete(runqueue_ring_buffer *buffer, VALUE fiber);
+
+ #endif /* RUNQUEUE_RING_BUFFER_H */
@@ -4,17 +4,15 @@ ID ID_deactivate_all_watchers_post_fork;
  ID ID_ivar_backend;
  ID ID_ivar_join_wait_queue;
  ID ID_ivar_main_fiber;
- ID ID_ivar_result;
  ID ID_ivar_terminated;
- ID ID_run_queue;
- ID ID_runnable_next;
+ ID ID_ivar_runqueue;
  ID ID_stop;

  static VALUE Thread_setup_fiber_scheduling(VALUE self) {
- VALUE queue = rb_funcall(cQueue, ID_new, 0);
+ VALUE runqueue = rb_funcall(cRunqueue, ID_new, 0);

  rb_ivar_set(self, ID_ivar_main_fiber, rb_fiber_current());
- rb_ivar_set(self, ID_run_queue, queue);
+ rb_ivar_set(self, ID_ivar_runqueue, runqueue);

  return self;
  }
@@ -35,10 +33,10 @@ static VALUE SYM_pending_watchers;
  static VALUE Thread_fiber_scheduling_stats(VALUE self) {
  VALUE backend = rb_ivar_get(self,ID_ivar_backend);
  VALUE stats = rb_hash_new();
- VALUE queue = rb_ivar_get(self, ID_run_queue);
+ VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
  long pending_count;

- long scheduled_count = RARRAY_LEN(queue);
+ long scheduled_count = Runqueue_len(runqueue);
  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(scheduled_count));

  pending_count = __BACKEND__.pending_count(backend);
@@ -47,30 +45,18 @@ static VALUE Thread_fiber_scheduling_stats(VALUE self) {
  return stats;
  }

- VALUE Thread_schedule_fiber(VALUE self, VALUE fiber, VALUE value) {
- VALUE queue;
-
- if (rb_fiber_alive_p(fiber) != Qtrue) return self;
-
- int already_runnable = rb_ivar_get(fiber, ID_runnable) != Qnil;
+ void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
+ VALUE runqueue;
+ int already_runnable;

- if (already_runnable) {
- VALUE current_runnable_value = rb_ivar_get(fiber, ID_runnable_value);
-
- // If the fiber is already runnable and the runnable value is an exception,
- // we don't update the value, in order to prevent a race condition where
- // exceptions will be lost (see issue #33)
- if (TEST_EXCEPTION(current_runnable_value)) return self;
- }
+ if (rb_fiber_alive_p(fiber) != Qtrue) return;
+ already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;

- rb_ivar_set(fiber, ID_runnable_value, value);
  COND_TRACE(3, SYM_fiber_schedule, fiber, value);
-
+ runqueue = rb_ivar_get(self, ID_ivar_runqueue);
+ (prioritize ? Runqueue_unshift : Runqueue_push)(runqueue, fiber, value, already_runnable);
  if (!already_runnable) {
- queue = rb_ivar_get(self, ID_run_queue);
- Queue_push(queue, fiber);
- rb_ivar_set(fiber, ID_runnable, Qtrue);
-
+ rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
  if (rb_thread_current() != self) {
  // If the fiber scheduling is done across threads, we need to make sure the
  // target thread is woken up in case it is in the middle of running its
@@ -81,46 +67,22 @@ VALUE Thread_schedule_fiber(VALUE self, VALUE fiber, VALUE value) {
  __BACKEND__.wakeup(backend);
  }
  }
+ }
+
+ VALUE Thread_schedule_fiber(VALUE self, VALUE fiber, VALUE value) {
+ schedule_fiber(self, fiber, value, 0);
  return self;
  }

  VALUE Thread_schedule_fiber_with_priority(VALUE self, VALUE fiber, VALUE value) {
- VALUE queue;
-
- if (rb_fiber_alive_p(fiber) != Qtrue) return self;
-
- COND_TRACE(3, SYM_fiber_schedule, fiber, value);
- rb_ivar_set(fiber, ID_runnable_value, value);
-
- queue = rb_ivar_get(self, ID_run_queue);
-
- // if fiber is already scheduled, remove it from the run queue
- if (rb_ivar_get(fiber, ID_runnable) != Qnil) {
- Queue_delete(queue, fiber);
- } else {
- rb_ivar_set(fiber, ID_runnable, Qtrue);
- }
-
- // the fiber is given priority by putting it at the front of the run queue
- Queue_unshift(queue, fiber);
-
- if (rb_thread_current() != self) {
- // if the fiber scheduling is done across threads, we need to make sure the
- // target thread is woken up in case it is in the middle of running its
- // event loop. Otherwise it's gonna be stuck waiting for an event to
- // happen, not knowing that it there's already a fiber ready to run in its
- // run queue.
- VALUE backend = rb_ivar_get(self, ID_ivar_backend);
- __BACKEND__.wakeup(backend);
- }
+ schedule_fiber(self, fiber, value, 1);
  return self;
  }

  VALUE Thread_switch_fiber(VALUE self) {
  VALUE current_fiber = rb_fiber_current();
- VALUE queue = rb_ivar_get(self, ID_run_queue);
- VALUE next_fiber;
- VALUE value;
+ VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
+ runqueue_entry next;
  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
  int ref_count;
  int backend_was_polled = 0;
@@ -130,42 +92,35 @@ VALUE Thread_switch_fiber(VALUE self) {

  ref_count = __BACKEND__.ref_count(backend);
  while (1) {
- next_fiber = Queue_shift_no_wait(queue);
- if (next_fiber != Qnil) {
+ next = Runqueue_shift(runqueue);
+ if (next.fiber != Qnil) {
  if (backend_was_polled == 0 && ref_count > 0) {
  // this prevents event starvation in case the run queue never empties
- __BACKEND__.poll(backend, Qtrue, current_fiber, queue);
+ __BACKEND__.poll(backend, Qtrue, current_fiber, runqueue);
  }
  break;
  }
  if (ref_count == 0) break;

- __BACKEND__.poll(backend, Qnil, current_fiber, queue);
+ __BACKEND__.poll(backend, Qnil, current_fiber, runqueue);
  backend_was_polled = 1;
  }

- if (next_fiber == Qnil) return Qnil;
+ if (next.fiber == Qnil) return Qnil;

  // run next fiber
- value = rb_ivar_get(next_fiber, ID_runnable_value);
- COND_TRACE(3, SYM_fiber_run, next_fiber, value);
-
- rb_ivar_set(next_fiber, ID_runnable, Qnil);
- RB_GC_GUARD(next_fiber);
- RB_GC_GUARD(value);
- return (next_fiber == current_fiber) ?
- value : rb_funcall(next_fiber, ID_transfer, 1, value);
- }
+ COND_TRACE(3, SYM_fiber_run, next.fiber, next.value);

- VALUE Thread_run_queue_trace(VALUE self) {
- VALUE queue = rb_ivar_get(self, ID_run_queue);
- Queue_trace(queue);
- return self;
+ rb_ivar_set(next.fiber, ID_ivar_runnable, Qnil);
+ RB_GC_GUARD(next.fiber);
+ RB_GC_GUARD(next.value);
+ return (next.fiber == current_fiber) ?
+ next.value : rb_funcall(next.fiber, ID_transfer, 1, next.value);
  }

  VALUE Thread_reset_fiber_scheduling(VALUE self) {
- VALUE queue = rb_ivar_get(self, ID_run_queue);
- Queue_clear(queue);
+ VALUE queue = rb_ivar_get(self, ID_ivar_runqueue);
+ Runqueue_clear(queue);
  Thread_fiber_reset_ref_count(self);
  return self;
  }
@@ -199,7 +154,6 @@ void Init_Thread() {
  rb_define_method(rb_cThread, "schedule_fiber_with_priority",
  Thread_schedule_fiber_with_priority, 2);
  rb_define_method(rb_cThread, "switch_fiber", Thread_switch_fiber, 0);
- rb_define_method(rb_cThread, "run_queue_trace", Thread_run_queue_trace, 0);

  rb_define_method(rb_cThread, "debug!", Thread_debug, 0);

@@ -207,10 +161,8 @@
  ID_ivar_backend = rb_intern("@backend");
  ID_ivar_join_wait_queue = rb_intern("@join_wait_queue");
  ID_ivar_main_fiber = rb_intern("@main_fiber");
- ID_ivar_result = rb_intern("@result");
+ ID_ivar_runqueue = rb_intern("@runqueue");
  ID_ivar_terminated = rb_intern("@terminated");
- ID_run_queue = rb_intern("run_queue");
- ID_runnable_next = rb_intern("runnable_next");
  ID_stop = rb_intern("stop");

  SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));
@@ -32,7 +32,7 @@ module Polyphony
  end

  def cancel_after_wrap_block(canceller, &block)
- block.call
+ block.call(canceller)
  ensure
  canceller.stop
  end
@@ -81,7 +81,7 @@ module Polyphony
  sleep interval
  fiber.schedule Polyphony::MoveOn.new(with_value)
  end
- block.call
+ block.call(canceller)
  rescue Polyphony::MoveOn => e
  e.value
  ensure
@@ -16,11 +16,13 @@ module Polyphony

  def synchronize_not_holding
  @token = @store.shift
- @holding_fiber = Fiber.current
- yield
- ensure
- @holding_fiber = nil
- @store << @token if @token
+ begin
+ @holding_fiber = Fiber.current
+ yield
+ ensure
+ @holding_fiber = nil
+ @store << @token if @token
+ end
  end

  def conditional_release
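The change above narrows the `ensure` so it only runs once a token has actually been shifted off `@store`: a fiber terminated while still blocked on `@store.shift` no longer clears `@holding_fiber` or pushes back a token it never held, which is the `Mutex#synchronize` race referenced by #41 and covered by the new `test_mutex_race_condition` further down. A rough sketch of the scenario, with the expected outcome assumed from that test:

```ruby
require 'polyphony'

lock = Polyphony::Mutex.new
log  = []

holder = spin { lock.synchronize { log << :holder; sleep 0.01 } }
victim = spin { lock.synchronize { log << :victim } }
other  = spin { lock.synchronize { log << :other } }

snooze            # holder takes the lock; victim and other block on it
victim.terminate  # killed while still waiting inside @store.shift

other.await
log #=> [:holder, :other] — the mutex state stays consistent after the termination
```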
@@ -187,9 +187,9 @@ module Polyphony
  (@children ||= {}).keys
  end

- def spin(tag = nil, orig_caller = Kernel.caller, do_schedule: true, &block)
+ def spin(tag = nil, orig_caller = Kernel.caller, &block)
  f = Fiber.new { |v| f.run(v) }
- f.prepare(tag, block, orig_caller, self, do_schedule: do_schedule)
+ f.prepare(tag, block, orig_caller, self)
  (@children ||= {})[f] = true
  f
  end
@@ -227,14 +227,14 @@ module Polyphony

  # Fiber life cycle methods
  module FiberLifeCycle
- def prepare(tag, block, caller, parent, do_schedule: true)
+ def prepare(tag, block, caller, parent)
  @thread = Thread.current
  @tag = tag
  @parent = parent
  @caller = caller
  @block = block
  __fiber_trace__(:fiber_create, self)
- schedule if do_schedule
+ schedule
  end

  def run(first_value)
@@ -308,7 +308,7 @@ module Polyphony
  @waiting_fibers&.each_key { |f| f.schedule(result) }

  # propagate unaught exception to parent
- @parent&.schedule(result) if uncaught_exception && !@waiting_fibers
+ @parent&.schedule_with_priority(result) if uncaught_exception && !@waiting_fibers
  end

  def when_done(&block)
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Polyphony
- VERSION = '0.45.4'
+ VERSION = '0.45.5'
  end
@@ -122,6 +122,21 @@ class MoveOnAfterTest < MiniTest::Test
  assert_equal :bar, v
  end

+ def test_move_on_after_with_reset
+ t0 = Time.now
+ v = move_on_after(0.01, with_value: :moved_on) do |timeout|
+ sleep 0.007
+ timeout.reset
+ sleep 0.007
+ nil
+ end
+ t1 = Time.now
+
+ assert_nil v
+ assert t1 - t0 >= 0.014
+ assert t1 - t0 < 0.02
+ end
+
  def test_move_on_after_without_block
  t0 = Time.now
  f = move_on_after(0.01, with_value: 'foo')
@@ -160,6 +175,20 @@ class CancelAfterTest < MiniTest::Test
  assert t1 - t0 < 0.1
  end

+ def test_cancel_after_with_reset
+ t0 = Time.now
+ cancel_after(0.01) do |f|
+ assert_kind_of Fiber, f
+ assert_equal Fiber.current, f.parent
+ sleep 0.007
+ f.reset
+ sleep 0.007
+ end
+ t1 = Time.now
+ assert t1 - t0 >= 0.014
+ assert t1 - t0 < 0.02
+ end
+
  class CustomException < Exception
  end

@@ -20,6 +20,27 @@ class MutexTest < MiniTest::Test
  assert_equal ['>> 1', '<< 1', '>> 2', '<< 2', '>> 3', '<< 3'], buf
  end

+ def test_mutex_race_condition
+ lock = Polyphony::Mutex.new
+ buf = []
+ f1 = spin do
+ lock.synchronize { buf << 1; snooze; lock.synchronize { buf << 1.1 }; snooze }
+ end
+ f2 = spin do
+ lock.synchronize { buf << 2 }
+ end
+ f3 = spin do
+ lock.synchronize { buf << 3 }
+ end
+
+ snooze
+ f2.terminate
+
+ f3.await
+
+ assert_equal [1, 1.1, 3], buf
+ end
+
  def test_condition_variable
  buf = []
  lock1 = Polyphony::Mutex.new
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: polyphony
  version: !ruby/object:Gem::Version
- version: 0.45.4
+ version: 0.45.5
  platform: ruby
  authors:
  - Sharon Rosner
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2020-09-06 00:00:00.000000000 Z
+ date: 2020-10-04 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rake-compiler
@@ -428,6 +428,9 @@ files:
  - ext/polyphony/queue.c
  - ext/polyphony/ring_buffer.c
  - ext/polyphony/ring_buffer.h
+ - ext/polyphony/runqueue.c
+ - ext/polyphony/runqueue_ring_buffer.c
+ - ext/polyphony/runqueue_ring_buffer.h
  - ext/polyphony/thread.c
  - ext/polyphony/tracing.c
  - lib/polyphony.rb
@@ -507,7 +510,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.1.2
+ rubygems_version: 3.1.4
  signing_key:
  specification_version: 4
  summary: Fine grained concurrency for Ruby