polyphony 0.57.0 → 0.60
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +27 -0
- data/Gemfile.lock +15 -29
- data/examples/core/message_based_supervision.rb +51 -0
- data/ext/polyphony/backend_common.c +108 -3
- data/ext/polyphony/backend_common.h +23 -0
- data/ext/polyphony/backend_io_uring.c +117 -39
- data/ext/polyphony/backend_io_uring_context.c +11 -3
- data/ext/polyphony/backend_io_uring_context.h +5 -3
- data/ext/polyphony/backend_libev.c +92 -30
- data/ext/polyphony/extconf.rb +2 -2
- data/ext/polyphony/fiber.c +1 -34
- data/ext/polyphony/polyphony.c +12 -19
- data/ext/polyphony/polyphony.h +10 -20
- data/ext/polyphony/polyphony_ext.c +0 -4
- data/ext/polyphony/queue.c +12 -12
- data/ext/polyphony/runqueue.c +17 -85
- data/ext/polyphony/runqueue.h +27 -0
- data/ext/polyphony/thread.c +10 -99
- data/lib/polyphony/core/timer.rb +2 -2
- data/lib/polyphony/extensions/fiber.rb +102 -82
- data/lib/polyphony/extensions/io.rb +10 -9
- data/lib/polyphony/extensions/openssl.rb +14 -4
- data/lib/polyphony/extensions/socket.rb +15 -15
- data/lib/polyphony/extensions/thread.rb +8 -0
- data/lib/polyphony/version.rb +1 -1
- data/polyphony.gemspec +0 -7
- data/test/test_backend.rb +71 -5
- data/test/test_ext.rb +1 -1
- data/test/test_fiber.rb +106 -18
- data/test/test_global_api.rb +1 -1
- data/test/test_io.rb +29 -0
- data/test/test_supervise.rb +100 -100
- data/test/test_thread.rb +57 -11
- data/test/test_thread_pool.rb +1 -1
- data/test/test_trace.rb +28 -49
- metadata +4 -108
- data/ext/polyphony/tracing.c +0 -11
- data/lib/polyphony/adapters/trace.rb +0 -138
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 515e9a5686bb0eedb02ad626e491b3e8acb350a765a468029e0b5357673f443c
+  data.tar.gz: 94fd7eaedd37c01f1ebc33ba78ffb00d3e8fc42f4a0266bf63ba8eedb83d008c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a546bcf43f556dc7d6bbc3c04b9448141a539e91d2d893441a161eecf9246ff516a54cf94cae476ef9358470e5475c98577c807fd1cef1f712f342337d3c8cc8
+  data.tar.gz: e0bb07cc0028c3205f3c2d87a59699e35e3d4d0e797eff63258892475548086125ed40e63152c56253a1c596b680eacc7080bf08f7d152576c34dc57df0206ab
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,30 @@
+## 0.60 2021-07-15
+
+
+- Fix linux version detection (for kernel version > 5.9)
+- Fix op ctx leak in io_uring backend (when polling for I/O readiness)
+- Add support for appending to buffer in `Backend#read`, `Backend#recv` methods
+- Improve anti-event starvation mechanism
+- Redesign fiber monitoring mechanism
+- Implement `Fiber#attach`
+- Add optional maxlen argument to `IO#read_loop`, `Socket#recv_loop` (#60)
+- Implement `Fiber#detach` (#52)
+
+## 0.59.1 2021-06-28
+
+- Accept fiber tag in `Polyphony::Timer.new`
+
+## 0.59 2021-06-28
+
+- Redesign tracing mechanism and API - now completely separated from Ruby core
+  trace API
+- Refactor C code - move run queue into backend
+
+## 0.58 2021-06-25
+
+- Implement `Thread#idle_gc_period`, `#on_idle` (#56)
+- Implement `Backend#idle_block=` (#56)
+
 ## 0.57.0 2021-06-23
 
 - Implement `Backend#splice_chunks` method for both libev and io_uring backends
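Two of the 0.60 entries above surface directly in user code. A minimal sketch (not taken from the gem) of the optional maxlen argument to `IO#read_loop` (#60); the host and the 16384 chunk size are arbitrary illustrations:

  require 'polyphony'

  sock = TCPSocket.new('example.com', 80)
  sock << "GET / HTTP/1.0\r\nHost: example.com\r\n\r\n"
  # Pass a maximum chunk length per read; before 0.60 the chunk size
  # was not configurable.
  sock.read_loop(16384) { |chunk| STDOUT << chunk }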
data/Gemfile.lock
CHANGED
@@ -1,27 +1,25 @@
 PATH
   remote: .
   specs:
-    polyphony (0.
+    polyphony (0.59.2)
 
 GEM
   remote: https://rubygems.org/
   specs:
     ansi (1.5.0)
-    ast (2.4.
+    ast (2.4.2)
     builder (3.2.4)
     coderay (1.1.3)
-    docile (1.
-    hiredis (0.6.3)
-    http_parser.rb (0.6.0)
+    docile (1.4.0)
     httparty (0.17.1)
       mime-types (~> 3.0)
       multi_xml (>= 0.5.2)
-    json (2.
+    json (2.5.1)
     localhost (1.1.8)
     method_source (1.0.0)
     mime-types (3.3.1)
       mime-types-data (~> 3.2015)
-    mime-types-data (3.
+    mime-types-data (3.2021.0704)
     minitest (5.14.4)
     minitest-reporters (1.4.2)
       ansi
@@ -30,22 +28,18 @@ GEM
       ruby-progressbar
     msgpack (1.4.2)
     multi_xml (0.6.0)
-
-
-
-      ast (~> 2.4.0)
-    pg (1.1.4)
+    parallel (1.20.1)
+    parser (3.0.2.0)
+      ast (~> 2.4.1)
     pry (0.13.1)
       coderay (~> 1.1)
       method_source (~> 1.0)
-    rack (2.2.3)
     rainbow (3.0.0)
-    rake (13.0.
+    rake (13.0.6)
     rake-compiler (1.1.1)
       rake
-
-
-    rexml (3.2.4)
+    regexp_parser (2.1.1)
+    rexml (3.2.5)
     rubocop (0.85.1)
       parallel (~> 1.10)
       parser (>= 2.7.0.1)
@@ -55,37 +49,29 @@ GEM
       rubocop-ast (>= 0.0.3)
       ruby-progressbar (~> 1.7)
       unicode-display_width (>= 1.4.0, < 2.0)
-    rubocop-ast (
-      parser (>=
-    ruby-progressbar (1.
-    sequel (5.34.0)
+    rubocop-ast (1.8.0)
+      parser (>= 3.0.1.1)
+    ruby-progressbar (1.11.0)
     simplecov (0.17.1)
       docile (~> 1.1)
       json (>= 1.8, < 3)
       simplecov-html (~> 0.10.0)
     simplecov-html (0.10.2)
-    unicode-display_width (1.
+    unicode-display_width (1.7.0)
 
 PLATFORMS
   ruby
 
 DEPENDENCIES
-  hiredis (= 0.6.3)
-  http_parser.rb (~> 0.6.0)
   httparty (= 0.17.1)
   localhost (~> 1.1.4)
   minitest (= 5.14.4)
   minitest-reporters (= 1.4.2)
   msgpack (= 1.4.2)
-  mysql2 (= 0.5.3)
-  pg (= 1.1.4)
   polyphony!
   pry (= 0.13.1)
-  rack (>= 2.0.8, < 2.3.0)
   rake-compiler (= 1.1.1)
-  redis (= 4.1.0)
   rubocop (= 0.85.1)
-  sequel (= 5.34.0)
   simplecov (= 0.17.1)
 
 BUNDLED WITH
data/examples/core/message_based_supervision.rb
ADDED
@@ -0,0 +1,51 @@
+# frozen_string_literal: true
+
+require 'bundler/setup'
+require 'polyphony'
+
+class Supervisor
+  def initialize(*fibers)
+    @fiber = spin { do_supervise }
+    @fiber.message_on_child_termination = true
+    fibers.each { |f| add(f) }
+  end
+
+  def await
+    @fiber.await
+  end
+
+  def spin(tag = nil, &block)
+    @fiber.spin(tag, &block)
+  end
+
+  def add(fiber)
+    fiber.attach(@fiber)
+  end
+
+  def do_supervise
+    loop do
+      msg = receive
+      # puts "Supervisor received #{msg.inspect}"
+      f, r = msg
+      puts "Fiber #{f.tag} terminated with #{r.inspect}, restarting..."
+      f.restart
+    end
+  end
+end
+
+def supervise(*fibers)
+  supervisor = Supervisor.new(*fibers)
+  supervisor.await
+end
+
+def start_worker(id)
+  spin_loop(:"worker#{id}") do
+    duration = rand(0.5..1.0)
+    puts "Worker #{id} sleeping for #{duration} seconds"
+    sleep duration
+    raise 'foo' if rand > 0.7
+    break if rand > 0.6
+  end
+end
+
+supervise(start_worker(1), start_worker(2))
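This example exercises the fiber monitoring mechanism redesigned in this release: with `message_on_child_termination` set, the termination of any fiber attached to the supervisor (via `Fiber#attach`, also new in 0.60) is delivered to the supervisor's mailbox as a `[fiber, result]` message, so the supervisor can restart failed workers without having to rescue their exceptions itself.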
data/ext/polyphony/backend_common.c
CHANGED
@@ -5,6 +5,107 @@
 #include "polyphony.h"
 #include "backend_common.h"
 
+inline void backend_base_initialize(struct Backend_base *base) {
+  runqueue_initialize(&base->runqueue);
+  base->currently_polling = 0;
+  base->pending_count = 0;
+  base->idle_gc_period = 0;
+  base->idle_gc_last_time = 0;
+  base->idle_proc = Qnil;
+  base->trace_proc = Qnil;
+}
+
+inline void backend_base_finalize(struct Backend_base *base) {
+  runqueue_finalize(&base->runqueue);
+}
+
+inline void backend_base_mark(struct Backend_base *base) {
+  if (base->idle_proc != Qnil) rb_gc_mark(base->idle_proc);
+  if (base->trace_proc != Qnil) rb_gc_mark(base->trace_proc);
+  runqueue_mark(&base->runqueue);
+}
+
+inline void conditional_nonblocking_poll(VALUE backend, struct Backend_base *base, VALUE current, VALUE next) {
+  if (runqueue_should_poll_nonblocking(&base->runqueue) || next == current)
+    Backend_poll(backend, Qnil);
+}
+
+VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
+  VALUE current_fiber = rb_fiber_current();
+  runqueue_entry next;
+  unsigned int pending_ops_count = base->pending_count;
+  unsigned int backend_was_polled = 0;
+  unsigned int idle_tasks_run_count = 0;
+
+  COND_TRACE(base, 2, SYM_fiber_switchpoint, current_fiber);
+
+  while (1) {
+    next = runqueue_shift(&base->runqueue);
+    if (next.fiber != Qnil) {
+      // Polling for I/O op completion is normally done when the run queue is
+      // empty, but if the runqueue never empties, we'll never get to process
+      // any event completions. In order to prevent this, an anti-starvation
+      // mechanism is employed, under the following conditions:
+      // - a blocking poll was not yet performed
+      // - there are pending blocking operations
+      // - the runqueue shift count has reached a fixed threshold (currently 64), or
+      // - the next fiber is the same as the current fiber (a single fiber is snoozing)
+      if (!backend_was_polled && pending_ops_count)
+        conditional_nonblocking_poll(backend, base, current_fiber, next.fiber);
+
+      break;
+    }
+
+    if (!idle_tasks_run_count) {
+      idle_tasks_run_count++;
+      backend_run_idle_tasks(base);
+    }
+    if (pending_ops_count == 0) break;
+    Backend_poll(backend, Qtrue);
+    backend_was_polled = 1;
+  }
+
+  if (next.fiber == Qnil) return Qnil;
+
+  // run next fiber
+  COND_TRACE(base, 3, SYM_fiber_run, next.fiber, next.value);
+
+  rb_ivar_set(next.fiber, ID_ivar_runnable, Qnil);
+  RB_GC_GUARD(next.fiber);
+  RB_GC_GUARD(next.value);
+  return (next.fiber == current_fiber) ?
+    next.value : FIBER_TRANSFER(next.fiber, next.value);
+}
+
+void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_base *base, VALUE fiber, VALUE value, int prioritize) {
+  int already_runnable;
+
+  if (rb_fiber_alive_p(fiber) != Qtrue) return;
+  already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;
+
+  COND_TRACE(base, 4, SYM_fiber_schedule, fiber, value, prioritize ? Qtrue : Qfalse);
+
+  (prioritize ? runqueue_unshift : runqueue_push)(&base->runqueue, fiber, value, already_runnable);
+  if (!already_runnable) {
+    rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
+    if (rb_thread_current() != thread) {
+      // If the fiber scheduling is done across threads, we need to make sure the
+      // target thread is woken up in case it is in the middle of running its
+      // event selector. Otherwise it's gonna be stuck waiting for an event to
+      // happen, not knowing that there's already a fiber ready to run in its
+      // run queue.
+      Backend_wakeup(backend);
+    }
+  }
+}
+
+
+inline void backend_trace(struct Backend_base *base, int argc, VALUE *argv) {
+  if (base->trace_proc == Qnil) return;
+
+  rb_funcallv(base->trace_proc, ID_call, argc, argv);
+}
+
 #ifdef POLYPHONY_USE_PIDFD_OPEN
 #ifndef __NR_pidfd_open
 #define __NR_pidfd_open 434 /* System call # on most architectures */
@@ -72,7 +173,7 @@ inline VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
 //////////////////////////////////////////////////////////////////////
 //////////////////////////////////////////////////////////////////////
 
-VALUE backend_await(struct Backend_base *backend) {
+inline VALUE backend_await(struct Backend_base *backend) {
   VALUE ret;
   backend->pending_count++;
   ret = Thread_switch_fiber(rb_thread_current());
@@ -81,9 +182,10 @@ VALUE backend_await(struct Backend_base *backend) {
   return ret;
 }
 
-VALUE backend_snooze() {
+inline VALUE backend_snooze() {
+  VALUE ret;
   Fiber_make_runnable(rb_fiber_current(), Qnil);
-  VALUE ret = Thread_switch_fiber(rb_thread_current());
+  ret = Thread_switch_fiber(rb_thread_current());
   return ret;
 }
 
@@ -174,6 +276,9 @@ inline void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking) {
 }
 
 inline void backend_run_idle_tasks(struct Backend_base *base) {
+  if (base->idle_proc != Qnil)
+    rb_funcall(base->idle_proc, ID_call, 0);
+
   if (base->idle_gc_period == 0) return;
 
   double now = current_time();
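The anti-starvation branch in `backend_base_switch_fiber` above is easier to see from the Ruby side. A minimal sketch (not from the gem; it uses Polyphony's public `spin`/`snooze` API): the busy fiber keeps the runqueue non-empty forever, so without the occasional non-blocking poll described in the comment above, the reader fiber's I/O completion would never be processed:

  require 'polyphony'

  # Re-schedules itself forever: the runqueue never empties, so the
  # scheduler never reaches its blocking poll.
  busy = spin { loop { snooze } }

  r, w = IO.pipe
  # Waits on I/O: it becomes runnable only when the backend polls for
  # completions. The periodic non-blocking poll performed by the scheduler
  # (conditional_nonblocking_poll above) lets it progress despite the busy loop.
  reader = spin { puts "got: #{r.gets}" }

  spin { w.puts 'hello' }

  reader.await
  busy.stop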
data/ext/polyphony/backend_common.h
CHANGED
@@ -3,14 +3,37 @@
 
 #include "ruby.h"
 #include "ruby/io.h"
+#include "runqueue.h"
+
+struct backend_stats {
+  int scheduled_fibers;
+  int pending_ops;
+};
 
 struct Backend_base {
+  runqueue_t runqueue;
   unsigned int currently_polling;
   unsigned int pending_count;
   double idle_gc_period;
   double idle_gc_last_time;
+  VALUE idle_proc;
+  VALUE trace_proc;
 };
 
+void backend_base_initialize(struct Backend_base *base);
+void backend_base_finalize(struct Backend_base *base);
+void backend_base_mark(struct Backend_base *base);
+VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base);
+void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_base *base, VALUE fiber, VALUE value, int prioritize);
+void backend_trace(struct Backend_base *base, int argc, VALUE *argv);
+
+// tracing
+#define SHOULD_TRACE(base) ((base)->trace_proc != Qnil)
+#define TRACE(base, ...) rb_funcall((base)->trace_proc, ID_call, __VA_ARGS__)
+#define COND_TRACE(base, ...) if (SHOULD_TRACE(base)) { TRACE(base, __VA_ARGS__); }
+
+
+
 #ifdef POLYPHONY_USE_PIDFD_OPEN
 int pidfd_open(pid_t pid, unsigned int flags);
 #endif
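The `trace_proc` field and the `TRACE`/`COND_TRACE` macros above back the tracing API that 0.59 separated from the Ruby core trace API. A minimal sketch of installing a trace proc from Ruby, assuming the `Thread#backend` accessor; the event tuples follow the `COND_TRACE` call sites in this diff (`:fiber_switchpoint`, `:fiber_run`, `:fiber_schedule`, `:fiber_event_poll_enter`/`:fiber_event_poll_leave`):

  require 'polyphony'

  events = []
  # Every COND_TRACE call site funnels its arguments to this proc.
  Thread.current.backend.trace_proc = proc { |*event| events << event }

  spin { sleep 0.01 }.await

  Thread.current.backend.trace_proc = nil  # a nil proc disables tracing
  events.each { |e| p e }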
data/ext/polyphony/backend_io_uring.c
CHANGED
@@ -42,13 +42,23 @@ typedef struct Backend_t {
   int event_fd;
 } Backend_t;
 
+static void Backend_mark(void *ptr) {
+  Backend_t *backend = ptr;
+  backend_base_mark(&backend->base);
+}
+
+static void Backend_free(void *ptr) {
+  Backend_t *backend = ptr;
+  backend_base_finalize(&backend->base);
+}
+
 static size_t Backend_size(const void *ptr) {
   return sizeof(Backend_t);
 }
 
 static const rb_data_type_t Backend_type = {
   "IOUringBackend",
-  {
+  {Backend_mark, Backend_free, Backend_size,},
   0, 0, RUBY_TYPED_FREE_IMMEDIATELY
 };
 
@@ -65,11 +75,7 @@ static VALUE Backend_initialize(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  backend->base
-  backend->base.pending_count = 0;
-  backend->base.idle_gc_period = 0;
-  backend->base.idle_gc_last_time = 0;
-
+  backend_base_initialize(&backend->base);
   backend->pending_sqes = 0;
   backend->prepared_limit = 2048;
 
@@ -104,13 +110,6 @@ VALUE Backend_post_fork(VALUE self) {
   return self;
 }
 
-unsigned int Backend_pending_count(VALUE self) {
-  Backend_t *backend;
-  GetBackend(self, backend);
-
-  return backend->base.pending_count;
-}
-
 typedef struct poll_context {
   struct io_uring *ring;
   struct io_uring_cqe *cqe;
@@ -134,6 +133,7 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
   op_context_t *ctx = io_uring_cqe_get_data(cqe);
   if (!ctx) return;
 
+  // printf("cqe ctx %p id: %d result: %d (%s, ref_count: %d)\n", ctx, ctx->id, cqe->res, op_type_to_str(ctx->type), ctx->ref_count);
   ctx->result = cqe->res;
   if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
     Fiber_make_runnable(ctx->fiber, ctx->resume_value);
@@ -186,24 +186,64 @@ void io_uring_backend_poll(Backend_t *backend) {
   io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
 }
 
-VALUE Backend_poll(VALUE self, VALUE
-  int
+inline VALUE Backend_poll(VALUE self, VALUE blocking) {
+  int is_blocking = blocking == Qtrue;
   Backend_t *backend;
   GetBackend(self, backend);
 
-  if (
+  if (!is_blocking && backend->pending_sqes) {
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
   }
 
-  COND_TRACE(2, SYM_fiber_event_poll_enter,
-  if (
+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
+  // if (SHOULD_TRACE(&backend->base))
+  //   printf(
+  //     "io_uring_poll(blocking_mode: %d, pending: %d, taken: %d, available: %d, runqueue: %d\n",
+  //     is_blocking,
+  //     backend->base.pending_count,
+  //     backend->store.taken_count,
+  //     backend->store.available_count,
+  //     backend->base.runqueue.entries.count
+  //   );
+  if (is_blocking) io_uring_backend_poll(backend);
   io_uring_backend_handle_ready_cqes(backend);
-  COND_TRACE(2, SYM_fiber_event_poll_leave,
+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
 
   return self;
 }
 
+inline void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  backend_base_schedule_fiber(thread, self, &backend->base, fiber, value, prioritize);
+}
+
+inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  runqueue_delete(&backend->base.runqueue, fiber);
+}
+
+inline VALUE Backend_switch_fiber(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  return backend_base_switch_fiber(self, &backend->base);
+}
+
+inline struct backend_stats Backend_stats(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  return (struct backend_stats){
+    .scheduled_fibers = runqueue_len(&backend->base.runqueue),
+    .pending_ops = backend->base.pending_count
+  };
+}
+
 VALUE Backend_wakeup(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
@@ -270,17 +310,25 @@ VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
   io_uring_prep_poll_add(sqe, fd, write ? POLLOUT : POLLIN);
 
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resumed_value);
+  context_store_release(&backend->store, ctx);
+
   RB_GC_GUARD(resumed_value);
   return resumed_value;
 }
 
-VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof) {
+VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof, VALUE pos) {
   Backend_t *backend;
   rb_io_t *fptr;
   long dynamic_len = length == Qnil;
   long buffer_size = dynamic_len ? 4096 : NUM2INT(length);
-  int shrinkable = io_setstrbuf(&str, buffer_size);
-  char *buf = RSTRING_PTR(str);
+  long buf_pos = NUM2INT(pos);
+  if (str != Qnil) {
+    int current_len = RSTRING_LEN(str);
+    if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
+  }
+  else buf_pos = 0;
+  int shrinkable = io_setstrbuf(&str, buf_pos + buffer_size);
+  char *buf = RSTRING_PTR(str) + buf_pos;
   long total = 0;
   int read_to_eof = RTEST(to_eof);
   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
@@ -317,9 +365,9 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
       if (!dynamic_len) break;
 
       // resize buffer
-      rb_str_resize(str, total);
+      rb_str_resize(str, buf_pos + total);
       rb_str_modify_expand(str, buffer_size);
-      buf = RSTRING_PTR(str) + total;
+      buf = RSTRING_PTR(str) + buf_pos + total;
       shrinkable = 0;
      buffer_size += buffer_size;
     }
@@ -327,7 +375,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
     }
   }
 
-  io_set_read_length(str, total, shrinkable);
+  io_set_read_length(str, buf_pos + total, shrinkable);
   io_enc_str(str, fptr);
 
   if (!total) return Qnil;
@@ -335,12 +383,12 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
   return str;
 }
 
-VALUE Backend_read_loop(VALUE self, VALUE io) {
+VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
   Backend_t *backend;
   rb_io_t *fptr;
   VALUE str;
   long total;
-  long len =
+  long len = NUM2INT(maxlen);
   int shrinkable;
   char *buf;
   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
@@ -548,13 +596,19 @@ VALUE Backend_write_m(int argc, VALUE *argv, VALUE self) {
     Backend_writev(self, argv[0], argc - 1, argv + 1);
 }
 
-VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
+VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   Backend_t *backend;
   rb_io_t *fptr;
   long dynamic_len = length == Qnil;
   long len = dynamic_len ? 4096 : NUM2INT(length);
-  int shrinkable = io_setstrbuf(&str, len);
-  char *buf = RSTRING_PTR(str);
+  long buf_pos = NUM2INT(pos);
+  if (str != Qnil) {
+    int current_len = RSTRING_LEN(str);
+    if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
+  }
+  else buf_pos = 0;
+  int shrinkable = io_setstrbuf(&str, buf_pos + len);
+  char *buf = RSTRING_PTR(str) + buf_pos;
   long total = 0;
   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
 
@@ -586,7 +640,7 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
     }
   }
 
-  io_set_read_length(str, total, shrinkable);
+  io_set_read_length(str, buf_pos + total, shrinkable);
   io_enc_str(str, fptr);
 
   if (!total) return Qnil;
@@ -594,12 +648,12 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
   return str;
 }
 
-VALUE Backend_recv_loop(VALUE self, VALUE io) {
+VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
   Backend_t *backend;
   rb_io_t *fptr;
   VALUE str;
   long total;
-  long len =
+  long len = NUM2INT(maxlen);
   int shrinkable;
   char *buf;
   VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
@@ -908,7 +962,6 @@ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration
 
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   io_uring_prep_timeout(sqe, &ts, 0, 0);
-
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
   return context_store_release(&backend->store, ctx);
 }
@@ -1185,6 +1238,13 @@ VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
   return self;
 }
 
+VALUE Backend_idle_proc_set(VALUE self, VALUE block) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend->base.idle_proc = block;
+  return self;
+}
+
 inline VALUE Backend_run_idle_tasks(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
@@ -1346,6 +1406,21 @@ error:
   return RAISE_EXCEPTION(switchpoint_result);
 }
 
+VALUE Backend_trace(int argc, VALUE *argv, VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend_trace(&backend->base, argc, argv);
+  return self;
+}
+
+VALUE Backend_trace_proc_set(VALUE self, VALUE block) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  backend->base.trace_proc = block;
+  return self;
+}
+
 void Init_Backend() {
   VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cObject);
   rb_define_alloc_func(cBackend, Backend_allocate);
@@ -1353,23 +1428,26 @@ void Init_Backend() {
   rb_define_method(cBackend, "initialize", Backend_initialize, 0);
   rb_define_method(cBackend, "finalize", Backend_finalize, 0);
   rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
+  rb_define_method(cBackend, "trace", Backend_trace, -1);
+  rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);
 
-  rb_define_method(cBackend, "poll", Backend_poll,
+  rb_define_method(cBackend, "poll", Backend_poll, 1);
   rb_define_method(cBackend, "break", Backend_wakeup, 0);
   rb_define_method(cBackend, "kind", Backend_kind, 0);
   rb_define_method(cBackend, "chain", Backend_chain, -1);
   rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);
+  rb_define_method(cBackend, "idle_proc=", Backend_idle_proc_set, 1);
   rb_define_method(cBackend, "splice_chunks", Backend_splice_chunks, 7);
 
   rb_define_method(cBackend, "accept", Backend_accept, 2);
   rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
   rb_define_method(cBackend, "connect", Backend_connect, 3);
   rb_define_method(cBackend, "feed_loop", Backend_feed_loop, 3);
-  rb_define_method(cBackend, "read", Backend_read,
-  rb_define_method(cBackend, "read_loop", Backend_read_loop,
-  rb_define_method(cBackend, "recv", Backend_recv,
+  rb_define_method(cBackend, "read", Backend_read, 5);
+  rb_define_method(cBackend, "read_loop", Backend_read_loop, 2);
+  rb_define_method(cBackend, "recv", Backend_recv, 4);
   rb_define_method(cBackend, "recv_feed_loop", Backend_recv_feed_loop, 3);
-  rb_define_method(cBackend, "recv_loop", Backend_recv_loop,
+  rb_define_method(cBackend, "recv_loop", Backend_recv_loop, 2);
   rb_define_method(cBackend, "send", Backend_send, 3);
   rb_define_method(cBackend, "sendv", Backend_sendv, 3);
   rb_define_method(cBackend, "sleep", Backend_sleep, 1);
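The `pos` parameter threaded through `Backend_read` and `Backend_recv` above implements the "appending to buffer" changelog entry: a negative `pos` (or one beyond the string's length) is clamped to the current length, so successive reads accumulate into one buffer. A minimal sketch of calling the backend directly, assuming the `Thread#backend` accessor; `-1` requests append:

  require 'polyphony'

  r, w = IO.pipe
  w << 'foobar'
  w.close

  buf = +''
  # Backend#read(io, buffer, maxlen, to_eof, pos): pos -1 is clamped to the
  # buffer's current length by the buf_pos logic in Backend_read, i.e. append.
  Thread.current.backend.read(r, buf, 3, false, -1)
  Thread.current.backend.read(r, buf, 3, false, -1)
  buf #=> "foobar"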