polyphony 0.84.1 → 0.85
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +10 -0
- data/Gemfile.lock +1 -1
- data/examples/core/multi_suspend.rb +39 -0
- data/examples/core/shutdown_all_children.rb +41 -0
- data/examples/io/gzip.rb +8 -0
- data/examples/io/splice_echo_server.rb +15 -0
- data/ext/polyphony/backend_io_uring.c +57 -31
- data/ext/polyphony/io_extensions.c +138 -26
- data/lib/polyphony/extensions/fiber.rb +3 -1
- data/lib/polyphony/extensions/io.rb +4 -0
- data/lib/polyphony/extensions/pipe.rb +4 -0
- data/lib/polyphony/extensions/socket.rb +4 -0
- data/lib/polyphony/version.rb +1 -1
- data/test/test_fiber.rb +5 -2
- metadata +6 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1a1839977c06a8387f6ab48b181fb9fffef43df902b4ca0644ca0e31eec82cc0
+  data.tar.gz: e24c712bfb1e1497adbdfc0cbd892c4b9087a983ebe541791b5beda99dd86cd4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 758b75666ed47b6c0d97abbc18a987cf6c60781f912c2f1ce7a4acb8104bc6279d09a387582f828ffdce10ff0694561be371edf311b8728d90e50b03d6bf1c7f
+  data.tar.gz: 01a69c90ea2e437ac2c58a9936401c5e453a626886046c84aea04a596237b4879a024922f31ba6ddb50697b834ebbcd8d205b5ee20741536e1c22543a7d8b9ea
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,13 @@
+## 0.85 2022-03-13
+
+- Reduce write ops in `IO.gzip` (#77)
+- Add support for read/write method detection for compression/decompression
+  methods (#77)
+- Improve `Fiber#shutdown_all_children`
+- Improve io_uring `wait_event` implementation (share single I/O poll across
+  multiple fibers)
+- Fix io_uring `write` file offset
+
 ## 0.84 2022-03-11
 
 - Implement `IO.tee` (#82)
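The two #77 entries above concern the gzip path: the footer is now folded into the final write instead of being issued as a separate write op, and the compression/decompression methods detect how to read from their source and write to their destination. A minimal usage sketch (assuming IO.gzip is invoked as IO.gzip(src, dest), per the changelog entry and the IO_gzip function further down; the lambda destination relies on the WM_CALL branch added in io_extensions.c, and the file names are illustrative only):

    require 'polyphony'

    # Compress one file into another.
    File.open('report.txt') do |src|
      File.open('report.txt.gz', 'w') { |dest| IO.gzip(src, dest) }
    end

    # A callable may stand in for the destination; each compressed chunk is
    # handed to it as a string.
    chunks = []
    File.open('report.txt') do |src|
      IO.gzip(src, ->(chunk) { chunks << chunk })
    end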
data/Gemfile.lock
CHANGED

data/examples/core/multi_suspend.rb
ADDED
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+require 'bundler/setup'
+require 'polyphony'
+
+module ::Kernel
+  def trace(*args)
+    STDOUT.orig_write(format_trace(args))
+  end
+
+  def format_trace(args)
+    if args.first.is_a?(String)
+      if args.size > 1
+        format("%s: %p\n", args.shift, args)
+      else
+        format("%s\n", args.first)
+      end
+    else
+      format("%p\n", args.size == 1 ? args.first : args)
+    end
+  end
+
+  def monotonic_clock
+    ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+  end
+end
+
+count = 10000
+
+count.times do
+  spin { Polyphony.backend_wait_event(true) }
+end
+
+trace 'sleeping...'
+sleep 1
+
+trace 'shutting down children...'
+Fiber.current.shutdown_all_children
+trace 'done'
data/examples/core/shutdown_all_children.rb
ADDED
@@ -0,0 +1,41 @@
+# frozen_string_literal: true
+
+require 'bundler/setup'
+require 'polyphony'
+
+module ::Kernel
+  def trace(*args)
+    STDOUT.orig_write(format_trace(args))
+  end
+
+  def format_trace(args)
+    if args.first.is_a?(String)
+      if args.size > 1
+        format("%s: %p\n", args.shift, args)
+      else
+        format("%s\n", args.first)
+      end
+    else
+      format("%p\n", args.size == 1 ? args.first : args)
+    end
+  end
+
+  def monotonic_clock
+    ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+  end
+end
+
+count = 10
+
+f = spin do
+  count.times { spin { suspend } }
+  suspend
+end
+
+snooze
+trace children_alive: f.children.size
+
+trace 'shutting down children...'
+f.shutdown_all_children
+
+trace children_alive: f.children.size
data/examples/io/gzip.rb
ADDED

data/examples/io/splice_echo_server.rb
ADDED
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+require 'bundler/setup'
+require 'polyphony'
+
+require 'polyphony'
+
+def handle_echo_client(conn)
+  buffer = Polyphony.pipe
+  spin { buffer.splice_to_eof_from(conn) }
+  spin { conn.splice_to_eof_from(buffer) }
+end
+
+puts "Serving echo on port 1234..."
+TCPServer.new('127.0.0.1', 1234).accept_loop { |c| handle_echo_client(c) }
data/ext/polyphony/backend_io_uring.c
CHANGED
@@ -41,8 +41,10 @@ typedef struct Backend_t {
   op_context_store_t store;
   unsigned int pending_sqes;
   unsigned int prepared_limit;
-  int event_fd;
   int ring_initialized;
+
+  int event_fd;
+  op_context_t *event_fd_ctx;
 } Backend_t;
 
 static void Backend_mark(void *ptr) {
@@ -83,6 +85,7 @@ static VALUE Backend_initialize(VALUE self) {
   backend->pending_sqes = 0;
   backend->ring_initialized = 0;
   backend->event_fd = -1;
+  backend->event_fd_ctx = NULL;
 
   context_store_initialize(&backend->store);
 
@@ -183,13 +186,21 @@ done:
   return;
 }
 
+inline void io_uring_backend_immediate_submit(Backend_t *backend) {
+  backend->pending_sqes = 0;
+  io_uring_submit(&backend->ring);
+}
+
+inline void io_uring_backend_defer_submit(Backend_t *backend) {
+  backend->pending_sqes += 1;
+  if (backend->pending_sqes >= backend->prepared_limit)
+    io_uring_backend_immediate_submit(backend);
+}
+
 void io_uring_backend_poll(Backend_t *backend) {
   poll_context_t poll_ctx;
   poll_ctx.ring = &backend->ring;
-  if (backend->pending_sqes) {
-    backend->pending_sqes = 0;
-    io_uring_submit(&backend->ring);
-  }
+  if (backend->pending_sqes) io_uring_backend_immediate_submit(backend);
 
 wait_cqe:
   backend->base.currently_polling = 1;
@@ -211,10 +222,7 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
 
   backend->base.poll_count++;
 
-  if (!is_blocking && backend->pending_sqes) {
-    backend->pending_sqes = 0;
-    io_uring_submit(&backend->ring);
-  }
+  if (!is_blocking && backend->pending_sqes) io_uring_backend_immediate_submit(backend);
 
   COND_TRACE(&backend->base, 2, SYM_enter_poll, rb_fiber_current());
 
@@ -263,8 +271,7 @@ VALUE Backend_wakeup(VALUE self) {
     // NOP which would cause the io_uring_enter syscall to return
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_nop(sqe);
-    backend->pending_sqes = 0;
-    io_uring_submit(&backend->ring);
+    io_uring_backend_immediate_submit(backend);
 
     return Qtrue;
   }
@@ -272,14 +279,6 @@ VALUE Backend_wakeup(VALUE self) {
   return Qnil;
 }
 
-inline void io_uring_backend_defer_submit(Backend_t *backend) {
-  backend->pending_sqes += 1;
-  if (backend->pending_sqes >= backend->prepared_limit) {
-    backend->pending_sqes = 0;
-    io_uring_submit(&backend->ring);
-  }
-}
-
 int io_uring_backend_defer_submit_and_await(
   Backend_t *backend,
   struct io_uring_sqe *sqe,
@@ -305,8 +304,7 @@ int io_uring_backend_defer_submit_and_await(
     ctx->result = -ECANCELED;
     sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_cancel(sqe, (__u64)ctx, 0);
-    backend->pending_sqes = 0;
-    io_uring_submit(&backend->ring);
+    io_uring_backend_immediate_submit(backend);
   }
 
   if (value_ptr) (*value_ptr) = switchpoint_result;
@@ -552,7 +550,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
     int result;
     int completed;
 
-    io_uring_prep_write(sqe, fd, buffer.ptr, left,
+    io_uring_prep_write(sqe, fd, buffer.ptr, left, -1);
 
     result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     completed = context_store_release(&backend->store, ctx);
@@ -1161,8 +1159,7 @@ VALUE Backend_timeout_ensure(VALUE arg) {
     // op was not completed, so we need to cancel it
     sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
     io_uring_prep_cancel(sqe, (__u64)timeout_ctx->ctx, 0);
-    timeout_ctx->backend->pending_sqes = 0;
-    io_uring_submit(&timeout_ctx->backend->ring);
+    io_uring_backend_immediate_submit(timeout_ctx->backend);
   }
   context_store_release(&timeout_ctx->backend->store, timeout_ctx->ctx);
   return Qnil;
@@ -1237,6 +1234,13 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
   return rb_ary_new_from_args(2, INT2NUM(ret), INT2NUM(WEXITSTATUS(status)));
 }
 
+/*
+Blocks a fiber indefinitely. This is accomplished by using an eventfd that will
+never be signalled. The eventfd is needed so we could do a blocking polling for
+completions even when no other I/O operations are pending.
+
+The eventfd is refcounted in order to allow multiple fibers to be blocked.
+*/
 VALUE Backend_wait_event(VALUE self, VALUE raise) {
   Backend_t *backend;
   VALUE resume_value;
@@ -1251,7 +1255,32 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
     }
   }
 
-
+  if (!backend->event_fd_ctx) {
+    struct io_uring_sqe *sqe;
+
+    backend->event_fd_ctx = context_store_acquire(&backend->store, OP_POLL);
+    sqe = io_uring_get_sqe(&backend->ring);
+    io_uring_prep_poll_add(sqe, backend->event_fd, POLLIN);
+    backend->base.op_count++;
+    io_uring_sqe_set_data(sqe, backend->event_fd_ctx);
+    io_uring_backend_defer_submit(backend);
+  }
+  else
+    backend->event_fd_ctx->ref_count += 1;
+
+  resume_value = backend_await((struct Backend_base *)backend);
+  context_store_release(&backend->store, backend->event_fd_ctx);
+
+  if (backend->event_fd_ctx->ref_count == 1) {
+
+    // last fiber to use the eventfd, so we cancel the ongoing poll
+    struct io_uring_sqe *sqe;
+    sqe = io_uring_get_sqe(&backend->ring);
+    io_uring_prep_cancel(sqe, (__u64)backend->event_fd_ctx, 0);
+    io_uring_backend_immediate_submit(backend);
+    backend->event_fd_ctx = NULL;
+  }
+
   if (RTEST(raise)) RAISE_IF_EXCEPTION(resume_value);
   RB_GC_GUARD(resume_value);
   return resume_value;
@@ -1354,8 +1383,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
       ctx->result = -ECANCELED;
       sqe = io_uring_get_sqe(&backend->ring);
       io_uring_prep_cancel(sqe, (__u64)ctx, 0);
-      backend->pending_sqes = 0;
-      io_uring_submit(&backend->ring);
+      io_uring_backend_immediate_submit(backend);
     }
     else {
       ctx->ref_count = 1;
@@ -1385,8 +1413,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     ctx->result = -ECANCELED;
     sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_cancel(sqe, (__u64)ctx, 0);
-    backend->pending_sqes = 0;
-    io_uring_submit(&backend->ring);
+    io_uring_backend_immediate_submit(backend);
     RAISE_IF_EXCEPTION(resume_value);
     return resume_value;
   }
@@ -1452,8 +1479,7 @@ static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
   ctx->result = -ECANCELED;
   sqe = io_uring_get_sqe(&backend->ring);
   io_uring_prep_cancel(sqe, (__u64)ctx, 0);
-  backend->pending_sqes = 0;
-  io_uring_submit(&backend->ring);
+  io_uring_backend_immediate_submit(backend);
 }
 
 static inline int splice_chunks_await_ops(
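The refcounted event_fd_ctx added in Backend_wait_event means any number of fibers can block on the backend's eventfd while only a single POLLIN SQE is kept in flight. A minimal Ruby sketch of the behaviour this enables (examples/core/multi_suspend.rb, added in this release, exercises the same path with 10,000 fibers):

    require 'polyphony'

    # Each fiber blocks on the backend's eventfd; internally they all share
    # one io_uring poll operation.
    3.times do
      spin { Polyphony.backend_wait_event(true) }
    end

    sleep 0.1                           # let the fibers block
    Fiber.current.shutdown_all_children # wakes and terminates all of them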
data/ext/polyphony/io_extensions.c
CHANGED
@@ -13,24 +13,68 @@
 
 ID ID_at;
 ID ID_read_method;
+ID ID_readpartial;
 ID ID_to_i;
 ID ID_write_method;
+ID ID_write;
 
+VALUE SYM_backend_read;
+VALUE SYM_backend_recv;
+VALUE SYM_backend_send;
+VALUE SYM_backend_write;
+VALUE SYM_call;
+VALUE SYM_comment;
 VALUE SYM_mtime;
 VALUE SYM_orig_name;
-VALUE
+VALUE SYM_readpartial;
 
 enum read_method {
   RM_BACKEND_READ,
-  RM_BACKEND_RECV
+  RM_BACKEND_RECV,
+  RM_READPARTIAL,
+  RM_CALL
 };
 
 enum write_method {
   WM_BACKEND_WRITE,
-  WM_BACKEND_SEND
+  WM_BACKEND_SEND,
+  WM_WRITE,
+  WM_CALL
 };
 
-
+enum read_method detect_read_method(VALUE io) {
+  if (rb_respond_to(io, ID_read_method)) {
+    VALUE method = rb_funcall(io, ID_read_method, 0);
+    if (method == SYM_readpartial) return RM_READPARTIAL;
+    if (method == SYM_backend_read) return RM_BACKEND_READ;
+    if (method == SYM_backend_recv) return RM_BACKEND_RECV;
+    if (method == SYM_call) return RM_CALL;
+
+    rb_raise(rb_eRuntimeError, "Given io instance uses unsupported read method");
+  }
+  else if (rb_respond_to(io, ID_call))
+    return RM_CALL;
+  else
+    rb_raise(rb_eRuntimeError, "Given io instance should be a callable or respond to #__read_method__");
+}
+
+enum write_method detect_write_method(VALUE io) {
+  if (rb_respond_to(io, ID_write_method)) {
+    VALUE method = rb_funcall(io, ID_write_method, 0);
+    if (method == SYM_readpartial) return WM_WRITE;
+    if (method == SYM_backend_write) return WM_BACKEND_WRITE;
+    if (method == SYM_backend_send) return WM_BACKEND_SEND;
+    if (method == SYM_call) return WM_CALL;
+
+    rb_raise(rb_eRuntimeError, "Given io instance uses unsupported write method");
+  }
+  else if (rb_respond_to(io, ID_call))
+    return WM_CALL;
+  else
+    rb_raise(rb_eRuntimeError, "Given io instance should be a callable or respond to #__write_method__");
+}
+
+#define PRINT_BUFFER(prefix, ptr, len) { \
   printf("%s buffer (%d): ", prefix, (int)len); \
   for (int i = 0; i < len; i++) printf("%02X ", ptr[i]); \
   printf("\n"); \
@@ -40,6 +84,7 @@ enum write_method {
 #define MAX_WRITE_STR_LEN 16384
 #define DEFAULT_LEVEL 9
 #define DEFAULT_MEM_LEVEL 8
+#define GZIP_FOOTER_LEN 8
 
 /* from zutil.h */
 #define OS_MSDOS 0x00
@@ -63,14 +108,65 @@ enum write_method {
 #define OS_CODE OS_UNIX
 #endif
 
+
 static inline int read_to_raw_buffer(VALUE backend, VALUE io, enum read_method method, struct raw_buffer *buffer) {
-
-
+  switch (method) {
+    case RM_BACKEND_READ: {
+      VALUE len = Backend_read(backend, io, PTR2FIX(buffer), Qnil, Qfalse, INT2FIX(0));
+      return (len == Qnil) ? 0 : FIX2INT(len);
+    }
+    case RM_BACKEND_RECV: {
+      VALUE len = Backend_recv(backend, io, PTR2FIX(buffer), Qnil, INT2FIX(0));
+      return (len == Qnil) ? 0 : FIX2INT(len);
+    }
+    case RM_READPARTIAL: {
+      VALUE str = rb_funcall(io, ID_readpartial, 1, INT2FIX(buffer->len));
+      int len = RSTRING_LEN(str);
+      if (len) memcpy(buffer->ptr, RSTRING_PTR(str), len);
+      RB_GC_GUARD(str);
+      return len;
+    }
+    case RM_CALL: {
+      VALUE str = rb_funcall(io, ID_call, INT2FIX(buffer->len));
+      if (TYPE(str) != T_STRING)
+        rb_raise(rb_eRuntimeError, "io#call must return a string");
+      int len = RSTRING_LEN(str);
+      if (len > buffer->len) len = buffer->len;
+      if (len) memcpy(buffer->ptr, RSTRING_PTR(str), len);
+      RB_GC_GUARD(str);
+      return len;
+    }
+  }
 }
 
 static inline int write_from_raw_buffer(VALUE backend, VALUE io, enum write_method method, struct raw_buffer *buffer) {
-
-
+  printf("write_from_raw_buffer len: %d\n", buffer->len);
+  switch (method) {
+    case WM_BACKEND_WRITE: {
+      VALUE len = Backend_write(backend, io, PTR2FIX(buffer));
+      return FIX2INT(len);
+    }
+    case WM_BACKEND_SEND: {
+      VALUE len = Backend_send(backend, io, PTR2FIX(buffer), INT2FIX(0));
+      return FIX2INT(len);
+    }
+    case WM_WRITE: {
+      VALUE str = rb_str_new(0, buffer->len);
+      memcpy(RSTRING_PTR(str), buffer->ptr, buffer->len);
+      rb_str_modify_expand(str, buffer->len);
+      rb_funcall(io, ID_write, 1, str);
+      RB_GC_GUARD(str);
+      return buffer->len;
+    }
+    case WM_CALL: {
+      VALUE str = rb_str_new(0, buffer->len);
+      memcpy(RSTRING_PTR(str), buffer->ptr, buffer->len);
+      rb_str_modify_expand(str, buffer->len);
+      rb_funcall(io, ID_call, 1, str);
+      RB_GC_GUARD(str);
+      return buffer->len;
+    }
+  }
 }
 
 static inline int write_c_string_from_str(VALUE str, struct raw_buffer *buffer) {
@@ -165,12 +261,12 @@ int gzip_prepare_header(struct gzip_header_ctx *ctx, char *buffer, int maxlen) {
 }
 
 int gzip_prepare_footer(unsigned long crc32, unsigned long total_in, char *buffer, int maxlen) {
-  assert(maxlen >=
+  assert(maxlen >= GZIP_FOOTER_LEN);
 
   gzfile_set32(crc32, buffer);
   gzfile_set32(total_in, buffer + 4);
 
-  return
+  return GZIP_FOOTER_LEN;
 }
 
 enum stream_mode {
@@ -183,7 +279,11 @@ struct z_stream_ctx {
   VALUE src;
   VALUE dest;
 
+  enum read_method src_read_method;
+  enum write_method dest_write_method;
+
   enum stream_mode mode;
+  int f_gzip_footer; // should a gzip footer be generated
   z_stream strm;
 
   unsigned char in[CHUNK];
@@ -216,11 +316,11 @@ void gzip_read_header(struct z_stream_ctx *ctx, struct gzip_header_ctx *header_c
   int flags;
 
   while (ctx->in_total < 10) {
-    int read = read_to_raw_buffer(ctx->backend, ctx->src,
+    int read = read_to_raw_buffer(ctx->backend, ctx->src, ctx->src_read_method, &in_buffer);
     if (read == 0) goto error;
     ctx->in_total += read;
   }
-  //
+  // PRINT_BUFFER("read gzip header", ctx->in, ctx->in_total);
   if (ctx->in[0] != GZ_MAGIC1) goto error;
   if (ctx->in[1] != GZ_MAGIC2) goto error;
   if (ctx->in[2] != GZ_METHOD_DEFLATE) goto error;
@@ -245,9 +345,6 @@ error:
   rb_raise(rb_eRuntimeError, "Invalid gzip header");
 }
 
-// void gzip_read_footer(struct z_stream_ctx *ctx, struct gzip_footer_ctx *footer_ctx) {
-// }
-
 static inline int z_stream_write_out(struct z_stream_ctx *ctx, zlib_func fun, int eof) {
   int ret;
   int written;
@@ -260,8 +357,14 @@ static inline int z_stream_write_out(struct z_stream_ctx *ctx, zlib_func fun, in
   written = avail_out_pre - ctx->strm.avail_out;
   out_buffer.ptr = ctx->out;
   out_buffer.len = ctx->out_pos + written;
+
+  if (eof && ctx->f_gzip_footer && (CHUNK - out_buffer.len >= GZIP_FOOTER_LEN)) {
+    gzip_prepare_footer(ctx->crc32, ctx->in_total, out_buffer.ptr + out_buffer.len, 8);
+    out_buffer.len += GZIP_FOOTER_LEN;
+  }
+
   if (out_buffer.len) {
-    ret = write_from_raw_buffer(ctx->backend, ctx->dest,
+    ret = write_from_raw_buffer(ctx->backend, ctx->dest, ctx->dest_write_method, &out_buffer);
     if (ctx->mode == SM_INFLATE)
       ctx->crc32 = crc32(ctx->crc32, out_buffer.ptr + ctx->out_pos, written);
     ctx->out_total += ret - ctx->out_pos;
@@ -271,7 +374,7 @@ static inline int z_stream_write_out(struct z_stream_ctx *ctx, zlib_func fun, in
 }
 
 void z_stream_io_loop(struct z_stream_ctx *ctx) {
-  zlib_func fun = (ctx->mode == SM_DEFLATE) ? deflate : inflate;
+  zlib_func fun = (ctx->mode == SM_DEFLATE) ? deflate : inflate;
 
   if (ctx->in_total > ctx->in_pos) {
     // In bytes already read for parsing gzip header, so we need to process the
@@ -289,7 +392,7 @@ void z_stream_io_loop(struct z_stream_ctx *ctx) {
   while (1) {
     struct raw_buffer in_buffer = {ctx->in, CHUNK};
     ctx->strm.next_in = ctx->in;
-    int read_len = ctx->strm.avail_in = read_to_raw_buffer(ctx->backend, ctx->src,
+    int read_len = ctx->strm.avail_in = read_to_raw_buffer(ctx->backend, ctx->src, ctx->src_read_method, &in_buffer);
     if (!read_len) break;
     int eof = read_len < CHUNK;
 
@@ -297,7 +400,7 @@ void z_stream_io_loop(struct z_stream_ctx *ctx) {
       ctx->crc32 = crc32(ctx->crc32, ctx->in, read_len);
     ctx->in_total += read_len;
 
-    //
+    // PRINT_BUFFER("read stream", ctx->in, read_len);
 
     while (1) {
       // z_stream_write_out returns strm.avail_out. If there's still room in the
@@ -318,7 +421,10 @@ static inline void setup_ctx(struct z_stream_ctx *ctx, enum stream_mode mode, VA
   ctx->backend = BACKEND();
   ctx->src = src;
   ctx->dest = dest;
+  ctx->src_read_method = detect_read_method(src);
+  ctx->dest_write_method = detect_write_method(dest);
   ctx->mode = mode;
+  ctx->f_gzip_footer = 0;
   ctx->strm.zalloc = Z_NULL;
   ctx->strm.zfree = Z_NULL;
   ctx->strm.opaque = Z_NULL;
@@ -349,14 +455,12 @@ VALUE IO_gzip(int argc, VALUE *argv, VALUE self) {
   int ret;
 
   setup_ctx(&ctx, SM_DEFLATE, src, dest);
+  ctx.f_gzip_footer = 1; // write gzip footer
   ctx.out_pos = gzip_prepare_header(&header_ctx, ctx.out, sizeof(ctx.out));
 
   ret = deflateInit2(&ctx.strm, level, Z_DEFLATED, -MAX_WBITS, DEFAULT_MEM_LEVEL, Z_DEFAULT_STRATEGY);
   if (ret != Z_OK) return INT2FIX(ret);
   z_stream_io_loop(&ctx);
-  int footer_len = gzip_prepare_footer(ctx.crc32, ctx.in_total, ctx.out, sizeof(ctx.out));
-  struct raw_buffer footer_buffer = {ctx.out, footer_len};
-  write_from_raw_buffer(ctx.backend, dest, WM_BACKEND_WRITE, &footer_buffer);
   deflateEnd(&ctx.strm);
 
   return INT2FIX(ctx.out_total);
@@ -431,10 +535,18 @@ void Init_IOExtensions() {
 
   ID_at = rb_intern("at");
   ID_read_method = rb_intern("__read_method__");
+  ID_readpartial = rb_intern("readpartial");
   ID_to_i = rb_intern("to_i");
   ID_write_method = rb_intern("__write_method__");
-
-
-
-
+  ID_write = rb_intern("write");
+
+  SYM_backend_read = ID2SYM(rb_intern("backend_read"));
+  SYM_backend_recv = ID2SYM(rb_intern("backend_recv"));
+  SYM_backend_send = ID2SYM(rb_intern("backend_send"));
+  SYM_backend_write = ID2SYM(rb_intern("backend_write"));
+  SYM_call = ID2SYM(rb_intern("call"));
+  SYM_comment = ID2SYM(rb_intern("comment"));
+  SYM_mtime = ID2SYM(rb_intern("mtime"));
+  SYM_orig_name = ID2SYM(rb_intern("orig_name"));
+  SYM_readpartial = ID2SYM(rb_intern("readpartial"));
 }
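As detect_read_method and detect_write_method above show, a source or destination passed to the compression/decompression methods may be a plain callable, or any object that declares how it should be accessed via #__read_method__ / #__write_method__. A sketch under those assumptions (the wrapper class and file name below are hypothetical, not part of the gem):

    require 'polyphony'

    # Hypothetical source wrapper, read via #readpartial (RM_READPARTIAL).
    class ChunkSource
      def initialize(io)
        @io = io
      end

      def __read_method__
        :readpartial
      end

      def readpartial(maxlen)
        @io.readpartial(maxlen)
      end
    end

    # The destination here is a callable (WM_CALL): each compressed chunk is
    # passed to it as a string.
    compressed = []
    File.open('report.txt') do |f|
      IO.gzip(ChunkSource.new(f), ->(chunk) { compressed << chunk })
    end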
data/lib/polyphony/extensions/fiber.rb
CHANGED
@@ -390,12 +390,14 @@ module Polyphony
    def shutdown_all_children(graceful = false)
      return self unless @children
 
+     pending = []
      @children.keys.each do |c|
        next if c.dead?
 
        c.terminate(graceful)
-       c
+       pending << c
      end
+     Fiber.await(*pending)
      self
    end
 
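A short usage sketch of the reworked method: termination is requested for every live child first, and only then does Fiber.await block on all of them, so the children wind down concurrently and shutdown_all_children returns once they are all dead (the ensure clause below is ordinary Ruby, shown only to make the termination visible):

    require 'polyphony'

    supervisor = spin do
      3.times do |i|
        spin do
          suspend
        ensure
          puts "child #{i} terminated"
        end
      end
      suspend
    end

    snooze                              # let the children start
    supervisor.shutdown_all_children
    puts supervisor.children.size       # => 0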
data/lib/polyphony/version.rb
CHANGED
data/test/test_fiber.rb
CHANGED
@@ -1190,13 +1190,16 @@ end
 
 class ChildrenTerminationTest < MiniTest::Test
   def test_shutdown_all_children
+    # TODO: check why this test fails when count = 1000
+    count = 100
+
     f = spin do
-
+      count.times { spin { suspend } }
       suspend
     end
 
     snooze
-    assert_equal
+    assert_equal count, f.children.size
 
     f.shutdown_all_children
     assert_equal 0, f.children.size
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: polyphony
 version: !ruby/object:Gem::Version
-  version: 0.84.1
+  version: '0.85'
 platform: ruby
 authors:
 - Sharon Rosner
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2022-03-
+date: 2022-03-13 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake-compiler
@@ -229,6 +229,7 @@ files:
 - examples/core/idle_gc.rb
 - examples/core/interrupt.rb
 - examples/core/message_based_supervision.rb
+- examples/core/multi_suspend.rb
 - examples/core/nested.rb
 - examples/core/pingpong.rb
 - examples/core/queue.rb
@@ -236,6 +237,7 @@ files:
 - examples/core/recurrent-timer.rb
 - examples/core/resource_delegate.rb
 - examples/core/ring.rb
+- examples/core/shutdown_all_children.rb
 - examples/core/spin.rb
 - examples/core/spin_error_backtrace.rb
 - examples/core/supervise-process.rb
@@ -258,6 +260,7 @@ files:
 - examples/io/echo_server.rb
 - examples/io/echo_server_with_timeout.rb
 - examples/io/echo_stdin.rb
+- examples/io/gzip.rb
 - examples/io/happy-eyeballs.rb
 - examples/io/httparty.rb
 - examples/io/https_server.rb
@@ -270,6 +273,7 @@ files:
 - examples/io/raw.rb
 - examples/io/reline.rb
 - examples/io/splice_chunks.rb
+- examples/io/splice_echo_server.rb
 - examples/io/stdio.rb
 - examples/io/system.rb
 - examples/io/tcp_proxy.rb
|