polyphony 0.55.0 → 0.59.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +26 -0
- data/Gemfile.lock +1 -1
- data/examples/core/idle_gc.rb +21 -0
- data/examples/io/pipe.rb +11 -0
- data/examples/io/splice_chunks.rb +29 -0
- data/ext/polyphony/backend_common.c +288 -0
- data/ext/polyphony/backend_common.h +49 -130
- data/ext/polyphony/backend_io_uring.c +263 -54
- data/ext/polyphony/backend_io_uring_context.c +2 -3
- data/ext/polyphony/backend_libev.c +466 -84
- data/ext/polyphony/fiber.c +0 -2
- data/ext/polyphony/polyphony.c +17 -22
- data/ext/polyphony/polyphony.h +8 -16
- data/ext/polyphony/polyphony_ext.c +0 -4
- data/ext/polyphony/runqueue.c +17 -82
- data/ext/polyphony/runqueue.h +27 -0
- data/ext/polyphony/thread.c +10 -94
- data/lib/polyphony/core/timer.rb +2 -2
- data/lib/polyphony/extensions/fiber.rb +2 -2
- data/lib/polyphony/extensions/thread.rb +8 -0
- data/lib/polyphony/version.rb +1 -1
- data/test/test_backend.rb +91 -0
- data/test/test_thread.rb +57 -11
- data/test/test_timer.rb +7 -7
- data/test/test_trace.rb +27 -49
- metadata +7 -4
- data/ext/polyphony/tracing.c +0 -11
- data/lib/polyphony/adapters/trace.rb +0 -138
```diff
--- a/data/ext/polyphony/backend_io_uring.c
+++ b/data/ext/polyphony/backend_io_uring.c
@@ -4,7 +4,6 @@
 #include <sys/socket.h>
 #include <sys/uio.h>
 #include <unistd.h>
-#include <fcntl.h>
 #include <netinet/in.h>
 #include <arpa/inet.h>
 #include <stdnoreturn.h>
```
```diff
@@ -19,6 +18,7 @@
 #include "backend_io_uring_context.h"
 #include "ruby/thread.h"
 #include "ruby/io.h"
+#include "backend_common.h"
 
 VALUE SYM_io_uring;
 VALUE SYM_send;
```
```diff
@@ -26,34 +26,13 @@ VALUE SYM_splice;
 VALUE SYM_write;
 
 #ifdef POLYPHONY_UNSET_NONBLOCK
-
-
-// One of the changes introduced in Ruby 3.0 as part of the work on the
-// FiberScheduler interface is that all created sockets are marked as
-// non-blocking. This prevents the io_uring backend from working correctly,
-// since it will return an EAGAIN error just like a normal syscall. So here
-// instead of setting O_NONBLOCK (which is required for the libev backend), we
-// unset it.
-inline void io_unset_nonblock(rb_io_t *fptr, VALUE io) {
-  VALUE is_nonblocking = rb_ivar_get(io, ID_ivar_is_nonblocking);
-  if (is_nonblocking == Qfalse) return;
-
-  rb_ivar_set(io, ID_ivar_is_nonblocking, Qfalse);
-
-  int oflags = fcntl(fptr->fd, F_GETFL);
-  if ((oflags == -1) && (oflags & O_NONBLOCK)) return;
-  oflags &= !O_NONBLOCK;
-  fcntl(fptr->fd, F_SETFL, oflags);
-}
+#define io_unset_nonblock(fptr, io) io_verify_blocking_mode(fptr, io, Qtrue)
 #else
-// NOP
 #define io_unset_nonblock(fptr, io)
 #endif
 
 typedef struct Backend_t {
-
-  unsigned int currently_polling;
-  unsigned int pending_count;
+  struct Backend_base base;
 
   // implementation-specific fields
   struct io_uring ring;
```
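Two notes on this hunk. The Ruby 3.0 workaround described in the removed comment still applies, but the per-backend helper is replaced by the shared io_verify_blocking_mode() provided by the new backend_common unit (see backend_common.c/h in the file list above). The removed helper also carried a latent bug: `oflags &= !O_NONBLOCK` applies logical rather than bitwise negation, so it zeroed every status flag instead of clearing just the O_NONBLOCK bit. A minimal corrected sketch of such a toggle (illustrative, not the gem's code):

```c
#include <fcntl.h>

// Set or clear O_NONBLOCK on fd, touching no other status flags.
static int fd_set_blocking(int fd, int blocking) {
  int oflags = fcntl(fd, F_GETFL);
  if (oflags == -1) return -1;
  if (blocking)
    oflags &= ~O_NONBLOCK;   // bitwise NOT: clear only the O_NONBLOCK bit
  else
    oflags |= O_NONBLOCK;
  return fcntl(fd, F_SETFL, oflags);
}
```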
```diff
@@ -63,7 +42,15 @@ typedef struct Backend_t {
   int event_fd;
 } Backend_t;
 
-
+static void Backend_mark(void *ptr) {
+  Backend_t *backend = ptr;
+  backend_base_mark(&backend->base);
+}
+
+static void Backend_free(void *ptr) {
+  Backend_t *backend = ptr;
+  backend_base_finalize(&backend->base);
+}
 
 static size_t Backend_size(const void *ptr) {
   return sizeof(Backend_t);
```
```diff
@@ -71,7 +58,7 @@ static size_t Backend_size(const void *ptr) {
 
 static const rb_data_type_t Backend_type = {
   "IOUringBackend",
-  {0, 0, Backend_size,},
+  {Backend_mark, Backend_free, Backend_size,},
   0, 0, RUBY_TYPED_FREE_IMMEDIATELY
 };
 
```
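The mark/free hooks added above plug into Ruby's TypedData protocol: dmark reports the Ruby VALUEs the C struct keeps alive (needed here because the embedded Backend_base stores objects such as the trace and idle procs set later in this diff), and dfree runs when the object is collected. A generic, self-contained sketch with hypothetical MyThing names:

```c
#include <ruby.h>

typedef struct { VALUE callback; } MyThing;

static void MyThing_mark(void *ptr) {
  MyThing *t = ptr;
  rb_gc_mark(t->callback);              // keep the held VALUE alive across GC
}

static void MyThing_free(void *ptr) {
  xfree(ptr);                           // release C-side resources
}

static size_t MyThing_size(const void *ptr) {
  return sizeof(MyThing);
}

static const rb_data_type_t MyThing_type = {
  "MyThing",
  {MyThing_mark, MyThing_free, MyThing_size,},
  0, 0, RUBY_TYPED_FREE_IMMEDIATELY     // dfree never calls back into Ruby
};

static VALUE MyThing_allocate(VALUE klass) {
  MyThing *t;
  VALUE obj = TypedData_Make_Struct(klass, MyThing, &MyThing_type, t);
  t->callback = Qnil;
  return obj;
}
```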
```diff
@@ -88,8 +75,7 @@ static VALUE Backend_initialize(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  backend->currently_polling = 0;
-  backend->pending_count = 0;
+  backend_base_initialize(&backend->base);
   backend->pending_sqes = 0;
   backend->prepared_limit = 2048;
 
```
```diff
@@ -117,20 +103,13 @@ VALUE Backend_post_fork(VALUE self) {
   io_uring_queue_exit(&backend->ring);
   io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
   context_store_free(&backend->store);
-  backend->currently_polling = 0;
-  backend->pending_count = 0;
+  backend->base.currently_polling = 0;
+  backend->base.pending_count = 0;
   backend->pending_sqes = 0;
 
   return self;
 }
 
-unsigned int Backend_pending_count(VALUE self) {
-  Backend_t *backend;
-  GetBackend(self, backend);
-
-  return backend->pending_count;
-}
-
 typedef struct poll_context {
   struct io_uring *ring;
   struct io_uring_cqe *cqe;
```
```diff
@@ -154,6 +133,7 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
   op_context_t *ctx = io_uring_cqe_get_data(cqe);
   if (!ctx) return;
 
+  // printf("cqe ctx %p id: %d result: %d (%s, ref_count: %d)\n", ctx, ctx->id, cqe->res, op_type_to_str(ctx->type), ctx->ref_count);
   ctx->result = cqe->res;
   if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
     Fiber_make_runnable(ctx->fiber, ctx->resume_value);
```
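This handler is driven from two paths: the blocking poll below and io_uring_backend_handle_ready_cqes(), which drains completions that are already available without blocking. A sketch of that drain pattern (illustrative, not the gem's exact code):

```c
#include <liburing.h>

// Consume every CQE that has already completed, without blocking.
static void drain_ready_cqes(struct io_uring *ring) {
  struct io_uring_cqe *cqe;
  while (io_uring_peek_cqe(ring, &cqe) == 0) {   // returns -EAGAIN when none ready
    void *ctx = io_uring_cqe_get_data(cqe);
    if (ctx) {
      // dispatch on ctx: in the gem, this resumes the waiting fiber
    }
    io_uring_cqe_seen(ring, cqe);                // mark the CQE as consumed
  }
}
```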
```diff
@@ -197,38 +177,70 @@ void io_uring_backend_poll(Backend_t *backend) {
     io_uring_submit(&backend->ring);
   }
 
-  backend->currently_polling = 1;
+  backend->base.currently_polling = 1;
   rb_thread_call_without_gvl(io_uring_backend_poll_without_gvl, (void *)&poll_ctx, RUBY_UBF_IO, 0);
-  backend->currently_polling = 0;
+  backend->base.currently_polling = 0;
   if (poll_ctx.result < 0) return;
 
   io_uring_backend_handle_completion(poll_ctx.cqe, backend);
   io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
 }
 
-VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue) {
-  int is_nowait = nowait == Qtrue;
+inline VALUE Backend_poll(VALUE self, VALUE blocking) {
+  int is_blocking = blocking == Qtrue;
   Backend_t *backend;
   GetBackend(self, backend);
 
-  if (is_nowait && backend->pending_sqes) {
+  if (!is_blocking && backend->pending_sqes) {
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
   }
 
-  COND_TRACE(2, SYM_fiber_event_poll_enter, current_fiber);
-  if (!is_nowait) io_uring_backend_poll(backend);
+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
+  if (is_blocking) io_uring_backend_poll(backend);
   io_uring_backend_handle_ready_cqes(backend);
-  COND_TRACE(2, SYM_fiber_event_poll_leave, current_fiber);
+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
 
   return self;
 }
 
+inline void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  backend_base_schedule_fiber(thread, self, &backend->base, fiber, value, prioritize);
+}
+
+inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  runqueue_delete(&backend->base.runqueue, fiber);
+}
+
+inline VALUE Backend_switch_fiber(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  return backend_base_switch_fiber(self, &backend->base);
+}
+
+inline struct backend_stats Backend_stats(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  return (struct backend_stats){
+    .scheduled_fibers = runqueue_len(&backend->base.runqueue),
+    .waiting_fibers = 0,
+    .pending_ops = backend->base.pending_count
+  };
+}
+
 VALUE Backend_wakeup(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  if (backend->currently_polling) {
+  if (backend->base.currently_polling) {
     // Since we're currently blocking while waiting for a completion, we add a
     // NOP which would cause the io_uring_enter syscall to return
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
```
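The hunk above also shows Backend_wakeup's trick for interrupting a blocked poll from another thread: queue a NOP so the pending io_uring_enter returns, and give it no context so the completion handler (which bails on NULL user data) ignores it. Standalone sketch:

```c
#include <liburing.h>

// Wake a thread blocked in io_uring_wait_cqe()/io_uring_enter() by
// submitting a no-op whose completion unblocks the wait immediately.
static void ring_wakeup(struct io_uring *ring) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
  if (!sqe) return;                   // SQ ring full: a submit is pending anyway
  io_uring_prep_nop(sqe);
  io_uring_sqe_set_data(sqe, NULL);   // NULL ctx: ignored by the handler
  io_uring_submit(ring);
}
```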
```diff
@@ -259,11 +271,13 @@ int io_uring_backend_defer_submit_and_await(
 {
   VALUE switchpoint_result = Qnil;
 
-  io_uring_sqe_set_data(sqe, ctx);
-  io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  if (sqe) {
+    io_uring_sqe_set_data(sqe, ctx);
+    io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  }
   io_uring_backend_defer_submit(backend);
 
-  switchpoint_result = backend_await(backend);
+  switchpoint_result = backend_await((struct Backend_base *)backend);
 
   if (ctx->ref_count > 1) {
     // op was not completed (an exception was raised), so we need to cancel it
```
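The new NULL guard lets this helper double as the await path for multi-SQE chains: Backend_chain and Backend_splice_chunks (below) tag each SQE with the shared context themselves and pass sqe = 0, so only single-op callers get the data/IOSQE_ASYNC treatment here. Schematically, as used within this file (illustrative wrappers, not the gem's API):

```c
// Single op: the helper tags the SQE with ctx and flags it IOSQE_ASYNC.
static int await_single_op(Backend_t *backend, struct io_uring_sqe *sqe,
                           op_context_t *ctx, VALUE *resume_value) {
  return io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
}

// Chain: every SQE was already tagged with the shared ctx, so no SQE is
// passed in; the helper just submits pending SQEs and suspends the fiber.
static int await_chain(Backend_t *backend, op_context_t *ctx, VALUE *resume_value) {
  return io_uring_backend_defer_submit_and_await(backend, 0, ctx, resume_value);
}
```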
```diff
@@ -466,7 +480,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-    io_uring_prep_write(sqe, fptr->fd, buf, left, -1);
+    io_uring_prep_write(sqe, fptr->fd, buf, left, 0);
 
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
```
```diff
@@ -926,7 +940,6 @@ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value)
 
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   io_uring_prep_timeout(sqe, &ts, 0, 0);
-
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
   return context_store_release(&backend->store, ctx);
 }
```
```diff
@@ -1085,7 +1098,7 @@ struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str)
   long len = RSTRING_LEN(str);
 
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-  io_uring_prep_write(sqe, fptr->fd, buf, len, -1);
+  io_uring_prep_write(sqe, fptr->fd, buf, len, 0);
   return sqe;
 }
 
```
```diff
@@ -1177,7 +1190,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
 
   ctx->ref_count = sqe_count + 1;
   io_uring_backend_defer_submit(backend);
-  resume_value = backend_await(backend);
+  resume_value = backend_await((struct Backend_base *)backend);
   int result = ctx->result;
   int completed = context_store_release(&backend->store, ctx);
   if (!completed) {
```
```diff
@@ -1195,6 +1208,197 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   return INT2NUM(result);
 }
 
+VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend->base.idle_gc_period = NUM2DBL(period);
+  backend->base.idle_gc_last_time = current_time();
+  return self;
+}
+
+VALUE Backend_idle_proc_set(VALUE self, VALUE block) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend->base.idle_proc = block;
+  return self;
+}
+
+inline VALUE Backend_run_idle_tasks(VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend_run_idle_tasks(&backend->base);
+  return self;
+}
+
+static inline void splice_chunks_prep_write(op_context_t *ctx, struct io_uring_sqe *sqe, int fd, VALUE str) {
+  char *buf = RSTRING_PTR(str);
+  int len = RSTRING_LEN(str);
+  io_uring_prep_write(sqe, fd, buf, len, 0);
+  // io_uring_prep_send(sqe, fd, buf, len, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+}
+
+static inline void splice_chunks_prep_splice(op_context_t *ctx, struct io_uring_sqe *sqe, int src, int dest, int maxlen) {
+  io_uring_prep_splice(sqe, src, -1, dest, -1, maxlen, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+}
+
+static inline void splice_chunks_get_sqe(
+  Backend_t *backend,
+  op_context_t **ctx,
+  struct io_uring_sqe **sqe,
+  enum op_type type
+)
+{
+  if (*ctx) {
+    if (*sqe) (*sqe)->flags |= IOSQE_IO_LINK;
+    (*ctx)->ref_count++;
+  }
+  else
+    *ctx = context_store_acquire(&backend->store, type);
+  (*sqe) = io_uring_get_sqe(&backend->ring);
+}
+
+static inline void splice_chunks_cancel(Backend_t *backend, op_context_t *ctx) {
+  ctx->result = -ECANCELED;
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+  io_uring_prep_cancel(sqe, ctx, 0);
+  backend->pending_sqes = 0;
+  io_uring_submit(&backend->ring);
+}
+
+static inline int splice_chunks_await_ops(
+  Backend_t *backend,
+  op_context_t **ctx,
+  int *result,
+  VALUE *switchpoint_result
+)
+{
+  int res = io_uring_backend_defer_submit_and_await(backend, 0, *ctx, switchpoint_result);
+  if (result) (*result) = res;
+  int completed = context_store_release(&backend->store, *ctx);
+  if (!completed) {
+    splice_chunks_cancel(backend, *ctx);
+    if (TEST_EXCEPTION(*switchpoint_result)) return 1;
+  }
+  *ctx = 0;
+  return 0;
+}
+
+#define SPLICE_CHUNKS_AWAIT_OPS(backend, ctx, result, switchpoint_result) \
+  if (splice_chunks_await_ops(backend, ctx, result, switchpoint_result)) goto error;
+
+VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  int total = 0;
+  int err = 0;
+  VALUE switchpoint_result = Qnil;
+  op_context_t *ctx = 0;
+  struct io_uring_sqe *sqe = 0;
+
+  rb_io_t *src_fptr;
+  rb_io_t *dest_fptr;
+
+  VALUE underlying_io = rb_ivar_get(src, ID_ivar_io);
+  if (underlying_io != Qnil) src = underlying_io;
+  GetOpenFile(src, src_fptr);
+  io_verify_blocking_mode(src_fptr, src, Qtrue);
+
+  underlying_io = rb_ivar_get(dest, ID_ivar_io);
+  if (underlying_io != Qnil) dest = underlying_io;
+  dest = rb_io_get_write_io(dest);
+  GetOpenFile(dest, dest_fptr);
+  io_verify_blocking_mode(dest_fptr, dest, Qtrue);
+
+  int maxlen = NUM2INT(chunk_size);
+  VALUE str = Qnil;
+  VALUE chunk_len_value = Qnil;
+
+  int pipefd[2] = { -1, -1 };
+  if (pipe(pipefd) == -1) {
+    err = errno;
+    goto syscallerror;
+  }
+
+  if (prefix != Qnil) {
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+    splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, prefix);
+  }
+
+  while (1) {
+    int chunk_len;
+    VALUE chunk_prefix_str = Qnil;
+    VALUE chunk_postfix_str = Qnil;
+
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
+    splice_chunks_prep_splice(ctx, sqe, src_fptr->fd, pipefd[1], maxlen);
+
+    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
+    if (chunk_len == 0) break;
+
+    total += chunk_len;
+    chunk_len_value = INT2NUM(chunk_len);
+
+
+    if (chunk_prefix != Qnil) {
+      chunk_prefix_str = (TYPE(chunk_prefix) == T_STRING) ? chunk_prefix : rb_funcall(chunk_prefix, ID_call, 1, chunk_len_value);
+      splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+      splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_prefix_str);
+    }
+
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
+    splice_chunks_prep_splice(ctx, sqe, pipefd[0], dest_fptr->fd, chunk_len);
+
+    if (chunk_postfix != Qnil) {
+      chunk_postfix_str = (TYPE(chunk_postfix) == T_STRING) ? chunk_postfix : rb_funcall(chunk_postfix, ID_call, 1, chunk_len_value);
+      splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+      splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_postfix_str);
+    }
+
+    RB_GC_GUARD(chunk_prefix_str);
+    RB_GC_GUARD(chunk_postfix_str);
+  }
+
+  if (postfix != Qnil) {
+    splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
+    splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, postfix);
+  }
+  if (ctx) {
+    SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, 0, &switchpoint_result);
+  }
+
+  RB_GC_GUARD(str);
+  RB_GC_GUARD(chunk_len_value);
+  RB_GC_GUARD(switchpoint_result);
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  return INT2NUM(total);
+syscallerror:
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  rb_syserr_fail(err, strerror(err));
+error:
+  if (pipefd[0] != -1) close(pipefd[0]);
+  if (pipefd[1] != -1) close(pipefd[1]);
+  return RAISE_EXCEPTION(switchpoint_result);
+}
+
+VALUE Backend_trace(int argc, VALUE *argv, VALUE self) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+  backend_trace(&backend->base, argc, argv);
+  return self;
+}
+
+VALUE Backend_trace_proc_set(VALUE self, VALUE block) {
+  Backend_t *backend;
+  GetBackend(self, backend);
+
+  backend->base.trace_proc = block;
+  return self;
+}
+
 void Init_Backend() {
   VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cObject);
   rb_define_alloc_func(cBackend, Backend_allocate);
```
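The heart of Backend_splice_chunks above is moving each chunk src -> pipe -> dest entirely in kernel space, learning the chunk's size from the first splice's completion result. A condensed standalone sketch of that two-step core (synchronous, single chunk, error handling elided; not the gem's code, which instead links the SQEs with IOSQE_IO_LINK via splice_chunks_get_sqe and suspends the fiber rather than blocking the thread):

```c
#include <liburing.h>
#include <unistd.h>

// Move up to maxlen bytes from src to dest through a pipe, with no
// userspace copy. Returns bytes moved, 0 on EOF, -1 on pipe() failure.
static int splice_one_chunk(struct io_uring *ring, int src, int dest,
                            unsigned int maxlen) {
  int pipefd[2];
  struct io_uring_cqe *cqe;
  struct io_uring_sqe *sqe;
  if (pipe(pipefd) == -1) return -1;

  // Step 1: splice src -> pipe; the CQE result is the actual chunk size.
  sqe = io_uring_get_sqe(ring);
  io_uring_prep_splice(sqe, src, -1, pipefd[1], -1, maxlen, 0);
  io_uring_submit(ring);
  io_uring_wait_cqe(ring, &cqe);
  int chunk_len = cqe->res;            // 0 signals EOF on src
  io_uring_cqe_seen(ring, cqe);

  // Step 2: splice exactly chunk_len bytes pipe -> dest.
  if (chunk_len > 0) {
    sqe = io_uring_get_sqe(ring);
    io_uring_prep_splice(sqe, pipefd[0], -1, dest, -1, chunk_len, 0);
    io_uring_submit(ring);
    io_uring_wait_cqe(ring, &cqe);
    chunk_len = cqe->res;
    io_uring_cqe_seen(ring, cqe);
  }

  close(pipefd[0]);
  close(pipefd[1]);
  return chunk_len;
}
```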
```diff
@@ -1202,11 +1406,16 @@ void Init_Backend() {
   rb_define_method(cBackend, "initialize", Backend_initialize, 0);
   rb_define_method(cBackend, "finalize", Backend_finalize, 0);
   rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
+  rb_define_method(cBackend, "trace", Backend_trace, -1);
+  rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);
 
-  rb_define_method(cBackend, "poll", Backend_poll, 3);
+  rb_define_method(cBackend, "poll", Backend_poll, 1);
   rb_define_method(cBackend, "break", Backend_wakeup, 0);
   rb_define_method(cBackend, "kind", Backend_kind, 0);
   rb_define_method(cBackend, "chain", Backend_chain, -1);
+  rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);
+  rb_define_method(cBackend, "idle_proc=", Backend_idle_proc_set, 1);
+  rb_define_method(cBackend, "splice_chunks", Backend_splice_chunks, 7);
 
   rb_define_method(cBackend, "accept", Backend_accept, 2);
   rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
```
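The Ruby-visible surface changes accordingly: #poll drops from arity 3 to a single blocking flag (the current fiber and runqueue are now tracked inside the backend), and #trace, #trace_proc=, #idle_gc_period=, #idle_proc= and #splice_chunks are new. For reference, a minimal sketch (hypothetical Demo names) of the two rb_define_method arity conventions used above:

```c
#include <ruby.h>

// Arity 1: one positional Ruby argument maps to one VALUE parameter.
static VALUE demo_poll(VALUE self, VALUE blocking) {
  return blocking == Qtrue ? Qtrue : Qfalse;
}

// Arity -1: arguments arrive as a C array, as Backend_trace and
// Backend_chain receive them above.
static VALUE demo_trace(int argc, VALUE *argv, VALUE self) {
  return rb_ary_new_from_values(argc, argv);
}

void Init_demo(void) {
  VALUE mDemo = rb_define_module("Demo");
  rb_define_method(mDemo, "poll", demo_poll, 1);
  rb_define_method(mDemo, "trace", demo_trace, -1);
}
```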