polyphony 0.58 → 0.59

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: e8002b8c0e03afa8e915b5ed67d64c1fc288a5fa220dfb2c32d3966a78046eee
- data.tar.gz: c3a46eeae1f048f4adee9d3130f5dd3593936e843c1874d928f742d1fa83053f
+ metadata.gz: 313e33321df0e375d128a79fccfab98e06183adbdbde7916347f6795f56ca369
+ data.tar.gz: 6428c0516544bdb955fa465bf36c40b9b2f162306f3e5689848ff487f24c4440
  SHA512:
- metadata.gz: e6681155761daee35b73c9674e69aa505786e576814db0c909028cb576d8609f99bccc79f80b7d06d90c7658747bc2589c9837d3bdb3419782147e573d39f278
- data.tar.gz: 90197a8db54405ca37492a54a20adf00c85f142af606cad26ad823161825ccf6bd9a414adcbbf3e27eaf93eea1d94a7cbc50a2fb5982ee4ce48969d4378400cd
+ metadata.gz: 40c88f5d1c2aa4307dea78dcf1a2ba8d347ef252e7e56e67d466c291066d39544d051864ccd0138dd94ca0beef7af71f5257052b3556ab0c7294b1b94b517d47
+ data.tar.gz: 109410d6b222846b0c2b3f54061358ef922b57c9b6559c37aec36f27116e7ca389ff51762e988e2e9293a7a1358d97b80ada74daa4e44d7d25c1e5335351d8c6
data/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+ ## 0.59 2021-06-28
+
+ - Redesign tracing mechanism and API - now completely separated from Ruby core
+   trace API
+ - Refactor C code - move run queue into backend
+
  ## 0.58 2021-06-25

  - Implement `Thread#idle_gc_period`, `#on_idle` (#56)
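The "redesign tracing mechanism" entry refers to the backend-level trace hook added further down in this diff (`Backend#trace_proc=` and the `SYM_fiber_*` events). A minimal usage sketch, assuming the backend is reachable via `Thread.current.backend` (that accessor is not part of this diff):

```ruby
# Hypothetical sketch of the 0.59 trace API: the trace proc receives the event
# name followed by event-specific arguments (fiber, scheduled value, etc.).
Thread.current.backend.trace_proc = proc do |event, *args|
  case event
  when :fiber_schedule, :fiber_run
    fiber, value = args
    puts "#{event}: #{fiber.inspect} value=#{value.inspect}"
  else # e.g. :fiber_switchpoint, :fiber_event_poll_enter, :fiber_event_poll_leave
    puts "#{event}: #{args.inspect}"
  end
end
```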
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- polyphony (0.58)
+ polyphony (0.59)

  GEM
  remote: https://rubygems.org/
data/ext/polyphony/backend_common.c CHANGED
@@ -5,12 +5,103 @@
  #include "polyphony.h"
  #include "backend_common.h"

- inline void initialize_backend_base(struct Backend_base *base) {
+ inline void backend_base_initialize(struct Backend_base *base) {
+ runqueue_initialize(&base->runqueue);
  base->currently_polling = 0;
  base->pending_count = 0;
  base->idle_gc_period = 0;
  base->idle_gc_last_time = 0;
- base->idle_block = Qnil;
+ base->idle_proc = Qnil;
+ base->trace_proc = Qnil;
+ }
+
+ inline void backend_base_finalize(struct Backend_base *base) {
+ runqueue_finalize(&base->runqueue);
+ }
+
+ inline void backend_base_mark(struct Backend_base *base) {
+ if (base->idle_proc != Qnil) rb_gc_mark(base->idle_proc);
+ if (base->trace_proc != Qnil) rb_gc_mark(base->trace_proc);
+ runqueue_mark(&base->runqueue);
+ }
+
+ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
+ VALUE current_fiber = rb_fiber_current();
+ runqueue_entry next;
+ unsigned int pending_ops_count = base->pending_count;
+ unsigned int backend_was_polled = 0;
+ unsigned int idle_tasks_run_count = 0;
+
+ if (SHOULD_TRACE(base) && (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse))
+ TRACE(base, 2, SYM_fiber_switchpoint, current_fiber);
+
+ while (1) {
+ next = runqueue_shift(&base->runqueue);
+ if (next.fiber != Qnil) {
+ // Polling for I/O op completion is normally done when the run queue is
+ // empty, but if the runqueue never empties, we'll never get to process
+ // any event completions. In order to prevent this, an anti-starve
+ // mechanism is employed, under the following conditions:
+ // - a blocking poll was not yet performed
+ // - there are pending blocking operations
+ // - the runqueue has signalled that a non-blocking poll should be
+ // performed
+ // - the run queue length high watermark has reached its threshold (currently 128)
+ // - the run queue switch counter has reached its threshold (currently 64)
+ if (!backend_was_polled && pending_ops_count && runqueue_should_poll_nonblocking(&base->runqueue)) {
+ // this prevents event starvation in case the run queue never empties
+ Backend_poll(backend, Qnil);
+ }
+ break;
+ }
+
+ if (!idle_tasks_run_count) {
+ idle_tasks_run_count++;
+ backend_run_idle_tasks(base);
+ }
+ if (pending_ops_count == 0) break;
+ Backend_poll(backend, Qtrue);
+ backend_was_polled = 1;
+ }
+
+ if (next.fiber == Qnil) return Qnil;
+
+ // run next fiber
+ COND_TRACE(base, 3, SYM_fiber_run, next.fiber, next.value);
+
+ rb_ivar_set(next.fiber, ID_ivar_runnable, Qnil);
+ RB_GC_GUARD(next.fiber);
+ RB_GC_GUARD(next.value);
+ return (next.fiber == current_fiber) ?
+ next.value : FIBER_TRANSFER(next.fiber, next.value);
+ }
+
+ void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_base *base, VALUE fiber, VALUE value, int prioritize) {
+ int already_runnable;
+
+ if (rb_fiber_alive_p(fiber) != Qtrue) return;
+ already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;
+
+ COND_TRACE(base, 3, SYM_fiber_schedule, fiber, value);
+ (prioritize ? runqueue_unshift : runqueue_push)(&base->runqueue, fiber, value, already_runnable);
+ if (!already_runnable) {
+ rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
+ if (rb_thread_current() != thread) {
+ // If the fiber scheduling is done across threads, we need to make sure the
+ // target thread is woken up in case it is in the middle of running its
+ // event selector. Otherwise it's gonna be stuck waiting for an event to
+ // happen, not knowing that it there's already a fiber ready to run in its
+ // run queue.
+ Backend_wakeup(backend);
+ }
+ }
+ }
+
+
+ inline void backend_trace(struct Backend_base *base, int argc, VALUE *argv) {
+ if (base->trace_proc == Qnil) return;
+
+ rb_funcallv(base->trace_proc, ID_call, argc, argv);
  }

  #ifdef POLYPHONY_USE_PIDFD_OPEN
@@ -182,8 +273,8 @@ inline void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking) {
  }

  inline void backend_run_idle_tasks(struct Backend_base *base) {
- if (base->idle_block != Qnil)
- rb_funcall(base->idle_block, ID_call, 0);
+ if (base->idle_proc != Qnil)
+ rb_funcall(base->idle_proc, ID_call, 0);

  if (base->idle_gc_period == 0) return;

data/ext/polyphony/backend_common.h CHANGED
@@ -3,16 +3,37 @@

  #include "ruby.h"
  #include "ruby/io.h"
+ #include "runqueue.h"
+
+ struct backend_stats {
+ int scheduled_fibers;
+ int waiting_fibers;
+ int pending_ops;
+ };

  struct Backend_base {
+ runqueue_t runqueue;
  unsigned int currently_polling;
  unsigned int pending_count;
  double idle_gc_period;
  double idle_gc_last_time;
- VALUE idle_block;
+ VALUE idle_proc;
+ VALUE trace_proc;
  };

- void initialize_backend_base(struct Backend_base *base);
+ void backend_base_initialize(struct Backend_base *base);
+ void backend_base_finalize(struct Backend_base *base);
+ void backend_base_mark(struct Backend_base *base);
+ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base);
+ void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_base *base, VALUE fiber, VALUE value, int prioritize);
+ void backend_trace(struct Backend_base *base, int argc, VALUE *argv);
+
+ // tracing
+ #define SHOULD_TRACE(base) ((base)->trace_proc != Qnil)
+ #define TRACE(base, ...) rb_funcall((base)->trace_proc, ID_call, __VA_ARGS__)
+ #define COND_TRACE(base, ...) if (SHOULD_TRACE(base)) { TRACE(base, __VA_ARGS__); }
+
+

  #ifdef POLYPHONY_USE_PIDFD_OPEN
  int pidfd_open(pid_t pid, unsigned int flags);
data/ext/polyphony/backend_io_uring.c CHANGED
@@ -44,8 +44,12 @@ typedef struct Backend_t {

  static void Backend_mark(void *ptr) {
  Backend_t *backend = ptr;
- if (backend->base.idle_block != Qnil)
- rb_gc_mark(backend->base.idle_block);
+ backend_base_mark(&backend->base);
+ }
+
+ static void Backend_free(void *ptr) {
+ Backend_t *backend = ptr;
+ backend_base_finalize(&backend->base);
  }

  static size_t Backend_size(const void *ptr) {
@@ -54,7 +58,7 @@ static size_t Backend_size(const void *ptr) {

  static const rb_data_type_t Backend_type = {
  "IOUringBackend",
- {Backend_mark, 0, Backend_size,},
+ {Backend_mark, Backend_free, Backend_size,},
  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
  };

@@ -71,7 +75,7 @@ static VALUE Backend_initialize(VALUE self) {
  Backend_t *backend;
  GetBackend(self, backend);

- initialize_backend_base(&backend->base);
+ backend_base_initialize(&backend->base);
  backend->pending_sqes = 0;
  backend->prepared_limit = 2048;

@@ -106,13 +110,6 @@ VALUE Backend_post_fork(VALUE self) {
  return self;
  }

- unsigned int Backend_pending_count(VALUE self) {
- Backend_t *backend;
- GetBackend(self, backend);
-
- return backend->base.pending_count;
- }
-
  typedef struct poll_context {
  struct io_uring *ring;
  struct io_uring_cqe *cqe;
@@ -189,24 +186,56 @@ void io_uring_backend_poll(Backend_t *backend) {
  io_uring_cqe_seen(&backend->ring, poll_ctx.cqe);
  }

- VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue) {
- int is_nowait = nowait == Qtrue;
+ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
+ int is_blocking = blocking == Qtrue;
  Backend_t *backend;
  GetBackend(self, backend);

- if (is_nowait && backend->pending_sqes) {
+ if (!is_blocking && backend->pending_sqes) {
  backend->pending_sqes = 0;
  io_uring_submit(&backend->ring);
  }

- COND_TRACE(2, SYM_fiber_event_poll_enter, current_fiber);
- if (!is_nowait) io_uring_backend_poll(backend);
+ COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
+ if (is_blocking) io_uring_backend_poll(backend);
  io_uring_backend_handle_ready_cqes(backend);
- COND_TRACE(2, SYM_fiber_event_poll_leave, current_fiber);
+ COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());

  return self;
  }

+ inline void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ backend_base_schedule_fiber(thread, self, &backend->base, fiber, value, prioritize);
+ }
+
+ inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ runqueue_delete(&backend->base.runqueue, fiber);
+ }
+
+ inline VALUE Backend_switch_fiber(VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ return backend_base_switch_fiber(self, &backend->base);
+ }
+
+ inline struct backend_stats Backend_stats(VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ return (struct backend_stats){
+ .scheduled_fibers = runqueue_len(&backend->base.runqueue),
+ .waiting_fibers = 0,
+ .pending_ops = backend->base.pending_count
+ };
+ }
+
  VALUE Backend_wakeup(VALUE self) {
  Backend_t *backend;
  GetBackend(self, backend);
@@ -1187,10 +1216,10 @@ VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
  return self;
  }

- VALUE Backend_idle_block_set(VALUE self, VALUE block) {
+ VALUE Backend_idle_proc_set(VALUE self, VALUE block) {
  Backend_t *backend;
  GetBackend(self, backend);
- backend->base.idle_block = block;
+ backend->base.idle_proc = block;
  return self;
  }

@@ -1355,6 +1384,21 @@ error:
  return RAISE_EXCEPTION(switchpoint_result);
  }

+ VALUE Backend_trace(int argc, VALUE *argv, VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+ backend_trace(&backend->base, argc, argv);
+ return self;
+ }
+
+ VALUE Backend_trace_proc_set(VALUE self, VALUE block) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ backend->base.trace_proc = block;
+ return self;
+ }
+
  void Init_Backend() {
  VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cObject);
  rb_define_alloc_func(cBackend, Backend_allocate);
@@ -1362,13 +1406,15 @@ void Init_Backend() {
  rb_define_method(cBackend, "initialize", Backend_initialize, 0);
  rb_define_method(cBackend, "finalize", Backend_finalize, 0);
  rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
+ rb_define_method(cBackend, "trace", Backend_trace, -1);
+ rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);

- rb_define_method(cBackend, "poll", Backend_poll, 3);
+ rb_define_method(cBackend, "poll", Backend_poll, 1);
  rb_define_method(cBackend, "break", Backend_wakeup, 0);
  rb_define_method(cBackend, "kind", Backend_kind, 0);
  rb_define_method(cBackend, "chain", Backend_chain, -1);
  rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);
- rb_define_method(cBackend, "idle_block=", Backend_idle_block_set, 1);
+ rb_define_method(cBackend, "idle_proc=", Backend_idle_proc_set, 1);
  rb_define_method(cBackend, "splice_chunks", Backend_splice_chunks, 7);

  rb_define_method(cBackend, "accept", Backend_accept, 2);
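The method table above captures the user-visible changes in this release: `poll` drops from three arguments to a single blocking flag (the current fiber and run queue now live inside the backend), `idle_block=` is renamed to `idle_proc=`, and `trace`/`trace_proc=` expose the new tracing hook. A rough sketch of how the renamed accessors might be driven from Ruby, assuming `Thread.current.backend` as above; these are low-level internals normally invoked by Polyphony itself rather than by application code:

```ruby
backend = Thread.current.backend

# Renamed in 0.59: idle_block= is now idle_proc= (run when the backend goes idle).
backend.idle_proc = proc { GC.start }

# New single-argument poll: true requests a blocking poll, false (or nil) a
# non-blocking sweep of already-ready completions.
backend.poll(false)
```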
data/ext/polyphony/backend_libev.c CHANGED
@@ -75,8 +75,12 @@ typedef struct Backend_t {

  static void Backend_mark(void *ptr) {
  Backend_t *backend = ptr;
- if (backend->base.idle_block != Qnil)
- rb_gc_mark(backend->base.idle_block);
+ backend_base_mark(&backend->base);
+ }
+
+ static void Backend_free(void *ptr) {
+ Backend_t *backend = ptr;
+ backend_base_finalize(&backend->base);
  }

  static size_t Backend_size(const void *ptr) {
@@ -85,7 +89,7 @@ static size_t Backend_size(const void *ptr) {

  static const rb_data_type_t Backend_type = {
  "LibevBackend",
- {Backend_mark, 0, Backend_size,},
+ {Backend_mark, Backend_free, Backend_size,},
  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
  };

@@ -117,7 +121,7 @@ static VALUE Backend_initialize(VALUE self) {

  GetBackend(self, backend);

- initialize_backend_base(&backend->base);
+ backend_base_initialize(&backend->base);
  backend->ev_loop = libev_new_loop();

  // start async watcher used for breaking a poll op (from another thread)
@@ -156,24 +160,38 @@ VALUE Backend_post_fork(VALUE self) {
  return self;
  }

- inline unsigned int Backend_pending_count(VALUE self) {
+ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
  Backend_t *backend;
  GetBackend(self, backend);

- return backend->base.pending_count;
+ COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
+ backend->base.currently_polling = 1;
+ ev_run(backend->ev_loop, blocking == Qtrue ? EVRUN_ONCE : EVRUN_NOWAIT);
+ backend->base.currently_polling = 0;
+ COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
+
+ return self;
  }

- VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue) {
+ inline void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize) {
  Backend_t *backend;
  GetBackend(self, backend);

- COND_TRACE(2, SYM_fiber_event_poll_enter, current_fiber);
- backend->base.currently_polling = 1;
- ev_run(backend->ev_loop, nowait == Qtrue ? EVRUN_NOWAIT : EVRUN_ONCE);
- backend->base.currently_polling = 0;
- COND_TRACE(2, SYM_fiber_event_poll_leave, current_fiber);
+ backend_base_schedule_fiber(thread, self, &backend->base, fiber, value, prioritize);
+ }

- return self;
+ inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ runqueue_delete(&backend->base.runqueue, fiber);
+ }
+
+ inline VALUE Backend_switch_fiber(VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ return backend_base_switch_fiber(self, &backend->base);
  }

  VALUE Backend_wakeup(VALUE self) {
@@ -193,6 +211,17 @@ VALUE Backend_wakeup(VALUE self) {
  return Qnil;
  }

+ inline struct backend_stats Backend_stats(VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ return (struct backend_stats){
+ .scheduled_fibers = runqueue_len(&backend->base.runqueue),
+ .waiting_fibers = 0,
+ .pending_ops = backend->base.pending_count
+ };
+ }
+
  struct libev_io {
  struct ev_io io;
  VALUE fiber;
@@ -1293,10 +1322,10 @@ VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
  return self;
  }

- VALUE Backend_idle_block_set(VALUE self, VALUE block) {
+ VALUE Backend_idle_proc_set(VALUE self, VALUE block) {
  Backend_t *backend;
  GetBackend(self, backend);
- backend->base.idle_block = block;
+ backend->base.idle_proc = block;
  return self;
  }

@@ -1443,6 +1472,21 @@ error:
  return RAISE_EXCEPTION(result);
  }

+ VALUE Backend_trace(int argc, VALUE *argv, VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+ backend_trace(&backend->base, argc, argv);
+ return self;
+ }
+
+ VALUE Backend_trace_proc_set(VALUE self, VALUE block) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ backend->base.trace_proc = block;
+ return self;
+ }
+
  void Init_Backend() {
  ev_set_allocator(xrealloc);

@@ -1452,13 +1496,15 @@ void Init_Backend() {
  rb_define_method(cBackend, "initialize", Backend_initialize, 0);
  rb_define_method(cBackend, "finalize", Backend_finalize, 0);
  rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
+ rb_define_method(cBackend, "trace", Backend_trace, -1);
+ rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);

- rb_define_method(cBackend, "poll", Backend_poll, 3);
+ rb_define_method(cBackend, "poll", Backend_poll, 1);
  rb_define_method(cBackend, "break", Backend_wakeup, 0);
  rb_define_method(cBackend, "kind", Backend_kind, 0);
  rb_define_method(cBackend, "chain", Backend_chain, -1);
  rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);
- rb_define_method(cBackend, "idle_block=", Backend_idle_block_set, 1);
+ rb_define_method(cBackend, "idle_proc=", Backend_idle_proc_set, 1);
  rb_define_method(cBackend, "splice_chunks", Backend_splice_chunks, 7);

  rb_define_method(cBackend, "accept", Backend_accept, 2);