polyphony 0.59.1 → 0.60

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 3aba9d551e940893d6d3303d93d514b520b7b649a1db99b101bdbbcdec762501
- data.tar.gz: 601d33709dc92bb0c029d9dba5d555441a2b5a6721b807185a36d65cb5ef8170
+ metadata.gz: 515e9a5686bb0eedb02ad626e491b3e8acb350a765a468029e0b5357673f443c
+ data.tar.gz: 94fd7eaedd37c01f1ebc33ba78ffb00d3e8fc42f4a0266bf63ba8eedb83d008c
  SHA512:
- metadata.gz: 2a7e355b5f8857093082d75759942f5d3a63ab40cf3a0f46045829e2c223ae92dbcc370ceb2dd3f26bd04e0bb036eb03ab40ef65a011cd172dd6c122698541fa
- data.tar.gz: e8708853190d44c82f3088ba0baecf19f03110a11c2cf229f0015c800b23da01e1eb0d94af20cb2af7fd8dd7bdafde9bf218f484c57cda358c0da187dfeb4b6e
+ metadata.gz: a546bcf43f556dc7d6bbc3c04b9448141a539e91d2d893441a161eecf9246ff516a54cf94cae476ef9358470e5475c98577c807fd1cef1f712f342337d3c8cc8
+ data.tar.gz: e0bb07cc0028c3205f3c2d87a59699e35e3d4d0e797eff63258892475548086125ed40e63152c56253a1c596b680eacc7080bf08f7d152576c34dc57df0206ab
data/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
+ ## 0.60 2021-07-15
+
+
+ - Fix Linux version detection (for kernel versions > 5.9)
+ - Fix op ctx leak in io_uring backend (when polling for I/O readiness)
+ - Add support for appending to buffer in `Backend#read`, `Backend#recv` methods
+ - Improve anti-event-starvation mechanism
+ - Redesign fiber monitoring mechanism
+ - Implement `Fiber#attach`
+ - Add optional maxlen argument to `IO#read_loop`, `Socket#recv_loop` (#60)
+ - Implement `Fiber#detach` (#52)
+
  ## 0.59.1 2021-06-28

  - Accept fiber tag in `Polyphony::Timer.new`
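The new `maxlen` argument to the loop methods replaces the previously hard-coded 8192-byte read buffer (see the `Backend_read_loop`/`Backend_recv_loop` hunks further down). A minimal, hedged sketch of the new call shape — the host, port, and echo logic are illustrative, not taken from this diff:

    require 'polyphony'

    socket = TCPSocket.new('127.0.0.1', 1234)
    # Read at most 64 KiB per iteration instead of the old fixed 8192 bytes.
    socket.recv_loop(65536) do |data|
      socket << data
    end

`IO#read_loop` takes the same optional argument; omitting it presumably keeps the old 8192-byte default on the Ruby side.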
data/Gemfile.lock CHANGED
@@ -1,27 +1,25 @@
  PATH
    remote: .
    specs:
-     polyphony (0.59.1)
+     polyphony (0.59.2)

  GEM
    remote: https://rubygems.org/
    specs:
      ansi (1.5.0)
-     ast (2.4.0)
+     ast (2.4.2)
      builder (3.2.4)
      coderay (1.1.3)
-     docile (1.3.2)
-     hiredis (0.6.3)
-     http_parser.rb (0.6.0)
+     docile (1.4.0)
      httparty (0.17.1)
        mime-types (~> 3.0)
        multi_xml (>= 0.5.2)
-     json (2.3.0)
+     json (2.5.1)
      localhost (1.1.8)
      method_source (1.0.0)
      mime-types (3.3.1)
        mime-types-data (~> 3.2015)
-     mime-types-data (3.2020.0512)
+     mime-types-data (3.2021.0704)
      minitest (5.14.4)
      minitest-reporters (1.4.2)
        ansi
@@ -30,22 +28,18 @@ GEM
        ruby-progressbar
      msgpack (1.4.2)
      multi_xml (0.6.0)
-     mysql2 (0.5.3)
-     parallel (1.19.1)
-     parser (2.7.0.2)
-       ast (~> 2.4.0)
-     pg (1.1.4)
+     parallel (1.20.1)
+     parser (3.0.2.0)
+       ast (~> 2.4.1)
      pry (0.13.1)
        coderay (~> 1.1)
        method_source (~> 1.0)
-     rack (2.2.3)
      rainbow (3.0.0)
-     rake (13.0.3)
+     rake (13.0.6)
      rake-compiler (1.1.1)
        rake
-     redis (4.1.0)
-     regexp_parser (1.7.1)
-     rexml (3.2.4)
+     regexp_parser (2.1.1)
+     rexml (3.2.5)
      rubocop (0.85.1)
        parallel (~> 1.10)
        parser (>= 2.7.0.1)
@@ -55,37 +49,29 @@ GEM
        rubocop-ast (>= 0.0.3)
        ruby-progressbar (~> 1.7)
        unicode-display_width (>= 1.4.0, < 2.0)
-     rubocop-ast (0.0.3)
-       parser (>= 2.7.0.1)
-     ruby-progressbar (1.10.1)
-     sequel (5.34.0)
+     rubocop-ast (1.8.0)
+       parser (>= 3.0.1.1)
+     ruby-progressbar (1.11.0)
      simplecov (0.17.1)
        docile (~> 1.1)
        json (>= 1.8, < 3)
        simplecov-html (~> 0.10.0)
      simplecov-html (0.10.2)
-     unicode-display_width (1.6.1)
+     unicode-display_width (1.7.0)

  PLATFORMS
    ruby

  DEPENDENCIES
-   hiredis (= 0.6.3)
-   http_parser.rb (~> 0.6.0)
    httparty (= 0.17.1)
    localhost (~> 1.1.4)
    minitest (= 5.14.4)
    minitest-reporters (= 1.4.2)
    msgpack (= 1.4.2)
-   mysql2 (= 0.5.3)
-   pg (= 1.1.4)
    polyphony!
    pry (= 0.13.1)
-   rack (>= 2.0.8, < 2.3.0)
    rake-compiler (= 1.1.1)
-   redis (= 4.1.0)
    rubocop (= 0.85.1)
-   sequel (= 5.34.0)
    simplecov (= 0.17.1)

  BUNDLED WITH
@@ -0,0 +1,51 @@
+ # frozen_string_literal: true
+
+ require 'bundler/setup'
+ require 'polyphony'
+
+ class Supervisor
+   def initialize(*fibers)
+     @fiber = spin { do_supervise }
+     @fiber.message_on_child_termination = true
+     fibers.each { |f| add(f) }
+   end
+
+   def await
+     @fiber.await
+   end
+
+   def spin(tag = nil, &block)
+     @fiber.spin(tag, &block)
+   end
+
+   def add(fiber)
+     fiber.attach(@fiber)
+   end
+
+   def do_supervise
+     loop do
+       msg = receive
+       # puts "Supervisor received #{msg.inspect}"
+       f, r = msg
+       puts "Fiber #{f.tag} terminated with #{r.inspect}, restarting..."
+       f.restart
+     end
+   end
+ end
+
+ def supervise(*fibers)
+   supervisor = Supervisor.new(*fibers)
+   supervisor.await
+ end
+
+ def start_worker(id)
+   spin_loop(:"worker#{id}") do
+     duration = rand(0.5..1.0)
+     puts "Worker #{id} sleeping for #{duration} seconds"
+     sleep duration
+     raise 'foo' if rand > 0.7
+     break if rand > 0.6
+   end
+ end
+
+ supervise(start_worker(1), start_worker(2))
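This newly added example (its file-name header is not preserved in this diff view) demonstrates the redesigned fiber monitoring mechanism from the changelog: setting `message_on_child_termination` makes the supervisor fiber receive a `[fiber, result]` message whenever one of its children terminates, `Fiber#attach` re-parents an existing fiber to the supervisor, and failed workers are restarted with `Fiber#restart`. `Fiber#detach`, also new in this release (#52), does the opposite of `#attach`, detaching a fiber from its current parent.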
@@ -25,6 +25,11 @@ inline void backend_base_mark(struct Backend_base *base) {
    runqueue_mark(&base->runqueue);
  }

+ inline void conditional_nonblocking_poll(VALUE backend, struct Backend_base *base, VALUE current, VALUE next) {
+   if (runqueue_should_poll_nonblocking(&base->runqueue) || next == current)
+     Backend_poll(backend, Qnil);
+ }
+
  VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
    VALUE current_fiber = rb_fiber_current();
    runqueue_entry next;
@@ -32,26 +37,22 @@ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
    unsigned int backend_was_polled = 0;
    unsigned int idle_tasks_run_count = 0;

-   if (SHOULD_TRACE(base) && (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse))
-     TRACE(base, 2, SYM_fiber_switchpoint, current_fiber);
+   COND_TRACE(base, 2, SYM_fiber_switchpoint, current_fiber);

    while (1) {
      next = runqueue_shift(&base->runqueue);
      if (next.fiber != Qnil) {
        // Polling for I/O op completion is normally done when the run queue is
        // empty, but if the runqueue never empties, we'll never get to process
-       // any event completions. In order to prevent this, an anti-starve
+       // any event completions. In order to prevent this, an anti-starvation
        // mechanism is employed, under the following conditions:
        // - a blocking poll was not yet performed
        // - there are pending blocking operations
-       // - the runqueue has signalled that a non-blocking poll should be
-       //   performed
-       // - the run queue length high watermark has reached its threshold (currently 128)
-       // - the run queue switch counter has reached its threshold (currently 64)
-       if (!backend_was_polled && pending_ops_count && runqueue_should_poll_nonblocking(&base->runqueue)) {
-         // this prevents event starvation in case the run queue never empties
-         Backend_poll(backend, Qnil);
-       }
+       // - the runqueue shift count has reached a fixed threshold (currently 64), or
+       // - the next fiber is the same as the current fiber (a single fiber is snoozing)
+       if (!backend_was_polled && pending_ops_count)
+         conditional_nonblocking_poll(backend, base, current_fiber, next.fiber);
+
        break;
      }

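The new second condition (`next == current`) covers the case where a single fiber snoozes in a tight loop: the runqueue keeps yielding that same fiber and never empties, so without a forced non-blocking poll no completion event would ever be processed. A hedged Ruby-level sketch of the scenario this guards against (the timing values are illustrative):

    require 'polyphony'

    # This fiber keeps the runqueue permanently non-empty.
    busy = spin { loop { snooze } }

    # The sleep's completion arrives via the backend poll; the anti-starvation
    # poll lets it through even though the runqueue never empties.
    spin { sleep 0.1; puts 'timer fired'; busy.stop }.await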
@@ -82,7 +83,8 @@ void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_bas
    if (rb_fiber_alive_p(fiber) != Qtrue) return;
    already_runnable = rb_ivar_get(fiber, ID_ivar_runnable) != Qnil;

-   COND_TRACE(base, 3, SYM_fiber_schedule, fiber, value);
+   COND_TRACE(base, 4, SYM_fiber_schedule, fiber, value, prioritize ? Qtrue : Qfalse);
+
    (prioritize ? runqueue_unshift : runqueue_push)(&base->runqueue, fiber, value, already_runnable);
    if (!already_runnable) {
      rb_ivar_set(fiber, ID_ivar_runnable, Qtrue);
@@ -171,7 +173,7 @@ inline VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
  //////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////

- VALUE backend_await(struct Backend_base *backend) {
+ inline VALUE backend_await(struct Backend_base *backend) {
    VALUE ret;
    backend->pending_count++;
    ret = Thread_switch_fiber(rb_thread_current());
@@ -180,9 +182,10 @@ VALUE backend_await(struct Backend_base *backend) {
    return ret;
  }

- VALUE backend_snooze() {
+ inline VALUE backend_snooze() {
+   VALUE ret;
    Fiber_make_runnable(rb_fiber_current(), Qnil);
-   VALUE ret = Thread_switch_fiber(rb_thread_current());
+   ret = Thread_switch_fiber(rb_thread_current());
    return ret;
  }

@@ -7,7 +7,6 @@

  struct backend_stats {
    int scheduled_fibers;
-   int waiting_fibers;
    int pending_ops;
  };

@@ -197,6 +197,15 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
    }

    COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
+   // if (SHOULD_TRACE(&backend->base))
+   //   printf(
+   //     "io_uring_poll(blocking_mode: %d, pending: %d, taken: %d, available: %d, runqueue: %d\n",
+   //     is_blocking,
+   //     backend->base.pending_count,
+   //     backend->store.taken_count,
+   //     backend->store.available_count,
+   //     backend->base.runqueue.entries.count
+   //   );
    if (is_blocking) io_uring_backend_poll(backend);
    io_uring_backend_handle_ready_cqes(backend);
    COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
@@ -231,7 +240,6 @@ inline struct backend_stats Backend_stats(VALUE self) {

    return (struct backend_stats){
      .scheduled_fibers = runqueue_len(&backend->base.runqueue),
-     .waiting_fibers = 0,
      .pending_ops = backend->base.pending_count
    };
  }
@@ -302,17 +310,25 @@ VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
    io_uring_prep_poll_add(sqe, fd, write ? POLLOUT : POLLIN);

    io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resumed_value);
+   context_store_release(&backend->store, ctx);
+
    RB_GC_GUARD(resumed_value);
    return resumed_value;
  }

- VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof) {
+ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof, VALUE pos) {
    Backend_t *backend;
    rb_io_t *fptr;
    long dynamic_len = length == Qnil;
    long buffer_size = dynamic_len ? 4096 : NUM2INT(length);
-   int shrinkable = io_setstrbuf(&str, buffer_size);
-   char *buf = RSTRING_PTR(str);
+   long buf_pos = NUM2INT(pos);
+   if (str != Qnil) {
+     int current_len = RSTRING_LEN(str);
+     if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
+   }
+   else buf_pos = 0;
+   int shrinkable = io_setstrbuf(&str, buf_pos + buffer_size);
+   char *buf = RSTRING_PTR(str) + buf_pos;
    long total = 0;
    int read_to_eof = RTEST(to_eof);
    VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
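The added `context_store_release` call is the fix for the op ctx leak mentioned in the changelog: op contexts are acquired with `ref_count = 2` (visible in the context store changes below), and `io_uring_backend_wait_fd` — the path used when polling for I/O readiness — previously never released its reference after the await returned, so those contexts were never returned to the store.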
@@ -349,9 +365,9 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
        if (!dynamic_len) break;

        // resize buffer
-       rb_str_resize(str, total);
+       rb_str_resize(str, buf_pos + total);
        rb_str_modify_expand(str, buffer_size);
-       buf = RSTRING_PTR(str) + total;
+       buf = RSTRING_PTR(str) + buf_pos + total;
        shrinkable = 0;
        buffer_size += buffer_size;
      }
@@ -359,7 +375,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
      }
    }

-   io_set_read_length(str, total, shrinkable);
+   io_set_read_length(str, buf_pos + total, shrinkable);
    io_enc_str(str, fptr);

    if (!total) return Qnil;
@@ -367,12 +383,12 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
    return str;
  }

- VALUE Backend_read_loop(VALUE self, VALUE io) {
+ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
    Backend_t *backend;
    rb_io_t *fptr;
    VALUE str;
    long total;
-   long len = 8192;
+   long len = NUM2INT(maxlen);
    int shrinkable;
    char *buf;
    VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
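With the new `pos` argument, a negative position (or one beyond the end of the buffer) is clamped to the current buffer length, i.e. the read appends to the buffer. A hedged sketch of calling the backend directly, assuming the Ruby-side `Backend#read` mirrors the C arity `(io, buffer, length, to_eof, pos)` registered in `Init_Backend` below:

    require 'polyphony'

    r, w = IO.pipe
    w << 'hello world'
    w.close

    buf = +''                            # mutable string buffer
    backend = Thread.current.backend
    backend.read(r, buf, 6, false, -1)   # pos -1 clamps to end => append
    backend.read(r, buf, 5, false, -1)   # second read appends as well
    puts buf                             # => "hello world" (if both reads fill)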
@@ -580,13 +596,19 @@ VALUE Backend_write_m(int argc, VALUE *argv, VALUE self) {
    Backend_writev(self, argv[0], argc - 1, argv + 1);
  }

- VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
+ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
    Backend_t *backend;
    rb_io_t *fptr;
    long dynamic_len = length == Qnil;
    long len = dynamic_len ? 4096 : NUM2INT(length);
-   int shrinkable = io_setstrbuf(&str, len);
-   char *buf = RSTRING_PTR(str);
+   long buf_pos = NUM2INT(pos);
+   if (str != Qnil) {
+     int current_len = RSTRING_LEN(str);
+     if (buf_pos < 0 || buf_pos > current_len) buf_pos = current_len;
+   }
+   else buf_pos = 0;
+   int shrinkable = io_setstrbuf(&str, buf_pos + len);
+   char *buf = RSTRING_PTR(str) + buf_pos;
    long total = 0;
    VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);

@@ -618,7 +640,7 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
      }
    }

-   io_set_read_length(str, total, shrinkable);
+   io_set_read_length(str, buf_pos + total, shrinkable);
    io_enc_str(str, fptr);

    if (!total) return Qnil;
@@ -626,12 +648,12 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {
    return str;
  }

- VALUE Backend_recv_loop(VALUE self, VALUE io) {
+ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
    Backend_t *backend;
    rb_io_t *fptr;
    VALUE str;
    long total;
-   long len = 8192;
+   long len = NUM2INT(maxlen);
    int shrinkable;
    char *buf;
    VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
@@ -1421,11 +1443,11 @@ void Init_Backend() {
    rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
    rb_define_method(cBackend, "connect", Backend_connect, 3);
    rb_define_method(cBackend, "feed_loop", Backend_feed_loop, 3);
-   rb_define_method(cBackend, "read", Backend_read, 4);
-   rb_define_method(cBackend, "read_loop", Backend_read_loop, 1);
-   rb_define_method(cBackend, "recv", Backend_recv, 3);
+   rb_define_method(cBackend, "read", Backend_read, 5);
+   rb_define_method(cBackend, "read_loop", Backend_read_loop, 2);
+   rb_define_method(cBackend, "recv", Backend_recv, 4);
    rb_define_method(cBackend, "recv_feed_loop", Backend_recv_feed_loop, 3);
-   rb_define_method(cBackend, "recv_loop", Backend_recv_loop, 1);
+   rb_define_method(cBackend, "recv_loop", Backend_recv_loop, 2);
    rb_define_method(cBackend, "send", Backend_send, 3);
    rb_define_method(cBackend, "sendv", Backend_sendv, 3);
    rb_define_method(cBackend, "sleep", Backend_sleep, 1);
@@ -25,6 +25,8 @@ void context_store_initialize(op_context_store_t *store) {
    store->last_id = 0;
    store->available = NULL;
    store->taken = NULL;
+   store->available_count = 0;
+   store->taken_count = 0;
  }

  inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_type type) {
@@ -32,12 +34,12 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty
    if (ctx) {
      if (ctx->next) ctx->next->prev = NULL;
      store->available = ctx->next;
+     store->available_count--;
    }
    else {
      ctx = malloc(sizeof(op_context_t));
    }
    ctx->id = (++store->last_id);
-   // printf("acquire %p %d (%s)\n", ctx, ctx->id, op_type_to_str(type));
    ctx->prev = NULL;
    ctx->next = store->taken;
    if (store->taken) store->taken->prev = ctx;
@@ -49,6 +51,10 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty
    ctx->ref_count = 2;
    ctx->result = 0;

+   store->taken_count++;
+
+   // printf("acquire %p %d (%s, ref_count: %d) taken: %d\n", ctx, ctx->id, op_type_to_str(type), ctx->ref_count, store->taken_count);
+
    return ctx;
  }

@@ -61,6 +67,9 @@ inline int context_store_release(op_context_store_t *store, op_context_t *ctx) {
    ctx->ref_count--;
    if (ctx->ref_count) return 0;

+   store->taken_count--;
+   store->available_count++;
+
    if (ctx->next) ctx->next->prev = ctx->prev;
    if (ctx->prev) ctx->prev->next = ctx->next;
    if (store->taken == ctx) store->taken = ctx->next;
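The `taken_count`/`available_count` counters introduced here don't change behavior by themselves; they back the commented-out diagnostic printout added to `Backend_poll` above, making it possible to observe op context churn when hunting leaks like the `wait_fd` one fixed in this release.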