polyphony 0.60 → 0.61

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 515e9a5686bb0eedb02ad626e491b3e8acb350a765a468029e0b5357673f443c
-  data.tar.gz: 94fd7eaedd37c01f1ebc33ba78ffb00d3e8fc42f4a0266bf63ba8eedb83d008c
+  metadata.gz: b7056ec954b2264a15f88934d2b1e65f53c76c301453851ceadf60715570e474
+  data.tar.gz: 1cd5835b7a5a3d4036e1eabf45ded2e09efdc7cacb207fc0a1a60ac67da02d90
 SHA512:
-  metadata.gz: a546bcf43f556dc7d6bbc3c04b9448141a539e91d2d893441a161eecf9246ff516a54cf94cae476ef9358470e5475c98577c807fd1cef1f712f342337d3c8cc8
-  data.tar.gz: e0bb07cc0028c3205f3c2d87a59699e35e3d4d0e797eff63258892475548086125ed40e63152c56253a1c596b680eacc7080bf08f7d152576c34dc57df0206ab
+  metadata.gz: b5e32edc6bc22ba580e7c6d7d42a73982803080e9a9782326cb7b8796aa62ad6b75bccb1e6607879f0d3e5a5c4db8e5597d8407b76014f08f3945d1d8ad0097a
+  data.tar.gz: b8fe0f9419061295f830ed12ae9c8ea089a966686388cff580ddb6e1f7cd8c17a04928f9af41083019a5a11e8cccce25af404959db30a730a2ade2359a7e07ef
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+## 0.61 2021-07-20
+
+- Add more statistics, move stats to `Backend#stats`
+
 ## 0.60 2021-07-15
 
 
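The headline change in 0.61 is the new `Backend#stats` method, which supersedes `Thread#fiber_scheduling_stats` (removed in thread.c below). A minimal usage sketch — the hash keys are the symbols registered by `backend_setup_stats_symbols` in the C changes below, while the values shown are purely illustrative:

```ruby
require 'polyphony'

# Sample the I/O backend's counters for the current thread.
p Thread.backend.stats
#=> {:runqueue_length=>0, :runqueue_max_length=>2, :op_count=>17,
#    :switch_count=>5, :poll_count=>3, :pending_ops=>1}

# Or sample periodically, as the updated echo server example below does:
spin_loop(interval: 5) { p Thread.backend.stats }
```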
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    polyphony (0.59.2)
+    polyphony (0.61)
 
 GEM
   remote: https://rubygems.org/
@@ -3,16 +3,25 @@
 require 'bundler/setup'
 require 'polyphony'
 
+spin_loop(interval: 5) { p Thread.backend.stats }
+
 server = TCPServer.open('127.0.0.1', 1234)
 puts "Pid: #{Process.pid}"
 puts 'Echoing on port 1234...'
-while (client = server.accept)
-  spin do
-    while (data = client.gets)
-      # client.send("you said: #{data.chomp}!\n", 0)
-      client.write('you said: ', data.chomp, "!\n")
+begin
+  while (client = server.accept)
+    spin do
+      while (data = client.gets)
+        # client.send("you said: #{data.chomp}!\n", 0)
+        client.write('you said: ', data.chomp, "!\n")
+      end
+    rescue Errno::ECONNRESET
+      'Connection reset...'
+    ensure
+      client.shutdown
+      client.close
     end
-  rescue Errno::ECONNRESET
-    'Connection reset...'
   end
+ensure
+  server.close
 end
@@ -8,6 +8,9 @@
 inline void backend_base_initialize(struct Backend_base *base) {
   runqueue_initialize(&base->runqueue);
   base->currently_polling = 0;
+  base->op_count = 0;
+  base->switch_count = 0;
+  base->poll_count = 0;
   base->pending_count = 0;
   base->idle_gc_period = 0;
   base->idle_gc_last_time = 0;
@@ -25,8 +28,10 @@ inline void backend_base_mark(struct Backend_base *base) {
   runqueue_mark(&base->runqueue);
 }
 
+const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;
+
 inline void conditional_nonblocking_poll(VALUE backend, struct Backend_base *base, VALUE current, VALUE next) {
-  if (runqueue_should_poll_nonblocking(&base->runqueue) || next == current)
+  if ((base->switch_count % ANTI_STARVE_SWITCH_COUNT_THRESHOLD) == 0 || next == current)
     Backend_poll(backend, Qnil);
 }
 
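The anti-starvation check above now keys off the backend-wide `switch_count` counter rather than the dedicated counter that `runqueue_should_poll_nonblocking` kept in the runqueue (removed in runqueue.c below). A rough Ruby rendering of the C logic above, with hypothetical names:

```ruby
# Every 64th fiber switch, or whenever the next fiber is the current one,
# do a non-blocking poll so pending I/O completions are still processed
# even when the runqueue never empties.
ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64

def conditional_nonblocking_poll(current, next_fiber)
  return unless (@switch_count % ANTI_STARVE_SWITCH_COUNT_THRESHOLD).zero? || next_fiber == current

  poll(blocking: false)
end
```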
@@ -36,7 +41,8 @@ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
   unsigned int pending_ops_count = base->pending_count;
   unsigned int backend_was_polled = 0;
   unsigned int idle_tasks_run_count = 0;
-
+
+  base->switch_count++;
   COND_TRACE(base, 2, SYM_fiber_switchpoint, current_fiber);
 
   while (1) {
@@ -106,6 +112,22 @@ inline void backend_trace(struct Backend_base *base, int argc, VALUE *argv) {
   rb_funcallv(base->trace_proc, ID_call, argc, argv);
 }
 
+inline struct backend_stats backend_base_stats(struct Backend_base *base) {
+  struct backend_stats stats = {
+    .runqueue_length = runqueue_len(&base->runqueue),
+    .runqueue_max_length = runqueue_max_len(&base->runqueue),
+    .op_count = base->op_count,
+    .switch_count = base->switch_count,
+    .poll_count = base->poll_count,
+    .pending_ops = base->pending_count
+  };
+
+  base->op_count = 0;
+  base->switch_count = 0;
+  base->poll_count = 0;
+  return stats;
+}
+
 #ifdef POLYPHONY_USE_PIDFD_OPEN
 #ifndef __NR_pidfd_open
 #define __NR_pidfd_open 434 /* System call # on most architectures */
@@ -289,3 +311,40 @@ inline void backend_run_idle_tasks(struct Backend_base *base) {
   rb_gc_start();
   rb_gc_disable();
 }
+
+VALUE SYM_runqueue_length;
+VALUE SYM_runqueue_max_length;
+VALUE SYM_op_count;
+VALUE SYM_switch_count;
+VALUE SYM_poll_count;
+VALUE SYM_pending_ops;
+
+VALUE Backend_stats(VALUE self) {
+  struct backend_stats backend_stats = backend_get_stats(self);
+
+  VALUE stats = rb_hash_new();
+  rb_hash_aset(stats, SYM_runqueue_length, INT2NUM(backend_stats.runqueue_length));
+  rb_hash_aset(stats, SYM_runqueue_max_length, INT2NUM(backend_stats.runqueue_max_length));
+  rb_hash_aset(stats, SYM_op_count, INT2NUM(backend_stats.op_count));
+  rb_hash_aset(stats, SYM_switch_count, INT2NUM(backend_stats.switch_count));
+  rb_hash_aset(stats, SYM_poll_count, INT2NUM(backend_stats.poll_count));
+  rb_hash_aset(stats, SYM_pending_ops, INT2NUM(backend_stats.pending_ops));
+  RB_GC_GUARD(stats);
+  return stats;
+}
+
+void backend_setup_stats_symbols() {
+  SYM_runqueue_length = ID2SYM(rb_intern("runqueue_length"));
+  SYM_runqueue_max_length = ID2SYM(rb_intern("runqueue_max_length"));
+  SYM_op_count = ID2SYM(rb_intern("op_count"));
+  SYM_switch_count = ID2SYM(rb_intern("switch_count"));
+  SYM_poll_count = ID2SYM(rb_intern("poll_count"));
+  SYM_pending_ops = ID2SYM(rb_intern("pending_ops"));
+
+  rb_global_variable(&SYM_runqueue_length);
+  rb_global_variable(&SYM_runqueue_max_length);
+  rb_global_variable(&SYM_op_count);
+  rb_global_variable(&SYM_switch_count);
+  rb_global_variable(&SYM_poll_count);
+  rb_global_variable(&SYM_pending_ops);
+}
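Note that `backend_base_stats` zeroes `op_count`, `switch_count` and `poll_count` after copying them out, so each call to `Backend#stats` reports activity since the previous call. A hedged sketch of turning those per-interval deltas into rates:

```ruby
# Counters reset on every read, so sampling at a fixed interval yields
# per-interval deltas that map directly to per-second rates.
spin_loop(interval: 1) do
  stats = Thread.backend.stats
  puts format('ops/s: %d  switches/s: %d  polls/s: %d',
              stats[:op_count], stats[:switch_count], stats[:poll_count])
end
```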
@@ -6,13 +6,20 @@
 #include "runqueue.h"
 
 struct backend_stats {
-  int scheduled_fibers;
-  int pending_ops;
+  unsigned int runqueue_length;
+  unsigned int runqueue_max_length;
+  unsigned int op_count;
+  unsigned int switch_count;
+  unsigned int poll_count;
+  unsigned int pending_ops;
 };
 
 struct Backend_base {
   runqueue_t runqueue;
   unsigned int currently_polling;
+  unsigned int op_count;
+  unsigned int switch_count;
+  unsigned int poll_count;
   unsigned int pending_count;
   double idle_gc_period;
   double idle_gc_last_time;
@@ -26,6 +33,7 @@ void backend_base_mark(struct Backend_base *base);
 VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base);
 void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_base *base, VALUE fiber, VALUE value, int prioritize);
 void backend_trace(struct Backend_base *base, int argc, VALUE *argv);
+struct backend_stats backend_base_stats(struct Backend_base *base);
 
 // tracing
 #define SHOULD_TRACE(base) ((base)->trace_proc != Qnil)
@@ -59,6 +67,7 @@ VALUE io_enc_str(VALUE str, rb_io_t *fptr);
 //////////////////////////////////////////////////////////////////////
 //////////////////////////////////////////////////////////////////////
 
+struct backend_stats backend_get_stats(VALUE self);
 VALUE backend_await(struct Backend_base *backend);
 VALUE backend_snooze();
 
@@ -91,7 +100,10 @@ VALUE backend_timeout_exception(VALUE exception);
 VALUE Backend_timeout_ensure_safe(VALUE arg);
 VALUE Backend_timeout_ensure_safe(VALUE arg);
 VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags);
+VALUE Backend_stats(VALUE self);
 void backend_run_idle_tasks(struct Backend_base *base);
 void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking);
 
+void backend_setup_stats_symbols();
+
 #endif /* BACKEND_COMMON_H */
@@ -191,6 +191,8 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
   Backend_t *backend;
   GetBackend(self, backend);
 
+  backend->base.poll_count++;
+
   if (!is_blocking && backend->pending_sqes) {
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
@@ -234,14 +236,11 @@ inline VALUE Backend_switch_fiber(VALUE self) {
   return backend_base_switch_fiber(self, &backend->base);
 }
 
-inline struct backend_stats Backend_stats(VALUE self) {
+inline struct backend_stats backend_get_stats(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  return (struct backend_stats){
-    .scheduled_fibers = runqueue_len(&backend->base.runqueue),
-    .pending_ops = backend->base.pending_count
-  };
+  return backend_base_stats(&backend->base);
 }
 
 VALUE Backend_wakeup(VALUE self) {
@@ -279,6 +278,7 @@ int io_uring_backend_defer_submit_and_await(
 {
   VALUE switchpoint_result = Qnil;
 
+  backend->base.op_count++;
   if (sqe) {
     io_uring_sqe_set_data(sqe, ctx);
     io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
@@ -1043,6 +1043,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_sqe_set_data(sqe, ctx);
   io_uring_backend_defer_submit(backend);
+  backend->base.op_count++;
 
   struct Backend_timeout_ctx timeout_ctx = {backend, ctx};
   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
@@ -1210,6 +1211,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     sqe_count++;
   }
 
+  backend->base.op_count += sqe_count;
   ctx->ref_count = sqe_count + 1;
   io_uring_backend_defer_submit(backend);
   resume_value = backend_await((struct Backend_base *)backend);
@@ -1346,6 +1348,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
   if (prefix != Qnil) {
     splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
     splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, prefix);
+    backend->base.op_count++;
   }
 
   while (1) {
@@ -1355,7 +1358,8 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
 
     splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
     splice_chunks_prep_splice(ctx, sqe, src_fptr->fd, pipefd[1], maxlen);
-
+    backend->base.op_count++;
+
     SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
     if (chunk_len == 0) break;
 
@@ -1367,15 +1371,18 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
       chunk_prefix_str = (TYPE(chunk_prefix) == T_STRING) ? chunk_prefix : rb_funcall(chunk_prefix, ID_call, 1, chunk_len_value);
       splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
       splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_prefix_str);
+      backend->base.op_count++;
     }
 
     splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
     splice_chunks_prep_splice(ctx, sqe, pipefd[0], dest_fptr->fd, chunk_len);
+    backend->base.op_count++;
 
     if (chunk_postfix != Qnil) {
       chunk_postfix_str = (TYPE(chunk_postfix) == T_STRING) ? chunk_postfix : rb_funcall(chunk_postfix, ID_call, 1, chunk_len_value);
       splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
       splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_postfix_str);
+      backend->base.op_count++;
     }
 
     RB_GC_GUARD(chunk_prefix_str);
@@ -1385,6 +1392,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
   if (postfix != Qnil) {
     splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
     splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, postfix);
+    backend->base.op_count++;
   }
   if (ctx) {
     SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, 0, &switchpoint_result);
@@ -1430,6 +1438,7 @@ void Init_Backend() {
   rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
   rb_define_method(cBackend, "trace", Backend_trace, -1);
   rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);
+  rb_define_method(cBackend, "stats", Backend_stats, 0);
 
   rb_define_method(cBackend, "poll", Backend_poll, 1);
   rb_define_method(cBackend, "break", Backend_wakeup, 0);
@@ -1469,6 +1478,8 @@ void Init_Backend() {
   SYM_send = ID2SYM(rb_intern("send"));
   SYM_splice = ID2SYM(rb_intern("splice"));
   SYM_write = ID2SYM(rb_intern("write"));
+
+  backend_setup_stats_symbols();
 }
 
 #endif // POLYPHONY_BACKEND_LIBURING
@@ -164,6 +164,8 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
   Backend_t *backend;
   GetBackend(self, backend);
 
+  backend->base.poll_count++;
+
   COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
   backend->base.currently_polling = 1;
   ev_run(backend->ev_loop, blocking == Qtrue ? EVRUN_ONCE : EVRUN_NOWAIT);
@@ -211,14 +213,11 @@ VALUE Backend_wakeup(VALUE self) {
   return Qnil;
 }
 
-inline struct backend_stats Backend_stats(VALUE self) {
+inline struct backend_stats backend_get_stats(VALUE self) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  return (struct backend_stats){
-    .scheduled_fibers = runqueue_len(&backend->base.runqueue),
-    .pending_ops = backend->base.pending_count
-  };
+  return backend_base_stats(&backend->base);
 }
 
 struct libev_io {
@@ -289,6 +288,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   OBJ_TAINT(str);
 
   while (1) {
+    backend->base.op_count++;
     ssize_t n = read(fptr->fd, buf, len - total);
     if (n < 0) {
       int e = errno;
@@ -359,6 +359,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
   watcher.fiber = Qnil;
 
   while (1) {
+    backend->base.op_count++;
     ssize_t n = read(fptr->fd, buf, len);
     if (n < 0) {
       int e = errno;
@@ -411,6 +412,7 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
   watcher.fiber = Qnil;
 
   while (1) {
+    backend->base.op_count++;
     ssize_t n = read(fptr->fd, buf, len);
     if (n < 0) {
       int e = errno;
@@ -458,6 +460,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
   watcher.fiber = Qnil;
 
   while (left > 0) {
+    backend->base.op_count++;
     ssize_t n = write(fptr->fd, buf, left);
     if (n < 0) {
       int e = errno;
@@ -517,6 +520,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
   iov_ptr = iov;
 
   while (1) {
+    backend->base.op_count++;
     ssize_t n = writev(fptr->fd, iov_ptr, iov_count);
     if (n < 0) {
       int e = errno;
@@ -588,6 +592,7 @@ VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
   io_verify_blocking_mode(fptr, server_socket, Qfalse);
   watcher.fiber = Qnil;
   while (1) {
+    backend->base.op_count++;
     fd = accept(fptr->fd, &addr, &len);
     if (fd < 0) {
       int e = errno;
@@ -646,6 +651,7 @@ VALUE Backend_accept_loop(VALUE self, VALUE server_socket, VALUE socket_class) {
   watcher.fiber = Qnil;
 
   while (1) {
+    backend->base.op_count++;
     fd = accept(fptr->fd, &addr, &len);
     if (fd < 0) {
       int e = errno;
@@ -705,6 +711,7 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   addr.sin_addr.s_addr = inet_addr(host_buf);
   addr.sin_port = htons(NUM2INT(port));
 
+  backend->base.op_count++;
   int result = connect(fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
   if (result < 0) {
     int e = errno;
@@ -745,6 +752,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
   watcher.fiber = Qnil;
 
   while (left > 0) {
+    backend->base.op_count++;
     ssize_t n = send(fptr->fd, buf, left, flags_int);
     if (n < 0) {
       int e = errno;
@@ -852,6 +860,7 @@ VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
 
   watcher.ctx.fiber = Qnil;
   while (1) {
+    backend->base.op_count++;
     len = splice(src_fptr->fd, 0, dest_fptr->fd, 0, NUM2INT(maxlen), 0);
     if (len < 0) {
       int e = errno;
@@ -903,6 +912,7 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
 
   watcher.ctx.fiber = Qnil;
   while (1) {
+    backend->base.op_count++;
     len = splice(src_fptr->fd, 0, dest_fptr->fd, 0, NUM2INT(maxlen), 0);
     if (len < 0) {
       int e = errno;
@@ -962,6 +972,7 @@ VALUE Backend_fake_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   watcher.fiber = Qnil;
 
   while (1) {
+    backend->base.op_count++;
     ssize_t n = read(src_fptr->fd, buf, len);
     if (n < 0) {
       int e = errno;
@@ -977,6 +988,7 @@ VALUE Backend_fake_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   }
 
   while (left > 0) {
+    backend->base.op_count++;
     ssize_t n = write(dest_fptr->fd, buf, left);
     if (n < 0) {
       int e = errno;
@@ -1037,6 +1049,7 @@ VALUE Backend_fake_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen
   while (1) {
     char *ptr = buf;
     while (1) {
+      backend->base.op_count++;
       ssize_t n = read(src_fptr->fd, ptr, len);
       if (n < 0) {
         int e = errno;
@@ -1054,6 +1067,7 @@ VALUE Backend_fake_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen
     }
 
     while (left > 0) {
+      backend->base.op_count++;
      ssize_t n = write(dest_fptr->fd, ptr, left);
       if (n < 0) {
         int e = errno;
@@ -1093,6 +1107,7 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
   GetBackend(self, backend);
   GetOpenFile(io, fptr);
 
+  backend->base.op_count++;
   return libev_wait_fd(backend, fptr->fd, events, 1);
 }
 
@@ -1116,6 +1131,7 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
   watcher.fiber = rb_fiber_current();
   ev_timer_init(&watcher.timer, Backend_timer_callback, NUM2DBL(duration), 0.);
   ev_timer_start(backend->ev_loop, &watcher.timer);
+  backend->base.op_count++;
 
   switchpoint_result = backend_await((struct Backend_base *)backend);
 
@@ -1145,6 +1161,7 @@ noreturn VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   VALUE switchpoint_result = Qnil;
   ev_timer_init(&watcher.timer, Backend_timer_callback, sleep_duration, 0.);
   ev_timer_start(backend->ev_loop, &watcher.timer);
+  backend->base.op_count++;
   switchpoint_result = backend_await((struct Backend_base *)backend);
   ev_timer_stop(backend->ev_loop, &watcher.timer);
   RAISE_IF_EXCEPTION(switchpoint_result);
@@ -1196,6 +1213,7 @@ VALUE Backend_timeout(int argc,VALUE *argv, VALUE self) {
   watcher.resume_value = timeout;
   ev_timer_init(&watcher.timer, Backend_timeout_callback, NUM2DBL(duration), 0.);
   ev_timer_start(backend->ev_loop, &watcher.timer);
+  backend->base.op_count++;
 
   struct Backend_timeout_ctx timeout_ctx = {backend, &watcher};
   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
@@ -1218,6 +1236,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
   if (fd >= 0) {
     Backend_t *backend;
     GetBackend(self, backend);
+    backend->base.op_count++;
 
     VALUE resume_value = libev_wait_fd(backend, fd, EV_READ, 0);
     close(fd);
@@ -1261,6 +1280,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
   watcher.fiber = rb_fiber_current();
   ev_child_init(&watcher.child, Backend_child_callback, NUM2INT(pid), 0);
   ev_child_start(backend->ev_loop, &watcher.child);
+  backend->base.op_count++;
 
   switchpoint_result = backend_await((struct Backend_base *)backend);
 
@@ -1283,6 +1303,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
 
   ev_async_init(&async, Backend_async_callback);
   ev_async_start(backend->ev_loop, &async);
+  backend->base.op_count++;
 
   switchpoint_result = backend_await((struct Backend_base *)backend);
 
@@ -1346,6 +1367,7 @@ inline int splice_chunks_write(Backend_t *backend, int fd, VALUE str, struct lib
   int len = RSTRING_LEN(str);
   int left = len;
   while (left > 0) {
+    backend->base.op_count++;
     ssize_t n = write(fd, buf, left);
     if (n < 0) {
       int err = errno;
@@ -1406,6 +1428,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
     int chunk_len;
     // splice to pipe
     while (1) {
+      backend->base.op_count++;
      chunk_len = splice(src_fptr->fd, 0, pipefd[1], 0, maxlen, 0);
       if (chunk_len < 0) {
         err = errno;
@@ -1431,6 +1454,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
 
     int left = chunk_len;
     while (1) {
+      backend->base.op_count++;
      int n = splice(pipefd[0], 0, dest_fptr->fd, 0, left, 0);
       if (n < 0) {
         err = errno;
@@ -1503,6 +1527,7 @@ void Init_Backend() {
   rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
   rb_define_method(cBackend, "trace", Backend_trace, -1);
   rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);
+  rb_define_method(cBackend, "stats", Backend_stats, 0);
 
   rb_define_method(cBackend, "poll", Backend_poll, 1);
   rb_define_method(cBackend, "break", Backend_wakeup, 0);
@@ -1545,6 +1570,8 @@ void Init_Backend() {
   SYM_send = ID2SYM(rb_intern("send"));
   SYM_splice = ID2SYM(rb_intern("splice"));
   SYM_write = ID2SYM(rb_intern("write"));
+
+  backend_setup_stats_symbols();
 }
 
 #endif // POLYPHONY_BACKEND_LIBEV
@@ -114,7 +114,6 @@ VALUE Backend_wakeup(VALUE self);
 VALUE Backend_run_idle_tasks(VALUE self);
 VALUE Backend_switch_fiber(VALUE self);
 void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize);
-struct backend_stats Backend_stats(VALUE self);
 void Backend_unschedule_fiber(VALUE self, VALUE fiber);
 
 VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
@@ -4,7 +4,6 @@
 inline void runqueue_initialize(runqueue_t *runqueue) {
   runqueue_ring_buffer_init(&runqueue->entries);
   runqueue->high_watermark = 0;
-  runqueue->switch_count = 0;
 }
 
 inline void runqueue_finalize(runqueue_t *runqueue) {
@@ -30,12 +29,7 @@ inline void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int
 }
 
 inline runqueue_entry runqueue_shift(runqueue_t *runqueue) {
-  runqueue_entry entry = runqueue_ring_buffer_shift(&runqueue->entries);
-  if (entry.fiber == Qnil)
-    runqueue->high_watermark = 0;
-  else
-    runqueue->switch_count += 1;
-  return entry;
+  return runqueue_ring_buffer_shift(&runqueue->entries);
 }
 
 inline void runqueue_delete(runqueue_t *runqueue, VALUE fiber) {
@@ -54,15 +48,12 @@ inline long runqueue_len(runqueue_t *runqueue) {
   return runqueue->entries.count;
 }
 
-inline int runqueue_empty_p(runqueue_t *runqueue) {
-  return (runqueue->entries.count == 0);
+inline long runqueue_max_len(runqueue_t *runqueue) {
+  unsigned int max_len = runqueue->high_watermark;
+  runqueue->high_watermark = 0;
+  return max_len;
 }
 
-static const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;
-
-inline int runqueue_should_poll_nonblocking(runqueue_t *runqueue) {
-  if (runqueue->switch_count < ANTI_STARVE_SWITCH_COUNT_THRESHOLD) return 0;
-
-  runqueue->switch_count = 0;
-  return 1;
+inline int runqueue_empty_p(runqueue_t *runqueue) {
+  return (runqueue->entries.count == 0);
 }
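The new `runqueue_max_len` follows the same read-and-reset pattern as the other counters: it returns the runqueue's high watermark (which feeds the `:runqueue_max_length` stat) and clears it, whereas previously `runqueue_shift` cleared the watermark whenever the queue emptied. A tiny Ruby analogue of the pattern:

```ruby
# Read-and-reset, as runqueue_max_len does with the high watermark:
# each stats sample reports the peak runqueue length since the last sample.
def max_len
  max = @high_watermark
  @high_watermark = 0
  max
end
```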
@@ -7,7 +7,6 @@
 typedef struct runqueue {
   runqueue_ring_buffer entries;
   unsigned int high_watermark;
-  unsigned int switch_count;
 } runqueue_t;
 
 void runqueue_initialize(runqueue_t *runqueue);
@@ -21,7 +20,7 @@ void runqueue_delete(runqueue_t *runqueue, VALUE fiber);
 int runqueue_index_of(runqueue_t *runqueue, VALUE fiber);
 void runqueue_clear(runqueue_t *runqueue);
 long runqueue_len(runqueue_t *runqueue);
+long runqueue_max_len(runqueue_t *runqueue);
 int runqueue_empty_p(runqueue_t *runqueue);
-int runqueue_should_poll_nonblocking(runqueue_t *runqueue);
 
 #endif /* RUNQUEUE_H */
@@ -13,18 +13,6 @@ static VALUE Thread_setup_fiber_scheduling(VALUE self) {
   return self;
 }
 
-static VALUE SYM_scheduled_fibers;
-static VALUE SYM_pending_watchers;
-
-static VALUE Thread_fiber_scheduling_stats(VALUE self) {
-  struct backend_stats backend_stats = Backend_stats(rb_ivar_get(self, ID_ivar_backend));
-
-  VALUE stats = rb_hash_new();
-  rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(backend_stats.scheduled_fibers));
-  rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(backend_stats.pending_ops));
-  return stats;
-}
-
 inline void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
   Backend_schedule_fiber(self, rb_ivar_get(self, ID_ivar_backend), fiber, value, prioritize);
 }
@@ -72,7 +60,6 @@ VALUE Thread_class_backend(VALUE _self) {
 
 void Init_Thread() {
   rb_define_method(rb_cThread, "setup_fiber_scheduling", Thread_setup_fiber_scheduling, 0);
-  rb_define_method(rb_cThread, "fiber_scheduling_stats", Thread_fiber_scheduling_stats, 0);
   rb_define_method(rb_cThread, "schedule_and_wakeup", Thread_fiber_schedule_and_wakeup, 2);
 
   rb_define_method(rb_cThread, "schedule_fiber", Thread_schedule_fiber, 2);
@@ -91,9 +78,4 @@ void Init_Thread() {
   ID_ivar_main_fiber = rb_intern("@main_fiber");
   ID_ivar_terminated = rb_intern("@terminated");
   ID_stop = rb_intern("stop");
-
-  SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));
-  SYM_pending_watchers = ID2SYM(rb_intern("pending_watchers"));
-  rb_global_variable(&SYM_scheduled_fibers);
-  rb_global_variable(&SYM_pending_watchers);
 }
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Polyphony
-  VERSION = '0.60'
+  VERSION = '0.61'
 end
data/test/test_backend.rb CHANGED
@@ -26,7 +26,7 @@ class BackendTest < MiniTest::Test
       @backend.sleep 0.01
       count += 1
     }.await
-    assert_in_delta 0.03, Time.now - t0, 0.01
+    assert_in_range 0.02..0.04, Time.now - t0
     assert_equal 3, count
   end
 
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: polyphony
 version: !ruby/object:Gem::Version
-  version: '0.60'
+  version: '0.61'
 platform: ruby
 authors:
 - Sharon Rosner
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-07-15 00:00:00.000000000 Z
+date: 2021-07-20 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: rake-compiler
@@ -419,7 +419,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.1.4
+rubygems_version: 3.1.6
 signing_key:
 specification_version: 4
 summary: Fine grained concurrency for Ruby