polyphony 0.60 → 0.64

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 515e9a5686bb0eedb02ad626e491b3e8acb350a765a468029e0b5357673f443c
- data.tar.gz: 94fd7eaedd37c01f1ebc33ba78ffb00d3e8fc42f4a0266bf63ba8eedb83d008c
+ metadata.gz: 5e3938b66b15caff258b95c7f1421f71bc0bb4f7b94dd9db4a3ad0614fd35ded
+ data.tar.gz: 2198ea0483f959491372074007d5c32416de69ebcf73a8aca51832ea6036ea87
  SHA512:
- metadata.gz: a546bcf43f556dc7d6bbc3c04b9448141a539e91d2d893441a161eecf9246ff516a54cf94cae476ef9358470e5475c98577c807fd1cef1f712f342337d3c8cc8
- data.tar.gz: e0bb07cc0028c3205f3c2d87a59699e35e3d4d0e797eff63258892475548086125ed40e63152c56253a1c596b680eacc7080bf08f7d152576c34dc57df0206ab
+ metadata.gz: 318b3b2549a6d7d3c2bb1b8396adcf95109ef17f6235e25bcf1540fd3c5777d6f2666858d5f539cf17dd0c2102553eb853ba3b38b96d9476d2727a7ddfb0ed70
+ data.tar.gz: 025f5037c922531235bcf56076ca6986cb1d983b37eceda8e1585fa90e23f67c0abe17a81b0329851e80c050e9ccce8ba53f7a0d95565e6568546dbb923f9a25
data/CHANGELOG.md CHANGED
@@ -1,3 +1,20 @@
+ ## 0.64 2021-07-26
+
+ - Add optional raise_on_eof argument to `#readpartial`
+
+ ## 0.63 2021-07-26
+
+ - Add support for specifying buf and buf_pos in `IO#read`
+ - Fix `Socket#read` to work and conform to `IO#read` interface
+
+ ## 0.62 2021-07-21
+
+ - Add `runqueue_size` to backend stats
+
+ ## 0.61 2021-07-20
+
+ - Add more statistics, move stats to `Backend#stats`
+
  ## 0.60 2021-07-15


data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- polyphony (0.59.2)
+ polyphony (0.64)

  GEM
  remote: https://rubygems.org/
data/Rakefile CHANGED
@@ -9,8 +9,8 @@ Rake::ExtensionTask.new("polyphony_ext") do |ext|
  end

  task :recompile => [:clean, :compile]
-
  task :default => [:compile, :test]
+
  task :test do
  exec 'ruby test/run.rb'
  end
@@ -3,16 +3,25 @@
  require 'bundler/setup'
  require 'polyphony'

+ spin_loop(interval: 5) { p Thread.backend.stats }
+
  server = TCPServer.open('127.0.0.1', 1234)
  puts "Pid: #{Process.pid}"
  puts 'Echoing on port 1234...'
- while (client = server.accept)
- spin do
- while (data = client.gets)
- # client.send("you said: #{data.chomp}!\n", 0)
- client.write('you said: ', data.chomp, "!\n")
+ begin
+ while (client = server.accept)
+ spin do
+ while (data = client.gets)
+ # client.send("you said: #{data.chomp}!\n", 0)
+ client.write('you said: ', data.chomp, "!\n")
+ end
+ rescue Errno::ECONNRESET
+ 'Connection reset...'
+ ensure
+ client.shutdown
+ client.close
  end
- rescue Errno::ECONNRESET
- 'Connection reset...'
  end
+ ensure
+ server.close
  end
@@ -8,6 +8,9 @@
  inline void backend_base_initialize(struct Backend_base *base) {
  runqueue_initialize(&base->runqueue);
  base->currently_polling = 0;
+ base->op_count = 0;
+ base->switch_count = 0;
+ base->poll_count = 0;
  base->pending_count = 0;
  base->idle_gc_period = 0;
  base->idle_gc_last_time = 0;
@@ -25,8 +28,10 @@ inline void backend_base_mark(struct Backend_base *base) {
  runqueue_mark(&base->runqueue);
  }

+ const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;
+
  inline void conditional_nonblocking_poll(VALUE backend, struct Backend_base *base, VALUE current, VALUE next) {
- if (runqueue_should_poll_nonblocking(&base->runqueue) || next == current)
+ if ((base->switch_count % ANTI_STARVE_SWITCH_COUNT_THRESHOLD) == 0 || next == current)
  Backend_poll(backend, Qnil);
  }

@@ -36,7 +41,8 @@ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
  unsigned int pending_ops_count = base->pending_count;
  unsigned int backend_was_polled = 0;
  unsigned int idle_tasks_run_count = 0;
-
+
+ base->switch_count++;
  COND_TRACE(base, 2, SYM_fiber_switchpoint, current_fiber);

  while (1) {
@@ -289,3 +295,61 @@ inline void backend_run_idle_tasks(struct Backend_base *base) {
  rb_gc_start();
  rb_gc_disable();
  }
+
+ inline struct backend_stats backend_base_stats(struct Backend_base *base) {
+ struct backend_stats stats = {
+ .runqueue_size = runqueue_size(&base->runqueue),
+ .runqueue_length = runqueue_len(&base->runqueue),
+ .runqueue_max_length = runqueue_max_len(&base->runqueue),
+ .op_count = base->op_count,
+ .switch_count = base->switch_count,
+ .poll_count = base->poll_count,
+ .pending_ops = base->pending_count
+ };
+
+ base->op_count = 0;
+ base->switch_count = 0;
+ base->poll_count = 0;
+ return stats;
+ }
+
+ VALUE SYM_runqueue_size;
+ VALUE SYM_runqueue_length;
+ VALUE SYM_runqueue_max_length;
+ VALUE SYM_op_count;
+ VALUE SYM_switch_count;
+ VALUE SYM_poll_count;
+ VALUE SYM_pending_ops;
+
+ VALUE Backend_stats(VALUE self) {
+ struct backend_stats backend_stats = backend_get_stats(self);
+
+ VALUE stats = rb_hash_new();
+ rb_hash_aset(stats, SYM_runqueue_size, INT2NUM(backend_stats.runqueue_size));
+ rb_hash_aset(stats, SYM_runqueue_length, INT2NUM(backend_stats.runqueue_length));
+ rb_hash_aset(stats, SYM_runqueue_max_length, INT2NUM(backend_stats.runqueue_max_length));
+ rb_hash_aset(stats, SYM_op_count, INT2NUM(backend_stats.op_count));
+ rb_hash_aset(stats, SYM_switch_count, INT2NUM(backend_stats.switch_count));
+ rb_hash_aset(stats, SYM_poll_count, INT2NUM(backend_stats.poll_count));
+ rb_hash_aset(stats, SYM_pending_ops, INT2NUM(backend_stats.pending_ops));
+ RB_GC_GUARD(stats);
+ return stats;
+ }
+
+ void backend_setup_stats_symbols() {
+ SYM_runqueue_size = ID2SYM(rb_intern("runqueue_size"));
+ SYM_runqueue_length = ID2SYM(rb_intern("runqueue_length"));
+ SYM_runqueue_max_length = ID2SYM(rb_intern("runqueue_max_length"));
+ SYM_op_count = ID2SYM(rb_intern("op_count"));
+ SYM_switch_count = ID2SYM(rb_intern("switch_count"));
+ SYM_poll_count = ID2SYM(rb_intern("poll_count"));
+ SYM_pending_ops = ID2SYM(rb_intern("pending_ops"));
+
+ rb_global_variable(&SYM_runqueue_size);
+ rb_global_variable(&SYM_runqueue_length);
+ rb_global_variable(&SYM_runqueue_max_length);
+ rb_global_variable(&SYM_op_count);
+ rb_global_variable(&SYM_switch_count);
+ rb_global_variable(&SYM_poll_count);
+ rb_global_variable(&SYM_pending_ops);
+ }
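For orientation, the new `Backend#stats` method defined above returns a plain Ruby hash built from these counters. A minimal sketch of its shape, assuming Polyphony is loaded; the key names come from `backend_setup_stats_symbols`, the values are made up for illustration:

```ruby
require 'polyphony'

# Illustrative only: op_count, switch_count and poll_count are reset to zero on
# every call (see backend_base_stats above), and runqueue_max_length reports the
# runqueue high-water mark accumulated since the previous call.
p Thread.backend.stats
# => {
#   runqueue_size: 128,       # current capacity of the runqueue ring buffer
#   runqueue_length: 2,       # fibers currently scheduled
#   runqueue_max_length: 17,  # high-water mark since the last call
#   op_count: 3401,           # backend ops submitted since the last call
#   switch_count: 2176,       # fiber switches since the last call
#   poll_count: 98,           # backend polls since the last call
#   pending_ops: 4            # ops currently in flight
# }
```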
@@ -6,13 +6,21 @@
  #include "runqueue.h"

  struct backend_stats {
- int scheduled_fibers;
- int pending_ops;
+ unsigned int runqueue_size;
+ unsigned int runqueue_length;
+ unsigned int runqueue_max_length;
+ unsigned int op_count;
+ unsigned int switch_count;
+ unsigned int poll_count;
+ unsigned int pending_ops;
  };

  struct Backend_base {
  runqueue_t runqueue;
  unsigned int currently_polling;
+ unsigned int op_count;
+ unsigned int switch_count;
+ unsigned int poll_count;
  unsigned int pending_count;
  double idle_gc_period;
  double idle_gc_last_time;
@@ -26,6 +34,7 @@ void backend_base_mark(struct Backend_base *base);
  VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base);
  void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_base *base, VALUE fiber, VALUE value, int prioritize);
  void backend_trace(struct Backend_base *base, int argc, VALUE *argv);
+ struct backend_stats backend_base_stats(struct Backend_base *base);

  // tracing
  #define SHOULD_TRACE(base) ((base)->trace_proc != Qnil)
@@ -59,6 +68,7 @@ VALUE io_enc_str(VALUE str, rb_io_t *fptr);
  //////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////

+ struct backend_stats backend_get_stats(VALUE self);
  VALUE backend_await(struct Backend_base *backend);
  VALUE backend_snooze();

@@ -91,7 +101,10 @@ VALUE backend_timeout_exception(VALUE exception);
  VALUE Backend_timeout_ensure_safe(VALUE arg);
  VALUE Backend_timeout_ensure_safe(VALUE arg);
  VALUE Backend_sendv(VALUE self, VALUE io, VALUE ary, VALUE flags);
+ VALUE Backend_stats(VALUE self);
  void backend_run_idle_tasks(struct Backend_base *base);
  void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking);

+ void backend_setup_stats_symbols();
+
  #endif /* BACKEND_COMMON_H */
@@ -191,6 +191,8 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
  Backend_t *backend;
  GetBackend(self, backend);

+ backend->base.poll_count++;
+
  if (!is_blocking && backend->pending_sqes) {
  backend->pending_sqes = 0;
  io_uring_submit(&backend->ring);
@@ -234,14 +236,11 @@ inline VALUE Backend_switch_fiber(VALUE self) {
  return backend_base_switch_fiber(self, &backend->base);
  }

- inline struct backend_stats Backend_stats(VALUE self) {
+ inline struct backend_stats backend_get_stats(VALUE self) {
  Backend_t *backend;
  GetBackend(self, backend);

- return (struct backend_stats){
- .scheduled_fibers = runqueue_len(&backend->base.runqueue),
- .pending_ops = backend->base.pending_count
- };
+ return backend_base_stats(&backend->base);
  }

  VALUE Backend_wakeup(VALUE self) {
@@ -279,6 +278,7 @@ int io_uring_backend_defer_submit_and_await(
  {
  VALUE switchpoint_result = Qnil;

+ backend->base.op_count++;
  if (sqe) {
  io_uring_sqe_set_data(sqe, ctx);
  io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
@@ -1043,6 +1043,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
  io_uring_prep_timeout(sqe, &ts, 0, 0);
  io_uring_sqe_set_data(sqe, ctx);
  io_uring_backend_defer_submit(backend);
+ backend->base.op_count++;

  struct Backend_timeout_ctx timeout_ctx = {backend, ctx};
  result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
@@ -1210,6 +1211,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
  sqe_count++;
  }

+ backend->base.op_count += sqe_count;
  ctx->ref_count = sqe_count + 1;
  io_uring_backend_defer_submit(backend);
  resume_value = backend_await((struct Backend_base *)backend);
@@ -1346,6 +1348,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
  if (prefix != Qnil) {
  splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
  splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, prefix);
+ backend->base.op_count++;
  }

  while (1) {
@@ -1355,7 +1358,8 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL

  splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
  splice_chunks_prep_splice(ctx, sqe, src_fptr->fd, pipefd[1], maxlen);
-
+ backend->base.op_count++;
+
  SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
  if (chunk_len == 0) break;

@@ -1367,15 +1371,18 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
  chunk_prefix_str = (TYPE(chunk_prefix) == T_STRING) ? chunk_prefix : rb_funcall(chunk_prefix, ID_call, 1, chunk_len_value);
  splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
  splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_prefix_str);
+ backend->base.op_count++;
  }

  splice_chunks_get_sqe(backend, &ctx, &sqe, OP_SPLICE);
  splice_chunks_prep_splice(ctx, sqe, pipefd[0], dest_fptr->fd, chunk_len);
+ backend->base.op_count++;

  if (chunk_postfix != Qnil) {
  chunk_postfix_str = (TYPE(chunk_postfix) == T_STRING) ? chunk_postfix : rb_funcall(chunk_postfix, ID_call, 1, chunk_len_value);
  splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
  splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, chunk_postfix_str);
+ backend->base.op_count++;
  }

  RB_GC_GUARD(chunk_prefix_str);
@@ -1385,6 +1392,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
  if (postfix != Qnil) {
  splice_chunks_get_sqe(backend, &ctx, &sqe, OP_WRITE);
  splice_chunks_prep_write(ctx, sqe, dest_fptr->fd, postfix);
+ backend->base.op_count++;
  }
  if (ctx) {
  SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, 0, &switchpoint_result);
@@ -1430,6 +1438,7 @@ void Init_Backend() {
  rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
  rb_define_method(cBackend, "trace", Backend_trace, -1);
  rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);
+ rb_define_method(cBackend, "stats", Backend_stats, 0);

  rb_define_method(cBackend, "poll", Backend_poll, 1);
  rb_define_method(cBackend, "break", Backend_wakeup, 0);
@@ -1469,6 +1478,8 @@ void Init_Backend() {
  SYM_send = ID2SYM(rb_intern("send"));
  SYM_splice = ID2SYM(rb_intern("splice"));
  SYM_write = ID2SYM(rb_intern("write"));
+
+ backend_setup_stats_symbols();
  }

  #endif // POLYPHONY_BACKEND_LIBURING
@@ -164,6 +164,8 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
  Backend_t *backend;
  GetBackend(self, backend);

+ backend->base.poll_count++;
+
  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
  backend->base.currently_polling = 1;
  ev_run(backend->ev_loop, blocking == Qtrue ? EVRUN_ONCE : EVRUN_NOWAIT);
@@ -211,14 +213,11 @@ VALUE Backend_wakeup(VALUE self) {
  return Qnil;
  }

- inline struct backend_stats Backend_stats(VALUE self) {
+ inline struct backend_stats backend_get_stats(VALUE self) {
  Backend_t *backend;
  GetBackend(self, backend);

- return (struct backend_stats){
- .scheduled_fibers = runqueue_len(&backend->base.runqueue),
- .pending_ops = backend->base.pending_count
- };
+ return backend_base_stats(&backend->base);
  }

  struct libev_io {
@@ -289,6 +288,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
  OBJ_TAINT(str);

  while (1) {
+ backend->base.op_count++;
  ssize_t n = read(fptr->fd, buf, len - total);
  if (n < 0) {
  int e = errno;
@@ -359,6 +359,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
  watcher.fiber = Qnil;

  while (1) {
+ backend->base.op_count++;
  ssize_t n = read(fptr->fd, buf, len);
  if (n < 0) {
  int e = errno;
@@ -411,6 +412,7 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
  watcher.fiber = Qnil;

  while (1) {
+ backend->base.op_count++;
  ssize_t n = read(fptr->fd, buf, len);
  if (n < 0) {
  int e = errno;
@@ -458,6 +460,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
  watcher.fiber = Qnil;

  while (left > 0) {
+ backend->base.op_count++;
  ssize_t n = write(fptr->fd, buf, left);
  if (n < 0) {
  int e = errno;
@@ -517,6 +520,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
  iov_ptr = iov;

  while (1) {
+ backend->base.op_count++;
  ssize_t n = writev(fptr->fd, iov_ptr, iov_count);
  if (n < 0) {
  int e = errno;
@@ -588,6 +592,7 @@ VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
  io_verify_blocking_mode(fptr, server_socket, Qfalse);
  watcher.fiber = Qnil;
  while (1) {
+ backend->base.op_count++;
  fd = accept(fptr->fd, &addr, &len);
  if (fd < 0) {
  int e = errno;
@@ -646,6 +651,7 @@ VALUE Backend_accept_loop(VALUE self, VALUE server_socket, VALUE socket_class) {
  watcher.fiber = Qnil;

  while (1) {
+ backend->base.op_count++;
  fd = accept(fptr->fd, &addr, &len);
  if (fd < 0) {
  int e = errno;
@@ -705,6 +711,7 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
  addr.sin_addr.s_addr = inet_addr(host_buf);
  addr.sin_port = htons(NUM2INT(port));

+ backend->base.op_count++;
  int result = connect(fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
  if (result < 0) {
  int e = errno;
@@ -745,6 +752,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
  watcher.fiber = Qnil;

  while (left > 0) {
+ backend->base.op_count++;
  ssize_t n = send(fptr->fd, buf, left, flags_int);
  if (n < 0) {
  int e = errno;
@@ -852,6 +860,7 @@ VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {

  watcher.ctx.fiber = Qnil;
  while (1) {
+ backend->base.op_count++;
  len = splice(src_fptr->fd, 0, dest_fptr->fd, 0, NUM2INT(maxlen), 0);
  if (len < 0) {
  int e = errno;
@@ -903,6 +912,7 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {

  watcher.ctx.fiber = Qnil;
  while (1) {
+ backend->base.op_count++;
  len = splice(src_fptr->fd, 0, dest_fptr->fd, 0, NUM2INT(maxlen), 0);
  if (len < 0) {
  int e = errno;
@@ -962,6 +972,7 @@ VALUE Backend_fake_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  watcher.fiber = Qnil;

  while (1) {
+ backend->base.op_count++;
  ssize_t n = read(src_fptr->fd, buf, len);
  if (n < 0) {
  int e = errno;
@@ -977,6 +988,7 @@ VALUE Backend_fake_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  }

  while (left > 0) {
+ backend->base.op_count++;
  ssize_t n = write(dest_fptr->fd, buf, left);
  if (n < 0) {
  int e = errno;
@@ -1037,6 +1049,7 @@ VALUE Backend_fake_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen
  while (1) {
  char *ptr = buf;
  while (1) {
+ backend->base.op_count++;
  ssize_t n = read(src_fptr->fd, ptr, len);
  if (n < 0) {
  int e = errno;
@@ -1054,6 +1067,7 @@ VALUE Backend_fake_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen
  }

  while (left > 0) {
+ backend->base.op_count++;
  ssize_t n = write(dest_fptr->fd, ptr, left);
  if (n < 0) {
  int e = errno;
@@ -1093,6 +1107,7 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
  GetBackend(self, backend);
  GetOpenFile(io, fptr);

+ backend->base.op_count++;
  return libev_wait_fd(backend, fptr->fd, events, 1);
  }

@@ -1116,6 +1131,7 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
  watcher.fiber = rb_fiber_current();
  ev_timer_init(&watcher.timer, Backend_timer_callback, NUM2DBL(duration), 0.);
  ev_timer_start(backend->ev_loop, &watcher.timer);
+ backend->base.op_count++;

  switchpoint_result = backend_await((struct Backend_base *)backend);

@@ -1145,6 +1161,7 @@ noreturn VALUE Backend_timer_loop(VALUE self, VALUE interval) {
  VALUE switchpoint_result = Qnil;
  ev_timer_init(&watcher.timer, Backend_timer_callback, sleep_duration, 0.);
  ev_timer_start(backend->ev_loop, &watcher.timer);
+ backend->base.op_count++;
  switchpoint_result = backend_await((struct Backend_base *)backend);
  ev_timer_stop(backend->ev_loop, &watcher.timer);
  RAISE_IF_EXCEPTION(switchpoint_result);
@@ -1196,6 +1213,7 @@ VALUE Backend_timeout(int argc,VALUE *argv, VALUE self) {
  watcher.resume_value = timeout;
  ev_timer_init(&watcher.timer, Backend_timeout_callback, NUM2DBL(duration), 0.);
  ev_timer_start(backend->ev_loop, &watcher.timer);
+ backend->base.op_count++;

  struct Backend_timeout_ctx timeout_ctx = {backend, &watcher};
  result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
@@ -1218,6 +1236,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
  if (fd >= 0) {
  Backend_t *backend;
  GetBackend(self, backend);
+ backend->base.op_count++;

  VALUE resume_value = libev_wait_fd(backend, fd, EV_READ, 0);
  close(fd);
@@ -1261,6 +1280,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
  watcher.fiber = rb_fiber_current();
  ev_child_init(&watcher.child, Backend_child_callback, NUM2INT(pid), 0);
  ev_child_start(backend->ev_loop, &watcher.child);
+ backend->base.op_count++;

  switchpoint_result = backend_await((struct Backend_base *)backend);

@@ -1283,6 +1303,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {

  ev_async_init(&async, Backend_async_callback);
  ev_async_start(backend->ev_loop, &async);
+ backend->base.op_count++;

  switchpoint_result = backend_await((struct Backend_base *)backend);

@@ -1346,6 +1367,7 @@ inline int splice_chunks_write(Backend_t *backend, int fd, VALUE str, struct lib
  int len = RSTRING_LEN(str);
  int left = len;
  while (left > 0) {
+ backend->base.op_count++;
  ssize_t n = write(fd, buf, left);
  if (n < 0) {
  int err = errno;
@@ -1406,6 +1428,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
  int chunk_len;
  // splice to pipe
  while (1) {
+ backend->base.op_count++;
  chunk_len = splice(src_fptr->fd, 0, pipefd[1], 0, maxlen, 0);
  if (chunk_len < 0) {
  err = errno;
@@ -1431,6 +1454,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL

  int left = chunk_len;
  while (1) {
+ backend->base.op_count++;
  int n = splice(pipefd[0], 0, dest_fptr->fd, 0, left, 0);
  if (n < 0) {
  err = errno;
@@ -1503,6 +1527,7 @@ void Init_Backend() {
  rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
  rb_define_method(cBackend, "trace", Backend_trace, -1);
  rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);
+ rb_define_method(cBackend, "stats", Backend_stats, 0);

  rb_define_method(cBackend, "poll", Backend_poll, 1);
  rb_define_method(cBackend, "break", Backend_wakeup, 0);
@@ -1545,6 +1570,8 @@ void Init_Backend() {
  SYM_send = ID2SYM(rb_intern("send"));
  SYM_splice = ID2SYM(rb_intern("splice"));
  SYM_write = ID2SYM(rb_intern("write"));
+
+ backend_setup_stats_symbols();
  }

  #endif // POLYPHONY_BACKEND_LIBEV
@@ -114,7 +114,6 @@ VALUE Backend_wakeup(VALUE self);
  VALUE Backend_run_idle_tasks(VALUE self);
  VALUE Backend_switch_fiber(VALUE self);
  void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize);
- struct backend_stats Backend_stats(VALUE self);
  void Backend_unschedule_fiber(VALUE self, VALUE fiber);

  VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
@@ -4,7 +4,6 @@
  inline void runqueue_initialize(runqueue_t *runqueue) {
  runqueue_ring_buffer_init(&runqueue->entries);
  runqueue->high_watermark = 0;
- runqueue->switch_count = 0;
  }

  inline void runqueue_finalize(runqueue_t *runqueue) {
@@ -30,12 +29,7 @@ inline void runqueue_unshift(runqueue_t *runqueue, VALUE fiber, VALUE value, int
  }

  inline runqueue_entry runqueue_shift(runqueue_t *runqueue) {
- runqueue_entry entry = runqueue_ring_buffer_shift(&runqueue->entries);
- if (entry.fiber == Qnil)
- runqueue->high_watermark = 0;
- else
- runqueue->switch_count += 1;
- return entry;
+ return runqueue_ring_buffer_shift(&runqueue->entries);
  }

  inline void runqueue_delete(runqueue_t *runqueue, VALUE fiber) {
@@ -50,19 +44,20 @@ inline void runqueue_clear(runqueue_t *runqueue) {
  runqueue_ring_buffer_clear(&runqueue->entries);
  }

+ inline long runqueue_size(runqueue_t *runqueue) {
+ return runqueue->entries.size;
+ }
+
  inline long runqueue_len(runqueue_t *runqueue) {
  return runqueue->entries.count;
  }

- inline int runqueue_empty_p(runqueue_t *runqueue) {
- return (runqueue->entries.count == 0);
+ inline long runqueue_max_len(runqueue_t *runqueue) {
+ unsigned int max_len = runqueue->high_watermark;
+ runqueue->high_watermark = 0;
+ return max_len;
  }

- static const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;
-
- inline int runqueue_should_poll_nonblocking(runqueue_t *runqueue) {
- if (runqueue->switch_count < ANTI_STARVE_SWITCH_COUNT_THRESHOLD) return 0;
-
- runqueue->switch_count = 0;
- return 1;
+ inline int runqueue_empty_p(runqueue_t *runqueue) {
+ return (runqueue->entries.count == 0);
  }
@@ -7,7 +7,6 @@
  typedef struct runqueue {
  runqueue_ring_buffer entries;
  unsigned int high_watermark;
- unsigned int switch_count;
  } runqueue_t;

  void runqueue_initialize(runqueue_t *runqueue);
@@ -20,8 +19,9 @@ runqueue_entry runqueue_shift(runqueue_t *runqueue);
  void runqueue_delete(runqueue_t *runqueue, VALUE fiber);
  int runqueue_index_of(runqueue_t *runqueue, VALUE fiber);
  void runqueue_clear(runqueue_t *runqueue);
+ long runqueue_size(runqueue_t *runqueue);
  long runqueue_len(runqueue_t *runqueue);
+ long runqueue_max_len(runqueue_t *runqueue);
  int runqueue_empty_p(runqueue_t *runqueue);
- int runqueue_should_poll_nonblocking(runqueue_t *runqueue);

  #endif /* RUNQUEUE_H */
@@ -13,18 +13,6 @@ static VALUE Thread_setup_fiber_scheduling(VALUE self) {
  return self;
  }

- static VALUE SYM_scheduled_fibers;
- static VALUE SYM_pending_watchers;
-
- static VALUE Thread_fiber_scheduling_stats(VALUE self) {
- struct backend_stats backend_stats = Backend_stats(rb_ivar_get(self, ID_ivar_backend));
-
- VALUE stats = rb_hash_new();
- rb_hash_aset(stats, SYM_scheduled_fibers, INT2NUM(backend_stats.scheduled_fibers));
- rb_hash_aset(stats, SYM_pending_watchers, INT2NUM(backend_stats.pending_ops));
- return stats;
- }
-
  inline void schedule_fiber(VALUE self, VALUE fiber, VALUE value, int prioritize) {
  Backend_schedule_fiber(self, rb_ivar_get(self, ID_ivar_backend), fiber, value, prioritize);
  }
@@ -72,7 +60,6 @@ VALUE Thread_class_backend(VALUE _self) {

  void Init_Thread() {
  rb_define_method(rb_cThread, "setup_fiber_scheduling", Thread_setup_fiber_scheduling, 0);
- rb_define_method(rb_cThread, "fiber_scheduling_stats", Thread_fiber_scheduling_stats, 0);
  rb_define_method(rb_cThread, "schedule_and_wakeup", Thread_fiber_schedule_and_wakeup, 2);

  rb_define_method(rb_cThread, "schedule_fiber", Thread_schedule_fiber, 2);
@@ -91,9 +78,4 @@ void Init_Thread() {
  ID_ivar_main_fiber = rb_intern("@main_fiber");
  ID_ivar_terminated = rb_intern("@terminated");
  ID_stop = rb_intern("stop");
-
- SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));
- SYM_pending_watchers = ID2SYM(rb_intern("pending_watchers"));
- rb_global_variable(&SYM_scheduled_fibers);
- rb_global_variable(&SYM_pending_watchers);
  }
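Since `Thread#fiber_scheduling_stats` is removed here in favour of the backend-level API, a hedged migration sketch follows; the old and new key names come from the code above and from `backend_setup_stats_symbols`, and `Thread.backend` is used as in the echo server example earlier in this diff:

```ruby
require 'polyphony'

# Before (<= 0.60), scheduling stats lived on Thread:
#   Thread.current.fiber_scheduling_stats
#   # => { scheduled_fibers: ..., pending_watchers: ... }

# From 0.61 on, they live on the backend:
stats = Thread.backend.stats
stats[:runqueue_length]   # replaces :scheduled_fibers (both report runqueue_len)
stats[:pending_ops]       # replaces :pending_watchers (both report pending_count)
```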
@@ -108,7 +108,11 @@ class ::IO
  end

  alias_method :orig_read, :read
- def read(len = nil)
+ def read(len = nil, buf = nil, buf_pos = 0)
+ if buf
+ return Polyphony.backend_read(self, buf, len, true, buf_pos)
+ end
+
  @read_buffer ||= +''
  result = Polyphony.backend_read(self, @read_buffer, len, true, -1)
  return nil unless result
@@ -119,9 +123,9 @@ class ::IO
  end

  alias_method :orig_readpartial, :read
- def readpartial(len, str = +'', buffer_pos = 0)
+ def readpartial(len, str = +'', buffer_pos = 0, raise_on_eof = true)
  result = Polyphony.backend_read(self, str, len, false, buffer_pos)
- raise EOFError unless result
+ raise EOFError if !result && raise_on_eof

  result
  end
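A hedged sketch of the reworked `IO#read` / `IO#readpartial` above; the pipe and payloads are illustrative and mirror the new tests further down in this diff:

```ruby
require 'polyphony'

i, o = IO.pipe
o << 'foobarbaz'

buf = +''
i.read(3, buf)             # read into an explicit buffer          => "foo"
i.read(3, buf, -1)         # buf_pos = -1 appends to the buffer    => "foobar"
i.readpartial(3, buf, -1)  # up to 3 more bytes, appended          => "foobarbaz"

o.close
i.readpartial(3, +'', 0, false)  # raise_on_eof: false => nil at EOF
# i.readpartial(3)               # the default still raises EOFError at EOF
```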
@@ -64,7 +64,21 @@ class ::OpenSSL::SSL::SSLSocket
  # @sync = osync
  end

- def readpartial(maxlen, buf = +'', buffer_pos = 0)
+ alias_method :orig_read, :read
+ def read(maxlen = nil, buf = nil, buf_pos = 0)
+ return readpartial(maxlen, buf, buf_pos) if buf
+
+ buf = +''
+ return readpartial(maxlen, buf) if maxlen
+
+ while true
+ readpartial(4096, buf, -1)
+ end
+ rescue EOFError
+ buf
+ end
+
+ def readpartial(maxlen, buf = +'', buffer_pos = 0, raise_on_eof = true)
  if buffer_pos != 0
  if (result = sysread(maxlen, +''))
  if buffer_pos == -1
@@ -76,7 +90,9 @@ class ::OpenSSL::SSL::SSLSocket
  else
  result = sysread(maxlen, buf)
  end
- result || (raise EOFError)
+
+ raise EOFError if !result && raise_on_eof
+ result
  end

  def read_loop(maxlen = 8192)
@@ -22,6 +22,23 @@ class ::Socket
  Polyphony.backend_connect(self, addr.ip_address, addr.ip_port)
  end

+ alias_method :orig_read, :read
+ def read(maxlen = nil, buf = nil, buf_pos = 0)
+ return Polyphony.backend_recv(self, buf, maxlen, buf_pos) if buf
+ return Polyphony.backend_recv(self, buf || +'', maxlen, 0) if maxlen
+
+ buf = +''
+ len = buf.bytesize
+ while true
+ Polyphony.backend_recv(self, buf, maxlen || 4096, -1)
+ new_len = buf.bytesize
+ break if new_len == len
+
+ len = new_len
+ end
+ buf
+ end
+
  def recv(maxlen, flags = 0, outbuf = nil)
  Polyphony.backend_recv(self, outbuf || +'', maxlen, 0)
  end
@@ -60,8 +77,9 @@ class ::Socket
  # Polyphony.backend_send(self, mesg, 0)
  # end

- def readpartial(maxlen, str = +'', buffer_pos = 0)
- Polyphony.backend_recv(self, str, maxlen, buffer_pos)
+ def readpartial(maxlen, str = +'', buffer_pos = 0, raise_on_eof = true)
+ result = Polyphony.backend_recv(self, str, maxlen, buffer_pos)
+ raise EOFError if !result && raise_on_eof
  end

  ZERO_LINGER = [0, 0].pack('ii').freeze
@@ -140,6 +158,23 @@ class ::TCPSocket
  setsockopt(::Socket::SOL_SOCKET, ::Socket::SO_REUSEPORT, 1)
  end

+ alias_method :orig_read, :read
+ def read(maxlen = nil, buf = nil, buf_pos = 0)
+ return Polyphony.backend_recv(self, buf, maxlen, buf_pos) if buf
+ return Polyphony.backend_recv(self, buf || +'', maxlen, 0) if maxlen
+
+ buf = +''
+ len = buf.bytesize
+ while true
+ Polyphony.backend_recv(self, buf, maxlen || 4096, -1)
+ new_len = buf.bytesize
+ break if new_len == len
+
+ len = new_len
+ end
+ buf
+ end
+
  def recv(maxlen, flags = 0, outbuf = nil)
  Polyphony.backend_recv(self, outbuf || +'', maxlen, 0)
  end
@@ -165,11 +200,10 @@ class ::TCPSocket
  # Polyphony.backend_send(self, mesg, 0)
  # end

- def readpartial(maxlen, str = +'', buffer_pos = 0)
+ def readpartial(maxlen, str = +'', buffer_pos = 0, raise_on_eof)
  result = Polyphony.backend_recv(self, str, maxlen, buffer_pos)
- raise EOFError unless result
-
- str
+ raise EOFError if !result && raise_on_eof
+ result
  end

  def read_nonblock(len, str = nil, exception: true)
@@ -217,6 +251,23 @@ class ::UNIXServer
  end

  class ::UNIXSocket
+ alias_method :orig_read, :read
+ def read(maxlen = nil, buf = nil, buf_pos = 0)
+ return Polyphony.backend_recv(self, buf, maxlen, buf_pos) if buf
+ return Polyphony.backend_recv(self, buf || +'', maxlen, 0) if maxlen
+
+ buf = +''
+ len = buf.bytesize
+ while true
+ Polyphony.backend_recv(self, buf, maxlen || 4096, -1)
+ new_len = buf.bytesize
+ break if new_len == len
+
+ len = new_len
+ end
+ buf
+ end
+
  def recv(maxlen, flags = 0, outbuf = nil)
  Polyphony.backend_recv(self, outbuf || +'', maxlen, 0)
  end
@@ -242,11 +293,10 @@ class ::UNIXSocket
  Polyphony.backend_send(self, mesg, 0)
  end

- def readpartial(maxlen, str = +'', buffer_pos = 0)
+ def readpartial(maxlen, str = +'', buffer_pos = 0, raise_on_eof)
  result = Polyphony.backend_recv(self, str, maxlen, buffer_pos)
- raise EOFError unless result
-
- str
+ raise EOFError if !result && raise_on_eof
+ result
  end

  def read_nonblock(len, str = nil, exception: true)
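And a hedged sketch of the `Socket#read` semantics added above; the address is illustrative and an echo server (such as the example earlier in this diff) is assumed to be listening:

```ruby
require 'polyphony'

socket = TCPSocket.new('127.0.0.1', 1234)  # assumes an echo server on this port
socket << 'foobarbaz'

buf = +''
socket.read(3, buf)        # single recv of up to 3 bytes into buf  => "foo"
socket.read(3, buf, -1)    # buf_pos = -1 appends to the buffer     => "foobar"
chunk = socket.read(8192)  # maxlen only: one recv, up to 8192 bytes
# socket.read              # no maxlen: keeps receiving until the peer closes
```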
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Polyphony
- VERSION = '0.60'
+ VERSION = '0.64'
  end
data/test/test_backend.rb CHANGED
@@ -26,7 +26,7 @@ class BackendTest < MiniTest::Test
  @backend.sleep 0.01
  count += 1
  }.await
- assert_in_delta 0.03, Time.now - t0, 0.01
+ assert_in_range 0.02..0.04, Time.now - t0
  assert_equal 3, count
  end

@@ -98,6 +98,32 @@ class BackendTest < MiniTest::Test
  assert_equal return_value, buf
  end

+ def test_read_concat_big
+ i, o = IO.pipe
+
+ body = " " * 4000
+
+ data = "post /?q=time&blah=blah HTTP/1\r\nHost: dev.realiteq.net\r\n\r\n" +
+ "get /?q=time HTTP/1.1\r\nContent-Length: #{body.bytesize}\r\n\r\n#{body}" +
+ "get /?q=time HTTP/1.1\r\nCookie: foo\r\nCookie: bar\r\n\r\n"
+
+ o << data
+ o.close
+
+ buf = +''
+
+ @backend.read(i, buf, 4096, false, -1)
+ assert_equal 4096, buf.bytesize
+
+ @backend.read(i, buf, 1, false, -1)
+ assert_equal 4097, buf.bytesize
+
+ @backend.read(i, buf, 4096, false, -1)
+
+ assert_equal data.bytesize, buf.bytesize
+ assert_equal data, buf
+ end
+
  def test_waitpid
  pid = fork do
  @backend.post_fork
data/test/test_io.rb CHANGED
@@ -73,6 +73,26 @@ class IOTest < MiniTest::Test
  assert_equal [:wait_readable, 'foo'], results
  end

+ def test_read
+ i, o = IO.pipe
+
+ o << 'hi'
+ assert_equal 'hi', i.read(2)
+
+ o << 'foobarbaz'
+ assert_equal 'foo', i.read(3)
+ assert_equal 'bar', i.read(3)
+
+ buf = +'abc'
+ assert_equal 'baz', i.read(3, buf)
+ assert_equal 'baz', buf
+
+ buf = +'def'
+ o << 'foobar'
+ assert_equal 'deffoobar', i.read(6, buf, -1)
+ assert_equal 'deffoobar', buf
+ end
+
  def test_readpartial
  i, o = IO.pipe

data/test/test_socket.rb CHANGED
@@ -12,7 +12,6 @@ class SocketTest < MiniTest::Test
  def test_tcp
  port = rand(1234..5678)
  server = TCPServer.new('127.0.0.1', port)
-
  server_fiber = spin do
  while (socket = server.accept)
  spin do
@@ -34,6 +33,45 @@ class SocketTest < MiniTest::Test
  server&.close
  end

+ def test_read
+ port = rand(1234..5678)
+ server = TCPServer.new('127.0.0.1', port)
+ server_fiber = spin do
+ while (socket = server.accept)
+ spin do
+ while (data = socket.read(8192))
+ socket << data
+ end
+ end
+ end
+ end
+
+ snooze
+ client = TCPSocket.new('127.0.0.1', port)
+
+ client << 'hi'
+ assert_equal 'hi', client.read(2)
+
+ client << 'foobarbaz'
+ assert_equal 'foo', client.read(3)
+ assert_equal 'bar', client.read(3)
+
+ buf = +'abc'
+ assert_equal 'baz', client.read(3, buf)
+ assert_equal 'baz', buf
+
+ buf = +'def'
+ client << 'foobar'
+ assert_equal 'deffoobar', client.read(6, buf, -1)
+ assert_equal 'deffoobar', buf
+
+ client.close
+ ensure
+ server_fiber&.stop
+ server_fiber&.await
+ server&.close
+ end
+
  # sending multiple strings at once
  def test_sendv
  port = rand(1234..5678)
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: polyphony
  version: !ruby/object:Gem::Version
- version: '0.60'
+ version: '0.64'
  platform: ruby
  authors:
  - Sharon Rosner
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-07-15 00:00:00.000000000 Z
+ date: 2021-07-26 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rake-compiler
@@ -419,7 +419,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.1.4
+ rubygems_version: 3.1.6
  signing_key:
  specification_version: 4
  summary: Fine grained concurrency for Ruby