polyphony 0.54.0 → 0.55.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 7c88e804739b62a55f7647c57e55582d2ae5cf6a73bca55160aa90138b80217a
- data.tar.gz: ecbeb0aeed96a7b6f21f8d41b67195574213b8b01b51bccd0f1bcf59165ab9f6
+ metadata.gz: 2f0f6698a072e4ba2913477d20f5884d5d7579b99cca1db0f2ffa3aeca42ebcc
+ data.tar.gz: 97fec9986f6693d685bfb0c1745995c8a85413a9b6cc1a1ff69393d55a5f3b74
  SHA512:
- metadata.gz: 93ff227cab8891c79bc4447c25359df03cd675ada579d13c5dc09d1f18e42fd00ed1b3ce5e433bd5addf83076af3a45c29cdd49ae8cb2cb9f19892cb27a40476
- data.tar.gz: 1e1f39c19d2edbd49406c3a3917848951914c48b81316fff6ff78f381fb7db70b407dc5428b94a45292261e202df1c91da5d4e971ab19ba91e7c96e0bbbb91bc
+ metadata.gz: 053beffc7c27658d129002b5efc29f64c7fcf1ff7fbf3f2eea1d588b2a9e40e89251640bf9bb82ce478837f38c32863987d8ac0af987d374d4ac242d012363dd
+ data.tar.gz: e89df4044f09f4df64fc57622f22e70d932f2f09ea3b2a5c2090698784c286367df43d352231068b8ef96d6a7ba89e5492b4c237c9960aeb5fa79af9ee738768
data/.gitignore CHANGED
@@ -56,4 +56,6 @@ lib/*.bundle
  lib/*.so

  _site
- .sass-cache
+ .sass-cache
+
+ log
data/CHANGELOG.md CHANGED
@@ -1,4 +1,11 @@
- ## 0.54.0
+ ## 0.55.0 2021-06-17
+
+ - Finish io_uring implementation of Backend#chain
+ - Reimplement io_uring op_context acquire/release algorithm (using ref count)
+ - Fix #gets on sockets
+ - Redesign event anti-starvation mechanism
+
+ ## 0.54.0 2021-06-14

  - Implement Mutex#owned?, #locked? (#50)
  - Fix arity for SSLSocket#peeraddr (#55)
@@ -6,15 +13,15 @@
  - Fix SSLSocket buffering behaviour
  - Add recv_loop alias for SSLSocket (#54)

- ## 0.53.2
+ ## 0.53.2 2021-05-10

  - Remove `splice` methods on libev backend on non-Linux OS (#43)

- ## 0.53.0
+ ## 0.53.0 2021-04-23

  - Implement `Backend#splice`, `Backend#splice_to_eof`, along with `IO#splice`, `IO#splice_to_eof`

- ## 0.52.0
+ ## 0.52.0 2021-02-28

  - Polyphony is now compatible with Ruby 3.0
  - Add `Backend#sendv` method for sending multiple strings
@@ -24,19 +31,19 @@
  - libev backend: Use `pidfd_open` for Linux 5.3+, otherwise use a libev child watcher
  - Use `:call` as default method in `#feed_loop`

- ## 0.51.0
+ ## 0.51.0 2021-02-02

  - Implement `IO#feed_loop`, `Socket#feed_loop`
  - Fix error handling in `Process.kill_and_await`

- ## 0.50.1
+ ## 0.50.1 2021-01-31

  - Set `IOSQE_ASYNC` flag in io_uring backend
  - Fix error handling in `Backend#waitpid`
  - Reimplement libev backend's `#waitpid` by using pidfd_open (in similar manner
    to the io_uring backend)

- ## 0.50.0
+ ## 0.50.0 2021-01-28

  - Use `Process::CLOCK_MONOTONIC` in Timer
  - Add `Timer#sleep`, `Timer#after`, `Timer#every`
@@ -44,50 +51,50 @@
  - Add `Thread#fiber_index_of` method
  - Use `Backend#wait_event` in `Fiber#await`

- ## 0.49.2
+ ## 0.49.2 2021-01-19

  - Fix hang with 100s or more child fibers when terminating
  - Fix double pending_count increment in io_uring backend

- ## 0.49.1
+ ## 0.49.1 2021-01-13

  - Use `TCPSocket` instead of `Socket` in `Net.tcp_connect`
  - Catch `Errno::ESRCH` in `Process.kill_and_await`
  - Set io_uring queue size to 2048

- ## 0.49.0
+ ## 0.49.0 2021-01-11

  - Implement `Polyphony::Timer` for performant timeouts

- ## 0.48.0
+ ## 0.48.0 2021-01-05

  - Implement graceful shutdown
  - Add support for `break` / `StopIteration` in `spin_loop`
  - Fix `IO#gets`, `IO#readpartial`

- ## 0.47.5.1
+ ## 0.47.5.1 2020-11-20

  - Add missing `Socket#accept_loop` method

- ## 0.47.5
+ ## 0.47.5 2020-11-20

  - Add `socket_class` argument to `Backend#accept`, `Backend#accept_loop`
  - Fix `#supervise` to stop when all child fibers are done

- ## 0.47.4
+ ## 0.47.4 2020-11-14

  - Add support for Unix sockets

- ## 0.47.3
+ ## 0.47.3 2020-11-12

  - Enable I/O in signal handlers (#45)
  - Accept `:interval` argument in `#spin_loop`

- ## 0.47.2
+ ## 0.47.2 2020-11-10

  - Fix API compatibility between TCPSocket and IO

- ## 0.47.0
+ ## 0.47.0 2020-11-10

  - Implement `#spin_scope` used for creating blocking fiber scopes
  - Reimplement `move_on_after`, `cancel_after`, `Timeout.timeout` using
@@ -95,18 +102,18 @@
  - Implement `Backend#timeout` API
  - Implemented capped queues

- ## 0.46.1
+ ## 0.46.1 2020-11-04

  - Add `TCPServer#accept_loop`, `OpenSSL::SSL::SSLSocket#accept_loop` method
  - Fix compilation error on MacOS (#43)
  - Fix backtrace for `Timeout.timeout`
  - Add `Backend#timer_loop`

- ## 0.46.0
+ ## 0.46.0 2020-10-08

  - Implement [io_uring backend](https://github.com/digital-fabric/polyphony/pull/44)

- ## 0.45.5
+ ## 0.45.5 2020-10-04

  - Fix compilation error (#43)
  - Add support for resetting move_on_after, cancel_after timeouts
@@ -115,22 +122,22 @@
  - Schedule parent with priority on uncaught exception
  - Fix race condition in `Mutex#synchronize` (#41)

- ## 0.45.4
+ ## 0.45.4 2020-09-06

  - Improve signal trapping mechanism

- ## 0.45.3
+ ## 0.45.3 2020-09-02

  - Don't swallow error in `Process#kill_and_await`
  - Add `Fiber#mailbox` attribute reader
  - Fix bug in `Fiber.await`
  - Implement `IO#getc`, `IO#getbyte`

- ## 0.45.2
+ ## 0.45.2 2020-08-03

  - Rewrite `Fiber#<<`, `Fiber#await`, `Fiber#receive` in C

- ## 0.45.1
+ ## 0.45.1 2020-08-01

  - Fix Net::HTTP compatibility
  - Fix fs adapter
@@ -140,7 +147,7 @@
  - Cleanup code
  - Improve support for Ruby 3 keyword args

- ## 0.45.0
+ ## 0.45.0 2020-07-29

  - Cleanup code
  - Rename `Agent` to `Backend`
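
Note: the 0.55.0 entry above completes the io_uring implementation of Backend#chain, which submits several I/O ops as a single linked chain and awaits them together. A minimal usage sketch (illustrative, not part of the package; modeled on the ops accepted by Backend_chain and the tests in data/test/test_backend.rb further down — each op is an array of the form [:write, io, str], [:send, sock, str, flags] or [:splice, src, dest, maxlen]):

    require 'polyphony'

    i, o = IO.pipe
    # chain blocks the current fiber until all ops complete;
    # it returns the result of the last op
    Thread.backend.chain(
      [:write, o, 'hello '],
      [:write, o, 'world']
    )
    o.close
    puts i.read  #=> "hello world"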
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
  remote: .
  specs:
- polyphony (0.54.0)
+ polyphony (0.55.0)

  GEM
  remote: https://rubygems.org/
data/TODO.md CHANGED
@@ -10,9 +10,6 @@

  - Add support for `break` and `StopIteration` in all loops (with tests)

- - Change `IO#gets` to use `String#split` to cut into lines, much faster (see
-   examples/performance/line_splitting.rb)
-
  - More tight loops
    - `IO#gets_loop`, `Socket#gets_loop`, `OpenSSL::Socket#gets_loop` (medium effort)
    - `Fiber#receive_loop` (very little effort, should be implemented in C)
data/examples/io/stdio.rb ADDED
@@ -0,0 +1,8 @@
+ # frozen_string_literal: true
+
+ require 'bundler/setup'
+ require 'polyphony'
+
+ puts 'Please enter your name:'
+ name = gets.chomp
+ puts "Hello, #{name}!"
data/ext/polyphony/backend_common.h CHANGED
@@ -26,7 +26,7 @@ struct io_internal_read_struct {

  #define StringValue(v) rb_string_value(&(v))

- int io_setstrbuf(VALUE *str, long len) {
+ inline int io_setstrbuf(VALUE *str, long len) {
  #ifdef _WIN32
  len = (len + 1) & ~1L; /* round up for wide char */
  #endif
@@ -55,7 +55,7 @@ inline void io_shrink_read_string(VALUE str, long n) {
  }
  }

- void io_set_read_length(VALUE str, long n, int shrinkable) {
+ inline void io_set_read_length(VALUE str, long n, int shrinkable) {
  if (RSTRING_LEN(str) != n) {
  rb_str_modify(str);
  rb_str_set_len(str, n);
@@ -64,16 +64,16 @@ void io_set_read_length(VALUE str, long n, int shrinkable) {
  }

  inline rb_encoding* io_read_encoding(rb_io_t *fptr) {
- if (fptr->encs.enc) {
- return fptr->encs.enc;
- }
- return rb_default_external_encoding();
+ if (fptr->encs.enc) {
+ return fptr->encs.enc;
+ }
+ return rb_default_external_encoding();
  }

- VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
- OBJ_TAINT(str);
- rb_enc_associate(str, io_read_encoding(fptr));
- return str;
+ inline VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
+ OBJ_TAINT(str);
+ rb_enc_associate(str, io_read_encoding(fptr));
+ return str;
  }

  //////////////////////////////////////////////////////////////////////
data/ext/polyphony/backend_io_uring.c CHANGED
@@ -21,6 +21,9 @@
  #include "ruby/io.h"

  VALUE SYM_io_uring;
+ VALUE SYM_send;
+ VALUE SYM_splice;
+ VALUE SYM_write;

  #ifdef POLYPHONY_UNSET_NONBLOCK
  ID ID_ivar_is_nonblocking;
@@ -51,7 +54,6 @@ typedef struct Backend_t {
  // common fields
  unsigned int currently_polling;
  unsigned int pending_count;
- unsigned int poll_no_wait_count;

  // implementation-specific fields
  struct io_uring ring;
@@ -88,7 +90,6 @@ static VALUE Backend_initialize(VALUE self) {

  backend->currently_polling = 0;
  backend->pending_count = 0;
- backend->poll_no_wait_count = 0;
  backend->pending_sqes = 0;
  backend->prepared_limit = 2048;

@@ -118,7 +119,6 @@ VALUE Backend_post_fork(VALUE self) {
  context_store_free(&backend->store);
  backend->currently_polling = 0;
  backend->pending_count = 0;
- backend->poll_no_wait_count = 0;
  backend->pending_sqes = 0;

  return self;
@@ -155,17 +155,9 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
  if (!ctx) return;

  ctx->result = cqe->res;
-
- if (ctx->completed)
- // already marked as deleted as result of fiber resuming before op
- // completion, so we can release the context
- context_store_release(&backend->store, ctx);
- else {
- // otherwise, we mark it as completed, schedule the fiber and let it deal
- // with releasing the context
- ctx->completed = 1;
- if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
- }
+ if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
+ Fiber_make_runnable(ctx->fiber, ctx->resume_value);
+ context_store_release(&backend->store, ctx);
  }

  // adapted from io_uring_peek_batch_cqe in queue.c
@@ -219,16 +211,6 @@ VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue
  Backend_t *backend;
  GetBackend(self, backend);

- if (is_nowait) {
- backend->poll_no_wait_count++;
- if (backend->poll_no_wait_count < 10) return self;
-
- long runnable_count = Runqueue_len(runqueue);
- if (backend->poll_no_wait_count < runnable_count) return self;
- }
-
- backend->poll_no_wait_count = 0;
-
  if (is_nowait && backend->pending_sqes) {
  backend->pending_sqes = 0;
  io_uring_submit(&backend->ring);
@@ -283,10 +265,9 @@ int io_uring_backend_defer_submit_and_await(

  switchpoint_result = backend_await(backend);

- if (!ctx->completed) {
+ if (ctx->ref_count > 1) {
+ // op was not completed (an exception was raised), so we need to cancel it
  ctx->result = -ECANCELED;
-
- // op was not completed, so we need to cancel it
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_cancel(sqe, ctx, 0);
  backend->pending_sqes = 0;
@@ -300,7 +281,7 @@
  }

  VALUE io_uring_backend_wait_fd(Backend_t *backend, int fd, int write) {
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_POLL);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_POLL);
  VALUE resumed_value = Qnil;

  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -332,14 +313,14 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_read(sqe, fptr->fd, buf, buffer_size - total, -1);

  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0)
@@ -393,14 +374,14 @@ VALUE Backend_read_loop(VALUE self, VALUE io) {

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_read(sqe, fptr->fd, buf, len, -1);

  ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0)
@@ -440,14 +421,14 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_READ);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_read(sqe, fptr->fd, buf, len, -1);

  ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0)
@@ -483,14 +464,14 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {

  while (left > 0) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITE);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_write(sqe, fptr->fd, buf, left, -1);

  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0)
@@ -532,17 +513,17 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_WRITEV);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);

  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  if (TEST_EXCEPTION(resume_value)) {
  free(iov);
  RAISE_EXCEPTION(resume_value);
  }
- if (!ctx->completed) {
+ if (!completed) {
  free(iov);
  return resume_value;
  }
@@ -605,14 +586,14 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length) {

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);

  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0)
@@ -652,14 +633,14 @@ VALUE Backend_recv_loop(VALUE self, VALUE io) {

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);

  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0)
@@ -698,14 +679,14 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_RECV);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);

  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0)
@@ -741,14 +722,14 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {

  while (left > 0) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SEND);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);

  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0)
@@ -775,14 +756,14 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_ACCEPT);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);

  int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (fd < 0)
@@ -846,14 +827,14 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
  VALUE resume_value = Qnil;

  while (1) {
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_SPLICE);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);

  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;

  if (result < 0)
  rb_syserr_fail(-result, strerror(-result));
@@ -897,13 +878,13 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
  addr.sin_port = htons(NUM2INT(port));

  VALUE resume_value = Qnil;
- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_CONNECT);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_CONNECT);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
  io_uring_prep_connect(sqe, fptr->fd, (struct sockaddr *)&addr, sizeof(addr));
  int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
+ int completed = context_store_release(&backend->store, ctx);
  RAISE_IF_EXCEPTION(resume_value);
- if (!ctx->completed) return resume_value;
+ if (!completed) return resume_value;
  RB_GC_GUARD(resume_value);

  if (result < 0) rb_syserr_fail(-result, strerror(-result));
@@ -943,12 +924,11 @@ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duratio
  struct __kernel_timespec ts = double_to_timespec(duration);
  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);

- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
  io_uring_prep_timeout(sqe, &ts, 0, 0);

  io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
- OP_CONTEXT_RELEASE(&backend->store, ctx);
- return ctx->completed;
+ return context_store_release(&backend->store, ctx);
  }

  VALUE Backend_sleep(VALUE self, VALUE duration) {
@@ -996,7 +976,7 @@ struct Backend_timeout_ctx {

  VALUE Backend_timeout_ensure(VALUE arg) {
  struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
- if (!timeout_ctx->ctx->completed) {
+ if (timeout_ctx->ctx->ref_count) {
  timeout_ctx->ctx->result = -ECANCELED;

  // op was not completed, so we need to cancel it
@@ -1005,7 +985,7 @@ VALUE Backend_timeout_ensure(VALUE arg) {
  timeout_ctx->backend->pending_sqes = 0;
  io_uring_submit(&timeout_ctx->backend->ring);
  }
- OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+ context_store_release(&timeout_ctx->backend->store, timeout_ctx->ctx);
  return Qnil;
  }

@@ -1023,7 +1003,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {

  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);

- op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
  ctx->resume_value = timeout;
  io_uring_prep_timeout(sqe, &ts, 0, 0);
  io_uring_sqe_set_data(sqe, ctx);
@@ -1091,6 +1071,130 @@ VALUE Backend_kind(VALUE self) {
  return SYM_io_uring;
  }

+ struct io_uring_sqe *Backend_chain_prepare_write(Backend_t *backend, VALUE io, VALUE str) {
+ rb_io_t *fptr;
+ VALUE underlying_io;
+
+ underlying_io = rb_ivar_get(io, ID_ivar_io);
+ if (underlying_io != Qnil) io = underlying_io;
+ io = rb_io_get_write_io(io);
+ GetOpenFile(io, fptr);
+ io_unset_nonblock(fptr, io);
+
+ char *buf = StringValuePtr(str);
+ long len = RSTRING_LEN(str);
+
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+ io_uring_prep_write(sqe, fptr->fd, buf, len, -1);
+ return sqe;
+ }
+
+ struct io_uring_sqe *Backend_chain_prepare_send(Backend_t *backend, VALUE io, VALUE str, VALUE flags) {
+ rb_io_t *fptr;
+ VALUE underlying_io;
+
+ underlying_io = rb_ivar_get(io, ID_ivar_io);
+ if (underlying_io != Qnil) io = underlying_io;
+ io = rb_io_get_write_io(io);
+ GetOpenFile(io, fptr);
+ io_unset_nonblock(fptr, io);
+
+ char *buf = StringValuePtr(str);
+ long len = RSTRING_LEN(str);
+ int flags_int = NUM2INT(flags);
+
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+ io_uring_prep_send(sqe, fptr->fd, buf, len, flags_int);
+ return sqe;
+ }
+
+ struct io_uring_sqe *Backend_chain_prepare_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE maxlen) {
+ rb_io_t *src_fptr;
+ rb_io_t *dest_fptr;
+ VALUE underlying_io;
+
+ underlying_io = rb_ivar_get(src, ID_ivar_io);
+ if (underlying_io != Qnil) src = underlying_io;
+ GetOpenFile(src, src_fptr);
+ io_unset_nonblock(src_fptr, src);
+
+ underlying_io = rb_ivar_get(dest, ID_ivar_io);
+ if (underlying_io != Qnil) dest = underlying_io;
+ dest = rb_io_get_write_io(dest);
+ GetOpenFile(dest, dest_fptr);
+ io_unset_nonblock(dest_fptr, dest);
+
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+ io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
+ return sqe;
+ }
+
+ VALUE Backend_chain(int argc, VALUE *argv, VALUE self) {
+ VALUE resume_value = Qnil;
+ unsigned int sqe_count = 0;
+ struct io_uring_sqe *last_sqe = 0;
+ Backend_t *backend;
+ GetBackend(self, backend);
+ if (argc == 0) return resume_value;
+
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_CHAIN);
+ for (int i = 0; i < argc; i++) {
+ VALUE op = argv[i];
+ VALUE op_type = RARRAY_AREF(op, 0);
+ VALUE op_len = RARRAY_LEN(op);
+
+ if (op_type == SYM_write && op_len == 3) {
+ last_sqe = Backend_chain_prepare_write(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
+ }
+ else if (op_type == SYM_send && op_len == 4)
+ last_sqe = Backend_chain_prepare_send(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+ else if (op_type == SYM_splice && op_len == 4)
+ last_sqe = Backend_chain_prepare_splice(backend, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
+ else {
+ if (sqe_count) {
+ io_uring_sqe_set_data(last_sqe, ctx);
+ io_uring_sqe_set_flags(last_sqe, IOSQE_ASYNC);
+
+ ctx->ref_count = sqe_count;
+ ctx->result = -ECANCELED;
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+ io_uring_prep_cancel(sqe, ctx, 0);
+ backend->pending_sqes = 0;
+ io_uring_submit(&backend->ring);
+ }
+ else {
+ ctx->ref_count = 1;
+ context_store_release(&backend->store, ctx);
+ }
+ rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
+ }
+
+ io_uring_sqe_set_data(last_sqe, ctx);
+ unsigned int flags = (i == argc - 1) ? IOSQE_ASYNC : (IOSQE_ASYNC | IOSQE_IO_LINK);
+ io_uring_sqe_set_flags(last_sqe, flags);
+ sqe_count++;
+ }
+
+ ctx->ref_count = sqe_count + 1;
+ io_uring_backend_defer_submit(backend);
+ resume_value = backend_await(backend);
+ int result = ctx->result;
+ int completed = context_store_release(&backend->store, ctx);
+ if (!completed) {
+ // op was not completed (an exception was raised), so we need to cancel it
+ ctx->result = -ECANCELED;
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+ io_uring_prep_cancel(sqe, ctx, 0);
+ backend->pending_sqes = 0;
+ io_uring_submit(&backend->ring);
+ RAISE_IF_EXCEPTION(resume_value);
+ return resume_value;
+ }
+
+ RB_GC_GUARD(resume_value);
+ return INT2NUM(result);
+ }
+
  void Init_Backend() {
  VALUE cBackend = rb_define_class_under(mPolyphony, "Backend", rb_cObject);
  rb_define_alloc_func(cBackend, Backend_allocate);
@@ -1102,6 +1206,7 @@ void Init_Backend() {
  rb_define_method(cBackend, "poll", Backend_poll, 3);
  rb_define_method(cBackend, "break", Backend_wakeup, 0);
  rb_define_method(cBackend, "kind", Backend_kind, 0);
+ rb_define_method(cBackend, "chain", Backend_chain, -1);

  rb_define_method(cBackend, "accept", Backend_accept, 2);
  rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
@@ -1130,6 +1235,9 @@ void Init_Backend() {
  #endif

  SYM_io_uring = ID2SYM(rb_intern("io_uring"));
+ SYM_send = ID2SYM(rb_intern("send"));
+ SYM_splice = ID2SYM(rb_intern("splice"));
+ SYM_write = ID2SYM(rb_intern("write"));
  }

  #endif // POLYPHONY_BACKEND_LIBURING
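
Note: the backend changes above replace the op context's `completed` flag with a `ref_count`, so the awaiting fiber and the CQE completion handler can each release the context independently. A rough Ruby model of the protocol (hypothetical names, for illustration only — the real logic lives in context_store_acquire/context_store_release):

    OpContext = Struct.new(:ref_count, :result)

    # a context starts with two references: one held by the awaiting
    # fiber, one by the completion handler
    def acquire_context
      OpContext.new(2, 0)
    end

    # returns true only on the final release, when the context would be
    # returned to the store's available list
    def release_context(ctx)
      raise 'over-release' if ctx.ref_count.zero?
      ctx.ref_count -= 1
      ctx.ref_count.zero?
    end

    ctx = acquire_context
    release_context(ctx)  #=> false (e.g. the completion side)
    release_context(ctx)  #=> true  (the fiber side recycles it)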
data/ext/polyphony/backend_io_uring_context.c CHANGED
@@ -1,4 +1,5 @@
  #include <stdlib.h>
+ #include <assert.h>
  #include "ruby.h"
  #include "polyphony.h"
  #include "backend_io_uring_context.h"
@@ -15,6 +16,7 @@ const char *op_type_to_str(enum op_type type) {
  case OP_POLL: return "POLL";
  case OP_ACCEPT: return "ACCEPT";
  case OP_CONNECT: return "CONNECT";
+ case OP_CHAIN: return "CHAIN";
  default: return "";
  };
  }
@@ -35,6 +37,7 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty
  ctx = malloc(sizeof(op_context_t));
  }
  ctx->id = (++store->last_id);
+ // printf("acquire %d (%s)\n", ctx->id, op_type_to_str(type));

  ctx->prev = NULL;
  ctx->next = store->taken;
@@ -44,13 +47,21 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty
  ctx->type = type;
  ctx->fiber = rb_fiber_current();
  ctx->resume_value = Qnil;
- ctx->completed = 0;
+ ctx->ref_count = 2;
  ctx->result = 0;

  return ctx;
  }

- inline void context_store_release(op_context_store_t *store, op_context_t *ctx) {
+ // returns true if ctx was released
+ inline int context_store_release(op_context_store_t *store, op_context_t *ctx) {
+ // printf("release %d (%s, ref_count: %d)\n", ctx->id, op_type_to_str(ctx->type), ctx->ref_count);
+
+ assert(ctx->ref_count);
+
+ ctx->ref_count--;
+ if (ctx->ref_count) return 0;
+
  if (ctx->next) ctx->next->prev = ctx->prev;
  if (ctx->prev) ctx->prev->next = ctx->next;
  if (store->taken == ctx) store->taken = ctx->next;
@@ -59,6 +70,7 @@ inline void context_store_release(op_context_store_t *store, op_context_t *ctx)
  ctx->next = store->available;
  if (ctx->next) ctx->next->prev = ctx;
  store->available = ctx;
+ return 1;
  }

  void context_store_free(op_context_store_t *store) {
data/ext/polyphony/backend_io_uring_context.h CHANGED
@@ -14,14 +14,15 @@ enum op_type {
  OP_TIMEOUT,
  OP_POLL,
  OP_ACCEPT,
- OP_CONNECT
+ OP_CONNECT,
+ OP_CHAIN
  };

  typedef struct op_context {
  struct op_context *prev;
  struct op_context *next;
  enum op_type type: 16;
- int completed : 16;
+ unsigned int ref_count : 16;
  int id;
  int result;
  VALUE fiber;
@@ -38,17 +39,16 @@ const char *op_type_to_str(enum op_type type);

  void context_store_initialize(op_context_store_t *store);
  op_context_t *context_store_acquire(op_context_store_t *store, enum op_type type);
- void context_store_release(op_context_store_t *store, op_context_t *ctx);
+ int context_store_release(op_context_store_t *store, op_context_t *ctx);
  void context_store_free(op_context_store_t *store);

- #define OP_CONTEXT_ACQUIRE(store, op_type) context_store_acquire(store, op_type)
- #define OP_CONTEXT_RELEASE(store, ctx) { \
- if (ctx->completed) {\
- context_store_release(store, ctx); \
- } \
- else { \
- ctx->completed = 1; \
- } \
+ inline unsigned int OP_CONTEXT_RELEASE(op_context_store_t *store, op_context_t *ctx) {
+ int completed = !ctx->ref_count;
+ if (ctx->ref_count)
+ ctx->ref_count -= 1;
+ else
+ context_store_release(store, ctx);
+ return completed;
  }

  #endif /* BACKEND_IO_URING_CONTEXT_H */
data/ext/polyphony/backend_libev.c CHANGED
@@ -90,7 +90,6 @@ typedef struct Backend_t {
  // common fields
  unsigned int currently_polling;
  unsigned int pending_count;
- unsigned int poll_no_wait_count;

  // implementation-specific fields
  struct ev_loop *ev_loop;
@@ -145,7 +144,6 @@ static VALUE Backend_initialize(VALUE self) {

  backend->currently_polling = 0;
  backend->pending_count = 0;
- backend->poll_no_wait_count = 0;

  return Qnil;
  }
@@ -184,23 +182,12 @@ unsigned int Backend_pending_count(VALUE self) {
  }

  VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue) {
- int is_nowait = nowait == Qtrue;
  Backend_t *backend;
  GetBackend(self, backend);

- if (is_nowait) {
- backend->poll_no_wait_count++;
- if (backend->poll_no_wait_count < 10) return self;
-
- long runnable_count = Runqueue_len(runqueue);
- if (backend->poll_no_wait_count < runnable_count) return self;
- }
-
- backend->poll_no_wait_count = 0;
-
  COND_TRACE(2, SYM_fiber_event_poll_enter, current_fiber);
  backend->currently_polling = 1;
- ev_run(backend->ev_loop, is_nowait ? EVRUN_NOWAIT : EVRUN_ONCE);
+ ev_run(backend->ev_loop, nowait == Qtrue ? EVRUN_NOWAIT : EVRUN_ONCE);
  backend->currently_polling = 0;
  COND_TRACE(2, SYM_fiber_event_poll_leave, current_fiber);

data/ext/polyphony/polyphony.h CHANGED
@@ -90,6 +90,7 @@ int Runqueue_index_of(VALUE self, VALUE fiber);
  void Runqueue_clear(VALUE self);
  long Runqueue_len(VALUE self);
  int Runqueue_empty_p(VALUE self);
+ int Runqueue_should_poll_nonblocking(VALUE self);

  #ifdef POLYPHONY_BACKEND_LIBEV
  #define Backend_recv_loop Backend_read_loop
data/ext/polyphony/runqueue.c CHANGED
@@ -3,6 +3,8 @@

  typedef struct queue {
  runqueue_ring_buffer entries;
+ unsigned int high_watermark;
+ unsigned int switch_count;
  } Runqueue_t;

  VALUE cRunqueue = Qnil;
@@ -43,6 +45,8 @@ static VALUE Runqueue_initialize(VALUE self) {
  GetRunqueue(self, runqueue);

  runqueue_ring_buffer_init(&runqueue->entries);
+ runqueue->high_watermark = 0;
+ runqueue->switch_count = 0;

  return self;
  }
@@ -53,6 +57,8 @@ void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule) {

  if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
  runqueue_ring_buffer_push(&runqueue->entries, fiber, value);
+ if (runqueue->entries.count > runqueue->high_watermark)
+ runqueue->high_watermark = runqueue->entries.count;
  }

  void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule) {
@@ -60,12 +66,19 @@ void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule) {
  GetRunqueue(self, runqueue);
  if (reschedule) runqueue_ring_buffer_delete(&runqueue->entries, fiber);
  runqueue_ring_buffer_unshift(&runqueue->entries, fiber, value);
+ if (runqueue->entries.count > runqueue->high_watermark)
+ runqueue->high_watermark = runqueue->entries.count;
  }

  runqueue_entry Runqueue_shift(VALUE self) {
  Runqueue_t *runqueue;
  GetRunqueue(self, runqueue);
- return runqueue_ring_buffer_shift(&runqueue->entries);
+ runqueue_entry entry = runqueue_ring_buffer_shift(&runqueue->entries);
+ if (entry.fiber == Qnil)
+ runqueue->high_watermark = 0;
+ else
+ runqueue->switch_count += 1;
+ return entry;
  }

  void Runqueue_delete(VALUE self, VALUE fiber) {
@@ -100,6 +113,21 @@ int Runqueue_empty_p(VALUE self) {
  return (runqueue->entries.count == 0);
  }

+ static const unsigned int ANTI_STARVE_HIGH_WATERMARK_THRESHOLD = 128;
+ static const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;
+
+ int Runqueue_should_poll_nonblocking(VALUE self) {
+ Runqueue_t *runqueue;
+ GetRunqueue(self, runqueue);
+
+ if (runqueue->high_watermark < ANTI_STARVE_HIGH_WATERMARK_THRESHOLD) return 0;
+ if (runqueue->switch_count < ANTI_STARVE_SWITCH_COUNT_THRESHOLD) return 0;
+
+ // both thresholds have been crossed: reset the switch count and
+ // signal that a non-blocking poll should be performed
+ runqueue->switch_count = 0;
+ return 1;
+ }
+
  void Init_Runqueue() {
  cRunqueue = rb_define_class_under(mPolyphony, "Runqueue", rb_cObject);
  rb_define_alloc_func(cRunqueue, Runqueue_allocate);
data/ext/polyphony/thread.c CHANGED
@@ -86,7 +86,7 @@ VALUE Thread_switch_fiber(VALUE self) {
  VALUE runqueue = rb_ivar_get(self, ID_ivar_runqueue);
  runqueue_entry next;
  VALUE backend = rb_ivar_get(self, ID_ivar_backend);
- unsigned int pending_count = Backend_pending_count(backend);
+ unsigned int pending_ops_count = Backend_pending_count(backend);
  unsigned int backend_was_polled = 0;

  if (__tracing_enabled__ && (rb_ivar_get(current_fiber, ID_ivar_running) != Qfalse))
@@ -95,14 +95,24 @@ VALUE Thread_switch_fiber(VALUE self) {
  while (1) {
  next = Runqueue_shift(runqueue);
  if (next.fiber != Qnil) {
- if (!backend_was_polled && pending_count) {
+ // Polling for I/O op completion is normally done when the run queue is
+ // empty, but if the runqueue never empties, we'll never get to process
+ // any event completions. In order to prevent this, an anti-starvation
+ // mechanism is employed, under the following conditions:
+ // - a blocking poll was not yet performed
+ // - there are pending blocking operations
+ // - the runqueue has signalled that a non-blocking poll should be
+ //   performed:
+ //   - the run queue length high watermark has reached its threshold (currently 128)
+ //   - the run queue switch counter has reached its threshold (currently 64)
+ if (!backend_was_polled && pending_ops_count && Runqueue_should_poll_nonblocking(runqueue)) {
  // this prevents event starvation in case the run queue never empties
  Backend_poll(backend, Qtrue, current_fiber, runqueue);
  }
  break;
  }
- if (pending_count == 0) break;
-
+
+ if (pending_ops_count == 0) break;
  Backend_poll(backend, Qnil, current_fiber, runqueue);
  backend_was_polled = 1;
  }
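
Note: the runqueue and thread changes above together form the redesigned event anti-starvation mechanism: a non-blocking poll is forced only once the run queue's high watermark and switch counter both cross their thresholds. A condensed Ruby rendition of that decision (the constants mirror the C thresholds; the names are illustrative, mirroring Runqueue_should_poll_nonblocking):

    HIGH_WATERMARK_THRESHOLD = 128
    SWITCH_COUNT_THRESHOLD   = 64

    def should_poll_nonblocking?(runqueue)
      return false if runqueue.high_watermark < HIGH_WATERMARK_THRESHOLD
      return false if runqueue.switch_count < SWITCH_COUNT_THRESHOLD

      # reset the counter so the non-blocking poll is not repeated
      # on every subsequent fiber switch
      runqueue.switch_count = 0
      true
    end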
data/lib/polyphony/extensions/socket.rb CHANGED
@@ -36,9 +36,9 @@ class ::Socket
  end

  def recvfrom(maxlen, flags = 0)
- @read_buffer ||= +''
+ buf = +''
  while true
- result = recvfrom_nonblock(maxlen, flags, @read_buffer, **NO_EXCEPTION)
+ result = recvfrom_nonblock(maxlen, flags, buf, **NO_EXCEPTION)
  case result
  when nil then raise IOError
  when :wait_readable then Polyphony.backend_wait_io(self, false)
@@ -165,17 +165,10 @@ class ::TCPSocket
  # Polyphony.backend_send(self, mesg, 0)
  # end

- def readpartial(maxlen, str = nil)
- @read_buffer ||= +''
- result = Polyphony.backend_recv(self, @read_buffer, maxlen)
+ def readpartial(maxlen, str = +'')
+ result = Polyphony.backend_recv(self, str, maxlen)
  raise EOFError unless result

- if str
- str << @read_buffer
- else
- str = @read_buffer
- end
- @read_buffer = +''
  str
  end

@@ -249,17 +242,10 @@ class ::UNIXSocket
  Polyphony.backend_send(self, mesg, 0)
  end

- def readpartial(maxlen, str = nil)
- @read_buffer ||= +''
- result = Polyphony.backend_recv(self, @read_buffer, maxlen)
+ def readpartial(maxlen, str = +'')
+ result = Polyphony.backend_recv(self, str, maxlen)
  raise EOFError unless result

- if str
- str << @read_buffer
- else
- str = @read_buffer
- end
- @read_buffer = +''
  str
  end

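Note: with the simplified readpartial above, the intermediate @read_buffer is gone and the caller-supplied (or freshly allocated) string receives the data directly. A short usage sketch (assuming an already-connected TCPSocket in sock; handle is a hypothetical stand-in for application code):

    buf = +''                               # unfrozen, reusable buffer
    begin
      loop do
        data = sock.readpartial(4096, buf)  # reads straight into buf
        handle(data)
      end
    rescue EOFError
      # peer closed the connection
    end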
data/lib/polyphony/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Polyphony
- VERSION = '0.54.0'
+ VERSION = '0.55.0'
  end
data/test/helper.rb CHANGED
@@ -15,9 +15,9 @@ require 'minitest/reporters'

  ::Exception.__disable_sanitized_backtrace__ = true

- Minitest::Reporters.use! [
- Minitest::Reporters::SpecReporter.new
- ]
+ # Minitest::Reporters.use! [
+ # Minitest::Reporters::SpecReporter.new
+ # ]

  class ::Fiber
  attr_writer :auto_watcher
data/test/test_backend.rb CHANGED
@@ -26,7 +26,7 @@ class BackendTest < MiniTest::Test
  @backend.sleep 0.01
  count += 1
  }.await
- assert_in_delta 0.03, Time.now - t0, 0.005
+ assert_in_delta 0.03, Time.now - t0, 0.01
  assert_equal 3, count
  end

@@ -309,6 +309,36 @@ class BackendChainTest < MiniTest::Test
  assert_equal 'hello world', i.read
  end

+ def test_simple_send_chain
+ port = rand(1234..5678)
+ server = TCPServer.new('127.0.0.1', port)
+
+ server_fiber = spin do
+ while (socket = server.accept)
+ spin do
+ while (data = socket.gets(8192))
+ socket << data
+ end
+ end
+ end
+ end
+
+ snooze
+ client = TCPSocket.new('127.0.0.1', port)
+
+ result = Thread.backend.chain(
+ [:send, client, 'hello', 0],
+ [:send, client, " world\n", 0]
+ )
+ sleep 0.1
+ assert_equal "hello world\n", client.recv(8192)
+ client.close
+ ensure
+ server_fiber&.stop
+ server_fiber&.await
+ server&.close
+ end
+
  def chunk_header(len)
  "Content-Length: #{len}\r\n\r\n"
  end
@@ -346,7 +376,16 @@ class BackendChainTest < MiniTest::Test

  assert_raises(RuntimeError) {
  Thread.backend.chain(
- [:read, o]
+ [:read, i]
+ )
+ }
+
+ assert_raises(RuntimeError) {
+ Thread.backend.chain(
+ [:write, o, 'abc'],
+ [:write, o, 'abc'],
+ [:write, o, 'abc'],
+ [:read, i]
  )
  }

@@ -355,5 +394,10 @@
  [:write, o]
  )
  }
+
+ # Eventually we should add some APIs to the io_uring backend to query the
+ # context store, then add some tests here to verify that the chain op ctx is
+ # released properly before raising the error (for the time being this has
+ # been verified manually).
  end
  end
data/test/test_fiber.rb CHANGED
@@ -132,7 +132,6 @@ class FiberTest < MiniTest::Test
  event = Polyphony::Event.new

  t = Thread.new do
- f = spin_loop { snooze }
  sleep 0.001
  event.signal(:foo)
  end
data/test/test_io.rb CHANGED
@@ -98,8 +98,10 @@ class IOTest < MiniTest::Test

  buf = []
  f = spin do
+ peer = receive
  while (l = i.gets)
  buf << l
+ peer << true
  end
  end

@@ -107,11 +109,12 @@
  assert_equal [], buf

  o << 'fab'
- snooze
+ f << Fiber.current
+ sleep 0.05
  assert_equal [], buf

  o << "ulous\n"
- sleep 0.01
+ receive
  assert_equal ["fabulous\n"], buf

  o.close
@@ -287,7 +290,7 @@ class IOClassMethodsTest < MiniTest::Test
  end

  def test_foreach
- skip "IO.foreach is not yet implemented"
+ skip 'IO.foreach is not yet implemented'
  lines = []
  IO.foreach(__FILE__) { |l| lines << l }
  assert_equal "# frozen_string_literal: true\n", lines[0]
data/test/test_signal.rb CHANGED
@@ -50,7 +50,7 @@ class SignalTrapTest < Minitest::Test
  ensure
  o.close
  end
- sleep 0.02
+ sleep 0.1
  o.close
  Process.kill('INT', pid)
  Thread.current.backend.waitpid(pid)
data/test/test_thread_pool.rb CHANGED
@@ -65,7 +65,7 @@ class ThreadPoolTest < MiniTest::Test
  end
  elapsed = Time.now - t0

- assert elapsed < 0.007
+ assert_in_range 0.0..0.009, elapsed
  assert buffer.size < 2

  sleep 0.1 # allow time for threads to spawn
data/test/test_timer.rb CHANGED
@@ -31,7 +31,7 @@ class TimerMoveOnAfterTest < MiniTest::Test
  end
  t1 = Time.now

- assert_in_range 0.01..0.025, t1 - t0
+ assert_in_range 0.01..0.05, t1 - t0
  assert_equal :bar, v
  end

@@ -75,14 +75,21 @@ class TimerCancelAfterTest < MiniTest::Test
  end

  def test_timer_cancel_after_with_reset
- t0 = Time.now
+ buf = []
  @timer.cancel_after(0.01) do
- sleep 0.007
+ sleep 0.005
+ buf << 1
  @timer.reset
- sleep 0.007
+ sleep 0.005
+ buf << 2
+ @timer.reset
+ sleep 0.005
+ buf << 3
+ @timer.reset
+ sleep 0.005
+ buf << 4
  end
- t1 = Time.now
- assert_in_range 0.012..0.024, t1 - t0
+ assert_equal [1, 2, 3, 4], buf
  end

  class CustomException < Exception
@@ -140,7 +147,6 @@ class TimerMiscTest < MiniTest::Test
  snooze
  assert_equal [], buffer
  sleep 0.1
- p :post_sleep
  assert_equal [2], buffer
  end

metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: polyphony
  version: !ruby/object:Gem::Version
- version: 0.54.0
+ version: 0.55.0
  platform: ruby
  authors:
  - Sharon Rosner
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-06-14 00:00:00.000000000 Z
+ date: 2021-06-17 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rake-compiler
@@ -359,6 +359,7 @@ files:
  - examples/io/rack_server.rb
  - examples/io/raw.rb
  - examples/io/reline.rb
+ - examples/io/stdio.rb
  - examples/io/system.rb
  - examples/io/tcp_proxy.rb
  - examples/io/tcpserver.rb