polyphony 0.55.0 → 0.59.1

@@ -37,8 +37,7 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty
  ctx = malloc(sizeof(op_context_t));
  }
  ctx->id = (++store->last_id);
- // printf("acquire %d (%s)\n", ctx->id, op_type_to_str(type));
-
+ // printf("acquire %p %d (%s)\n", ctx, ctx->id, op_type_to_str(type));
  ctx->prev = NULL;
  ctx->next = store->taken;
  if (store->taken) store->taken->prev = ctx;
@@ -55,7 +54,7 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty

  // returns true if ctx was released
  inline int context_store_release(op_context_store_t *store, op_context_t *ctx) {
- // printf("release %d (%s, ref_count: %d)\n", ctx->id, op_type_to_str(ctx->type), ctx->ref_count);
+ // printf("release %p %d (%s, ref_count: %d)\n", ctx, ctx->id, op_type_to_str(ctx->type), ctx->ref_count);

  assert(ctx->ref_count);

@@ -42,7 +42,6 @@ thread.
  #define _GNU_SOURCE 1
  #endif

- #include <fcntl.h>
  #include <netdb.h>
  #include <sys/socket.h>
  #include <sys/uio.h>
@@ -52,57 +51,45 @@ thread.
  #include <stdnoreturn.h>
  #include <sys/types.h>
  #include <sys/wait.h>
+ #include <fcntl.h>

  #include "polyphony.h"
  #include "../libev/ev.h"
  #include "ruby/io.h"

+ #include "../libev/ev.h"
+ #include "backend_common.h"
+
  VALUE SYM_libev;
  VALUE SYM_send;
  VALUE SYM_splice;
  VALUE SYM_write;

- ID ID_ivar_is_nonblocking;
-
- // Since we need to ensure that fd's are non-blocking before every I/O
- // operation, here we improve upon Ruby's rb_io_set_nonblock by caching the
- // "nonblock" state in an instance variable. Calling rb_ivar_get on every read
- // is still much cheaper than doing a fcntl syscall on every read! Preliminary
- // benchmarks (with a "hello world" HTTP server) show throughput is improved
- // by 10-13%.
- inline void io_set_nonblock(rb_io_t *fptr, VALUE io) {
- VALUE is_nonblocking = rb_ivar_get(io, ID_ivar_is_nonblocking);
- if (is_nonblocking == Qtrue) return;
-
- rb_ivar_set(io, ID_ivar_is_nonblocking, Qtrue);
-
- #ifdef _WIN32
- rb_w32_set_nonblock(fptr->fd);
- #elif defined(F_GETFL)
- int oflags = fcntl(fptr->fd, F_GETFL);
- if ((oflags == -1) && (oflags & O_NONBLOCK)) return;
- oflags |= O_NONBLOCK;
- fcntl(fptr->fd, F_SETFL, oflags);
- #endif
- }
-
  typedef struct Backend_t {
- // common fields
- unsigned int currently_polling;
- unsigned int pending_count;
+ struct Backend_base base;

  // implementation-specific fields
  struct ev_loop *ev_loop;
  struct ev_async break_async;
  } Backend_t;

+ static void Backend_mark(void *ptr) {
+ Backend_t *backend = ptr;
+ backend_base_mark(&backend->base);
+ }
+
+ static void Backend_free(void *ptr) {
+ Backend_t *backend = ptr;
+ backend_base_finalize(&backend->base);
+ }
+
  static size_t Backend_size(const void *ptr) {
  return sizeof(Backend_t);
  }

  static const rb_data_type_t Backend_type = {
  "LibevBackend",
- {0, 0, Backend_size,},
+ {Backend_mark, Backend_free, Backend_size,},
  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
  };

@@ -133,6 +120,8 @@ static VALUE Backend_initialize(VALUE self) {
  Backend_t *backend;

  GetBackend(self, backend);
+
+ backend_base_initialize(&backend->base);
  backend->ev_loop = libev_new_loop();

  // start async watcher used for breaking a poll op (from another thread)
@@ -142,9 +131,6 @@ static VALUE Backend_initialize(VALUE self) {
  // block when no other watcher is active
  ev_unref(backend->ev_loop);

- backend->currently_polling = 0;
- backend->pending_count = 0;
-
  return Qnil;
  }

@@ -174,31 +160,45 @@ VALUE Backend_post_fork(VALUE self) {
  return self;
  }

- unsigned int Backend_pending_count(VALUE self) {
+ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
  Backend_t *backend;
  GetBackend(self, backend);

- return backend->pending_count;
+ COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
+ backend->base.currently_polling = 1;
+ ev_run(backend->ev_loop, blocking == Qtrue ? EVRUN_ONCE : EVRUN_NOWAIT);
+ backend->base.currently_polling = 0;
+ COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
+
+ return self;
  }

- VALUE Backend_poll(VALUE self, VALUE nowait, VALUE current_fiber, VALUE runqueue) {
+ inline void Backend_schedule_fiber(VALUE thread, VALUE self, VALUE fiber, VALUE value, int prioritize) {
  Backend_t *backend;
  GetBackend(self, backend);

- COND_TRACE(2, SYM_fiber_event_poll_enter, current_fiber);
- backend->currently_polling = 1;
- ev_run(backend->ev_loop, nowait == Qtrue ? EVRUN_NOWAIT : EVRUN_ONCE);
- backend->currently_polling = 0;
- COND_TRACE(2, SYM_fiber_event_poll_leave, current_fiber);
+ backend_base_schedule_fiber(thread, self, &backend->base, fiber, value, prioritize);
+ }

- return self;
+ inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ runqueue_delete(&backend->base.runqueue, fiber);
+ }
+
+ inline VALUE Backend_switch_fiber(VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ return backend_base_switch_fiber(self, &backend->base);
  }

  VALUE Backend_wakeup(VALUE self) {
  Backend_t *backend;
  GetBackend(self, backend);

- if (backend->currently_polling) {
+ if (backend->base.currently_polling) {
  // Since the loop will run until at least one event has occurred, we signal
  // the selector's associated async watcher, which will cause the ev loop to
  // return. In contrast to using `ev_break` to break out of the loop, which
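The comment above explains how Backend_wakeup interrupts a blocking ev_run from another thread: instead of calling ev_break, it signals the backend's break_async watcher, and the loop returns once that event is delivered. A minimal standalone libev sketch of this wake-up pattern (illustrative only; the wake_loop helper and the include path are assumptions, not code from the gem):

    #include <pthread.h>
    #include <stdio.h>
    #include <ev.h>   // adjust to "../libev/ev.h" when building inside the gem tree

    static struct ev_loop *loop;
    static ev_async wakeup_watcher;

    // Runs on the loop thread once ev_async_send has been called from anywhere.
    static void wakeup_cb(EV_P_ ev_async *w, int revents) {
      printf("loop woken up\n");
    }

    // Hypothetical helper: safe to call from any thread.
    static void wake_loop(void) {
      ev_async_send(loop, &wakeup_watcher);
    }

    static void *waker_thread(void *arg) {
      wake_loop();
      return NULL;
    }

    int main(void) {
      loop = EV_DEFAULT;
      ev_async_init(&wakeup_watcher, wakeup_cb);
      ev_async_start(loop, &wakeup_watcher);

      pthread_t t;
      pthread_create(&t, NULL, waker_thread, NULL);

      // Blocks until at least one event fires; the async signal from the
      // other thread is enough to make this call return.
      ev_run(loop, EVRUN_ONCE);
      pthread_join(t, NULL);
      return 0;
    }

ev_async_send is designed to be safe to call from threads other than the loop's own, which is why the backend prefers it over ev_break for cross-thread wakeups.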
@@ -211,9 +211,16 @@ VALUE Backend_wakeup(VALUE self) {
  return Qnil;
  }

- #include "../libev/ev.h"
+ inline struct backend_stats Backend_stats(VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);

- #include "backend_common.h"
+ return (struct backend_stats){
+ .scheduled_fibers = runqueue_len(&backend->base.runqueue),
+ .waiting_fibers = 0,
+ .pending_ops = backend->base.pending_count
+ };
+ }

  struct libev_io {
  struct ev_io io;
@@ -235,7 +242,7 @@ VALUE libev_wait_fd_with_watcher(Backend_t *backend, int fd, struct libev_io *wa
  }
  ev_io_start(backend->ev_loop, &watcher->io);

- switchpoint_result = backend_await(backend);
+ switchpoint_result = backend_await((struct Backend_base *)backend);

  ev_io_stop(backend->ev_loop, &watcher->io);
  RB_GC_GUARD(switchpoint_result);
@@ -271,7 +278,7 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
  if (underlying_io != Qnil) io = underlying_io;
  GetOpenFile(io, fptr);
  rb_io_check_byte_readable(fptr);
- io_set_nonblock(fptr, io);
+ io_verify_blocking_mode(fptr, io, Qfalse);
  rectify_io_file_pos(fptr);
  watcher.fiber = Qnil;
  OBJ_TAINT(str);
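From this call site on, every place that previously used the removed io_set_nonblock helper now calls io_verify_blocking_mode(fptr, io, Qfalse). That helper is defined in backend_common.c, which is not part of this diff. A rough, hypothetical sketch of what such a helper could look like, assuming it inspects the descriptor's flags with fcntl and toggles O_NONBLOCK only when the current mode differs from the requested one:

    // Hypothetical sketch; the real helper lives in backend_common.c and may
    // cache the blocking state rather than querying fcntl on every call.
    #include <fcntl.h>
    #include <ruby.h>
    #include <ruby/io.h>

    void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking) {
      (void)io;  // the real helper may use io, e.g. to cache state in an ivar
    #if defined(F_GETFL)
      int flags = fcntl(fptr->fd, F_GETFL);
      if (flags == -1) return;

      int is_blocking = !(flags & O_NONBLOCK);
      if ((blocking == Qtrue) == is_blocking) return;  // already in the requested mode

      if (blocking == Qtrue)
        flags &= ~O_NONBLOCK;
      else
        flags |= O_NONBLOCK;
      fcntl(fptr->fd, F_SETFL, flags);
    #endif
    }

Passing Qfalse, as all the call sites below do, therefore amounts to making sure the fd is non-blocking before the I/O loop starts.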
@@ -288,7 +295,6 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof)
  }
  else {
  switchpoint_result = backend_snooze();
-
  if (TEST_EXCEPTION(switchpoint_result)) goto error;

  if (n == 0) break; // EOF
@@ -343,7 +349,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io) {
  if (underlying_io != Qnil) io = underlying_io;
  GetOpenFile(io, fptr);
  rb_io_check_byte_readable(fptr);
- io_set_nonblock(fptr, io);
+ io_verify_blocking_mode(fptr, io, Qfalse);
  rectify_io_file_pos(fptr);
  watcher.fiber = Qnil;

@@ -395,7 +401,7 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
  if (underlying_io != Qnil) io = underlying_io;
  GetOpenFile(io, fptr);
  rb_io_check_byte_readable(fptr);
- io_set_nonblock(fptr, io);
+ io_verify_blocking_mode(fptr, io, Qfalse);
  rectify_io_file_pos(fptr);
  watcher.fiber = Qnil;

@@ -443,7 +449,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
  GetBackend(self, backend);
  io = rb_io_get_write_io(io);
  GetOpenFile(io, fptr);
- io_set_nonblock(fptr, io);
+ io_verify_blocking_mode(fptr, io, Qfalse);
  watcher.fiber = Qnil;

  while (left > 0) {
@@ -493,7 +499,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
  GetBackend(self, backend);
  io = rb_io_get_write_io(io);
  GetOpenFile(io, fptr);
- io_set_nonblock(fptr, io);
+ io_verify_blocking_mode(fptr, io, Qfalse);
  watcher.fiber = Qnil;

  iov = malloc(iov_count * sizeof(struct iovec));
@@ -574,7 +580,7 @@ VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {

  GetBackend(self, backend);
  GetOpenFile(server_socket, fptr);
- io_set_nonblock(fptr, server_socket);
+ io_verify_blocking_mode(fptr, server_socket, Qfalse);
  watcher.fiber = Qnil;
  while (1) {
  fd = accept(fptr->fd, &addr, &len);
@@ -602,7 +608,7 @@ VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
  fp->fd = fd;
  fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
  rb_io_ascii8bit_binmode(socket);
- io_set_nonblock(fp, socket);
+ io_verify_blocking_mode(fp, socket, Qfalse);
  rb_io_synchronized(fp);

  // if (rsock_do_not_reverse_lookup) {
@@ -631,7 +637,7 @@ VALUE Backend_accept_loop(VALUE self, VALUE server_socket, VALUE socket_class) {

  GetBackend(self, backend);
  GetOpenFile(server_socket, fptr);
- io_set_nonblock(fptr, server_socket);
+ io_verify_blocking_mode(fptr, server_socket, Qfalse);
  watcher.fiber = Qnil;

  while (1) {
@@ -659,7 +665,7 @@ VALUE Backend_accept_loop(VALUE self, VALUE server_socket, VALUE socket_class) {
  fp->fd = fd;
  fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
  rb_io_ascii8bit_binmode(socket);
- io_set_nonblock(fp, socket);
+ io_verify_blocking_mode(fp, socket, Qfalse);
  rb_io_synchronized(fp);

  rb_yield(socket);
@@ -687,7 +693,7 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {

  GetBackend(self, backend);
  GetOpenFile(sock, fptr);
- io_set_nonblock(fptr, sock);
+ io_verify_blocking_mode(fptr, sock, Qfalse);
  watcher.fiber = Qnil;

  addr.sin_family = AF_INET;
@@ -730,7 +736,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
  GetBackend(self, backend);
  io = rb_io_get_write_io(io);
  GetOpenFile(io, fptr);
- io_set_nonblock(fptr, io);
+ io_verify_blocking_mode(fptr, io, Qfalse);
  watcher.fiber = Qnil;

  while (left > 0) {
@@ -763,10 +769,63 @@ error:
  return RAISE_EXCEPTION(switchpoint_result);
  }

+ struct libev_rw_ctx {
+ int ref_count;
+ VALUE fiber;
+ };
+
+ struct libev_ref_count_io {
+ struct ev_io io;
+ struct libev_rw_ctx *ctx;
+ };
+
+ struct libev_rw_io {
+ struct libev_ref_count_io r;
+ struct libev_ref_count_io w;
+ struct libev_rw_ctx ctx;
+ };
+
+ void Backend_rw_io_callback(EV_P_ ev_io *w, int revents)
+ {
+ struct libev_ref_count_io *watcher = (struct libev_ref_count_io *)w;
+ int ref_count = watcher->ctx->ref_count--;
+ if (!ref_count)
+ Fiber_make_runnable(watcher->ctx->fiber, Qnil);
+ }
+
+ VALUE libev_wait_rw_fd_with_watcher(Backend_t *backend, int r_fd, int w_fd, struct libev_rw_io *watcher) {
+ VALUE switchpoint_result = Qnil;
+
+ if (watcher->ctx.fiber == Qnil) watcher->ctx.fiber = rb_fiber_current();
+ watcher->ctx.ref_count = 0;
+ if (r_fd != -1) {
+ ev_io_init(&watcher->r.io, Backend_rw_io_callback, r_fd, EV_READ);
+ ev_io_start(backend->ev_loop, &watcher->r.io);
+ watcher->r.ctx = &watcher->ctx;
+ watcher->ctx.ref_count++;
+ }
+ if (w_fd != -1) {
+ ev_io_init(&watcher->w.io, Backend_rw_io_callback, w_fd, EV_WRITE);
+ ev_io_start(backend->ev_loop, &watcher->w.io);
+ watcher->w.ctx = &watcher->ctx;
+ watcher->ctx.ref_count++;
+ }
+
+ switchpoint_result = backend_await((struct Backend_base *)backend);
+
+ if (r_fd != -1) ev_io_stop(backend->ev_loop, &watcher->r.io);
+ if (w_fd != -1) ev_io_stop(backend->ev_loop, &watcher->w.io);
+ RB_GC_GUARD(switchpoint_result);
+ return switchpoint_result;
+ }
+
+
+
+
  #ifdef POLYPHONY_LINUX
  VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  Backend_t *backend;
- struct libev_io watcher;
+ struct libev_rw_io watcher;
  VALUE switchpoint_result = Qnil;
  VALUE underlying_io;
  rb_io_t *src_fptr;
@@ -778,25 +837,22 @@ VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  underlying_io = rb_ivar_get(src, ID_ivar_io);
  if (underlying_io != Qnil) src = underlying_io;
  GetOpenFile(src, src_fptr);
- io_set_nonblock(src_fptr, src);
+ io_verify_blocking_mode(src_fptr, src, Qfalse);

  underlying_io = rb_ivar_get(dest, ID_ivar_io);
  if (underlying_io != Qnil) dest = underlying_io;
  dest = rb_io_get_write_io(dest);
  GetOpenFile(dest, dest_fptr);
- io_set_nonblock(dest_fptr, dest);
+ io_verify_blocking_mode(dest_fptr, dest, Qfalse);

- watcher.fiber = Qnil;
+ watcher.ctx.fiber = Qnil;
  while (1) {
  len = splice(src_fptr->fd, 0, dest_fptr->fd, 0, NUM2INT(maxlen), 0);
  if (len < 0) {
  int e = errno;
  if ((e != EWOULDBLOCK && e != EAGAIN)) rb_syserr_fail(e, strerror(e));

- switchpoint_result = libev_wait_fd_with_watcher(backend, src_fptr->fd, &watcher, EV_READ);
- if (TEST_EXCEPTION(switchpoint_result)) goto error;
-
- switchpoint_result = libev_wait_fd_with_watcher(backend, dest_fptr->fd, &watcher, EV_WRITE);
+ switchpoint_result = libev_wait_rw_fd_with_watcher(backend, src_fptr->fd, dest_fptr->fd, &watcher);
  if (TEST_EXCEPTION(switchpoint_result)) goto error;
  }
  else {
@@ -804,12 +860,12 @@ VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  }
  }

- if (watcher.fiber == Qnil) {
+ if (watcher.ctx.fiber == Qnil) {
  switchpoint_result = backend_snooze();
  if (TEST_EXCEPTION(switchpoint_result)) goto error;
  }

- RB_GC_GUARD(watcher.fiber);
+ RB_GC_GUARD(watcher.ctx.fiber);
  RB_GC_GUARD(switchpoint_result);

  return INT2NUM(len);
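Backend_splice above delegates the copy to the splice(2) syscall, which moves bytes between two descriptors inside the kernel but requires one end of each call to be a pipe. That constraint is presumably also why Backend_splice_chunks, added further down in this diff, routes data through an internal pipe. A minimal blocking sketch of the pipe-intermediary pattern, without the event-loop waiting the backend performs (illustrative only; copy_via_pipe is not a function from the gem):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    // Copy up to maxlen bytes from src_fd to dest_fd through a temporary pipe,
    // so splice(2) can be used even when neither descriptor is itself a pipe.
    // Blocking variant; returns bytes copied, 0 on EOF, or -1 on error.
    static ssize_t copy_via_pipe(int src_fd, int dest_fd, size_t maxlen) {
      int pipefd[2];
      if (pipe(pipefd) == -1) return -1;

      ssize_t total = 0;
      ssize_t pending = splice(src_fd, NULL, pipefd[1], NULL, maxlen, 0);
      while (pending > 0) {                 // drain the pipe into the destination
        ssize_t n = splice(pipefd[0], NULL, dest_fd, NULL, pending, 0);
        if (n <= 0) { total = -1; break; }
        total += n;
        pending -= n;
      }
      if (pending < 0) total = -1;

      close(pipefd[0]);
      close(pipefd[1]);
      return total;
    }

The non-blocking versions in this file do the same work, but retry with libev_wait_rw_fd_with_watcher whenever splice reports EWOULDBLOCK or EAGAIN.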
@@ -819,7 +875,7 @@ error:

  VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  Backend_t *backend;
- struct libev_io watcher;
+ struct libev_rw_io watcher;
  VALUE switchpoint_result = Qnil;
  VALUE underlying_io;
  rb_io_t *src_fptr;
@@ -832,25 +888,22 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  underlying_io = rb_ivar_get(src, ID_ivar_io);
  if (underlying_io != Qnil) src = underlying_io;
  GetOpenFile(src, src_fptr);
- io_set_nonblock(src_fptr, src);
+ io_verify_blocking_mode(src_fptr, src, Qfalse);

  underlying_io = rb_ivar_get(dest, ID_ivar_io);
  if (underlying_io != Qnil) dest = underlying_io;
  dest = rb_io_get_write_io(dest);
  GetOpenFile(dest, dest_fptr);
- io_set_nonblock(dest_fptr, dest);
+ io_verify_blocking_mode(dest_fptr, dest, Qfalse);

- watcher.fiber = Qnil;
+ watcher.ctx.fiber = Qnil;
  while (1) {
  len = splice(src_fptr->fd, 0, dest_fptr->fd, 0, NUM2INT(maxlen), 0);
  if (len < 0) {
  int e = errno;
  if ((e != EWOULDBLOCK && e != EAGAIN)) rb_syserr_fail(e, strerror(e));

- switchpoint_result = libev_wait_fd_with_watcher(backend, src_fptr->fd, &watcher, EV_READ);
- if (TEST_EXCEPTION(switchpoint_result)) goto error;
-
- switchpoint_result = libev_wait_fd_with_watcher(backend, dest_fptr->fd, &watcher, EV_WRITE);
+ switchpoint_result = libev_wait_rw_fd_with_watcher(backend, src_fptr->fd, dest_fptr->fd, &watcher);
  if (TEST_EXCEPTION(switchpoint_result)) goto error;
  }
  else if (len == 0) {
@@ -861,6 +914,79 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  }
  }

+ if (watcher.ctx.fiber == Qnil) {
+ switchpoint_result = backend_snooze();
+ if (TEST_EXCEPTION(switchpoint_result)) goto error;
+ }
+
+ RB_GC_GUARD(watcher.ctx.fiber);
+ RB_GC_GUARD(switchpoint_result);
+
+ return INT2NUM(total);
+ error:
+ return RAISE_EXCEPTION(switchpoint_result);
+ }
+ #endif
+
+ VALUE Backend_fake_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
+ Backend_t *backend;
+ struct libev_io watcher;
+ VALUE switchpoint_result = Qnil;
+ VALUE underlying_io;
+ rb_io_t *src_fptr;
+ rb_io_t *dest_fptr;
+ int len = NUM2INT(maxlen);
+ VALUE str = rb_str_new(0, len);
+ char *buf = RSTRING_PTR(str);
+ int left = 0;
+ int total = 0;
+
+ GetBackend(self, backend);
+
+ underlying_io = rb_ivar_get(src, ID_ivar_io);
+ if (underlying_io != Qnil) src = underlying_io;
+ GetOpenFile(src, src_fptr);
+ io_verify_blocking_mode(src_fptr, src, Qfalse);
+
+ underlying_io = rb_ivar_get(dest, ID_ivar_io);
+ if (underlying_io != Qnil) dest = underlying_io;
+ dest = rb_io_get_write_io(dest);
+ GetOpenFile(dest, dest_fptr);
+ io_verify_blocking_mode(dest_fptr, dest, Qfalse);
+
+ watcher.fiber = Qnil;
+
+ while (1) {
+ ssize_t n = read(src_fptr->fd, buf, len);
+ if (n < 0) {
+ int e = errno;
+ if ((e != EWOULDBLOCK && e != EAGAIN)) rb_syserr_fail(e, strerror(e));
+
+ switchpoint_result = libev_wait_fd_with_watcher(backend, src_fptr->fd, &watcher, EV_READ);
+ if (TEST_EXCEPTION(switchpoint_result)) goto error;
+ }
+ else {
+ total = left = n;
+ break;
+ }
+ }
+
+ while (left > 0) {
+ ssize_t n = write(dest_fptr->fd, buf, left);
+ if (n < 0) {
+ int e = errno;
+ if ((e != EWOULDBLOCK && e != EAGAIN)) rb_syserr_fail(e, strerror(e));
+
+ switchpoint_result = libev_wait_fd_with_watcher(backend, dest_fptr->fd, &watcher, EV_WRITE);
+
+ if (TEST_EXCEPTION(switchpoint_result)) goto error;
+ }
+ else {
+ buf += n;
+ left -= n;
+ }
+ }
+
  if (watcher.fiber == Qnil) {
  switchpoint_result = backend_snooze();
  if (TEST_EXCEPTION(switchpoint_result)) goto error;
@@ -868,12 +994,90 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {

  RB_GC_GUARD(watcher.fiber);
  RB_GC_GUARD(switchpoint_result);
+ RB_GC_GUARD(str);
+
+ return INT2NUM(total);
+ error:
+ return RAISE_EXCEPTION(switchpoint_result);
+ }
+
+ VALUE Backend_fake_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
+ Backend_t *backend;
+ struct libev_io watcher;
+ VALUE switchpoint_result = Qnil;
+ VALUE underlying_io;
+ rb_io_t *src_fptr;
+ rb_io_t *dest_fptr;
+ int len = NUM2INT(maxlen);
+ VALUE str = rb_str_new(0, len);
+ char *buf = RSTRING_PTR(str);
+ int left = 0;
+ int total = 0;
+
+ GetBackend(self, backend);
+
+ underlying_io = rb_ivar_get(src, ID_ivar_io);
+ if (underlying_io != Qnil) src = underlying_io;
+ GetOpenFile(src, src_fptr);
+ io_verify_blocking_mode(src_fptr, src, Qfalse);
+
+ underlying_io = rb_ivar_get(dest, ID_ivar_io);
+ if (underlying_io != Qnil) dest = underlying_io;
+ dest = rb_io_get_write_io(dest);
+ GetOpenFile(dest, dest_fptr);
+ io_verify_blocking_mode(dest_fptr, dest, Qfalse);
+
+ watcher.fiber = Qnil;
+
+ while (1) {
+ char *ptr = buf;
+ while (1) {
+ ssize_t n = read(src_fptr->fd, ptr, len);
+ if (n < 0) {
+ int e = errno;
+ if ((e != EWOULDBLOCK && e != EAGAIN)) rb_syserr_fail(e, strerror(e));
+
+ switchpoint_result = libev_wait_fd_with_watcher(backend, src_fptr->fd, &watcher, EV_READ);
+ if (TEST_EXCEPTION(switchpoint_result)) goto error;
+ }
+ else if (n == 0) goto done;
+ else {
+ total += n;
+ left = n;
+ break;
+ }
+ }
+
+ while (left > 0) {
+ ssize_t n = write(dest_fptr->fd, ptr, left);
+ if (n < 0) {
+ int e = errno;
+ if ((e != EWOULDBLOCK && e != EAGAIN)) rb_syserr_fail(e, strerror(e));
+
+ switchpoint_result = libev_wait_fd_with_watcher(backend, dest_fptr->fd, &watcher, EV_WRITE);
+ if (TEST_EXCEPTION(switchpoint_result)) goto error;
+ }
+ else {
+ ptr += n;
+ left -= n;
+ }
+ }
+ }
+
+ done:
+ if (watcher.fiber == Qnil) {
+ switchpoint_result = backend_snooze();
+ if (TEST_EXCEPTION(switchpoint_result)) goto error;
+ }
+
+ RB_GC_GUARD(watcher.fiber);
+ RB_GC_GUARD(switchpoint_result);
+ RB_GC_GUARD(str);

  return INT2NUM(total);
  error:
  return RAISE_EXCEPTION(switchpoint_result);
  }
- #endif

  VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
  Backend_t *backend;
@@ -908,7 +1112,7 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
  ev_timer_init(&watcher.timer, Backend_timer_callback, NUM2DBL(duration), 0.);
  ev_timer_start(backend->ev_loop, &watcher.timer);

- switchpoint_result = backend_await(backend);
+ switchpoint_result = backend_await((struct Backend_base *)backend);

  ev_timer_stop(backend->ev_loop, &watcher.timer);
  RAISE_IF_EXCEPTION(switchpoint_result);
@@ -936,7 +1140,7 @@ noreturn VALUE Backend_timer_loop(VALUE self, VALUE interval) {
  VALUE switchpoint_result = Qnil;
  ev_timer_init(&watcher.timer, Backend_timer_callback, sleep_duration, 0.);
  ev_timer_start(backend->ev_loop, &watcher.timer);
- switchpoint_result = backend_await(backend);
+ switchpoint_result = backend_await((struct Backend_base *)backend);
  ev_timer_stop(backend->ev_loop, &watcher.timer);
  RAISE_IF_EXCEPTION(switchpoint_result);
  RB_GC_GUARD(switchpoint_result);
@@ -1053,7 +1257,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
  ev_child_init(&watcher.child, Backend_child_callback, NUM2INT(pid), 0);
  ev_child_start(backend->ev_loop, &watcher.child);

- switchpoint_result = backend_await(backend);
+ switchpoint_result = backend_await((struct Backend_base *)backend);

  ev_child_stop(backend->ev_loop, &watcher.child);
  RAISE_IF_EXCEPTION(switchpoint_result);
@@ -1075,7 +1279,7 @@ VALUE Backend_wait_event(VALUE self, VALUE raise) {
  ev_async_init(&async, Backend_async_callback);
  ev_async_start(backend->ev_loop, &async);

- switchpoint_result = backend_await(backend);
+ switchpoint_result = backend_await((struct Backend_base *)backend);

  ev_async_stop(backend->ev_loop, &async);
  if (RTEST(raise)) RAISE_IF_EXCEPTION(switchpoint_result);
@@ -1100,10 +1304,8 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
  result = Backend_write(self, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2));
  else if (op_type == SYM_send && op_len == 4)
  result = Backend_send(self, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
- #ifdef POLYPHONY_LINUX
  else if (op_type == SYM_splice && op_len == 4)
  result = Backend_splice(self, RARRAY_AREF(op, 1), RARRAY_AREF(op, 2), RARRAY_AREF(op, 3));
- #endif
  else
  rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
  }
@@ -1112,6 +1314,179 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
  return result;
  }

+ VALUE Backend_idle_gc_period_set(VALUE self, VALUE period) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+ backend->base.idle_gc_period = NUM2DBL(period);
+ backend->base.idle_gc_last_time = current_time();
+ return self;
+ }
+
+ VALUE Backend_idle_proc_set(VALUE self, VALUE block) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+ backend->base.idle_proc = block;
+ return self;
+ }
+
+ inline VALUE Backend_run_idle_tasks(VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+ backend_run_idle_tasks(&backend->base);
+ return self;
+ }
+
+ inline int splice_chunks_write(Backend_t *backend, int fd, VALUE str, struct libev_rw_io *watcher, VALUE *result) {
+ char *buf = RSTRING_PTR(str);
+ int len = RSTRING_LEN(str);
+ int left = len;
+ while (left > 0) {
+ ssize_t n = write(fd, buf, left);
+ if (n < 0) {
+ int err = errno;
+ if ((err != EWOULDBLOCK && err != EAGAIN)) return err;
+
+ *result = libev_wait_rw_fd_with_watcher(backend, -1, fd, watcher);
+ if (TEST_EXCEPTION(*result)) return -1;
+ }
+ else {
+ buf += n;
+ left -= n;
+ }
+ }
+ return 0;
+ }
+
+ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VALUE postfix, VALUE chunk_prefix, VALUE chunk_postfix, VALUE chunk_size) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+ int total = 0;
+ int err = 0;
+ VALUE result = Qnil;
+
+ rb_io_t *src_fptr;
+ rb_io_t *dest_fptr;
+
+ VALUE underlying_io = rb_ivar_get(src, ID_ivar_io);
+ if (underlying_io != Qnil) src = underlying_io;
+ GetOpenFile(src, src_fptr);
+ io_verify_blocking_mode(src_fptr, src, Qfalse);
+
+ underlying_io = rb_ivar_get(dest, ID_ivar_io);
+ if (underlying_io != Qnil) dest = underlying_io;
+ dest = rb_io_get_write_io(dest);
+ GetOpenFile(dest, dest_fptr);
+ io_verify_blocking_mode(dest_fptr, dest, Qfalse);
+
+ struct libev_rw_io watcher;
+ watcher.ctx.fiber = Qnil;
+ int maxlen = NUM2INT(chunk_size);
+ VALUE str = Qnil;
+ VALUE chunk_len_value = Qnil;
+
+ int pipefd[2] = { -1, -1 };
+ if (pipe(pipefd) == -1) {
+ err = errno;
+ goto syscallerror;
+ }
+
+ fcntl(pipefd[0], F_SETFL, O_NONBLOCK);
+ fcntl(pipefd[1], F_SETFL, O_NONBLOCK);
+
+ if (prefix != Qnil) {
+ int err = splice_chunks_write(backend, dest_fptr->fd, prefix, &watcher, &result);
+ if (err == -1) goto error; else if (err) goto syscallerror;
+ }
+ while (1) {
+ int chunk_len;
+ // splice to pipe
+ while (1) {
+ chunk_len = splice(src_fptr->fd, 0, pipefd[1], 0, maxlen, 0);
+ if (chunk_len < 0) {
+ err = errno;
+ if (err != EWOULDBLOCK && err != EAGAIN) goto syscallerror;
+
+ result = libev_wait_rw_fd_with_watcher(backend, src_fptr->fd, pipefd[1], &watcher);
+ if (TEST_EXCEPTION(result)) goto error;
+ }
+ else {
+ break;
+ }
+ }
+ if (chunk_len == 0) break;
+
+ total += chunk_len;
+ chunk_len_value = INT2NUM(chunk_len);
+
+ if (chunk_prefix != Qnil) {
+ VALUE str = (TYPE(chunk_prefix) == T_STRING) ? chunk_prefix : rb_funcall(chunk_prefix, ID_call, 1, chunk_len_value);
+ int err = splice_chunks_write(backend, dest_fptr->fd, str, &watcher, &result);
+ if (err == -1) goto error; else if (err) goto syscallerror;
+ }
+
+ int left = chunk_len;
+ while (1) {
+ int n = splice(pipefd[0], 0, dest_fptr->fd, 0, left, 0);
+ if (n < 0) {
+ err = errno;
+ if (err != EWOULDBLOCK && err != EAGAIN) goto syscallerror;
+
+ result = libev_wait_rw_fd_with_watcher(backend, pipefd[0], dest_fptr->fd, &watcher);
+ if (TEST_EXCEPTION(result)) goto error;
+ }
+ else {
+ left -= n;
+ if (left == 0) break;
+ }
+ }
+
+ if (chunk_postfix != Qnil) {
+ VALUE str = (TYPE(chunk_postfix) == T_STRING) ? chunk_postfix : rb_funcall(chunk_postfix, ID_call, 1, chunk_len_value);
+ int err = splice_chunks_write(backend, dest_fptr->fd, str, &watcher, &result);
+ if (err == -1) goto error; else if (err) goto syscallerror;
+ }
+ }
+
+ if (postfix != Qnil) {
+ int err = splice_chunks_write(backend, dest_fptr->fd, postfix, &watcher, &result);
+ if (err == -1) goto error; else if (err) goto syscallerror;
+ }
+
+ if (watcher.ctx.fiber == Qnil) {
+ result = backend_snooze();
+ if (TEST_EXCEPTION(result)) goto error;
+ }
+ RB_GC_GUARD(str);
+ RB_GC_GUARD(chunk_len_value);
+ RB_GC_GUARD(result);
+ if (pipefd[0] != -1) close(pipefd[0]);
+ if (pipefd[1] != -1) close(pipefd[1]);
+ return INT2NUM(total);
+ syscallerror:
+ if (pipefd[0] != -1) close(pipefd[0]);
+ if (pipefd[1] != -1) close(pipefd[1]);
+ rb_syserr_fail(err, strerror(err));
+ error:
+ if (pipefd[0] != -1) close(pipefd[0]);
+ if (pipefd[1] != -1) close(pipefd[1]);
+ return RAISE_EXCEPTION(result);
+ }
+
+ VALUE Backend_trace(int argc, VALUE *argv, VALUE self) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+ backend_trace(&backend->base, argc, argv);
+ return self;
+ }
+
+ VALUE Backend_trace_proc_set(VALUE self, VALUE block) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ backend->base.trace_proc = block;
+ return self;
+ }
+
  void Init_Backend() {
  ev_set_allocator(xrealloc);

@@ -1121,11 +1496,16 @@ void Init_Backend() {
  rb_define_method(cBackend, "initialize", Backend_initialize, 0);
  rb_define_method(cBackend, "finalize", Backend_finalize, 0);
  rb_define_method(cBackend, "post_fork", Backend_post_fork, 0);
+ rb_define_method(cBackend, "trace", Backend_trace, -1);
+ rb_define_method(cBackend, "trace_proc=", Backend_trace_proc_set, 1);

- rb_define_method(cBackend, "poll", Backend_poll, 3);
+ rb_define_method(cBackend, "poll", Backend_poll, 1);
  rb_define_method(cBackend, "break", Backend_wakeup, 0);
  rb_define_method(cBackend, "kind", Backend_kind, 0);
  rb_define_method(cBackend, "chain", Backend_chain, -1);
+ rb_define_method(cBackend, "idle_gc_period=", Backend_idle_gc_period_set, 1);
+ rb_define_method(cBackend, "idle_proc=", Backend_idle_proc_set, 1);
+ rb_define_method(cBackend, "splice_chunks", Backend_splice_chunks, 7);

  rb_define_method(cBackend, "accept", Backend_accept, 2);
  rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
@@ -1143,6 +1523,9 @@ void Init_Backend() {
  #ifdef POLYPHONY_LINUX
  rb_define_method(cBackend, "splice", Backend_splice, 3);
  rb_define_method(cBackend, "splice_to_eof", Backend_splice_to_eof, 3);
+ #else
+ rb_define_method(cBackend, "splice", Backend_fake_splice, 3);
+ rb_define_method(cBackend, "splice_to_eof", Backend_fake_splice_to_eof, 3);
  #endif

  rb_define_method(cBackend, "timeout", Backend_timeout, -1);
@@ -1152,7 +1535,6 @@ void Init_Backend() {
  rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
  rb_define_method(cBackend, "write", Backend_write_m, -1);

- ID_ivar_is_nonblocking = rb_intern("@is_nonblocking");
  SYM_libev = ID2SYM(rb_intern("libev"));

  SYM_send = ID2SYM(rb_intern("send"));