polyphony 0.99 → 0.99.1

Files changed (94)
  1. checksums.yaml +4 -4
  2. data/.github/FUNDING.yml +1 -1
  3. data/.rubocop.yml +3 -3
  4. data/.yardopts +30 -0
  5. data/CHANGELOG.md +4 -0
  6. data/LICENSE +1 -1
  7. data/README.md +63 -29
  8. data/Rakefile +1 -5
  9. data/TODO.md +0 -4
  10. data/docs/{main-concepts/concurrency.md → concurrency.md} +2 -9
  11. data/docs/{main-concepts/design-principles.md → design-principles.md} +3 -9
  12. data/docs/{main-concepts/exception-handling.md → exception-handling.md} +2 -9
  13. data/docs/{main-concepts/extending.md → extending.md} +2 -9
  14. data/docs/faq.md +3 -16
  15. data/docs/{main-concepts/fiber-scheduling.md → fiber-scheduling.md} +1 -9
  16. data/docs/link_rewriter.rb +16 -0
  17. data/docs/{getting-started/overview.md → overview.md} +1 -30
  18. data/docs/{getting-started/tutorial.md → tutorial.md} +3 -28
  19. data/docs/{_posts/2020-07-26-polyphony-0.44.md → whats-new.md} +3 -1
  20. data/examples/adapters/redis_client.rb +3 -2
  21. data/examples/io/echo_server.rb +1 -1
  22. data/examples/io/echo_server_plain_ruby.rb +26 -0
  23. data/ext/polyphony/backend_io_uring.c +154 -9
  24. data/ext/polyphony/backend_io_uring_context.c +21 -12
  25. data/ext/polyphony/backend_io_uring_context.h +12 -7
  26. data/ext/polyphony/backend_libev.c +1 -1
  27. data/ext/polyphony/extconf.rb +24 -8
  28. data/ext/polyphony/fiber.c +79 -2
  29. data/ext/polyphony/io_extensions.c +53 -0
  30. data/ext/polyphony/pipe.c +42 -2
  31. data/ext/polyphony/polyphony.c +345 -31
  32. data/ext/polyphony/polyphony.h +9 -2
  33. data/ext/polyphony/queue.c +181 -0
  34. data/ext/polyphony/ring_buffer.c +0 -1
  35. data/ext/polyphony/runqueue.c +8 -1
  36. data/ext/polyphony/runqueue_ring_buffer.c +13 -0
  37. data/ext/polyphony/runqueue_ring_buffer.h +2 -1
  38. data/ext/polyphony/socket_extensions.c +6 -0
  39. data/ext/polyphony/thread.c +34 -2
  40. data/lib/polyphony/adapters/process.rb +11 -1
  41. data/lib/polyphony/adapters/sequel.rb +1 -1
  42. data/lib/polyphony/core/channel.rb +2 -0
  43. data/lib/polyphony/core/debug.rb +1 -1
  44. data/lib/polyphony/core/global_api.rb +25 -24
  45. data/lib/polyphony/core/resource_pool.rb +7 -6
  46. data/lib/polyphony/core/sync.rb +2 -2
  47. data/lib/polyphony/core/thread_pool.rb +3 -3
  48. data/lib/polyphony/core/timer.rb +8 -8
  49. data/lib/polyphony/extensions/exception.rb +2 -0
  50. data/lib/polyphony/extensions/fiber.rb +15 -13
  51. data/lib/polyphony/extensions/io.rb +127 -5
  52. data/lib/polyphony/extensions/kernel.rb +20 -2
  53. data/lib/polyphony/extensions/openssl.rb +100 -11
  54. data/lib/polyphony/extensions/pipe.rb +103 -7
  55. data/lib/polyphony/extensions/process.rb +13 -1
  56. data/lib/polyphony/extensions/socket.rb +93 -27
  57. data/lib/polyphony/extensions/thread.rb +9 -1
  58. data/lib/polyphony/extensions/timeout.rb +1 -1
  59. data/lib/polyphony/version.rb +2 -1
  60. data/lib/polyphony.rb +27 -7
  61. data/polyphony.gemspec +1 -8
  62. data/test/stress.rb +1 -1
  63. data/test/test_global_api.rb +45 -7
  64. data/test/test_socket.rb +96 -0
  65. data/test/test_timer.rb +5 -5
  66. metadata +17 -40
  67. data/docs/_config.yml +0 -64
  68. data/docs/_includes/head.html +0 -40
  69. data/docs/_includes/title.html +0 -1
  70. data/docs/_sass/custom/custom.scss +0 -10
  71. data/docs/_sass/overrides.scss +0 -0
  72. data/docs/api-reference/exception.md +0 -31
  73. data/docs/api-reference/fiber.md +0 -425
  74. data/docs/api-reference/index.md +0 -9
  75. data/docs/api-reference/io.md +0 -36
  76. data/docs/api-reference/object.md +0 -99
  77. data/docs/api-reference/polyphony-baseexception.md +0 -33
  78. data/docs/api-reference/polyphony-cancel.md +0 -26
  79. data/docs/api-reference/polyphony-moveon.md +0 -24
  80. data/docs/api-reference/polyphony-net.md +0 -20
  81. data/docs/api-reference/polyphony-process.md +0 -28
  82. data/docs/api-reference/polyphony-resourcepool.md +0 -59
  83. data/docs/api-reference/polyphony-restart.md +0 -18
  84. data/docs/api-reference/polyphony-terminate.md +0 -18
  85. data/docs/api-reference/polyphony-threadpool.md +0 -67
  86. data/docs/api-reference/polyphony-throttler.md +0 -77
  87. data/docs/api-reference/polyphony.md +0 -36
  88. data/docs/api-reference/thread.md +0 -88
  89. data/docs/favicon.ico +0 -0
  90. data/docs/getting-started/index.md +0 -10
  91. data/docs/getting-started/installing.md +0 -34
  92. /data/{docs/assets/img → assets}/echo-fibers.svg +0 -0
  93. /data/{docs → assets}/polyphony-logo.png +0 -0
  94. /data/{docs/assets/img → assets}/sleeping-fiber.svg +0 -0
data/ext/polyphony/backend_io_uring.c
@@ -90,8 +90,16 @@ static VALUE Backend_initialize(VALUE self) {
  context_store_initialize(&backend->store);

  backend->prepared_limit = 1024;
+ int flags = 0;
+ #ifdef HAVE_IORING_SETUP_SUBMIT_ALL
+ flags |= IORING_SETUP_SUBMIT_ALL;
+ #endif
+ #ifdef HAVE_IORING_SETUP_COOP_TASKRUN
+ flags |= IORING_SETUP_COOP_TASKRUN;
+ #endif
+
  while (1) {
- int ret = io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
+ int ret = io_uring_queue_init(backend->prepared_limit, &backend->ring, flags);
  if (!ret) break;

  // if ENOMEM is returned, use a smaller limit
@@ -141,23 +149,53 @@ void *io_uring_backend_poll_without_gvl(void *ptr) {
  return NULL;
  }

- // copied from queue.c
+ // copied from liburing/queue.c
  static inline bool cq_ring_needs_flush(struct io_uring *ring) {
  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
  }

+ static void handle_multishot_accept_completion(op_context_t *ctx, struct io_uring_cqe *cqe, Backend_t *backend) {
+ // printf("handle_multishot_accept_completion result: %d\n", ctx->result);
+ if (ctx->result == -ECANCELED) {
+ context_store_release(&backend->store, ctx);
+ rb_ivar_set(ctx->resume_value, ID_ivar_multishot_accept_queue, Qnil);
+ }
+ else {
+ if (!(cqe->flags & IORING_CQE_F_MORE)) {
+ context_store_release(&backend->store, ctx);
+ }
+ VALUE queue = rb_ivar_get(ctx->resume_value, ID_ivar_multishot_accept_queue);
+ if (queue != Qnil)
+ Queue_push(queue, INT2NUM(ctx->result));
+ }
+ }
+
+ static void handle_multishot_completion(op_context_t *ctx, struct io_uring_cqe *cqe, Backend_t *backend) {
+ switch (ctx->type) {
+ case OP_MULTISHOT_ACCEPT:
+ return handle_multishot_accept_completion(ctx, cqe, backend);
+ default:
+ printf("Unexpected multishot completion for op type %d\n", ctx->type);
+ }
+ }
+
  static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *backend) {
  op_context_t *ctx = io_uring_cqe_get_data(cqe);
  if (!ctx) return;

  // printf("cqe ctx %p id: %d result: %d (%s, ref_count: %d)\n", ctx, ctx->id, cqe->res, op_type_to_str(ctx->type), ctx->ref_count);
  ctx->result = cqe->res;
- if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
- Fiber_make_runnable(ctx->fiber, ctx->resume_value);
- context_store_release(&backend->store, ctx);
+ if (ctx->ref_count == MULTISHOT_REFCOUNT) {
+ handle_multishot_completion(ctx, cqe, backend);
+ }
+ else {
+ if (ctx->ref_count == 2 && ctx->result != -ECANCELED && ctx->fiber)
+ Fiber_make_runnable(ctx->fiber, ctx->resume_value);
+ context_store_release(&backend->store, ctx);
+ }
  }

- // adapted from io_uring_peek_batch_cqe in queue.c
+ // adapted from io_uring_peek_batch_cqe in liburing/queue.c
  // this peeks at cqes and handles each available cqe
  void io_uring_backend_handle_ready_cqes(Backend_t *backend) {
  struct io_uring *ring = &backend->ring;
@@ -701,7 +739,7 @@ VALUE Backend_recvmsg(VALUE self, VALUE io, VALUE buffer, VALUE maxlen, VALUE po

  while (1) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_RECVMSG);
  struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
  int result;
  int completed;
@@ -904,7 +942,7 @@ VALUE Backend_sendmsg(VALUE self, VALUE io, VALUE buffer, VALUE flags, VALUE des

  while (left > 0) {
  VALUE resume_value = Qnil;
- op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
+ op_context_t *ctx = context_store_acquire(&backend->store, OP_SENDMSG);
  struct io_uring_sqe *sqe = io_uring_backend_get_sqe(backend);
  int result;
  int completed;
@@ -985,12 +1023,114 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
  }

  VALUE Backend_accept(VALUE self, VALUE server_socket, VALUE socket_class) {
+ #ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
+ VALUE accept_queue = rb_ivar_get(server_socket, ID_ivar_multishot_accept_queue);
+ if (accept_queue != Qnil) {
+ VALUE next = Queue_shift(0, 0, accept_queue);
+ int fd = NUM2INT(next);
+ if (fd < 0)
+ rb_syserr_fail(-fd, strerror(-fd));
+ else {
+ rb_io_t *fp;
+
+ VALUE socket = rb_obj_alloc(socket_class);
+ MakeOpenFile(socket, fp);
+ rb_update_max_fd(fd);
+ fp->fd = fd;
+ fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
+ rb_io_ascii8bit_binmode(socket);
+ rb_io_synchronized(fp);
+ return socket;
+ }
+ }
+ #endif
+
  Backend_t *backend;
  GetBackend(self, backend);
  return io_uring_backend_accept(backend, server_socket, socket_class, 0);
  }

+ #ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
+
+ struct multishot_accept_ctx {
+ Backend_t *backend;
+ VALUE server_socket;
+ op_context_t *op_ctx;
+ };
+
+ VALUE multishot_accept_start(struct multishot_accept_ctx *ctx) {
+ int server_fd;
+ rb_io_t *server_fptr;
+ server_fd = fd_from_io(ctx->server_socket, &server_fptr, 0, 0);
+ VALUE accept_queue = rb_funcall(cQueue, ID_new, 0);
+ rb_ivar_set(ctx->server_socket, ID_ivar_multishot_accept_queue, accept_queue);
+
+ ctx->op_ctx = context_store_acquire(&ctx->backend->store, OP_MULTISHOT_ACCEPT);
+ ctx->op_ctx->ref_count = -1;
+ ctx->op_ctx->resume_value = ctx->server_socket;
+ struct io_uring_sqe *sqe = io_uring_backend_get_sqe(ctx->backend);
+ io_uring_prep_multishot_accept(sqe, server_fd, 0, 0, 0);
+ io_uring_sqe_set_data(sqe, ctx->op_ctx);
+ io_uring_backend_defer_submit(ctx->backend);
+
+ rb_yield(ctx->server_socket);
+
+ return Qnil;
+ }
+
+ VALUE multishot_accept_cleanup(struct multishot_accept_ctx *ctx) {
+ struct io_uring_sqe *sqe = io_uring_backend_get_sqe(ctx->backend);
+ io_uring_prep_cancel(sqe, ctx->op_ctx, 0);
+ io_uring_sqe_set_data(sqe, NULL);
+ io_uring_backend_defer_submit(ctx->backend);
+
+ rb_ivar_set(ctx->server_socket, ID_ivar_multishot_accept_queue, Qnil);
+
+ return Qnil;
+ }
+
+ VALUE Backend_multishot_accept(VALUE self, VALUE server_socket) {
+ Backend_t *backend;
+ GetBackend(self, backend);
+
+ struct multishot_accept_ctx ctx;
+ ctx.backend = backend;
+ ctx.server_socket = server_socket;
+
+ return rb_ensure(
+ SAFE(multishot_accept_start), (VALUE)&ctx,
+ SAFE(multishot_accept_cleanup), (VALUE)&ctx
+ );
+ }
+
+ #endif
+
  VALUE Backend_accept_loop(VALUE self, VALUE server_socket, VALUE socket_class) {
+ #ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
+ VALUE accept_queue = rb_ivar_get(server_socket, ID_ivar_multishot_accept_queue);
+ if (accept_queue != Qnil) {
+ while (true) {
+ VALUE next = Queue_shift(0, 0, accept_queue);
+ int fd = NUM2INT(next);
+ if (fd < 0)
+ rb_syserr_fail(-fd, strerror(-fd));
+ else {
+ rb_io_t *fp;
+
+ VALUE socket = rb_obj_alloc(socket_class);
+ MakeOpenFile(socket, fp);
+ rb_update_max_fd(fd);
+ fp->fd = fd;
+ fp->mode = FMODE_READWRITE | FMODE_DUPLEX;
+ rb_io_ascii8bit_binmode(socket);
+ rb_io_synchronized(fp);
+ rb_yield(socket);
+ }
+ }
+ return self;
+ }
+ #endif
+
  Backend_t *backend;
  GetBackend(self, backend);
  io_uring_backend_accept(backend, server_socket, socket_class, 1);
@@ -1801,7 +1941,7 @@ VALUE Backend_snooze(VALUE self) {
  GetBackend(self, backend);

  Fiber_make_runnable(fiber, Qnil);
- ret = Thread_switch_fiber(rb_thread_current());
+ ret = backend_base_switch_fiber(self, &backend->base);

  COND_TRACE(&backend->base, 4, SYM_unblock, rb_fiber_current(), ret, CALLER());

@@ -1847,6 +1987,11 @@ void Init_Backend(void) {
  rb_define_method(cBackend, "accept_loop", Backend_accept_loop, 2);
  rb_define_method(cBackend, "connect", Backend_connect, 3);
  rb_define_method(cBackend, "feed_loop", Backend_feed_loop, 3);
+
+ #ifdef HAVE_IO_URING_PREP_MULTISHOT_ACCEPT
+ rb_define_method(cBackend, "multishot_accept", Backend_multishot_accept, 1);
+ #endif
+
  rb_define_method(cBackend, "read", Backend_read, 5);
  rb_define_method(cBackend, "read_loop", Backend_read_loop, 2);
  rb_define_method(cBackend, "recv", Backend_recv, 4);
data/ext/polyphony/backend_io_uring_context.c
@@ -6,18 +6,21 @@

  const char *op_type_to_str(enum op_type type) {
  switch (type) {
- case OP_READ: return "READ";
- case OP_WRITEV: return "WRITEV";
- case OP_WRITE: return "WRITE";
- case OP_RECV: return "RECV";
- case OP_SEND: return "SEND";
- case OP_SPLICE: return "SPLICE";
- case OP_TIMEOUT: return "TIMEOUT";
- case OP_POLL: return "POLL";
- case OP_ACCEPT: return "ACCEPT";
- case OP_CONNECT: return "CONNECT";
- case OP_CHAIN: return "CHAIN";
- case OP_CLOSE: return "CLOSE";
+ case OP_ACCEPT: return "ACCEPT";
+ case OP_CHAIN: return "CHAIN";
+ case OP_CLOSE: return "CLOSE";
+ case OP_CONNECT: return "CONNECT";
+ case OP_POLL: return "POLL";
+ case OP_READ: return "READ";
+ case OP_RECV: return "RECV";
+ case OP_RECVMSG: return "RECVMSG";
+ case OP_SEND: return "SEND";
+ case OP_SENDMSG: return "SENDMSG";
+ case OP_SPLICE: return "SPLICE";
+ case OP_TIMEOUT: return "TIMEOUT";
+ case OP_WRITEV: return "WRITEV";
+ case OP_WRITE: return "WRITE";
+
  default: return "";
  };
  }
@@ -66,6 +69,12 @@ inline int context_store_release(op_context_store_t *store, op_context_t *ctx) {

  assert(ctx->ref_count);

+ // If a multishot ctx is released, we pretend its ref count is 1, so it will
+ // be returned to the store.
+ if (ctx->ref_count == MULTISHOT_REFCOUNT) {
+ ctx->ref_count = 1;
+ }
+
  ctx->ref_count--;
  if (ctx->ref_count) return 0;

data/ext/polyphony/backend_io_uring_context.h
@@ -4,21 +4,26 @@
  #include "ruby.h"

  enum op_type {
+ OP_ACCEPT,
+ OP_CHAIN,
+ OP_CLOSE,
+ OP_CONNECT,
+ OP_MULTISHOT_ACCEPT,
  OP_NONE,
+ OP_POLL,
  OP_READ,
- OP_WRITEV,
- OP_WRITE,
  OP_RECV,
+ OP_RECVMSG,
  OP_SEND,
+ OP_SENDMSG,
  OP_SPLICE,
  OP_TIMEOUT,
- OP_POLL,
- OP_ACCEPT,
- OP_CONNECT,
- OP_CHAIN,
- OP_CLOSE
+ OP_WRITEV,
+ OP_WRITE
  };

+ #define MULTISHOT_REFCOUNT 0xFFFF
+
  typedef struct op_context {
  struct op_context *prev;
  struct op_context *next;
data/ext/polyphony/backend_libev.c
@@ -1584,7 +1584,7 @@ VALUE Backend_snooze(VALUE self) {
  GetBackend(self, backend);

  Fiber_make_runnable(fiber, Qnil);
- ret = Thread_switch_fiber(rb_thread_current());
+ ret = backend_base_switch_fiber(self, &backend->base);

  COND_TRACE(&backend->base, 4, SYM_unblock, rb_fiber_current(), ret, CALLER());

data/ext/polyphony/extconf.rb
@@ -18,7 +18,12 @@ def get_config

  combined_version = version.to_i * 100 + major_revision.to_i

- config[:pidfd_open] = combined_version > 503
+ config[:pidfd_open]        = combined_version > 503
+ config[:multishot_recv]    = combined_version >= 600
+ config[:multishot_recvmsg] = combined_version >= 600
+ config[:multishot_accept]  = combined_version >= 519
+ config[:submit_all_flag]   = combined_version >= 518
+ config[:coop_taskrun_flag] = combined_version >= 519

  force_libev = ENV['POLYPHONY_LIBEV'] != nil
  config[:io_uring] = !force_libev && (combined_version >= 506) && (distribution != 'linuxkit')
@@ -46,24 +51,35 @@ if config[:io_uring]
  $LDFLAGS << " -L#{File.expand_path('../../vendor/liburing/src', __dir__)} -l uring"
  end

+ def define_bool(name, value)
+ $defs << "-D#{name}=#{value ? 1 : 0 }"
+ end
+
  $defs << '-DPOLYPHONY_USE_PIDFD_OPEN' if config[:pidfd_open]
  if config[:io_uring]
  $defs << "-DPOLYPHONY_BACKEND_LIBURING"
  $defs << "-DPOLYPHONY_LINUX"
  $defs << "-DPOLYPHONY_UNSET_NONBLOCK" if RUBY_VERSION =~ /^3/
+ $defs << "-DHAVE_IO_URING_PREP_MULTISHOT_ACCEPT" if config[:multishot_accept]
+ $defs << "-DHAVE_IO_URING_PREP_RECV_MULTISHOT" if config[:multishot_recv]
+ $defs << "-DHAVE_IO_URING_PREP_RECVMSG_MULTISHOT" if config[:multishot_recvmsg]
+ $defs << "-DHAVE_IORING_SETUP_SUBMIT_ALL" if config[:submit_all_flag]
+ $defs << "-DHAVE_IORING_SETUP_COOP_TASKRUN" if config[:coop_taskrun_flag]
  $CFLAGS << " -Wno-pointer-arith"
  else
  $defs << "-DPOLYPHONY_BACKEND_LIBEV"
  $defs << "-DPOLYPHONY_LINUX" if config[:linux]

  $defs << "-DEV_STANDALONE" # prevent libev from assuming "config.h" exists
- $defs << '-DEV_USE_EPOLL' if have_header('sys/epoll.h')
- $defs << '-DEV_USE_KQUEUE' if have_header('sys/event.h') && have_header('sys/queue.h')
- $defs << '-DEV_USE_LINUXAIO' if have_header('linux/aio_abi.h')
- $defs << '-DEV_USE_POLL' if have_type('port_event_t', 'poll.h')
- $defs << '-DEV_USE_PORT' if have_type('port_event_t', 'port.h')
- $defs << '-DEV_USE_SELECT' if have_header('sys/select.h')
-
+
+ define_bool('EV_USE_EPOLL', have_header('sys/epoll.h'))
+ define_bool('EV_USE_KQUEUE', have_header('sys/event.h') && have_header('sys/queue.h'))
+ define_bool('EV_USE_LINUXAIO', have_header('linux/aio_abi.h'))
+ define_bool('EV_USE_POLL', have_type('port_event_t', 'poll.h'))
+ define_bool('EV_USE_PORT', have_type('port_event_t', 'port.h'))
+ define_bool('EV_USE_SELECT', have_header('sys/select.h'))
+ define_bool('EV_USE_IOCP', false)
+
  $defs << '-DHAVE_SYS_RESOURCE_H' if have_header('sys/resource.h')

  $CFLAGS << " -Wno-comment"
data/ext/polyphony/fiber.c
@@ -18,6 +18,8 @@ VALUE SYM_schedule;
  VALUE SYM_block;
  VALUE SYM_terminate;

+ /* @!visibility private */
+
  static VALUE Fiber_safe_transfer(int argc, VALUE *argv, VALUE self) {
  VALUE arg = (argc == 0) ? Qnil : argv[0];
  VALUE ret = FIBER_TRANSFER(self, arg);
@@ -27,6 +29,8 @@ static VALUE Fiber_safe_transfer(int argc, VALUE *argv, VALUE self) {
  return ret;
  }

+ /* @!visibility private */
+
  inline VALUE Fiber_auto_watcher(VALUE self) {
  VALUE watcher;

@@ -38,6 +42,8 @@ inline VALUE Fiber_auto_watcher(VALUE self) {
  return watcher;
  }

+ /* @!visibility private */
+
  inline void Fiber_make_runnable(VALUE fiber, VALUE value) {
  VALUE thread = rb_ivar_get(fiber, ID_ivar_thread);
  if (thread == Qnil) rb_raise(rb_eRuntimeError, "No thread set for fiber");
@@ -45,6 +51,8 @@ inline void Fiber_make_runnable(VALUE fiber, VALUE value) {
  Thread_schedule_fiber(thread, fiber, value);
  }

+ /* @!visibility private */
+
  inline void Fiber_make_runnable_with_priority(VALUE fiber, VALUE value) {
  VALUE thread = rb_ivar_get(fiber, ID_ivar_thread);
  if (thread == Qnil) rb_raise(rb_eRuntimeError, "No thread set for fiber");
@@ -52,18 +60,46 @@ inline void Fiber_make_runnable_with_priority(VALUE fiber, VALUE value) {
  Thread_schedule_fiber_with_priority(thread, fiber, value);
  }

+ /* call-seq:
+ * fiber.schedule
+ * fiber.schedule(value)
+ *
+ * Adds the fiber to the runqueue with the given resume value or `nil`.
+ *
+ * @return [void]
+ */
+
  static VALUE Fiber_schedule(int argc, VALUE *argv, VALUE self) {
  VALUE value = (argc == 0) ? Qnil : argv[0];
  Fiber_make_runnable(self, value);
  return self;
  }

+ /* call-seq:
+ * fiber.schedule_with_priority
+ * fiber.schedule_with_priority(value)
+ *
+ * Adds the fiber to the head of the runqueue with the given resume value or
+ * `nil`.
+ *
+ * @return [void]
+ */
+
  static VALUE Fiber_schedule_with_priority(int argc, VALUE *argv, VALUE self) {
  VALUE value = (argc == 0) ? Qnil : argv[0];
  Fiber_make_runnable_with_priority(self, value);
  return self;
  }

+ /* call-seq:
+ * fiber.state -> sym
+ *
+ * Returns the current state for the fiber, one of `:running`, `:runnable`,
+ * `:waiting`, `:dead`.
+ *
+ * @return [Symbol]
+ */
+
  static VALUE Fiber_state(VALUE self) {
  if (!rb_fiber_alive_p(self) || (rb_ivar_get(self, ID_ivar_running) == Qfalse))
  return SYM_dead;
@@ -73,16 +109,35 @@ static VALUE Fiber_state(VALUE self) {
  return SYM_waiting;
  }

- VALUE Fiber_send(VALUE self, VALUE value) {
+ /* call-seq:
+ * fiber.send(msg)
+ *
+ * Sends a message to the given fiber. The message will be added to the fiber's
+ * mailbox.
+ *
+ * @param msg [any]
+ * @return [void]
+ */
+
+ VALUE Fiber_send(VALUE self, VALUE msg) {
  VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
  if (mailbox == Qnil) {
  mailbox = rb_funcall(cQueue, ID_new, 0);
  rb_ivar_set(self, ID_ivar_mailbox, mailbox);
  }
- Queue_push(mailbox, value);
+ Queue_push(mailbox, msg);
  return self;
  }

+ /* call-seq:
+ * fiber.receive -> msg
+ *
+ * Receives a message from the fiber's mailbox. If no message is available,
+ * waits for a message to be sent to it.
+ *
+ * @return [any] received message
+ */
+
  VALUE Fiber_receive(VALUE self) {
  VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
  if (mailbox == Qnil) {
@@ -92,6 +147,14 @@ VALUE Fiber_receive(VALUE self) {
  return Queue_shift(0, 0, mailbox);
  }

+ /* call-seq:
+ * fiber.mailbox -> queue
+ *
+ * Returns the fiber's mailbox.
+ *
+ * @return [Queue]
+ */
+
  VALUE Fiber_mailbox(VALUE self) {
  VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
  if (mailbox == Qnil) {
@@ -101,23 +164,37 @@ VALUE Fiber_mailbox(VALUE self) {
  return mailbox;
  }

+ /* call-seq:
+ * fiber.receive_all_pending -> ary
+ *
+ * Receives all messages currently in the fiber's mailbox.
+ *
+ * @return [Array]
+ */
+
  VALUE Fiber_receive_all_pending(VALUE self) {
  VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
  return (mailbox == Qnil) ? rb_ary_new() : Queue_shift_all(mailbox);
  }

+ /* @!visibility private */
+
  VALUE Fiber_park(VALUE self) {
  rb_ivar_set(self, ID_ivar_parked, Qtrue);
  Backend_park_fiber(BACKEND(), self);
  return self;
  }

+ /* @!visibility private */
+
  VALUE Fiber_unpark(VALUE self) {
  rb_ivar_set(self, ID_ivar_parked, Qnil);
  Backend_unpark_fiber(BACKEND(), self);
  return self;
  }

+ /* @!visibility private */
+
  VALUE Fiber_parked_p(VALUE self) {
  return rb_ivar_get(self, ID_ivar_parked);
  }
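
The new comments above document the fiber mailbox API (Fiber#send, #receive, #mailbox, #receive_all_pending), which underpins message passing between fibers in Polyphony. A minimal sketch of how the documented methods fit together (`spin`, Kernel#receive and Fiber#await are existing Polyphony APIs; `<<` is assumed to be an alias for Fiber#send):

  require 'polyphony'

  worker = spin do
    loop do
      msg = receive            # Fiber#receive: blocks until a message arrives
      break if msg == :done
      puts "got #{msg.inspect}"
    end
  end

  worker.send('hello')         # Fiber#send pushes onto the worker's mailbox
  worker << 'world'            # assumed alias for #send
  worker << :done
  worker.await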
data/ext/polyphony/io_extensions.c
@@ -507,6 +507,16 @@ static inline VALUE z_stream_cleanup(struct z_stream_ctx *ctx) {
  #define Z_STREAM_SAFE_IO_LOOP_WITH_CLEANUP(ctx) \
  rb_ensure(SAFE(z_stream_io_loop), (VALUE)&ctx, SAFE(z_stream_cleanup), (VALUE)&ctx)

+ /* call-seq:
+ * IO.gzip(src, dest) -> bytes_written
+ * IO.gzip(src, dest, opt) -> bytes_written
+ *
+ * Gzips data from the source IO to the destination IO, returning the number
+ * of bytes written to the destination IO.
+ *
+ * @return [Integer]
+ */
+
  VALUE IO_gzip(int argc, VALUE *argv, VALUE self) {
  VALUE src;
  VALUE dest;
@@ -538,6 +548,16 @@ VALUE IO_gzip(int argc, VALUE *argv, VALUE self) {

  # define FIX2TIME(v) (rb_funcall(rb_cTime, ID_at, 1, v))

+ /* call-seq:
+ * IO.gunzip(src, dest) -> bytes_written
+ * IO.gunzip(src, dest, opt) -> bytes_written
+ *
+ * Gunzips data from the source IO to the destination IO, returning the number
+ * of bytes written to the destination IO.
+ *
+ * @return [Integer]
+ */
+
  VALUE IO_gunzip(int argc, VALUE *argv, VALUE self) {
  VALUE src;
  VALUE dest;
@@ -574,6 +594,16 @@ VALUE IO_gunzip(int argc, VALUE *argv, VALUE self) {
  return INT2FIX(ctx.out_total);
  }

+ /* call-seq:
+ * IO.deflate(src, dest) -> bytes_written
+ * IO.deflate(src, dest, opt) -> bytes_written
+ *
+ * Deflates data from the source IO to the destination IO, returning the number
+ * of bytes written to the destination IO.
+ *
+ * @return [Integer]
+ */
+
  VALUE IO_deflate(VALUE self, VALUE src, VALUE dest) {
  struct z_stream_ctx ctx;
  int level = DEFAULT_LEVEL;
@@ -589,6 +619,16 @@ VALUE IO_deflate(VALUE self, VALUE src, VALUE dest) {
  return INT2FIX(ctx.out_total);
  }

+ /* call-seq:
+ * IO.inflate(src, dest) -> bytes_written
+ * IO.inflate(src, dest, opt) -> bytes_written
+ *
+ * Inflates data from the source IO to the destination IO, returning the number
+ * of bytes written to the destination IO.
+ *
+ * @return [Integer]
+ */
+
  VALUE IO_inflate(VALUE self, VALUE src, VALUE dest) {
  struct z_stream_ctx ctx;
  int ret;
@@ -603,6 +643,19 @@ VALUE IO_inflate(VALUE self, VALUE src, VALUE dest) {
  return INT2FIX(ctx.out_total);
  }

+ /* call-seq:
+ * IO.http1_splice_chunked(src, dest, maxlen)
+ *
+ * Splices data from the source IO to the destination IO, writing it in HTTP1
+ * chunked encoding. A pipe is automatically created to buffer data between
+ * source and destination.
+ *
+ * @param src [IO] source
+ * @param dest [IO] destination
+ * @param maxlen [Integer] maximum bytes to splice
+ * @return [Integer] bytes spliced
+ */
+
  VALUE IO_http1_splice_chunked(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
  enum write_method method = detect_write_method(dest);
  VALUE backend = BACKEND();
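
The comments above document the class-level compression helpers defined in io_extensions.c (IO.gzip, IO.gunzip, IO.deflate, IO.inflate) plus the HTTP/1 chunked splicing helper. A short usage sketch based only on the call-seqs shown above (the keys accepted in the opt hash are not part of this diff, so none are used here):

  require 'polyphony'

  # compress a file
  File.open('data.txt') do |src|
    File.open('data.txt.gz', 'w') do |dest|
      bytes = IO.gzip(src, dest)   # returns the number of bytes written to dest
      puts "wrote #{bytes} compressed bytes"
    end
  end

  # and decompress it again
  File.open('data.txt.gz') do |src|
    File.open('data.copy.txt', 'w') do |dest|
      IO.gunzip(src, dest)
    end
  end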