uringmachine 0.24.0 → 0.26.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (279)
  1. checksums.yaml +4 -4
  2. data/.gitignore +1 -0
  3. data/.gitmodules +0 -3
  4. data/CHANGELOG.md +13 -0
  5. data/Gemfile +11 -0
  6. data/README.md +266 -112
  7. data/Rakefile +8 -0
  8. data/TODO.md +14 -21
  9. data/benchmark/common.rb +2 -0
  10. data/benchmark/openssl.rb +77 -0
  11. data/benchmark/openssl_socketpair.rb +112 -0
  12. data/benchmark/sqlite.rb +1 -1
  13. data/docs/design/buffer_pool.md +1 -1
  14. data/docs/wroclove.rb.md +52 -0
  15. data/ext/um/extconf.rb +15 -0
  16. data/ext/um/um.c +392 -358
  17. data/ext/um/um.h +48 -23
  18. data/ext/um/um_async_op.c +9 -8
  19. data/ext/um/um_async_op_class.c +34 -3
  20. data/ext/um/um_class.c +705 -19
  21. data/ext/um/um_const.c +31 -0
  22. data/ext/um/um_mutex_class.c +12 -0
  23. data/ext/um/um_op.c +15 -1
  24. data/ext/um/um_queue_class.c +16 -0
  25. data/ext/um/um_ssl.c +109 -0
  26. data/ext/um/um_stream.c +9 -8
  27. data/ext/um/um_sync.c +18 -11
  28. data/ext/um/um_utils.c +17 -8
  29. data/grant-2025/interim-report.md +1 -1
  30. data/grant-2025/journal.md +4 -4
  31. data/grant-2025/tasks.md +6 -4
  32. data/lib/uringmachine/dns_resolver.rb +38 -0
  33. data/lib/uringmachine/fiber_scheduler.rb +7 -5
  34. data/lib/uringmachine/version.rb +1 -1
  35. data/lib/uringmachine.rb +106 -6
  36. data/test/helper.rb +15 -0
  37. data/test/test_async_op.rb +3 -2
  38. data/test/test_fiber_scheduler.rb +41 -1
  39. data/test/test_ssl.rb +85 -0
  40. data/test/test_stream.rb +11 -0
  41. data/test/test_um.rb +445 -11
  42. data/uringmachine.gemspec +1 -7
  43. data/vendor/liburing/examples/send-zerocopy.c +43 -31
  44. data/vendor/liburing/examples/zcrx.c +260 -69
  45. data/vendor/liburing/liburing.spec +1 -1
  46. data/vendor/liburing/src/include/liburing/io_uring.h +12 -0
  47. data/vendor/liburing/src/include/liburing.h +3 -2
  48. data/vendor/liburing/src/liburing-ffi.map +4 -0
  49. data/vendor/liburing/src/liburing.map +4 -0
  50. data/vendor/liburing/src/queue.c +12 -0
  51. data/vendor/liburing/src/register.c +1 -0
  52. data/vendor/liburing/src/setup.c +15 -7
  53. data/vendor/liburing/test/Makefile +8 -4
  54. data/vendor/liburing/test/conn-unreach.c +1 -1
  55. data/vendor/liburing/test/epwait.c +32 -6
  56. data/vendor/liburing/test/io-wq-exit.c +131 -0
  57. data/vendor/liburing/test/iowait.c +1 -1
  58. data/vendor/liburing/test/min-timeout.c +3 -1
  59. data/vendor/liburing/test/open-close.c +39 -0
  60. data/vendor/liburing/test/poll-update-trigger.c +85 -0
  61. data/vendor/liburing/test/recvsend_bundle.c +14 -11
  62. data/vendor/liburing/test/sendzc-bug.c +146 -0
  63. data/vendor/liburing/test/sqe-mixed-nop.c +151 -7
  64. data/vendor/liburing/test/test.h +2 -0
  65. data/vendor/liburing/test/timestamp-bug.c +135 -0
  66. data/vendor/liburing/test/timestamp.c +5 -0
  67. data/vendor/liburing/test/vec-regbuf.c +136 -1
  68. metadata +38 -283
  69. data/vendor/libressl/.github/scripts/changelog.sh +0 -74
  70. data/vendor/libressl/.github/workflows/android.yml +0 -35
  71. data/vendor/libressl/.github/workflows/cifuzz.yml +0 -33
  72. data/vendor/libressl/.github/workflows/cmake-config.yml +0 -98
  73. data/vendor/libressl/.github/workflows/coverity.yml +0 -69
  74. data/vendor/libressl/.github/workflows/emscripten.yml +0 -71
  75. data/vendor/libressl/.github/workflows/fedora-rawhide.yml +0 -39
  76. data/vendor/libressl/.github/workflows/freebsd.yml +0 -71
  77. data/vendor/libressl/.github/workflows/linux.yml +0 -71
  78. data/vendor/libressl/.github/workflows/macos.yml +0 -37
  79. data/vendor/libressl/.github/workflows/release.yml +0 -81
  80. data/vendor/libressl/.github/workflows/rust-openssl.yml +0 -47
  81. data/vendor/libressl/.github/workflows/solaris.yml +0 -37
  82. data/vendor/libressl/.github/workflows/windows.yml +0 -70
  83. data/vendor/libressl/.gitignore +0 -333
  84. data/vendor/libressl/CMakeLists.txt +0 -581
  85. data/vendor/libressl/COPYING +0 -133
  86. data/vendor/libressl/ChangeLog +0 -3280
  87. data/vendor/libressl/FindLibreSSL.cmake +0 -232
  88. data/vendor/libressl/LibreSSLConfig.cmake.in +0 -36
  89. data/vendor/libressl/Makefile.am +0 -60
  90. data/vendor/libressl/Makefile.am.common +0 -20
  91. data/vendor/libressl/OPENBSD_BRANCH +0 -1
  92. data/vendor/libressl/README.md +0 -238
  93. data/vendor/libressl/README.mingw.md +0 -43
  94. data/vendor/libressl/apps/CMakeLists.txt +0 -18
  95. data/vendor/libressl/apps/Makefile.am +0 -5
  96. data/vendor/libressl/apps/nc/CMakeLists.txt +0 -67
  97. data/vendor/libressl/apps/nc/Makefile.am +0 -64
  98. data/vendor/libressl/apps/nc/compat/accept4.c +0 -17
  99. data/vendor/libressl/apps/nc/compat/readpassphrase.c +0 -205
  100. data/vendor/libressl/apps/nc/compat/socket.c +0 -29
  101. data/vendor/libressl/apps/nc/compat/sys/socket.h +0 -30
  102. data/vendor/libressl/apps/ocspcheck/CMakeLists.txt +0 -44
  103. data/vendor/libressl/apps/ocspcheck/Makefile.am +0 -45
  104. data/vendor/libressl/apps/ocspcheck/compat/.gitignore +0 -0
  105. data/vendor/libressl/apps/openssl/CMakeLists.txt +0 -97
  106. data/vendor/libressl/apps/openssl/Makefile.am +0 -108
  107. data/vendor/libressl/apps/openssl/apps_win.c +0 -138
  108. data/vendor/libressl/apps/openssl/certhash_win.c +0 -13
  109. data/vendor/libressl/apps/openssl/compat/clock_gettime_osx.c +0 -26
  110. data/vendor/libressl/apps/openssl/compat/poll_win.c +0 -329
  111. data/vendor/libressl/appveyor.yml +0 -53
  112. data/vendor/libressl/autogen.sh +0 -15
  113. data/vendor/libressl/check-release.sh +0 -86
  114. data/vendor/libressl/cmake_export_symbol.cmake +0 -71
  115. data/vendor/libressl/cmake_uninstall.cmake.in +0 -36
  116. data/vendor/libressl/config +0 -17
  117. data/vendor/libressl/configure.ac +0 -165
  118. data/vendor/libressl/crypto/CMakeLists.txt +0 -863
  119. data/vendor/libressl/crypto/Makefile.am +0 -962
  120. data/vendor/libressl/crypto/Makefile.am.arc4random +0 -46
  121. data/vendor/libressl/crypto/Makefile.am.elf-mips +0 -14
  122. data/vendor/libressl/crypto/Makefile.am.elf-mips64 +0 -14
  123. data/vendor/libressl/crypto/Makefile.am.elf-x86_64 +0 -35
  124. data/vendor/libressl/crypto/Makefile.am.macosx-x86_64 +0 -35
  125. data/vendor/libressl/crypto/Makefile.am.masm-x86_64 +0 -22
  126. data/vendor/libressl/crypto/Makefile.am.mingw64-x86_64 +0 -23
  127. data/vendor/libressl/crypto/arch/aarch64/crypto_cpu_caps_darwin.c +0 -60
  128. data/vendor/libressl/crypto/arch/aarch64/crypto_cpu_caps_linux.c +0 -62
  129. data/vendor/libressl/crypto/arch/aarch64/crypto_cpu_caps_none.c +0 -26
  130. data/vendor/libressl/crypto/arch/aarch64/crypto_cpu_caps_windows.c +0 -36
  131. data/vendor/libressl/crypto/arch/loongarch64/crypto_arch.h +0 -21
  132. data/vendor/libressl/crypto/arch/mips/crypto_arch.h +0 -21
  133. data/vendor/libressl/crypto/bn/arch/loongarch64/bn_arch.h +0 -23
  134. data/vendor/libressl/crypto/bn/arch/mips/bn_arch.h +0 -24
  135. data/vendor/libressl/crypto/compat/.gitignore +0 -31
  136. data/vendor/libressl/crypto/compat/arc4random.h +0 -41
  137. data/vendor/libressl/crypto/compat/b_win.c +0 -55
  138. data/vendor/libressl/crypto/compat/bsd-asprintf.c +0 -96
  139. data/vendor/libressl/crypto/compat/crypto_lock_win.c +0 -56
  140. data/vendor/libressl/crypto/compat/explicit_bzero_win.c +0 -13
  141. data/vendor/libressl/crypto/compat/freezero.c +0 -32
  142. data/vendor/libressl/crypto/compat/getdelim.c +0 -78
  143. data/vendor/libressl/crypto/compat/getline.c +0 -40
  144. data/vendor/libressl/crypto/compat/getopt_long.c +0 -528
  145. data/vendor/libressl/crypto/compat/getpagesize.c +0 -18
  146. data/vendor/libressl/crypto/compat/getprogname_linux.c +0 -23
  147. data/vendor/libressl/crypto/compat/getprogname_unimpl.c +0 -7
  148. data/vendor/libressl/crypto/compat/getprogname_windows.c +0 -13
  149. data/vendor/libressl/crypto/compat/posix_win.c +0 -296
  150. data/vendor/libressl/crypto/compat/syslog_r.c +0 -19
  151. data/vendor/libressl/crypto/compat/ui_openssl_win.c +0 -334
  152. data/vendor/libressl/dist.sh +0 -22
  153. data/vendor/libressl/gen-coverage-report.sh +0 -58
  154. data/vendor/libressl/gen-openbsd-tags.sh +0 -20
  155. data/vendor/libressl/include/CMakeLists.txt +0 -61
  156. data/vendor/libressl/include/Makefile.am +0 -79
  157. data/vendor/libressl/include/arch/loongarch64/opensslconf.h +0 -150
  158. data/vendor/libressl/include/arch/mips/opensslconf.h +0 -150
  159. data/vendor/libressl/include/compat/arpa/inet.h +0 -15
  160. data/vendor/libressl/include/compat/arpa/nameser.h +0 -25
  161. data/vendor/libressl/include/compat/cet.h +0 -19
  162. data/vendor/libressl/include/compat/dirent.h +0 -17
  163. data/vendor/libressl/include/compat/dirent_msvc.h +0 -611
  164. data/vendor/libressl/include/compat/endian.h +0 -161
  165. data/vendor/libressl/include/compat/err.h +0 -95
  166. data/vendor/libressl/include/compat/fcntl.h +0 -32
  167. data/vendor/libressl/include/compat/getopt.h +0 -50
  168. data/vendor/libressl/include/compat/limits.h +0 -25
  169. data/vendor/libressl/include/compat/netdb.h +0 -10
  170. data/vendor/libressl/include/compat/netinet/in.h +0 -19
  171. data/vendor/libressl/include/compat/netinet/ip.h +0 -49
  172. data/vendor/libressl/include/compat/netinet/tcp.h +0 -10
  173. data/vendor/libressl/include/compat/poll.h +0 -63
  174. data/vendor/libressl/include/compat/pthread.h +0 -122
  175. data/vendor/libressl/include/compat/readpassphrase.h +0 -44
  176. data/vendor/libressl/include/compat/resolv.h +0 -24
  177. data/vendor/libressl/include/compat/stdint.h +0 -31
  178. data/vendor/libressl/include/compat/stdio.h +0 -65
  179. data/vendor/libressl/include/compat/stdlib.h +0 -57
  180. data/vendor/libressl/include/compat/string.h +0 -98
  181. data/vendor/libressl/include/compat/sys/_null.h +0 -18
  182. data/vendor/libressl/include/compat/sys/ioctl.h +0 -11
  183. data/vendor/libressl/include/compat/sys/mman.h +0 -19
  184. data/vendor/libressl/include/compat/sys/param.h +0 -15
  185. data/vendor/libressl/include/compat/sys/queue.h +0 -536
  186. data/vendor/libressl/include/compat/sys/select.h +0 -10
  187. data/vendor/libressl/include/compat/sys/socket.h +0 -18
  188. data/vendor/libressl/include/compat/sys/stat.h +0 -129
  189. data/vendor/libressl/include/compat/sys/time.h +0 -37
  190. data/vendor/libressl/include/compat/sys/tree.h +0 -1006
  191. data/vendor/libressl/include/compat/sys/types.h +0 -69
  192. data/vendor/libressl/include/compat/sys/uio.h +0 -17
  193. data/vendor/libressl/include/compat/syslog.h +0 -38
  194. data/vendor/libressl/include/compat/time.h +0 -59
  195. data/vendor/libressl/include/compat/unistd.h +0 -83
  196. data/vendor/libressl/include/compat/win32netcompat.h +0 -57
  197. data/vendor/libressl/include/openssl/Makefile.am.tpl +0 -45
  198. data/vendor/libressl/libcrypto.pc.in +0 -28
  199. data/vendor/libressl/libressl.pub +0 -2
  200. data/vendor/libressl/libssl.pc.in +0 -28
  201. data/vendor/libressl/libtls.pc.in +0 -28
  202. data/vendor/libressl/m4/ax_add_fortify_source.m4 +0 -80
  203. data/vendor/libressl/m4/ax_check_compile_flag.m4 +0 -53
  204. data/vendor/libressl/m4/check-hardening-options.m4 +0 -110
  205. data/vendor/libressl/m4/check-libc.m4 +0 -189
  206. data/vendor/libressl/m4/check-os-options.m4 +0 -181
  207. data/vendor/libressl/m4/disable-compiler-warnings.m4 +0 -44
  208. data/vendor/libressl/man/CMakeLists.txt +0 -26
  209. data/vendor/libressl/man/links +0 -2780
  210. data/vendor/libressl/man/update_links.sh +0 -25
  211. data/vendor/libressl/openssl.pc.in +0 -11
  212. data/vendor/libressl/patches/bn_shift.patch +0 -34
  213. data/vendor/libressl/patches/crypto_arch.h.patch +0 -34
  214. data/vendor/libressl/patches/crypto_namespace.h.patch +0 -22
  215. data/vendor/libressl/patches/netcat.c.patch +0 -178
  216. data/vendor/libressl/patches/openssl.c.patch +0 -12
  217. data/vendor/libressl/patches/opensslfeatures.h.patch +0 -49
  218. data/vendor/libressl/patches/patch-amd64-crypto-cpu-caps.c.patch +0 -20
  219. data/vendor/libressl/patches/patch-i386-crypto-cpu-caps.c.patch +0 -20
  220. data/vendor/libressl/patches/speed.c.patch +0 -114
  221. data/vendor/libressl/patches/ssl_namespace.h.patch +0 -21
  222. data/vendor/libressl/patches/tls.h.patch +0 -16
  223. data/vendor/libressl/patches/tls_config.c.patch +0 -15
  224. data/vendor/libressl/patches/win32_amd64_bn_arch.h.patch +0 -28
  225. data/vendor/libressl/patches/windows_headers.patch +0 -80
  226. data/vendor/libressl/scripts/config.guess +0 -1774
  227. data/vendor/libressl/scripts/config.sub +0 -1907
  228. data/vendor/libressl/scripts/i686-w64-mingw32.cmake +0 -9
  229. data/vendor/libressl/scripts/test +0 -210
  230. data/vendor/libressl/scripts/wrap-compiler-for-flag-check +0 -31
  231. data/vendor/libressl/scripts/x86_64-w64-mingw32.cmake +0 -9
  232. data/vendor/libressl/ssl/CMakeLists.txt +0 -183
  233. data/vendor/libressl/ssl/Makefile.am +0 -187
  234. data/vendor/libressl/tests/CMakeLists.txt +0 -970
  235. data/vendor/libressl/tests/Makefile.am +0 -944
  236. data/vendor/libressl/tests/aeadtest.sh +0 -30
  237. data/vendor/libressl/tests/arc4randomforktest.sh +0 -21
  238. data/vendor/libressl/tests/asn1time_small.test +0 -10
  239. data/vendor/libressl/tests/cmake/CMakeLists.txt +0 -52
  240. data/vendor/libressl/tests/cmake/crypto.c +0 -7
  241. data/vendor/libressl/tests/cmake/ssl.c +0 -6
  242. data/vendor/libressl/tests/cmake/tls.c +0 -6
  243. data/vendor/libressl/tests/compat/pipe2.c +0 -186
  244. data/vendor/libressl/tests/dtlstest.sh +0 -28
  245. data/vendor/libressl/tests/evptest.sh +0 -22
  246. data/vendor/libressl/tests/keypairtest.sh +0 -27
  247. data/vendor/libressl/tests/mlkem_tests.sh +0 -39
  248. data/vendor/libressl/tests/ocsptest.bat +0 -25
  249. data/vendor/libressl/tests/ocsptest.sh +0 -23
  250. data/vendor/libressl/tests/openssl.cnf +0 -29
  251. data/vendor/libressl/tests/optionstest.c +0 -381
  252. data/vendor/libressl/tests/pidwraptest.c +0 -85
  253. data/vendor/libressl/tests/pidwraptest.sh +0 -26
  254. data/vendor/libressl/tests/quictest.bat +0 -27
  255. data/vendor/libressl/tests/quictest.sh +0 -30
  256. data/vendor/libressl/tests/renegotiation_test.bat +0 -27
  257. data/vendor/libressl/tests/renegotiation_test.sh +0 -30
  258. data/vendor/libressl/tests/rfc5280time_small.test +0 -10
  259. data/vendor/libressl/tests/servertest.bat +0 -27
  260. data/vendor/libressl/tests/servertest.sh +0 -30
  261. data/vendor/libressl/tests/shutdowntest.bat +0 -27
  262. data/vendor/libressl/tests/shutdowntest.sh +0 -30
  263. data/vendor/libressl/tests/ssltest.bat +0 -32
  264. data/vendor/libressl/tests/ssltest.sh +0 -48
  265. data/vendor/libressl/tests/testdsa.bat +0 -47
  266. data/vendor/libressl/tests/testdsa.sh +0 -57
  267. data/vendor/libressl/tests/testenc.bat +0 -85
  268. data/vendor/libressl/tests/testenc.sh +0 -93
  269. data/vendor/libressl/tests/testrsa.bat +0 -47
  270. data/vendor/libressl/tests/testrsa.sh +0 -57
  271. data/vendor/libressl/tests/testssl.bat +0 -171
  272. data/vendor/libressl/tests/tlstest.bat +0 -27
  273. data/vendor/libressl/tests/tlstest.sh +0 -28
  274. data/vendor/libressl/tls/CMakeLists.txt +0 -125
  275. data/vendor/libressl/tls/Makefile.am +0 -76
  276. data/vendor/libressl/tls/compat/ftruncate.c +0 -17
  277. data/vendor/libressl/tls/compat/pread.c +0 -29
  278. data/vendor/libressl/tls/compat/pwrite.c +0 -29
  279. data/vendor/libressl/update.sh +0 -460
data/ext/um/um.c CHANGED
@@ -60,8 +60,8 @@ inline void um_teardown(struct um *machine) {
60
60
  }
61
61
 
62
62
  inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
63
- DEBUG_PRINTF("-> %p um_get_sqe: op %p kind=%s unsubmitted=%d pending=%d total=%lu\n",
64
- &machine->ring, op, um_op_kind_name(op ? op->kind : OP_UNDEFINED),
63
+ DEBUG_PRINTF("* um_get_sqe: op %p kind=%s ref_count=%d flags=%x unsubmitted=%d pending=%d total=%lu\n",
64
+ op, um_op_kind_name(op ? op->kind : OP_UNDEFINED), op ? op->ref_count : 0, op ? op->flags : 0,
65
65
  machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
66
66
  );
67
67
 
@@ -112,12 +112,11 @@ void *um_submit_without_gvl(void *ptr) {
112
112
  }
113
113
 
114
114
  inline uint um_submit(struct um *machine) {
115
- DEBUG_PRINTF("-> %p um_submit: unsubmitted=%d pending=%d total=%lu\n",
116
- &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
117
- machine->metrics.total_ops
115
+ DEBUG_PRINTF("> um_submit: unsubmitted=%d pending=%d total=%lu\n",
116
+ machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
118
117
  );
119
118
  if (!machine->metrics.ops_unsubmitted) {
120
- DEBUG_PRINTF("<- %p um_submit: no unsubmitted SQEs, early return\n", &machine->ring);
119
+ DEBUG_PRINTF("< %p um_submit: no unsubmitted SQEs, early return\n", &machine->ring);
121
120
  return 0;
122
121
  }
123
122
 
@@ -127,7 +126,7 @@ inline uint um_submit(struct um *machine) {
127
126
  else
128
127
  ctx.result = io_uring_submit(&machine->ring);
129
128
 
130
- DEBUG_PRINTF("<- %p um_submit: result=%d\n", &machine->ring, ctx.result);
129
+ DEBUG_PRINTF("< um_submit: result=%d\n", ctx.result);
131
130
 
132
131
  if (ctx.result < 0)
133
132
  rb_syserr_fail(-ctx.result, strerror(-ctx.result));
@@ -136,53 +135,76 @@ inline uint um_submit(struct um *machine) {
136
135
  return ctx.result;
137
136
  }
138
137
 
138
+ static inline void um_schedule_op(struct um *machine, struct um_op *op) {
139
+ op->flags |= OP_F_SCHEDULED;
140
+ um_runqueue_push(machine, op);
141
+ op->ref_count++;
142
+ }
143
+
139
144
  static inline void um_process_cqe(struct um *machine, struct io_uring_cqe *cqe) {
140
145
  struct um_op *op = (struct um_op *)cqe->user_data;
141
146
  if (DEBUG) {
142
147
  if (op) {
143
- DEBUG_PRINTF("<- %p um_process_cqe: op %p kind %s flags %d cqe_res %d cqe_flags %d pending %d\n",
144
- &machine->ring, op, um_op_kind_name(op->kind), op->flags, cqe->res, cqe->flags, machine->metrics.ops_pending
148
+ DEBUG_PRINTF("* um_process_cqe: op=%p kind=%s ref_count=%d flags=%x cqe_res=%d cqe_flags=%x pending=%d\n",
149
+ op, um_op_kind_name(op->kind), op->ref_count, op->flags,
150
+ cqe->res, cqe->flags, machine->metrics.ops_pending
145
151
  );
146
152
  }
147
153
  else {
148
- DEBUG_PRINTF("<- %p um_process_cqe: op NULL cqe_res %d cqe_flags %d pending %d\n",
149
- &machine->ring, cqe->res, cqe->flags, machine->metrics.ops_pending
154
+ DEBUG_PRINTF("* um_process_cqe: op=NULL cqe_res=%d cqe_flags=%x pending=%d\n",
155
+ cqe->res, cqe->flags, machine->metrics.ops_pending
150
156
  );
151
157
  }
152
158
  }
153
159
  if (unlikely(!op)) return;
154
160
 
155
- if (!(cqe->flags & IORING_CQE_F_MORE))
156
- machine->metrics.ops_pending--;
157
-
158
- if (op->flags & OP_F_FREE_ON_COMPLETE) {
159
- if (op->flags & OP_F_TRANSIENT)
160
- um_op_transient_remove(machine, op);
161
+ // A multishot operation is still in progress if CQE has the F_MORE flag set
162
+ int done = OP_MULTISHOT_P(op) ? !(cqe->flags & IORING_CQE_F_MORE) : true;
161
163
 
162
- um_op_free(machine, op);
164
+ // F_TRANSIENT means the operation was put on the transient list. Transient
165
+ // ops are usually async ops where the app doesn't care when they are done or
166
+ // how. We hold on to those ops on the transient list, where we can mark the
167
+ // corresponding buffer during a GC.
168
+ if (OP_TRANSIENT_P(op)) {
169
+ machine->metrics.ops_pending--;
170
+ um_op_transient_remove(machine, op);
171
+ if (op->ref_count > 1) {
172
+ op->result.res = cqe->res;
173
+ op->result.flags = cqe->flags;
174
+ op->flags |= OP_F_CQE_SEEN | OP_F_CQE_DONE;
175
+ }
176
+ um_op_release(machine, op);
163
177
  return;
164
178
  }
165
179
 
166
- op->flags |= OP_F_COMPLETED;
167
- if (unlikely((cqe->res == -ECANCELED) && (op->flags & OP_F_IGNORE_CANCELED))) return;
168
- if (unlikely(op->flags & OP_F_CANCELED)) return;
180
+ if (done) {
181
+ machine->metrics.ops_pending--;
182
+ um_op_release(machine, op);
183
+ }
169
184
 
170
- if (op->flags & OP_F_TRANSIENT)
171
- um_op_transient_remove(machine, op);
185
+ if (unlikely(OP_CANCELED_P(op))) {
186
+ // multishot ops may generate multiple CQEs, we release only on the last
187
+ // one for the op.
188
+ if (done) {
189
+ op->flags |= OP_F_CQE_SEEN | OP_F_CQE_DONE;
190
+ um_op_release(machine, op);
191
+ }
192
+ return;
193
+ }
172
194
 
173
- if (op->flags & OP_F_MULTISHOT) {
195
+ if (OP_MULTISHOT_P(op)) {
174
196
  um_op_multishot_results_push(machine, op, cqe->res, cqe->flags);
175
- if (op->multishot_result_count > 1)
176
- return;
197
+
198
+ op->flags |= done ? (OP_F_CQE_SEEN | OP_F_CQE_DONE) : OP_F_CQE_SEEN;
199
+ if (!OP_SCHEDULED_P(op)) um_schedule_op(machine, op);
177
200
  }
178
201
  else {
202
+ // single shot
179
203
  op->result.res = cqe->res;
180
204
  op->result.flags = cqe->flags;
205
+ op->flags |= OP_F_CQE_SEEN | OP_F_CQE_DONE;
206
+ if (!OP_ASYNC_P(op)) um_schedule_op(machine, op);
181
207
  }
182
-
183
- if (op->flags & OP_F_ASYNC) return;
184
-
185
- um_runqueue_push(machine, op);
186
208
  }
187
209
 
188
210
  // copied from liburing/queue.c
@@ -191,8 +213,8 @@ static inline int cq_ring_needs_flush(struct io_uring *ring) {
191
213
  }
192
214
 
193
215
  static inline int um_process_ready_cqes(struct um *machine) {
194
- DEBUG_PRINTF("-> %p um_process_ready_cqes: unsubmitted=%d pending=%d total=%lu\n",
195
- &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
216
+ DEBUG_PRINTF("> um_process_ready_cqes: unsubmitted=%d pending=%d total=%lu\n",
217
+ machine->metrics.ops_unsubmitted, machine->metrics.ops_pending, machine->metrics.total_ops
196
218
  );
197
219
 
198
220
  unsigned total_count = 0;
@@ -211,9 +233,9 @@ iterate:
211
233
  if (overflow_checked) goto done;
212
234
 
213
235
  if (cq_ring_needs_flush(&machine->ring)) {
214
- DEBUG_PRINTF("-> %p io_uring_enter\n", &machine->ring);
236
+ DEBUG_PRINTF("> io_uring_enter\n");
215
237
  int ret = io_uring_enter(machine->ring.ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
216
- DEBUG_PRINTF("<- %p io_uring_enter: result=%d\n", &machine->ring, ret);
238
+ DEBUG_PRINTF("< io_uring_enter: result=%d\n", ret);
217
239
  if (ret < 0)
218
240
  rb_syserr_fail(-ret, strerror(-ret));
219
241
 
@@ -222,7 +244,7 @@ iterate:
222
244
  }
223
245
 
224
246
  done:
225
- DEBUG_PRINTF("<- %p um_process_ready_cqes: total_processed=%u\n", &machine->ring, total_count);
247
+ DEBUG_PRINTF("< um_process_ready_cqes: total_processed=%u\n", total_count);
226
248
 
227
249
  return total_count;
228
250
  }
@@ -237,8 +259,8 @@ struct wait_for_cqe_ctx {
237
259
  void *um_wait_for_cqe_without_gvl(void *ptr) {
238
260
  struct wait_for_cqe_ctx *ctx = ptr;
239
261
  if (ctx->machine->metrics.ops_unsubmitted) {
240
- DEBUG_PRINTF("-> %p io_uring_submit_and_wait_timeout: unsubmitted=%d pending=%d total=%lu\n",
241
- &ctx->machine->ring, ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
262
+ DEBUG_PRINTF("> io_uring_submit_and_wait_timeout: unsubmitted=%d pending=%d total=%lu\n",
263
+ ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
242
264
  ctx->machine->metrics.total_ops
243
265
  );
244
266
 
@@ -249,16 +271,16 @@ void *um_wait_for_cqe_without_gvl(void *ptr) {
249
271
  // https://github.com/axboe/liburing/issues/1280
250
272
  int ret = io_uring_submit_and_wait_timeout(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
251
273
  ctx->machine->metrics.ops_unsubmitted = 0;
252
- DEBUG_PRINTF("<- %p io_uring_submit_and_wait_timeout: result=%d\n", &ctx->machine->ring, ret);
274
+ DEBUG_PRINTF("< io_uring_submit_and_wait_timeout: result=%d\n", ret);
253
275
  ctx->result = (ret > 0 && !ctx->cqe) ? -EINTR : ret;
254
276
  }
255
277
  else {
256
- DEBUG_PRINTF("-> %p io_uring_wait_cqes: unsubmitted=%d pending=%d total=%lu\n",
257
- &ctx->machine->ring, ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
278
+ DEBUG_PRINTF("> io_uring_wait_cqes: unsubmitted=%d pending=%d total=%lu\n",
279
+ ctx->machine->metrics.ops_unsubmitted, ctx->machine->metrics.ops_pending,
258
280
  ctx->machine->metrics.total_ops
259
281
  );
260
282
  ctx->result = io_uring_wait_cqes(&ctx->machine->ring, &ctx->cqe, ctx->wait_nr, NULL, NULL);
261
- DEBUG_PRINTF("<- %p io_uring_wait_cqes: result=%d\n", &ctx->machine->ring, ctx->result);
283
+ DEBUG_PRINTF("< io_uring_wait_cqes: result=%d\n", ctx->result);
262
284
  }
263
285
  return NULL;
264
286
  }
@@ -290,6 +312,7 @@ inline void *um_wait_for_sidecar_signal(void *ptr) {
290
312
  // either 1 - where we wait for at least one CQE to be ready, or 0, where we
291
313
  // don't wait, and just process any CQEs that already ready.
292
314
  static inline void um_wait_for_and_process_ready_cqes(struct um *machine, int wait_nr) {
315
+ DEBUG_PRINTF("* um_wait_for_and_process_ready_cqes wait_nr=%d\n", wait_nr);
293
316
  struct wait_for_cqe_ctx ctx = { .machine = machine, .cqe = NULL, .wait_nr = wait_nr };
294
317
  machine->metrics.total_waits++;
295
318
 
@@ -347,14 +370,16 @@ inline void um_profile_switch(struct um *machine, VALUE next_fiber) {
347
370
  }
348
371
 
349
372
  inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
350
- DEBUG_PRINTF("-> %p process_runqueue_op: op %p\n", &machine->ring, op);
373
+ DEBUG_PRINTF("* process_runqueue_op: op=%p kind=%s ref_count=%d flags=%x\n",
374
+ op, um_op_kind_name(op->kind), op->ref_count, op->flags
375
+ );
351
376
 
352
377
  machine->metrics.total_switches++;
353
378
  VALUE fiber = op->fiber;
354
379
  VALUE value = op->value;
355
380
 
356
- if (unlikely(op->flags & OP_F_TRANSIENT))
357
- um_op_free(machine, op);
381
+ op->flags &= ~OP_F_SCHEDULED;
382
+ um_op_release(machine, op);
358
383
 
359
384
  if (machine->profile_mode) um_profile_switch(machine, fiber);
360
385
  VALUE ret = rb_fiber_transfer(fiber, 1, &value);
@@ -364,15 +389,19 @@ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
364
389
  }
365
390
 
366
391
  inline VALUE um_switch(struct um *machine) {
367
- DEBUG_PRINTF("-> %p um_switch: unsubmitted=%d pending=%d total=%lu\n",
368
- &machine->ring, machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
392
+ DEBUG_PRINTF("* um_switch: unsubmitted=%d pending=%d total=%lu\n",
393
+ machine->metrics.ops_unsubmitted, machine->metrics.ops_pending,
369
394
  machine->metrics.total_ops
370
395
  );
371
396
 
372
397
  while (true) {
373
398
  struct um_op *op = um_runqueue_shift(machine);
374
399
  if (op) {
375
- if (unlikely(op->flags & OP_F_RUNQUEUE_SKIP)) continue;
400
+ if (unlikely(OP_SKIP_P(op))) {
401
+ op->flags &= ~OP_F_SCHEDULED;
402
+ um_op_release(machine, op);
403
+ continue;
404
+ }
376
405
 
377
406
  // in test mode we want to process I/O on each snooze
378
407
  if (unlikely(machine->test_mode && (op->kind == OP_SCHEDULE))) {
@@ -393,29 +422,62 @@ inline VALUE um_yield(struct um *machine) {
393
422
  return ret;
394
423
  }
395
424
 
396
- void um_cancel_op(struct um *machine, struct um_op *op) {
425
+ inline void um_cancel_op(struct um *machine, struct um_op *op) {
397
426
  struct io_uring_sqe *sqe = um_get_sqe(machine, NULL);
398
- io_uring_prep_cancel64(sqe, (long long)op, 0);
427
+ io_uring_prep_cancel(sqe, op, IORING_ASYNC_CANCEL_USERDATA);
428
+ sqe->flags = IOSQE_CQE_SKIP_SUCCESS;
429
+ }
430
+
431
+ inline void um_cancel_op_and_discard_cqe(struct um *machine, struct um_op *op) {
432
+ DEBUG_PRINTF("* um_cancel_op_and_discard_cqe op=%p kind=%s ref_count=%d flags=%x\n",
433
+ op, um_op_kind_name(op->kind), op->ref_count, op->flags
434
+ );
435
+ um_cancel_op(machine, op);
436
+ op->flags |= OP_F_CANCELED;
399
437
  }
400
438
 
401
- inline void um_cancel_and_wait(struct um *machine, struct um_op *op) {
439
+ inline void um_cancel_op_and_await_cqe(struct um *machine, struct um_op *op) {
440
+ DEBUG_PRINTF("* um_cancel_op_and_await_cqe op=%p kind=%s ref_count=%d flags=%x\n",
441
+ op, um_op_kind_name(op->kind), op->ref_count, op->flags
442
+ );
402
443
  um_cancel_op(machine, op);
403
444
 
404
445
  VALUE fiber = rb_fiber_current();
405
446
  rb_set_add(machine->pending_fibers, fiber);
406
- while (!um_op_completed_p(op)) {
447
+ int multishot_wait_count = 0;
448
+ while (!OP_CQE_DONE_P(op)) {
449
+ if (OP_MULTISHOT_P(op)) {
450
+ // I noticed that with multishot timeout ops, there seems to be once in a
451
+ // while a race condition where the cancel would not register, causing
452
+ // this function to block forever, waiting for the operation to be done.
453
+ // The following mechanism reissues a cancel every 4 iterations, which
454
+ // seems to fix the problem. Not clear if this is a bug in io_uring.
455
+ multishot_wait_count++;
456
+ if (!(multishot_wait_count % 4)) um_cancel_op(machine, op);
457
+ um_op_multishot_results_clear(machine, op);
458
+ op->flags &= ~OP_F_CQE_SEEN;
459
+ }
407
460
  um_switch(machine);
408
461
  }
409
462
  rb_set_delete(machine->pending_fibers, fiber);
410
463
  }
411
464
 
412
- inline int um_check_completion(struct um *machine, struct um_op *op) {
413
- if (!um_op_completed_p(op)) {
414
- um_cancel_and_wait(machine, op);
465
+ int um_verify_op_completion(struct um *machine, struct um_op *op, int await_cancelled) {
466
+ if (unlikely(!OP_CQE_DONE_P(op))) {
467
+ if (await_cancelled)
468
+ um_cancel_op_and_await_cqe(machine, op);
469
+ else
470
+ um_cancel_op_and_discard_cqe(machine, op);
415
471
  return 0;
416
472
  }
417
473
 
418
- um_raise_on_error_result(op->result.res);
474
+ int res = op->result.res;
475
+
476
+ // on error we release the op and immediately raise an exception
477
+ if (unlikely(res < 0 && res != -ETIME)) {
478
+ um_op_release(machine, op);
479
+ um_raise_on_error_result(res);
480
+ }
419
481
  return 1;
420
482
  }
421
483
 
@@ -426,25 +488,31 @@ VALUE um_wakeup(struct um *machine) {
426
488
  return Qnil;
427
489
  }
428
490
 
429
- inline void um_prep_op(struct um *machine, struct um_op *op, enum um_op_kind kind, unsigned flags) {
491
+ inline void um_prep_op(struct um *machine, struct um_op *op, enum um_op_kind kind, uint ref_count, unsigned flags) {
430
492
  memset(op, 0, sizeof(struct um_op));
431
493
  op->kind = kind;
494
+ op->ref_count = ref_count;
432
495
  op->flags = flags;
433
496
 
434
- VALUE fiber = (flags & OP_F_FREE_ON_COMPLETE) ? Qnil : rb_fiber_current();
497
+ VALUE fiber = OP_ASYNC_P(op) ? Qnil : rb_fiber_current();
498
+ if (OP_TRANSIENT_P(op)) um_op_transient_add(machine, op);
499
+
435
500
  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
436
501
  RB_OBJ_WRITE(machine->self, &op->value, Qnil);
437
502
  RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
438
503
  }
439
504
 
440
505
  inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
441
- struct um_op *op = um_op_alloc(machine);
506
+ struct um_op *op = um_op_acquire(machine);
442
507
  memset(op, 0, sizeof(struct um_op));
443
508
  op->kind = OP_SCHEDULE;
444
- op->flags = OP_F_TRANSIENT;
509
+ op->ref_count = 1;
510
+ op->flags = OP_F_SCHEDULED;
511
+
445
512
  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
446
513
  RB_OBJ_WRITE(machine->self, &op->value, value);
447
514
  RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
515
+
448
516
  um_runqueue_push(machine, op);
449
517
  }
450
518
 
@@ -457,18 +525,14 @@ struct op_ctx {
457
525
  struct um_queue *queue;
458
526
  void *read_buf;
459
527
  int read_maxlen;
460
- struct __kernel_timespec ts;
461
528
  int flags;
462
529
  };
463
530
 
464
531
  VALUE um_timeout_complete(VALUE arg) {
465
532
  struct op_ctx *ctx = (struct op_ctx *)arg;
466
533
 
467
- if (!um_op_completed_p(ctx->op)) {
468
- um_cancel_op(ctx->machine, ctx->op);
469
- ctx->op->flags |= OP_F_TRANSIENT | OP_F_IGNORE_CANCELED;
470
- um_op_transient_add(ctx->machine, ctx->op);
471
- }
534
+ if (!OP_CQE_DONE_P(ctx->op)) um_cancel_op_and_discard_cqe(ctx->machine, ctx->op);
535
+ um_op_release(ctx->machine, ctx->op);
472
536
 
473
537
  return Qnil;
474
538
  }
@@ -477,8 +541,8 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
477
541
  static ID ID_new = 0;
478
542
  if (!ID_new) ID_new = rb_intern("new");
479
543
 
480
- struct um_op *op = um_op_alloc(machine);
481
- um_prep_op(machine, op, OP_TIMEOUT, 0);
544
+ struct um_op *op = um_op_acquire(machine);
545
+ um_prep_op(machine, op, OP_TIMEOUT, 2, 0);
482
546
  op->ts = um_double_to_timespec(NUM2DBL(interval));
483
547
  RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
484
548
  RB_OBJ_WRITE(machine->self, &op->value, rb_funcall(class, ID_new, 0));
@@ -500,20 +564,20 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
500
564
  VALUE um_sleep(struct um *machine, double duration) {
501
565
  if (duration <= 0) duration = SLEEP_FOREVER_DURATION;
502
566
 
503
- struct um_op op;
504
- um_prep_op(machine, &op, OP_SLEEP, 0);
505
- op.ts = um_double_to_timespec(duration);
506
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
507
- io_uring_prep_timeout(sqe, &op.ts, 0, 0);
567
+ struct um_op *op = um_op_acquire(machine);
568
+ um_prep_op(machine, op, OP_SLEEP, 2, 0);
569
+ op->ts = um_double_to_timespec(duration);
570
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
571
+ io_uring_prep_timeout(sqe, &op->ts, 0, 0);
508
572
 
509
573
  VALUE ret = um_yield(machine);
510
574
 
511
- if (!um_op_completed_p(&op))
512
- um_cancel_and_wait(machine, &op);
513
- else {
514
- if (op.result.res != -ETIME) um_raise_on_error_result(op.result.res);
515
- ret = DBL2NUM(duration);
516
- }
575
+ DEBUG_PRINTF("sleep resume op %p ref_count %d flags: %x\n",
576
+ op, op->ref_count, op->flags
577
+ );
578
+
579
+ if (likely(um_verify_op_completion(machine, op, false))) ret = DBL2NUM(duration);
580
+ um_op_release(machine, op);
517
581
 
518
582
  RAISE_IF_EXCEPTION(ret);
519
583
  RB_GC_GUARD(ret);
@@ -521,19 +585,20 @@ VALUE um_sleep(struct um *machine, double duration) {
521
585
  }
522
586
 
523
587
  VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t buffer_offset, __u64 file_offset) {
524
- struct um_op op;
525
- um_prep_op(machine, &op, OP_READ, 0);
526
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
527
588
  void *ptr = um_prepare_read_buffer(buffer, maxlen, buffer_offset);
589
+
590
+ struct um_op *op = um_op_acquire(machine);
591
+ um_prep_op(machine, op, OP_READ, 2, 0);
592
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
528
593
  io_uring_prep_read(sqe, fd, ptr, maxlen, file_offset);
529
594
 
530
595
  VALUE ret = um_yield(machine);
531
596
 
532
- if (um_check_completion(machine, &op)) {
533
- um_update_read_buffer(machine, buffer, buffer_offset, op.result.res, op.result.flags);
534
- ret = INT2NUM(op.result.res);
535
-
597
+ if (likely(um_verify_op_completion(machine, op, true))) {
598
+ um_update_read_buffer(buffer, buffer_offset, op->result.res);
599
+ ret = INT2NUM(op->result.res);
536
600
  }
601
+ um_op_release(machine, op);
537
602
 
538
603
  RAISE_IF_EXCEPTION(ret);
539
604
  RB_GC_GUARD(ret);
@@ -541,45 +606,61 @@ VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t b
541
606
  }
542
607
 
543
608
  size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen) {
544
- struct um_op op;
545
- um_prep_op(machine, &op, OP_READ, 0);
546
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
609
+ struct um_op *op = um_op_acquire(machine);
610
+ um_prep_op(machine, op, OP_READ, 2, 0);
611
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
547
612
  io_uring_prep_read(sqe, fd, buffer, maxlen, -1);
548
613
 
614
+ int res = 0;
549
615
  VALUE ret = um_yield(machine);
550
616
 
551
- if (um_check_completion(machine, &op)) {
552
- return op.result.res;
553
- }
617
+ if (likely(um_verify_op_completion(machine, op, true))) res = op->result.res;
618
+ um_op_release(machine, op);
554
619
 
555
620
  RAISE_IF_EXCEPTION(ret);
556
621
  RB_GC_GUARD(ret);
557
- return 0;
622
+ return res;
558
623
  }
559
624
 
560
625
  VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
561
626
  const void *base;
562
627
  size_t size;
563
- um_get_buffer_bytes_for_writing(buffer, &base, &size);
628
+ um_get_buffer_bytes_for_writing(buffer, &base, &size, true);
564
629
  if ((len == (size_t)-1) || (len > size)) len = size;
565
630
  if (unlikely(!len)) return INT2NUM(0);
566
631
 
567
- struct um_op op;
568
- um_prep_op(machine, &op, OP_WRITE, 0);
569
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
570
-
632
+ struct um_op *op = um_op_acquire(machine);
633
+ um_prep_op(machine, op, OP_WRITE, 2, 0);
634
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
571
635
  io_uring_prep_write(sqe, fd, base, len, file_offset);
572
636
 
573
637
  VALUE ret = um_yield(machine);
574
638
 
575
- if (um_check_completion(machine, &op))
576
- ret = INT2NUM(op.result.res);
639
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
640
+ um_op_release(machine, op);
577
641
 
578
642
  RAISE_IF_EXCEPTION(ret);
579
643
  RB_GC_GUARD(ret);
580
644
  return ret;
581
645
  }
582
646
 
647
+ size_t um_write_raw(struct um *machine, int fd, const char *buffer, size_t maxlen) {
648
+ struct um_op *op = um_op_acquire(machine);
649
+ um_prep_op(machine, op, OP_WRITE, 2, 0);
650
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
651
+ io_uring_prep_write(sqe, fd, buffer, maxlen, 0);
652
+
653
+ int res = 0;
654
+ VALUE ret = um_yield(machine);
655
+
656
+ if (likely(um_verify_op_completion(machine, op, true))) res = op->result.res;
657
+ um_op_release(machine, op);
658
+
659
+ RAISE_IF_EXCEPTION(ret);
660
+ RB_GC_GUARD(ret);
661
+ return res;
662
+ }
663
+
583
664
  VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv) {
584
665
  __u64 file_offset = -1;
585
666
  if (TYPE(argv[argc - 1]) == T_FIXNUM) {
@@ -591,7 +672,6 @@ VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv) {
591
672
  struct iovec *iovecs = um_alloc_iovecs_for_writing(argc, argv, &total_len);
592
673
  struct iovec *iovecs_ptr = iovecs;
593
674
  int iovecs_len = argc;
594
- struct um_op op;
595
675
  VALUE ret = Qnil;
596
676
  int writev_res = 0;
597
677
 
@@ -599,31 +679,34 @@ VALUE um_writev(struct um *machine, int fd, int argc, VALUE *argv) {
599
679
  free(iovecs);
600
680
  return INT2NUM(0);
601
681
  }
682
+
683
+ struct um_op *op = um_op_acquire(machine);
602
684
  len = total_len;
603
- while (len) {
604
- um_prep_op(machine, &op, OP_WRITEV, 0);
605
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
685
+ while (true) {
686
+ op->iovecs = iovecs;
687
+ um_prep_op(machine, op, OP_WRITEV, 2, OP_F_FREE_IOVECS);
688
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
606
689
  io_uring_prep_writev(sqe, fd, iovecs_ptr, iovecs_len, file_offset);
607
690
 
608
691
  ret = um_yield(machine);
609
692
 
610
- int completed = um_op_completed_p(&op);
611
- if (unlikely(!completed)) goto cancelled;
693
+ if (unlikely(!OP_CQE_DONE_P(op))) goto cancelled;
612
694
 
613
- writev_res = op.result.res;
695
+ writev_res = op->result.res;
614
696
  if (unlikely(writev_res < 0)) goto done;
615
697
 
616
698
  len -= writev_res;
617
- if (len) {
618
- um_advance_iovecs_for_writing(&iovecs_ptr, &iovecs_len, (size_t)writev_res);
619
- if (file_offset != (__u64)-1) file_offset += writev_res;
620
- }
699
+ if (!len) goto done;
700
+
701
+ um_advance_iovecs_for_writing(&iovecs_ptr, &iovecs_len, (size_t)writev_res);
702
+ if (file_offset != (__u64)-1) file_offset += writev_res;
621
703
  }
622
704
 
623
705
  cancelled:
624
- um_cancel_and_wait(machine, &op);
706
+ um_cancel_op_and_await_cqe(machine, op);
625
707
  done:
626
- free(iovecs);
708
+ um_op_release(machine, op);
709
+
627
710
  RAISE_IF_EXCEPTION(ret);
628
711
  RB_GC_GUARD(ret);
629
712
  um_raise_on_error_result(writev_res);
@@ -633,31 +716,29 @@ done:
633
716
  VALUE um_write_async(struct um *machine, int fd, VALUE buffer, size_t len, __u64 file_offset) {
634
717
  const void *base;
635
718
  size_t size;
636
- um_get_buffer_bytes_for_writing(buffer, &base, &size);
719
+ um_get_buffer_bytes_for_writing(buffer, &base, &size, true);
637
720
  if ((len == (size_t)-1) || (len > size)) len = size;
638
721
  if (unlikely(!len)) return INT2NUM(0);
639
722
 
640
- struct um_op *op = um_op_alloc(machine);
641
- um_prep_op(machine, op, OP_WRITE_ASYNC, OP_F_TRANSIENT | OP_F_FREE_ON_COMPLETE);
723
+ struct um_op *op = um_op_acquire(machine);
724
+ um_prep_op(machine, op, OP_WRITE_ASYNC, 1, OP_F_ASYNC | OP_F_TRANSIENT);
642
725
  RB_OBJ_WRITE(machine->self, &op->value, buffer);
643
-
644
726
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
645
727
  io_uring_prep_write(sqe, fd, base, len, file_offset);
646
- um_op_transient_add(machine, op);
647
728
 
648
729
  return buffer;
649
730
  }
650
731
 
651
732
  VALUE um_close(struct um *machine, int fd) {
652
- struct um_op op;
653
- um_prep_op(machine, &op, OP_CLOSE, 0);
654
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
733
+ struct um_op *op = um_op_acquire(machine);
734
+ um_prep_op(machine, op, OP_CLOSE, 2, 0);
735
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
655
736
  io_uring_prep_close(sqe, fd);
656
737
 
657
738
  VALUE ret = um_yield(machine);
658
739
 
659
- if (um_check_completion(machine, &op))
660
- ret = INT2NUM(fd);
740
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(fd);
741
+ um_op_release(machine, op);
661
742
 
662
743
  RAISE_IF_EXCEPTION(ret);
663
744
  RB_GC_GUARD(ret);
@@ -665,9 +746,8 @@ VALUE um_close(struct um *machine, int fd) {
665
746
  }
666
747
 
667
748
  VALUE um_close_async(struct um *machine, int fd) {
668
- struct um_op *op = um_op_alloc(machine);
669
- um_prep_op(machine, op, OP_CLOSE_ASYNC, OP_F_FREE_ON_COMPLETE);
670
-
749
+ struct um_op *op = um_op_acquire(machine);
750
+ um_prep_op(machine, op, OP_CLOSE_ASYNC, 1, OP_F_ASYNC);
671
751
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
672
752
  io_uring_prep_close(sqe, fd);
673
753
 
@@ -675,15 +755,15 @@ VALUE um_close_async(struct um *machine, int fd) {
675
755
  }
676
756
 
677
757
  VALUE um_accept(struct um *machine, int fd) {
678
- struct um_op op;
679
- um_prep_op(machine, &op, OP_ACCEPT, 0);
680
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
758
+ struct um_op *op = um_op_acquire(machine);
759
+ um_prep_op(machine, op, OP_ACCEPT, 2, 0);
760
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
681
761
  io_uring_prep_accept(sqe, fd, NULL, NULL, 0);
682
762
 
683
763
  VALUE ret = um_yield(machine);
684
764
 
685
- if (um_check_completion(machine, &op))
686
- ret = INT2NUM(op.result.res);
765
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
766
+ um_op_release(machine, op);
687
767
 
688
768
  RAISE_IF_EXCEPTION(ret);
689
769
  RB_GC_GUARD(ret);
@@ -691,15 +771,15 @@ VALUE um_accept(struct um *machine, int fd) {
691
771
  }
692
772
 
693
773
  VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags) {
694
- struct um_op op;
695
- um_prep_op(machine, &op, OP_SOCKET, 0);
696
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
774
+ struct um_op *op = um_op_acquire(machine);
775
+ um_prep_op(machine, op, OP_SOCKET, 2, 0);
776
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
697
777
  io_uring_prep_socket(sqe, domain, type, protocol, flags);
698
778
 
699
779
  VALUE ret = um_yield(machine);
700
780
 
701
- if (um_check_completion(machine, &op))
702
- ret = INT2NUM(op.result.res);
781
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
782
+ um_op_release(machine, op);
703
783
 
704
784
  RAISE_IF_EXCEPTION(ret);
705
785
  RB_GC_GUARD(ret);
@@ -707,15 +787,15 @@ VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint fla
707
787
  }
708
788
 
709
789
  VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen) {
710
- struct um_op op;
711
- um_prep_op(machine, &op, OP_CONNECT, 0);
712
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
790
+ struct um_op *op = um_op_acquire(machine);
791
+ um_prep_op(machine, op, OP_CONNECT, 2, 0);
792
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
713
793
  io_uring_prep_connect(sqe, fd, addr, addrlen);
714
794
 
715
795
  VALUE ret = um_yield(machine);
716
796
 
717
- if (um_check_completion(machine, &op))
718
- ret = INT2NUM(op.result.res);
797
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
798
+ um_op_release(machine, op);
719
799
 
720
800
  RAISE_IF_EXCEPTION(ret);
721
801
  RB_GC_GUARD(ret);
@@ -723,27 +803,24 @@ VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, sockle
723
803
  }
724
804
 
725
805
  VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags) {
726
- struct um_op op;
727
- um_prep_op(machine, &op, OP_SEND, 0);
728
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
729
-
730
806
  const void *base;
731
807
  size_t size;
732
- um_get_buffer_bytes_for_writing(buffer, &base, &size);
808
+ um_get_buffer_bytes_for_writing(buffer, &base, &size, true);
733
809
  if ((len == (size_t)-1) || (len > size)) len = size;
734
810
 
811
+ struct um_op *op = um_op_acquire(machine);
812
+ um_prep_op(machine, op, OP_SEND, 2, 0);
813
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
735
814
  io_uring_prep_send(sqe, fd, base, len, flags);
736
815
 
737
816
  VALUE ret = um_yield(machine);
738
817
 
739
- if (um_check_completion(machine, &op))
740
- ret = INT2NUM(op.result.res);
818
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
819
+ um_op_release(machine, op);
741
820
 
742
821
  RAISE_IF_EXCEPTION(ret);
743
822
  RB_GC_GUARD(ret);
744
823
  return ret;
745
- // int ret = write(fd, base, len);
746
- // return UINT2NUM(ret);
747
824
  }
748
825
 
749
826
  // for some reason we don't get this define from liburing/io_uring.h
@@ -751,22 +828,17 @@ VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags) {
751
828
 
752
829
  VALUE um_sendv(struct um *machine, int fd, int argc, VALUE *argv) {
753
830
  struct iovec *iovecs = um_alloc_iovecs_for_writing(argc, argv, NULL);
754
- struct um_op op;
755
- um_prep_op(machine, &op, OP_SEND, 0);
756
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
757
-
831
+ struct um_op *op = um_op_acquire(machine);
832
+ op->iovecs = iovecs;
833
+ um_prep_op(machine, op, OP_SEND, 2, OP_F_FREE_IOVECS);
834
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
758
835
  io_uring_prep_send(sqe, fd, iovecs, argc, MSG_NOSIGNAL | MSG_WAITALL);
759
836
  sqe->ioprio |= IORING_SEND_VECTORIZED;
760
837
 
761
838
  VALUE ret = um_yield(machine);
762
839
 
763
- int completed = um_op_completed_p(&op);
764
- if (unlikely(!completed)) um_cancel_and_wait(machine, &op);
765
- free(iovecs);
766
- if (likely(completed)) {
767
- um_raise_on_error_result(op.result.res);
768
- ret = INT2NUM(op.result.res);
769
- }
840
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
841
+ um_op_release(machine, op);
770
842
 
771
843
  RAISE_IF_EXCEPTION(ret);
772
844
  RB_GC_GUARD(ret);
@@ -775,19 +847,17 @@ VALUE um_sendv(struct um *machine, int fd, int argc, VALUE *argv) {
775
847
 
776
848
  VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings) {
777
849
  um_add_strings_to_buffer_ring(machine, bgid, strings);
778
-
779
- struct um_op op;
780
- um_prep_op(machine, &op, OP_SEND_BUNDLE, 0);
781
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
782
-
850
+ struct um_op *op = um_op_acquire(machine);
851
+ um_prep_op(machine, op, OP_SEND_BUNDLE, 2, 0);
852
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
783
853
  io_uring_prep_send_bundle(sqe, fd, 0, MSG_NOSIGNAL | MSG_WAITALL);
784
854
  sqe->flags |= IOSQE_BUFFER_SELECT;
785
855
  sqe->buf_group = bgid;
786
856
 
787
857
  VALUE ret = um_yield(machine);
788
858
 
789
- if (um_check_completion(machine, &op))
790
- ret = INT2NUM(op.result.res);
859
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
860
+ um_op_release(machine, op);
791
861
 
792
862
  RAISE_IF_EXCEPTION(ret);
793
863
  RB_GC_GUARD(ret);
@@ -795,19 +865,19 @@ VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings) {
795
865
  }
796
866
 
797
867
  VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags) {
798
- struct um_op op;
799
- um_prep_op(machine, &op, OP_RECV, 0);
800
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
801
868
  void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
802
-
869
+ struct um_op *op = um_op_acquire(machine);
870
+ um_prep_op(machine, op, OP_RECV, 2, 0);
871
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
803
872
  io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
804
873
 
805
874
  VALUE ret = um_yield(machine);
806
875
 
807
- if (um_check_completion(machine, &op)) {
808
- um_update_read_buffer(machine, buffer, 0, op.result.res, op.result.flags);
809
- ret = INT2NUM(op.result.res);
876
+ if (likely(um_verify_op_completion(machine, op, true))) {
877
+ um_update_read_buffer(buffer, 0, op->result.res);
878
+ ret = INT2NUM(op->result.res);
810
879
  }
880
+ um_op_release(machine, op);
811
881
 
812
882
  RAISE_IF_EXCEPTION(ret);
813
883
  RB_GC_GUARD(ret);
@@ -815,15 +885,15 @@ VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags
815
885
  }
816
886
 
817
887
  VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen) {
818
- struct um_op op;
819
- um_prep_op(machine, &op, OP_BIND, 0);
820
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
888
+ struct um_op *op = um_op_acquire(machine);
889
+ um_prep_op(machine, op, OP_BIND, 2, 0);
890
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
821
891
  io_uring_prep_bind(sqe, fd, addr, addrlen);
822
892
 
823
893
  VALUE ret = um_yield(machine);
824
894
 
825
- if (um_check_completion(machine, &op))
826
- ret = INT2NUM(op.result.res);
895
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
896
+ um_op_release(machine, op);
827
897
 
828
898
  RAISE_IF_EXCEPTION(ret);
829
899
  RB_GC_GUARD(ret);
@@ -831,15 +901,15 @@ VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrl
831
901
  }
832
902
 
833
903
  VALUE um_listen(struct um *machine, int fd, int backlog) {
834
- struct um_op op;
835
- um_prep_op(machine, &op, OP_BIND, 0);
836
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
904
+ struct um_op *op = um_op_acquire(machine);
905
+ um_prep_op(machine, op, OP_BIND, 2, 0);
906
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
837
907
  io_uring_prep_listen(sqe, fd, backlog);
838
908
 
839
909
  VALUE ret = um_yield(machine);
840
910
 
841
- if (um_check_completion(machine, &op))
842
- ret = INT2NUM(op.result.res);
911
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
912
+ um_op_release(machine, op);
843
913
 
844
914
  RAISE_IF_EXCEPTION(ret);
845
915
  RB_GC_GUARD(ret);
@@ -848,17 +918,15 @@ VALUE um_listen(struct um *machine, int fd, int backlog) {
848
918
 
849
919
  VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
850
920
  VALUE ret = Qnil;
851
- int value;
852
-
853
- struct um_op op;
854
- um_prep_op(machine, &op, OP_GETSOCKOPT, 0);
855
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
856
- io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &value, sizeof(value));
921
+ struct um_op *op = um_op_acquire(machine);
922
+ um_prep_op(machine, op, OP_GETSOCKOPT, 2, 0);
923
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
924
+ io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_GETSOCKOPT, fd, level, opt, &op->int_value, sizeof(op->int_value));
857
925
 
858
926
  ret = um_yield(machine);
859
927
 
860
- if (um_check_completion(machine, &op))
861
- ret = INT2NUM(value);
928
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->int_value);
929
+ um_op_release(machine, op);
862
930
 
863
931
  RAISE_IF_EXCEPTION(ret);
864
932
  RB_GC_GUARD(ret);
@@ -867,16 +935,16 @@ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
867
935
 
868
936
  VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
869
937
  VALUE ret = Qnil;
870
-
871
- struct um_op op;
872
- um_prep_op(machine, &op, OP_SETSOCKOPT, 0);
873
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
874
- io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &value, sizeof(value));
938
+ struct um_op *op = um_op_acquire(machine);
939
+ um_prep_op(machine, op, OP_SETSOCKOPT, 2, 0);
940
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
941
+ op->int_value = value;
942
+ io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SETSOCKOPT, fd, level, opt, &op->int_value, sizeof(op->int_value));
875
943
 
876
944
  ret = um_yield(machine);
877
945
 
878
- if (um_check_completion(machine, &op))
879
- ret = INT2NUM(op.result.res);
946
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
947
+ um_op_release(machine, op);
880
948
 
881
949
  RAISE_IF_EXCEPTION(ret);
882
950
  RB_GC_GUARD(ret);
@@ -885,16 +953,15 @@ VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
885
953
 
886
954
  VALUE um_shutdown(struct um *machine, int fd, int how) {
887
955
  VALUE ret = Qnil;
888
-
889
- struct um_op op;
890
- um_prep_op(machine, &op, OP_SHUTDOWN, 0);
891
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
956
+ struct um_op *op = um_op_acquire(machine);
957
+ um_prep_op(machine, op, OP_SHUTDOWN, 2, 0);
958
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
892
959
  io_uring_prep_shutdown(sqe, fd, how);
893
960
 
894
961
  ret = um_yield(machine);
895
962
 
896
- if (um_check_completion(machine, &op))
897
- ret = INT2NUM(op.result.res);
963
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
964
+ um_op_release(machine, op);
898
965
 
899
966
  RAISE_IF_EXCEPTION(ret);
900
967
  RB_GC_GUARD(ret);
@@ -902,9 +969,8 @@ VALUE um_shutdown(struct um *machine, int fd, int how) {
902
969
  }
903
970
 
904
971
  VALUE um_shutdown_async(struct um *machine, int fd, int how) {
905
- struct um_op *op = um_op_alloc(machine);
906
- um_prep_op(machine, op, OP_SHUTDOWN_ASYNC, OP_F_FREE_ON_COMPLETE);
907
-
972
+ struct um_op *op = um_op_acquire(machine);
973
+ um_prep_op(machine, op, OP_SHUTDOWN_ASYNC, 1, OP_F_ASYNC);
908
974
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
909
975
  io_uring_prep_shutdown(sqe, fd, how);
910
976
 
@@ -912,15 +978,15 @@ VALUE um_shutdown_async(struct um *machine, int fd, int how) {
912
978
  }
913
979
 
914
980
  VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode) {
915
- struct um_op op;
916
- um_prep_op(machine, &op, OP_OPEN, 0);
917
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
981
+ struct um_op *op = um_op_acquire(machine);
982
+ um_prep_op(machine, op, OP_OPEN, 2, 0);
983
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
918
984
  io_uring_prep_open(sqe, StringValueCStr(pathname), flags, mode);
919
985
 
920
986
  VALUE ret = um_yield(machine);
921
987
 
922
- if (um_check_completion(machine, &op))
923
- ret = INT2NUM(op.result.res);
988
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
989
+ um_op_release(machine, op);
924
990
 
925
991
  RAISE_IF_EXCEPTION(ret);
926
992
  RB_GC_GUARD(ret);
@@ -928,27 +994,25 @@ VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode) {
928
994
  }
929
995
 
930
996
  VALUE um_poll(struct um *machine, int fd, unsigned mask) {
931
- struct um_op op;
932
- um_prep_op(machine, &op, OP_POLL, 0);
933
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
997
+ struct um_op *op = um_op_acquire(machine);
998
+ um_prep_op(machine, op, OP_POLL, 2, 0);
999
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
934
1000
  io_uring_prep_poll_add(sqe, fd, mask);
935
1001
 
936
1002
  VALUE ret = um_yield(machine);
937
1003
 
938
- if (um_check_completion(machine, &op))
939
- ret = INT2NUM(op.result.res);
1004
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
1005
+ um_op_release(machine, op);
940
1006
 
941
1007
  RAISE_IF_EXCEPTION(ret);
942
1008
  RB_GC_GUARD(ret);
943
- RB_GC_GUARD(op.fiber);
944
- RB_GC_GUARD(op.value);
945
1009
  return ret;
946
1010
  }
947
1011
 
948
- static inline void prepare_select_poll_ops(struct um *machine, uint *idx, struct um_op *ops, VALUE fds, uint len, uint flags, uint event) {
1012
+ static inline void prepare_select_poll_ops(struct um *machine, uint *idx, struct um_op **ops, VALUE fds, uint len, uint flags, uint event) {
949
1013
  for (uint i = 0; i < len; i++) {
950
- struct um_op *op = ops + ((*idx)++);
951
- um_prep_op(machine, op, OP_POLL, flags | OP_F_IGNORE_CANCELED);
1014
+ struct um_op *op = ops[(*idx)++] = um_op_acquire(machine);
1015
+ um_prep_op(machine, op, OP_POLL, 2, flags);
952
1016
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
953
1017
  VALUE fd = rb_ary_entry(fds, i);
954
1018
  RB_OBJ_WRITE(machine->self, &op->value, fd);
@@ -957,8 +1021,9 @@ static inline void prepare_select_poll_ops(struct um *machine, uint *idx, struct
957
1021
  }
958
1022
 
959
1023
  VALUE um_select_single(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds, uint rfds_len, uint wfds_len, uint efds_len) {
960
- struct um_op op;
1024
+ struct um_op *op;
961
1025
  uint idx = 0;
1026
+
962
1027
  if (rfds_len)
963
1028
  prepare_select_poll_ops(machine, &idx, &op, rfds, rfds_len, OP_F_SELECT_POLLIN, POLLIN);
964
1029
  else if (wfds_len)
@@ -969,12 +1034,14 @@ VALUE um_select_single(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds, u
969
1034
 
970
1035
  VALUE ret = um_yield(machine);
971
1036
 
972
- um_check_completion(machine, &op);
1037
+ um_verify_op_completion(machine, op, false);
1038
+ uint flags = op->flags;
1039
+ um_op_release(machine, op);
973
1040
  RAISE_IF_EXCEPTION(ret);
974
1041
 
975
- if (op.flags & OP_F_SELECT_POLLIN)
1042
+ if (flags & OP_F_SELECT_POLLIN)
976
1043
  return rb_ary_new3(3, rb_ary_new3(1, ret), rb_ary_new(), rb_ary_new());
977
- else if (op.flags & OP_F_SELECT_POLLOUT)
1044
+ else if (flags & OP_F_SELECT_POLLOUT)
978
1045
  return rb_ary_new3(3, rb_ary_new(), rb_ary_new3(1, ret), rb_ary_new());
979
1046
  else
980
1047
  return rb_ary_new3(3, rb_ary_new(), rb_ary_new(), rb_ary_new3(1, ret));
@@ -993,7 +1060,7 @@ VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
993
1060
  if (unlikely(!total_len))
994
1061
  return rb_ary_new3(3, rb_ary_new(), rb_ary_new(), rb_ary_new());
995
1062
 
996
- struct um_op *ops = malloc(sizeof(struct um_op) * total_len);
1063
+ struct um_op **ops = malloc(sizeof(struct um_op *) * total_len);
997
1064
  uint idx = 0;
998
1065
  prepare_select_poll_ops(machine, &idx, ops, rfds, rfds_len, OP_F_SELECT_POLLIN, POLLIN);
999
1066
  prepare_select_poll_ops(machine, &idx, ops, wfds, wfds_len, OP_F_SELECT_POLLOUT, POLLOUT);
@@ -1001,50 +1068,35 @@ VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
1001
1068
  assert(idx == total_len);
1002
1069
 
1003
1070
  VALUE ret = um_yield(machine);
1004
- if (unlikely(um_value_is_exception_p(ret))) {
1005
- free(ops);
1006
- um_raise_exception(ret);
1007
- }
1008
1071
 
1009
1072
  VALUE rfds_out = rb_ary_new();
1010
1073
  VALUE wfds_out = rb_ary_new();
1011
1074
  VALUE efds_out = rb_ary_new();
1012
1075
 
1013
1076
  int error_code = 0;
1014
- uint pending = total_len;
1015
1077
  for (uint i = 0; i < total_len; i++) {
1016
- if (um_op_completed_p(&ops[i])) {
1017
- ops[i].flags |= OP_F_RUNQUEUE_SKIP;
1018
- pending--;
1078
+ if (OP_CQE_DONE_P(ops[i])) {
1079
+ if (OP_SCHEDULED_P(ops[i])) ops[i]->flags |= OP_F_SKIP;
1019
1080
 
1020
- if (unlikely((ops[i].result.res < 0) && !error_code)) {
1021
- error_code = ops[i].result.res;
1081
+ if (unlikely((ops[i]->result.res < 0) && !error_code)) {
1082
+ error_code = ops[i]->result.res;
1022
1083
  }
1023
1084
  else {
1024
- if (ops[i].flags & OP_F_SELECT_POLLIN) rb_ary_push(rfds_out, ops[i].value);
1025
- if (ops[i].flags & OP_F_SELECT_POLLOUT) rb_ary_push(wfds_out, ops[i].value);
1026
- if (ops[i].flags & OP_F_SELECT_POLLPRI) rb_ary_push(efds_out, ops[i].value);
1085
+ if (ops[i]->flags & OP_F_SELECT_POLLIN) rb_ary_push(rfds_out, ops[i]->value);
1086
+ if (ops[i]->flags & OP_F_SELECT_POLLOUT) rb_ary_push(wfds_out, ops[i]->value);
1087
+ if (ops[i]->flags & OP_F_SELECT_POLLPRI) rb_ary_push(efds_out, ops[i]->value);
1027
1088
  }
1028
1089
  }
1029
1090
  else {
1030
- ops[i].flags |= OP_F_CANCELED;
1031
- um_cancel_op(machine, &ops[i]);
1091
+ um_cancel_op_and_discard_cqe(machine, ops[i]);
1032
1092
  }
1033
1093
  }
1034
1094
 
1035
- while (pending) {
1036
- um_wait_for_and_process_ready_cqes(machine, 1);
1037
-
1038
- for (uint i = 0; i < total_len; i++) {
1039
- struct um_op *op = ops + i;
1040
- if (op->flags & OP_F_CANCELED && um_op_completed_p(op)) {
1041
- pending--;
1042
- }
1043
- }
1044
- }
1095
+ for (uint i = 0; i < total_len; i++) um_op_release(machine, ops[i]);
1045
1096
  free(ops);
1046
1097
 
1047
- if (error_code)
1098
+ RAISE_IF_EXCEPTION(ret);
1099
+ if (unlikely(error_code))
1048
1100
  um_raise_on_error_result(error_code);
1049
1101
 
1050
1102
  return rb_ary_new3(3, rfds_out, wfds_out, efds_out);
@@ -1054,44 +1106,49 @@ VALUE um_select(struct um *machine, VALUE rfds, VALUE wfds, VALUE efds) {
1054
1106
  RB_GC_GUARD(efds_out);
1055
1107
  }
1056
1108
 
1057
- VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
1058
- struct um_op op;
1059
- um_prep_op(machine, &op, OP_WAITID, 0);
1060
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
1109
+ static inline VALUE siginfo_to_array(siginfo_t *info) {
1110
+ return rb_ary_new_from_args(
1111
+ 3,
1112
+ INT2NUM(info->si_pid),
1113
+ INT2NUM(info->si_status),
1114
+ INT2NUM(info->si_code)
1115
+ );
1116
+ }
1061
1117
 
1062
- siginfo_t infop;
1063
- io_uring_prep_waitid(sqe, idtype, id, &infop, options, 0);
1118
+ VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
1119
+ struct um_op *op = um_op_acquire(machine);
1120
+ um_prep_op(machine, op, OP_WAITID, 2, 0);
1121
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1122
+ io_uring_prep_waitid(sqe, idtype, id, &op->siginfo, options, 0);
1064
1123
 
1065
1124
  VALUE ret = um_yield(machine);
1066
1125
 
1067
- if (um_check_completion(machine, &op))
1068
- ret = INT2NUM(op.result.res);
1126
+ if (likely(um_verify_op_completion(machine, op, false))) ret = INT2NUM(op->result.res);
1127
+ um_op_release(machine, op);
1069
1128
 
1070
1129
  RAISE_IF_EXCEPTION(ret);
1071
1130
  RB_GC_GUARD(ret);
1072
1131
 
1073
- return rb_ary_new_from_args(
1074
- 3, INT2NUM(infop.si_pid), INT2NUM(infop.si_status), INT2NUM(infop.si_code)
1075
- );
1132
+ return siginfo_to_array(&op->siginfo);
1076
1133
  }
1077
1134
 
1078
1135
  #ifdef HAVE_RB_PROCESS_STATUS_NEW
1079
1136
  VALUE um_waitid_status(struct um *machine, int idtype, int id, int options) {
1080
- struct um_op op;
1081
- um_prep_op(machine, &op, OP_WAITID, 0);
1082
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
1083
-
1084
- siginfo_t infop;
1085
- io_uring_prep_waitid(sqe, idtype, id, &infop, options | WNOWAIT, 0);
1137
+ struct um_op *op = um_op_acquire(machine);
1138
+ um_prep_op(machine, op, OP_WAITID, 2, 0);
1139
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1140
+ io_uring_prep_waitid(sqe, idtype, id, &op->siginfo, options | WNOWAIT, 0);
1086
1141
 
1087
1142
  VALUE ret = um_yield(machine);
1088
- if (um_check_completion(machine, &op))
1089
- ret = INT2NUM(op.result.res);
1143
+
1144
+ if (likely(um_verify_op_completion(machine, op))) ret = INT2NUM(op->result.res);
1145
+ siginfo_t siginfo = op->siginfo;
1146
+ um_op_release(machine, op);
1090
1147
 
1091
1148
  RAISE_IF_EXCEPTION(ret);
1092
1149
  RB_GC_GUARD(ret);
1093
1150
 
1094
- return rb_process_status_new(infop.si_pid, (infop.si_status & 0xff) << 8, 0);
1151
+ return rb_process_status_new(siginfo.si_pid, (siginfo.si_status & 0xff) << 8, 0);
1095
1152
  }
1096
1153
  #endif
1097
1154
 
@@ -1121,10 +1178,9 @@ VALUE statx_to_hash(struct statx *stat) {
1121
1178
  VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned int mask) {
1122
1179
  static char empty_path[] = "";
1123
1180
 
1124
- struct um_op op;
1125
- um_prep_op(machine, &op, OP_STATX, 0);
1126
- struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
1127
-
1181
+ struct um_op *op = um_op_acquire(machine);
1182
+ um_prep_op(machine, op, OP_STATX, 2, 0);
1183
+ struct io_uring_sqe *sqe = um_get_sqe(machine, op);
1128
1184
  char *path_ptr = NIL_P(path) ? empty_path : StringValueCStr(path);
1129
1185
  struct statx stat;
1130
1186
  memset(&stat, 0, sizeof(stat));
@@ -1132,8 +1188,8 @@ VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned in
1132
1188
 
1133
1189
  VALUE ret = um_yield(machine);
1134
1190
 
1135
- if (um_check_completion(machine, &op))
1136
- ret = INT2NUM(op.result.res);
1191
+ if (likely(um_verify_op_completion(machine, op, true))) ret = INT2NUM(op->result.res);
1192
+ um_op_release(machine, op);
1137
1193
 
1138
1194
  RAISE_IF_EXCEPTION(ret);
1139
1195
  RB_GC_GUARD(ret);
@@ -1152,28 +1208,24 @@ VALUE accept_each_start(VALUE arg) {
1152
1208
 
1153
1209
  while (true) {
1154
1210
  VALUE ret = um_yield(ctx->machine);
1155
- if (!um_op_completed_p(ctx->op)) {
1156
- RAISE_IF_EXCEPTION(ret);
1157
- return ret;
1158
- }
1211
+
1212
+ RAISE_IF_EXCEPTION(ret);
1213
+ if (unlikely(!OP_CQE_SEEN_P(ctx->op))) return ret;
1159
1214
  RB_GC_GUARD(ret);
1160
1215
 
1161
- int more = false;
1162
1216
  struct um_op_result *result = &ctx->op->result;
1163
1217
  while (result) {
1164
- more = (result->flags & IORING_CQE_F_MORE);
1165
- if (result->res < 0) {
1166
- um_op_multishot_results_clear(ctx->machine, ctx->op);
1218
+ if (unlikely(result->res < 0)) {
1167
1219
  rb_syserr_fail(-result->res, strerror(-result->res));
1168
1220
  }
1169
1221
  rb_yield(INT2NUM(result->res));
1170
1222
  result = result->next;
1171
1223
  }
1224
+
1225
+ if (OP_CQE_DONE_P(ctx->op)) break;
1226
+
1172
1227
  um_op_multishot_results_clear(ctx->machine, ctx->op);
1173
- if (more)
1174
- ctx->op->flags &= ~OP_F_COMPLETED;
1175
- else
1176
- break;
1228
+ ctx->op->flags &= ~OP_F_CQE_SEEN;
1177
1229
  }
1178
1230
 
1179
1231
  return Qnil;
@@ -1186,28 +1238,24 @@ VALUE accept_into_queue_start(VALUE arg) {
1186
1238
 
1187
1239
  while (true) {
1188
1240
  VALUE ret = um_yield(ctx->machine);
1189
- if (!um_op_completed_p(ctx->op)) {
1190
- RAISE_IF_EXCEPTION(ret);
1191
- return ret;
1192
- }
1241
+
1242
+ RAISE_IF_EXCEPTION(ret);
1243
+ if (unlikely(!OP_CQE_SEEN_P(ctx->op))) return ret;
1193
1244
  RB_GC_GUARD(ret);
1194
1245
 
1195
- int more = false;
1196
1246
  struct um_op_result *result = &ctx->op->result;
1197
1247
  while (result) {
1198
- more = (result->flags & IORING_CQE_F_MORE);
1199
- if (result->res < 0) {
1200
- um_op_multishot_results_clear(ctx->machine, ctx->op);
1248
+ if (unlikely(result->res < 0)) {
1201
1249
  rb_syserr_fail(-result->res, strerror(-result->res));
1202
1250
  }
1203
1251
  um_queue_push(ctx->machine, ctx->queue, INT2NUM(result->res));
1204
1252
  result = result->next;
1205
1253
  }
1254
+
1255
+ if (OP_CQE_DONE_P(ctx->op)) break;
1256
+
1206
1257
  um_op_multishot_results_clear(ctx->machine, ctx->op);
1207
- if (more)
1208
- ctx->op->flags &= ~OP_F_COMPLETED;
1209
- else
1210
- break;
1258
+ ctx->op->flags &= ~OP_F_CQE_SEEN;
1211
1259
  }
1212
1260
 
1213
1261
  return Qnil;
@@ -1215,14 +1263,8 @@ VALUE accept_into_queue_start(VALUE arg) {
1215
1263
 
1216
1264
  VALUE multishot_complete(VALUE arg) {
1217
1265
  struct op_ctx *ctx = (struct op_ctx *)arg;
1218
- if (ctx->op->multishot_result_count) {
1219
- int more = ctx->op->multishot_result_tail->flags & IORING_CQE_F_MORE;
1220
- if (more)
1221
- ctx->op->flags &= ~OP_F_COMPLETED;
1222
- um_op_multishot_results_clear(ctx->machine, ctx->op);
1223
- }
1224
- if (!um_op_completed_p(ctx->op))
1225
- um_cancel_and_wait(ctx->machine, ctx->op);
1266
+ um_verify_op_completion(ctx->machine, ctx->op, true);
1267
+ um_op_release(ctx->machine, ctx->op);
1226
1268
 
1227
1269
  if (ctx->read_buf)
1228
1270
  free(ctx->read_buf);
@@ -1231,19 +1273,20 @@ VALUE multishot_complete(VALUE arg) {
1231
1273
  }
1232
1274
 
1233
1275
  VALUE um_accept_each(struct um *machine, int fd) {
1234
- struct um_op op;
1235
- um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT, OP_F_MULTISHOT);
1276
+ struct um_op *op = um_op_acquire(machine);
1277
+ um_prep_op(machine, op, OP_ACCEPT_MULTISHOT, 2, OP_F_MULTISHOT);
1236
1278
 
1237
- struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .read_buf = NULL };
1279
+ struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .read_buf = NULL };
1238
1280
  return rb_ensure(accept_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1239
1281
  }
1240
1282
 
1241
1283
  VALUE um_accept_into_queue(struct um *machine, int fd, VALUE queue) {
1242
- struct um_op op;
1243
- um_prep_op(machine, &op, OP_ACCEPT_MULTISHOT, OP_F_MULTISHOT);
1284
+ struct um_queue *queue_data = Queue_data(queue);
1285
+ struct um_op *op = um_op_acquire(machine);
1286
+ um_prep_op(machine, op, OP_ACCEPT_MULTISHOT, 2, OP_F_MULTISHOT);
1244
1287
 
1245
1288
  struct op_ctx ctx = {
1246
- .machine = machine, .op = &op, .fd = fd, .queue = Queue_data(queue), .read_buf = NULL
1289
+ .machine = machine, .op = op, .fd = fd, .queue = queue_data, .read_buf = NULL
1247
1290
  };
1248
1291
  return rb_ensure(accept_into_queue_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1249
1292
  }
@@ -1255,15 +1298,13 @@ int um_read_each_singleshot_loop(struct op_ctx *ctx) {
1255
1298
  int total = 0;
1256
1299
 
1257
1300
  while (1) {
1258
- um_prep_op(ctx->machine, ctx->op, OP_READ, 0);
1301
+ um_prep_op(ctx->machine, ctx->op, OP_READ, 2, 0);
1259
1302
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
1260
1303
  io_uring_prep_read(sqe, ctx->fd, ctx->read_buf, ctx->read_maxlen, -1);
1261
1304
 
1262
1305
  VALUE ret = um_yield(ctx->machine);
1263
- if (um_op_completed_p(ctx->op)) {
1264
- um_raise_on_error_result(ctx->op->result.res);
1265
- if (!ctx->op->result.res) return total;
1266
1306
 
1307
+ if (likely(um_verify_op_completion(ctx->machine, ctx->op, true))) {
1267
1308
  VALUE buf = rb_str_new(ctx->read_buf, ctx->op->result.res);
1268
1309
  total += ctx->op->result.res;
1269
1310
  rb_yield(buf);
@@ -1271,10 +1312,11 @@ int um_read_each_singleshot_loop(struct op_ctx *ctx) {
1271
1312
  }
1272
1313
  else {
1273
1314
  RAISE_IF_EXCEPTION(ret);
1274
- return ret;
1315
+ return 0;
1275
1316
  }
1276
1317
  RB_GC_GUARD(ret);
1277
1318
  }
1319
+ return 0;
1278
1320
  }
1279
1321
 
1280
1322
  // // returns true if more results are expected
@@ -1321,91 +1363,83 @@ VALUE read_recv_each_start(VALUE arg) {
1321
1363
 
1322
1364
  while (true) {
1323
1365
  VALUE ret = um_yield(ctx->machine);
1324
- if (!um_op_completed_p(ctx->op)) {
1325
- RAISE_IF_EXCEPTION(ret);
1326
- return ret;
1327
- }
1366
+
1367
+ RAISE_IF_EXCEPTION(ret);
1368
+ if (unlikely(!OP_CQE_SEEN_P(ctx->op))) return ret;
1328
1369
  RB_GC_GUARD(ret);
1329
1370
 
1330
- int more = false;
1331
1371
  struct um_op_result *result = &ctx->op->result;
1332
1372
  while (result) {
1333
1373
  um_raise_on_error_result(result->res);
1334
1374
 
1335
- more = (result->flags & IORING_CQE_F_MORE);
1336
1375
  if (!read_recv_each_multishot_process_result(ctx, result, &total))
1337
1376
  return Qnil;
1338
1377
 
1339
1378
  // rb_yield(INT2NUM(result->res));
1340
1379
  result = result->next;
1341
1380
  }
1381
+
1382
+ if (OP_CQE_DONE_P(ctx->op)) break;
1383
+
1342
1384
  um_op_multishot_results_clear(ctx->machine, ctx->op);
1343
- if (more)
1344
- ctx->op->flags &= ~OP_F_COMPLETED;
1345
- else
1346
- break;
1385
+ ctx->op->flags &= ~OP_F_CQE_SEEN;
1347
1386
  }
1348
1387
 
1349
1388
  return Qnil;
1350
1389
  }
1351
1390
 
1352
1391
  VALUE um_read_each(struct um *machine, int fd, int bgid) {
1353
- struct um_op op;
1354
- um_prep_op(machine, &op, OP_READ_MULTISHOT, OP_F_MULTISHOT);
1392
+ struct um_op *op = um_op_acquire(machine);
1393
+ um_prep_op(machine, op, OP_READ_MULTISHOT, 2, OP_F_MULTISHOT);
1355
1394
 
1356
- struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL };
1395
+ struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .bgid = bgid, .read_buf = NULL };
1357
1396
  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1358
1397
  }
1359
1398
 
1360
1399
  VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags) {
1361
- struct um_op op;
1362
- um_prep_op(machine, &op, OP_RECV_MULTISHOT, OP_F_MULTISHOT);
1400
+ struct um_op *op = um_op_acquire(machine);
1401
+ um_prep_op(machine, op, OP_RECV_MULTISHOT, 2, OP_F_MULTISHOT);
1363
1402
 
1364
- struct op_ctx ctx = { .machine = machine, .op = &op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
1403
+ struct op_ctx ctx = { .machine = machine, .op = op, .fd = fd, .bgid = bgid, .read_buf = NULL, .flags = flags };
1365
1404
  return rb_ensure(read_recv_each_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1366
1405
  }
1367
1406
 
1368
1407
  VALUE periodically_start(VALUE arg) {
1369
1408
  struct op_ctx *ctx = (struct op_ctx *)arg;
1370
1409
  struct io_uring_sqe *sqe = um_get_sqe(ctx->machine, ctx->op);
1371
- io_uring_prep_timeout(sqe, &ctx->ts, 0, IORING_TIMEOUT_MULTISHOT);
1410
+ io_uring_prep_timeout(sqe, &ctx->op->ts, 0, IORING_TIMEOUT_MULTISHOT);
1372
1411
 
1373
1412
  while (true) {
1374
1413
  VALUE ret = um_switch(ctx->machine);
1375
- if (!um_op_completed_p(ctx->op)) {
1376
- RAISE_IF_EXCEPTION(ret);
1377
- return ret;
1378
- }
1414
+
1415
+ RAISE_IF_EXCEPTION(ret);
1416
+ if (unlikely(!OP_CQE_SEEN_P(ctx->op))) return ret;
1379
1417
  RB_GC_GUARD(ret);
1380
1418
 
1381
- int more = false;
1382
1419
  struct um_op_result *result = &ctx->op->result;
1383
1420
  while (result) {
1384
- more = (result->flags & IORING_CQE_F_MORE);
1385
- if (result->res < 0 && result->res != -ETIME) {
1386
- um_op_multishot_results_clear(ctx->machine, ctx->op);
1387
- return Qnil;
1388
- }
1421
+ if (unlikely(result->res < 0 && result->res != -ETIME)) um_raise_on_error_result(result->res);
1422
+
1389
1423
  rb_yield(Qnil);
1390
1424
  result = result->next;
1391
1425
  }
1426
+ if (OP_CQE_DONE_P(ctx->op)) break;
1427
+
1392
1428
  um_op_multishot_results_clear(ctx->machine, ctx->op);
1393
- if (more)
1394
- ctx->op->flags &= ~OP_F_COMPLETED;
1395
- else
1396
- break;
1429
+ ctx->op->flags &= ~OP_F_CQE_SEEN;
1397
1430
  }
1398
1431
 
1399
1432
  return Qnil;
1400
1433
  }
1401
1434
 
1402
1435
  VALUE um_periodically(struct um *machine, double interval) {
1403
- struct um_op op;
1404
- um_prep_op(machine, &op, OP_SLEEP_MULTISHOT, OP_F_MULTISHOT);
1405
- op.ts = um_double_to_timespec(interval);
1436
+ struct um_op *op = um_op_acquire(machine);
1437
+ um_prep_op(machine, op, OP_TIMEOUT_MULTISHOT, 2, OP_F_MULTISHOT);
1438
+ op->ts = um_double_to_timespec(interval);
1406
1439
 
1407
- struct op_ctx ctx = { .machine = machine, .op = &op, .ts = op.ts, .read_buf = NULL };
1440
+ struct op_ctx ctx = { .machine = machine, .op = op, .read_buf = NULL };
1408
1441
  return rb_ensure(periodically_start, (VALUE)&ctx, multishot_complete, (VALUE)&ctx);
1442
+ return Qnil;
1409
1443
  }
1410
1444
 
1411
1445
  extern VALUE SYM_size;