polyphony 0.85 → 0.86

Files changed (230)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +4 -0
  3. data/Gemfile.lock +1 -1
  4. data/ext/polyphony/io_extensions.c +2 -3
  5. data/lib/polyphony/version.rb +1 -1
  6. data/polyphony.gemspec +1 -1
  7. data/test/test_backend.rb +1 -1
  8. data/test/test_signal.rb +3 -3
  9. data/vendor/liburing/.github/pull_request_template.md +86 -0
  10. data/vendor/liburing/.github/workflows/build.yml +85 -0
  11. data/vendor/liburing/.github/workflows/shellcheck.yml +20 -0
  12. data/vendor/liburing/.gitignore +149 -0
  13. data/vendor/liburing/COPYING +502 -0
  14. data/vendor/liburing/COPYING.GPL +339 -0
  15. data/vendor/liburing/LICENSE +7 -0
  16. data/vendor/liburing/Makefile +82 -0
  17. data/vendor/liburing/Makefile.common +5 -0
  18. data/vendor/liburing/Makefile.quiet +11 -0
  19. data/vendor/liburing/README +46 -0
  20. data/vendor/liburing/configure +486 -0
  21. data/vendor/liburing/debian/README.Debian +7 -0
  22. data/vendor/liburing/debian/changelog +27 -0
  23. data/vendor/liburing/debian/compat +1 -0
  24. data/vendor/liburing/debian/control +48 -0
  25. data/vendor/liburing/debian/copyright +49 -0
  26. data/vendor/liburing/debian/liburing-dev.install +4 -0
  27. data/vendor/liburing/debian/liburing-dev.manpages +6 -0
  28. data/vendor/liburing/debian/liburing1-udeb.install +1 -0
  29. data/vendor/liburing/debian/liburing1.install +1 -0
  30. data/vendor/liburing/debian/liburing1.symbols +32 -0
  31. data/vendor/liburing/debian/patches/series +1 -0
  32. data/vendor/liburing/debian/rules +81 -0
  33. data/vendor/liburing/debian/source/format +1 -0
  34. data/vendor/liburing/debian/source/local-options +2 -0
  35. data/vendor/liburing/debian/source/options +1 -0
  36. data/vendor/liburing/debian/watch +3 -0
  37. data/vendor/liburing/examples/Makefile +38 -0
  38. data/vendor/liburing/examples/io_uring-cp.c +282 -0
  39. data/vendor/liburing/examples/io_uring-test.c +112 -0
  40. data/vendor/liburing/examples/link-cp.c +193 -0
  41. data/vendor/liburing/examples/ucontext-cp.c +273 -0
  42. data/vendor/liburing/liburing.pc.in +12 -0
  43. data/vendor/liburing/liburing.spec +66 -0
  44. data/vendor/liburing/make-debs.sh +53 -0
  45. data/vendor/liburing/man/io_uring.7 +754 -0
  46. data/vendor/liburing/man/io_uring_cq_advance.3 +35 -0
  47. data/vendor/liburing/man/io_uring_cq_ready.3 +25 -0
  48. data/vendor/liburing/man/io_uring_cqe_get_data.3 +34 -0
  49. data/vendor/liburing/man/io_uring_cqe_seen.3 +32 -0
  50. data/vendor/liburing/man/io_uring_enter.2 +1483 -0
  51. data/vendor/liburing/man/io_uring_free_probe.3 +24 -0
  52. data/vendor/liburing/man/io_uring_get_probe.3 +29 -0
  53. data/vendor/liburing/man/io_uring_get_sqe.3 +38 -0
  54. data/vendor/liburing/man/io_uring_opcode_supported.3 +29 -0
  55. data/vendor/liburing/man/io_uring_prep_msg_ring.3 +58 -0
  56. data/vendor/liburing/man/io_uring_prep_read.3 +50 -0
  57. data/vendor/liburing/man/io_uring_prep_read_fixed.3 +54 -0
  58. data/vendor/liburing/man/io_uring_prep_readv.3 +51 -0
  59. data/vendor/liburing/man/io_uring_prep_readv2.3 +79 -0
  60. data/vendor/liburing/man/io_uring_prep_write.3 +50 -0
  61. data/vendor/liburing/man/io_uring_prep_write_fixed.3 +54 -0
  62. data/vendor/liburing/man/io_uring_prep_writev.3 +51 -0
  63. data/vendor/liburing/man/io_uring_prep_writev2.3 +78 -0
  64. data/vendor/liburing/man/io_uring_queue_exit.3 +27 -0
  65. data/vendor/liburing/man/io_uring_queue_init.3 +44 -0
  66. data/vendor/liburing/man/io_uring_register.2 +688 -0
  67. data/vendor/liburing/man/io_uring_register_buffers.3 +41 -0
  68. data/vendor/liburing/man/io_uring_register_files.3 +35 -0
  69. data/vendor/liburing/man/io_uring_setup.2 +534 -0
  70. data/vendor/liburing/man/io_uring_sq_ready.3 +25 -0
  71. data/vendor/liburing/man/io_uring_sq_space_left.3 +25 -0
  72. data/vendor/liburing/man/io_uring_sqe_set_data.3 +30 -0
  73. data/vendor/liburing/man/io_uring_sqe_set_flags.3 +60 -0
  74. data/vendor/liburing/man/io_uring_sqring_wait.3 +30 -0
  75. data/vendor/liburing/man/io_uring_submit.3 +29 -0
  76. data/vendor/liburing/man/io_uring_submit_and_wait.3 +34 -0
  77. data/vendor/liburing/man/io_uring_submit_and_wait_timeout.3 +49 -0
  78. data/vendor/liburing/man/io_uring_unregister_buffers.3 +26 -0
  79. data/vendor/liburing/man/io_uring_unregister_files.3 +26 -0
  80. data/vendor/liburing/man/io_uring_wait_cqe.3 +33 -0
  81. data/vendor/liburing/man/io_uring_wait_cqe_nr.3 +36 -0
  82. data/vendor/liburing/man/io_uring_wait_cqe_timeout.3 +39 -0
  83. data/vendor/liburing/man/io_uring_wait_cqes.3 +46 -0
  84. data/vendor/liburing/src/Makefile +89 -0
  85. data/vendor/liburing/src/arch/aarch64/syscall.h +95 -0
  86. data/vendor/liburing/src/arch/generic/lib.h +21 -0
  87. data/vendor/liburing/src/arch/generic/syscall.h +87 -0
  88. data/vendor/liburing/src/arch/syscall-defs.h +67 -0
  89. data/vendor/liburing/src/arch/x86/lib.h +32 -0
  90. data/vendor/liburing/src/arch/x86/syscall.h +160 -0
  91. data/vendor/liburing/src/include/liburing/barrier.h +81 -0
  92. data/vendor/liburing/src/include/liburing/io_uring.h +442 -0
  93. data/vendor/liburing/src/include/liburing.h +921 -0
  94. data/vendor/liburing/src/int_flags.h +8 -0
  95. data/vendor/liburing/src/lib.h +57 -0
  96. data/vendor/liburing/src/liburing.map +53 -0
  97. data/vendor/liburing/src/nolibc.c +48 -0
  98. data/vendor/liburing/src/queue.c +403 -0
  99. data/vendor/liburing/src/register.c +293 -0
  100. data/vendor/liburing/src/setup.c +332 -0
  101. data/vendor/liburing/src/syscall.c +47 -0
  102. data/vendor/liburing/src/syscall.h +103 -0
  103. data/vendor/liburing/test/232c93d07b74-test.c +306 -0
  104. data/vendor/liburing/test/35fa71a030ca-test.c +329 -0
  105. data/vendor/liburing/test/500f9fbadef8-test.c +89 -0
  106. data/vendor/liburing/test/7ad0e4b2f83c-test.c +93 -0
  107. data/vendor/liburing/test/8a9973408177-test.c +106 -0
  108. data/vendor/liburing/test/917257daa0fe-test.c +53 -0
  109. data/vendor/liburing/test/Makefile +244 -0
  110. data/vendor/liburing/test/a0908ae19763-test.c +58 -0
  111. data/vendor/liburing/test/a4c0b3decb33-test.c +180 -0
  112. data/vendor/liburing/test/accept-link.c +254 -0
  113. data/vendor/liburing/test/accept-reuse.c +164 -0
  114. data/vendor/liburing/test/accept-test.c +79 -0
  115. data/vendor/liburing/test/accept.c +477 -0
  116. data/vendor/liburing/test/across-fork.c +283 -0
  117. data/vendor/liburing/test/b19062a56726-test.c +53 -0
  118. data/vendor/liburing/test/b5837bd5311d-test.c +77 -0
  119. data/vendor/liburing/test/ce593a6c480a-test.c +136 -0
  120. data/vendor/liburing/test/close-opath.c +122 -0
  121. data/vendor/liburing/test/config +10 -0
  122. data/vendor/liburing/test/connect.c +398 -0
  123. data/vendor/liburing/test/cq-full.c +96 -0
  124. data/vendor/liburing/test/cq-overflow.c +294 -0
  125. data/vendor/liburing/test/cq-peek-batch.c +102 -0
  126. data/vendor/liburing/test/cq-ready.c +94 -0
  127. data/vendor/liburing/test/cq-size.c +64 -0
  128. data/vendor/liburing/test/d4ae271dfaae-test.c +96 -0
  129. data/vendor/liburing/test/d77a67ed5f27-test.c +65 -0
  130. data/vendor/liburing/test/defer.c +307 -0
  131. data/vendor/liburing/test/double-poll-crash.c +185 -0
  132. data/vendor/liburing/test/drop-submit.c +92 -0
  133. data/vendor/liburing/test/eeed8b54e0df-test.c +114 -0
  134. data/vendor/liburing/test/empty-eownerdead.c +45 -0
  135. data/vendor/liburing/test/eventfd-disable.c +151 -0
  136. data/vendor/liburing/test/eventfd-reg.c +76 -0
  137. data/vendor/liburing/test/eventfd-ring.c +97 -0
  138. data/vendor/liburing/test/eventfd.c +112 -0
  139. data/vendor/liburing/test/exec-target.c +6 -0
  140. data/vendor/liburing/test/exit-no-cleanup.c +117 -0
  141. data/vendor/liburing/test/fadvise.c +202 -0
  142. data/vendor/liburing/test/fallocate.c +249 -0
  143. data/vendor/liburing/test/fc2a85cb02ef-test.c +131 -0
  144. data/vendor/liburing/test/file-register.c +858 -0
  145. data/vendor/liburing/test/file-update.c +173 -0
  146. data/vendor/liburing/test/file-verify.c +629 -0
  147. data/vendor/liburing/test/files-exit-hang-poll.c +128 -0
  148. data/vendor/liburing/test/files-exit-hang-timeout.c +134 -0
  149. data/vendor/liburing/test/fixed-link.c +90 -0
  150. data/vendor/liburing/test/fpos.c +252 -0
  151. data/vendor/liburing/test/fsync.c +224 -0
  152. data/vendor/liburing/test/hardlink.c +136 -0
  153. data/vendor/liburing/test/helpers.c +135 -0
  154. data/vendor/liburing/test/helpers.h +67 -0
  155. data/vendor/liburing/test/io-cancel.c +550 -0
  156. data/vendor/liburing/test/io_uring_enter.c +296 -0
  157. data/vendor/liburing/test/io_uring_register.c +676 -0
  158. data/vendor/liburing/test/io_uring_setup.c +192 -0
  159. data/vendor/liburing/test/iopoll.c +372 -0
  160. data/vendor/liburing/test/lfs-openat-write.c +119 -0
  161. data/vendor/liburing/test/lfs-openat.c +275 -0
  162. data/vendor/liburing/test/link-timeout.c +1107 -0
  163. data/vendor/liburing/test/link.c +496 -0
  164. data/vendor/liburing/test/link_drain.c +229 -0
  165. data/vendor/liburing/test/madvise.c +195 -0
  166. data/vendor/liburing/test/mkdir.c +108 -0
  167. data/vendor/liburing/test/msg-ring.c +234 -0
  168. data/vendor/liburing/test/multicqes_drain.c +387 -0
  169. data/vendor/liburing/test/nop-all-sizes.c +99 -0
  170. data/vendor/liburing/test/nop.c +115 -0
  171. data/vendor/liburing/test/open-close.c +261 -0
  172. data/vendor/liburing/test/openat2.c +308 -0
  173. data/vendor/liburing/test/personality.c +204 -0
  174. data/vendor/liburing/test/pipe-eof.c +83 -0
  175. data/vendor/liburing/test/pipe-reuse.c +105 -0
  176. data/vendor/liburing/test/poll-cancel-ton.c +135 -0
  177. data/vendor/liburing/test/poll-cancel.c +228 -0
  178. data/vendor/liburing/test/poll-link.c +230 -0
  179. data/vendor/liburing/test/poll-many.c +208 -0
  180. data/vendor/liburing/test/poll-mshot-update.c +273 -0
  181. data/vendor/liburing/test/poll-ring.c +48 -0
  182. data/vendor/liburing/test/poll-v-poll.c +353 -0
  183. data/vendor/liburing/test/poll.c +109 -0
  184. data/vendor/liburing/test/pollfree.c +426 -0
  185. data/vendor/liburing/test/probe.c +135 -0
  186. data/vendor/liburing/test/read-write.c +876 -0
  187. data/vendor/liburing/test/register-restrictions.c +633 -0
  188. data/vendor/liburing/test/rename.c +135 -0
  189. data/vendor/liburing/test/ring-leak.c +173 -0
  190. data/vendor/liburing/test/ring-leak2.c +249 -0
  191. data/vendor/liburing/test/rsrc_tags.c +449 -0
  192. data/vendor/liburing/test/runtests-loop.sh +16 -0
  193. data/vendor/liburing/test/runtests.sh +170 -0
  194. data/vendor/liburing/test/rw_merge_test.c +97 -0
  195. data/vendor/liburing/test/self.c +91 -0
  196. data/vendor/liburing/test/send_recv.c +286 -0
  197. data/vendor/liburing/test/send_recvmsg.c +345 -0
  198. data/vendor/liburing/test/sendmsg_fs_cve.c +200 -0
  199. data/vendor/liburing/test/shared-wq.c +84 -0
  200. data/vendor/liburing/test/short-read.c +75 -0
  201. data/vendor/liburing/test/shutdown.c +165 -0
  202. data/vendor/liburing/test/sigfd-deadlock.c +74 -0
  203. data/vendor/liburing/test/skip-cqe.c +429 -0
  204. data/vendor/liburing/test/socket-rw-eagain.c +158 -0
  205. data/vendor/liburing/test/socket-rw-offset.c +157 -0
  206. data/vendor/liburing/test/socket-rw.c +145 -0
  207. data/vendor/liburing/test/splice.c +512 -0
  208. data/vendor/liburing/test/sq-full-cpp.cc +45 -0
  209. data/vendor/liburing/test/sq-full.c +45 -0
  210. data/vendor/liburing/test/sq-poll-dup.c +204 -0
  211. data/vendor/liburing/test/sq-poll-kthread.c +169 -0
  212. data/vendor/liburing/test/sq-poll-share.c +137 -0
  213. data/vendor/liburing/test/sq-space_left.c +159 -0
  214. data/vendor/liburing/test/sqpoll-cancel-hang.c +157 -0
  215. data/vendor/liburing/test/sqpoll-disable-exit.c +196 -0
  216. data/vendor/liburing/test/sqpoll-exit-hang.c +78 -0
  217. data/vendor/liburing/test/sqpoll-sleep.c +69 -0
  218. data/vendor/liburing/test/statx.c +172 -0
  219. data/vendor/liburing/test/stdout.c +232 -0
  220. data/vendor/liburing/test/submit-link-fail.c +154 -0
  221. data/vendor/liburing/test/submit-reuse.c +239 -0
  222. data/vendor/liburing/test/symlink.c +116 -0
  223. data/vendor/liburing/test/teardowns.c +58 -0
  224. data/vendor/liburing/test/thread-exit.c +143 -0
  225. data/vendor/liburing/test/timeout-new.c +252 -0
  226. data/vendor/liburing/test/timeout-overflow.c +204 -0
  227. data/vendor/liburing/test/timeout.c +1523 -0
  228. data/vendor/liburing/test/unlink.c +112 -0
  229. data/vendor/liburing/test/wakeup-hang.c +162 -0
  230. metadata +223 -2
data/vendor/liburing/src/include/liburing.h (new file)
@@ -0,0 +1,921 @@
+ /* SPDX-License-Identifier: MIT */
+ #ifndef LIB_URING_H
+ #define LIB_URING_H
+
+ #ifndef _XOPEN_SOURCE
+ #define _XOPEN_SOURCE 500 /* Required for glibc to expose sigset_t */
+ #endif
+
+ #include <sys/socket.h>
+ #include <sys/stat.h>
+ #include <sys/uio.h>
+ #include <errno.h>
+ #include <signal.h>
+ #include <stdbool.h>
+ #include <inttypes.h>
+ #include <time.h>
+ #include <sched.h>
+ #include <linux/swab.h>
+ #include "liburing/compat.h"
+ #include "liburing/io_uring.h"
+ #include "liburing/barrier.h"
+
+ #ifndef uring_unlikely
+ # define uring_unlikely(cond) __builtin_expect(!!(cond), 0)
+ #endif
+
+ #ifndef uring_likely
+ # define uring_likely(cond) __builtin_expect(!!(cond), 1)
+ #endif
+
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ /*
+  * Library interface to io_uring
+  */
+ struct io_uring_sq {
+         unsigned *khead;
+         unsigned *ktail;
+         unsigned *kring_mask;
+         unsigned *kring_entries;
+         unsigned *kflags;
+         unsigned *kdropped;
+         unsigned *array;
+         struct io_uring_sqe *sqes;
+
+         unsigned sqe_head;
+         unsigned sqe_tail;
+
+         size_t ring_sz;
+         void *ring_ptr;
+
+         unsigned pad[4];
+ };
+
+ struct io_uring_cq {
+         unsigned *khead;
+         unsigned *ktail;
+         unsigned *kring_mask;
+         unsigned *kring_entries;
+         unsigned *kflags;
+         unsigned *koverflow;
+         struct io_uring_cqe *cqes;
+
+         size_t ring_sz;
+         void *ring_ptr;
+
+         unsigned pad[4];
+ };
+
+ struct io_uring {
+         struct io_uring_sq sq;
+         struct io_uring_cq cq;
+         unsigned flags;
+         int ring_fd;
+
+         unsigned features;
+         int enter_ring_fd;
+         __u8 int_flags;
+         __u8 pad[3];
+         unsigned pad2;
+ };
+
+ /*
+  * Library interface
+  */
+
+ /*
+  * Return an allocated io_uring_probe structure, or NULL if the probe fails
+  * (for example, if it is not available). The caller is responsible for
+  * freeing it.
+  */
+ struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring);
+ /* same as io_uring_get_probe_ring, but takes care of ring init and teardown */
+ struct io_uring_probe *io_uring_get_probe(void);
+
+ /*
+  * frees a probe allocated through io_uring_get_probe() or
+  * io_uring_get_probe_ring()
+  */
+ void io_uring_free_probe(struct io_uring_probe *probe);
+
+ static inline int io_uring_opcode_supported(const struct io_uring_probe *p, int op)
+ {
+         if (op > p->last_op)
+                 return 0;
+         return (p->ops[op].flags & IO_URING_OP_SUPPORTED) != 0;
+ }
+
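Reviewer annotation (not part of the diff): a minimal sketch of how the probe API above is typically used to gate a feature at runtime, assuming "liburing.h" is on the include path; IORING_OP_OPENAT2 stands in for whatever newer opcode a caller cares about.

#include "liburing.h"

/* Returns nonzero if the running kernel supports IORING_OP_OPENAT2. */
static int supports_openat2(void)
{
        struct io_uring_probe *probe = io_uring_get_probe();
        int ret;

        if (!probe)
                return 0;               /* probing itself unavailable */
        ret = io_uring_opcode_supported(probe, IORING_OP_OPENAT2);
        io_uring_free_probe(probe);     /* caller must free the probe */
        return ret;
}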
+ int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
+                                struct io_uring_params *p);
+ int io_uring_queue_init(unsigned entries, struct io_uring *ring,
+                         unsigned flags);
+ int io_uring_queue_mmap(int fd, struct io_uring_params *p,
+                         struct io_uring *ring);
+ int io_uring_ring_dontfork(struct io_uring *ring);
+ void io_uring_queue_exit(struct io_uring *ring);
+ unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
+                                  struct io_uring_cqe **cqes, unsigned count);
+ int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
+                        unsigned wait_nr, struct __kernel_timespec *ts,
+                        sigset_t *sigmask);
+ int io_uring_wait_cqe_timeout(struct io_uring *ring,
+                               struct io_uring_cqe **cqe_ptr,
+                               struct __kernel_timespec *ts);
+ int io_uring_submit(struct io_uring *ring);
+ int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr);
+ int io_uring_submit_and_wait_timeout(struct io_uring *ring,
+                                      struct io_uring_cqe **cqe_ptr,
+                                      unsigned wait_nr,
+                                      struct __kernel_timespec *ts,
+                                      sigset_t *sigmask);
+
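Reviewer annotation (not part of the diff): the declarations above form the basic lifecycle. A minimal sketch, assuming "liburing.h" is included; error handling is trimmed to the essentials.

/* Init a ring, submit one NOP, wait for its completion, tear down. */
static int nop_once(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        ret = io_uring_queue_init(8, &ring, 0);
        if (ret < 0)
                return ret;

        sqe = io_uring_get_sqe(&ring);          /* grab a free SQE */
        io_uring_prep_nop(sqe);                 /* prep helpers appear below */
        io_uring_submit(&ring);                 /* tell the kernel about it */

        ret = io_uring_wait_cqe(&ring, &cqe);   /* block for the completion */
        if (!ret)
                io_uring_cqe_seen(&ring, cqe);  /* mark it consumed */
        io_uring_queue_exit(&ring);
        return ret;
}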
+ int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
+                               unsigned nr_iovecs);
+ int io_uring_register_buffers_tags(struct io_uring *ring,
+                                    const struct iovec *iovecs,
+                                    const __u64 *tags, unsigned nr);
+ int io_uring_register_buffers_update_tag(struct io_uring *ring,
+                                          unsigned off,
+                                          const struct iovec *iovecs,
+                                          const __u64 *tags, unsigned nr);
+ int io_uring_unregister_buffers(struct io_uring *ring);
+
+ int io_uring_register_files(struct io_uring *ring, const int *files,
+                             unsigned nr_files);
+ int io_uring_register_files_tags(struct io_uring *ring, const int *files,
+                                  const __u64 *tags, unsigned nr);
+ int io_uring_register_files_update_tag(struct io_uring *ring, unsigned off,
+                                        const int *files, const __u64 *tags,
+                                        unsigned nr_files);
+
+ int io_uring_unregister_files(struct io_uring *ring);
+ int io_uring_register_files_update(struct io_uring *ring, unsigned off,
+                                    int *files, unsigned nr_files);
+ int io_uring_register_eventfd(struct io_uring *ring, int fd);
+ int io_uring_register_eventfd_async(struct io_uring *ring, int fd);
+ int io_uring_unregister_eventfd(struct io_uring *ring);
+ int io_uring_register_probe(struct io_uring *ring, struct io_uring_probe *p,
+                             unsigned nr);
+ int io_uring_register_personality(struct io_uring *ring);
+ int io_uring_unregister_personality(struct io_uring *ring, int id);
+ int io_uring_register_restrictions(struct io_uring *ring,
+                                    struct io_uring_restriction *res,
+                                    unsigned int nr_res);
+ int io_uring_enable_rings(struct io_uring *ring);
+ int __io_uring_sqring_wait(struct io_uring *ring);
+ int io_uring_register_iowq_aff(struct io_uring *ring, size_t cpusz,
+                                const cpu_set_t *mask);
+ int io_uring_unregister_iowq_aff(struct io_uring *ring);
+ int io_uring_register_iowq_max_workers(struct io_uring *ring,
+                                        unsigned int *values);
+ int io_uring_register_ring_fd(struct io_uring *ring);
+ int io_uring_unregister_ring_fd(struct io_uring *ring);
+
+ /*
+  * Helper for the peek/wait single cqe functions. Exported because of that,
+  * but probably shouldn't be used directly in an application.
+  */
+ int __io_uring_get_cqe(struct io_uring *ring,
+                        struct io_uring_cqe **cqe_ptr, unsigned submit,
+                        unsigned wait_nr, sigset_t *sigmask);
+
+ #define LIBURING_UDATA_TIMEOUT ((__u64) -1)
+
+ #define io_uring_for_each_cqe(ring, head, cqe)                               \
+         /*                                                                   \
+          * io_uring_smp_load_acquire() enforces the order of tail            \
+          * and CQE reads.                                                    \
+          */                                                                  \
+         for (head = *(ring)->cq.khead;                                       \
+              (cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ?   \
+                 &(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL));  \
+              head++)                                                         \
+
+ /*
+  * Must be called after io_uring_for_each_cqe()
+  */
+ static inline void io_uring_cq_advance(struct io_uring *ring,
+                                        unsigned nr)
+ {
+         if (nr) {
+                 struct io_uring_cq *cq = &ring->cq;
+
+                 /*
+                  * Ensure that the kernel only sees the new value of the head
+                  * index after the CQEs have been read.
+                  */
+                 io_uring_smp_store_release(cq->khead, *cq->khead + nr);
+         }
+ }
+
+ /*
+  * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
+  * been processed by the application.
+  */
+ static inline void io_uring_cqe_seen(struct io_uring *ring,
+                                      struct io_uring_cqe *cqe)
+ {
+         if (cqe)
+                 io_uring_cq_advance(ring, 1);
+ }
+
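Reviewer annotation (not part of the diff): a sketch of the intended pairing of io_uring_for_each_cqe() with io_uring_cq_advance(): iterate everything that is ready, then release the whole batch with one head-index store instead of per-CQE io_uring_cqe_seen() calls.

/* Drain all currently ready completions; returns how many were consumed. */
static unsigned reap_all(struct io_uring *ring)
{
        struct io_uring_cqe *cqe;
        unsigned head, seen = 0;

        io_uring_for_each_cqe(ring, head, cqe) {
                /* cqe->res carries the operation's result (-errno on error) */
                seen++;
        }
        io_uring_cq_advance(ring, seen);        /* one release for the batch */
        return seen;
}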
+ /*
+  * Command prep helpers
+  */
+
+ /*
+  * Associate pointer @data with the sqe, for later retrieval from the cqe
+  * at command completion time with io_uring_cqe_get_data().
+  */
+ static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
+ {
+         sqe->user_data = (unsigned long) data;
+ }
+
+ static inline void *io_uring_cqe_get_data(const struct io_uring_cqe *cqe)
+ {
+         return (void *) (uintptr_t) cqe->user_data;
+ }
+
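Reviewer annotation (not part of the diff): a sketch of the usual user_data round trip. The struct req type here is a hypothetical per-request context, not anything defined by liburing.

struct req {
        int fd;
        char buf[4096];
};

static void submit_with_ctx(struct io_uring *ring, struct req *r)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_read(sqe, r->fd, r->buf, sizeof(r->buf), 0);
        io_uring_sqe_set_data(sqe, r);          /* stash the context */
        io_uring_submit(ring);
}

static struct req *ctx_of(struct io_uring_cqe *cqe)
{
        return io_uring_cqe_get_data(cqe);      /* retrieve it at completion */
}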
+ /*
+  * Assign a 64-bit value to this sqe, which can get retrieved at completion
+  * time with io_uring_cqe_get_data64. Just like the non-64 variants, except
+  * these store a 64-bit type rather than a data pointer.
+  */
+ static inline void io_uring_sqe_set_data64(struct io_uring_sqe *sqe,
+                                            __u64 data)
+ {
+         sqe->user_data = data;
+ }
+
+ static inline __u64 io_uring_cqe_get_data64(const struct io_uring_cqe *cqe)
+ {
+         return cqe->user_data;
+ }
+
+ /*
+  * Tell the app we have the 64-bit variants of the get/set userdata
+  */
+ #define LIBURING_HAVE_DATA64
+
+ static inline void io_uring_sqe_set_flags(struct io_uring_sqe *sqe,
+                                           unsigned flags)
+ {
+         sqe->flags = (__u8) flags;
+ }
+
+ static inline void __io_uring_set_target_fixed_file(struct io_uring_sqe *sqe,
+                                                     unsigned int file_index)
+ {
+         /* 0 means no fixed files, indexes should be encoded as "index + 1" */
+         sqe->file_index = file_index + 1;
+ }
+
+ static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
+                                     const void *addr, unsigned len,
+                                     __u64 offset)
+ {
+         sqe->opcode = (__u8) op;
+         sqe->flags = 0;
+         sqe->ioprio = 0;
+         sqe->fd = fd;
+         sqe->off = offset;
+         sqe->addr = (unsigned long) addr;
+         sqe->len = len;
+         sqe->rw_flags = 0;
+         sqe->buf_index = 0;
+         sqe->personality = 0;
+         sqe->file_index = 0;
+         sqe->__pad2[0] = sqe->__pad2[1] = 0;
+ }
+
+ /**
+  * @pre Either fd_in or fd_out must be a pipe.
+  * @param off_in If fd_in refers to a pipe, off_in must be (int64_t) -1;
+  *               If fd_in does not refer to a pipe and off_in is (int64_t) -1,
+  *               then bytes are read from fd_in starting from the file offset,
+  *               which is adjusted appropriately;
+  *               If fd_in does not refer to a pipe and off_in is not
+  *               (int64_t) -1, then the starting offset of fd_in will be
+  *               off_in.
+  * @param off_out The description of off_in also applies to off_out.
+  * @param splice_flags see man splice(2) for a description of the flags.
+  *
+  * This splice operation can be used to implement sendfile by splicing to an
+  * intermediate pipe first, then splicing to the final destination.
+  * In fact, the implementation of sendfile in the kernel uses splice
+  * internally.
+  *
+  * NOTE that even if fd_in or fd_out refers to a pipe, the splice operation
+  * can still fail with EINVAL if one of the fds doesn't explicitly support
+  * splice operations, e.g. reading from a terminal is unsupported from kernel
+  * 5.7 to 5.11.
+  * Check issue #291 for more information.
+  */
+ static inline void io_uring_prep_splice(struct io_uring_sqe *sqe,
+                                         int fd_in, int64_t off_in,
+                                         int fd_out, int64_t off_out,
+                                         unsigned int nbytes,
+                                         unsigned int splice_flags)
+ {
+         io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, NULL, nbytes,
+                          (__u64) off_out);
+         sqe->splice_off_in = (__u64) off_in;
+         sqe->splice_fd_in = fd_in;
+         sqe->splice_flags = splice_flags;
+ }
+
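Reviewer annotation (not part of the diff): a sketch of the sendfile-style pattern the comment above describes, one hop through an intermediate pipe. pipefd[] is assumed to come from pipe(2); a real copy loop would resubmit on short splices until all bytes have moved.

static void prep_copy_via_pipe(struct io_uring *ring, int in_fd, int out_fd,
                               int pipefd[2], unsigned nbytes)
{
        struct io_uring_sqe *sqe;

        /* file -> pipe: off_out is -1 because fd_out is the pipe */
        sqe = io_uring_get_sqe(ring);
        io_uring_prep_splice(sqe, in_fd, 0, pipefd[1], -1, nbytes, 0);
        sqe->flags |= IOSQE_IO_LINK;            /* run the two splices in order */

        /* pipe -> file: off_in is -1 because fd_in is the pipe */
        sqe = io_uring_get_sqe(ring);
        io_uring_prep_splice(sqe, pipefd[0], -1, out_fd, 0, nbytes, 0);
}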
+ static inline void io_uring_prep_tee(struct io_uring_sqe *sqe,
+                                      int fd_in, int fd_out,
+                                      unsigned int nbytes,
+                                      unsigned int splice_flags)
+ {
+         io_uring_prep_rw(IORING_OP_TEE, sqe, fd_out, NULL, nbytes, 0);
+         sqe->splice_off_in = 0;
+         sqe->splice_fd_in = fd_in;
+         sqe->splice_flags = splice_flags;
+ }
+
+ static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
+                                        const struct iovec *iovecs,
+                                        unsigned nr_vecs, __u64 offset)
+ {
+         io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
+ }
+
+ static inline void io_uring_prep_readv2(struct io_uring_sqe *sqe, int fd,
+                                         const struct iovec *iovecs,
+                                         unsigned nr_vecs, __u64 offset, int flags)
+ {
+         io_uring_prep_readv(sqe, fd, iovecs, nr_vecs, offset);
+         sqe->rw_flags = flags;
+ }
+
+ static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
+                                             void *buf, unsigned nbytes,
+                                             __u64 offset, int buf_index)
+ {
+         io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
+         sqe->buf_index = (__u16) buf_index;
+ }
+
+ static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
+                                         const struct iovec *iovecs,
+                                         unsigned nr_vecs, __u64 offset)
+ {
+         io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
+ }
+
+ static inline void io_uring_prep_writev2(struct io_uring_sqe *sqe, int fd,
+                                          const struct iovec *iovecs,
+                                          unsigned nr_vecs, __u64 offset, int flags)
+ {
+         io_uring_prep_writev(sqe, fd, iovecs, nr_vecs, offset);
+         sqe->rw_flags = flags;
+ }
+
+ static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
+                                              const void *buf, unsigned nbytes,
+                                              __u64 offset, int buf_index)
+ {
+         io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
+         sqe->buf_index = (__u16) buf_index;
+ }
+
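Reviewer annotation (not part of the diff): the *_fixed variants above only work against buffers registered up front with io_uring_register_buffers(). A minimal sketch, assuming "liburing.h" is included; buf_index 0 refers to iov[0].

#include <sys/uio.h>

static int read_fixed_example(struct io_uring *ring, int fd, void *buf,
                              unsigned len)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct io_uring_sqe *sqe;
        int ret;

        ret = io_uring_register_buffers(ring, &iov, 1);  /* pin once */
        if (ret)
                return ret;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read_fixed(sqe, fd, buf, len, 0, 0);
        return io_uring_submit(ring);
}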
+ static inline void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
+                                          struct msghdr *msg, unsigned flags)
+ {
+         io_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg, 1, 0);
+         sqe->msg_flags = flags;
+ }
+
+ static inline void io_uring_prep_sendmsg(struct io_uring_sqe *sqe, int fd,
+                                          const struct msghdr *msg, unsigned flags)
+ {
+         io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg, 1, 0);
+         sqe->msg_flags = flags;
+ }
+
+ static inline unsigned __io_uring_prep_poll_mask(unsigned poll_mask)
+ {
+ #if __BYTE_ORDER == __BIG_ENDIAN
+         poll_mask = __swahw32(poll_mask);
+ #endif
+         return poll_mask;
+ }
+
+ static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
+                                           unsigned poll_mask)
+ {
+         io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
+         sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
+ }
+
+ static inline void io_uring_prep_poll_multishot(struct io_uring_sqe *sqe,
+                                                 int fd, unsigned poll_mask)
+ {
+         io_uring_prep_poll_add(sqe, fd, poll_mask);
+         sqe->len = IORING_POLL_ADD_MULTI;
+ }
+
+ static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
+                                              __u64 user_data)
+ {
+         io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, NULL, 0, 0);
+         sqe->addr = user_data;
+ }
+
+ static inline void io_uring_prep_poll_update(struct io_uring_sqe *sqe,
+                                              __u64 old_user_data,
+                                              __u64 new_user_data,
+                                              unsigned poll_mask, unsigned flags)
+ {
+         io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, NULL, flags,
+                          new_user_data);
+         sqe->addr = old_user_data;
+         sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
+ }
+
+ static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
+                                        unsigned fsync_flags)
+ {
+         io_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, NULL, 0, 0);
+         sqe->fsync_flags = fsync_flags;
+ }
+
+ static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
+ {
+         io_uring_prep_rw(IORING_OP_NOP, sqe, -1, NULL, 0, 0);
+ }
+
+ static inline void io_uring_prep_timeout(struct io_uring_sqe *sqe,
+                                          struct __kernel_timespec *ts,
+                                          unsigned count, unsigned flags)
+ {
+         io_uring_prep_rw(IORING_OP_TIMEOUT, sqe, -1, ts, 1, count);
+         sqe->timeout_flags = flags;
+ }
+
+ static inline void io_uring_prep_timeout_remove(struct io_uring_sqe *sqe,
+                                                 __u64 user_data, unsigned flags)
+ {
+         io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, NULL, 0, 0);
+         sqe->addr = user_data;
+         sqe->timeout_flags = flags;
+ }
+
+ static inline void io_uring_prep_timeout_update(struct io_uring_sqe *sqe,
+                                                 struct __kernel_timespec *ts,
+                                                 __u64 user_data, unsigned flags)
+ {
+         io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, NULL, 0,
+                          (uintptr_t) ts);
+         sqe->addr = user_data;
+         sqe->timeout_flags = flags | IORING_TIMEOUT_UPDATE;
+ }
+
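Reviewer annotation (not part of the diff): a sketch of a standalone timeout built from io_uring_prep_timeout() above. With count == 0 the request completes with -ETIME when the clock expires; with a nonzero count it completes as soon as that many other CQEs have posted. The timespec must stay valid until the timeout completes.

static void prep_one_second_timeout(struct io_uring_sqe *sqe,
                                    struct __kernel_timespec *ts)
{
        ts->tv_sec = 1;                         /* relative 1s deadline */
        ts->tv_nsec = 0;
        io_uring_prep_timeout(sqe, ts, 0, 0);   /* fire on expiry only */
}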
+ static inline void io_uring_prep_accept(struct io_uring_sqe *sqe, int fd,
+                                         struct sockaddr *addr,
+                                         socklen_t *addrlen, int flags)
+ {
+         io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr, 0,
+                          (__u64) (unsigned long) addrlen);
+         sqe->accept_flags = (__u32) flags;
+ }
+
+ /* accept directly into the fixed file table */
+ static inline void io_uring_prep_accept_direct(struct io_uring_sqe *sqe, int fd,
+                                                struct sockaddr *addr,
+                                                socklen_t *addrlen, int flags,
+                                                unsigned int file_index)
+ {
+         io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
+         __io_uring_set_target_fixed_file(sqe, file_index);
+ }
+
+ static inline void io_uring_prep_cancel(struct io_uring_sqe *sqe,
+                                         __u64 user_data, int flags)
+ {
+         io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, NULL, 0, 0);
+         sqe->addr = user_data;
+         sqe->cancel_flags = (__u32) flags;
+ }
+
+ static inline void io_uring_prep_link_timeout(struct io_uring_sqe *sqe,
+                                               struct __kernel_timespec *ts,
+                                               unsigned flags)
+ {
+         io_uring_prep_rw(IORING_OP_LINK_TIMEOUT, sqe, -1, ts, 1, 0);
+         sqe->timeout_flags = flags;
+ }
+
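Reviewer annotation (not part of the diff): a sketch of the linked-timeout pattern io_uring_prep_link_timeout() exists for. IOSQE_IO_LINK ties the LINK_TIMEOUT SQE to the read that precedes it; if the timer fires first, the read completes with -ECANCELED.

static void prep_read_with_deadline(struct io_uring *ring, int fd, void *buf,
                                    unsigned len, struct __kernel_timespec *ts)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;            /* chain the next SQE to this one */

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_link_timeout(sqe, ts, 0); /* bounds the linked read */
}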
+ static inline void io_uring_prep_connect(struct io_uring_sqe *sqe, int fd,
+                                          const struct sockaddr *addr,
+                                          socklen_t addrlen)
+ {
+         io_uring_prep_rw(IORING_OP_CONNECT, sqe, fd, addr, 0, addrlen);
+ }
+
+ static inline void io_uring_prep_files_update(struct io_uring_sqe *sqe,
+                                               int *fds, unsigned nr_fds,
+                                               int offset)
+ {
+         io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds, nr_fds,
+                          (__u64) offset);
+ }
+
+ static inline void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
+                                            int mode, off_t offset, off_t len)
+ {
+         io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd,
+                          (const uintptr_t *) (unsigned long) len,
+                          (unsigned int) mode, (__u64) offset);
+ }
+
+ static inline void io_uring_prep_openat(struct io_uring_sqe *sqe, int dfd,
+                                         const char *path, int flags, mode_t mode)
+ {
+         io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path, mode, 0);
+         sqe->open_flags = (__u32) flags;
+ }
+
+ /* open directly into the fixed file table */
+ static inline void io_uring_prep_openat_direct(struct io_uring_sqe *sqe,
+                                                int dfd, const char *path,
+                                                int flags, mode_t mode,
+                                                unsigned file_index)
+ {
+         io_uring_prep_openat(sqe, dfd, path, flags, mode);
+         __io_uring_set_target_fixed_file(sqe, file_index);
+ }
+
+ static inline void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
+ {
+         io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, NULL, 0, 0);
+ }
+
+ static inline void io_uring_prep_close_direct(struct io_uring_sqe *sqe,
+                                               unsigned file_index)
+ {
+         io_uring_prep_close(sqe, 0);
+         __io_uring_set_target_fixed_file(sqe, file_index);
+ }
+
+ static inline void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
+                                       void *buf, unsigned nbytes, __u64 offset)
+ {
+         io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
+ }
+
+ static inline void io_uring_prep_write(struct io_uring_sqe *sqe, int fd,
+                                        const void *buf, unsigned nbytes, __u64 offset)
+ {
+         io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
+ }
+
+ struct statx;
+ static inline void io_uring_prep_statx(struct io_uring_sqe *sqe, int dfd,
+                                        const char *path, int flags, unsigned mask,
+                                        struct statx *statxbuf)
+ {
+         io_uring_prep_rw(IORING_OP_STATX, sqe, dfd, path, mask,
+                          (__u64) (unsigned long) statxbuf);
+         sqe->statx_flags = (__u32) flags;
+ }
+
+ static inline void io_uring_prep_fadvise(struct io_uring_sqe *sqe, int fd,
+                                          __u64 offset, off_t len, int advice)
+ {
+         io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, (__u32) len, offset);
+         sqe->fadvise_advice = (__u32) advice;
+ }
+
+ static inline void io_uring_prep_madvise(struct io_uring_sqe *sqe, void *addr,
+                                          off_t length, int advice)
+ {
+         io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, (__u32) length, 0);
+         sqe->fadvise_advice = (__u32) advice;
+ }
+
+ static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
+                                       const void *buf, size_t len, int flags)
+ {
+         io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, (__u32) len, 0);
+         sqe->msg_flags = (__u32) flags;
+ }
+
+ static inline void io_uring_prep_recv(struct io_uring_sqe *sqe, int sockfd,
+                                       void *buf, size_t len, int flags)
+ {
+         io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, (__u32) len, 0);
+         sqe->msg_flags = (__u32) flags;
+ }
+
+ static inline void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
+                                          const char *path, struct open_how *how)
+ {
+         io_uring_prep_rw(IORING_OP_OPENAT2, sqe, dfd, path, sizeof(*how),
+                          (uint64_t) (uintptr_t) how);
+ }
+
+ /* open directly into the fixed file table */
+ static inline void io_uring_prep_openat2_direct(struct io_uring_sqe *sqe,
+                                                 int dfd, const char *path,
+                                                 struct open_how *how,
+                                                 unsigned file_index)
+ {
+         io_uring_prep_openat2(sqe, dfd, path, how);
+         __io_uring_set_target_fixed_file(sqe, file_index);
+ }
+
+ struct epoll_event;
+ static inline void io_uring_prep_epoll_ctl(struct io_uring_sqe *sqe, int epfd,
+                                            int fd, int op,
+                                            struct epoll_event *ev)
+ {
+         io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev,
+                          (__u32) op, (__u32) fd);
+ }
+
+ static inline void io_uring_prep_provide_buffers(struct io_uring_sqe *sqe,
+                                                  void *addr, int len, int nr,
+                                                  int bgid, int bid)
+ {
+         io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, (__u32) len,
+                          (__u64) bid);
+         sqe->buf_group = (__u16) bgid;
+ }
+
+ static inline void io_uring_prep_remove_buffers(struct io_uring_sqe *sqe,
+                                                 int nr, int bgid)
+ {
+         io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, NULL, 0, 0);
+         sqe->buf_group = (__u16) bgid;
+ }
+
+ static inline void io_uring_prep_shutdown(struct io_uring_sqe *sqe, int fd,
+                                           int how)
+ {
+         io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, NULL, (__u32) how, 0);
+ }
+
+ static inline void io_uring_prep_unlinkat(struct io_uring_sqe *sqe, int dfd,
+                                           const char *path, int flags)
+ {
+         io_uring_prep_rw(IORING_OP_UNLINKAT, sqe, dfd, path, 0, 0);
+         sqe->unlink_flags = (__u32) flags;
+ }
+
+ static inline void io_uring_prep_renameat(struct io_uring_sqe *sqe, int olddfd,
+                                           const char *oldpath, int newdfd,
+                                           const char *newpath, int flags)
+ {
+         io_uring_prep_rw(IORING_OP_RENAMEAT, sqe, olddfd, oldpath, (__u32) newdfd,
+                          (uint64_t) (uintptr_t) newpath);
+         sqe->rename_flags = (__u32) flags;
+ }
+
+ static inline void io_uring_prep_sync_file_range(struct io_uring_sqe *sqe,
+                                                  int fd, unsigned len,
+                                                  __u64 offset, int flags)
+ {
+         io_uring_prep_rw(IORING_OP_SYNC_FILE_RANGE, sqe, fd, NULL, len, offset);
+         sqe->sync_range_flags = (__u32) flags;
+ }
+
+ static inline void io_uring_prep_mkdirat(struct io_uring_sqe *sqe, int dfd,
+                                          const char *path, mode_t mode)
+ {
+         io_uring_prep_rw(IORING_OP_MKDIRAT, sqe, dfd, path, mode, 0);
+ }
+
+ static inline void io_uring_prep_symlinkat(struct io_uring_sqe *sqe,
+                                            const char *target, int newdirfd,
+                                            const char *linkpath)
+ {
+         io_uring_prep_rw(IORING_OP_SYMLINKAT, sqe, newdirfd, target, 0,
+                          (uint64_t) (uintptr_t) linkpath);
+ }
+
+ static inline void io_uring_prep_linkat(struct io_uring_sqe *sqe, int olddfd,
+                                         const char *oldpath, int newdfd,
+                                         const char *newpath, int flags)
+ {
+         io_uring_prep_rw(IORING_OP_LINKAT, sqe, olddfd, oldpath, (__u32) newdfd,
+                          (uint64_t) (uintptr_t) newpath);
+         sqe->hardlink_flags = (__u32) flags;
+ }
+
+ static inline void io_uring_prep_msg_ring(struct io_uring_sqe *sqe, int fd,
+                                           unsigned int len, __u64 data,
+                                           unsigned int flags)
+ {
+         io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, NULL, len, data);
+         sqe->rw_flags = flags;
+ }
+
+ /*
+  * Returns the number of unconsumed (if SQPOLL) or unsubmitted entries that
+  * exist in the SQ ring
+  */
+ static inline unsigned io_uring_sq_ready(const struct io_uring *ring)
+ {
+         /*
+          * Without a barrier, we could miss an update and think the SQ wasn't
+          * ready. We don't need the load acquire for non-SQPOLL since then we
+          * drive updates.
+          */
+         if (ring->flags & IORING_SETUP_SQPOLL)
+                 return ring->sq.sqe_tail - io_uring_smp_load_acquire(ring->sq.khead);
+
+         /* always use real head, to avoid losing sync for short submit */
+         return ring->sq.sqe_tail - *ring->sq.khead;
+ }
+
+ /*
+  * Returns how much space is left in the SQ ring.
+  */
+ static inline unsigned io_uring_sq_space_left(const struct io_uring *ring)
+ {
+         return *ring->sq.kring_entries - io_uring_sq_ready(ring);
+ }
+
+ /*
+  * Only applicable when using SQPOLL - allows the caller to wait for space
+  * to free up in the SQ ring, which happens when the kernel side thread has
+  * consumed one or more entries. If the SQ ring is currently non-full, no
+  * action is taken. Note: may return -EINVAL if the kernel doesn't support
+  * this feature.
+  */
+ static inline int io_uring_sqring_wait(struct io_uring *ring)
+ {
+         if (!(ring->flags & IORING_SETUP_SQPOLL))
+                 return 0;
+         if (io_uring_sq_space_left(ring))
+                 return 0;
+
+         return __io_uring_sqring_wait(ring);
+ }
+
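Reviewer annotation (not part of the diff): a sketch of SQ-ring backpressure built from the helpers above. Under SQPOLL the kernel-side thread drains the ring, so waiting for space is meaningful; without SQPOLL, submitting is what makes room.

static struct io_uring_sqe *get_sqe_blocking(struct io_uring *ring)
{
        struct io_uring_sqe *sqe;

        while (!(sqe = io_uring_get_sqe(ring))) {
                if (ring->flags & IORING_SETUP_SQPOLL)
                        io_uring_sqring_wait(ring);     /* kernel thread frees entries */
                else
                        io_uring_submit(ring);          /* flush to make room */
        }
        return sqe;
}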
+ /*
+  * Returns how many unconsumed entries are ready in the CQ ring
+  */
+ static inline unsigned io_uring_cq_ready(const struct io_uring *ring)
+ {
+         return io_uring_smp_load_acquire(ring->cq.ktail) - *ring->cq.khead;
+ }
+
+ /*
+  * Returns true if the eventfd notification is currently enabled
+  */
+ static inline bool io_uring_cq_eventfd_enabled(const struct io_uring *ring)
+ {
+         if (!ring->cq.kflags)
+                 return true;
+
+         return !(*ring->cq.kflags & IORING_CQ_EVENTFD_DISABLED);
+ }
+
+ /*
+  * Toggle eventfd notification on or off, if an eventfd is registered with
+  * the ring.
+  */
+ static inline int io_uring_cq_eventfd_toggle(struct io_uring *ring,
+                                              bool enabled)
+ {
+         uint32_t flags;
+
+         if (!!enabled == io_uring_cq_eventfd_enabled(ring))
+                 return 0;
+
+         if (!ring->cq.kflags)
+                 return -EOPNOTSUPP;
+
+         flags = *ring->cq.kflags;
+
+         if (enabled)
+                 flags &= ~IORING_CQ_EVENTFD_DISABLED;
+         else
+                 flags |= IORING_CQ_EVENTFD_DISABLED;
+
+         IO_URING_WRITE_ONCE(*ring->cq.kflags, flags);
+
+         return 0;
+ }
+
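Reviewer annotation (not part of the diff): a sketch of muting eventfd wakeups while a batch is drained, then re-enabling them. It assumes an eventfd was registered beforehand with io_uring_register_eventfd(); on kernels without CQ-flags support the toggle simply returns -EOPNOTSUPP.

static void process_batch_quietly(struct io_uring *ring)
{
        struct io_uring_cqe *cqe;
        unsigned head, seen = 0;

        io_uring_cq_eventfd_toggle(ring, false);        /* stop wakeups */

        io_uring_for_each_cqe(ring, head, cqe)
                seen++;
        io_uring_cq_advance(ring, seen);

        io_uring_cq_eventfd_toggle(ring, true);         /* resume wakeups */
}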
+ /*
+  * Return an IO completion, waiting for 'wait_nr' completions if one isn't
+  * readily available. Returns 0 with cqe_ptr filled in on success, -errno on
+  * failure.
+  */
+ static inline int io_uring_wait_cqe_nr(struct io_uring *ring,
+                                        struct io_uring_cqe **cqe_ptr,
+                                        unsigned wait_nr)
+ {
+         return __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr, NULL);
+ }
+
+ static inline int __io_uring_peek_cqe(struct io_uring *ring,
+                                       struct io_uring_cqe **cqe_ptr,
+                                       unsigned *nr_available)
+ {
+         struct io_uring_cqe *cqe;
+         int err = 0;
+         unsigned available;
+         unsigned mask = *ring->cq.kring_mask;
+
+         do {
+                 unsigned tail = io_uring_smp_load_acquire(ring->cq.ktail);
+                 unsigned head = *ring->cq.khead;
+
+                 cqe = NULL;
+                 available = tail - head;
+                 if (!available)
+                         break;
+
+                 cqe = &ring->cq.cqes[head & mask];
+                 if (!(ring->features & IORING_FEAT_EXT_ARG) &&
+                     cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+                         if (cqe->res < 0)
+                                 err = cqe->res;
+                         io_uring_cq_advance(ring, 1);
+                         if (!err)
+                                 continue;
+                         cqe = NULL;
+                 }
+
+                 break;
+         } while (1);
+
+         *cqe_ptr = cqe;
+         if (nr_available)
+                 *nr_available = available;
+         return err;
+ }
+
+ /*
+  * Return an IO completion, if one is readily available. Returns 0 with
+  * cqe_ptr filled in on success, -errno on failure.
+  */
+ static inline int io_uring_peek_cqe(struct io_uring *ring,
+                                     struct io_uring_cqe **cqe_ptr)
+ {
+         if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr)
+                 return 0;
+
+         return io_uring_wait_cqe_nr(ring, cqe_ptr, 0);
+ }
+
+ /*
+  * Return an IO completion, waiting for it if necessary. Returns 0 with
+  * cqe_ptr filled in on success, -errno on failure.
+  */
+ static inline int io_uring_wait_cqe(struct io_uring *ring,
+                                     struct io_uring_cqe **cqe_ptr)
+ {
+         if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr)
+                 return 0;
+
+         return io_uring_wait_cqe_nr(ring, cqe_ptr, 1);
+ }
+
+ /*
+  * Return an sqe to fill. Application must later call io_uring_submit()
+  * when it's ready to tell the kernel about it. The caller may call this
+  * function multiple times before calling io_uring_submit().
+  *
+  * Returns a vacant sqe, or NULL if we're full.
+  */
+ static inline struct io_uring_sqe *_io_uring_get_sqe(struct io_uring *ring)
+ {
+         struct io_uring_sq *sq = &ring->sq;
+         unsigned int head = io_uring_smp_load_acquire(sq->khead);
+         unsigned int next = sq->sqe_tail + 1;
+         struct io_uring_sqe *sqe = NULL;
+
+         if (next - head <= *sq->kring_entries) {
+                 sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
+                 sq->sqe_tail = next;
+         }
+         return sqe;
+ }
+
+ #ifndef LIBURING_INTERNAL
+ static inline struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+ {
+         return _io_uring_get_sqe(ring);
+ }
+ #else
+ struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
+ #endif
+
+ ssize_t io_uring_mlock_size(unsigned entries, unsigned flags);
+ ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p);
+
+ #ifdef __cplusplus
+ }
+ #endif
+
+ #endif