uringmachine 0.1
- checksums.yaml +7 -0
- data/.github/dependabot.yml +12 -0
- data/.github/workflows/test.yml +35 -0
- data/.gitignore +59 -0
- data/.gitmodules +3 -0
- data/CHANGELOG.md +7 -0
- data/Gemfile +3 -0
- data/LICENSE +21 -0
- data/README.md +11 -0
- data/Rakefile +39 -0
- data/TODO.md +0 -0
- data/examples/echo_server.rb +52 -0
- data/examples/event_loop.rb +69 -0
- data/examples/fibers.rb +105 -0
- data/examples/http_server.rb +56 -0
- data/examples/http_server_multishot.rb +57 -0
- data/examples/http_server_simpler.rb +34 -0
- data/ext/um/extconf.rb +71 -0
- data/ext/um/iou.h +101 -0
- data/ext/um/op_ctx.c +138 -0
- data/ext/um/ring.c +755 -0
- data/ext/um/um.c +267 -0
- data/ext/um/um.h +97 -0
- data/ext/um/um_class.c +175 -0
- data/ext/um/um_ext.c +11 -0
- data/ext/um/um_op.c +87 -0
- data/ext/um/um_utils.c +23 -0
- data/lib/uringmachine/version.rb +3 -0
- data/lib/uringmachine.rb +8 -0
- data/test/helper.rb +70 -0
- data/test/test_iou.rb +876 -0
- data/test/test_um.rb +168 -0
- data/uringmachine.gemspec +27 -0
- data/vendor/liburing/.github/actions/codespell/stopwords +7 -0
- data/vendor/liburing/.github/pull_request_template.md +86 -0
- data/vendor/liburing/.github/workflows/build.yml +137 -0
- data/vendor/liburing/.github/workflows/codespell.yml +25 -0
- data/vendor/liburing/.github/workflows/shellcheck.yml +20 -0
- data/vendor/liburing/.gitignore +41 -0
- data/vendor/liburing/CHANGELOG +111 -0
- data/vendor/liburing/CITATION.cff +11 -0
- data/vendor/liburing/COPYING +502 -0
- data/vendor/liburing/COPYING.GPL +339 -0
- data/vendor/liburing/LICENSE +20 -0
- data/vendor/liburing/Makefile +96 -0
- data/vendor/liburing/Makefile.common +7 -0
- data/vendor/liburing/Makefile.quiet +11 -0
- data/vendor/liburing/README +106 -0
- data/vendor/liburing/SECURITY.md +6 -0
- data/vendor/liburing/configure +624 -0
- data/vendor/liburing/debian/README.Debian +7 -0
- data/vendor/liburing/debian/changelog +38 -0
- data/vendor/liburing/debian/control +39 -0
- data/vendor/liburing/debian/copyright +49 -0
- data/vendor/liburing/debian/liburing-dev.install +4 -0
- data/vendor/liburing/debian/liburing-dev.manpages +5 -0
- data/vendor/liburing/debian/liburing2.install +1 -0
- data/vendor/liburing/debian/liburing2.symbols +56 -0
- data/vendor/liburing/debian/patches/series +1 -0
- data/vendor/liburing/debian/rules +29 -0
- data/vendor/liburing/debian/source/format +1 -0
- data/vendor/liburing/debian/source/local-options +2 -0
- data/vendor/liburing/debian/source/options +1 -0
- data/vendor/liburing/debian/watch +3 -0
- data/vendor/liburing/examples/Makefile +53 -0
- data/vendor/liburing/examples/helpers.c +62 -0
- data/vendor/liburing/examples/helpers.h +7 -0
- data/vendor/liburing/examples/io_uring-close-test.c +123 -0
- data/vendor/liburing/examples/io_uring-cp.c +282 -0
- data/vendor/liburing/examples/io_uring-test.c +112 -0
- data/vendor/liburing/examples/io_uring-udp.c +403 -0
- data/vendor/liburing/examples/link-cp.c +193 -0
- data/vendor/liburing/examples/napi-busy-poll-client.c +509 -0
- data/vendor/liburing/examples/napi-busy-poll-server.c +450 -0
- data/vendor/liburing/examples/poll-bench.c +101 -0
- data/vendor/liburing/examples/proxy.c +2461 -0
- data/vendor/liburing/examples/proxy.h +102 -0
- data/vendor/liburing/examples/rsrc-update-bench.c +100 -0
- data/vendor/liburing/examples/send-zerocopy.c +658 -0
- data/vendor/liburing/examples/ucontext-cp.c +258 -0
- data/vendor/liburing/liburing-ffi.pc.in +12 -0
- data/vendor/liburing/liburing.pc.in +12 -0
- data/vendor/liburing/liburing.spec +66 -0
- data/vendor/liburing/make-debs.sh +55 -0
- data/vendor/liburing/src/Makefile +129 -0
- data/vendor/liburing/src/arch/aarch64/lib.h +47 -0
- data/vendor/liburing/src/arch/aarch64/syscall.h +91 -0
- data/vendor/liburing/src/arch/generic/lib.h +17 -0
- data/vendor/liburing/src/arch/generic/syscall.h +100 -0
- data/vendor/liburing/src/arch/riscv64/lib.h +48 -0
- data/vendor/liburing/src/arch/riscv64/syscall.h +100 -0
- data/vendor/liburing/src/arch/syscall-defs.h +94 -0
- data/vendor/liburing/src/arch/x86/lib.h +11 -0
- data/vendor/liburing/src/arch/x86/syscall.h +296 -0
- data/vendor/liburing/src/ffi.c +15 -0
- data/vendor/liburing/src/include/liburing/barrier.h +81 -0
- data/vendor/liburing/src/include/liburing/io_uring.h +818 -0
- data/vendor/liburing/src/include/liburing.h +1602 -0
- data/vendor/liburing/src/int_flags.h +11 -0
- data/vendor/liburing/src/lib.h +52 -0
- data/vendor/liburing/src/liburing-ffi.map +211 -0
- data/vendor/liburing/src/liburing.map +104 -0
- data/vendor/liburing/src/nolibc.c +55 -0
- data/vendor/liburing/src/queue.c +468 -0
- data/vendor/liburing/src/register.c +374 -0
- data/vendor/liburing/src/setup.c +689 -0
- data/vendor/liburing/src/setup.h +9 -0
- data/vendor/liburing/src/syscall.c +29 -0
- data/vendor/liburing/src/syscall.h +53 -0
- data/vendor/liburing/src/version.c +21 -0
- data/vendor/liburing/test/232c93d07b74.c +305 -0
- data/vendor/liburing/test/35fa71a030ca.c +329 -0
- data/vendor/liburing/test/500f9fbadef8.c +91 -0
- data/vendor/liburing/test/7ad0e4b2f83c.c +94 -0
- data/vendor/liburing/test/8a9973408177.c +107 -0
- data/vendor/liburing/test/917257daa0fe.c +54 -0
- data/vendor/liburing/test/Makefile +297 -0
- data/vendor/liburing/test/a0908ae19763.c +59 -0
- data/vendor/liburing/test/a4c0b3decb33.c +181 -0
- data/vendor/liburing/test/accept-link.c +255 -0
- data/vendor/liburing/test/accept-non-empty.c +256 -0
- data/vendor/liburing/test/accept-reuse.c +163 -0
- data/vendor/liburing/test/accept-test.c +83 -0
- data/vendor/liburing/test/accept.c +919 -0
- data/vendor/liburing/test/across-fork.c +284 -0
- data/vendor/liburing/test/b19062a56726.c +54 -0
- data/vendor/liburing/test/b5837bd5311d.c +78 -0
- data/vendor/liburing/test/bind-listen.c +408 -0
- data/vendor/liburing/test/buf-ring-nommap.c +123 -0
- data/vendor/liburing/test/buf-ring-put.c +83 -0
- data/vendor/liburing/test/buf-ring.c +473 -0
- data/vendor/liburing/test/ce593a6c480a.c +139 -0
- data/vendor/liburing/test/close-opath.c +123 -0
- data/vendor/liburing/test/config +14 -0
- data/vendor/liburing/test/connect-rep.c +204 -0
- data/vendor/liburing/test/connect.c +442 -0
- data/vendor/liburing/test/coredump.c +60 -0
- data/vendor/liburing/test/cq-full.c +97 -0
- data/vendor/liburing/test/cq-overflow.c +530 -0
- data/vendor/liburing/test/cq-peek-batch.c +103 -0
- data/vendor/liburing/test/cq-ready.c +95 -0
- data/vendor/liburing/test/cq-size.c +65 -0
- data/vendor/liburing/test/d4ae271dfaae.c +96 -0
- data/vendor/liburing/test/d77a67ed5f27.c +65 -0
- data/vendor/liburing/test/defer-taskrun.c +391 -0
- data/vendor/liburing/test/defer-tw-timeout.c +173 -0
- data/vendor/liburing/test/defer.c +319 -0
- data/vendor/liburing/test/double-poll-crash.c +195 -0
- data/vendor/liburing/test/drop-submit.c +94 -0
- data/vendor/liburing/test/eeed8b54e0df.c +120 -0
- data/vendor/liburing/test/empty-eownerdead.c +45 -0
- data/vendor/liburing/test/eploop.c +74 -0
- data/vendor/liburing/test/eventfd-disable.c +179 -0
- data/vendor/liburing/test/eventfd-reg.c +77 -0
- data/vendor/liburing/test/eventfd-ring.c +98 -0
- data/vendor/liburing/test/eventfd.c +113 -0
- data/vendor/liburing/test/evloop.c +73 -0
- data/vendor/liburing/test/exec-target.c +6 -0
- data/vendor/liburing/test/exit-no-cleanup.c +117 -0
- data/vendor/liburing/test/fadvise.c +202 -0
- data/vendor/liburing/test/fallocate.c +265 -0
- data/vendor/liburing/test/fc2a85cb02ef.c +132 -0
- data/vendor/liburing/test/fd-install.c +500 -0
- data/vendor/liburing/test/fd-pass.c +237 -0
- data/vendor/liburing/test/fdinfo.c +419 -0
- data/vendor/liburing/test/file-register.c +1189 -0
- data/vendor/liburing/test/file-update.c +231 -0
- data/vendor/liburing/test/file-verify.c +654 -0
- data/vendor/liburing/test/files-exit-hang-poll.c +114 -0
- data/vendor/liburing/test/files-exit-hang-timeout.c +137 -0
- data/vendor/liburing/test/fixed-buf-iter.c +115 -0
- data/vendor/liburing/test/fixed-buf-merge.c +101 -0
- data/vendor/liburing/test/fixed-hugepage.c +411 -0
- data/vendor/liburing/test/fixed-link.c +90 -0
- data/vendor/liburing/test/fixed-reuse.c +160 -0
- data/vendor/liburing/test/fpos.c +255 -0
- data/vendor/liburing/test/fsnotify.c +118 -0
- data/vendor/liburing/test/fsync.c +224 -0
- data/vendor/liburing/test/futex.c +571 -0
- data/vendor/liburing/test/hardlink.c +170 -0
- data/vendor/liburing/test/helpers.c +318 -0
- data/vendor/liburing/test/helpers.h +108 -0
- data/vendor/liburing/test/ignore-single-mmap.c +48 -0
- data/vendor/liburing/test/init-mem.c +164 -0
- data/vendor/liburing/test/io-cancel.c +561 -0
- data/vendor/liburing/test/io_uring_enter.c +264 -0
- data/vendor/liburing/test/io_uring_passthrough.c +482 -0
- data/vendor/liburing/test/io_uring_register.c +503 -0
- data/vendor/liburing/test/io_uring_setup.c +110 -0
- data/vendor/liburing/test/iopoll-leak.c +85 -0
- data/vendor/liburing/test/iopoll-overflow.c +118 -0
- data/vendor/liburing/test/iopoll.c +465 -0
- data/vendor/liburing/test/lfs-openat-write.c +119 -0
- data/vendor/liburing/test/lfs-openat.c +273 -0
- data/vendor/liburing/test/link-timeout.c +1108 -0
- data/vendor/liburing/test/link.c +497 -0
- data/vendor/liburing/test/link_drain.c +255 -0
- data/vendor/liburing/test/madvise.c +195 -0
- data/vendor/liburing/test/min-timeout-wait.c +354 -0
- data/vendor/liburing/test/min-timeout.c +233 -0
- data/vendor/liburing/test/mkdir.c +112 -0
- data/vendor/liburing/test/msg-ring-fd.c +331 -0
- data/vendor/liburing/test/msg-ring-flags.c +212 -0
- data/vendor/liburing/test/msg-ring-overflow.c +159 -0
- data/vendor/liburing/test/msg-ring.c +467 -0
- data/vendor/liburing/test/multicqes_drain.c +429 -0
- data/vendor/liburing/test/napi-test.c +215 -0
- data/vendor/liburing/test/napi-test.sh +48 -0
- data/vendor/liburing/test/no-mmap-inval.c +42 -0
- data/vendor/liburing/test/nolibc.c +62 -0
- data/vendor/liburing/test/nop-all-sizes.c +99 -0
- data/vendor/liburing/test/nop.c +177 -0
- data/vendor/liburing/test/nvme.h +169 -0
- data/vendor/liburing/test/ooo-file-unreg.c +82 -0
- data/vendor/liburing/test/open-close.c +261 -0
- data/vendor/liburing/test/open-direct-link.c +188 -0
- data/vendor/liburing/test/open-direct-pick.c +180 -0
- data/vendor/liburing/test/openat2.c +312 -0
- data/vendor/liburing/test/personality.c +204 -0
- data/vendor/liburing/test/pipe-bug.c +95 -0
- data/vendor/liburing/test/pipe-eof.c +83 -0
- data/vendor/liburing/test/pipe-reuse.c +105 -0
- data/vendor/liburing/test/poll-cancel-all.c +496 -0
- data/vendor/liburing/test/poll-cancel-ton.c +135 -0
- data/vendor/liburing/test/poll-cancel.c +228 -0
- data/vendor/liburing/test/poll-link.c +221 -0
- data/vendor/liburing/test/poll-many.c +230 -0
- data/vendor/liburing/test/poll-mshot-overflow.c +265 -0
- data/vendor/liburing/test/poll-mshot-update.c +323 -0
- data/vendor/liburing/test/poll-race-mshot.c +276 -0
- data/vendor/liburing/test/poll-race.c +105 -0
- data/vendor/liburing/test/poll-ring.c +48 -0
- data/vendor/liburing/test/poll-v-poll.c +353 -0
- data/vendor/liburing/test/poll.c +327 -0
- data/vendor/liburing/test/probe.c +135 -0
- data/vendor/liburing/test/read-before-exit.c +129 -0
- data/vendor/liburing/test/read-mshot-empty.c +153 -0
- data/vendor/liburing/test/read-mshot.c +404 -0
- data/vendor/liburing/test/read-write.c +1013 -0
- data/vendor/liburing/test/recv-msgall-stream.c +398 -0
- data/vendor/liburing/test/recv-msgall.c +263 -0
- data/vendor/liburing/test/recv-multishot.c +602 -0
- data/vendor/liburing/test/recvsend_bundle.c +691 -0
- data/vendor/liburing/test/reg-fd-only.c +131 -0
- data/vendor/liburing/test/reg-hint.c +56 -0
- data/vendor/liburing/test/reg-reg-ring.c +90 -0
- data/vendor/liburing/test/regbuf-merge.c +91 -0
- data/vendor/liburing/test/register-restrictions.c +633 -0
- data/vendor/liburing/test/rename.c +132 -0
- data/vendor/liburing/test/ring-leak.c +283 -0
- data/vendor/liburing/test/ring-leak2.c +249 -0
- data/vendor/liburing/test/ringbuf-read.c +196 -0
- data/vendor/liburing/test/ringbuf-status.c +242 -0
- data/vendor/liburing/test/rsrc_tags.c +461 -0
- data/vendor/liburing/test/runtests-loop.sh +16 -0
- data/vendor/liburing/test/runtests-quiet.sh +11 -0
- data/vendor/liburing/test/runtests.sh +168 -0
- data/vendor/liburing/test/rw_merge_test.c +98 -0
- data/vendor/liburing/test/self.c +91 -0
- data/vendor/liburing/test/send-zerocopy.c +971 -0
- data/vendor/liburing/test/send_recv.c +412 -0
- data/vendor/liburing/test/send_recvmsg.c +444 -0
- data/vendor/liburing/test/shared-wq.c +84 -0
- data/vendor/liburing/test/short-read.c +75 -0
- data/vendor/liburing/test/shutdown.c +165 -0
- data/vendor/liburing/test/sigfd-deadlock.c +88 -0
- data/vendor/liburing/test/single-issuer.c +169 -0
- data/vendor/liburing/test/skip-cqe.c +428 -0
- data/vendor/liburing/test/socket-getsetsock-cmd.c +346 -0
- data/vendor/liburing/test/socket-io-cmd.c +237 -0
- data/vendor/liburing/test/socket-rw-eagain.c +149 -0
- data/vendor/liburing/test/socket-rw-offset.c +149 -0
- data/vendor/liburing/test/socket-rw.c +137 -0
- data/vendor/liburing/test/socket.c +408 -0
- data/vendor/liburing/test/splice.c +512 -0
- data/vendor/liburing/test/sq-full-cpp.cc +45 -0
- data/vendor/liburing/test/sq-full.c +45 -0
- data/vendor/liburing/test/sq-poll-dup.c +211 -0
- data/vendor/liburing/test/sq-poll-kthread.c +169 -0
- data/vendor/liburing/test/sq-poll-share.c +138 -0
- data/vendor/liburing/test/sq-space_left.c +159 -0
- data/vendor/liburing/test/sqpoll-disable-exit.c +196 -0
- data/vendor/liburing/test/sqpoll-exec.c +132 -0
- data/vendor/liburing/test/sqpoll-exit-hang.c +78 -0
- data/vendor/liburing/test/sqpoll-sleep.c +69 -0
- data/vendor/liburing/test/statx.c +172 -0
- data/vendor/liburing/test/stdout.c +232 -0
- data/vendor/liburing/test/submit-and-wait.c +108 -0
- data/vendor/liburing/test/submit-link-fail.c +156 -0
- data/vendor/liburing/test/submit-reuse.c +237 -0
- data/vendor/liburing/test/symlink.c +117 -0
- data/vendor/liburing/test/sync-cancel.c +235 -0
- data/vendor/liburing/test/teardowns.c +58 -0
- data/vendor/liburing/test/test.h +36 -0
- data/vendor/liburing/test/thread-exit.c +143 -0
- data/vendor/liburing/test/timeout-new.c +256 -0
- data/vendor/liburing/test/timeout.c +1798 -0
- data/vendor/liburing/test/truncate.c +186 -0
- data/vendor/liburing/test/tty-write-dpoll.c +60 -0
- data/vendor/liburing/test/unlink.c +112 -0
- data/vendor/liburing/test/version.c +25 -0
- data/vendor/liburing/test/wait-timeout.c +287 -0
- data/vendor/liburing/test/waitid.c +373 -0
- data/vendor/liburing/test/wakeup-hang.c +162 -0
- data/vendor/liburing/test/wq-aff.c +146 -0
- data/vendor/liburing/test/xattr.c +442 -0
- metadata +412 -0
data/vendor/liburing/src/include/liburing.h @@ -0,0 +1,1602 @@
/* SPDX-License-Identifier: MIT */
#ifndef LIB_URING_H
#define LIB_URING_H

#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <sched.h>
#include <linux/swab.h>
#include <sys/wait.h>
#include "liburing/compat.h"
#include "liburing/io_uring.h"
#include "liburing/io_uring_version.h"
#include "liburing/barrier.h"

#ifndef uring_unlikely
#define uring_unlikely(cond) __builtin_expect(!!(cond), 0)
#endif

#ifndef uring_likely
#define uring_likely(cond) __builtin_expect(!!(cond), 1)
#endif

#ifndef IOURINGINLINE
#define IOURINGINLINE static inline
#endif

#ifdef __alpha__
/*
 * alpha and mips are the exceptions, all other architectures have
 * common numbers for new system calls.
 */
#ifndef __NR_io_uring_setup
#define __NR_io_uring_setup 535
#endif
#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter 536
#endif
#ifndef __NR_io_uring_register
#define __NR_io_uring_register 537
#endif
#elif defined __mips__
#ifndef __NR_io_uring_setup
#define __NR_io_uring_setup (__NR_Linux + 425)
#endif
#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter (__NR_Linux + 426)
#endif
#ifndef __NR_io_uring_register
#define __NR_io_uring_register (__NR_Linux + 427)
#endif
#else /* !__alpha__ and !__mips__ */
#ifndef __NR_io_uring_setup
#define __NR_io_uring_setup 425
#endif
#ifndef __NR_io_uring_enter
#define __NR_io_uring_enter 426
#endif
#ifndef __NR_io_uring_register
#define __NR_io_uring_register 427
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Library interface to io_uring
 */
struct io_uring_sq {
        unsigned *khead;
        unsigned *ktail;
        // Deprecated: use `ring_mask` instead of `*kring_mask`
        unsigned *kring_mask;
        // Deprecated: use `ring_entries` instead of `*kring_entries`
        unsigned *kring_entries;
        unsigned *kflags;
        unsigned *kdropped;
        unsigned *array;
        struct io_uring_sqe *sqes;

        unsigned sqe_head;
        unsigned sqe_tail;

        size_t ring_sz;
        void *ring_ptr;

        unsigned ring_mask;
        unsigned ring_entries;

        unsigned pad[2];
};

struct io_uring_cq {
        unsigned *khead;
        unsigned *ktail;
        // Deprecated: use `ring_mask` instead of `*kring_mask`
        unsigned *kring_mask;
        // Deprecated: use `ring_entries` instead of `*kring_entries`
        unsigned *kring_entries;
        unsigned *kflags;
        unsigned *koverflow;
        struct io_uring_cqe *cqes;

        size_t ring_sz;
        void *ring_ptr;

        unsigned ring_mask;
        unsigned ring_entries;

        unsigned pad[2];
};

struct io_uring {
        struct io_uring_sq sq;
        struct io_uring_cq cq;
        unsigned flags;
        int ring_fd;

        unsigned features;
        int enter_ring_fd;
        __u8 int_flags;
        __u8 pad[3];
        unsigned pad2;
};

/*
 * Library interface
 */

/*
 * return an allocated io_uring_probe structure, or NULL if probe fails (for
 * example, if it is not available). The caller is responsible for freeing it
 */
struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring);
/* same as io_uring_get_probe_ring, but takes care of ring init and teardown */
struct io_uring_probe *io_uring_get_probe(void);

/*
 * frees a probe allocated through io_uring_get_probe() or
 * io_uring_get_probe_ring()
 */
void io_uring_free_probe(struct io_uring_probe *probe);

IOURINGINLINE int io_uring_opcode_supported(const struct io_uring_probe *p,
                                            int op)
{
        if (op > p->last_op)
                return 0;
        return (p->ops[op].flags & IO_URING_OP_SUPPORTED) != 0;
}

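A short usage sketch (not part of the header): allocate a probe with the helpers above, test whether one opcode is supported, then free it. Error handling is kept minimal.

#include "liburing.h"
#include <stdio.h>

int main(void)
{
        /* allocates and fills a probe; NULL if probing is unavailable */
        struct io_uring_probe *probe = io_uring_get_probe();

        if (!probe)
                return 1;
        printf("IORING_OP_SEND_ZC supported: %d\n",
               io_uring_opcode_supported(probe, IORING_OP_SEND_ZC));
        io_uring_free_probe(probe);
        return 0;
}
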
int io_uring_queue_init_mem(unsigned entries, struct io_uring *ring,
                            struct io_uring_params *p,
                            void *buf, size_t buf_size);
int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
                               struct io_uring_params *p);
int io_uring_queue_init(unsigned entries, struct io_uring *ring,
                        unsigned flags);
int io_uring_queue_mmap(int fd, struct io_uring_params *p,
                        struct io_uring *ring);
int io_uring_ring_dontfork(struct io_uring *ring);
void io_uring_queue_exit(struct io_uring *ring);
unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
                                 struct io_uring_cqe **cqes, unsigned count);
int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
                       unsigned wait_nr, struct __kernel_timespec *ts,
                       sigset_t *sigmask);
int io_uring_wait_cqes_min_timeout(struct io_uring *ring,
                                   struct io_uring_cqe **cqe_ptr,
                                   unsigned wait_nr,
                                   struct __kernel_timespec *ts,
                                   unsigned int min_ts_usec,
                                   sigset_t *sigmask);
int io_uring_wait_cqe_timeout(struct io_uring *ring,
                              struct io_uring_cqe **cqe_ptr,
                              struct __kernel_timespec *ts);
int io_uring_submit(struct io_uring *ring);
int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr);
int io_uring_submit_and_wait_timeout(struct io_uring *ring,
                                     struct io_uring_cqe **cqe_ptr,
                                     unsigned wait_nr,
                                     struct __kernel_timespec *ts,
                                     sigset_t *sigmask);
int io_uring_submit_and_wait_min_timeout(struct io_uring *ring,
                                         struct io_uring_cqe **cqe_ptr,
                                         unsigned wait_nr,
                                         struct __kernel_timespec *ts,
                                         unsigned min_wait,
                                         sigset_t *sigmask);

int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
                              unsigned nr_iovecs);
int io_uring_register_buffers_tags(struct io_uring *ring,
                                   const struct iovec *iovecs,
                                   const __u64 *tags, unsigned nr);
int io_uring_register_buffers_sparse(struct io_uring *ring, unsigned nr);
int io_uring_register_buffers_update_tag(struct io_uring *ring,
                                         unsigned off,
                                         const struct iovec *iovecs,
                                         const __u64 *tags, unsigned nr);
int io_uring_unregister_buffers(struct io_uring *ring);

int io_uring_register_files(struct io_uring *ring, const int *files,
                            unsigned nr_files);
int io_uring_register_files_tags(struct io_uring *ring, const int *files,
                                 const __u64 *tags, unsigned nr);
int io_uring_register_files_sparse(struct io_uring *ring, unsigned nr);
int io_uring_register_files_update_tag(struct io_uring *ring, unsigned off,
                                       const int *files, const __u64 *tags,
                                       unsigned nr_files);

int io_uring_unregister_files(struct io_uring *ring);
int io_uring_register_files_update(struct io_uring *ring, unsigned off,
                                   const int *files, unsigned nr_files);
int io_uring_register_eventfd(struct io_uring *ring, int fd);
int io_uring_register_eventfd_async(struct io_uring *ring, int fd);
int io_uring_unregister_eventfd(struct io_uring *ring);
int io_uring_register_probe(struct io_uring *ring, struct io_uring_probe *p,
                            unsigned nr);
int io_uring_register_personality(struct io_uring *ring);
int io_uring_unregister_personality(struct io_uring *ring, int id);
int io_uring_register_restrictions(struct io_uring *ring,
                                   struct io_uring_restriction *res,
                                   unsigned int nr_res);
int io_uring_enable_rings(struct io_uring *ring);
int __io_uring_sqring_wait(struct io_uring *ring);
int io_uring_register_iowq_aff(struct io_uring *ring, size_t cpusz,
                               const cpu_set_t *mask);
int io_uring_unregister_iowq_aff(struct io_uring *ring);
int io_uring_register_iowq_max_workers(struct io_uring *ring,
                                       unsigned int *values);
int io_uring_register_ring_fd(struct io_uring *ring);
int io_uring_unregister_ring_fd(struct io_uring *ring);
int io_uring_close_ring_fd(struct io_uring *ring);
int io_uring_register_buf_ring(struct io_uring *ring,
                               struct io_uring_buf_reg *reg, unsigned int flags);
int io_uring_unregister_buf_ring(struct io_uring *ring, int bgid);
int io_uring_buf_ring_head(struct io_uring *ring, int buf_group, uint16_t *head);
int io_uring_register_sync_cancel(struct io_uring *ring,
                                  struct io_uring_sync_cancel_reg *reg);

int io_uring_register_file_alloc_range(struct io_uring *ring,
                                       unsigned off, unsigned len);

int io_uring_register_napi(struct io_uring *ring, struct io_uring_napi *napi);
int io_uring_unregister_napi(struct io_uring *ring, struct io_uring_napi *napi);

int io_uring_register_clock(struct io_uring *ring,
                            struct io_uring_clock_register *arg);

int io_uring_get_events(struct io_uring *ring);
int io_uring_submit_and_get_events(struct io_uring *ring);

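A minimal lifecycle sketch (not part of the header) tying the declarations above together: create a ring, submit a NOP, reap its completion, and tear the ring down. io_uring_get_sqe() and io_uring_wait_cqe() are defined further down in the full header.

struct io_uring ring;
struct io_uring_sqe *sqe;
struct io_uring_cqe *cqe;

if (io_uring_queue_init(8, &ring, 0) == 0) {    /* 8 SQ entries, no flags */
        sqe = io_uring_get_sqe(&ring);          /* NULL only if the SQ is full */
        io_uring_prep_nop(sqe);
        io_uring_submit(&ring);
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                /* cqe->res carries the operation's result, 0 for a NOP */
                io_uring_cqe_seen(&ring, cqe);
        }
        io_uring_queue_exit(&ring);
}
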
/*
 * io_uring syscalls.
 */
int io_uring_enter(unsigned int fd, unsigned int to_submit,
                   unsigned int min_complete, unsigned int flags, sigset_t *sig);
int io_uring_enter2(unsigned int fd, unsigned int to_submit,
                    unsigned int min_complete, unsigned int flags,
                    sigset_t *sig, size_t sz);
int io_uring_setup(unsigned int entries, struct io_uring_params *p);
int io_uring_register(unsigned int fd, unsigned int opcode, const void *arg,
                      unsigned int nr_args);

/*
 * Mapped buffer ring alloc/register + unregister/free helpers
 */
struct io_uring_buf_ring *io_uring_setup_buf_ring(struct io_uring *ring,
                                                  unsigned int nentries,
                                                  int bgid, unsigned int flags,
                                                  int *ret);
int io_uring_free_buf_ring(struct io_uring *ring, struct io_uring_buf_ring *br,
                           unsigned int nentries, int bgid);

/*
 * Helper for the peek/wait single cqe functions. Exported because of that,
 * but probably shouldn't be used directly in an application.
 */
int __io_uring_get_cqe(struct io_uring *ring,
                       struct io_uring_cqe **cqe_ptr, unsigned submit,
                       unsigned wait_nr, sigset_t *sigmask);

#define LIBURING_UDATA_TIMEOUT ((__u64) -1)

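A sketch of the mapped buffer-ring helpers above, assuming a ring named `ring` already exists. io_uring_buf_ring_add(), io_uring_buf_ring_advance() and io_uring_buf_ring_mask() are defined elsewhere in liburing, outside the portion shown here.

int ret;
char buf[4096];
/* allocate, mmap and register an 8-entry buffer ring for group 0 */
struct io_uring_buf_ring *br = io_uring_setup_buf_ring(&ring, 8, 0, 0, &ret);

if (br) {
        io_uring_buf_ring_add(br, buf, sizeof(buf), 0 /* bid */,
                              io_uring_buf_ring_mask(8), 0);
        io_uring_buf_ring_advance(br, 1);       /* make the buffer visible */
        /* ... issue buffer-select requests against group 0 ... */
        io_uring_free_buf_ring(&ring, br, 8, 0);
}
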
/*
 * Calculates the step size for CQE iteration.
 * For standard CQEs it's 1, for big CQEs it's 2.
 */
#define io_uring_cqe_shift(ring) \
        (!!((ring)->flags & IORING_SETUP_CQE32))

#define io_uring_cqe_index(ring,ptr,mask) \
        (((ptr) & (mask)) << io_uring_cqe_shift(ring))

#define io_uring_for_each_cqe(ring, head, cqe) \
        /* \
         * io_uring_smp_load_acquire() enforces the order of tail \
         * and CQE reads. \
         */ \
        for (head = *(ring)->cq.khead; \
             (cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ? \
                &(ring)->cq.cqes[io_uring_cqe_index(ring, head, (ring)->cq.ring_mask)] : NULL)); \
             head++)

/*
 * Must be called after io_uring_for_each_cqe()
 */
IOURINGINLINE void io_uring_cq_advance(struct io_uring *ring, unsigned nr)
{
        if (nr) {
                struct io_uring_cq *cq = &ring->cq;

                /*
                 * Ensure that the kernel only sees the new value of the head
                 * index after the CQEs have been read.
                 */
                io_uring_smp_store_release(cq->khead, *cq->khead + nr);
        }
}

/*
 * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
 * been processed by the application.
 */
IOURINGINLINE void io_uring_cqe_seen(struct io_uring *ring,
                                     struct io_uring_cqe *cqe)
{
        if (cqe)
                io_uring_cqe_seen_is_not_recursive: io_uring_cq_advance(ring, 1);
}

341
|
+
/*
|
342
|
+
* Command prep helpers
|
343
|
+
*/
|
344
|
+
|
345
|
+
/*
|
346
|
+
* Associate pointer @data with the sqe, for later retrieval from the cqe
|
347
|
+
* at command completion time with io_uring_cqe_get_data().
|
348
|
+
*/
|
349
|
+
IOURINGINLINE void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
|
350
|
+
{
|
351
|
+
sqe->user_data = (unsigned long) data;
|
352
|
+
}
|
353
|
+
|
354
|
+
IOURINGINLINE void *io_uring_cqe_get_data(const struct io_uring_cqe *cqe)
|
355
|
+
{
|
356
|
+
return (void *) (uintptr_t) cqe->user_data;
|
357
|
+
}
|
358
|
+
|
359
|
+
/*
|
360
|
+
* Assign a 64-bit value to this sqe, which can get retrieved at completion
|
361
|
+
* time with io_uring_cqe_get_data64. Just like the non-64 variants, except
|
362
|
+
* these store a 64-bit type rather than a data pointer.
|
363
|
+
*/
|
364
|
+
IOURINGINLINE void io_uring_sqe_set_data64(struct io_uring_sqe *sqe,
|
365
|
+
__u64 data)
|
366
|
+
{
|
367
|
+
sqe->user_data = data;
|
368
|
+
}
|
369
|
+
|
370
|
+
IOURINGINLINE __u64 io_uring_cqe_get_data64(const struct io_uring_cqe *cqe)
|
371
|
+
{
|
372
|
+
return cqe->user_data;
|
373
|
+
}
|
374
|
+
|
375
|
+
/*
|
376
|
+
* Tell the app the have the 64-bit variants of the get/set userdata
|
377
|
+
*/
|
378
|
+
#define LIBURING_HAVE_DATA64
|
379
|
+
|
380
|
+
IOURINGINLINE void io_uring_sqe_set_flags(struct io_uring_sqe *sqe,
|
381
|
+
unsigned flags)
|
382
|
+
{
|
383
|
+
sqe->flags = (__u8) flags;
|
384
|
+
}
|
385
|
+
|
386
|
+
IOURINGINLINE void __io_uring_set_target_fixed_file(struct io_uring_sqe *sqe,
|
387
|
+
unsigned int file_index)
|
388
|
+
{
|
389
|
+
/* 0 means no fixed files, indexes should be encoded as "index + 1" */
|
390
|
+
sqe->file_index = file_index + 1;
|
391
|
+
}
|
392
|
+
|
393
|
+
IOURINGINLINE void io_uring_initialize_sqe(struct io_uring_sqe *sqe)
|
394
|
+
{
|
395
|
+
sqe->flags = 0;
|
396
|
+
sqe->ioprio = 0;
|
397
|
+
sqe->rw_flags = 0;
|
398
|
+
sqe->buf_index = 0;
|
399
|
+
sqe->personality = 0;
|
400
|
+
sqe->file_index = 0;
|
401
|
+
sqe->addr3 = 0;
|
402
|
+
sqe->__pad2[0] = 0;
|
403
|
+
}
|
404
|
+
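A sketch of the user_data round trip described above: attach a request pointer at submission, recover it at completion. The my_request type is hypothetical per-operation state; `ring` and `cqe` are assumed.

struct my_request { int fd; char *buf; };       /* hypothetical */

struct my_request *req = malloc(sizeof(*req));
struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

io_uring_prep_nop(sqe);
io_uring_sqe_set_data(sqe, req);        /* stored in sqe->user_data */

/* ... later, after waiting for the completion ... */
struct my_request *done = io_uring_cqe_get_data(cqe);
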
IOURINGINLINE void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
                                    const void *addr, unsigned len,
                                    __u64 offset)
{
        sqe->opcode = (__u8) op;
        sqe->fd = fd;
        sqe->off = offset;
        sqe->addr = (unsigned long) addr;
        sqe->len = len;
}

/*
 * io_uring_prep_splice() - Either @fd_in or @fd_out must be a pipe.
 *
 * - If @fd_in refers to a pipe, @off_in is ignored and must be set to -1.
 *
 * - If @fd_in does not refer to a pipe and @off_in is -1, then @nbytes are read
 *   from @fd_in starting from the file offset, which is incremented by the
 *   number of bytes read.
 *
 * - If @fd_in does not refer to a pipe and @off_in is not -1, then the starting
 *   offset of @fd_in will be @off_in.
 *
 * This splice operation can be used to implement sendfile by splicing to an
 * intermediate pipe first, then splice to the final destination.
 * In fact, the implementation of sendfile in kernel uses splice internally.
 *
 * NOTE that even if fd_in or fd_out refers to a pipe, the splice operation
 * can still fail with EINVAL if one of the fds doesn't explicitly support
 * splice operations, e.g. reading from a terminal is unsupported from kernel
 * 5.7 to 5.11. Check issue #291 for more information.
 */
IOURINGINLINE void io_uring_prep_splice(struct io_uring_sqe *sqe,
                                        int fd_in, int64_t off_in,
                                        int fd_out, int64_t off_out,
                                        unsigned int nbytes,
                                        unsigned int splice_flags)
{
        io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, NULL, nbytes,
                         (__u64) off_out);
        sqe->splice_off_in = (__u64) off_in;
        sqe->splice_fd_in = fd_in;
        sqe->splice_flags = splice_flags;
}

IOURINGINLINE void io_uring_prep_tee(struct io_uring_sqe *sqe,
                                     int fd_in, int fd_out,
                                     unsigned int nbytes,
                                     unsigned int splice_flags)
{
        io_uring_prep_rw(IORING_OP_TEE, sqe, fd_out, NULL, nbytes, 0);
        sqe->splice_off_in = 0;
        sqe->splice_fd_in = fd_in;
        sqe->splice_flags = splice_flags;
}

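A sketch of the sendfile pattern the comment above describes: splice file to pipe, then pipe to socket, linking the two SQEs so they run in order. `ring`, `infd`, `sockfd`, `pipefd` and `len` are assumed to exist.

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

/* read len bytes from the file's current offset into the pipe */
io_uring_prep_splice(sqe, infd, -1, pipefd[1], -1, len, 0);
sqe->flags |= IOSQE_IO_LINK;    /* run the next SQE only after this one */

sqe = io_uring_get_sqe(&ring);
/* then move those bytes from the pipe to the socket */
io_uring_prep_splice(sqe, pipefd[0], -1, sockfd, -1, len, 0);
io_uring_submit(&ring);
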
IOURINGINLINE void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
                                       const struct iovec *iovecs,
                                       unsigned nr_vecs, __u64 offset)
{
        io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}

IOURINGINLINE void io_uring_prep_readv2(struct io_uring_sqe *sqe, int fd,
                                        const struct iovec *iovecs,
                                        unsigned nr_vecs, __u64 offset,
                                        int flags)
{
        io_uring_prep_readv(sqe, fd, iovecs, nr_vecs, offset);
        sqe->rw_flags = flags;
}

IOURINGINLINE void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
                                            void *buf, unsigned nbytes,
                                            __u64 offset, int buf_index)
{
        io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
        sqe->buf_index = (__u16) buf_index;
}

IOURINGINLINE void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
                                        const struct iovec *iovecs,
                                        unsigned nr_vecs, __u64 offset)
{
        io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}

IOURINGINLINE void io_uring_prep_writev2(struct io_uring_sqe *sqe, int fd,
                                         const struct iovec *iovecs,
                                         unsigned nr_vecs, __u64 offset,
                                         int flags)
{
        io_uring_prep_writev(sqe, fd, iovecs, nr_vecs, offset);
        sqe->rw_flags = flags;
}

IOURINGINLINE void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
                                             const void *buf, unsigned nbytes,
                                             __u64 offset, int buf_index)
{
        io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
        sqe->buf_index = (__u16) buf_index;
}

IOURINGINLINE void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
                                         struct msghdr *msg, unsigned flags)
{
        io_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg, 1, 0);
        sqe->msg_flags = flags;
}

IOURINGINLINE void io_uring_prep_recvmsg_multishot(struct io_uring_sqe *sqe,
                                                   int fd, struct msghdr *msg,
                                                   unsigned flags)
{
        io_uring_prep_recvmsg(sqe, fd, msg, flags);
        sqe->ioprio |= IORING_RECV_MULTISHOT;
}

IOURINGINLINE void io_uring_prep_sendmsg(struct io_uring_sqe *sqe, int fd,
                                         const struct msghdr *msg,
                                         unsigned flags)
{
        io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg, 1, 0);
        sqe->msg_flags = flags;
}

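A sketch (not part of the header): pre-registering one buffer and reading into it with io_uring_prep_read_fixed(). The buf_index argument is the iovec's position in the registered array; io_uring_register_buffers() is declared earlier in this header. `ring` and `fd` are assumed.

char buf[4096];
struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

io_uring_register_buffers(&ring, &iov, 1);

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
io_uring_prep_read_fixed(sqe, fd, buf, sizeof(buf), 0 /* offset */,
                         0 /* buf_index */);
io_uring_submit(&ring);
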
IOURINGINLINE unsigned __io_uring_prep_poll_mask(unsigned poll_mask)
{
#if __BYTE_ORDER == __BIG_ENDIAN
        poll_mask = __swahw32(poll_mask);
#endif
        return poll_mask;
}

IOURINGINLINE void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
                                          unsigned poll_mask)
{
        io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
        sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
}

IOURINGINLINE void io_uring_prep_poll_multishot(struct io_uring_sqe *sqe,
                                                int fd, unsigned poll_mask)
{
        io_uring_prep_poll_add(sqe, fd, poll_mask);
        sqe->len = IORING_POLL_ADD_MULTI;
}

IOURINGINLINE void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
                                             __u64 user_data)
{
        io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, NULL, 0, 0);
        sqe->addr = user_data;
}

IOURINGINLINE void io_uring_prep_poll_update(struct io_uring_sqe *sqe,
                                             __u64 old_user_data,
                                             __u64 new_user_data,
                                             unsigned poll_mask, unsigned flags)
{
        io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, NULL, flags,
                         new_user_data);
        sqe->addr = old_user_data;
        sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
}

IOURINGINLINE void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
                                       unsigned fsync_flags)
{
        io_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, NULL, 0, 0);
        sqe->fsync_flags = fsync_flags;
}

IOURINGINLINE void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
        io_uring_prep_rw(IORING_OP_NOP, sqe, -1, NULL, 0, 0);
}

IOURINGINLINE void io_uring_prep_timeout(struct io_uring_sqe *sqe,
                                         struct __kernel_timespec *ts,
                                         unsigned count, unsigned flags)
{
        io_uring_prep_rw(IORING_OP_TIMEOUT, sqe, -1, ts, 1, count);
        sqe->timeout_flags = flags;
}

IOURINGINLINE void io_uring_prep_timeout_remove(struct io_uring_sqe *sqe,
                                                __u64 user_data, unsigned flags)
{
        io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, NULL, 0, 0);
        sqe->addr = user_data;
        sqe->timeout_flags = flags;
}

IOURINGINLINE void io_uring_prep_timeout_update(struct io_uring_sqe *sqe,
                                                struct __kernel_timespec *ts,
                                                __u64 user_data, unsigned flags)
{
        io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, NULL, 0,
                         (uintptr_t) ts);
        sqe->addr = user_data;
        sqe->timeout_flags = flags | IORING_TIMEOUT_UPDATE;
}

IOURINGINLINE void io_uring_prep_accept(struct io_uring_sqe *sqe, int fd,
                                        struct sockaddr *addr,
                                        socklen_t *addrlen, int flags)
{
        io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr, 0,
                         (__u64) (unsigned long) addrlen);
        sqe->accept_flags = (__u32) flags;
}

/* accept directly into the fixed file table */
IOURINGINLINE void io_uring_prep_accept_direct(struct io_uring_sqe *sqe, int fd,
                                               struct sockaddr *addr,
                                               socklen_t *addrlen, int flags,
                                               unsigned int file_index)
{
        io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
        /* offset by 1 for allocation */
        if (file_index == IORING_FILE_INDEX_ALLOC)
                file_index--;
        __io_uring_set_target_fixed_file(sqe, file_index);
}

IOURINGINLINE void io_uring_prep_multishot_accept(struct io_uring_sqe *sqe,
                                                  int fd, struct sockaddr *addr,
                                                  socklen_t *addrlen, int flags)
{
        io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
        sqe->ioprio |= IORING_ACCEPT_MULTISHOT;
}

/* multishot accept directly into the fixed file table */
IOURINGINLINE void io_uring_prep_multishot_accept_direct(struct io_uring_sqe *sqe,
                                                         int fd,
                                                         struct sockaddr *addr,
                                                         socklen_t *addrlen,
                                                         int flags)
{
        io_uring_prep_multishot_accept(sqe, fd, addr, addrlen, flags);
        __io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC - 1);
}

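A sketch: a multishot accept posts one CQE per incoming connection from a single SQE; resubmit when IORING_CQE_F_MORE is no longer set on a completion. `ring`, `listen_fd` and `cqe` are assumed.

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
io_uring_submit(&ring);

/* in the completion loop: */
if (cqe->res >= 0) {
        int conn_fd = cqe->res;         /* each CQE carries one accepted fd */
        if (!(cqe->flags & IORING_CQE_F_MORE)) {
                /* the multishot request terminated; arm a new one */
        }
}
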
IOURINGINLINE void io_uring_prep_cancel64(struct io_uring_sqe *sqe,
                                          __u64 user_data, int flags)
{
        io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, NULL, 0, 0);
        sqe->addr = user_data;
        sqe->cancel_flags = (__u32) flags;
}

IOURINGINLINE void io_uring_prep_cancel(struct io_uring_sqe *sqe,
                                        void *user_data, int flags)
{
        io_uring_prep_cancel64(sqe, (__u64) (uintptr_t) user_data, flags);
}

IOURINGINLINE void io_uring_prep_cancel_fd(struct io_uring_sqe *sqe, int fd,
                                           unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, fd, NULL, 0, 0);
        sqe->cancel_flags = (__u32) flags | IORING_ASYNC_CANCEL_FD;
}

IOURINGINLINE void io_uring_prep_link_timeout(struct io_uring_sqe *sqe,
                                              struct __kernel_timespec *ts,
                                              unsigned flags)
{
        io_uring_prep_rw(IORING_OP_LINK_TIMEOUT, sqe, -1, ts, 1, 0);
        sqe->timeout_flags = flags;
}

IOURINGINLINE void io_uring_prep_connect(struct io_uring_sqe *sqe, int fd,
                                         const struct sockaddr *addr,
                                         socklen_t addrlen)
{
        io_uring_prep_rw(IORING_OP_CONNECT, sqe, fd, addr, 0, addrlen);
}

IOURINGINLINE void io_uring_prep_bind(struct io_uring_sqe *sqe, int fd,
                                      struct sockaddr *addr,
                                      socklen_t addrlen)
{
        io_uring_prep_rw(IORING_OP_BIND, sqe, fd, addr, 0, addrlen);
}

IOURINGINLINE void io_uring_prep_listen(struct io_uring_sqe *sqe, int fd,
                                        int backlog)
{
        io_uring_prep_rw(IORING_OP_LISTEN, sqe, fd, 0, backlog, 0);
}

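A sketch of bounding an operation with io_uring_prep_link_timeout(): the timeout SQE must directly follow the operation it guards, joined with IOSQE_IO_LINK; if the timeout fires first, the guarded operation completes with -ECANCELED. `ring`, `sockfd` and `addr` are assumed.

struct __kernel_timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

io_uring_prep_connect(sqe, sockfd, (struct sockaddr *) &addr, sizeof(addr));
sqe->flags |= IOSQE_IO_LINK;

sqe = io_uring_get_sqe(&ring);
io_uring_prep_link_timeout(sqe, &ts, 0);
io_uring_submit(&ring);
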
IOURINGINLINE void io_uring_prep_files_update(struct io_uring_sqe *sqe,
                                              int *fds, unsigned nr_fds,
                                              int offset)
{
        io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds, nr_fds,
                         (__u64) offset);
}

IOURINGINLINE void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
                                           int mode, __u64 offset, __u64 len)
{
        io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd,
                         0, (unsigned int) mode, (__u64) offset);
        sqe->addr = (__u64) len;
}

IOURINGINLINE void io_uring_prep_openat(struct io_uring_sqe *sqe, int dfd,
                                        const char *path, int flags,
                                        mode_t mode)
{
        io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path, mode, 0);
        sqe->open_flags = (__u32) flags;
}

/* open directly into the fixed file table */
IOURINGINLINE void io_uring_prep_openat_direct(struct io_uring_sqe *sqe,
                                               int dfd, const char *path,
                                               int flags, mode_t mode,
                                               unsigned file_index)
{
        io_uring_prep_openat(sqe, dfd, path, flags, mode);
        /* offset by 1 for allocation */
        if (file_index == IORING_FILE_INDEX_ALLOC)
                file_index--;
        __io_uring_set_target_fixed_file(sqe, file_index);
}

IOURINGINLINE void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
{
        io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, NULL, 0, 0);
}

IOURINGINLINE void io_uring_prep_close_direct(struct io_uring_sqe *sqe,
                                              unsigned file_index)
{
        io_uring_prep_close(sqe, 0);
        __io_uring_set_target_fixed_file(sqe, file_index);
}

IOURINGINLINE void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
                                      void *buf, unsigned nbytes, __u64 offset)
{
        io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
}

IOURINGINLINE void io_uring_prep_read_multishot(struct io_uring_sqe *sqe,
                                                int fd, unsigned nbytes,
                                                __u64 offset, int buf_group)
{
        io_uring_prep_rw(IORING_OP_READ_MULTISHOT, sqe, fd, NULL, nbytes,
                         offset);
        sqe->buf_group = buf_group;
        sqe->flags = IOSQE_BUFFER_SELECT;
}

IOURINGINLINE void io_uring_prep_write(struct io_uring_sqe *sqe, int fd,
                                       const void *buf, unsigned nbytes,
                                       __u64 offset)
{
        io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
}

struct statx;
IOURINGINLINE void io_uring_prep_statx(struct io_uring_sqe *sqe, int dfd,
                                       const char *path, int flags,
                                       unsigned mask, struct statx *statxbuf)
{
        io_uring_prep_rw(IORING_OP_STATX, sqe, dfd, path, mask,
                         (__u64) (unsigned long) statxbuf);
        sqe->statx_flags = (__u32) flags;
}

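A sketch of opening straight into the fixed file table, as the *_direct helpers above do. With IORING_FILE_INDEX_ALLOC the kernel picks a free slot and, on success, returns the allocated index in cqe->res; a sparse table must be registered first. `ring` is assumed and "data.bin" is a hypothetical path.

io_uring_register_files_sparse(&ring, 16);      /* 16 empty fixed-file slots */

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
io_uring_prep_openat_direct(sqe, AT_FDCWD, "data.bin", O_RDONLY, 0,
                            IORING_FILE_INDEX_ALLOC);
io_uring_submit(&ring);
/* on success, cqe->res holds the allocated fixed-file index */
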
IOURINGINLINE void io_uring_prep_fadvise(struct io_uring_sqe *sqe, int fd,
                                         __u64 offset, __u32 len, int advice)
{
        io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, (__u32) len, offset);
        sqe->fadvise_advice = (__u32) advice;
}

IOURINGINLINE void io_uring_prep_madvise(struct io_uring_sqe *sqe, void *addr,
                                         __u32 length, int advice)
{
        io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, (__u32) length, 0);
        sqe->fadvise_advice = (__u32) advice;
}

IOURINGINLINE void io_uring_prep_fadvise64(struct io_uring_sqe *sqe, int fd,
                                           __u64 offset, off_t len, int advice)
{
        io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, 0, offset);
        sqe->addr = len;
        sqe->fadvise_advice = (__u32) advice;
}

IOURINGINLINE void io_uring_prep_madvise64(struct io_uring_sqe *sqe, void *addr,
                                           off_t length, int advice)
{
        io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, 0, length);
        sqe->fadvise_advice = (__u32) advice;
}

IOURINGINLINE void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
                                      const void *buf, size_t len, int flags)
{
        io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, (__u32) len, 0);
        sqe->msg_flags = (__u32) flags;
}

IOURINGINLINE void io_uring_prep_send_bundle(struct io_uring_sqe *sqe,
                                             int sockfd, size_t len, int flags)
{
        io_uring_prep_send(sqe, sockfd, NULL, len, flags);
        sqe->ioprio |= IORING_RECVSEND_BUNDLE;
}

IOURINGINLINE void io_uring_prep_send_set_addr(struct io_uring_sqe *sqe,
                                               const struct sockaddr *dest_addr,
                                               __u16 addr_len)
{
        sqe->addr2 = (unsigned long)(const void *)dest_addr;
        sqe->addr_len = addr_len;
}

IOURINGINLINE void io_uring_prep_sendto(struct io_uring_sqe *sqe, int sockfd,
                                        const void *buf, size_t len, int flags,
                                        const struct sockaddr *addr,
                                        socklen_t addrlen)
{
        io_uring_prep_send(sqe, sockfd, buf, len, flags);
        io_uring_prep_send_set_addr(sqe, addr, addrlen);
}

IOURINGINLINE void io_uring_prep_send_zc(struct io_uring_sqe *sqe, int sockfd,
                                         const void *buf, size_t len, int flags,
                                         unsigned zc_flags)
{
        io_uring_prep_rw(IORING_OP_SEND_ZC, sqe, sockfd, buf, (__u32) len, 0);
        sqe->msg_flags = (__u32) flags;
        sqe->ioprio = zc_flags;
}

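A sketch of zero-copy send completion handling: a send_zc request normally produces two CQEs, the usual result CQE with IORING_CQE_F_MORE set, then a notification CQE flagged IORING_CQE_F_NOTIF once the kernel is done with the buffer, which must stay stable until that point. `ring`, `sockfd`, `buf`, `len` and `cqe` are assumed.

struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
io_uring_submit(&ring);

/* in the completion loop: */
if (cqe->flags & IORING_CQE_F_NOTIF) {
        /* the kernel no longer references buf; safe to reuse or free */
}
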
IOURINGINLINE void io_uring_prep_send_zc_fixed(struct io_uring_sqe *sqe,
                                               int sockfd, const void *buf,
                                               size_t len, int flags,
                                               unsigned zc_flags,
                                               unsigned buf_index)
{
        io_uring_prep_send_zc(sqe, sockfd, buf, len, flags, zc_flags);
        sqe->ioprio |= IORING_RECVSEND_FIXED_BUF;
        sqe->buf_index = buf_index;
}

IOURINGINLINE void io_uring_prep_sendmsg_zc(struct io_uring_sqe *sqe, int fd,
                                            const struct msghdr *msg,
                                            unsigned flags)
{
        io_uring_prep_sendmsg(sqe, fd, msg, flags);
        sqe->opcode = IORING_OP_SENDMSG_ZC;
}

IOURINGINLINE void io_uring_prep_recv(struct io_uring_sqe *sqe, int sockfd,
                                      void *buf, size_t len, int flags)
{
        io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, (__u32) len, 0);
        sqe->msg_flags = (__u32) flags;
}

IOURINGINLINE void io_uring_prep_recv_multishot(struct io_uring_sqe *sqe,
                                                int sockfd, void *buf,
                                                size_t len, int flags)
{
        io_uring_prep_recv(sqe, sockfd, buf, len, flags);
        sqe->ioprio |= IORING_RECV_MULTISHOT;
}

IOURINGINLINE struct io_uring_recvmsg_out *
io_uring_recvmsg_validate(void *buf, int buf_len, struct msghdr *msgh)
{
        unsigned long header = msgh->msg_controllen + msgh->msg_namelen +
                                sizeof(struct io_uring_recvmsg_out);
        if (buf_len < 0 || (unsigned long)buf_len < header)
                return NULL;
        return (struct io_uring_recvmsg_out *)buf;
}

IOURINGINLINE void *io_uring_recvmsg_name(struct io_uring_recvmsg_out *o)
{
        return (void *) &o[1];
}

IOURINGINLINE struct cmsghdr *
io_uring_recvmsg_cmsg_firsthdr(struct io_uring_recvmsg_out *o,
                               struct msghdr *msgh)
{
        if (o->controllen < sizeof(struct cmsghdr))
                return NULL;

        return (struct cmsghdr *)((unsigned char *) io_uring_recvmsg_name(o) +
                        msgh->msg_namelen);
}

IOURINGINLINE struct cmsghdr *
io_uring_recvmsg_cmsg_nexthdr(struct io_uring_recvmsg_out *o, struct msghdr *msgh,
                              struct cmsghdr *cmsg)
{
        unsigned char *end;

        if (cmsg->cmsg_len < sizeof(struct cmsghdr))
                return NULL;
        end = (unsigned char *) io_uring_recvmsg_cmsg_firsthdr(o, msgh) +
                o->controllen;
        cmsg = (struct cmsghdr *)((unsigned char *) cmsg +
                        CMSG_ALIGN(cmsg->cmsg_len));

        if ((unsigned char *) (cmsg + 1) > end)
                return NULL;
        if (((unsigned char *) cmsg) + CMSG_ALIGN(cmsg->cmsg_len) > end)
                return NULL;

        return cmsg;
}

IOURINGINLINE void *io_uring_recvmsg_payload(struct io_uring_recvmsg_out *o,
                                             struct msghdr *msgh)
{
        return (void *)((unsigned char *)io_uring_recvmsg_name(o) +
                        msgh->msg_namelen + msgh->msg_controllen);
}

IOURINGINLINE unsigned int
io_uring_recvmsg_payload_length(struct io_uring_recvmsg_out *o,
                                int buf_len, struct msghdr *msgh)
{
        unsigned long payload_start, payload_end;

        payload_start = (unsigned long) io_uring_recvmsg_payload(o, msgh);
        payload_end = (unsigned long) o + buf_len;
        return (unsigned int) (payload_end - payload_start);
}

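A sketch of parsing one multishot-recvmsg completion with the helpers above. `buf` is assumed to be the provided buffer the CQE selected and `msg` the msghdr the request was prepared with.

struct io_uring_recvmsg_out *out;

out = io_uring_recvmsg_validate(buf, cqe->res, &msg);
if (out) {
        void *payload = io_uring_recvmsg_payload(out, &msg);
        unsigned int plen = io_uring_recvmsg_payload_length(out, cqe->res, &msg);
        struct sockaddr_storage *from = io_uring_recvmsg_name(out);
        /* plen bytes of datagram data from `from` start at `payload` */
}
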
IOURINGINLINE void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
                                         const char *path, struct open_how *how)
{
        io_uring_prep_rw(IORING_OP_OPENAT2, sqe, dfd, path, sizeof(*how),
                         (uint64_t) (uintptr_t) how);
}

/* open directly into the fixed file table */
IOURINGINLINE void io_uring_prep_openat2_direct(struct io_uring_sqe *sqe,
                                                int dfd, const char *path,
                                                struct open_how *how,
                                                unsigned file_index)
{
        io_uring_prep_openat2(sqe, dfd, path, how);
        /* offset by 1 for allocation */
        if (file_index == IORING_FILE_INDEX_ALLOC)
                file_index--;
        __io_uring_set_target_fixed_file(sqe, file_index);
}

struct epoll_event;
IOURINGINLINE void io_uring_prep_epoll_ctl(struct io_uring_sqe *sqe, int epfd,
                                           int fd, int op,
                                           struct epoll_event *ev)
{
        io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev,
                         (__u32) op, (__u32) fd);
}

IOURINGINLINE void io_uring_prep_provide_buffers(struct io_uring_sqe *sqe,
                                                 void *addr, int len, int nr,
                                                 int bgid, int bid)
{
        io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, (__u32) len,
                         (__u64) bid);
        sqe->buf_group = (__u16) bgid;
}

IOURINGINLINE void io_uring_prep_remove_buffers(struct io_uring_sqe *sqe,
                                                int nr, int bgid)
{
        io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, NULL, 0, 0);
        sqe->buf_group = (__u16) bgid;
}

IOURINGINLINE void io_uring_prep_shutdown(struct io_uring_sqe *sqe, int fd,
                                          int how)
{
        io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, NULL, (__u32) how, 0);
}

IOURINGINLINE void io_uring_prep_unlinkat(struct io_uring_sqe *sqe, int dfd,
                                          const char *path, int flags)
{
        io_uring_prep_rw(IORING_OP_UNLINKAT, sqe, dfd, path, 0, 0);
        sqe->unlink_flags = (__u32) flags;
}

IOURINGINLINE void io_uring_prep_unlink(struct io_uring_sqe *sqe,
                                        const char *path, int flags)
{
        io_uring_prep_unlinkat(sqe, AT_FDCWD, path, flags);
}

IOURINGINLINE void io_uring_prep_renameat(struct io_uring_sqe *sqe, int olddfd,
                                          const char *oldpath, int newdfd,
                                          const char *newpath, unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_RENAMEAT, sqe, olddfd, oldpath,
                         (__u32) newdfd,
                         (uint64_t) (uintptr_t) newpath);
        sqe->rename_flags = (__u32) flags;
}

IOURINGINLINE void io_uring_prep_rename(struct io_uring_sqe *sqe,
                                        const char *oldpath,
                                        const char *newpath)
{
        io_uring_prep_renameat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, 0);
}

|
1031
|
+
IOURINGINLINE void io_uring_prep_sync_file_range(struct io_uring_sqe *sqe,
|
1032
|
+
int fd, unsigned len,
|
1033
|
+
__u64 offset, int flags)
|
1034
|
+
{
|
1035
|
+
io_uring_prep_rw(IORING_OP_SYNC_FILE_RANGE, sqe, fd, NULL, len, offset);
|
1036
|
+
sqe->sync_range_flags = (__u32) flags;
|
1037
|
+
}
|
1038
|
+
|
1039
|
+
IOURINGINLINE void io_uring_prep_mkdirat(struct io_uring_sqe *sqe, int dfd,
|
1040
|
+
const char *path, mode_t mode)
|
1041
|
+
{
|
1042
|
+
io_uring_prep_rw(IORING_OP_MKDIRAT, sqe, dfd, path, mode, 0);
|
1043
|
+
}
|
1044
|
+
|
1045
|
+
IOURINGINLINE void io_uring_prep_mkdir(struct io_uring_sqe *sqe,
|
1046
|
+
const char *path, mode_t mode)
|
1047
|
+
{
|
1048
|
+
io_uring_prep_mkdirat(sqe, AT_FDCWD, path, mode);
|
1049
|
+
}
|
1050
|
+
|
1051
|
+
IOURINGINLINE void io_uring_prep_symlinkat(struct io_uring_sqe *sqe,
|
1052
|
+
const char *target, int newdirfd,
|
1053
|
+
const char *linkpath)
|
1054
|
+
{
|
1055
|
+
io_uring_prep_rw(IORING_OP_SYMLINKAT, sqe, newdirfd, target, 0,
|
1056
|
+
(uint64_t) (uintptr_t) linkpath);
|
1057
|
+
}
|
1058
|
+
|
1059
|
+
IOURINGINLINE void io_uring_prep_symlink(struct io_uring_sqe *sqe,
|
1060
|
+
const char *target,
|
1061
|
+
const char *linkpath)
|
1062
|
+
{
|
1063
|
+
io_uring_prep_symlinkat(sqe, target, AT_FDCWD, linkpath);
|
1064
|
+
}
|
1065
|
+
|
1066
|
+
IOURINGINLINE void io_uring_prep_linkat(struct io_uring_sqe *sqe, int olddfd,
|
1067
|
+
const char *oldpath, int newdfd,
|
1068
|
+
const char *newpath, int flags)
|
1069
|
+
{
|
1070
|
+
io_uring_prep_rw(IORING_OP_LINKAT, sqe, olddfd, oldpath, (__u32) newdfd,
|
1071
|
+
(uint64_t) (uintptr_t) newpath);
|
1072
|
+
sqe->hardlink_flags = (__u32) flags;
|
1073
|
+
}
|
1074
|
+
|
1075
|
+
IOURINGINLINE void io_uring_prep_link(struct io_uring_sqe *sqe,
|
1076
|
+
const char *oldpath, const char *newpath,
|
1077
|
+
int flags)
|
1078
|
+
{
|
1079
|
+
io_uring_prep_linkat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, flags);
|
1080
|
+
}

IOURINGINLINE void io_uring_prep_msg_ring_cqe_flags(struct io_uring_sqe *sqe,
                                                    int fd, unsigned int len, __u64 data,
                                                    unsigned int flags, unsigned int cqe_flags)
{
        io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, NULL, len, data);
        sqe->msg_ring_flags = IORING_MSG_RING_FLAGS_PASS | flags;
        sqe->file_index = cqe_flags;
}

IOURINGINLINE void io_uring_prep_msg_ring(struct io_uring_sqe *sqe, int fd,
                                          unsigned int len, __u64 data,
                                          unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, NULL, len, data);
        sqe->msg_ring_flags = flags;
}

IOURINGINLINE void io_uring_prep_msg_ring_fd(struct io_uring_sqe *sqe, int fd,
                                             int source_fd, int target_fd,
                                             __u64 data, unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd,
                         (void *) (uintptr_t) IORING_MSG_SEND_FD, 0, data);
        sqe->addr3 = source_fd;
        /* offset by 1 for allocation */
        if ((unsigned int) target_fd == IORING_FILE_INDEX_ALLOC)
                target_fd--;
        __io_uring_set_target_fixed_file(sqe, target_fd);
        sqe->msg_ring_flags = flags;
}

IOURINGINLINE void io_uring_prep_msg_ring_fd_alloc(struct io_uring_sqe *sqe,
                                                   int fd, int source_fd,
                                                   __u64 data, unsigned int flags)
{
        io_uring_prep_msg_ring_fd(sqe, fd, source_fd, IORING_FILE_INDEX_ALLOC,
                                  data, flags);
}
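
/*
 * Illustrative sketch, not part of liburing: application-side code posting a
 * wakeup message from one ring to another (e.g. across threads). The target
 * ring receives a CQE with res == 0x42 (the len argument) and
 * user_data == 0xcafe, without any I/O taking place; the values here are
 * arbitrary placeholders.
 */
static inline int example_msg_ring(struct io_uring *src,
                                   struct io_uring *target)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(src);

        if (!sqe)
                return -EBUSY;
        /* target->ring_fd identifies the receiving ring */
        io_uring_prep_msg_ring(sqe, target->ring_fd, 0x42, 0xcafe, 0);
        return io_uring_submit(src);
}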

IOURINGINLINE void io_uring_prep_getxattr(struct io_uring_sqe *sqe,
                                          const char *name, char *value,
                                          const char *path, unsigned int len)
{
        io_uring_prep_rw(IORING_OP_GETXATTR, sqe, 0, name, len,
                         (__u64) (uintptr_t) value);
        sqe->addr3 = (__u64) (uintptr_t) path;
        sqe->xattr_flags = 0;
}

IOURINGINLINE void io_uring_prep_setxattr(struct io_uring_sqe *sqe,
                                          const char *name, const char *value,
                                          const char *path, int flags,
                                          unsigned int len)
{
        io_uring_prep_rw(IORING_OP_SETXATTR, sqe, 0, name, len,
                         (__u64) (uintptr_t) value);
        sqe->addr3 = (__u64) (uintptr_t) path;
        sqe->xattr_flags = flags;
}

IOURINGINLINE void io_uring_prep_fgetxattr(struct io_uring_sqe *sqe,
                                           int fd, const char *name,
                                           char *value, unsigned int len)
{
        io_uring_prep_rw(IORING_OP_FGETXATTR, sqe, fd, name, len,
                         (__u64) (uintptr_t) value);
        sqe->xattr_flags = 0;
}

IOURINGINLINE void io_uring_prep_fsetxattr(struct io_uring_sqe *sqe, int fd,
                                           const char *name, const char *value,
                                           int flags, unsigned int len)
{
        io_uring_prep_rw(IORING_OP_FSETXATTR, sqe, fd, name, len,
                         (__u64) (uintptr_t) value);
        sqe->xattr_flags = flags;
}
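
/*
 * Illustrative sketch, not part of liburing: application-side code fetching
 * an extended attribute asynchronously. The "user.comment" attribute name is
 * a placeholder; on success cqe->res holds the value length.
 */
static inline int example_getxattr(struct io_uring *ring, const char *path,
                                   char *buf, unsigned int buflen)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        io_uring_prep_getxattr(sqe, "user.comment", buf, path, buflen);
        ret = io_uring_submit(ring);
        if (ret < 0)
                return ret;
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res; /* length of the value, or -errno */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}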

IOURINGINLINE void io_uring_prep_socket(struct io_uring_sqe *sqe, int domain,
                                        int type, int protocol,
                                        unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
        sqe->rw_flags = flags;
}

IOURINGINLINE void io_uring_prep_socket_direct(struct io_uring_sqe *sqe,
                                               int domain, int type,
                                               int protocol,
                                               unsigned file_index,
                                               unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
        sqe->rw_flags = flags;
        /* offset by 1 for allocation */
        if (file_index == IORING_FILE_INDEX_ALLOC)
                file_index--;
        __io_uring_set_target_fixed_file(sqe, file_index);
}

IOURINGINLINE void io_uring_prep_socket_direct_alloc(struct io_uring_sqe *sqe,
                                                     int domain, int type,
                                                     int protocol,
                                                     unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
        sqe->rw_flags = flags;
        __io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC - 1);
}
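
/*
 * Illustrative sketch, not part of liburing: application-side code creating a
 * TCP socket directly into the ring's registered file table. Assumes a sparse
 * file table was registered earlier with io_uring_register_files_sparse() and
 * that <sys/socket.h> is available; on success cqe->res is the fixed-file
 * index that was allocated.
 */
static inline int example_direct_socket(struct io_uring *ring)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        io_uring_prep_socket_direct_alloc(sqe, AF_INET, SOCK_STREAM, 0, 0);
        ret = io_uring_submit(ring);
        if (ret < 0)
                return ret;
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res; /* allocated fixed-file index, or -errno */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}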

/*
 * Prepare commands for sockets.
 */
IOURINGINLINE void io_uring_prep_cmd_sock(struct io_uring_sqe *sqe,
                                          int cmd_op,
                                          int fd,
                                          int level,
                                          int optname,
                                          void *optval,
                                          int optlen)
{
        io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fd, NULL, 0, 0);
        sqe->optval = (unsigned long) (uintptr_t) optval;
        sqe->optname = optname;
        sqe->optlen = optlen;
        sqe->cmd_op = cmd_op;
        sqe->level = level;
}
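
/*
 * Illustrative sketch, not part of liburing: application-side code issuing a
 * socket command via IORING_OP_URING_CMD. SOCKET_URING_OP_SIOCINQ comes from
 * the io_uring UAPI headers (assumed available, kernel 6.1+); it reports the
 * amount of unread data queued on the socket, returned in cqe->res.
 */
static inline int example_sock_inq(struct io_uring *ring, int sockfd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -EBUSY;
        io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ, sockfd, 0, 0,
                               NULL, 0);
        return io_uring_submit(ring);
}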

IOURINGINLINE void io_uring_prep_waitid(struct io_uring_sqe *sqe,
                                        idtype_t idtype,
                                        id_t id,
                                        siginfo_t *infop,
                                        int options, unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_WAITID, sqe, id, NULL, (unsigned) idtype, 0);
        sqe->waitid_flags = flags;
        sqe->file_index = options;
        sqe->addr2 = (unsigned long) infop;
}

IOURINGINLINE void io_uring_prep_futex_wake(struct io_uring_sqe *sqe,
                                            uint32_t *futex, uint64_t val,
                                            uint64_t mask, uint32_t futex_flags,
                                            unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_FUTEX_WAKE, sqe, futex_flags, futex, 0, val);
        sqe->futex_flags = flags;
        sqe->addr3 = mask;
}

IOURINGINLINE void io_uring_prep_futex_wait(struct io_uring_sqe *sqe,
                                            uint32_t *futex, uint64_t val,
                                            uint64_t mask, uint32_t futex_flags,
                                            unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_FUTEX_WAIT, sqe, futex_flags, futex, 0, val);
        sqe->futex_flags = flags;
        sqe->addr3 = mask;
}
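
/*
 * Illustrative sketch, not part of liburing: application-side code blocking
 * on a futex word through the ring. FUTEX2_SIZE_U32 and
 * FUTEX_BITSET_MATCH_ANY are assumed to come from <linux/futex.h> (kernel
 * 6.7+ headers); the wait completes when another thread wakes the futex after
 * *addr no longer equals 'expected'.
 */
static inline int example_futex_wait(struct io_uring *ring, uint32_t *addr,
                                     uint32_t expected)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -EBUSY;
        io_uring_prep_futex_wait(sqe, addr, expected, FUTEX_BITSET_MATCH_ANY,
                                 FUTEX2_SIZE_U32, 0);
        return io_uring_submit(ring);
}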

struct futex_waitv;
IOURINGINLINE void io_uring_prep_futex_waitv(struct io_uring_sqe *sqe,
                                             struct futex_waitv *futex,
                                             uint32_t nr_futex,
                                             unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_FUTEX_WAITV, sqe, 0, futex, nr_futex, 0);
        sqe->futex_flags = flags;
}

IOURINGINLINE void io_uring_prep_fixed_fd_install(struct io_uring_sqe *sqe,
                                                  int fd,
                                                  unsigned int flags)
{
        io_uring_prep_rw(IORING_OP_FIXED_FD_INSTALL, sqe, fd, NULL, 0, 0);
        sqe->flags = IOSQE_FIXED_FILE;
        sqe->install_fd_flags = flags;
}

IOURINGINLINE void io_uring_prep_ftruncate(struct io_uring_sqe *sqe,
                                           int fd, loff_t len)
{
        io_uring_prep_rw(IORING_OP_FTRUNCATE, sqe, fd, 0, 0, len);
}
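
/*
 * Illustrative sketch, not part of liburing: application-side code turning a
 * direct (fixed-file) descriptor into a regular fd usable with plain
 * syscalls. On success cqe->res is the new process-level file descriptor.
 */
static inline int example_install_fd(struct io_uring *ring, int fixed_idx)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        io_uring_prep_fixed_fd_install(sqe, fixed_idx, 0);
        ret = io_uring_submit(ring);
        if (ret < 0)
                return ret;
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res; /* regular fd, or -errno */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}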

/*
 * Returns the number of unconsumed (if SQPOLL) or unsubmitted entries
 * that exist in the SQ ring.
 */
IOURINGINLINE unsigned io_uring_sq_ready(const struct io_uring *ring)
{
        unsigned khead;

        /*
         * Without a barrier, we could miss an update and think the SQ wasn't
         * ready. We don't need the load acquire for non-SQPOLL since then we
         * drive updates.
         */
        if (ring->flags & IORING_SETUP_SQPOLL)
                khead = io_uring_smp_load_acquire(ring->sq.khead);
        else
                khead = *ring->sq.khead;

        /* always use real head, to avoid losing sync for short submit */
        return ring->sq.sqe_tail - khead;
}

/*
 * Returns how much space is left in the SQ ring.
 */
IOURINGINLINE unsigned io_uring_sq_space_left(const struct io_uring *ring)
{
        return ring->sq.ring_entries - io_uring_sq_ready(ring);
}

/*
 * Only applicable when using SQPOLL - allows the caller to wait for space
 * to free up in the SQ ring, which happens when the kernel side thread has
 * consumed one or more entries. If the SQ ring is currently non-full, no
 * action is taken. Note: may return -EINVAL if the kernel doesn't support
 * this feature.
 */
IOURINGINLINE int io_uring_sqring_wait(struct io_uring *ring)
{
        if (!(ring->flags & IORING_SETUP_SQPOLL))
                return 0;
        if (io_uring_sq_space_left(ring))
                return 0;

        return __io_uring_sqring_wait(ring);
}
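
/*
 * Illustrative sketch, not part of liburing: application-side code obtaining
 * an SQE with backpressure. With SQPOLL, io_uring_sqring_wait() blocks until
 * the kernel thread frees a slot; otherwise a submit drains the ring.
 */
static inline struct io_uring_sqe *example_get_sqe(struct io_uring *ring)
{
        struct io_uring_sqe *sqe;

        while (!(sqe = io_uring_get_sqe(ring))) {
                if (ring->flags & IORING_SETUP_SQPOLL)
                        io_uring_sqring_wait(ring);
                else
                        io_uring_submit(ring);
        }
        return sqe;
}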

/*
 * Returns how many unconsumed entries are ready in the CQ ring.
 */
IOURINGINLINE unsigned io_uring_cq_ready(const struct io_uring *ring)
{
        return io_uring_smp_load_acquire(ring->cq.ktail) - *ring->cq.khead;
}

/*
 * Returns true if there are overflow entries waiting to be flushed onto
 * the CQ ring.
 */
IOURINGINLINE bool io_uring_cq_has_overflow(const struct io_uring *ring)
{
        return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
}

/*
 * Returns true if the eventfd notification is currently enabled.
 */
IOURINGINLINE bool io_uring_cq_eventfd_enabled(const struct io_uring *ring)
{
        if (!ring->cq.kflags)
                return true;

        return !(*ring->cq.kflags & IORING_CQ_EVENTFD_DISABLED);
}

/*
 * Toggle eventfd notification on or off, if an eventfd is registered with
 * the ring.
 */
IOURINGINLINE int io_uring_cq_eventfd_toggle(struct io_uring *ring,
                                             bool enabled)
{
        uint32_t flags;

        if (!!enabled == io_uring_cq_eventfd_enabled(ring))
                return 0;

        if (!ring->cq.kflags)
                return -EOPNOTSUPP;

        flags = *ring->cq.kflags;

        if (enabled)
                flags &= ~IORING_CQ_EVENTFD_DISABLED;
        else
                flags |= IORING_CQ_EVENTFD_DISABLED;

        IO_URING_WRITE_ONCE(*ring->cq.kflags, flags);

        return 0;
}
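
/*
 * Illustrative sketch, not part of liburing: application-side code
 * suppressing eventfd wakeups while draining a batch of completions, then
 * re-enabling them. Assumes an eventfd was registered earlier with
 * io_uring_register_eventfd().
 */
static inline void example_drain_quietly(struct io_uring *ring)
{
        unsigned ready;

        io_uring_cq_eventfd_toggle(ring, false);
        ready = io_uring_cq_ready(ring);
        /* ... process 'ready' completions, e.g. via io_uring_for_each_cqe() ... */
        io_uring_cq_advance(ring, ready);
        io_uring_cq_eventfd_toggle(ring, true);
}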

/*
 * Return an IO completion, waiting for 'wait_nr' completions if one isn't
 * readily available. Returns 0 with cqe_ptr filled in on success, -errno on
 * failure.
 */
IOURINGINLINE int io_uring_wait_cqe_nr(struct io_uring *ring,
                                       struct io_uring_cqe **cqe_ptr,
                                       unsigned wait_nr)
{
        return __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr, NULL);
}

/*
 * Internal helper, don't use directly in applications. Use one of the
 * "official" versions of this, io_uring_peek_cqe(), io_uring_wait_cqe(),
 * or io_uring_wait_cqes*().
 */
IOURINGINLINE int __io_uring_peek_cqe(struct io_uring *ring,
                                      struct io_uring_cqe **cqe_ptr,
                                      unsigned *nr_available)
{
        struct io_uring_cqe *cqe;
        int err = 0;
        unsigned available;
        unsigned mask = ring->cq.ring_mask;
        int shift = 0;

        if (ring->flags & IORING_SETUP_CQE32)
                shift = 1;

        do {
                unsigned tail = io_uring_smp_load_acquire(ring->cq.ktail);
                unsigned head = *ring->cq.khead;

                cqe = NULL;
                available = tail - head;
                if (!available)
                        break;

                cqe = &ring->cq.cqes[(head & mask) << shift];
                if (!(ring->features & IORING_FEAT_EXT_ARG) &&
                    cqe->user_data == LIBURING_UDATA_TIMEOUT) {
                        if (cqe->res < 0)
                                err = cqe->res;
                        io_uring_cq_advance(ring, 1);
                        if (!err)
                                continue;
                        cqe = NULL;
                }

                break;
        } while (1);

        *cqe_ptr = cqe;
        if (nr_available)
                *nr_available = available;
        return err;
}

/*
 * Return an IO completion, if one is readily available. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
IOURINGINLINE int io_uring_peek_cqe(struct io_uring *ring,
                                    struct io_uring_cqe **cqe_ptr)
{
        if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr)
                return 0;

        return io_uring_wait_cqe_nr(ring, cqe_ptr, 0);
}

/*
 * Return an IO completion, waiting for it if necessary. Returns 0 with
 * cqe_ptr filled in on success, -errno on failure.
 */
IOURINGINLINE int io_uring_wait_cqe(struct io_uring *ring,
                                    struct io_uring_cqe **cqe_ptr)
{
        if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr)
                return 0;

        return io_uring_wait_cqe_nr(ring, cqe_ptr, 1);
}
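
/*
 * Illustrative sketch, not part of liburing: application-side code for a
 * typical reap loop. Wait for the first completion, then drain whatever else
 * is already there without sleeping again; io_uring_peek_cqe() returns
 * -EAGAIN once the CQ ring is empty.
 */
static inline int example_reap(struct io_uring *ring)
{
        struct io_uring_cqe *cqe;
        int ret;

        ret = io_uring_wait_cqe(ring, &cqe); /* blocks for >= 1 CQE */
        while (!ret) {
                /* ... handle cqe->user_data / cqe->res ... */
                io_uring_cqe_seen(ring, cqe);
                ret = io_uring_peek_cqe(ring, &cqe); /* non-blocking */
        }
        return ret == -EAGAIN ? 0 : ret;
}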

/*
 * Return an sqe to fill. Application must later call io_uring_submit()
 * when it's ready to tell the kernel about it. The caller may call this
 * function multiple times before calling io_uring_submit().
 *
 * Returns a vacant sqe, or NULL if we're full.
 */
IOURINGINLINE struct io_uring_sqe *_io_uring_get_sqe(struct io_uring *ring)
{
        struct io_uring_sq *sq = &ring->sq;
        unsigned int head, next = sq->sqe_tail + 1;
        int shift = 0;

        if (ring->flags & IORING_SETUP_SQE128)
                shift = 1;
        if (!(ring->flags & IORING_SETUP_SQPOLL))
                head = *sq->khead;
        else
                head = io_uring_smp_load_acquire(sq->khead);

        if (next - head <= sq->ring_entries) {
                struct io_uring_sqe *sqe;

                sqe = &sq->sqes[(sq->sqe_tail & sq->ring_mask) << shift];
                sq->sqe_tail = next;
                io_uring_initialize_sqe(sqe);
                return sqe;
        }

        return NULL;
}

/*
 * Return the appropriate mask for a buffer ring of size 'ring_entries'.
 */
IOURINGINLINE int io_uring_buf_ring_mask(__u32 ring_entries)
{
        return ring_entries - 1;
}

IOURINGINLINE void io_uring_buf_ring_init(struct io_uring_buf_ring *br)
{
        br->tail = 0;
}

/*
 * Assign 'buf' with the addr/len/buffer ID supplied.
 */
IOURINGINLINE void io_uring_buf_ring_add(struct io_uring_buf_ring *br,
                                         void *addr, unsigned int len,
                                         unsigned short bid, int mask,
                                         int buf_offset)
{
        struct io_uring_buf *buf = &br->bufs[(br->tail + buf_offset) & mask];

        buf->addr = (unsigned long) (uintptr_t) addr;
        buf->len = len;
        buf->bid = bid;
}

/*
 * Make 'count' new buffers visible to the kernel. Called after
 * io_uring_buf_ring_add() has been called 'count' times to fill in new
 * buffers.
 */
IOURINGINLINE void io_uring_buf_ring_advance(struct io_uring_buf_ring *br,
                                             int count)
{
        unsigned short new_tail = br->tail + count;

        io_uring_smp_store_release(&br->tail, new_tail);
}

IOURINGINLINE void __io_uring_buf_ring_cq_advance(struct io_uring *ring,
                                                  struct io_uring_buf_ring *br,
                                                  int cq_count, int buf_count)
{
        io_uring_buf_ring_advance(br, buf_count);
        io_uring_cq_advance(ring, cq_count);
}

/*
 * Make 'count' new buffers visible to the kernel while at the same time
 * advancing the CQ ring seen entries. This can be used when the application
 * is using ring provided buffers and returns buffers while processing CQEs,
 * avoiding an extra atomic when needing to increment both the CQ ring and
 * the ring buffer index at the same time.
 */
IOURINGINLINE void io_uring_buf_ring_cq_advance(struct io_uring *ring,
                                                struct io_uring_buf_ring *br,
                                                int count)
{
        __io_uring_buf_ring_cq_advance(ring, br, count, count);
}

IOURINGINLINE int io_uring_buf_ring_available(struct io_uring *ring,
                                              struct io_uring_buf_ring *br,
                                              unsigned short bgid)
{
        uint16_t head;
        int ret;

        ret = io_uring_buf_ring_head(ring, bgid, &head);
        if (ret)
                return ret;

        return (uint16_t) (br->tail - head);
}
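
/*
 * Illustrative sketch, not part of liburing: application-side code setting up
 * a provided-buffer ring and queueing a receive that picks from it. The
 * buffer group id 7, the 8-entry ring and the 4096-byte buffer size are
 * arbitrary placeholders; 'base' must point at 8 x 4096 bytes. On completion,
 * the chosen buffer id is cqe->flags >> IORING_CQE_BUFFER_SHIFT.
 */
static inline int example_buf_ring(struct io_uring *ring, int sockfd,
                                   char *base)
{
        struct io_uring_buf_ring *br;
        struct io_uring_sqe *sqe;
        int i, ret, mask = io_uring_buf_ring_mask(8);

        br = io_uring_setup_buf_ring(ring, 8, 7, 0, &ret);
        if (!br)
                return ret;

        /* fill all 8 buffers, then publish them with one release store */
        for (i = 0; i < 8; i++)
                io_uring_buf_ring_add(br, base + i * 4096, 4096,
                                      (unsigned short) i, mask, i);
        io_uring_buf_ring_advance(br, 8);

        /* the kernel picks a buffer from group 7 when data arrives */
        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -EBUSY;
        io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = 7;
        return io_uring_submit(ring);
}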

#ifndef LIBURING_INTERNAL
IOURINGINLINE struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
{
        return _io_uring_get_sqe(ring);
}
#else
struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
#endif

ssize_t io_uring_mlock_size(unsigned entries, unsigned flags);
ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p);

/*
 * Versioning information for liburing.
 *
 * Use IO_URING_CHECK_VERSION() for compile time checks including from
 * preprocessor directives.
 *
 * Use io_uring_check_version() for runtime checks of the version of
 * liburing that was loaded by the dynamic linker.
 */
int io_uring_major_version(void);
int io_uring_minor_version(void);
bool io_uring_check_version(int major, int minor);

#define IO_URING_CHECK_VERSION(major,minor) \
        (major > IO_URING_VERSION_MAJOR ||   \
         (major == IO_URING_VERSION_MAJOR && \
          minor > IO_URING_VERSION_MINOR))
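
/*
 * Illustrative sketch, not part of liburing: application-side code guarding
 * newer APIs at compile time and run time. Per the macro body above,
 * IO_URING_CHECK_VERSION(major, minor) is true when the liburing being
 * compiled against is OLDER than the requested version; the runtime check is
 * assumed here to mirror that older-than semantics.
 */
#if !IO_URING_CHECK_VERSION(2, 6)
/* headers are liburing >= 2.6; newer prep helpers may be used */
#endif

static inline bool example_have_liburing(int major, int minor)
{
        /* checks the library the dynamic linker actually loaded */
        return !io_uring_check_version(major, minor);
}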

#ifdef __cplusplus
}
#endif

#ifdef IOURINGINLINE
#undef IOURINGINLINE
#endif

#endif