uringmachine 0.19.1 → 0.21.0
This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +3 -4
- data/CHANGELOG.md +32 -1
- data/TODO.md +0 -39
- data/examples/bm_fileno.rb +33 -0
- data/examples/bm_mutex.rb +85 -0
- data/examples/bm_mutex_single.rb +33 -0
- data/examples/bm_queue.rb +29 -29
- data/examples/bm_send.rb +2 -5
- data/examples/bm_snooze.rb +20 -42
- data/examples/bm_write.rb +4 -1
- data/examples/fiber_scheduler_demo.rb +15 -51
- data/examples/fiber_scheduler_fork.rb +24 -0
- data/examples/nc_ssl.rb +71 -0
- data/ext/um/extconf.rb +5 -15
- data/ext/um/um.c +310 -74
- data/ext/um/um.h +66 -29
- data/ext/um/um_async_op.c +1 -1
- data/ext/um/um_async_op_class.c +2 -2
- data/ext/um/um_buffer.c +1 -1
- data/ext/um/um_class.c +178 -31
- data/ext/um/um_const.c +51 -3
- data/ext/um/um_mutex_class.c +1 -1
- data/ext/um/um_op.c +37 -0
- data/ext/um/um_queue_class.c +1 -1
- data/ext/um/um_stream.c +5 -5
- data/ext/um/um_stream_class.c +3 -0
- data/ext/um/um_sync.c +28 -39
- data/ext/um/um_utils.c +59 -19
- data/grant-2025/journal.md +353 -0
- data/grant-2025/tasks.md +135 -0
- data/lib/uringmachine/fiber_scheduler.rb +316 -57
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +6 -0
- data/test/test_fiber_scheduler.rb +640 -0
- data/test/test_stream.rb +2 -2
- data/test/test_um.rb +722 -54
- data/uringmachine.gemspec +5 -5
- data/vendor/liburing/.github/workflows/ci.yml +94 -1
- data/vendor/liburing/.github/workflows/test_build.c +9 -0
- data/vendor/liburing/configure +27 -0
- data/vendor/liburing/examples/Makefile +6 -0
- data/vendor/liburing/examples/helpers.c +8 -0
- data/vendor/liburing/examples/helpers.h +5 -0
- data/vendor/liburing/liburing.spec +1 -1
- data/vendor/liburing/src/Makefile +9 -3
- data/vendor/liburing/src/include/liburing/barrier.h +11 -5
- data/vendor/liburing/src/include/liburing/io_uring/query.h +41 -0
- data/vendor/liburing/src/include/liburing/io_uring.h +51 -0
- data/vendor/liburing/src/include/liburing/sanitize.h +16 -4
- data/vendor/liburing/src/include/liburing.h +458 -121
- data/vendor/liburing/src/liburing-ffi.map +16 -0
- data/vendor/liburing/src/liburing.map +8 -0
- data/vendor/liburing/src/sanitize.c +4 -1
- data/vendor/liburing/src/setup.c +7 -4
- data/vendor/liburing/test/232c93d07b74.c +4 -16
- data/vendor/liburing/test/Makefile +15 -1
- data/vendor/liburing/test/accept.c +2 -13
- data/vendor/liburing/test/bind-listen.c +175 -13
- data/vendor/liburing/test/conn-unreach.c +132 -0
- data/vendor/liburing/test/fd-pass.c +32 -7
- data/vendor/liburing/test/fdinfo.c +39 -12
- data/vendor/liburing/test/fifo-futex-poll.c +114 -0
- data/vendor/liburing/test/fifo-nonblock-read.c +1 -12
- data/vendor/liburing/test/futex.c +1 -1
- data/vendor/liburing/test/helpers.c +99 -2
- data/vendor/liburing/test/helpers.h +9 -0
- data/vendor/liburing/test/io_uring_passthrough.c +6 -12
- data/vendor/liburing/test/mock_file.c +379 -0
- data/vendor/liburing/test/mock_file.h +47 -0
- data/vendor/liburing/test/nop.c +2 -2
- data/vendor/liburing/test/nop32-overflow.c +150 -0
- data/vendor/liburing/test/nop32.c +126 -0
- data/vendor/liburing/test/pipe.c +166 -0
- data/vendor/liburing/test/poll-race-mshot.c +13 -1
- data/vendor/liburing/test/read-write.c +4 -4
- data/vendor/liburing/test/recv-mshot-fair.c +81 -34
- data/vendor/liburing/test/recvsend_bundle.c +1 -1
- data/vendor/liburing/test/resize-rings.c +2 -0
- data/vendor/liburing/test/ring-query.c +322 -0
- data/vendor/liburing/test/ringbuf-loop.c +87 -0
- data/vendor/liburing/test/ringbuf-read.c +4 -4
- data/vendor/liburing/test/runtests.sh +2 -2
- data/vendor/liburing/test/send-zerocopy.c +43 -5
- data/vendor/liburing/test/send_recv.c +103 -32
- data/vendor/liburing/test/shutdown.c +2 -12
- data/vendor/liburing/test/socket-nb.c +3 -14
- data/vendor/liburing/test/socket-rw-eagain.c +2 -12
- data/vendor/liburing/test/socket-rw-offset.c +2 -12
- data/vendor/liburing/test/socket-rw.c +2 -12
- data/vendor/liburing/test/sqe-mixed-bad-wrap.c +87 -0
- data/vendor/liburing/test/sqe-mixed-nop.c +82 -0
- data/vendor/liburing/test/sqe-mixed-uring_cmd.c +153 -0
- data/vendor/liburing/test/timestamp.c +56 -19
- data/vendor/liburing/test/vec-regbuf.c +2 -4
- data/vendor/liburing/test/wq-aff.c +7 -0
- metadata +37 -15
data/vendor/liburing/src/include/liburing.h:

@@ -16,9 +16,8 @@
 #include <sys/wait.h>
 #include "liburing/compat.h"
 #include "liburing/io_uring.h"
+#include "liburing/io_uring/query.h"
 #include "liburing/io_uring_version.h"
-#include "liburing/barrier.h"
-
 
 #ifndef uring_unlikely
 #define uring_unlikely(cond)	__builtin_expect(!!(cond), 0)
@@ -29,15 +28,45 @@
 #endif
 
 /*
- * NOTE:
- *
+ * NOTE: Use IOURINGINLINE macro for "static inline" functions that are
+ *       expected to be available in the FFI bindings. They must also
+ *       be included in the liburing-ffi.map file.
+ *
+ *       Use _LOCAL_INLINE macro for "static inline" functions that are
+ *       not expected to be available in the FFI bindings.
 *
- *
- *
+ *       Don't use "static inline" directly when defining new functions
+ *       in this header file.
+ *
+ *       Reason:
+ *       The C++20 module export feature fails to operate correctly
+ *       with the "static inline" functions. Use "inline" instead of
+ *       "static inline" when compiling with C++20 or later.
+ *
+ *       See:
+ *       https://github.com/axboe/liburing/issues/1457
+ *       https://lore.kernel.org/io-uring/e0559c10-104d-4da8-9f7f-d2ffd73d8df3@acm.org
 */
 #ifndef IOURINGINLINE
+#if defined(__cplusplus) && __cplusplus >= 202002L
+#define IOURINGINLINE inline
+#else
 #define IOURINGINLINE static inline
 #endif
+#endif
+
+#ifndef _LOCAL_INLINE
+#if defined(__cplusplus) && __cplusplus >= 202002L
+#define _LOCAL_INLINE inline
+#else
+#define _LOCAL_INLINE static inline
+#endif
+#endif
+
+/*
+ * barrier.h needs _LOCAL_INLINE.
+ */
+#include "liburing/barrier.h"
 
 #ifdef __alpha__
 /*
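Note (not part of the diff): the new macro pair splits the header's helpers into two groups — `IOURINGINLINE` for functions exported through the FFI map, and `_LOCAL_INLINE` for header-private helpers — and both select plain `inline` under C++20, because `static inline` functions cannot be re-exported from a C++20 module. A minimal sketch of the pattern; `my_helper` is a hypothetical function, not part of liburing:

```c
/* Sketch only: mirrors the macro logic added above. */
#if defined(__cplusplus) && __cplusplus >= 202002L
#define IOURINGINLINE inline		/* exportable from a C++20 module */
#else
#define IOURINGINLINE static inline	/* classic internal linkage */
#endif

IOURINGINLINE int my_helper(int x)	/* hypothetical FFI-visible helper */
{
	return x + 1;
}
```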
@@ -75,6 +104,12 @@
 #endif
 #endif
 
+#ifdef __cplusplus
+#define LIBURING_NOEXCEPT noexcept
+#else
+#define LIBURING_NOEXCEPT
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif
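Note (not part of the diff): `LIBURING_NOEXCEPT` expands to the `noexcept` specifier only when the header is consumed by a C++ compiler, and to nothing in C; most of the rest of this file's diff is the mechanical application of that annotation to every declaration. As a sketch of what one declaration resolves to:

```c
int io_uring_submit(struct io_uring *ring) LIBURING_NOEXCEPT;
/* in C:   int io_uring_submit(struct io_uring *ring);          */
/* in C++: int io_uring_submit(struct io_uring *ring) noexcept; */
```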
@@ -153,7 +188,7 @@ struct io_uring_zcrx_rq {
 * Library interface
 */
 
-static inline __u64 uring_ptr_to_u64(const void *ptr)
+_LOCAL_INLINE __u64 uring_ptr_to_u64(const void *ptr) LIBURING_NOEXCEPT
 {
	return (__u64) (unsigned long) ptr;
 }
@@ -162,18 +197,19 @@ static inline __u64 uring_ptr_to_u64(const void *ptr)
 * return an allocated io_uring_probe structure, or NULL if probe fails (for
 * example, if it is not available). The caller is responsible for freeing it
 */
-struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring);
+struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring)
+					       LIBURING_NOEXCEPT;
 /* same as io_uring_get_probe_ring, but takes care of ring init and teardown */
-struct io_uring_probe *io_uring_get_probe(void);
+struct io_uring_probe *io_uring_get_probe(void) LIBURING_NOEXCEPT;
 
 /*
 * frees a probe allocated through io_uring_get_probe() or
 * io_uring_get_probe_ring()
 */
-void io_uring_free_probe(struct io_uring_probe *probe);
+void io_uring_free_probe(struct io_uring_probe *probe) LIBURING_NOEXCEPT;
 
 IOURINGINLINE int io_uring_opcode_supported(const struct io_uring_probe *p,
-					    int op)
+					    int op) LIBURING_NOEXCEPT
 {
	if (op > p->last_op)
		return 0;
@@ -182,143 +218,167 @@ IOURINGINLINE int io_uring_opcode_supported(const struct io_uring_probe *p,
 
 int io_uring_queue_init_mem(unsigned entries, struct io_uring *ring,
			    struct io_uring_params *p,
-			    void *buf, size_t buf_size);
+			    void *buf, size_t buf_size) LIBURING_NOEXCEPT;
 int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
-			       struct io_uring_params *p);
+			       struct io_uring_params *p) LIBURING_NOEXCEPT;
 int io_uring_queue_init(unsigned entries, struct io_uring *ring,
-			unsigned flags);
+			unsigned flags) LIBURING_NOEXCEPT;
 int io_uring_queue_mmap(int fd, struct io_uring_params *p,
-			struct io_uring *ring);
-int io_uring_ring_dontfork(struct io_uring *ring);
-void io_uring_queue_exit(struct io_uring *ring);
+			struct io_uring *ring) LIBURING_NOEXCEPT;
+int io_uring_ring_dontfork(struct io_uring *ring) LIBURING_NOEXCEPT;
+void io_uring_queue_exit(struct io_uring *ring) LIBURING_NOEXCEPT;
 unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
-				 struct io_uring_cqe **cqes, unsigned count);
+				 struct io_uring_cqe **cqes, unsigned count) LIBURING_NOEXCEPT;
 int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
		       unsigned wait_nr, struct __kernel_timespec *ts,
-		       sigset_t *sigmask);
+		       sigset_t *sigmask) LIBURING_NOEXCEPT;
 int io_uring_wait_cqes_min_timeout(struct io_uring *ring,
				   struct io_uring_cqe **cqe_ptr,
				   unsigned wait_nr,
				   struct __kernel_timespec *ts,
				   unsigned int min_ts_usec,
-				   sigset_t *sigmask);
+				   sigset_t *sigmask) LIBURING_NOEXCEPT;
 int io_uring_wait_cqe_timeout(struct io_uring *ring,
			      struct io_uring_cqe **cqe_ptr,
-			      struct __kernel_timespec *ts);
-int io_uring_submit(struct io_uring *ring);
-int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr);
+			      struct __kernel_timespec *ts) LIBURING_NOEXCEPT;
+int io_uring_submit(struct io_uring *ring) LIBURING_NOEXCEPT;
+int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
+			     LIBURING_NOEXCEPT;
 int io_uring_submit_and_wait_timeout(struct io_uring *ring,
				     struct io_uring_cqe **cqe_ptr,
				     unsigned wait_nr,
				     struct __kernel_timespec *ts,
-				     sigset_t *sigmask);
+				     sigset_t *sigmask) LIBURING_NOEXCEPT;
 int io_uring_submit_and_wait_min_timeout(struct io_uring *ring,
					 struct io_uring_cqe **cqe_ptr,
					 unsigned wait_nr,
					 struct __kernel_timespec *ts,
					 unsigned min_wait,
-					 sigset_t *sigmask);
+					 sigset_t *sigmask) LIBURING_NOEXCEPT;
 int io_uring_submit_and_wait_reg(struct io_uring *ring,
				 struct io_uring_cqe **cqe_ptr, unsigned wait_nr,
-				 int reg_index);
+				 int reg_index) LIBURING_NOEXCEPT;
 
 int io_uring_register_wait_reg(struct io_uring *ring,
-			       struct io_uring_reg_wait *reg, int nr);
-int io_uring_resize_rings(struct io_uring *ring, struct io_uring_params *p);
+			       struct io_uring_reg_wait *reg, int nr)
+			       LIBURING_NOEXCEPT;
+int io_uring_resize_rings(struct io_uring *ring, struct io_uring_params *p)
+			  LIBURING_NOEXCEPT;
 int io_uring_clone_buffers_offset(struct io_uring *dst, struct io_uring *src,
				  unsigned int dst_off, unsigned int src_off,
-				  unsigned int nr, unsigned int flags);
+				  unsigned int nr, unsigned int flags)
+				  LIBURING_NOEXCEPT;
 int __io_uring_clone_buffers_offset(struct io_uring *dst, struct io_uring *src,
				    unsigned int dst_off, unsigned int src_off,
-				    unsigned int nr, unsigned int flags);
-int io_uring_clone_buffers(struct io_uring *dst, struct io_uring *src);
+				    unsigned int nr, unsigned int flags)
+				    LIBURING_NOEXCEPT;
+int io_uring_clone_buffers(struct io_uring *dst, struct io_uring *src)
+			   LIBURING_NOEXCEPT;
 int __io_uring_clone_buffers(struct io_uring *dst, struct io_uring *src,
-			     unsigned int flags);
+			     unsigned int flags) LIBURING_NOEXCEPT;
 int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
-			      unsigned nr_iovecs);
+			      unsigned nr_iovecs) LIBURING_NOEXCEPT;
 int io_uring_register_buffers_tags(struct io_uring *ring,
				   const struct iovec *iovecs,
-				   const __u64 *tags, unsigned nr);
-int io_uring_register_buffers_sparse(struct io_uring *ring, unsigned nr);
+				   const __u64 *tags, unsigned nr)
+				   LIBURING_NOEXCEPT;
+int io_uring_register_buffers_sparse(struct io_uring *ring, unsigned nr)
+				     LIBURING_NOEXCEPT;
 int io_uring_register_buffers_update_tag(struct io_uring *ring,
					 unsigned off,
					 const struct iovec *iovecs,
-					 const __u64 *tags, unsigned nr);
-int io_uring_unregister_buffers(struct io_uring *ring);
+					 const __u64 *tags, unsigned nr)
+					 LIBURING_NOEXCEPT;
+int io_uring_unregister_buffers(struct io_uring *ring) LIBURING_NOEXCEPT;
 
 int io_uring_register_files(struct io_uring *ring, const int *files,
-			    unsigned nr_files);
+			    unsigned nr_files) LIBURING_NOEXCEPT;
 int io_uring_register_files_tags(struct io_uring *ring, const int *files,
-				 const __u64 *tags, unsigned nr);
-int io_uring_register_files_sparse(struct io_uring *ring, unsigned nr);
+				 const __u64 *tags, unsigned nr)
+				 LIBURING_NOEXCEPT;
+int io_uring_register_files_sparse(struct io_uring *ring, unsigned nr)
+				   LIBURING_NOEXCEPT;
 int io_uring_register_files_update_tag(struct io_uring *ring, unsigned off,
				       const int *files, const __u64 *tags,
-				       unsigned nr_files);
+				       unsigned nr_files) LIBURING_NOEXCEPT;
 
-int io_uring_unregister_files(struct io_uring *ring);
+int io_uring_unregister_files(struct io_uring *ring) LIBURING_NOEXCEPT;
 int io_uring_register_files_update(struct io_uring *ring, unsigned off,
-				   const int *files, unsigned nr_files);
-int io_uring_register_eventfd(struct io_uring *ring, int fd);
-int io_uring_register_eventfd_async(struct io_uring *ring, int fd);
-int io_uring_unregister_eventfd(struct io_uring *ring);
+				   const int *files, unsigned nr_files)
+				   LIBURING_NOEXCEPT;
+int io_uring_register_eventfd(struct io_uring *ring, int fd) LIBURING_NOEXCEPT;
+int io_uring_register_eventfd_async(struct io_uring *ring, int fd)
+				    LIBURING_NOEXCEPT;
+int io_uring_unregister_eventfd(struct io_uring *ring) LIBURING_NOEXCEPT;
 int io_uring_register_probe(struct io_uring *ring, struct io_uring_probe *p,
-			    unsigned nr);
-int io_uring_register_personality(struct io_uring *ring);
-int io_uring_unregister_personality(struct io_uring *ring, int id);
+			    unsigned nr) LIBURING_NOEXCEPT;
+int io_uring_register_personality(struct io_uring *ring) LIBURING_NOEXCEPT;
+int io_uring_unregister_personality(struct io_uring *ring, int id)
+				    LIBURING_NOEXCEPT;
 int io_uring_register_restrictions(struct io_uring *ring,
				   struct io_uring_restriction *res,
-				   unsigned int nr_res);
-int io_uring_enable_rings(struct io_uring *ring);
-int __io_uring_sqring_wait(struct io_uring *ring);
+				   unsigned int nr_res) LIBURING_NOEXCEPT;
+int io_uring_enable_rings(struct io_uring *ring) LIBURING_NOEXCEPT;
+int __io_uring_sqring_wait(struct io_uring *ring) LIBURING_NOEXCEPT;
 #ifdef _GNU_SOURCE
 int io_uring_register_iowq_aff(struct io_uring *ring, size_t cpusz,
-			       const cpu_set_t *mask);
+			       const cpu_set_t *mask) LIBURING_NOEXCEPT;
 #endif
-int io_uring_unregister_iowq_aff(struct io_uring *ring);
+int io_uring_unregister_iowq_aff(struct io_uring *ring) LIBURING_NOEXCEPT;
 int io_uring_register_iowq_max_workers(struct io_uring *ring,
-				       unsigned int *values);
-int io_uring_register_ring_fd(struct io_uring *ring);
-int io_uring_unregister_ring_fd(struct io_uring *ring);
-int io_uring_close_ring_fd(struct io_uring *ring);
+				       unsigned int *values) LIBURING_NOEXCEPT;
+int io_uring_register_ring_fd(struct io_uring *ring) LIBURING_NOEXCEPT;
+int io_uring_unregister_ring_fd(struct io_uring *ring) LIBURING_NOEXCEPT;
+int io_uring_close_ring_fd(struct io_uring *ring) LIBURING_NOEXCEPT;
 int io_uring_register_buf_ring(struct io_uring *ring,
-			       struct io_uring_buf_reg *reg, unsigned int flags);
-int io_uring_unregister_buf_ring(struct io_uring *ring, int bgid);
-int io_uring_buf_ring_head(struct io_uring *ring, int buf_group, uint16_t *head);
+			       struct io_uring_buf_reg *reg, unsigned int flags) LIBURING_NOEXCEPT;
+int io_uring_unregister_buf_ring(struct io_uring *ring, int bgid)
+				 LIBURING_NOEXCEPT;
+int io_uring_buf_ring_head(struct io_uring *ring,
+			   int buf_group, uint16_t *head) LIBURING_NOEXCEPT;
 int io_uring_register_sync_cancel(struct io_uring *ring,
-				  struct io_uring_sync_cancel_reg *reg);
-int io_uring_register_sync_msg(struct io_uring_sqe *sqe);
+				  struct io_uring_sync_cancel_reg *reg)
+				  LIBURING_NOEXCEPT;
+int io_uring_register_sync_msg(struct io_uring_sqe *sqe) LIBURING_NOEXCEPT;
 
 int io_uring_register_file_alloc_range(struct io_uring *ring,
-				       unsigned off, unsigned len);
+				       unsigned off, unsigned len)
+				       LIBURING_NOEXCEPT;
 
-int io_uring_register_napi(struct io_uring *ring, struct io_uring_napi *napi);
-int io_uring_unregister_napi(struct io_uring *ring, struct io_uring_napi *napi);
+int io_uring_register_napi(struct io_uring *ring, struct io_uring_napi *napi)
+			   LIBURING_NOEXCEPT;
+int io_uring_unregister_napi(struct io_uring *ring, struct io_uring_napi *napi)
+			     LIBURING_NOEXCEPT;
 int io_uring_register_ifq(struct io_uring *ring,
-			  struct io_uring_zcrx_ifq_reg *reg);
+			  struct io_uring_zcrx_ifq_reg *reg) LIBURING_NOEXCEPT;
 
 int io_uring_register_clock(struct io_uring *ring,
-			    struct io_uring_clock_register *arg);
+			    struct io_uring_clock_register *arg)
+			    LIBURING_NOEXCEPT;
 
-int io_uring_get_events(struct io_uring *ring);
-int io_uring_submit_and_get_events(struct io_uring *ring);
+int io_uring_get_events(struct io_uring *ring) LIBURING_NOEXCEPT;
+int io_uring_submit_and_get_events(struct io_uring *ring) LIBURING_NOEXCEPT;
 
 /*
 * io_uring syscalls.
 */
 int io_uring_enter(unsigned int fd, unsigned int to_submit,
-		   unsigned int min_complete, unsigned int flags, sigset_t *sig);
+		   unsigned int min_complete, unsigned int flags, sigset_t *sig)
+		   LIBURING_NOEXCEPT;
 int io_uring_enter2(unsigned int fd, unsigned int to_submit,
		    unsigned int min_complete, unsigned int flags,
-		    void *arg, size_t sz);
-int io_uring_setup(unsigned int entries, struct io_uring_params *p);
+		    void *arg, size_t sz) LIBURING_NOEXCEPT;
+int io_uring_setup(unsigned int entries, struct io_uring_params *p)
+		   LIBURING_NOEXCEPT;
 int io_uring_register(unsigned int fd, unsigned int opcode, const void *arg,
-		      unsigned int nr_args);
+		      unsigned int nr_args) LIBURING_NOEXCEPT;
 
 /*
 * Mapped/registered regions
 */
 int io_uring_register_region(struct io_uring *ring,
-			     struct io_uring_mem_region_reg *reg);
+			     struct io_uring_mem_region_reg *reg)
+			     LIBURING_NOEXCEPT;
 
 /*
 * Mapped buffer ring alloc/register + unregister/free helpers
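Note (not part of the diff): none of the declarations above change shape — they only gain the annotation — so existing callers compile unchanged. For orientation, a minimal sketch of the core lifecycle these functions implement (init, submit, reap, teardown), using only APIs declared in this header:

```c
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)	/* set up SQ/CQ rings */
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);			/* queue a no-op request */
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("nop completed: res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);	/* release the CQE slot */
	}
	io_uring_queue_exit(&ring);
	return 0;
}
```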
@@ -326,9 +386,9 @@ int io_uring_register_region(struct io_uring *ring,
 struct io_uring_buf_ring *io_uring_setup_buf_ring(struct io_uring *ring,
						  unsigned int nentries,
						  int bgid, unsigned int flags,
-						  int *err);
+						  int *err) LIBURING_NOEXCEPT;
 int io_uring_free_buf_ring(struct io_uring *ring, struct io_uring_buf_ring *br,
-			   unsigned int nentries, int bgid);
+			   unsigned int nentries, int bgid) LIBURING_NOEXCEPT;
 
 /*
 * Helper for the peek/wait single cqe functions. Exported because of that,
@@ -336,12 +396,13 @@ int io_uring_free_buf_ring(struct io_uring *ring, struct io_uring_buf_ring *br,
 */
 int __io_uring_get_cqe(struct io_uring *ring,
		       struct io_uring_cqe **cqe_ptr, unsigned submit,
-		       unsigned wait_nr, sigset_t *sigmask);
+		       unsigned wait_nr, sigset_t *sigmask) LIBURING_NOEXCEPT;
 
 /*
 * Enable/disable setting of iowait by the kernel.
 */
-int io_uring_set_iowait(struct io_uring *ring, bool enable_iowait);
+int io_uring_set_iowait(struct io_uring *ring, bool enable_iowait)
+			LIBURING_NOEXCEPT;
 
 #define LIBURING_UDATA_TIMEOUT ((__u64) -1)
 
@@ -351,15 +412,22 @@ int io_uring_set_iowait(struct io_uring *ring, bool enable_iowait);
 * CQE `index` can be computed as &cq.cqes[(index & cq.ring_mask) << cqe_shift].
 */
 IOURINGINLINE unsigned io_uring_cqe_shift_from_flags(unsigned flags)
+							LIBURING_NOEXCEPT
 {
	return !!(flags & IORING_SETUP_CQE32);
 }
 
 IOURINGINLINE unsigned io_uring_cqe_shift(const struct io_uring *ring)
+						LIBURING_NOEXCEPT
 {
	return io_uring_cqe_shift_from_flags(ring->flags);
 }
 
+IOURINGINLINE unsigned io_uring_cqe_nr(const struct io_uring_cqe *cqe)
+{
+	return 1U << !!(cqe->flags & IORING_CQE_F_32);
+}
+
 struct io_uring_cqe_iter {
	struct io_uring_cqe *cqes;
	unsigned mask;
@@ -368,8 +436,9 @@ struct io_uring_cqe_iter {
	unsigned tail;
 };
 
-static inline struct io_uring_cqe_iter
+_LOCAL_INLINE struct io_uring_cqe_iter
 io_uring_cqe_iter_init(const struct io_uring *ring)
+						LIBURING_NOEXCEPT
 {
	return (struct io_uring_cqe_iter) {
		.cqes = ring->cq.cqes,
@@ -381,13 +450,16 @@ io_uring_cqe_iter_init(const struct io_uring *ring)
	};
 }
 
-static inline bool io_uring_cqe_iter_next(struct io_uring_cqe_iter *iter,
+_LOCAL_INLINE bool io_uring_cqe_iter_next(struct io_uring_cqe_iter *iter,
					  struct io_uring_cqe **cqe)
+					  LIBURING_NOEXCEPT
 {
	if (iter->head == iter->tail)
		return false;
 
	*cqe = &iter->cqes[(iter->head++ & iter->mask) << iter->shift];
+	if ((*cqe)->flags & IORING_CQE_F_32)
+		iter->head++;
	return true;
 }
 
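Note (not part of the diff): the new `io_uring_cqe_nr()` and the extra `IORING_CQE_F_32` check in the iterator handle rings that mix 16- and 32-byte CQEs — a big CQE occupies two CQ-ring slots, so consumers must advance the head by two. A sketch of a reaping loop that stays correct on such rings; `handle_completion` is a hypothetical callback:

```c
#include <liburing.h>

void handle_completion(struct io_uring_cqe *cqe);	/* hypothetical consumer */

/* Drain all pending completions, advancing by the right number of CQ
 * slots per completion: 1 normally, 2 when IORING_CQE_F_32 is set. */
static void drain_cq(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	unsigned head, slots = 0;

	io_uring_for_each_cqe(ring, head, cqe) {
		handle_completion(cqe);
		slots += 1U << !!(cqe->flags & IORING_CQE_F_32); /* == io_uring_cqe_nr(cqe) */
	}
	io_uring_cq_advance(ring, slots);
}
```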
@@ -406,6 +478,7 @@ static inline bool io_uring_cqe_iter_next(struct io_uring_cqe_iter *iter,
 * Must be called after io_uring_for_each_cqe()
 */
 IOURINGINLINE void io_uring_cq_advance(struct io_uring *ring, unsigned nr)
+						LIBURING_NOEXCEPT
 {
	if (nr) {
		struct io_uring_cq *cq = &ring->cq;
@@ -424,9 +497,10 @@ IOURINGINLINE void io_uring_cq_advance(struct io_uring *ring, unsigned nr)
 */
 IOURINGINLINE void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
+				     LIBURING_NOEXCEPT
 {
	if (cqe)
-		io_uring_cq_advance(ring, 1);
+		io_uring_cq_advance(ring, io_uring_cqe_nr(cqe));
 }
 
 /*
@@ -438,11 +512,13 @@ IOURINGINLINE void io_uring_cqe_seen(struct io_uring *ring,
 * at command completion time with io_uring_cqe_get_data().
 */
 IOURINGINLINE void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
+						LIBURING_NOEXCEPT
 {
	sqe->user_data = (unsigned long) data;
 }
 
 IOURINGINLINE void *io_uring_cqe_get_data(const struct io_uring_cqe *cqe)
+						LIBURING_NOEXCEPT
 {
	return (void *) (uintptr_t) cqe->user_data;
 }
@@ -454,6 +530,7 @@ IOURINGINLINE void *io_uring_cqe_get_data(const struct io_uring_cqe *cqe)
 */
 IOURINGINLINE void io_uring_sqe_set_data64(struct io_uring_sqe *sqe,
					   __u64 data)
+					   LIBURING_NOEXCEPT
 {
	sqe->user_data = data;
 }
@@ -470,24 +547,28 @@ IOURINGINLINE __u64 io_uring_cqe_get_data64(const struct io_uring_cqe *cqe)
 
 IOURINGINLINE void io_uring_sqe_set_flags(struct io_uring_sqe *sqe,
					  unsigned flags)
+					  LIBURING_NOEXCEPT
 {
	sqe->flags = (__u8) flags;
 }
 
 IOURINGINLINE void io_uring_sqe_set_buf_group(struct io_uring_sqe *sqe,
					      int bgid)
+					      LIBURING_NOEXCEPT
 {
	sqe->buf_group = (__u16) bgid;
 }
 
-static inline void __io_uring_set_target_fixed_file(struct io_uring_sqe *sqe,
+_LOCAL_INLINE void __io_uring_set_target_fixed_file(struct io_uring_sqe *sqe,
						    unsigned int file_index)
+						    LIBURING_NOEXCEPT
 {
	/* 0 means no fixed files, indexes should be encoded as "index + 1" */
	sqe->file_index = file_index + 1;
 }
 
 IOURINGINLINE void io_uring_initialize_sqe(struct io_uring_sqe *sqe)
+						LIBURING_NOEXCEPT
 {
	sqe->flags = 0;
	sqe->ioprio = 0;
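Note (not part of the diff): `io_uring_sqe_set_data()`/`io_uring_cqe_get_data()` carry an opaque pointer from submission to completion unchanged; only the annotation is new here. A brief sketch of the round trip; `struct my_req` is a hypothetical per-request tag:

```c
#include <liburing.h>
#include <stdlib.h>

struct my_req { int id; };		/* hypothetical request tag */

static int roundtrip(struct io_uring *ring)
{
	struct my_req *req = malloc(sizeof(*req));
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int id;

	req->id = 42;
	io_uring_prep_nop(sqe);
	io_uring_sqe_set_data(sqe, req);	/* stash the tag in user_data */
	io_uring_submit(ring);

	if (io_uring_wait_cqe(ring, &cqe))
		return -1;
	struct my_req *done = io_uring_cqe_get_data(cqe); /* same pointer back */
	io_uring_cqe_seen(ring, cqe);
	id = done->id;				/* 42 */
	free(done);
	return id;
}
```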
@@ -502,6 +583,7 @@ IOURINGINLINE void io_uring_initialize_sqe(struct io_uring_sqe *sqe)
 IOURINGINLINE void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
				    const void *addr, unsigned len,
				    __u64 offset)
+				    LIBURING_NOEXCEPT
 {
	sqe->opcode = (__u8) op;
	sqe->fd = fd;
@@ -536,6 +618,7 @@ IOURINGINLINE void io_uring_prep_splice(struct io_uring_sqe *sqe,
					int fd_out, int64_t off_out,
					unsigned int nbytes,
					unsigned int splice_flags)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, NULL, nbytes,
			 (__u64) off_out);
@@ -548,6 +631,7 @@ IOURINGINLINE void io_uring_prep_tee(struct io_uring_sqe *sqe,
				     int fd_in, int fd_out,
				     unsigned int nbytes,
				     unsigned int splice_flags)
+				     LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_TEE, sqe, fd_out, NULL, nbytes, 0);
	sqe->splice_off_in = 0;
@@ -558,6 +642,7 @@ IOURINGINLINE void io_uring_prep_tee(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, __u64 offset)
+				       LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
 }
@@ -566,6 +651,7 @@ IOURINGINLINE void io_uring_prep_readv2(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, __u64 offset,
					int flags)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_readv(sqe, fd, iovecs, nr_vecs, offset);
	sqe->rw_flags = flags;
@@ -574,6 +660,7 @@ IOURINGINLINE void io_uring_prep_readv2(struct io_uring_sqe *sqe, int fd,
 IOURINGINLINE void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    __u64 offset, int buf_index)
+					    LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
	sqe->buf_index = (__u16) buf_index;
@@ -583,6 +670,7 @@ IOURINGINLINE void io_uring_prep_readv_fixed(struct io_uring_sqe *sqe, int fd,
					     const struct iovec *iovecs,
					     unsigned nr_vecs, __u64 offset,
					     int flags, int buf_index)
+					     LIBURING_NOEXCEPT
 {
	io_uring_prep_readv2(sqe, fd, iovecs, nr_vecs, offset, flags);
	sqe->opcode = IORING_OP_READV_FIXED;
@@ -592,6 +680,7 @@ IOURINGINLINE void io_uring_prep_readv_fixed(struct io_uring_sqe *sqe, int fd,
 IOURINGINLINE void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, __u64 offset)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
 }
@@ -600,6 +689,7 @@ IOURINGINLINE void io_uring_prep_writev2(struct io_uring_sqe *sqe, int fd,
					 const struct iovec *iovecs,
					 unsigned nr_vecs, __u64 offset,
					 int flags)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_writev(sqe, fd, iovecs, nr_vecs, offset);
	sqe->rw_flags = flags;
@@ -608,6 +698,7 @@ IOURINGINLINE void io_uring_prep_writev2(struct io_uring_sqe *sqe, int fd,
 IOURINGINLINE void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     __u64 offset, int buf_index)
+					     LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
	sqe->buf_index = (__u16) buf_index;
@@ -617,6 +708,7 @@ IOURINGINLINE void io_uring_prep_writev_fixed(struct io_uring_sqe *sqe, int fd,
					      const struct iovec *iovecs,
					      unsigned nr_vecs, __u64 offset,
					      int flags, int buf_index)
+					      LIBURING_NOEXCEPT
 {
	io_uring_prep_writev2(sqe, fd, iovecs, nr_vecs, offset, flags);
	sqe->opcode = IORING_OP_WRITEV_FIXED;
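Note (not part of the diff): the `*_fixed` variants above target buffers previously registered with `io_uring_register_buffers()`, with `buf_index` selecting the registered slot; this skips per-I/O page pinning. A sketch of one registered buffer and one fixed read into it:

```c
#include <liburing.h>

/* Register a single buffer, then read into it via READ_FIXED.
 * Returns bytes read, or a negative errno-style value. */
static int read_fixed_example(struct io_uring *ring, int fd)
{
	static char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_register_buffers(ring, &iov, 1);
	if (ret)
		return ret;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_fixed(sqe, fd, buf, sizeof(buf), 0, 0 /* buf_index */);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (!ret) {
		ret = cqe->res;		/* bytes read, or -errno */
		io_uring_cqe_seen(ring, cqe);
	}
	return ret;
}
```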
@@ -625,6 +717,7 @@ IOURINGINLINE void io_uring_prep_writev_fixed(struct io_uring_sqe *sqe, int fd,
 
 IOURINGINLINE void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
					 struct msghdr *msg, unsigned flags)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg, 1, 0);
	sqe->msg_flags = flags;
@@ -633,6 +726,7 @@ IOURINGINLINE void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
 IOURINGINLINE void io_uring_prep_recvmsg_multishot(struct io_uring_sqe *sqe,
						   int fd, struct msghdr *msg,
						   unsigned flags)
+						   LIBURING_NOEXCEPT
 {
	io_uring_prep_recvmsg(sqe, fd, msg, flags);
	sqe->ioprio |= IORING_RECV_MULTISHOT;
@@ -641,12 +735,14 @@ IOURINGINLINE void io_uring_prep_recvmsg_multishot(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_sendmsg(struct io_uring_sqe *sqe, int fd,
					 const struct msghdr *msg,
					 unsigned flags)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg, 1, 0);
	sqe->msg_flags = flags;
 }
 
-IOURINGINLINE unsigned __io_uring_prep_poll_mask(unsigned poll_mask)
+_LOCAL_INLINE unsigned __io_uring_prep_poll_mask(unsigned poll_mask)
+							LIBURING_NOEXCEPT
 {
 #if __BYTE_ORDER == __BIG_ENDIAN
	poll_mask = __swahw32(poll_mask);
@@ -656,6 +752,7 @@ IOURINGINLINE unsigned __io_uring_prep_poll_mask(unsigned poll_mask)
 
 IOURINGINLINE void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
					  unsigned poll_mask)
+					  LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
	sqe->poll32_events = __io_uring_prep_poll_mask(poll_mask);
@@ -663,6 +760,7 @@ IOURINGINLINE void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
 
 IOURINGINLINE void io_uring_prep_poll_multishot(struct io_uring_sqe *sqe,
						int fd, unsigned poll_mask)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_poll_add(sqe, fd, poll_mask);
	sqe->len = IORING_POLL_ADD_MULTI;
@@ -670,6 +768,7 @@ IOURINGINLINE void io_uring_prep_poll_multishot(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
					     __u64 user_data)
+					     LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, NULL, 0, 0);
	sqe->addr = user_data;
@@ -679,6 +778,7 @@ IOURINGINLINE void io_uring_prep_poll_update(struct io_uring_sqe *sqe,
					     __u64 old_user_data,
					     __u64 new_user_data,
					     unsigned poll_mask, unsigned flags)
+					     LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, NULL, flags,
			 new_user_data);
@@ -688,19 +788,28 @@ IOURINGINLINE void io_uring_prep_poll_update(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
				       unsigned fsync_flags)
+				       LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, NULL, 0, 0);
	sqe->fsync_flags = fsync_flags;
 }
 
 IOURINGINLINE void io_uring_prep_nop(struct io_uring_sqe *sqe)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_NOP, sqe, -1, NULL, 0, 0);
 }
 
+IOURINGINLINE void io_uring_prep_nop128(struct io_uring_sqe *sqe)
+						LIBURING_NOEXCEPT
+{
+	io_uring_prep_rw(IORING_OP_NOP128, sqe, -1, NULL, 0, 0);
+}
+
 IOURINGINLINE void io_uring_prep_timeout(struct io_uring_sqe *sqe,
-					 struct __kernel_timespec *ts,
+					 const struct __kernel_timespec *ts,
					 unsigned count, unsigned flags)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_TIMEOUT, sqe, -1, ts, 1, count);
	sqe->timeout_flags = flags;
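Note (not part of the diff): `io_uring_prep_timeout()` now takes a `const` timespec; behavior is unchanged. A sketch of a standalone timeout — the timespec must stay alive until the request completes, which `static` storage handles here:

```c
#include <liburing.h>

/* Arm a standalone 100 ms timeout; its CQE completes with -ETIME when
 * the timer fires (or -ECANCELED if it is removed first). */
static void arm_timeout(struct io_uring *ring)
{
	static struct __kernel_timespec ts = {
		.tv_sec = 0,
		.tv_nsec = 100 * 1000 * 1000,
	};
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_timeout(sqe, &ts, 0 /* count */, 0 /* flags */);
	io_uring_submit(ring);
}
```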
@@ -708,6 +817,7 @@ IOURINGINLINE void io_uring_prep_timeout(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_timeout_remove(struct io_uring_sqe *sqe,
						__u64 user_data, unsigned flags)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, NULL, 0, 0);
	sqe->addr = user_data;
@@ -715,8 +825,9 @@ IOURINGINLINE void io_uring_prep_timeout_remove(struct io_uring_sqe *sqe,
 }
 
 IOURINGINLINE void io_uring_prep_timeout_update(struct io_uring_sqe *sqe,
-						struct __kernel_timespec *ts,
+						const struct __kernel_timespec *ts,
						__u64 user_data, unsigned flags)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1, NULL, 0,
			 (uintptr_t) ts);
@@ -727,6 +838,7 @@ IOURINGINLINE void io_uring_prep_timeout_update(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_accept(struct io_uring_sqe *sqe, int fd,
					struct sockaddr *addr,
					socklen_t *addrlen, int flags)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr, 0,
			 uring_ptr_to_u64(addrlen));
@@ -738,6 +850,7 @@ IOURINGINLINE void io_uring_prep_accept_direct(struct io_uring_sqe *sqe, int fd,
					       struct sockaddr *addr,
					       socklen_t *addrlen, int flags,
					       unsigned int file_index)
+					       LIBURING_NOEXCEPT
 {
	io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
	/* offset by 1 for allocation */
@@ -749,6 +862,7 @@ IOURINGINLINE void io_uring_prep_accept_direct(struct io_uring_sqe *sqe, int fd,
 IOURINGINLINE void io_uring_prep_multishot_accept(struct io_uring_sqe *sqe,
						  int fd, struct sockaddr *addr,
						  socklen_t *addrlen, int flags)
+						  LIBURING_NOEXCEPT
 {
	io_uring_prep_accept(sqe, fd, addr, addrlen, flags);
	sqe->ioprio |= IORING_ACCEPT_MULTISHOT;
@@ -760,6 +874,7 @@ IOURINGINLINE void io_uring_prep_multishot_accept_direct(struct io_uring_sqe *sq
							 struct sockaddr *addr,
							 socklen_t *addrlen,
							 int flags)
+							 LIBURING_NOEXCEPT
 {
	io_uring_prep_multishot_accept(sqe, fd, addr, addrlen, flags);
	__io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC - 1);
@@ -767,6 +882,7 @@ IOURINGINLINE void io_uring_prep_multishot_accept_direct(struct io_uring_sqe *sq
 
 IOURINGINLINE void io_uring_prep_cancel64(struct io_uring_sqe *sqe,
					  __u64 user_data, int flags)
+					  LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, NULL, 0, 0);
	sqe->addr = user_data;
@@ -774,21 +890,24 @@ IOURINGINLINE void io_uring_prep_cancel64(struct io_uring_sqe *sqe,
 }
 
 IOURINGINLINE void io_uring_prep_cancel(struct io_uring_sqe *sqe,
-					void *user_data, int flags)
+					const void *user_data, int flags)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_cancel64(sqe, (__u64) (uintptr_t) user_data, flags);
 }
 
 IOURINGINLINE void io_uring_prep_cancel_fd(struct io_uring_sqe *sqe, int fd,
					   unsigned int flags)
+					   LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, fd, NULL, 0, 0);
	sqe->cancel_flags = (__u32) flags | IORING_ASYNC_CANCEL_FD;
 }
 
 IOURINGINLINE void io_uring_prep_link_timeout(struct io_uring_sqe *sqe,
-					      struct __kernel_timespec *ts,
+					      const struct __kernel_timespec *ts,
					      unsigned flags)
+					      LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_LINK_TIMEOUT, sqe, -1, ts, 1, 0);
	sqe->timeout_flags = flags;
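Note (not part of the diff): a multishot accept arms once and posts one CQE per incoming connection; as long as `IORING_CQE_F_MORE` is set in `cqe->flags`, the request stays armed. A sketch of the accept loop; `handle_client` is a hypothetical callback:

```c
#include <liburing.h>

void handle_client(int client_fd);	/* hypothetical consumer */

/* Accept connections until the multishot request terminates. */
static void accept_loop(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;

	io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
	io_uring_submit(ring);

	for (;;) {
		if (io_uring_wait_cqe(ring, &cqe))
			break;
		if (cqe->res >= 0)
			handle_client(cqe->res);	/* res is the new fd */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* request terminated; re-arm here if desired */
			io_uring_cqe_seen(ring, cqe);
			break;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}
```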
@@ -797,19 +916,22 @@ IOURINGINLINE void io_uring_prep_link_timeout(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_connect(struct io_uring_sqe *sqe, int fd,
					 const struct sockaddr *addr,
					 socklen_t addrlen)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_CONNECT, sqe, fd, addr, 0, addrlen);
 }
 
 IOURINGINLINE void io_uring_prep_bind(struct io_uring_sqe *sqe, int fd,
-				      struct sockaddr *addr,
+				      const struct sockaddr *addr,
				      socklen_t addrlen)
+				      LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_BIND, sqe, fd, addr, 0, addrlen);
 }
 
 IOURINGINLINE void io_uring_prep_listen(struct io_uring_sqe *sqe, int fd,
-					int backlog)
+					int backlog)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_LISTEN, sqe, fd, 0, backlog, 0);
 }
@@ -818,14 +940,16 @@ struct epoll_event;
 IOURINGINLINE void io_uring_prep_epoll_wait(struct io_uring_sqe *sqe, int fd,
					    struct epoll_event *events,
					    int maxevents, unsigned flags)
+					    LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_EPOLL_WAIT, sqe, fd, events, maxevents, 0);
	sqe->rw_flags = flags;
 }
 
 IOURINGINLINE void io_uring_prep_files_update(struct io_uring_sqe *sqe,
-					      int *fds, unsigned nr_fds,
+					      const int *fds, unsigned nr_fds,
					      int offset)
+					      LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds, nr_fds,
			 (__u64) offset);
@@ -833,6 +957,7 @@ IOURINGINLINE void io_uring_prep_files_update(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
					   int mode, __u64 offset, __u64 len)
+					   LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd,
			 0, (unsigned int) mode, (__u64) offset);
@@ -842,6 +967,7 @@ IOURINGINLINE void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
 IOURINGINLINE void io_uring_prep_openat(struct io_uring_sqe *sqe, int dfd,
					const char *path, int flags,
					mode_t mode)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path, mode, 0);
	sqe->open_flags = (__u32) flags;
@@ -852,6 +978,7 @@ IOURINGINLINE void io_uring_prep_openat_direct(struct io_uring_sqe *sqe,
					       int dfd, const char *path,
					       int flags, mode_t mode,
					       unsigned file_index)
+					       LIBURING_NOEXCEPT
 {
	io_uring_prep_openat(sqe, dfd, path, flags, mode);
	/* offset by 1 for allocation */
@@ -862,6 +989,7 @@ IOURINGINLINE void io_uring_prep_openat_direct(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_open(struct io_uring_sqe *sqe,
				      const char *path, int flags, mode_t mode)
+				      LIBURING_NOEXCEPT
 {
	io_uring_prep_openat(sqe, AT_FDCWD, path, flags, mode);
 }
@@ -870,17 +998,20 @@ IOURINGINLINE void io_uring_prep_open(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_open_direct(struct io_uring_sqe *sqe,
					     const char *path, int flags, mode_t mode,
					     unsigned file_index)
+					     LIBURING_NOEXCEPT
 {
	io_uring_prep_openat_direct(sqe, AT_FDCWD, path, flags, mode, file_index);
 }
 
 IOURINGINLINE void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, NULL, 0, 0);
 }
 
 IOURINGINLINE void io_uring_prep_close_direct(struct io_uring_sqe *sqe,
					      unsigned file_index)
+					      LIBURING_NOEXCEPT
 {
	io_uring_prep_close(sqe, 0);
	__io_uring_set_target_fixed_file(sqe, file_index);
@@ -888,6 +1019,7 @@ IOURINGINLINE void io_uring_prep_close_direct(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
				      void *buf, unsigned nbytes, __u64 offset)
+				      LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
 }
@@ -895,6 +1027,7 @@ IOURINGINLINE void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
 IOURINGINLINE void io_uring_prep_read_multishot(struct io_uring_sqe *sqe,
						int fd, unsigned nbytes,
						__u64 offset, int buf_group)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_READ_MULTISHOT, sqe, fd, NULL, nbytes,
			 offset);
@@ -905,6 +1038,7 @@ IOURINGINLINE void io_uring_prep_read_multishot(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_write(struct io_uring_sqe *sqe, int fd,
				       const void *buf, unsigned nbytes,
				       __u64 offset)
+				       LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
 }
@@ -913,6 +1047,7 @@ struct statx;
 IOURINGINLINE void io_uring_prep_statx(struct io_uring_sqe *sqe, int dfd,
				       const char *path, int flags,
				       unsigned mask, struct statx *statxbuf)
+				       LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_STATX, sqe, dfd, path, mask,
			 uring_ptr_to_u64(statxbuf));
@@ -921,6 +1056,7 @@ IOURINGINLINE void io_uring_prep_statx(struct io_uring_sqe *sqe, int dfd,
 
 IOURINGINLINE void io_uring_prep_fadvise(struct io_uring_sqe *sqe, int fd,
					 __u64 offset, __u32 len, int advice)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, (__u32) len, offset);
	sqe->fadvise_advice = (__u32) advice;
@@ -928,6 +1064,7 @@ IOURINGINLINE void io_uring_prep_fadvise(struct io_uring_sqe *sqe, int fd,
 
 IOURINGINLINE void io_uring_prep_madvise(struct io_uring_sqe *sqe, void *addr,
					 __u32 length, int advice)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, (__u32) length, 0);
	sqe->fadvise_advice = (__u32) advice;
@@ -935,6 +1072,7 @@ IOURINGINLINE void io_uring_prep_madvise(struct io_uring_sqe *sqe, void *addr,
 
 IOURINGINLINE void io_uring_prep_fadvise64(struct io_uring_sqe *sqe, int fd,
					   __u64 offset, off_t len, int advice)
+					   LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, 0, offset);
	sqe->addr = len;
@@ -943,6 +1081,7 @@ IOURINGINLINE void io_uring_prep_fadvise64(struct io_uring_sqe *sqe, int fd,
 
 IOURINGINLINE void io_uring_prep_madvise64(struct io_uring_sqe *sqe, void *addr,
					   off_t length, int advice)
+					   LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, 0, length);
	sqe->fadvise_advice = (__u32) advice;
@@ -950,6 +1089,7 @@ IOURINGINLINE void io_uring_prep_madvise64(struct io_uring_sqe *sqe, void *addr,
 
 IOURINGINLINE void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
				      const void *buf, size_t len, int flags)
+				      LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, (__u32) len, 0);
	sqe->msg_flags = (__u32) flags;
@@ -957,6 +1097,7 @@ IOURINGINLINE void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
 
 IOURINGINLINE void io_uring_prep_send_bundle(struct io_uring_sqe *sqe,
					     int sockfd, size_t len, int flags)
+					     LIBURING_NOEXCEPT
 {
	io_uring_prep_send(sqe, sockfd, NULL, len, flags);
	sqe->ioprio |= IORING_RECVSEND_BUNDLE;
@@ -965,6 +1106,7 @@ IOURINGINLINE void io_uring_prep_send_bundle(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_send_set_addr(struct io_uring_sqe *sqe,
					       const struct sockaddr *dest_addr,
					       __u16 addr_len)
+					       LIBURING_NOEXCEPT
 {
	sqe->addr2 = (unsigned long)(const void *)dest_addr;
	sqe->addr_len = addr_len;
@@ -974,6 +1116,7 @@ IOURINGINLINE void io_uring_prep_sendto(struct io_uring_sqe *sqe, int sockfd,
					const void *buf, size_t len, int flags,
					const struct sockaddr *addr,
					socklen_t addrlen)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_send(sqe, sockfd, buf, len, flags);
	io_uring_prep_send_set_addr(sqe, addr, addrlen);
@@ -982,6 +1125,7 @@ IOURINGINLINE void io_uring_prep_sendto(struct io_uring_sqe *sqe, int sockfd,
 IOURINGINLINE void io_uring_prep_send_zc(struct io_uring_sqe *sqe, int sockfd,
					 const void *buf, size_t len, int flags,
					 unsigned zc_flags)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_SEND_ZC, sqe, sockfd, buf, (__u32) len, 0);
	sqe->msg_flags = (__u32) flags;
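Note (not part of the diff): a zero-copy send normally posts two CQEs — the send result (flagged `IORING_CQE_F_MORE`) and, later, a notification (flagged `IORING_CQE_F_NOTIF`) indicating the kernel no longer references the buffer, which must stay stable until then. A sketch of reaping both before reusing the buffer:

```c
#include <liburing.h>

/* Send "buf" zero-copy and return only once it is safe to reuse it.
 * Returns bytes sent, or -1 on wait failure. */
static int send_zc_and_wait(struct io_uring *ring, int sockfd,
			    const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int sent = 0;

	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	io_uring_submit(ring);

	for (;;) {
		if (io_uring_wait_cqe(ring, &cqe))
			return -1;
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			/* kernel released the buffer; safe to reuse */
			io_uring_cqe_seen(ring, cqe);
			return sent;
		}
		sent = cqe->res;	/* bytes sent, or -errno */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* no notification pending */
			io_uring_cqe_seen(ring, cqe);
			return sent;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}
```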
@@ -993,6 +1137,7 @@ IOURINGINLINE void io_uring_prep_send_zc_fixed(struct io_uring_sqe *sqe,
					       size_t len, int flags,
					       unsigned zc_flags,
					       unsigned buf_index)
+					       LIBURING_NOEXCEPT
 {
	io_uring_prep_send_zc(sqe, sockfd, buf, len, flags, zc_flags);
	sqe->ioprio |= IORING_RECVSEND_FIXED_BUF;
@@ -1002,6 +1147,7 @@ IOURINGINLINE void io_uring_prep_send_zc_fixed(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_sendmsg_zc(struct io_uring_sqe *sqe, int fd,
					    const struct msghdr *msg,
					    unsigned flags)
+					    LIBURING_NOEXCEPT
 {
	io_uring_prep_sendmsg(sqe, fd, msg, flags);
	sqe->opcode = IORING_OP_SENDMSG_ZC;
@@ -1012,6 +1158,7 @@ IOURINGINLINE void io_uring_prep_sendmsg_zc_fixed(struct io_uring_sqe *sqe,
						  const struct msghdr *msg,
						  unsigned flags,
						  unsigned buf_index)
+						  LIBURING_NOEXCEPT
 {
	io_uring_prep_sendmsg_zc(sqe, fd, msg, flags);
	sqe->ioprio |= IORING_RECVSEND_FIXED_BUF;
@@ -1020,6 +1167,7 @@ IOURINGINLINE void io_uring_prep_sendmsg_zc_fixed(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_recv(struct io_uring_sqe *sqe, int sockfd,
				      void *buf, size_t len, int flags)
+				      LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, (__u32) len, 0);
	sqe->msg_flags = (__u32) flags;
@@ -1028,6 +1176,7 @@ IOURINGINLINE void io_uring_prep_recv(struct io_uring_sqe *sqe, int sockfd,
 IOURINGINLINE void io_uring_prep_recv_multishot(struct io_uring_sqe *sqe,
						int sockfd, void *buf,
						size_t len, int flags)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_recv(sqe, sockfd, buf, len, flags);
	sqe->ioprio |= IORING_RECV_MULTISHOT;
@@ -1035,6 +1184,7 @@ IOURINGINLINE void io_uring_prep_recv_multishot(struct io_uring_sqe *sqe,
 
 IOURINGINLINE struct io_uring_recvmsg_out *
 io_uring_recvmsg_validate(void *buf, int buf_len, struct msghdr *msgh)
+						LIBURING_NOEXCEPT
 {
	unsigned long header = msgh->msg_controllen + msgh->msg_namelen +
				sizeof(struct io_uring_recvmsg_out);
@@ -1044,6 +1194,7 @@ io_uring_recvmsg_validate(void *buf, int buf_len, struct msghdr *msgh)
 }
 
 IOURINGINLINE void *io_uring_recvmsg_name(struct io_uring_recvmsg_out *o)
+						LIBURING_NOEXCEPT
 {
	return (void *) &o[1];
 }
@@ -1051,6 +1202,7 @@ IOURINGINLINE void *io_uring_recvmsg_name(struct io_uring_recvmsg_out *o)
 IOURINGINLINE struct cmsghdr *
 io_uring_recvmsg_cmsg_firsthdr(struct io_uring_recvmsg_out *o,
			       struct msghdr *msgh)
+			       LIBURING_NOEXCEPT
 {
	if (o->controllen < sizeof(struct cmsghdr))
		return NULL;
@@ -1062,6 +1214,7 @@ io_uring_recvmsg_cmsg_firsthdr(struct io_uring_recvmsg_out *o,
 IOURINGINLINE struct cmsghdr *
 io_uring_recvmsg_cmsg_nexthdr(struct io_uring_recvmsg_out *o, struct msghdr *msgh,
			      struct cmsghdr *cmsg)
+			      LIBURING_NOEXCEPT
 {
	unsigned char *end;
 
@@ -1082,6 +1235,7 @@ io_uring_recvmsg_cmsg_nexthdr(struct io_uring_recvmsg_out *o, struct msghdr *msg
 
 IOURINGINLINE void *io_uring_recvmsg_payload(struct io_uring_recvmsg_out *o,
					     struct msghdr *msgh)
+					     LIBURING_NOEXCEPT
 {
	return (void *)((unsigned char *)io_uring_recvmsg_name(o) +
			msgh->msg_namelen + msgh->msg_controllen);
@@ -1090,6 +1244,7 @@ IOURINGINLINE void *io_uring_recvmsg_payload(struct io_uring_recvmsg_out *o,
 IOURINGINLINE unsigned int
 io_uring_recvmsg_payload_length(struct io_uring_recvmsg_out *o,
				int buf_len, struct msghdr *msgh)
+				LIBURING_NOEXCEPT
 {
	unsigned long payload_start, payload_end;
 
@@ -1099,7 +1254,8 @@ io_uring_recvmsg_payload_length(struct io_uring_recvmsg_out *o,
 }
 
 IOURINGINLINE void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
-					 const char *path, struct open_how *how)
+					 const char *path, const struct open_how *how)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_OPENAT2, sqe, dfd, path, sizeof(*how),
			 (uint64_t) (uintptr_t) how);
@@ -1108,8 +1264,9 @@ IOURINGINLINE void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
 /* open directly into the fixed file table */
 IOURINGINLINE void io_uring_prep_openat2_direct(struct io_uring_sqe *sqe,
						int dfd, const char *path,
-						struct open_how *how,
+						const struct open_how *how,
						unsigned file_index)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_openat2(sqe, dfd, path, how);
	/* offset by 1 for allocation */
@@ -1121,7 +1278,8 @@ IOURINGINLINE void io_uring_prep_openat2_direct(struct io_uring_sqe *sqe,
 struct epoll_event;
 IOURINGINLINE void io_uring_prep_epoll_ctl(struct io_uring_sqe *sqe, int epfd,
					   int fd, int op,
-					   struct epoll_event *ev)
+					   const struct epoll_event *ev)
+					   LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev,
			 (__u32) op, (__u32) fd);
@@ -1130,6 +1288,7 @@ IOURINGINLINE void io_uring_prep_epoll_ctl(struct io_uring_sqe *sqe, int epfd,
 IOURINGINLINE void io_uring_prep_provide_buffers(struct io_uring_sqe *sqe,
						 void *addr, int len, int nr,
						 int bgid, int bid)
+						 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, (__u32) len,
			 (__u64) bid);
@@ -1138,6 +1297,7 @@ IOURINGINLINE void io_uring_prep_provide_buffers(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_remove_buffers(struct io_uring_sqe *sqe,
						int nr, int bgid)
+						LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, NULL, 0, 0);
	sqe->buf_group = (__u16) bgid;
@@ -1145,12 +1305,14 @@ IOURINGINLINE void io_uring_prep_remove_buffers(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_shutdown(struct io_uring_sqe *sqe, int fd,
					  int how)
+					  LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, NULL, (__u32) how, 0);
 }
 
 IOURINGINLINE void io_uring_prep_unlinkat(struct io_uring_sqe *sqe, int dfd,
					  const char *path, int flags)
+					  LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_UNLINKAT, sqe, dfd, path, 0, 0);
	sqe->unlink_flags = (__u32) flags;
@@ -1158,6 +1320,7 @@ IOURINGINLINE void io_uring_prep_unlinkat(struct io_uring_sqe *sqe, int dfd,
 
 IOURINGINLINE void io_uring_prep_unlink(struct io_uring_sqe *sqe,
					const char *path, int flags)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_unlinkat(sqe, AT_FDCWD, path, flags);
 }
@@ -1165,6 +1328,7 @@ IOURINGINLINE void io_uring_prep_unlink(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_renameat(struct io_uring_sqe *sqe, int olddfd,
					  const char *oldpath, int newdfd,
					  const char *newpath, unsigned int flags)
+					  LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_RENAMEAT, sqe, olddfd, oldpath,
			 (__u32) newdfd,
@@ -1175,6 +1339,7 @@ IOURINGINLINE void io_uring_prep_renameat(struct io_uring_sqe *sqe, int olddfd,
 IOURINGINLINE void io_uring_prep_rename(struct io_uring_sqe *sqe,
					const char *oldpath,
					const char *newpath)
+					LIBURING_NOEXCEPT
 {
	io_uring_prep_renameat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, 0);
 }
@@ -1182,6 +1347,7 @@ IOURINGINLINE void io_uring_prep_rename(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_sync_file_range(struct io_uring_sqe *sqe,
						 int fd, unsigned len,
						 __u64 offset, int flags)
+						 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_SYNC_FILE_RANGE, sqe, fd, NULL, len, offset);
	sqe->sync_range_flags = (__u32) flags;
@@ -1189,12 +1355,14 @@ IOURINGINLINE void io_uring_prep_sync_file_range(struct io_uring_sqe *sqe,
 
 IOURINGINLINE void io_uring_prep_mkdirat(struct io_uring_sqe *sqe, int dfd,
					 const char *path, mode_t mode)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_MKDIRAT, sqe, dfd, path, mode, 0);
 }
 
 IOURINGINLINE void io_uring_prep_mkdir(struct io_uring_sqe *sqe,
				       const char *path, mode_t mode)
+				       LIBURING_NOEXCEPT
 {
	io_uring_prep_mkdirat(sqe, AT_FDCWD, path, mode);
 }
@@ -1202,6 +1370,7 @@ IOURINGINLINE void io_uring_prep_mkdir(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_symlinkat(struct io_uring_sqe *sqe,
					   const char *target, int newdirfd,
					   const char *linkpath)
+					   LIBURING_NOEXCEPT
 {
	io_uring_prep_rw(IORING_OP_SYMLINKAT, sqe, newdirfd, target, 0,
			 (uint64_t) (uintptr_t) linkpath);
@@ -1210,6 +1379,7 @@ IOURINGINLINE void io_uring_prep_symlinkat(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_symlink(struct io_uring_sqe *sqe,
					 const char *target,
					 const char *linkpath)
+					 LIBURING_NOEXCEPT
 {
	io_uring_prep_symlinkat(sqe, target, AT_FDCWD, linkpath);
|
|
1215
1385
|
}
|
|
@@ -1217,6 +1387,7 @@ IOURINGINLINE void io_uring_prep_symlink(struct io_uring_sqe *sqe,
|
|
|
1217
1387
|
IOURINGINLINE void io_uring_prep_linkat(struct io_uring_sqe *sqe, int olddfd,
|
|
1218
1388
|
const char *oldpath, int newdfd,
|
|
1219
1389
|
const char *newpath, int flags)
|
|
1390
|
+
LIBURING_NOEXCEPT
|
|
1220
1391
|
{
|
|
1221
1392
|
io_uring_prep_rw(IORING_OP_LINKAT, sqe, olddfd, oldpath, (__u32) newdfd,
|
|
1222
1393
|
(uint64_t) (uintptr_t) newpath);
|
|
@@ -1226,6 +1397,7 @@ IOURINGINLINE void io_uring_prep_linkat(struct io_uring_sqe *sqe, int olddfd,
|
|
|
1226
1397
|
IOURINGINLINE void io_uring_prep_link(struct io_uring_sqe *sqe,
|
|
1227
1398
|
const char *oldpath, const char *newpath,
|
|
1228
1399
|
int flags)
|
|
1400
|
+
LIBURING_NOEXCEPT
|
|
1229
1401
|
{
|
|
1230
1402
|
io_uring_prep_linkat(sqe, AT_FDCWD, oldpath, AT_FDCWD, newpath, flags);
|
|
1231
1403
|
}
|
|
@@ -1233,6 +1405,7 @@ IOURINGINLINE void io_uring_prep_link(struct io_uring_sqe *sqe,
|
|
|
1233
1405
|
IOURINGINLINE void io_uring_prep_msg_ring_cqe_flags(struct io_uring_sqe *sqe,
|
|
1234
1406
|
int fd, unsigned int len, __u64 data,
|
|
1235
1407
|
unsigned int flags, unsigned int cqe_flags)
|
|
1408
|
+
LIBURING_NOEXCEPT
|
|
1236
1409
|
{
|
|
1237
1410
|
io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, NULL, len, data);
|
|
1238
1411
|
sqe->msg_ring_flags = IORING_MSG_RING_FLAGS_PASS | flags;
|
|
@@ -1242,6 +1415,7 @@ IOURINGINLINE void io_uring_prep_msg_ring_cqe_flags(struct io_uring_sqe *sqe,
|
|
|
1242
1415
|
IOURINGINLINE void io_uring_prep_msg_ring(struct io_uring_sqe *sqe, int fd,
|
|
1243
1416
|
unsigned int len, __u64 data,
|
|
1244
1417
|
unsigned int flags)
|
|
1418
|
+
LIBURING_NOEXCEPT
|
|
1245
1419
|
{
|
|
1246
1420
|
io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd, NULL, len, data);
|
|
1247
1421
|
sqe->msg_ring_flags = flags;
|
|
@@ -1250,6 +1424,7 @@ IOURINGINLINE void io_uring_prep_msg_ring(struct io_uring_sqe *sqe, int fd,
|
|
|
1250
1424
|
IOURINGINLINE void io_uring_prep_msg_ring_fd(struct io_uring_sqe *sqe, int fd,
|
|
1251
1425
|
int source_fd, int target_fd,
|
|
1252
1426
|
__u64 data, unsigned int flags)
|
|
1427
|
+
LIBURING_NOEXCEPT
|
|
1253
1428
|
{
|
|
1254
1429
|
io_uring_prep_rw(IORING_OP_MSG_RING, sqe, fd,
|
|
1255
1430
|
(void *) (uintptr_t) IORING_MSG_SEND_FD, 0, data);
|
|
@@ -1264,6 +1439,7 @@ IOURINGINLINE void io_uring_prep_msg_ring_fd(struct io_uring_sqe *sqe, int fd,
|
|
|
1264
1439
|
IOURINGINLINE void io_uring_prep_msg_ring_fd_alloc(struct io_uring_sqe *sqe,
|
|
1265
1440
|
int fd, int source_fd,
|
|
1266
1441
|
__u64 data, unsigned int flags)
|
|
1442
|
+
LIBURING_NOEXCEPT
|
|
1267
1443
|
{
|
|
1268
1444
|
io_uring_prep_msg_ring_fd(sqe, fd, source_fd, IORING_FILE_INDEX_ALLOC,
|
|
1269
1445
|
data, flags);
|
|
@@ -1272,6 +1448,7 @@ IOURINGINLINE void io_uring_prep_msg_ring_fd_alloc(struct io_uring_sqe *sqe,
|
|
|
1272
1448
|
IOURINGINLINE void io_uring_prep_getxattr(struct io_uring_sqe *sqe,
|
|
1273
1449
|
const char *name, char *value,
|
|
1274
1450
|
const char *path, unsigned int len)
|
|
1451
|
+
LIBURING_NOEXCEPT
|
|
1275
1452
|
{
|
|
1276
1453
|
io_uring_prep_rw(IORING_OP_GETXATTR, sqe, 0, name, len,
|
|
1277
1454
|
(__u64) (uintptr_t) value);
|
|
@@ -1283,6 +1460,7 @@ IOURINGINLINE void io_uring_prep_setxattr(struct io_uring_sqe *sqe,
|
|
|
1283
1460
|
const char *name, const char *value,
|
|
1284
1461
|
const char *path, int flags,
|
|
1285
1462
|
unsigned int len)
|
|
1463
|
+
LIBURING_NOEXCEPT
|
|
1286
1464
|
{
|
|
1287
1465
|
io_uring_prep_rw(IORING_OP_SETXATTR, sqe, 0, name, len,
|
|
1288
1466
|
(__u64) (uintptr_t) value);
|
|
@@ -1293,6 +1471,7 @@ IOURINGINLINE void io_uring_prep_setxattr(struct io_uring_sqe *sqe,
|
|
|
1293
1471
|
IOURINGINLINE void io_uring_prep_fgetxattr(struct io_uring_sqe *sqe,
|
|
1294
1472
|
int fd, const char *name,
|
|
1295
1473
|
char *value, unsigned int len)
|
|
1474
|
+
LIBURING_NOEXCEPT
|
|
1296
1475
|
{
|
|
1297
1476
|
io_uring_prep_rw(IORING_OP_FGETXATTR, sqe, fd, name, len,
|
|
1298
1477
|
(__u64) (uintptr_t) value);
|
|
@@ -1302,6 +1481,7 @@ IOURINGINLINE void io_uring_prep_fgetxattr(struct io_uring_sqe *sqe,
|
|
|
1302
1481
|
IOURINGINLINE void io_uring_prep_fsetxattr(struct io_uring_sqe *sqe, int fd,
|
|
1303
1482
|
const char *name, const char *value,
|
|
1304
1483
|
int flags, unsigned int len)
|
|
1484
|
+
LIBURING_NOEXCEPT
|
|
1305
1485
|
{
|
|
1306
1486
|
io_uring_prep_rw(IORING_OP_FSETXATTR, sqe, fd, name, len,
|
|
1307
1487
|
(__u64) (uintptr_t) value);
|
|
@@ -1311,6 +1491,7 @@ IOURINGINLINE void io_uring_prep_fsetxattr(struct io_uring_sqe *sqe, int fd,
|
|
|
1311
1491
|
IOURINGINLINE void io_uring_prep_socket(struct io_uring_sqe *sqe, int domain,
|
|
1312
1492
|
int type, int protocol,
|
|
1313
1493
|
unsigned int flags)
|
|
1494
|
+
LIBURING_NOEXCEPT
|
|
1314
1495
|
{
|
|
1315
1496
|
io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
|
|
1316
1497
|
sqe->rw_flags = flags;
|
|
@@ -1321,6 +1502,7 @@ IOURINGINLINE void io_uring_prep_socket_direct(struct io_uring_sqe *sqe,
|
|
|
1321
1502
|
int protocol,
|
|
1322
1503
|
unsigned file_index,
|
|
1323
1504
|
unsigned int flags)
|
|
1505
|
+
LIBURING_NOEXCEPT
|
|
1324
1506
|
{
|
|
1325
1507
|
io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
|
|
1326
1508
|
sqe->rw_flags = flags;
|
|
@@ -1334,12 +1516,43 @@ IOURINGINLINE void io_uring_prep_socket_direct_alloc(struct io_uring_sqe *sqe,
                                                      int domain, int type,
                                                      int protocol,
                                                      unsigned int flags)
+                                                     LIBURING_NOEXCEPT
 {
         io_uring_prep_rw(IORING_OP_SOCKET, sqe, domain, NULL, protocol, type);
         sqe->rw_flags = flags;
         __io_uring_set_target_fixed_file(sqe, IORING_FILE_INDEX_ALLOC - 1);
 }
 
+IOURINGINLINE void __io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
+                                             int op,
+                                             __u32 cmd_op,
+                                             int fd)
+                                             LIBURING_NOEXCEPT
+{
+        sqe->opcode = (__u8) op;
+        sqe->fd = fd;
+        sqe->cmd_op = cmd_op;
+        sqe->__pad1 = 0;
+        sqe->addr = 0ul;
+        sqe->len = 0;
+}
+
+IOURINGINLINE void io_uring_prep_uring_cmd(struct io_uring_sqe *sqe,
+                                           int cmd_op,
+                                           int fd)
+                                           LIBURING_NOEXCEPT
+{
+        __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD, cmd_op, fd);
+}
+
+IOURINGINLINE void io_uring_prep_uring_cmd128(struct io_uring_sqe *sqe,
+                                              int cmd_op,
+                                              int fd)
+                                              LIBURING_NOEXCEPT
+{
+        __io_uring_prep_uring_cmd(sqe, IORING_OP_URING_CMD128, cmd_op, fd);
+}
+
 /*
  * Prepare commands for sockets
  */
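Note: the three helpers added above give IORING_OP_URING_CMD a single setup path: __io_uring_prep_uring_cmd() fills opcode, fd and cmd_op and zeroes the fields a command must not inherit, while the two wrappers pick the 64B or 128B command opcode. A minimal usage sketch, assuming a liburing built from this header and a kernel with io_uring socket-command support; bytes_unread() is a hypothetical helper, not part of the library:

#include <liburing.h>

/* Query how many unread bytes sit in a TCP socket's receive queue
 * (SIOCINQ) via the new io_uring_prep_uring_cmd() helper. */
static int bytes_unread(struct io_uring *ring, int sockfd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -1;
        /* sets opcode, fd and cmd_op; clears addr/len/__pad1 */
        io_uring_prep_uring_cmd(sqe, SOCKET_URING_OP_SIOCINQ, sockfd);
        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;         /* unread byte count, or -errno */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}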
@@ -1350,20 +1563,34 @@ IOURINGINLINE void io_uring_prep_cmd_sock(struct io_uring_sqe *sqe,
                                           int optname,
                                           void *optval,
                                           int optlen)
+                                          LIBURING_NOEXCEPT
 {
-        io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fd, NULL, 0, 0);
+        io_uring_prep_uring_cmd(sqe, cmd_op, fd);
         sqe->optval = (unsigned long) (uintptr_t) optval;
         sqe->optname = optname;
         sqe->optlen = optlen;
-        sqe->cmd_op = cmd_op;
         sqe->level = level;
 }
 
+IOURINGINLINE void io_uring_prep_cmd_getsockname(struct io_uring_sqe *sqe,
+                                                 int fd, struct sockaddr *sockaddr,
+                                                 socklen_t *sockaddr_len,
+                                                 int peer)
+                                                 LIBURING_NOEXCEPT
+{
+        io_uring_prep_uring_cmd(sqe, SOCKET_URING_OP_GETSOCKNAME, fd);
+
+        sqe->addr = (uintptr_t) sockaddr;
+        sqe->addr3 = (unsigned long) (uintptr_t) sockaddr_len;
+        sqe->optlen = peer;
+}
+
 IOURINGINLINE void io_uring_prep_waitid(struct io_uring_sqe *sqe,
                                         idtype_t idtype,
                                         id_t id,
                                         siginfo_t *infop,
                                         int options, unsigned int flags)
+                                        LIBURING_NOEXCEPT
 {
         io_uring_prep_rw(IORING_OP_WAITID, sqe, id, NULL, (unsigned) idtype, 0);
         sqe->waitid_flags = flags;
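Note: io_uring_prep_cmd_getsockname() maps getsockname(2)/getpeername(2) onto the uring-cmd path: the address buffer travels in sqe->addr, the in/out length pointer in sqe->addr3, and peer selects the remote address. A sketch under the assumption that the kernel implements SOCKET_URING_OP_GETSOCKNAME; query_sock_addr() is a hypothetical wrapper:

#include <liburing.h>
#include <sys/socket.h>

static int query_sock_addr(struct io_uring *ring, int sockfd,
                           struct sockaddr_storage *ss, socklen_t *len,
                           int peer /* 0 = local addr, non-zero = peer addr */)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -1;
        *len = sizeof(*ss);
        io_uring_prep_cmd_getsockname(sqe, sockfd, (struct sockaddr *) ss,
                                      len, peer);
        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
}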
@@ -1372,9 +1599,10 @@ IOURINGINLINE void io_uring_prep_waitid(struct io_uring_sqe *sqe,
 }
 
 IOURINGINLINE void io_uring_prep_futex_wake(struct io_uring_sqe *sqe,
-                                            uint32_t *futex, uint64_t val,
+                                            const uint32_t *futex, uint64_t val,
                                             uint64_t mask, uint32_t futex_flags,
                                             unsigned int flags)
+                                            LIBURING_NOEXCEPT
 {
         io_uring_prep_rw(IORING_OP_FUTEX_WAKE, sqe, futex_flags, futex, 0, val);
         sqe->futex_flags = flags;
@@ -1382,9 +1610,10 @@ IOURINGINLINE void io_uring_prep_futex_wake(struct io_uring_sqe *sqe,
 }
 
 IOURINGINLINE void io_uring_prep_futex_wait(struct io_uring_sqe *sqe,
-                                            uint32_t *futex, uint64_t val,
+                                            const uint32_t *futex, uint64_t val,
                                             uint64_t mask, uint32_t futex_flags,
                                             unsigned int flags)
+                                            LIBURING_NOEXCEPT
 {
         io_uring_prep_rw(IORING_OP_FUTEX_WAIT, sqe, futex_flags, futex, 0, val);
         sqe->futex_flags = flags;
@@ -1393,9 +1622,10 @@ IOURINGINLINE void io_uring_prep_futex_wait(struct io_uring_sqe *sqe,
 
 struct futex_waitv;
 IOURINGINLINE void io_uring_prep_futex_waitv(struct io_uring_sqe *sqe,
-                                             struct futex_waitv *futex,
+                                             const struct futex_waitv *futex,
                                              uint32_t nr_futex,
                                              unsigned int flags)
+                                             LIBURING_NOEXCEPT
 {
         io_uring_prep_rw(IORING_OP_FUTEX_WAITV, sqe, 0, futex, nr_futex, 0);
         sqe->futex_flags = flags;
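Note: the futex prep functions now take const-qualified pointers, so callers holding a read-only view of the futex word no longer need a cast. A one-shot wait might look like the sketch below; it assumes kernel futex support in io_uring (6.7+) and takes FUTEX2_SIZE_U32 and FUTEX_BITSET_MATCH_ANY from <linux/futex.h>; wait_on_futex() is a hypothetical helper:

#include <liburing.h>
#include <linux/futex.h>
#include <stdint.h>

static int wait_on_futex(struct io_uring *ring, const uint32_t *word,
                         uint32_t expected)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -1;
        /* blocks while *word == expected; completes -EAGAIN if it already differs */
        io_uring_prep_futex_wait(sqe, word, expected, FUTEX_BITSET_MATCH_ANY,
                                 FUTEX2_SIZE_U32, 0);
        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
}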
@@ -1404,6 +1634,7 @@ IOURINGINLINE void io_uring_prep_futex_waitv(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_fixed_fd_install(struct io_uring_sqe *sqe,
                                                   int fd,
                                                   unsigned int flags)
+                                                  LIBURING_NOEXCEPT
 {
         io_uring_prep_rw(IORING_OP_FIXED_FD_INSTALL, sqe, fd, NULL, 0, 0);
         sqe->flags = IOSQE_FIXED_FILE;
@@ -1413,6 +1644,7 @@ IOURINGINLINE void io_uring_prep_fixed_fd_install(struct io_uring_sqe *sqe,
 #ifdef _GNU_SOURCE
 IOURINGINLINE void io_uring_prep_ftruncate(struct io_uring_sqe *sqe,
                                            int fd, loff_t len)
+                                           LIBURING_NOEXCEPT
 {
         io_uring_prep_rw(IORING_OP_FTRUNCATE, sqe, fd, 0, 0, len);
 }
@@ -1421,15 +1653,35 @@ IOURINGINLINE void io_uring_prep_ftruncate(struct io_uring_sqe *sqe,
 IOURINGINLINE void io_uring_prep_cmd_discard(struct io_uring_sqe *sqe,
                                              int fd,
                                              uint64_t offset, uint64_t nbytes)
+                                             LIBURING_NOEXCEPT
 {
-        io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fd, NULL, 0, 0);
-        sqe->cmd_op = BLOCK_URING_CMD_DISCARD;
+        io_uring_prep_uring_cmd(sqe, BLOCK_URING_CMD_DISCARD, fd);
         sqe->addr = offset;
         sqe->addr3 = nbytes;
 }
 
+IOURINGINLINE void io_uring_prep_pipe(struct io_uring_sqe *sqe, int *fds,
+                                      int pipe_flags)
+{
+        io_uring_prep_rw(IORING_OP_PIPE, sqe, 0, fds, 0, 0);
+        sqe->pipe_flags = (__u32) pipe_flags;
+}
+
+/* setup pipe directly into the fixed file table */
+IOURINGINLINE void io_uring_prep_pipe_direct(struct io_uring_sqe *sqe, int *fds,
+                                             int pipe_flags,
+                                             unsigned int file_index)
+{
+        io_uring_prep_pipe(sqe, fds, pipe_flags);
+        /* offset by 1 for allocation */
+        if (file_index == IORING_FILE_INDEX_ALLOC)
+                file_index--;
+        __io_uring_set_target_fixed_file(sqe, file_index);
+}
+
 /* Read the kernel's SQ head index with appropriate memory ordering */
 IOURINGINLINE unsigned io_uring_load_sq_head(const struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         /*
          * Without acquire ordering, we could overwrite a SQE before the kernel
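Note: io_uring_prep_pipe() is the async counterpart of pipe2(2); on completion the two descriptors are written into fds, and the _direct variant installs them into the fixed file table instead. A sketch, assuming a kernel that implements IORING_OP_PIPE (older kernels complete with -EINVAL); make_pipe_async() is a hypothetical wrapper:

#include <liburing.h>
#include <fcntl.h>

static int make_pipe_async(struct io_uring *ring, int fds[2])
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -1;
        io_uring_prep_pipe(sqe, fds, O_CLOEXEC);
        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;         /* 0 on success; fds[] now valid */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}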
@@ -1447,6 +1699,7 @@ IOURINGINLINE unsigned io_uring_load_sq_head(const struct io_uring *ring)
  * the SQ ring
  */
 IOURINGINLINE unsigned io_uring_sq_ready(const struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         /* always use real head, to avoid losing sync for short submit */
         return ring->sq.sqe_tail - io_uring_load_sq_head(ring);
@@ -1456,6 +1709,7 @@ IOURINGINLINE unsigned io_uring_sq_ready(const struct io_uring *ring)
  * Returns how much space is left in the SQ ring.
  */
 IOURINGINLINE unsigned io_uring_sq_space_left(const struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         return ring->sq.ring_entries - io_uring_sq_ready(ring);
 }
@@ -1466,11 +1720,13 @@ IOURINGINLINE unsigned io_uring_sq_space_left(const struct io_uring *ring)
  * SQE `index` can be computed as &sq.sqes[(index & sq.ring_mask) << sqe_shift].
  */
 IOURINGINLINE unsigned io_uring_sqe_shift_from_flags(unsigned flags)
+        LIBURING_NOEXCEPT
 {
         return !!(flags & IORING_SETUP_SQE128);
 }
 
 IOURINGINLINE unsigned io_uring_sqe_shift(const struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         return io_uring_sqe_shift_from_flags(ring->flags);
 }
@@ -1483,6 +1739,7 @@ IOURINGINLINE unsigned io_uring_sqe_shift(const struct io_uring *ring)
  * this feature.
  */
 IOURINGINLINE int io_uring_sqring_wait(struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         if (!(ring->flags & IORING_SETUP_SQPOLL))
                 return 0;
@@ -1496,6 +1753,7 @@ IOURINGINLINE int io_uring_sqring_wait(struct io_uring *ring)
  * Returns how many unconsumed entries are ready in the CQ ring
  */
 IOURINGINLINE unsigned io_uring_cq_ready(const struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         return io_uring_smp_load_acquire(ring->cq.ktail) - *ring->cq.khead;
 }
@@ -1505,6 +1763,7 @@ IOURINGINLINE unsigned io_uring_cq_ready(const struct io_uring *ring)
  * the CQ ring
  */
 IOURINGINLINE bool io_uring_cq_has_overflow(const struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
 }
@@ -1513,6 +1772,7 @@ IOURINGINLINE bool io_uring_cq_has_overflow(const struct io_uring *ring)
  * Returns true if the eventfd notification is currently enabled
  */
 IOURINGINLINE bool io_uring_cq_eventfd_enabled(const struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         if (!ring->cq.kflags)
                 return true;
@@ -1526,6 +1786,7 @@ IOURINGINLINE bool io_uring_cq_eventfd_enabled(const struct io_uring *ring)
  */
 IOURINGINLINE int io_uring_cq_eventfd_toggle(struct io_uring *ring,
                                              bool enabled)
+                                             LIBURING_NOEXCEPT
 {
         uint32_t flags;
 
@@ -1555,18 +1816,36 @@ IOURINGINLINE int io_uring_cq_eventfd_toggle(struct io_uring *ring,
 IOURINGINLINE int io_uring_wait_cqe_nr(struct io_uring *ring,
                                        struct io_uring_cqe **cqe_ptr,
                                        unsigned wait_nr)
+                                       LIBURING_NOEXCEPT
 {
         return __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr, NULL);
 }
 
+_LOCAL_INLINE bool io_uring_skip_cqe(struct io_uring *ring,
+                                     struct io_uring_cqe *cqe, int *err)
+{
+        if (cqe->flags & IORING_CQE_F_SKIP)
+                goto out;
+        if (ring->features & IORING_FEAT_EXT_ARG)
+                return false;
+        if (cqe->user_data != LIBURING_UDATA_TIMEOUT)
+                return false;
+        if (cqe->res < 0)
+                *err = cqe->res;
+out:
+        io_uring_cq_advance(ring, io_uring_cqe_nr(cqe));
+        return !*err;
+}
+
 /*
  * Internal helper, don't use directly in applications. Use one of the
  * "official" versions of this, io_uring_peek_cqe(), io_uring_wait_cqe(),
  * or io_uring_wait_cqes*().
  */
-IOURINGINLINE int __io_uring_peek_cqe(struct io_uring *ring,
+_LOCAL_INLINE int __io_uring_peek_cqe(struct io_uring *ring,
                                       struct io_uring_cqe **cqe_ptr,
                                       unsigned *nr_available)
+                                      LIBURING_NOEXCEPT
 {
         struct io_uring_cqe *cqe;
         int err = 0;
@@ -1576,7 +1855,12 @@ IOURINGINLINE int __io_uring_peek_cqe(struct io_uring *ring,
 
         do {
                 unsigned tail = io_uring_smp_load_acquire(ring->cq.ktail);
-                unsigned head = *ring->cq.khead;
+
+                /**
+                 * A load_acquire on the head prevents reordering with the
+                 * cqe load below, ensuring that we see the correct cq entry.
+                 */
+                unsigned head = io_uring_smp_load_acquire(ring->cq.khead);
 
                 cqe = NULL;
                 available = tail - head;
@@ -1584,17 +1868,9 @@ IOURINGINLINE int __io_uring_peek_cqe(struct io_uring *ring,
                         break;
 
                 cqe = &ring->cq.cqes[(head & mask) << shift];
-                if (!(ring->features & IORING_FEAT_EXT_ARG) &&
-                    cqe->user_data == LIBURING_UDATA_TIMEOUT) {
-                        if (cqe->res < 0)
-                                err = cqe->res;
-                        io_uring_cq_advance(ring, 1);
-                        if (!err)
-                                continue;
-                        cqe = NULL;
-                }
-
-                break;
+                if (!io_uring_skip_cqe(ring, cqe, &err))
+                        break;
+                cqe = NULL;
         } while (1);
 
         *cqe_ptr = cqe;
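Note: the rewrite above funnels internal timeout and IORING_CQE_F_SKIP completions through io_uring_skip_cqe(), so __io_uring_peek_cqe() no longer open-codes that filtering. Nothing changes for applications, which keep reaping through the public API; a minimal reference loop:

#include <liburing.h>

/* Drain whatever completions are currently ready. */
static unsigned drain_completions(struct io_uring *ring)
{
        struct io_uring_cqe *cqe;
        unsigned head, seen = 0;

        io_uring_for_each_cqe(ring, head, cqe) {
                /* cqe->res and cqe->user_data are valid here */
                seen++;
        }
        io_uring_cq_advance(ring, seen);
        return seen;
}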
@@ -1609,6 +1885,7 @@ IOURINGINLINE int __io_uring_peek_cqe(struct io_uring *ring,
  */
 IOURINGINLINE int io_uring_peek_cqe(struct io_uring *ring,
                                     struct io_uring_cqe **cqe_ptr)
+                                    LIBURING_NOEXCEPT
 {
         if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr)
                 return 0;
@@ -1622,6 +1899,7 @@ IOURINGINLINE int io_uring_peek_cqe(struct io_uring *ring,
  */
 IOURINGINLINE int io_uring_wait_cqe(struct io_uring *ring,
                                     struct io_uring_cqe **cqe_ptr)
+                                    LIBURING_NOEXCEPT
 {
         if (!__io_uring_peek_cqe(ring, cqe_ptr, NULL) && *cqe_ptr)
                 return 0;
@@ -1637,6 +1915,7 @@ IOURINGINLINE int io_uring_wait_cqe(struct io_uring *ring,
  * Returns a vacant sqe, or NULL if we're full.
  */
 IOURINGINLINE struct io_uring_sqe *_io_uring_get_sqe(struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         struct io_uring_sq *sq = &ring->sq;
         unsigned head = io_uring_load_sq_head(ring), tail = sq->sqe_tail;
@@ -1655,11 +1934,13 @@ IOURINGINLINE struct io_uring_sqe *_io_uring_get_sqe(struct io_uring *ring)
  * Return the appropriate mask for a buffer ring of size 'ring_entries'
  */
 IOURINGINLINE int io_uring_buf_ring_mask(__u32 ring_entries)
+        LIBURING_NOEXCEPT
 {
         return ring_entries - 1;
 }
 
 IOURINGINLINE void io_uring_buf_ring_init(struct io_uring_buf_ring *br)
+        LIBURING_NOEXCEPT
 {
         br->tail = 0;
 }
@@ -1671,6 +1952,7 @@ IOURINGINLINE void io_uring_buf_ring_add(struct io_uring_buf_ring *br,
                                          void *addr, unsigned int len,
                                          unsigned short bid, int mask,
                                          int buf_offset)
+                                         LIBURING_NOEXCEPT
 {
         struct io_uring_buf *buf = &br->bufs[(br->tail + buf_offset) & mask];
 
@@ -1686,6 +1968,7 @@ IOURINGINLINE void io_uring_buf_ring_add(struct io_uring_buf_ring *br,
  */
 IOURINGINLINE void io_uring_buf_ring_advance(struct io_uring_buf_ring *br,
                                              int count)
+                                             LIBURING_NOEXCEPT
 {
         unsigned short new_tail = br->tail + count;
 
@@ -1695,6 +1978,7 @@ IOURINGINLINE void io_uring_buf_ring_advance(struct io_uring_buf_ring *br,
 IOURINGINLINE void __io_uring_buf_ring_cq_advance(struct io_uring *ring,
                                                   struct io_uring_buf_ring *br,
                                                   int cq_count, int buf_count)
+                                                  LIBURING_NOEXCEPT
 {
         io_uring_buf_ring_advance(br, buf_count);
         io_uring_cq_advance(ring, cq_count);
@@ -1710,6 +1994,7 @@ IOURINGINLINE void __io_uring_buf_ring_cq_advance(struct io_uring *ring,
 IOURINGINLINE void io_uring_buf_ring_cq_advance(struct io_uring *ring,
                                                 struct io_uring_buf_ring *br,
                                                 int count)
+                                                LIBURING_NOEXCEPT
 {
         __io_uring_buf_ring_cq_advance(ring, br, count, count);
 }
@@ -1717,6 +2002,7 @@ IOURINGINLINE void io_uring_buf_ring_cq_advance(struct io_uring *ring,
 IOURINGINLINE int io_uring_buf_ring_available(struct io_uring *ring,
                                               struct io_uring_buf_ring *br,
                                               unsigned short bgid)
+                                              LIBURING_NOEXCEPT
 {
         uint16_t head;
         int ret;
@@ -1749,6 +2035,7 @@ IOURINGINLINE int io_uring_buf_ring_available(struct io_uring *ring,
  */
 #ifndef LIBURING_INTERNAL
 IOURINGINLINE struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+        LIBURING_NOEXCEPT
 {
         return _io_uring_get_sqe(ring);
 }
@@ -1756,11 +2043,57 @@ IOURINGINLINE struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
 struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
 #endif
 
-ssize_t io_uring_mlock_size(unsigned entries, unsigned flags);
-ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p);
 
-
-
+/*
+ * Return a 128B sqe to fill. Applications must later call io_uring_submit()
+ * when it's ready to tell the kernel about it. The caller may call this
+ * function multiple times before calling io_uring_submit().
+ *
+ * Returns a vacant 128B sqe, or NULL if we're full. If the current tail is the
+ * last entry in the ring, this function will insert a nop + skip complete such
+ * that the 128b entry wraps back to the beginning of the queue for a
+ * contiguous big sq entry. It's up to the caller to use a 128b opcode in order
+ * for the kernel to know how to advance its sq head pointer.
+ */
+IOURINGINLINE struct io_uring_sqe *io_uring_get_sqe128(struct io_uring *ring)
+        LIBURING_NOEXCEPT
+{
+        struct io_uring_sq *sq = &ring->sq;
+        unsigned head = io_uring_load_sq_head(ring), tail = sq->sqe_tail;
+        struct io_uring_sqe *sqe;
+
+        if (ring->flags & IORING_SETUP_SQE128)
+                return io_uring_get_sqe(ring);
+        if (!(ring->flags & IORING_SETUP_SQE_MIXED))
+                return NULL;
+
+        if (((tail + 1) & sq->ring_mask) == 0) {
+                if ((tail + 2) - head >= sq->ring_entries)
+                        return NULL;
+
+                sqe = _io_uring_get_sqe(ring);
+                io_uring_prep_nop(sqe);
+                sqe->flags |= IOSQE_CQE_SKIP_SUCCESS;
+                tail = sq->sqe_tail;
+        } else if ((tail + 1) - head >= sq->ring_entries) {
+                return NULL;
+        }
+
+        sqe = &sq->sqes[tail & sq->ring_mask];
+        sq->sqe_tail = tail + 2;
+        io_uring_initialize_sqe(sqe);
+        return sqe;
+}
+
+ssize_t io_uring_mlock_size(unsigned entries, unsigned flags)
+        LIBURING_NOEXCEPT;
+ssize_t io_uring_mlock_size_params(unsigned entries, struct io_uring_params *p)
+        LIBURING_NOEXCEPT;
+
+ssize_t io_uring_memory_size(unsigned entries, unsigned flags)
+        LIBURING_NOEXCEPT;
+ssize_t io_uring_memory_size_params(unsigned entries, struct io_uring_params *p)
+        LIBURING_NOEXCEPT;
 
 /*
  * Versioning information for liburing.
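Note: io_uring_get_sqe128() only yields a slot on rings set up with IORING_SETUP_SQE128 (where every sqe is already 128B) or IORING_SETUP_SQE_MIXED; in the mixed case a tail at the ring's last index costs an extra skip-complete nop, and the caller must pair the slot with a 128B opcode so the kernel advances its head by two entries. A sketch; grab_big_sqe() is a hypothetical helper:

#include <liburing.h>

static struct io_uring_sqe *grab_big_sqe(struct io_uring *ring, int cmd_op,
                                         int fd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe128(ring);

        if (!sqe)       /* full, or the ring lacks SQE128/SQE_MIXED */
                return NULL;
        io_uring_prep_uring_cmd128(sqe, cmd_op, fd);
        return sqe;
}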
@@ -1771,9 +2104,9 @@ ssize_t io_uring_memory_size_params(unsigned entries, struct io_uring_params *p)
  * Use io_uring_check_version() for runtime checks of the version of
  * liburing that was loaded by the dynamic linker.
  */
-int io_uring_major_version(void);
-int io_uring_minor_version(void);
-bool io_uring_check_version(int major, int minor);
+int io_uring_major_version(void) LIBURING_NOEXCEPT;
+int io_uring_minor_version(void) LIBURING_NOEXCEPT;
+bool io_uring_check_version(int major, int minor) LIBURING_NOEXCEPT;
 
 #define IO_URING_CHECK_VERSION(major,minor) \
   (major > IO_URING_VERSION_MAJOR || \
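Note: with the declarations above now carrying LIBURING_NOEXCEPT, the version API is otherwise unchanged; the header-time constants and the runtime values can still disagree when an older shared object is loaded, which a quick check makes visible:

#include <liburing.h>
#include <stdio.h>

int main(void)
{
        printf("headers: liburing %d.%d, loaded: %d.%d\n",
               IO_URING_VERSION_MAJOR, IO_URING_VERSION_MINOR,
               io_uring_major_version(), io_uring_minor_version());
        return 0;
}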
@@ -1788,4 +2121,8 @@ bool io_uring_check_version(int major, int minor);
 #undef IOURINGINLINE
 #endif
 
+#ifdef _LOCAL_INLINE
+#undef _LOCAL_INLINE
+#endif
+
 #endif