uringmachine 0.2 → 0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +15 -0
- data/README.md +85 -0
- data/TODO.md +5 -0
- data/examples/echo_server.rb +18 -40
- data/examples/inout.rb +19 -0
- data/examples/nc.rb +36 -0
- data/ext/um/extconf.rb +6 -15
- data/ext/um/um.c +340 -53
- data/ext/um/um.h +33 -11
- data/ext/um/um_class.c +101 -119
- data/ext/um/um_const.c +184 -0
- data/ext/um/um_op.c +39 -18
- data/ext/um/um_utils.c +48 -3
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +12 -0
- data/test/helper.rb +13 -12
- data/test/test_um.rb +301 -3
- data/vendor/liburing/.github/workflows/build.yml +29 -1
- data/vendor/liburing/.gitignore +1 -0
- data/vendor/liburing/CHANGELOG +15 -0
- data/vendor/liburing/CONTRIBUTING.md +165 -0
- data/vendor/liburing/configure +32 -0
- data/vendor/liburing/examples/Makefile +8 -1
- data/vendor/liburing/examples/kdigest.c +405 -0
- data/vendor/liburing/examples/proxy.c +75 -8
- data/vendor/liburing/liburing.pc.in +1 -1
- data/vendor/liburing/src/Makefile +16 -2
- data/vendor/liburing/src/include/liburing/io_uring.h +31 -0
- data/vendor/liburing/src/include/liburing/sanitize.h +39 -0
- data/vendor/liburing/src/include/liburing.h +31 -4
- data/vendor/liburing/src/liburing-ffi.map +5 -0
- data/vendor/liburing/src/liburing.map +1 -0
- data/vendor/liburing/src/queue.c +3 -0
- data/vendor/liburing/src/register.c +36 -0
- data/vendor/liburing/src/sanitize.c +176 -0
- data/vendor/liburing/src/setup.c +1 -1
- data/vendor/liburing/test/35fa71a030ca.c +7 -0
- data/vendor/liburing/test/500f9fbadef8.c +2 -0
- data/vendor/liburing/test/7ad0e4b2f83c.c +0 -25
- data/vendor/liburing/test/917257daa0fe.c +7 -0
- data/vendor/liburing/test/Makefile +31 -4
- data/vendor/liburing/test/a0908ae19763.c +7 -0
- data/vendor/liburing/test/a4c0b3decb33.c +7 -0
- data/vendor/liburing/test/accept.c +14 -4
- data/vendor/liburing/test/b19062a56726.c +7 -0
- data/vendor/liburing/test/bind-listen.c +2 -2
- data/vendor/liburing/test/buf-ring-nommap.c +10 -3
- data/vendor/liburing/test/buf-ring.c +2 -0
- data/vendor/liburing/test/coredump.c +7 -0
- data/vendor/liburing/test/cq-overflow.c +13 -1
- data/vendor/liburing/test/d4ae271dfaae.c +11 -3
- data/vendor/liburing/test/defer-taskrun.c +2 -2
- data/vendor/liburing/test/defer-tw-timeout.c +4 -1
- data/vendor/liburing/test/defer.c +2 -2
- data/vendor/liburing/test/double-poll-crash.c +1 -1
- data/vendor/liburing/test/eeed8b54e0df.c +2 -0
- data/vendor/liburing/test/eventfd.c +0 -1
- data/vendor/liburing/test/exit-no-cleanup.c +11 -0
- data/vendor/liburing/test/fadvise.c +9 -26
- data/vendor/liburing/test/fdinfo.c +9 -1
- data/vendor/liburing/test/file-register.c +14 -2
- data/vendor/liburing/test/file-update.c +1 -1
- data/vendor/liburing/test/file-verify.c +27 -16
- data/vendor/liburing/test/files-exit-hang-timeout.c +1 -2
- data/vendor/liburing/test/fixed-buf-iter.c +3 -1
- data/vendor/liburing/test/fixed-hugepage.c +12 -1
- data/vendor/liburing/test/fsnotify.c +1 -0
- data/vendor/liburing/test/futex.c +16 -4
- data/vendor/liburing/test/helpers.c +47 -0
- data/vendor/liburing/test/helpers.h +6 -0
- data/vendor/liburing/test/init-mem.c +5 -3
- data/vendor/liburing/test/io-cancel.c +0 -24
- data/vendor/liburing/test/io_uring_passthrough.c +2 -0
- data/vendor/liburing/test/io_uring_register.c +25 -6
- data/vendor/liburing/test/iopoll-leak.c +4 -0
- data/vendor/liburing/test/iopoll-overflow.c +1 -1
- data/vendor/liburing/test/iopoll.c +3 -3
- data/vendor/liburing/test/kallsyms.c +203 -0
- data/vendor/liburing/test/link-timeout.c +159 -0
- data/vendor/liburing/test/linked-defer-close.c +224 -0
- data/vendor/liburing/test/madvise.c +12 -25
- data/vendor/liburing/test/min-timeout-wait.c +0 -25
- data/vendor/liburing/test/min-timeout.c +0 -25
- data/vendor/liburing/test/mkdir.c +6 -0
- data/vendor/liburing/test/msg-ring.c +8 -2
- data/vendor/liburing/test/napi-test.c +15 -2
- data/vendor/liburing/test/no-mmap-inval.c +2 -0
- data/vendor/liburing/test/nop.c +44 -0
- data/vendor/liburing/test/ooo-file-unreg.c +1 -1
- data/vendor/liburing/test/open-close.c +40 -0
- data/vendor/liburing/test/openat2.c +37 -14
- data/vendor/liburing/test/poll-many.c +13 -7
- data/vendor/liburing/test/poll-mshot-update.c +17 -10
- data/vendor/liburing/test/poll-v-poll.c +6 -3
- data/vendor/liburing/test/pollfree.c +148 -0
- data/vendor/liburing/test/read-mshot-empty.c +156 -153
- data/vendor/liburing/test/read-mshot.c +276 -27
- data/vendor/liburing/test/read-write.c +78 -13
- data/vendor/liburing/test/recv-msgall-stream.c +3 -0
- data/vendor/liburing/test/recv-msgall.c +5 -0
- data/vendor/liburing/test/recvsend_bundle-inc.c +680 -0
- data/vendor/liburing/test/recvsend_bundle.c +92 -29
- data/vendor/liburing/test/reg-fd-only.c +14 -4
- data/vendor/liburing/test/regbuf-clone.c +187 -0
- data/vendor/liburing/test/regbuf-merge.c +7 -0
- data/vendor/liburing/test/register-restrictions.c +86 -85
- data/vendor/liburing/test/rename.c +59 -1
- data/vendor/liburing/test/ringbuf-read.c +5 -0
- data/vendor/liburing/test/ringbuf-status.c +5 -1
- data/vendor/liburing/test/runtests.sh +16 -1
- data/vendor/liburing/test/send-zerocopy.c +59 -0
- data/vendor/liburing/test/short-read.c +1 -0
- data/vendor/liburing/test/socket.c +43 -0
- data/vendor/liburing/test/splice.c +3 -1
- data/vendor/liburing/test/sq-poll-dup.c +1 -1
- data/vendor/liburing/test/sq-poll-share.c +2 -0
- data/vendor/liburing/test/sqpoll-disable-exit.c +8 -0
- data/vendor/liburing/test/sqpoll-exit-hang.c +1 -25
- data/vendor/liburing/test/sqpoll-sleep.c +1 -25
- data/vendor/liburing/test/statx.c +89 -0
- data/vendor/liburing/test/stdout.c +2 -0
- data/vendor/liburing/test/submit-and-wait.c +1 -25
- data/vendor/liburing/test/submit-reuse.c +4 -26
- data/vendor/liburing/test/symlink.c +12 -1
- data/vendor/liburing/test/sync-cancel.c +48 -21
- data/vendor/liburing/test/thread-exit.c +5 -0
- data/vendor/liburing/test/timeout-new.c +1 -26
- data/vendor/liburing/test/timeout.c +12 -26
- data/vendor/liburing/test/unlink.c +94 -1
- data/vendor/liburing/test/uring_cmd_ublk.c +1252 -0
- data/vendor/liburing/test/waitid.c +62 -8
- data/vendor/liburing/test/wq-aff.c +35 -0
- data/vendor/liburing/test/xfail_prep_link_timeout_out_of_scope.c +46 -0
- data/vendor/liburing/test/xfail_register_buffers_out_of_scope.c +51 -0
- metadata +17 -4
- data/examples/event_loop.rb +0 -69
- data/examples/fibers.rb +0 -105
@@ -440,11 +440,21 @@ struct io_uring_cqe {
  * IORING_CQE_F_SOCK_NONEMPTY	If set, more data to read after socket recv
  * IORING_CQE_F_NOTIF	Set for notification CQEs. Can be used to distinct
  * 			them from sends.
+ * IORING_CQE_F_BUF_MORE	If set, the buffer ID set in the completion will get
+ *				more completions. In other words, the buffer is being
+ *				partially consumed, and will be used by the kernel for
+ *				more completions. This is only set for buffers used via
+ *				the incremental buffer consumption, as provided by
+ *				a ring buffer setup with IOU_PBUF_RING_INC. For any
+ *				other provided buffer type, all completions with a
+ *				buffer passed back is automatically returned to the
+ *				application.
  */
 #define IORING_CQE_F_BUFFER		(1U << 0)
 #define IORING_CQE_F_MORE		(1U << 1)
 #define IORING_CQE_F_SOCK_NONEMPTY	(1U << 2)
 #define IORING_CQE_F_NOTIF		(1U << 3)
+#define IORING_CQE_F_BUF_MORE		(1U << 4)
 
 #define IORING_CQE_BUFFER_SHIFT		16
 
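The new IORING_CQE_F_BUF_MORE flag only matters on the completion side. The sketch below is hypothetical and not part of this diff; it shows how an application consuming provided buffers might tell a partially consumed buffer from one fully returned to it. The handle_cqe helper is an assumed name.

/*
 * Hypothetical sketch: interpreting the buffer-related CQE flags above.
 * The buffer ID travels in the upper bits of cqe->flags.
 */
#include <liburing.h>
#include <stdio.h>

static void handle_cqe(struct io_uring_cqe *cqe)
{
	if (cqe->flags & IORING_CQE_F_BUFFER) {
		unsigned buf_id = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

		if (cqe->flags & IORING_CQE_F_BUF_MORE)
			/* buffer only partially consumed; kernel keeps using it */
			printf("buffer %u: %d bytes, more to come\n", buf_id, cqe->res);
		else
			/* buffer fully consumed and returned to the application */
			printf("buffer %u: %d bytes, done\n", buf_id, cqe->res);
	}
}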
@@ -599,6 +609,9 @@ enum io_uring_register_op {
 
 	IORING_REGISTER_CLOCK			= 29,
 
+	/* clone registered buffers from source ring to current ring */
+	IORING_REGISTER_CLONE_BUFFERS		= 30,
+
 	/* this goes last */
 	IORING_REGISTER_LAST,
 
@@ -684,6 +697,16 @@ struct io_uring_clock_register {
 	__u32	__resv[3];
 };
 
+enum {
+	IORING_REGISTER_SRC_REGISTERED = 1,
+};
+
+struct io_uring_clone_buffers {
+	__u32	src_fd;
+	__u32	flags;
+	__u32	pad[6];
+};
+
 struct io_uring_buf {
 	__u64	addr;
 	__u32	len;
@@ -716,9 +739,17 @@ struct io_uring_buf_ring {
  * mmap(2) with the offset set as:
  *	IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
  * to get a virtual mapping for the ring.
+ * IOU_PBUF_RING_INC:	If set, buffers consumed from this buffer ring can be
+ *			consumed incrementally. Normally one (or more) buffers
+ *			are fully consumed. With incremental consumptions, it's
+ *			feasible to register big ranges of buffers, and each
+ *			use of it will consume only as much as it needs. This
+ *			requires that both the kernel and application keep
+ *			track of where the current read/recv index is at.
  */
 enum io_uring_register_pbuf_ring_flags {
 	IOU_PBUF_RING_MMAP	= 1,
+	IOU_PBUF_RING_INC	= 2,
 };
 
 /* argument for IORING_(UN)REGISTER_PBUF_RING */
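Incremental consumption is selected when the buffer ring is registered. Below is a hedged sketch, not taken from this diff, of registering one large provided buffer with IOU_PBUF_RING_INC; it assumes the flags argument of io_uring_setup_buf_ring() reaches the kernel, which the io_uring_register_buf_ring() change further down in this diff (reg->flags |= flags) now makes happen. BGID, BIG_BUF_SIZE and setup_inc_ring are illustrative names.

/*
 * Hypothetical sketch: one big buffer range, consumed incrementally by the
 * kernel across many recv/read completions instead of retired after one use.
 */
#include <liburing.h>
#include <stdlib.h>

#define BGID		7
#define BIG_BUF_SIZE	(1024 * 1024)

static struct io_uring_buf_ring *setup_inc_ring(struct io_uring *ring, void **buf)
{
	struct io_uring_buf_ring *br;
	int ret;

	*buf = malloc(BIG_BUF_SIZE);
	if (!*buf)
		return NULL;

	/* a single ring entry is enough when one big range is provided */
	br = io_uring_setup_buf_ring(ring, 1, BGID, IOU_PBUF_RING_INC, &ret);
	if (!br) {
		free(*buf);
		return NULL;
	}

	io_uring_buf_ring_add(br, *buf, BIG_BUF_SIZE, 0,
			      io_uring_buf_ring_mask(1), 0);
	io_uring_buf_ring_advance(br, 1);
	return br;
}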
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef LIBURING_SANITIZE_H
+#define LIBURING_SANITIZE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct io_uring;
+struct iovec;
+
+#if defined(CONFIG_USE_SANITIZER)
+void liburing_sanitize_ring(struct io_uring *ring);
+void liburing_sanitize_address(const void *addr);
+void liburing_sanitize_region(const void *addr, unsigned int len);
+void liburing_sanitize_iovecs(const struct iovec *iovecs, unsigned nr);
+#else
+#define __maybe_unused __attribute__((__unused__))
+static inline void liburing_sanitize_ring(struct io_uring __maybe_unused *ring)
+{
+}
+static inline void liburing_sanitize_address(const void __maybe_unused *addr)
+{
+}
+static inline void liburing_sanitize_region(const void __maybe_unused *addr,
+					    unsigned int __maybe_unused len)
+{
+}
+static inline void liburing_sanitize_iovecs(const struct iovec __maybe_unused *iovecs,
+					    unsigned __maybe_unused nr)
+{
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
@@ -19,6 +19,7 @@
 #include "liburing/io_uring_version.h"
 #include "liburing/barrier.h"
 
+
 #ifndef uring_unlikely
 #define uring_unlikely(cond)	__builtin_expect(!!(cond), 0)
 #endif
@@ -196,6 +197,7 @@ int io_uring_submit_and_wait_min_timeout(struct io_uring *ring,
 					 unsigned min_wait,
 					 sigset_t *sigmask);
 
+int io_uring_clone_buffers(struct io_uring *dst, struct io_uring *src);
 int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
 			      unsigned nr_iovecs);
 int io_uring_register_buffers_tags(struct io_uring *ring,
@@ -232,8 +234,10 @@ int io_uring_register_restrictions(struct io_uring *ring,
 				   unsigned int nr_res);
 int io_uring_enable_rings(struct io_uring *ring);
 int __io_uring_sqring_wait(struct io_uring *ring);
+#ifdef _GNU_SOURCE
 int io_uring_register_iowq_aff(struct io_uring *ring, size_t cpusz,
 			       const cpu_set_t *mask);
+#endif
 int io_uring_unregister_iowq_aff(struct io_uring *ring);
 int io_uring_register_iowq_max_workers(struct io_uring *ring,
 				       unsigned int *values);
@@ -301,15 +305,22 @@ int __io_uring_get_cqe(struct io_uring *ring,
 #define io_uring_cqe_index(ring,ptr,mask)				\
 	(((ptr) & (mask)) << io_uring_cqe_shift(ring))
 
+/*
+ * NOTE: we should just get rid of the 'head' being passed in here, it doesn't
+ * serve a purpose anymore. The below is a bit of a work-around to ensure that
+ * the compiler doesn't complain about 'head' being unused (or only written,
+ * never read), as we use a local iterator for both the head and tail tracking.
+ */
 #define io_uring_for_each_cqe(ring, head, cqe)				\
 	/*								\
 	 * io_uring_smp_load_acquire() enforces the order of tail	\
 	 * and CQE reads.						\
 	 */								\
-	for (head = *(ring)->cq.khead
-
-
-
+	for (__u32 __HEAD__ = (head) = *(ring)->cq.khead,		\
+	     __TAIL__ = io_uring_smp_load_acquire((ring)->cq.ktail);	\
+	     (cqe = ((head) != __TAIL__ ?				\
+		&(ring)->cq.cqes[io_uring_cqe_index(ring, __HEAD__, (ring)->cq.ring_mask)] : NULL)); \
+	     (head) = ++__HEAD__)
 
 /*
  * Must be called after io_uring_for_each_cqe()
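The reworked macro keeps the same calling convention, so existing loops compile unchanged. A minimal usage sketch (not from this diff; drain_completions is an assumed name):

#include <liburing.h>

static unsigned drain_completions(struct io_uring *ring)
{
	struct io_uring_cqe *cqe;
	unsigned head, seen = 0;

	io_uring_for_each_cqe(ring, head, cqe) {
		/* cqe->res / cqe->user_data handling would go here */
		seen++;
	}
	/* must follow io_uring_for_each_cqe() to release the entries */
	io_uring_cq_advance(ring, seen);
	return seen;
}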
@@ -734,6 +745,20 @@ IOURINGINLINE void io_uring_prep_openat_direct(struct io_uring_sqe *sqe,
 	__io_uring_set_target_fixed_file(sqe, file_index);
 }
 
+IOURINGINLINE void io_uring_prep_open(struct io_uring_sqe *sqe,
+				      const char *path, int flags, mode_t mode)
+{
+	io_uring_prep_openat(sqe, AT_FDCWD, path, flags, mode);
+}
+
+/* open directly into the fixed file table */
+IOURINGINLINE void io_uring_prep_open_direct(struct io_uring_sqe *sqe,
+					     const char *path, int flags, mode_t mode,
+					     unsigned file_index)
+{
+	io_uring_prep_openat_direct(sqe, AT_FDCWD, path, flags, mode, file_index);
+}
+
 IOURINGINLINE void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
 {
 	io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, NULL, 0, 0);
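A hedged usage sketch for the new io_uring_prep_open() helper (not from this diff; submit_open is an assumed name):

#include <fcntl.h>
#include <liburing.h>

static int submit_open(struct io_uring *ring, const char *path)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/* equivalent to io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0) */
	io_uring_prep_open(sqe, path, O_RDONLY, 0);
	io_uring_sqe_set_data64(sqe, 1);
	return io_uring_submit(ring);
}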
@@ -1259,11 +1284,13 @@ IOURINGINLINE void io_uring_prep_fixed_fd_install(struct io_uring_sqe *sqe,
 	sqe->install_fd_flags = flags;
 }
 
+#ifdef _GNU_SOURCE
 IOURINGINLINE void io_uring_prep_ftruncate(struct io_uring_sqe *sqe,
 					   int fd, loff_t len)
 {
 	io_uring_prep_rw(IORING_OP_FTRUNCATE, sqe, fd, 0, 0, len);
 }
+#endif
 
 /*
  * Returns number of unconsumed (if SQPOLL) or unsubmitted entries exist in
@@ -208,4 +208,9 @@ LIBURING_2.7 {
 LIBURING_2.8 {
 	global:
 		io_uring_register_clock;
+		io_uring_submit_and_wait_min_timeout;
+		io_uring_wait_cqes_min_timeout;
+		io_uring_clone_buffers;
+		io_uring_prep_open;
+		io_uring_prep_open_direct;
 } LIBURING_2.7;
data/vendor/liburing/src/queue.c
CHANGED
@@ -5,6 +5,7 @@
 #include "syscall.h"
 #include "liburing.h"
 #include "int_flags.h"
+#include "liburing/sanitize.h"
 #include "liburing/compat.h"
 #include "liburing/io_uring.h"
 
@@ -405,6 +406,8 @@ static int __io_uring_submit(struct io_uring *ring, unsigned submitted,
 	unsigned flags;
 	int ret;
 
+	liburing_sanitize_ring(ring);
+
 	flags = 0;
 	if (sq_ring_needs_enter(ring, submitted, &flags) || cq_needs_enter) {
 		if (cq_needs_enter)
@@ -7,12 +7,15 @@
 #include "int_flags.h"
 #include "liburing/compat.h"
 #include "liburing/io_uring.h"
+#include "liburing/sanitize.h"
 
 static inline int do_register(struct io_uring *ring, unsigned int opcode,
 			      const void *arg, unsigned int nr_args)
 {
 	int fd;
 
+	liburing_sanitize_address(arg);
+
 	if (ring->int_flags & INT_FLAG_REG_REG_RING) {
 		opcode |= IORING_REGISTER_USE_REGISTERED_RING;
 		fd = ring->enter_ring_fd;
@@ -28,6 +31,8 @@ int io_uring_register_buffers_update_tag(struct io_uring *ring, unsigned off,
 					 const __u64 *tags,
 					 unsigned nr)
 {
+	liburing_sanitize_iovecs(iovecs, nr);
+
 	struct io_uring_rsrc_update2 up = {
 		.offset	= off,
 		.data = (unsigned long)iovecs,
@@ -43,6 +48,8 @@ int io_uring_register_buffers_tags(struct io_uring *ring,
 				   const __u64 *tags,
 				   unsigned nr)
 {
+	liburing_sanitize_iovecs(iovecs, nr);
+
 	struct io_uring_rsrc_register reg = {
 		.nr = nr,
 		.data = (unsigned long)iovecs,
@@ -65,6 +72,8 @@ int io_uring_register_buffers_sparse(struct io_uring *ring, unsigned nr)
 int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
 			      unsigned nr_iovecs)
 {
+	liburing_sanitize_iovecs(iovecs, nr_iovecs);
+
 	return do_register(ring, IORING_REGISTER_BUFFERS, iovecs, nr_iovecs);
 }
 
@@ -77,6 +86,9 @@ int io_uring_register_files_update_tag(struct io_uring *ring, unsigned off,
 				       const int *files, const __u64 *tags,
 				       unsigned nr_files)
 {
+	liburing_sanitize_address(files);
+	liburing_sanitize_address(tags);
+
 	struct io_uring_rsrc_update2 up = {
 		.offset	= off,
 		.data = (unsigned long)files,
@@ -97,6 +109,8 @@ int io_uring_register_files_update_tag(struct io_uring *ring, unsigned off,
 int io_uring_register_files_update(struct io_uring *ring, unsigned off,
 				   const int *files, unsigned nr_files)
 {
+	liburing_sanitize_address(files);
+
 	struct io_uring_files_update up = {
 		.offset	= off,
 		.fds = (unsigned long) files,
@@ -148,6 +162,9 @@ int io_uring_register_files_sparse(struct io_uring *ring, unsigned nr)
 int io_uring_register_files_tags(struct io_uring *ring, const int *files,
 				 const __u64 *tags, unsigned nr)
 {
+	liburing_sanitize_address(files);
+	liburing_sanitize_address(tags);
+
 	struct io_uring_rsrc_register reg = {
 		.nr = nr,
 		.data = (unsigned long)files,
@@ -175,6 +192,8 @@ int io_uring_register_files(struct io_uring *ring, const int *files,
 {
 	int ret, did_increase = 0;
 
+	liburing_sanitize_address(files);
+
 	do {
 		ret = do_register(ring, IORING_REGISTER_FILES, files, nr_files);
 		if (ret >= 0)
@@ -316,6 +335,7 @@ int io_uring_register_buf_ring(struct io_uring *ring,
 			       struct io_uring_buf_reg *reg,
 			       unsigned int __maybe_unused flags)
 {
+	reg->flags |= flags;
 	return do_register(ring, IORING_REGISTER_PBUF_RING, reg, 1);
 }
 
@@ -328,6 +348,8 @@ int io_uring_unregister_buf_ring(struct io_uring *ring, int bgid)
 
 int io_uring_buf_ring_head(struct io_uring *ring, int buf_group, uint16_t *head)
 {
+	liburing_sanitize_address(head);
+
 	struct io_uring_buf_status buf_status = {
 		.buf_group	= buf_group,
 	};
@@ -372,3 +394,17 @@ int io_uring_register_clock(struct io_uring *ring,
 {
 	return do_register(ring, IORING_REGISTER_CLOCK, arg, 0);
 }
+
+int io_uring_clone_buffers(struct io_uring *dst, struct io_uring *src)
+{
+	struct io_uring_clone_buffers buf = { .src_fd = src->ring_fd, };
+
+	if (src->int_flags & INT_FLAG_REG_REG_RING) {
+		buf.src_fd = src->enter_ring_fd;
+		buf.flags = IORING_REGISTER_SRC_REGISTERED;
+	} else {
+		buf.src_fd = src->ring_fd;
+	}
+
+	return do_register(dst, IORING_REGISTER_CLONE_BUFFERS, &buf, 1);
+}
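A hedged sketch of what the new io_uring_clone_buffers() enables (not from this diff; share_buffers is an assumed name): one ring registers the buffers, a second ring clones that registration instead of pinning the same memory twice.

#include <errno.h>
#include <liburing.h>
#include <stdlib.h>
#include <sys/uio.h>

static int share_buffers(struct io_uring *src, struct io_uring *dst)
{
	struct iovec iov = {
		.iov_base = malloc(65536),
		.iov_len  = 65536,
	};
	int ret;

	if (!iov.iov_base)
		return -ENOMEM;

	/* pin and register the buffer once, on the source ring */
	ret = io_uring_register_buffers(src, &iov, 1);
	if (ret)
		return ret;

	/* dst now sees the same fixed buffers for *_FIXED operations */
	return io_uring_clone_buffers(dst, src);
}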
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: MIT */
+
+#include "liburing/sanitize.h"
+
+#include <sanitizer/asan_interface.h>
+#include <stdlib.h>
+#include "liburing.h"
+
+static inline void sanitize_sqe_addr(struct io_uring_sqe *sqe)
+{
+	if (__asan_address_is_poisoned((void *) (unsigned long) sqe->addr) != 0) {
+		__asan_describe_address((void *) (unsigned long) sqe->addr);
+		exit(1);
+	}
+}
+static inline void sanitize_sqe_optval(struct io_uring_sqe *sqe)
+{
+	if (__asan_region_is_poisoned((void *) (unsigned long) sqe->optval, sqe->optlen) != 0) {
+		__asan_describe_address((void *) (unsigned long) sqe->optval);
+		exit(1);
+	}
+}
+static inline void sanitize_sqe_addr2(struct io_uring_sqe *sqe)
+{
+	if (__asan_address_is_poisoned((void *) (unsigned long) sqe->addr2) != 0) {
+		__asan_describe_address((void *) (unsigned long) sqe->addr2);
+		exit(1);
+	}
+}
+static inline void sanitize_sqe_addr3(struct io_uring_sqe *sqe)
+{
+	if (__asan_address_is_poisoned((void *) (unsigned long) sqe->addr3) != 0) {
+		__asan_describe_address((void *) (unsigned long) sqe->addr3);
+		exit(1);
+	}
+}
+static inline void sanitize_sqe_addr_and_add2(struct io_uring_sqe *sqe)
+{
+	sanitize_sqe_addr(sqe);
+	sanitize_sqe_addr2(sqe);
+}
+static inline void sanitize_sqe_addr_and_add3(struct io_uring_sqe *sqe)
+{
+	sanitize_sqe_addr(sqe);
+	sanitize_sqe_addr3(sqe);
+}
+static inline void sanitize_sqe_nop(struct io_uring_sqe *sqe)
+{
+}
+
+typedef void (*sanitize_sqe_handler)(struct io_uring_sqe *sqe);
+sanitize_sqe_handler sanitize_handlers[IORING_OP_LAST];
+bool sanitize_handlers_initialized = false;
+
+static inline void initialize_sanitize_handlers()
+{
+	if (sanitize_handlers_initialized)
+		return;
+
+	sanitize_handlers[IORING_OP_NOP] = sanitize_sqe_nop;
+	sanitize_handlers[IORING_OP_READV] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_WRITEV] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_FSYNC] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_READ_FIXED] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_WRITE_FIXED] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_POLL_ADD] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_POLL_REMOVE] = sanitize_sqe_nop;
+	sanitize_handlers[IORING_OP_SYNC_FILE_RANGE] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_SENDMSG] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_RECVMSG] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_TIMEOUT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_TIMEOUT_REMOVE] = sanitize_sqe_nop;
+	sanitize_handlers[IORING_OP_ACCEPT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_ASYNC_CANCEL] = sanitize_sqe_nop;
+	sanitize_handlers[IORING_OP_LINK_TIMEOUT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_CONNECT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_FALLOCATE] = sanitize_sqe_nop;
+	sanitize_handlers[IORING_OP_OPENAT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_CLOSE] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_FILES_UPDATE] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_STATX] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_READ] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_WRITE] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_FADVISE] = sanitize_sqe_nop;
+	sanitize_handlers[IORING_OP_MADVISE] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_SEND] = sanitize_sqe_addr_and_add2;
+	sanitize_handlers[IORING_OP_RECV] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_OPENAT2] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_EPOLL_CTL] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_SPLICE] = sanitize_sqe_nop;
+	sanitize_handlers[IORING_OP_PROVIDE_BUFFERS] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_REMOVE_BUFFERS] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_TEE] = sanitize_sqe_nop;
+	sanitize_handlers[IORING_OP_SHUTDOWN] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_RENAMEAT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_UNLINKAT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_MKDIRAT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_SYMLINKAT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_LINKAT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_MSG_RING] = sanitize_sqe_addr_and_add3;
+	sanitize_handlers[IORING_OP_FSETXATTR] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_SETXATTR] = sanitize_sqe_addr_and_add3;
+	sanitize_handlers[IORING_OP_FGETXATTR] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_GETXATTR] = sanitize_sqe_addr_and_add3;
+	sanitize_handlers[IORING_OP_SOCKET] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_URING_CMD] = sanitize_sqe_optval;
+	sanitize_handlers[IORING_OP_SEND_ZC] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_SENDMSG_ZC] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_READ_MULTISHOT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_WAITID] = sanitize_sqe_addr_and_add2;
+	sanitize_handlers[IORING_OP_FUTEX_WAIT] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_FUTEX_WAKE] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_FUTEX_WAITV] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_FIXED_FD_INSTALL] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_FTRUNCATE] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_BIND] = sanitize_sqe_addr;
+	sanitize_handlers[IORING_OP_LISTEN] = sanitize_sqe_addr;
+	sanitize_handlers_initialized = true;
+}
+
+void liburing_sanitize_ring(struct io_uring *ring)
+{
+	struct io_uring_sq *sq = &ring->sq;
+	struct io_uring_sqe *sqe;
+	unsigned int head;
+	int shift = 0;
+
+	initialize_sanitize_handlers();
+
+	if (ring->flags & IORING_SETUP_SQE128)
+		shift = 1;
+	if (!(ring->flags & IORING_SETUP_SQPOLL))
+		head = *sq->khead;
+	else
+		head = io_uring_smp_load_acquire(sq->khead);
+
+	while (head != sq->sqe_tail) {
+		sqe = &sq->sqes[(head & sq->ring_mask) << shift];
+		if (sqe->opcode < IORING_OP_LAST)
+			sanitize_handlers[sqe->opcode](sqe);
+		head++;
+	}
+}
+
+void liburing_sanitize_address(const void *addr)
+{
+	if (__asan_address_is_poisoned(addr) != 0) {
+		__asan_describe_address((void *)addr);
+		exit(1);
+	}
+}
+
+void liburing_sanitize_region(const void *addr, unsigned int len)
+{
+	if (__asan_region_is_poisoned((void *)addr, len) != 0) {
+		__asan_describe_address((void *)addr);
+		exit(1);
+	}
+}
+
+void liburing_sanitize_iovecs(const struct iovec *iovecs, unsigned nr)
+{
+	unsigned i;
+
+	if (__asan_address_is_poisoned((void *)iovecs) != 0) {
+		__asan_describe_address((void *)iovecs);
+		exit(1);
+	}
+
+	for (i = 0; i < nr; i++) {
+		if (__asan_region_is_poisoned((void *)iovecs[i].iov_base, iovecs[i].iov_len) != 0) {
+			__asan_describe_address((void *)iovecs[i].iov_base);
+			exit(1);
+		}
+	}
+}
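A hypothetical illustration of what this sanitize pass catches when liburing is built with CONFIG_USE_SANITIZER and the program runs under AddressSanitizer: an SQE pointing at memory that was already freed aborts with an ASAN report at submit time rather than corrupting memory later. The helper name is an assumption.

#include <liburing.h>
#include <stdlib.h>

static void submit_use_after_free(struct io_uring *ring, int fd)
{
	char *buf = malloc(4096);
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	free(buf);			/* buf is now poisoned by ASAN */
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(ring);		/* liburing_sanitize_ring() reports and exits here */
}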
data/vendor/liburing/src/setup.c
CHANGED
@@ -433,7 +433,7 @@ __cold void io_uring_queue_exit(struct io_uring *ring)
 	struct io_uring_cq *cq = &ring->cq;
 	size_t sqe_size;
 
-	if (!sq->ring_sz) {
+	if (!sq->ring_sz && !(ring->int_flags & INT_FLAG_APP_MEM)) {
 		sqe_size = sizeof(struct io_uring_sqe);
 		if (ring->flags & IORING_SETUP_SQE128)
 			sqe_size += 64;
@@ -28,6 +28,7 @@
 #include "helpers.h"
 #include "../src/syscall.h"
 
+#ifndef CONFIG_USE_SANITIZER
 #if !defined(SYS_futex) && defined(SYS_futex_time64)
 # define SYS_futex SYS_futex_time64
 #endif
@@ -327,3 +328,9 @@ int main(int argc, char *argv[])
 	loop();
 	return T_EXIT_PASS;
 }
+#else
+int main(int argc, char *argv[])
+{
+	return T_EXIT_SKIP;
+}
+#endif
@@ -78,10 +78,12 @@ int main(int argc, char *argv[])
 
 	close(fd);
 	unlink(buf);
+	free(iov.iov_base);
 	return T_EXIT_PASS;
 err:
 	close(fd);
 	unlink(buf);
+	free(iov.iov_base);
 	return T_EXIT_FAIL;
 skipped:
 	fprintf(stderr, "Polling not supported in current dir, test skipped\n");
@@ -5,31 +5,6 @@
 #include "liburing.h"
 #include "helpers.h"
 
-static unsigned long long mtime_since(const struct timeval *s,
-				      const struct timeval *e)
-{
-	long long sec, usec;
-
-	sec = e->tv_sec - s->tv_sec;
-	usec = (e->tv_usec - s->tv_usec);
-	if (sec > 0 && usec < 0) {
-		sec--;
-		usec += 1000000;
-	}
-
-	sec *= 1000;
-	usec /= 1000;
-	return sec + usec;
-}
-
-static unsigned long long mtime_since_now(struct timeval *tv)
-{
-	struct timeval end;
-
-	gettimeofday(&end, NULL);
-	return mtime_since(tv, &end);
-}
-
 int main(int argc, char *argv[])
 {
 	struct __kernel_timespec ts1, ts2;
@@ -14,6 +14,7 @@
 #include "helpers.h"
 #include "../src/syscall.h"
 
+#ifndef CONFIG_USE_SANITIZER
 int main(int argc, char *argv[])
 {
 	if (argc > 1)
@@ -52,3 +53,9 @@ int main(int argc, char *argv[])
 	__sys_io_uring_setup(0x7a6, (struct io_uring_params *) 0x20000000UL);
 	return T_EXIT_PASS;
 }
+#else
+int main(int argc, char *argv[])
+{
+	return T_EXIT_SKIP;
+}
+#endif