uringmachine 0.4 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -1
- data/CHANGELOG.md +16 -0
- data/README.md +44 -1
- data/TODO.md +12 -3
- data/examples/bm_snooze.rb +89 -0
- data/examples/bm_sqlite.rb +89 -0
- data/examples/bm_write.rb +56 -0
- data/examples/dns_client.rb +12 -0
- data/examples/http_server.rb +42 -43
- data/examples/pg.rb +85 -0
- data/examples/server_client.rb +64 -0
- data/examples/snooze.rb +44 -0
- data/examples/stream.rb +85 -0
- data/examples/write_dev_null.rb +16 -0
- data/ext/um/extconf.rb +81 -14
- data/ext/um/um.c +468 -414
- data/ext/um/um.h +149 -40
- data/ext/um/um_async_op.c +40 -0
- data/ext/um/um_async_op_class.c +136 -0
- data/ext/um/um_buffer.c +49 -0
- data/ext/um/um_class.c +176 -44
- data/ext/um/um_const.c +174 -9
- data/ext/um/um_ext.c +8 -0
- data/ext/um/um_mutex_class.c +47 -0
- data/ext/um/um_op.c +89 -111
- data/ext/um/um_queue_class.c +58 -0
- data/ext/um/um_ssl.c +850 -0
- data/ext/um/um_ssl.h +22 -0
- data/ext/um/um_ssl_class.c +138 -0
- data/ext/um/um_sync.c +273 -0
- data/ext/um/um_utils.c +1 -1
- data/lib/uringmachine/dns_resolver.rb +84 -0
- data/lib/uringmachine/ssl/context_builder.rb +96 -0
- data/lib/uringmachine/ssl.rb +394 -0
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +27 -3
- data/supressions/ruby.supp +71 -0
- data/test/helper.rb +6 -0
- data/test/test_async_op.rb +119 -0
- data/test/test_ssl.rb +155 -0
- data/test/test_um.rb +464 -47
- data/uringmachine.gemspec +3 -2
- data/vendor/liburing/.gitignore +5 -0
- data/vendor/liburing/CHANGELOG +1 -0
- data/vendor/liburing/configure +32 -0
- data/vendor/liburing/examples/Makefile +1 -0
- data/vendor/liburing/examples/reg-wait.c +159 -0
- data/vendor/liburing/liburing.spec +1 -1
- data/vendor/liburing/src/include/liburing/io_uring.h +48 -2
- data/vendor/liburing/src/include/liburing.h +28 -2
- data/vendor/liburing/src/int_flags.h +10 -3
- data/vendor/liburing/src/liburing-ffi.map +13 -2
- data/vendor/liburing/src/liburing.map +9 -0
- data/vendor/liburing/src/queue.c +25 -16
- data/vendor/liburing/src/register.c +73 -4
- data/vendor/liburing/src/setup.c +46 -18
- data/vendor/liburing/src/setup.h +6 -0
- data/vendor/liburing/test/Makefile +7 -0
- data/vendor/liburing/test/cmd-discard.c +427 -0
- data/vendor/liburing/test/fifo-nonblock-read.c +69 -0
- data/vendor/liburing/test/file-exit-unreg.c +48 -0
- data/vendor/liburing/test/io_uring_passthrough.c +2 -0
- data/vendor/liburing/test/io_uring_register.c +13 -2
- data/vendor/liburing/test/napi-test.c +1 -1
- data/vendor/liburing/test/no-mmap-inval.c +1 -1
- data/vendor/liburing/test/read-mshot-empty.c +2 -0
- data/vendor/liburing/test/read-mshot-stdin.c +121 -0
- data/vendor/liburing/test/read-mshot.c +6 -0
- data/vendor/liburing/test/recvsend_bundle.c +2 -2
- data/vendor/liburing/test/reg-fd-only.c +1 -1
- data/vendor/liburing/test/reg-wait.c +251 -0
- data/vendor/liburing/test/regbuf-clone.c +458 -0
- data/vendor/liburing/test/resize-rings.c +643 -0
- data/vendor/liburing/test/rsrc_tags.c +1 -1
- data/vendor/liburing/test/sqpoll-sleep.c +39 -8
- data/vendor/liburing/test/sqwait.c +136 -0
- data/vendor/liburing/test/sync-cancel.c +8 -1
- data/vendor/liburing/test/timeout.c +13 -8
- metadata +52 -8
- data/examples/http_server_multishot.rb +0 -57
- data/examples/http_server_simpler.rb +0 -34
data/vendor/liburing/src/setup.c
CHANGED
@@ -6,7 +6,6 @@
 #include "liburing.h"
 #include "int_flags.h"
 #include "setup.h"
-#include "liburing/compat.h"
 #include "liburing/io_uring.h"
 
 #define KERN_MAX_ENTRIES 32768
@@ -59,7 +58,7 @@ static int get_sq_cq_entries(unsigned entries, struct io_uring_params *p,
 	return 0;
 }
 
-static void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq)
+void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq)
 {
 	if (sq->ring_sz)
 		__sys_munmap(sq->ring_ptr, sq->ring_sz);
@@ -67,9 +66,9 @@ static void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq)
 		__sys_munmap(cq->ring_ptr, cq->ring_sz);
 }
 
-static void io_uring_setup_ring_pointers(struct io_uring_params *p,
-					 struct io_uring_sq *sq,
-					 struct io_uring_cq *cq)
+void io_uring_setup_ring_pointers(struct io_uring_params *p,
+				  struct io_uring_sq *sq,
+				  struct io_uring_cq *cq)
 {
 	sq->khead = sq->ring_ptr + p->sq_off.head;
 	sq->ktail = sq->ring_ptr + p->sq_off.tail;
@@ -95,8 +94,8 @@ static void io_uring_setup_ring_pointers(struct io_uring_params *p,
 	cq->ring_entries = *cq->kring_entries;
 }
 
-static int io_uring_mmap(int fd, struct io_uring_params *p, struct io_uring_sq *sq,
-			 struct io_uring_cq *cq)
+int io_uring_mmap(int fd, struct io_uring_params *p, struct io_uring_sq *sq,
+		  struct io_uring_cq *cq)
 {
 	size_t size;
 	int ret;
@@ -222,9 +221,9 @@ static int io_uring_alloc_huge(unsigned entries, struct io_uring_params *p,
 	ring_mem = KRING_SIZE;
 
 	sqes_mem = sq_entries * sizeof(struct io_uring_sqe);
-	sqes_mem = (sqes_mem + page_size - 1) & ~(page_size - 1);
 	if (!(p->flags & IORING_SETUP_NO_SQARRAY))
 		sqes_mem += sq_entries * sizeof(unsigned);
+	sqes_mem = (sqes_mem + page_size - 1) & ~(page_size - 1);
 
 	cqes_mem = cq_entries * sizeof(struct io_uring_cqe);
 	if (p->flags & IORING_SETUP_CQE32)
@@ -595,7 +594,7 @@ __cold ssize_t io_uring_mlock_size(unsigned entries, unsigned flags)
 #if defined(__hppa__)
 static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
 					  unsigned int nentries, int bgid,
-					  unsigned int flags, int *ret)
+					  unsigned int flags, int *err)
 {
 	struct io_uring_buf_ring *br;
 	struct io_uring_buf_reg reg;
@@ -608,10 +607,10 @@ static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
 	reg.bgid = bgid;
 	reg.flags = IOU_PBUF_RING_MMAP;
 
-	*ret = 0;
+	*err = 0;
 	lret = io_uring_register_buf_ring(ring, &reg, flags);
 	if (lret) {
-		*ret = lret;
+		*err = lret;
 		return NULL;
 	}
 
@@ -620,7 +619,7 @@ static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
 	br = __sys_mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
 			MAP_SHARED | MAP_POPULATE, ring->ring_fd, off);
 	if (IS_ERR(br)) {
-		*ret = PTR_ERR(br);
+		*err = PTR_ERR(br);
 		return NULL;
 	}
 
@@ -629,7 +628,7 @@ static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
 #else
 static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
 					  unsigned int nentries, int bgid,
-					  unsigned int flags, int *ret)
+					  unsigned int flags, int *err)
 {
 	struct io_uring_buf_ring *br;
 	struct io_uring_buf_reg reg;
@@ -641,7 +640,7 @@ static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
 	br = __sys_mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
 			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 	if (IS_ERR(br)) {
-		*ret = PTR_ERR(br);
+		*err = PTR_ERR(br);
 		return NULL;
 	}
 
@@ -649,11 +648,11 @@ static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
 	reg.ring_entries = nentries;
 	reg.bgid = bgid;
 
-	*ret = 0;
+	*err = 0;
 	lret = io_uring_register_buf_ring(ring, &reg, flags);
 	if (lret) {
 		__sys_munmap(br, ring_size);
-		*ret = lret;
+		*err = lret;
 		br = NULL;
 	}
 
@@ -664,11 +663,11 @@ static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
 struct io_uring_buf_ring *io_uring_setup_buf_ring(struct io_uring *ring,
 						  unsigned int nentries,
 						  int bgid, unsigned int flags,
-						  int *ret)
+						  int *err)
 {
 	struct io_uring_buf_ring *br;
 
-	br = br_setup(ring, nentries, bgid, flags, ret);
+	br = br_setup(ring, nentries, bgid, flags, err);
 	if (br)
 		io_uring_buf_ring_init(br);
 
@@ -687,3 +686,32 @@ int io_uring_free_buf_ring(struct io_uring *ring, struct io_uring_buf_ring *br,
 	__sys_munmap(br, nentries * sizeof(struct io_uring_buf));
 	return 0;
 }
+
+void io_uring_free_reg_wait(struct io_uring_reg_wait *reg, unsigned nentries)
+{
+	__sys_munmap(reg, nentries * sizeof(struct io_uring_reg_wait));
+}
+
+struct io_uring_reg_wait *io_uring_setup_reg_wait(struct io_uring *ring,
+						  unsigned nentries, int *err)
+{
+	struct io_uring_reg_wait *reg;
+	size_t size = nentries * sizeof(*reg);
+	int ret;
+
+	reg = __sys_mmap(NULL, size, PROT_READ | PROT_WRITE,
+			 MAP_SHARED|MAP_POPULATE|MAP_ANONYMOUS, -1, 0);
+	if (IS_ERR(reg)) {
+		*err = PTR_ERR(reg);
+		return NULL;
+	}
+
+	memset(reg, 0, size);
+	ret = io_uring_register_wait_reg(ring, reg, nentries);
+	if (!ret)
+		return reg;
+
+	__sys_munmap(reg, size);
+	*err = ret;
+	return NULL;
+}
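The tail of this setup.c diff adds two helpers for registered wait regions, io_uring_setup_reg_wait() and io_uring_free_reg_wait(). A minimal caller-side sketch follows; the struct io_uring_reg_wait fields (ts, flags), the IORING_REG_WAIT_TS flag, and the companion io_uring_submit_and_wait_reg() call are assumed from the matching liburing headers of this release and do not appear in the hunk above.

/* Sketch only: io_uring_setup_reg_wait()/io_uring_free_reg_wait() come from the
 * diff above; the reg_wait fields and io_uring_submit_and_wait_reg() are
 * assumptions about the same liburing release and may differ. */
#include <liburing.h>

static int wait_one_cqe_with_reg_timeout(struct io_uring *ring)
{
	struct io_uring_reg_wait *reg;
	struct io_uring_cqe *cqe;
	int err, ret;

	/* mmap + register a table with a single wait-region entry */
	reg = io_uring_setup_reg_wait(ring, 1, &err);
	if (!reg)
		return err;

	/* entry 0: wait for at most one second */
	reg->ts.tv_sec = 1;
	reg->ts.tv_nsec = 0;
	reg->flags = IORING_REG_WAIT_TS;

	/* reference registered entry 0 by index instead of passing a timespec */
	ret = io_uring_submit_and_wait_reg(ring, &cqe, 1, 0);

	io_uring_free_reg_wait(reg, 1);
	return ret;
}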
data/vendor/liburing/src/setup.h
CHANGED
@@ -5,5 +5,11 @@
 int __io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
				 struct io_uring_params *p, void *buf,
				 size_t buf_size);
+void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq);
+int io_uring_mmap(int fd, struct io_uring_params *p, struct io_uring_sq *sq,
+		  struct io_uring_cq *cq);
+void io_uring_setup_ring_pointers(struct io_uring_params *p,
+				  struct io_uring_sq *sq,
+				  struct io_uring_cq *cq);
 
 #endif
data/vendor/liburing/test/Makefile
CHANGED
@@ -65,6 +65,7 @@ test_srcs := \
 	connect.c \
 	connect-rep.c \
 	coredump.c \
+	cmd-discard.c \
 	cq-full.c \
 	cq-overflow.c \
 	cq-peek-batch.c \
@@ -93,6 +94,8 @@ test_srcs := \
 	fd-install.c \
 	fd-pass.c \
 	fdinfo.c \
+	fifo-nonblock-read.c \
+	file-exit-unreg.c \
 	file-register.c \
 	files-exit-hang-poll.c \
 	files-exit-hang-timeout.c \
@@ -164,6 +167,7 @@ test_srcs := \
 	read-before-exit.c \
 	read-mshot.c \
 	read-mshot-empty.c \
+	read-mshot-stdin.c \
 	read-write.c \
 	recv-msgall.c \
 	recv-msgall-stream.c \
@@ -171,10 +175,12 @@ test_srcs := \
 	reg-fd-only.c \
 	reg-hint.c \
 	reg-reg-ring.c \
+	reg-wait.c \
 	regbuf-clone.c \
 	regbuf-merge.c \
 	register-restrictions.c \
 	rename.c \
+	resize-rings.c \
 	ringbuf-read.c \
 	ringbuf-status.c \
 	ring-leak2.c \
@@ -210,6 +216,7 @@ test_srcs := \
 	sq-poll-share.c \
 	sqpoll-sleep.c \
 	sq-space_left.c \
+	sqwait.c \
 	stdout.c \
 	submit-and-wait.c \
 	submit-link-fail.c \
data/vendor/liburing/test/cmd-discard.c
ADDED
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: MIT */
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/ioctl.h>
+#include <linux/fs.h>
+
+#include "liburing.h"
+#include "helpers.h"
+
+#define MAX_TEST_LBAS 1024
+
+static const char *filename;
+struct opcode {
+	int op;
+	bool test;
+	bool not_supported;
+};
+
+#define TEST_BLOCK_URING_CMD_MAX 3
+
+static struct opcode opcodes[TEST_BLOCK_URING_CMD_MAX] = {
+	{ .op = BLOCK_URING_CMD_DISCARD, .test = true, },
+	{ .test = false, },
+	{ .test = false, },
+};
+
+static int lba_size;
+static uint64_t bdev_size;
+static uint64_t bdev_size_lbas;
+static char *buffer;
+
+static void prep_blk_cmd(struct io_uring_sqe *sqe, int fd,
+			 uint64_t from, uint64_t len,
+			 int cmd_op)
+{
+	assert(cmd_op == BLOCK_URING_CMD_DISCARD);
+
+	io_uring_prep_cmd_discard(sqe, fd, from, len);
+}
+
+static int queue_cmd_range(struct io_uring *ring, int bdev_fd,
+			   uint64_t from, uint64_t len, int cmd_op)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int err;
+
+	sqe = io_uring_get_sqe(ring);
+	assert(sqe != NULL);
+	prep_blk_cmd(sqe, bdev_fd, from, len, cmd_op);
+
+	err = io_uring_submit_and_wait(ring, 1);
+	if (err != 1) {
+		fprintf(stderr, "io_uring_submit_and_wait failed %d\n", err);
+		exit(1);
+	}
+
+	err = io_uring_wait_cqe(ring, &cqe);
+	if (err) {
+		fprintf(stderr, "io_uring_wait_cqe failed %d (op %i)\n",
+			err, cmd_op);
+		exit(1);
+	}
+
+	err = cqe->res;
+	io_uring_cqe_seen(ring, cqe);
+	return err;
+}
+
+static int queue_cmd_lba(struct io_uring *ring, int bdev_fd,
+			 uint64_t from, uint64_t nr_lba, int cmd_op)
+{
+	return queue_cmd_range(ring, bdev_fd, from * lba_size,
+			       nr_lba * lba_size, cmd_op);
+}
+
+static int queue_discard_lba(struct io_uring *ring, int bdev_fd,
+			     uint64_t from, uint64_t nr_lba)
+{
+	return queue_cmd_lba(ring, bdev_fd, from, nr_lba,
+			     BLOCK_URING_CMD_DISCARD);
+}
+
+static int test_parallel(struct io_uring *ring, int fd, int cmd_op)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int inflight = 0;
+	int max_inflight = 16;
+	int left = 1000;
+	int ret;
+
+	while (left || inflight) {
+		int queued = 0;
+		unsigned head, nr_cqes = 0;
+		int lba_len = 8;
+
+		while (inflight < max_inflight && left) {
+			int off = rand() % (MAX_TEST_LBAS - lba_len);
+			sqe = io_uring_get_sqe(ring);
+			assert(sqe != NULL);
+
+			prep_blk_cmd(sqe, fd, off * lba_size,
+				     lba_len * lba_size, cmd_op);
+			if (rand() & 1)
+				sqe->flags |= IOSQE_ASYNC;
+
+			queued++;
+			left--;
+			inflight++;
+		}
+		if (queued) {
+			ret = io_uring_submit(ring);
+			if (ret != queued) {
+				fprintf(stderr, "io_uring_submit failed %d\n", ret);
+				return T_EXIT_FAIL;
+			}
+		}
+
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "io_uring_wait_cqe failed %d\n", ret);
+			exit(1);
+		}
+
+		io_uring_for_each_cqe(ring, head, cqe) {
+			nr_cqes++;
+			inflight--;
+			if (cqe->res != 0) {
+				fprintf(stderr, "cmd %i failed %i\n", cmd_op,
+					cqe->res);
+				return T_EXIT_FAIL;
+			}
+		}
+		io_uring_cq_advance(ring, nr_cqes);
+	}
+
+	return 0;
+}
+
+static int cmd_issue_verify(struct io_uring *ring, int fd, int lba, int len,
+			    int cmd_op)
+{
+	int verify = (cmd_op != BLOCK_URING_CMD_DISCARD);
+	int ret, i;
+	ssize_t res;
+
+	if (verify) {
+		for (i = 0; i < len; i++) {
+			size_t off = (i + lba) * lba_size;
+
+			res = pwrite(fd, buffer, lba_size, off);
+			if (res == -1) {
+				fprintf(stderr, "pwrite failed\n");
+				return T_EXIT_FAIL;
+			}
+		}
+	}
+
+	ret = queue_cmd_lba(ring, fd, lba, len, cmd_op);
+	if (ret) {
+		if (ret == -EINVAL || ret == -EOPNOTSUPP)
+			return T_EXIT_SKIP;
+
+		fprintf(stderr, "cmd_issue_verify %i fail lba %i len %i ret %i\n",
+			cmd_op, lba, len, ret);
+		return T_EXIT_FAIL;
+	}
+
+	if (verify) {
+		for (i = 0; i < len; i++) {
+			size_t off = (i + lba) * lba_size;
+
+			res = pread(fd, buffer, lba_size, off);
+			if (res == -1) {
+				fprintf(stderr, "pread failed\n");
+				return T_EXIT_FAIL;
+			}
+			if (!memchr(buffer, 0, lba_size)) {
+				fprintf(stderr, "mem cmp failed, lba %i\n", lba + i);
+				return T_EXIT_FAIL;
+			}
+		}
+	}
+	return 0;
+}
+
+static int basic_cmd_test(struct io_uring *ring, int op)
+{
+	int cmd_op = opcodes[op].op;
+	int ret, fd;
+
+	if (!opcodes[op].test)
+		return T_EXIT_SKIP;
+
+	fd = open(filename, O_DIRECT | O_RDWR | O_EXCL);
+	if (fd < 0) {
+		if (errno == -EINVAL || errno == -EBUSY)
+			return T_EXIT_SKIP;
+		fprintf(stderr, "open failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+
+	ret = cmd_issue_verify(ring, fd, 0, 1, cmd_op);
+	if (ret == T_EXIT_SKIP) {
+		printf("cmd %i not supported, skip\n", cmd_op);
+		opcodes[op].not_supported = 1;
+		close(fd);
+		return T_EXIT_SKIP;
+	} else if (ret) {
+		fprintf(stderr, "cmd %i fail 0 1\n", cmd_op);
+		return T_EXIT_FAIL;
+	}
+
+	ret = cmd_issue_verify(ring, fd, 7, 15, cmd_op);
+	if (ret) {
+		fprintf(stderr, "cmd %i fail 7 15 %i\n", cmd_op, ret);
+		return T_EXIT_FAIL;
+	}
+
+	ret = cmd_issue_verify(ring, fd, 1, MAX_TEST_LBAS - 1, cmd_op);
+	if (ret) {
+		fprintf(stderr, "large cmd %i failed %i\n", cmd_op, ret);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_parallel(ring, fd, cmd_op);
+	if (ret) {
+		fprintf(stderr, "test_parallel() %i failed %i\n", cmd_op, ret);
+		return T_EXIT_FAIL;
+	}
+
+	close(fd);
+	return 0;
+}
+
+static int test_fail_edge_cases(struct io_uring *ring, int op)
+{
+	int cmd_op = opcodes[op].op;
+	int ret, fd;
+
+	if (!opcodes[op].test)
+		return T_EXIT_SKIP;
+
+	fd = open(filename, O_DIRECT | O_RDWR | O_EXCL);
+	if (fd < 0) {
+		fprintf(stderr, "open failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+
+	ret = queue_cmd_lba(ring, fd, bdev_size_lbas, 1, cmd_op);
+	if (ret >= 0) {
+		fprintf(stderr, "cmd %i beyond capacity %i\n",
+			cmd_op, ret);
+		return 1;
+	}
+
+	ret = queue_cmd_lba(ring, fd, bdev_size_lbas - 1, 2, cmd_op);
+	if (ret >= 0) {
+		fprintf(stderr, "cmd %i beyond capacity with overlap %i\n",
+			cmd_op, ret);
+		return 1;
+	}
+
+	ret = queue_cmd_range(ring, fd, (uint64_t)-lba_size, lba_size + 2,
+			      cmd_op);
+	if (ret >= 0) {
+		fprintf(stderr, "cmd %i range overflow %i\n",
+			cmd_op, ret);
+		return 1;
+	}
+
+	ret = queue_cmd_range(ring, fd, lba_size / 2, lba_size, cmd_op);
+	if (ret >= 0) {
+		fprintf(stderr, "cmd %i unaligned offset %i\n",
+			cmd_op, ret);
+		return 1;
+	}
+
+	ret = queue_cmd_range(ring, fd, 0, lba_size / 2, cmd_op);
+	if (ret >= 0) {
+		fprintf(stderr, "cmd %i unaligned size %i\n",
+			cmd_op, ret);
+		return 1;
+	}
+
+	close(fd);
+	return 0;
+}
+
+static int test_rdonly(struct io_uring *ring, int op)
+{
+	int ret, fd;
+	int ro;
+
+	if (!opcodes[op].test)
+		return T_EXIT_SKIP;
+
+	fd = open(filename, O_DIRECT | O_RDONLY | O_EXCL);
+	if (fd < 0) {
+		fprintf(stderr, "open failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+
+	ret = queue_discard_lba(ring, fd, 0, 1);
+	if (ret >= 0) {
+		fprintf(stderr, "discarded with O_RDONLY %i\n", ret);
+		return 1;
+	}
+	close(fd);
+
+	fd = open(filename, O_DIRECT | O_RDWR | O_EXCL);
+	if (fd < 0) {
+		fprintf(stderr, "open failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+
+	ro = 1;
+	ret = ioctl(fd, BLKROSET, &ro);
+	if (ret) {
+		fprintf(stderr, "BLKROSET 1 failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+
+	ret = queue_discard_lba(ring, fd, 0, 1);
+	if (ret >= 0) {
+		fprintf(stderr, "discarded with O_RDONLY %i\n", ret);
+		return 1;
+	}
+
+	ro = 0;
+	ret = ioctl(fd, BLKROSET, &ro);
+	if (ret) {
+		fprintf(stderr, "BLKROSET 0 failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+	close(fd);
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	struct io_uring ring;
+	int fd, ret, i, fret;
+	int cmd_op;
+
+	if (argc != 2)
+		return T_EXIT_SKIP;
+	filename = argv[1];
+
+	fd = open(filename, O_DIRECT | O_RDONLY | O_EXCL);
+	if (fd < 0) {
+		fprintf(stderr, "open failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+
+	ret = ioctl(fd, BLKGETSIZE64, &bdev_size);
+	if (ret < 0) {
+		fprintf(stderr, "BLKGETSIZE64 failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+	ret = ioctl(fd, BLKSSZGET, &lba_size);
+	if (ret < 0) {
+		fprintf(stderr, "BLKSSZGET failed %i\n", errno);
+		return T_EXIT_FAIL;
+	}
+	assert(bdev_size % lba_size == 0);
+	bdev_size_lbas = bdev_size / lba_size;
+	close(fd);
+
+	buffer = aligned_alloc(lba_size, lba_size);
+	if (!buffer) {
+		fprintf(stderr, "aligned_alloc failed\n");
+		return T_EXIT_FAIL;
+	}
+	for (i = 0; i < lba_size; i++)
+		buffer[i] = i ^ 0xA7;
+
+	if (bdev_size_lbas < MAX_TEST_LBAS) {
+		fprintf(stderr, "the device is too small, skip\n");
+		return T_EXIT_SKIP;
+	}
+
+	ret = io_uring_queue_init(16, &ring, 0);
+	if (ret) {
+		fprintf(stderr, "queue init failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	fret = T_EXIT_SKIP;
+	for (cmd_op = 0; cmd_op < TEST_BLOCK_URING_CMD_MAX; cmd_op++) {
+		if (!opcodes[cmd_op].test)
+			continue;
+		ret = basic_cmd_test(&ring, cmd_op);
+		if (ret) {
+			if (ret == T_EXIT_SKIP)
+				continue;
+
+			fprintf(stderr, "basic_cmd_test() failed, cmd %i\n",
+				cmd_op);
+			return T_EXIT_FAIL;
+		}
+
+		ret = test_rdonly(&ring, cmd_op);
+		if (ret) {
+			fprintf(stderr, "test_rdonly() failed, cmd %i\n",
+				cmd_op);
+			return T_EXIT_FAIL;
+		}
+
+		ret = test_fail_edge_cases(&ring, cmd_op);
+		if (ret) {
+			fprintf(stderr, "test_fail_edge_cases() failed, cmd %i\n",
+				cmd_op);
+			return T_EXIT_FAIL;
+		}
+		fret = T_EXIT_PASS;
+	}
+
+	io_uring_queue_exit(&ring);
+	free(buffer);
+	return fret;
+}
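The new cmd-discard.c test above drives io_uring_prep_cmd_discard() through basic, parallel, read-only, and edge-case paths. Reduced to its core, a single discard of the first logical block might look like the sketch below (error handling trimmed; assumes a block device opened with O_DIRECT and a kernel with BLOCK_URING_CMD_DISCARD support).

/* Minimal sketch of the helper exercised by the test above; not part of the diff. */
#include <liburing.h>

static int discard_first_block(struct io_uring *ring, int bdev_fd,
			       unsigned int lba_size)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	/* byte offset 0, length of one logical block */
	io_uring_prep_cmd_discard(sqe, bdev_fd, 0, lba_size);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;		/* 0 on success, -errno on failure */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}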
data/vendor/liburing/test/fifo-nonblock-read.c
ADDED
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: Test O_NONBLOCK reading from fifo, should result in proper
+ *		retry and a positive read results. Buggy result would be
+ *		-EAGAIN being returned to the user.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "liburing.h"
+#include "helpers.h"
+
+int main(int argc, char *argv[])
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	char buf[32];
+	int fds[2];
+	int flags;
+	int ret;
+
+	io_uring_queue_init(1, &ring, 0);
+
+	if (pipe(fds) < 0) {
+		perror("pipe");
+		return T_EXIT_FAIL;
+	}
+
+	flags = fcntl(fds[0], F_GETFL, 0);
+	if (flags < 0) {
+		perror("fcntl get");
+		return T_EXIT_FAIL;
+	}
+	flags |= O_NONBLOCK;
+	ret = fcntl(fds[0], F_SETFL, flags);
+	if (ret < 0) {
+		perror("fcntl set");
+		return T_EXIT_FAIL;
+	}
+
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
+	io_uring_submit(&ring);
+
+	usleep(10000);
+
+	ret = write(fds[1], "Hello\n", 6);
+	if (ret < 0) {
+		perror("pipe write");
+		return T_EXIT_FAIL;
+	}
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret < 0) {
+		fprintf(stderr, "wait=%d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	if (cqe->res < 0) {
+		fprintf(stderr, "cqe res %d\n", cqe->res);
+		return T_EXIT_FAIL;
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+	io_uring_queue_exit(&ring);
+	return T_EXIT_PASS;
+}