uringmachine 0.4 → 0.5
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -1
- data/CHANGELOG.md +14 -0
- data/README.md +44 -1
- data/TODO.md +12 -3
- data/examples/bm_snooze.rb +89 -0
- data/examples/bm_write.rb +56 -0
- data/examples/dns_client.rb +12 -0
- data/examples/http_server.rb +42 -43
- data/examples/server_client.rb +64 -0
- data/examples/snooze.rb +44 -0
- data/examples/write_dev_null.rb +16 -0
- data/ext/um/extconf.rb +24 -14
- data/ext/um/um.c +468 -414
- data/ext/um/um.h +129 -39
- data/ext/um/um_buffer.c +49 -0
- data/ext/um/um_class.c +148 -24
- data/ext/um/um_const.c +30 -1
- data/ext/um/um_ext.c +4 -0
- data/ext/um/um_mutex_class.c +47 -0
- data/ext/um/um_op.c +86 -111
- data/ext/um/um_queue_class.c +58 -0
- data/ext/um/um_sync.c +273 -0
- data/ext/um/um_utils.c +1 -1
- data/lib/uringmachine/dns_resolver.rb +84 -0
- data/lib/uringmachine/version.rb +1 -1
- data/lib/uringmachine.rb +19 -3
- data/supressions/ruby.supp +71 -0
- data/test/test_um.rb +466 -47
- data/vendor/liburing/.gitignore +5 -0
- data/vendor/liburing/CHANGELOG +1 -0
- data/vendor/liburing/configure +32 -0
- data/vendor/liburing/examples/Makefile +1 -0
- data/vendor/liburing/examples/reg-wait.c +159 -0
- data/vendor/liburing/liburing.spec +1 -1
- data/vendor/liburing/src/include/liburing/io_uring.h +48 -2
- data/vendor/liburing/src/include/liburing.h +28 -2
- data/vendor/liburing/src/int_flags.h +10 -3
- data/vendor/liburing/src/liburing-ffi.map +13 -2
- data/vendor/liburing/src/liburing.map +9 -0
- data/vendor/liburing/src/queue.c +25 -16
- data/vendor/liburing/src/register.c +73 -4
- data/vendor/liburing/src/setup.c +46 -18
- data/vendor/liburing/src/setup.h +6 -0
- data/vendor/liburing/test/Makefile +7 -0
- data/vendor/liburing/test/cmd-discard.c +427 -0
- data/vendor/liburing/test/fifo-nonblock-read.c +69 -0
- data/vendor/liburing/test/file-exit-unreg.c +48 -0
- data/vendor/liburing/test/io_uring_passthrough.c +2 -0
- data/vendor/liburing/test/io_uring_register.c +13 -2
- data/vendor/liburing/test/napi-test.c +1 -1
- data/vendor/liburing/test/no-mmap-inval.c +1 -1
- data/vendor/liburing/test/read-mshot-empty.c +2 -0
- data/vendor/liburing/test/read-mshot-stdin.c +121 -0
- data/vendor/liburing/test/read-mshot.c +6 -0
- data/vendor/liburing/test/recvsend_bundle.c +2 -2
- data/vendor/liburing/test/reg-fd-only.c +1 -1
- data/vendor/liburing/test/reg-wait.c +251 -0
- data/vendor/liburing/test/regbuf-clone.c +458 -0
- data/vendor/liburing/test/resize-rings.c +643 -0
- data/vendor/liburing/test/rsrc_tags.c +1 -1
- data/vendor/liburing/test/sqpoll-sleep.c +39 -8
- data/vendor/liburing/test/sqwait.c +136 -0
- data/vendor/liburing/test/sync-cancel.c +8 -1
- data/vendor/liburing/test/timeout.c +13 -8
- metadata +22 -4
- data/examples/http_server_multishot.rb +0 -57
- data/examples/http_server_simpler.rb +0 -34
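The bulk of the vendored liburing update is a new test, data/vendor/liburing/test/resize-rings.c, exercising the io_uring_resize_rings() API for resizing the SQ/CQ rings of a live ring: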
@@ -0,0 +1,643 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Description: test sq/cq ring resizing
+ *
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <pthread.h>
+
+#include "liburing.h"
+#include "helpers.h"
+
+#define NVECS 128
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+struct data {
+	pthread_t thread;
+	int fd;
+	int nr_writes;
+	int failed;
+};
+
+static void *thread_fn(void *__data)
+{
+	struct data *d = __data;
+	char buffer[8];
+	int to_write = d->nr_writes;
+
+	memset(buffer, 0x5a, sizeof(buffer));
+	usleep(10000);
+	while (to_write) {
+		int ret = write(d->fd, buffer, sizeof(buffer));
+
+		if (ret < 0) {
+			perror("write");
+			d->failed = 1;
+			break;
+		} else if (ret != sizeof(buffer)) {
+			printf("short write %d\n", ret);
+		}
+		to_write--;
+		usleep(5);
+	}
+	return NULL;
+}
+
+static int test_pipes(struct io_uring *ring, int async)
+{
+	struct io_uring_params p = { };
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	unsigned long ud = 0;
+	struct data d = { };
+	int ret, i, fds[2], to_read;
+	char buffer[8];
+	void *tret;
+
+	p.sq_entries = 128;
+	p.cq_entries = 128;
+	ret = io_uring_resize_rings(ring, &p);
+	if (ret < 0) {
+		fprintf(stderr, "Failed to resize ring: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	if (pipe(fds) < 0) {
+		perror("pipe");
+		return T_EXIT_FAIL;
+	}
+
+	/*
+	 * Put NVECS inflight, then resize while waiting. Repeat until
+	 * 'to_read' has been read.
+	 */
+	d.nr_writes = 4096;
+	d.fd = fds[1];
+	p.sq_entries = 64;
+	p.cq_entries = 256;
+
+	pthread_create(&d.thread, NULL, thread_fn, &d);
+
+	to_read = d.nr_writes - 128;
+	while (to_read && !d.failed) {
+		unsigned long start_ud = -1UL, end_ud;
+		int to_wait;
+
+		to_wait = NVECS;
+		if (to_wait > to_read)
+			to_wait = to_read;
+
+		for (i = 0; i < to_wait; i++) {
+			sqe = io_uring_get_sqe(ring);
+			/* resized smaller */
+			if (!sqe)
+				break;
+			io_uring_prep_read(sqe, fds[0], buffer, sizeof(buffer), 0);
+			if (async)
+				sqe->flags |= IOSQE_ASYNC;
+			if (start_ud == -1UL)
+				start_ud = ud;
+			sqe->user_data = ++ud;
+			to_read--;
+		}
+		end_ud = ud;
+		ret = io_uring_submit(ring);
+		if (ret != i) {
+			fprintf(stderr, "submitted: %d\n", ret);
+			return T_EXIT_FAIL;
+		}
+
+		to_wait = i;
+		for (i = 0; i < to_wait; i++) {
+			if (i == 0) {
+				ret = io_uring_resize_rings(ring, &p);
+				if (ret < 0) {
+					if (ret != -EOVERFLOW) {
+						fprintf(stderr, "resize failed: %d\n", ret);
+						return T_EXIT_FAIL;
+					}
+				}
+				p.sq_entries = 32;
+				p.cq_entries = 128;
+			}
+			if (d.failed)
+				break;
+			ret = io_uring_wait_cqe(ring, &cqe);
+			if (ret) {
+				fprintf(stderr, "wait cqe: %d\n", ret);
+				return T_EXIT_FAIL;
+			}
+			if (cqe->res < 0) {
+				fprintf(stderr, "cqe res %d\n", cqe->res);
+				return T_EXIT_FAIL;
+			}
+			if (cqe->user_data < start_ud ||
+			    cqe->user_data > end_ud) {
+				fprintf(stderr, "user_data out-of-range: <%lu-%lu>: %lu\n",
+					start_ud, end_ud, (unsigned long) cqe->user_data);
+				return T_EXIT_FAIL;
+			}
+			io_uring_cqe_seen(ring, cqe);
+			if (!(i % 17)) {
+				ret = io_uring_resize_rings(ring, &p);
+				if (ret < 0) {
+					if (ret == -EOVERFLOW)
+						continue;
+					fprintf(stderr, "resize failed: %d\n", ret);
+					return T_EXIT_FAIL;
+				}
+				if (p.sq_entries == 32)
+					p.sq_entries = 64;
+				else if (p.sq_entries == 64)
+					p.sq_entries = 16;
+				else
+					p.sq_entries = 32;
+				if (p.cq_entries == 128)
+					p.cq_entries = 256;
+				else
+					p.cq_entries = 128;
+			}
+		}
+	}
+
+	pthread_join(d.thread, &tret);
+	close(fds[0]);
+	close(fds[1]);
+	return 0;
+}
+
+static int test_reads(struct io_uring *ring, int fd, int async)
+{
+	struct io_uring_params p = { };
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct iovec vecs[NVECS];
+	unsigned long to_read;
+	unsigned long ud = 0;
+	unsigned long offset;
+	int ret, i;
+
+	if (fd == -1)
+		return T_EXIT_SKIP;
+
+	p.sq_entries = 128;
+	p.cq_entries = 128;
+	ret = io_uring_resize_rings(ring, &p);
+	if (ret < 0) {
+		fprintf(stderr, "Failed to resize ring: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	for (i = 0; i < NVECS; i++) {
+		if (posix_memalign(&vecs[i].iov_base, 4096, 4096))
+			return T_EXIT_FAIL;
+		vecs[i].iov_len = 4096;
+	}
+
+	/*
+	 * Put NVECS inflight, then resize while waiting. Repeat until
+	 * 'to_read' has been read.
+	 */
+	to_read = 64*1024*1024;
+	p.sq_entries = 64;
+	p.cq_entries = 256;
+	offset = 0;
+	while (to_read) {
+		unsigned long start_ud = -1UL, end_ud;
+		int to_wait;
+
+		for (i = 0; i < NVECS; i++) {
+			sqe = io_uring_get_sqe(ring);
+			/* resized smaller */
+			if (!sqe)
+				break;
+			io_uring_prep_read(sqe, fd, vecs[i].iov_base,
+					   vecs[i].iov_len, offset);
+			if (async)
+				sqe->flags |= IOSQE_ASYNC;
+			offset += 8192;
+			if (start_ud == -1UL)
+				start_ud = ud;
+			sqe->user_data = ++ud;
+		}
+		end_ud = ud;
+		ret = io_uring_submit(ring);
+		if (ret != i) {
+			fprintf(stderr, "submitted: %d\n", ret);
+			return T_EXIT_FAIL;
+		}
+
+		to_wait = i;
+		for (i = 0; i < to_wait; i++) {
+			if (i == 0) {
+				ret = io_uring_resize_rings(ring, &p);
+				if (ret < 0) {
+					if (ret != -EOVERFLOW) {
+						fprintf(stderr, "resize failed: %d\n", ret);
+						return T_EXIT_FAIL;
+					}
+				}
+				p.sq_entries = 32;
+				p.cq_entries = 128;
+			}
+			ret = io_uring_wait_cqe(ring, &cqe);
+			if (ret) {
+				fprintf(stderr, "wait cqe: %d\n", ret);
+				return T_EXIT_FAIL;
+			}
+			if (cqe->res < 0) {
+				fprintf(stderr, "cqe res %d\n", cqe->res);
+				return T_EXIT_FAIL;
+			}
+			if (cqe->user_data < start_ud ||
+			    cqe->user_data > end_ud) {
+				fprintf(stderr, "user_data out-of-range: <%lu-%lu>: %lu\n",
+					start_ud, end_ud, (unsigned long) cqe->user_data);
+				return T_EXIT_FAIL;
+			}
+			io_uring_cqe_seen(ring, cqe);
+			if (to_read)
+				to_read -= min(to_read, 4096);
+			if (!(i % 17)) {
+				ret = io_uring_resize_rings(ring, &p);
+				if (ret < 0) {
+					if (ret == -EOVERFLOW)
+						continue;
+					fprintf(stderr, "resize failed: %d\n", ret);
+					return T_EXIT_FAIL;
+				}
+				if (p.sq_entries == 32)
+					p.sq_entries = 64;
+				else if (p.sq_entries == 64)
+					p.sq_entries = 16;
+				else
+					p.sq_entries = 32;
+				if (p.cq_entries == 128)
+					p.cq_entries = 256;
+				else
+					p.cq_entries = 128;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int test_basic(struct io_uring *ring, int async)
+{
+	struct io_uring_params p = { };
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int i, ret;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_nop(sqe);
+	if (async)
+		sqe->flags |= IOSQE_ASYNC;
+	sqe->user_data = 1;
+	io_uring_submit(ring);
+
+	p.sq_entries = 32;
+	p.cq_entries = 64;
+	ret = io_uring_resize_rings(ring, &p);
+	if (ret == -EINVAL)
+		return T_EXIT_SKIP;
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_nop(sqe);
+	if (async)
+		sqe->flags |= IOSQE_ASYNC;
+	sqe->user_data = 2;
+	io_uring_submit(ring);
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait cqe %d\n", ret);
+			return T_EXIT_FAIL;
+		}
+		if (cqe->user_data != i + 1) {
+			fprintf(stderr, "bad user_data %ld\n", (long) cqe->user_data);
+			return T_EXIT_FAIL;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return T_EXIT_PASS;
+}
+
+static int test_all_copy(struct io_uring *ring)
+{
+	struct io_uring_params p = { };
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	unsigned head;
+	int i, ret;
+
+	p.sq_entries = 32;
+	p.cq_entries = 64;
+	ret = io_uring_resize_rings(ring, &p);
+	if (ret) {
+		fprintf(stderr, "resize failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	for (i = 0; i < 32; i++) {
+		sqe = io_uring_get_sqe(ring);
+		io_uring_prep_nop(sqe);
+		sqe->user_data = i + 1;
+	}
+
+	io_uring_submit(ring);
+
+	memset(&p, 0, sizeof(p));
+	p.sq_entries = 64;
+	p.cq_entries = 128;
+	ret = io_uring_resize_rings(ring, &p);
+	if (ret) {
+		fprintf(stderr, "resize failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	i = 1;
+	io_uring_for_each_cqe(ring, head, cqe) {
+		if (cqe->user_data != i) {
+			fprintf(stderr, "Found cqe at wrong offset\n");
+			return T_EXIT_FAIL;
+		}
+		i++;
+	}
+	io_uring_cq_advance(ring, 32);
+	return T_EXIT_PASS;
+}
+
+static int test_overflow(struct io_uring *ring)
+{
+	struct io_uring_params p = { };
+	struct io_uring_sqe *sqe;
+	int i, ret;
+
+	p.sq_entries = 32;
+	p.cq_entries = 64;
+	ret = io_uring_resize_rings(ring, &p);
+	if (ret) {
+		fprintf(stderr, "resize failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	for (i = 0; i < 32; i++) {
+		sqe = io_uring_get_sqe(ring);
+		io_uring_prep_nop(sqe);
+		sqe->user_data = i + 1;
+	}
+
+	io_uring_submit(ring);
+
+	/* have 32 CQEs pending, resize to CQ size 32 which should work */
+	memset(&p, 0, sizeof(p));
+	p.sq_entries = 32;
+	p.cq_entries = 32;
+	ret = io_uring_resize_rings(ring, &p);
+	if (ret) {
+		fprintf(stderr, "resize failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	/* now resize to CQ size 16, which should fail with -EOVERFLOW */
+	memset(&p, 0, sizeof(p));
+	p.sq_entries = 8;
+	p.cq_entries = 16;
+	ret = io_uring_resize_rings(ring, &p);
+	if (ret != -EOVERFLOW) {
+		fprintf(stderr, "Expected overflow, got %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	io_uring_cq_advance(ring, 32);
+	return T_EXIT_PASS;
+}
+
+static int test_same_resize(int flags)
+{
+	struct io_uring_params p = { };
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	struct io_uring ring;
+	int i, ret;
+
+	ret = io_uring_queue_init(32, &ring, flags);
+	if (ret)
+		return T_EXIT_FAIL;
+
+	p.sq_entries = 32;
+	p.cq_entries = 64;
+	ret = io_uring_resize_rings(&ring, &p);
+	if (ret) {
+		fprintf(stderr, "resize failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	for (i = 0; i < 32; i++) {
+		sqe = io_uring_get_sqe(&ring);
+		io_uring_prep_nop(sqe);
+		sqe->user_data = i + 1;
+	}
+
+	io_uring_submit(&ring);
+
+	for (i = 0; i < 32; i++) {
+		ret = io_uring_wait_cqe(&ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait_cqe: %d\n", ret);
+			return T_EXIT_FAIL;
+		}
+		if (cqe->user_data != i + 1) {
+			fprintf(stderr, "Found cqe at wrong offset\n");
+			return T_EXIT_FAIL;
+		}
+		io_uring_cqe_seen(&ring, cqe);
+	}
+
+	io_uring_queue_exit(&ring);
+	return T_EXIT_PASS;
+}
+
+static int mmap_child(struct io_uring *__ring, struct io_uring_params *__p)
+{
+	struct io_uring ring = *__ring;
+	struct timeval tv;
+	int ret;
+
+	gettimeofday(&tv, NULL);
+	do {
+		struct io_uring_params p = *__p;
+		void *sq_ptr, *cq_ptr;
+
+		ret = io_uring_queue_mmap(__ring->ring_fd, &p, &ring);
+		if (ret)
+			continue;
+
+		sq_ptr = ring.sq.ring_ptr + 2 * sizeof(__u32);
+		cq_ptr = ring.cq.ring_ptr + 2 * sizeof(__u32);
+		memset(sq_ptr, 0x5a, ring.sq.ring_sz - 2 * sizeof(__u32));
+		memset(cq_ptr, 0xa5, ring.cq.ring_sz - 2 * sizeof(__u32));
+		io_uring_unmap_rings(&ring.sq, &ring.cq);
+	} while (mtime_since_now(&tv) < 2500);
+
+	exit(T_EXIT_PASS);
+}
+
+static int test_mmap_race(struct io_uring *ring, struct io_uring_params *__p)
+{
+	unsigned long useless_sum;
+	int i, w, nr_children;
+	struct timeval tv;
+	pid_t pid;
+
+	nr_children = sysconf(_SC_NPROCESSORS_ONLN);
+	if (nr_children < 0)
+		nr_children = 4;
+
+	for (i = 0; i < nr_children; i++) {
+		pid = fork();
+		if (!pid) {
+			mmap_child(ring, __p);
+			return T_EXIT_PASS;
+		}
+	}
+
+	useless_sum = 0;
+	gettimeofday(&tv, NULL);
+	do {
+		struct io_uring_params p = { .sq_entries = 32, };
+		void *ptr;
+
+		io_uring_resize_rings(ring, &p);
+
+		ptr = memchr(ring->sq.ring_ptr, 0x5a, ring->sq.ring_sz);
+		if (ptr)
+			useless_sum += ptr - ring->sq.ring_ptr;
+
+		ptr = memchr(ring->cq.ring_ptr, 0xa5, ring->cq.ring_sz);
+		if (ptr)
+			useless_sum += ptr - ring->cq.ring_ptr;
+
+		p.sq_entries = 128;
+		io_uring_resize_rings(ring, &p);
+	} while (mtime_since_now(&tv) < 2500);
+
+	for (i = 0; i < nr_children; i++)
+		wait(&w);
+
+	if (useless_sum)
+		return T_EXIT_PASS;
+	return T_EXIT_PASS;
+}
+
+static int test(int flags, int fd, int async)
+{
+	struct io_uring_params p = {
+		.flags = flags,
+	};
+	struct io_uring ring;
+	int ret;
+
+	ret = io_uring_queue_init_params(8, &ring, &p);
+	if (ret < 0) {
+		fprintf(stderr, "ring setup failed: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_basic(&ring, async);
+	if (ret == T_EXIT_SKIP) {
+		return T_EXIT_SKIP;
+	} else if (ret == T_EXIT_FAIL) {
+		fprintf(stderr, "test_basic %x failed\n", flags);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_reads(&ring, fd, async);
+	if (ret == T_EXIT_FAIL) {
+		fprintf(stderr, "test_reads %x failed\n", flags);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_pipes(&ring, async);
+	if (ret == T_EXIT_FAIL) {
+		fprintf(stderr, "test_pipes %x failed\n", flags);
+		return T_EXIT_FAIL;
+	}
+
+	if (async)
+		return T_EXIT_PASS;
+
+	ret = test_all_copy(&ring);
+	if (ret == T_EXIT_FAIL) {
+		fprintf(stderr, "test_all_copy %x failed\n", flags);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_overflow(&ring);
+	if (ret == T_EXIT_FAIL) {
+		fprintf(stderr, "test_overflow %x failed\n", flags);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test_same_resize(flags);
+	if (ret == T_EXIT_FAIL) {
+		fprintf(stderr, "test_same_resize %x failed\n", flags);
+		return T_EXIT_FAIL;
+	}
+
+	/* must go at the end, insert more tests above this one */
+	ret = test_mmap_race(&ring, &p);
+	if (ret == T_EXIT_FAIL) {
+		fprintf(stderr, "test_mmap_race %x failed\n", flags);
+		return T_EXIT_FAIL;
+	}
+
+	io_uring_queue_exit(&ring);
+	return T_EXIT_PASS;
+}
+
+int main(int argc, char *argv[])
+{
+	int ret, fd = -1;
+
+	if (argc > 1)
+		fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
+
+	ret = test(0, fd, 0);
+	if (ret == T_EXIT_SKIP)
+		return T_EXIT_SKIP;
+	else if (ret == T_EXIT_FAIL)
+		return T_EXIT_FAIL;
+
+	ret = test(0, fd, 1);
+	if (ret == T_EXIT_FAIL)
+		return T_EXIT_FAIL;
+
+	ret = test(IORING_SETUP_SQPOLL, fd, 0);
+	if (ret == T_EXIT_FAIL)
+		return T_EXIT_FAIL;
+
+	ret = test(IORING_SETUP_SQPOLL, fd, 1);
+	if (ret == T_EXIT_FAIL)
+		return T_EXIT_FAIL;
+
+	ret = test(IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN, fd, 0);
+	if (ret == T_EXIT_FAIL)
+		return T_EXIT_FAIL;
+
+	ret = test(IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN, fd, 1);
+	if (ret == T_EXIT_FAIL)
+		return T_EXIT_FAIL;
+
+	return T_EXIT_PASS;
+}
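For orientation, here is a minimal sketch of the resize API the test above drives. It assumes the vendored liburing, which declares io_uring_resize_rings(); on kernels without resize support the call returns -EINVAL, which is why test_basic() treats that as a skip:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "liburing.h"

int main(void)
{
	struct io_uring_params p;
	struct io_uring ring;
	int ret;

	/* start with a small ring... */
	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return 1;

	/* ...then ask the kernel to grow the SQ/CQ rings in place */
	memset(&p, 0, sizeof(p));
	p.sq_entries = 64;
	p.cq_entries = 128;
	ret = io_uring_resize_rings(&ring, &p);
	if (ret == -EINVAL)
		fprintf(stderr, "kernel has no ring resize support\n");
	else if (ret)
		fprintf(stderr, "resize failed: %d\n", ret);

	io_uring_queue_exit(&ring);
	return 0;
}

The next hunk is a one-line fix in test_buffers_update() (likely test/rsrc_tags.c, going by its +1 -1 entry in the file list above):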
@@ -184,7 +184,7 @@ static int test_buffers_update(void)
 
 	/* test that CQE is not emitted before we're done with a buffer */
 	sqe = io_uring_get_sqe(&ring);
-	io_uring_prep_read_fixed(sqe, pipes[0], tmp_buf, 10, 0,
+	io_uring_prep_read_fixed(sqe, pipes[0], tmp_buf, 10, 0, 1);
 	sqe->user_data = 100;
 	ret = io_uring_submit(&ring);
 	if (ret != 1) {
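The final parameter of io_uring_prep_read_fixed(sqe, fd, buf, nbytes, offset, buf_index) selects which registered buffer backs the fixed read, so the fix pins the read to registered buffer 1. The remaining hunks rework test/sqpoll-sleep.c to use the T_EXIT_* exit codes and to actually assert the SQPOLL idle-timeout behavior: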
@@ -14,12 +14,16 @@
 int main(int argc, char *argv[])
 {
 	struct io_uring_params p = {};
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
 	struct timeval tv;
 	struct io_uring ring;
+	unsigned long elapsed;
+	bool seen_wakeup;
 	int ret;
 
 	if (argc > 1)
-		return
+		return T_EXIT_SKIP;
 
 	p.flags = IORING_SETUP_SQPOLL;
 	p.sq_thread_idle = 100;
@@ -28,18 +32,45 @@ int main(int argc, char *argv[])
 	if (ret) {
 		if (geteuid()) {
 			printf("%s: skipped, not root\n", argv[0]);
-			return
+			return T_EXIT_SKIP;
 		}
 		fprintf(stderr, "queue_init=%d\n", ret);
-		return
+		return T_EXIT_FAIL;
 	}
 
+	sqe = io_uring_get_sqe(&ring);
+	io_uring_prep_nop(sqe);
+	io_uring_submit(&ring);
+
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait_cqe: %d\n", ret);
+		return T_EXIT_FAIL;
+	}
+	io_uring_cqe_seen(&ring, cqe);
+
+	elapsed = 0;
+	seen_wakeup = false;
 	gettimeofday(&tv, NULL);
 	do {
-		usleep(
-		if ((*ring.sq.kflags) & IORING_SQ_NEED_WAKEUP)
-
-
+		usleep(100);
+		if (IO_URING_READ_ONCE(*ring.sq.kflags) & IORING_SQ_NEED_WAKEUP) {
+			seen_wakeup = true;
+			break;
+		}
+		elapsed = mtime_since_now(&tv);
+	} while (elapsed < 1000);
+
+	if (!seen_wakeup) {
+		fprintf(stderr, "SQPOLL didn't flag wakeup\n");
+		return T_EXIT_FAIL;
+	}
+
+	/* should be around 100 msec */
+	if (elapsed < 90 || elapsed > 110) {
+		fprintf(stderr, "SQPOLL wakeup timing off %lu\n", elapsed);
+		return T_EXIT_FAIL;
+	}
 
-	return
+	return T_EXIT_PASS;
 }