uringmachine 0.19.1 → 0.21.0

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (97)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/test.yml +3 -4
  3. data/CHANGELOG.md +32 -1
  4. data/TODO.md +0 -39
  5. data/examples/bm_fileno.rb +33 -0
  6. data/examples/bm_mutex.rb +85 -0
  7. data/examples/bm_mutex_single.rb +33 -0
  8. data/examples/bm_queue.rb +29 -29
  9. data/examples/bm_send.rb +2 -5
  10. data/examples/bm_snooze.rb +20 -42
  11. data/examples/bm_write.rb +4 -1
  12. data/examples/fiber_scheduler_demo.rb +15 -51
  13. data/examples/fiber_scheduler_fork.rb +24 -0
  14. data/examples/nc_ssl.rb +71 -0
  15. data/ext/um/extconf.rb +5 -15
  16. data/ext/um/um.c +310 -74
  17. data/ext/um/um.h +66 -29
  18. data/ext/um/um_async_op.c +1 -1
  19. data/ext/um/um_async_op_class.c +2 -2
  20. data/ext/um/um_buffer.c +1 -1
  21. data/ext/um/um_class.c +178 -31
  22. data/ext/um/um_const.c +51 -3
  23. data/ext/um/um_mutex_class.c +1 -1
  24. data/ext/um/um_op.c +37 -0
  25. data/ext/um/um_queue_class.c +1 -1
  26. data/ext/um/um_stream.c +5 -5
  27. data/ext/um/um_stream_class.c +3 -0
  28. data/ext/um/um_sync.c +28 -39
  29. data/ext/um/um_utils.c +59 -19
  30. data/grant-2025/journal.md +353 -0
  31. data/grant-2025/tasks.md +135 -0
  32. data/lib/uringmachine/fiber_scheduler.rb +316 -57
  33. data/lib/uringmachine/version.rb +1 -1
  34. data/lib/uringmachine.rb +6 -0
  35. data/test/test_fiber_scheduler.rb +640 -0
  36. data/test/test_stream.rb +2 -2
  37. data/test/test_um.rb +722 -54
  38. data/uringmachine.gemspec +5 -5
  39. data/vendor/liburing/.github/workflows/ci.yml +94 -1
  40. data/vendor/liburing/.github/workflows/test_build.c +9 -0
  41. data/vendor/liburing/configure +27 -0
  42. data/vendor/liburing/examples/Makefile +6 -0
  43. data/vendor/liburing/examples/helpers.c +8 -0
  44. data/vendor/liburing/examples/helpers.h +5 -0
  45. data/vendor/liburing/liburing.spec +1 -1
  46. data/vendor/liburing/src/Makefile +9 -3
  47. data/vendor/liburing/src/include/liburing/barrier.h +11 -5
  48. data/vendor/liburing/src/include/liburing/io_uring/query.h +41 -0
  49. data/vendor/liburing/src/include/liburing/io_uring.h +51 -0
  50. data/vendor/liburing/src/include/liburing/sanitize.h +16 -4
  51. data/vendor/liburing/src/include/liburing.h +458 -121
  52. data/vendor/liburing/src/liburing-ffi.map +16 -0
  53. data/vendor/liburing/src/liburing.map +8 -0
  54. data/vendor/liburing/src/sanitize.c +4 -1
  55. data/vendor/liburing/src/setup.c +7 -4
  56. data/vendor/liburing/test/232c93d07b74.c +4 -16
  57. data/vendor/liburing/test/Makefile +15 -1
  58. data/vendor/liburing/test/accept.c +2 -13
  59. data/vendor/liburing/test/bind-listen.c +175 -13
  60. data/vendor/liburing/test/conn-unreach.c +132 -0
  61. data/vendor/liburing/test/fd-pass.c +32 -7
  62. data/vendor/liburing/test/fdinfo.c +39 -12
  63. data/vendor/liburing/test/fifo-futex-poll.c +114 -0
  64. data/vendor/liburing/test/fifo-nonblock-read.c +1 -12
  65. data/vendor/liburing/test/futex.c +1 -1
  66. data/vendor/liburing/test/helpers.c +99 -2
  67. data/vendor/liburing/test/helpers.h +9 -0
  68. data/vendor/liburing/test/io_uring_passthrough.c +6 -12
  69. data/vendor/liburing/test/mock_file.c +379 -0
  70. data/vendor/liburing/test/mock_file.h +47 -0
  71. data/vendor/liburing/test/nop.c +2 -2
  72. data/vendor/liburing/test/nop32-overflow.c +150 -0
  73. data/vendor/liburing/test/nop32.c +126 -0
  74. data/vendor/liburing/test/pipe.c +166 -0
  75. data/vendor/liburing/test/poll-race-mshot.c +13 -1
  76. data/vendor/liburing/test/read-write.c +4 -4
  77. data/vendor/liburing/test/recv-mshot-fair.c +81 -34
  78. data/vendor/liburing/test/recvsend_bundle.c +1 -1
  79. data/vendor/liburing/test/resize-rings.c +2 -0
  80. data/vendor/liburing/test/ring-query.c +322 -0
  81. data/vendor/liburing/test/ringbuf-loop.c +87 -0
  82. data/vendor/liburing/test/ringbuf-read.c +4 -4
  83. data/vendor/liburing/test/runtests.sh +2 -2
  84. data/vendor/liburing/test/send-zerocopy.c +43 -5
  85. data/vendor/liburing/test/send_recv.c +103 -32
  86. data/vendor/liburing/test/shutdown.c +2 -12
  87. data/vendor/liburing/test/socket-nb.c +3 -14
  88. data/vendor/liburing/test/socket-rw-eagain.c +2 -12
  89. data/vendor/liburing/test/socket-rw-offset.c +2 -12
  90. data/vendor/liburing/test/socket-rw.c +2 -12
  91. data/vendor/liburing/test/sqe-mixed-bad-wrap.c +87 -0
  92. data/vendor/liburing/test/sqe-mixed-nop.c +82 -0
  93. data/vendor/liburing/test/sqe-mixed-uring_cmd.c +153 -0
  94. data/vendor/liburing/test/timestamp.c +56 -19
  95. data/vendor/liburing/test/vec-regbuf.c +2 -4
  96. data/vendor/liburing/test/wq-aff.c +7 -0
  97. metadata +37 -15
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Description: test pipe creation through io_uring
+ *
+ */
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "helpers.h"
+#include "liburing.h"
+
+static int no_pipe;
+
+struct params {
+	int fixed;
+	int async;
+	int too_small;
+};
+
+static int pipe_comms(struct io_uring *ring, int *fds, struct params *p)
+{
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	char src[32], dst[32];
+	int i, ret;
+
+	memset(src, 0x5a, sizeof(src));
+	memset(dst, 0, sizeof(dst));
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_write(sqe, fds[1], src, sizeof(src), 0);
+	if (p->fixed)
+		sqe->flags |= IOSQE_FIXED_FILE;
+	sqe->user_data = 1;
+	io_uring_submit(ring);
+
+	sqe = io_uring_get_sqe(ring);
+	io_uring_prep_read(sqe, fds[0], dst, sizeof(dst), 0);
+	if (p->fixed)
+		sqe->flags |= IOSQE_FIXED_FILE;
+	sqe->user_data = 2;
+	io_uring_submit(ring);
+
+	for (i = 0; i < 2; i++) {
+		ret = io_uring_wait_cqe(ring, &cqe);
+		if (ret) {
+			fprintf(stderr, "wait cqe %d\n", ret);
+			return 1;
+		}
+		if (cqe->res != 32) {
+			printf("ud=%d, res=%d\n", (int) cqe->user_data, cqe->res);
+			return 1;
+		}
+		io_uring_cqe_seen(ring, cqe);
+	}
+
+	return memcmp(dst, src, sizeof(src));
+}
+
+static int pipe_test(int init_flags, struct params *p)
+{
+	struct io_uring ring;
+	struct io_uring_sqe *sqe;
+	struct io_uring_cqe *cqe;
+	int ret, fds[2];
+
+	ret = io_uring_queue_init(8, &ring, init_flags);
+	/* can hit -ENOMEM due to repeated ring creation and teardowns */
+	if (ret == -ENOMEM) {
+		usleep(1000);
+		return 0;
+	} else if (ret) {
+		fprintf(stderr, "ring_init: %d\n", ret);
+		return 1;
+	}
+
+	if (p->fixed) {
+		int sz;
+
+		if (p->too_small)
+			sz = 1;
+		else
+			sz = 100;
+		ret = io_uring_register_files_sparse(&ring, sz);
+		if (ret) {
+			if (ret == -EINVAL) {
+				no_pipe = 1;
+				return 0;
+			}
+			fprintf(stderr, "Failed to register sparse table %d\n", ret);
+			return 1;
+		}
+	}
+
+	fds[0] = fds[1] = -1;
+	sqe = io_uring_get_sqe(&ring);
+	if (!p->fixed)
+		io_uring_prep_pipe(sqe, fds, 0);
+	else
+		io_uring_prep_pipe_direct(sqe, fds, 0, IORING_FILE_INDEX_ALLOC);
+	if (p->async)
+		sqe->flags |= IOSQE_ASYNC;
+
+	io_uring_submit(&ring);
+	ret = io_uring_wait_cqe(&ring, &cqe);
+	if (ret) {
+		fprintf(stderr, "wait: %d\n", ret);
+		return 1;
+	}
+	if (cqe->res) {
+		if (cqe->res == -EINVAL) {
+			no_pipe = 1;
+			return 0;
+		}
+		if (p->fixed && p->too_small && cqe->res == -ENFILE)
+			goto done;
+		fprintf(stderr, "Bad cqe res %d\n", cqe->res);
+		return 1;
+	}
+
+	io_uring_cqe_seen(&ring, cqe);
+
+	ret = pipe_comms(&ring, fds, p);
+	if (!p->fixed) {
+		close(fds[0]);
+		close(fds[1]);
+	}
+done:
+	io_uring_queue_exit(&ring);
+	return ret;
+}
+
+int main(int argc, char *argv[])
+{
+	int init_flags[] = { 0, IORING_SETUP_SQPOLL, IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_SINGLE_ISSUER };
+	struct params ps[] = {
+		{ 0, 0, 0 },
+		{ 0, 1, 0 },
+		{ 1, 0, 0 },
+		{ 1, 1, 0 },
+		{ 0, 0, 1 },
+		{ 0, 1, 1 },
+		{ 1, 0, 1 },
+		{ 1, 1, 1 } };
+
+	int i, j;
+
+	if (argc > 1)
+		return T_EXIT_SKIP;
+
+	for (i = 0; i < ARRAY_SIZE(init_flags); i++) {
+		for (j = 0; j < ARRAY_SIZE(ps); j++) {
+			if (pipe_test(init_flags[i], &ps[j])) {
+				fprintf(stderr, "pipe %x %d/%d/%d failed\n",
+					init_flags[i], ps[j].fixed, ps[j].async,
+					ps[j].too_small);
+				return T_EXIT_FAIL;
+			}
+			if (no_pipe)
+				return T_EXIT_SKIP;
+		}
+	}
+
+	return T_EXIT_PASS;
+}
@@ -158,7 +158,7 @@ static int test_mshot(struct io_uring *ring, struct data *d)
 
 	d->fd = fd[1];
 
-	if (posix_memalign((void *) &buf, 16384, BUF_SIZE * NREQS))
+	if (posix_memalign((void **) &buf, 16384, BUF_SIZE * NREQS))
 		return T_EXIT_FAIL;
 
 	br = io_uring_setup_buf_ring(ring, NREQS, 1, 0, &ret);
@@ -221,6 +221,18 @@ static int test_mshot(struct io_uring *ring, struct data *d)
 			fprintf(stderr, "Got too many requests?\n");
 			return T_EXIT_FAIL;
 		}
+		/*
+		 * We're using unix sockets, and later kernels got support added
+		 * for msg_inq querying. On those kernels, we cannot rely on
+		 * the multishot terminating on a zero receive, as io_uring
+		 * will not do that retry as it KNOWS there's zero bytes
+		 * pending. Hence we need to actively quiet at that point. Inc
+		 * 'i' as well as we don't get the non-MORE CQE posted.
+		 */
+		if (i == NREQS) {
+			i++;
+			break;
+		}
 	} while (1);
 
 	if (i != NREQS + 1) {
@@ -462,7 +462,7 @@ static int test_buf_select_short(const char *filename, int nonvec)
 	}
 
 	exp_len = 0;
-	for (i = 0; i < BUFFERS; i++) {
+	for (i = 0; i < 2 * BUFFERS; i++) {
 		sqe = io_uring_get_sqe(&ring);
 		io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
 						vecs[i].iov_len / 2, 1, 1, i);
@@ -471,12 +471,12 @@ static int test_buf_select_short(const char *filename, int nonvec)
 	}
 
 	ret = io_uring_submit(&ring);
-	if (ret != BUFFERS) {
+	if (ret != BUFFERS * 2) {
 		fprintf(stderr, "submit: %d\n", ret);
 		return -1;
 	}
 
-	for (i = 0; i < BUFFERS; i++) {
+	for (i = 0; i < BUFFERS * 2; i++) {
 		ret = io_uring_wait_cqe(&ring, &cqe);
 		if (cqe->res < 0) {
 			fprintf(stderr, "cqe->res=%d\n", cqe->res);
@@ -935,7 +935,7 @@ int main(int argc, char *argv[])
 
 	signal(SIGXFSZ, SIG_IGN);
 
-	vecs = t_create_buffers(BUFFERS, BS);
+	vecs = t_create_buffers(2 * BUFFERS, BS);
 
 	/* if we don't have nonvec read, skip testing that */
 	nr = has_nonvec_read() ? 64 : 32;
@@ -28,6 +28,7 @@
 
 #define NR_RDS 4
 
+static bool no_iter_support;
 static bool no_limit_support;
 
 static int use_port = PORT;
@@ -48,6 +49,7 @@ struct recv_data {
 	int total_bytes;
 
 	int recv_bundle;
+	int mshot_limit;
 
 	int use_port;
 	int id;
@@ -64,13 +66,11 @@ static void arm_recv(struct io_uring *ring, struct recv_data *rd)
 	struct io_uring_sqe *sqe;
 	int len = PER_ITER_LIMIT;
 
-	if (rd->total_bytes && rd->bytes_since_arm > PER_MSHOT_LIMIT)
-		rd->mshot_too_big++;
-
 	rd->bytes_since_arm = 0;
 	sqe = io_uring_get_sqe(ring);
 	io_uring_prep_recv_multishot(sqe, rd->accept_fd, NULL, len, 0);
-	sqe->optlen = PER_MSHOT_LIMIT;
+	if (rd->mshot_limit)
+		sqe->optlen = PER_MSHOT_LIMIT;
 	if (rd->recv_bundle)
 		sqe->ioprio |= IORING_RECVSEND_BUNDLE;
 	sqe->buf_group = RECV_BGID;
@@ -155,7 +155,7 @@ static int recv_get_cqe(struct io_uring *ring,
 	int ret;
 
 	do {
-		ret = io_uring_submit_and_wait_timeout(ring, cqe, 1, &ts, NULL);
+		ret = io_uring_wait_cqe_timeout(ring, cqe, &ts);
 		if (!ret) {
 			struct recv_data *rd = io_uring_cqe_get_data(*cqe);
 
@@ -196,6 +196,7 @@ static int do_recv(struct io_uring *ring)
 	struct recv_data *rd;
 	int ret = 1;
 	int done = 0;
+	int pending_submit = 0;
 
 	last_rd = NULL;
 	bytes_since_last = 0;
@@ -223,9 +224,15 @@ static int do_recv(struct io_uring *ring)
 			done++;
 			continue;
 		}
+		if (rd->mshot_limit && rd->bytes_since_arm > PER_MSHOT_LIMIT)
+			rd->mshot_too_big++;
 		io_uring_cqe_seen(ring, cqe);
-		if (!(cqe->flags & IORING_CQE_F_MORE) && rd->recv_bytes)
+		if (!(cqe->flags & IORING_CQE_F_MORE) && rd->recv_bytes) {
 			arm_recv(ring, rd);
+			pending_submit++;
+		}
+		if (pending_submit == NR_RDS)
+			io_uring_submit(ring);
 	} while (done < NR_RDS);
 
 	ret = 0;
@@ -241,7 +248,7 @@ static void *recv_fn(void *data)
 	struct io_uring_buf_ring *br = NULL;
 	struct io_uring ring;
 	unsigned int buf_len;
-	void *buf, *ptr;
+	void *buf = NULL, *ptr;
 	int ret, sock[NR_RDS], i;
 	int brflags, ring_setup = 0;
 
@@ -255,7 +262,7 @@ static void *recv_fn(void *data)
 	ret = io_uring_queue_init_params(16, &ring, &p);
 	if (ret) {
 		if (ret == -EINVAL) {
-			no_limit_support = true;
+			no_iter_support = true;
 			goto skip;
 		}
 		fprintf(stderr, "ring init: %d\n", ret);
@@ -270,7 +277,7 @@ static void *recv_fn(void *data)
 	br = io_uring_setup_buf_ring(&ring, RECV_BIDS, RECV_BGID, brflags, &ret);
 	if (!br) {
 		if (ret == -EINVAL) {
-			no_limit_support = true;
+			no_iter_support = true;
 			goto skip;
 		}
 		fprintf(stderr, "failed setting up recv ring %d\n", ret);
@@ -284,7 +291,7 @@ static void *recv_fn(void *data)
 		ptr += buf_len;
 	}
 	io_uring_buf_ring_advance(br, RECV_BIDS);
-
+ 
 	for (i = 0; i < NR_RDS; i++) {
 		ret = recv_prep(&ring, &rds[i], &sock[i]);
 		if (ret) {
@@ -299,7 +306,10 @@ static void *recv_fn(void *data)
 
 	if (!io_uring_peek_cqe(&ring, &cqe)) {
 		if (cqe->res == -EINVAL) {
-			no_limit_support = true;
+			if (rds[0].mshot_limit)
+				no_limit_support = true;
+			else
+				no_iter_support = true;
 			goto skip;
 		}
 	}
@@ -319,6 +329,7 @@ skip:
 		io_uring_free_buf_ring(&ring, br, RECV_BIDS, RECV_BGID);
 	if (ring_setup)
 		io_uring_queue_exit(&ring);
+	free(buf);
 err:
 	return (void *)(intptr_t)ret;
 }
@@ -379,7 +390,8 @@ struct res {
 	int unfair;
 };
 
-static int test(int nr_send, int bundle, unsigned int queue_flags, struct res *r)
+static int test(int nr_send, int bundle, int mshot_limit,
+		unsigned int queue_flags, struct res *r)
 {
 	struct recv_data rds[NR_RDS] = { };
 	pthread_t recv_thread;
@@ -401,6 +413,7 @@ static int test(int nr_send, int bundle, unsigned int queue_flags, struct res *r
 		rd->max_sends = nr_send;
 		rd->recv_bundle = bundle;
 		rd->recv_bytes = nr_send * 4096;
+		rd->mshot_limit = mshot_limit;
 		rd->id = i + 1;
 	}
 
@@ -435,9 +448,9 @@ static int run_tests(void)
 	struct res r;
 	int ret;
 
-	ret = test(2, 1, IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_SINGLE_ISSUER, &r);
+	ret = test(2, 1, 0, IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_SINGLE_ISSUER, &r);
 	if (ret) {
-		if (no_limit_support)
+		if (no_iter_support)
 			return T_EXIT_SKIP;
 		fprintf(stderr, "test DEFER bundle failed\n");
 		return T_EXIT_FAIL;
@@ -449,52 +462,86 @@ static int run_tests(void)
 		return T_EXIT_FAIL;
 	}
 
-	ret = test(2, 0, IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_SINGLE_ISSUER, &r);
+	ret = test(2, 0, 0, IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_SINGLE_ISSUER, &r);
 	if (ret) {
 		fprintf(stderr, "test DEFER failed\n");
 		return T_EXIT_FAIL;
 	}
 
-	/* DEFER_TASKRUN should be fully fair and not have overshoots */
 	if (r.unfair || r.mshot_too_big) {
 		fprintf(stderr, "DEFER unfair=%d, too_big=%d\n", r.unfair, r.mshot_too_big);
 		return T_EXIT_FAIL;
 	}
 
-	ret = test(2, 1, IORING_SETUP_COOP_TASKRUN, &r);
+	ret = test(2, 1, 0, IORING_SETUP_COOP_TASKRUN, &r);
 	if (ret) {
 		fprintf(stderr, "test COOP bundle failed\n");
 		return T_EXIT_FAIL;
 	}
 
-	/*
-	 * normal task_work should not have overshoots, but may not be fair
-	 * because of the re-arming.
-	 */
-	if (r.unfair)
-		fprintf(stdout, "!DEFER bundle unfair, expected\n");
-	if (r.mshot_too_big) {
-		fprintf(stderr, "!DEFER bundle too_big=%d\n", r.mshot_too_big);
+	if (r.unfair || r.mshot_too_big) {
+		fprintf(stderr, "COOP bundle too_big=%d\n", r.mshot_too_big);
 		return T_EXIT_FAIL;
 	}
 
-	ret = test(2, 0, IORING_SETUP_COOP_TASKRUN, &r);
+	ret = test(2, 0, 0, IORING_SETUP_COOP_TASKRUN, &r);
 	if (ret) {
 		fprintf(stderr, "test COOP failed\n");
 		return T_EXIT_FAIL;
 	}
 
-	/*
-	 * normal task_work should not have overshoots, but may not be fair
-	 * because of the re-arming.
-	 */
-	if (r.unfair)
-		fprintf(stdout, "!DEFER unfair, expected\n");
-	if (r.mshot_too_big) {
+	if (r.unfair || r.mshot_too_big) {
 		fprintf(stderr, "!DEFER too_big=%d\n", r.mshot_too_big);
 		return T_EXIT_FAIL;
 	}
-
+ 
+	ret = test(2, 1, 1, IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_SINGLE_ISSUER, &r);
+	if (ret) {
+		if (no_limit_support)
+			return T_EXIT_PASS;
+		fprintf(stderr, "test DEFER bundle cap failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	/* DEFER_TASKRUN should be fully fair and not have overshoots */
+	if (r.unfair || r.mshot_too_big) {
+		fprintf(stderr, "DEFER bundle cap unfair=%d, too_big=%d\n", r.unfair, r.mshot_too_big);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test(2, 0, 1, IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_SINGLE_ISSUER, &r);
+	if (ret) {
+		fprintf(stderr, "test DEFER cap failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	if (r.unfair || r.mshot_too_big) {
+		fprintf(stderr, "DEFER cap unfair=%d, too_big=%d\n", r.unfair, r.mshot_too_big);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test(2, 1, 1, IORING_SETUP_COOP_TASKRUN, &r);
+	if (ret) {
+		fprintf(stderr, "test COOP bundle cap failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	if (r.unfair || r.mshot_too_big) {
+		fprintf(stderr, "COOP bundle cap too_big=%d\n", r.mshot_too_big);
+		return T_EXIT_FAIL;
+	}
+
+	ret = test(2, 0, 1, IORING_SETUP_COOP_TASKRUN, &r);
+	if (ret) {
+		fprintf(stderr, "test COOP cap failed\n");
+		return T_EXIT_FAIL;
+	}
+
+	if (r.unfair || r.mshot_too_big) {
+		fprintf(stderr, "COOP cap too_big=%d\n", r.mshot_too_big);
+		return T_EXIT_FAIL;
+	}
+
 	return T_EXIT_PASS;
 }
 
@@ -21,7 +21,7 @@ static int nr_msgs;
 static int use_tcp;
 static int classic_buffers;
 
-#define RECV_BIDS 8192
+#define RECV_BIDS 16384
 #define RECV_BID_MASK (RECV_BIDS - 1)
 
 #include "liburing.h"
@@ -657,6 +657,8 @@ try_defer:
 	ret = test(IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN, fd, 0);
 	if (ret == T_EXIT_FAIL)
 		return T_EXIT_FAIL;
+	else if (ret == T_EXIT_SKIP)
+		return T_EXIT_SKIP;
 
 	ret = test(IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN, fd, 1);
 	if (ret == T_EXIT_FAIL)