polyphony 1.4 → 1.6

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their public registries.
Files changed (106)
  1. checksums.yaml +4 -4
  2. data/.rubocop.yml +3 -0
  3. data/CHANGELOG.md +22 -0
  4. data/TODO.md +5 -14
  5. data/examples/pipes/http_server.rb +42 -12
  6. data/examples/pipes/http_server2.rb +45 -0
  7. data/ext/polyphony/backend_common.h +5 -0
  8. data/ext/polyphony/backend_io_uring.c +174 -121
  9. data/ext/polyphony/backend_io_uring_context.c +24 -18
  10. data/ext/polyphony/backend_io_uring_context.h +4 -2
  11. data/ext/polyphony/backend_libev.c +46 -22
  12. data/ext/polyphony/event.c +21 -0
  13. data/ext/polyphony/extconf.rb +25 -19
  14. data/ext/polyphony/fiber.c +0 -2
  15. data/ext/polyphony/pipe.c +1 -1
  16. data/ext/polyphony/polyphony.c +2 -20
  17. data/ext/polyphony/polyphony.h +5 -5
  18. data/ext/polyphony/ring_buffer.c +1 -0
  19. data/ext/polyphony/runqueue_ring_buffer.c +1 -0
  20. data/ext/polyphony/thread.c +63 -0
  21. data/ext/polyphony/win_uio.h +18 -0
  22. data/lib/polyphony/adapters/open3.rb +190 -0
  23. data/lib/polyphony/core/sync.rb +83 -13
  24. data/lib/polyphony/core/timer.rb +7 -25
  25. data/lib/polyphony/extensions/exception.rb +15 -0
  26. data/lib/polyphony/extensions/fiber.rb +14 -13
  27. data/lib/polyphony/extensions/io.rb +56 -14
  28. data/lib/polyphony/extensions/kernel.rb +1 -1
  29. data/lib/polyphony/extensions/object.rb +1 -13
  30. data/lib/polyphony/extensions/process.rb +76 -1
  31. data/lib/polyphony/extensions/socket.rb +0 -14
  32. data/lib/polyphony/extensions/thread.rb +19 -27
  33. data/lib/polyphony/extensions/timeout.rb +5 -1
  34. data/lib/polyphony/version.rb +1 -1
  35. data/lib/polyphony.rb +11 -5
  36. data/test/helper.rb +46 -4
  37. data/test/open3/envutil.rb +380 -0
  38. data/test/open3/find_executable.rb +24 -0
  39. data/test/stress.rb +11 -7
  40. data/test/test_backend.rb +11 -4
  41. data/test/test_event.rb +10 -3
  42. data/test/test_ext.rb +16 -1
  43. data/test/test_fiber.rb +16 -4
  44. data/test/test_global_api.rb +17 -16
  45. data/test/test_io.rb +39 -0
  46. data/test/test_kernel.rb +2 -2
  47. data/test/test_monitor.rb +356 -0
  48. data/test/test_open3.rb +338 -0
  49. data/test/test_signal.rb +5 -1
  50. data/test/test_socket.rb +6 -98
  51. data/test/test_sync.rb +46 -0
  52. data/test/test_thread.rb +10 -1
  53. data/test/test_thread_pool.rb +5 -0
  54. data/test/test_throttler.rb +1 -1
  55. data/test/test_timer.rb +8 -2
  56. data/test/test_trace.rb +2 -0
  57. data/vendor/liburing/.github/workflows/build.yml +8 -0
  58. data/vendor/liburing/.gitignore +1 -0
  59. data/vendor/liburing/CHANGELOG +8 -0
  60. data/vendor/liburing/configure +17 -25
  61. data/vendor/liburing/debian/liburing-dev.manpages +2 -0
  62. data/vendor/liburing/debian/rules +2 -1
  63. data/vendor/liburing/examples/Makefile +2 -1
  64. data/vendor/liburing/examples/io_uring-udp.c +11 -3
  65. data/vendor/liburing/examples/rsrc-update-bench.c +100 -0
  66. data/vendor/liburing/liburing.spec +1 -1
  67. data/vendor/liburing/make-debs.sh +4 -2
  68. data/vendor/liburing/src/Makefile +5 -5
  69. data/vendor/liburing/src/arch/aarch64/lib.h +1 -1
  70. data/vendor/liburing/src/include/liburing/io_uring.h +41 -16
  71. data/vendor/liburing/src/include/liburing.h +86 -11
  72. data/vendor/liburing/src/int_flags.h +1 -0
  73. data/vendor/liburing/src/liburing-ffi.map +12 -0
  74. data/vendor/liburing/src/liburing.map +8 -0
  75. data/vendor/liburing/src/register.c +7 -2
  76. data/vendor/liburing/src/setup.c +373 -81
  77. data/vendor/liburing/test/232c93d07b74.c +3 -3
  78. data/vendor/liburing/test/Makefile +10 -3
  79. data/vendor/liburing/test/accept.c +2 -1
  80. data/vendor/liburing/test/buf-ring.c +35 -75
  81. data/vendor/liburing/test/connect-rep.c +204 -0
  82. data/vendor/liburing/test/coredump.c +59 -0
  83. data/vendor/liburing/test/fallocate.c +9 -0
  84. data/vendor/liburing/test/fd-pass.c +34 -3
  85. data/vendor/liburing/test/file-verify.c +27 -6
  86. data/vendor/liburing/test/helpers.c +3 -1
  87. data/vendor/liburing/test/io_uring_register.c +25 -28
  88. data/vendor/liburing/test/io_uring_setup.c +1 -1
  89. data/vendor/liburing/test/poll-cancel-all.c +29 -5
  90. data/vendor/liburing/test/poll-race-mshot.c +6 -22
  91. data/vendor/liburing/test/read-write.c +53 -0
  92. data/vendor/liburing/test/recv-msgall.c +21 -23
  93. data/vendor/liburing/test/reg-fd-only.c +55 -0
  94. data/vendor/liburing/test/reg-hint.c +56 -0
  95. data/vendor/liburing/test/regbuf-merge.c +91 -0
  96. data/vendor/liburing/test/ringbuf-read.c +2 -10
  97. data/vendor/liburing/test/send_recvmsg.c +5 -16
  98. data/vendor/liburing/test/shutdown.c +2 -1
  99. data/vendor/liburing/test/socket-io-cmd.c +215 -0
  100. data/vendor/liburing/test/socket-rw-eagain.c +2 -1
  101. data/vendor/liburing/test/socket-rw-offset.c +2 -1
  102. data/vendor/liburing/test/socket-rw.c +2 -1
  103. data/vendor/liburing/test/timeout.c +276 -0
  104. data/vendor/liburing/test/xattr.c +38 -25
  105. metadata +20 -7
  106. data/vendor/liburing/test/timeout-overflow.c +0 -204
@@ -8,13 +8,91 @@
 #include "liburing/compat.h"
 #include "liburing/io_uring.h"
 
+#define KERN_MAX_ENTRIES 32768
+#define KERN_MAX_CQ_ENTRIES (2 * KERN_MAX_ENTRIES)
+
+static inline int __fls(int x)
+{
+	if (!x)
+		return 0;
+	return 8 * sizeof(x) - __builtin_clz(x);
+}
+
+static unsigned roundup_pow2(unsigned depth)
+{
+	return 1U << __fls(depth - 1);
+}
+
+static int get_sq_cq_entries(unsigned entries, struct io_uring_params *p,
+			     unsigned *sq, unsigned *cq)
+{
+	unsigned cq_entries;
+
+	if (!entries)
+		return -EINVAL;
+	if (entries > KERN_MAX_ENTRIES) {
+		if (!(p->flags & IORING_SETUP_CLAMP))
+			return -EINVAL;
+		entries = KERN_MAX_ENTRIES;
+	}
+
+	entries = roundup_pow2(entries);
+	if (p->flags & IORING_SETUP_CQSIZE) {
+		if (!p->cq_entries)
+			return -EINVAL;
+		cq_entries = p->cq_entries;
+		if (cq_entries > KERN_MAX_CQ_ENTRIES) {
+			if (!(p->flags & IORING_SETUP_CLAMP))
+				return -EINVAL;
+			cq_entries = KERN_MAX_CQ_ENTRIES;
+		}
+		cq_entries = roundup_pow2(cq_entries);
+		if (cq_entries < entries)
+			return -EINVAL;
+	} else {
+		cq_entries = 2 * entries;
+	}
+
+	*sq = entries;
+	*cq = cq_entries;
+	return 0;
+}
+
 static void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq)
 {
-	__sys_munmap(sq->ring_ptr, sq->ring_sz);
-	if (cq->ring_ptr && cq->ring_ptr != sq->ring_ptr)
+	if (sq->ring_sz)
+		__sys_munmap(sq->ring_ptr, sq->ring_sz);
+	if (cq->ring_ptr && cq->ring_sz && cq->ring_ptr != sq->ring_ptr)
 		__sys_munmap(cq->ring_ptr, cq->ring_sz);
 }
 
+static void io_uring_setup_ring_pointers(struct io_uring_params *p,
+					 struct io_uring_sq *sq,
+					 struct io_uring_cq *cq)
+{
+	sq->khead = sq->ring_ptr + p->sq_off.head;
+	sq->ktail = sq->ring_ptr + p->sq_off.tail;
+	sq->kring_mask = sq->ring_ptr + p->sq_off.ring_mask;
+	sq->kring_entries = sq->ring_ptr + p->sq_off.ring_entries;
+	sq->kflags = sq->ring_ptr + p->sq_off.flags;
+	sq->kdropped = sq->ring_ptr + p->sq_off.dropped;
+	sq->array = sq->ring_ptr + p->sq_off.array;
+
+	cq->khead = cq->ring_ptr + p->cq_off.head;
+	cq->ktail = cq->ring_ptr + p->cq_off.tail;
+	cq->kring_mask = cq->ring_ptr + p->cq_off.ring_mask;
+	cq->kring_entries = cq->ring_ptr + p->cq_off.ring_entries;
+	cq->koverflow = cq->ring_ptr + p->cq_off.overflow;
+	cq->cqes = cq->ring_ptr + p->cq_off.cqes;
+	if (p->cq_off.flags)
+		cq->kflags = cq->ring_ptr + p->cq_off.flags;
+
+	sq->ring_mask = *sq->kring_mask;
+	sq->ring_entries = *sq->kring_entries;
+	cq->ring_mask = *cq->kring_mask;
+	cq->ring_entries = *cq->kring_entries;
+}
+
 static int io_uring_mmap(int fd, struct io_uring_params *p,
 			 struct io_uring_sq *sq, struct io_uring_cq *cq)
 {
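Note: the new get_sq_cq_entries() helper above centralizes the ring-sizing rules that were previously inlined in io_uring_mlock_size_params() (see the hunk at old line 337 below). As a standalone illustration only — not part of the diff, and assuming a GCC/Clang __builtin_clz — the sizing works out like this:

#include <stdio.h>

/* Re-statement of the rounding helpers above, for illustration only. */
static inline int fls_int(int x)
{
	return x ? 8 * (int) sizeof(x) - __builtin_clz(x) : 0;
}

static unsigned roundup_pow2(unsigned depth)
{
	return 1U << fls_int(depth - 1);
}

int main(void)
{
	/* Asking for 100 SQ entries actually sizes the SQ ring at 128;
	 * without IORING_SETUP_CQSIZE the CQ ring defaults to 2 * SQ. */
	unsigned sq = roundup_pow2(100);
	printf("sq=%u cq=%u\n", sq, 2 * sq);	/* prints: sq=128 cq=256 */
	return 0;
}

Requests above KERN_MAX_ENTRIES (32768) fail with -EINVAL unless IORING_SETUP_CLAMP is set, in which case they are clamped instead.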
@@ -52,14 +130,6 @@ static int io_uring_mmap(int fd, struct io_uring_params *p,
 		}
 	}
 
-	sq->khead = sq->ring_ptr + p->sq_off.head;
-	sq->ktail = sq->ring_ptr + p->sq_off.tail;
-	sq->kring_mask = sq->ring_ptr + p->sq_off.ring_mask;
-	sq->kring_entries = sq->ring_ptr + p->sq_off.ring_entries;
-	sq->kflags = sq->ring_ptr + p->sq_off.flags;
-	sq->kdropped = sq->ring_ptr + p->sq_off.dropped;
-	sq->array = sq->ring_ptr + p->sq_off.array;
-
 	size = sizeof(struct io_uring_sqe);
 	if (p->flags & IORING_SETUP_SQE128)
 		size += 64;
@@ -72,19 +142,7 @@ err:
 		return ret;
 	}
 
-	cq->khead = cq->ring_ptr + p->cq_off.head;
-	cq->ktail = cq->ring_ptr + p->cq_off.tail;
-	cq->kring_mask = cq->ring_ptr + p->cq_off.ring_mask;
-	cq->kring_entries = cq->ring_ptr + p->cq_off.ring_entries;
-	cq->koverflow = cq->ring_ptr + p->cq_off.overflow;
-	cq->cqes = cq->ring_ptr + p->cq_off.cqes;
-	if (p->cq_off.flags)
-		cq->kflags = cq->ring_ptr + p->cq_off.flags;
-
-	sq->ring_mask = *sq->kring_mask;
-	sq->ring_entries = *sq->kring_entries;
-	cq->ring_mask = *cq->kring_mask;
-	cq->ring_entries = *cq->kring_entries;
+	io_uring_setup_ring_pointers(p, sq, cq);
 	return 0;
 }
 
@@ -97,16 +155,8 @@ err:
 __cold int io_uring_queue_mmap(int fd, struct io_uring_params *p,
 			       struct io_uring *ring)
 {
-	int ret;
-
 	memset(ring, 0, sizeof(*ring));
-	ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
-	if (!ret) {
-		ring->flags = p->flags;
-		ring->ring_fd = ring->enter_ring_fd = fd;
-		ring->int_flags = 0;
-	}
-	return ret;
+	return io_uring_mmap(fd, p, &ring->sq, &ring->cq);
 }
 
 /*
@@ -144,21 +194,142 @@ __cold int io_uring_ring_dontfork(struct io_uring *ring)
 	return 0;
 }
 
-__cold int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
-				      struct io_uring_params *p)
+/* FIXME */
+static size_t huge_page_size = 2 * 1024 * 1024;
+
+/*
+ * Returns negative for error, or number of bytes used in the buffer on success
+ */
+static int io_uring_alloc_huge(unsigned entries, struct io_uring_params *p,
+			       struct io_uring_sq *sq, struct io_uring_cq *cq,
+			       void *buf, size_t buf_size)
+{
+	unsigned long page_size = get_page_size();
+	unsigned sq_entries, cq_entries;
+	size_t ring_mem, sqes_mem;
+	unsigned long mem_used = 0;
+	void *ptr;
+	int ret;
+
+	ret = get_sq_cq_entries(entries, p, &sq_entries, &cq_entries);
+	if (ret)
+		return ret;
+
+	sqes_mem = sq_entries * sizeof(struct io_uring_sqe);
+	sqes_mem = (sqes_mem + page_size - 1) & ~(page_size - 1);
+	ring_mem = cq_entries * sizeof(struct io_uring_cqe);
+	if (p->flags & IORING_SETUP_CQE32)
+		ring_mem *= 2;
+	ring_mem += sq_entries * sizeof(unsigned);
+	mem_used = sqes_mem + ring_mem;
+	mem_used = (mem_used + page_size - 1) & ~(page_size - 1);
+
+	/*
+	 * A maxed-out number of CQ entries with IORING_SETUP_CQE32 fills a 2MB
+	 * huge page by itself, so the SQ entries won't fit in the same huge
+	 * page. For SQEs, that shouldn't be possible given KERN_MAX_ENTRIES,
+	 * but check that too to future-proof (e.g. against different huge page
+	 * sizes). Bail out early so we don't overrun.
+	 */
+	if (!buf && (sqes_mem > huge_page_size || ring_mem > huge_page_size))
+		return -ENOMEM;
+
+	if (buf) {
+		if (mem_used > buf_size)
+			return -ENOMEM;
+		ptr = buf;
+	} else {
+		int map_hugetlb = 0;
+		if (sqes_mem <= page_size)
+			buf_size = page_size;
+		else {
+			buf_size = huge_page_size;
+			map_hugetlb = MAP_HUGETLB;
+		}
+		ptr = __sys_mmap(NULL, buf_size, PROT_READ|PROT_WRITE,
+				 MAP_SHARED|MAP_ANONYMOUS|map_hugetlb,
+				 -1, 0);
+		if (IS_ERR(ptr))
+			return PTR_ERR(ptr);
+	}
+
+	sq->sqes = ptr;
+	if (mem_used <= buf_size) {
+		sq->ring_ptr = (void *) sq->sqes + sqes_mem;
+		/* clear ring sizes, we have just one mmap() to undo */
+		cq->ring_sz = 0;
+		sq->ring_sz = 0;
+	} else {
+		int map_hugetlb = 0;
+		if (ring_mem <= page_size)
+			buf_size = page_size;
+		else {
+			buf_size = huge_page_size;
+			map_hugetlb = MAP_HUGETLB;
+		}
+		ptr = __sys_mmap(NULL, buf_size, PROT_READ|PROT_WRITE,
+				 MAP_SHARED|MAP_ANONYMOUS|map_hugetlb,
+				 -1, 0);
+		if (IS_ERR(ptr)) {
+			__sys_munmap(sq->sqes, 1);
+			return PTR_ERR(ptr);
+		}
+		sq->ring_ptr = ptr;
+		sq->ring_sz = buf_size;
+		cq->ring_sz = 0;
+	}
+
+	cq->ring_ptr = (void *) sq->ring_ptr;
+	p->sq_off.user_addr = (unsigned long) sq->sqes;
+	p->cq_off.user_addr = (unsigned long) sq->ring_ptr;
+	return (int) mem_used;
+}
+
+static int __io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
+					struct io_uring_params *p, void *buf,
+					size_t buf_size)
 {
-	int fd, ret;
+	int fd, ret = 0;
 	unsigned *sq_array;
 	unsigned sq_entries, index;
 
+	memset(ring, 0, sizeof(*ring));
+
+	/*
+	 * The kernel does this check already, but checking it here allows us
+	 * to avoid handling it below.
+	 */
+	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY
+	    && !(p->flags & IORING_SETUP_NO_MMAP))
+		return -EINVAL;
+
+	if (p->flags & IORING_SETUP_NO_MMAP) {
+		ret = io_uring_alloc_huge(entries, p, &ring->sq, &ring->cq,
+					  buf, buf_size);
+		if (ret < 0)
+			return ret;
+		if (buf)
+			ring->int_flags |= INT_FLAG_APP_MEM;
+	}
+
 	fd = __sys_io_uring_setup(entries, p);
-	if (fd < 0)
+	if (fd < 0) {
+		if ((p->flags & IORING_SETUP_NO_MMAP) &&
+		    !(ring->int_flags & INT_FLAG_APP_MEM)) {
+			__sys_munmap(ring->sq.sqes, 1);
+			io_uring_unmap_rings(&ring->sq, &ring->cq);
+		}
 		return fd;
+	}
 
-	ret = io_uring_queue_mmap(fd, p, ring);
-	if (ret) {
-		__sys_close(fd);
-		return ret;
+	if (!(p->flags & IORING_SETUP_NO_MMAP)) {
+		ret = io_uring_queue_mmap(fd, p, ring);
+		if (ret) {
+			__sys_close(fd);
+			return ret;
+		}
+	} else {
+		io_uring_setup_ring_pointers(p, &ring->sq, &ring->cq);
 	}
 
 	/*
@@ -170,7 +341,46 @@ __cold int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
 		sq_array[index] = index;
 
 	ring->features = p->features;
-	return 0;
+	ring->flags = p->flags;
+	ring->enter_ring_fd = fd;
+	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY) {
+		ring->ring_fd = -1;
+		ring->int_flags |= INT_FLAG_REG_RING | INT_FLAG_REG_REG_RING;
+	} else {
+		ring->ring_fd = fd;
+	}
+
+	return ret;
+}
+
+/*
+ * Like io_uring_queue_init_params(), except it allows the application to pass
+ * in a pre-allocated memory range that is used for the shared data between
+ * the kernel and the application. This includes the sqes array, and the two
+ * rings. The memory must be contiguous; the use case here is that the app
+ * allocates a huge page and passes it in.
+ *
+ * Returns the number of bytes used in the buffer, the app can then reuse
+ * the buffer with the returned offset to put more rings in the same huge
+ * page. Returns -ENOMEM if there's not enough room left in the buffer to
+ * host the ring.
+ */
+int io_uring_queue_init_mem(unsigned entries, struct io_uring *ring,
+			    struct io_uring_params *p,
+			    void *buf, size_t buf_size)
+{
+	/* should already be set... */
+	p->flags |= IORING_SETUP_NO_MMAP;
+	return __io_uring_queue_init_params(entries, ring, p, buf, buf_size);
+}
+
+int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
+			       struct io_uring_params *p)
+{
+	int ret;
+
+	ret = __io_uring_queue_init_params(entries, ring, p, NULL, 0);
+	return ret >= 0 ? 0 : ret;
 }
 
 /*
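Note: a rough usage sketch for the io_uring_queue_init_mem() API introduced above (illustration only, not from this diff; the ring sizes and bare-bones error handling are assumptions, and MAP_HUGETLB requires huge pages to be available on the system). Per the doc comment, the positive return value is the number of bytes consumed from the buffer, so a second ring can be carved out of the remainder of the same huge page:

#include <liburing.h>
#include <string.h>
#include <sys/mman.h>

#define HUGE_SZ (2 * 1024 * 1024)

static int setup_two_rings(struct io_uring *a, struct io_uring *b)
{
	struct io_uring_params p;
	void *buf;
	int used;

	/* one 2MB huge page shared by both rings */
	buf = mmap(NULL, HUGE_SZ, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED)
		return -1;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_NO_MMAP;
	used = io_uring_queue_init_mem(64, a, &p, buf, HUGE_SZ);
	if (used < 0)
		return used;	/* unmap on error omitted for brevity */

	/* place the second ring at the returned offset */
	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_NO_MMAP;
	used = io_uring_queue_init_mem(64, b, &p,
				       (char *) buf + used, HUGE_SZ - used);
	return used < 0 ? used : 0;
}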
@@ -194,11 +404,20 @@ __cold void io_uring_queue_exit(struct io_uring *ring)
 	struct io_uring_cq *cq = &ring->cq;
 	size_t sqe_size;
 
-	sqe_size = sizeof(struct io_uring_sqe);
-	if (ring->flags & IORING_SETUP_SQE128)
-		sqe_size += 64;
-	__sys_munmap(sq->sqes, sqe_size * sq->ring_entries);
-	io_uring_unmap_rings(sq, cq);
+	if (!sq->ring_sz) {
+		sqe_size = sizeof(struct io_uring_sqe);
+		if (ring->flags & IORING_SETUP_SQE128)
+			sqe_size += 64;
+		__sys_munmap(sq->sqes, sqe_size * sq->ring_entries);
+		io_uring_unmap_rings(sq, cq);
+	} else {
+		if (!(ring->int_flags & INT_FLAG_APP_MEM)) {
+			__sys_munmap(sq->sqes,
+				*sq->kring_entries * sizeof(struct io_uring_sqe));
+			io_uring_unmap_rings(sq, cq);
+		}
+	}
+
 	/*
 	 * Not strictly required, but frees up the slot we used now rather
 	 * than at process exit time.
@@ -249,18 +468,6 @@ __cold void io_uring_free_probe(struct io_uring_probe *probe)
 	free(probe);
 }
 
-static inline int __fls(unsigned long x)
-{
-	if (!x)
-		return 0;
-	return 8 * sizeof(x) - __builtin_clzl(x);
-}
-
-static unsigned roundup_pow2(unsigned depth)
-{
-	return 1U << __fls(depth - 1);
-}
-
 static size_t npages(size_t size, long page_size)
 {
 	size--;
@@ -291,9 +498,6 @@ static size_t rings_size(struct io_uring_params *p, unsigned entries,
 	return pages * page_size;
 }
 
-#define KERN_MAX_ENTRIES 32768
-#define KERN_MAX_CQ_ENTRIES (2 * KERN_MAX_ENTRIES)
-
 /*
  * Return the required ulimit -l memlock memory required for a given ring
  * setup, in bytes. May return -errno on error. On newer (5.12+) kernels,
@@ -305,11 +509,14 @@ static size_t rings_size(struct io_uring_params *p, unsigned entries,
 __cold ssize_t io_uring_mlock_size_params(unsigned entries,
 					  struct io_uring_params *p)
 {
-	struct io_uring_params lp = { };
+	struct io_uring_params lp;
 	struct io_uring ring;
-	unsigned cq_entries;
+	unsigned cq_entries, sq;
 	long page_size;
 	ssize_t ret;
+	int cret;
+
+	memset(&lp, 0, sizeof(lp));
 
 	/*
 	 * We only really use this inited ring to see if the kernel is newer
@@ -337,25 +544,12 @@ __cold ssize_t io_uring_mlock_size_params(unsigned entries,
 		entries = KERN_MAX_ENTRIES;
 	}
 
-	entries = roundup_pow2(entries);
-	if (p->flags & IORING_SETUP_CQSIZE) {
-		if (!p->cq_entries)
-			return -EINVAL;
-		cq_entries = p->cq_entries;
-		if (cq_entries > KERN_MAX_CQ_ENTRIES) {
-			if (!(p->flags & IORING_SETUP_CLAMP))
-				return -EINVAL;
-			cq_entries = KERN_MAX_CQ_ENTRIES;
-		}
-		cq_entries = roundup_pow2(cq_entries);
-		if (cq_entries < entries)
-			return -EINVAL;
-	} else {
-		cq_entries = 2 * entries;
-	}
+	cret = get_sq_cq_entries(entries, p, &sq, &cq_entries);
+	if (cret)
+		return cret;
 
 	page_size = get_page_size();
-	return rings_size(p, entries, cq_entries, page_size);
+	return rings_size(p, sq, cq_entries, page_size);
 }
 
 /*
@@ -364,7 +558,105 @@ __cold ssize_t io_uring_mlock_size_params(unsigned entries,
  */
 __cold ssize_t io_uring_mlock_size(unsigned entries, unsigned flags)
 {
-	struct io_uring_params p = { .flags = flags, };
+	struct io_uring_params p;
 
+	memset(&p, 0, sizeof(p));
+	p.flags = flags;
 	return io_uring_mlock_size_params(entries, &p);
 }
+
+#if defined(__hppa__)
+static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
+					  unsigned int nentries, int bgid,
+					  unsigned int flags, int *ret)
+{
+	struct io_uring_buf_ring *br;
+	struct io_uring_buf_reg reg;
+	size_t ring_size;
+	off_t off;
+	int lret;
+
+	memset(&reg, 0, sizeof(reg));
+	reg.ring_entries = nentries;
+	reg.bgid = bgid;
+	reg.flags = IOU_PBUF_RING_MMAP;
+
+	*ret = 0;
+	lret = io_uring_register_buf_ring(ring, &reg, flags);
+	if (lret) {
+		*ret = lret;
+		return NULL;
+	}
+
+	off = IORING_OFF_PBUF_RING | (unsigned long long) bgid << IORING_OFF_PBUF_SHIFT;
+	ring_size = nentries * sizeof(struct io_uring_buf);
+	br = __sys_mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
+			MAP_SHARED | MAP_POPULATE, ring->ring_fd, off);
+	if (IS_ERR(br)) {
+		*ret = PTR_ERR(br);
+		return NULL;
+	}
+
+	return br;
+}
+#else
+static struct io_uring_buf_ring *br_setup(struct io_uring *ring,
+					  unsigned int nentries, int bgid,
+					  unsigned int flags, int *ret)
+{
+	struct io_uring_buf_ring *br;
+	struct io_uring_buf_reg reg;
+	size_t ring_size;
+	int lret;
+
+	memset(&reg, 0, sizeof(reg));
+	ring_size = nentries * sizeof(struct io_uring_buf);
+	br = __sys_mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
+			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	if (IS_ERR(br)) {
+		*ret = PTR_ERR(br);
+		return NULL;
+	}
+
+	reg.ring_addr = (unsigned long) (uintptr_t) br;
+	reg.ring_entries = nentries;
+	reg.bgid = bgid;
+
+	*ret = 0;
+	lret = io_uring_register_buf_ring(ring, &reg, flags);
+	if (lret) {
+		__sys_munmap(br, ring_size);
+		*ret = lret;
+		br = NULL;
+	}
+
+	return br;
+}
+#endif
+
+struct io_uring_buf_ring *io_uring_setup_buf_ring(struct io_uring *ring,
+						  unsigned int nentries,
+						  int bgid, unsigned int flags,
+						  int *ret)
+{
+	struct io_uring_buf_ring *br;
+
+	br = br_setup(ring, nentries, bgid, flags, ret);
+	if (br)
+		io_uring_buf_ring_init(br);
+
+	return br;
+}
+
+int io_uring_free_buf_ring(struct io_uring *ring, struct io_uring_buf_ring *br,
+			   unsigned int nentries, int bgid)
+{
+	int ret;
+
+	ret = io_uring_unregister_buf_ring(ring, bgid);
+	if (ret)
+		return ret;
+
+	__sys_munmap(br, nentries * sizeof(struct io_uring_buf));
+	return 0;
+}
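Note: a hedged sketch of the new provided-buffer-ring convenience pair in use (illustration only; the group id, entry count, buffer size, and the caller-supplied bufs area are assumptions). io_uring_setup_buf_ring() combines allocation, registration, and io_uring_buf_ring_init() in one call; io_uring_free_buf_ring() undoes both steps:

#include <liburing.h>

#define ENTRIES	32
#define BGID	7
#define BUF_SZ	4096

/* Register one group of provided buffers, then tear it down again.
 * bufs must point to at least ENTRIES * BUF_SZ bytes. */
static int demo_buf_ring(struct io_uring *ring, void *bufs)
{
	struct io_uring_buf_ring *br;
	int i, ret;

	br = io_uring_setup_buf_ring(ring, ENTRIES, BGID, 0, &ret);
	if (!br)
		return ret;

	for (i = 0; i < ENTRIES; i++)
		io_uring_buf_ring_add(br, (char *) bufs + i * BUF_SZ,
				      BUF_SZ, i,
				      io_uring_buf_ring_mask(ENTRIES), i);
	io_uring_buf_ring_advance(br, ENTRIES);

	/* ... submit recv/read SQEs with IOSQE_BUFFER_SELECT here ... */

	return io_uring_free_buf_ring(ring, br, ENTRIES, BGID);
}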
@@ -64,8 +64,7 @@ static void *rcv(void *arg)
 	int res;
 
 	if (p->tcp) {
-		int val = 1;
-
+		int ret, val = 1;
 
 		s0 = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC, IPPROTO_TCP);
 		res = setsockopt(s0, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
@@ -77,7 +76,8 @@ static void *rcv(void *arg)
 		addr.sin_family = AF_INET;
 		addr.sin_addr.s_addr = inet_addr("127.0.0.1");
-		assert(t_bind_ephemeral_port(s0, &addr) == 0);
+		ret = t_bind_ephemeral_port(s0, &addr);
+		assert(!ret);
 		p->bind_port = addr.sin_port;
 	} else {
 		s0 = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
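Note: this hunk (together with the ret declaration added in the previous hunk) and the final hunk of this diff make the same fix: a side-effecting call is moved out of assert(). A minimal sketch of the failure mode being avoided, where check() is a hypothetical stand-in for t_bind_ephemeral_port():

#include <assert.h>

static int check(void)
{
	/* imagine this binds a socket, i.e. has a side effect */
	return 0;
}

int main(void)
{
	/* Fragile: under -DNDEBUG, assert() expands to nothing, so
	 * check() is never called and the bind silently never happens. */
	assert(check() == 0);

	/* Robust: the call is unconditional, only the check disappears. */
	int ret = check();
	assert(!ret);
	(void) ret;	/* silence unused-variable warning under NDEBUG */
	return 0;
}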
@@ -55,6 +55,8 @@ test_srcs := \
 	ce593a6c480a.c \
 	close-opath.c \
 	connect.c \
+	connect-rep.c \
+	coredump.c \
 	cq-full.c \
 	cq-overflow.c \
 	cq-peek-batch.c \
@@ -141,7 +143,10 @@ test_srcs := \
 	recv-msgall.c \
 	recv-msgall-stream.c \
 	recv-multishot.c \
+	reg-fd-only.c \
+	reg-hint.c \
 	reg-reg-ring.c \
+	regbuf-merge.c \
 	register-restrictions.c \
 	rename.c \
 	ringbuf-read.c \
@@ -161,6 +166,7 @@ test_srcs := \
 	single-issuer.c \
 	skip-cqe.c \
 	socket.c \
+	socket-io-cmd.c \
 	socket-rw.c \
 	socket-rw-eagain.c \
 	socket-rw-offset.c \
@@ -185,7 +191,6 @@ test_srcs := \
 	thread-exit.c \
 	timeout.c \
 	timeout-new.c \
-	timeout-overflow.c \
 	tty-write-dpoll.c \
 	unlink.c \
 	version.c \
@@ -221,7 +226,9 @@ all: $(test_targets)
 helpers.o: helpers.c
 	$(QUIET_CC)$(CC) $(CPPFLAGS) $(CFLAGS) -o $@ -c $<
 
-%.t: %.c $(helpers) helpers.h ../src/liburing.a
+LIBURING := $(shell if [ -e ../src/liburing.a ]; then echo ../src/liburing.a; fi)
+
+%.t: %.c $(helpers) helpers.h $(LIBURING)
 	$(QUIET_CC)$(CC) $(CPPFLAGS) $(CFLAGS) -o $@ $< $(helpers) $(LDFLAGS)
 
 #
@@ -230,7 +237,7 @@ helpers.o: helpers.c
 # cc1plus: warning: command-line option '-Wmissing-prototypes' \
 # is valid for C/ObjC but not for C++
 #
-%.t: %.cc $(helpers) helpers.h ../src/liburing.a
+%.t: %.cc $(helpers) helpers.h $(LIBURING)
 	$(QUIET_CXX)$(CXX) \
 		$(patsubst -Wmissing-prototypes,,$(CPPFLAGS)) \
 		$(patsubst -Wmissing-prototypes,,$(CXXFLAGS)) \
@@ -195,7 +195,8 @@ static int start_accept_listen(struct sockaddr_in *addr, int port_off,
 
 	addr->sin_family = AF_INET;
 	addr->sin_addr.s_addr = inet_addr("127.0.0.1");
-	assert(!t_bind_ephemeral_port(fd, addr));
+	ret = t_bind_ephemeral_port(fd, addr);
+	assert(!ret);
 	ret = listen(fd, 128);
 	assert(ret != -1);