polyphony 0.83 → 0.84

@@ -1,585 +0,0 @@
- /* SPDX-License-Identifier: MIT */
- #ifndef LIB_URING_H
- #define LIB_URING_H
-
- #include <sys/socket.h>
- #include <sys/uio.h>
- #include <sys/stat.h>
- #include <errno.h>
- #include <signal.h>
- #include <stdbool.h>
- #include <inttypes.h>
- #include <time.h>
- #include <linux/swab.h>
- #include "liburing/compat.h"
- #include "liburing/io_uring.h"
- #include "liburing/barrier.h"
-
- #ifdef __cplusplus
- extern "C" {
- #endif
-
- /*
-  * Library interface to io_uring
-  */
- struct io_uring_sq {
- 	unsigned *khead;
- 	unsigned *ktail;
- 	unsigned *kring_mask;
- 	unsigned *kring_entries;
- 	unsigned *kflags;
- 	unsigned *kdropped;
- 	unsigned *array;
- 	struct io_uring_sqe *sqes;
-
- 	unsigned sqe_head;
- 	unsigned sqe_tail;
-
- 	size_t ring_sz;
- 	void *ring_ptr;
-
- 	unsigned pad[4];
- };
-
- struct io_uring_cq {
- 	unsigned *khead;
- 	unsigned *ktail;
- 	unsigned *kring_mask;
- 	unsigned *kring_entries;
- 	unsigned *kflags;
- 	unsigned *koverflow;
- 	struct io_uring_cqe *cqes;
-
- 	size_t ring_sz;
- 	void *ring_ptr;
-
- 	unsigned pad[4];
- };
-
- struct io_uring {
- 	struct io_uring_sq sq;
- 	struct io_uring_cq cq;
- 	unsigned flags;
- 	int ring_fd;
-
- 	unsigned pad[4];
- };
-
- /*
-  * Library interface
-  */
-
- /*
-  * return an allocated io_uring_probe structure, or NULL if probe fails (for
-  * example, if it is not available). The caller is responsible for freeing it
-  */
- extern struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring);
- /* same as io_uring_get_probe_ring, but takes care of ring init and teardown */
- extern struct io_uring_probe *io_uring_get_probe(void);
-
- /*
-  * frees a probe allocated through io_uring_get_probe() or
-  * io_uring_get_probe_ring()
-  */
- extern void io_uring_free_probe(struct io_uring_probe *probe);
-
- static inline int io_uring_opcode_supported(struct io_uring_probe *p, int op)
- {
- 	if (op > p->last_op)
- 		return 0;
- 	return (p->ops[op].flags & IO_URING_OP_SUPPORTED) != 0;
- }
-
- extern int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
- 	struct io_uring_params *p);
- extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
- 	unsigned flags);
- extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
- 	struct io_uring *ring);
- extern int io_uring_ring_dontfork(struct io_uring *ring);
- extern void io_uring_queue_exit(struct io_uring *ring);
- unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
- 	struct io_uring_cqe **cqes, unsigned count);
- extern int io_uring_wait_cqes(struct io_uring *ring,
- 	struct io_uring_cqe **cqe_ptr, unsigned wait_nr,
- 	struct __kernel_timespec *ts, sigset_t *sigmask);
- extern int io_uring_wait_cqe_timeout(struct io_uring *ring,
- 	struct io_uring_cqe **cqe_ptr, struct __kernel_timespec *ts);
- extern int io_uring_submit(struct io_uring *ring);
- extern int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr);
- extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
-
- extern int io_uring_register_buffers(struct io_uring *ring,
- 	const struct iovec *iovecs,
- 	unsigned nr_iovecs);
- extern int io_uring_unregister_buffers(struct io_uring *ring);
- extern int io_uring_register_files(struct io_uring *ring, const int *files,
- 	unsigned nr_files);
- extern int io_uring_unregister_files(struct io_uring *ring);
- extern int io_uring_register_files_update(struct io_uring *ring, unsigned off,
- 	int *files, unsigned nr_files);
- extern int io_uring_register_eventfd(struct io_uring *ring, int fd);
- extern int io_uring_register_eventfd_async(struct io_uring *ring, int fd);
- extern int io_uring_unregister_eventfd(struct io_uring *ring);
- extern int io_uring_register_probe(struct io_uring *ring,
- 	struct io_uring_probe *p, unsigned nr);
- extern int io_uring_register_personality(struct io_uring *ring);
- extern int io_uring_unregister_personality(struct io_uring *ring, int id);
- extern int io_uring_register_restrictions(struct io_uring *ring,
- 	struct io_uring_restriction *res,
- 	unsigned int nr_res);
- extern int io_uring_enable_rings(struct io_uring *ring);
- extern int __io_uring_sqring_wait(struct io_uring *ring);
-
- /*
-  * Helper for the peek/wait single cqe functions. Exported because of that,
-  * but probably shouldn't be used directly in an application.
-  */
- extern int __io_uring_get_cqe(struct io_uring *ring,
- 	struct io_uring_cqe **cqe_ptr, unsigned submit,
- 	unsigned wait_nr, sigset_t *sigmask);
-
- #define LIBURING_UDATA_TIMEOUT ((__u64) -1)
-
- #define io_uring_for_each_cqe(ring, head, cqe) \
- 	/* \
- 	 * io_uring_smp_load_acquire() enforces the order of tail \
- 	 * and CQE reads. \
- 	 */ \
- 	for (head = *(ring)->cq.khead; \
- 	     (cqe = (head != io_uring_smp_load_acquire((ring)->cq.ktail) ? \
- 		&(ring)->cq.cqes[head & (*(ring)->cq.kring_mask)] : NULL)); \
- 	     head++) \
-
- /*
-  * Must be called after io_uring_for_each_cqe()
-  */
- static inline void io_uring_cq_advance(struct io_uring *ring,
- 	unsigned nr)
- {
- 	if (nr) {
- 		struct io_uring_cq *cq = &ring->cq;
-
- 		/*
- 		 * Ensure that the kernel only sees the new value of the head
- 		 * index after the CQEs have been read.
- 		 */
- 		io_uring_smp_store_release(cq->khead, *cq->khead + nr);
- 	}
- }
-
- /*
-  * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
-  * been processed by the application.
-  */
- static inline void io_uring_cqe_seen(struct io_uring *ring,
- 	struct io_uring_cqe *cqe)
- {
- 	if (cqe)
- 		io_uring_cq_advance(ring, 1);
- }
-
- /*
-  * Command prep helpers
-  */
- static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
- {
- 	sqe->user_data = (unsigned long) data;
- }
-
- static inline void *io_uring_cqe_get_data(const struct io_uring_cqe *cqe)
- {
- 	return (void *) (uintptr_t) cqe->user_data;
- }
-
- static inline void io_uring_sqe_set_flags(struct io_uring_sqe *sqe,
- 	unsigned flags)
- {
- 	sqe->flags = flags;
- }
-
- static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
- 	const void *addr, unsigned len,
- 	__u64 offset)
- {
- 	sqe->opcode = op;
- 	sqe->flags = 0;
- 	sqe->ioprio = 0;
- 	sqe->fd = fd;
- 	sqe->off = offset;
- 	sqe->addr = (unsigned long) addr;
- 	sqe->len = len;
- 	sqe->rw_flags = 0;
- 	sqe->user_data = 0;
- 	sqe->__pad2[0] = sqe->__pad2[1] = sqe->__pad2[2] = 0;
- }
-
- static inline void io_uring_prep_splice(struct io_uring_sqe *sqe,
- 	int fd_in, int64_t off_in,
- 	int fd_out, int64_t off_out,
- 	unsigned int nbytes,
- 	unsigned int splice_flags)
- {
- 	io_uring_prep_rw(IORING_OP_SPLICE, sqe, fd_out, NULL, nbytes, off_out);
- 	sqe->splice_off_in = off_in;
- 	sqe->splice_fd_in = fd_in;
- 	sqe->splice_flags = splice_flags;
- }
-
- static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
- 	const struct iovec *iovecs,
- 	unsigned nr_vecs, off_t offset)
- {
- 	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
- }
-
- static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
- 	void *buf, unsigned nbytes,
- 	off_t offset, int buf_index)
- {
- 	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
- 	sqe->buf_index = buf_index;
- }
-
- static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
- 	const struct iovec *iovecs,
- 	unsigned nr_vecs, off_t offset)
- {
- 	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
- }
-
- static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
- 	const void *buf, unsigned nbytes,
- 	off_t offset, int buf_index)
- {
- 	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
- 	sqe->buf_index = buf_index;
- }
-
- static inline void io_uring_prep_recvmsg(struct io_uring_sqe *sqe, int fd,
- 	struct msghdr *msg, unsigned flags)
- {
- 	io_uring_prep_rw(IORING_OP_RECVMSG, sqe, fd, msg, 1, 0);
- 	sqe->msg_flags = flags;
- }
-
- static inline void io_uring_prep_sendmsg(struct io_uring_sqe *sqe, int fd,
- 	const struct msghdr *msg, unsigned flags)
- {
- 	io_uring_prep_rw(IORING_OP_SENDMSG, sqe, fd, msg, 1, 0);
- 	sqe->msg_flags = flags;
- }
-
- static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
- 	unsigned poll_mask)
- {
- 	io_uring_prep_rw(IORING_OP_POLL_ADD, sqe, fd, NULL, 0, 0);
- #if __BYTE_ORDER == __BIG_ENDIAN
- 	poll_mask = __swahw32(poll_mask);
- #endif
- 	sqe->poll32_events = poll_mask;
- }
-
- static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
- 	void *user_data)
- {
- 	io_uring_prep_rw(IORING_OP_POLL_REMOVE, sqe, -1, user_data, 0, 0);
- }
-
- static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
- 	unsigned fsync_flags)
- {
- 	io_uring_prep_rw(IORING_OP_FSYNC, sqe, fd, NULL, 0, 0);
- 	sqe->fsync_flags = fsync_flags;
- }
-
- static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
- {
- 	io_uring_prep_rw(IORING_OP_NOP, sqe, -1, NULL, 0, 0);
- }
-
- static inline void io_uring_prep_timeout(struct io_uring_sqe *sqe,
- 	struct __kernel_timespec *ts,
- 	unsigned count, unsigned flags)
- {
- 	io_uring_prep_rw(IORING_OP_TIMEOUT, sqe, -1, ts, 1, count);
- 	sqe->timeout_flags = flags;
- }
-
- static inline void io_uring_prep_timeout_remove(struct io_uring_sqe *sqe,
- 	__u64 user_data, unsigned flags)
- {
- 	io_uring_prep_rw(IORING_OP_TIMEOUT_REMOVE, sqe, -1,
- 		(void *)(unsigned long)user_data, 0, 0);
- 	sqe->timeout_flags = flags;
- }
-
- static inline void io_uring_prep_accept(struct io_uring_sqe *sqe, int fd,
- 	struct sockaddr *addr,
- 	socklen_t *addrlen, int flags)
- {
- 	io_uring_prep_rw(IORING_OP_ACCEPT, sqe, fd, addr, 0,
- 		(__u64) (unsigned long) addrlen);
- 	sqe->accept_flags = flags;
- }
-
- static inline void io_uring_prep_cancel(struct io_uring_sqe *sqe, void *user_data,
- 	int flags)
- {
- 	io_uring_prep_rw(IORING_OP_ASYNC_CANCEL, sqe, -1, user_data, 0, 0);
- 	sqe->cancel_flags = flags;
- }
-
- static inline void io_uring_prep_link_timeout(struct io_uring_sqe *sqe,
- 	struct __kernel_timespec *ts,
- 	unsigned flags)
- {
- 	io_uring_prep_rw(IORING_OP_LINK_TIMEOUT, sqe, -1, ts, 1, 0);
- 	sqe->timeout_flags = flags;
- }
-
- static inline void io_uring_prep_connect(struct io_uring_sqe *sqe, int fd,
- 	const struct sockaddr *addr,
- 	socklen_t addrlen)
- {
- 	io_uring_prep_rw(IORING_OP_CONNECT, sqe, fd, addr, 0, addrlen);
- }
-
- static inline void io_uring_prep_files_update(struct io_uring_sqe *sqe,
- 	int *fds, unsigned nr_fds,
- 	int offset)
- {
- 	io_uring_prep_rw(IORING_OP_FILES_UPDATE, sqe, -1, fds, nr_fds, offset);
- }
-
- static inline void io_uring_prep_fallocate(struct io_uring_sqe *sqe, int fd,
- 	int mode, off_t offset, off_t len)
- {
-
- 	io_uring_prep_rw(IORING_OP_FALLOCATE, sqe, fd,
- 		(const uintptr_t *) (unsigned long) len, mode, offset);
- }
-
- static inline void io_uring_prep_openat(struct io_uring_sqe *sqe, int dfd,
- 	const char *path, int flags, mode_t mode)
- {
- 	io_uring_prep_rw(IORING_OP_OPENAT, sqe, dfd, path, mode, 0);
- 	sqe->open_flags = flags;
- }
-
- static inline void io_uring_prep_close(struct io_uring_sqe *sqe, int fd)
- {
- 	io_uring_prep_rw(IORING_OP_CLOSE, sqe, fd, NULL, 0, 0);
- }
-
- static inline void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
- 	void *buf, unsigned nbytes, off_t offset)
- {
- 	io_uring_prep_rw(IORING_OP_READ, sqe, fd, buf, nbytes, offset);
- }
-
- static inline void io_uring_prep_write(struct io_uring_sqe *sqe, int fd,
- 	const void *buf, unsigned nbytes, off_t offset)
- {
- 	io_uring_prep_rw(IORING_OP_WRITE, sqe, fd, buf, nbytes, offset);
- }
-
- struct statx;
- static inline void io_uring_prep_statx(struct io_uring_sqe *sqe, int dfd,
- 	const char *path, int flags, unsigned mask,
- 	struct statx *statxbuf)
- {
- 	io_uring_prep_rw(IORING_OP_STATX, sqe, dfd, path, mask,
- 		(__u64) (unsigned long) statxbuf);
- 	sqe->statx_flags = flags;
- }
-
- static inline void io_uring_prep_fadvise(struct io_uring_sqe *sqe, int fd,
- 	off_t offset, off_t len, int advice)
- {
- 	io_uring_prep_rw(IORING_OP_FADVISE, sqe, fd, NULL, len, offset);
- 	sqe->fadvise_advice = advice;
- }
-
- static inline void io_uring_prep_madvise(struct io_uring_sqe *sqe, void *addr,
- 	off_t length, int advice)
- {
- 	io_uring_prep_rw(IORING_OP_MADVISE, sqe, -1, addr, length, 0);
- 	sqe->fadvise_advice = advice;
- }
-
- static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
- 	const void *buf, size_t len, int flags)
- {
- 	io_uring_prep_rw(IORING_OP_SEND, sqe, sockfd, buf, len, 0);
- 	sqe->msg_flags = flags;
- }
-
- static inline void io_uring_prep_recv(struct io_uring_sqe *sqe, int sockfd,
- 	void *buf, size_t len, int flags)
- {
- 	io_uring_prep_rw(IORING_OP_RECV, sqe, sockfd, buf, len, 0);
- 	sqe->msg_flags = flags;
- }
-
- static inline void io_uring_prep_openat2(struct io_uring_sqe *sqe, int dfd,
- 	const char *path, struct open_how *how)
- {
- 	io_uring_prep_rw(IORING_OP_OPENAT2, sqe, dfd, path, sizeof(*how),
- 		(uint64_t) (uintptr_t) how);
- }
-
- struct epoll_event;
- static inline void io_uring_prep_epoll_ctl(struct io_uring_sqe *sqe, int epfd,
- 	int fd, int op,
- 	struct epoll_event *ev)
- {
- 	io_uring_prep_rw(IORING_OP_EPOLL_CTL, sqe, epfd, ev, op, fd);
- }
-
- static inline void io_uring_prep_provide_buffers(struct io_uring_sqe *sqe,
- 	void *addr, int len, int nr,
- 	int bgid, int bid)
- {
- 	io_uring_prep_rw(IORING_OP_PROVIDE_BUFFERS, sqe, nr, addr, len, bid);
- 	sqe->buf_group = bgid;
- }
-
- static inline void io_uring_prep_remove_buffers(struct io_uring_sqe *sqe,
- 	int nr, int bgid)
- {
- 	io_uring_prep_rw(IORING_OP_REMOVE_BUFFERS, sqe, nr, NULL, 0, 0);
- 	sqe->buf_group = bgid;
- }
-
- static inline void io_uring_prep_shutdown(struct io_uring_sqe *sqe, int fd,
- 	int how)
- {
- 	io_uring_prep_rw(IORING_OP_SHUTDOWN, sqe, fd, NULL, how, 0);
- }
-
- /*
-  * Returns number of unconsumed (if SQPOLL) or unsubmitted entries exist in
-  * the SQ ring
-  */
- static inline unsigned io_uring_sq_ready(struct io_uring *ring)
- {
- 	/*
- 	 * Without a barrier, we could miss an update and think the SQ wasn't ready.
- 	 * We don't need the load acquire for non-SQPOLL since then we drive updates.
- 	 */
- 	if (ring->flags & IORING_SETUP_SQPOLL)
- 		return ring->sq.sqe_tail - io_uring_smp_load_acquire(ring->sq.khead);
-
- 	/* always use real head, to avoid losing sync for short submit */
- 	return ring->sq.sqe_tail - *ring->sq.khead;
- }
-
- /*
-  * Returns how much space is left in the SQ ring.
-  */
- static inline unsigned io_uring_sq_space_left(struct io_uring *ring)
- {
- 	return *ring->sq.kring_entries - io_uring_sq_ready(ring);
- }
-
- /*
-  * Only applicable when using SQPOLL - allows the caller to wait for space
-  * to free up in the SQ ring, which happens when the kernel side thread has
-  * consumed one or more entries. If the SQ ring is currently non-full, no
-  * action is taken. Note: may return -EINVAL if the kernel doesn't support
-  * this feature.
-  */
- static inline int io_uring_sqring_wait(struct io_uring *ring)
- {
- 	if (!(ring->flags & IORING_SETUP_SQPOLL))
- 		return 0;
- 	if (io_uring_sq_space_left(ring))
- 		return 0;
-
- 	return __io_uring_sqring_wait(ring);
- }
-
- /*
-  * Returns how many unconsumed entries are ready in the CQ ring
-  */
- static inline unsigned io_uring_cq_ready(struct io_uring *ring)
- {
- 	return io_uring_smp_load_acquire(ring->cq.ktail) - *ring->cq.khead;
- }
-
- /*
-  * Returns true if the eventfd notification is currently enabled
-  */
- static inline bool io_uring_cq_eventfd_enabled(struct io_uring *ring)
- {
- 	if (!ring->cq.kflags)
- 		return true;
-
- 	return !(*ring->cq.kflags & IORING_CQ_EVENTFD_DISABLED);
- }
-
- /*
-  * Toggle eventfd notification on or off, if an eventfd is registered with
-  * the ring.
-  */
- static inline int io_uring_cq_eventfd_toggle(struct io_uring *ring,
- 	bool enabled)
- {
- 	uint32_t flags;
-
- 	if (!!enabled == io_uring_cq_eventfd_enabled(ring))
- 		return 0;
-
- 	if (!ring->cq.kflags)
- 		return -EOPNOTSUPP;
-
- 	flags = *ring->cq.kflags;
-
- 	if (enabled)
- 		flags &= ~IORING_CQ_EVENTFD_DISABLED;
- 	else
- 		flags |= IORING_CQ_EVENTFD_DISABLED;
-
- 	IO_URING_WRITE_ONCE(*ring->cq.kflags, flags);
-
- 	return 0;
- }
-
- /*
-  * Return an IO completion, waiting for 'wait_nr' completions if one isn't
-  * readily available. Returns 0 with cqe_ptr filled in on success, -errno on
-  * failure.
-  */
- static inline int io_uring_wait_cqe_nr(struct io_uring *ring,
- 	struct io_uring_cqe **cqe_ptr,
- 	unsigned wait_nr)
- {
- 	return __io_uring_get_cqe(ring, cqe_ptr, 0, wait_nr, NULL);
- }
-
- /*
-  * Return an IO completion, if one is readily available. Returns 0 with
-  * cqe_ptr filled in on success, -errno on failure.
-  */
- static inline int io_uring_peek_cqe(struct io_uring *ring,
- 	struct io_uring_cqe **cqe_ptr)
- {
- 	return io_uring_wait_cqe_nr(ring, cqe_ptr, 0);
- }
-
- /*
-  * Return an IO completion, waiting for it if necessary. Returns 0 with
-  * cqe_ptr filled in on success, -errno on failure.
-  */
- static inline int io_uring_wait_cqe(struct io_uring *ring,
- 	struct io_uring_cqe **cqe_ptr)
- {
- 	return io_uring_wait_cqe_nr(ring, cqe_ptr, 1);
- }
-
- #ifdef __cplusplus
- }
- #endif
-
- #endif
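
For reference, the API declared in the removed header is driven with the usual liburing submit/complete round trip. The sketch below is not part of the diff; it uses only functions declared above (io_uring_queue_init, io_uring_get_sqe, io_uring_prep_read, io_uring_sqe_set_data, io_uring_submit, io_uring_wait_cqe, io_uring_cqe_get_data, io_uring_cqe_seen, io_uring_queue_exit), and the file path, queue depth, and buffer size are illustrative assumptions. Build on Linux with -luring.

/*
 * Minimal sketch of one read submitted and completed through io_uring.
 */
#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe = NULL;
	char buf[256];
	int fd;

	/* Set up an 8-entry submission/completion queue pair. */
	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = open("/etc/hostname", O_RDONLY);	/* hypothetical input file */
	if (fd < 0)
		return 1;

	/* Grab an SQE, describe the read, and tag it with user data. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, sizeof(buf) - 1, 0);
	io_uring_sqe_set_data(sqe, buf);

	/* Hand the SQE to the kernel and block for one completion. */
	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe) == 0 && cqe->res >= 0) {
		char *data = io_uring_cqe_get_data(cqe);	/* same pointer as buf */
		data[cqe->res] = '\0';
		printf("read %d bytes: %s", cqe->res, data);
	}

	/* Mark the CQE as consumed, then tear the ring down. */
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}

The io_uring_wait_cqe()/io_uring_cqe_seen() pairing follows the header's own comment: a CQE must be marked seen after the application has processed it, which is what advances the CQ head for the kernel.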