polyphony 0.83 → 0.84

data/ext/liburing/queue.c DELETED
@@ -1,333 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <stdbool.h>
-
-#include "liburing/compat.h"
-#include "liburing/io_uring.h"
-#include "liburing.h"
-#include "liburing/barrier.h"
-
-#include "syscall.h"
-
-/*
- * Returns true if we're not using SQ thread (thus nobody submits but us)
- * or if IORING_SQ_NEED_WAKEUP is set, so submit thread must be explicitly
- * awakened. For the latter case, we set the thread wakeup flag.
- */
-static inline bool sq_ring_needs_enter(struct io_uring *ring, unsigned *flags)
-{
-        if (!(ring->flags & IORING_SETUP_SQPOLL))
-                return true;
-        if (IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_NEED_WAKEUP) {
-                *flags |= IORING_ENTER_SQ_WAKEUP;
-                return true;
-        }
-
-        return false;
-}
-
-static inline bool cq_ring_needs_flush(struct io_uring *ring)
-{
-        return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
-}
-
-static int __io_uring_peek_cqe(struct io_uring *ring,
-                               struct io_uring_cqe **cqe_ptr)
-{
-        struct io_uring_cqe *cqe;
-        unsigned head;
-        int err = 0;
-
-        do {
-                io_uring_for_each_cqe(ring, head, cqe)
-                        break;
-                if (cqe) {
-                        if (cqe->user_data == LIBURING_UDATA_TIMEOUT) {
-                                if (cqe->res < 0)
-                                        err = cqe->res;
-                                io_uring_cq_advance(ring, 1);
-                                if (!err)
-                                        continue;
-                                cqe = NULL;
-                        }
-                }
-                break;
-        } while (1);
-
-        *cqe_ptr = cqe;
-        return err;
-}
-
-int __io_uring_get_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
-                       unsigned submit, unsigned wait_nr, sigset_t *sigmask)
-{
-        struct io_uring_cqe *cqe = NULL;
-        const int to_wait = wait_nr;
-        int ret = 0, err;
-
-        do {
-                bool cq_overflow_flush = false;
-                unsigned flags = 0;
-
-                err = __io_uring_peek_cqe(ring, &cqe);
-                if (err)
-                        break;
-                if (!cqe && !to_wait && !submit) {
-                        if (!cq_ring_needs_flush(ring)) {
-                                err = -EAGAIN;
-                                break;
-                        }
-                        cq_overflow_flush = true;
-                }
-                if (wait_nr && cqe)
-                        wait_nr--;
-                if (wait_nr || cq_overflow_flush)
-                        flags = IORING_ENTER_GETEVENTS;
-                if (submit)
-                        sq_ring_needs_enter(ring, &flags);
-                if (wait_nr || submit || cq_overflow_flush)
-                        ret = __sys_io_uring_enter(ring->ring_fd, submit,
-                                                   wait_nr, flags, sigmask);
-                if (ret < 0) {
-                        err = -errno;
-                } else if (ret == (int)submit) {
-                        submit = 0;
-                        /*
-                         * When SETUP_IOPOLL is set, __sys_io_uring_enter()
-                         * must be called to reap new completions but the call
-                         * won't be made if both wait_nr and submit are zero
-                         * so preserve wait_nr.
-                         */
-                        if (!(ring->flags & IORING_SETUP_IOPOLL))
-                                wait_nr = 0;
-                } else {
-                        submit -= ret;
-                }
-                if (cqe)
-                        break;
-        } while (!err);
-
-        *cqe_ptr = cqe;
-        return err;
-}
-
-/*
- * Fill in an array of IO completions up to count, if any are available.
- * Returns the amount of IO completions filled.
- */
-unsigned io_uring_peek_batch_cqe(struct io_uring *ring,
-                                 struct io_uring_cqe **cqes, unsigned count)
-{
-        unsigned ready;
-        bool overflow_checked = false;
-
-again:
-        ready = io_uring_cq_ready(ring);
-        if (ready) {
-                unsigned head = *ring->cq.khead;
-                unsigned mask = *ring->cq.kring_mask;
-                unsigned last;
-                int i = 0;
-
-                count = count > ready ? ready : count;
-                last = head + count;
-                for (;head != last; head++, i++)
-                        cqes[i] = &ring->cq.cqes[head & mask];
-
-                return count;
-        }
-
-        if (overflow_checked)
-                goto done;
-
-        if (cq_ring_needs_flush(ring)) {
-                __sys_io_uring_enter(ring->ring_fd, 0, 0,
-                                     IORING_ENTER_GETEVENTS, NULL);
-                overflow_checked = true;
-                goto again;
-        }
-
-done:
-        return 0;
-}
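Editor's note: the following is an illustrative sketch, not code from the gem or this diff. A typical consumer of io_uring_peek_batch_cqe() drains whatever completions are already posted without entering the kernel; the batch size of 32 and the printf are assumptions for the example.

#include <errno.h>
#include <liburing.h>
#include <stdio.h>

/* Drain already-posted completions in batches; returns how many were consumed. */
static unsigned drain_completions(struct io_uring *ring)
{
        struct io_uring_cqe *cqes[32];
        unsigned total = 0, n;

        while ((n = io_uring_peek_batch_cqe(ring, cqes, 32)) > 0) {
                for (unsigned i = 0; i < n; i++)
                        printf("cqe res=%d\n", cqes[i]->res);
                /* Mark the whole batch as seen so the kernel can reuse the CQ slots. */
                io_uring_cq_advance(ring, n);
                total += n;
        }
        return total;
}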
-
-/*
- * Sync internal state with kernel ring state on the SQ side. Returns the
- * number of pending items in the SQ ring, for the shared ring.
- */
-int __io_uring_flush_sq(struct io_uring *ring)
-{
-        struct io_uring_sq *sq = &ring->sq;
-        const unsigned mask = *sq->kring_mask;
-        unsigned ktail, to_submit;
-
-        if (sq->sqe_head == sq->sqe_tail) {
-                ktail = *sq->ktail;
-                goto out;
-        }
-
-        /*
-         * Fill in sqes that we have queued up, adding them to the kernel ring
-         */
-        ktail = *sq->ktail;
-        to_submit = sq->sqe_tail - sq->sqe_head;
-        while (to_submit--) {
-                sq->array[ktail & mask] = sq->sqe_head & mask;
-                ktail++;
-                sq->sqe_head++;
-        }
-
-        /*
-         * Ensure that the kernel sees the SQE updates before it sees the tail
-         * update.
-         */
-        io_uring_smp_store_release(sq->ktail, ktail);
-out:
-        return ktail - *sq->khead;
-}
-
-/*
- * Like io_uring_wait_cqe(), except it accepts a timeout value as well. Note
- * that an sqe is used internally to handle the timeout. Applications using
- * this function must never set sqe->user_data to LIBURING_UDATA_TIMEOUT!
- *
- * If 'ts' is specified, the application need not call io_uring_submit() before
- * calling this function, as we will do that on its behalf. From this it also
- * follows that this function isn't safe to use for applications that split SQ
- * and CQ handling between two threads and expect that to work without
- * synchronization, as this function manipulates both the SQ and CQ side.
- */
-int io_uring_wait_cqes(struct io_uring *ring, struct io_uring_cqe **cqe_ptr,
-                       unsigned wait_nr, struct __kernel_timespec *ts,
-                       sigset_t *sigmask)
-{
-        unsigned to_submit = 0;
-
-        if (ts) {
-                struct io_uring_sqe *sqe;
-                int ret;
-
-                /*
-                 * If the SQ ring is full, we may need to submit IO first
-                 */
-                sqe = io_uring_get_sqe(ring);
-                if (!sqe) {
-                        ret = io_uring_submit(ring);
-                        if (ret < 0)
-                                return ret;
-                        sqe = io_uring_get_sqe(ring);
-                        if (!sqe)
-                                return -EAGAIN;
-                }
-                io_uring_prep_timeout(sqe, ts, wait_nr, 0);
-                sqe->user_data = LIBURING_UDATA_TIMEOUT;
-                to_submit = __io_uring_flush_sq(ring);
-        }
-
-        return __io_uring_get_cqe(ring, cqe_ptr, to_submit, wait_nr, sigmask);
-}
-
-/*
- * See io_uring_wait_cqes() - this function is the same, it just always uses
- * '1' as the wait_nr.
- */
-int io_uring_wait_cqe_timeout(struct io_uring *ring,
-                              struct io_uring_cqe **cqe_ptr,
-                              struct __kernel_timespec *ts)
-{
-        return io_uring_wait_cqes(ring, cqe_ptr, 1, ts, NULL);
-}
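Editor's note: as a point of reference (illustrative only, not from the gem), waiting for a completion with a one-second deadline through io_uring_wait_cqe_timeout() looks roughly like this; error handling is abbreviated.

#include <liburing.h>

/* Wait up to one second for the next completion; returns its res, or a -errno
 * such as -ETIME when the timeout fires first. */
static int wait_one_second(struct io_uring *ring)
{
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        struct io_uring_cqe *cqe;
        int ret = io_uring_wait_cqe_timeout(ring, &cqe, &ts);

        if (ret < 0)
                return ret;
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);   /* release the CQE back to the ring */
        return ret;
}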
-
-/*
- * Submit sqes acquired from io_uring_get_sqe() to the kernel.
- *
- * Returns number of sqes submitted
- */
-static int __io_uring_submit(struct io_uring *ring, unsigned submitted,
-                             unsigned wait_nr)
-{
-        unsigned flags;
-        int ret;
-
-        flags = 0;
-        if (sq_ring_needs_enter(ring, &flags) || wait_nr) {
-                if (wait_nr || (ring->flags & IORING_SETUP_IOPOLL))
-                        flags |= IORING_ENTER_GETEVENTS;
-
-                ret = __sys_io_uring_enter(ring->ring_fd, submitted, wait_nr,
-                                           flags, NULL);
-                if (ret < 0)
-                        return -errno;
-        } else
-                ret = submitted;
-
-        return ret;
-}
-
-static int __io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
-{
-        return __io_uring_submit(ring, __io_uring_flush_sq(ring), wait_nr);
-}
-
-/*
- * Submit sqes acquired from io_uring_get_sqe() to the kernel.
- *
- * Returns number of sqes submitted
- */
-int io_uring_submit(struct io_uring *ring)
-{
-        return __io_uring_submit_and_wait(ring, 0);
-}
-
-/*
- * Like io_uring_submit(), but allows waiting for events as well.
- *
- * Returns number of sqes submitted
- */
-int io_uring_submit_and_wait(struct io_uring *ring, unsigned wait_nr)
-{
-        return __io_uring_submit_and_wait(ring, wait_nr);
-}
-
-static inline struct io_uring_sqe *
-__io_uring_get_sqe(struct io_uring_sq *sq, unsigned int __head)
-{
-        unsigned int __next = (sq)->sqe_tail + 1;
-        struct io_uring_sqe *__sqe = NULL;
-
-        if (__next - __head <= *(sq)->kring_entries) {
-                __sqe = &(sq)->sqes[(sq)->sqe_tail & *(sq)->kring_mask];
-                (sq)->sqe_tail = __next;
-        }
-        return __sqe;
-}
-
-/*
- * Return an sqe to fill. Application must later call io_uring_submit()
- * when it's ready to tell the kernel about it. The caller may call this
- * function multiple times before calling io_uring_submit().
- *
- * Returns a vacant sqe, or NULL if we're full.
- */
-struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
-{
-        struct io_uring_sq *sq = &ring->sq;
-
-        return __io_uring_get_sqe(sq, io_uring_smp_load_acquire(sq->khead));
-}
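Editor's note: for orientation (an illustrative sketch, not code from this gem), the usual get-sqe / submit / wait round trip over these helpers is shown below; the NOP opcode and the 0x1234 tag are assumptions for the example.

#include <liburing.h>
#include <stdio.h>

/* Queue a single no-op request, submit it, and reap its completion. */
static int submit_one_nop(struct io_uring *ring)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EAGAIN;                       /* SQ ring is full */
        io_uring_prep_nop(sqe);
        io_uring_sqe_set_data(sqe, (void *)0x1234);   /* illustrative tag */

        ret = io_uring_submit(ring);                  /* hand the sqe to the kernel */
        if (ret < 0)
                return ret;

        ret = io_uring_wait_cqe(ring, &cqe);          /* block until the completion arrives */
        if (ret < 0)
                return ret;
        printf("completed, res=%d data=%p\n", cqe->res, io_uring_cqe_get_data(cqe));
        io_uring_cqe_seen(ring, cqe);
        return 0;
}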
323
-
324
- int __io_uring_sqring_wait(struct io_uring *ring)
325
- {
326
- int ret;
327
-
328
- ret = __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_SQ_WAIT,
329
- NULL);
330
- if (ret < 0)
331
- ret = -errno;
332
- return ret;
333
- }
data/ext/liburing/register.c DELETED
@@ -1,187 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-
-#include "liburing/compat.h"
-#include "liburing/io_uring.h"
-#include "liburing.h"
-
-#include "syscall.h"
-
-int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
-                              unsigned nr_iovecs)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_BUFFERS,
-                                      iovecs, nr_iovecs);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
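Editor's note: a minimal usage sketch (not part of this diff); the 4 KiB size and posix_memalign are assumptions, and buffer index 0 would then be usable with the *_FIXED prep helpers such as io_uring_prep_read_fixed().

#include <errno.h>
#include <liburing.h>
#include <stdlib.h>

/* Register one page-aligned buffer so it can be used with fixed-buffer ops. */
static int register_one_buffer(struct io_uring *ring, void **buf_out)
{
        void *buf;
        struct iovec iov;
        int ret;

        if (posix_memalign(&buf, 4096, 4096))
                return -ENOMEM;
        iov.iov_base = buf;
        iov.iov_len = 4096;

        ret = io_uring_register_buffers(ring, &iov, 1);
        if (ret < 0) {                  /* returns -errno on failure */
                free(buf);
                return ret;
        }
        *buf_out = buf;                 /* this is registered buffer index 0 */
        return 0;
}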
-
-int io_uring_unregister_buffers(struct io_uring *ring)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_BUFFERS,
-                                      NULL, 0);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
-
-/*
- * Register an update for an existing file set. The updates will start at
- * 'off' in the original array, and 'nr_files' is the number of files we'll
- * update.
- *
- * Returns number of files updated on success, -ERROR on failure.
- */
-int io_uring_register_files_update(struct io_uring *ring, unsigned off,
-                                   int *files, unsigned nr_files)
-{
-        struct io_uring_files_update up = {
-                .offset = off,
-                .fds = (unsigned long) files,
-        };
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd,
-                                      IORING_REGISTER_FILES_UPDATE, &up,
-                                      nr_files);
-        if (ret < 0)
-                return -errno;
-
-        return ret;
-}
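Editor's note: an illustrative sketch only, assuming a kernel that accepts sparse (-1) entries when registering a file set; the path argument and two-slot table are invented for the example.

#include <errno.h>
#include <fcntl.h>
#include <liburing.h>
#include <unistd.h>

/* Register two fd slots, then point slot 0 at a newly opened file. */
static int swap_registered_file(struct io_uring *ring, const char *path)
{
        int fds[2] = { -1, -1 };        /* -1 marks a sparse/empty slot */
        int newfd, ret;

        ret = io_uring_register_files(ring, fds, 2);
        if (ret < 0)
                return ret;

        newfd = open(path, O_RDONLY);
        if (newfd < 0)
                return -errno;

        /* Update one entry starting at offset 0; returns the count updated. */
        ret = io_uring_register_files_update(ring, 0, &newfd, 1);
        return ret < 0 ? ret : 0;
}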
-
-int io_uring_register_files(struct io_uring *ring, const int *files,
-                            unsigned nr_files)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_FILES,
-                                      files, nr_files);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
-
-int io_uring_unregister_files(struct io_uring *ring)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_FILES,
-                                      NULL, 0);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
-
-int io_uring_register_eventfd(struct io_uring *ring, int event_fd)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_EVENTFD,
-                                      &event_fd, 1);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
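Editor's note: a hedged usage sketch, not from the gem. Registering an eventfd lets an external event loop learn about new completions by polling a plain fd; the EFD_CLOEXEC flag is an assumption here.

#include <errno.h>
#include <liburing.h>
#include <sys/eventfd.h>
#include <unistd.h>

/* Create an eventfd that the kernel signals whenever a CQE is posted. */
static int attach_completion_eventfd(struct io_uring *ring)
{
        int efd = eventfd(0, EFD_CLOEXEC);
        int ret;

        if (efd < 0)
                return -errno;
        ret = io_uring_register_eventfd(ring, efd);
        if (ret < 0) {
                close(efd);
                return ret;
        }
        return efd;     /* poll/read this fd (e.g. from epoll) to get wakeups */
}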
-
-int io_uring_unregister_eventfd(struct io_uring *ring)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_EVENTFD,
-                                      NULL, 0);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
-
-int io_uring_register_eventfd_async(struct io_uring *ring, int event_fd)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_EVENTFD_ASYNC,
-                                      &event_fd, 1);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
-
-int io_uring_register_probe(struct io_uring *ring, struct io_uring_probe *p,
-                            unsigned int nr_ops)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_PROBE,
-                                      p, nr_ops);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
-
-int io_uring_register_personality(struct io_uring *ring)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_PERSONALITY,
-                                      NULL, 0);
-        if (ret < 0)
-                return -errno;
-
-        return ret;
-}
-
-int io_uring_unregister_personality(struct io_uring *ring, int id)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_PERSONALITY,
-                                      NULL, id);
-        if (ret < 0)
-                return -errno;
-
-        return ret;
-}
-
-int io_uring_register_restrictions(struct io_uring *ring,
-                                   struct io_uring_restriction *res,
-                                   unsigned int nr_res)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_RESTRICTIONS,
-                                      res, nr_res);
-        if (ret < 0)
-                return -errno;
-
-        return 0;
-}
-
-int io_uring_enable_rings(struct io_uring *ring)
-{
-        int ret;
-
-        ret = __sys_io_uring_register(ring->ring_fd,
-                                      IORING_REGISTER_ENABLE_RINGS, NULL, 0);
-        if (ret < 0)
-                return -errno;
-
-        return ret;
-}
data/ext/liburing/setup.c DELETED
@@ -1,210 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <errno.h>
-#include <string.h>
-#include <stdlib.h>
-
-#include "liburing/compat.h"
-#include "liburing/io_uring.h"
-#include "liburing.h"
-
-#include "syscall.h"
-
-static void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq)
-{
-        munmap(sq->ring_ptr, sq->ring_sz);
-        if (cq->ring_ptr && cq->ring_ptr != sq->ring_ptr)
-                munmap(cq->ring_ptr, cq->ring_sz);
-}
-
-static int io_uring_mmap(int fd, struct io_uring_params *p,
-                         struct io_uring_sq *sq, struct io_uring_cq *cq)
-{
-        size_t size;
-        int ret;
-
-        sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
-        cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
-
-        if (p->features & IORING_FEAT_SINGLE_MMAP) {
-                if (cq->ring_sz > sq->ring_sz)
-                        sq->ring_sz = cq->ring_sz;
-                cq->ring_sz = sq->ring_sz;
-        }
-        sq->ring_ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
-                            MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
-        if (sq->ring_ptr == MAP_FAILED)
-                return -errno;
-
-        if (p->features & IORING_FEAT_SINGLE_MMAP) {
-                cq->ring_ptr = sq->ring_ptr;
-        } else {
-                cq->ring_ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
-                                    MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
-                if (cq->ring_ptr == MAP_FAILED) {
-                        cq->ring_ptr = NULL;
-                        ret = -errno;
-                        goto err;
-                }
-        }
-
-        sq->khead = sq->ring_ptr + p->sq_off.head;
-        sq->ktail = sq->ring_ptr + p->sq_off.tail;
-        sq->kring_mask = sq->ring_ptr + p->sq_off.ring_mask;
-        sq->kring_entries = sq->ring_ptr + p->sq_off.ring_entries;
-        sq->kflags = sq->ring_ptr + p->sq_off.flags;
-        sq->kdropped = sq->ring_ptr + p->sq_off.dropped;
-        sq->array = sq->ring_ptr + p->sq_off.array;
-
-        size = p->sq_entries * sizeof(struct io_uring_sqe);
-        sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
-                        MAP_SHARED | MAP_POPULATE, fd,
-                        IORING_OFF_SQES);
-        if (sq->sqes == MAP_FAILED) {
-                ret = -errno;
-err:
-                io_uring_unmap_rings(sq, cq);
-                return ret;
-        }
-
-        cq->khead = cq->ring_ptr + p->cq_off.head;
-        cq->ktail = cq->ring_ptr + p->cq_off.tail;
-        cq->kring_mask = cq->ring_ptr + p->cq_off.ring_mask;
-        cq->kring_entries = cq->ring_ptr + p->cq_off.ring_entries;
-        cq->koverflow = cq->ring_ptr + p->cq_off.overflow;
-        cq->cqes = cq->ring_ptr + p->cq_off.cqes;
-        if (p->cq_off.flags)
-                cq->kflags = cq->ring_ptr + p->cq_off.flags;
-        return 0;
-}
-
-/*
- * For users that want to specify sq_thread_cpu or sq_thread_idle, this
- * interface is a convenient helper for mmap()ing the rings.
- * Returns -errno on error, or zero on success. On success, 'ring'
- * contains the necessary information to read/write to the rings.
- */
-int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
-{
-        int ret;
-
-        memset(ring, 0, sizeof(*ring));
-        ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
-        if (!ret) {
-                ring->flags = p->flags;
-                ring->ring_fd = fd;
-        }
-        return ret;
-}
-
-/*
- * Ensure that the mmap'ed rings aren't available to a child after a fork(2).
- * This uses madvise(..., MADV_DONTFORK) on the mmap'ed ranges.
- */
-int io_uring_ring_dontfork(struct io_uring *ring)
-{
-        size_t len;
-        int ret;
-
-        if (!ring->sq.ring_ptr || !ring->sq.sqes || !ring->cq.ring_ptr)
-                return -EINVAL;
-
-        len = *ring->sq.kring_entries * sizeof(struct io_uring_sqe);
-        ret = madvise(ring->sq.sqes, len, MADV_DONTFORK);
-        if (ret == -1)
-                return -errno;
-
-        len = ring->sq.ring_sz;
-        ret = madvise(ring->sq.ring_ptr, len, MADV_DONTFORK);
-        if (ret == -1)
-                return -errno;
-
-        if (ring->cq.ring_ptr != ring->sq.ring_ptr) {
-                len = ring->cq.ring_sz;
-                ret = madvise(ring->cq.ring_ptr, len, MADV_DONTFORK);
-                if (ret == -1)
-                        return -errno;
-        }
-
-        return 0;
-}
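Editor's note: as a usage illustration (not part of this diff), a process that forks workers might call io_uring_ring_dontfork() right after ring setup so children never inherit the parent's ring mappings; the queue depth of 256 is an arbitrary choice for the example.

#include <liburing.h>

/* Set up a ring and keep its mappings out of any forked children. */
static int init_ring_for_forking_server(struct io_uring *ring)
{
        int ret = io_uring_queue_init(256, ring, 0);
        if (ret < 0)
                return ret;

        ret = io_uring_ring_dontfork(ring);     /* madvise(MADV_DONTFORK) on the mappings */
        if (ret < 0) {
                io_uring_queue_exit(ring);
                return ret;
        }
        return 0;       /* safe to fork() now; children won't see the ring pages */
}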
-
-int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
-                               struct io_uring_params *p)
-{
-        int fd, ret;
-
-        fd = __sys_io_uring_setup(entries, p);
-        if (fd < 0)
-                return -errno;
-
-        ret = io_uring_queue_mmap(fd, p, ring);
-        if (ret)
-                close(fd);
-
-        return ret;
-}
-
-/*
- * Returns -errno on error, or zero on success. On success, 'ring'
- * contains the necessary information to read/write to the rings.
- */
-int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
-{
-        struct io_uring_params p;
-
-        memset(&p, 0, sizeof(p));
-        p.flags = flags;
-
-        return io_uring_queue_init_params(entries, ring, &p);
-}
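Editor's note: for context, a minimal sketch (not from the gem) of going through io_uring_queue_init_params() instead of the plain wrapper, which is useful when the caller wants to inspect the feature flags the kernel reports; the entry count of 8 is arbitrary.

#include <liburing.h>
#include <stdio.h>
#include <string.h>

/* Initialize a ring via the params variant and report one feature bit. */
static int init_with_params(struct io_uring *ring)
{
        struct io_uring_params p;
        int ret;

        memset(&p, 0, sizeof(p));
        ret = io_uring_queue_init_params(8, ring, &p);
        if (ret < 0)
                return ret;             /* -errno from setup or mmap */

        printf("single mmap: %s\n",
               (p.features & IORING_FEAT_SINGLE_MMAP) ? "yes" : "no");
        return 0;
}

Tearing the ring down again is simply io_uring_queue_exit(ring), as defined just below.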
-
-void io_uring_queue_exit(struct io_uring *ring)
-{
-        struct io_uring_sq *sq = &ring->sq;
-        struct io_uring_cq *cq = &ring->cq;
-
-        munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
-        io_uring_unmap_rings(sq, cq);
-        close(ring->ring_fd);
-}
-
-struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring)
-{
-        struct io_uring_probe *probe;
-        int r;
-
-        size_t len = sizeof(*probe) + 256 * sizeof(struct io_uring_probe_op);
-        probe = malloc(len);
-        memset(probe, 0, len);
-        r = io_uring_register_probe(ring, probe, 256);
-        if (r < 0)
-                goto fail;
-
-        return probe;
-fail:
-        free(probe);
-        return NULL;
-}
-
-struct io_uring_probe *io_uring_get_probe(void)
-{
-        struct io_uring ring;
-        struct io_uring_probe* probe = NULL;
-
-        int r = io_uring_queue_init(2, &ring, 0);
-        if (r < 0)
-                return NULL;
-
-        probe = io_uring_get_probe_ring(&ring);
-        io_uring_queue_exit(&ring);
-        return probe;
-}
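Editor's note: a reference sketch (assuming liburing's io_uring_opcode_supported() helper from liburing.h); the probe returned above is typically used to test for an opcode before relying on it, IORING_OP_OPENAT being an arbitrary example.

#include <liburing.h>

/* Check whether the running kernel supports IORING_OP_OPENAT. */
static int openat_supported(void)
{
        struct io_uring_probe *probe = io_uring_get_probe();
        int supported;

        if (!probe)
                return -1;              /* probe failed or io_uring unavailable */
        supported = io_uring_opcode_supported(probe, IORING_OP_OPENAT);
        io_uring_free_probe(probe);
        return supported;               /* 1 if supported, 0 if not */
}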
-
-void io_uring_free_probe(struct io_uring_probe *probe)
-{
-        free(probe);
-}