polyphony 0.45.5 → 0.46.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -0
- data/.gitmodules +0 -0
- data/CHANGELOG.md +4 -0
- data/Gemfile.lock +1 -1
- data/README.md +3 -3
- data/Rakefile +1 -1
- data/TODO.md +4 -4
- data/examples/performance/thread-vs-fiber/polyphony_server.rb +1 -2
- data/ext/liburing/liburing.h +585 -0
- data/ext/liburing/liburing/README.md +4 -0
- data/ext/liburing/liburing/barrier.h +73 -0
- data/ext/liburing/liburing/compat.h +15 -0
- data/ext/liburing/liburing/io_uring.h +343 -0
- data/ext/liburing/queue.c +333 -0
- data/ext/liburing/register.c +187 -0
- data/ext/liburing/setup.c +210 -0
- data/ext/liburing/syscall.c +54 -0
- data/ext/liburing/syscall.h +18 -0
- data/ext/polyphony/backend.h +0 -14
- data/ext/polyphony/backend_common.h +109 -0
- data/ext/polyphony/backend_io_uring.c +884 -0
- data/ext/polyphony/backend_io_uring_context.c +73 -0
- data/ext/polyphony/backend_io_uring_context.h +52 -0
- data/ext/polyphony/{libev_backend.c → backend_libev.c} +202 -294
- data/ext/polyphony/event.c +1 -1
- data/ext/polyphony/extconf.rb +31 -13
- data/ext/polyphony/fiber.c +29 -22
- data/ext/polyphony/libev.c +4 -0
- data/ext/polyphony/libev.h +8 -2
- data/ext/polyphony/liburing.c +8 -0
- data/ext/polyphony/playground.c +51 -0
- data/ext/polyphony/polyphony.c +5 -5
- data/ext/polyphony/polyphony.h +16 -12
- data/ext/polyphony/polyphony_ext.c +10 -4
- data/ext/polyphony/queue.c +1 -1
- data/ext/polyphony/thread.c +11 -9
- data/lib/polyphony/adapters/trace.rb +2 -2
- data/lib/polyphony/core/global_api.rb +1 -4
- data/lib/polyphony/extensions/debug.rb +13 -0
- data/lib/polyphony/extensions/fiber.rb +2 -2
- data/lib/polyphony/extensions/socket.rb +59 -10
- data/lib/polyphony/version.rb +1 -1
- data/test/helper.rb +36 -4
- data/test/io_uring_test.rb +55 -0
- data/test/stress.rb +5 -2
- data/test/test_backend.rb +4 -6
- data/test/test_ext.rb +1 -2
- data/test/test_fiber.rb +22 -16
- data/test/test_global_api.rb +33 -35
- data/test/test_throttler.rb +3 -6
- data/test/test_trace.rb +7 -5
- metadata +22 -3
@@ -0,0 +1,187 @@
|
|
1
|
+
/* SPDX-License-Identifier: MIT */
|
2
|
+
#include <sys/types.h>
|
3
|
+
#include <sys/stat.h>
|
4
|
+
#include <sys/mman.h>
|
5
|
+
#include <unistd.h>
|
6
|
+
#include <errno.h>
|
7
|
+
#include <string.h>
|
8
|
+
|
9
|
+
#include "liburing/compat.h"
|
10
|
+
#include "liburing/io_uring.h"
|
11
|
+
#include "liburing.h"
|
12
|
+
|
13
|
+
#include "syscall.h"
|
14
|
+
|
15
|
+
int io_uring_register_buffers(struct io_uring *ring, const struct iovec *iovecs,
|
16
|
+
unsigned nr_iovecs)
|
17
|
+
{
|
18
|
+
int ret;
|
19
|
+
|
20
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_BUFFERS,
|
21
|
+
iovecs, nr_iovecs);
|
22
|
+
if (ret < 0)
|
23
|
+
return -errno;
|
24
|
+
|
25
|
+
return 0;
|
26
|
+
}
|
27
|
+
|
28
|
+
int io_uring_unregister_buffers(struct io_uring *ring)
|
29
|
+
{
|
30
|
+
int ret;
|
31
|
+
|
32
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_BUFFERS,
|
33
|
+
NULL, 0);
|
34
|
+
if (ret < 0)
|
35
|
+
return -errno;
|
36
|
+
|
37
|
+
return 0;
|
38
|
+
}
|
39
|
+
|
40
|
+
/*
|
41
|
+
* Register an update for an existing file set. The updates will start at
|
42
|
+
* 'off' in the original array, and 'nr_files' is the number of files we'll
|
43
|
+
* update.
|
44
|
+
*
|
45
|
+
* Returns number of files updated on success, -ERROR on failure.
|
46
|
+
*/
|
47
|
+
int io_uring_register_files_update(struct io_uring *ring, unsigned off,
|
48
|
+
int *files, unsigned nr_files)
|
49
|
+
{
|
50
|
+
struct io_uring_files_update up = {
|
51
|
+
.offset = off,
|
52
|
+
.fds = (unsigned long) files,
|
53
|
+
};
|
54
|
+
int ret;
|
55
|
+
|
56
|
+
ret = __sys_io_uring_register(ring->ring_fd,
|
57
|
+
IORING_REGISTER_FILES_UPDATE, &up,
|
58
|
+
nr_files);
|
59
|
+
if (ret < 0)
|
60
|
+
return -errno;
|
61
|
+
|
62
|
+
return ret;
|
63
|
+
}
|
64
|
+
|
65
|
+
int io_uring_register_files(struct io_uring *ring, const int *files,
|
66
|
+
unsigned nr_files)
|
67
|
+
{
|
68
|
+
int ret;
|
69
|
+
|
70
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_FILES,
|
71
|
+
files, nr_files);
|
72
|
+
if (ret < 0)
|
73
|
+
return -errno;
|
74
|
+
|
75
|
+
return 0;
|
76
|
+
}
|
77
|
+
|
78
|
+
int io_uring_unregister_files(struct io_uring *ring)
|
79
|
+
{
|
80
|
+
int ret;
|
81
|
+
|
82
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_FILES,
|
83
|
+
NULL, 0);
|
84
|
+
if (ret < 0)
|
85
|
+
return -errno;
|
86
|
+
|
87
|
+
return 0;
|
88
|
+
}
|
89
|
+
|
90
|
+
int io_uring_register_eventfd(struct io_uring *ring, int event_fd)
|
91
|
+
{
|
92
|
+
int ret;
|
93
|
+
|
94
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_EVENTFD,
|
95
|
+
&event_fd, 1);
|
96
|
+
if (ret < 0)
|
97
|
+
return -errno;
|
98
|
+
|
99
|
+
return 0;
|
100
|
+
}
|
101
|
+
|
102
|
+
int io_uring_unregister_eventfd(struct io_uring *ring)
|
103
|
+
{
|
104
|
+
int ret;
|
105
|
+
|
106
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_EVENTFD,
|
107
|
+
NULL, 0);
|
108
|
+
if (ret < 0)
|
109
|
+
return -errno;
|
110
|
+
|
111
|
+
return 0;
|
112
|
+
}
|
113
|
+
|
114
|
+
int io_uring_register_eventfd_async(struct io_uring *ring, int event_fd)
|
115
|
+
{
|
116
|
+
int ret;
|
117
|
+
|
118
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_EVENTFD_ASYNC,
|
119
|
+
&event_fd, 1);
|
120
|
+
if (ret < 0)
|
121
|
+
return -errno;
|
122
|
+
|
123
|
+
return 0;
|
124
|
+
}
|
125
|
+
|
126
|
+
int io_uring_register_probe(struct io_uring *ring, struct io_uring_probe *p,
|
127
|
+
unsigned int nr_ops)
|
128
|
+
{
|
129
|
+
int ret;
|
130
|
+
|
131
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_PROBE,
|
132
|
+
p, nr_ops);
|
133
|
+
if (ret < 0)
|
134
|
+
return -errno;
|
135
|
+
|
136
|
+
return 0;
|
137
|
+
}
|
138
|
+
|
139
|
+
int io_uring_register_personality(struct io_uring *ring)
|
140
|
+
{
|
141
|
+
int ret;
|
142
|
+
|
143
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_PERSONALITY,
|
144
|
+
NULL, 0);
|
145
|
+
if (ret < 0)
|
146
|
+
return -errno;
|
147
|
+
|
148
|
+
return ret;
|
149
|
+
}
|
150
|
+
|
151
|
+
int io_uring_unregister_personality(struct io_uring *ring, int id)
|
152
|
+
{
|
153
|
+
int ret;
|
154
|
+
|
155
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_UNREGISTER_PERSONALITY,
|
156
|
+
NULL, id);
|
157
|
+
if (ret < 0)
|
158
|
+
return -errno;
|
159
|
+
|
160
|
+
return ret;
|
161
|
+
}
|
162
|
+
|
163
|
+
int io_uring_register_restrictions(struct io_uring *ring,
|
164
|
+
struct io_uring_restriction *res,
|
165
|
+
unsigned int nr_res)
|
166
|
+
{
|
167
|
+
int ret;
|
168
|
+
|
169
|
+
ret = __sys_io_uring_register(ring->ring_fd, IORING_REGISTER_RESTRICTIONS,
|
170
|
+
res, nr_res);
|
171
|
+
if (ret < 0)
|
172
|
+
return -errno;
|
173
|
+
|
174
|
+
return 0;
|
175
|
+
}
|
176
|
+
|
177
|
+
int io_uring_enable_rings(struct io_uring *ring)
|
178
|
+
{
|
179
|
+
int ret;
|
180
|
+
|
181
|
+
ret = __sys_io_uring_register(ring->ring_fd,
|
182
|
+
IORING_REGISTER_ENABLE_RINGS, NULL, 0);
|
183
|
+
if (ret < 0)
|
184
|
+
return -errno;
|
185
|
+
|
186
|
+
return ret;
|
187
|
+
}
|
@@ -0,0 +1,210 @@
|
|
1
|
+
/* SPDX-License-Identifier: MIT */
|
2
|
+
#include <sys/types.h>
|
3
|
+
#include <sys/stat.h>
|
4
|
+
#include <sys/mman.h>
|
5
|
+
#include <unistd.h>
|
6
|
+
#include <errno.h>
|
7
|
+
#include <string.h>
|
8
|
+
#include <stdlib.h>
|
9
|
+
|
10
|
+
#include "liburing/compat.h"
|
11
|
+
#include "liburing/io_uring.h"
|
12
|
+
#include "liburing.h"
|
13
|
+
|
14
|
+
#include "syscall.h"
|
15
|
+
|
16
|
+
static void io_uring_unmap_rings(struct io_uring_sq *sq, struct io_uring_cq *cq)
|
17
|
+
{
|
18
|
+
munmap(sq->ring_ptr, sq->ring_sz);
|
19
|
+
if (cq->ring_ptr && cq->ring_ptr != sq->ring_ptr)
|
20
|
+
munmap(cq->ring_ptr, cq->ring_sz);
|
21
|
+
}
|
22
|
+
|
23
|
+
/*
 * mmap() the SQ/CQ rings and the SQE array for ring fd 'fd', using the
 * offsets the kernel filled into 'p', and wire up the kernel-shared
 * pointers inside 'sq' and 'cq'. Returns 0 on success, -errno on failure
 * (nothing stays mapped on failure).
 */
static int io_uring_mmap(int fd, struct io_uring_params *p,
			 struct io_uring_sq *sq, struct io_uring_cq *cq)
{
	size_t size;
	int ret;

	sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
	cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);

	/* Single-mmap mode: both rings live in one mapping sized to the larger. */
	if (p->features & IORING_FEAT_SINGLE_MMAP) {
		if (cq->ring_sz > sq->ring_sz)
			sq->ring_sz = cq->ring_sz;
		cq->ring_sz = sq->ring_sz;
	}
	sq->ring_ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
	if (sq->ring_ptr == MAP_FAILED)
		return -errno;

	if (p->features & IORING_FEAT_SINGLE_MMAP) {
		cq->ring_ptr = sq->ring_ptr;
	} else {
		cq->ring_ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
				    MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
		if (cq->ring_ptr == MAP_FAILED) {
			/* NULL cq->ring_ptr tells io_uring_unmap_rings to skip it */
			cq->ring_ptr = NULL;
			ret = -errno;
			goto err;
		}
	}

	/* SQ ring fields are offsets from the ring base, per p->sq_off. */
	sq->khead = sq->ring_ptr + p->sq_off.head;
	sq->ktail = sq->ring_ptr + p->sq_off.tail;
	sq->kring_mask = sq->ring_ptr + p->sq_off.ring_mask;
	sq->kring_entries = sq->ring_ptr + p->sq_off.ring_entries;
	sq->kflags = sq->ring_ptr + p->sq_off.flags;
	sq->kdropped = sq->ring_ptr + p->sq_off.dropped;
	sq->array = sq->ring_ptr + p->sq_off.array;

	/* The SQE array is a separate mapping at IORING_OFF_SQES. */
	size = p->sq_entries * sizeof(struct io_uring_sqe);
	sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, fd,
			IORING_OFF_SQES);
	if (sq->sqes == MAP_FAILED) {
		ret = -errno;
		/* shared error path: also reached via goto from the CQ mmap failure */
err:
		io_uring_unmap_rings(sq, cq);
		return ret;
	}

	cq->khead = cq->ring_ptr + p->cq_off.head;
	cq->ktail = cq->ring_ptr + p->cq_off.tail;
	cq->kring_mask = cq->ring_ptr + p->cq_off.ring_mask;
	cq->kring_entries = cq->ring_ptr + p->cq_off.ring_entries;
	cq->koverflow = cq->ring_ptr + p->cq_off.overflow;
	cq->cqes = cq->ring_ptr + p->cq_off.cqes;
	/* cq flags offset only exists on newer kernels; 0 means "not provided" */
	if (p->cq_off.flags)
		cq->kflags = cq->ring_ptr + p->cq_off.flags;
	return 0;
}
|
83
|
+
|
84
|
+
/*
|
85
|
+
* For users that want to specify sq_thread_cpu or sq_thread_idle, this
|
86
|
+
* interface is a convenient helper for mmap()ing the rings.
|
87
|
+
* Returns -errno on error, or zero on success. On success, 'ring'
|
88
|
+
* contains the necessary information to read/write to the rings.
|
89
|
+
*/
|
90
|
+
int io_uring_queue_mmap(int fd, struct io_uring_params *p, struct io_uring *ring)
|
91
|
+
{
|
92
|
+
int ret;
|
93
|
+
|
94
|
+
memset(ring, 0, sizeof(*ring));
|
95
|
+
ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
|
96
|
+
if (!ret) {
|
97
|
+
ring->flags = p->flags;
|
98
|
+
ring->ring_fd = fd;
|
99
|
+
}
|
100
|
+
return ret;
|
101
|
+
}
|
102
|
+
|
103
|
+
/*
|
104
|
+
* Ensure that the mmap'ed rings aren't available to a child after a fork(2).
|
105
|
+
* This uses madvise(..., MADV_DONTFORK) on the mmap'ed ranges.
|
106
|
+
*/
|
107
|
+
int io_uring_ring_dontfork(struct io_uring *ring)
|
108
|
+
{
|
109
|
+
size_t len;
|
110
|
+
int ret;
|
111
|
+
|
112
|
+
if (!ring->sq.ring_ptr || !ring->sq.sqes || !ring->cq.ring_ptr)
|
113
|
+
return -EINVAL;
|
114
|
+
|
115
|
+
len = *ring->sq.kring_entries * sizeof(struct io_uring_sqe);
|
116
|
+
ret = madvise(ring->sq.sqes, len, MADV_DONTFORK);
|
117
|
+
if (ret == -1)
|
118
|
+
return -errno;
|
119
|
+
|
120
|
+
len = ring->sq.ring_sz;
|
121
|
+
ret = madvise(ring->sq.ring_ptr, len, MADV_DONTFORK);
|
122
|
+
if (ret == -1)
|
123
|
+
return -errno;
|
124
|
+
|
125
|
+
if (ring->cq.ring_ptr != ring->sq.ring_ptr) {
|
126
|
+
len = ring->cq.ring_sz;
|
127
|
+
ret = madvise(ring->cq.ring_ptr, len, MADV_DONTFORK);
|
128
|
+
if (ret == -1)
|
129
|
+
return -errno;
|
130
|
+
}
|
131
|
+
|
132
|
+
return 0;
|
133
|
+
}
|
134
|
+
|
135
|
+
/*
 * Create an io_uring instance with caller-supplied parameters 'p' and mmap
 * its rings into 'ring'. Returns 0 on success, -errno on failure; the ring
 * fd is closed if the mmap step fails.
 */
int io_uring_queue_init_params(unsigned entries, struct io_uring *ring,
			       struct io_uring_params *p)
{
	int ring_fd, rc;

	ring_fd = __sys_io_uring_setup(entries, p);
	if (ring_fd < 0)
		return -errno;

	rc = io_uring_queue_mmap(ring_fd, p, ring);
	if (rc)
		close(ring_fd);
	return rc;
}
|
150
|
+
|
151
|
+
/*
|
152
|
+
* Returns -errno on error, or zero on success. On success, 'ring'
|
153
|
+
* contains the necessary information to read/write to the rings.
|
154
|
+
*/
|
155
|
+
int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
|
156
|
+
{
|
157
|
+
struct io_uring_params p;
|
158
|
+
|
159
|
+
memset(&p, 0, sizeof(p));
|
160
|
+
p.flags = flags;
|
161
|
+
|
162
|
+
return io_uring_queue_init_params(entries, ring, &p);
|
163
|
+
}
|
164
|
+
|
165
|
+
void io_uring_queue_exit(struct io_uring *ring)
|
166
|
+
{
|
167
|
+
struct io_uring_sq *sq = &ring->sq;
|
168
|
+
struct io_uring_cq *cq = &ring->cq;
|
169
|
+
|
170
|
+
munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
|
171
|
+
io_uring_unmap_rings(sq, cq);
|
172
|
+
close(ring->ring_fd);
|
173
|
+
}
|
174
|
+
|
175
|
+
struct io_uring_probe *io_uring_get_probe_ring(struct io_uring *ring)
|
176
|
+
{
|
177
|
+
struct io_uring_probe *probe;
|
178
|
+
int r;
|
179
|
+
|
180
|
+
size_t len = sizeof(*probe) + 256 * sizeof(struct io_uring_probe_op);
|
181
|
+
probe = malloc(len);
|
182
|
+
memset(probe, 0, len);
|
183
|
+
r = io_uring_register_probe(ring, probe, 256);
|
184
|
+
if (r < 0)
|
185
|
+
goto fail;
|
186
|
+
|
187
|
+
return probe;
|
188
|
+
fail:
|
189
|
+
free(probe);
|
190
|
+
return NULL;
|
191
|
+
}
|
192
|
+
|
193
|
+
struct io_uring_probe *io_uring_get_probe(void)
|
194
|
+
{
|
195
|
+
struct io_uring ring;
|
196
|
+
struct io_uring_probe* probe = NULL;
|
197
|
+
|
198
|
+
int r = io_uring_queue_init(2, &ring, 0);
|
199
|
+
if (r < 0)
|
200
|
+
return NULL;
|
201
|
+
|
202
|
+
probe = io_uring_get_probe_ring(&ring);
|
203
|
+
io_uring_queue_exit(&ring);
|
204
|
+
return probe;
|
205
|
+
}
|
206
|
+
|
207
|
+
/* Release a probe returned by io_uring_get_probe() / io_uring_get_probe_ring(). */
void io_uring_free_probe(struct io_uring_probe *probe)
{
	free(probe);
}
|
@@ -0,0 +1,54 @@
|
|
1
|
+
/* SPDX-License-Identifier: MIT */
|
2
|
+
/*
|
3
|
+
* Will go away once libc support is there
|
4
|
+
*/
|
5
|
+
#include <unistd.h>
|
6
|
+
#include <sys/syscall.h>
|
7
|
+
#include <sys/uio.h>
|
8
|
+
#include "liburing/compat.h"
|
9
|
+
#include "liburing/io_uring.h"
|
10
|
+
#include "syscall.h"
|
11
|
+
|
12
|
+
/* Fallback syscall numbers for libcs whose headers don't define them yet. */
#ifdef __alpha__
/*
 * alpha is the only exception, all other architectures
 * have common numbers for new system calls.
 */
# ifndef __NR_io_uring_setup
# define __NR_io_uring_setup 535
# endif
# ifndef __NR_io_uring_enter
# define __NR_io_uring_enter 536
# endif
# ifndef __NR_io_uring_register
# define __NR_io_uring_register 537
# endif
#else /* !__alpha__ */
# ifndef __NR_io_uring_setup
# define __NR_io_uring_setup 425
# endif
# ifndef __NR_io_uring_enter
# define __NR_io_uring_enter 426
# endif
# ifndef __NR_io_uring_register
# define __NR_io_uring_register 427
# endif
#endif
|
37
|
+
|
38
|
+
/*
 * Raw io_uring_register(2) wrapper: returns the kernel result directly
 * (-1 with errno set on failure, per syscall(2)).
 */
int __sys_io_uring_register(int fd, unsigned opcode, const void *arg,
			    unsigned nr_args)
{
	return syscall(__NR_io_uring_register, fd, opcode, arg, nr_args);
}
|
43
|
+
|
44
|
+
/*
 * Raw io_uring_setup(2) wrapper: returns the new ring fd, or -1 with
 * errno set on failure (per syscall(2)).
 */
int __sys_io_uring_setup(unsigned entries, struct io_uring_params *p)
{
	return syscall(__NR_io_uring_setup, entries, p);
}
|
48
|
+
|
49
|
+
/*
 * Raw io_uring_enter(2) wrapper. The trailing _NSIG / 8 argument is the
 * sigset size in bytes expected by the kernel when 'sig' is non-NULL.
 */
int __sys_io_uring_enter(int fd, unsigned to_submit, unsigned min_complete,
			 unsigned flags, sigset_t *sig)
{
	return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
		       flags, sig, _NSIG / 8);
}
|