event 0.2.2 → 0.4.2
- checksums.yaml +4 -4
- data/ext/event/backend/backend.h +2 -0
- data/ext/event/backend/epoll.c +71 -12
- data/ext/event/backend/kqueue.c +184 -43
- data/ext/event/backend/uring.c +138 -28
- data/ext/event/backend/uring.h +1 -0
- data/ext/event/extconf.rb +2 -0
- data/lib/event.rb +1 -1
- data/lib/event/backend.rb +49 -0
- data/lib/event/backend/select.rb +13 -0
- data/lib/event/debug/selector.rb +9 -0
- data/lib/event/version.rb +1 -1
- metadata +4 -4
- data/ext/event/Makefile +0 -266
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1adbb7e42901c2b8d7cd402e48ebf24fc763dd37a0c5e6da48262fd8bb33771a
+  data.tar.gz: 0a85f5629cadf9eb37813fcaabf16be2e459bdcaff72756640f6dc93f34e00f1
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c0f0fd8577bb468b2a093b5720c7651eb530f496867ace6caba28ace95abe8f7c45351c7e98feaa45443654521bdea43d8e157547be1d430f42af20232e011e6
+  data.tar.gz: 7b2230479a18b9d818c1fafa49589f8eea607da8e0283292a807e5d12d15d59f1aa84e6e1d5ac75197474807eeabb0982f15534c98e85cc81ad64c7f7f11dcc7
data/ext/event/backend/backend.h
CHANGED
data/ext/event/backend/epoll.c
CHANGED
@@ -28,7 +28,7 @@
 static VALUE Event_Backend_EPoll = Qnil;
 static ID id_fileno, id_transfer;
 
-
+enum {EPOLL_MAX_EVENTS = 64};
 
 struct Event_Backend_EPoll {
 	VALUE loop;
@@ -41,13 +41,19 @@ void Event_Backend_EPoll_Type_mark(void *_data)
 	rb_gc_mark(data->loop);
 }
 
+static
+void close_internal(struct Event_Backend_EPoll *data) {
+	if (data->descriptor >= 0) {
+		close(data->descriptor);
+		data->descriptor = -1;
+	}
+}
+
 void Event_Backend_EPoll_Type_free(void *_data)
 {
 	struct Event_Backend_EPoll *data = _data;
 	
-
-	close(data->descriptor);
-	}
+	close_internal(data);
 	
 	free(data);
 }
@@ -96,6 +102,15 @@ VALUE Event_Backend_EPoll_initialize(VALUE self, VALUE loop) {
 	return self;
 }
 
+VALUE Event_Backend_EPoll_close(VALUE self) {
+	struct Event_Backend_EPoll *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+	
+	close_internal(data);
+	
+	return Qnil;
+}
+
 static inline
 uint32_t epoll_flags_from_events(int events) {
 	uint32_t flags = 0;
@@ -212,28 +227,71 @@ int make_timeout(VALUE duration) {
 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
 }
 
+struct select_arguments {
+	struct Event_Backend_EPoll *data;
+	
+	int count;
+	struct epoll_event events[EPOLL_MAX_EVENTS];
+	
+	int timeout;
+};
+
+static
+void * select_internal(void *_arguments) {
+	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+	
+	arguments->count = epoll_wait(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout);
+	
+	return NULL;
+}
+
+static
+void select_internal_without_gvl(struct select_arguments *arguments) {
+	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+	
+	if (arguments->count == -1) {
+		rb_sys_fail("select_internal_without_gvl:epoll_wait");
+	}
+}
+
+static
+void select_internal_with_gvl(struct select_arguments *arguments) {
+	select_internal((void *)arguments);
+	
+	if (arguments->count == -1) {
+		rb_sys_fail("select_internal_with_gvl:epoll_wait");
+	}
+}
+
 VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
 	struct Event_Backend_EPoll *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
 	
-	struct
+	struct select_arguments arguments = {
+		.data = data,
+		.timeout = 0
+	};
 	
-
+	select_internal_with_gvl(&arguments);
 	
-	if (count ==
-
+	if (arguments.count == 0) {
+		arguments.timeout = make_timeout(duration);
+		
+		if (arguments.timeout != 0) {
+			select_internal_without_gvl(&arguments);
+		}
 	}
 	
-	for (int i = 0; i < count; i += 1) {
-		VALUE fiber = (VALUE)events[i].data.ptr;
-		VALUE result = INT2NUM(events[i].events);
+	for (int i = 0; i < arguments.count; i += 1) {
+		VALUE fiber = (VALUE)arguments.events[i].data.ptr;
+		VALUE result = INT2NUM(arguments.events[i].events);
 		
 		// fprintf(stderr, "-> fiber=%p descriptor=%d\n", (void*)fiber, events[i].data.fd);
 		
 		rb_funcall(fiber, id_transfer, 1, result);
 	}
 	
-	return INT2NUM(count);
+	return INT2NUM(arguments.count);
 }
 
 void Init_Event_Backend_EPoll(VALUE Event_Backend) {
@@ -244,6 +302,7 @@ void Init_Event_Backend_EPoll(VALUE Event_Backend) {
 	
 	rb_define_alloc_func(Event_Backend_EPoll, Event_Backend_EPoll_allocate);
 	rb_define_method(Event_Backend_EPoll, "initialize", Event_Backend_EPoll_initialize, 1);
+	rb_define_method(Event_Backend_EPoll, "close", Event_Backend_EPoll_close, 0);
 	
 	rb_define_method(Event_Backend_EPoll, "io_wait", Event_Backend_EPoll_io_wait, 3);
 	rb_define_method(Event_Backend_EPoll, "select", Event_Backend_EPoll_select, 1);
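For orientation, here is a rough Ruby-side sketch of how the epoll backend's io_wait, select and close methods registered above fit together. It is illustrative only: it assumes Linux, it assumes READABLE has the bit value 1, and the Fiber#transfer wiring reflects the typical loop/worker pattern suggested by the pure-Ruby Select backend rather than code taken from this diff.

	require 'event'
	require 'fiber'
	
	READABLE = 1 # assumed bit value for this sketch; the gem defines its own event constants
	
	loop_fiber = Fiber.current
	backend = Event::Backend::EPoll.new(loop_fiber)
	input, output = IO.pipe
	
	worker = Fiber.new do
		# Registers interest in `input`, then transfers control back to loop_fiber:
		backend.io_wait(Fiber.current, input, READABLE)
		puts input.read_nonblock(5)
		loop_fiber.transfer
	end
	
	worker.transfer   # the worker parks inside io_wait
	output.write("hello")
	backend.select(1) # epoll reports the event and the worker fiber is resumed via #transfer
	backend.close     # new in this release: closes the epoll descriptor immediately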
data/ext/event/backend/kqueue.c
CHANGED
@@ -28,7 +28,7 @@
 static VALUE Event_Backend_KQueue = Qnil;
 static ID id_fileno, id_transfer;
 
-
+enum {KQUEUE_MAX_EVENTS = 64};
 
 struct Event_Backend_KQueue {
 	VALUE loop;
@@ -41,13 +41,19 @@ void Event_Backend_KQueue_Type_mark(void *_data)
 	rb_gc_mark(data->loop);
 }
 
+static
+void close_internal(struct Event_Backend_KQueue *data) {
+	if (data->descriptor >= 0) {
+		close(data->descriptor);
+		data->descriptor = -1;
+	}
+}
+
 void Event_Backend_KQueue_Type_free(void *_data)
 {
 	struct Event_Backend_KQueue *data = _data;
 	
-
-	close(data->descriptor);
-	}
+	close_internal(data);
 	
 	free(data);
 }
@@ -97,50 +103,121 @@ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
 	return self;
 }
 
-
-
-
+VALUE Event_Backend_KQueue_close(VALUE self) {
+	struct Event_Backend_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
 	
-
-	if (events & PRIORITY) filter |= EV_OOBAND;
-	if (events & WRITABLE) filter |= EVFILT_WRITE;
+	close_internal(data);
 	
-	return
+	return Qnil;
 }
 
-static
-int
-int
+static
+int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
+	int count = 0;
+	struct kevent kevents[2] = {0};
+	
+	if (events & READABLE) {
+		kevents[count].ident = ident;
+		kevents[count].filter = EVFILT_READ;
+		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+		kevents[count].udata = (void*)fiber;
+		
+		// #ifdef EV_OOBAND
+		// if (events & PRIORITY) {
+		// 	kevents[count].flags |= EV_OOBAND;
+		// }
+		// #endif
+		
+		count++;
+	}
+	
+	if (events & WRITABLE) {
+		kevents[count].ident = ident;
+		kevents[count].filter = EVFILT_WRITE;
+		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+		kevents[count].udata = (void*)fiber;
+		count++;
+	}
+	
+	int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
 	
-	if (
-
-
+	if (result == -1) {
+		rb_sys_fail("kevent(register)");
+	}
 	
-	return
+	return events;
 }
 
-
-
-
+static
+void io_remove_filters(int descriptor, int ident, int events) {
+	int count = 0;
+	struct kevent kevents[2] = {0};
 	
-
+	if (events & READABLE) {
+		kevents[count].ident = ident;
+		kevents[count].filter = EVFILT_READ;
+		kevents[count].flags = EV_DELETE;
+		
+		count++;
+	}
 	
-
+	if (events & WRITABLE) {
+		kevents[count].ident = ident;
+		kevents[count].filter = EVFILT_WRITE;
+		kevents[count].flags = EV_DELETE;
+		count++;
+	}
 	
-
-
-
-
+	// Ignore the result.
+	kevent(descriptor, kevents, count, NULL, 0, NULL);
+}
+
+struct io_wait_arguments {
+	struct Event_Backend_KQueue *data;
+	int events;
+	int descriptor;
+};
+
+static
+VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 	
-
-	int result = kevent(data->descriptor, &event, 1, NULL, 0, NULL);
+	io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
 	
-
-
-
+	rb_exc_raise(exception);
+};
+
+static inline
+int events_from_kqueue_filter(int filter) {
+	if (filter == EVFILT_READ) return READABLE;
+	if (filter == EVFILT_WRITE) return WRITABLE;
+	
+	return 0;
+}
+
+static
+VALUE io_wait_transfer(VALUE _arguments) {
+	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+	
+	VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
 	
-	VALUE result = rb_funcall(data->loop, id_transfer, 0);
 	return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
+};
+
+VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+	struct Event_Backend_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+	
+	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+	
+	struct io_wait_arguments io_wait_arguments = {
+		.events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
+		.data = data,
+		.descriptor = descriptor,
+	};
+	
+	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
 }
 
 static
@@ -158,7 +235,7 @@ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
 	
 	else if (RB_FLOAT_TYPE_P(duration)) {
 		double value = RFLOAT_VALUE(duration);
-		time_t seconds =
+		time_t seconds = value;
 		
 		storage->tv_sec = seconds;
 		storage->tv_nsec = (value - seconds) * 1000000000L;
@@ -169,26 +246,89 @@ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
 }
 
+static
+int timeout_nonblocking(struct timespec * timespec) {
+	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+}
+
+struct select_arguments {
+	struct Event_Backend_KQueue *data;
+	
+	int count;
+	struct kevent events[KQUEUE_MAX_EVENTS];
+	
+	struct timespec storage;
+	struct timespec *timeout;
+};
+
+static
+void * select_internal(void *_arguments) {
+	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+	
+	arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
+	
+	return NULL;
+}
+
+static
+void select_internal_without_gvl(struct select_arguments *arguments) {
+	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+	
+	if (arguments->count == -1) {
+		rb_sys_fail("select_internal_without_gvl:kevent");
+	}
+}
+
+static
+void select_internal_with_gvl(struct select_arguments *arguments) {
+	select_internal((void *)arguments);
+	
+	if (arguments->count == -1) {
+		rb_sys_fail("select_internal_with_gvl:kevent");
+	}
+}
+
 VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
 	struct Event_Backend_KQueue *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
 	
-	struct
-
+	struct select_arguments arguments = {
+		.data = data,
+		.count = KQUEUE_MAX_EVENTS,
+		.storage = {
+			.tv_sec = 0,
+			.tv_nsec = 0
+		}
+	};
 	
-
+	// We break this implementation into two parts.
+	// (1) count = kevent(..., timeout = 0)
+	// (2) without gvl: kevent(..., timeout = 0) if count == 0 and timeout != 0
+	// This allows us to avoid releasing and reacquiring the GVL.
+	// Non-comprehensive testing shows this gives a 1.5x speedup.
+	arguments.timeout = &arguments.storage;
 	
-
-
+	// First do the syscall with no timeout to get any immediately available events:
+	select_internal_with_gvl(&arguments);
+	
+	// If there were no pending events, if we have a timeout, wait for more events:
+	if (arguments.count == 0) {
+		arguments.timeout = make_timeout(duration, &arguments.storage);
+		
+		if (!timeout_nonblocking(arguments.timeout)) {
+			arguments.count = KQUEUE_MAX_EVENTS;
+			
+			select_internal_without_gvl(&arguments);
+		}
 	}
 	
-	for (int i = 0; i < count; i += 1) {
-		VALUE fiber = (VALUE)events[i].udata;
-		VALUE result = INT2NUM(events[i].filter);
+	for (int i = 0; i < arguments.count; i += 1) {
+		VALUE fiber = (VALUE)arguments.events[i].udata;
+		VALUE result = INT2NUM(arguments.events[i].filter);
 		rb_funcall(fiber, id_transfer, 1, result);
 	}
 	
-	return INT2NUM(count);
+	return INT2NUM(arguments.count);
 }
 
 void Init_Event_Backend_KQueue(VALUE Event_Backend) {
@@ -199,6 +339,7 @@ void Init_Event_Backend_KQueue(VALUE Event_Backend) {
 	
 	rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
 	rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
+	rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
 	
 	rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
 	rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
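The comment block in Event_Backend_KQueue_select above explains the two-phase strategy: a zero-timeout kevent while still holding the GVL, then, only if nothing was ready and the requested timeout is non-zero, a blocking kevent with the GVL released. A hedged sketch of what that means for a caller (macOS/BSD assumed; the durations are arbitrary):

	require 'event'
	
	backend = Event::Backend::KQueue.new(Fiber.current)
	
	backend.select(0)    # duration 0: only the in-GVL, zero-timeout kevent pass runs
	backend.select(1.5)  # nothing ready: the second kevent blocks up to 1.5s with the GVL released
	backend.close        # the new #close releases the kqueue descriptor without waiting for GC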
data/ext/event/backend/uring.c
CHANGED
@@ -28,8 +28,8 @@
 static VALUE Event_Backend_URing = Qnil;
 static ID id_fileno, id_transfer;
 
-
-
+enum {URING_ENTRIES = 128};
+enum {URING_MAX_EVENTS = 128};
 
 struct Event_Backend_URing {
 	VALUE loop;
@@ -42,14 +42,19 @@ void Event_Backend_URing_Type_mark(void *_data)
 	rb_gc_mark(data->loop);
 }
 
-
-{
-	struct Event_Backend_URing *data = _data;
-
+static
+void close_internal(struct Event_Backend_URing *data) {
 	if (data->ring.ring_fd >= 0) {
 		io_uring_queue_exit(&data->ring);
 		data->ring.ring_fd = -1;
 	}
+}
+
+void Event_Backend_URing_Type_free(void *_data)
+{
+	struct Event_Backend_URing *data = _data;
+	
+	close_internal(data);
 	
 	free(data);
 }
@@ -97,6 +102,15 @@ VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
 	return self;
 }
 
+VALUE Event_Backend_URing_close(VALUE self) {
+	struct Event_Backend_URing *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+	
+	close_internal(data);
+	
+	return Qnil;
+}
+
 static inline
 short poll_flags_from_events(int events) {
 	short flags = 0;
@@ -122,28 +136,75 @@ int events_from_poll_flags(short flags) {
 	return events;
 }
 
+struct io_wait_arguments {
+	struct Event_Backend_URing *data;
+	VALUE fiber;
+	short flags;
+};
+
+struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
+	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
+	
+	while (sqe == NULL) {
+		sqe = io_uring_get_sqe(&data->ring);
+	}
+	
+	return sqe;
+}
+
+static
+VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+	struct Event_Backend_URing *data = arguments->data;
+	
+	struct io_uring_sqe *sqe = io_get_sqe(data);
+	
+	// fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);
+	
+	io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
+	io_uring_submit(&data->ring);
+	
+	rb_exc_raise(exception);
+};
+
+static
+VALUE io_wait_transfer(VALUE _arguments) {
+	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+	struct Event_Backend_URing *data = arguments->data;
+	
+	VALUE result = rb_funcall(data->loop, id_transfer, 0);
+	
+	// We explicitly filter the resulting events based on the requested events.
+	// In some cases, poll will report events we didn't ask for.
+	short flags = arguments->flags & NUM2INT(result);
+	
+	return INT2NUM(events_from_poll_flags(flags));
+};
+
 VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
 	struct Event_Backend_URing *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
 	
 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-	struct io_uring_sqe *sqe =
+	struct io_uring_sqe *sqe = io_get_sqe(data);
+	
+	if (!sqe) return INT2NUM(0);
 	
 	short flags = poll_flags_from_events(NUM2INT(events));
 	
-	// fprintf(stderr, "poll_add(%p, %d, %d)\n", sqe, descriptor, flags);
+	// fprintf(stderr, "poll_add(%p, %d, %d, %p)\n", sqe, descriptor, flags, (void*)fiber);
 	
 	io_uring_prep_poll_add(sqe, descriptor, flags);
 	io_uring_sqe_set_data(sqe, (void*)fiber);
 	io_uring_submit(&data->ring);
 	
-
-
-
-
-
+	struct io_wait_arguments io_wait_arguments = {
+		.data = data,
+		.fiber = fiber,
+		.flags = flags
+	};
 	
-	return
+	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
 }
 
 inline static
@@ -176,7 +237,7 @@ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffe
 	resize_to_capacity(buffer, NUM2SIZET(offset), NUM2SIZET(length));
 	
 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-	struct io_uring_sqe *sqe =
+	struct io_uring_sqe *sqe = io_get_sqe(data);
 	
 	struct iovec iovecs[1];
 	iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
@@ -208,7 +269,7 @@ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buff
 	}
 	
 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-	struct io_uring_sqe *sqe =
+	struct io_uring_sqe *sqe = io_get_sqe(data);
 	
 	struct iovec iovecs[1];
 	iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
@@ -244,7 +305,7 @@ struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec
 	
 	else if (RB_FLOAT_TYPE_P(duration)) {
 		double value = RFLOAT_VALUE(duration);
-		time_t seconds =
+		time_t seconds = value;
 		
 		storage->tv_sec = seconds;
 		storage->tv_nsec = (value - seconds) * 1000000000L;
@@ -255,36 +316,84 @@ struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec
 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
 }
 
+static
+int timeout_nonblocking(struct __kernel_timespec *timespec) {
+	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+}
+
+struct select_arguments {
+	struct Event_Backend_URing *data;
+	
+	int count;
+	struct io_uring_cqe **cqes;
+	
+	struct __kernel_timespec storage;
+	struct __kernel_timespec *timeout;
+};
+
+static
+void * select_internal(void *_arguments) {
+	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+	
+	arguments->count = io_uring_wait_cqes(&arguments->data->ring, arguments->cqes, 1, arguments->timeout, NULL);
+	
+	// If waiting resulted in a timeout, there are 0 events.
+	if (arguments->count == -ETIME) {
+		arguments->count = 0;
+	}
+	
+	return NULL;
+}
+
+static
+int select_internal_without_gvl(struct select_arguments *arguments) {
+	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+	
+	if (arguments->count < 0) {
+		rb_syserr_fail(-arguments->count, "select_internal_without_gvl:io_uring_wait_cqes");
+	}
+	
+	return arguments->count;
+}
+
 VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
 	struct Event_Backend_URing *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
 	
 	struct io_uring_cqe *cqes[URING_MAX_EVENTS];
-	struct __kernel_timespec storage;
 	
+	// This is a non-blocking operation:
 	int result = io_uring_peek_batch_cqe(&data->ring, cqes, URING_MAX_EVENTS);
 	
-	// fprintf(stderr, "result = %d\n", result);
-
 	if (result < 0) {
 		rb_syserr_fail(-result, strerror(-result));
-	} else if (result == 0
-
+	} else if (result == 0) {
+		// We might need to wait for events:
+		struct select_arguments arguments = {
+			.data = data,
+			.cqes = cqes,
+			.timeout = NULL,
+		};
 		
-
+		arguments.timeout = make_timeout(duration, &arguments.storage);
 		
-		if (
-			result =
-		} else if (result < 0) {
-			rb_syserr_fail(-result, strerror(-result));
+		if (!timeout_nonblocking(arguments.timeout)) {
+			result = select_internal_without_gvl(&arguments);
 		}
 	}
 	
+	// fprintf(stderr, "cqes count=%d\n", result);
+	
 	for (int i = 0; i < result; i += 1) {
+		// If the operation was cancelled, or the operation has no user data (fiber):
+		if (cqes[i]->res == -ECANCELED || cqes[i]->user_data == 0) {
+			continue;
+		}
+		
 		VALUE fiber = (VALUE)io_uring_cqe_get_data(cqes[i]);
 		VALUE result = INT2NUM(cqes[i]->res);
 		
-		// fprintf(stderr, "cqes[i]
+		// fprintf(stderr, "cqes[i] res=%d user_data=%p\n", cqes[i]->res, (void*)cqes[i]->user_data);
 		
 		io_uring_cqe_seen(&data->ring, cqes[i]);
 		
@@ -302,6 +411,7 @@ void Init_Event_Backend_URing(VALUE Event_Backend) {
 	
 	rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
 	rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
+	rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
 	
 	rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
 	rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
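One observable effect of the new select_internal path above is that a plain timeout is not treated as an error: io_uring_wait_cqes returning -ETIME is converted into zero events. A small sketch, assuming Linux with liburing and that require 'event' exposes the URing backend:

	require 'event'
	
	backend = Event::Backend::URing.new(Fiber.current)
	
	started = Process.clock_gettime(Process::CLOCK_MONOTONIC)
	backend.select(0.25) # no completions: waits ~0.25s with the GVL released; -ETIME becomes 0 events
	elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - started
	
	puts "timed out cleanly after #{elapsed.round(2)}s"
	backend.close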
data/ext/event/backend/uring.h
CHANGED
data/ext/event/extconf.rb
CHANGED
data/lib/event.rb
CHANGED
data/lib/event/backend.rb
ADDED
@@ -0,0 +1,49 @@
+# Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+require_relative 'backend/select'
+
+module Event
+	module Backend
+		def self.default(env = ENV)
+			if backend = env['EVENT_BACKEND']&.to_sym
+				if Event::Backend.const_defined?(backend)
+					return Event::Backend.const_get(backend)
+				else
+					warn "Could not find EVENT_BACKEND=#{backend}!"
+				end
+			end
+			
+			if self.const_defined?(:URing)
+				return Event::Backend::URing
+			elsif self.const_defined?(:KQueue)
+				return Event::Backend::KQueue
+			elsif self.const_defined?(:EPoll)
+				return Event::Backend::EPoll
+			else
+				return Event::Backend::Select
+			end
+		end
+		
+		def self.new(...)
+			default.new(...)
+		end
+	end
+end
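The new Event::Backend.default above resolves the backend class once: an explicit EVENT_BACKEND environment variable wins, otherwise the first compiled-in native backend in the order URing, KQueue, EPoll, falling back to the pure-Ruby Select. A usage sketch (assuming require 'event' loads these classes; output depends on platform):

	require 'event'
	
	puts Event::Backend.default
	# => Event::Backend::URing on Linux with liburing, KQueue on macOS/BSD,
	#    EPoll on other Linux builds, otherwise Event::Backend::Select.
	
	# An explicit override, equivalent to running with EVENT_BACKEND=Select:
	puts Event::Backend.default({'EVENT_BACKEND' => 'Select'}) # => Event::Backend::Select
	
	# Event::Backend.new forwards to whichever class was selected:
	backend = Event::Backend.new(Fiber.current)
	backend.close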
data/lib/event/backend/select.rb
CHANGED
@@ -28,16 +28,29 @@ module Event
 			@writable = {}
 		end
 		
+		def close
+			@loop = nil
+			@readable = nil
+			@writable = nil
+		end
+		
 		def io_wait(fiber, io, events)
+			remove_readable = remove_writable = false
+			
 			if (events & READABLE) > 0 or (events & PRIORITY) > 0
 				@readable[io] = fiber
+				remove_readable = true
 			end
 			
 			if (events & WRITABLE) > 0
 				@writable[io] = fiber
+				remove_writable = true
 			end
 			
 			@loop.transfer
+		ensure
+			@readable.delete(io) if remove_readable
+			@writable.delete(io) if remove_writable
 		end
 		
 		def select(duration = nil)
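The ensure clause added to io_wait above guarantees deregistration even when the waiting fiber is woken abnormally, so @readable and @writable can no longer accumulate stale IO entries. The same register-then-ensure-deregister pattern in isolation, as a minimal pure-Ruby sketch (names are invented for illustration):

	registrations = {}
	
	wait_for = lambda do |io, fiber|
		registrations[io] = fiber
		raise "simulated failure while waiting" # stand-in for the fiber being woken abnormally
	ensure
		registrations.delete(io) # runs on normal wake-up and on exceptions alike
	end
	
	begin
		wait_for.call(:io, :fiber)
	rescue RuntimeError
		puts registrations.empty? # => true, nothing left registered
	end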
data/lib/event/debug/selector.rb
CHANGED
@@ -31,6 +31,15 @@ module Event
 			@priority = {}
 		end
 		
+		def close
+			if @selector.nil?
+				raise "Selector already closed!"
+			end
+			
+			@selector.close
+			@selector = nil
+		end
+		
 		def io_wait(fiber, io, events)
 			register_readable(fiber, io, events)
 		end
data/lib/event/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: event
 version: !ruby/object:Gem::Version
-  version: 0.2.2
+  version: 0.4.2
 platform: ruby
 authors:
 - Samuel Williams
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-
+date: 2021-05-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bake
@@ -73,7 +73,6 @@ extensions:
 - ext/event/extconf.rb
 extra_rdoc_files: []
 files:
-- ext/event/Makefile
 - ext/event/backend/backend.h
 - ext/event/backend/epoll.c
 - ext/event/backend/epoll.h
@@ -85,6 +84,7 @@ files:
 - ext/event/event.h
 - ext/event/extconf.rb
 - lib/event.rb
+- lib/event/backend.rb
 - lib/event/backend/select.rb
 - lib/event/debug/selector.rb
 - lib/event/selector.rb
@@ -108,7 +108,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.2
+rubygems_version: 3.1.2
 signing_key:
 specification_version: 4
 summary: An event loop.
data/ext/event/Makefile
DELETED
@@ -1,266 +0,0 @@
-
-SHELL = /bin/sh
-
-# V=0 quiet, V=1 verbose. other values don't work.
-V = 0
-Q1 = $(V:1=)
-Q = $(Q1:0=@)
-ECHO1 = $(V:1=@ :)
-ECHO = $(ECHO1:0=@ echo)
-NULLCMD = :
-
-#### Start of system configuration section. ####
-
-srcdir = .
-topdir = /home/samuel/.rubies/ruby-3.0.0/include/ruby-3.0.0
-hdrdir = $(topdir)
-arch_hdrdir = /home/samuel/.rubies/ruby-3.0.0/include/ruby-3.0.0/x86_64-linux
-PATH_SEPARATOR = :
-VPATH = $(srcdir):$(arch_hdrdir)/ruby:$(hdrdir)/ruby:$(srcdir)/backend
-prefix = $(DESTDIR)/home/samuel/.rubies/ruby-3.0.0
-rubysitearchprefix = $(rubylibprefix)/$(sitearch)
-rubyarchprefix = $(rubylibprefix)/$(arch)
-rubylibprefix = $(libdir)/$(RUBY_BASE_NAME)
-exec_prefix = $(prefix)
-vendorarchhdrdir = $(vendorhdrdir)/$(sitearch)
-sitearchhdrdir = $(sitehdrdir)/$(sitearch)
-rubyarchhdrdir = $(rubyhdrdir)/$(arch)
-vendorhdrdir = $(rubyhdrdir)/vendor_ruby
-sitehdrdir = $(rubyhdrdir)/site_ruby
-rubyhdrdir = $(includedir)/$(RUBY_VERSION_NAME)
-vendorarchdir = $(vendorlibdir)/$(sitearch)
-vendorlibdir = $(vendordir)/$(ruby_version)
-vendordir = $(rubylibprefix)/vendor_ruby
-sitearchdir = $(sitelibdir)/$(sitearch)
-sitelibdir = $(sitedir)/$(ruby_version)
-sitedir = $(rubylibprefix)/site_ruby
-rubyarchdir = $(rubylibdir)/$(arch)
-rubylibdir = $(rubylibprefix)/$(ruby_version)
-sitearchincludedir = $(includedir)/$(sitearch)
-archincludedir = $(includedir)/$(arch)
-sitearchlibdir = $(libdir)/$(sitearch)
-archlibdir = $(libdir)/$(arch)
-ridir = $(datarootdir)/$(RI_BASE_NAME)
-mandir = $(datarootdir)/man
-localedir = $(datarootdir)/locale
-libdir = $(exec_prefix)/lib
-psdir = $(docdir)
-pdfdir = $(docdir)
-dvidir = $(docdir)
-htmldir = $(docdir)
-infodir = $(datarootdir)/info
-docdir = $(datarootdir)/doc/$(PACKAGE)
-oldincludedir = $(DESTDIR)/usr/include
-includedir = $(prefix)/include
-runstatedir = $(localstatedir)/run
-localstatedir = $(prefix)/var
-sharedstatedir = $(prefix)/com
-sysconfdir = $(prefix)/etc
-datadir = $(datarootdir)
-datarootdir = $(prefix)/share
-libexecdir = $(exec_prefix)/libexec
-sbindir = $(exec_prefix)/sbin
-bindir = $(exec_prefix)/bin
-archdir = $(rubyarchdir)
-
-
-CC_WRAPPER =
-CC = gcc
-CXX = g++
-LIBRUBY = $(LIBRUBY_A)
-LIBRUBY_A = lib$(RUBY_SO_NAME)-static.a
-LIBRUBYARG_SHARED = -Wl,-rpath,$(libdir) -L$(libdir)
-LIBRUBYARG_STATIC = -Wl,-rpath,$(libdir) -L$(libdir) -l$(RUBY_SO_NAME)-static $(MAINLIBS)
-empty =
-OUTFLAG = -o $(empty)
-COUTFLAG = -o $(empty)
-CSRCFLAG = $(empty)
-
-RUBY_EXTCONF_H = extconf.h
-cflags = $(optflags) $(debugflags) $(warnflags)
-cxxflags =
-optflags = -O3
-debugflags = -ggdb3
-warnflags = -Wall -Wextra -Wdeprecated-declarations -Wduplicated-cond -Wimplicit-function-declaration -Wimplicit-int -Wmisleading-indentation -Wpointer-arith -Wwrite-strings -Wimplicit-fallthrough=0 -Wmissing-noreturn -Wno-cast-function-type -Wno-constant-logical-operand -Wno-long-long -Wno-missing-field-initializers -Wno-overlength-strings -Wno-packed-bitfield-compat -Wno-parentheses-equality -Wno-self-assign -Wno-tautological-compare -Wno-unused-parameter -Wno-unused-value -Wsuggest-attribute=format -Wsuggest-attribute=noreturn -Wunused-variable
-cppflags =
-CCDLFLAGS = -fPIC
-CFLAGS = $(CCDLFLAGS) $(cflags) $(ARCH_FLAG)
-INCFLAGS = -I. -I$(arch_hdrdir) -I$(hdrdir)/ruby/backward -I$(hdrdir) -I$(srcdir)
-DEFS =
-CPPFLAGS = -DRUBY_EXTCONF_H=\"$(RUBY_EXTCONF_H)\" $(DEFS) $(cppflags)
-CXXFLAGS = $(CCDLFLAGS) $(ARCH_FLAG)
-ldflags = -L. -fstack-protector-strong -rdynamic -Wl,-export-dynamic
-dldflags = -Wl,--compress-debug-sections=zlib
-ARCH_FLAG =
-DLDFLAGS = $(ldflags) $(dldflags) $(ARCH_FLAG)
-LDSHARED = $(CC) -shared
-LDSHAREDXX = $(CXX) -shared
-AR = gcc-ar
-EXEEXT =
-
-RUBY_INSTALL_NAME = $(RUBY_BASE_NAME)
-RUBY_SO_NAME = ruby
-RUBYW_INSTALL_NAME =
-RUBY_VERSION_NAME = $(RUBY_BASE_NAME)-$(ruby_version)
-RUBYW_BASE_NAME = rubyw
-RUBY_BASE_NAME = ruby
-
-arch = x86_64-linux
-sitearch = $(arch)
-ruby_version = 3.0.0
-ruby = $(bindir)/$(RUBY_BASE_NAME)
-RUBY = $(ruby)
-ruby_headers = $(hdrdir)/ruby.h $(hdrdir)/ruby/backward.h $(hdrdir)/ruby/ruby.h $(hdrdir)/ruby/defines.h $(hdrdir)/ruby/missing.h $(hdrdir)/ruby/intern.h $(hdrdir)/ruby/st.h $(hdrdir)/ruby/subst.h $(arch_hdrdir)/ruby/config.h $(RUBY_EXTCONF_H)
-
-RM = rm -f
-RM_RF = $(RUBY) -run -e rm -- -rf
-RMDIRS = rmdir --ignore-fail-on-non-empty -p
-MAKEDIRS = /usr/bin/mkdir -p
-INSTALL = /usr/bin/install -c
-INSTALL_PROG = $(INSTALL) -m 0755
-INSTALL_DATA = $(INSTALL) -m 644
-COPY = cp
-TOUCH = exit >
-
-#### End of system configuration section. ####
-
-preload =
-libpath = . $(libdir)
-LIBPATH = -L. -L$(libdir) -Wl,-rpath,$(libdir)
-DEFFILE =
-
-CLEANFILES = mkmf.log
-DISTCLEANFILES =
-DISTCLEANDIRS =
-
-extout =
-extout_prefix =
-target_prefix = /event
-LOCAL_LIBS =
-LIBS = -luring -lm -lc
-ORIG_SRCS = event.c
-SRCS = $(ORIG_SRCS) event.c uring.c epoll.c
-OBJS = event.o uring.o epoll.o
-HDRS = $(srcdir)/event.h $(srcdir)/extconf.h
-LOCAL_HDRS =
-TARGET = event
-TARGET_NAME = event
-TARGET_ENTRY = Init_$(TARGET_NAME)
-DLLIB = $(TARGET).so
-EXTSTATIC =
-STATIC_LIB =
-
-TIMESTAMP_DIR = .
-BINDIR = $(bindir)
-RUBYCOMMONDIR = $(sitedir)$(target_prefix)
-RUBYLIBDIR = $(sitelibdir)$(target_prefix)
-RUBYARCHDIR = $(sitearchdir)$(target_prefix)
-HDRDIR = $(rubyhdrdir)/ruby$(target_prefix)
-ARCHHDRDIR = $(rubyhdrdir)/$(arch)/ruby$(target_prefix)
-TARGET_SO_DIR =
-TARGET_SO = $(TARGET_SO_DIR)$(DLLIB)
-CLEANLIBS = $(TARGET_SO)
-CLEANOBJS = *.o *.bak
-
-all: $(DLLIB)
-static: $(STATIC_LIB)
-.PHONY: all install static install-so install-rb
-.PHONY: clean clean-so clean-static clean-rb
-
-clean-static::
-clean-rb-default::
-clean-rb::
-clean-so::
-clean: clean-so clean-static clean-rb-default clean-rb
-	-$(Q)$(RM) $(CLEANLIBS) $(CLEANOBJS) $(CLEANFILES) .*.time
-
-distclean-rb-default::
-distclean-rb::
-distclean-so::
-distclean-static::
-distclean: clean distclean-so distclean-static distclean-rb-default distclean-rb
-	-$(Q)$(RM) Makefile $(RUBY_EXTCONF_H) conftest.* mkmf.log
-	-$(Q)$(RM) core ruby$(EXEEXT) *~ $(DISTCLEANFILES)
-	-$(Q)$(RMDIRS) $(DISTCLEANDIRS) 2> /dev/null || true
-
-realclean: distclean
-install: install-so install-rb
-
-install-so: $(DLLIB) $(TIMESTAMP_DIR)/.sitearchdir.-.event.time
-	$(INSTALL_PROG) $(DLLIB) $(RUBYARCHDIR)
-clean-static::
-	-$(Q)$(RM) $(STATIC_LIB)
-install-rb: pre-install-rb do-install-rb install-rb-default
-install-rb-default: pre-install-rb-default do-install-rb-default
-pre-install-rb: Makefile
-pre-install-rb-default: Makefile
-do-install-rb:
-do-install-rb-default:
-pre-install-rb-default:
-	@$(NULLCMD)
-$(TIMESTAMP_DIR)/.sitearchdir.-.event.time:
-	$(Q) $(MAKEDIRS) $(@D) $(RUBYARCHDIR)
-	$(Q) $(TOUCH) $@
-
-site-install: site-install-so site-install-rb
-site-install-so: install-so
-site-install-rb: install-rb
-
-.SUFFIXES: .c .m .cc .mm .cxx .cpp .o .S
-
-.cc.o:
-	$(ECHO) compiling $(<)
-	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
-
-.cc.S:
-	$(ECHO) translating $(<)
-	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
-
-.mm.o:
-	$(ECHO) compiling $(<)
-	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
-
-.mm.S:
-	$(ECHO) translating $(<)
-	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
-
-.cxx.o:
-	$(ECHO) compiling $(<)
-	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
-
-.cxx.S:
-	$(ECHO) translating $(<)
-	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
-
-.cpp.o:
-	$(ECHO) compiling $(<)
-	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
-
-.cpp.S:
-	$(ECHO) translating $(<)
-	$(Q) $(CXX) $(INCFLAGS) $(CPPFLAGS) $(CXXFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
-
-.c.o:
-	$(ECHO) compiling $(<)
-	$(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
-
-.c.S:
-	$(ECHO) translating $(<)
-	$(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
-
-.m.o:
-	$(ECHO) compiling $(<)
-	$(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -c $(CSRCFLAG)$<
-
-.m.S:
-	$(ECHO) translating $(<)
-	$(Q) $(CC) $(INCFLAGS) $(CPPFLAGS) $(CFLAGS) $(COUTFLAG)$@ -S $(CSRCFLAG)$<
-
-$(TARGET_SO): $(OBJS) Makefile
-	$(ECHO) linking shared-object event/$(DLLIB)
-	-$(Q)$(RM) $(@)
-	$(Q) $(LDSHARED) -o $@ $(OBJS) $(LIBPATH) $(DLDFLAGS) $(LOCAL_LIBS) $(LIBS)
-
-
-
-$(OBJS): $(HDRS) $(ruby_headers)