event 0.7.0 → 0.8.0

@@ -22,6 +22,6 @@
 
  #include <ruby.h>
 
- #define EVENT_BACKEND_KQUEUE
+ #define EVENT_SELECTOR_KQUEUE
 
- void Init_Event_Backend_KQueue(VALUE Event_Backend);
+ void Init_Event_Selector_KQueue(VALUE Event_Selector);
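Note: this release renames the extension's `Event_Backend_*` prefix to `Event_Selector_*`, so the KQueue guard macro and init entry point above change accordingly. As a rough illustration only (the gem's actual top-level init function and module layout are not shown in this diff, so the names below are assumptions), the renamed entry point would be wired up along these lines:

    // Hypothetical sketch, not part of this diff: calling the renamed init
    // function from a top-level Init_Event, guarded by the new macro name.
    void Init_Event(void) {
    	VALUE Event = rb_define_module("Event");
    	VALUE Event_Selector = rb_define_module_under(Event, "Selector");
    
    	Init_Event_Selector(Event_Selector);
    
    #ifdef EVENT_SELECTOR_KQUEUE
    	Init_Event_Selector_KQueue(Event_Selector);
    #endif
    }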
File without changes
@@ -0,0 +1,263 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "selector.h"
+ #include <fcntl.h>
+
+ static ID id_transfer, id_alive_p;
+
+ #ifndef HAVE__RB_FIBER_TRANSFER
+ VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv) {
+ return rb_funcallv(fiber, id_transfer, argc, argv);
+ }
+ #endif
+
+ #ifndef HAVE__RB_FIBER_RAISE
+ static ID id_raise;
+
+ VALUE Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv) {
+ return rb_funcallv(fiber, id_raise, argc, argv);
+ }
+ #endif
+
+ #ifndef HAVE_RB_IO_DESCRIPTOR
+ static ID id_fileno;
+
+ int Event_Selector_io_descriptor(VALUE io) {
+ return RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+ }
+ #endif
+
+ #ifndef HAVE_RB_PROCESS_STATUS_WAIT
+ static ID id_wait;
+ static VALUE rb_Process_Status = Qnil;
+
+ VALUE Event_Selector_process_status_wait(rb_pid_t pid)
+ {
+ return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(WNOHANG));
+ }
+ #endif
+
+ int Event_Selector_nonblock_set(int file_descriptor)
+ {
+ int flags = fcntl(file_descriptor, F_GETFL, 0);
+
+ if (!(flags & O_NONBLOCK)) {
+ fcntl(file_descriptor, F_SETFL, flags | O_NONBLOCK);
+ }
+
+ return flags;
+ }
+
+ void Event_Selector_nonblock_restore(int file_descriptor, int flags)
+ {
+ if (!(flags & O_NONBLOCK)) {
+ fcntl(file_descriptor, F_SETFL, flags & ~flags);
+ }
+ }
+
+ void Init_Event_Selector(VALUE Event_Selector) {
+ id_transfer = rb_intern("transfer");
+ id_alive_p = rb_intern("alive?");
+
+ #ifndef HAVE__RB_FIBER_RAISE
+ id_raise = rb_intern("raise");
+ #endif
+
+ #ifndef HAVE_RB_IO_DESCRIPTOR
+ id_fileno = rb_intern("fileno");
+ #endif
+
+ #ifndef HAVE_RB_PROCESS_STATUS_WAIT
+ id_wait = rb_intern("wait");
+ rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
+ #endif
+ }
+
+ struct wait_and_transfer_arguments {
+ int argc;
+ VALUE *argv;
+
+ struct Event_Selector *backend;
+ struct Event_Selector_Queue *waiting;
+ };
+
+ static void queue_pop(struct Event_Selector *backend, struct Event_Selector_Queue *waiting) {
+ if (waiting->behind) {
+ waiting->behind->infront = waiting->infront;
+ } else {
+ backend->waiting = waiting->infront;
+ }
+
+ if (waiting->infront) {
+ waiting->infront->behind = waiting->behind;
+ } else {
+ backend->ready = waiting->behind;
+ }
+ }
+
+ static void queue_push(struct Event_Selector *backend, struct Event_Selector_Queue *waiting) {
+ if (backend->waiting) {
+ backend->waiting->behind = waiting;
+ waiting->infront = backend->waiting;
+ } else {
+ backend->ready = waiting;
+ }
+
+ backend->waiting = waiting;
+ }
+
+ static VALUE wait_and_transfer(VALUE _arguments) {
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+ VALUE fiber = arguments->argv[0];
+ int argc = arguments->argc - 1;
+ VALUE *argv = arguments->argv + 1;
+
+ return Event_Selector_fiber_transfer(fiber, argc, argv);
+ }
+
+ static VALUE wait_and_transfer_ensure(VALUE _arguments) {
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+ queue_pop(arguments->backend, arguments->waiting);
+
+ return Qnil;
+ }
+
+ VALUE Event_Selector_wait_and_transfer(struct Event_Selector *backend, int argc, VALUE *argv)
+ {
+ rb_check_arity(argc, 1, UNLIMITED_ARGUMENTS);
+
+ struct Event_Selector_Queue waiting = {
+ .behind = NULL,
+ .infront = NULL,
+ .flags = EVENT_SELECTOR_QUEUE_FIBER,
+ .fiber = rb_fiber_current()
+ };
+
+ queue_push(backend, &waiting);
+
+ struct wait_and_transfer_arguments arguments = {
+ .argc = argc,
+ .argv = argv,
+ .backend = backend,
+ .waiting = &waiting,
+ };
+
+ return rb_ensure(wait_and_transfer, (VALUE)&arguments, wait_and_transfer_ensure, (VALUE)&arguments);
+ }
+
+ static VALUE wait_and_raise(VALUE _arguments) {
+ struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
+
+ VALUE fiber = arguments->argv[0];
+ int argc = arguments->argc - 1;
+ VALUE *argv = arguments->argv + 1;
+
+ return Event_Selector_fiber_raise(fiber, argc, argv);
+ }
+
+ VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv)
+ {
+ rb_check_arity(argc, 2, UNLIMITED_ARGUMENTS);
+
+ struct Event_Selector_Queue waiting = {
+ .behind = NULL,
+ .infront = NULL,
+ .flags = EVENT_SELECTOR_QUEUE_FIBER,
+ .fiber = rb_fiber_current()
+ };
+
+ queue_push(backend, &waiting);
+
+ struct wait_and_transfer_arguments arguments = {
+ .argc = argc,
+ .argv = argv,
+ .backend = backend,
+ .waiting = &waiting,
+ };
+
+ return rb_ensure(wait_and_raise, (VALUE)&arguments, wait_and_transfer_ensure, (VALUE)&arguments);
+ }
+
+ void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber)
+ {
+ struct Event_Selector_Queue *waiting = malloc(sizeof(struct Event_Selector_Queue));
+
+ waiting->behind = NULL;
+ waiting->infront = NULL;
+ waiting->flags = EVENT_SELECTOR_QUEUE_INTERNAL;
+ waiting->fiber = fiber;
+
+ queue_push(backend, waiting);
+ }
+
+ static inline
+ void Event_Selector_queue_pop(struct Event_Selector *backend, struct Event_Selector_Queue *ready)
+ {
+ if (ready->flags & EVENT_SELECTOR_QUEUE_FIBER) {
+ Event_Selector_fiber_transfer(ready->fiber, 0, NULL);
+ } else {
+ VALUE fiber = ready->fiber;
+ queue_pop(backend, ready);
+ free(ready);
+
+ if (RTEST(rb_funcall(fiber, id_alive_p, 0))) {
+ rb_funcall(fiber, id_transfer, 0);
+ }
+ }
+ }
+
+ int Event_Selector_queue_flush(struct Event_Selector *backend)
+ {
+ int count = 0;
+
+ // Get the current tail and head of the queue:
+ struct Event_Selector_Queue *waiting = backend->waiting;
+
+ // Process from head to tail in order:
+ // During this, more items may be appended to tail.
+ while (backend->ready) {
+ struct Event_Selector_Queue *ready = backend->ready;
+
+ count += 1;
+ Event_Selector_queue_pop(backend, ready);
+
+ if (ready == waiting) break;
+ }
+
+ return count;
+ }
+
+ void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration)
+ {
+ if ((stop->tv_nsec - start->tv_nsec) < 0) {
+ duration->tv_sec = stop->tv_sec - start->tv_sec - 1;
+ duration->tv_nsec = stop->tv_nsec - start->tv_nsec + 1000000000;
+ } else {
+ duration->tv_sec = stop->tv_sec - start->tv_sec;
+ duration->tv_nsec = stop->tv_nsec - start->tv_nsec;
+ }
+ }
+
+ void Event_Selector_current_time(struct timespec *time) {
+ clock_gettime(CLOCK_MONOTONIC, time);
+ }
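Note: the new selector.c above centralizes what was previously duplicated per backend: compile-time fallbacks for `rb_fiber_transfer`, `rb_fiber_raise`, `rb_io_descriptor` and `rb_process_status_wait`, non-blocking flag save/restore, a doubly-linked queue of fibers (`Event_Selector_queue_push` / `Event_Selector_queue_flush`), and monotonic time helpers. A minimal usage sketch of the time helpers, assuming selector.h is on the include path; the function below is illustrative and not part of the gem:

    // Illustrative only: timing a queue flush with the helpers defined above.
    #include "selector.h"
    #include <stdio.h>
    
    static void example_timed_flush(struct Event_Selector *backend) {
    	struct timespec start, stop, duration;
    
    	Event_Selector_current_time(&start);
    	int count = Event_Selector_queue_flush(backend);
    	Event_Selector_current_time(&stop);
    
    	Event_Selector_elapsed_time(&start, &stop, &duration);
    	fprintf(stderr, "flushed %d fibers in " PRINTF_TIMESPEC "s\n", count, PRINTF_TIMESPEC_ARGS(duration));
    }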
@@ -0,0 +1,127 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include <ruby.h>
+ #include <ruby/thread.h>
+ #include <ruby/io.h>
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ #include <ruby/io/buffer.h>
+ #endif
+
+ #include <time.h>
+
+ enum Event {
+ EVENT_READABLE = 1,
+ EVENT_PRIORITY = 2,
+ EVENT_WRITABLE = 4,
+ EVENT_ERROR = 8,
+ EVENT_HANGUP = 16
+ };
+
+ void Init_Event_Selector();
+
+ #ifdef HAVE__RB_FIBER_TRANSFER
+ #define Event_Selector_fiber_transfer(fiber, argc, argv) rb_fiber_transfer(fiber, argc, argv)
+ #else
+ VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv);
+ #endif
+
+ #ifdef HAVE__RB_FIBER_RAISE
+ #define Event_Selector_fiber_raise(fiber, argc, argv) rb_fiber_raise(fiber, argc, argv)
+ #else
+ VALUE Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv);
+ #endif
+
+ #ifdef HAVE_RB_IO_DESCRIPTOR
+ #define Event_Selector_io_descriptor(io) rb_io_descriptor(io)
+ #else
+ int Event_Selector_io_descriptor(VALUE io);
+ #endif
+
+ #ifdef HAVE_RB_PROCESS_STATUS_WAIT
+ #define Event_Selector_process_status_wait(pid) rb_process_status_wait(pid)
+ #else
+ VALUE Event_Selector_process_status_wait(rb_pid_t pid);
+ #endif
+
+ int Event_Selector_nonblock_set(int file_descriptor);
+ void Event_Selector_nonblock_restore(int file_descriptor, int flags);
+
+ enum Event_Selector_Queue_Flags {
+ EVENT_SELECTOR_QUEUE_FIBER = 1,
+ EVENT_SELECTOR_QUEUE_INTERNAL = 2,
+ };
+
+ struct Event_Selector_Queue {
+ struct Event_Selector_Queue *behind;
+ struct Event_Selector_Queue *infront;
+
+ enum Event_Selector_Queue_Flags flags;
+
+ VALUE fiber;
+ };
+
+ struct Event_Selector {
+ VALUE loop;
+
+ struct Event_Selector_Queue *free;
+
+ // Append to waiting.
+ struct Event_Selector_Queue *waiting;
+ // Process from ready.
+ struct Event_Selector_Queue *ready;
+ };
+
+ static inline
+ void Event_Selector_initialize(struct Event_Selector *backend, VALUE loop) {
+ backend->loop = loop;
+ backend->waiting = NULL;
+ backend->ready = NULL;
+ }
+
+ static inline
+ void Event_Selector_mark(struct Event_Selector *backend) {
+ rb_gc_mark(backend->loop);
+
+ struct Event_Selector_Queue *ready = backend->ready;
+ while (ready) {
+ rb_gc_mark(ready->fiber);
+ ready = ready->behind;
+ }
+ }
+
+ VALUE Event_Selector_wait_and_transfer(struct Event_Selector *backend, int argc, VALUE *argv);
+ VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv);
+
+ static inline
+ VALUE Event_Selector_yield(struct Event_Selector *backend)
+ {
+ return Event_Selector_wait_and_transfer(backend, 1, &backend->loop);
+ }
+
+ void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber);
+ int Event_Selector_queue_flush(struct Event_Selector *backend);
+
+ void Event_Selector_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
+ void Event_Selector_current_time(struct timespec *time);
+
+ #define PRINTF_TIMESPEC "%lld.%.9ld"
+ #define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
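Note: selector.h also declares the `Event_Selector_nonblock_set` / `Event_Selector_nonblock_restore` pair used by the selectors around system calls that must not block. A minimal sketch of the intended save/restore pattern follows; the wrapper function and its arguments are illustrative, not code from the gem:

    // Illustrative only: temporarily force O_NONBLOCK, then restore the saved flags.
    #include "selector.h"
    #include <unistd.h>
    
    static ssize_t example_nonblocking_read(int descriptor, char *buffer, size_t size) {
    	int flags = Event_Selector_nonblock_set(descriptor);
    	ssize_t result = read(descriptor, buffer, size);
    	Event_Selector_nonblock_restore(descriptor, flags);
    	return result;
    }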
@@ -19,7 +19,7 @@
  // THE SOFTWARE.
 
  #include "uring.h"
- #include "backend.h"
+ #include "selector.h"
 
  #include <liburing.h>
  #include <poll.h>
@@ -32,60 +32,60 @@ static const int DEBUG = 0;
  // This option controls whether to all `io_uring_submit()` after every operation:
  static const int EARLY_SUBMIT = 1;
 
- static VALUE Event_Backend_URing = Qnil;
+ static VALUE Event_Selector_URing = Qnil;
 
  enum {URING_ENTRIES = 64};
 
- struct Event_Backend_URing {
- struct Event_Backend backend;
+ struct Event_Selector_URing {
+ struct Event_Selector backend;
  struct io_uring ring;
  size_t pending;
  };
 
- void Event_Backend_URing_Type_mark(void *_data)
+ void Event_Selector_URing_Type_mark(void *_data)
  {
- struct Event_Backend_URing *data = _data;
- Event_Backend_mark(&data->backend);
+ struct Event_Selector_URing *data = _data;
+ Event_Selector_mark(&data->backend);
  }
 
  static
- void close_internal(struct Event_Backend_URing *data) {
+ void close_internal(struct Event_Selector_URing *data) {
  if (data->ring.ring_fd >= 0) {
  io_uring_queue_exit(&data->ring);
  data->ring.ring_fd = -1;
  }
  }
 
- void Event_Backend_URing_Type_free(void *_data)
+ void Event_Selector_URing_Type_free(void *_data)
  {
- struct Event_Backend_URing *data = _data;
+ struct Event_Selector_URing *data = _data;
 
  close_internal(data);
 
  free(data);
  }
 
- size_t Event_Backend_URing_Type_size(const void *data)
+ size_t Event_Selector_URing_Type_size(const void *data)
  {
- return sizeof(struct Event_Backend_URing);
+ return sizeof(struct Event_Selector_URing);
  }
 
- static const rb_data_type_t Event_Backend_URing_Type = {
+ static const rb_data_type_t Event_Selector_URing_Type = {
  .wrap_struct_name = "Event::Backend::URing",
  .function = {
- .dmark = Event_Backend_URing_Type_mark,
- .dfree = Event_Backend_URing_Type_free,
- .dsize = Event_Backend_URing_Type_size,
+ .dmark = Event_Selector_URing_Type_mark,
+ .dfree = Event_Selector_URing_Type_free,
+ .dsize = Event_Selector_URing_Type_size,
  },
  .data = NULL,
  .flags = RUBY_TYPED_FREE_IMMEDIATELY,
  };
 
- VALUE Event_Backend_URing_allocate(VALUE self) {
- struct Event_Backend_URing *data = NULL;
- VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_allocate(VALUE self) {
+ struct Event_Selector_URing *data = NULL;
+ VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- Event_Backend_initialize(&data->backend, Qnil);
+ Event_Selector_initialize(&data->backend, Qnil);
  data->ring.ring_fd = -1;
 
  data->pending = 0;
@@ -93,11 +93,11 @@ VALUE Event_Backend_URing_allocate(VALUE self) {
  return instance;
  }
 
- VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_initialize(VALUE self, VALUE loop) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- Event_Backend_initialize(&data->backend, loop);
+ Event_Selector_initialize(&data->backend, loop);
  int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
 
  if (result < 0) {
@@ -109,44 +109,62 @@ VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
  return self;
  }
 
- VALUE Event_Backend_URing_close(VALUE self) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_close(VALUE self) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
  close_internal(data);
 
  return Qnil;
  }
 
- VALUE Event_Backend_URing_transfer(VALUE self, VALUE fiber)
+ VALUE Event_Selector_URing_transfer(int argc, VALUE *argv, VALUE self)
  {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- Event_Backend_wait_and_transfer(&data->backend, fiber);
+ Event_Selector_wait_and_transfer(&data->backend, argc, argv);
 
  return Qnil;
  }
 
- VALUE Event_Backend_URing_defer(VALUE self)
+ VALUE Event_Selector_URing_yield(VALUE self)
  {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- Event_Backend_defer(&data->backend);
+ Event_Selector_yield(&data->backend);
 
  return Qnil;
  }
 
- VALUE Event_Backend_URing_ready_p(VALUE self) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_push(VALUE self, VALUE fiber)
+ {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ Event_Selector_queue_push(&data->backend, fiber);
+
+ return Qnil;
+ }
+
+ VALUE Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
+ {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
+
+ return Event_Selector_wait_and_raise(&data->backend, argc, argv);
+ }
+
+ VALUE Event_Selector_URing_ready_p(VALUE self) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
  return data->backend.ready ? Qtrue : Qfalse;
  }
 
  static
- int io_uring_submit_flush(struct Event_Backend_URing *data) {
+ int io_uring_submit_flush(struct Event_Selector_URing *data) {
  if (data->pending) {
  if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", data->pending);
 
@@ -167,7 +185,7 @@ int io_uring_submit_flush(struct Event_Backend_URing *data) {
  }
 
  static
- int io_uring_submit_now(struct Event_Backend_URing *data) {
+ int io_uring_submit_now(struct Event_Selector_URing *data) {
  while (true) {
  int result = io_uring_submit(&data->ring);
 
@@ -177,7 +195,7 @@ int io_uring_submit_now(struct Event_Backend_URing *data) {
  }
 
  if (result == -EBUSY || result == -EAGAIN) {
- Event_Backend_defer(&data->backend);
+ Event_Selector_yield(&data->backend);
  } else {
  rb_syserr_fail(-result, "io_uring_submit_now");
  }
@@ -185,7 +203,7 @@ int io_uring_submit_now(struct Event_Backend_URing *data) {
  }
 
  static
- void io_uring_submit_pending(struct Event_Backend_URing *data) {
+ void io_uring_submit_pending(struct Event_Selector_URing *data) {
  if (EARLY_SUBMIT) {
  io_uring_submit_now(data);
  } else {
@@ -193,7 +211,7 @@ void io_uring_submit_pending(struct Event_Backend_URing *data) {
  }
  }
 
- struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
+ struct io_uring_sqe * io_get_sqe(struct Event_Selector_URing *data) {
  struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
 
  while (sqe == NULL) {
@@ -207,7 +225,7 @@ struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
  }
  }
  struct process_wait_arguments {
- struct Event_Backend_URing *data;
+ struct Event_Selector_URing *data;
  pid_t pid;
  int flags;
  int descriptor;
@@ -217,9 +235,9 @@ static
  VALUE process_wait_transfer(VALUE _arguments) {
  struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
- Event_Backend_fiber_transfer(arguments->data->backend.loop);
+ Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
 
- return Event_Backend_process_status_wait(arguments->pid);
+ return Event_Selector_process_status_wait(arguments->pid);
  }
 
  static
@@ -231,9 +249,9 @@ VALUE process_wait_ensure(VALUE _arguments) {
  return Qnil;
  }
 
- VALUE Event_Backend_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
  struct process_wait_arguments process_wait_arguments = {
  .data = data,
@@ -246,7 +264,7 @@ VALUE Event_Backend_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE
 
  struct io_uring_sqe *sqe = io_get_sqe(data);
 
- if (DEBUG) fprintf(stderr, "Event_Backend_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
+ if (DEBUG) fprintf(stderr, "Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
  io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
  io_uring_sqe_set_data(sqe, (void*)fiber);
  io_uring_submit_pending(data);
@@ -258,9 +276,9 @@ static inline
  short poll_flags_from_events(int events) {
  short flags = 0;
 
- if (events & READABLE) flags |= POLLIN;
- if (events & PRIORITY) flags |= POLLPRI;
- if (events & WRITABLE) flags |= POLLOUT;
+ if (events & EVENT_READABLE) flags |= POLLIN;
+ if (events & EVENT_PRIORITY) flags |= POLLPRI;
+ if (events & EVENT_WRITABLE) flags |= POLLOUT;
 
  flags |= POLLERR;
  flags |= POLLHUP;
@@ -272,15 +290,15 @@ static inline
  int events_from_poll_flags(short flags) {
  int events = 0;
 
- if (flags & POLLIN) events |= READABLE;
- if (flags & POLLPRI) events |= PRIORITY;
- if (flags & POLLOUT) events |= WRITABLE;
+ if (flags & POLLIN) events |= EVENT_READABLE;
+ if (flags & POLLPRI) events |= EVENT_PRIORITY;
+ if (flags & POLLOUT) events |= EVENT_WRITABLE;
 
  return events;
  }
 
  struct io_wait_arguments {
- struct Event_Backend_URing *data;
+ struct Event_Selector_URing *data;
  VALUE fiber;
  short flags;
  };
@@ -288,24 +306,25 @@ struct io_wait_arguments {
  static
  VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
- struct Event_Backend_URing *data = arguments->data;
+ struct Event_Selector_URing *data = arguments->data;
 
  struct io_uring_sqe *sqe = io_get_sqe(data);
 
  if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
 
  io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
-
+ io_uring_submit_now(data);
+
  rb_exc_raise(exception);
  };
 
  static
  VALUE io_wait_transfer(VALUE _arguments) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
- struct Event_Backend_URing *data = arguments->data;
+ struct Event_Selector_URing *data = arguments->data;
 
- VALUE result = Event_Backend_fiber_transfer(data->backend.loop);
- if (DEBUG) fprintf(stderr, "io_wait:Event_Backend_fiber_transfer -> %d\n", RB_NUM2INT(result));
+ VALUE result = Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ if (DEBUG) fprintf(stderr, "io_wait:Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
 
  // We explicitly filter the resulting events based on the requested events.
  // In some cases, poll will report events we didn't ask for.
@@ -314,16 +333,16 @@ VALUE io_wait_transfer(VALUE _arguments) {
  return INT2NUM(events_from_poll_flags(flags));
  };
 
- VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- int descriptor = Event_Backend_io_descriptor(io);
+ int descriptor = Event_Selector_io_descriptor(io);
  struct io_uring_sqe *sqe = io_get_sqe(data);
 
  short flags = poll_flags_from_events(NUM2INT(events));
 
- if (DEBUG) fprintf(stderr, "Event_Backend_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
+ if (DEBUG) fprintf(stderr, "Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
 
  io_uring_prep_poll_add(sqe, descriptor, flags);
  io_uring_sqe_set_data(sqe, (void*)fiber);
@@ -340,7 +359,7 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
 
  #ifdef HAVE_RUBY_IO_BUFFER_H
 
- static int io_read(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ static int io_read(struct Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
  struct io_uring_sqe *sqe = io_get_sqe(data);
 
  if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);
@@ -349,17 +368,17 @@ static int io_read(struct Event_Backend_URing *data, VALUE fiber, int descriptor
  io_uring_sqe_set_data(sqe, (void*)fiber);
  io_uring_submit_pending(data);
 
- VALUE result = Event_Backend_fiber_transfer(data->backend.loop);
- if (DEBUG) fprintf(stderr, "io_read:Event_Backend_fiber_transfer -> %d\n", RB_NUM2INT(result));
+ VALUE result = Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ if (DEBUG) fprintf(stderr, "io_read:Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
 
  return RB_NUM2INT(result);
  }
 
- VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- int descriptor = Event_Backend_io_descriptor(io);
+ int descriptor = Event_Selector_io_descriptor(io);
 
  void *base;
  size_t size;
@@ -379,7 +398,7 @@ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffe
  if ((size_t)result >= length) break;
  length -= result;
  } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
- Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(READABLE));
+ Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(EVENT_READABLE));
  } else {
  rb_syserr_fail(-result, strerror(-result));
  }
@@ -389,7 +408,7 @@ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffe
  }
 
  static
- int io_write(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ int io_write(struct Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
  struct io_uring_sqe *sqe = io_get_sqe(data);
 
  if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
@@ -398,17 +417,17 @@ int io_write(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char
  io_uring_sqe_set_data(sqe, (void*)fiber);
  io_uring_submit_pending(data);
 
- int result = RB_NUM2INT(Event_Backend_fiber_transfer(data->backend.loop));
- if (DEBUG) fprintf(stderr, "io_write:Event_Backend_fiber_transfer -> %d\n", result);
+ int result = RB_NUM2INT(Event_Selector_fiber_transfer(data->backend.loop, 0, NULL));
+ if (DEBUG) fprintf(stderr, "io_write:Event_Selector_fiber_transfer -> %d\n", result);
 
  return result;
  }
 
- VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- int descriptor = Event_Backend_io_descriptor(io);
+ int descriptor = Event_Selector_io_descriptor(io);
 
  const void *base;
  size_t size;
@@ -429,7 +448,7 @@ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buff
  if ((size_t)result >= length) break;
  length -= result;
  } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
- Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(WRITABLE));
+ Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(EVENT_WRITABLE));
  } else {
  rb_syserr_fail(-result, strerror(-result));
  }
@@ -442,11 +461,11 @@ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buff
 
  static const int ASYNC_CLOSE = 2;
 
- VALUE Event_Backend_URing_io_close(VALUE self, VALUE io) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_io_close(VALUE self, VALUE io) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- int descriptor = Event_Backend_io_descriptor(io);
+ int descriptor = Event_Selector_io_descriptor(io);
 
  if (ASYNC_CLOSE) {
  struct io_uring_sqe *sqe = io_get_sqe(data);
@@ -497,7 +516,7 @@ int timeout_nonblocking(struct __kernel_timespec *timespec) {
  }
 
  struct select_arguments {
- struct Event_Backend_URing *data;
+ struct Event_Selector_URing *data;
 
  int result;
 
@@ -555,7 +574,7 @@ unsigned select_process_completions(struct io_uring *ring) {
 
  io_uring_cq_advance(ring, 1);
 
- Event_Backend_fiber_transfer_result(fiber, result);
+ Event_Selector_fiber_transfer(fiber, 1, &result);
  }
 
  // io_uring_cq_advance(ring, completed);
@@ -565,11 +584,11 @@ unsigned select_process_completions(struct io_uring *ring) {
  return completed;
  }
 
- VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
- struct Event_Backend_URing *data = NULL;
- TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+ VALUE Event_Selector_URing_select(VALUE self, VALUE duration) {
+ struct Event_Selector_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Selector_URing, &Event_Selector_URing_Type, data);
 
- Event_Backend_ready_pop(&data->backend);
+ Event_Selector_queue_flush(&data->backend);
 
  int result = 0;
 
@@ -603,25 +622,30 @@ VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
  return RB_INT2NUM(result);
  }
 
- void Init_Event_Backend_URing(VALUE Event_Backend) {
- Event_Backend_URing = rb_define_class_under(Event_Backend, "URing", rb_cObject);
+ void Init_Event_Selector_URing(VALUE Event_Selector) {
+ Event_Selector_URing = rb_define_class_under(Event_Selector, "URing", rb_cObject);
+
+ rb_define_alloc_func(Event_Selector_URing, Event_Selector_URing_allocate);
+ rb_define_method(Event_Selector_URing, "initialize", Event_Selector_URing_initialize, 1);
+
+ rb_define_method(Event_Selector_URing, "transfer", Event_Selector_URing_transfer, -1);
+ rb_define_method(Event_Selector_URing, "yield", Event_Selector_URing_yield, 0);
+ rb_define_method(Event_Selector_URing, "push", Event_Selector_URing_push, 1);
+ rb_define_method(Event_Selector_URing, "raise", Event_Selector_URing_raise, -1);
+
+ rb_define_method(Event_Selector_URing, "ready?", Event_Selector_URing_ready_p, 0);
 
- rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
- rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
- rb_define_method(Event_Backend_URing, "transfer", Event_Backend_URing_transfer, 1);
- rb_define_method(Event_Backend_URing, "defer", Event_Backend_URing_defer, 0);
- rb_define_method(Event_Backend_URing, "ready?", Event_Backend_URing_ready_p, 0);
- rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
- rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
+ rb_define_method(Event_Selector_URing, "select", Event_Selector_URing_select, 1);
+ rb_define_method(Event_Selector_URing, "close", Event_Selector_URing_close, 0);
 
- rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
+ rb_define_method(Event_Selector_URing, "io_wait", Event_Selector_URing_io_wait, 3);
 
  #ifdef HAVE_RUBY_IO_BUFFER_H
- rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 4);
- rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 4);
+ rb_define_method(Event_Selector_URing, "io_read", Event_Selector_URing_io_read, 4);
+ rb_define_method(Event_Selector_URing, "io_write", Event_Selector_URing_io_write, 4);
  #endif
 
- rb_define_method(Event_Backend_URing, "io_close", Event_Backend_URing_io_close, 1);
+ rb_define_method(Event_Selector_URing, "io_close", Event_Selector_URing_io_close, 1);
 
- rb_define_method(Event_Backend_URing, "process_wait", Event_Backend_URing_process_wait, 3);
+ rb_define_method(Event_Selector_URing, "process_wait", Event_Selector_URing_process_wait, 3);
  }
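Note: on the Ruby side, the URing selector's `#transfer` and the new `#raise` are now variadic (arity -1): per `Event_Selector_wait_and_transfer`, the first argument is the destination fiber and any remaining values are forwarded, while `#defer` is replaced by `#yield` and `#push`. A rough sketch of invoking the variadic method from C, purely for illustration (the wrapper name below is made up):

    // Illustrative only: the C equivalent of selector.transfer(fiber, value).
    #include <ruby.h>
    
    static VALUE example_transfer(VALUE selector, VALUE fiber, VALUE value) {
    	VALUE argv[2] = {fiber, value};
    	return rb_funcallv(selector, rb_intern("transfer"), 2, argv);
    }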