opal-up 0.0.2 → 0.0.4

Sign up to get free protection for your applications and to get access to all the features.
Files changed (81) hide show
  1. checksums.yaml +4 -4
  2. data/LICENSE +209 -0
  3. data/README.md +97 -29
  4. data/bin/up_ruby +4 -0
  5. data/bin/up_ruby_cluster +4 -0
  6. data/ext/up_ext/App.h +606 -0
  7. data/ext/up_ext/AsyncSocket.h +355 -0
  8. data/ext/up_ext/AsyncSocketData.h +87 -0
  9. data/ext/up_ext/BloomFilter.h +83 -0
  10. data/ext/up_ext/ChunkedEncoding.h +236 -0
  11. data/ext/up_ext/ClientApp.h +36 -0
  12. data/ext/up_ext/HttpContext.h +502 -0
  13. data/ext/up_ext/HttpContextData.h +56 -0
  14. data/ext/up_ext/HttpErrors.h +53 -0
  15. data/ext/up_ext/HttpParser.h +680 -0
  16. data/ext/up_ext/HttpResponse.h +578 -0
  17. data/ext/up_ext/HttpResponseData.h +95 -0
  18. data/ext/up_ext/HttpRouter.h +380 -0
  19. data/ext/up_ext/Loop.h +204 -0
  20. data/ext/up_ext/LoopData.h +112 -0
  21. data/ext/up_ext/MoveOnlyFunction.h +377 -0
  22. data/ext/up_ext/PerMessageDeflate.h +315 -0
  23. data/ext/up_ext/ProxyParser.h +163 -0
  24. data/ext/up_ext/QueryParser.h +120 -0
  25. data/ext/up_ext/TopicTree.h +363 -0
  26. data/ext/up_ext/Utilities.h +66 -0
  27. data/ext/up_ext/WebSocket.h +381 -0
  28. data/ext/up_ext/WebSocketContext.h +434 -0
  29. data/ext/up_ext/WebSocketContextData.h +109 -0
  30. data/ext/up_ext/WebSocketData.h +86 -0
  31. data/ext/up_ext/WebSocketExtensions.h +256 -0
  32. data/ext/up_ext/WebSocketHandshake.h +145 -0
  33. data/ext/up_ext/WebSocketProtocol.h +506 -0
  34. data/ext/up_ext/bsd.c +767 -0
  35. data/ext/up_ext/bsd.h +109 -0
  36. data/ext/up_ext/context.c +524 -0
  37. data/ext/up_ext/epoll_kqueue.c +458 -0
  38. data/ext/up_ext/epoll_kqueue.h +67 -0
  39. data/ext/up_ext/extconf.rb +5 -0
  40. data/ext/up_ext/internal.h +224 -0
  41. data/ext/up_ext/libusockets.h +350 -0
  42. data/ext/up_ext/libuwebsockets.cpp +1344 -0
  43. data/ext/up_ext/libuwebsockets.h +396 -0
  44. data/ext/up_ext/loop.c +386 -0
  45. data/ext/up_ext/loop_data.h +38 -0
  46. data/ext/up_ext/socket.c +231 -0
  47. data/ext/up_ext/up_ext.c +930 -0
  48. data/lib/up/bun/rack_env.rb +1 -13
  49. data/lib/up/bun/server.rb +93 -19
  50. data/lib/up/cli.rb +3 -0
  51. data/lib/up/client.rb +68 -0
  52. data/lib/up/ruby/cluster.rb +39 -0
  53. data/lib/up/ruby/cluster_cli.rb +10 -0
  54. data/lib/up/{node → ruby}/rack_cluster.rb +5 -4
  55. data/lib/up/{node → ruby}/rack_server.rb +4 -4
  56. data/lib/up/ruby/server_cli.rb +10 -0
  57. data/lib/up/u_web_socket/cluster.rb +18 -3
  58. data/lib/up/u_web_socket/server.rb +108 -15
  59. data/lib/up/version.rb +1 -1
  60. metadata +72 -30
  61. data/.gitignore +0 -5
  62. data/Gemfile +0 -2
  63. data/bin/up_node +0 -12
  64. data/bin/up_node_cluster +0 -12
  65. data/example_rack_app/Gemfile +0 -3
  66. data/example_rack_app/config.ru +0 -6
  67. data/example_rack_app/rack_app.rb +0 -5
  68. data/example_roda_app/Gemfile +0 -6
  69. data/example_roda_app/config.ru +0 -6
  70. data/example_roda_app/roda_app.rb +0 -37
  71. data/example_sinatra_app/Gemfile +0 -6
  72. data/example_sinatra_app/config.ru +0 -6
  73. data/example_sinatra_app/sinatra_app.rb +0 -7
  74. data/lib/up/node/cluster.rb +0 -39
  75. data/lib/up/node/cluster_cli.rb +0 -15
  76. data/lib/up/node/rack_env.rb +0 -106
  77. data/lib/up/node/server.rb +0 -84
  78. data/lib/up/node/server_cli.rb +0 -15
  79. data/lib/up/u_web_socket/rack_env.rb +0 -101
  80. data/opal-up.gemspec +0 -27
  81. data/up_logo.svg +0 -256
@@ -0,0 +1,458 @@
1
/*
 * Authored by Alex Hultman, 2018-2019.
 * Intellectual property of third-party.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17
+
18
+ #include "libusockets.h"
19
+ #include "internal.h"
20
+ #include <stdlib.h>
21
+
22
+ #if defined(LIBUS_USE_EPOLL) || defined(LIBUS_USE_KQUEUE)
23
+
24
+ /* Cannot include this one on Windows */
25
+ #include <unistd.h>
26
+
27
+ #ifdef LIBUS_USE_EPOLL
28
+ #define GET_READY_POLL(loop, index) (struct us_poll_t *) loop->ready_polls[index].data.ptr
29
+ #define SET_READY_POLL(loop, index, poll) loop->ready_polls[index].data.ptr = poll
30
+ #else
31
+ #define GET_READY_POLL(loop, index) (struct us_poll_t *) loop->ready_polls[index].udata
32
+ #define SET_READY_POLL(loop, index, poll) loop->ready_polls[index].udata = poll
33
+ #endif
34
+
35
+ /* Loop */
36
+ void us_loop_free(struct us_loop_t *loop) {
37
+ us_internal_loop_data_free(loop);
38
+ close(loop->fd);
39
+ free(loop);
40
+ }
41
+
42
+ /* Poll */
43
+ struct us_poll_t *us_create_poll(struct us_loop_t *loop, int fallthrough, unsigned int ext_size) {
44
+ if (!fallthrough) {
45
+ loop->num_polls++;
46
+ }
47
+ return malloc(sizeof(struct us_poll_t) + ext_size);
48
+ }
49
+
50
+ /* Todo: this one should be us_internal_poll_free */
51
+ void us_poll_free(struct us_poll_t *p, struct us_loop_t *loop) {
52
+ loop->num_polls--;
53
+ free(p);
54
+ }
55
+
56
+ void *us_poll_ext(struct us_poll_t *p) {
57
+ return p + 1;
58
+ }
59
+
60
+ /* Todo: why have us_poll_create AND us_poll_init!? libuv legacy! */
61
+ void us_poll_init(struct us_poll_t *p, LIBUS_SOCKET_DESCRIPTOR fd, int poll_type) {
62
+ p->state.fd = fd;
63
+ p->state.poll_type = poll_type;
64
+ }
65
+
66
+ int us_poll_events(struct us_poll_t *p) {
67
+ return ((p->state.poll_type & POLL_TYPE_POLLING_IN) ? LIBUS_SOCKET_READABLE : 0) | ((p->state.poll_type & POLL_TYPE_POLLING_OUT) ? LIBUS_SOCKET_WRITABLE : 0);
68
+ }
69
+
70
+ LIBUS_SOCKET_DESCRIPTOR us_poll_fd(struct us_poll_t *p) {
71
+ return p->state.fd;
72
+ }
73
+
74
+ /* Returns any of listen socket, socket, shut down socket or callback */
75
+ int us_internal_poll_type(struct us_poll_t *p) {
76
+ return p->state.poll_type & 3;
77
+ }
78
+
79
+ /* Bug: doesn't really SET, rather read and change, so needs to be inited first! */
80
+ void us_internal_poll_set_type(struct us_poll_t *p, int poll_type) {
81
+ p->state.poll_type = poll_type | (p->state.poll_type & 12);
82
+ }
83
+
84
+ /* Timer */
85
+ void *us_timer_ext(struct us_timer_t *timer) {
86
+ return ((struct us_internal_callback_t *) timer) + 1;
87
+ }
88
+
89
+ struct us_loop_t *us_timer_loop(struct us_timer_t *t) {
90
+ struct us_internal_callback_t *internal_cb = (struct us_internal_callback_t *) t;
91
+
92
+ return internal_cb->loop;
93
+ }
94
+
95
+ /* Loop */
96
+ struct us_loop_t *us_create_loop(void *hint, void (*wakeup_cb)(struct us_loop_t *loop), void (*pre_cb)(struct us_loop_t *loop), void (*post_cb)(struct us_loop_t *loop), unsigned int ext_size) {
97
+ struct us_loop_t *loop = (struct us_loop_t *) malloc(sizeof(struct us_loop_t) + ext_size);
98
+ loop->num_polls = 0;
99
+ /* These could be accessed if we close a poll before starting the loop */
100
+ loop->num_ready_polls = 0;
101
+ loop->current_ready_poll = 0;
102
+
103
+ #ifdef LIBUS_USE_EPOLL
104
+ loop->fd = epoll_create1(EPOLL_CLOEXEC);
105
+ #else
106
+ loop->fd = kqueue();
107
+ #endif
108
+
109
+ us_internal_loop_data_init(loop, wakeup_cb, pre_cb, post_cb);
110
+ return loop;
111
+ }
112
+
113
+ void us_loop_run(struct us_loop_t *loop) {
114
+ us_loop_integrate(loop);
115
+
116
+ /* While we have non-fallthrough polls we shouldn't fall through */
117
+ while (loop->num_polls) {
118
+ /* Emit pre callback */
119
+ us_internal_loop_pre(loop);
120
+
121
+ /* Fetch ready polls */
122
+ #ifdef LIBUS_USE_EPOLL
123
+ loop->num_ready_polls = epoll_wait(loop->fd, loop->ready_polls, 1024, -1);
124
+ #else
125
+ loop->num_ready_polls = kevent(loop->fd, NULL, 0, loop->ready_polls, 1024, NULL);
126
+ #endif
127
+
128
+ /* Iterate ready polls, dispatching them by type */
129
+ for (loop->current_ready_poll = 0; loop->current_ready_poll < loop->num_ready_polls; loop->current_ready_poll++) {
130
+ struct us_poll_t *poll = GET_READY_POLL(loop, loop->current_ready_poll);
131
+ /* Any ready poll marked with nullptr will be ignored */
132
+ if (poll) {
133
+ #ifdef LIBUS_USE_EPOLL
134
+ int events = loop->ready_polls[loop->current_ready_poll].events;
135
+ int error = loop->ready_polls[loop->current_ready_poll].events & (EPOLLERR | EPOLLHUP);
136
+ #else
137
+ /* EVFILT_READ, EVFILT_TIME, EVFILT_USER are all mapped to LIBUS_SOCKET_READABLE */
138
+ int events = LIBUS_SOCKET_READABLE;
139
+ if (loop->ready_polls[loop->current_ready_poll].filter == EVFILT_WRITE) {
140
+ events = LIBUS_SOCKET_WRITABLE;
141
+ }
142
+ int error = loop->ready_polls[loop->current_ready_poll].flags & (EV_ERROR | EV_EOF);
143
+ #endif
144
+ /* Always filter all polls by what they actually poll for (callback polls always poll for readable) */
145
+ events &= us_poll_events(poll);
146
+ if (events || error) {
147
+ us_internal_dispatch_ready_poll(poll, error, events);
148
+ }
149
+ }
150
+ }
151
+ /* Emit post callback */
152
+ us_internal_loop_post(loop);
153
+ }
154
+ }
155
+
156
+ void us_internal_loop_update_pending_ready_polls(struct us_loop_t *loop, struct us_poll_t *old_poll, struct us_poll_t *new_poll, int old_events, int new_events) {
157
+ #ifdef LIBUS_USE_EPOLL
158
+ /* Epoll only has one ready poll per poll */
159
+ int num_entries_possibly_remaining = 1;
160
+ #else
161
+ /* Ready polls may contain same poll twice under kqueue, as one poll may hold two filters */
162
+ int num_entries_possibly_remaining = 2;//((old_events & LIBUS_SOCKET_READABLE) ? 1 : 0) + ((old_events & LIBUS_SOCKET_WRITABLE) ? 1 : 0);
163
+ #endif
164
+
165
+ /* Todo: for kqueue if we track things in us_change_poll it is possible to have a fast path with no seeking in cases of:
166
+ * current poll being us AND we only poll for one thing */
167
+
168
+ for (int i = loop->current_ready_poll; i < loop->num_ready_polls && num_entries_possibly_remaining; i++) {
169
+ if (GET_READY_POLL(loop, i) == old_poll) {
170
+
171
+ // if new events does not contain the ready events of this poll then remove (no we filter that out later on)
172
+ SET_READY_POLL(loop, i, new_poll);
173
+
174
+ num_entries_possibly_remaining--;
175
+ }
176
+ }
177
+ }
178
+
179
/* Poll */

#ifdef LIBUS_USE_KQUEUE
/* Helper function for setting or updating EVFILT_READ and EVFILT_WRITE */
int kqueue_change(int kqfd, int fd, int old_events, int new_events, void *user_data) {
    struct kevent changes[2];
    int nchanges = 0;

    /* Only touch a filter whose desired state actually differs */
    if ((old_events ^ new_events) & LIBUS_SOCKET_READABLE) {
        EV_SET(&changes[nchanges++], fd, EVFILT_READ, (new_events & LIBUS_SOCKET_READABLE) ? EV_ADD : EV_DELETE, 0, 0, user_data);
    }

    if ((old_events ^ new_events) & LIBUS_SOCKET_WRITABLE) {
        EV_SET(&changes[nchanges++], fd, EVFILT_WRITE, (new_events & LIBUS_SOCKET_WRITABLE) ? EV_ADD : EV_DELETE, 0, 0, user_data);
    }

    /* ret should be 0 in most cases (not guaranteed when removing async) */
    return kevent(kqfd, changes, nchanges, NULL, 0, NULL);
}
#endif
204
+
205
+ struct us_poll_t *us_poll_resize(struct us_poll_t *p, struct us_loop_t *loop, unsigned int ext_size) {
206
+ int events = us_poll_events(p);
207
+
208
+ struct us_poll_t *new_p = realloc(p, sizeof(struct us_poll_t) + ext_size);
209
+ if (p != new_p && events) {
210
+ #ifdef LIBUS_USE_EPOLL
211
+ /* Hack: forcefully update poll by stripping away already set events */
212
+ new_p->state.poll_type = us_internal_poll_type(new_p);
213
+ us_poll_change(new_p, loop, events);
214
+ #else
215
+ /* Forcefully update poll by resetting them with new_p as user data */
216
+ kqueue_change(loop->fd, new_p->state.fd, 0, events, new_p);
217
+ #endif
218
+
219
+ /* This is needed for epoll also (us_change_poll doesn't update the old poll) */
220
+ us_internal_loop_update_pending_ready_polls(loop, p, new_p, events, events);
221
+ }
222
+
223
+ return new_p;
224
+ }
225
+
226
+ void us_poll_start(struct us_poll_t *p, struct us_loop_t *loop, int events) {
227
+ p->state.poll_type = us_internal_poll_type(p) | ((events & LIBUS_SOCKET_READABLE) ? POLL_TYPE_POLLING_IN : 0) | ((events & LIBUS_SOCKET_WRITABLE) ? POLL_TYPE_POLLING_OUT : 0);
228
+
229
+ #ifdef LIBUS_USE_EPOLL
230
+ struct epoll_event event;
231
+ event.events = events;
232
+ event.data.ptr = p;
233
+ epoll_ctl(loop->fd, EPOLL_CTL_ADD, p->state.fd, &event);
234
+ #else
235
+ kqueue_change(loop->fd, p->state.fd, 0, events, p);
236
+ #endif
237
+ }
238
+
239
+ void us_poll_change(struct us_poll_t *p, struct us_loop_t *loop, int events) {
240
+ int old_events = us_poll_events(p);
241
+ if (old_events != events) {
242
+
243
+ p->state.poll_type = us_internal_poll_type(p) | ((events & LIBUS_SOCKET_READABLE) ? POLL_TYPE_POLLING_IN : 0) | ((events & LIBUS_SOCKET_WRITABLE) ? POLL_TYPE_POLLING_OUT : 0);
244
+
245
+ #ifdef LIBUS_USE_EPOLL
246
+ struct epoll_event event;
247
+ event.events = events;
248
+ event.data.ptr = p;
249
+ epoll_ctl(loop->fd, EPOLL_CTL_MOD, p->state.fd, &event);
250
+ #else
251
+ kqueue_change(loop->fd, p->state.fd, old_events, events, p);
252
+ #endif
253
+ /* Set all removed events to null-polls in pending ready poll list */
254
+ //us_internal_loop_update_pending_ready_polls(loop, p, p, old_events, events);
255
+ }
256
+ }
257
+
258
+ void us_poll_stop(struct us_poll_t *p, struct us_loop_t *loop) {
259
+ int old_events = us_poll_events(p);
260
+ int new_events = 0;
261
+ #ifdef LIBUS_USE_EPOLL
262
+ struct epoll_event event;
263
+ epoll_ctl(loop->fd, EPOLL_CTL_DEL, p->state.fd, &event);
264
+ #else
265
+ if (old_events) {
266
+ kqueue_change(loop->fd, p->state.fd, old_events, new_events, NULL);
267
+ }
268
+ #endif
269
+
270
+ /* Disable any instance of us in the pending ready poll list */
271
+ us_internal_loop_update_pending_ready_polls(loop, p, 0, old_events, new_events);
272
+ }
273
+
274
/* Drains a callback poll's counter (eventfd/timerfd) and returns it.
 * Under kqueue there is no underlying descriptor, so it returns 0. */
unsigned int us_internal_accept_poll_event(struct us_poll_t *p) {
#ifdef LIBUS_USE_EPOLL
    int fd = us_poll_fd(p);
    /* Zero-initialize so a failed or short read cannot make us return an
     * indeterminate value (reading uninitialized memory is UB). */
    uint64_t buf = 0;
    int read_length = read(fd, &buf, 8);
    (void) read_length;
    return buf;
#else
    /* Kqueue has no underlying FD for timers or user events */
    return 0;
#endif
}
286
+
287
+ /* Timer */
288
+ #ifdef LIBUS_USE_EPOLL
289
+ struct us_timer_t *us_create_timer(struct us_loop_t *loop, int fallthrough, unsigned int ext_size) {
290
+ struct us_poll_t *p = us_create_poll(loop, fallthrough, sizeof(struct us_internal_callback_t) + ext_size);
291
+ int timerfd = timerfd_create(CLOCK_REALTIME, TFD_NONBLOCK | TFD_CLOEXEC);
292
+ if (timerfd == -1) {
293
+ return NULL;
294
+ }
295
+ us_poll_init(p, timerfd, POLL_TYPE_CALLBACK);
296
+
297
+ struct us_internal_callback_t *cb = (struct us_internal_callback_t *) p;
298
+ cb->loop = loop;
299
+ cb->cb_expects_the_loop = 0;
300
+ cb->leave_poll_ready = 0;
301
+
302
+ return (struct us_timer_t *) cb;
303
+ }
304
+ #else
305
+ struct us_timer_t *us_create_timer(struct us_loop_t *loop, int fallthrough, unsigned int ext_size) {
306
+ struct us_internal_callback_t *cb = malloc(sizeof(struct us_internal_callback_t) + ext_size);
307
+
308
+ cb->loop = loop;
309
+ cb->cb_expects_the_loop = 0;
310
+ cb->leave_poll_ready = 0;
311
+
312
+ /* Bug: us_internal_poll_set_type does not SET the type, it only CHANGES it */
313
+ cb->p.state.poll_type = POLL_TYPE_POLLING_IN;
314
+ us_internal_poll_set_type((struct us_poll_t *) cb, POLL_TYPE_CALLBACK);
315
+
316
+ if (!fallthrough) {
317
+ loop->num_polls++;
318
+ }
319
+
320
+ return (struct us_timer_t *) cb;
321
+ }
322
+ #endif
323
+
324
+ #ifdef LIBUS_USE_EPOLL
325
+ void us_timer_close(struct us_timer_t *timer) {
326
+ struct us_internal_callback_t *cb = (struct us_internal_callback_t *) timer;
327
+
328
+ us_poll_stop(&cb->p, cb->loop);
329
+ close(us_poll_fd(&cb->p));
330
+
331
+ /* (regular) sockets are the only polls which are not freed immediately */
332
+ us_poll_free((struct us_poll_t *) timer, cb->loop);
333
+ }
334
+
335
+ void us_timer_set(struct us_timer_t *t, void (*cb)(struct us_timer_t *t), int ms, int repeat_ms) {
336
+ struct us_internal_callback_t *internal_cb = (struct us_internal_callback_t *) t;
337
+
338
+ internal_cb->cb = (void (*)(struct us_internal_callback_t *)) cb;
339
+
340
+ struct itimerspec timer_spec = {
341
+ {repeat_ms / 1000, (long) (repeat_ms % 1000) * (long) 1000000},
342
+ {ms / 1000, (long) (ms % 1000) * (long) 1000000}
343
+ };
344
+
345
+ timerfd_settime(us_poll_fd((struct us_poll_t *) t), 0, &timer_spec, NULL);
346
+ us_poll_start((struct us_poll_t *) t, internal_cb->loop, LIBUS_SOCKET_READABLE);
347
+ }
348
+ #else
349
+ void us_timer_close(struct us_timer_t *timer) {
350
+ struct us_internal_callback_t *internal_cb = (struct us_internal_callback_t *) timer;
351
+
352
+ struct kevent event;
353
+ EV_SET(&event, (uintptr_t) internal_cb, EVFILT_TIMER, EV_DELETE, 0, 0, internal_cb);
354
+ kevent(internal_cb->loop->fd, &event, 1, NULL, 0, NULL);
355
+
356
+ /* (regular) sockets are the only polls which are not freed immediately */
357
+ us_poll_free((struct us_poll_t *) timer, internal_cb->loop);
358
+ }
359
+
360
+ void us_timer_set(struct us_timer_t *t, void (*cb)(struct us_timer_t *t), int ms, int repeat_ms) {
361
+ struct us_internal_callback_t *internal_cb = (struct us_internal_callback_t *) t;
362
+
363
+ internal_cb->cb = (void (*)(struct us_internal_callback_t *)) cb;
364
+
365
+ /* Bug: repeat_ms must be the same as ms, or 0 */
366
+ struct kevent event;
367
+ EV_SET(&event, (uintptr_t) internal_cb, EVFILT_TIMER, EV_ADD | (repeat_ms ? 0 : EV_ONESHOT), 0, ms, internal_cb);
368
+ kevent(internal_cb->loop->fd, &event, 1, NULL, 0, NULL);
369
+ }
370
+ #endif
371
+
372
+ /* Async (internal helper for loop's wakeup feature) */
373
+ #ifdef LIBUS_USE_EPOLL
374
+ struct us_internal_async *us_internal_create_async(struct us_loop_t *loop, int fallthrough, unsigned int ext_size) {
375
+ struct us_poll_t *p = us_create_poll(loop, fallthrough, sizeof(struct us_internal_callback_t) + ext_size);
376
+ us_poll_init(p, eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC), POLL_TYPE_CALLBACK);
377
+
378
+ struct us_internal_callback_t *cb = (struct us_internal_callback_t *) p;
379
+ cb->loop = loop;
380
+ cb->cb_expects_the_loop = 1;
381
+ cb->leave_poll_ready = 0;
382
+
383
+ return (struct us_internal_async *) cb;
384
+ }
385
+
386
+ // identical code as for timer, make it shared for "callback types"
387
+ void us_internal_async_close(struct us_internal_async *a) {
388
+ struct us_internal_callback_t *cb = (struct us_internal_callback_t *) a;
389
+
390
+ us_poll_stop(&cb->p, cb->loop);
391
+ close(us_poll_fd(&cb->p));
392
+
393
+ /* (regular) sockets are the only polls which are not freed immediately */
394
+ us_poll_free((struct us_poll_t *) a, cb->loop);
395
+ }
396
+
397
+ void us_internal_async_set(struct us_internal_async *a, void (*cb)(struct us_internal_async *)) {
398
+ struct us_internal_callback_t *internal_cb = (struct us_internal_callback_t *) a;
399
+
400
+ internal_cb->cb = (void (*)(struct us_internal_callback_t *)) cb;
401
+
402
+ us_poll_start((struct us_poll_t *) a, internal_cb->loop, LIBUS_SOCKET_READABLE);
403
+ }
404
+
405
+ void us_internal_async_wakeup(struct us_internal_async *a) {
406
+ uint64_t one = 1;
407
+ int written = write(us_poll_fd((struct us_poll_t *) a), &one, 8);
408
+ (void)written;
409
+ }
410
+ #else
411
+ struct us_internal_async *us_internal_create_async(struct us_loop_t *loop, int fallthrough, unsigned int ext_size) {
412
+ struct us_internal_callback_t *cb = malloc(sizeof(struct us_internal_callback_t) + ext_size);
413
+
414
+ cb->loop = loop;
415
+ cb->cb_expects_the_loop = 1;
416
+ cb->leave_poll_ready = 0;
417
+
418
+ /* Bug: us_internal_poll_set_type does not SET the type, it only CHANGES it */
419
+ cb->p.state.poll_type = POLL_TYPE_POLLING_IN;
420
+ us_internal_poll_set_type((struct us_poll_t *) cb, POLL_TYPE_CALLBACK);
421
+
422
+ if (!fallthrough) {
423
+ loop->num_polls++;
424
+ }
425
+
426
+ return (struct us_internal_async *) cb;
427
+ }
428
+
429
+ // identical code as for timer, make it shared for "callback types"
430
+ void us_internal_async_close(struct us_internal_async *a) {
431
+ struct us_internal_callback_t *internal_cb = (struct us_internal_callback_t *) a;
432
+
433
+ /* Note: This will fail most of the time as there probably is no pending trigger */
434
+ struct kevent event;
435
+ EV_SET(&event, (uintptr_t) internal_cb, EVFILT_USER, EV_DELETE, 0, 0, internal_cb);
436
+ kevent(internal_cb->loop->fd, &event, 1, NULL, 0, NULL);
437
+
438
+ /* (regular) sockets are the only polls which are not freed immediately */
439
+ us_poll_free((struct us_poll_t *) a, internal_cb->loop);
440
+ }
441
+
442
+ void us_internal_async_set(struct us_internal_async *a, void (*cb)(struct us_internal_async *)) {
443
+ struct us_internal_callback_t *internal_cb = (struct us_internal_callback_t *) a;
444
+
445
+ internal_cb->cb = (void (*)(struct us_internal_callback_t *)) cb;
446
+ }
447
+
448
+ void us_internal_async_wakeup(struct us_internal_async *a) {
449
+ struct us_internal_callback_t *internal_cb = (struct us_internal_callback_t *) a;
450
+
451
+ /* In kqueue you really only need to add a triggered oneshot event */
452
+ struct kevent event;
453
+ EV_SET(&event, (uintptr_t) internal_cb, EVFILT_USER, EV_ADD | EV_ONESHOT, NOTE_TRIGGER, 0, internal_cb);
454
+ kevent(internal_cb->loop->fd, &event, 1, NULL, 0, NULL);
455
+ }
456
+ #endif
457
+
458
+ #endif
@@ -0,0 +1,67 @@
1
/*
 * Authored by Alex Hultman, 2018-2019.
 * Intellectual property of third-party.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17
+
18
+ #ifndef EPOLL_KQUEUE_H
19
+ #define EPOLL_KQUEUE_H
20
+
21
+ #include "loop_data.h"
22
+
23
+ #ifdef LIBUS_USE_EPOLL
24
+ #include <sys/epoll.h>
25
+ #include <sys/timerfd.h>
26
+ #include <sys/eventfd.h>
27
+ #define LIBUS_SOCKET_READABLE EPOLLIN
28
+ #define LIBUS_SOCKET_WRITABLE EPOLLOUT
29
+ #else
30
+ #include <sys/event.h>
31
+ /* Kqueue's EVFILT_ is NOT a bitfield, you cannot OR together them.
32
+ * We therefore have our own bitfield we then translate in every call */
33
+ #define LIBUS_SOCKET_READABLE 1
34
+ #define LIBUS_SOCKET_WRITABLE 2
35
+ #endif
36
+
37
+ struct us_loop_t {
38
+ alignas(LIBUS_EXT_ALIGNMENT) struct us_internal_loop_data_t data;
39
+
40
+ /* Number of non-fallthrough polls in the loop */
41
+ int num_polls;
42
+
43
+ /* Number of ready polls this iteration */
44
+ int num_ready_polls;
45
+
46
+ /* Current index in list of ready polls */
47
+ int current_ready_poll;
48
+
49
+ /* Loop's own file descriptor */
50
+ int fd;
51
+
52
+ /* The list of ready polls */
53
+ #ifdef LIBUS_USE_EPOLL
54
+ struct epoll_event ready_polls[1024];
55
+ #else
56
+ struct kevent ready_polls[1024];
57
+ #endif
58
+ };
59
+
60
+ struct us_poll_t {
61
+ alignas(LIBUS_EXT_ALIGNMENT) struct {
62
+ signed int fd : 28; // we could have this unsigned if we wanted to, -1 should never be used
63
+ unsigned int poll_type : 4;
64
+ } state;
65
+ };
66
+
67
+ #endif // EPOLL_KQUEUE_H
@@ -0,0 +1,5 @@
1
# Build script for the up_ext native extension.
require 'mkmf'

# Optimize, enable warnings, and build usockets without TLS support.
$CFLAGS << ' -O3 -Wall -DLIBUS_NO_SSL '

create_makefile('up_ext')