opal-up 0.0.2 → 0.0.4
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/LICENSE +209 -0
- data/README.md +97 -29
- data/bin/up_ruby +4 -0
- data/bin/up_ruby_cluster +4 -0
- data/ext/up_ext/App.h +606 -0
- data/ext/up_ext/AsyncSocket.h +355 -0
- data/ext/up_ext/AsyncSocketData.h +87 -0
- data/ext/up_ext/BloomFilter.h +83 -0
- data/ext/up_ext/ChunkedEncoding.h +236 -0
- data/ext/up_ext/ClientApp.h +36 -0
- data/ext/up_ext/HttpContext.h +502 -0
- data/ext/up_ext/HttpContextData.h +56 -0
- data/ext/up_ext/HttpErrors.h +53 -0
- data/ext/up_ext/HttpParser.h +680 -0
- data/ext/up_ext/HttpResponse.h +578 -0
- data/ext/up_ext/HttpResponseData.h +95 -0
- data/ext/up_ext/HttpRouter.h +380 -0
- data/ext/up_ext/Loop.h +204 -0
- data/ext/up_ext/LoopData.h +112 -0
- data/ext/up_ext/MoveOnlyFunction.h +377 -0
- data/ext/up_ext/PerMessageDeflate.h +315 -0
- data/ext/up_ext/ProxyParser.h +163 -0
- data/ext/up_ext/QueryParser.h +120 -0
- data/ext/up_ext/TopicTree.h +363 -0
- data/ext/up_ext/Utilities.h +66 -0
- data/ext/up_ext/WebSocket.h +381 -0
- data/ext/up_ext/WebSocketContext.h +434 -0
- data/ext/up_ext/WebSocketContextData.h +109 -0
- data/ext/up_ext/WebSocketData.h +86 -0
- data/ext/up_ext/WebSocketExtensions.h +256 -0
- data/ext/up_ext/WebSocketHandshake.h +145 -0
- data/ext/up_ext/WebSocketProtocol.h +506 -0
- data/ext/up_ext/bsd.c +767 -0
- data/ext/up_ext/bsd.h +109 -0
- data/ext/up_ext/context.c +524 -0
- data/ext/up_ext/epoll_kqueue.c +458 -0
- data/ext/up_ext/epoll_kqueue.h +67 -0
- data/ext/up_ext/extconf.rb +5 -0
- data/ext/up_ext/internal.h +224 -0
- data/ext/up_ext/libusockets.h +350 -0
- data/ext/up_ext/libuwebsockets.cpp +1344 -0
- data/ext/up_ext/libuwebsockets.h +396 -0
- data/ext/up_ext/loop.c +386 -0
- data/ext/up_ext/loop_data.h +38 -0
- data/ext/up_ext/socket.c +231 -0
- data/ext/up_ext/up_ext.c +930 -0
- data/lib/up/bun/rack_env.rb +1 -13
- data/lib/up/bun/server.rb +93 -19
- data/lib/up/cli.rb +3 -0
- data/lib/up/client.rb +68 -0
- data/lib/up/ruby/cluster.rb +39 -0
- data/lib/up/ruby/cluster_cli.rb +10 -0
- data/lib/up/{node → ruby}/rack_cluster.rb +5 -4
- data/lib/up/{node → ruby}/rack_server.rb +4 -4
- data/lib/up/ruby/server_cli.rb +10 -0
- data/lib/up/u_web_socket/cluster.rb +18 -3
- data/lib/up/u_web_socket/server.rb +108 -15
- data/lib/up/version.rb +1 -1
- metadata +72 -30
- data/.gitignore +0 -5
- data/Gemfile +0 -2
- data/bin/up_node +0 -12
- data/bin/up_node_cluster +0 -12
- data/example_rack_app/Gemfile +0 -3
- data/example_rack_app/config.ru +0 -6
- data/example_rack_app/rack_app.rb +0 -5
- data/example_roda_app/Gemfile +0 -6
- data/example_roda_app/config.ru +0 -6
- data/example_roda_app/roda_app.rb +0 -37
- data/example_sinatra_app/Gemfile +0 -6
- data/example_sinatra_app/config.ru +0 -6
- data/example_sinatra_app/sinatra_app.rb +0 -7
- data/lib/up/node/cluster.rb +0 -39
- data/lib/up/node/cluster_cli.rb +0 -15
- data/lib/up/node/rack_env.rb +0 -106
- data/lib/up/node/server.rb +0 -84
- data/lib/up/node/server_cli.rb +0 -15
- data/lib/up/u_web_socket/rack_env.rb +0 -101
- data/opal-up.gemspec +0 -27
- data/up_logo.svg +0 -256
data/ext/up_ext/loop.c
ADDED
@@ -0,0 +1,386 @@
|
|
1
|
+
/*
|
2
|
+
* Authored by Alex Hultman, 2018-2021.
|
3
|
+
* Intellectual property of third-party.
|
4
|
+
|
5
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
6
|
+
* you may not use this file except in compliance with the License.
|
7
|
+
* You may obtain a copy of the License at
|
8
|
+
|
9
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
10
|
+
|
11
|
+
* Unless required by applicable law or agreed to in writing, software
|
12
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
13
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14
|
+
* See the License for the specific language governing permissions and
|
15
|
+
* limitations under the License.
|
16
|
+
*/
|
17
|
+
|
18
|
+
#ifndef LIBUS_USE_IO_URING
|
19
|
+
|
20
|
+
#include "libusockets.h"
|
21
|
+
#include "internal.h"
|
22
|
+
#include <stdlib.h>
|
23
|
+
|
24
|
+
/* The loop has 2 fallthrough polls */

/* Initialize the per-loop shared data: sweep timer, receive buffer, context list
 * heads, pre/post iteration callbacks and the wakeup async. Called once per loop. */
void us_internal_loop_data_init(struct us_loop_t *loop, void (*wakeup_cb)(struct us_loop_t *loop),
    void (*pre_cb)(struct us_loop_t *loop), void (*post_cb)(struct us_loop_t *loop)) {
    loop->data.sweep_timer = us_create_timer(loop, 1, 0);
    /* One shared receive buffer per loop, padded on both sides (padding is used by
     * upper layers; see LIBUS_RECV_BUFFER_PADDING usages in the read path).
     * NOTE(review): malloc result is not checked here - a failed allocation would
     * crash later in bsd_recv; consider verifying against upstream before changing. */
    loop->data.recv_buf = malloc(LIBUS_RECV_BUFFER_LENGTH + LIBUS_RECV_BUFFER_PADDING * 2);
    loop->data.ssl_data = 0;
    loop->data.head = 0;
    loop->data.iterator = 0;
    loop->data.closed_head = 0;
    loop->data.low_prio_head = 0;
    loop->data.low_prio_budget = 0;

    loop->data.pre_cb = pre_cb;
    loop->data.post_cb = post_cb;
    loop->data.iteration_nr = 0;

    loop->data.wakeup_async = us_internal_create_async(loop, 1, 0);
    us_internal_async_set(loop->data.wakeup_async, (void (*)(struct us_internal_async *)) wakeup_cb);
}

/* Tear down everything us_internal_loop_data_init created (plus any SSL data
 * lazily attached to the loop). Must mirror the init above. */
void us_internal_loop_data_free(struct us_loop_t *loop) {
#ifndef LIBUS_NO_SSL
    us_internal_free_loop_ssl_data(loop);
#endif

    free(loop->data.recv_buf);

    us_timer_close(loop->data.sweep_timer);
    us_internal_async_close(loop->data.wakeup_async);
}
|
54
|
+
|
55
|
+
/* Wake the loop from another thread by firing its wakeup async;
 * the callback registered in us_internal_loop_data_init will run on the loop thread. */
void us_wakeup_loop(struct us_loop_t *loop) {
    us_internal_async_wakeup(loop->data.wakeup_async);
}
|
58
|
+
|
59
|
+
/* Insert a socket context at the head of the loop's doubly-linked context list. */
void us_internal_loop_link(struct us_loop_t *loop, struct us_socket_context_t *context) {
    /* Insert this context as the head of loop */
    context->next = loop->data.head;
    context->prev = 0;
    if (loop->data.head) {
        loop->data.head->prev = context;
    }
    loop->data.head = context;
}
|
68
|
+
|
69
|
+
/* Unlink is called before free */

/* Remove a socket context from the loop's doubly-linked context list.
 * Assumes the context is currently linked into this loop. */
void us_internal_loop_unlink(struct us_loop_t *loop, struct us_socket_context_t *context) {
    if (loop->data.head == context) {
        loop->data.head = context->next;
        if (loop->data.head) {
            loop->data.head->prev = 0;
        }
    } else {
        context->prev->next = context->next;
        if (context->next) {
            context->next->prev = context->prev;
        }
    }
}
|
83
|
+
|
84
|
+
/* This functions should never run recursively */

/* Periodic timeout sweep driven by the loop's sweep timer: walks every context,
 * advances the context clocks (short ticks wrap at 240, long ticks are 1/15 rate),
 * and fires on_socket_timeout / on_socket_long_timeout on matching sockets.
 * loop_data->iterator and context->iterator exist so that event handlers may
 * unlink/relink sockets mid-sweep without corrupting the traversal. */
void us_internal_timer_sweep(struct us_loop_t *loop) {
    struct us_internal_loop_data_t *loop_data = &loop->data;
    /* For all socket contexts in this loop */
    for (loop_data->iterator = loop_data->head; loop_data->iterator; loop_data->iterator = loop_data->iterator->next) {

        struct us_socket_context_t *context = loop_data->iterator;

        /* Update this context's timestamps (this could be moved to loop and done once) */
        context->global_tick++;
        unsigned char short_ticks = context->timestamp = context->global_tick % 240;
        unsigned char long_ticks = context->long_timestamp = (context->global_tick / 15) % 240;

        /* Begin at head */
        struct us_socket_t *s = context->head_sockets;
        while (s) {
            /* Seek until end or timeout found (tightest loop) */
            while (1) {
                /* We only read from 1 random cache line here */
                if (short_ticks == s->timeout || long_ticks == s->long_timeout) {
                    break;
                }

                /* Did we reach the end without a find? */
                if ((s = s->next) == 0) {
                    goto next_context;
                }
            }

            /* Here we have a timeout to emit (slow path) */
            context->iterator = s;

            /* 255 is the "no timeout" sentinel; clear before emitting so the
             * same timeout does not fire again next sweep. */
            if (short_ticks == s->timeout) {
                s->timeout = 255;
                context->on_socket_timeout(s);
            }

            /* Only emit the long timeout if the handler above did not close or
             * move this socket (iterator still points at s). */
            if (context->iterator == s && long_ticks == s->long_timeout) {
                s->long_timeout = 255;
                context->on_socket_long_timeout(s);
            }

            /* Check for unlink / link (if the event handler did not modify the chain, we step 1) */
            if (s == context->iterator) {
                s = s->next;
            } else {
                /* The iterator was changed by event handler */
                s = context->iterator;
            }
        }
        /* We always store a 0 to context->iterator here since we are no longer iterating this context */
next_context:
        context->iterator = 0;
    }
}
|
139
|
+
|
140
|
+
/* We do not want to block the loop with tons and tons of CPU-intensive work for SSL handshakes.
 * Spread it out during many loop iterations, prioritizing already open connections, they are far
 * easier on CPU */
static const int MAX_LOW_PRIO_SOCKETS_PER_LOOP_ITERATION = 5;

/* Runs in the loop's pre phase: resets the per-iteration budget, then moves up to
 * budget-many sockets off the low-priority LIFO queue back into their context,
 * re-arming them for readable events. low_prio_state 2 = "delayed, process next read". */
void us_internal_handle_low_priority_sockets(struct us_loop_t *loop) {
    struct us_internal_loop_data_t *loop_data = &loop->data;
    struct us_socket_t *s;

    loop_data->low_prio_budget = MAX_LOW_PRIO_SOCKETS_PER_LOOP_ITERATION;

    /* Always pop from the head; the queue shrinks as we unlink. */
    for (s = loop_data->low_prio_head; s && loop_data->low_prio_budget > 0; s = loop_data->low_prio_head, loop_data->low_prio_budget--) {
        /* Unlink this socket from the low-priority queue */
        loop_data->low_prio_head = s->next;
        if (s->next) s->next->prev = 0;
        s->next = 0;

        us_internal_socket_context_link_socket(s->context, s);
        us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) | LIBUS_SOCKET_READABLE);

        s->low_prio_state = 2;
    }
}
|
163
|
+
|
164
|
+
/* Note: Properly takes the linked list and timeout sweep into account */

/* Runs in the loop's post phase: frees every socket that was placed on the
 * closed list during this iteration (us_socket_close only unlinks and defers
 * the actual free to here, so handlers never see dangling pointers mid-iteration). */
void us_internal_free_closed_sockets(struct us_loop_t *loop) {
    /* Free all closed sockets (maybe it is better to reverse order?) */
    if (loop->data.closed_head) {
        for (struct us_socket_t *s = loop->data.closed_head; s; ) {
            struct us_socket_t *next = s->next;
            us_poll_free((struct us_poll_t *) s, loop);
            s = next;
        }
        loop->data.closed_head = 0;
    }
}
|
176
|
+
|
177
|
+
/* Sweep-timer callback: delegates to the timeout sweep for this loop. */
void sweep_timer_cb(struct us_internal_callback_t *cb) {
    us_internal_timer_sweep(cb->loop);
}

/* Monotonically increasing count of loop iterations (incremented in pre phase). */
long long us_loop_iteration_number(struct us_loop_t *loop) {
    return loop->data.iteration_nr;
}
|
184
|
+
|
185
|
+
/* These may have somewhat different meaning depending on the underlying event library */

/* Pre-iteration hook: bump the iteration counter, drain part of the
 * low-priority queue, then invoke the user's pre callback. */
void us_internal_loop_pre(struct us_loop_t *loop) {
    loop->data.iteration_nr++;
    us_internal_handle_low_priority_sockets(loop);
    loop->data.pre_cb(loop);
}

/* Post-iteration hook: free sockets closed during this iteration, then
 * invoke the user's post callback. */
void us_internal_loop_post(struct us_loop_t *loop) {
    us_internal_free_closed_sockets(loop);
    loop->data.post_cb(loop);
}
|
196
|
+
|
197
|
+
/* Wrap an already-accepted fd in a us_socket_t belonging to the given context:
 * allocates the poll + socket + extension area in one block, starts polling for
 * readable, enables TCP nodelay, links the socket into the context and emits
 * on_open (is_client = 0). For SSL contexts this delegates to the SSL layer. */
struct us_socket_t *us_adopt_accepted_socket(int ssl, struct us_socket_context_t *context, LIBUS_SOCKET_DESCRIPTOR accepted_fd,
    unsigned int socket_ext_size, char *addr_ip, int addr_ip_length) {
#ifndef LIBUS_NO_SSL
    if (ssl) {
        return (struct us_socket_t *)us_internal_ssl_adopt_accepted_socket((struct us_internal_ssl_socket_context_t *)context, accepted_fd,
            socket_ext_size, addr_ip, addr_ip_length);
    }
#endif
    /* us_socket_t embeds us_poll_t as its first member, so the allocation is
     * sized for poll + socket tail + user extension. */
    struct us_poll_t *accepted_p = us_create_poll(context->loop, 0, sizeof(struct us_socket_t) - sizeof(struct us_poll_t) + socket_ext_size);
    us_poll_init(accepted_p, accepted_fd, POLL_TYPE_SOCKET);
    us_poll_start(accepted_p, context->loop, LIBUS_SOCKET_READABLE);

    struct us_socket_t *s = (struct us_socket_t *) accepted_p;

    s->context = context;
    /* 255 = "no timeout" sentinel for both clocks */
    s->timeout = 255;
    s->long_timeout = 255;
    s->low_prio_state = 0;

    /* We always use nodelay */
    bsd_socket_nodelay(accepted_fd, 1);

    us_internal_socket_context_link_socket(context, s);

    context->on_open(s, 0, addr_ip, addr_ip_length);
    return s;
}
|
224
|
+
|
225
|
+
/* Central event dispatcher: called by the platform event loop (epoll/kqueue)
 * for every ready poll. Routes by poll type:
 *   - POLL_TYPE_CALLBACK: timers/asyncs; drain the event and invoke the callback.
 *   - POLL_TYPE_SEMI_SOCKET: connecting sockets (writable = connect result) and
 *     listen sockets (readable = accept loop).
 *   - POLL_TYPE_SOCKET / SOCKET_SHUT_DOWN: established sockets; writable and
 *     readable handling including the low-priority (SSL handshake) throttle. */
void us_internal_dispatch_ready_poll(struct us_poll_t *p, int error, int events) {
    switch (us_internal_poll_type(p)) {
    case POLL_TYPE_CALLBACK: {
            struct us_internal_callback_t *cb = (struct us_internal_callback_t *) p;
            /* Timers, asyncs should accept (read), while UDP sockets should obviously not */
            if (!cb->leave_poll_ready) {
                /* Let's just have this macro to silence the CodeQL alert regarding empty function when using libuv */
#ifndef LIBUS_USE_LIBUV
                us_internal_accept_poll_event(p);
#endif
            }
            cb->cb(cb->cb_expects_the_loop ? (struct us_internal_callback_t *) cb->loop : (struct us_internal_callback_t *) &cb->p);
        }
        break;
    case POLL_TYPE_SEMI_SOCKET: {
            /* Both connect and listen sockets are semi-sockets
             * but they poll for different events */
            if (us_poll_events(p) == LIBUS_SOCKET_WRITABLE) {
                /* Connecting socket: writable means the connect attempt resolved. */
                struct us_socket_t *s = (struct us_socket_t *) p;

                /* It is perfectly possible to come here with an error */
                if (error) {
                    /* Emit error, close without emitting on_close */
                    s->context->on_connect_error(s, 0);
                    us_socket_close_connecting(0, s);
                } else {
                    /* All sockets poll for readable */
                    us_poll_change(p, s->context->loop, LIBUS_SOCKET_READABLE);

                    /* We always use nodelay */
                    bsd_socket_nodelay(us_poll_fd(p), 1);

                    /* We are now a proper socket */
                    us_internal_poll_set_type(p, POLL_TYPE_SOCKET);

                    /* If we used a connection timeout we have to reset it here */
                    us_socket_timeout(0, s, 0);

                    s->context->on_open(s, 1, 0, 0);
                }
            } else {
                /* Listen socket: accept as many connections as are pending. */
                struct us_listen_socket_t *listen_socket = (struct us_listen_socket_t *) p;
                struct bsd_addr_t addr;

                LIBUS_SOCKET_DESCRIPTOR client_fd = bsd_accept_socket(us_poll_fd(p), &addr);
                if (client_fd == LIBUS_SOCKET_ERROR) {
                    /* Todo: start timer here */
                } else {

                    /* Todo: stop timer if any */

                    do {
                        struct us_socket_context_t *context = us_socket_context(0, &listen_socket->s);
                        /* See if we want to export the FD or keep it here (this event can be unset) */
                        if (context->on_pre_open == 0 || context->on_pre_open(client_fd) == client_fd) {

                            /* Adopt the newly accepted socket */
                            us_adopt_accepted_socket(0, context,
                                client_fd, listen_socket->socket_ext_size, bsd_addr_get_ip(&addr), bsd_addr_get_ip_length(&addr));

                            /* Exit accept loop if listen socket was closed in on_open handler */
                            if (us_socket_is_closed(0, &listen_socket->s)) {
                                break;
                            }

                        }

                    } while ((client_fd = bsd_accept_socket(us_poll_fd(p), &addr)) != LIBUS_SOCKET_ERROR);
                }
            }
        }
        break;
    case POLL_TYPE_SOCKET_SHUT_DOWN:
    case POLL_TYPE_SOCKET: {
            /* We should only use s, no p after this point */
            struct us_socket_t *s = (struct us_socket_t *) p;

            /* Such as epollerr epollhup */
            if (error) {
                /* Todo: decide what code we give here */
                s = us_socket_close(0, s, 0, NULL);
                return;
            }

            if (events & LIBUS_SOCKET_WRITABLE) {
                /* Note: if we failed a write as a socket of one loop then adopted
                 * to another loop, this will be wrong. Absurd case though */
                s->context->loop->data.last_write_failed = 0;

                s = s->context->on_writable(s);

                if (us_socket_is_closed(0, s)) {
                    return;
                }

                /* If we have no failed write or if we shut down, then stop polling for more writable */
                if (!s->context->loop->data.last_write_failed || us_socket_is_shut_down(0, s)) {
                    us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_READABLE);
                }
            }

            if (events & LIBUS_SOCKET_READABLE) {
                /* Contexts may prioritize down sockets that are currently readable, e.g. when SSL handshake has to be done.
                 * SSL handshakes are CPU intensive, so we limit the number of handshakes per loop iteration, and move the rest
                 * to the low-priority queue */
                if (s->context->is_low_prio(s)) {
                    if (s->low_prio_state == 2) {
                        s->low_prio_state = 0; /* Socket has been delayed and now it's time to process incoming data for one iteration */
                    } else if (s->context->loop->data.low_prio_budget > 0) {
                        s->context->loop->data.low_prio_budget--; /* Still having budget for this iteration - do normal processing */
                    } else {
                        /* Out of budget: park the socket on the low-priority queue
                         * and stop polling it for readable until a later iteration. */
                        us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
                        us_internal_socket_context_unlink_socket(s->context, s);

                        /* Link this socket to the low-priority queue - we use a LIFO queue, to prioritize newer clients that are
                         * maybe not already timeouted - sounds unfair, but works better in real-life with smaller client-timeouts
                         * under high load */
                        s->prev = 0;
                        s->next = s->context->loop->data.low_prio_head;
                        if (s->next) s->next->prev = s;
                        s->context->loop->data.low_prio_head = s;

                        s->low_prio_state = 1;

                        /* break exits this switch case entirely - skip the recv below */
                        break;
                    }
                }

                int length = bsd_recv(us_poll_fd(&s->p), s->context->loop->data.recv_buf + LIBUS_RECV_BUFFER_PADDING, LIBUS_RECV_BUFFER_LENGTH, 0);
                if (length > 0) {
                    s = s->context->on_data(s, s->context->loop->data.recv_buf + LIBUS_RECV_BUFFER_PADDING, length);
                } else if (!length) {
                    /* recv of 0 bytes means the peer sent FIN. */
                    if (us_socket_is_shut_down(0, s)) {
                        /* We got FIN back after sending it */
                        /* Todo: We should give "CLEAN SHUTDOWN" as reason here */
                        s = us_socket_close(0, s, 0, NULL);
                    } else {
                        /* We got FIN, so stop polling for readable */
                        us_poll_change(&s->p, us_socket_context(0, s)->loop, us_poll_events(&s->p) & LIBUS_SOCKET_WRITABLE);
                        s = s->context->on_end(s);
                    }
                } else if (length == LIBUS_SOCKET_ERROR && !bsd_would_block()) {
                    /* Todo: decide also here what kind of reason we should give */
                    s = us_socket_close(0, s, 0, NULL);
                }
            }
        }
        break;
    }
}
|
376
|
+
|
377
|
+
/* Integration only requires the timer to be set up */

/* Arm the repeating sweep timer (interval = LIBUS_TIMEOUT_GRANULARITY seconds,
 * converted to ms) that drives us_internal_timer_sweep via sweep_timer_cb. */
void us_loop_integrate(struct us_loop_t *loop) {
    us_timer_set(loop->data.sweep_timer, (void (*)(struct us_timer_t *)) sweep_timer_cb, LIBUS_TIMEOUT_GRANULARITY * 1000, LIBUS_TIMEOUT_GRANULARITY * 1000);
}

/* User extension area lives immediately after the us_loop_t struct. */
void *us_loop_ext(struct us_loop_t *loop) {
    return loop + 1;
}
|
385
|
+
|
386
|
+
#endif
|
@@ -0,0 +1,38 @@
|
|
1
|
+
/*
|
2
|
+
* Authored by Alex Hultman, 2018-2019.
|
3
|
+
* Intellectual property of third-party.
|
4
|
+
|
5
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
6
|
+
* you may not use this file except in compliance with the License.
|
7
|
+
* You may obtain a copy of the License at
|
8
|
+
|
9
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
10
|
+
|
11
|
+
* Unless required by applicable law or agreed to in writing, software
|
12
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
13
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14
|
+
* See the License for the specific language governing permissions and
|
15
|
+
* limitations under the License.
|
16
|
+
*/
|
17
|
+
|
18
|
+
#ifndef LOOP_DATA_H
|
19
|
+
#define LOOP_DATA_H
|
20
|
+
|
21
|
+
/* Per-loop shared state embedded in every us_loop_t. */
struct us_internal_loop_data_t {
    /* Repeating timer driving the timeout sweep */
    struct us_timer_t *sweep_timer;
    /* Async used by us_wakeup_loop for cross-thread wakeups */
    struct us_internal_async *wakeup_async;
    /* Set to 1 by us_socket_write on short write; cleared on writable event */
    int last_write_failed;
    /* Head of the doubly-linked list of socket contexts on this loop */
    struct us_socket_context_t *head;
    /* Sweep cursor over contexts; allows unlink during iteration */
    struct us_socket_context_t *iterator;
    /* Shared receive buffer (LIBUS_RECV_BUFFER_LENGTH plus padding both sides) */
    char *recv_buf;
    /* Lazily created loop-wide SSL data, freed in us_internal_loop_data_free */
    void *ssl_data;
    /* User callbacks run at start/end of every loop iteration */
    void (*pre_cb)(struct us_loop_t *);
    void (*post_cb)(struct us_loop_t *);
    /* Sockets closed this iteration, freed in the post phase */
    struct us_socket_t *closed_head;
    /* LIFO queue of throttled (low-priority) sockets */
    struct us_socket_t *low_prio_head;
    /* Remaining low-priority sockets allowed to process this iteration */
    int low_prio_budget;
    /* We do not care if this flips or not, it doesn't matter */
    long long iteration_nr;
};
|
37
|
+
|
38
|
+
#endif // LOOP_DATA_H
|
data/ext/up_ext/socket.c
ADDED
@@ -0,0 +1,231 @@
|
|
1
|
+
/*
|
2
|
+
* Authored by Alex Hultman, 2018-2021.
|
3
|
+
* Intellectual property of third-party.
|
4
|
+
|
5
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
6
|
+
* you may not use this file except in compliance with the License.
|
7
|
+
* You may obtain a copy of the License at
|
8
|
+
|
9
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
10
|
+
|
11
|
+
* Unless required by applicable law or agreed to in writing, software
|
12
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
13
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14
|
+
* See the License for the specific language governing permissions and
|
15
|
+
* limitations under the License.
|
16
|
+
*/
|
17
|
+
|
18
|
+
#ifndef LIBUS_USE_IO_URING
|
19
|
+
|
20
|
+
#include "libusockets.h"
|
21
|
+
#include "internal.h"
|
22
|
+
#include <stdlib.h>
|
23
|
+
#include <string.h>
|
24
|
+
#include <stdint.h>
|
25
|
+
|
26
|
+
/* Shared with SSL */
|
27
|
+
|
28
|
+
int us_socket_local_port(int ssl, struct us_socket_t *s) {
|
29
|
+
struct bsd_addr_t addr;
|
30
|
+
if (bsd_local_addr(us_poll_fd(&s->p), &addr)) {
|
31
|
+
return -1;
|
32
|
+
} else {
|
33
|
+
return bsd_addr_get_port(&addr);
|
34
|
+
}
|
35
|
+
}
|
36
|
+
|
37
|
+
int us_socket_remote_port(int ssl, struct us_socket_t *s) {
|
38
|
+
struct bsd_addr_t addr;
|
39
|
+
if (bsd_remote_addr(us_poll_fd(&s->p), &addr)) {
|
40
|
+
return -1;
|
41
|
+
} else {
|
42
|
+
return bsd_addr_get_port(&addr);
|
43
|
+
}
|
44
|
+
}
|
45
|
+
|
46
|
+
/* Half-close the receive direction of the socket (SHUT_RD-style). */
void us_socket_shutdown_read(int ssl, struct us_socket_t *s) {
    /* This syscall is idempotent so no extra check is needed */
    bsd_shutdown_socket_read(us_poll_fd((struct us_poll_t *) s));
}

/* Copy the peer's IP address bytes into buf. On entry *length is the buffer
 * capacity; on success it is set to the address length, on failure (or if the
 * buffer is too small) it is set to 0 and buf is untouched. */
void us_socket_remote_address(int ssl, struct us_socket_t *s, char *buf, int *length) {
    struct bsd_addr_t addr;
    if (bsd_remote_addr(us_poll_fd(&s->p), &addr) || *length < bsd_addr_get_ip_length(&addr)) {
        *length = 0;
    } else {
        *length = bsd_addr_get_ip_length(&addr);
        memcpy(buf, bsd_addr_get_ip(&addr), *length);
    }
}
|
60
|
+
|
61
|
+
/* Return the context this socket belongs to (shared with SSL). */
struct us_socket_context_t *us_socket_context(int ssl, struct us_socket_t *s) {
    return s->context;
}

/* Set the short (seconds-granularity) timeout; 0 disables it. The deadline is
 * stored as a tick value on the context's wrapping 240-tick clock; 255 is the
 * "no timeout" sentinel checked by us_internal_timer_sweep. The (seconds + 3) >> 2
 * rounding quantizes to the sweep granularity. */
void us_socket_timeout(int ssl, struct us_socket_t *s, unsigned int seconds) {
    if (seconds) {
        s->timeout = ((unsigned int)s->context->timestamp + ((seconds + 3) >> 2)) % 240;
    } else {
        s->timeout = 255;
    }
}

/* Same as above but on the long (minutes-granularity) clock. */
void us_socket_long_timeout(int ssl, struct us_socket_t *s, unsigned int minutes) {
    if (minutes) {
        s->long_timeout = ((unsigned int)s->context->long_timestamp + minutes) % 240;
    } else {
        s->long_timeout = 255;
    }
}
|
80
|
+
|
81
|
+
void us_socket_flush(int ssl, struct us_socket_t *s) {
|
82
|
+
if (!us_socket_is_shut_down(0, s)) {
|
83
|
+
bsd_socket_flush(us_poll_fd((struct us_poll_t *) s));
|
84
|
+
}
|
85
|
+
}
|
86
|
+
|
87
|
+
int us_socket_is_closed(int ssl, struct us_socket_t *s) {
|
88
|
+
return s->prev == (struct us_socket_t *) s->context;
|
89
|
+
}
|
90
|
+
|
91
|
+
int us_socket_is_established(int ssl, struct us_socket_t *s) {
|
92
|
+
/* Everything that is not POLL_TYPE_SEMI_SOCKET is established */
|
93
|
+
return us_internal_poll_type((struct us_poll_t *) s) != POLL_TYPE_SEMI_SOCKET;
|
94
|
+
}
|
95
|
+
|
96
|
+
/* Exactly the same as us_socket_close but does not emit on_close event */

/* Close a socket that never finished connecting: unlink, stop polling, close
 * the fd, and defer the actual free to the loop's post phase via the
 * closed-list. No on_close is emitted (the caller emits on_connect_error). */
struct us_socket_t *us_socket_close_connecting(int ssl, struct us_socket_t *s) {
    if (!us_socket_is_closed(0, s)) {
        us_internal_socket_context_unlink_socket(s->context, s);
        us_poll_stop((struct us_poll_t *) s, s->context->loop);
        bsd_close_socket(us_poll_fd((struct us_poll_t *) s));

        /* Link this socket to the close-list and let it be deleted after this iteration */
        s->next = s->context->loop->data.closed_head;
        s->context->loop->data.closed_head = s;

        /* Any socket with prev = context is marked as closed */
        s->prev = (struct us_socket_t *) s->context;

        //return s->context->on_close(s, code, reason);
    }
    return s;
}
|
114
|
+
|
115
|
+
/* Same as above but emits on_close */

/* Fully close a socket: unlink it from either its context or, if it was parked
 * (low_prio_state == 1), from the low-priority queue; stop polling; close the fd;
 * place it on the loop's closed-list for deferred free; mark it closed via the
 * prev == context sentinel; finally emit on_close. Idempotent on closed sockets. */
struct us_socket_t *us_socket_close(int ssl, struct us_socket_t *s, int code, void *reason) {
    if (!us_socket_is_closed(0, s)) {
        if (s->low_prio_state == 1) {
            /* Unlink this socket from the low-priority queue */
            if (!s->prev) s->context->loop->data.low_prio_head = s->next;
            else s->prev->next = s->next;

            if (s->next) s->next->prev = s->prev;

            s->prev = 0;
            s->next = 0;
            s->low_prio_state = 0;
        } else {
            us_internal_socket_context_unlink_socket(s->context, s);
        }
        us_poll_stop((struct us_poll_t *) s, s->context->loop);
        bsd_close_socket(us_poll_fd((struct us_poll_t *) s));

        /* Link this socket to the close-list and let it be deleted after this iteration */
        s->next = s->context->loop->data.closed_head;
        s->context->loop->data.closed_head = s;

        /* Any socket with prev = context is marked as closed */
        s->prev = (struct us_socket_t *) s->context;

        return s->context->on_close(s, code, reason);
    }
    return s;
}
|
145
|
+
|
146
|
+
/* Not shared with SSL */

/* Return the underlying handle: the SSL object for SSL sockets, otherwise the
 * raw file descriptor smuggled through a void pointer. */
void *us_socket_get_native_handle(int ssl, struct us_socket_t *s) {
#ifndef LIBUS_NO_SSL
    if (ssl) {
        return us_internal_ssl_socket_get_native_handle((struct us_internal_ssl_socket_t *) s);
    }
#endif

    return (void *) (uintptr_t) us_poll_fd((struct us_poll_t *) s);
}
|
157
|
+
|
158
|
+
/* This is not available for SSL sockets as it makes no sense. */

/* Vectored write of header + payload in one call. Returns bytes written, or 0
 * if the socket is closed/shut down or the write errored. On a short write the
 * poll is switched to also watch writable.
 * NOTE(review): unlike us_socket_write below, this does not set
 * loop->data.last_write_failed on a short write - confirm against upstream
 * whether that is intentional before relying on writable re-arming here. */
int us_socket_write2(int ssl, struct us_socket_t *s, const char *header, int header_length, const char *payload, int payload_length) {

    if (us_socket_is_closed(ssl, s) || us_socket_is_shut_down(ssl, s)) {
        return 0;
    }

    int written = bsd_write2(us_poll_fd(&s->p), header, header_length, payload, payload_length);
    if (written != header_length + payload_length) {
        us_poll_change(&s->p, s->context->loop, LIBUS_SOCKET_READABLE | LIBUS_SOCKET_WRITABLE);
    }

    /* bsd_write2 may return a negative error code; clamp to 0 for callers. */
    return written < 0 ? 0 : written;
}
|
172
|
+
|
173
|
+
/* Write up to length bytes; returns bytes actually written (0 on closed/shut
 * down/error). On a short write, records the failure on the loop so the
 * dispatcher re-arms writable polling, and switches the poll accordingly.
 * msg_more hints that more data follows (passed through to bsd_send). */
int us_socket_write(int ssl, struct us_socket_t *s, const char *data, int length, int msg_more) {
#ifndef LIBUS_NO_SSL
    if (ssl) {
        return us_internal_ssl_socket_write((struct us_internal_ssl_socket_t *) s, data, length, msg_more);
    }
#endif

    if (us_socket_is_closed(ssl, s) || us_socket_is_shut_down(ssl, s)) {
        return 0;
    }

    int written = bsd_send(us_poll_fd(&s->p), data, length, msg_more);
    if (written != length) {
        s->context->loop->data.last_write_failed = 1;
        us_poll_change(&s->p, s->context->loop, LIBUS_SOCKET_READABLE | LIBUS_SOCKET_WRITABLE);
    }

    /* bsd_send may return a negative error code; clamp to 0 for callers. */
    return written < 0 ? 0 : written;
}
|
192
|
+
|
193
|
+
/* Return the user extension area, which lives immediately after the socket
 * struct (allocated together in us_adopt_accepted_socket / us_create_poll). */
void *us_socket_ext(int ssl, struct us_socket_t *s) {
#ifndef LIBUS_NO_SSL
    if (ssl) {
        return us_internal_ssl_socket_ext((struct us_internal_ssl_socket_t *) s);
    }
#endif

    return s + 1;
}
|
202
|
+
|
203
|
+
/* A socket is shut down (write side half-closed) when its poll type was
 * switched to POLL_TYPE_SOCKET_SHUT_DOWN by us_socket_shutdown. */
int us_socket_is_shut_down(int ssl, struct us_socket_t *s) {
#ifndef LIBUS_NO_SSL
    if (ssl) {
        return us_internal_ssl_socket_is_shut_down((struct us_internal_ssl_socket_t *) s);
    }
#endif

    return us_internal_poll_type(&s->p) == POLL_TYPE_SOCKET_SHUT_DOWN;
}
|
212
|
+
|
213
|
+
/* Half-close the write side: mark the poll as shut down, stop polling for
 * writable, and send FIN via bsd_shutdown_socket. No-op if the socket is
 * already closed or shut down. */
void us_socket_shutdown(int ssl, struct us_socket_t *s) {
#ifndef LIBUS_NO_SSL
    if (ssl) {
        us_internal_ssl_socket_shutdown((struct us_internal_ssl_socket_t *) s);
        return;
    }
#endif

    /* Todo: should we emit on_close if calling shutdown on an already half-closed socket?
     * We need more states in that case, we need to track RECEIVED_FIN
     * so far, the app has to track this and call close as needed */
    if (!us_socket_is_closed(ssl, s) && !us_socket_is_shut_down(ssl, s)) {
        us_internal_poll_set_type(&s->p, POLL_TYPE_SOCKET_SHUT_DOWN);
        us_poll_change(&s->p, s->context->loop, us_poll_events(&s->p) & LIBUS_SOCKET_READABLE);
        bsd_shutdown_socket(us_poll_fd((struct us_poll_t *) s));
    }
}
|
230
|
+
|
231
|
+
#endif
|