iodine 0.2.0 → 0.2.1
- checksums.yaml +4 -4
- data/CHANGELOG.md +12 -0
- data/README.md +3 -3
- data/bin/ws-echo +5 -0
- data/ext/iodine/http_response_http1.h +10 -10
- data/ext/iodine/iodine_websocket.c +20 -1
- data/ext/iodine/libserver.c +102 -119
- data/ext/iodine/libsock.c +8 -0
- data/ext/iodine/libsock.h +25 -21
- data/ext/iodine/websockets.c +83 -90
- data/ext/iodine/websockets.h +29 -19
- data/iodine.gemspec +3 -3
- data/lib/iodine/version.rb +1 -1
- metadata +3 -7
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 72136c13b8660b82247aa72226d0fc5ef37cdd75
+  data.tar.gz: cf96f47a2b9b0e99f833127ae0db4869ecd33761
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 5fb48e67f652c4b01e1ef20ca578a1a0b23cc32a6b80bb681985d52ea00b509239e7e153eb1fae3b738be24b9505e3c3782a53fc9ea7826c28449e95a8113cc9
+  data.tar.gz: a828b6192aa581be423fb18ad6523a42db5edea8765f07f03705ffe56a4cfd0da24908fcb69d6296bfcb4d6333cb66da225c140fe25f0d0852602b296539fb0e
data/CHANGELOG.md
CHANGED
@@ -6,6 +6,18 @@ Please notice that this change log contains changes for upcoming releases as wel
 
 ## Changes:
 
+Change log v.0.2.1
+
+**Notice**: The [Rack Websocket Draft](https://github.com/rack/rack/pull/1107) does not support the `each` and `defer` methods. Although I tried to maintain these as part of the draft, the community preferred to leave the implementation of these to the client (rather than the server). If collisions occur, these methods might be removed in the future.
+
+**Update**: Websockets now support the `has_pending?` method and the `on_ready` callback, as suggested by the [Rack Websocket Draft](https://github.com/rack/rack/pull/1107).
+
+**Update**: deprecated the websocket method `uuid` in favor of `conn_id`, as suggested by the [Rack Websocket Draft](https://github.com/rack/rack/pull/1107).
+
+**Fix**: fixed an issue where the server would crash when attempting to send a long enough websocket message.
+
+***
+
 Change log v.0.2.0
 
 This version is a total rewrite. The API is totally changed, nothing stayed.
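The new handler surface described above maps onto a plain Ruby object. Below is a minimal sketch assuming a websocket handler attached through Iodine's upgrade flow; the `EchoHandler` name and the `puts` messages are illustrative, while `on_open`, `on_message`, `on_ready`, `write`, `has_pending?` and `conn_id` come from this release's diff.

```ruby
# Minimal sketch of a handler using the v.0.2.1 additions (illustrative only).
class EchoHandler
  def on_open
    puts "connection #{conn_id} opened" # conn_id replaces the deprecated uuid
  end

  def on_message(data)
    write data
    # has_pending? hints whether the server still buffers unsent data
    puts "still flushing #{conn_id}..." if has_pending?
  end

  def on_ready
    # called once previously buffered outgoing data was written to the socket
    puts "outgoing buffer drained for #{conn_id}"
  end
end
```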
data/README.md
CHANGED
@@ -42,7 +42,7 @@ To get the most out of Iodine, consider the amount of CPU cores available and th
 Puma's model of 16 threads and 4 processes is easily adopted and proved to provide a good enough balance for most use-cases. Use:
 
 ```bash
-bundler exec iodine -p $PORT -t 16 -
+bundler exec iodine -p $PORT -t 16 -w 4
 ```
 
 ### Static file serving support
@@ -56,7 +56,7 @@ To setup native static file service, setup the public folder's address **before*
 This can be done when starting the server from the command line:
 
 ```bash
-bundler exec iodine -p $PORT -t 16 -
+bundler exec iodine -p $PORT -t 16 -w 4 -www /my/public/folder
 ```
 
 Or by adding a single line to the application. i.e. (a `config.ru` example):
@@ -132,7 +132,7 @@ Iodine.start
 
 This design has a number of benefits, some of them related to better IO handling, resource optimization (no need for two IO polling systems) etc'. This also allows us to use middleware without interfering with connection upgrades and provides us with backwards compatibility.
 
-Iodine::Rack imposes a few restrictions for performance and security reasons, such as that the headers (both sending and receiving) must be less
+Iodine::Rack imposes a few restrictions for performance and security reasons, such as that the headers (both sending and receiving) must be less than 8Kb in size. These restrictions shouldn't be an issue and are similar to limitations imposed by Apache.
 
 Here's a small HTTP and Websocket broadcast server with Iodine::Rack, which can be used directly from `irb`:
 
data/bin/ws-echo
CHANGED
@@ -35,9 +35,14 @@ class WSEcho
     puts "I'm shutting down #{self}"
   end
 
+  def on_ready
+    puts "on_ready called for #{self}"
+  end
+
   def on_message(data)
     puts "got message: #{data}"
     write data
+    write (data * 16_384) if data =~ /^multi me$/
  end
 
   def echo(data)
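The `multi me` branch added to `on_message` above repeats the payload 16,384 times before echoing it back, which exercises the long-message crash fix listed in the v.0.2.1 change log.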
data/ext/iodine/http_response_http1.h
CHANGED
@@ -69,16 +69,6 @@ h1p_finalize_headers(http_response_s *response) {
     status = http_response_status_str(response->status);
   }
 
-  /* write the keep-alive (connection) header, if missing */
-  if (!response->metadata.connection_written) {
-    if (response->metadata.should_close) {
-      h1p_protected_copy(response, "Connection:close\r\n", 18);
-    } else {
-      h1p_protected_copy(response, "Connection:keep-alive\r\n"
-                                   "Keep-Alive:timeout=2\r\n",
-                         45);
-    }
-  }
   /* write the content length header, unless forced not to (<0) */
   if (response->metadata.content_length_written == 0 &&
       !(response->content_length < 0) && response->status >= 200 &&
@@ -110,6 +100,16 @@ h1p_finalize_headers(http_response_s *response) {
     *(response->metadata.headers_pos++) = '\r';
     *(response->metadata.headers_pos++) = '\n';
   }
+  /* write the keep-alive (connection) header, if missing */
+  if (!response->metadata.connection_written) {
+    if (response->metadata.should_close) {
+      h1p_protected_copy(response, "Connection:close\r\n", 18);
+    } else {
+      h1p_protected_copy(response, "Connection:keep-alive\r\n"
+                                   "Keep-Alive:timeout=2\r\n",
+                         45);
+    }
+  }
   /* write the headers completion marker (empty line - `\r\n`) */
   *(response->metadata.headers_pos++) = '\r';
   *(response->metadata.headers_pos++) = '\n';
data/ext/iodine/iodine_websocket.c
CHANGED
@@ -133,6 +133,16 @@ static VALUE iodine_ws_count(VALUE self) {
   return LONG2FIX(websocket_count(ws));
 }
 
+/**
+Returns a weak indication as to the state of the socket's buffer. If the server
+has data in the buffer that wasn't written to the socket, `has_pending?` will
+return `true`, otherwise `false` will be returned.
+*/
+static VALUE iodine_ws_has_pending(VALUE self) {
+  intptr_t uuid = get_uuid(self);
+  return sock_packets_pending(uuid) ? Qtrue : Qfalse;
+}
+
 /**
 Returns a connection's UUID which is valid for **this process** (not a machine
 or internet unique value).
@@ -319,6 +329,12 @@ void ws_on_shutdown(ws_s *ws) {
     return;
   RubyCaller.call(handler, on_shutdown_func_id);
 }
+void ws_on_ready(ws_s *ws) {
+  VALUE handler = get_handler(ws);
+  if (!handler)
+    return;
+  RubyCaller.call(handler, on_ready_func_id);
+}
 void ws_on_data(ws_s *ws, char *data, size_t length, uint8_t is_text) {
   VALUE handler = get_handler(ws);
   if (!handler)
@@ -363,7 +379,7 @@ void iodine_websocket_upgrade(http_request_s *request,
   websocket_upgrade(.request = request, .response = response,
                     .udata = (void *)handler, .on_close = ws_on_close,
                     .on_open = ws_on_open, .on_shutdown = ws_on_shutdown,
-                    .on_message = ws_on_data,
+                    .on_ready = ws_on_ready, .on_message = ws_on_data,
                     .max_msg_size = iodine_websocket_max_msg_size,
                     .timeout = iodine_websocket_timeout);
 }
@@ -413,10 +429,13 @@ void Init_iodine_websocket(void) {
   // rb_define_method(rWebsocket, "on_message", def_dyn_message, 1);
   rb_define_method(rWebsocket, "on_shutdown", empty_func, 0);
   rb_define_method(rWebsocket, "on_close", empty_func, 0);
+  rb_define_method(rWebsocket, "on_ready", empty_func, 0);
   rb_define_method(rWebsocket, "write", iodine_ws_write, 1);
   rb_define_method(rWebsocket, "close", iodine_ws_close, 0);
 
   rb_define_method(rWebsocket, "uuid", iodine_ws_uuid, 0);
+  rb_define_method(rWebsocket, "conn_id", iodine_ws_uuid, 0);
+  rb_define_method(rWebsocket, "has_pending?", iodine_ws_has_pending, 0);
   rb_define_method(rWebsocket, "defer", iodine_defer, -1);
   rb_define_method(rWebsocket, "each", iodine_ws_each, 0);
   rb_define_method(rWebsocket, "count", iodine_ws_count, 0);
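The hunks above wire the Ruby `on_ready` callback into the C layer, and the libserver.c change below only invokes `on_ready` once `sock_packets_pending` reports an empty outgoing buffer. A speculative sketch of how a handler might use that as a flow-control signal follows; the chunking scheme, file name, and instance variables are illustrative and not part of the gem:

```ruby
# Sketch: stream a large payload in slices, treating on_ready as a
# "buffer drained" signal and has_pending? as a cheap backpressure probe.
class ChunkedSender
  CHUNK = 16 * 1024

  def on_open
    @payload = File.binread("big.bin") # illustrative data source
    @offset  = 0
    pump
  end

  def on_ready
    pump # previous writes were flushed; send the next slice
  end

  private

  def pump
    return if @offset >= @payload.bytesize
    return if has_pending? # still flushing, wait for the next on_ready
    write @payload.byteslice(@offset, CHUNK)
    @offset += CHUNK
  end
end
```

Whether `on_ready` fires after every flush or only on socket-level write events is up to the server; treat this purely as a sketch of the intended drain semantics.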
data/ext/iodine/libserver.c
CHANGED
@@ -19,7 +19,7 @@ Feel free to copy, use and enjoy according to the license provided.
 Connection Data
 */
 typedef struct {
-  protocol_s*
+  protocol_s *protocol;
   time_t active;
   uint8_t timeout;
   spn_lock_i lock;
@@ -35,14 +35,14 @@ These macros mean we won't need to change code if we change the locking system.
 #define try_lock_fd(fd) spn_trylock(&((fd)->lock))
 #define lock_fd(fd) spn_lock(&((fd)->lock))
 #define unlock_fd(fd) spn_unlock(&((fd)->lock))
-#define clear_fd_data(fd_data)
+#define clear_fd_data(fd_data) \
   { *(fd_data) = (fd_data_s){.lock = (fd_data)->lock}; }
 
 /* *****************************************************************************
 Server Core Data
 */
 static struct {
-  fd_data_s*
+  fd_data_s *fds;
   time_t last_tick;
   void (*on_idle)(void);
   size_t capacity;
@@ -66,12 +66,12 @@ These macros help prevent code changes when changing the data struct.
 
 #define fduuid_get(ifd) (server_data.fds[(ifd)].uuid)
 
-#define protocol_is_busy(protocol)
-  spn_is_locked(&(((protocol_s*)(protocol))->callback_lock))
-#define protocol_unset_busy(protocol)
-  spn_unlock(&(((protocol_s*)(protocol))->callback_lock))
-#define protocol_set_busy(protocol)
-  spn_trylock(&(((protocol_s*)(protocol))->callback_lock))
+#define protocol_is_busy(protocol) \
+  spn_is_locked(&(((protocol_s *)(protocol))->callback_lock))
+#define protocol_unset_busy(protocol) \
+  spn_unlock(&(((protocol_s *)(protocol))->callback_lock))
+#define protocol_set_busy(protocol) \
+  spn_trylock(&(((protocol_s *)(protocol))->callback_lock))
 
 #define try_lock_uuid(uuid) try_lock_fd(server_data.fds + sock_uuid2fd(uuid))
 #define lock_uuid(uuid) lock_fd(server_data.fds + sock_uuid2fd(uuid))
@@ -128,10 +128,10 @@ static void init_server(void) {
 }
 
 /** initializes the library if it wasn't already initialized. */
-#define validate_mem()
-  {
-    if (server_data.fds == NULL)
-      init_server();
+#define validate_mem() \
+  { \
+    if (server_data.fds == NULL) \
+      init_server(); \
   }
 
 /* *****************************************************************************
@@ -147,9 +147,9 @@ void sock_touch(intptr_t uuid) {
 The Reactor Callback Implementation
 */
 
-void reactor_on_close_async(void*
+void reactor_on_close_async(void *_pr) {
   if (protocol_set_busy(_pr) == 0) {
-    ((protocol_s*)_pr)->on_close(_pr);
+    ((protocol_s *)_pr)->on_close(_pr);
     return;
   }
   async_run(reactor_on_close_async, _pr);
@@ -159,7 +159,7 @@ void reactor_on_close(intptr_t uuid) {
   if (server_data.fds) {
     // get the currect state
     lock_uuid(uuid);
-    protocol_s*
+    protocol_s *protocol = protocol_uuid(uuid);
     // clear state
     clear_uuid(uuid);
     unlock_uuid(uuid);
@@ -169,7 +169,7 @@ void reactor_on_close(intptr_t uuid) {
   }
 }
 
-void reactor_on_data_async(void*
+void reactor_on_data_async(void *_fduuid) {
   intptr_t fduuid = (intptr_t)_fduuid;
   if (!valid_uuid(fduuid) || protocol_uuid(fduuid) == NULL)
     return;
@@ -177,7 +177,7 @@ void reactor_on_data_async(void* _fduuid) {
   if (try_lock_uuid(fduuid))
     goto no_lock;
   // get current state (protocol might have changed during this time)
-  protocol_s*
+  protocol_s *protocol = protocol_uuid(fduuid);
   // review protocol and get use privilage
   if (protocol == NULL || protocol_set_busy(protocol)) {
     // fprintf(stderr, "fduuid is busy %p\n", _fduuid);
@@ -199,15 +199,15 @@ no_lock:
 }
 
 void reactor_on_data(intptr_t fd) {
-  async_run(reactor_on_data_async, (void*)fd);
+  async_run(reactor_on_data_async, (void *)fd);
 }
 
 void reactor_on_ready(intptr_t uuid) {
   uuid_data(uuid).active = server_data.last_tick;
   lock_uuid(uuid);
-  protocol_s*
+  protocol_s *protocol = protocol_uuid(uuid);
   unlock_uuid(uuid);
-  if (protocol && protocol->on_ready)
+  if (protocol && protocol->on_ready && !sock_packets_pending(uuid))
     protocol->on_ready(uuid, protocol);
 }
 
@@ -263,19 +263,19 @@ inline static void listen_for_stop_signal(void) {
 The Listenning Protocol
 */
 
-static const char*
+static const char *listener_protocol_name = "listening protocol __internal__";
 
 struct ListenerProtocol {
   protocol_s protocol;
-  protocol_s*
-  void*
-  void (*on_start)(void*
-  void (*on_finish)(void*
+  protocol_s *(*on_open)(intptr_t uuid, void *udata);
+  void *udata;
+  void (*on_start)(void *udata);
+  void (*on_finish)(void *udata);
 };
 
-static void listener_on_data(intptr_t uuid, protocol_s*
+static void listener_on_data(intptr_t uuid, protocol_s *_listener) {
   intptr_t new_client;
-  struct ListenerProtocol*
+  struct ListenerProtocol *listener = (void *)_listener;
   while ((new_client = sock_accept(uuid)) != -1) {
     // make sure it's a clean slate... although it should be assumed to be.
     lock_uuid(new_client);
@@ -294,20 +294,18 @@ static void listener_on_data(intptr_t uuid, protocol_s* _listener) {
   }
 }
 
-static void free_listenner(void* _li)
-  free(_li);
-}
+static void free_listenner(void *_li) { free(_li); }
 
-static void listener_on_close(protocol_s*
-  if (((struct ListenerProtocol*)_listener)->on_finish)
-    ((struct ListenerProtocol*)_listener)
-      ->on_finish(((struct ListenerProtocol*)_listener)->udata);
+static void listener_on_close(protocol_s *_listener) {
+  if (((struct ListenerProtocol *)_listener)->on_finish)
+    ((struct ListenerProtocol *)_listener)
+        ->on_finish(((struct ListenerProtocol *)_listener)->udata);
   free_listenner(_listener);
 }
 
-static inline struct ListenerProtocol*
-
-struct ListenerProtocol*
+static inline struct ListenerProtocol *
+listener_alloc(struct ServerServiceSettings settings) {
+  struct ListenerProtocol *listener = malloc(sizeof(*listener));
   if (listener) {
     *listener = (struct ListenerProtocol){
         .protocol.service = listener_protocol_name,
@@ -329,9 +327,9 @@ inline static void listener_on_server_start(void) {
       if (reactor_add(sock_fd2uuid(i)))
         perror("Couldn't register listenning socket"), exit(4);
       // call the on_init callback
-      if (((struct ListenerProtocol*)protocol_fd(i))->on_start)
-        ((struct ListenerProtocol*)protocol_fd(i))
-          ->on_start(((struct ListenerProtocol*)protocol_fd(i))->udata);
+      if (((struct ListenerProtocol *)protocol_fd(i))->on_start)
+        ((struct ListenerProtocol *)protocol_fd(i))
+            ->on_start(((struct ListenerProtocol *)protocol_fd(i))->udata);
     }
   }
 }
@@ -354,16 +352,16 @@ typedef struct {
   protocol_s protocol;
   size_t milliseconds;
   size_t repetitions;
-  void (*task)(void*);
-  void (*on_finish)(void*);
-  void*
+  void (*task)(void *);
+  void (*on_finish)(void *);
+  void *arg;
 } timer_protocol_s;
 
-#define prot2timer(protocol) (*((timer_protocol_s*)(protocol)))
+#define prot2timer(protocol) (*((timer_protocol_s *)(protocol)))
 
-const char*
+const char *timer_protocol_name = "timer protocol __internal__";
 
-static void timer_on_data(intptr_t uuid, protocol_s*
+static void timer_on_data(intptr_t uuid, protocol_s *protocol) {
   prot2timer(protocol).task(prot2timer(protocol).arg);
   if (prot2timer(protocol).repetitions) {
     prot2timer(protocol).repetitions -= 1;
@@ -376,19 +374,18 @@ static void timer_on_data(intptr_t uuid, protocol_s* protocol) {
   reactor_reset_timer(uuid);
 }
 
-static void timer_on_close(protocol_s*
+static void timer_on_close(protocol_s *protocol) {
   // fprintf(stderr, "timer closed\n");
   if (prot2timer(protocol).on_finish)
     prot2timer(protocol).on_finish(prot2timer(protocol).arg);
   free(protocol);
 }
 
-static inline timer_protocol_s*
-  void* arg,
+static inline timer_protocol_s *timer_alloc(void (*task)(void *), void *arg,
                                             size_t milliseconds,
                                             size_t repetitions,
-  void (*on_finish)(void*)) {
-  timer_protocol_s*
+                                            void (*on_finish)(void *)) {
+  timer_protocol_s *t = malloc(sizeof(*t));
   if (t)
     *t = (timer_protocol_s){
         .protocol.service = timer_protocol_name,
@@ -424,7 +421,7 @@ static inline void timeout_review(void) {
   time(&review);
   for (size_t i = 0; i < server_data.capacity; i++) {
     if (protocol_fd(i) == NULL)
-      continue;
+      continue; // Protocol objects are required for open connections.
     if (fd_data(i).timeout == 0) {
       if (protocol_fd(i) && protocol_fd(i)->service != listener_protocol_name &&
           protocol_fd(i)->service != timer_protocol_name &&
@@ -444,7 +441,7 @@ static inline void timeout_review(void) {
   }
 }
 
-static void server_cycle(void*
+static void server_cycle(void *_) {
   static int8_t perform_idle = 1;
   time(&server_data.last_tick);
   if (server_data.running) {
@@ -489,7 +486,7 @@ int server_listen(struct ServerServiceSettings settings) {
   if (fduuid == -1)
     return -1;
   server_data.fds[sock_uuid2fd(fduuid)].protocol =
-      (void*)listener_alloc(settings);
+      (void *)listener_alloc(settings);
   if (server_data.fds[sock_uuid2fd(fduuid)].protocol == NULL)
     goto error;
   if (server_data.running && reactor_add(fduuid))
@@ -517,19 +514,17 @@ ssize_t server_run(struct ServerSettings settings) {
 
 #if defined(SERVER_PRINT_STATE) && SERVER_PRINT_STATE == 1
   if (settings.threads == 0)
-    fprintf(stderr,
-
-            " in single thread mode.\n",
+    fprintf(stderr, "* Running %lu processes"
+                    " in single thread mode.\n",
             settings.processes);
   else
-    fprintf(stderr,
-
-            " X %lu threads.\n",
+    fprintf(stderr, "* Running %lu processes"
+                    " X %lu threads.\n",
            settings.processes, settings.threads);
 #endif
 
   pid_t rootpid = getpid();
-  pid_t*
+  pid_t *children = NULL;
   if (settings.processes > 1) {
     children = malloc(sizeof(*children) * settings.processes);
     for (size_t i = 0; i < settings.processes - 1; i++) {
@@ -576,15 +571,11 @@ ssize_t server_run(struct ServerSettings settings) {
   return 0;
 }
 
-void server_stop(void) {
-  server_data.running = 0;
-}
+void server_stop(void) { server_data.running = 0; }
 /**
 Returns the last time the server reviewed any pending IO events.
 */
-time_t server_last_tick(void) {
-  return server_data.last_tick;
-}
+time_t server_last_tick(void) { return server_data.last_tick; }
 
 /* *****************************************************************************
 * Socket actions
@@ -598,10 +589,10 @@ all resources are released.
 
 Returns -1 on error (i.e. connection closed), otherwise returns 0.
 */
-ssize_t server_switch_protocol(intptr_t fd, protocol_s*
+ssize_t server_switch_protocol(intptr_t fd, protocol_s *new_protocol) {
   if (new_protocol == NULL || valid_uuid(fd) == 0)
     return -1;
-  protocol_s*
+  protocol_s *old_protocol;
   lock_uuid(fd);
   old_protocol = uuid_data(fd).protocol;
   uuid_data(fd).protocol = new_protocol;
@@ -616,10 +607,10 @@ Gets the active protocol object for the requested file descriptor.
 Returns NULL on error (i.e. connection closed), otherwise returns a `protocol_s`
 pointer.
 */
-protocol_s*
+protocol_s *server_get_protocol(intptr_t uuid) {
   if (valid_uuid(uuid) == 0)
     return NULL;
-  protocol_s*
+  protocol_s *protocol;
   lock_uuid(uuid);
   protocol = uuid_data(uuid).protocol;
   unlock_uuid(uuid);
@@ -644,7 +635,7 @@ based resources asynchronously (i.e. database resources etc').
 
 On failure the fduuid_u.data.fd value will be -1.
 */
-intptr_t server_attach(int fd, protocol_s*
+intptr_t server_attach(int fd, protocol_s *protocol) {
   intptr_t uuid = sock_open(fd);
   if (uuid == -1)
     return -1;
@@ -670,7 +661,7 @@ int server_hijack(intptr_t uuid) {
   sock_flush_strong(uuid);
   if (sock_isvalid(uuid) == 0)
     return -1;
-  protocol_s*
+  protocol_s *old_protocol;
   lock_uuid(uuid);
   old_protocol = uuid_data(uuid).protocol;
   uuid_data(uuid).protocol = NULL;
@@ -681,7 +672,7 @@ int server_hijack(intptr_t uuid) {
 }
 /** Counts the number of connections for the specified protocol (NULL = all
 protocols). */
-long server_count(char*
+long server_count(char *service) {
   long count = 0;
   if (service == NULL) {
     for (size_t i = 0; i < server_data.capacity; i++) {
@@ -708,43 +699,41 @@ Task core data
 typedef struct {
   intptr_t origin;
   intptr_t target;
-  const char*
-  void (*task)(intptr_t fd, protocol_s*
-  void*
-  void*
+  const char *service;
+  void (*task)(intptr_t fd, protocol_s *protocol, void *arg);
+  void *on_finish;
+  void *arg;
 } srv_task_s;
 
 /* Get task from void pointer. */
-#define p2task(task) (*((srv_task_s*)(task)))
+#define p2task(task) (*((srv_task_s *)(task)))
 
 /* Get fallback callback from the task object. */
-#define task2fallback(task)
-  ((void (*)(intptr_t, void*))(p2task(task).on_finish))
+#define task2fallback(task) \
+  ((void (*)(intptr_t, void *))(p2task(task).on_finish))
 
 /* Get on_finished callback from the task object. */
-#define task2on_done(task)
-  ((void (*)(intptr_t, protocol_s*, void*))(p2task(task).on_finish))
+#define task2on_done(task) \
+  ((void (*)(intptr_t, protocol_s *, void *))(p2task(task).on_finish))
 /* allows for later implementation of a task pool with minimal code updates. */
-static inline srv_task_s*
+static inline srv_task_s *task_alloc(void) {
   return malloc(sizeof(srv_task_s));
 }
 
 /* allows for later implementation of a task pool with minimal code updates. */
-static inline void task_free(srv_task_s* task)
-  return free(task);
-}
+static inline void task_free(srv_task_s *task) { return free(task); }
 
 /* performs a single connection task. */
-static void perform_single_task(void*
+static void perform_single_task(void *task) {
   if (sock_isvalid(p2task(task).target) == 0) {
-    if (p2task(task).on_finish)
+    if (p2task(task).on_finish) // an invalid connection fallback
      task2fallback(task)(p2task(task).origin, p2task(task).arg);
    task_free(task);
    return;
  }
  if (try_lock_uuid(p2task(task).target) == 0) {
    // get protocol
-    protocol_s*
+    protocol_s *protocol = protocol_uuid(p2task(task).target);
    if (protocol_set_busy(protocol) == 0) {
      // clear the original busy flag
      unlock_uuid(p2task(task).target);
@@ -759,9 +748,9 @@ static void perform_single_task(void* task) {
 }
 
 /* performs a connection group task. */
-static void perform_each_task(void*
+static void perform_each_task(void *task) {
   intptr_t uuid;
-  protocol_s*
+  protocol_s *protocol;
   while (p2task(task).target < server_data.capacity) {
     uuid = sock_fd2uuid(p2task(task).target);
     if (uuid == -1 || uuid == p2task(task).origin) {
@@ -791,12 +780,12 @@ static void perform_each_task(void* task) {
     async_run(perform_each_task, task);
     return;
   }
-  if (p2task(task).on_finish) {
-    task2on_done(task)(
-
-
-
-
+  if (p2task(task).on_finish) { // finished group task callback
+    task2on_done(task)(p2task(task).origin,
+                       (sock_isvalid(p2task(task).origin)
+                            ? protocol_uuid(p2task(task).origin)
+                            : NULL),
+                       p2task(task).arg);
   }
   task_free(task);
   return;
@@ -810,14 +799,11 @@ API
 Schedules a specific task to run asyncronously for each connection (except the
 origin connection) on a specific protocol.
 */
-void server_each(intptr_t origin_fd,
-
-  void (*
-
-
-  protocol_s* protocol,
-  void* arg)) {
-  srv_task_s* t = NULL;
+void server_each(intptr_t origin_fd, const char *service,
+                 void (*task)(intptr_t fd, protocol_s *protocol, void *arg),
+                 void *arg, void (*on_finish)(intptr_t fd, protocol_s *protocol,
+                                              void *arg)) {
+  srv_task_s *t = NULL;
   if (service == NULL || task == NULL)
     goto error;
   t = task_alloc();
@@ -842,10 +828,9 @@ error:
 /** Schedules a specific task to run asyncronously for a specific connection.
 */
 void server_task(intptr_t caller_fd,
-  void (*task)(intptr_t fd, protocol_s*
-  void* arg
-
-  srv_task_s* t = NULL;
+                 void (*task)(intptr_t fd, protocol_s *protocol, void *arg),
+                 void *arg, void (*fallback)(intptr_t fd, void *arg)) {
+  srv_task_s *t = NULL;
   if (task == NULL)
     goto error;
   t = task_alloc();
@@ -872,15 +857,13 @@ timer to the reactor. The task will repeat `repetitions` times. if
 `repetitions` is set to 0, task will repeat forever. Returns -1 on error
 or the new file descriptor on succeess.
 */
-int server_run_every(size_t milliseconds,
-
-  void (*
-  void* arg,
-  void (*on_finish)(void*)) {
+int server_run_every(size_t milliseconds, size_t repetitions,
+                     void (*task)(void *), void *arg,
+                     void (*on_finish)(void *)) {
   validate_mem();
   if (task == NULL)
     return -1;
-  timer_protocol_s*
+  timer_protocol_s *protocol = NULL;
   intptr_t uuid = -1;
   int fd = reactor_make_timer();
   if (fd == -1) {
@@ -894,7 +877,7 @@ int server_run_every(size_t milliseconds,
   protocol = timer_alloc(task, arg, milliseconds, repetitions, on_finish);
   if (protocol == NULL)
     goto error;
-  protocol_fd(fd) = (protocol_s*)protocol;
+  protocol_fd(fd) = (protocol_s *)protocol;
   if (server_data.running && reactor_add_timer(uuid, milliseconds))
     goto error;
   return 0;
@@ -906,7 +889,7 @@ error:
 
   if (protocol != NULL) {
     protocol_fd(fd) = NULL;
-    timer_on_close((protocol_s*)protocol);
+    timer_on_close((protocol_s *)protocol);
   }
   return -1;
 }