iodine 0.2.17 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of iodine might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/README.md +36 -3
- data/bin/config.ru +23 -2
- data/bin/http-hello +1 -1
- data/bin/ws-shootout +5 -0
- data/ext/iodine/defer.c +468 -0
- data/ext/iodine/defer.h +105 -0
- data/ext/iodine/evio.c +263 -0
- data/ext/iodine/evio.h +133 -0
- data/ext/iodine/extconf.rb +2 -1
- data/ext/iodine/facil.c +958 -0
- data/ext/iodine/facil.h +423 -0
- data/ext/iodine/http.c +90 -0
- data/ext/iodine/http.h +50 -12
- data/ext/iodine/http1.c +200 -267
- data/ext/iodine/http1.h +17 -26
- data/ext/iodine/http1_request.c +81 -0
- data/ext/iodine/http1_request.h +58 -0
- data/ext/iodine/http1_response.c +403 -0
- data/ext/iodine/http1_response.h +90 -0
- data/ext/iodine/http1_simple_parser.c +124 -108
- data/ext/iodine/http1_simple_parser.h +8 -3
- data/ext/iodine/http_request.c +104 -0
- data/ext/iodine/http_request.h +58 -102
- data/ext/iodine/http_response.c +212 -208
- data/ext/iodine/http_response.h +89 -252
- data/ext/iodine/iodine_core.c +57 -46
- data/ext/iodine/iodine_core.h +3 -1
- data/ext/iodine/iodine_http.c +105 -81
- data/ext/iodine/iodine_websocket.c +17 -13
- data/ext/iodine/iodine_websocket.h +1 -0
- data/ext/iodine/rb-call.c +9 -7
- data/ext/iodine/{rb-libasync.h → rb-defer.c} +57 -49
- data/ext/iodine/rb-rack-io.c +12 -6
- data/ext/iodine/rb-rack-io.h +1 -1
- data/ext/iodine/rb-registry.c +5 -2
- data/ext/iodine/sock.c +1159 -0
- data/ext/iodine/{libsock.h → sock.h} +138 -142
- data/ext/iodine/spnlock.inc +77 -0
- data/ext/iodine/websockets.c +101 -112
- data/ext/iodine/websockets.h +38 -19
- data/iodine.gemspec +3 -3
- data/lib/iodine/version.rb +1 -1
- data/lib/rack/handler/iodine.rb +6 -6
- metadata +23 -19
- data/ext/iodine/http_response_http1.h +0 -382
- data/ext/iodine/libasync.c +0 -570
- data/ext/iodine/libasync.h +0 -122
- data/ext/iodine/libreact.c +0 -350
- data/ext/iodine/libreact.h +0 -244
- data/ext/iodine/libserver.c +0 -957
- data/ext/iodine/libserver.h +0 -481
- data/ext/iodine/libsock.c +0 -1025
- data/ext/iodine/spnlock.h +0 -243
data/ext/iodine/rb-call.c
CHANGED
@@ -25,14 +25,15 @@ struct RubyArgCall {
|
|
25
25
|
};
|
26
26
|
|
27
27
|
// running the actual method call
|
28
|
-
static VALUE run_ruby_method_unsafe(VALUE
|
29
|
-
struct RubyArgCall *task = (void *)
|
28
|
+
static VALUE run_ruby_method_unsafe(VALUE tsk_) {
|
29
|
+
struct RubyArgCall *task = (void *)tsk_;
|
30
30
|
return rb_funcall2(task->obj, task->method, task->argc, task->argv);
|
31
31
|
}
|
32
32
|
|
33
33
|
////////////////////////////////////////////////////////////////////////////
|
34
34
|
// Handling exceptions (printing the backtrace doesn't really work well).
|
35
|
-
static void *handle_exception(void *
|
35
|
+
static void *handle_exception(void *ignr) {
|
36
|
+
(void)ignr;
|
36
37
|
VALUE exc = rb_errinfo();
|
37
38
|
if (exc != Qnil) {
|
38
39
|
VALUE msg = RubyCaller.call(exc, rb_intern("message"));
|
@@ -44,8 +45,9 @@ static void *handle_exception(void *_) {
|
|
44
45
|
(int)RSTRING_LEN(exc_class), RSTRING_PTR(exc_class),
|
45
46
|
(int)RSTRING_LEN(msg), RSTRING_PTR(msg), StringValueCStr(bt));
|
46
47
|
} else {
|
47
|
-
fprintf(stderr,
|
48
|
-
|
48
|
+
fprintf(stderr,
|
49
|
+
"Iodine caught an unprotected exception - %.*s: %.*s\n"
|
50
|
+
"No backtrace available.\n",
|
49
51
|
(int)RSTRING_LEN(exc_class), RSTRING_PTR(exc_class),
|
50
52
|
(int)RSTRING_LEN(msg), RSTRING_PTR(msg));
|
51
53
|
}
|
@@ -56,8 +58,8 @@ static void *handle_exception(void *_) {
|
|
56
58
|
}
|
57
59
|
|
58
60
|
// GVL gateway
|
59
|
-
static void *run_ruby_method_within_gvl(void *
|
60
|
-
struct RubyArgCall *task =
|
61
|
+
static void *run_ruby_method_within_gvl(void *tsk_) {
|
62
|
+
struct RubyArgCall *task = tsk_;
|
61
63
|
int state = 0;
|
62
64
|
task->returned = rb_protect(run_ruby_method_unsafe, (VALUE)(task), &state);
|
63
65
|
if (state)
|
@@ -4,81 +4,89 @@ License: MIT
|
|
4
4
|
|
5
5
|
Feel free to copy, use and enjoy according to the license provided.
|
6
6
|
*/
|
7
|
-
#ifndef RB_ASYNC_EXT_H
|
8
|
-
#define RB_ASYNC_EXT_H
|
9
7
|
// clang-format off
|
10
8
|
#include "rb-registry.h"
|
11
9
|
#include <ruby.h>
|
12
10
|
#include <ruby/thread.h>
|
13
11
|
// clang-format on
|
14
12
|
|
15
|
-
|
16
|
-
Portability - used to help port this to different frameworks (i.e. Ruby).
|
17
|
-
*/
|
18
|
-
|
19
|
-
#define THREAD_TYPE VALUE
|
20
|
-
|
21
|
-
/* Don't use sentinals with Ruby */
|
22
|
-
#ifndef ASYNC_USE_SENTINEL
|
23
|
-
#define ASYNC_USE_SENTINEL 0
|
24
|
-
#endif
|
25
|
-
|
26
|
-
/* The unused directive */
|
27
|
-
#ifndef UNUSED_FUNC
|
28
|
-
#define UNUSED_FUNC __attribute__((unused))
|
29
|
-
#endif
|
30
|
-
|
31
|
-
/* used here but declared elsewhere */
|
32
|
-
void async_signal();
|
33
|
-
|
34
|
-
/* used here but declared elsewhere */
|
35
|
-
void call_async_signal(void *_) {
|
36
|
-
(void)(_);
|
37
|
-
async_signal();
|
38
|
-
}
|
13
|
+
#include "defer.h"
|
39
14
|
|
40
|
-
/*
|
41
|
-
|
42
|
-
|
43
|
-
}
|
44
|
-
|
45
|
-
/* join a ruby thread */
|
46
|
-
UNUSED_FUNC static void *join_thread(THREAD_TYPE thr) {
|
47
|
-
void *ret = rb_thread_call_with_gvl(_inner_join_with_rbthread, (void *)thr);
|
48
|
-
Registry.remove(thr);
|
49
|
-
return ret;
|
50
|
-
}
|
15
|
+
/* *****************************************************************************
|
16
|
+
Local helpers
|
17
|
+
***************************************************************************** */
|
51
18
|
/* used to create Ruby threads and pass them the information they need */
|
52
19
|
struct CreateThreadArgs {
|
53
20
|
void *(*thread_func)(void *);
|
54
21
|
void *arg;
|
55
22
|
};
|
56
23
|
|
24
|
+
/* used here but declared elsewhere */
|
25
|
+
void call_async_signal(void *pool) { defer_pool_stop((pool_pt)pool); }
|
26
|
+
|
57
27
|
/* the thread's GVL release */
|
58
|
-
static VALUE thread_loop(void *
|
59
|
-
struct CreateThreadArgs *args =
|
28
|
+
static VALUE thread_loop(void *args_) {
|
29
|
+
struct CreateThreadArgs *args = args_;
|
60
30
|
void *(*thread_func)(void *) = args->thread_func;
|
61
31
|
void *arg = args->arg;
|
62
|
-
free(
|
32
|
+
free(args_);
|
63
33
|
rb_thread_call_without_gvl2(thread_func, arg,
|
64
34
|
(void (*)(void *))call_async_signal, arg);
|
65
35
|
return Qnil;
|
66
36
|
}
|
67
37
|
|
68
38
|
/* Within the GVL, creates a Ruby thread using an API call */
|
69
|
-
static void *create_ruby_thread_gvl(void *
|
70
|
-
return (void *)Registry.add(rb_thread_create(thread_loop,
|
39
|
+
static void *create_ruby_thread_gvl(void *args) {
|
40
|
+
return (void *)Registry.add(rb_thread_create(thread_loop, args));
|
71
41
|
}
|
72
42
|
|
73
|
-
/*
|
74
|
-
|
75
|
-
|
43
|
+
/* protect the call to join from any exceptions */
|
44
|
+
static void *_inner_join_with_rbthread(void *rbt) {
|
45
|
+
return (void *)rb_funcall((VALUE)rbt, rb_intern("join"), 0);
|
46
|
+
}
|
47
|
+
|
48
|
+
/* *****************************************************************************
|
49
|
+
The Defer library overriding functions
|
50
|
+
***************************************************************************** */
|
51
|
+
|
52
|
+
/**
|
53
|
+
OVERRIDE THIS to replace the default pthread implementation.
|
54
|
+
*/
|
55
|
+
void *defer_new_thread(void *(*thread_func)(void *), pool_pt pool) {
|
76
56
|
struct CreateThreadArgs *data = malloc(sizeof(*data));
|
77
57
|
if (!data)
|
78
|
-
return
|
79
|
-
*data = (struct CreateThreadArgs){.thread_func = thread_func, .arg =
|
80
|
-
*thr =
|
81
|
-
|
58
|
+
return NULL;
|
59
|
+
*data = (struct CreateThreadArgs){.thread_func = thread_func, .arg = pool};
|
60
|
+
void *thr = rb_thread_call_with_gvl(create_ruby_thread_gvl, data);
|
61
|
+
if (thr == (void *)Qnil)
|
62
|
+
thr = NULL;
|
63
|
+
return thr;
|
64
|
+
}
|
65
|
+
|
66
|
+
/**
|
67
|
+
OVERRIDE THIS to replace the default pthread implementation.
|
68
|
+
*/
|
69
|
+
int defer_join_thread(void *thr) {
|
70
|
+
rb_thread_call_with_gvl(_inner_join_with_rbthread, (void *)thr);
|
71
|
+
Registry.remove((VALUE)thr);
|
72
|
+
return 0;
|
82
73
|
}
|
83
74
|
|
75
|
+
/******************************************************************************
|
76
|
+
Portability - used to help port this to different frameworks (i.e. Ruby).
|
77
|
+
*/
|
78
|
+
|
79
|
+
#define THREAD_TYPE VALUE
|
80
|
+
|
81
|
+
/* Don't use sentinals with Ruby */
|
82
|
+
#ifndef ASYNC_USE_SENTINEL
|
83
|
+
#define ASYNC_USE_SENTINEL 0
|
84
|
+
#endif
|
85
|
+
|
86
|
+
/* The unused directive */
|
87
|
+
#ifndef UNUSED_FUNC
|
88
|
+
#define UNUSED_FUNC __attribute__((unused))
|
84
89
|
#endif
|
90
|
+
|
91
|
+
/* used here but declared elsewhere */
|
92
|
+
void async_signal();
|
data/ext/iodine/rb-rack-io.c
CHANGED
@@ -63,7 +63,7 @@ static VALUE TCPSOCKET_CLASS;
|
|
63
63
|
static ID for_fd_id;
|
64
64
|
|
65
65
|
#define set_uuid(object, request) \
|
66
|
-
rb_ivar_set((object), fd_var_id, ULONG2NUM((request)->
|
66
|
+
rb_ivar_set((object), fd_var_id, ULONG2NUM((request)->fd))
|
67
67
|
|
68
68
|
inline static intptr_t get_uuid(VALUE obj) {
|
69
69
|
VALUE i = rb_ivar_get(obj, fd_var_id);
|
@@ -158,7 +158,7 @@ static VALUE strio_read(int argc, VALUE *argv, VALUE self) {
|
|
158
158
|
} else {
|
159
159
|
// make sure the buffer is binary encoded.
|
160
160
|
rb_enc_associate(buffer, BinaryEncoding);
|
161
|
-
if (rb_str_capacity(buffer) < len)
|
161
|
+
if (rb_str_capacity(buffer) < (size_t)len)
|
162
162
|
rb_str_resize(buffer, len);
|
163
163
|
}
|
164
164
|
// read the data.
|
@@ -173,7 +173,10 @@ no_data:
|
|
173
173
|
}
|
174
174
|
|
175
175
|
// Does nothing - this is controlled by the server.
|
176
|
-
static VALUE strio_close(VALUE self) {
|
176
|
+
static VALUE strio_close(VALUE self) {
|
177
|
+
(void)self;
|
178
|
+
return Qnil;
|
179
|
+
}
|
177
180
|
|
178
181
|
// Rewinds the IO, so that it is read from the begining.
|
179
182
|
static VALUE rio_rewind(VALUE self) {
|
@@ -282,7 +285,7 @@ static VALUE tfio_read(int argc, VALUE *argv, VALUE self) {
|
|
282
285
|
} else {
|
283
286
|
// make sure the buffer is binary encoded.
|
284
287
|
rb_enc_associate(buffer, BinaryEncoding);
|
285
|
-
if (rb_str_capacity(buffer) < len)
|
288
|
+
if (rb_str_capacity(buffer) < (size_t)len)
|
286
289
|
rb_str_resize(buffer, len);
|
287
290
|
}
|
288
291
|
// read the data.
|
@@ -298,7 +301,10 @@ no_data:
|
|
298
301
|
}
|
299
302
|
|
300
303
|
// Does nothing - this is controlled by the server.
|
301
|
-
static VALUE tfio_close(VALUE self) {
|
304
|
+
static VALUE tfio_close(VALUE self) {
|
305
|
+
(void)self;
|
306
|
+
return Qnil;
|
307
|
+
}
|
302
308
|
|
303
309
|
// Passes each line of the input to the block. This should be avoided.
|
304
310
|
static VALUE tfio_each(VALUE self) {
|
@@ -397,5 +403,5 @@ static void init_rack_io(void) {
|
|
397
403
|
////////////////////////////////////////////////////////////////////////////
|
398
404
|
// the API interface
|
399
405
|
struct _RackIO_ RackIO = {
|
400
|
-
.
|
406
|
+
.create = new_rack_io, .init = init_rack_io,
|
401
407
|
};
|
data/ext/iodine/rb-rack-io.h
CHANGED
@@ -13,7 +13,7 @@ Feel free to copy, use and enjoy according to the license provided.
|
|
13
13
|
#include "http_request.h"
|
14
14
|
|
15
15
|
extern struct _RackIO_ {
|
16
|
-
VALUE (*
|
16
|
+
VALUE (*create)(http_request_s *request, VALUE env);
|
17
17
|
void (*init)(void);
|
18
18
|
} RackIO;
|
19
19
|
|
data/ext/iodine/rb-registry.c
CHANGED
@@ -5,9 +5,10 @@ License: MIT
|
|
5
5
|
Feel free to copy, use and enjoy according to the license provided.
|
6
6
|
*/
|
7
7
|
#include "rb-registry.h"
|
8
|
-
#include "spnlock.h"
|
9
8
|
#include <ruby.h>
|
10
9
|
|
10
|
+
#include "spnlock.inc"
|
11
|
+
|
11
12
|
// #define RUBY_REG_DBG
|
12
13
|
|
13
14
|
#define REGISTRY_POOL_SIZE 1024
|
@@ -125,6 +126,7 @@ finish:
|
|
125
126
|
|
126
127
|
// a callback for the GC (marking active objects)
|
127
128
|
static void registry_mark(void *ignore) {
|
129
|
+
(void)ignore;
|
128
130
|
#ifdef RUBY_REG_DBG
|
129
131
|
Registry.print();
|
130
132
|
#endif
|
@@ -140,6 +142,7 @@ static void registry_mark(void *ignore) {
|
|
140
142
|
|
141
143
|
// clear the registry (end of lifetime)
|
142
144
|
static void registry_clear(void *ignore) {
|
145
|
+
(void)ignore;
|
143
146
|
lock_registry();
|
144
147
|
struct Object *line;
|
145
148
|
struct Object *to_free;
|
@@ -195,7 +198,7 @@ static void print(void) {
|
|
195
198
|
long index = 0;
|
196
199
|
while (line) {
|
197
200
|
fprintf(stderr, "[%lu] => %d X obj %lu type %d at %p\n", index++,
|
198
|
-
line->count, line->obj, TYPE(line->obj), line);
|
201
|
+
line->count, line->obj, TYPE(line->obj), (void *)line);
|
199
202
|
line = line->next;
|
200
203
|
}
|
201
204
|
fprintf(stderr, "Total of %lu registered objects being marked\n", index);
|
data/ext/iodine/sock.c
ADDED
@@ -0,0 +1,1159 @@
|
|
1
|
+
/*
|
2
|
+
Copyright: Boaz Segev, 2016-2017
|
3
|
+
License: MIT
|
4
|
+
|
5
|
+
Feel free to copy, use and enjoy according to the license provided.
|
6
|
+
*/
|
7
|
+
#ifndef _GNU_SOURCE
|
8
|
+
#define _GNU_SOURCE
|
9
|
+
#endif
|
10
|
+
|
11
|
+
#include "sock.h"
|
12
|
+
#include "spnlock.inc"
|
13
|
+
/* *****************************************************************************
|
14
|
+
Includes and state
|
15
|
+
***************************************************************************** */
|
16
|
+
|
17
|
+
#include <errno.h>
|
18
|
+
#include <fcntl.h>
|
19
|
+
#include <limits.h>
|
20
|
+
#include <netdb.h>
|
21
|
+
#include <stdio.h>
|
22
|
+
#include <string.h>
|
23
|
+
#include <sys/mman.h>
|
24
|
+
#include <sys/resource.h>
|
25
|
+
#include <sys/socket.h>
|
26
|
+
#include <sys/time.h>
|
27
|
+
#include <sys/types.h>
|
28
|
+
#include <time.h>
|
29
|
+
|
30
|
+
#if BUFFER_PACKET_SIZE < (BUFFER_FILE_READ_SIZE + 64)
|
31
|
+
#error BUFFER_PACKET_POOL must be bigger than BUFFER_FILE_READ_SIZE + 64.
|
32
|
+
#endif
|
33
|
+
|
34
|
+
/* *****************************************************************************
|
35
|
+
OS Sendfile settings.
|
36
|
+
*/
|
37
|
+
|
38
|
+
#ifndef USE_SENDFILE
|
39
|
+
|
40
|
+
#if defined(__linux__) /* linux sendfile works */
|
41
|
+
#include <sys/sendfile.h>
|
42
|
+
#define USE_SENDFILE 1
|
43
|
+
#elif defined(__unix__) /* BSD sendfile should work, but isn't tested */
|
44
|
+
#include <sys/uio.h>
|
45
|
+
#define USE_SENDFILE 1
|
46
|
+
#elif defined(__APPLE__) /* Is the apple sendfile still broken? */
|
47
|
+
#include <sys/uio.h>
|
48
|
+
#define USE_SENDFILE 1
|
49
|
+
#else /* sendfile might not be available - always set to 0 */
|
50
|
+
#define USE_SENDFILE 0
|
51
|
+
#endif
|
52
|
+
|
53
|
+
#endif
|
54
|
+
|
55
|
+
/* *****************************************************************************
|
56
|
+
Support an on_close callback.
|
57
|
+
*/
|
58
|
+
|
59
|
+
#pragma weak sock_on_close
|
60
|
+
void __attribute__((weak)) sock_on_close(intptr_t uuid) { (void)(uuid); }
|
61
|
+
|
62
|
+
/* *****************************************************************************
|
63
|
+
Support timeout setting.
|
64
|
+
*/
|
65
|
+
#pragma weak sock_touch
|
66
|
+
void __attribute__((weak)) sock_touch(intptr_t uuid) { (void)(uuid); }
|
67
|
+
|
68
|
+
/* *****************************************************************************
|
69
|
+
Support `defer``.
|
70
|
+
*/
|
71
|
+
|
72
|
+
#pragma weak defer
|
73
|
+
int defer(void (*func)(void *, void *), void *arg, void *arg2) {
|
74
|
+
func(arg, arg2);
|
75
|
+
return 0;
|
76
|
+
}
|
77
|
+
static void sock_flush_defer(void *arg, void *ignored) {
|
78
|
+
sock_flush((intptr_t)arg);
|
79
|
+
return;
|
80
|
+
(void)ignored;
|
81
|
+
}
|
82
|
+
/* *****************************************************************************
|
83
|
+
Support `evio`.
|
84
|
+
*/
|
85
|
+
|
86
|
+
#pragma weak evio_remove
|
87
|
+
int evio_remove(intptr_t uuid) {
|
88
|
+
(void)(uuid);
|
89
|
+
return -1;
|
90
|
+
}
|
91
|
+
|
92
|
+
/* *****************************************************************************
|
93
|
+
User-Land Buffer and Packets
|
94
|
+
***************************************************************************** */
|
95
|
+
|
96
|
+
typedef struct packet_s {
|
97
|
+
struct packet_metadata_s {
|
98
|
+
int (*write_func)(int fd, struct packet_s *packet);
|
99
|
+
void (*free_func)(struct packet_s *packet);
|
100
|
+
struct packet_s *next;
|
101
|
+
} metadata;
|
102
|
+
sock_buffer_s buffer;
|
103
|
+
} packet_s;
|
104
|
+
|
105
|
+
struct {
|
106
|
+
packet_s *next;
|
107
|
+
spn_lock_i lock;
|
108
|
+
uint8_t init;
|
109
|
+
packet_s mem[BUFFER_PACKET_POOL];
|
110
|
+
} packet_pool;
|
111
|
+
|
112
|
+
void SOCK_DEALLOC_NOOP(void *arg) { (void)arg; }
|
113
|
+
|
114
|
+
static inline void sock_packet_clear(packet_s *packet) {
|
115
|
+
packet->metadata.free_func(packet);
|
116
|
+
packet->metadata = (struct packet_metadata_s){
|
117
|
+
.free_func = (void (*)(packet_s *))SOCK_DEALLOC_NOOP};
|
118
|
+
packet->buffer.len = 0;
|
119
|
+
}
|
120
|
+
|
121
|
+
static inline void sock_packet_free(packet_s *packet) {
|
122
|
+
sock_packet_clear(packet);
|
123
|
+
if (packet >= packet_pool.mem &&
|
124
|
+
packet <= packet_pool.mem + (BUFFER_PACKET_POOL - 1)) {
|
125
|
+
spn_lock(&packet_pool.lock);
|
126
|
+
packet->metadata.next = packet_pool.next;
|
127
|
+
packet_pool.next = packet;
|
128
|
+
spn_unlock(&packet_pool.lock);
|
129
|
+
} else
|
130
|
+
free(packet);
|
131
|
+
}
|
132
|
+
|
133
|
+
static inline packet_s *sock_packet_try_grab(void) {
|
134
|
+
packet_s *packet = NULL;
|
135
|
+
spn_lock(&packet_pool.lock);
|
136
|
+
packet = packet_pool.next;
|
137
|
+
if (packet == NULL)
|
138
|
+
goto none_in_pool;
|
139
|
+
packet_pool.next = packet->metadata.next;
|
140
|
+
spn_unlock(&packet_pool.lock);
|
141
|
+
packet->metadata = (struct packet_metadata_s){
|
142
|
+
.free_func = (void (*)(packet_s *))SOCK_DEALLOC_NOOP};
|
143
|
+
packet->buffer.len = 0;
|
144
|
+
return packet;
|
145
|
+
none_in_pool:
|
146
|
+
if (!packet_pool.init)
|
147
|
+
goto init;
|
148
|
+
spn_unlock(&packet_pool.lock);
|
149
|
+
return NULL;
|
150
|
+
init:
|
151
|
+
packet_pool.init = 1;
|
152
|
+
packet_pool.mem[0].metadata.free_func =
|
153
|
+
(void (*)(packet_s *))SOCK_DEALLOC_NOOP;
|
154
|
+
for (size_t i = 2; i < BUFFER_PACKET_POOL; i++) {
|
155
|
+
packet_pool.mem[i - 1].metadata.next = packet_pool.mem + i;
|
156
|
+
packet_pool.mem[i - 1].metadata.free_func =
|
157
|
+
(void (*)(packet_s *))SOCK_DEALLOC_NOOP;
|
158
|
+
}
|
159
|
+
packet_pool.mem[BUFFER_PACKET_POOL - 1].metadata.free_func =
|
160
|
+
(void (*)(packet_s *))SOCK_DEALLOC_NOOP;
|
161
|
+
packet_pool.next = packet_pool.mem + 1;
|
162
|
+
spn_unlock(&packet_pool.lock);
|
163
|
+
packet = packet_pool.mem;
|
164
|
+
packet->metadata = (struct packet_metadata_s){
|
165
|
+
.free_func = (void (*)(packet_s *))SOCK_DEALLOC_NOOP};
|
166
|
+
packet->buffer.len = 0;
|
167
|
+
return packet;
|
168
|
+
}
|
169
|
+
|
170
|
+
static inline packet_s *sock_packet_grab(void) {
|
171
|
+
packet_s *packet = sock_packet_try_grab();
|
172
|
+
if (packet)
|
173
|
+
return packet;
|
174
|
+
while (packet == NULL) {
|
175
|
+
sock_flush_all();
|
176
|
+
packet = sock_packet_try_grab();
|
177
|
+
};
|
178
|
+
return packet;
|
179
|
+
}
|
180
|
+
|
181
|
+
/* *****************************************************************************
|
182
|
+
Default Socket Read/Write Hook
|
183
|
+
***************************************************************************** */
|
184
|
+
|
185
|
+
static ssize_t sock_default_hooks_read(intptr_t uuid, void *buf, size_t count) {
|
186
|
+
return read(sock_uuid2fd(uuid), buf, count);
|
187
|
+
}
|
188
|
+
static ssize_t sock_default_hooks_write(intptr_t uuid, const void *buf,
|
189
|
+
size_t count) {
|
190
|
+
return write(sock_uuid2fd(uuid), buf, count);
|
191
|
+
}
|
192
|
+
|
193
|
+
static void sock_default_hooks_on_close(intptr_t fduuid,
|
194
|
+
struct sock_rw_hook_s *rw_hook) {
|
195
|
+
(void)rw_hook;
|
196
|
+
(void)fduuid;
|
197
|
+
}
|
198
|
+
|
199
|
+
static ssize_t sock_default_hooks_flush(intptr_t uuid) {
|
200
|
+
return (((void)(uuid)), 0);
|
201
|
+
}
|
202
|
+
|
203
|
+
sock_rw_hook_s sock_default_hooks = {
|
204
|
+
.read = sock_default_hooks_read,
|
205
|
+
.write = sock_default_hooks_write,
|
206
|
+
.flush = sock_default_hooks_flush,
|
207
|
+
.on_close = sock_default_hooks_on_close,
|
208
|
+
};
|
209
|
+
/* *****************************************************************************
|
210
|
+
Socket Data Structures
|
211
|
+
***************************************************************************** */
|
212
|
+
struct fd_data_s {
|
213
|
+
/** Connection counter - collision protection. */
|
214
|
+
uint8_t counter;
|
215
|
+
/** Connection lock */
|
216
|
+
spn_lock_i lock;
|
217
|
+
/** Connection is open */
|
218
|
+
unsigned open : 1;
|
219
|
+
/** indicated that the connection should be closed. */
|
220
|
+
unsigned close : 1;
|
221
|
+
/** future flags. */
|
222
|
+
unsigned rsv : 5;
|
223
|
+
/** data sent from current packet - this is per packet. */
|
224
|
+
size_t sent;
|
225
|
+
/** the currently active packet to be sent. */
|
226
|
+
packet_s *packet;
|
227
|
+
/** RW hooks. */
|
228
|
+
sock_rw_hook_s *rw_hooks;
|
229
|
+
/** Peer/listenning address. */
|
230
|
+
struct sockaddr_in6 addrinfo;
|
231
|
+
/** address length. */
|
232
|
+
socklen_t addrlen;
|
233
|
+
};
|
234
|
+
|
235
|
+
static struct sock_data_store {
|
236
|
+
size_t capacity;
|
237
|
+
uint8_t exit_init;
|
238
|
+
struct fd_data_s *fds;
|
239
|
+
} sock_data_store;
|
240
|
+
|
241
|
+
#define fd2uuid(fd) \
|
242
|
+
(((uintptr_t)(fd) << 8) | (sock_data_store.fds[(fd)].counter))
|
243
|
+
#define fdinfo(fd) sock_data_store.fds[(fd)]
|
244
|
+
|
245
|
+
#define lock_fd(fd) spn_lock(&sock_data_store.fds[(fd)].lock)
|
246
|
+
#define unlock_fd(fd) spn_unlock(&sock_data_store.fds[(fd)].lock)
|
247
|
+
|
248
|
+
static inline int validate_uuid(uintptr_t uuid) {
|
249
|
+
uintptr_t fd = sock_uuid2fd(uuid);
|
250
|
+
if ((intptr_t)uuid == -1 || sock_data_store.capacity <= fd ||
|
251
|
+
fdinfo(fd).counter != (uuid & 0xFF))
|
252
|
+
return -1;
|
253
|
+
return 0;
|
254
|
+
}
|
255
|
+
|
256
|
+
static inline void sock_packet_rotate_unsafe(uintptr_t fd) {
|
257
|
+
packet_s *packet = fdinfo(fd).packet;
|
258
|
+
fdinfo(fd).packet = packet->metadata.next;
|
259
|
+
fdinfo(fd).sent = 0;
|
260
|
+
sock_packet_free(packet);
|
261
|
+
}
|
262
|
+
|
263
|
+
static void clear_sock_lib(void) { free(sock_data_store.fds); }
|
264
|
+
|
265
|
+
static inline int initialize_sock_lib(size_t capacity) {
|
266
|
+
static uint8_t init_exit = 0;
|
267
|
+
if (sock_data_store.capacity >= capacity)
|
268
|
+
return 0;
|
269
|
+
struct fd_data_s *new_collection =
|
270
|
+
realloc(sock_data_store.fds, sizeof(struct fd_data_s) * capacity);
|
271
|
+
if (new_collection) {
|
272
|
+
sock_data_store.fds = new_collection;
|
273
|
+
for (size_t i = sock_data_store.capacity; i < capacity; i++) {
|
274
|
+
fdinfo(i) = (struct fd_data_s){.open = 0,
|
275
|
+
.lock = SPN_LOCK_INIT,
|
276
|
+
.rw_hooks = &sock_default_hooks,
|
277
|
+
.counter = 0};
|
278
|
+
}
|
279
|
+
sock_data_store.capacity = capacity;
|
280
|
+
|
281
|
+
#ifdef DEBUG
|
282
|
+
fprintf(stderr,
|
283
|
+
"\nInitialized libsock for %lu sockets, "
|
284
|
+
"each one requires %lu bytes.\n"
|
285
|
+
"overall ovearhead: %lu bytes.\n"
|
286
|
+
"Initialized packet pool for %d elements, "
|
287
|
+
"each one %lu bytes.\n"
|
288
|
+
"overall buffer ovearhead: %lu bytes.\n"
|
289
|
+
"=== Socket Library Total: %lu bytes ===\n\n",
|
290
|
+
capacity, sizeof(struct fd_data_s),
|
291
|
+
sizeof(struct fd_data_s) * capacity, BUFFER_PACKET_POOL,
|
292
|
+
sizeof(packet_s), sizeof(packet_s) * BUFFER_PACKET_POOL,
|
293
|
+
(sizeof(packet_s) * BUFFER_PACKET_POOL) +
|
294
|
+
(sizeof(struct fd_data_s) * capacity));
|
295
|
+
#endif
|
296
|
+
|
297
|
+
if (init_exit)
|
298
|
+
return 0;
|
299
|
+
init_exit = 1;
|
300
|
+
atexit(clear_sock_lib);
|
301
|
+
return 0;
|
302
|
+
}
|
303
|
+
return -1;
|
304
|
+
}
|
305
|
+
|
306
|
+
static inline int clear_fd(uintptr_t fd, uint8_t is_open) {
|
307
|
+
if (sock_data_store.capacity <= fd)
|
308
|
+
goto reinitialize;
|
309
|
+
packet_s *packet;
|
310
|
+
clear:
|
311
|
+
spn_lock(&(fdinfo(fd).lock));
|
312
|
+
struct fd_data_s old_data = fdinfo(fd);
|
313
|
+
sock_data_store.fds[fd] =
|
314
|
+
(struct fd_data_s){.open = is_open,
|
315
|
+
.lock = fdinfo(fd).lock,
|
316
|
+
.rw_hooks = &sock_default_hooks,
|
317
|
+
.counter = fdinfo(fd).counter + 1};
|
318
|
+
spn_unlock(&(fdinfo(fd).lock));
|
319
|
+
packet = old_data.packet;
|
320
|
+
while (old_data.packet) {
|
321
|
+
old_data.packet = old_data.packet->metadata.next;
|
322
|
+
sock_packet_free(packet);
|
323
|
+
packet = old_data.packet;
|
324
|
+
}
|
325
|
+
old_data.rw_hooks->on_close(((fd << 8) | old_data.counter),
|
326
|
+
old_data.rw_hooks);
|
327
|
+
if (old_data.open || (old_data.rw_hooks != &sock_default_hooks)) {
|
328
|
+
sock_on_close((fd << 8) | old_data.counter);
|
329
|
+
evio_remove((fd << 8) | old_data.counter);
|
330
|
+
}
|
331
|
+
return 0;
|
332
|
+
reinitialize:
|
333
|
+
if (initialize_sock_lib(fd << 1))
|
334
|
+
return -1;
|
335
|
+
goto clear;
|
336
|
+
}
|
337
|
+
|
338
|
+
/* *****************************************************************************
|
339
|
+
Writing - from memory
|
340
|
+
***************************************************************************** */
|
341
|
+
|
342
|
+
struct sock_packet_ext_data_s {
|
343
|
+
uint8_t *buffer;
|
344
|
+
uint8_t *to_free;
|
345
|
+
void (*dealloc)(void *);
|
346
|
+
};
|
347
|
+
|
348
|
+
static int sock_write_buffer(int fd, struct packet_s *packet) {
|
349
|
+
int written = fdinfo(fd).rw_hooks->write(
|
350
|
+
fd2uuid(fd), packet->buffer.buf + fdinfo(fd).sent,
|
351
|
+
packet->buffer.len - fdinfo(fd).sent);
|
352
|
+
if (written > 0) {
|
353
|
+
fdinfo(fd).sent += written;
|
354
|
+
if (fdinfo(fd).sent == packet->buffer.len)
|
355
|
+
sock_packet_rotate_unsafe(fd);
|
356
|
+
return written;
|
357
|
+
}
|
358
|
+
return written;
|
359
|
+
}
|
360
|
+
|
361
|
+
static int sock_write_buffer_ext(int fd, struct packet_s *packet) {
|
362
|
+
struct sock_packet_ext_data_s *ext = (void *)packet->buffer.buf;
|
363
|
+
int written =
|
364
|
+
fdinfo(fd).rw_hooks->write(fd2uuid(fd), ext->buffer + fdinfo(fd).sent,
|
365
|
+
packet->buffer.len - fdinfo(fd).sent);
|
366
|
+
if (written > 0) {
|
367
|
+
fdinfo(fd).sent += written;
|
368
|
+
if (fdinfo(fd).sent == packet->buffer.len)
|
369
|
+
sock_packet_rotate_unsafe(fd);
|
370
|
+
return written;
|
371
|
+
}
|
372
|
+
if (written < 0 && (errno == EWOULDBLOCK || errno == EAGAIN ||
|
373
|
+
errno == EINTR || errno == ENOTCONN))
|
374
|
+
return 0;
|
375
|
+
return -1;
|
376
|
+
}
|
377
|
+
|
378
|
+
static void sock_free_buffer_ext(packet_s *packet) {
|
379
|
+
struct sock_packet_ext_data_s *ext = (void *)packet->buffer.buf;
|
380
|
+
ext->dealloc(ext->to_free);
|
381
|
+
}
|
382
|
+
|
383
|
+
/* *****************************************************************************
|
384
|
+
Writing - from files
|
385
|
+
***************************************************************************** */
|
386
|
+
|
387
|
+
struct sock_packet_file_data_s {
|
388
|
+
intptr_t fd;
|
389
|
+
off_t offset;
|
390
|
+
void (*close)(void *);
|
391
|
+
uint8_t buffer[];
|
392
|
+
};
|
393
|
+
|
394
|
+
static void sock_perform_close_fd(intptr_t fd) { close(fd); }
|
395
|
+
|
396
|
+
static void sock_close_from_fd(packet_s *packet) {
|
397
|
+
struct sock_packet_file_data_s *ext = (void *)packet->buffer.buf;
|
398
|
+
ext->close((void *)ext->fd);
|
399
|
+
}
|
400
|
+
|
401
|
+
static int sock_write_from_fd(int fd, struct packet_s *packet) {
|
402
|
+
struct sock_packet_file_data_s *ext = (void *)packet->buffer.buf;
|
403
|
+
ssize_t count = 0;
|
404
|
+
do {
|
405
|
+
fdinfo(fd).sent += count;
|
406
|
+
packet->buffer.len -= count;
|
407
|
+
retry:
|
408
|
+
count = (packet->buffer.len < BUFFER_FILE_READ_SIZE)
|
409
|
+
? pread(ext->fd, ext->buffer, packet->buffer.len,
|
410
|
+
ext->offset + fdinfo(fd).sent)
|
411
|
+
: pread(ext->fd, ext->buffer, BUFFER_FILE_READ_SIZE,
|
412
|
+
ext->offset + fdinfo(fd).sent);
|
413
|
+
if (count <= 0)
|
414
|
+
goto read_error;
|
415
|
+
count = fdinfo(fd).rw_hooks->write(fd2uuid(fd), ext->buffer, count);
|
416
|
+
} while (count == BUFFER_FILE_READ_SIZE && packet->buffer.len);
|
417
|
+
if (count < 0)
|
418
|
+
return -1;
|
419
|
+
fdinfo(fd).sent += count;
|
420
|
+
packet->buffer.len -= count;
|
421
|
+
if (!packet->buffer.len) {
|
422
|
+
sock_packet_rotate_unsafe(fd);
|
423
|
+
return 1;
|
424
|
+
}
|
425
|
+
return count;
|
426
|
+
|
427
|
+
read_error:
|
428
|
+
if (count == 0) {
|
429
|
+
sock_packet_rotate_unsafe(fd);
|
430
|
+
return 1;
|
431
|
+
}
|
432
|
+
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
|
433
|
+
goto retry;
|
434
|
+
return -1;
|
435
|
+
}
|
436
|
+
|
437
|
+
#if USE_SENDFILE == 1
|
438
|
+
|
439
|
+
#if defined(__linux__) /* linux sendfile API */
|
440
|
+
|
441
|
+
static int sock_sendfile_from_fd(int fd, struct packet_s *packet) {
|
442
|
+
struct sock_packet_file_data_s *ext = (void *)packet->buffer.buf;
|
443
|
+
ssize_t sent;
|
444
|
+
sent = sendfile64(fd, ext->fd, &ext->offset, packet->buffer.len);
|
445
|
+
if (sent < 0)
|
446
|
+
return -1;
|
447
|
+
packet->buffer.len -= sent;
|
448
|
+
if (!packet->buffer.len)
|
449
|
+
sock_packet_rotate_unsafe(fd);
|
450
|
+
return sent;
|
451
|
+
}
|
452
|
+
|
453
|
+
#elif defined(__APPLE__) || defined(__unix__) /* BSD / Apple API */
|
454
|
+
|
455
|
+
static int sock_sendfile_from_fd(int fd, struct packet_s *packet) {
|
456
|
+
struct sock_packet_file_data_s *ext = (void *)packet->buffer.buf;
|
457
|
+
off_t act_sent = 0;
|
458
|
+
ssize_t count = 0;
|
459
|
+
do {
|
460
|
+
fdinfo(fd).sent += act_sent;
|
461
|
+
packet->buffer.len -= act_sent;
|
462
|
+
act_sent = packet->buffer.len;
|
463
|
+
#if defined(__APPLE__)
|
464
|
+
count = sendfile(ext->fd, fd, ext->offset + fdinfo(fd).sent, &act_sent,
|
465
|
+
NULL, 0);
|
466
|
+
#else
|
467
|
+
count = sendfile(ext->fd, fd, ext->offset + fdinfo(fd).sent,
|
468
|
+
(size_t)act_sent, NULL, &act_sent, 0);
|
469
|
+
#endif
|
470
|
+
} while (count >= 0 && packet->buffer.len > (size_t)act_sent);
|
471
|
+
if (count < 0)
|
472
|
+
return -1;
|
473
|
+
sock_packet_rotate_unsafe(fd);
|
474
|
+
return act_sent;
|
475
|
+
}
|
476
|
+
|
477
|
+
#else
|
478
|
+
static int (*sock_sendfile_from_fd)(int fd, struct packet_s *packet) =
|
479
|
+
sock_write_from_fd;
|
480
|
+
|
481
|
+
#endif
|
482
|
+
#else
|
483
|
+
static int (*sock_sendfile_from_fd)(int fd, struct packet_s *packet) =
|
484
|
+
sock_write_from_fd;
|
485
|
+
#endif
|
486
|
+
|
487
|
+
/* *****************************************************************************
|
488
|
+
The API
|
489
|
+
***************************************************************************** */
|
490
|
+
|
491
|
+
/* *****************************************************************************
|
492
|
+
Process wide and helper sock_API.
|
493
|
+
*/
|
494
|
+
|
495
|
+
/**
Sets a socket to non blocking state.

This function is called automatically for the new socket, when using
`sock_accept` or `sock_connect`.

Returns 0 on success, -1 on error (see `fcntl` / `ioctl`).
*/
int sock_set_non_block(int fd) {
/* If they have O_NONBLOCK, use the Posix way to do it */
#if defined(O_NONBLOCK)
  /* Fixme: O_NONBLOCK is defined but broken on SunOS 4.1.x and AIX 3.2.5. */
  int flags;
  if (-1 == (flags = fcntl(fd, F_GETFL, 0)))
    flags = 0;
  return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
#else
  /* Otherwise, use the old way of doing it.
   * BUGFIX: the original used `FIOBIO`, which is not a real ioctl request
   * name - the non-blocking I/O request is `FIONBIO`. */
  static int flags = 1;
  return ioctl(fd, FIONBIO, &flags);
#endif
}
|
516
|
+
|
517
|
+
/**
|
518
|
+
Gets the maximum number of file descriptors this process can be allowed to
|
519
|
+
access (== maximum fd value + 1).
|
520
|
+
|
521
|
+
If the "soft" limit is lower than the "hard" limit, the process's limits will be
|
522
|
+
extended to the allowed "hard" limit.
|
523
|
+
*/
|
524
|
+
ssize_t sock_max_capacity(void) {
  /* the computed limit is cached after the first successful call */
  static ssize_t open_max = 0;
  if (open_max)
    return open_max;
  /* start from the system's reported per-process fd limit */
#ifdef _SC_OPEN_MAX
  open_max = sysconf(_SC_OPEN_MAX);
#elif defined(OPEN_MAX)
  open_max = OPEN_MAX;
#endif
  /* try to raise the soft limit all the way up to the hard limit */
  struct rlimit limits = {.rlim_max = 0};
  getrlimit(RLIMIT_NOFILE, &limits);
#if defined(__APPLE__) /* Apple's getrlimit is broken. */
  limits.rlim_cur = limits.rlim_max >= OPEN_MAX ? OPEN_MAX : limits.rlim_max;
#else
  limits.rlim_cur = limits.rlim_max;
#endif
  setrlimit(RLIMIT_NOFILE, &limits);
  getrlimit(RLIMIT_NOFILE, &limits);
  /* prefer the (possibly raised) soft limit when it is higher */
  if (open_max < ((ssize_t)limits.rlim_cur))
    open_max = limits.rlim_cur;
  /* make sure the library's fd map covers the whole range */
  initialize_sock_lib(open_max);
  return open_max;
}
|
557
|
+
|
558
|
+
/* *****************************************************************************
|
559
|
+
The main sock_API.
|
560
|
+
*/
|
561
|
+
|
562
|
+
/**
|
563
|
+
Opens a listening non-blocking socket. Return's the socket's UUID.
|
564
|
+
|
565
|
+
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
566
|
+
|
567
|
+
UUIDs with values less then -1 are valid values, depending on the system's
|
568
|
+
byte-ordering.
|
569
|
+
|
570
|
+
Socket UUIDs are predictable and shouldn't be used outside the local system.
|
571
|
+
They protect against connection mixups on concurrent systems (i.e. when saving
|
572
|
+
client data for "broadcasting" or when an old client task is preparing a
|
573
|
+
response in the background while a disconnection and a new connection occur on
|
574
|
+
the same `fd`).
|
575
|
+
*/
|
576
|
+
intptr_t sock_listen(const char *address, const char *port) {
  /* resolve the local address to bind to */
  struct addrinfo hints;
  struct addrinfo *results;
  memset(&hints, 0, sizeof hints);
  hints.ai_family = AF_UNSPEC;     /* IPv4 or IPv6 */
  hints.ai_socktype = SOCK_STREAM; /* TCP stream sockets */
  hints.ai_flags = AI_PASSIVE;     /* wildcard address when `address` is NULL */
  if (getaddrinfo(address, port, &hints, &results)) {
    return -1;
  }
  /* create the listening socket */
  int listener =
      socket(results->ai_family, results->ai_socktype, results->ai_protocol);
  if (listener <= 0) {
    freeaddrinfo(results);
    return -1;
  }
  /* the listener must never block */
  if (sock_set_non_block(listener) < 0) {
    freeaddrinfo(results);
    close(listener);
    return -1;
  }
  /* allow quick rebinding of the address after restarts */
  {
    int one = 1;
    setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
  }
  /* bind to (any of) the resolved addresses */
  {
    int bound = 0;
    for (struct addrinfo *addr = results; addr != NULL; addr = addr->ai_next) {
      if (bind(listener, addr->ai_addr, addr->ai_addrlen) == 0)
        bound = 1;
    }
    if (!bound) {
      freeaddrinfo(results);
      close(listener);
      return -1;
    }
  }
  freeaddrinfo(results);
  /* start accepting connections */
  if (listen(listener, SOMAXCONN) < 0) {
    close(listener);
    return -1;
  }
  /* register the fd with the library and hand back its uuid */
  clear_fd(listener, 1);
  return fd2uuid(listener);
}
|
634
|
+
|
635
|
+
/**
|
636
|
+
`sock_accept` accepts a new socket connection from the listening socket
|
637
|
+
`server_fd`, allowing the use of `sock_` functions with this new file
|
638
|
+
descriptor.
|
639
|
+
|
640
|
+
When using `libreact`, remember to call `int reactor_add(intptr_t uuid);` to
|
641
|
+
listen for events.
|
642
|
+
|
643
|
+
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
644
|
+
|
645
|
+
Socket UUIDs are predictable and shouldn't be used outside the local system.
|
646
|
+
They protect against connection mixups on concurrent systems (i.e. when saving
|
647
|
+
client data for "broadcasting" or when an old client task is preparing a
|
648
|
+
response in the background while a disconnection and a new connection occur on
|
649
|
+
the same `fd`).
|
650
|
+
*/
|
651
|
+
intptr_t sock_accept(intptr_t srv_uuid) {
  struct sockaddr_in6 peer;
  socklen_t peer_len = sizeof(peer);
  int client;
#ifdef SOCK_NONBLOCK
  /* accept4 sets the non-blocking flag atomically with the accept */
  client = accept4(sock_uuid2fd(srv_uuid), (struct sockaddr *)&peer, &peer_len,
                   SOCK_NONBLOCK);
  if (client <= 0)
    return -1;
#else
  client = accept(sock_uuid2fd(srv_uuid), (struct sockaddr *)&peer, &peer_len);
  if (client <= 0)
    return -1;
  sock_set_non_block(client);
#endif
  /* register the fd and remember the peer's address for `sock_peer_addr` */
  clear_fd(client, 1);
  fdinfo(client).addrinfo = peer;
  fdinfo(client).addrlen = peer_len;
  return fd2uuid(client);
}
|
673
|
+
|
674
|
+
/**
|
675
|
+
`sock_connect` is similar to `sock_accept` but should be used to initiate a
|
676
|
+
client connection to the address requested.
|
677
|
+
|
678
|
+
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
679
|
+
|
680
|
+
Socket UUIDs are predictable and shouldn't be used outside the local system.
|
681
|
+
They protect against connection mixups on concurrent systems (i.e. when saving
|
682
|
+
client data for "broadcasting" or when an old client task is preparing a
|
683
|
+
response in the background while a disconnection and a new connection occur on
|
684
|
+
the same `fd`).
|
685
|
+
|
686
|
+
When using `libreact`, remember to call `int reactor_add(intptr_t uuid);` to
|
687
|
+
listen for events.
|
688
|
+
|
689
|
+
NOTICE:
|
690
|
+
|
691
|
+
This function is non-blocking, meaning that the connection probably wasn't
|
692
|
+
established by the time the function returns (this prevents the function from
|
693
|
+
hanging while waiting for a network timeout).
|
694
|
+
|
695
|
+
Use select, poll, `libreact` or other solutions to review the connection state
|
696
|
+
before attempting to write to the socket.
|
697
|
+
*/
|
698
|
+
/* Initiates a non-blocking client connection to `address`:`port`. Returns the
 * socket's uuid on success, -1 on error. The connection is most likely still
 * pending when this returns (see the notice in the doc comment above). */
intptr_t sock_connect(char *address, char *port) {
  int fd;
  /* resolve the peer's address */
  struct addrinfo hints;
  struct addrinfo *addrinfo; // will point to the results
  memset(&hints, 0, sizeof hints);
  hints.ai_family = AF_UNSPEC;     // don't care IPv4 or IPv6
  hints.ai_socktype = SOCK_STREAM; // TCP stream sockets
  /* NOTE: the original set AI_PASSIVE here. That flag is a hint for
   * listening (bind) lookups and is ignored when `address` is non-NULL, so
   * it is omitted for this client-side lookup. */
  if (getaddrinfo(address, port, &hints, &addrinfo)) {
    return -1;
  }
  /* get the file descriptor */
  fd =
      socket(addrinfo->ai_family, addrinfo->ai_socktype, addrinfo->ai_protocol);
  if (fd <= 0) {
    freeaddrinfo(addrinfo);
    return -1;
  }
  /* make sure the socket is non-blocking before calling `connect` */
  if (sock_set_non_block(fd) < 0) {
    freeaddrinfo(addrinfo);
    close(fd);
    return -1;
  }
  /* EINPROGRESS is the expected "connection pending" result */
  if (connect(fd, addrinfo->ai_addr, addrinfo->ai_addrlen) < 0 &&
      errno != EINPROGRESS) {
    close(fd);
    freeaddrinfo(addrinfo);
    return -1;
  }
  clear_fd(fd, 1);
  /* BUGFIX: copy only `ai_addrlen` bytes. For IPv4 results `ai_addr` points
   * at a `struct sockaddr_in`, which is smaller than `struct sockaddr_in6`,
   * so the original full-struct assignment could read past the end of the
   * `getaddrinfo` allocation. */
  memset(&fdinfo(fd).addrinfo, 0, sizeof(fdinfo(fd).addrinfo));
  memcpy(&fdinfo(fd).addrinfo, addrinfo->ai_addr,
         addrinfo->ai_addrlen > sizeof(fdinfo(fd).addrinfo)
             ? sizeof(fdinfo(fd).addrinfo)
             : addrinfo->ai_addrlen);
  fdinfo(fd).addrlen = addrinfo->ai_addrlen;
  freeaddrinfo(addrinfo);
  return fd2uuid(fd);
}
|
736
|
+
|
737
|
+
/**
|
738
|
+
`sock_open` takes an existing file descriptor `fd` and initializes it's status
|
739
|
+
as open and available for `sock_API` calls, returning a valid UUID.
|
740
|
+
|
741
|
+
This will reinitialize the data (user buffer etc') for the file descriptor
|
742
|
+
provided, calling the `reactor_on_close` callback if the `fd` was previously
|
743
|
+
marked as used.
|
744
|
+
|
745
|
+
When using `libreact`, remember to call `int reactor_add(intptr_t uuid);` to
|
746
|
+
listen for events.
|
747
|
+
|
748
|
+
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
749
|
+
|
750
|
+
Socket UUIDs are predictable and shouldn't be used outside the local system.
|
751
|
+
They protect against connection mixups on concurrent systems (i.e. when saving
|
752
|
+
client data for "broadcasting" or when an old client task is preparing a
|
753
|
+
response in the background while a disconnection and a new connection occur on
|
754
|
+
the same `fd`).
|
755
|
+
*/
|
756
|
+
intptr_t sock_open(int fd) {
  /* (re)initialize the fd's slot - this fires the close callback if the fd
   * was still marked as open, then marks it open and available */
  clear_fd(fd, 1);
  return fd2uuid(fd);
}
|
760
|
+
|
761
|
+
/** Returns the information available about the socket's peer address. */
|
762
|
+
sock_peer_addr_s sock_peer_addr(intptr_t uuid) {
  /* no address information exists for invalid uuids */
  if (validate_uuid(uuid))
    return (sock_peer_addr_s){.addr = NULL};
  int fd = sock_uuid2fd(uuid);
  /* ...or for sockets whose peer address was never recorded */
  if (!fdinfo(fd).addrlen)
    return (sock_peer_addr_s){.addr = NULL};
  return (sock_peer_addr_s){
      .addrlen = fdinfo(fd).addrlen,
      .addr = (struct sockaddr *)&fdinfo(fd).addrinfo,
  };
}
|
770
|
+
|
771
|
+
/**
|
772
|
+
Returns 1 if the uuid refers to a valid and open, socket.
|
773
|
+
|
774
|
+
Returns 0 if not.
|
775
|
+
*/
|
776
|
+
int sock_isvalid(intptr_t uuid) {
|
777
|
+
return validate_uuid(uuid) == 0 && fdinfo(sock_uuid2fd(uuid)).open;
|
778
|
+
}
|
779
|
+
|
780
|
+
/**
|
781
|
+
`sock_fd2uuid` takes an existing file descriptor `fd` and returns its active
|
782
|
+
`uuid`.
|
783
|
+
|
784
|
+
If the file descriptor is marked as closed (wasn't opened / registered with
|
785
|
+
`libsock`) the function returns -1;
|
786
|
+
|
787
|
+
If the file descriptor was closed remotely (or not using `libsock`), a false
|
788
|
+
positive will be possible. This is not an issue, since the use of an invalid fd
|
789
|
+
will result in the registry being updated and the fd being closed.
|
790
|
+
|
791
|
+
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
792
|
+
*/
|
793
|
+
intptr_t sock_fd2uuid(int fd) {
|
794
|
+
return (fd > 0 && sock_data_store.capacity > (size_t)fd &&
|
795
|
+
sock_data_store.fds[fd].open)
|
796
|
+
? (intptr_t)(fd2uuid(fd))
|
797
|
+
: -1;
|
798
|
+
}
|
799
|
+
|
800
|
+
/**
|
801
|
+
`sock_read` attempts to read up to count bytes from the socket into the buffer
|
802
|
+
starting at buf.
|
803
|
+
|
804
|
+
On a connection error (NOT EAGAIN or EWOULDBLOCK), signal interrupt, or when the
|
805
|
+
connection was closed, `sock_read` returns -1.
|
806
|
+
|
807
|
+
The value 0 is the valid value indicating no data was read.
|
808
|
+
|
809
|
+
Data might be available in the kernel's buffer while it is not available to be
|
810
|
+
read using `sock_read` (i.e., when using a transport layer, such as TLS).
|
811
|
+
*/
|
812
|
+
ssize_t sock_read(intptr_t uuid, void *buf, size_t count) {
  /* reject stale / closed uuids before taking the lock */
  if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).open) {
    errno = EBADF;
    return -1;
  }
  lock_fd(sock_uuid2fd(uuid));
  /* re-check under the lock - the socket may have closed concurrently */
  if (!fdinfo(sock_uuid2fd(uuid)).open) {
    unlock_fd(sock_uuid2fd(uuid));
    errno = EBADF;
    return -1;
  }
  /* delegate to the (possibly user-supplied) read hook, e.g. for TLS */
  ssize_t ret = fdinfo(sock_uuid2fd(uuid)).rw_hooks->read(uuid, buf, count);
  unlock_fd(sock_uuid2fd(uuid));
  sock_touch(uuid);
  if (ret > 0)
    return ret;
  /* "no data yet" conditions are reported as 0, not as an error */
  if (ret < 0 && (errno == EWOULDBLOCK || errno == EAGAIN || errno == EINTR ||
                  errno == ENOTCONN))
    return 0;
  /* EOF (ret == 0) or a hard error: close the connection, preserving the
   * hook's errno across `sock_force_close` (EOF reports ECONNRESET) */
  int old_errno = errno;
  sock_force_close(uuid);
  errno = ret ? old_errno : ECONNRESET;
  return -1;
}
|
836
|
+
|
837
|
+
/**
|
838
|
+
`sock_write2_fn` is the actual function behind the macro `sock_write2`.
|
839
|
+
*/
|
840
|
+
ssize_t sock_write2_fn(sock_write_info_s options) {
|
841
|
+
int fd = sock_uuid2fd(options.uuid);
|
842
|
+
if (validate_uuid(options.uuid))
|
843
|
+
clear_fd(fd, 0);
|
844
|
+
// avoid work when an error is expected to occur.
|
845
|
+
if (!fdinfo(fd).open || options.offset < 0) {
|
846
|
+
if (options.move == 0) {
|
847
|
+
errno = (options.offset < 0) ? ERANGE : EBADF;
|
848
|
+
return -1;
|
849
|
+
}
|
850
|
+
if (options.move)
|
851
|
+
(options.dealloc ? options.dealloc : free)((void *)options.buffer);
|
852
|
+
else
|
853
|
+
(options.dealloc ? (void (*)(intptr_t))options.dealloc
|
854
|
+
: sock_perform_close_fd)(options.data_fd);
|
855
|
+
errno = (options.offset < 0) ? ERANGE : EBADF;
|
856
|
+
return -1;
|
857
|
+
}
|
858
|
+
// if (options.offset < 0)
|
859
|
+
// options.offset = 0;
|
860
|
+
packet_s *packet = sock_packet_grab();
|
861
|
+
packet->buffer.len = options.length;
|
862
|
+
if (options.is_fd == 0) { /* is data */
|
863
|
+
if (options.move == 0) { /* memory is copied. */
|
864
|
+
if (options.length <= BUFFER_PACKET_SIZE) {
|
865
|
+
/* small enough for internal buffer */
|
866
|
+
memcpy(packet->buffer.buf, (uint8_t *)options.buffer + options.offset,
|
867
|
+
options.length);
|
868
|
+
packet->metadata = (struct packet_metadata_s){
|
869
|
+
.write_func = sock_write_buffer,
|
870
|
+
.free_func = (void (*)(packet_s *))SOCK_DEALLOC_NOOP};
|
871
|
+
goto place_packet_in_queue;
|
872
|
+
}
|
873
|
+
/* too big for the pre-allocated buffer */
|
874
|
+
void *copy = malloc(options.length);
|
875
|
+
memcpy(copy, (uint8_t *)options.buffer + options.offset, options.length);
|
876
|
+
options.offset = 0;
|
877
|
+
options.buffer = copy;
|
878
|
+
}
|
879
|
+
/* memory moved, not copied. */
|
880
|
+
struct sock_packet_ext_data_s *ext = (void *)packet->buffer.buf;
|
881
|
+
ext->buffer = (uint8_t *)options.buffer + options.offset;
|
882
|
+
ext->to_free = (uint8_t *)options.buffer;
|
883
|
+
ext->dealloc = options.dealloc ? options.dealloc : free;
|
884
|
+
packet->metadata = (struct packet_metadata_s){
|
885
|
+
.write_func = sock_write_buffer_ext, .free_func = sock_free_buffer_ext};
|
886
|
+
} else { /* is file */
|
887
|
+
struct sock_packet_file_data_s *ext = (void *)packet->buffer.buf;
|
888
|
+
ext->fd = options.data_fd;
|
889
|
+
ext->close = options.dealloc ? options.dealloc
|
890
|
+
: (void (*)(void *))sock_perform_close_fd;
|
891
|
+
ext->offset = options.offset;
|
892
|
+
packet->metadata = (struct packet_metadata_s){
|
893
|
+
.write_func =
|
894
|
+
(fdinfo(sock_uuid2fd(options.uuid)).rw_hooks == &sock_default_hooks
|
895
|
+
? sock_sendfile_from_fd
|
896
|
+
: sock_write_from_fd),
|
897
|
+
.free_func = options.move ? sock_close_from_fd
|
898
|
+
: (void (*)(packet_s *))SOCK_DEALLOC_NOOP};
|
899
|
+
}
|
900
|
+
/* place packet in queue */
|
901
|
+
place_packet_in_queue:
|
902
|
+
if (validate_uuid(options.uuid))
|
903
|
+
goto error;
|
904
|
+
lock_fd(fd);
|
905
|
+
if (!fdinfo(fd).open) {
|
906
|
+
unlock_fd(fd);
|
907
|
+
goto error;
|
908
|
+
}
|
909
|
+
packet_s **pos = &fdinfo(fd).packet;
|
910
|
+
if (options.urgent == 0) {
|
911
|
+
while (*pos)
|
912
|
+
pos = &(*pos)->metadata.next;
|
913
|
+
} else {
|
914
|
+
if (*pos && fdinfo(fd).sent)
|
915
|
+
pos = &(*pos)->metadata.next;
|
916
|
+
packet->metadata.next = *pos;
|
917
|
+
}
|
918
|
+
*pos = packet;
|
919
|
+
unlock_fd(fd);
|
920
|
+
sock_touch(options.uuid);
|
921
|
+
defer(sock_flush_defer, (void *)options.uuid, NULL);
|
922
|
+
return 0;
|
923
|
+
|
924
|
+
error:
|
925
|
+
sock_packet_free(packet);
|
926
|
+
errno = EBADF;
|
927
|
+
return -1;
|
928
|
+
}
|
929
|
+
#define sock_write2(...) sock_write2_fn((sock_write_info_s){__VA_ARGS__})
|
930
|
+
|
931
|
+
/**
|
932
|
+
`sock_flush` writes the data in the internal buffer to the underlying file
|
933
|
+
descriptor and closes the underlying fd once it's marked for closure (and all
|
934
|
+
the data was sent).
|
935
|
+
|
936
|
+
Return value: 0 will be returned on success and -1 will be returned on an error
|
937
|
+
or when the connection is closed.
|
938
|
+
|
939
|
+
**Please Note**: when using `libreact`, the `sock_flush` will be called
|
940
|
+
automatically when the socket is ready.
|
941
|
+
*/
|
942
|
+
ssize_t sock_flush(intptr_t uuid) {
  int fd = sock_uuid2fd(uuid);
  if (validate_uuid(uuid) || !fdinfo(fd).open)
    return -1;
  ssize_t ret;
  lock_fd(fd);
retry:
  /* first drain any buffer held by the transport-layer hook (e.g. TLS) */
  while ((ret = fdinfo(fd).rw_hooks->flush(fd)) > 0)
    ;
  if (ret == -1) {
    if (errno == EINTR)
      goto retry;
    /* would block - not an error; try again on the next flush */
    if (errno == EWOULDBLOCK || errno == EAGAIN || errno == ENOTCONN)
      goto finish;
    goto error;
  }
  /* then write queued packets until the queue empties or writing blocks */
  while (fdinfo(fd).packet && (ret = fdinfo(fd).packet->metadata.write_func(
                                   fd, fdinfo(fd).packet)) > 0)
    ;
  if (ret == -1) {
    if (errno == EINTR)
      goto retry;
    if (errno == EWOULDBLOCK || errno == EAGAIN || errno == ENOTCONN)
      goto finish;
    goto error;
  }
  /* all data sent and the socket is marked for closure - close it now
   * (the `error` path performs the actual close) */
  if (fdinfo(fd).close && !fdinfo(fd).packet)
    goto error;
finish:
  unlock_fd(fd);
  sock_touch(uuid);
  return 0;
error:
  unlock_fd(fd);
  sock_force_close(uuid);
  return -1;
}
|
979
|
+
/**
|
980
|
+
`sock_flush_strong` performs the same action as `sock_flush` but returns only
|
981
|
+
after all the data was sent. This is a "busy" wait, polling isn't performed.
|
982
|
+
*/
|
983
|
+
void sock_flush_strong(intptr_t uuid) {
  /* busy-wait until `sock_flush` reports an error or a closed connection */
  for (;;) {
    if (sock_flush(uuid) != 0)
      return;
  }
}
|
987
|
+
/**
|
988
|
+
Calls `sock_flush` for each file descriptor that's buffer isn't empty.
|
989
|
+
*/
|
990
|
+
void sock_flush_all(void) {
|
991
|
+
for (size_t fd = 0; fd < sock_data_store.capacity; fd++) {
|
992
|
+
if (!fdinfo(fd).open || !fdinfo(fd).packet)
|
993
|
+
continue;
|
994
|
+
sock_flush(fd2uuid(fd));
|
995
|
+
}
|
996
|
+
}
|
997
|
+
/**
|
998
|
+
`sock_close` marks the connection for disconnection once all the data was sent.
|
999
|
+
The actual disconnection will be managed by the `sock_flush` function.
|
1000
|
+
|
1001
|
+
`sock_flush` will automatically be called.
|
1002
|
+
*/
|
1003
|
+
void sock_close(intptr_t uuid) {
|
1004
|
+
if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).open)
|
1005
|
+
return;
|
1006
|
+
fdinfo(sock_uuid2fd(uuid)).close = 1;
|
1007
|
+
sock_flush(uuid);
|
1008
|
+
}
|
1009
|
+
/**
|
1010
|
+
`sock_force_close` closes the connection immediately, without adhering to any
|
1011
|
+
protocol restrictions and without sending any remaining data in the connection
|
1012
|
+
buffer.
|
1013
|
+
*/
|
1014
|
+
void sock_force_close(intptr_t uuid) {
  if (validate_uuid(uuid))
    return;
  int fd = sock_uuid2fd(uuid);
  /* terminate both directions, close the fd and reset its slot (firing the
   * close callback via `clear_fd`) */
  shutdown(fd, SHUT_RDWR);
  close(fd);
  clear_fd(fd, 0);
}
|
1021
|
+
|
1022
|
+
/* *****************************************************************************
|
1023
|
+
Direct user level buffer API.
|
1024
|
+
|
1025
|
+
The following API allows data to be written directly to the packet, minimizing
|
1026
|
+
memory copy operations.
|
1027
|
+
*/
|
1028
|
+
|
1029
|
+
/**
|
1030
|
+
Checks out a `sock_buffer_s` from the buffer pool.
|
1031
|
+
*/
|
1032
|
+
sock_buffer_s *sock_buffer_checkout(void) {
|
1033
|
+
packet_s *ret = sock_packet_grab();
|
1034
|
+
return &ret->buffer;
|
1035
|
+
}
|
1036
|
+
/**
Attaches a packet to a socket's output buffer. Note that this only queues the
data - the actual write is performed later by `sock_flush`.

Returns -1 on error. Returns 0 on success. The `buffer` memory is always
automatically managed.
*/
|
1043
|
+
ssize_t sock_buffer_send(intptr_t uuid, sock_buffer_s *buffer) {
  if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).open) {
    /* the buffer is always consumed, even on error */
    sock_buffer_free(buffer);
    return -1;
  }
  /* recover the owning packet - the buffer sits immediately after the
   * packet's metadata section */
  packet_s **tmp, *packet = (packet_s *)((uintptr_t)(buffer) -
                                         sizeof(struct packet_metadata_s));
  packet->metadata = (struct packet_metadata_s){
      .write_func = sock_write_buffer,
      .free_func = (void (*)(packet_s *))SOCK_DEALLOC_NOOP};
  int fd = sock_uuid2fd(uuid);
  lock_fd(fd);
  /* append the packet at the tail of the socket's outgoing queue */
  tmp = &fdinfo(fd).packet;
  while (*tmp)
    tmp = &(*tmp)->metadata.next;
  *tmp = packet;
  unlock_fd(fd);
  return 0;
}
|
1062
|
+
|
1063
|
+
/**
|
1064
|
+
Returns TRUE (non 0) if there is data waiting to be written to the socket in the
|
1065
|
+
user-land buffer.
|
1066
|
+
*/
|
1067
|
+
int sock_has_pending(intptr_t uuid) {
|
1068
|
+
return validate_uuid(uuid) == 0 && fdinfo(sock_uuid2fd(uuid)).open &&
|
1069
|
+
fdinfo(sock_uuid2fd(uuid)).packet;
|
1070
|
+
}
|
1071
|
+
|
1072
|
+
/**
|
1073
|
+
Use `sock_buffer_free` to free unused buffers that were checked-out using
|
1074
|
+
`sock_buffer_checkout`.
|
1075
|
+
*/
|
1076
|
+
void sock_buffer_free(sock_buffer_s *buffer) {
  /* recover the packet that owns this buffer and return it to the pool */
  uintptr_t packet_start =
      (uintptr_t)(buffer) - sizeof(struct packet_metadata_s);
  sock_packet_free((packet_s *)packet_start);
}
|
1081
|
+
|
1082
|
+
/* *****************************************************************************
|
1083
|
+
TLC - Transport Layer Callback.
|
1084
|
+
|
1085
|
+
Experimental
|
1086
|
+
*/
|
1087
|
+
|
1088
|
+
/** Gets a socket hook state (a pointer to the struct). */
|
1089
|
+
struct sock_rw_hook_s *sock_rw_hook_get(intptr_t uuid) {
  /* returns NULL when the uuid is invalid, closed, or still uses the
   * default hooks. NOTE: the comma operator reuses `uuid` to hold the plain
   * fd after validation - both the condition's last clause and the return
   * statement read `uuid` as an fd. */
  if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).open ||
      ((uuid = sock_uuid2fd(uuid)),
       fdinfo(uuid).rw_hooks == &sock_default_hooks))
    return NULL;
  return fdinfo(uuid).rw_hooks;
}
|
1096
|
+
|
1097
|
+
/** Sets a socket hook state (a pointer to the struct). */
|
1098
|
+
int sock_rw_hook_set(intptr_t uuid, sock_rw_hook_s *rw_hooks) {
|
1099
|
+
if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).open)
|
1100
|
+
return -1;
|
1101
|
+
if (!rw_hooks->read)
|
1102
|
+
rw_hooks->read = sock_default_hooks_read;
|
1103
|
+
if (!rw_hooks->write)
|
1104
|
+
rw_hooks->write = sock_default_hooks_write;
|
1105
|
+
if (!rw_hooks->flush)
|
1106
|
+
rw_hooks->flush = sock_default_hooks_flush;
|
1107
|
+
if (!rw_hooks->on_close)
|
1108
|
+
rw_hooks->on_close = sock_default_hooks_on_close;
|
1109
|
+
uuid = sock_uuid2fd(uuid);
|
1110
|
+
lock_fd(sock_uuid2fd(uuid));
|
1111
|
+
fdinfo(uuid).rw_hooks = rw_hooks;
|
1112
|
+
unlock_fd(uuid);
|
1113
|
+
return 0;
|
1114
|
+
}
|
1115
|
+
|
1116
|
+
/* *****************************************************************************
|
1117
|
+
test
|
1118
|
+
*/
|
1119
|
+
#ifdef DEBUG
/* Smoke test: performs a plain HTTP GET against google.com, then verifies
 * that every pooled packet was returned to the packet pool and reports the
 * library's allocated capacity. */
void sock_libtest(void) {
  char request[] = "GET / HTTP/1.1\r\n"
                   "Host: www.google.com\r\n"
                   "\r\n";
  char buff[1024];
  ssize_t i_read;
  intptr_t uuid = sock_connect("www.google.com", "80");
  if (uuid == -1)
    perror("sock_connect failed"), exit(1);
  if (sock_write(uuid, request, sizeof(request) - 1) < 0)
    perror("sock_write error ");

  while ((i_read = sock_read(uuid, buff, 1024)) >= 0) {
    if (i_read == 0) { // could be we hadn't finished connecting yet.
      sock_flush(uuid);
      reschedule_thread();
    } else {
      fprintf(stderr, "\n%.*s\n\n", (int)i_read, buff);
      break;
    }
  }
  if (i_read < 0)
    perror("Error with sock_read ");
  fprintf(stderr, "done.\n");
  sock_close(uuid);
  /* every packet should be back in the pool after the connection closes */
  packet_s *head, *pos;
  pos = head = packet_pool.next;
  size_t count = 0;
  while (pos) {
    count++;
    pos = pos->metadata.next;
  }
  /* BUGFIX: `count` is a `size_t`; the original used `%lu`, which is
   * undefined behavior wherever `size_t` isn't `unsigned long`. `%zu` is the
   * correct, portable length modifier. */
  fprintf(stderr, "Packet pool test %s (%d =? %zu)\n",
          count == BUFFER_PACKET_POOL ? "PASS" : "FAIL", BUFFER_PACKET_POOL,
          count);
  count = sock_max_capacity();
  printf("Allocated sock capacity %zu X %zu\n", count,
         sizeof(struct fd_data_s));
  (void)head; /* kept for parity with the original; silences unused warnings */
}
#endif
|