iodine 0.6.5 → 0.7.0
This release of iodine has been flagged as potentially problematic.
- checksums.yaml +4 -4
- data/CHANGELOG.md +11 -0
- data/README.md +4 -4
- data/SPEC-Websocket-Draft.md +3 -6
- data/bin/mustache.rb +128 -0
- data/examples/test_template.mustache +16 -0
- data/ext/iodine/fio.c +9397 -0
- data/ext/iodine/fio.h +4723 -0
- data/ext/iodine/fio_ary.h +353 -54
- data/ext/iodine/fio_cli.c +351 -361
- data/ext/iodine/fio_cli.h +84 -105
- data/ext/iodine/fio_hashmap.h +70 -16
- data/ext/iodine/fio_json_parser.h +35 -24
- data/ext/iodine/fio_siphash.c +104 -4
- data/ext/iodine/fio_siphash.h +18 -2
- data/ext/iodine/fio_str.h +1218 -0
- data/ext/iodine/fio_tmpfile.h +1 -1
- data/ext/iodine/fiobj.h +13 -8
- data/ext/iodine/fiobj4sock.h +6 -8
- data/ext/iodine/fiobj_ary.c +107 -17
- data/ext/iodine/fiobj_ary.h +36 -4
- data/ext/iodine/fiobj_data.c +146 -127
- data/ext/iodine/fiobj_data.h +25 -23
- data/ext/iodine/fiobj_hash.c +7 -7
- data/ext/iodine/fiobj_hash.h +6 -5
- data/ext/iodine/fiobj_json.c +20 -17
- data/ext/iodine/fiobj_json.h +5 -5
- data/ext/iodine/fiobj_mem.h +71 -0
- data/ext/iodine/fiobj_mustache.c +310 -0
- data/ext/iodine/fiobj_mustache.h +40 -0
- data/ext/iodine/fiobj_numbers.c +199 -94
- data/ext/iodine/fiobj_numbers.h +7 -7
- data/ext/iodine/fiobj_str.c +142 -333
- data/ext/iodine/fiobj_str.h +65 -55
- data/ext/iodine/fiobject.c +49 -11
- data/ext/iodine/fiobject.h +40 -39
- data/ext/iodine/http.c +382 -190
- data/ext/iodine/http.h +124 -80
- data/ext/iodine/http1.c +99 -127
- data/ext/iodine/http1.h +5 -5
- data/ext/iodine/http1_parser.c +3 -2
- data/ext/iodine/http1_parser.h +2 -2
- data/ext/iodine/http_internal.c +14 -12
- data/ext/iodine/http_internal.h +25 -19
- data/ext/iodine/iodine.c +37 -18
- data/ext/iodine/iodine.h +4 -0
- data/ext/iodine/iodine_caller.c +9 -2
- data/ext/iodine/iodine_caller.h +2 -0
- data/ext/iodine/iodine_connection.c +82 -117
- data/ext/iodine/iodine_defer.c +57 -50
- data/ext/iodine/iodine_defer.h +0 -1
- data/ext/iodine/iodine_fiobj2rb.h +4 -2
- data/ext/iodine/iodine_helpers.c +4 -4
- data/ext/iodine/iodine_http.c +25 -32
- data/ext/iodine/iodine_json.c +2 -1
- data/ext/iodine/iodine_mustache.c +423 -0
- data/ext/iodine/iodine_mustache.h +6 -0
- data/ext/iodine/iodine_pubsub.c +48 -153
- data/ext/iodine/iodine_pubsub.h +5 -4
- data/ext/iodine/iodine_rack_io.c +7 -5
- data/ext/iodine/iodine_store.c +16 -13
- data/ext/iodine/iodine_tcp.c +26 -34
- data/ext/iodine/mustache_parser.h +1085 -0
- data/ext/iodine/redis_engine.c +740 -646
- data/ext/iodine/redis_engine.h +13 -15
- data/ext/iodine/resp_parser.h +11 -5
- data/ext/iodine/websocket_parser.h +13 -13
- data/ext/iodine/websockets.c +240 -393
- data/ext/iodine/websockets.h +52 -113
- data/lib/iodine.rb +1 -1
- data/lib/iodine/mustache.rb +140 -0
- data/lib/iodine/version.rb +1 -1
- metadata +15 -28
- data/ext/iodine/defer.c +0 -566
- data/ext/iodine/defer.h +0 -148
- data/ext/iodine/evio.c +0 -26
- data/ext/iodine/evio.h +0 -161
- data/ext/iodine/evio_callbacks.c +0 -26
- data/ext/iodine/evio_epoll.c +0 -251
- data/ext/iodine/evio_kqueue.c +0 -194
- data/ext/iodine/facil.c +0 -2325
- data/ext/iodine/facil.h +0 -616
- data/ext/iodine/fio_base64.c +0 -277
- data/ext/iodine/fio_base64.h +0 -71
- data/ext/iodine/fio_llist.h +0 -257
- data/ext/iodine/fio_mem.c +0 -675
- data/ext/iodine/fio_mem.h +0 -143
- data/ext/iodine/fio_random.c +0 -248
- data/ext/iodine/fio_random.h +0 -45
- data/ext/iodine/fio_sha1.c +0 -362
- data/ext/iodine/fio_sha1.h +0 -107
- data/ext/iodine/fio_sha2.c +0 -842
- data/ext/iodine/fio_sha2.h +0 -169
- data/ext/iodine/pubsub.c +0 -867
- data/ext/iodine/pubsub.h +0 -221
- data/ext/iodine/sock.c +0 -1366
- data/ext/iodine/sock.h +0 -566
- data/ext/iodine/spnlock.inc +0 -111
data/ext/iodine/pubsub.h
DELETED
@@ -1,221 +0,0 @@
/*
Copyright: Boaz segev, 2017
License: MIT

Feel free to copy, use and enjoy according to the license provided.
*/
#ifndef H_FACIL_PUBSUB_H
/**
 * This pub/sub API is designed to unload pub/sub stress from external
 * messanging systems onto the local process.
 *
 * For example, the NULL pub/sub engine, which is routed to the facil_cluster
 * engine, will only publish a single message per process instead of a message
 * per client, allowing the cluster communication channel to be less crowded
 * when possible.
 *
 * This should allow pub/sub engines, such as Redis, to spread their workload
 * between all of an application's processes, enhancing overall performance.
 */
#define H_FACIL_PUBSUB_H
#include "facil.h"
#include "fiobj.h"

/* support C++ */
#ifdef __cplusplus
extern "C" {
#endif

#ifndef FIO_PUBBSUB_MAX_CHANNEL_LEN
#define FIO_PUBBSUB_MAX_CHANNEL_LEN 1024
#endif

/** An opaque pointer used to identify a subscription. */
typedef struct pubsub_sub_s *pubsub_sub_pt;

/** A pub/sub engine data structure. See details later on. */
typedef struct pubsub_engine_s pubsub_engine_s;

/** The default pub/sub engine.
 * This engine performs pub/sub within a group of processes (process cluster).
 *
 * The process cluser is initialized by the `facil_run` command with `processes`
 * set to more than 1.
 */
extern pubsub_engine_s const *PUBSUB_CLUSTER_ENGINE;

/** An engine that performs pub/sub only within a single process. */
extern pubsub_engine_s const *PUBSUB_PROCESS_ENGINE;

/** Allows process wide changes to the default Pub/Sub Engine.
 * Setting a new default before calling `facil_run` will change the default for
 * the whole process cluster.
 */
extern pubsub_engine_s *PUBSUB_DEFAULT_ENGINE;

/** Publishing and on_message callback arguments. */
typedef struct pubsub_message_s {
  /** The pub/sub engine that should be used to farward this message. */
  pubsub_engine_s const *engine;
  /** The pub/sub target channnel. */
  FIOBJ channel;
  /** The pub/sub message. */
  FIOBJ message;
  /** The subscription that prompted the message to be routed to the client. */
  pubsub_sub_pt subscription;
  /** Client opaque data pointer (from the `subscribe`) function call. */
  void *udata1;
  /** Client opaque data pointer (from the `subscribe`) function call. */
  void *udata2;
} pubsub_message_s;

/** The arguments used for `pubsub_subscribe` or `pubsub_find_sub`. */
struct pubsub_subscribe_args {
  /** The channel namr used for the subscription. */
  FIOBJ channel;
  /** The on message callback. the `*msg` pointer is to a temporary object. */
  void (*on_message)(pubsub_message_s *msg);
  /** An optional callback for when a subscription is fully canceled. */
  void (*on_unsubscribe)(void *udata1, void *udata2);
  /** Opaque user data#1 */
  void *udata1;
  /** Opaque user data#2 .. using two allows allocation to be avoided. */
  void *udata2;
  /** Use pattern matching for channel subscription. */
  unsigned use_pattern : 1;
};

/**
 * Subscribes to a specific channel.
 *
 * Returns a subscription pointer or NULL (failure).
 */
pubsub_sub_pt pubsub_subscribe(struct pubsub_subscribe_args);
#define pubsub_subscribe(...) \
  pubsub_subscribe((struct pubsub_subscribe_args){__VA_ARGS__})

/**
 * This helper searches for an existing subscription.
 *
 * Use with care, NEVER call `pubsub_unsubscribe` more times than you have
 * called `pubsub_subscribe`, since the subscription handle memory is realesed
 * onnce the reference count reaches 0.
 *
 * Returns a subscription pointer or NULL (none found).
 */
pubsub_sub_pt pubsub_find_sub(struct pubsub_subscribe_args);
#define pubsub_find_sub(...) \
  pubsub_find_sub((struct pubsub_subscribe_args){__VA_ARGS__})

/**
 * This helper returns a temporary handle to an existing subscription's channel.
 *
 * To keep the handle beyond the lifetime of the subscription, use `fiobj_dup`.
 */
FIOBJ pubsub_sub_channel(pubsub_sub_pt);

/**
 * Unsubscribes from a specific subscription.
 *
 * Note: This should be called exactly the same number times as
 * `pubsub_subscribe`. Any less and a memory leak might occur. Any more, and the
 * program might crash.
 *
 * Returns 0 on success and -1 on failure.
 */
int pubsub_unsubscribe(pubsub_sub_pt subscription);

/**
 * Publishes a message to a channel belonging to a pub/sub service (engine).
 *
 * Returns 0 on success and -1 on failure (i.e., no channel, no message or no
 * known subscriptions).
 *
 * NOTE: Memory ownership is retained by the calling function. Both the channel
 * and the message should be freed when the caller is done with them.
 */
int pubsub_publish(struct pubsub_message_s);
#define pubsub_publish(...) \
  pubsub_publish((struct pubsub_message_s){__VA_ARGS__})

/**
 * defers message hadling if it can't be performed (i.e., resource is busy) or
 * should be fragmented (allowing large tasks to be broken down).
 *
 * This should only be called from within the `on_message` callback.
 *
 * It's recommended that the `on_message` callback return immediately following
 * this function call, as code might run concurrently.
 *
 * Uses reference counting for zero copy.
 *
 * It's impossible to use a different `on_message` callbck without resorting to
 * memory allocations... so when in need, manage routing withing the
 * `on_message` callback.
 */
void pubsub_defer(pubsub_message_s *msg);

/**
 * Pub/Sub services (engines) MUST provide the listed function pointers.
 *
 * Engines should also register using the `pubsub_engine_register` function.
 *
 * Engines should deregister, before being destroyed, by using the
 * `pubsub_engine_deregister` function.
 *
 * When an engine received a message to publish, they should call the
 * `pubsub_publish` function with the engine to which the message is forwarded.
 * i.e.:
 *
 *       pubsub_publish(
 *           .engine = PUBSUB_PROCESS_ENGINE,
 *           .channel = channel_name,
 *           .message = msg_body );
 *
 * Engines MUST NOT free any of the FIOBJ objects they receive.
 *
 */
struct pubsub_engine_s {
  /** Must subscribe channel. Failures are ignored. */
  void (*subscribe)(const pubsub_engine_s *eng, FIOBJ channel,
                    uint8_t use_pattern);
  /** Must unsubscribe channel. Failures are ignored. */
  void (*unsubscribe)(const pubsub_engine_s *eng, FIOBJ channel,
                      uint8_t use_pattern);
  /** Should return 0 on success and -1 on failure. */
  int (*publish)(const pubsub_engine_s *eng, FIOBJ channel, FIOBJ msg);
  /**
   * facil.io will call this callback whenever starting, or restarting, the
   * reactor.
   *
   * This will be called when facil.io starts (the master process).
   *
   * This will also be called when forking, after facil.io closes all
   * connections and claim to shut down (running all deferred event).
   */
  void (*on_startup)(const pubsub_engine_s *eng);
};

/** Registers an engine, so it's callback can be called. */
void pubsub_engine_register(pubsub_engine_s *engine);

/** Unregisters an engine, so it could be safely destroyed. */
void pubsub_engine_deregister(pubsub_engine_s *engine);

/**
 * Engines can ask facil.io to resubscribe to all active channels.
 *
 * This allows engines that lost their connection to their Pub/Sub service to
 * resubscribe all the currently active channels with the new connection.
 *
 * CAUTION: This is an evented task... try not to free the engine's memory while
 * resubscriptions are under way...
 */
void pubsub_engine_resubscribe(pubsub_engine_s *eng);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* H_FACIL_PUBSUB_H */
data/ext/iodine/sock.c
DELETED
@@ -1,1366 +0,0 @@
|
|
1
|
-
/*
|
2
|
-
Copyright: Boaz Segev, 2016-2017
|
3
|
-
License: MIT
|
4
|
-
|
5
|
-
Feel free to copy, use and enjoy according to the license provided.
|
6
|
-
*/
|
7
|
-
#ifndef _GNU_SOURCE
|
8
|
-
#define _GNU_SOURCE
|
9
|
-
#endif
|
10
|
-
|
11
|
-
#include "sock.h"
|
12
|
-
#include "spnlock.inc"
|
13
|
-
/* *****************************************************************************
|
14
|
-
Includes and state
|
15
|
-
***************************************************************************** */
|
16
|
-
|
17
|
-
#include <errno.h>
|
18
|
-
#include <fcntl.h>
|
19
|
-
#include <limits.h>
|
20
|
-
#include <netdb.h>
|
21
|
-
#include <netinet/in.h>
|
22
|
-
#include <netinet/tcp.h>
|
23
|
-
#include <stdio.h>
|
24
|
-
#include <stdlib.h>
|
25
|
-
#include <string.h>
|
26
|
-
#include <sys/ioctl.h>
|
27
|
-
#include <sys/resource.h>
|
28
|
-
#include <sys/socket.h>
|
29
|
-
#include <sys/stat.h>
|
30
|
-
#include <sys/types.h>
|
31
|
-
#include <sys/un.h>
|
32
|
-
|
33
|
-
#include "fio_mem.h"
|
34
|
-
|
35
|
-
/* *****************************************************************************
|
36
|
-
OS Sendfile settings.
|
37
|
-
*/
|
38
|
-
|
39
|
-
#ifndef USE_SENDFILE
|
40
|
-
|
41
|
-
#if defined(__linux__) /* linux sendfile works */
|
42
|
-
#include <sys/sendfile.h>
|
43
|
-
#define USE_SENDFILE 1
|
44
|
-
#elif defined(__unix__) /* BSD sendfile should work, but isn't tested */
|
45
|
-
#include <sys/uio.h>
|
46
|
-
#define USE_SENDFILE 1
|
47
|
-
#elif defined(__APPLE__) /* Is the apple sendfile still broken? */
|
48
|
-
#include <sys/uio.h>
|
49
|
-
#define USE_SENDFILE 1
|
50
|
-
#else /* sendfile might not be available - always set to 0 */
|
51
|
-
#define USE_SENDFILE 0
|
52
|
-
#endif
|
53
|
-
|
54
|
-
#endif
|
55
|
-
|
56
|
-
/* *****************************************************************************
|
57
|
-
Support an on_close callback.
|
58
|
-
*/
|
59
|
-
|
60
|
-
#pragma weak sock_on_close
|
61
|
-
void __attribute__((weak)) sock_on_close(intptr_t uuid) { (void)(uuid); }
|
62
|
-
|
63
|
-
/* *****************************************************************************
|
64
|
-
Support timeout setting.
|
65
|
-
*/
|
66
|
-
#pragma weak sock_touch
|
67
|
-
void __attribute__((weak)) sock_touch(intptr_t uuid) { (void)(uuid); }
|
68
|
-
|
69
|
-
/* *****************************************************************************
|
70
|
-
Support `defer``.
|
71
|
-
*/
|
72
|
-
|
73
|
-
#pragma weak defer
|
74
|
-
int defer(void (*func)(void *, void *), void *arg, void *arg2) {
|
75
|
-
func(arg, arg2);
|
76
|
-
return 0;
|
77
|
-
}
|
78
|
-
|
79
|
-
#pragma weak sock_flush_defer
|
80
|
-
void sock_flush_defer(void *arg, void *ignored) {
|
81
|
-
sock_flush((intptr_t)arg);
|
82
|
-
return;
|
83
|
-
(void)ignored;
|
84
|
-
}
|
85
|
-
|
86
|
-
/* *****************************************************************************
|
87
|
-
User-Land Buffer and Packets
|
88
|
-
***************************************************************************** */
|
89
|
-
|
90
|
-
#ifndef BUFFER_PACKET_POOL
|
91
|
-
/* ~4 pages of memory */
|
92
|
-
#define BUFFER_PACKET_POOL (((4096 << 2) - 16) / sizeof(packet_s))
|
93
|
-
#endif
|
94
|
-
|
95
|
-
typedef struct packet_s {
|
96
|
-
struct packet_s *next;
|
97
|
-
int (*write_func)(int fd, struct packet_s *packet);
|
98
|
-
union {
|
99
|
-
void (*free_func)(void *);
|
100
|
-
void (*close_func)(intptr_t);
|
101
|
-
};
|
102
|
-
union {
|
103
|
-
void *buffer;
|
104
|
-
intptr_t fd;
|
105
|
-
};
|
106
|
-
intptr_t offset;
|
107
|
-
uintptr_t length;
|
108
|
-
} packet_s;
|
109
|
-
|
110
|
-
static struct {
|
111
|
-
packet_s *next;
|
112
|
-
spn_lock_i lock;
|
113
|
-
uint8_t init;
|
114
|
-
packet_s mem[BUFFER_PACKET_POOL];
|
115
|
-
} packet_pool;
|
116
|
-
|
117
|
-
void SOCK_DEALLOC_NOOP(void *arg) { (void)arg; }
|
118
|
-
|
119
|
-
typedef struct func_s { void (*task)(void *); } func_s;
|
120
|
-
|
121
|
-
static void sock_packet_free_cb(void *task, void *buffer) {
|
122
|
-
func_s *t = (void *)&task;
|
123
|
-
t->task(buffer);
|
124
|
-
}
|
125
|
-
|
126
|
-
static void sock_packet_free_attempt(void *packet_, void *ignr) {
|
127
|
-
if (spn_trylock(&packet_pool.lock)) {
|
128
|
-
defer(sock_packet_free_attempt, packet_, ignr);
|
129
|
-
return;
|
130
|
-
}
|
131
|
-
packet_s *packet = packet_;
|
132
|
-
packet->next = packet_pool.next;
|
133
|
-
packet_pool.next = packet;
|
134
|
-
spn_unlock(&packet_pool.lock);
|
135
|
-
}
|
136
|
-
|
137
|
-
static inline void sock_packet_free(packet_s *packet) {
|
138
|
-
if (packet->free_func == fio_free) {
|
139
|
-
fio_free(packet->buffer);
|
140
|
-
} else if (packet->free_func == free) {
|
141
|
-
free(packet->buffer);
|
142
|
-
} else {
|
143
|
-
defer(sock_packet_free_cb, (void *)((uintptr_t)packet->free_func),
|
144
|
-
packet->buffer);
|
145
|
-
}
|
146
|
-
if (packet >= packet_pool.mem &&
|
147
|
-
packet <= packet_pool.mem + (BUFFER_PACKET_POOL - 1)) {
|
148
|
-
sock_packet_free_attempt(packet, NULL);
|
149
|
-
} else
|
150
|
-
fio_free(packet);
|
151
|
-
}
|
152
|
-
|
153
|
-
static inline packet_s *sock_packet_new(void) {
|
154
|
-
packet_s *packet;
|
155
|
-
if (spn_trylock(&packet_pool.lock))
|
156
|
-
goto no_lock;
|
157
|
-
packet = packet_pool.next;
|
158
|
-
if (packet == NULL)
|
159
|
-
goto none_in_pool;
|
160
|
-
packet_pool.next = packet->next;
|
161
|
-
spn_unlock(&packet_pool.lock);
|
162
|
-
return packet;
|
163
|
-
none_in_pool:
|
164
|
-
if (!packet_pool.init)
|
165
|
-
goto init;
|
166
|
-
spn_unlock(&packet_pool.lock);
|
167
|
-
no_lock:
|
168
|
-
packet = fio_malloc(sizeof(*packet));
|
169
|
-
if (!packet) {
|
170
|
-
perror("FATAL ERROR: memory allocation failed");
|
171
|
-
exit(errno);
|
172
|
-
}
|
173
|
-
return packet;
|
174
|
-
init:
|
175
|
-
packet_pool.init = 1;
|
176
|
-
for (size_t i = 2; i < BUFFER_PACKET_POOL; i++) {
|
177
|
-
packet_pool.mem[i - 1].next = packet_pool.mem + i;
|
178
|
-
}
|
179
|
-
packet_pool.next = packet_pool.mem + 1;
|
180
|
-
spn_unlock(&packet_pool.lock);
|
181
|
-
packet = packet_pool.mem;
|
182
|
-
return packet;
|
183
|
-
}
|
184
|
-
|
185
|
-
/* *****************************************************************************
|
186
|
-
Default Socket Read/Write Hook
|
187
|
-
***************************************************************************** */
|
188
|
-
|
189
|
-
static ssize_t sock_default_hooks_read(intptr_t uuid, void *udata, void *buf,
|
190
|
-
size_t count) {
|
191
|
-
return read(sock_uuid2fd(uuid), buf, count);
|
192
|
-
(void)(udata);
|
193
|
-
}
|
194
|
-
static ssize_t sock_default_hooks_write(intptr_t uuid, void *udata,
|
195
|
-
const void *buf, size_t count) {
|
196
|
-
return write(sock_uuid2fd(uuid), buf, count);
|
197
|
-
(void)(udata);
|
198
|
-
}
|
199
|
-
|
200
|
-
static void sock_default_hooks_on_close(intptr_t fduuid,
|
201
|
-
struct sock_rw_hook_s *rw_hook,
|
202
|
-
void *udata) {
|
203
|
-
(void)udata;
|
204
|
-
(void)rw_hook;
|
205
|
-
(void)fduuid;
|
206
|
-
}
|
207
|
-
|
208
|
-
static ssize_t sock_default_hooks_flush(intptr_t uuid, void *udata) {
|
209
|
-
return 0;
|
210
|
-
(void)(uuid);
|
211
|
-
(void)(udata);
|
212
|
-
}
|
213
|
-
|
214
|
-
const sock_rw_hook_s SOCK_DEFAULT_HOOKS = {
|
215
|
-
.read = sock_default_hooks_read,
|
216
|
-
.write = sock_default_hooks_write,
|
217
|
-
.flush = sock_default_hooks_flush,
|
218
|
-
.on_close = sock_default_hooks_on_close,
|
219
|
-
};
|
220
|
-
|
221
|
-
/* *****************************************************************************
|
222
|
-
Socket Data Structures
|
223
|
-
***************************************************************************** */
|
224
|
-
struct fd_data_s {
|
225
|
-
/** Connection counter - collision protection. */
|
226
|
-
uint8_t counter;
|
227
|
-
/** Connection lock */
|
228
|
-
spn_lock_i lock;
|
229
|
-
/** Connection is open */
|
230
|
-
unsigned open : 1;
|
231
|
-
/** indicated that the connection should be closed. */
|
232
|
-
unsigned close : 1;
|
233
|
-
/** future flags. */
|
234
|
-
unsigned rsv : 5;
|
235
|
-
/** the currently active packet to be sent. */
|
236
|
-
packet_s *packet;
|
237
|
-
/** the last packet in the queue. */
|
238
|
-
packet_s **packet_last;
|
239
|
-
/** The number of pending packets that are in the queue. */
|
240
|
-
size_t packet_count;
|
241
|
-
/** RW hooks. */
|
242
|
-
sock_rw_hook_s *rw_hooks;
|
243
|
-
/** RW udata. */
|
244
|
-
void *rw_udata;
|
245
|
-
/** Peer/listenning address. */
|
246
|
-
struct sockaddr_in6 addrinfo;
|
247
|
-
/** address length. */
|
248
|
-
socklen_t addrlen;
|
249
|
-
};
|
250
|
-
|
251
|
-
static struct sock_data_store_s {
|
252
|
-
size_t capacity;
|
253
|
-
struct fd_data_s *fds;
|
254
|
-
} sock_data_store;
|
255
|
-
|
256
|
-
#define fd2uuid(fd) \
|
257
|
-
(((uintptr_t)(fd) << 8) | (sock_data_store.fds[(fd)].counter & 0xFF))
|
258
|
-
#define fdinfo(fd) sock_data_store.fds[(fd)]
|
259
|
-
#define uuidinfo(fd) sock_data_store.fds[sock_uuid2fd((fd))]
|
260
|
-
|
261
|
-
#define lock_fd(fd) spn_lock(&sock_data_store.fds[(fd)].lock)
|
262
|
-
#define unlock_fd(fd) spn_unlock(&sock_data_store.fds[(fd)].lock)
|
263
|
-
|
264
|
-
static inline int validate_uuid(uintptr_t uuid) {
|
265
|
-
uintptr_t fd = (uintptr_t)sock_uuid2fd(uuid);
|
266
|
-
if ((intptr_t)uuid == -1 || sock_data_store.capacity <= fd ||
|
267
|
-
fdinfo(fd).counter != (uuid & 0xFF))
|
268
|
-
return -1;
|
269
|
-
return 0;
|
270
|
-
}
|
271
|
-
|
272
|
-
static inline void sock_packet_rotate_unsafe(uintptr_t fd) {
|
273
|
-
packet_s *packet = fdinfo(fd).packet;
|
274
|
-
fdinfo(fd).packet = packet->next;
|
275
|
-
if (&packet->next == fdinfo(fd).packet_last) {
|
276
|
-
fdinfo(fd).packet_last = &fdinfo(fd).packet;
|
277
|
-
}
|
278
|
-
--fdinfo(fd).packet_count;
|
279
|
-
sock_packet_free(packet);
|
280
|
-
}
|
281
|
-
|
282
|
-
static void clear_sock_lib(void) {
|
283
|
-
free(sock_data_store.fds);
|
284
|
-
sock_data_store.fds = NULL;
|
285
|
-
sock_data_store.capacity = 0;
|
286
|
-
}
|
287
|
-
|
288
|
-
static inline int initialize_sock_lib(size_t capacity) {
|
289
|
-
static uint8_t init_exit = 0;
|
290
|
-
if (capacity > LIB_SOCK_MAX_CAPACITY)
|
291
|
-
capacity = LIB_SOCK_MAX_CAPACITY;
|
292
|
-
if (sock_data_store.capacity >= capacity)
|
293
|
-
goto finish;
|
294
|
-
struct fd_data_s *new_collection =
|
295
|
-
realloc(sock_data_store.fds, sizeof(*new_collection) * capacity);
|
296
|
-
if (!new_collection)
|
297
|
-
return -1;
|
298
|
-
sock_data_store.fds = new_collection;
|
299
|
-
for (size_t i = sock_data_store.capacity; i < capacity; i++) {
|
300
|
-
fdinfo(i) = (struct fd_data_s){
|
301
|
-
.open = 0,
|
302
|
-
.lock = SPN_LOCK_INIT,
|
303
|
-
.rw_hooks = (sock_rw_hook_s *)&SOCK_DEFAULT_HOOKS,
|
304
|
-
.packet_last = &fdinfo(i).packet,
|
305
|
-
.counter = 0,
|
306
|
-
};
|
307
|
-
}
|
308
|
-
sock_data_store.capacity = capacity;
|
309
|
-
|
310
|
-
#ifdef DEBUG
|
311
|
-
fprintf(stderr,
|
312
|
-
"\nInitialized libsock for %lu sockets, "
|
313
|
-
"each one requires %lu bytes.\n"
|
314
|
-
"overall ovearhead: %lu bytes.\n"
|
315
|
-
"Initialized packet pool for %lu elements, "
|
316
|
-
"each one %lu bytes.\n"
|
317
|
-
"overall buffer ovearhead: %lu bytes.\n"
|
318
|
-
"=== Socket Library Total: %lu bytes ===\n\n",
|
319
|
-
capacity, sizeof(struct fd_data_s),
|
320
|
-
sizeof(struct fd_data_s) * capacity, BUFFER_PACKET_POOL,
|
321
|
-
sizeof(packet_s), sizeof(packet_s) * BUFFER_PACKET_POOL,
|
322
|
-
(sizeof(packet_s) * BUFFER_PACKET_POOL) +
|
323
|
-
(sizeof(struct fd_data_s) * capacity));
|
324
|
-
#endif
|
325
|
-
|
326
|
-
finish:
|
327
|
-
packet_pool.lock = SPN_LOCK_INIT;
|
328
|
-
for (size_t i = 0; i < sock_data_store.capacity; ++i) {
|
329
|
-
sock_data_store.fds[i].lock = SPN_LOCK_INIT;
|
330
|
-
}
|
331
|
-
if (init_exit)
|
332
|
-
return 0;
|
333
|
-
init_exit = 1;
|
334
|
-
atexit(clear_sock_lib);
|
335
|
-
return 0;
|
336
|
-
}
|
337
|
-
|
338
|
-
static inline int clear_fd(uintptr_t fd, uint8_t is_open) {
|
339
|
-
if (sock_data_store.capacity <= fd)
|
340
|
-
goto reinitialize;
|
341
|
-
packet_s *packet;
|
342
|
-
clear:
|
343
|
-
spn_lock(&(fdinfo(fd).lock));
|
344
|
-
struct fd_data_s old_data = fdinfo(fd);
|
345
|
-
sock_data_store.fds[fd] = (struct fd_data_s){
|
346
|
-
.open = is_open,
|
347
|
-
.lock = fdinfo(fd).lock,
|
348
|
-
.rw_hooks = (sock_rw_hook_s *)&SOCK_DEFAULT_HOOKS,
|
349
|
-
.counter = fdinfo(fd).counter + 1,
|
350
|
-
.packet_last = &sock_data_store.fds[fd].packet,
|
351
|
-
};
|
352
|
-
spn_unlock(&(fdinfo(fd).lock));
|
353
|
-
while (old_data.packet) {
|
354
|
-
packet = old_data.packet;
|
355
|
-
old_data.packet = old_data.packet->next;
|
356
|
-
sock_packet_free(packet);
|
357
|
-
}
|
358
|
-
old_data.rw_hooks->on_close(((fd << 8) | old_data.counter), old_data.rw_hooks,
|
359
|
-
old_data.rw_udata);
|
360
|
-
if (old_data.open) {
|
361
|
-
sock_on_close((fd << 8) | old_data.counter);
|
362
|
-
}
|
363
|
-
return 0;
|
364
|
-
reinitialize:
|
365
|
-
if (fd >= LIB_SOCK_MAX_CAPACITY) {
|
366
|
-
close(fd);
|
367
|
-
return -1;
|
368
|
-
}
|
369
|
-
if (initialize_sock_lib(fd << 1))
|
370
|
-
return -1;
|
371
|
-
goto clear;
|
372
|
-
}
|
373
|
-
|
374
|
-
/* *****************************************************************************
|
375
|
-
Writing - from memory
|
376
|
-
***************************************************************************** */
|
377
|
-
|
378
|
-
static int sock_write_buffer(int fd, struct packet_s *packet) {
|
379
|
-
int written = fdinfo(fd).rw_hooks->write(
|
380
|
-
fd2uuid(fd), fdinfo(fd).rw_udata,
|
381
|
-
((uint8_t *)packet->buffer + packet->offset), packet->length);
|
382
|
-
if (written > 0) {
|
383
|
-
packet->length -= written;
|
384
|
-
packet->offset += written;
|
385
|
-
if (!packet->length)
|
386
|
-
sock_packet_rotate_unsafe(fd);
|
387
|
-
}
|
388
|
-
return written;
|
389
|
-
}
|
390
|
-
|
391
|
-
/* *****************************************************************************
|
392
|
-
Writing - from files
|
393
|
-
***************************************************************************** */
|
394
|
-
|
395
|
-
#ifndef BUFFER_FILE_READ_SIZE
|
396
|
-
#define BUFFER_FILE_READ_SIZE 16384
|
397
|
-
#endif
|
398
|
-
|
399
|
-
static void sock_perform_close_fd(intptr_t fd) { close(fd); }
|
400
|
-
static void sock_perform_close_pfd(void *pfd) {
|
401
|
-
close(*(int *)pfd);
|
402
|
-
free(pfd);
|
403
|
-
}
|
404
|
-
|
405
|
-
static int sock_write_from_fd(int fd, struct packet_s *packet) {
|
406
|
-
ssize_t asked = 0;
|
407
|
-
ssize_t sent = 0;
|
408
|
-
ssize_t total = 0;
|
409
|
-
char buff[BUFFER_FILE_READ_SIZE];
|
410
|
-
do {
|
411
|
-
packet->offset += sent;
|
412
|
-
packet->length -= sent;
|
413
|
-
retry:
|
414
|
-
asked =
|
415
|
-
(packet->length < BUFFER_FILE_READ_SIZE)
|
416
|
-
? pread(packet->fd, buff, packet->length, packet->offset)
|
417
|
-
: pread(packet->fd, buff, BUFFER_FILE_READ_SIZE, packet->offset);
|
418
|
-
if (asked <= 0)
|
419
|
-
goto read_error;
|
420
|
-
sent = fdinfo(fd).rw_hooks->write(fd2uuid(fd), fdinfo(fd).rw_udata, buff,
|
421
|
-
asked);
|
422
|
-
} while (sent == asked && packet->length);
|
423
|
-
if (sent >= 0) {
|
424
|
-
packet->offset += sent;
|
425
|
-
packet->length -= sent;
|
426
|
-
total += sent;
|
427
|
-
if (!packet->length) {
|
428
|
-
sock_packet_rotate_unsafe(fd);
|
429
|
-
return 1;
|
430
|
-
}
|
431
|
-
}
|
432
|
-
return total;
|
433
|
-
|
434
|
-
read_error:
|
435
|
-
if (sent == 0) {
|
436
|
-
sock_packet_rotate_unsafe(fd);
|
437
|
-
return 1;
|
438
|
-
}
|
439
|
-
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
|
440
|
-
goto retry;
|
441
|
-
return -1;
|
442
|
-
}
|
443
|
-
|
444
|
-
#if USE_SENDFILE && defined(__linux__) /* linux sendfile API */
|
445
|
-
|
446
|
-
static int sock_sendfile_from_fd(int fd, struct packet_s *packet) {
|
447
|
-
ssize_t sent;
|
448
|
-
sent = sendfile64(fd, packet->fd, &packet->offset, packet->length);
|
449
|
-
if (sent < 0)
|
450
|
-
return -1;
|
451
|
-
packet->length -= sent;
|
452
|
-
if (!packet->length)
|
453
|
-
sock_packet_rotate_unsafe(fd);
|
454
|
-
return sent;
|
455
|
-
}
|
456
|
-
|
457
|
-
#elif USE_SENDFILE && \
|
458
|
-
(defined(__APPLE__) || defined(__unix__)) /* BSD / Apple API */
|
459
|
-
|
460
|
-
static int sock_sendfile_from_fd(int fd, struct packet_s *packet) {
|
461
|
-
off_t act_sent = 0;
|
462
|
-
ssize_t ret = 0;
|
463
|
-
while (packet->length) {
|
464
|
-
act_sent = packet->length;
|
465
|
-
#if defined(__APPLE__)
|
466
|
-
ret = sendfile(packet->fd, fd, packet->offset, &act_sent, NULL, 0);
|
467
|
-
#else
|
468
|
-
ret = sendfile(packet->fd, fd, packet->offset, (size_t)act_sent, NULL,
|
469
|
-
&act_sent, 0);
|
470
|
-
#endif
|
471
|
-
if (ret < 0)
|
472
|
-
goto error;
|
473
|
-
packet->length -= act_sent;
|
474
|
-
packet->offset += act_sent;
|
475
|
-
}
|
476
|
-
sock_packet_rotate_unsafe(fd);
|
477
|
-
return act_sent;
|
478
|
-
error:
|
479
|
-
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR) {
|
480
|
-
packet->length -= act_sent;
|
481
|
-
packet->offset += act_sent;
|
482
|
-
}
|
483
|
-
return -1;
|
484
|
-
}
|
485
|
-
|
486
|
-
// static int sock_sendfile_from_fd(int fd, struct packet_s *packet) {
|
487
|
-
// struct sock_packet_file_data_s *ext = (void *)packet->buffer.buf;
|
488
|
-
// off_t act_sent = 0;
|
489
|
-
// ssize_t count = 0;
|
490
|
-
// do {
|
491
|
-
// fdinfo(fd).sent += act_sent;
|
492
|
-
// packet->buffer.len -= act_sent;
|
493
|
-
// act_sent = packet->buffer.len;
|
494
|
-
// #if defined(__APPLE__)
|
495
|
-
// count = sendfile(ext->fd, fd, ext->offset + fdinfo(fd).sent, &act_sent,
|
496
|
-
// NULL, 0);
|
497
|
-
// #else
|
498
|
-
// count = sendfile(ext->fd, fd, ext->offset + fdinfo(fd).sent,
|
499
|
-
// (size_t)act_sent, NULL, &act_sent, 0);
|
500
|
-
// #endif
|
501
|
-
// } while (count >= 0 && packet->buffer.len > (size_t)act_sent);
|
502
|
-
// if (!act_sent) {
|
503
|
-
// fprintf(stderr, "Rotating after sent == %lu and length == %lu\n",
|
504
|
-
// (size_t)act_sent, packet->buffer.len);
|
505
|
-
// sock_packet_rotate_unsafe(fd);
|
506
|
-
// }
|
507
|
-
// if (count < 0)
|
508
|
-
// return -1;
|
509
|
-
// return act_sent;
|
510
|
-
// }
|
511
|
-
|
512
|
-
#else
|
513
|
-
static int (*sock_sendfile_from_fd)(int fd, struct packet_s *packet) =
|
514
|
-
sock_write_from_fd;
|
515
|
-
|
516
|
-
#endif
|
517
|
-
|
518
|
-
static int sock_sendfile_from_pfd(int fd, struct packet_s *packet) {
|
519
|
-
int ret;
|
520
|
-
struct packet_s tmp = *packet;
|
521
|
-
tmp.fd = ((intptr_t *)tmp.buffer)[0];
|
522
|
-
ret = sock_sendfile_from_fd(fd, &tmp);
|
523
|
-
tmp.fd = packet->fd;
|
524
|
-
*packet = tmp;
|
525
|
-
return ret;
|
526
|
-
}
|
527
|
-
|
528
|
-
static int sock_write_from_pfd(int fd, struct packet_s *packet) {
|
529
|
-
int ret;
|
530
|
-
struct packet_s tmp = *packet;
|
531
|
-
tmp.fd = ((intptr_t *)tmp.buffer)[0];
|
532
|
-
ret = sock_write_from_fd(fd, &tmp);
|
533
|
-
tmp.fd = packet->fd;
|
534
|
-
*packet = tmp;
|
535
|
-
return ret;
|
536
|
-
}
|
537
|
-
|
538
|
-
/* *****************************************************************************
|
539
|
-
The API
|
540
|
-
***************************************************************************** */
|
541
|
-
|
542
|
-
/* *****************************************************************************
|
543
|
-
Process wide and helper sock API.
|
544
|
-
*/
|
545
|
-
|
546
|
-
/** MUST be called after forking a process. */
|
547
|
-
void sock_on_fork(void) { initialize_sock_lib(0); }
|
548
|
-
|
549
|
-
/**
|
550
|
-
Sets a socket to non blocking state.
|
551
|
-
|
552
|
-
This function is called automatically for the new socket, when using
|
553
|
-
`sock_accept` or `sock_connect`.
|
554
|
-
*/
|
555
|
-
int sock_set_non_block(int fd) {
|
556
|
-
/* If they have O_NONBLOCK, use the Posix way to do it */
|
557
|
-
#if defined(O_NONBLOCK)
|
558
|
-
/* Fixme: O_NONBLOCK is defined but broken on SunOS 4.1.x and AIX 3.2.5. */
|
559
|
-
int flags;
|
560
|
-
if (-1 == (flags = fcntl(fd, F_GETFL, 0)))
|
561
|
-
flags = 0;
|
562
|
-
// printf("flags initial value was %d\n", flags);
|
563
|
-
return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
|
564
|
-
#elif defined(FIONBIO)
|
565
|
-
/* Otherwise, use the old way of doing it */
|
566
|
-
static int flags = 1;
|
567
|
-
return ioctl(fd, FIONBIO, &flags);
|
568
|
-
#else
|
569
|
-
#error No functions / argumnet macros for non-blocking sockets.
|
570
|
-
#endif
|
571
|
-
}
|
572
|
-
|
573
|
-
/**
|
574
|
-
Gets the maximum number of file descriptors this process can be allowed to
|
575
|
-
access (== maximum fd value + 1).
|
576
|
-
|
577
|
-
If the "soft" limit is lower then the "hard" limit, the process's limits will be
|
578
|
-
extended to the allowed "hard" limit.
|
579
|
-
*/
|
580
|
-
ssize_t sock_max_capacity(void) {
|
581
|
-
// get current limits
|
582
|
-
static ssize_t flim = 0;
|
583
|
-
if (flim)
|
584
|
-
return flim;
|
585
|
-
#ifdef _SC_OPEN_MAX
|
586
|
-
flim = sysconf(_SC_OPEN_MAX);
|
587
|
-
#elif defined(FOPEN_MAX)
|
588
|
-
flim = FOPEN_MAX;
|
589
|
-
#endif
|
590
|
-
// try to maximize limits - collect max and set to max
|
591
|
-
struct rlimit rlim = {.rlim_max = 0};
|
592
|
-
if (getrlimit(RLIMIT_NOFILE, &rlim) == -1) {
|
593
|
-
fprintf(stderr, "WARNING: `getrlimit` failed in `sock_max_capacity`.\n");
|
594
|
-
} else {
|
595
|
-
// #if defined(__APPLE__) /* Apple's getrlimit is broken. */
|
596
|
-
// rlim.rlim_cur = rlim.rlim_max >= FOPEN_MAX ? FOPEN_MAX :
|
597
|
-
// rlim.rlim_max;
|
598
|
-
// #else
|
599
|
-
rlim.rlim_cur = rlim.rlim_max;
|
600
|
-
// #endif
|
601
|
-
|
602
|
-
if (rlim.rlim_cur > LIB_SOCK_MAX_CAPACITY)
|
603
|
-
rlim.rlim_cur = LIB_SOCK_MAX_CAPACITY;
|
604
|
-
|
605
|
-
if (!setrlimit(RLIMIT_NOFILE, &rlim))
|
606
|
-
getrlimit(RLIMIT_NOFILE, &rlim);
|
607
|
-
flim = rlim.rlim_cur;
|
608
|
-
}
|
609
|
-
#if DEBUG
|
610
|
-
fprintf(stderr,
|
611
|
-
"libsock capacity initialization:\n"
|
612
|
-
"* Meximum open files %lu out of %lu\n",
|
613
|
-
(unsigned long)flim, (unsigned long)rlim.rlim_max);
|
614
|
-
#endif
|
615
|
-
// initialize library to maximum capacity
|
616
|
-
initialize_sock_lib(flim);
|
617
|
-
// return what we have
|
618
|
-
return sock_data_store.capacity;
|
619
|
-
}
|
620
|
-
|
621
|
-
/* *****************************************************************************
|
622
|
-
The main sock API.
|
623
|
-
*/
|
624
|
-
|
625
|
-
/**
|
626
|
-
Opens a listening non-blocking socket. Return's the socket's UUID.
|
627
|
-
|
628
|
-
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
629
|
-
|
630
|
-
UUIDs with values less then -1 are valid values, depending on the system's
|
631
|
-
byte-ordering.
|
632
|
-
|
633
|
-
Socket UUIDs are predictable and shouldn't be used outside the local system.
|
634
|
-
They protect against connection mixups on concurrent systems (i.e. when saving
|
635
|
-
client data for "broadcasting" or when an old client task is preparing a
|
636
|
-
response in the background while a disconnection and a new connection occur on
|
637
|
-
the same `fd`).
|
638
|
-
*/
|
639
|
-
intptr_t sock_listen(const char *address, const char *port) {
|
640
|
-
int srvfd;
|
641
|
-
if (!port || *port == 0 || (port[0] == '0' && port[1] == 0)) {
|
642
|
-
/* Unix socket */
|
643
|
-
if (!address) {
|
644
|
-
errno = EINVAL;
|
645
|
-
fprintf(
|
646
|
-
stderr,
|
647
|
-
"ERROR: (sock) sock_listen - a Unix socket requires a valid address."
|
648
|
-
" or specify port for TCP/IP.\n");
|
649
|
-
return -1;
|
650
|
-
}
|
651
|
-
struct sockaddr_un addr = {0};
|
652
|
-
size_t addr_len = strlen(address);
|
653
|
-
if (addr_len >= sizeof(addr.sun_path)) {
|
654
|
-
errno = ENAMETOOLONG;
|
655
|
-
return -1;
|
656
|
-
}
|
657
|
-
addr.sun_family = AF_UNIX;
|
658
|
-
memcpy(addr.sun_path, address, addr_len + 1); /* copy the NUL byte. */
|
659
|
-
#if defined(__APPLE__)
|
660
|
-
addr.sun_len = addr_len;
|
661
|
-
#endif
|
662
|
-
// get the file descriptor
|
663
|
-
srvfd = socket(AF_UNIX, SOCK_STREAM, 0);
|
664
|
-
if (srvfd == -1) {
|
665
|
-
return -1;
|
666
|
-
}
|
667
|
-
if (sock_set_non_block(srvfd) == -1) {
|
668
|
-
close(srvfd);
|
669
|
-
return -1;
|
670
|
-
}
|
671
|
-
unlink(addr.sun_path);
|
672
|
-
if (bind(srvfd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
|
673
|
-
close(srvfd);
|
674
|
-
return -1;
|
675
|
-
}
|
676
|
-
/* chmod for foriegn connections */
|
677
|
-
fchmod(srvfd, 0777);
|
678
|
-
|
679
|
-
} else {
|
680
|
-
/* TCP/IP socket */
|
681
|
-
// setup the address
|
682
|
-
struct addrinfo hints = {0};
|
683
|
-
struct addrinfo *servinfo; // will point to the results
|
684
|
-
memset(&hints, 0, sizeof hints); // make sure the struct is empty
|
685
|
-
hints.ai_family = AF_UNSPEC; // don't care IPv4 or IPv6
|
686
|
-
hints.ai_socktype = SOCK_STREAM; // TCP stream sockets
|
687
|
-
hints.ai_flags = AI_PASSIVE; // fill in my IP for me
|
688
|
-
if (getaddrinfo(address, port, &hints, &servinfo)) {
|
689
|
-
// perror("addr err");
|
690
|
-
return -1;
|
691
|
-
}
|
692
|
-
// get the file descriptor
|
693
|
-
srvfd = socket(servinfo->ai_family, servinfo->ai_socktype,
|
694
|
-
servinfo->ai_protocol);
|
695
|
-
if (srvfd <= 0) {
|
696
|
-
// perror("socket err");
|
697
|
-
freeaddrinfo(servinfo);
|
698
|
-
return -1;
|
699
|
-
}
|
700
|
-
// make sure the socket is non-blocking
|
701
|
-
if (sock_set_non_block(srvfd) < 0) {
|
702
|
-
// perror("couldn't set socket as non blocking! ");
|
703
|
-
freeaddrinfo(servinfo);
|
704
|
-
close(srvfd);
|
705
|
-
return -1;
|
706
|
-
}
|
707
|
-
// avoid the "address taken"
|
708
|
-
{
|
709
|
-
int optval = 1;
|
710
|
-
setsockopt(srvfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
|
711
|
-
}
|
712
|
-
// bind the address to the socket
|
713
|
-
{
|
714
|
-
int bound = 0;
|
715
|
-
for (struct addrinfo *p = servinfo; p != NULL; p = p->ai_next) {
|
716
|
-
if (!bind(srvfd, p->ai_addr, p->ai_addrlen))
|
717
|
-
bound = 1;
|
718
|
-
}
|
719
|
-
|
720
|
-
if (!bound) {
|
721
|
-
// perror("bind err");
|
722
|
-
freeaddrinfo(servinfo);
|
723
|
-
close(srvfd);
|
724
|
-
return -1;
|
725
|
-
}
|
726
|
-
}
|
727
|
-
#ifdef TCP_FASTOPEN
|
728
|
-
// support TCP Fast Open when available
|
729
|
-
{
|
730
|
-
int optval = 128;
|
731
|
-
setsockopt(srvfd, servinfo->ai_protocol, TCP_FASTOPEN, &optval,
|
732
|
-
sizeof(optval));
|
733
|
-
}
|
734
|
-
#endif
|
735
|
-
freeaddrinfo(servinfo);
|
736
|
-
}
|
737
|
-
// listen in
|
738
|
-
if (listen(srvfd, SOMAXCONN) < 0) {
|
739
|
-
// perror("couldn't start listening");
|
740
|
-
close(srvfd);
|
741
|
-
return -1;
|
742
|
-
}
|
743
|
-
if (clear_fd(srvfd, 1))
|
744
|
-
return -1;
|
745
|
-
return fd2uuid(srvfd);
|
746
|
-
}
|
747
|
-
|
748
|
-
/**
|
749
|
-
`sock_accept` accepts a new socket connection from the listening socket
|
750
|
-
`server_fd`, allowing the use of `sock_` functions with this new file
|
751
|
-
descriptor.
|
752
|
-
|
753
|
-
When using `libreact`, remember to call `int reactor_add(intptr_t uuid);` to
|
754
|
-
listen for events.
|
755
|
-
|
756
|
-
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
757
|
-
|
758
|
-
Socket UUIDs are predictable and shouldn't be used outside the local system.
|
759
|
-
They protect against connection mixups on concurrent systems (i.e. when saving
|
760
|
-
client data for "broadcasting" or when an old client task is preparing a
|
761
|
-
response in the background while a disconnection and a new connection occur on
|
762
|
-
the same `fd`).
|
763
|
-
*/
|
764
|
-
intptr_t sock_accept(intptr_t srv_uuid) {
|
765
|
-
struct sockaddr_in6 addrinfo;
|
766
|
-
socklen_t addrlen = sizeof(addrinfo);
|
767
|
-
int client;
|
768
|
-
#ifdef SOCK_NONBLOCK
|
769
|
-
client = accept4(sock_uuid2fd(srv_uuid), (struct sockaddr *)&addrinfo,
|
770
|
-
&addrlen, SOCK_NONBLOCK);
|
771
|
-
if (client <= 0)
|
772
|
-
return -1;
|
773
|
-
#else
|
774
|
-
client =
|
775
|
-
accept(sock_uuid2fd(srv_uuid), (struct sockaddr *)&addrinfo, &addrlen);
|
776
|
-
if (client <= 0)
|
777
|
-
return -1;
|
778
|
-
sock_set_non_block(client);
|
779
|
-
#endif
|
780
|
-
// avoid the TCP delay algorithm.
|
781
|
-
{
|
782
|
-
int optval = 1;
|
783
|
-
setsockopt(client, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(optval));
|
784
|
-
}
|
785
|
-
// handle socket buffers.
|
786
|
-
{
|
787
|
-
int optval = 0;
|
788
|
-
socklen_t size = (socklen_t)sizeof(optval);
|
789
|
-
if (!getsockopt(client, SOL_SOCKET, SO_SNDBUF, &optval, &size) &&
|
790
|
-
optval <= 131072) {
|
791
|
-
optval = 131072;
|
792
|
-
setsockopt(client, SOL_SOCKET, SO_SNDBUF, &optval, sizeof(optval));
|
793
|
-
optval = 131072;
|
794
|
-
setsockopt(client, SOL_SOCKET, SO_RCVBUF, &optval, sizeof(optval));
|
795
|
-
}
|
796
|
-
}
|
797
|
-
if (clear_fd(client, 1))
|
798
|
-
return -1;
|
799
|
-
fdinfo(client).addrinfo = addrinfo;
|
800
|
-
fdinfo(client).addrlen = addrlen;
|
801
|
-
return fd2uuid(client);
|
802
|
-
}
|
803
|
-
|
804
|
-
/**
|
805
|
-
`sock_connect` is similar to `sock_accept` but should be used to initiate a
|
806
|
-
client connection to the address requested.
|
807
|
-
|
808
|
-
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
809
|
-
|
810
|
-
Socket UUIDs are predictable and shouldn't be used outside the local system.
|
811
|
-
They protect against connection mixups on concurrent systems (i.e. when saving
|
812
|
-
client data for "broadcasting" or when an old client task is preparing a
|
813
|
-
response in the background while a disconnection and a new connection occur on
|
814
|
-
the same `fd`).
|
815
|
-
|
816
|
-
When using `libreact`, remember to call `int reactor_add(intptr_t uuid);` to
|
817
|
-
listen for events.
|
818
|
-
|
819
|
-
NOTICE:
|
820
|
-
|
821
|
-
This function is non-blocking, meaning that the connection probably wasn't
|
822
|
-
established by the time the function returns (this prevents the function from
|
823
|
-
hanging while waiting for a network timeout).
|
824
|
-
|
825
|
-
Use select, poll, `libreact` or other solutions to review the connection state
|
826
|
-
before attempting to write to the socket.
|
827
|
-
*/
|
828
|
-
intptr_t sock_connect(char *address, char *port) {
|
829
|
-
int fd;
|
830
|
-
int one = 1;
|
831
|
-
if (!port || *port == 0 || (port[0] == '0' && port[1] == 0)) {
|
832
|
-
/* Unix socket */
|
833
|
-
if (!address) {
|
834
|
-
errno = EINVAL;
|
835
|
-
fprintf(
|
836
|
-
stderr,
|
837
|
-
"ERROR: (sock) sock_listen - a Unix socket requires a valid address."
|
838
|
-
" or specify port for TCP/IP.\n");
|
839
|
-
return -1;
|
840
|
-
}
|
841
|
-
|
842
|
-
struct sockaddr_un addr = {.sun_family = AF_UNIX};
|
843
|
-
size_t addr_len = strlen(address);
|
844
|
-
if (addr_len >= sizeof(addr.sun_path)) {
|
845
|
-
errno = ENAMETOOLONG;
|
846
|
-
return -1;
|
847
|
-
}
|
848
|
-
addr.sun_family = AF_UNIX;
|
849
|
-
memcpy(addr.sun_path, address, addr_len + 1); /* copy the NUL byte. */
|
850
|
-
#if defined(__APPLE__)
|
851
|
-
addr.sun_len = addr_len;
|
852
|
-
#endif
|
853
|
-
// get the file descriptor
|
854
|
-
fd = socket(AF_UNIX, SOCK_STREAM, 0);
|
855
|
-
if (fd == -1) {
|
856
|
-
return -1;
|
857
|
-
}
|
858
|
-
if (sock_set_non_block(fd) == -1) {
|
859
|
-
close(fd);
|
860
|
-
return -1;
|
861
|
-
}
|
862
|
-
if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1 &&
|
863
|
-
errno != EINPROGRESS) {
|
864
|
-
close(fd);
|
865
|
-
return -1;
|
866
|
-
}
|
867
|
-
if (clear_fd(fd, 1))
|
868
|
-
return -1;
|
869
|
-
} else {
|
870
|
-
// setup the address
|
871
|
-
struct addrinfo hints;
|
872
|
-
struct addrinfo *addrinfo; // will point to the results
|
873
|
-
memset(&hints, 0, sizeof hints); // make sure the struct is empty
|
874
|
-
hints.ai_family = AF_UNSPEC; // don't care IPv4 or IPv6
|
875
|
-
hints.ai_socktype = SOCK_STREAM; // TCP stream sockets
|
876
|
-
hints.ai_flags = AI_PASSIVE; // fill in my IP for me
|
877
|
-
if (getaddrinfo(address, port, &hints, &addrinfo)) {
|
878
|
-
return -1;
|
879
|
-
}
|
880
|
-
// get the file descriptor
|
881
|
-
fd = socket(addrinfo->ai_family, addrinfo->ai_socktype,
|
882
|
-
addrinfo->ai_protocol);
|
883
|
-
if (fd <= 0) {
|
884
|
-
freeaddrinfo(addrinfo);
|
885
|
-
return -1;
|
886
|
-
}
|
887
|
-
// make sure the socket is non-blocking
|
888
|
-
if (sock_set_non_block(fd) < 0) {
|
889
|
-
freeaddrinfo(addrinfo);
|
890
|
-
close(fd);
|
891
|
-
return -1;
|
892
|
-
}
|
893
|
-
|
894
|
-
for (struct addrinfo *i = addrinfo; i; i = i->ai_next) {
|
895
|
-
if (connect(fd, i->ai_addr, i->ai_addrlen) == 0 || errno == EINPROGRESS)
|
896
|
-
goto connection_requested;
|
897
|
-
}
|
898
|
-
freeaddrinfo(addrinfo);
|
899
|
-
close(fd);
|
900
|
-
return -1;
|
901
|
-
|
902
|
-
connection_requested:
|
903
|
-
|
904
|
-
setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
|
905
|
-
if (clear_fd(fd, 1))
|
906
|
-
return -1;
|
907
|
-
memcpy(&fdinfo(fd).addrinfo, addrinfo->ai_addr, addrinfo->ai_addrlen);
|
908
|
-
fdinfo(fd).addrlen = addrinfo->ai_addrlen;
|
909
|
-
freeaddrinfo(addrinfo);
|
910
|
-
}
|
911
|
-
return fd2uuid(fd);
|
912
|
-
}
|
913
|
-
|
914
|
-
/**
|
915
|
-
`sock_open` takes an existing file descriptor `fd` and initializes it's status
|
916
|
-
as open and available for `sock_*` API calls, returning a valid UUID.
|
917
|
-
|
918
|
-
This will reinitialize the data (user buffer etc') for the file descriptor
|
919
|
-
provided, calling the `reactor_on_close` callback if the `fd` was previously
|
920
|
-
marked as used.
|
921
|
-
|
922
|
-
When using `libreact`, remember to call `int reactor_add(intptr_t uuid);` to
|
923
|
-
listen for events.
|
924
|
-
|
925
|
-
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
926
|
-
|
927
|
-
Socket UUIDs are predictable and shouldn't be used outside the local system.
|
928
|
-
They protect against connection mixups on concurrent systems (i.e. when saving
|
929
|
-
client data for "broadcasting" or when an old client task is preparing a
|
930
|
-
response in the background while a disconnection and a new connection occur on
|
931
|
-
the same `fd`).
|
932
|
-
*/
|
933
|
-
intptr_t sock_open(int fd) {
|
934
|
-
if (clear_fd(fd, 1))
|
935
|
-
return -1;
|
936
|
-
return fd2uuid(fd);
|
937
|
-
}
|
938
|
-
|
939
|
-
/**
|
940
|
-
* `sock_hijack` is the reverse of the `sock_open` function, removing the
|
941
|
-
* connection from the `sock` library and clearing it's data without closing it
|
942
|
-
* (`sock_on_close` will NOT be called).
|
943
|
-
*
|
944
|
-
* Returns the original `fd` for the socket. On error returns -1.
|
945
|
-
*/
|
946
|
-
int sock_hijack(intptr_t uuid) {
|
947
|
-
const int fd = sock_uuid2fd(uuid);
|
948
|
-
if (validate_uuid(uuid) && fdinfo(fd).open) {
|
949
|
-
fprintf(stderr, "WARNING: SOCK HIJACK FAILING!\n");
|
950
|
-
return -1;
|
951
|
-
}
|
952
|
-
fdinfo(fd).open = 0;
|
953
|
-
clear_fd(fd, 0);
|
954
|
-
return fd;
|
955
|
-
}
|
956
|
-
|
957
|
-
/** Returns the information available about the socket's peer address. */
|
958
|
-
sock_peer_addr_s sock_peer_addr(intptr_t uuid) {
|
959
|
-
if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).addrlen)
|
960
|
-
return (sock_peer_addr_s){.addr = NULL};
|
961
|
-
return (sock_peer_addr_s){
|
962
|
-
.addrlen = uuidinfo(uuid).addrlen,
|
963
|
-
.addr = (struct sockaddr *)&uuidinfo(uuid).addrinfo,
|
964
|
-
};
|
965
|
-
}
|
966
|
-
|
967
|
-
/**
|
968
|
-
Returns 1 if the uuid refers to a valid and open, socket.
|
969
|
-
|
970
|
-
Returns 0 if not.
|
971
|
-
*/
|
972
|
-
int sock_isvalid(intptr_t uuid) {
|
973
|
-
return validate_uuid(uuid) == 0 && uuidinfo(uuid).open;
|
974
|
-
}
|
975
|
-
|
976
|
-
/**
|
977
|
-
Returns 1 if the uuid is invalid or the socket is flagged to be closed.
|
978
|
-
|
979
|
-
Returns 0 if the socket is valid, open and isn't flagged to be closed.
|
980
|
-
*/
|
981
|
-
int sock_isclosed(intptr_t uuid) {
|
982
|
-
return validate_uuid(uuid) || !uuidinfo(uuid).open || uuidinfo(uuid).close;
|
983
|
-
}
|
984
|
-
|
985
|
-
/**
|
986
|
-
`sock_fd2uuid` takes an existing file decriptor `fd` and returns it's active
|
987
|
-
`uuid`.
|
988
|
-
|
989
|
-
If the file descriptor is marked as closed (wasn't opened / registered with
|
990
|
-
`libsock`) the function returns -1;
|
991
|
-
|
992
|
-
If the file descriptor was closed remotely (or not using `libsock`), a false
|
993
|
-
positive will be possible. This is not an issue, since the use of an invalid fd
|
994
|
-
will result in the registry being updated and the fd being closed.
|
995
|
-
|
996
|
-
Returns -1 on error. Returns a valid socket (non-random) UUID.
|
997
|
-
*/
|
998
|
-
intptr_t sock_fd2uuid(int fd) {
|
999
|
-
return (fd > 0 && sock_data_store.capacity > (size_t)fd &&
|
1000
|
-
sock_data_store.fds[fd].open)
|
1001
|
-
? (intptr_t)(fd2uuid(fd))
|
1002
|
-
: -1;
|
1003
|
-
}
|
1004
|
-
|
1005
|
-
/**
|
1006
|
-
`sock_read` attempts to read up to count bytes from the socket into the buffer
|
1007
|
-
starting at buf.
|
1008
|
-
|
1009
|
-
On a connection error (NOT EAGAIN or EWOULDBLOCK), signal interrupt, or when the
|
1010
|
-
connection was closed, `sock_read` returns -1.
|
1011
|
-
|
1012
|
-
The value 0 is the valid value indicating no data was read.
|
1013
|
-
|
1014
|
-
Data might be available in the kernel's buffer while it is not available to be
|
1015
|
-
read using `sock_read` (i.e., when using a transport layer, such as TLS).
|
1016
|
-
*/
|
1017
|
-
ssize_t sock_read(intptr_t uuid, void *buf, size_t count) {
|
1018
|
-
if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).open) {
|
1019
|
-
errno = EBADF;
|
1020
|
-
return -1;
|
1021
|
-
}
|
1022
|
-
lock_fd(sock_uuid2fd(uuid));
|
1023
|
-
if (!fdinfo(sock_uuid2fd(uuid)).open) {
|
1024
|
-
unlock_fd(sock_uuid2fd(uuid));
|
1025
|
-
errno = EBADF;
|
1026
|
-
return -1;
|
1027
|
-
}
|
1028
|
-
sock_rw_hook_s *rw = fdinfo(sock_uuid2fd(uuid)).rw_hooks;
|
1029
|
-
void *udata = fdinfo(sock_uuid2fd(uuid)).rw_udata;
|
1030
|
-
unlock_fd(sock_uuid2fd(uuid));
|
1031
|
-
if (count == 0)
|
1032
|
-
return rw->read(uuid, udata, buf, count);
|
1033
|
-
int old_errno = errno;
|
1034
|
-
ssize_t ret;
|
1035
|
-
retry_int:
|
1036
|
-
ret = rw->read(uuid, udata, buf, count);
|
1037
|
-
if (ret > 0) {
|
1038
|
-
sock_touch(uuid);
|
1039
|
-
return ret;
|
1040
|
-
}
|
1041
|
-
if (ret < 0 && errno == EINTR)
|
1042
|
-
goto retry_int;
|
1043
|
-
if (ret < 0 &&
|
1044
|
-
(errno == EWOULDBLOCK || errno == EAGAIN || errno == ENOTCONN)) {
|
1045
|
-
errno = old_errno;
|
1046
|
-
return 0;
|
1047
|
-
}
|
1048
|
-
sock_force_close(uuid);
|
1049
|
-
return -1;
|
1050
|
-
}
|
1051
|
-
|
1052
|
-
/**
|
1053
|
-
`sock_write2_fn` is the actual function behind the macro `sock_write2`.
|
1054
|
-
*/
|
1055
|
-
ssize_t sock_write2_fn(sock_write_info_s options) {
|
1056
|
-
int fd = sock_uuid2fd(options.uuid);
|
1057
|
-
|
1058
|
-
/* this extra work can be avoided if an error is already known to occur...
|
1059
|
-
* but the extra complexity and branching isn't worth it, considering the
|
1060
|
-
* common case should be that there's no expected error.
|
1061
|
-
*
|
1062
|
-
* It also important to point out that errors should handle deallocation,
|
1063
|
-
* simplifying client-side error handling logic (this is a framework wide
|
1064
|
-
* design choice where callbacks are passed).
|
1065
|
-
*/
|
1066
|
-
packet_s *packet = sock_packet_new();
|
1067
|
-
packet->length = options.length;
|
1068
|
-
packet->offset = options.offset;
|
1069
|
-
packet->buffer = (void *)options.buffer;
|
1070
|
-
if (options.is_fd) {
|
1071
|
-
packet->write_func = (fdinfo(fd).rw_hooks == &SOCK_DEFAULT_HOOKS)
|
1072
|
-
? sock_sendfile_from_fd
|
1073
|
-
: sock_write_from_fd;
|
1074
|
-
packet->free_func =
|
1075
|
-
(options.dealloc ? options.dealloc
|
1076
|
-
: (void (*)(void *))sock_perform_close_fd);
|
1077
|
-
} else if (options.is_pfd) {
|
1078
|
-
packet->write_func = (fdinfo(fd).rw_hooks == &SOCK_DEFAULT_HOOKS)
|
1079
|
-
? sock_sendfile_from_pfd
|
1080
|
-
: sock_write_from_pfd;
|
1081
|
-
packet->free_func =
|
1082
|
-
(options.dealloc ? options.dealloc : sock_perform_close_pfd);
|
1083
|
-
} else {
|
1084
|
-
packet->write_func = sock_write_buffer;
|
1085
|
-
packet->free_func = (options.dealloc ? options.dealloc : free);
|
1086
|
-
}
|
1087
|
-
|
1088
|
-
/* place packet in queue */
|
1089
|
-
|
1090
|
-
if (validate_uuid(options.uuid) || !options.buffer)
|
1091
|
-
goto error;
|
1092
|
-
lock_fd(fd);
|
1093
|
-
if (!fdinfo(fd).open || fdinfo(fd).close) {
|
1094
|
-
unlock_fd(fd);
|
1095
|
-
goto error;
|
1096
|
-
}
|
1097
|
-
packet->next = NULL;
|
1098
|
-
if (fdinfo(fd).packet == NULL) {
|
1099
|
-
fdinfo(fd).packet_last = &packet->next;
|
1100
|
-
fdinfo(fd).packet = packet;
|
1101
|
-
} else if (options.urgent == 0) {
|
1102
|
-
*fdinfo(fd).packet_last = packet;
|
1103
|
-
fdinfo(fd).packet_last = &packet->next;
|
1104
|
-
} else {
|
1105
|
-
packet_s **pos = &fdinfo(fd).packet;
|
1106
|
-
if (*pos)
|
1107
|
-
pos = &(*pos)->next;
|
1108
|
-
packet->next = *pos;
|
1109
|
-
*pos = packet;
|
1110
|
-
if (!packet->next) {
|
1111
|
-
fdinfo(fd).packet_last = &packet->next;
|
1112
|
-
}
|
1113
|
-
}
|
1114
|
-
++fdinfo(fd).packet_count;
|
1115
|
-
unlock_fd(fd);
|
1116
|
-
sock_touch(options.uuid);
|
1117
|
-
defer(sock_flush_defer, (void *)options.uuid, NULL);
|
1118
|
-
return 0;
|
1119
|
-
|
1120
|
-
error:
|
1121
|
-
sock_packet_free(packet);
|
1122
|
-
errno = EBADF;
|
1123
|
-
return -1;
|
1124
|
-
}
|
1125
|
-
#define sock_write2(...) sock_write2_fn((sock_write_info_s){__VA_ARGS__})
|
1126
|
-
|
1127
|
-
/**
|
1128
|
-
`sock_close` marks the connection for disconnection once all the data was sent.
|
1129
|
-
The actual disconnection will be managed by the `sock_flush` function.
|
1130
|
-
|
1131
|
-
`sock_flash` will automatically be called.
|
1132
|
-
*/
|
1133
|
-
void sock_close(intptr_t uuid) {
|
1134
|
-
if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).open)
|
1135
|
-
return;
|
1136
|
-
fdinfo(sock_uuid2fd(uuid)).close = 1;
|
1137
|
-
sock_flush_defer((void *)uuid, (void *)uuid);
|
1138
|
-
}
|
1139
|
-
/**
|
1140
|
-
`sock_force_close` closes the connection immediately, without adhering to any
|
1141
|
-
protocol restrictions and without sending any remaining data in the connection
|
1142
|
-
buffer.
|
1143
|
-
*/
|
1144
|
-
void sock_force_close(intptr_t uuid) {
|
1145
|
-
if (validate_uuid(uuid))
|
1146
|
-
return;
|
1147
|
-
// fprintf(stderr,
|
1148
|
-
// "INFO: (%d) `sock_force_close` called"
|
1149
|
-
// " for %p (fd: %u) with errno %d\n",
|
1150
|
-
// getpid(), (void *)uuid, (unsigned int)sock_uuid2fd(uuid), errno);
|
1151
|
-
// perror("errno");
|
1152
|
-
// // We might avoid shutdown, it has side-effects that aren't always clear
|
1153
|
-
// shutdown(sock_uuid2fd(uuid), SHUT_RDWR);
|
1154
|
-
close(sock_uuid2fd(uuid));
|
1155
|
-
clear_fd(sock_uuid2fd(uuid), 0);
|
1156
|
-
}
|
1157
|
-
|
-/* *****************************************************************************
-Direct user level buffer API.
-
-The following API allows data to be written directly to the packet, minimizing
-memory copy operations.
-*/
-
-/**
- * `sock_flush` writes the data in the internal buffer to the underlying file
- * descriptor and closes the underlying fd once it's marked for closure (and all
- * the data was sent).
- *
- * Return values: 1 will be returned if `sock_flush` should be called again. 0
- * will be returned if the socket was fully flushed. -1 will be returned on an
- * error or when the connection is closed.
- */
-ssize_t sock_flush(intptr_t uuid) {
-  int fd = sock_uuid2fd(uuid);
-  if (validate_uuid(uuid) || !fdinfo(fd).open)
-    return -1;
-  ssize_t ret;
-  uint8_t touch = 0;
-  lock_fd(fd);
-  sock_rw_hook_s *rw;
-  void *rw_udata;
-retry:
-  rw = fdinfo(fd).rw_hooks;
-  rw_udata = fdinfo(fd).rw_udata;
-  unlock_fd(fd);
-  while ((ret = rw->flush(uuid, rw_udata)) > 0) {
-    touch = 1;
-  }
-  if (ret == -1) {
-    if (errno == EINTR)
-      goto retry;
-    if (errno == EWOULDBLOCK || errno == EAGAIN || errno == ENOTCONN ||
-        errno == ENOSPC)
-      goto finish;
-    goto error;
-  }
-  lock_fd(fd);
-  while (fdinfo(fd).packet &&
-         (ret = fdinfo(fd).packet->write_func(fd, fdinfo(fd).packet)) > 0) {
-    touch = 1;
-  }
-  if (ret == -1) {
-    if (errno == EINTR)
-      goto retry;
-    if (errno == EWOULDBLOCK || errno == EAGAIN || errno == ENOTCONN ||
-        errno == ENOSPC)
-      goto finish;
-    goto error;
-  }
-  if (!touch && fdinfo(fd).close && !fdinfo(fd).packet)
-    goto error;
-finish:
-  unlock_fd(fd);
-  if (touch) {
-    sock_touch(uuid);
-    return 1;
-  }
-  return fdinfo(fd).packet != NULL || fdinfo(fd).close;
-error:
-  unlock_fd(fd);
-  // fprintf(stderr,
-  //         "ERROR: sock `flush` failed"
-  //         " for %p with %d\n",
-  //         (void *)uuid, errno);
-  sock_force_close(uuid);
-  return -1;
-}
-
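The documented return values map naturally onto an "on writable" handler in an event loop; a minimal sketch, using only `sock_flush` itself and assuming the caller arms and disarms its own write events:

/* Hypothetical reactor callback: invoked when `uuid` becomes writable. */
static void on_writable(intptr_t uuid) {
  ssize_t r = sock_flush(uuid);
  if (r > 0) {
    /* 1: data is still queued - keep the write event armed and retry later */
  } else if (r == 0) {
    /* 0: fully flushed - the write event can be disarmed */
  } else {
    /* -1: error or the connection is (being) closed */
  }
}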
-/**
-`sock_flush_strong` performs the same action as `sock_flush` but returns only
-after all the data was sent. This is a "busy" wait; polling isn't performed.
-*/
-void sock_flush_strong(intptr_t uuid) {
-  errno = 0;
-  while (sock_flush(uuid) == 0 && errno == 0)
-    ;
-}
-
-/**
-Calls `sock_flush` for each file descriptor whose buffer isn't empty.
-*/
-void sock_flush_all(void) {
-  for (size_t fd = 0; fd < sock_data_store.capacity; fd++) {
-    if (!fdinfo(fd).open || !fdinfo(fd).packet)
-      continue;
-    sock_flush(fd2uuid(fd));
-  }
-}
-
-/**
-Returns the number of `sock_write` calls that are waiting in the socket's queue
-and haven't been processed.
-*/
-int sock_has_pending(intptr_t uuid) {
-  if (validate_uuid(uuid) || !uuidinfo(uuid).open)
-    return 0;
-  return (int)(uuidinfo(uuid).packet_count + uuidinfo(uuid).close);
-}
-
-/**
- * Returns the number of `sock_write` calls that are waiting in the socket's
- * queue and haven't been processed.
- */
-size_t sock_pending(intptr_t uuid) {
-  if (validate_uuid(uuid) || !uuidinfo(uuid).open)
-    return 0;
-  return (uuidinfo(uuid).packet_count + uuidinfo(uuid).close);
-}
-
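The pending counters lend themselves to a simple back-pressure check before queueing more output; a sketch assuming an arbitrary high-water mark (the `16` below is illustrative, not a library constant):

/* Skip producing more output while too many writes are already queued. */
static int can_queue_more(intptr_t uuid) {
  const size_t high_water = 16; /* illustrative threshold, not from the API */
  return sock_pending(uuid) < high_water;
}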
-/* *****************************************************************************
-TLC - Transport Layer Callback.
-
-Experimental
-*/
-
-/** Gets a socket hook state (a pointer to the struct). */
-struct sock_rw_hook_s *sock_rw_hook_get(intptr_t uuid) {
-  if (validate_uuid(uuid) || !uuidinfo(uuid).open ||
-      ((void)(uuid = sock_uuid2fd(uuid)),
-       fdinfo(uuid).rw_hooks == &SOCK_DEFAULT_HOOKS))
-    return NULL;
-  return fdinfo(uuid).rw_hooks;
-}
-
-/** Returns the socket's udata associated with the read/write hook. */
-void *sock_rw_udata(intptr_t uuid) {
-  if (validate_uuid(uuid) || !fdinfo(sock_uuid2fd(uuid)).open)
-    return NULL;
-  uuid = sock_uuid2fd(uuid);
-  return fdinfo(uuid).rw_udata;
-}
-
-/** Sets a socket hook state (a pointer to the struct). */
-int sock_rw_hook_set(intptr_t uuid, sock_rw_hook_s *rw_hooks, void *udata) {
-  if (validate_uuid(uuid) || !uuidinfo(uuid).open)
-    return -1;
-  if (!rw_hooks->read)
-    rw_hooks->read = sock_default_hooks_read;
-  if (!rw_hooks->write)
-    rw_hooks->write = sock_default_hooks_write;
-  if (!rw_hooks->flush)
-    rw_hooks->flush = sock_default_hooks_flush;
-  if (!rw_hooks->on_close)
-    rw_hooks->on_close = sock_default_hooks_on_close;
-  uuid = sock_uuid2fd(uuid);
-  lock_fd(uuid);
-  fdinfo(uuid).rw_hooks = rw_hooks;
-  fdinfo(uuid).rw_udata = udata;
-  unlock_fd(uuid);
-  return 0;
-}
-
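Since `sock_rw_hook_get` returns NULL while the default hooks are installed, a wrapper layer (TLS, for example) can be detected, and its state recovered, without touching the hook struct's internals; a sketch using only the accessors above:

/* Returns the udata attached by a custom transport layer (as registered with
 * sock_rw_hook_set), or NULL while the default plain-socket hooks are in use. */
static void *transport_state(intptr_t uuid) {
  if (sock_rw_hook_get(uuid) == NULL)
    return NULL;              /* default hooks - no wrapper installed */
  return sock_rw_udata(uuid); /* udata registered via sock_rw_hook_set */
}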
-/* *****************************************************************************
-test
-*/
-#ifdef DEBUG
-void sock_libtest(void) {
-  if (0) { /* this test can't be performed without initializing `facil`. */
-    char request[] = "GET / HTTP/1.1\r\n"
-                     "Host: www.google.com\r\n"
-                     "\r\n";
-    char buff[1024];
-    ssize_t i_read;
-    intptr_t uuid = sock_connect("www.google.com", "80");
-    if (uuid == -1) {
-      perror("sock_connect failed");
-      exit(1);
-    }
-    if (sock_write(uuid, request, sizeof(request) - 1) < 0)
-      perror("sock_write error ");
-
-    while ((i_read = sock_read(uuid, buff, 1024)) >= 0) {
-      if (i_read == 0) { // we may not have finished connecting yet.
-        sock_flush(uuid);
-        reschedule_thread();
-      } else {
-        fprintf(stderr, "\n%.*s\n\n", (int)i_read, buff);
-        break;
-      }
-    }
-    if (i_read < 0)
-      perror("Error with sock_read ");
-    fprintf(stderr, "done.\n");
-    sock_close(uuid);
-  }
-  sock_max_capacity();
-  for (int i = 0; i < 4; ++i) {
-    packet_s *packet = sock_packet_new();
-    sock_packet_free(packet);
-  }
-  packet_s *head, *pos;
-  pos = head = packet_pool.next;
-  size_t count = 0;
-  while (pos) {
-    count++;
-    pos = pos->next;
-  }
-  fprintf(stderr, "Packet pool test %s (%lu =? %lu)\n",
-          count == BUFFER_PACKET_POOL ? "PASS" : "FAIL",
-          (unsigned long)BUFFER_PACKET_POOL, (unsigned long)count);
-  printf("Allocated sock capacity %lu X %lu\n",
-         (unsigned long)sock_data_store.capacity,
-         (unsigned long)sizeof(struct fd_data_s));
-}
-#endif