spiped 0.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/ext/spiped/extconf.rb +3 -0
- data/ext/spiped/spiped-source/BUILDING +46 -0
- data/ext/spiped/spiped-source/CHANGELOG +44 -0
- data/ext/spiped/spiped-source/COPYRIGHT +33 -0
- data/ext/spiped/spiped-source/Makefile +47 -0
- data/ext/spiped/spiped-source/Makefile.POSIX +27 -0
- data/ext/spiped/spiped-source/Makefile.inc +20 -0
- data/ext/spiped/spiped-source/Makefile.prog +23 -0
- data/ext/spiped/spiped-source/POSIX/README +10 -0
- data/ext/spiped/spiped-source/POSIX/posix-cflags.sh +10 -0
- data/ext/spiped/spiped-source/POSIX/posix-clock_realtime.c +3 -0
- data/ext/spiped/spiped-source/POSIX/posix-l.c +1 -0
- data/ext/spiped/spiped-source/POSIX/posix-l.sh +14 -0
- data/ext/spiped/spiped-source/POSIX/posix-msg_nosignal.c +3 -0
- data/ext/spiped/spiped-source/README +198 -0
- data/ext/spiped/spiped-source/STYLE +151 -0
- data/ext/spiped/spiped-source/lib/dnsthread/dnsthread.c +464 -0
- data/ext/spiped/spiped-source/lib/dnsthread/dnsthread.h +45 -0
- data/ext/spiped/spiped-source/libcperciva/alg/sha256.c +442 -0
- data/ext/spiped/spiped-source/libcperciva/alg/sha256.h +95 -0
- data/ext/spiped/spiped-source/libcperciva/cpusupport/Build/cpusupport-X86-AESNI.c +13 -0
- data/ext/spiped/spiped-source/libcperciva/cpusupport/Build/cpusupport-X86-CPUID.c +8 -0
- data/ext/spiped/spiped-source/libcperciva/cpusupport/Build/cpusupport.sh +37 -0
- data/ext/spiped/spiped-source/libcperciva/cpusupport/cpusupport.h +63 -0
- data/ext/spiped/spiped-source/libcperciva/cpusupport/cpusupport_x86_aesni.c +30 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_aes.c +166 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_aes.h +31 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_aes_aesni.c +229 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_aes_aesni.h +31 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_aesctr.c +124 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_aesctr.h +41 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_dh.c +293 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_dh.h +43 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_dh_group14.c +46 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_dh_group14.h +9 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_entropy.c +215 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_entropy.h +14 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_verify_bytes.c +21 -0
- data/ext/spiped/spiped-source/libcperciva/crypto/crypto_verify_bytes.h +14 -0
- data/ext/spiped/spiped-source/libcperciva/datastruct/elasticarray.c +276 -0
- data/ext/spiped/spiped-source/libcperciva/datastruct/elasticarray.h +167 -0
- data/ext/spiped/spiped-source/libcperciva/datastruct/mpool.h +85 -0
- data/ext/spiped/spiped-source/libcperciva/datastruct/ptrheap.c +334 -0
- data/ext/spiped/spiped-source/libcperciva/datastruct/ptrheap.h +89 -0
- data/ext/spiped/spiped-source/libcperciva/datastruct/timerqueue.c +241 -0
- data/ext/spiped/spiped-source/libcperciva/datastruct/timerqueue.h +60 -0
- data/ext/spiped/spiped-source/libcperciva/events/events.c +203 -0
- data/ext/spiped/spiped-source/libcperciva/events/events.h +106 -0
- data/ext/spiped/spiped-source/libcperciva/events/events_immediate.c +149 -0
- data/ext/spiped/spiped-source/libcperciva/events/events_internal.h +95 -0
- data/ext/spiped/spiped-source/libcperciva/events/events_network.c +347 -0
- data/ext/spiped/spiped-source/libcperciva/events/events_network_selectstats.c +106 -0
- data/ext/spiped/spiped-source/libcperciva/events/events_timer.c +273 -0
- data/ext/spiped/spiped-source/libcperciva/network/network.h +95 -0
- data/ext/spiped/spiped-source/libcperciva/network/network_accept.c +103 -0
- data/ext/spiped/spiped-source/libcperciva/network/network_connect.c +258 -0
- data/ext/spiped/spiped-source/libcperciva/network/network_read.c +155 -0
- data/ext/spiped/spiped-source/libcperciva/network/network_write.c +188 -0
- data/ext/spiped/spiped-source/libcperciva/util/asprintf.c +49 -0
- data/ext/spiped/spiped-source/libcperciva/util/asprintf.h +16 -0
- data/ext/spiped/spiped-source/libcperciva/util/daemonize.c +134 -0
- data/ext/spiped/spiped-source/libcperciva/util/daemonize.h +10 -0
- data/ext/spiped/spiped-source/libcperciva/util/entropy.c +76 -0
- data/ext/spiped/spiped-source/libcperciva/util/entropy.h +13 -0
- data/ext/spiped/spiped-source/libcperciva/util/imalloc.h +33 -0
- data/ext/spiped/spiped-source/libcperciva/util/insecure_memzero.c +19 -0
- data/ext/spiped/spiped-source/libcperciva/util/insecure_memzero.h +33 -0
- data/ext/spiped/spiped-source/libcperciva/util/monoclock.c +52 -0
- data/ext/spiped/spiped-source/libcperciva/util/monoclock.h +14 -0
- data/ext/spiped/spiped-source/libcperciva/util/noeintr.c +54 -0
- data/ext/spiped/spiped-source/libcperciva/util/noeintr.h +14 -0
- data/ext/spiped/spiped-source/libcperciva/util/sock.c +472 -0
- data/ext/spiped/spiped-source/libcperciva/util/sock.h +56 -0
- data/ext/spiped/spiped-source/libcperciva/util/sock_internal.h +14 -0
- data/ext/spiped/spiped-source/libcperciva/util/sock_util.c +271 -0
- data/ext/spiped/spiped-source/libcperciva/util/sock_util.h +51 -0
- data/ext/spiped/spiped-source/libcperciva/util/sysendian.h +146 -0
- data/ext/spiped/spiped-source/libcperciva/util/warnp.c +76 -0
- data/ext/spiped/spiped-source/libcperciva/util/warnp.h +59 -0
- data/ext/spiped/spiped-source/proto/proto_conn.c +362 -0
- data/ext/spiped/spiped-source/proto/proto_conn.h +25 -0
- data/ext/spiped/spiped-source/proto/proto_crypt.c +396 -0
- data/ext/spiped/spiped-source/proto/proto_crypt.h +102 -0
- data/ext/spiped/spiped-source/proto/proto_handshake.c +330 -0
- data/ext/spiped/spiped-source/proto/proto_handshake.h +30 -0
- data/ext/spiped/spiped-source/proto/proto_pipe.c +202 -0
- data/ext/spiped/spiped-source/proto/proto_pipe.h +23 -0
- data/ext/spiped/spiped-source/spipe/Makefile +90 -0
- data/ext/spiped/spiped-source/spipe/README +24 -0
- data/ext/spiped/spiped-source/spipe/main.c +178 -0
- data/ext/spiped/spiped-source/spipe/pushbits.c +101 -0
- data/ext/spiped/spiped-source/spipe/pushbits.h +10 -0
- data/ext/spiped/spiped-source/spipe/spipe.1 +60 -0
- data/ext/spiped/spiped-source/spiped/Makefile +98 -0
- data/ext/spiped/spiped-source/spiped/README +62 -0
- data/ext/spiped/spiped-source/spiped/dispatch.c +214 -0
- data/ext/spiped/spiped-source/spiped/dispatch.h +27 -0
- data/ext/spiped/spiped-source/spiped/main.c +267 -0
- data/ext/spiped/spiped-source/spiped/spiped.1 +112 -0
- data/lib/spiped.rb +3 -0
- metadata +143 -0
/* Guard renamed from _EVENTS_H_: identifiers beginning with an underscore
 * followed by an uppercase letter are reserved for the implementation
 * (C11 7.1.3). */
#ifndef EVENTS_H_
#define EVENTS_H_

#include <sys/select.h>

/**
 * events_immediate_register(func, cookie, prio):
 * Register ${func}(${cookie}) to be run the next time events_run is invoked,
 * after immediate events with smaller ${prio} values and before events with
 * larger ${prio} values.  The value ${prio} must be in the range [0, 31].
 * Return a cookie which can be passed to events_immediate_cancel.
 */
void * events_immediate_register(int (*)(void *), void *, int);

/**
 * events_immediate_cancel(cookie):
 * Cancel the immediate event for which the cookie ${cookie} was returned by
 * events_immediate_register.
 */
void events_immediate_cancel(void *);

/* "op" parameter to events_network_register. */
#define EVENTS_NETWORK_OP_READ	0
#define EVENTS_NETWORK_OP_WRITE	1

/**
 * events_network_register(func, cookie, s, op):
 * Register ${func}(${cookie}) to be run when socket ${s} is ready for
 * reading or writing depending on whether ${op} is EVENTS_NETWORK_OP_READ or
 * EVENTS_NETWORK_OP_WRITE.  If there is already an event registration for
 * this ${s}/${op} pair, errno will be set to EEXIST and the function will
 * fail.
 */
int events_network_register(int (*)(void *), void *, int, int);

/**
 * events_network_cancel(s, op):
 * Cancel the event registered for the socket/operation pair ${s}/${op}.  If
 * there is no such registration, errno will be set to ENOENT and the
 * function will fail.
 */
int events_network_cancel(int, int);

/**
 * events_network_selectstats(N, mu, va, max):
 * Return statistics on the inter-select durations since the last time this
 * function was called.
 */
void events_network_selectstats(double *, double *, double *, double *);

/**
 * events_timer_register(func, cookie, timeo):
 * Register ${func}(${cookie}) to be run ${timeo} in the future.  Return a
 * cookie which can be passed to events_timer_cancel or events_timer_reset.
 */
void * events_timer_register(int (*)(void *), void *, const struct timeval *);

/**
 * events_timer_register_double(func, cookie, timeo):
 * As events_timer_register, but ${timeo} is a double-precision floating point
 * value specifying a number of seconds.
 */
void * events_timer_register_double(int (*)(void *), void *, double);

/**
 * events_timer_cancel(cookie):
 * Cancel the timer for which the cookie ${cookie} was returned by
 * events_timer_register.
 */
void events_timer_cancel(void *);

/**
 * events_timer_reset(cookie):
 * Reset the timer for which the cookie ${cookie} was returned by
 * events_timer_register to its initial value.
 */
int events_timer_reset(void *);

/**
 * events_run(void):
 * Run events.  Events registered via events_immediate_register will be run
 * first, in order of increasing ${prio} values; then events associated with
 * ready sockets registered via events_network_register; finally, events
 * associated with expired timers registered via events_timer_register will
 * be run.  If any event function returns a non-zero result, no further
 * events will be run and said non-zero result will be returned; on error,
 * -1 will be returned.
 */
int events_run(void);

/**
 * events_spin(done):
 * Run events until ${done} is non-zero (and return 0), an error occurs (and
 * return -1), or a callback returns a non-zero status (and return the status
 * code from the callback).
 */
int events_spin(int *);

/**
 * events_shutdown(void):
 * Clean up and free memory.  This call is not necessary on program exit and
 * is only expected to be useful when checking for memory leaks.
 */
void events_shutdown(void);

#endif /* !EVENTS_H_ */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#include "mpool.h"

#include "events_internal.h"
#include "events.h"

/* Doubly-linked list node holding one queued immediate event. */
struct eventq {
	struct eventrec * r;
	struct eventq * next;
	struct eventq * prev;
	int prio;
};

MPOOL(eventq, struct eventq, 4096);

/*
 * heads[i] is the first node of the priority-i list, or NULL if that list
 * is empty.  File-scope statics are zero-initialized, so all 32 entries
 * start out NULL.
 */
static struct eventq * heads[32];

/* For non-NULL heads[i], tails[i] is the last node in the list. */
static struct eventq * tails[32];

/* For i < minq, heads[i] == NULL. */
static int minq = 32;
/**
|
34
|
+
* events_immediate_register(func, cookie, prio):
|
35
|
+
* Register ${func}(${cookie}) to be run the next time events_run is invoked,
|
36
|
+
* after immediate events with smaller ${prio} values and before events with
|
37
|
+
* larger ${prio} values. The value ${prio} must be in the range [0, 31].
|
38
|
+
* Return a cookie which can be passed to events_immediate_cancel.
|
39
|
+
*/
|
40
|
+
void *
|
41
|
+
events_immediate_register(int (*func)(void *), void * cookie, int prio)
|
42
|
+
{
|
43
|
+
struct eventrec * r;
|
44
|
+
struct eventq * q;
|
45
|
+
|
46
|
+
/* Sanity check. */
|
47
|
+
assert((prio >= 0) && (prio < 32));
|
48
|
+
|
49
|
+
/* Bundle into an eventrec record. */
|
50
|
+
if ((r = events_mkrec(func, cookie)) == NULL)
|
51
|
+
goto err0;
|
52
|
+
|
53
|
+
/* Create a linked list node. */
|
54
|
+
if ((q = mpool_eventq_malloc()) == NULL)
|
55
|
+
goto err1;
|
56
|
+
q->r = r;
|
57
|
+
q->next = NULL;
|
58
|
+
q->prev = NULL;
|
59
|
+
q->prio = prio;
|
60
|
+
|
61
|
+
/* Add to the queue. */
|
62
|
+
if (heads[prio] == NULL) {
|
63
|
+
heads[prio] = q;
|
64
|
+
if (prio < minq)
|
65
|
+
minq = prio;
|
66
|
+
} else {
|
67
|
+
tails[prio]->next = q;
|
68
|
+
q->prev = tails[prio];
|
69
|
+
}
|
70
|
+
tails[prio] = q;
|
71
|
+
|
72
|
+
/* Success! */
|
73
|
+
return (q);
|
74
|
+
|
75
|
+
err1:
|
76
|
+
events_freerec(r);
|
77
|
+
err0:
|
78
|
+
/* Failure! */
|
79
|
+
return (NULL);
|
80
|
+
}
|
81
|
+
|
82
|
+
/**
|
83
|
+
* events_immediate_cancel(cookie):
|
84
|
+
* Cancel the immediate event for which the cookie ${cookie} was returned by
|
85
|
+
* events_immediate_register.
|
86
|
+
*/
|
87
|
+
void
|
88
|
+
events_immediate_cancel(void * cookie)
|
89
|
+
{
|
90
|
+
struct eventq * q = cookie;
|
91
|
+
int prio = q->prio;
|
92
|
+
|
93
|
+
/* If we have a predecessor, point it at our successor. */
|
94
|
+
if (q->prev != NULL)
|
95
|
+
q->prev->next = q->next;
|
96
|
+
else
|
97
|
+
heads[prio] = q->next;
|
98
|
+
|
99
|
+
/* If we have a successor, point it at our predecessor. */
|
100
|
+
if (q->next != NULL)
|
101
|
+
q->next->prev = q->prev;
|
102
|
+
else
|
103
|
+
tails[prio] = q->prev;
|
104
|
+
|
105
|
+
/* Free the eventrec. */
|
106
|
+
events_freerec(q->r);
|
107
|
+
|
108
|
+
/* Return the node to the malloc pool. */
|
109
|
+
mpool_eventq_free(q);
|
110
|
+
}
|
111
|
+
|
112
|
+
/**
|
113
|
+
* events_immediate_get(void):
|
114
|
+
* Remove and return an eventrec structure from the immediate event queue,
|
115
|
+
* or return NULL if there are no such events. The caller is responsible for
|
116
|
+
* freeing the returned memory.
|
117
|
+
*/
|
118
|
+
struct eventrec *
|
119
|
+
events_immediate_get(void)
|
120
|
+
{
|
121
|
+
struct eventq * q;
|
122
|
+
struct eventrec * r;
|
123
|
+
|
124
|
+
/* Advance past priorities which have no events. */
|
125
|
+
while ((minq < 32) && (heads[minq] == NULL))
|
126
|
+
minq++;
|
127
|
+
|
128
|
+
/* Are there any events? */
|
129
|
+
if (minq == 32)
|
130
|
+
return (NULL);
|
131
|
+
|
132
|
+
/*
|
133
|
+
* Remove the first node from the highest priority non-empty linked
|
134
|
+
* list.
|
135
|
+
*/
|
136
|
+
q = heads[minq];
|
137
|
+
heads[minq] = q->next;
|
138
|
+
if (heads[minq] != NULL)
|
139
|
+
heads[minq]->prev = NULL;
|
140
|
+
|
141
|
+
/* Extract the eventrec. */
|
142
|
+
r = q->r;
|
143
|
+
|
144
|
+
/* Return the node to the malloc pool. */
|
145
|
+
mpool_eventq_free(q);
|
146
|
+
|
147
|
+
/* Return the eventrec. */
|
148
|
+
return (r);
|
149
|
+
}
|
/* Guard renamed from _EVENTS_INTERNAL_H_: identifiers beginning with an
 * underscore followed by an uppercase letter are reserved for the
 * implementation (C11 7.1.3). */
#ifndef EVENTS_INTERNAL_H_
#define EVENTS_INTERNAL_H_

#include <sys/time.h>

/* Opaque event structure. */
struct eventrec;

/**
 * events_mkrec(func, cookie):
 * Package ${func}, ${cookie} into a struct eventrec.
 */
struct eventrec * events_mkrec(int (*)(void *), void *);

/**
 * events_freerec(r):
 * Free the eventrec ${r}.
 */
void events_freerec(struct eventrec *);

/**
 * events_immediate_get(void):
 * Remove and return an eventrec structure from the immediate event queue,
 * or return NULL if there are no such events.  The caller is responsible for
 * freeing the returned memory.
 */
struct eventrec * events_immediate_get(void);

/**
 * events_network_select(tv):
 * Check for socket readiness events, waiting up to ${tv} time if there are
 * no sockets immediately ready, or indefinitely if ${tv} is NULL.  The value
 * stored in ${tv} may be modified.
 */
int events_network_select(struct timeval *);

/**
 * events_network_selectstats_startclock(void):
 * Start the inter-select duration clock: There is a selectable event.
 */
void events_network_selectstats_startclock(void);

/**
 * events_network_selectstats_stopclock(void):
 * Stop the inter-select duration clock: There are no selectable events.
 */
void events_network_selectstats_stopclock(void);

/**
 * events_network_selectstats_select(void):
 * Update inter-select duration statistics in relation to an upcoming
 * select(2) call.
 */
void events_network_selectstats_select(void);

/**
 * events_network_get(void):
 * Find a socket readiness event which was identified by a previous call to
 * events_network_select, and return it as an eventrec structure; or return
 * NULL if there are no such events available.  The caller is responsible for
 * freeing the returned memory.
 */
struct eventrec * events_network_get(void);

/**
 * events_network_shutdown(void):
 * Clean up and free memory.  This call is not necessary on program exit and
 * is only expected to be useful when checking for memory leaks.
 */
void events_network_shutdown(void);

/**
 * events_timer_min(timeo):
 * Return via ${timeo} a pointer to the minimum time which must be waited
 * before a timer will expire; or to NULL if there are no timers.  The caller
 * is responsible for freeing the returned pointer.
 */
int events_timer_min(struct timeval **);

/**
 * events_timer_get(r):
 * Return via ${r} a pointer to an eventrec structure corresponding to an
 * expired timer, and delete said timer; or to NULL if there are no expired
 * timers.  The caller is responsible for freeing the returned pointer.
 */
int events_timer_get(struct eventrec **);

/**
 * events_timer_shutdown(void):
 * Clean up and free memory.  This call is not necessary on program exit and
 * is only expected to be useful when checking for memory leaks.
 */
void events_timer_shutdown(void);

#endif /* !EVENTS_INTERNAL_H_ */
#include <sys/select.h>

#include <errno.h>
#include <stdlib.h>

#include "elasticarray.h"
#include "warnp.h"

#include "events_internal.h"
#include "events.h"

/* Readability and writability callbacks registered for one socket. */
struct socketrec {
	struct eventrec * reader;
	struct eventrec * writer;
};

/* Elastic array of per-socket records, indexed by file descriptor. */
ELASTICARRAY_DECL(SOCKETLIST, socketlist, struct socketrec);
static SOCKETLIST S = NULL;

/*
 * Descriptors which select(2) reported as ready but for which we have not
 * yet handed out an event via events_network_get.
 */
static fd_set readfds;
static fd_set writefds;

/* Next position for events_network_get to examine in the fd sets. */
static size_t fdscanpos;

/* Number of registered events. */
static size_t nev;
/* Initialize the socket list if we haven't already done so. */
|
33
|
+
static int
|
34
|
+
initsocketlist(void)
|
35
|
+
{
|
36
|
+
|
37
|
+
/* If we're already initialized, do nothing. */
|
38
|
+
if (S != NULL)
|
39
|
+
goto done;
|
40
|
+
|
41
|
+
/* Initialize the socket list. */
|
42
|
+
if ((S = socketlist_init(0)) == NULL)
|
43
|
+
goto err0;
|
44
|
+
|
45
|
+
/* There are no events registered. */
|
46
|
+
nev = 0;
|
47
|
+
|
48
|
+
/* There are no unevented ready sockets. */
|
49
|
+
fdscanpos = FD_SETSIZE;
|
50
|
+
|
51
|
+
done:
|
52
|
+
/* Success! */
|
53
|
+
return (0);
|
54
|
+
|
55
|
+
err0:
|
56
|
+
/* Failure! */
|
57
|
+
return (-1);
|
58
|
+
}
|
59
|
+
|
60
|
+
/* Grow the socket list and initialize new records. */
|
61
|
+
static int
|
62
|
+
growsocketlist(size_t nrec)
|
63
|
+
{
|
64
|
+
size_t i;
|
65
|
+
|
66
|
+
/* Get the old size. */
|
67
|
+
i = socketlist_getsize(S);
|
68
|
+
|
69
|
+
/* Grow the list. */
|
70
|
+
if (socketlist_resize(S, nrec))
|
71
|
+
goto err0;
|
72
|
+
|
73
|
+
/* Initialize new members. */
|
74
|
+
for (; i < nrec; i++) {
|
75
|
+
socketlist_get(S, i)->reader = NULL;
|
76
|
+
socketlist_get(S, i)->writer = NULL;
|
77
|
+
}
|
78
|
+
|
79
|
+
/* Success! */
|
80
|
+
return (0);
|
81
|
+
|
82
|
+
err0:
|
83
|
+
/* Failure! */
|
84
|
+
return (-1);
|
85
|
+
}
|
86
|
+
|
87
|
+
/**
|
88
|
+
* events_network_register(func, cookie, s, op):
|
89
|
+
* Register ${func}(${cookie}) to be run when socket ${s} is ready for
|
90
|
+
* reading or writing depending on whether ${op} is EVENTS_NETWORK_OP_READ or
|
91
|
+
* EVENTS_NETWORK_OP_WRITE. If there is already an event registration for
|
92
|
+
* this ${s}/${op} pair, errno will be set to EEXIST and the function will
|
93
|
+
* fail.
|
94
|
+
*/
|
95
|
+
int
|
96
|
+
events_network_register(int (*func)(void *), void * cookie, int s, int op)
|
97
|
+
{
|
98
|
+
struct eventrec ** r;
|
99
|
+
|
100
|
+
/* Initialize if necessary. */
|
101
|
+
if (initsocketlist())
|
102
|
+
goto err0;
|
103
|
+
|
104
|
+
/* Sanity-check socket number. */
|
105
|
+
if ((s < 0) || (s >= (int)FD_SETSIZE)) {
|
106
|
+
warn0("Invalid file descriptor for network event: %d", s);
|
107
|
+
goto err0;
|
108
|
+
}
|
109
|
+
|
110
|
+
/* Sanity-check operation. */
|
111
|
+
if ((op != EVENTS_NETWORK_OP_READ) &&
|
112
|
+
(op != EVENTS_NETWORK_OP_WRITE)) {
|
113
|
+
warn0("Invalid operation for network event: %d", op);
|
114
|
+
goto err0;
|
115
|
+
}
|
116
|
+
|
117
|
+
/* Grow the array if necessary. */
|
118
|
+
if (((size_t)(s) >= socketlist_getsize(S)) &&
|
119
|
+
(growsocketlist(s + 1) != 0))
|
120
|
+
goto err0;
|
121
|
+
|
122
|
+
/* Look up the relevant event pointer. */
|
123
|
+
if (op == EVENTS_NETWORK_OP_READ)
|
124
|
+
r = &socketlist_get(S, s)->reader;
|
125
|
+
else
|
126
|
+
r = &socketlist_get(S, s)->writer;
|
127
|
+
|
128
|
+
/* Error out if we already have an event registered. */
|
129
|
+
if (*r != NULL) {
|
130
|
+
errno = EEXIST;
|
131
|
+
goto err0;
|
132
|
+
}
|
133
|
+
|
134
|
+
/* Register the new event. */
|
135
|
+
if ((*r = events_mkrec(func, cookie)) == NULL)
|
136
|
+
goto err0;
|
137
|
+
|
138
|
+
/*
|
139
|
+
* Increment events-registered counter; and if it was zero, start the
|
140
|
+
* inter-select duration clock.
|
141
|
+
*/
|
142
|
+
if (nev++ == 0)
|
143
|
+
events_network_selectstats_startclock();
|
144
|
+
|
145
|
+
/* Success! */
|
146
|
+
return (0);
|
147
|
+
|
148
|
+
err0:
|
149
|
+
/* Failure! */
|
150
|
+
return (-1);
|
151
|
+
}
|
152
|
+
|
153
|
+
/**
|
154
|
+
* events_network_cancel(s, op):
|
155
|
+
* Cancel the event registered for the socket/operation pair ${s}/${op}. If
|
156
|
+
* there is no such registration, errno will be set to ENOENT and the
|
157
|
+
* function will fail.
|
158
|
+
*/
|
159
|
+
int
|
160
|
+
events_network_cancel(int s, int op)
|
161
|
+
{
|
162
|
+
struct eventrec ** r;
|
163
|
+
|
164
|
+
/* Initialize if necessary. */
|
165
|
+
if (initsocketlist())
|
166
|
+
goto err0;
|
167
|
+
|
168
|
+
/* Sanity-check socket number. */
|
169
|
+
if ((s < 0) || (s >= (int)FD_SETSIZE)) {
|
170
|
+
warn0("Invalid file descriptor for network event: %d", s);
|
171
|
+
goto err0;
|
172
|
+
}
|
173
|
+
|
174
|
+
/* Sanity-check operation. */
|
175
|
+
if ((op != EVENTS_NETWORK_OP_READ) &&
|
176
|
+
(op != EVENTS_NETWORK_OP_WRITE)) {
|
177
|
+
warn0("Invalid operation for network event: %d", op);
|
178
|
+
goto err0;
|
179
|
+
}
|
180
|
+
|
181
|
+
/* We have no events registered beyond the end of the array. */
|
182
|
+
if ((size_t)(s) >= socketlist_getsize(S)) {
|
183
|
+
errno = ENOENT;
|
184
|
+
goto err0;
|
185
|
+
}
|
186
|
+
|
187
|
+
/* Look up the relevant event pointer. */
|
188
|
+
if (op == EVENTS_NETWORK_OP_READ)
|
189
|
+
r = &socketlist_get(S, s)->reader;
|
190
|
+
else
|
191
|
+
r = &socketlist_get(S, s)->writer;
|
192
|
+
|
193
|
+
/* Check if we have an event. */
|
194
|
+
if (*r == NULL) {
|
195
|
+
errno = ENOENT;
|
196
|
+
goto err0;
|
197
|
+
}
|
198
|
+
|
199
|
+
/* Free the event. */
|
200
|
+
events_freerec(*r);
|
201
|
+
*r = NULL;
|
202
|
+
|
203
|
+
/*
|
204
|
+
* Since there is no longer an event registered for this socket /
|
205
|
+
* operation pair, it doesn't make any sense for it to be ready.
|
206
|
+
*/
|
207
|
+
if (op == EVENTS_NETWORK_OP_READ)
|
208
|
+
FD_CLR(s, &readfds);
|
209
|
+
else
|
210
|
+
FD_CLR(s, &writefds);
|
211
|
+
|
212
|
+
/*
|
213
|
+
* Decrement events-registered counter; and if it is becoming zero,
|
214
|
+
* stop the inter-select duration clock.
|
215
|
+
*/
|
216
|
+
if (--nev == 0)
|
217
|
+
events_network_selectstats_stopclock();
|
218
|
+
|
219
|
+
/* Success! */
|
220
|
+
return (0);
|
221
|
+
|
222
|
+
err0:
|
223
|
+
/* Failure! */
|
224
|
+
return (-1);
|
225
|
+
}
|
226
|
+
|
227
|
+
/**
|
228
|
+
* events_network_select(tv):
|
229
|
+
* Check for socket readiness events, waiting up to ${tv} time if there are
|
230
|
+
* no sockets immediately ready, or indefinitely if ${tv} is NULL. The value
|
231
|
+
* stored in ${tv} may be modified.
|
232
|
+
*/
|
233
|
+
int
|
234
|
+
events_network_select(struct timeval * tv)
|
235
|
+
{
|
236
|
+
size_t i;
|
237
|
+
|
238
|
+
/* Initialize if necessary. */
|
239
|
+
if (initsocketlist())
|
240
|
+
goto err0;
|
241
|
+
|
242
|
+
/* Zero the fd sets... */
|
243
|
+
FD_ZERO(&readfds);
|
244
|
+
FD_ZERO(&writefds);
|
245
|
+
|
246
|
+
/* ... and add the ones we care about. */
|
247
|
+
for (i = 0; i < socketlist_getsize(S); i++) {
|
248
|
+
if (socketlist_get(S, i)->reader)
|
249
|
+
FD_SET(i, &readfds);
|
250
|
+
if (socketlist_get(S, i)->writer)
|
251
|
+
FD_SET(i, &writefds);
|
252
|
+
}
|
253
|
+
|
254
|
+
/* We're about to call select! */
|
255
|
+
events_network_selectstats_select();
|
256
|
+
|
257
|
+
/* Select. */
|
258
|
+
while (select(socketlist_getsize(S), &readfds, &writefds,
|
259
|
+
NULL, tv) == -1) {
|
260
|
+
/* EINTR is harmless. */
|
261
|
+
if (errno == EINTR)
|
262
|
+
continue;
|
263
|
+
|
264
|
+
/* Anything else is an error. */
|
265
|
+
warnp("select()");
|
266
|
+
goto err0;
|
267
|
+
}
|
268
|
+
|
269
|
+
/* If we have any events registered, start the clock again. */
|
270
|
+
if (nev > 0)
|
271
|
+
events_network_selectstats_startclock();
|
272
|
+
|
273
|
+
/* We should start scanning for events at the beginning. */
|
274
|
+
fdscanpos = 0;
|
275
|
+
|
276
|
+
/* Success! */
|
277
|
+
return (0);
|
278
|
+
|
279
|
+
err0:
|
280
|
+
/* Failure! */
|
281
|
+
return (-1);
|
282
|
+
}
|
283
|
+
|
284
|
+
/**
|
285
|
+
* events_network_get(void):
|
286
|
+
* Find a socket readiness event which was identified by a previous call to
|
287
|
+
* events_network_select, and return it as an eventrec structure; or return
|
288
|
+
* NULL if there are no such events available. The caller is responsible for
|
289
|
+
* freeing the returned memory.
|
290
|
+
*/
|
291
|
+
struct eventrec *
|
292
|
+
events_network_get(void)
|
293
|
+
{
|
294
|
+
struct eventrec * r;
|
295
|
+
size_t nfds = socketlist_getsize(S);
|
296
|
+
|
297
|
+
/* We haven't found any events yet. */
|
298
|
+
r = NULL;
|
299
|
+
|
300
|
+
/* Scan through the fd sets looking for ready sockets. */
|
301
|
+
for (; fdscanpos < nfds; fdscanpos++) {
|
302
|
+
/* Are we ready for reading? */
|
303
|
+
if (FD_ISSET(fdscanpos, &readfds)) {
|
304
|
+
r = socketlist_get(S, fdscanpos)->reader;
|
305
|
+
socketlist_get(S, fdscanpos)->reader = NULL;
|
306
|
+
if (--nev == 0)
|
307
|
+
events_network_selectstats_stopclock();
|
308
|
+
FD_CLR(fdscanpos, &readfds);
|
309
|
+
break;
|
310
|
+
}
|
311
|
+
|
312
|
+
/* Are we ready for writing? */
|
313
|
+
if (FD_ISSET(fdscanpos, &writefds)) {
|
314
|
+
r = socketlist_get(S, fdscanpos)->writer;
|
315
|
+
socketlist_get(S, fdscanpos)->writer = NULL;
|
316
|
+
if (--nev == 0)
|
317
|
+
events_network_selectstats_stopclock();
|
318
|
+
FD_CLR(fdscanpos, &writefds);
|
319
|
+
break;
|
320
|
+
}
|
321
|
+
}
|
322
|
+
|
323
|
+
/* Return the event we found, or NULL if we didn't find any. */
|
324
|
+
return (r);
|
325
|
+
}
|
326
|
+
|
327
|
+
/**
|
328
|
+
* events_network_shutdown(void)
|
329
|
+
* Clean up and free memory. This call is not necessary on program exit and
|
330
|
+
* is only expected to be useful when checking for memory leaks.
|
331
|
+
*/
|
332
|
+
void
|
333
|
+
events_network_shutdown(void)
|
334
|
+
{
|
335
|
+
|
336
|
+
/* If we're not initialized, do nothing. */
|
337
|
+
if (S == NULL)
|
338
|
+
return;
|
339
|
+
|
340
|
+
/* If we have any registered events, do nothing. */
|
341
|
+
if (nev > 0)
|
342
|
+
return;
|
343
|
+
|
344
|
+
/* Free the socket list. */
|
345
|
+
socketlist_free(S);
|
346
|
+
S = NULL;
|
347
|
+
}
|