rev 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- data/LICENSE +58 -0
- data/README +107 -0
- data/ext/libev/ev.c +2440 -0
- data/ext/libev/ev.h +551 -0
- data/ext/libev/ev_epoll.c +174 -0
- data/ext/libev/ev_kqueue.c +186 -0
- data/ext/libev/ev_poll.c +127 -0
- data/ext/libev/ev_port.c +155 -0
- data/ext/libev/ev_select.c +236 -0
- data/ext/libev/ev_vars.h +108 -0
- data/ext/libev/ev_win32.c +117 -0
- data/ext/libev/ev_wrap.h +132 -0
- data/ext/rev/extconf.rb +36 -0
- data/ext/rev/rev.h +44 -0
- data/ext/rev/rev_ext.c +29 -0
- data/ext/rev/rev_io_watcher.c +157 -0
- data/ext/rev/rev_loop.c +254 -0
- data/ext/rev/rev_timer_watcher.c +153 -0
- data/ext/rev/rev_watcher.c +222 -0
- data/ext/rev/rev_watcher.h +79 -0
- data/lib/rev.rb +21 -0
- data/lib/rev/buffered_io.rb +123 -0
- data/lib/rev/dns_resolver.rb +178 -0
- data/lib/rev/io_watcher.rb +18 -0
- data/lib/rev/listener.rb +50 -0
- data/lib/rev/loop.rb +101 -0
- data/lib/rev/server.rb +53 -0
- data/lib/rev/socket.rb +186 -0
- data/lib/rev/timer_watcher.rb +18 -0
- data/lib/rev/watcher.rb +49 -0
- data/spec/rev_spec.rb +26 -0
- metadata +93 -0
@@ -0,0 +1,174 @@
|
|
1
|
+
/*
|
2
|
+
* libev epoll fd activity backend
|
3
|
+
*
|
4
|
+
* Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
|
5
|
+
* All rights reserved.
|
6
|
+
*
|
7
|
+
* Redistribution and use in source and binary forms, with or without
|
8
|
+
* modification, are permitted provided that the following conditions are
|
9
|
+
* met:
|
10
|
+
*
|
11
|
+
* * Redistributions of source code must retain the above copyright
|
12
|
+
* notice, this list of conditions and the following disclaimer.
|
13
|
+
*
|
14
|
+
* * Redistributions in binary form must reproduce the above
|
15
|
+
* copyright notice, this list of conditions and the following
|
16
|
+
* disclaimer in the documentation and/or other materials provided
|
17
|
+
* with the distribution.
|
18
|
+
*
|
19
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20
|
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21
|
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22
|
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23
|
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24
|
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25
|
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26
|
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27
|
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28
|
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29
|
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30
|
+
*/
|
31
|
+
|
32
|
+
/*
|
33
|
+
* general notes about epoll:
|
34
|
+
*
|
35
|
+
* a) epoll silently removes fds from the fd set. as nothing tells us
|
36
|
+
* that an fd has been removed otherwise, we have to continually
|
37
|
+
* "rearm" fds that we suspect *might* have changed (same
|
38
|
+
* problem with kqueue, but much less costly there).
|
39
|
+
* b) the fact that ADD != MOD creates a lot of extra syscalls due to a)
|
40
|
+
* and seems not to have any advantage.
|
41
|
+
* c) the inability to handle fork or file descriptors (think dup)
|
42
|
+
* limits the applicability over poll, so this is not a generic
|
43
|
+
* poll replacement.
|
44
|
+
*
|
45
|
+
* lots of "weird code" and complication handling in this file is due
|
46
|
+
* to these design problems with epoll, as we try very hard to avoid
|
47
|
+
* epoll_ctl syscalls for common usage patterns.
|
48
|
+
*/
|
49
|
+
|
50
|
+
#include <sys/epoll.h>
|
51
|
+
|
52
|
+
static void
|
53
|
+
epoll_modify (EV_P_ int fd, int oev, int nev)
|
54
|
+
{
|
55
|
+
struct epoll_event ev;
|
56
|
+
|
57
|
+
/*
|
58
|
+
* we handle EPOLL_CTL_DEL by ignoring it here
|
59
|
+
* on the assumption that the fd is gone anyways
|
60
|
+
* if that is wrong, we have to handle the spurious
|
61
|
+
* event in epoll_poll.
|
62
|
+
*/
|
63
|
+
if (!nev)
|
64
|
+
return;
|
65
|
+
|
66
|
+
ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */
|
67
|
+
ev.events = (nev & EV_READ ? EPOLLIN : 0)
|
68
|
+
| (nev & EV_WRITE ? EPOLLOUT : 0);
|
69
|
+
|
70
|
+
if (expect_true (!epoll_ctl (backend_fd, oev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
|
71
|
+
return;
|
72
|
+
|
73
|
+
if (expect_true (errno == ENOENT))
|
74
|
+
{
|
75
|
+
/* on ENOENT the fd went away, so try to do the right thing */
|
76
|
+
if (!nev)
|
77
|
+
return;
|
78
|
+
|
79
|
+
if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
|
80
|
+
return;
|
81
|
+
}
|
82
|
+
else if (expect_true (errno == EEXIST))
|
83
|
+
{
|
84
|
+
/* on EEXIST we ignored a previous DEL */
|
85
|
+
if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
|
86
|
+
return;
|
87
|
+
}
|
88
|
+
|
89
|
+
fd_kill (EV_A_ fd);
|
90
|
+
}
|
91
|
+
|
92
|
+
/* wait up to timeout seconds for fd activity and dispatch events.
   timeout is fractional seconds, converted to milliseconds rounding up
   so we never wake before the requested time has actually passed. */
static void
epoll_poll (EV_P_ ev_tstamp timeout)
{
  int i;
  int eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, (int)ceil (timeout * 1000.));

  if (expect_false (eventcnt < 0))
    {
      /* EINTR is routine (signal delivery); anything else is reported */
      if (errno != EINTR)
        syserr ("(libev) epoll_wait");

      return;
    }

  for (i = 0; i < eventcnt; ++i)
    {
      struct epoll_event *ev = epoll_events + i;

      int fd = ev->data.u64; /* the fd was stored in data.u64 by epoll_modify */
      /* EPOLLERR/EPOLLHUP are folded into both read and write readiness so
         watchers of either kind get a chance to observe the condition */
      int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
      | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0);
      int want = anfds [fd].events;

      if (expect_false (got & ~want))
        {
          /* we received an event but are not interested in it, try mod or del */
          ev->events = (want & EV_READ ? EPOLLIN : 0)
          | (want & EV_WRITE ? EPOLLOUT : 0);

          /* best effort - errors here are ignored; a spurious wakeup is harmless */
          epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev);
        }

      fd_event (EV_A_ fd, got);
    }

  /* if the receive array was full, increase its size */
  if (expect_false (eventcnt == epoll_eventmax))
    {
      ev_free (epoll_events);
      epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
      epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
    }
}
|
135
|
+
|
136
|
+
/* try to bring up the epoll backend; returns EVBACKEND_EPOLL on
   success, 0 if epoll is unavailable on this system. */
int inline_size
epoll_init (EV_P_ int flags)
{
  backend_fd = epoll_create (256); /* the size argument is only a hint */

  if (backend_fd < 0)
    return 0;

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* don't leak the fd across exec */

  backend_fudge = 0.; /* kernel sources seem to indicate this to be zero */
  backend_modify = epoll_modify;
  backend_poll = epoll_poll;

  epoll_eventmax = 64; /* initial number of events receivable per poll */
  epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);

  return EVBACKEND_EPOLL;
}
|
155
|
+
|
156
|
+
/* free the epoll backend's event receive buffer.
   NOTE(review): backend_fd is not closed here - presumably the generic
   loop teardown in ev.c does that; confirm there. */
void inline_size
epoll_destroy (EV_P)
{
  ev_free (epoll_events);
}
|
161
|
+
|
162
|
+
/* called in the child after fork: the epoll fd is shared with the
   parent, so close it and create a fresh one, then re-register fds. */
void inline_size
epoll_fork (EV_P)
{
  close (backend_fd);

  /* retry until epoll_create succeeds */
  while ((backend_fd = epoll_create (256)) < 0)
    syserr ("(libev) epoll_create");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
|
174
|
+
|
@@ -0,0 +1,186 @@
|
|
1
|
+
/*
|
2
|
+
* libev kqueue backend
|
3
|
+
*
|
4
|
+
* Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
|
5
|
+
* All rights reserved.
|
6
|
+
*
|
7
|
+
* Redistribution and use in source and binary forms, with or without
|
8
|
+
* modification, are permitted provided that the following conditions are
|
9
|
+
* met:
|
10
|
+
*
|
11
|
+
* * Redistributions of source code must retain the above copyright
|
12
|
+
* notice, this list of conditions and the following disclaimer.
|
13
|
+
*
|
14
|
+
* * Redistributions in binary form must reproduce the above
|
15
|
+
* copyright notice, this list of conditions and the following
|
16
|
+
* disclaimer in the documentation and/or other materials provided
|
17
|
+
* with the distribution.
|
18
|
+
*
|
19
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20
|
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21
|
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22
|
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23
|
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24
|
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25
|
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26
|
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27
|
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28
|
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29
|
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30
|
+
*/
|
31
|
+
|
32
|
+
#include <sys/types.h>
|
33
|
+
#include <sys/time.h>
|
34
|
+
#include <sys/queue.h>
|
35
|
+
#include <sys/event.h>
|
36
|
+
#include <string.h>
|
37
|
+
#include <errno.h>
|
38
|
+
|
39
|
+
/* append one change request (fd, filter, flags, fflags) to the pending
   change list, growing the list on demand; the requests are submitted
   in one batch by the next kevent call in kqueue_poll. */
void inline_speed
kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
{
  int idx = kqueue_changecnt++;

  array_needsize (struct kevent, kqueue_changes, kqueue_changemax, kqueue_changecnt, EMPTY2);

  EV_SET (&kqueue_changes [idx], fd, filter, flags, fflags, 0, 0);
}
|
47
|
+
|
48
|
+
#ifndef NOTE_EOF
|
49
|
+
# define NOTE_EOF 0
|
50
|
+
#endif
|
51
|
+
|
52
|
+
/* change interest in events for fd: queue EV_DELETE requests for the
   previously registered filters (oev), then unconditionally (re-)queue
   EV_ADD requests for the new set (nev). */
static void
kqueue_modify (EV_P_ int fd, int oev, int nev)
{
  if (oev != nev)
    {
      if (oev & EV_READ)
        kqueue_change (EV_A_ fd, EVFILT_READ , EV_DELETE, 0);

      if (oev & EV_WRITE)
        kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_DELETE, 0);
    }

  /* to detect close/reopen reliably, we have to re-add */
  /* event requests even when oev == nev */

  if (nev & EV_READ)
    kqueue_change (EV_A_ fd, EVFILT_READ , EV_ADD, NOTE_EOF);

  if (nev & EV_WRITE)
    kqueue_change (EV_A_ fd, EVFILT_WRITE, EV_ADD, NOTE_EOF);
}
|
73
|
+
|
74
|
+
/* submit all queued change requests and wait up to timeout seconds for
   events, dispatching them via fd_event; per-fd errors reported by the
   kernel get individual recovery treatment. */
static void
kqueue_poll (EV_P_ ev_tstamp timeout)
{
  int res, i;
  struct timespec ts;

  /* need to resize so there is enough space for errors */
  if (kqueue_changecnt > kqueue_eventmax)
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_changecnt);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }

  /* split the fractional-second timeout into a timespec */
  ts.tv_sec = (time_t)timeout;
  ts.tv_nsec = (long)((timeout - (ev_tstamp)ts.tv_sec) * 1e9);
  /* one syscall both submits the change list and fetches events */
  res = kevent (backend_fd, kqueue_changes, kqueue_changecnt, kqueue_events, kqueue_eventmax, &ts);
  kqueue_changecnt = 0; /* the kernel has consumed the change list */

  if (expect_false (res < 0))
    {
      /* EINTR is routine; anything else is reported */
      if (errno != EINTR)
        syserr ("(libev) kevent");

      return;
    }

  for (i = 0; i < res; ++i)
    {
      int fd = kqueue_events [i].ident;

      if (expect_false (kqueue_events [i].flags & EV_ERROR))
        {
          int err = kqueue_events [i].data;

          /* we are only interested in errors for fds that we are interested in :) */
          if (anfds [fd].events)
            {
              if (err == ENOENT) /* resubmit changes on ENOENT */
                kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
              else if (err == EBADF) /* on EBADF, we re-check the fd */
                {
                  if (fd_valid (fd))
                    kqueue_modify (EV_A_ fd, 0, anfds [fd].events);
                  else
                    fd_kill (EV_A_ fd);
                }
              else /* on all other errors, we error out on the fd */
                fd_kill (EV_A_ fd);
            }
        }
      else
        /* map the kqueue filter back to libev's event bits */
        fd_event (
          EV_A_
          fd,
          kqueue_events [i].filter == EVFILT_READ ? EV_READ
          : kqueue_events [i].filter == EVFILT_WRITE ? EV_WRITE
          : 0
        );
    }

  /* if the receive array was full, increase its size for the next poll */
  if (expect_false (res == kqueue_eventmax))
    {
      ev_free (kqueue_events);
      kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1);
      kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
    }
}
|
142
|
+
|
143
|
+
/* try to bring up the kqueue backend; returns EVBACKEND_KQUEUE on
   success, 0 if kqueue is unavailable. */
int inline_size
kqueue_init (EV_P_ int flags)
{
  /* Initialize the kernel queue */
  if ((backend_fd = kqueue ()) < 0)
    return 0;

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */

  backend_fudge = 0.;
  backend_modify = kqueue_modify;
  backend_poll = kqueue_poll;

  kqueue_eventmax = 64; /* initial number of events receivable per poll */
  kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);

  /* the change list starts empty and grows on demand in kqueue_change */
  kqueue_changes = 0;
  kqueue_changemax = 0;
  kqueue_changecnt = 0;

  return EVBACKEND_KQUEUE;
}
|
165
|
+
|
166
|
+
/* free the kqueue backend's event and change buffers.
   NOTE(review): backend_fd is not closed here - presumably the generic
   loop teardown in ev.c does that; confirm there. */
void inline_size
kqueue_destroy (EV_P)
{
  ev_free (kqueue_events);
  ev_free (kqueue_changes);
}
|
172
|
+
|
173
|
+
/* called in the child after fork: kqueue descriptors are not usefully
   inherited across fork, so recreate the queue and re-register fds. */
void inline_size
kqueue_fork (EV_P)
{
  close (backend_fd);

  /* retry until kqueue () succeeds */
  while ((backend_fd = kqueue ()) < 0)
    syserr ("(libev) kqueue");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

  /* re-register interest in fds */
  fd_rearm_all (EV_A);
}
|
186
|
+
|
data/ext/libev/ev_poll.c
ADDED
@@ -0,0 +1,127 @@
|
|
1
|
+
/*
|
2
|
+
* libev poll fd activity backend
|
3
|
+
*
|
4
|
+
* Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
|
5
|
+
* All rights reserved.
|
6
|
+
*
|
7
|
+
* Redistribution and use in source and binary forms, with or without
|
8
|
+
* modification, are permitted provided that the following conditions are
|
9
|
+
* met:
|
10
|
+
*
|
11
|
+
* * Redistributions of source code must retain the above copyright
|
12
|
+
* notice, this list of conditions and the following disclaimer.
|
13
|
+
*
|
14
|
+
* * Redistributions in binary form must reproduce the above
|
15
|
+
* copyright notice, this list of conditions and the following
|
16
|
+
* disclaimer in the documentation and/or other materials provided
|
17
|
+
* with the distribution.
|
18
|
+
*
|
19
|
+
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20
|
+
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21
|
+
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22
|
+
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23
|
+
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24
|
+
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
25
|
+
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
26
|
+
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
27
|
+
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
28
|
+
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29
|
+
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30
|
+
*/
|
31
|
+
|
32
|
+
#include <poll.h>
|
33
|
+
|
34
|
+
void inline_size
|
35
|
+
pollidx_init (int *base, int count)
|
36
|
+
{
|
37
|
+
while (count--)
|
38
|
+
*base++ = -1;
|
39
|
+
}
|
40
|
+
|
41
|
+
/* change interest in events for fd; maintains a dense array of pollfds
   (polls) plus an fd -> array-index map (pollidxs, -1 = not present). */
static void
poll_modify (EV_P_ int fd, int oev, int nev)
{
  int idx;

  if (oev == nev)
    return;

  /* make sure the index map covers fd; new slots are initialised to -1 */
  array_needsize (int, pollidxs, pollidxmax, fd + 1, pollidx_init);

  idx = pollidxs [fd];

  if (idx < 0) /* need to allocate a new pollfd */
    {
      pollidxs [fd] = idx = pollcnt++;
      array_needsize (struct pollfd, polls, pollmax, pollcnt, EMPTY2);
      polls [idx].fd = fd;
    }

  assert (polls [idx].fd == fd);

  if (nev)
    polls [idx].events =
    (nev & EV_READ ? POLLIN : 0)
    | (nev & EV_WRITE ? POLLOUT : 0);
  else /* remove pollfd */
    {
      pollidxs [fd] = -1;

      /* swap-delete: move the last pollfd into the vacated slot and
         fix up its index-map entry, keeping the array dense */
      if (expect_true (idx < --pollcnt))
        {
          polls [idx] = polls [pollcnt];
          pollidxs [polls [idx].fd] = idx;
        }
    }
}
|
77
|
+
|
78
|
+
/* wait for activity on the registered pollfds and dispatch events.
   timeout is fractional seconds, converted to milliseconds rounding up. */
static void
poll_poll (EV_P_ ev_tstamp timeout)
{
  int i;
  int res = poll (polls, pollcnt, (int)ceil (timeout * 1000.));

  if (expect_false (res < 0))
    {
      /* NOTE(review): fd_ebadf/fd_enomem presumably scan and recover
         bad fds - confirm against ev.c */
      if (errno == EBADF)
        fd_ebadf (EV_A);
      else if (errno == ENOMEM && !syserr_cb)
        fd_enomem (EV_A);
      else if (errno != EINTR)
        syserr ("(libev) poll");

      return;
    }

  /* poll gives no way to find active entries directly, so scan them all */
  for (i = 0; i < pollcnt; ++i)
    if (expect_false (polls [i].revents & POLLNVAL))
      fd_kill (EV_A_ polls [i].fd); /* invalid fd: flag it dead */
    else
      /* POLLERR/POLLHUP are folded into both read and write readiness */
      fd_event (
        EV_A_
        polls [i].fd,
        (polls [i].revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
        | (polls [i].revents & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
      );
}
|
107
|
+
|
108
|
+
/* bring up the poll(2) backend; cannot fail, returns EVBACKEND_POLL. */
int inline_size
poll_init (EV_P_ int flags)
{
  backend_fudge = 0.; /* posix says this is zero */
  backend_modify = poll_modify;
  backend_poll = poll_poll;

  /* both arrays start empty and grow on demand in poll_modify */
  pollidxs = 0; pollidxmax = 0;
  polls = 0; pollmax = 0; pollcnt = 0;

  return EVBACKEND_POLL;
}
|
120
|
+
|
121
|
+
/* free the poll backend's arrays.
   NOTE(review): assumes ev_free tolerates a null pointer (the arrays
   may never have been allocated) - confirm against ev.c */
void inline_size
poll_destroy (EV_P)
{
  ev_free (pollidxs);
  ev_free (polls);
}
|
127
|
+
|