eventmachine-le 1.1.0.beta.1
- data/.gitignore +21 -0
- data/.yardopts +7 -0
- data/GNU +281 -0
- data/LICENSE +60 -0
- data/README.md +80 -0
- data/Rakefile +19 -0
- data/eventmachine-le.gemspec +42 -0
- data/ext/binder.cpp +124 -0
- data/ext/binder.h +46 -0
- data/ext/cmain.cpp +841 -0
- data/ext/ed.cpp +1995 -0
- data/ext/ed.h +424 -0
- data/ext/em.cpp +2377 -0
- data/ext/em.h +243 -0
- data/ext/eventmachine.h +126 -0
- data/ext/extconf.rb +166 -0
- data/ext/fastfilereader/extconf.rb +94 -0
- data/ext/fastfilereader/mapper.cpp +214 -0
- data/ext/fastfilereader/mapper.h +59 -0
- data/ext/fastfilereader/rubymain.cpp +127 -0
- data/ext/kb.cpp +79 -0
- data/ext/page.cpp +107 -0
- data/ext/page.h +51 -0
- data/ext/pipe.cpp +347 -0
- data/ext/project.h +155 -0
- data/ext/rubymain.cpp +1269 -0
- data/ext/ssl.cpp +468 -0
- data/ext/ssl.h +94 -0
- data/lib/em/buftok.rb +110 -0
- data/lib/em/callback.rb +58 -0
- data/lib/em/channel.rb +64 -0
- data/lib/em/completion.rb +304 -0
- data/lib/em/connection.rb +728 -0
- data/lib/em/deferrable.rb +210 -0
- data/lib/em/deferrable/pool.rb +2 -0
- data/lib/em/file_watch.rb +73 -0
- data/lib/em/future.rb +61 -0
- data/lib/em/iterator.rb +313 -0
- data/lib/em/messages.rb +66 -0
- data/lib/em/pool.rb +151 -0
- data/lib/em/process_watch.rb +45 -0
- data/lib/em/processes.rb +123 -0
- data/lib/em/protocols.rb +37 -0
- data/lib/em/protocols/header_and_content.rb +138 -0
- data/lib/em/protocols/httpclient.rb +279 -0
- data/lib/em/protocols/httpclient2.rb +600 -0
- data/lib/em/protocols/line_and_text.rb +125 -0
- data/lib/em/protocols/line_protocol.rb +29 -0
- data/lib/em/protocols/linetext2.rb +161 -0
- data/lib/em/protocols/memcache.rb +331 -0
- data/lib/em/protocols/object_protocol.rb +46 -0
- data/lib/em/protocols/postgres3.rb +246 -0
- data/lib/em/protocols/saslauth.rb +175 -0
- data/lib/em/protocols/smtpclient.rb +365 -0
- data/lib/em/protocols/smtpserver.rb +663 -0
- data/lib/em/protocols/socks4.rb +66 -0
- data/lib/em/protocols/stomp.rb +202 -0
- data/lib/em/protocols/tcptest.rb +54 -0
- data/lib/em/queue.rb +71 -0
- data/lib/em/resolver.rb +195 -0
- data/lib/em/spawnable.rb +84 -0
- data/lib/em/streamer.rb +118 -0
- data/lib/em/threaded_resource.rb +90 -0
- data/lib/em/tick_loop.rb +85 -0
- data/lib/em/timers.rb +106 -0
- data/lib/em/version.rb +3 -0
- data/lib/eventmachine-le.rb +10 -0
- data/lib/eventmachine.rb +1548 -0
- data/rakelib/cpp.rake_example +77 -0
- data/rakelib/package.rake +98 -0
- data/rakelib/test.rake +8 -0
- data/tests/client.crt +31 -0
- data/tests/client.key +51 -0
- data/tests/em_test_helper.rb +143 -0
- data/tests/test_attach.rb +148 -0
- data/tests/test_basic.rb +294 -0
- data/tests/test_channel.rb +62 -0
- data/tests/test_completion.rb +177 -0
- data/tests/test_connection_count.rb +33 -0
- data/tests/test_defer.rb +18 -0
- data/tests/test_deferrable.rb +35 -0
- data/tests/test_epoll.rb +134 -0
- data/tests/test_error_handler.rb +38 -0
- data/tests/test_exc.rb +28 -0
- data/tests/test_file_watch.rb +65 -0
- data/tests/test_futures.rb +170 -0
- data/tests/test_get_sock_opt.rb +37 -0
- data/tests/test_handler_check.rb +35 -0
- data/tests/test_hc.rb +155 -0
- data/tests/test_httpclient.rb +190 -0
- data/tests/test_httpclient2.rb +128 -0
- data/tests/test_inactivity_timeout.rb +54 -0
- data/tests/test_ipv4.rb +125 -0
- data/tests/test_ipv6.rb +131 -0
- data/tests/test_iterator.rb +110 -0
- data/tests/test_kb.rb +34 -0
- data/tests/test_line_protocol.rb +33 -0
- data/tests/test_ltp.rb +138 -0
- data/tests/test_ltp2.rb +288 -0
- data/tests/test_next_tick.rb +104 -0
- data/tests/test_object_protocol.rb +36 -0
- data/tests/test_pause.rb +78 -0
- data/tests/test_pending_connect_timeout.rb +52 -0
- data/tests/test_pool.rb +196 -0
- data/tests/test_process_watch.rb +48 -0
- data/tests/test_processes.rb +133 -0
- data/tests/test_proxy_connection.rb +168 -0
- data/tests/test_pure.rb +88 -0
- data/tests/test_queue.rb +50 -0
- data/tests/test_resolver.rb +55 -0
- data/tests/test_running.rb +14 -0
- data/tests/test_sasl.rb +47 -0
- data/tests/test_send_file.rb +217 -0
- data/tests/test_servers.rb +33 -0
- data/tests/test_set_sock_opt.rb +41 -0
- data/tests/test_shutdown_hooks.rb +23 -0
- data/tests/test_smtpclient.rb +55 -0
- data/tests/test_smtpserver.rb +120 -0
- data/tests/test_spawn.rb +293 -0
- data/tests/test_ssl_args.rb +78 -0
- data/tests/test_ssl_methods.rb +48 -0
- data/tests/test_ssl_verify.rb +82 -0
- data/tests/test_threaded_resource.rb +55 -0
- data/tests/test_tick_loop.rb +59 -0
- data/tests/test_timers.rb +180 -0
- data/tests/test_ud.rb +8 -0
- data/tests/test_udp46.rb +53 -0
- data/tests/test_unbind_reason.rb +48 -0
- metadata +390 -0
data/ext/ed.cpp
ADDED
@@ -0,0 +1,1995 @@
|
|
1
|
+
/*****************************************************************************
|
2
|
+
|
3
|
+
$Id$
|
4
|
+
|
5
|
+
File: ed.cpp
|
6
|
+
Date: 06Apr06
|
7
|
+
|
8
|
+
Copyright (C) 2006-07 by Francis Cianfrocca. All Rights Reserved.
|
9
|
+
Gmail: blackhedd
|
10
|
+
|
11
|
+
This program is free software; you can redistribute it and/or modify
|
12
|
+
it under the terms of either: 1) the GNU General Public License
|
13
|
+
as published by the Free Software Foundation; either version 2 of the
|
14
|
+
License, or (at your option) any later version; or 2) Ruby's License.
|
15
|
+
|
16
|
+
See the file COPYING for complete licensing information.
|
17
|
+
|
18
|
+
*****************************************************************************/
|
19
|
+
|
20
|
+
#include "project.h"
|
21
|
+
|
22
|
+
|
23
|
+
|
24
|
+
/********************
|
25
|
+
SetSocketNonblocking
|
26
|
+
********************/
|
27
|
+
|
28
|
+
bool SetSocketNonblocking (SOCKET sd)
|
29
|
+
{
|
30
|
+
#ifdef OS_UNIX
|
31
|
+
int val = fcntl (sd, F_GETFL, 0);
|
32
|
+
return (fcntl (sd, F_SETFL, val | O_NONBLOCK) != SOCKET_ERROR) ? true : false;
|
33
|
+
#endif
|
34
|
+
|
35
|
+
#ifdef OS_WIN32
|
36
|
+
#ifdef BUILD_FOR_RUBY
|
37
|
+
// 14Jun09 Ruby provides its own wrappers for ioctlsocket. On 1.8 this is a simple wrapper,
|
38
|
+
// however, 1.9 keeps its own state about the socket.
|
39
|
+
// NOTE: F_GETFL is not supported
|
40
|
+
return (fcntl (sd, F_SETFL, O_NONBLOCK) == 0) ? true : false;
|
41
|
+
#else
|
42
|
+
unsigned long one = 1;
|
43
|
+
return (ioctlsocket (sd, FIONBIO, &one) == 0) ? true : false;
|
44
|
+
#endif
|
45
|
+
#endif
|
46
|
+
}
|
47
|
+
|
48
|
+
|
49
|
+
/****************************************
|
50
|
+
EventableDescriptor::EventableDescriptor
|
51
|
+
****************************************/
|
52
|
+
|
53
|
+
EventableDescriptor::EventableDescriptor (int sd, EventMachine_t *em, bool autoclose):
|
54
|
+
bAutoClose (autoclose),
|
55
|
+
bCloseNow (false),
|
56
|
+
bCloseAfterWriting (false),
|
57
|
+
MySocket (sd),
|
58
|
+
bAttached (false),
|
59
|
+
bWatchOnly (false),
|
60
|
+
EventCallback (NULL),
|
61
|
+
bCallbackUnbind (true),
|
62
|
+
UnbindReasonCode (0),
|
63
|
+
ProxyTarget(NULL),
|
64
|
+
ProxiedFrom(NULL),
|
65
|
+
MaxOutboundBufSize(0),
|
66
|
+
MyEventMachine (em),
|
67
|
+
PendingConnectTimeout(20000000),
|
68
|
+
InactivityTimeout (0),
|
69
|
+
bPaused (false)
|
70
|
+
{
|
71
|
+
/* There are three ways to close a socket, all of which should
|
72
|
+
* automatically signal to the event machine that this object
|
73
|
+
* should be removed from the polling scheduler.
|
74
|
+
* First is a hard close, intended for bad errors or possible
|
75
|
+
* security violations. It immediately closes the connection
|
76
|
+
* and puts this object into an error state.
|
77
|
+
* Second is to set bCloseNow, which will cause the event machine
|
78
|
+
* to delete this object (and thus close the connection in our
|
79
|
+
* destructor) the next chance it gets. bCloseNow also inhibits
|
80
|
+
* the writing of new data on the socket (but not necessarily
|
81
|
+
* the reading of new data).
|
82
|
+
* The third way is to set bCloseAfterWriting, which inhibits
|
83
|
+
* the writing of new data and converts to bCloseNow as soon
|
84
|
+
* as everything in the outbound queue has been written.
|
85
|
+
* bCloseAfterWriting is really for use only by protocol handlers
|
86
|
+
* (for example, HTTP writes an HTML page and then closes the
|
87
|
+
* connection). All of the error states we generate internally
|
88
|
+
* cause an immediate close to be scheduled, which may have the
|
89
|
+
* effect of discarding outbound data.
|
90
|
+
*/
|
91
|
+
|
92
|
+
if (sd == INVALID_SOCKET)
|
93
|
+
throw std::runtime_error ("bad eventable descriptor");
|
94
|
+
if (MyEventMachine == NULL)
|
95
|
+
throw std::runtime_error ("bad em in eventable descriptor");
|
96
|
+
CreatedAt = MyEventMachine->GetCurrentLoopTime();
|
97
|
+
|
98
|
+
#ifdef HAVE_EPOLL
|
99
|
+
EpollEvent.events = 0;
|
100
|
+
EpollEvent.data.ptr = this;
|
101
|
+
#endif
|
102
|
+
LastActivity = MyEventMachine->GetCurrentLoopTime();
|
103
|
+
}
|
104
|
+
|
105
|
+
|
106
|
+
/*****************************************
|
107
|
+
EventableDescriptor::~EventableDescriptor
|
108
|
+
*****************************************/
|
109
|
+
|
110
|
+
EventableDescriptor::~EventableDescriptor()
|
111
|
+
{
|
112
|
+
if (NextHeartbeat)
|
113
|
+
MyEventMachine->ClearHeartbeat(NextHeartbeat, this);
|
114
|
+
if (EventCallback && bCallbackUnbind)
|
115
|
+
(*EventCallback)(GetBinding(), EM_CONNECTION_UNBOUND, NULL, UnbindReasonCode);
|
116
|
+
if (ProxiedFrom) {
|
117
|
+
(*EventCallback)(ProxiedFrom->GetBinding(), EM_PROXY_TARGET_UNBOUND, NULL, 0);
|
118
|
+
ProxiedFrom->StopProxy();
|
119
|
+
}
|
120
|
+
MyEventMachine->NumCloseScheduled--;
|
121
|
+
StopProxy();
|
122
|
+
if (bAutoClose) {
|
123
|
+
Close();
|
124
|
+
}
|
125
|
+
}
|
126
|
+
|
127
|
+
|
128
|
+
/*************************************
|
129
|
+
EventableDescriptor::SetEventCallback
|
130
|
+
*************************************/
|
131
|
+
|
132
|
+
void EventableDescriptor::SetEventCallback (EMCallback cb)
|
133
|
+
{
|
134
|
+
EventCallback = cb;
|
135
|
+
}
|
136
|
+
|
137
|
+
|
138
|
+
/**************************
|
139
|
+
EventableDescriptor::Close
|
140
|
+
**************************/
|
141
|
+
|
142
|
+
void EventableDescriptor::Close()
|
143
|
+
{
|
144
|
+
/* EventMachine relies on the fact that when close(fd)
|
145
|
+
* is called that the fd is removed from any
|
146
|
+
* epoll event queues.
|
147
|
+
*
|
148
|
+
* However, this is not *always* the behavior of close(fd)
|
149
|
+
*
|
150
|
+
* See man 4 epoll Q6/A6 and then consider what happens
|
151
|
+
* when using pipes with eventmachine.
|
152
|
+
* (As is often done when communicating with a subprocess)
|
153
|
+
*
|
154
|
+
* The pipes end up looking like:
|
155
|
+
*
|
156
|
+
* ls -l /proc/<pid>/fd
|
157
|
+
* ...
|
158
|
+
* lr-x------ 1 root root 64 2011-08-19 21:31 3 -> pipe:[940970]
|
159
|
+
* l-wx------ 1 root root 64 2011-08-19 21:31 4 -> pipe:[940970]
|
160
|
+
*
|
161
|
+
* This meets the critera from man 4 epoll Q6/A4 for not
|
162
|
+
* removing fds from epoll event queues until all fds
|
163
|
+
* that reference the underlying file have been removed.
|
164
|
+
*
|
165
|
+
* If the EventableDescriptor associated with fd 3 is deleted,
|
166
|
+
* its dtor will call EventableDescriptor::Close(),
|
167
|
+
* which will call ::close(int fd).
|
168
|
+
*
|
169
|
+
* However, unless the EventableDescriptor associated with fd 4 is
|
170
|
+
* also deleted before the next call to epoll_wait, events may fire
|
171
|
+
* for fd 3 that were registered with an already deleted
|
172
|
+
* EventableDescriptor.
|
173
|
+
*
|
174
|
+
* Therefore, it is necessary to notify EventMachine that
|
175
|
+
* the fd associated with this EventableDescriptor is
|
176
|
+
* closing.
|
177
|
+
*
|
178
|
+
* EventMachine also never closes fds for STDIN, STDOUT and
|
179
|
+
* STDERR (0, 1 & 2)
|
180
|
+
*/
|
181
|
+
|
182
|
+
// Close the socket right now. Intended for emergencies.
|
183
|
+
if (MySocket != INVALID_SOCKET) {
|
184
|
+
MyEventMachine->Deregister (this);
|
185
|
+
|
186
|
+
// Do not close STDIN, STDOUT, STDERR
|
187
|
+
if (MySocket > 2 && !bAttached) {
|
188
|
+
shutdown (MySocket, 1);
|
189
|
+
close (MySocket);
|
190
|
+
}
|
191
|
+
|
192
|
+
MySocket = INVALID_SOCKET;
|
193
|
+
}
|
194
|
+
}
|
195
|
+
|
196
|
+
|
197
|
+
/*********************************
|
198
|
+
EventableDescriptor::ShouldDelete
|
199
|
+
*********************************/
|
200
|
+
|
201
|
+
bool EventableDescriptor::ShouldDelete()
|
202
|
+
{
|
203
|
+
/* For use by a socket manager, which needs to know if this object
|
204
|
+
* should be removed from scheduling events and deleted.
|
205
|
+
* Has an immediate close been scheduled, or are we already closed?
|
206
|
+
* If either of these are the case, return true. In theory, the manager will
|
207
|
+
* then delete us, which in turn will make sure the socket is closed.
|
208
|
+
* Note, if bCloseAfterWriting is true, we check a virtual method to see
|
209
|
+
* if there is outbound data to write, and only request a close if there is none.
|
210
|
+
*/
|
211
|
+
|
212
|
+
return ((MySocket == INVALID_SOCKET) || bCloseNow || (bCloseAfterWriting && (GetOutboundDataSize() <= 0)));
|
213
|
+
}
|
214
|
+
|
215
|
+
|
216
|
+
/**********************************
|
217
|
+
EventableDescriptor::ScheduleClose
|
218
|
+
**********************************/
|
219
|
+
|
220
|
+
void EventableDescriptor::ScheduleClose (bool after_writing)
|
221
|
+
{
|
222
|
+
MyEventMachine->NumCloseScheduled++;
|
223
|
+
// KEEP THIS SYNCHRONIZED WITH ::IsCloseScheduled.
|
224
|
+
if (after_writing)
|
225
|
+
bCloseAfterWriting = true;
|
226
|
+
else
|
227
|
+
bCloseNow = true;
|
228
|
+
}
|
229
|
+
|
230
|
+
|
231
|
+
/*************************************
|
232
|
+
EventableDescriptor::IsCloseScheduled
|
233
|
+
*************************************/
|
234
|
+
|
235
|
+
bool EventableDescriptor::IsCloseScheduled()
|
236
|
+
{
|
237
|
+
// KEEP THIS SYNCHRONIZED WITH ::ScheduleClose.
|
238
|
+
return (bCloseNow || bCloseAfterWriting);
|
239
|
+
}
|
240
|
+
|
241
|
+
|
242
|
+
/*******************************
|
243
|
+
EventableDescriptor::StartProxy
|
244
|
+
*******************************/
|
245
|
+
|
246
|
+
void EventableDescriptor::StartProxy(const unsigned long to, const unsigned long bufsize, const unsigned long length)
|
247
|
+
{
|
248
|
+
EventableDescriptor *ed = dynamic_cast <EventableDescriptor*> (Bindable_t::GetObject (to));
|
249
|
+
if (ed) {
|
250
|
+
StopProxy();
|
251
|
+
ProxyTarget = ed;
|
252
|
+
BytesToProxy = length;
|
253
|
+
ed->SetProxiedFrom(this, bufsize);
|
254
|
+
return;
|
255
|
+
}
|
256
|
+
throw std::runtime_error ("Tried to proxy to an invalid descriptor");
|
257
|
+
}
|
258
|
+
|
259
|
+
|
260
|
+
/******************************
|
261
|
+
EventableDescriptor::StopProxy
|
262
|
+
******************************/
|
263
|
+
|
264
|
+
void EventableDescriptor::StopProxy()
|
265
|
+
{
|
266
|
+
if (ProxyTarget) {
|
267
|
+
ProxyTarget->SetProxiedFrom(NULL, 0);
|
268
|
+
ProxyTarget = NULL;
|
269
|
+
}
|
270
|
+
}
|
271
|
+
|
272
|
+
|
273
|
+
/***********************************
|
274
|
+
EventableDescriptor::SetProxiedFrom
|
275
|
+
***********************************/
|
276
|
+
|
277
|
+
void EventableDescriptor::SetProxiedFrom(EventableDescriptor *from, const unsigned long bufsize)
|
278
|
+
{
|
279
|
+
if (from != NULL && ProxiedFrom != NULL)
|
280
|
+
throw std::runtime_error ("Tried to proxy to a busy target");
|
281
|
+
|
282
|
+
ProxiedFrom = from;
|
283
|
+
MaxOutboundBufSize = bufsize;
|
284
|
+
}
|
285
|
+
|
286
|
+
|
287
|
+
/********************************************
|
288
|
+
EventableDescriptor::_GenericInboundDispatch
|
289
|
+
********************************************/
|
290
|
+
|
291
|
+
void EventableDescriptor::_GenericInboundDispatch(const char *buf, int size)
|
292
|
+
{
|
293
|
+
assert(EventCallback);
|
294
|
+
|
295
|
+
if (ProxyTarget) {
|
296
|
+
if (BytesToProxy > 0) {
|
297
|
+
unsigned long proxied = min(BytesToProxy, (unsigned long) size);
|
298
|
+
ProxyTarget->SendOutboundData(buf, proxied);
|
299
|
+
BytesToProxy -= proxied;
|
300
|
+
if (BytesToProxy == 0) {
|
301
|
+
StopProxy();
|
302
|
+
(*EventCallback)(GetBinding(), EM_PROXY_COMPLETED, NULL, 0);
|
303
|
+
if (proxied < size) {
|
304
|
+
(*EventCallback)(GetBinding(), EM_CONNECTION_READ, buf + proxied, size - proxied);
|
305
|
+
}
|
306
|
+
}
|
307
|
+
} else {
|
308
|
+
ProxyTarget->SendOutboundData(buf, size);
|
309
|
+
}
|
310
|
+
} else {
|
311
|
+
(*EventCallback)(GetBinding(), EM_CONNECTION_READ, buf, size);
|
312
|
+
}
|
313
|
+
}
|
314
|
+
|
315
|
+
|
316
|
+
/*********************************************
|
317
|
+
EventableDescriptor::GetPendingConnectTimeout
|
318
|
+
*********************************************/
|
319
|
+
|
320
|
+
uint64_t EventableDescriptor::GetPendingConnectTimeout()
|
321
|
+
{
|
322
|
+
return PendingConnectTimeout / 1000;
|
323
|
+
}
|
324
|
+
|
325
|
+
|
326
|
+
/*********************************************
|
327
|
+
EventableDescriptor::SetPendingConnectTimeout
|
328
|
+
*********************************************/
|
329
|
+
|
330
|
+
int EventableDescriptor::SetPendingConnectTimeout (uint64_t value)
|
331
|
+
{
|
332
|
+
if (value > 0) {
|
333
|
+
PendingConnectTimeout = value * 1000;
|
334
|
+
MyEventMachine->QueueHeartbeat(this);
|
335
|
+
return 1;
|
336
|
+
}
|
337
|
+
return 0;
|
338
|
+
}
|
339
|
+
|
340
|
+
|
341
|
+
/*************************************
|
342
|
+
EventableDescriptor::GetNextHeartbeat
|
343
|
+
*************************************/
|
344
|
+
|
345
|
+
uint64_t EventableDescriptor::GetNextHeartbeat()
|
346
|
+
{
|
347
|
+
if (NextHeartbeat)
|
348
|
+
MyEventMachine->ClearHeartbeat(NextHeartbeat, this);
|
349
|
+
|
350
|
+
NextHeartbeat = 0;
|
351
|
+
|
352
|
+
if (!ShouldDelete()) {
|
353
|
+
uint64_t time_til_next = InactivityTimeout;
|
354
|
+
if (IsConnectPending()) {
|
355
|
+
if (time_til_next == 0 || PendingConnectTimeout < time_til_next)
|
356
|
+
time_til_next = PendingConnectTimeout;
|
357
|
+
}
|
358
|
+
if (time_til_next == 0)
|
359
|
+
return 0;
|
360
|
+
NextHeartbeat = time_til_next + MyEventMachine->GetRealTime();
|
361
|
+
}
|
362
|
+
|
363
|
+
return NextHeartbeat;
|
364
|
+
}
|
365
|
+
|
366
|
+
|
367
|
+
/******************************************
|
368
|
+
ConnectionDescriptor::ConnectionDescriptor
|
369
|
+
******************************************/
|
370
|
+
|
371
|
+
ConnectionDescriptor::ConnectionDescriptor (int sd, EventMachine_t *em):
|
372
|
+
EventableDescriptor (sd, em),
|
373
|
+
bConnectPending (false),
|
374
|
+
bNotifyReadable (false),
|
375
|
+
bNotifyWritable (false),
|
376
|
+
bReadAttemptedAfterClose (false),
|
377
|
+
bWriteAttemptedAfterClose (false),
|
378
|
+
OutboundDataSize (0),
|
379
|
+
#ifdef WITH_SSL
|
380
|
+
SslBox (NULL),
|
381
|
+
bHandshakeSignaled (false),
|
382
|
+
bSslVerifyPeer (false),
|
383
|
+
bSslPeerAccepted(false),
|
384
|
+
#endif
|
385
|
+
#ifdef HAVE_KQUEUE
|
386
|
+
bGotExtraKqueueEvent(false),
|
387
|
+
#endif
|
388
|
+
bIsServer (false)
|
389
|
+
{
|
390
|
+
// 22Jan09: Moved ArmKqueueWriter into SetConnectPending() to fix assertion failure in _WriteOutboundData()
|
391
|
+
// 5May09: Moved EPOLLOUT into SetConnectPending() so it doesn't happen for attached read pipes
|
392
|
+
}
|
393
|
+
|
394
|
+
|
395
|
+
/*******************************************
|
396
|
+
ConnectionDescriptor::~ConnectionDescriptor
|
397
|
+
*******************************************/
|
398
|
+
|
399
|
+
ConnectionDescriptor::~ConnectionDescriptor()
|
400
|
+
{
|
401
|
+
// Run down any stranded outbound data.
|
402
|
+
for (size_t i=0; i < OutboundPages.size(); i++)
|
403
|
+
OutboundPages[i].Free();
|
404
|
+
|
405
|
+
#ifdef WITH_SSL
|
406
|
+
if (SslBox)
|
407
|
+
delete SslBox;
|
408
|
+
#endif
|
409
|
+
}
|
410
|
+
|
411
|
+
|
412
|
+
/***********************************
|
413
|
+
ConnectionDescriptor::_UpdateEvents
|
414
|
+
************************************/
|
415
|
+
|
416
|
+
void ConnectionDescriptor::_UpdateEvents()
|
417
|
+
{
|
418
|
+
_UpdateEvents(true, true);
|
419
|
+
}
|
420
|
+
|
421
|
+
void ConnectionDescriptor::_UpdateEvents(bool read, bool write)
|
422
|
+
{
|
423
|
+
if (MySocket == INVALID_SOCKET)
|
424
|
+
return;
|
425
|
+
|
426
|
+
#ifdef HAVE_EPOLL
|
427
|
+
unsigned int old = EpollEvent.events;
|
428
|
+
|
429
|
+
if (read) {
|
430
|
+
if (SelectForRead())
|
431
|
+
EpollEvent.events |= EPOLLIN;
|
432
|
+
else
|
433
|
+
EpollEvent.events &= ~EPOLLIN;
|
434
|
+
}
|
435
|
+
|
436
|
+
if (write) {
|
437
|
+
if (SelectForWrite())
|
438
|
+
EpollEvent.events |= EPOLLOUT;
|
439
|
+
else
|
440
|
+
EpollEvent.events &= ~EPOLLOUT;
|
441
|
+
}
|
442
|
+
|
443
|
+
if (old != EpollEvent.events)
|
444
|
+
MyEventMachine->Modify (this);
|
445
|
+
#endif
|
446
|
+
|
447
|
+
#ifdef HAVE_KQUEUE
|
448
|
+
if (read && SelectForRead())
|
449
|
+
MyEventMachine->ArmKqueueReader (this);
|
450
|
+
if (write && SelectForWrite())
|
451
|
+
MyEventMachine->ArmKqueueWriter (this);
|
452
|
+
#endif
|
453
|
+
}
|
454
|
+
|
455
|
+
/***************************************
|
456
|
+
ConnectionDescriptor::SetConnectPending
|
457
|
+
****************************************/
|
458
|
+
|
459
|
+
void ConnectionDescriptor::SetConnectPending(bool f)
|
460
|
+
{
|
461
|
+
bConnectPending = f;
|
462
|
+
_UpdateEvents();
|
463
|
+
}
|
464
|
+
|
465
|
+
|
466
|
+
/**********************************
|
467
|
+
ConnectionDescriptor::SetAttached
|
468
|
+
***********************************/
|
469
|
+
|
470
|
+
void ConnectionDescriptor::SetAttached(bool state)
|
471
|
+
{
|
472
|
+
bAttached = state;
|
473
|
+
}
|
474
|
+
|
475
|
+
|
476
|
+
/**********************************
|
477
|
+
ConnectionDescriptor::SetWatchOnly
|
478
|
+
***********************************/
|
479
|
+
|
480
|
+
void ConnectionDescriptor::SetWatchOnly(bool watching)
|
481
|
+
{
|
482
|
+
bWatchOnly = watching;
|
483
|
+
_UpdateEvents();
|
484
|
+
}
|
485
|
+
|
486
|
+
|
487
|
+
/*********************************
|
488
|
+
ConnectionDescriptor::HandleError
|
489
|
+
*********************************/
|
490
|
+
|
491
|
+
void ConnectionDescriptor::HandleError()
|
492
|
+
{
|
493
|
+
if (bWatchOnly) {
|
494
|
+
// An EPOLLHUP | EPOLLIN condition will call Read() before HandleError(), in which case the
|
495
|
+
// socket is already detached and invalid, so we don't need to do anything.
|
496
|
+
if (MySocket == INVALID_SOCKET) return;
|
497
|
+
|
498
|
+
// HandleError() is called on WatchOnly descriptors by the epoll reactor
|
499
|
+
// when it gets a EPOLLERR | EPOLLHUP. Usually this would show up as a readable and
|
500
|
+
// writable event on other reactors, so we have to fire those events ourselves.
|
501
|
+
if (bNotifyReadable) Read();
|
502
|
+
if (bNotifyWritable) Write();
|
503
|
+
} else {
|
504
|
+
ScheduleClose (false);
|
505
|
+
}
|
506
|
+
}
|
507
|
+
|
508
|
+
|
509
|
+
/***********************************
|
510
|
+
ConnectionDescriptor::ScheduleClose
|
511
|
+
***********************************/
|
512
|
+
|
513
|
+
void ConnectionDescriptor::ScheduleClose (bool after_writing)
|
514
|
+
{
|
515
|
+
if (bWatchOnly)
|
516
|
+
throw std::runtime_error ("cannot close 'watch only' connections");
|
517
|
+
|
518
|
+
EventableDescriptor::ScheduleClose(after_writing);
|
519
|
+
}
|
520
|
+
|
521
|
+
|
522
|
+
/***************************************
|
523
|
+
ConnectionDescriptor::SetNotifyReadable
|
524
|
+
****************************************/
|
525
|
+
|
526
|
+
void ConnectionDescriptor::SetNotifyReadable(bool readable)
|
527
|
+
{
|
528
|
+
if (!bWatchOnly)
|
529
|
+
throw std::runtime_error ("notify_readable must be on 'watch only' connections");
|
530
|
+
|
531
|
+
bNotifyReadable = readable;
|
532
|
+
_UpdateEvents(true, false);
|
533
|
+
}
|
534
|
+
|
535
|
+
|
536
|
+
/***************************************
|
537
|
+
ConnectionDescriptor::SetNotifyWritable
|
538
|
+
****************************************/
|
539
|
+
|
540
|
+
void ConnectionDescriptor::SetNotifyWritable(bool writable)
|
541
|
+
{
|
542
|
+
if (!bWatchOnly)
|
543
|
+
throw std::runtime_error ("notify_writable must be on 'watch only' connections");
|
544
|
+
|
545
|
+
bNotifyWritable = writable;
|
546
|
+
_UpdateEvents(false, true);
|
547
|
+
}
|
548
|
+
|
549
|
+
|
550
|
+
/**************************************
|
551
|
+
ConnectionDescriptor::SendOutboundData
|
552
|
+
**************************************/
|
553
|
+
|
554
|
+
int ConnectionDescriptor::SendOutboundData (const char *data, int length)
|
555
|
+
{
|
556
|
+
if (bWatchOnly)
|
557
|
+
throw std::runtime_error ("cannot send data on a 'watch only' connection");
|
558
|
+
|
559
|
+
if (ProxiedFrom && MaxOutboundBufSize && (unsigned int)(GetOutboundDataSize() + length) > MaxOutboundBufSize)
|
560
|
+
ProxiedFrom->Pause();
|
561
|
+
|
562
|
+
#ifdef WITH_SSL
|
563
|
+
if (SslBox) {
|
564
|
+
if (length > 0) {
|
565
|
+
int w = SslBox->PutPlaintext (data, length);
|
566
|
+
if (w < 0)
|
567
|
+
ScheduleClose (false);
|
568
|
+
else
|
569
|
+
_DispatchCiphertext();
|
570
|
+
}
|
571
|
+
// TODO: What's the correct return value?
|
572
|
+
return 1; // That's a wild guess, almost certainly wrong.
|
573
|
+
}
|
574
|
+
else
|
575
|
+
#endif
|
576
|
+
return _SendRawOutboundData (data, length);
|
577
|
+
}
|
578
|
+
|
579
|
+
|
580
|
+
|
581
|
+
/******************************************
|
582
|
+
ConnectionDescriptor::_SendRawOutboundData
|
583
|
+
******************************************/
|
584
|
+
|
585
|
+
int ConnectionDescriptor::_SendRawOutboundData (const char *data, int length)
|
586
|
+
{
|
587
|
+
/* This internal method is called to schedule bytes that
|
588
|
+
* will be sent out to the remote peer.
|
589
|
+
* It's not directly accessed by the caller, who hits ::SendOutboundData,
|
590
|
+
* which may or may not filter or encrypt the caller's data before
|
591
|
+
* sending it here.
|
592
|
+
*/
|
593
|
+
|
594
|
+
// Highly naive and incomplete implementation.
|
595
|
+
// There's no throttle for runaways (which should abort only this connection
|
596
|
+
// and not the whole process), and no coalescing of small pages.
|
597
|
+
// (Well, not so bad, small pages are coalesced in ::Write)
|
598
|
+
|
599
|
+
if (IsCloseScheduled())
|
600
|
+
return 0;
|
601
|
+
|
602
|
+
// 25Mar10: Ignore 0 length packets as they are not meaningful in TCP (as opposed to UDP)
|
603
|
+
// and can cause the assert(nbytes>0) to fail when OutboundPages has a bunch of 0 length pages.
|
604
|
+
if (length == 0)
|
605
|
+
return 0;
|
606
|
+
|
607
|
+
if (!data && (length > 0))
|
608
|
+
throw std::runtime_error ("bad outbound data");
|
609
|
+
char *buffer = (char *) malloc (length + 1);
|
610
|
+
if (!buffer)
|
611
|
+
throw std::runtime_error ("no allocation for outbound data");
|
612
|
+
|
613
|
+
memcpy (buffer, data, length);
|
614
|
+
buffer [length] = 0;
|
615
|
+
OutboundPages.push_back (OutboundPage (buffer, length));
|
616
|
+
OutboundDataSize += length;
|
617
|
+
|
618
|
+
_UpdateEvents(false, true);
|
619
|
+
|
620
|
+
return length;
|
621
|
+
}
|
622
|
+
|
623
|
+
|
624
|
+
|
625
|
+
/***********************************
|
626
|
+
ConnectionDescriptor::SelectForRead
|
627
|
+
***********************************/
|
628
|
+
|
629
|
+
bool ConnectionDescriptor::SelectForRead()
|
630
|
+
{
|
631
|
+
/* A connection descriptor is always scheduled for read,
|
632
|
+
* UNLESS it's in a pending-connect state.
|
633
|
+
* On Linux, unlike Unix, a nonblocking socket on which
|
634
|
+
* connect has been called, does NOT necessarily select
|
635
|
+
* both readable and writable in case of error.
|
636
|
+
* The socket will select writable when the disposition
|
637
|
+
* of the connect is known. On the other hand, a socket
|
638
|
+
* which successfully connects and selects writable may
|
639
|
+
* indeed have some data available on it, so it will
|
640
|
+
* select readable in that case, violating expectations!
|
641
|
+
* So we will not poll for readability until the socket
|
642
|
+
* is known to be in a connected state.
|
643
|
+
*/
|
644
|
+
|
645
|
+
if (bPaused)
|
646
|
+
return false;
|
647
|
+
else if (bConnectPending)
|
648
|
+
return false;
|
649
|
+
else if (bWatchOnly)
|
650
|
+
return bNotifyReadable ? true : false;
|
651
|
+
else
|
652
|
+
return true;
|
653
|
+
}
|
654
|
+
|
655
|
+
|
656
|
+
/************************************
|
657
|
+
ConnectionDescriptor::SelectForWrite
|
658
|
+
************************************/
|
659
|
+
|
660
|
+
bool ConnectionDescriptor::SelectForWrite()
|
661
|
+
{
|
662
|
+
/* Cf the notes under SelectForRead.
|
663
|
+
* In a pending-connect state, we ALWAYS select for writable.
|
664
|
+
* In a normal state, we only select for writable when we
|
665
|
+
* have outgoing data to send.
|
666
|
+
*/
|
667
|
+
|
668
|
+
if (bPaused)
|
669
|
+
return false;
|
670
|
+
else if (bConnectPending)
|
671
|
+
return true;
|
672
|
+
else if (bWatchOnly)
|
673
|
+
return bNotifyWritable ? true : false;
|
674
|
+
else
|
675
|
+
return (GetOutboundDataSize() > 0);
|
676
|
+
}
|
677
|
+
|
678
|
+
/***************************
|
679
|
+
ConnectionDescriptor::Pause
|
680
|
+
***************************/
|
681
|
+
|
682
|
+
bool ConnectionDescriptor::Pause()
|
683
|
+
{
|
684
|
+
if (bWatchOnly)
|
685
|
+
throw std::runtime_error ("cannot pause/resume 'watch only' connections, set notify readable/writable instead");
|
686
|
+
|
687
|
+
bool old = bPaused;
|
688
|
+
bPaused = true;
|
689
|
+
_UpdateEvents();
|
690
|
+
return old == false;
|
691
|
+
}
|
692
|
+
|
693
|
+
/****************************
|
694
|
+
ConnectionDescriptor::Resume
|
695
|
+
****************************/
|
696
|
+
|
697
|
+
bool ConnectionDescriptor::Resume()
|
698
|
+
{
|
699
|
+
if (bWatchOnly)
|
700
|
+
throw std::runtime_error ("cannot pause/resume 'watch only' connections, set notify readable/writable instead");
|
701
|
+
|
702
|
+
bool old = bPaused;
|
703
|
+
bPaused = false;
|
704
|
+
_UpdateEvents();
|
705
|
+
return old == true;
|
706
|
+
}
|
707
|
+
|
708
|
+
/**************************
|
709
|
+
ConnectionDescriptor::Read
|
710
|
+
**************************/
|
711
|
+
|
712
|
+
void ConnectionDescriptor::Read()
|
713
|
+
{
|
714
|
+
/* Read and dispatch data on a socket that has selected readable.
|
715
|
+
* It's theoretically possible to get and dispatch incoming data on
|
716
|
+
* a socket that has already been scheduled for closing or close-after-writing.
|
717
|
+
* In those cases, we'll leave it up the to protocol handler to "do the
|
718
|
+
* right thing" (which probably means to ignore the incoming data).
|
719
|
+
*
|
720
|
+
* 22Aug06: Chris Ochs reports that on FreeBSD, it's possible to come
|
721
|
+
* here with the socket already closed, after the process receives
|
722
|
+
* a ctrl-C signal (not sure if that's TERM or INT on BSD). The application
|
723
|
+
* was one in which network connections were doing a lot of interleaved reads
|
724
|
+
* and writes.
|
725
|
+
* Since we always write before reading (in order to keep the outbound queues
|
726
|
+
* as light as possible), I think what happened is that an interrupt caused
|
727
|
+
* the socket to be closed in ConnectionDescriptor::Write. We'll then
|
728
|
+
* come here in the same pass through the main event loop, and won't get
|
729
|
+
* cleaned up until immediately after.
|
730
|
+
* We originally asserted that the socket was valid when we got here.
|
731
|
+
* To deal properly with the possibility that we are closed when we get here,
|
732
|
+
* I removed the assert. HOWEVER, the potential for an infinite loop scares me,
|
733
|
+
* so even though this is really clunky, I added a flag to assert that we never
|
734
|
+
* come here more than once after being closed. (FCianfrocca)
|
735
|
+
*/
|
736
|
+
|
737
|
+
int sd = GetSocket();
|
738
|
+
//assert (sd != INVALID_SOCKET); (original, removed 22Aug06)
|
739
|
+
if (sd == INVALID_SOCKET) {
|
740
|
+
assert (!bReadAttemptedAfterClose);
|
741
|
+
bReadAttemptedAfterClose = true;
|
742
|
+
return;
|
743
|
+
}
|
744
|
+
|
745
|
+
if (bWatchOnly) {
|
746
|
+
if (bNotifyReadable && EventCallback)
|
747
|
+
(*EventCallback)(GetBinding(), EM_CONNECTION_NOTIFY_READABLE, NULL, 0);
|
748
|
+
return;
|
749
|
+
}
|
750
|
+
|
751
|
+
LastActivity = MyEventMachine->GetCurrentLoopTime();
|
752
|
+
|
753
|
+
int total_bytes_read = 0;
|
754
|
+
char readbuffer [16 * 1024 + 1];
|
755
|
+
|
756
|
+
for (int i=0; i < 10; i++) {
|
757
|
+
// Don't read just one buffer and then move on. This is faster
|
758
|
+
// if there is a lot of incoming.
|
759
|
+
// But don't read indefinitely. Give other sockets a chance to run.
|
760
|
+
// NOTICE, we're reading one less than the buffer size.
|
761
|
+
// That's so we can put a guard byte at the end of what we send
|
762
|
+
// to user code.
|
763
|
+
|
764
|
+
|
765
|
+
int r = read (sd, readbuffer, sizeof(readbuffer) - 1);
|
766
|
+
int e = errno;
|
767
|
+
//cerr << "<R:" << r << ">";
|
768
|
+
|
769
|
+
if (r > 0) {
|
770
|
+
total_bytes_read += r;
|
771
|
+
|
772
|
+
// Add a null-terminator at the the end of the buffer
|
773
|
+
// that we will send to the callback.
|
774
|
+
// DO NOT EVER CHANGE THIS. We want to explicitly allow users
|
775
|
+
// to be able to depend on this behavior, so they will have
|
776
|
+
// the option to do some things faster. Additionally it's
|
777
|
+
// a security guard against buffer overflows.
|
778
|
+
readbuffer [r] = 0;
|
779
|
+
_DispatchInboundData (readbuffer, r);
|
780
|
+
}
|
781
|
+
else if (r == 0) {
|
782
|
+
break;
|
783
|
+
}
|
784
|
+
else {
|
785
|
+
#ifdef OS_UNIX
|
786
|
+
if ((e != EINPROGRESS) && (e != EWOULDBLOCK) && (e != EAGAIN) && (e != EINTR)) {
|
787
|
+
#endif
|
788
|
+
#ifdef OS_WIN32
|
789
|
+
if ((e != WSAEINPROGRESS) && (e != WSAEWOULDBLOCK)) {
|
790
|
+
#endif
|
791
|
+
// 26Mar11: Previously, all read errors were assumed to be EWOULDBLOCK and ignored.
|
792
|
+
// Now, instead, we call Close() on errors like ECONNRESET and ENOTCONN.
|
793
|
+
UnbindReasonCode = e;
|
794
|
+
Close();
|
795
|
+
break;
|
796
|
+
} else {
|
797
|
+
// Basically a would-block, meaning we've read everything there is to read.
|
798
|
+
break;
|
799
|
+
}
|
800
|
+
}
|
801
|
+
|
802
|
+
}
|
803
|
+
|
804
|
+
|
805
|
+
if (total_bytes_read == 0) {
|
806
|
+
// If we read no data on a socket that selected readable,
|
807
|
+
// it generally means the other end closed the connection gracefully.
|
808
|
+
ScheduleClose (false);
|
809
|
+
//bCloseNow = true;
|
810
|
+
}
|
811
|
+
|
812
|
+
}
|
813
|
+
|
814
|
+
|
815
|
+
|
816
|
+
/******************************************
|
817
|
+
ConnectionDescriptor::_DispatchInboundData
|
818
|
+
******************************************/
|
819
|
+
|
820
|
+
void ConnectionDescriptor::_DispatchInboundData (const char *buffer, int size)
|
821
|
+
{
|
822
|
+
#ifdef WITH_SSL
|
823
|
+
if (SslBox) {
|
824
|
+
SslBox->PutCiphertext (buffer, size);
|
825
|
+
|
826
|
+
int s;
|
827
|
+
char B [2048];
|
828
|
+
while ((s = SslBox->GetPlaintext (B, sizeof(B) - 1)) > 0) {
|
829
|
+
_CheckHandshakeStatus();
|
830
|
+
B [s] = 0;
|
831
|
+
_GenericInboundDispatch(B, s);
|
832
|
+
}
|
833
|
+
|
834
|
+
// If our SSL handshake had a problem, shut down the connection.
|
835
|
+
if (s == -2) {
|
836
|
+
ScheduleClose(false);
|
837
|
+
return;
|
838
|
+
}
|
839
|
+
|
840
|
+
_CheckHandshakeStatus();
|
841
|
+
_DispatchCiphertext();
|
842
|
+
}
|
843
|
+
else {
|
844
|
+
_GenericInboundDispatch(buffer, size);
|
845
|
+
}
|
846
|
+
#endif
|
847
|
+
|
848
|
+
#ifdef WITHOUT_SSL
|
849
|
+
_GenericInboundDispatch(buffer, size);
|
850
|
+
#endif
|
851
|
+
}
|
852
|
+
|
853
|
+
|
854
|
+
|
855
|
+
/*******************************************
|
856
|
+
ConnectionDescriptor::_CheckHandshakeStatus
|
857
|
+
*******************************************/
|
858
|
+
|
859
|
+
void ConnectionDescriptor::_CheckHandshakeStatus()
|
860
|
+
{
|
861
|
+
#ifdef WITH_SSL
|
862
|
+
if (SslBox && (!bHandshakeSignaled) && SslBox->IsHandshakeCompleted()) {
|
863
|
+
bHandshakeSignaled = true;
|
864
|
+
if (EventCallback)
|
865
|
+
(*EventCallback)(GetBinding(), EM_SSL_HANDSHAKE_COMPLETED, NULL, 0);
|
866
|
+
}
|
867
|
+
#endif
|
868
|
+
}
|
869
|
+
|
870
|
+
|
871
|
+
|
872
|
+
/***************************
|
873
|
+
ConnectionDescriptor::Write
|
874
|
+
***************************/
|
875
|
+
|
876
|
+
void ConnectionDescriptor::Write()
|
877
|
+
{
|
878
|
+
/* A socket which is in a pending-connect state will select
|
879
|
+
* writable when the disposition of the connect is known.
|
880
|
+
* At that point, check to be sure there are no errors,
|
881
|
+
* and if none, then promote the socket out of the pending
|
882
|
+
* state.
|
883
|
+
* TODO: I haven't figured out how Windows signals errors on
|
884
|
+
* unconnected sockets. Maybe it does the untraditional but
|
885
|
+
* logical thing and makes the socket selectable for error.
|
886
|
+
* If so, it's unsupported here for the time being, and connect
|
887
|
+
* errors will have to be caught by the timeout mechanism.
|
888
|
+
*/
|
889
|
+
|
890
|
+
if (bConnectPending) {
|
891
|
+
int error;
|
892
|
+
socklen_t len;
|
893
|
+
len = sizeof(error);
|
894
|
+
#ifdef OS_UNIX
|
895
|
+
int o = getsockopt (GetSocket(), SOL_SOCKET, SO_ERROR, &error, &len);
|
896
|
+
#endif
|
897
|
+
#ifdef OS_WIN32
|
898
|
+
int o = getsockopt (GetSocket(), SOL_SOCKET, SO_ERROR, (char*)&error, &len);
|
899
|
+
#endif
|
900
|
+
if ((o == 0) && (error == 0)) {
|
901
|
+
if (EventCallback)
|
902
|
+
(*EventCallback)(GetBinding(), EM_CONNECTION_COMPLETED, "", 0);
|
903
|
+
|
904
|
+
// 5May09: Moved epoll/kqueue read/write arming into SetConnectPending, so it can be called
|
905
|
+
// from EventMachine_t::AttachFD as well.
|
906
|
+
SetConnectPending (false);
|
907
|
+
}
|
908
|
+
else {
|
909
|
+
if (o == 0)
|
910
|
+
UnbindReasonCode = error;
|
911
|
+
ScheduleClose (false);
|
912
|
+
//bCloseNow = true;
|
913
|
+
}
|
914
|
+
}
|
915
|
+
else {
|
916
|
+
|
917
|
+
if (bNotifyWritable) {
|
918
|
+
if (EventCallback)
|
919
|
+
(*EventCallback)(GetBinding(), EM_CONNECTION_NOTIFY_WRITABLE, NULL, 0);
|
920
|
+
|
921
|
+
_UpdateEvents(false, true);
|
922
|
+
return;
|
923
|
+
}
|
924
|
+
|
925
|
+
assert(!bWatchOnly);
|
926
|
+
|
927
|
+
/* 5May09: Kqueue bugs on OSX cause one extra writable event to fire even though we're using
|
928
|
+
EV_ONESHOT. We ignore this extra event once, but only the first time. If it happens again,
|
929
|
+
we should fall through to the assert(nbytes>0) failure to catch any EM bugs which might cause
|
930
|
+
::Write to be called in a busy-loop.
|
931
|
+
*/
|
932
|
+
#ifdef HAVE_KQUEUE
|
933
|
+
if (MyEventMachine->UsingKqueue()) {
|
934
|
+
if (OutboundDataSize == 0 && !bGotExtraKqueueEvent) {
|
935
|
+
bGotExtraKqueueEvent = true;
|
936
|
+
return;
|
937
|
+
} else if (OutboundDataSize > 0) {
|
938
|
+
bGotExtraKqueueEvent = false;
|
939
|
+
}
|
940
|
+
}
|
941
|
+
#endif
|
942
|
+
|
943
|
+
_WriteOutboundData();
|
944
|
+
}
|
945
|
+
}
|
946
|
+
|
947
|
+
|
948
|
+
/****************************************
|
949
|
+
ConnectionDescriptor::_WriteOutboundData
|
950
|
+
****************************************/
|
951
|
+
|
952
|
+
void ConnectionDescriptor::_WriteOutboundData()
|
953
|
+
{
|
954
|
+
/* This is a helper function called by ::Write.
|
955
|
+
* It's possible for a socket to select writable and then no longer
|
956
|
+
* be writable by the time we get around to writing. The kernel might
|
957
|
+
* have used up its available output buffers between the select call
|
958
|
+
* and when we get here. So this condition is not an error.
|
959
|
+
*
|
960
|
+
* 20Jul07, added the same kind of protection against an invalid socket
|
961
|
+
* that is at the top of ::Read. Not entirely how this could happen in
|
962
|
+
* real life (connection-reset from the remote peer, perhaps?), but I'm
|
963
|
+
* doing it to address some reports of crashing under heavy loads.
|
964
|
+
*/
|
965
|
+
|
966
|
+
int sd = GetSocket();
|
967
|
+
//assert (sd != INVALID_SOCKET);
|
968
|
+
if (sd == INVALID_SOCKET) {
|
969
|
+
assert (!bWriteAttemptedAfterClose);
|
970
|
+
bWriteAttemptedAfterClose = true;
|
971
|
+
return;
|
972
|
+
}
|
973
|
+
|
974
|
+
LastActivity = MyEventMachine->GetCurrentLoopTime();
|
975
|
+
size_t nbytes = 0;
|
976
|
+
|
977
|
+
#ifdef HAVE_WRITEV
|
978
|
+
int iovcnt = OutboundPages.size();
|
979
|
+
// Max of 16 outbound pages at a time
|
980
|
+
if (iovcnt > 16) iovcnt = 16;
|
981
|
+
|
982
|
+
#ifdef CC_SUNWspro
|
983
|
+
struct iovec iov[16];
|
984
|
+
#else
|
985
|
+
struct iovec iov[ iovcnt ];
|
986
|
+
#endif
|
987
|
+
|
988
|
+
for(int i = 0; i < iovcnt; i++){
|
989
|
+
OutboundPage *op = &(OutboundPages[i]);
|
990
|
+
#ifdef CC_SUNWspro
|
991
|
+
iov[i].iov_base = (char *)(op->Buffer + op->Offset);
|
992
|
+
#else
|
993
|
+
iov[i].iov_base = (void *)(op->Buffer + op->Offset);
|
994
|
+
#endif
|
995
|
+
iov[i].iov_len = op->Length - op->Offset;
|
996
|
+
|
997
|
+
nbytes += iov[i].iov_len;
|
998
|
+
}
|
999
|
+
#else
|
1000
|
+
char output_buffer [16 * 1024];
|
1001
|
+
|
1002
|
+
while ((OutboundPages.size() > 0) && (nbytes < sizeof(output_buffer))) {
|
1003
|
+
OutboundPage *op = &(OutboundPages[0]);
|
1004
|
+
if ((nbytes + op->Length - op->Offset) < sizeof (output_buffer)) {
|
1005
|
+
memcpy (output_buffer + nbytes, op->Buffer + op->Offset, op->Length - op->Offset);
|
1006
|
+
nbytes += (op->Length - op->Offset);
|
1007
|
+
op->Free();
|
1008
|
+
OutboundPages.pop_front();
|
1009
|
+
}
|
1010
|
+
else {
|
1011
|
+
int len = sizeof(output_buffer) - nbytes;
|
1012
|
+
memcpy (output_buffer + nbytes, op->Buffer + op->Offset, len);
|
1013
|
+
op->Offset += len;
|
1014
|
+
nbytes += len;
|
1015
|
+
}
|
1016
|
+
}
|
1017
|
+
#endif
|
1018
|
+
|
1019
|
+
// We should never have gotten here if there were no data to write,
|
1020
|
+
// so assert that as a sanity check.
|
1021
|
+
// Don't bother to make sure nbytes is less than output_buffer because
|
1022
|
+
// if it were we probably would have crashed already.
|
1023
|
+
assert (nbytes > 0);
|
1024
|
+
|
1025
|
+
assert (GetSocket() != INVALID_SOCKET);
|
1026
|
+
#ifdef HAVE_WRITEV
|
1027
|
+
int bytes_written = writev (GetSocket(), iov, iovcnt);
|
1028
|
+
#else
|
1029
|
+
int bytes_written = write (GetSocket(), output_buffer, nbytes);
|
1030
|
+
#endif
|
1031
|
+
|
1032
|
+
bool err = false;
|
1033
|
+
int e = errno;
|
1034
|
+
if (bytes_written < 0) {
|
1035
|
+
err = true;
|
1036
|
+
bytes_written = 0;
|
1037
|
+
}
|
1038
|
+
|
1039
|
+
assert (bytes_written >= 0);
|
1040
|
+
OutboundDataSize -= bytes_written;
|
1041
|
+
|
1042
|
+
if (ProxiedFrom && MaxOutboundBufSize && (unsigned int)GetOutboundDataSize() < MaxOutboundBufSize && ProxiedFrom->IsPaused())
|
1043
|
+
ProxiedFrom->Resume();
|
1044
|
+
|
1045
|
+
#ifdef HAVE_WRITEV
|
1046
|
+
if (!err) {
|
1047
|
+
unsigned int sent = bytes_written;
|
1048
|
+
deque<OutboundPage>::iterator op = OutboundPages.begin();
|
1049
|
+
|
1050
|
+
for (int i = 0; i < iovcnt; i++) {
|
1051
|
+
if (iov[i].iov_len <= sent) {
|
1052
|
+
// Sent this page in full, free it.
|
1053
|
+
op->Free();
|
1054
|
+
OutboundPages.pop_front();
|
1055
|
+
|
1056
|
+
sent -= iov[i].iov_len;
|
1057
|
+
} else {
|
1058
|
+
// Sent part (or none) of this page, increment offset to send the remainder
|
1059
|
+
op->Offset += sent;
|
1060
|
+
break;
|
1061
|
+
}
|
1062
|
+
|
1063
|
+
// Shouldn't be possible run out of pages before the loop ends
|
1064
|
+
assert(op != OutboundPages.end());
|
1065
|
+
*op++;
|
1066
|
+
}
|
1067
|
+
}
|
1068
|
+
#else
|
1069
|
+
if ((size_t)bytes_written < nbytes) {
|
1070
|
+
int len = nbytes - bytes_written;
|
1071
|
+
char *buffer = (char*) malloc (len + 1);
|
1072
|
+
if (!buffer)
|
1073
|
+
throw std::runtime_error ("bad alloc throwing back data");
|
1074
|
+
memcpy (buffer, output_buffer + bytes_written, len);
|
1075
|
+
buffer [len] = 0;
|
1076
|
+
OutboundPages.push_front (OutboundPage (buffer, len));
|
1077
|
+
}
|
1078
|
+
#endif
|
1079
|
+
|
1080
|
+
_UpdateEvents(false, true);
|
1081
|
+
|
1082
|
+
if (err) {
|
1083
|
+
#ifdef OS_UNIX
|
1084
|
+
if ((e != EINPROGRESS) && (e != EWOULDBLOCK) && (e != EINTR)) {
|
1085
|
+
#endif
|
1086
|
+
#ifdef OS_WIN32
|
1087
|
+
if ((e != WSAEINPROGRESS) && (e != WSAEWOULDBLOCK)) {
|
1088
|
+
#endif
|
1089
|
+
UnbindReasonCode = e;
|
1090
|
+
Close();
|
1091
|
+
}
|
1092
|
+
}
|
1093
|
+
}
|
1094
|
+
|
1095
|
+
|
1096
|
+
/***************************************
|
1097
|
+
ConnectionDescriptor::ReportErrorStatus
|
1098
|
+
***************************************/
|
1099
|
+
|
1100
|
+
int ConnectionDescriptor::ReportErrorStatus()
|
1101
|
+
{
|
1102
|
+
if (MySocket == INVALID_SOCKET) {
|
1103
|
+
return -1;
|
1104
|
+
}
|
1105
|
+
|
1106
|
+
int error;
|
1107
|
+
socklen_t len;
|
1108
|
+
len = sizeof(error);
|
1109
|
+
#ifdef OS_UNIX
|
1110
|
+
int o = getsockopt (GetSocket(), SOL_SOCKET, SO_ERROR, &error, &len);
|
1111
|
+
#endif
|
1112
|
+
#ifdef OS_WIN32
|
1113
|
+
int o = getsockopt (GetSocket(), SOL_SOCKET, SO_ERROR, (char*)&error, &len);
|
1114
|
+
#endif
|
1115
|
+
if ((o == 0) && (error == 0))
|
1116
|
+
return 0;
|
1117
|
+
else if (o == 0)
|
1118
|
+
return error;
|
1119
|
+
else
|
1120
|
+
return -1;
|
1121
|
+
}
|
1122
|
+
|
1123
|
+
|
1124
|
+
/******************************
|
1125
|
+
ConnectionDescriptor::StartTls
|
1126
|
+
******************************/
|
1127
|
+
|
1128
|
+
void ConnectionDescriptor::StartTls()
|
1129
|
+
{
|
1130
|
+
#ifdef WITH_SSL
|
1131
|
+
if (SslBox)
|
1132
|
+
throw std::runtime_error ("SSL/TLS already running on connection");
|
1133
|
+
|
1134
|
+
SslBox = new SslBox_t (bIsServer, PrivateKeyFilename, CertChainFilename, bSslVerifyPeer, GetBinding());
|
1135
|
+
_DispatchCiphertext();
|
1136
|
+
#endif
|
1137
|
+
|
1138
|
+
#ifdef WITHOUT_SSL
|
1139
|
+
throw std::runtime_error ("Encryption not available on this event-machine");
|
1140
|
+
#endif
|
1141
|
+
}
|
1142
|
+
|
1143
|
+
|
1144
|
+
/*********************************
|
1145
|
+
ConnectionDescriptor::SetTlsParms
|
1146
|
+
*********************************/
|
1147
|
+
|
1148
|
+
void ConnectionDescriptor::SetTlsParms (const char *privkey_filename, const char *certchain_filename, bool verify_peer)
|
1149
|
+
{
|
1150
|
+
#ifdef WITH_SSL
|
1151
|
+
if (SslBox)
|
1152
|
+
throw std::runtime_error ("call SetTlsParms before calling StartTls");
|
1153
|
+
if (privkey_filename && *privkey_filename)
|
1154
|
+
PrivateKeyFilename = privkey_filename;
|
1155
|
+
if (certchain_filename && *certchain_filename)
|
1156
|
+
CertChainFilename = certchain_filename;
|
1157
|
+
bSslVerifyPeer = verify_peer;
|
1158
|
+
#endif
|
1159
|
+
|
1160
|
+
#ifdef WITHOUT_SSL
|
1161
|
+
throw std::runtime_error ("Encryption not available on this event-machine");
|
1162
|
+
#endif
|
1163
|
+
}
|
1164
|
+
|
1165
|
+
|
1166
|
+
/*********************************
|
1167
|
+
ConnectionDescriptor::GetPeerCert
|
1168
|
+
*********************************/
|
1169
|
+
|
1170
|
+
#ifdef WITH_SSL
|
1171
|
+
X509 *ConnectionDescriptor::GetPeerCert()
|
1172
|
+
{
|
1173
|
+
if (!SslBox)
|
1174
|
+
throw std::runtime_error ("SSL/TLS not running on this connection");
|
1175
|
+
return SslBox->GetPeerCert();
|
1176
|
+
}
|
1177
|
+
#endif
|
1178
|
+
|
1179
|
+
|
1180
|
+
/***********************************
|
1181
|
+
ConnectionDescriptor::VerifySslPeer
|
1182
|
+
***********************************/
|
1183
|
+
|
1184
|
+
#ifdef WITH_SSL
|
1185
|
+
bool ConnectionDescriptor::VerifySslPeer(const char *cert)
|
1186
|
+
{
|
1187
|
+
bSslPeerAccepted = false;
|
1188
|
+
|
1189
|
+
if (EventCallback)
|
1190
|
+
(*EventCallback)(GetBinding(), EM_SSL_VERIFY, cert, strlen(cert));
|
1191
|
+
|
1192
|
+
return bSslPeerAccepted;
|
1193
|
+
}
|
1194
|
+
#endif
|
1195
|
+
|
1196
|
+
|
1197
|
+
/***********************************
|
1198
|
+
ConnectionDescriptor::AcceptSslPeer
|
1199
|
+
***********************************/
|
1200
|
+
|
1201
|
+
#ifdef WITH_SSL
|
1202
|
+
void ConnectionDescriptor::AcceptSslPeer()
|
1203
|
+
{
|
1204
|
+
bSslPeerAccepted = true;
|
1205
|
+
}
|
1206
|
+
#endif
|
1207
|
+
|
1208
|
+
|
1209
|
+
/*****************************************
|
1210
|
+
ConnectionDescriptor::_DispatchCiphertext
|
1211
|
+
*****************************************/
|
1212
|
+
|
1213
|
+
#ifdef WITH_SSL
|
1214
|
+
void ConnectionDescriptor::_DispatchCiphertext()
|
1215
|
+
{
|
1216
|
+
assert (SslBox);
|
1217
|
+
|
1218
|
+
|
1219
|
+
char BigBuf [2048];
|
1220
|
+
bool did_work;
|
1221
|
+
|
1222
|
+
do {
|
1223
|
+
did_work = false;
|
1224
|
+
|
1225
|
+
// try to drain ciphertext
|
1226
|
+
while (SslBox->CanGetCiphertext()) {
|
1227
|
+
int r = SslBox->GetCiphertext (BigBuf, sizeof(BigBuf));
|
1228
|
+
assert (r > 0);
|
1229
|
+
_SendRawOutboundData (BigBuf, r);
|
1230
|
+
did_work = true;
|
1231
|
+
}
|
1232
|
+
|
1233
|
+
// Pump the SslBox, in case it has queued outgoing plaintext
|
1234
|
+
// This will return >0 if data was written,
|
1235
|
+
// 0 if no data was written, and <0 if there was a fatal error.
|
1236
|
+
bool pump;
|
1237
|
+
do {
|
1238
|
+
pump = false;
|
1239
|
+
int w = SslBox->PutPlaintext (NULL, 0);
|
1240
|
+
if (w > 0) {
|
1241
|
+
did_work = true;
|
1242
|
+
pump = true;
|
1243
|
+
}
|
1244
|
+
else if (w < 0)
|
1245
|
+
ScheduleClose (false);
|
1246
|
+
} while (pump);
|
1247
|
+
|
1248
|
+
// try to put plaintext. INCOMPLETE, doesn't belong here?
|
1249
|
+
// In SendOutboundData, we're spooling plaintext directly
|
1250
|
+
// into SslBox. That may be wrong, we may need to buffer it
|
1251
|
+
// up here!
|
1252
|
+
/*
|
1253
|
+
const char *ptr;
|
1254
|
+
int ptr_length;
|
1255
|
+
while (OutboundPlaintext.GetPage (&ptr, &ptr_length)) {
|
1256
|
+
assert (ptr && (ptr_length > 0));
|
1257
|
+
int w = SslMachine.PutPlaintext (ptr, ptr_length);
|
1258
|
+
if (w > 0) {
|
1259
|
+
OutboundPlaintext.DiscardBytes (w);
|
1260
|
+
did_work = true;
|
1261
|
+
}
|
1262
|
+
else
|
1263
|
+
break;
|
1264
|
+
}
|
1265
|
+
*/
|
1266
|
+
|
1267
|
+
} while (did_work);
|
1268
|
+
|
1269
|
+
}
|
1270
|
+
#endif
|
1271
|
+
|
1272
|
+
|
1273
|
+
|
1274
|
+
/*******************************
|
1275
|
+
ConnectionDescriptor::Heartbeat
|
1276
|
+
*******************************/
|
1277
|
+
|
1278
|
+
void ConnectionDescriptor::Heartbeat()
|
1279
|
+
{
|
1280
|
+
/* Only allow a certain amount of time to go by while waiting
|
1281
|
+
* for a pending connect. If it expires, then kill the socket.
|
1282
|
+
* For a connected socket, close it if its inactivity timer
|
1283
|
+
* has expired.
|
1284
|
+
*/
|
1285
|
+
|
1286
|
+
if (bConnectPending) {
|
1287
|
+
if ((MyEventMachine->GetCurrentLoopTime() - CreatedAt) >= PendingConnectTimeout) {
|
1288
|
+
UnbindReasonCode = ETIMEDOUT;
|
1289
|
+
ScheduleClose (false);
|
1290
|
+
//bCloseNow = true;
|
1291
|
+
}
|
1292
|
+
}
|
1293
|
+
else {
|
1294
|
+
if (InactivityTimeout && ((MyEventMachine->GetCurrentLoopTime() - LastActivity) >= InactivityTimeout)) {
|
1295
|
+
UnbindReasonCode = ETIMEDOUT;
|
1296
|
+
ScheduleClose (false);
|
1297
|
+
//bCloseNow = true;
|
1298
|
+
}
|
1299
|
+
}
|
1300
|
+
}
|
1301
|
+
|
1302
|
+
|
1303
|
+
/****************************************
|
1304
|
+
LoopbreakDescriptor::LoopbreakDescriptor
|
1305
|
+
****************************************/
|
1306
|
+
|
1307
|
+
LoopbreakDescriptor::LoopbreakDescriptor (int sd, EventMachine_t *parent_em):
|
1308
|
+
EventableDescriptor (sd, parent_em)
|
1309
|
+
{
|
1310
|
+
/* This is really bad and ugly. Change someday if possible.
|
1311
|
+
* We have to know about an event-machine (probably the one that owns us),
|
1312
|
+
* so we can pass newly-created connections to it.
|
1313
|
+
*/
|
1314
|
+
|
1315
|
+
bCallbackUnbind = false;
|
1316
|
+
|
1317
|
+
#ifdef HAVE_EPOLL
|
1318
|
+
EpollEvent.events = EPOLLIN;
|
1319
|
+
#endif
|
1320
|
+
#ifdef HAVE_KQUEUE
|
1321
|
+
MyEventMachine->ArmKqueueReader (this);
|
1322
|
+
#endif
|
1323
|
+
}
|
1324
|
+
|
1325
|
+
|
1326
|
+
|
1327
|
+
|
1328
|
+
/*************************
|
1329
|
+
LoopbreakDescriptor::Read
|
1330
|
+
*************************/
|
1331
|
+
|
1332
|
+
void LoopbreakDescriptor::Read()
|
1333
|
+
{
|
1334
|
+
// TODO, refactor, this code is probably in the wrong place.
|
1335
|
+
assert (MyEventMachine);
|
1336
|
+
MyEventMachine->_ReadLoopBreaker();
|
1337
|
+
}
|
1338
|
+
|
1339
|
+
|
1340
|
+
/**************************
|
1341
|
+
LoopbreakDescriptor::Write
|
1342
|
+
**************************/
|
1343
|
+
|
1344
|
+
void LoopbreakDescriptor::Write()
|
1345
|
+
{
|
1346
|
+
// Why are we here?
|
1347
|
+
throw std::runtime_error ("bad code path in loopbreak");
|
1348
|
+
}
|
1349
|
+
|
1350
|
+
/**************************************
|
1351
|
+
AcceptorDescriptor::AcceptorDescriptor
|
1352
|
+
**************************************/
|
1353
|
+
|
1354
|
+
AcceptorDescriptor::AcceptorDescriptor (int sd, EventMachine_t *parent_em, bool autoclose):
|
1355
|
+
EventableDescriptor (sd, parent_em, autoclose)
|
1356
|
+
{
|
1357
|
+
#ifdef HAVE_EPOLL
|
1358
|
+
EpollEvent.events = EPOLLIN;
|
1359
|
+
#endif
|
1360
|
+
#ifdef HAVE_KQUEUE
|
1361
|
+
MyEventMachine->ArmKqueueReader (this);
|
1362
|
+
#endif
|
1363
|
+
}
|
1364
|
+
|
1365
|
+
|
1366
|
+
/***************************************
|
1367
|
+
AcceptorDescriptor::~AcceptorDescriptor
|
1368
|
+
***************************************/
|
1369
|
+
|
1370
|
+
AcceptorDescriptor::~AcceptorDescriptor()
|
1371
|
+
{
|
1372
|
+
}
|
1373
|
+
|
1374
|
+
/****************************************
|
1375
|
+
STATIC: AcceptorDescriptor::StopAcceptor
|
1376
|
+
****************************************/
|
1377
|
+
|
1378
|
+
void AcceptorDescriptor::StopAcceptor (const unsigned long binding)
|
1379
|
+
{
|
1380
|
+
// TODO: This is something of a hack, or at least it's a static method of the wrong class.
|
1381
|
+
AcceptorDescriptor *ad = dynamic_cast <AcceptorDescriptor*> (Bindable_t::GetObject (binding));
|
1382
|
+
if (ad)
|
1383
|
+
ad->ScheduleClose (false);
|
1384
|
+
else
|
1385
|
+
throw std::runtime_error ("failed to close nonexistent acceptor");
|
1386
|
+
}
|
1387
|
+
|
1388
|
+
|
1389
|
+
/************************
|
1390
|
+
AcceptorDescriptor::Read
|
1391
|
+
************************/
|
1392
|
+
|
1393
|
+
void AcceptorDescriptor::Read()
{
	/* Accept up to a certain number of sockets on the listening connection.
	 * Don't try to accept all that are present, because this would allow a DoS attack
	 * in which no data were ever read or written. We should accept more than one,
	 * if available, to keep the partially accepted sockets from backing up in the kernel.
	 */

	/* Make sure we use non-blocking i/o on the acceptor socket, since we're selecting it
	 * for readability. According to Stevens UNP, it's possible for an acceptor to select readable
	 * and then block when we call accept. For example, the other end resets the connection after
	 * the socket selects readable and before we call accept. The kernel will remove the dead
	 * socket from the accept queue. If the accept queue is now empty, accept will block.
	 */


	struct sockaddr_storage pin;
	socklen_t addrlen = sizeof (pin);

	for (int i=0; i < 10; i++) {
		int sd = accept (GetSocket(), (struct sockaddr*)&pin, &addrlen);
		if (sd == INVALID_SOCKET) {
			// This breaks the loop when we've accepted everything on the kernel queue,
			// up to 10 new connections. But what if the *first* accept fails?
			// Does that mean anything serious is happening, beyond the situation
			// described in the note above?
			break;
		}

		// Set the newly-accepted socket non-blocking.
		// On Windows, this may fail because, weirdly, Windows inherits the non-blocking
		// attribute that we applied to the acceptor socket into the accepted one.
		if (!SetSocketNonblocking (sd)) {
			//int val = fcntl (sd, F_GETFL, 0);
			//if (fcntl (sd, F_SETFL, val | O_NONBLOCK) == -1) {
			shutdown (sd, 1);
			close (sd);
			continue;
		}


		// Disable Nagle's algorithm (coalescing of small packets). Eventually make this configurable.
		int one = 1;
		setsockopt (sd, IPPROTO_TCP, TCP_NODELAY, (char*) &one, sizeof(one));


		ConnectionDescriptor *cd = new ConnectionDescriptor (sd, MyEventMachine);
		if (!cd)
			throw std::runtime_error ("no newly accepted connection");
		cd->SetServerMode();
		if (EventCallback) {
			(*EventCallback) (GetBinding(), EM_CONNECTION_ACCEPTED, NULL, cd->GetBinding());
		}
		#ifdef HAVE_EPOLL
		cd->GetEpollEvent()->events = EPOLLIN | (cd->SelectForWrite() ? EPOLLOUT : 0);
		#endif
		assert (MyEventMachine);
		MyEventMachine->Add (cd);
		#ifdef HAVE_KQUEUE
		if (cd->SelectForWrite())
			MyEventMachine->ArmKqueueWriter (cd);
		MyEventMachine->ArmKqueueReader (cd);
		#endif
	}

}

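/* A minimal illustrative sketch, not part of this file: what a SetSocketNonblocking-style
 * helper is assumed to do on POSIX systems (the real helper is defined elsewhere in this
 * extension and also covers Windows). It shows the fcntl pattern that the commented-out
 * lines above allude to, and which the accept loop above depends on.
 */
#if 0
static bool SetSocketNonblocking_sketch (int sd)
{
	// Requires <fcntl.h>.
	int flags = fcntl (sd, F_GETFL, 0);
	if (flags == -1)
		return false;
	// With O_NONBLOCK set, accept/read/write return EWOULDBLOCK instead of blocking,
	// so a reactor selecting the descriptor for readability can never get stuck.
	return (fcntl (sd, F_SETFL, flags | O_NONBLOCK) != -1);
}
#endif
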
/*************************
AcceptorDescriptor::Write
*************************/

void AcceptorDescriptor::Write()
{
	// Why are we here?
	throw std::runtime_error ("bad code path in acceptor");
}


/*****************************
AcceptorDescriptor::Heartbeat
*****************************/

void AcceptorDescriptor::Heartbeat()
{
	// No-op
}


/*******************************
AcceptorDescriptor::GetSockname
*******************************/

bool AcceptorDescriptor::GetSockname (struct sockaddr_storage *s, socklen_t *len)
{
	bool ok = false;
	if (s) {
		int gp = getsockname (GetSocket(), (struct sockaddr *)s, len);
		if (gp == 0)
			ok = true;
	}
	return ok;
}


/**************************************
DatagramDescriptor::DatagramDescriptor
**************************************/

DatagramDescriptor::DatagramDescriptor (int sd, EventMachine_t *parent_em):
	EventableDescriptor (sd, parent_em),
	OutboundDataSize (0),
	SendErrorHandling(ERRORHANDLING_KILL)
{
	memset (&ReturnAddress, 0, sizeof(ReturnAddress));

	/* Provisionally added 19Oct07. All datagram sockets support broadcasting.
	 * Until now, sending to a broadcast address would give EACCES (permission denied)
	 * on systems like Linux and BSD that require the SO_BROADCAST socket-option in order
	 * to accept a packet to a broadcast address. Solaris doesn't require it. I think
	 * Windows DOES require it but I'm not sure.
	 *
	 * Ruby does NOT do what we're doing here. In Ruby, you have to explicitly set SO_BROADCAST
	 * on a UDP socket in order to enable broadcasting. The reason for requiring the option
	 * in the first place is so that applications don't send broadcast datagrams by mistake.
	 * I imagine that could happen if a user of an application typed in an address that happened
	 * to be a broadcast address on that particular subnet.
	 *
	 * This is provisional because someone may eventually come up with a good reason not to
	 * do it for all UDP sockets. If that happens, then we'll need to add a usercode-level API
	 * to set the socket option, just like Ruby does. AND WE'LL ALSO BREAK CODE THAT DOESN'T
	 * EXPLICITLY SET THE OPTION.
	 */

	int oval = 1;
	setsockopt (GetSocket(), SOL_SOCKET, SO_BROADCAST, (char*)&oval, sizeof(oval));

	#ifdef HAVE_EPOLL
	EpollEvent.events = EPOLLIN;
	#endif
	#ifdef HAVE_KQUEUE
	MyEventMachine->ArmKqueueReader (this);
	#endif
}


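/* A minimal sketch of the failure mode described in the constructor comment above, assuming
 * a POSIX system; the function name and parameters are illustrative only. Without SO_BROADCAST,
 * sendto() aimed at a broadcast address fails with EACCES; the setsockopt call above removes
 * that restriction for every EventMachine datagram socket.
 */
#if 0
static bool can_broadcast_sketch (int sd, const struct sockaddr_in *bcast_addr)
{
	const char ping[] = "ping";
	int n = sendto (sd, ping, sizeof(ping), 0,
		(const struct sockaddr*)bcast_addr, sizeof(*bcast_addr));
	// On a socket without SO_BROADCAST this typically fails and sets errno to EACCES.
	return (n >= 0);
}
#endif
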
/***************************************
DatagramDescriptor::~DatagramDescriptor
***************************************/

DatagramDescriptor::~DatagramDescriptor()
{
	// Run down any stranded outbound data.
	for (size_t i=0; i < OutboundPages.size(); i++)
		OutboundPages[i].Free();
}


/*****************************
DatagramDescriptor::Heartbeat
*****************************/

void DatagramDescriptor::Heartbeat()
{
	// Close it if its inactivity timer has expired.

	if (InactivityTimeout && ((MyEventMachine->GetCurrentLoopTime() - LastActivity) >= InactivityTimeout))
		ScheduleClose (false);
		//bCloseNow = true;
}


/************************
DatagramDescriptor::Read
************************/

void DatagramDescriptor::Read()
{
	int sd = GetSocket();
	assert (sd != INVALID_SOCKET);
	LastActivity = MyEventMachine->GetCurrentLoopTime();

	// This is an extremely large read buffer.
	// In many cases you wouldn't expect to get any more than 4K.
	char readbuffer [16 * 1024];

	for (int i=0; i < 10; i++) {
		// Don't read just one buffer and then move on. This is faster
		// if there is a lot of incoming.
		// But don't read indefinitely. Give other sockets a chance to run.
		// NOTICE, we're reading one less than the buffer size.
		// That's so we can put a guard byte at the end of what we send
		// to user code.

		struct sockaddr_storage sin;
		socklen_t slen = sizeof (sin);
		memset (&sin, 0, slen);

		int r = recvfrom (sd, readbuffer, sizeof(readbuffer) - 1, 0, (struct sockaddr*)&sin, &slen);
		//cerr << "<R:" << r << ">";

		// In UDP, a zero-length packet is perfectly legal.
		if (r >= 0) {

			// Add a null-terminator at the end of the buffer
			// that we will send to the callback.
			// DO NOT EVER CHANGE THIS. We want to explicitly allow users
			// to be able to depend on this behavior, so they will have
			// the option to do some things faster. Additionally it's
			// a security guard against buffer overflows.
			readbuffer [r] = 0;


			// Set up a "temporary" return address so that callers can "reply" to us
			// from within the callback we are about to invoke. That means that ordinary
			// calls to "send_data_to_connection" (which is of course misnamed in this
			// case) will result in packets being sent back to the same place that sent
			// us this one.
			// There is a different call (evma_send_datagram) for cases where the caller
			// actually wants to send a packet somewhere else.

			memset (&ReturnAddress, 0, sizeof(ReturnAddress));
			memcpy (&ReturnAddress, &sin, slen);

			_GenericInboundDispatch(readbuffer, r);

		}
		else {
			// Basically a would-block, meaning we've read everything there is to read.
			break;
		}

	}

}


/*************************
DatagramDescriptor::Write
*************************/

void DatagramDescriptor::Write()
{
	/* It's possible for a socket to select writable and then no longer
	 * be writable by the time we get around to writing. The kernel might
	 * have used up its available output buffers between the select call
	 * and when we get here. So this condition is not an error.
	 * This code is very reminiscent of ConnectionDescriptor::_WriteOutboundData,
	 * but differs in that the outbound data pages (received from the
	 * user) are _message-structured._ That is, we send each of them out
	 * one message at a time.
	 * TODO, we are currently suppressing the EMSGSIZE error!!!
	 */

	int sd = GetSocket();
	assert (sd != INVALID_SOCKET);
	LastActivity = MyEventMachine->GetCurrentLoopTime();

	assert (OutboundPages.size() > 0);

	// Send out up to 10 packets, then cycle the machine.
	for (int i = 0; i < 10; i++) {
		if (OutboundPages.size() <= 0)
			break;
		OutboundPage *op = &(OutboundPages[0]);

		// The nasty cast to (char*) is needed because Windows is brain-dead.
		int s = sendto (sd, (char*)op->Buffer, op->Length, 0,
			(struct sockaddr*)&(op->From),
			(op->From.sin6_family == AF_INET6 ?
				sizeof (struct sockaddr_in6) : sizeof (struct sockaddr_in)));
		int e = errno;

		OutboundDataSize -= op->Length;
		op->Free();

		if (s == SOCKET_ERROR) {
			#ifdef OS_UNIX
			if ((e != EINPROGRESS) && (e != EWOULDBLOCK) && (e != EINTR)) {
			#endif
			#ifdef OS_WIN32
			if ((e != WSAEINPROGRESS) && (e != WSAEWOULDBLOCK)) {
			#endif
				// save error info before deleting outboundpage
				// Hmm, we don't seem to have sockport.h available here, kludge along
				int sz = (op->From.sin6_family == AF_INET ?
					sizeof (struct sockaddr_in) : sizeof (struct sockaddr_in6));
				int f = op->From.sin6_family;
				*((char *)&op->From) = sz;
				op->From.sin6_family = f;
				// this would have been SET_SS_LEN(((struct sockaddr_storage *)&op->From), sz);

				char info[sizeof (struct sockaddr_in6)+2];
				info[0] = e;
				memcpy(info+1, (const char *)&(op->From), sz);
				sz++;
				info[sz] = 0; // cargo cult

				OutboundPages.pop_front();

				switch(SendErrorHandling) {
					case ERRORHANDLING_KILL:
						UnbindReasonCode = e;
						Close();
						i = 11; // break out from send loop
						break;
					case ERRORHANDLING_IGNORE:
						break;
					case ERRORHANDLING_REPORT:
						if (EventCallback) {
							(*EventCallback)(GetBinding(), EM_CONNECTION_SENDERROR, info, sz);
						}
						break;
				}
			}
		} else
			OutboundPages.pop_front();

	}
	#ifdef HAVE_EPOLL
	EpollEvent.events = (EPOLLIN | (SelectForWrite() ? EPOLLOUT : 0));
	assert (MyEventMachine);
	MyEventMachine->Modify (this);
	#endif
	#ifdef HAVE_KQUEUE
	if (SelectForWrite())
		MyEventMachine->ArmKqueueWriter (this);
	#endif
}


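/* A minimal sketch, not part of this file, of how a consumer of EM_CONNECTION_SENDERROR
 * might unpack the payload built above: byte 0 carries the (one-byte, truncated) errno
 * value and the remaining sz-1 bytes carry the destination sockaddr that the datagram
 * failed to reach. The function name is illustrative only.
 */
#if 0
static void unpack_senderror_sketch (const char *info, int sz)
{
	int err = (unsigned char) info[0];      // errno, truncated to one byte above
	struct sockaddr_storage dest;
	memset (&dest, 0, sizeof(dest));
	memcpy (&dest, info + 1, sz - 1);       // the address that could not be sent to
	// err and dest can now be surfaced to user code for reporting.
	(void) err;
}
#endif
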
/**********************************
DatagramDescriptor::SelectForWrite
**********************************/

bool DatagramDescriptor::SelectForWrite()
{
	/* Changed 15Nov07, per bug report by Mark Zvillius.
	 * The outbound data size will be zero if there are zero-length outbound packets,
	 * so we now select writable in case the outbound page buffer is not empty.
	 * Note that the superclass ShouldDelete method still checks for outbound data size,
	 * which may be wrong.
	 */
	//return (GetOutboundDataSize() > 0); (Original)
	return (OutboundPages.size() > 0);
}


/************************************
DatagramDescriptor::SendOutboundData
************************************/

int DatagramDescriptor::SendOutboundData (const char *data, int length)
{
	// This is almost an exact clone of ConnectionDescriptor::_SendRawOutboundData.
	// That means most of it could be factored to a common ancestor. Note that
	// empty datagrams are meaningful, which isn't the case for TCP streams.

	if (IsCloseScheduled())
		return 0;

	if (!data && (length > 0))
		throw std::runtime_error ("bad outbound data");
	char *buffer = (char *) malloc (length + 1);
	if (!buffer)
		throw std::runtime_error ("no allocation for outbound data");
	memcpy (buffer, data, length);
	buffer [length] = 0;
	OutboundPages.push_back (OutboundPage (buffer, length, ReturnAddress));
	OutboundDataSize += length;

	#ifdef HAVE_EPOLL
	EpollEvent.events = (EPOLLIN | EPOLLOUT);
	assert (MyEventMachine);
	MyEventMachine->Modify (this);
	#endif
	#ifdef HAVE_KQUEUE
	MyEventMachine->ArmKqueueWriter (this);
	#endif

	return length;
}


/****************************************
DatagramDescriptor::SendOutboundDatagram
****************************************/

int DatagramDescriptor::SendOutboundDatagram (const char *data, int length, const char *address, int port)
{
	// This is an exact clone of ConnectionDescriptor::SendOutboundData.
	// That means it needs to move to a common ancestor.
	// TODO: Refactor this so there's no overlap with SendOutboundData.

	if (IsCloseScheduled())
	//if (bCloseNow || bCloseAfterWriting)
		return -1;

	if (!address || !*address || !port)
		return -1;

	int family, addr_size;
	struct sockaddr *addr_here = EventMachine_t::name2address (address, port, &family, &addr_size);
	if (!addr_here)
		return -1;


	if (!data && (length > 0))
		throw std::runtime_error ("bad outbound data");
	char *buffer = (char *) malloc (length + 1);
	if (!buffer)
		throw std::runtime_error ("no allocation for outbound data");
	memcpy (buffer, data, length);
	buffer [length] = 0;
	OutboundPages.push_back (OutboundPage (buffer, length, *(struct sockaddr_storage*)addr_here));
	OutboundDataSize += length;

	#ifdef HAVE_EPOLL
	EpollEvent.events = (EPOLLIN | EPOLLOUT);
	assert (MyEventMachine);
	MyEventMachine->Modify (this);
	#endif
	#ifdef HAVE_KQUEUE
	MyEventMachine->ArmKqueueWriter (this);
	#endif

	return length;
}


/*********************************
ConnectionDescriptor::GetPeername
*********************************/

bool ConnectionDescriptor::GetPeername (struct sockaddr_storage *s, socklen_t *len)
{
	bool ok = false;
	if (s) {
		*len = sizeof(*s);
		int gp = getpeername (GetSocket(), (struct sockaddr *)s, len);
		if (gp == 0)
			ok = true;
	}
	return ok;
}

/*********************************
ConnectionDescriptor::GetSockname
*********************************/

bool ConnectionDescriptor::GetSockname (struct sockaddr_storage *s, socklen_t *len)
{
	bool ok = false;
	if (s) {
		*len = sizeof(*s);
		int gp = getsockname (GetSocket(), (struct sockaddr *)s, len);
		if (gp == 0)
			ok = true;
	}
	return ok;
}


/**********************************************
ConnectionDescriptor::GetCommInactivityTimeout
**********************************************/

uint64_t ConnectionDescriptor::GetCommInactivityTimeout()
{
	return InactivityTimeout / 1000;
}


/**********************************************
ConnectionDescriptor::SetCommInactivityTimeout
**********************************************/

int ConnectionDescriptor::SetCommInactivityTimeout (uint64_t value)
{
	InactivityTimeout = value * 1000;
	MyEventMachine->QueueHeartbeat(this);
	return 1;
}

/*******************************
DatagramDescriptor::GetPeername
*******************************/

bool DatagramDescriptor::GetPeername (struct sockaddr_storage *s, socklen_t *len)
{
	bool ok = false;
	if (s) {
		*len = sizeof(struct sockaddr_storage);
		memset (s, 0, sizeof(struct sockaddr_storage));
		memcpy (s, &ReturnAddress, sizeof(ReturnAddress));
		ok = true;
	}
	return ok;
}

/*******************************
DatagramDescriptor::GetSockname
*******************************/

bool DatagramDescriptor::GetSockname (struct sockaddr_storage *s, socklen_t *len)
{
	bool ok = false;
	if (s) {
		*len = sizeof(*s);
		int gp = getsockname (GetSocket(), (struct sockaddr *)s, len);
		if (gp == 0)
			ok = true;
	}
	return ok;
}



/********************************************
DatagramDescriptor::GetCommInactivityTimeout
********************************************/

uint64_t DatagramDescriptor::GetCommInactivityTimeout()
{
	return InactivityTimeout / 1000;
}

/********************************************
DatagramDescriptor::SetCommInactivityTimeout
********************************************/

int DatagramDescriptor::SetCommInactivityTimeout (uint64_t value)
{
	if (value > 0) {
		InactivityTimeout = value * 1000;
		MyEventMachine->QueueHeartbeat(this);
		return 1;
	}
	return 0;
}


/************************************
InotifyDescriptor::InotifyDescriptor
*************************************/

InotifyDescriptor::InotifyDescriptor (EventMachine_t *em):
	EventableDescriptor(0, em)
{
	bCallbackUnbind = false;

	#ifndef HAVE_INOTIFY
	throw std::runtime_error("no inotify support on this system");
	#else

	int fd = inotify_init();
	if (fd == -1) {
		char buf[200];
		snprintf (buf, sizeof(buf)-1, "unable to create inotify descriptor: %s", strerror(errno));
		throw std::runtime_error (buf);
	}

	MySocket = fd;
	SetSocketNonblocking(MySocket);
	#ifdef HAVE_EPOLL
	EpollEvent.events = EPOLLIN;
	#endif

	#endif
}


/*************************************
InotifyDescriptor::~InotifyDescriptor
**************************************/

InotifyDescriptor::~InotifyDescriptor()
{
	close(MySocket);
	MySocket = INVALID_SOCKET;
}

/***********************
InotifyDescriptor::Read
************************/

void InotifyDescriptor::Read()
{
	assert (MyEventMachine);
	MyEventMachine->_ReadInotifyEvents();
}


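/* A minimal sketch of what draining an inotify descriptor generally looks like on Linux;
 * the real work is done by EventMachine_t::_ReadInotifyEvents (defined elsewhere in this
 * extension), so this is illustrative only. inotify delivers a stream of variable-length
 * struct inotify_event records, each optionally followed by a NUL-padded name.
 */
#if 0
#include <sys/inotify.h>
#include <unistd.h>

static void drain_inotify_sketch (int inotify_fd)
{
	char buf [4096];
	ssize_t n = read (inotify_fd, buf, sizeof(buf));
	for (ssize_t i = 0; n > 0 && i < n; ) {
		const struct inotify_event *ev = (const struct inotify_event*) (buf + i);
		// ev->wd identifies the watch, ev->mask the event type, ev->name the file (if any).
		i += sizeof(struct inotify_event) + ev->len;
	}
}
#endif
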
/************************
InotifyDescriptor::Write
*************************/

void InotifyDescriptor::Write()
{
	throw std::runtime_error("bad code path in inotify");
}