eventmachine 1.0.0.beta.2-x86-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +16 -0
- data/Gemfile +1 -0
- data/README +81 -0
- data/Rakefile +11 -0
- data/docs/COPYING +60 -0
- data/docs/ChangeLog +211 -0
- data/docs/DEFERRABLES +246 -0
- data/docs/EPOLL +141 -0
- data/docs/GNU +281 -0
- data/docs/INSTALL +13 -0
- data/docs/KEYBOARD +42 -0
- data/docs/LEGAL +25 -0
- data/docs/LIGHTWEIGHT_CONCURRENCY +130 -0
- data/docs/PURE_RUBY +75 -0
- data/docs/RELEASE_NOTES +94 -0
- data/docs/SMTP +4 -0
- data/docs/SPAWNED_PROCESSES +148 -0
- data/docs/TODO +8 -0
- data/eventmachine.gemspec +33 -0
- data/examples/ex_channel.rb +43 -0
- data/examples/ex_queue.rb +2 -0
- data/examples/ex_tick_loop_array.rb +15 -0
- data/examples/ex_tick_loop_counter.rb +32 -0
- data/examples/helper.rb +2 -0
- data/ext/binder.cpp +124 -0
- data/ext/binder.h +46 -0
- data/ext/cmain.cpp +838 -0
- data/ext/ed.cpp +1884 -0
- data/ext/ed.h +418 -0
- data/ext/em.cpp +2348 -0
- data/ext/em.h +228 -0
- data/ext/eventmachine.h +123 -0
- data/ext/extconf.rb +157 -0
- data/ext/fastfilereader/extconf.rb +85 -0
- data/ext/fastfilereader/mapper.cpp +214 -0
- data/ext/fastfilereader/mapper.h +59 -0
- data/ext/fastfilereader/rubymain.cpp +127 -0
- data/ext/kb.cpp +79 -0
- data/ext/page.cpp +107 -0
- data/ext/page.h +51 -0
- data/ext/pipe.cpp +347 -0
- data/ext/project.h +155 -0
- data/ext/rubymain.cpp +1200 -0
- data/ext/ssl.cpp +460 -0
- data/ext/ssl.h +94 -0
- data/java/.classpath +8 -0
- data/java/.project +17 -0
- data/java/src/com/rubyeventmachine/EmReactor.java +571 -0
- data/java/src/com/rubyeventmachine/EmReactorException.java +40 -0
- data/java/src/com/rubyeventmachine/EventableChannel.java +69 -0
- data/java/src/com/rubyeventmachine/EventableDatagramChannel.java +189 -0
- data/java/src/com/rubyeventmachine/EventableSocketChannel.java +364 -0
- data/lib/em/buftok.rb +138 -0
- data/lib/em/callback.rb +26 -0
- data/lib/em/channel.rb +57 -0
- data/lib/em/connection.rb +569 -0
- data/lib/em/deferrable.rb +206 -0
- data/lib/em/file_watch.rb +54 -0
- data/lib/em/future.rb +61 -0
- data/lib/em/iterator.rb +270 -0
- data/lib/em/messages.rb +66 -0
- data/lib/em/process_watch.rb +44 -0
- data/lib/em/processes.rb +119 -0
- data/lib/em/protocols.rb +36 -0
- data/lib/em/protocols/header_and_content.rb +138 -0
- data/lib/em/protocols/httpclient.rb +268 -0
- data/lib/em/protocols/httpclient2.rb +590 -0
- data/lib/em/protocols/line_and_text.rb +125 -0
- data/lib/em/protocols/line_protocol.rb +28 -0
- data/lib/em/protocols/linetext2.rb +161 -0
- data/lib/em/protocols/memcache.rb +323 -0
- data/lib/em/protocols/object_protocol.rb +45 -0
- data/lib/em/protocols/postgres3.rb +247 -0
- data/lib/em/protocols/saslauth.rb +175 -0
- data/lib/em/protocols/smtpclient.rb +357 -0
- data/lib/em/protocols/smtpserver.rb +640 -0
- data/lib/em/protocols/socks4.rb +66 -0
- data/lib/em/protocols/stomp.rb +200 -0
- data/lib/em/protocols/tcptest.rb +53 -0
- data/lib/em/pure_ruby.rb +1013 -0
- data/lib/em/queue.rb +62 -0
- data/lib/em/spawnable.rb +85 -0
- data/lib/em/streamer.rb +130 -0
- data/lib/em/tick_loop.rb +85 -0
- data/lib/em/timers.rb +57 -0
- data/lib/em/version.rb +3 -0
- data/lib/eventmachine.rb +1548 -0
- data/lib/jeventmachine.rb +258 -0
- data/lib/rubyeventmachine.rb +2 -0
- data/setup.rb +1585 -0
- data/tasks/cpp.rake_example +77 -0
- data/tasks/doc.rake +30 -0
- data/tasks/package.rake +85 -0
- data/tasks/test.rake +6 -0
- data/tests/client.crt +31 -0
- data/tests/client.key +51 -0
- data/tests/test_attach.rb +136 -0
- data/tests/test_basic.rb +249 -0
- data/tests/test_channel.rb +64 -0
- data/tests/test_connection_count.rb +35 -0
- data/tests/test_defer.rb +49 -0
- data/tests/test_deferrable.rb +35 -0
- data/tests/test_epoll.rb +160 -0
- data/tests/test_error_handler.rb +35 -0
- data/tests/test_errors.rb +82 -0
- data/tests/test_exc.rb +55 -0
- data/tests/test_file_watch.rb +49 -0
- data/tests/test_futures.rb +198 -0
- data/tests/test_get_sock_opt.rb +30 -0
- data/tests/test_handler_check.rb +37 -0
- data/tests/test_hc.rb +190 -0
- data/tests/test_httpclient.rb +227 -0
- data/tests/test_httpclient2.rb +154 -0
- data/tests/test_inactivity_timeout.rb +50 -0
- data/tests/test_kb.rb +60 -0
- data/tests/test_ltp.rb +190 -0
- data/tests/test_ltp2.rb +317 -0
- data/tests/test_next_tick.rb +133 -0
- data/tests/test_object_protocol.rb +37 -0
- data/tests/test_pause.rb +70 -0
- data/tests/test_pending_connect_timeout.rb +48 -0
- data/tests/test_process_watch.rb +50 -0
- data/tests/test_processes.rb +128 -0
- data/tests/test_proxy_connection.rb +144 -0
- data/tests/test_pure.rb +134 -0
- data/tests/test_queue.rb +44 -0
- data/tests/test_running.rb +42 -0
- data/tests/test_sasl.rb +72 -0
- data/tests/test_send_file.rb +251 -0
- data/tests/test_servers.rb +76 -0
- data/tests/test_smtpclient.rb +83 -0
- data/tests/test_smtpserver.rb +85 -0
- data/tests/test_spawn.rb +322 -0
- data/tests/test_ssl_args.rb +79 -0
- data/tests/test_ssl_methods.rb +50 -0
- data/tests/test_ssl_verify.rb +82 -0
- data/tests/test_tick_loop.rb +59 -0
- data/tests/test_timers.rb +160 -0
- data/tests/test_ud.rb +36 -0
- data/tests/testem.rb +31 -0
- metadata +240 -0
data/ext/ed.h
ADDED
@@ -0,0 +1,418 @@
/*****************************************************************************

$Id$

File: ed.h
Date: 06Apr06

Copyright (C) 2006-07 by Francis Cianfrocca. All Rights Reserved.
Gmail: blackhedd

This program is free software; you can redistribute it and/or modify
it under the terms of either: 1) the GNU General Public License
as published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version; or 2) Ruby's License.

See the file COPYING for complete licensing information.

*****************************************************************************/

#ifndef __EventableDescriptor__H_
#define __EventableDescriptor__H_


class EventMachine_t; // forward reference
#ifdef WITH_SSL
class SslBox_t; // forward reference
#endif

bool SetSocketNonblocking (SOCKET);


/*************************
class EventableDescriptor
*************************/

class EventableDescriptor: public Bindable_t
{
	public:
		EventableDescriptor (int, EventMachine_t*);
		virtual ~EventableDescriptor();

		int GetSocket() {return MySocket;}
		void SetSocketInvalid() { MySocket = INVALID_SOCKET; }
		void Close();

		virtual void Read() = 0;
		virtual void Write() = 0;
		virtual void Heartbeat() = 0;

		// These methods tell us whether the descriptor
		// should be selected or polled for read/write.
		virtual bool SelectForRead() = 0;
		virtual bool SelectForWrite() = 0;

		// are we scheduled for a close, or in an error state, or already closed?
		bool ShouldDelete();
		// Do we have any data to write? This is used by ShouldDelete.
		virtual int GetOutboundDataSize() {return 0;}
		virtual bool IsWatchOnly(){ return false; }

		virtual void ScheduleClose (bool after_writing);
		bool IsCloseScheduled();
		virtual void HandleError(){ ScheduleClose (false); }

		void SetEventCallback (EMCallback);

		virtual bool GetPeername (struct sockaddr*) {return false;}
		virtual bool GetSockname (struct sockaddr*) {return false;}
		virtual bool GetSubprocessPid (pid_t*) {return false;}

		virtual void StartTls() {}
		virtual void SetTlsParms (const char *privkey_filename, const char *certchain_filename, bool verify_peer) {}

		#ifdef WITH_SSL
		virtual X509 *GetPeerCert() {return NULL;}
		#endif

		virtual uint64_t GetCommInactivityTimeout() {return 0;}
		virtual int SetCommInactivityTimeout (uint64_t value) {return 0;}
		uint64_t GetPendingConnectTimeout();
		int SetPendingConnectTimeout (uint64_t value);

		#ifdef HAVE_EPOLL
		struct epoll_event *GetEpollEvent() { return &EpollEvent; }
		#endif

		virtual void StartProxy(const unsigned long, const unsigned long, const unsigned long);
		virtual void StopProxy();
		virtual void SetProxiedFrom(EventableDescriptor*, const unsigned long);
		virtual int SendOutboundData(const char*,int){ return -1; }
		virtual bool IsPaused(){ return false; }
		virtual bool Pause(){ return false; }
		virtual bool Resume(){ return false; }

		virtual int ReportErrorStatus(){ return 0; }
		virtual bool IsConnectPending(){ return false; }
		virtual uint64_t GetNextHeartbeat();

	private:
		bool bCloseNow;
		bool bCloseAfterWriting;

	protected:
		int MySocket;

		EMCallback EventCallback;
		void _GenericInboundDispatch(const char*, int);

		uint64_t CreatedAt;
		bool bCallbackUnbind;
		int UnbindReasonCode;

		unsigned long BytesToProxy;
		EventableDescriptor *ProxyTarget;
		EventableDescriptor *ProxiedFrom;

		unsigned long MaxOutboundBufSize;

		#ifdef HAVE_EPOLL
		struct epoll_event EpollEvent;
		#endif

		EventMachine_t *MyEventMachine;
		uint64_t PendingConnectTimeout;
		uint64_t InactivityTimeout;
		uint64_t LastActivity;
		uint64_t NextHeartbeat;
};



/*************************
class LoopbreakDescriptor
*************************/

class LoopbreakDescriptor: public EventableDescriptor
{
	public:
		LoopbreakDescriptor (int, EventMachine_t*);
		virtual ~LoopbreakDescriptor() {}

		virtual void Read();
		virtual void Write();
		virtual void Heartbeat() {}

		virtual bool SelectForRead() {return true;}
		virtual bool SelectForWrite() {return false;}
};


/**************************
class ConnectionDescriptor
**************************/

class ConnectionDescriptor: public EventableDescriptor
{
	public:
		ConnectionDescriptor (int, EventMachine_t*);
		virtual ~ConnectionDescriptor();

		int SendOutboundData (const char*, int);

		void SetConnectPending (bool f);
		virtual void ScheduleClose (bool after_writing);
		virtual void HandleError();

		void SetNotifyReadable (bool);
		void SetNotifyWritable (bool);
		void SetWatchOnly (bool);

		bool IsPaused(){ return bPaused; }
		bool Pause();
		bool Resume();

		bool IsNotifyReadable(){ return bNotifyReadable; }
		bool IsNotifyWritable(){ return bNotifyWritable; }
		virtual bool IsWatchOnly(){ return bWatchOnly; }

		virtual void Read();
		virtual void Write();
		virtual void Heartbeat();

		virtual bool SelectForRead();
		virtual bool SelectForWrite();

		// Do we have any data to write? This is used by ShouldDelete.
		virtual int GetOutboundDataSize() {return OutboundDataSize;}

		virtual void StartTls();
		virtual void SetTlsParms (const char *privkey_filename, const char *certchain_filename, bool verify_peer);

		#ifdef WITH_SSL
		virtual X509 *GetPeerCert();
		virtual bool VerifySslPeer(const char*);
		virtual void AcceptSslPeer();
		#endif

		void SetServerMode() {bIsServer = true;}

		virtual bool GetPeername (struct sockaddr*);
		virtual bool GetSockname (struct sockaddr*);

		virtual uint64_t GetCommInactivityTimeout();
		virtual int SetCommInactivityTimeout (uint64_t value);

		virtual int ReportErrorStatus();
		virtual bool IsConnectPending(){ return bConnectPending; }

	protected:
		struct OutboundPage {
			OutboundPage (const char *b, int l, int o=0): Buffer(b), Length(l), Offset(o) {}
			void Free() {if (Buffer) free ((char*)Buffer); }
			const char *Buffer;
			int Length;
			int Offset;
		};

	protected:
		bool bPaused;
		bool bConnectPending;

		bool bNotifyReadable;
		bool bNotifyWritable;
		bool bWatchOnly;

		bool bReadAttemptedAfterClose;
		bool bWriteAttemptedAfterClose;

		deque<OutboundPage> OutboundPages;
		int OutboundDataSize;

		#ifdef WITH_SSL
		SslBox_t *SslBox;
		std::string CertChainFilename;
		std::string PrivateKeyFilename;
		bool bHandshakeSignaled;
		bool bSslVerifyPeer;
		bool bSslPeerAccepted;
		#endif

		#ifdef HAVE_KQUEUE
		bool bGotExtraKqueueEvent;
		#endif

		bool bIsServer;

	private:
		void _UpdateEvents();
		void _UpdateEvents(bool, bool);
		void _WriteOutboundData();
		void _DispatchInboundData (const char *buffer, int size);
		void _DispatchCiphertext();
		int _SendRawOutboundData (const char*, int);
		void _CheckHandshakeStatus();

};


/************************
class DatagramDescriptor
************************/

class DatagramDescriptor: public EventableDescriptor
{
	public:
		DatagramDescriptor (int, EventMachine_t*);
		virtual ~DatagramDescriptor();

		virtual void Read();
		virtual void Write();
		virtual void Heartbeat();

		virtual bool SelectForRead() {return true;}
		virtual bool SelectForWrite();

		int SendOutboundData (const char*, int);
		int SendOutboundDatagram (const char*, int, const char*, int);

		// Do we have any data to write? This is used by ShouldDelete.
		virtual int GetOutboundDataSize() {return OutboundDataSize;}

		virtual bool GetPeername (struct sockaddr*);
		virtual bool GetSockname (struct sockaddr*);

		virtual uint64_t GetCommInactivityTimeout();
		virtual int SetCommInactivityTimeout (uint64_t value);

	protected:
		struct OutboundPage {
			OutboundPage (const char *b, int l, struct sockaddr_in f, int o=0): Buffer(b), Length(l), Offset(o), From(f) {}
			void Free() {if (Buffer) free ((char*)Buffer); }
			const char *Buffer;
			int Length;
			int Offset;
			struct sockaddr_in From;
		};

		deque<OutboundPage> OutboundPages;
		int OutboundDataSize;

		struct sockaddr_in ReturnAddress;
};


/************************
class AcceptorDescriptor
************************/

class AcceptorDescriptor: public EventableDescriptor
{
	public:
		AcceptorDescriptor (int, EventMachine_t*);
		virtual ~AcceptorDescriptor();

		virtual void Read();
		virtual void Write();
		virtual void Heartbeat();

		virtual bool SelectForRead() {return true;}
		virtual bool SelectForWrite() {return false;}

		virtual bool GetSockname (struct sockaddr*);

		static void StopAcceptor (const unsigned long binding);
};

/********************
class PipeDescriptor
********************/

#ifdef OS_UNIX
class PipeDescriptor: public EventableDescriptor
{
	public:
		PipeDescriptor (int, pid_t, EventMachine_t*);
		virtual ~PipeDescriptor();

		virtual void Read();
		virtual void Write();
		virtual void Heartbeat();

		virtual bool SelectForRead();
		virtual bool SelectForWrite();

		int SendOutboundData (const char*, int);
		virtual int GetOutboundDataSize() {return OutboundDataSize;}

		virtual bool GetSubprocessPid (pid_t*);

	protected:
		struct OutboundPage {
			OutboundPage (const char *b, int l, int o=0): Buffer(b), Length(l), Offset(o) {}
			void Free() {if (Buffer) free ((char*)Buffer); }
			const char *Buffer;
			int Length;
			int Offset;
		};

	protected:
		bool bReadAttemptedAfterClose;

		deque<OutboundPage> OutboundPages;
		int OutboundDataSize;

		pid_t SubprocessPid;

	private:
		void _DispatchInboundData (const char *buffer, int size);
};
#endif // OS_UNIX


/************************
class KeyboardDescriptor
************************/

class KeyboardDescriptor: public EventableDescriptor
{
	public:
		KeyboardDescriptor (EventMachine_t*);
		virtual ~KeyboardDescriptor();

		virtual void Read();
		virtual void Write();
		virtual void Heartbeat();

		virtual bool SelectForRead() {return true;}
		virtual bool SelectForWrite() {return false;}

	protected:
		bool bReadAttemptedAfterClose;

	private:
		void _DispatchInboundData (const char *buffer, int size);
};


/***********************
class InotifyDescriptor
************************/

class InotifyDescriptor: public EventableDescriptor
{
	public:
		InotifyDescriptor (EventMachine_t*);
		virtual ~InotifyDescriptor();

		void Read();
		void Write();

		virtual void Heartbeat() {}
		virtual bool SelectForRead() {return true;}
		virtual bool SelectForWrite() {return false;}
};

#endif // __EventableDescriptor__H_

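The header above is the full interface; as a quick orientation, the sketch below shows the shape of a concrete descriptor. It is illustrative only and not part of the gem: the class name EchoDescriptor and its empty method bodies are hypothetical, and it assumes the EventMachine internal declarations (Bindable_t, EMCallback, EventMachine_t) are in scope.

// Illustrative sketch only -- not part of the eventmachine sources.
// A minimal hypothetical subclass satisfying the pure-virtual interface declared in ed.h.
class EchoDescriptor: public EventableDescriptor
{
	public:
		EchoDescriptor (int sd, EventMachine_t *em): EventableDescriptor (sd, em) {}

		// The reactor calls Read/Write when the descriptor selects readable/writable,
		// and Heartbeat periodically for timeout bookkeeping.
		virtual void Read() {}
		virtual void Write() {}
		virtual void Heartbeat() {}

		// Tell the reactor which events to poll this descriptor for.
		virtual bool SelectForRead() {return true;}
		virtual bool SelectForWrite() {return false;}
};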
data/ext/em.cpp
ADDED
@@ -0,0 +1,2348 @@
|
|
1
|
+
/*****************************************************************************
|
2
|
+
|
3
|
+
$Id$
|
4
|
+
|
5
|
+
File: em.cpp
|
6
|
+
Date: 06Apr06
|
7
|
+
|
8
|
+
Copyright (C) 2006-07 by Francis Cianfrocca. All Rights Reserved.
|
9
|
+
Gmail: blackhedd
|
10
|
+
|
11
|
+
This program is free software; you can redistribute it and/or modify
|
12
|
+
it under the terms of either: 1) the GNU General Public License
|
13
|
+
as published by the Free Software Foundation; either version 2 of the
|
14
|
+
License, or (at your option) any later version; or 2) Ruby's License.
|
15
|
+
|
16
|
+
See the file COPYING for complete licensing information.
|
17
|
+
|
18
|
+
*****************************************************************************/
|
19
|
+
|
20
|
+
// THIS ENTIRE FILE WILL EVENTUALLY BE FOR UNIX BUILDS ONLY.
|
21
|
+
//#ifdef OS_UNIX
|
22
|
+
|
23
|
+
#include "project.h"
|
24
|
+
|
25
|
+
/* The numer of max outstanding timers was once a const enum defined in em.h.
|
26
|
+
* Now we define it here so that users can change its value if necessary.
|
27
|
+
*/
|
28
|
+
static unsigned int MaxOutstandingTimers = 100000;
|
29
|
+
|
30
|
+
|
31
|
+
/* Internal helper to convert strings to internet addresses. IPv6-aware.
|
32
|
+
* Not reentrant or threadsafe, optimized for speed.
|
33
|
+
*/
|
34
|
+
static struct sockaddr *name2address (const char *server, int port, int *family, int *bind_size);
|
35
|
+
|
36
|
+
/***************************************
|
37
|
+
STATIC EventMachine_t::GetMaxTimerCount
|
38
|
+
***************************************/
|
39
|
+
|
40
|
+
int EventMachine_t::GetMaxTimerCount()
|
41
|
+
{
|
42
|
+
return MaxOutstandingTimers;
|
43
|
+
}
|
44
|
+
|
45
|
+
|
46
|
+
/***************************************
|
47
|
+
STATIC EventMachine_t::SetMaxTimerCount
|
48
|
+
***************************************/
|
49
|
+
|
50
|
+
void EventMachine_t::SetMaxTimerCount (int count)
|
51
|
+
{
|
52
|
+
/* Allow a user to increase the maximum number of outstanding timers.
|
53
|
+
* If this gets "too high" (a metric that is of course platform dependent),
|
54
|
+
* bad things will happen like performance problems and possible overuse
|
55
|
+
* of memory.
|
56
|
+
* The actual timer mechanism is very efficient so it's hard to know what
|
57
|
+
* the practical max, but 100,000 shouldn't be too problematical.
|
58
|
+
*/
|
59
|
+
if (count < 100)
|
60
|
+
count = 100;
|
61
|
+
MaxOutstandingTimers = count;
|
62
|
+
}
|
63
|
+
|
64
|
+
|
65
|
+
|
66
|
+
/******************************
|
67
|
+
EventMachine_t::EventMachine_t
|
68
|
+
******************************/
|
69
|
+
|
70
|
+
EventMachine_t::EventMachine_t (EMCallback event_callback):
|
71
|
+
HeartbeatInterval(2000000),
|
72
|
+
EventCallback (event_callback),
|
73
|
+
NextHeartbeatTime (0),
|
74
|
+
LoopBreakerReader (-1),
|
75
|
+
LoopBreakerWriter (-1),
|
76
|
+
bTerminateSignalReceived (false),
|
77
|
+
bEpoll (false),
|
78
|
+
epfd (-1),
|
79
|
+
bKqueue (false),
|
80
|
+
kqfd (-1),
|
81
|
+
inotify (NULL)
|
82
|
+
{
|
83
|
+
// Default time-slice is just smaller than one hundred mills.
|
84
|
+
Quantum.tv_sec = 0;
|
85
|
+
Quantum.tv_usec = 90000;
|
86
|
+
|
87
|
+
// Make sure the current loop time is sane, in case we do any initializations of
|
88
|
+
// objects before we start running.
|
89
|
+
_UpdateTime();
|
90
|
+
|
91
|
+
/* We initialize the network library here (only on Windows of course)
|
92
|
+
* and initialize "loop breakers." Our destructor also does some network-level
|
93
|
+
* cleanup. There's thus an implicit assumption that any given instance of EventMachine_t
|
94
|
+
* will only call ::Run once. Is that a good assumption? Should we move some of these
|
95
|
+
* inits and de-inits into ::Run?
|
96
|
+
*/
|
97
|
+
#ifdef OS_WIN32
|
98
|
+
WSADATA w;
|
99
|
+
WSAStartup (MAKEWORD (1, 1), &w);
|
100
|
+
#endif
|
101
|
+
|
102
|
+
_InitializeLoopBreaker();
|
103
|
+
}
|
104
|
+
|
105
|
+
|
106
|
+
/*******************************
|
107
|
+
EventMachine_t::~EventMachine_t
|
108
|
+
*******************************/
|
109
|
+
|
110
|
+
EventMachine_t::~EventMachine_t()
|
111
|
+
{
|
112
|
+
// Run down descriptors
|
113
|
+
size_t i;
|
114
|
+
for (i = 0; i < NewDescriptors.size(); i++)
|
115
|
+
delete NewDescriptors[i];
|
116
|
+
for (i = 0; i < Descriptors.size(); i++)
|
117
|
+
delete Descriptors[i];
|
118
|
+
|
119
|
+
close (LoopBreakerReader);
|
120
|
+
close (LoopBreakerWriter);
|
121
|
+
|
122
|
+
// Remove any file watch descriptors
|
123
|
+
while(!Files.empty()) {
|
124
|
+
map<int, Bindable_t*>::iterator f = Files.begin();
|
125
|
+
UnwatchFile (f->first);
|
126
|
+
}
|
127
|
+
|
128
|
+
if (epfd != -1)
|
129
|
+
close (epfd);
|
130
|
+
if (kqfd != -1)
|
131
|
+
close (kqfd);
|
132
|
+
}
|
133
|
+
|
134
|
+
|
135
|
+
/*************************
|
136
|
+
EventMachine_t::_UseEpoll
|
137
|
+
*************************/
|
138
|
+
|
139
|
+
void EventMachine_t::_UseEpoll()
|
140
|
+
{
|
141
|
+
/* Temporary.
|
142
|
+
* Use an internal flag to switch in epoll-based functionality until we determine
|
143
|
+
* how it should be integrated properly and the extent of the required changes.
|
144
|
+
* A permanent solution needs to allow the integration of additional technologies,
|
145
|
+
* like kqueue and Solaris's events.
|
146
|
+
*/
|
147
|
+
|
148
|
+
#ifdef HAVE_EPOLL
|
149
|
+
bEpoll = true;
|
150
|
+
#endif
|
151
|
+
}
|
152
|
+
|
153
|
+
/**************************
|
154
|
+
EventMachine_t::_UseKqueue
|
155
|
+
**************************/
|
156
|
+
|
157
|
+
void EventMachine_t::_UseKqueue()
|
158
|
+
{
|
159
|
+
/* Temporary.
|
160
|
+
* See comments under _UseEpoll.
|
161
|
+
*/
|
162
|
+
|
163
|
+
#ifdef HAVE_KQUEUE
|
164
|
+
bKqueue = true;
|
165
|
+
#endif
|
166
|
+
}
|
167
|
+
|
168
|
+
|
169
|
+
/****************************
|
170
|
+
EventMachine_t::ScheduleHalt
|
171
|
+
****************************/
|
172
|
+
|
173
|
+
void EventMachine_t::ScheduleHalt()
|
174
|
+
{
|
175
|
+
/* This is how we stop the machine.
|
176
|
+
* This can be called by clients. Signal handlers will probably
|
177
|
+
* set the global flag.
|
178
|
+
* For now this means there can only be one EventMachine ever running at a time.
|
179
|
+
*
|
180
|
+
* IMPORTANT: keep this light, fast, and async-safe. Don't do anything frisky in here,
|
181
|
+
* because it may be called from signal handlers invoked from code that we don't
|
182
|
+
* control. At this writing (20Sep06), EM does NOT install any signal handlers of
|
183
|
+
* its own.
|
184
|
+
*
|
185
|
+
* We need a FAQ. And one of the questions is: how do I stop EM when Ctrl-C happens?
|
186
|
+
* The answer is to call evma_stop_machine, which calls here, from a SIGINT handler.
|
187
|
+
*/
|
188
|
+
bTerminateSignalReceived = true;
|
189
|
+
}
|
190
|
+
|
191
|
+
|
192
|
+
|
193
|
+
/*******************************
|
194
|
+
EventMachine_t::SetTimerQuantum
|
195
|
+
*******************************/
|
196
|
+
|
197
|
+
void EventMachine_t::SetTimerQuantum (int interval)
|
198
|
+
{
|
199
|
+
/* We get a timer-quantum expressed in milliseconds.
|
200
|
+
* Don't set a quantum smaller than 5 or larger than 2500.
|
201
|
+
*/
|
202
|
+
|
203
|
+
if ((interval < 5) || (interval > 2500))
|
204
|
+
throw std::runtime_error ("invalid timer-quantum");
|
205
|
+
|
206
|
+
Quantum.tv_sec = interval / 1000;
|
207
|
+
Quantum.tv_usec = (interval % 1000) * 1000;
|
208
|
+
}
|
209
|
+
|
210
|
+
|
211
|
+
/*************************************
|
212
|
+
(STATIC) EventMachine_t::SetuidString
|
213
|
+
*************************************/
|
214
|
+
|
215
|
+
void EventMachine_t::SetuidString (const char *username)
|
216
|
+
{
|
217
|
+
/* This method takes a caller-supplied username and tries to setuid
|
218
|
+
* to that user. There is no meaningful implementation (and no error)
|
219
|
+
* on Windows. On Unix, a failure to setuid the caller-supplied string
|
220
|
+
* causes a fatal abort, because presumably the program is calling here
|
221
|
+
* in order to fulfill a security requirement. If we fail silently,
|
222
|
+
* the user may continue to run with too much privilege.
|
223
|
+
*
|
224
|
+
* TODO, we need to decide on and document a way of generating C++ level errors
|
225
|
+
* that can be wrapped in documented Ruby exceptions, so users can catch
|
226
|
+
* and handle them. And distinguish it from errors that we WON'T let the Ruby
|
227
|
+
* user catch (like security-violations and resource-overallocation).
|
228
|
+
* A setuid failure here would be in the latter category.
|
229
|
+
*/
|
230
|
+
|
231
|
+
#ifdef OS_UNIX
|
232
|
+
if (!username || !*username)
|
233
|
+
throw std::runtime_error ("setuid_string failed: no username specified");
|
234
|
+
|
235
|
+
struct passwd *p = getpwnam (username);
|
236
|
+
if (!p)
|
237
|
+
throw std::runtime_error ("setuid_string failed: unknown username");
|
238
|
+
|
239
|
+
if (setuid (p->pw_uid) != 0)
|
240
|
+
throw std::runtime_error ("setuid_string failed: no setuid");
|
241
|
+
|
242
|
+
// Success.
|
243
|
+
#endif
|
244
|
+
}
|
245
|
+
|
246
|
+
|
247
|
+
/****************************************
|
248
|
+
(STATIC) EventMachine_t::SetRlimitNofile
|
249
|
+
****************************************/
|
250
|
+
|
251
|
+
int EventMachine_t::SetRlimitNofile (int nofiles)
|
252
|
+
{
|
253
|
+
#ifdef OS_UNIX
|
254
|
+
struct rlimit rlim;
|
255
|
+
getrlimit (RLIMIT_NOFILE, &rlim);
|
256
|
+
if (nofiles >= 0) {
|
257
|
+
rlim.rlim_cur = nofiles;
|
258
|
+
if ((unsigned int)nofiles > rlim.rlim_max)
|
259
|
+
rlim.rlim_max = nofiles;
|
260
|
+
setrlimit (RLIMIT_NOFILE, &rlim);
|
261
|
+
// ignore the error return, for now at least.
|
262
|
+
// TODO, emit an error message someday when we have proper debug levels.
|
263
|
+
}
|
264
|
+
getrlimit (RLIMIT_NOFILE, &rlim);
|
265
|
+
return rlim.rlim_cur;
|
266
|
+
#endif
|
267
|
+
|
268
|
+
#ifdef OS_WIN32
|
269
|
+
// No meaningful implementation on Windows.
|
270
|
+
return 0;
|
271
|
+
#endif
|
272
|
+
}
|
273
|
+
|
274
|
+
|
275
|
+
/*********************************
|
276
|
+
EventMachine_t::SignalLoopBreaker
|
277
|
+
*********************************/
|
278
|
+
|
279
|
+
void EventMachine_t::SignalLoopBreaker()
|
280
|
+
{
|
281
|
+
#ifdef OS_UNIX
|
282
|
+
write (LoopBreakerWriter, "", 1);
|
283
|
+
#endif
|
284
|
+
#ifdef OS_WIN32
|
285
|
+
sendto (LoopBreakerReader, "", 0, 0, (struct sockaddr*)&(LoopBreakerTarget), sizeof(LoopBreakerTarget));
|
286
|
+
#endif
|
287
|
+
}
|
288
|
+
|
289
|
+
|
290
|
+
/**************************************
|
291
|
+
EventMachine_t::_InitializeLoopBreaker
|
292
|
+
**************************************/
|
293
|
+
|
294
|
+
void EventMachine_t::_InitializeLoopBreaker()
|
295
|
+
{
|
296
|
+
/* A "loop-breaker" is a socket-descriptor that we can write to in order
|
297
|
+
* to break the main select loop. Primarily useful for things running on
|
298
|
+
* threads other than the main EM thread, so they can trigger processing
|
299
|
+
* of events that arise exogenously to the EM.
|
300
|
+
* Keep the loop-breaker pipe out of the main descriptor set, otherwise
|
301
|
+
* its events will get passed on to user code.
|
302
|
+
*/
|
303
|
+
|
304
|
+
#ifdef OS_UNIX
|
305
|
+
int fd[2];
|
306
|
+
if (pipe (fd))
|
307
|
+
throw std::runtime_error (strerror(errno));
|
308
|
+
|
309
|
+
LoopBreakerWriter = fd[1];
|
310
|
+
LoopBreakerReader = fd[0];
|
311
|
+
#endif
|
312
|
+
|
313
|
+
#ifdef OS_WIN32
|
314
|
+
int sd = socket (AF_INET, SOCK_DGRAM, 0);
|
315
|
+
if (sd == INVALID_SOCKET)
|
316
|
+
throw std::runtime_error ("no loop breaker socket");
|
317
|
+
SetSocketNonblocking (sd);
|
318
|
+
|
319
|
+
memset (&LoopBreakerTarget, 0, sizeof(LoopBreakerTarget));
|
320
|
+
LoopBreakerTarget.sin_family = AF_INET;
|
321
|
+
LoopBreakerTarget.sin_addr.s_addr = inet_addr ("127.0.0.1");
|
322
|
+
|
323
|
+
srand ((int)time(NULL));
|
324
|
+
int i;
|
325
|
+
for (i=0; i < 100; i++) {
|
326
|
+
int r = (rand() % 10000) + 20000;
|
327
|
+
LoopBreakerTarget.sin_port = htons (r);
|
328
|
+
if (bind (sd, (struct sockaddr*)&LoopBreakerTarget, sizeof(LoopBreakerTarget)) == 0)
|
329
|
+
break;
|
330
|
+
}
|
331
|
+
|
332
|
+
if (i == 100)
|
333
|
+
throw std::runtime_error ("no loop breaker");
|
334
|
+
LoopBreakerReader = sd;
|
335
|
+
#endif
|
336
|
+
}
|
337
|
+
|
338
|
+
/***************************
|
339
|
+
EventMachine_t::_UpdateTime
|
340
|
+
***************************/
|
341
|
+
|
342
|
+
void EventMachine_t::_UpdateTime()
|
343
|
+
{
|
344
|
+
MyCurrentLoopTime = GetRealTime();
|
345
|
+
}
|
346
|
+
|
347
|
+
/***************************
|
348
|
+
EventMachine_t::GetRealTime
|
349
|
+
***************************/
|
350
|
+
|
351
|
+
uint64_t EventMachine_t::GetRealTime()
|
352
|
+
{
|
353
|
+
uint64_t current_time;
|
354
|
+
#if defined(OS_UNIX)
|
355
|
+
struct timeval tv;
|
356
|
+
gettimeofday (&tv, NULL);
|
357
|
+
current_time = (((uint64_t)(tv.tv_sec)) * 1000000LL) + ((uint64_t)(tv.tv_usec));
|
358
|
+
|
359
|
+
#elif defined(OS_WIN32)
|
360
|
+
unsigned tick = GetTickCount();
|
361
|
+
if (tick < LastTickCount)
|
362
|
+
TickCountTickover += 1;
|
363
|
+
LastTickCount = tick;
|
364
|
+
current_time = ((uint64_t)TickCountTickover << 32) + (uint64_t)tick;
|
365
|
+
|
366
|
+
#else
|
367
|
+
current_time = (uint64_t)time(NULL) * 1000000LL;
|
368
|
+
#endif
|
369
|
+
return current_time;
|
370
|
+
}
|
371
|
+
|
372
|
+
/***********************************
|
373
|
+
EventMachine_t::_DispatchHeartbeats
|
374
|
+
***********************************/
|
375
|
+
|
376
|
+
void EventMachine_t::_DispatchHeartbeats()
|
377
|
+
{
|
378
|
+
while (true) {
|
379
|
+
multimap<uint64_t,EventableDescriptor*>::iterator i = Heartbeats.begin();
|
380
|
+
if (i == Heartbeats.end())
|
381
|
+
break;
|
382
|
+
if (i->first > MyCurrentLoopTime)
|
383
|
+
break;
|
384
|
+
EventableDescriptor *ed = i->second;
|
385
|
+
ed->Heartbeat();
|
386
|
+
QueueHeartbeat(ed);
|
387
|
+
}
|
388
|
+
}
|
389
|
+
|
390
|
+
/******************************
|
391
|
+
EventMachine_t::QueueHeartbeat
|
392
|
+
******************************/
|
393
|
+
|
394
|
+
void EventMachine_t::QueueHeartbeat(EventableDescriptor *ed)
|
395
|
+
{
|
396
|
+
uint64_t heartbeat = ed->GetNextHeartbeat();
|
397
|
+
|
398
|
+
if (heartbeat) {
|
399
|
+
#ifndef HAVE_MAKE_PAIR
|
400
|
+
Heartbeats.insert (multimap<uint64_t,EventableDescriptor*>::value_type (heartbeat, ed));
|
401
|
+
#else
|
402
|
+
Heartbeats.insert (make_pair (heartbeat, ed));
|
403
|
+
#endif
|
404
|
+
}
|
405
|
+
}
|
406
|
+
|
407
|
+
/******************************
|
408
|
+
EventMachine_t::ClearHeartbeat
|
409
|
+
******************************/
|
410
|
+
|
411
|
+
void EventMachine_t::ClearHeartbeat(uint64_t key)
|
412
|
+
{
|
413
|
+
Heartbeats.erase(key);
|
414
|
+
}
|
415
|
+
|
416
|
+
/*******************
|
417
|
+
EventMachine_t::Run
|
418
|
+
*******************/
|
419
|
+
|
420
|
+
void EventMachine_t::Run()
|
421
|
+
{
|
422
|
+
#ifdef HAVE_EPOLL
|
423
|
+
if (bEpoll) {
|
424
|
+
epfd = epoll_create (MaxEpollDescriptors);
|
425
|
+
if (epfd == -1) {
|
426
|
+
char buf[200];
|
427
|
+
snprintf (buf, sizeof(buf)-1, "unable to create epoll descriptor: %s", strerror(errno));
|
428
|
+
throw std::runtime_error (buf);
|
429
|
+
}
|
430
|
+
int cloexec = fcntl (epfd, F_GETFD, 0);
|
431
|
+
assert (cloexec >= 0);
|
432
|
+
cloexec |= FD_CLOEXEC;
|
433
|
+
fcntl (epfd, F_SETFD, cloexec);
|
434
|
+
|
435
|
+
assert (LoopBreakerReader >= 0);
|
436
|
+
LoopbreakDescriptor *ld = new LoopbreakDescriptor (LoopBreakerReader, this);
|
437
|
+
assert (ld);
|
438
|
+
Add (ld);
|
439
|
+
}
|
440
|
+
#endif
|
441
|
+
|
442
|
+
#ifdef HAVE_KQUEUE
|
443
|
+
if (bKqueue) {
|
444
|
+
kqfd = kqueue();
|
445
|
+
if (kqfd == -1) {
|
446
|
+
char buf[200];
|
447
|
+
snprintf (buf, sizeof(buf)-1, "unable to create kqueue descriptor: %s", strerror(errno));
|
448
|
+
throw std::runtime_error (buf);
|
449
|
+
}
|
450
|
+
// cloexec not needed. By definition, kqueues are not carried across forks.
|
451
|
+
|
452
|
+
assert (LoopBreakerReader >= 0);
|
453
|
+
LoopbreakDescriptor *ld = new LoopbreakDescriptor (LoopBreakerReader, this);
|
454
|
+
assert (ld);
|
455
|
+
Add (ld);
|
456
|
+
}
|
457
|
+
#endif
|
458
|
+
|
459
|
+
while (true) {
|
460
|
+
_UpdateTime();
|
461
|
+
if (!_RunTimers())
|
462
|
+
break;
|
463
|
+
|
464
|
+
/* _Add must precede _Modify because the same descriptor might
|
465
|
+
* be on both lists during the same pass through the machine,
|
466
|
+
* and to modify a descriptor before adding it would fail.
|
467
|
+
*/
|
468
|
+
_AddNewDescriptors();
|
469
|
+
_ModifyDescriptors();
|
470
|
+
|
471
|
+
if (!_RunOnce())
|
472
|
+
break;
|
473
|
+
if (bTerminateSignalReceived)
|
474
|
+
break;
|
475
|
+
}
|
476
|
+
}
|
477
|
+
|
478
|
+
|
479
|
+
/************************
|
480
|
+
EventMachine_t::_RunOnce
|
481
|
+
************************/
|
482
|
+
|
483
|
+
bool EventMachine_t::_RunOnce()
|
484
|
+
{
|
485
|
+
bool ret;
|
486
|
+
if (bEpoll)
|
487
|
+
ret = _RunEpollOnce();
|
488
|
+
else if (bKqueue)
|
489
|
+
ret = _RunKqueueOnce();
|
490
|
+
else
|
491
|
+
ret = _RunSelectOnce();
|
492
|
+
_DispatchHeartbeats();
|
493
|
+
_CleanupSockets();
|
494
|
+
return ret;
|
495
|
+
}
|
496
|
+
|
497
|
+
|
498
|
+
|
499
|
+
/*****************************
|
500
|
+
EventMachine_t::_RunEpollOnce
|
501
|
+
*****************************/
|
502
|
+
|
503
|
+
bool EventMachine_t::_RunEpollOnce()
|
504
|
+
{
|
505
|
+
#ifdef HAVE_EPOLL
|
506
|
+
assert (epfd != -1);
|
507
|
+
int s;
|
508
|
+
|
509
|
+
timeval tv = _TimeTilNextEvent();
|
510
|
+
|
511
|
+
#ifdef BUILD_FOR_RUBY
|
512
|
+
int ret = 0;
|
513
|
+
fd_set fdreads;
|
514
|
+
|
515
|
+
FD_ZERO(&fdreads);
|
516
|
+
FD_SET(epfd, &fdreads);
|
517
|
+
|
518
|
+
if ((ret = rb_thread_select(epfd + 1, &fdreads, NULL, NULL, &tv)) < 1) {
|
519
|
+
if (ret == -1) {
|
520
|
+
assert(errno != EINVAL);
|
521
|
+
assert(errno != EBADF);
|
522
|
+
}
|
523
|
+
return true;
|
524
|
+
}
|
525
|
+
|
526
|
+
TRAP_BEG;
|
527
|
+
s = epoll_wait (epfd, epoll_events, MaxEvents, 0);
|
528
|
+
TRAP_END;
|
529
|
+
#else
|
530
|
+
int duration = 0;
|
531
|
+
duration = duration + (tv.tv_sec * 1000);
|
532
|
+
duration = duration + (tv.tv_usec / 1000);
|
533
|
+
s = epoll_wait (epfd, epoll_events, MaxEvents, duration);
|
534
|
+
#endif
|
535
|
+
|
536
|
+
if (s > 0) {
|
537
|
+
for (int i=0; i < s; i++) {
|
538
|
+
EventableDescriptor *ed = (EventableDescriptor*) epoll_events[i].data.ptr;
|
539
|
+
|
540
|
+
if (ed->IsWatchOnly() && ed->GetSocket() == INVALID_SOCKET)
|
541
|
+
continue;
|
542
|
+
|
543
|
+
assert(ed->GetSocket() != INVALID_SOCKET);
|
544
|
+
|
545
|
+
if (epoll_events[i].events & EPOLLIN)
|
546
|
+
ed->Read();
|
547
|
+
if (epoll_events[i].events & EPOLLOUT)
|
548
|
+
ed->Write();
|
549
|
+
if (epoll_events[i].events & (EPOLLERR | EPOLLHUP))
|
550
|
+
ed->HandleError();
|
551
|
+
}
|
552
|
+
}
|
553
|
+
else if (s < 0) {
|
554
|
+
// epoll_wait can fail on error in a handful of ways.
|
555
|
+
// If this happens, then wait for a little while to avoid busy-looping.
|
556
|
+
// If the error was EINTR, we probably caught SIGCHLD or something,
|
557
|
+
// so keep the wait short.
|
558
|
+
timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
|
559
|
+
EmSelect (0, NULL, NULL, NULL, &tv);
|
560
|
+
}
|
561
|
+
|
562
|
+
return true;
|
563
|
+
#else
|
564
|
+
throw std::runtime_error ("epoll is not implemented on this platform");
|
565
|
+
#endif
|
566
|
+
}
|
567
|
+
|
568
|
+
|
569
|
+
/******************************
|
570
|
+
EventMachine_t::_RunKqueueOnce
|
571
|
+
******************************/
|
572
|
+
|
573
|
+
bool EventMachine_t::_RunKqueueOnce()
|
574
|
+
{
|
575
|
+
#ifdef HAVE_KQUEUE
|
576
|
+
assert (kqfd != -1);
|
577
|
+
int k;
|
578
|
+
|
579
|
+
timeval tv = _TimeTilNextEvent();
|
580
|
+
|
581
|
+
#ifdef BUILD_FOR_RUBY
|
582
|
+
int ret = 0;
|
583
|
+
fd_set fdreads;
|
584
|
+
|
585
|
+
FD_ZERO(&fdreads);
|
586
|
+
FD_SET(kqfd, &fdreads);
|
587
|
+
|
588
|
+
if ((ret = rb_thread_select(kqfd + 1, &fdreads, NULL, NULL, &tv)) < 1) {
|
589
|
+
if (ret == -1) {
|
590
|
+
assert(errno != EINVAL);
|
591
|
+
assert(errno != EBADF);
|
592
|
+
}
|
593
|
+
return true;
|
594
|
+
}
|
595
|
+
|
596
|
+
TRAP_BEG;
|
597
|
+
k = kevent (kqfd, NULL, 0, Karray, MaxEvents, NULL);
|
598
|
+
TRAP_END;
|
599
|
+
#else
|
600
|
+
struct timespec ts;
|
601
|
+
ts.tv_sec = tv.tv_sec;
|
602
|
+
ts.tv_nsec = tv.tv_usec * 1000;
|
603
|
+
k = kevent (kqfd, NULL, 0, Karray, MaxEvents, &ts);
|
604
|
+
#endif
|
605
|
+
|
606
|
+
struct kevent *ke = Karray;
|
607
|
+
while (k > 0) {
|
608
|
+
switch (ke->filter)
|
609
|
+
{
|
610
|
+
case EVFILT_VNODE:
|
611
|
+
_HandleKqueueFileEvent (ke);
|
612
|
+
break;
|
613
|
+
|
614
|
+
case EVFILT_PROC:
|
615
|
+
_HandleKqueuePidEvent (ke);
|
616
|
+
break;
|
617
|
+
|
618
|
+
case EVFILT_READ:
|
619
|
+
case EVFILT_WRITE:
|
620
|
+
EventableDescriptor *ed = (EventableDescriptor*) (ke->udata);
|
621
|
+
assert (ed);
|
622
|
+
|
623
|
+
if (ed->IsWatchOnly() && ed->GetSocket() == INVALID_SOCKET)
|
624
|
+
break;
|
625
|
+
|
626
|
+
if (ke->filter == EVFILT_READ)
|
627
|
+
ed->Read();
|
628
|
+
else if (ke->filter == EVFILT_WRITE)
|
629
|
+
ed->Write();
|
630
|
+
else
|
631
|
+
cerr << "Discarding unknown kqueue event " << ke->filter << endl;
|
632
|
+
|
633
|
+
break;
|
634
|
+
}
|
635
|
+
|
636
|
+
--k;
|
637
|
+
++ke;
|
638
|
+
}
|
639
|
+
|
640
|
+
// TODO, replace this with rb_thread_blocking_region for 1.9 builds.
|
641
|
+
#ifdef BUILD_FOR_RUBY
|
642
|
+
if (!rb_thread_alone()) {
|
643
|
+
rb_thread_schedule();
|
644
|
+
}
|
645
|
+
#endif
|
646
|
+
|
647
|
+
return true;
|
648
|
+
#else
|
649
|
+
throw std::runtime_error ("kqueue is not implemented on this platform");
|
650
|
+
#endif
|
651
|
+
}
|
652
|
+
|
653
|
+
|
654
|
+
/*********************************
|
655
|
+
EventMachine_t::_TimeTilNextEvent
|
656
|
+
*********************************/
|
657
|
+
|
658
|
+
timeval EventMachine_t::_TimeTilNextEvent()
|
659
|
+
{
|
660
|
+
uint64_t next_event = 0;
|
661
|
+
|
662
|
+
if (!Heartbeats.empty()) {
|
663
|
+
multimap<uint64_t,EventableDescriptor*>::iterator heartbeats = Heartbeats.begin();
|
664
|
+
next_event = heartbeats->first;
|
665
|
+
}
|
666
|
+
|
667
|
+
if (!Timers.empty()) {
|
668
|
+
multimap<uint64_t,Timer_t>::iterator timers = Timers.begin();
|
669
|
+
if (next_event == 0 || timers->first < next_event)
|
670
|
+
next_event = timers->first;
|
671
|
+
}
|
672
|
+
|
673
|
+
if (!NewDescriptors.empty() || !ModifiedDescriptors.empty()) {
|
674
|
+
next_event = MyCurrentLoopTime;
|
675
|
+
}
|
676
|
+
|
677
|
+
timeval tv;
|
678
|
+
|
679
|
+
if (next_event == 0) {
|
680
|
+
tv = Quantum;
|
681
|
+
} else {
|
682
|
+
if (next_event > MyCurrentLoopTime) {
|
683
|
+
uint64_t duration = next_event - MyCurrentLoopTime;
|
684
|
+
tv.tv_sec = duration / 1000000;
|
685
|
+
tv.tv_usec = duration % 1000000;
|
686
|
+
} else {
|
687
|
+
tv.tv_sec = tv.tv_usec = 0;
|
688
|
+
}
|
689
|
+
}
|
690
|
+
|
691
|
+
return tv;
|
692
|
+
}
|
693
|
+
|
694
|
+
/*******************************
|
695
|
+
EventMachine_t::_CleanupSockets
|
696
|
+
*******************************/
|
697
|
+
|
698
|
+
void EventMachine_t::_CleanupSockets()
|
699
|
+
{
|
700
|
+
// TODO, rip this out and only delete the descriptors we know have died,
|
701
|
+
// rather than traversing the whole list.
|
702
|
+
// Modified 05Jan08 per suggestions by Chris Heath. It's possible that
|
703
|
+
// an EventableDescriptor will have a descriptor value of -1. That will
|
704
|
+
// happen if EventableDescriptor::Close was called on it. In that case,
|
705
|
+
// don't call epoll_ctl to remove the socket's filters from the epoll set.
|
706
|
+
// According to the epoll docs, this happens automatically when the
|
707
|
+
// descriptor is closed anyway. This is different from the case where
|
708
|
+
// the socket has already been closed but the descriptor in the ED object
|
709
|
+
// hasn't yet been set to INVALID_SOCKET.
|
710
|
+
// In kqueue, closing a descriptor automatically removes its event filters.
|
711
|
+
int i, j;
|
712
|
+
int nSockets = Descriptors.size();
|
713
|
+
for (i=0, j=0; i < nSockets; i++) {
|
714
|
+
EventableDescriptor *ed = Descriptors[i];
|
715
|
+
assert (ed);
|
716
|
+
if (ed->ShouldDelete()) {
|
717
|
+
#ifdef HAVE_EPOLL
|
718
|
+
if (bEpoll) {
|
719
|
+
assert (epfd != -1);
|
720
|
+
if (ed->GetSocket() != INVALID_SOCKET) {
|
721
|
+
int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
|
722
|
+
// ENOENT or EBADF are not errors because the socket may be already closed when we get here.
|
723
|
+
if (e && (errno != ENOENT) && (errno != EBADF) && (errno != EPERM)) {
|
724
|
+
char buf [200];
|
725
|
+
snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
|
726
|
+
throw std::runtime_error (buf);
|
727
|
+
}
|
728
|
+
}
|
729
|
+
ModifiedDescriptors.erase(ed);
|
730
|
+
}
|
731
|
+
#endif
|
732
|
+
delete ed;
|
733
|
+
}
|
734
|
+
else
|
735
|
+
Descriptors [j++] = ed;
|
736
|
+
}
|
737
|
+
while ((size_t)j < Descriptors.size())
|
738
|
+
Descriptors.pop_back();
|
739
|
+
}
|
740
|
+
|
741
|
+
/*********************************
|
742
|
+
EventMachine_t::_ModifyEpollEvent
|
743
|
+
*********************************/
|
744
|
+
|
745
|
+
void EventMachine_t::_ModifyEpollEvent (EventableDescriptor *ed)
|
746
|
+
{
|
747
|
+
#ifdef HAVE_EPOLL
|
748
|
+
if (bEpoll) {
|
749
|
+
assert (epfd != -1);
|
750
|
+
assert (ed);
|
751
|
+
assert (ed->GetSocket() != INVALID_SOCKET);
|
752
|
+
int e = epoll_ctl (epfd, EPOLL_CTL_MOD, ed->GetSocket(), ed->GetEpollEvent());
|
753
|
+
if (e) {
|
754
|
+
char buf [200];
|
755
|
+
snprintf (buf, sizeof(buf)-1, "unable to modify epoll event: %s", strerror(errno));
|
756
|
+
throw std::runtime_error (buf);
|
757
|
+
}
|
758
|
+
}
|
759
|
+
#endif
|
760
|
+
}
|
761
|
+
|
762
|
+
|
763
|
+
|
764
|
+
/**************************
|
765
|
+
SelectData_t::SelectData_t
|
766
|
+
**************************/
|
767
|
+
|
768
|
+
SelectData_t::SelectData_t()
|
769
|
+
{
|
770
|
+
maxsocket = 0;
|
771
|
+
FD_ZERO (&fdreads);
|
772
|
+
FD_ZERO (&fdwrites);
|
773
|
+
FD_ZERO (&fderrors);
|
774
|
+
}
|
775
|
+
|
776
|
+
|
777
|
+
#ifdef BUILD_FOR_RUBY
|
778
|
+
/*****************
|
779
|
+
_SelectDataSelect
|
780
|
+
*****************/
|
781
|
+
|
782
|
+
#ifdef HAVE_TBR
|
783
|
+
static VALUE _SelectDataSelect (void *v)
|
784
|
+
{
|
785
|
+
SelectData_t *sd = (SelectData_t*)v;
|
786
|
+
sd->nSockets = select (sd->maxsocket+1, &(sd->fdreads), &(sd->fdwrites), &(sd->fderrors), &(sd->tv));
|
787
|
+
return Qnil;
|
788
|
+
}
|
789
|
+
#endif
|
790
|
+
|
791
|
+
/*********************
|
792
|
+
SelectData_t::_Select
|
793
|
+
*********************/
|
794
|
+
|
795
|
+
int SelectData_t::_Select()
|
796
|
+
{
|
797
|
+
#ifdef HAVE_TBR
|
798
|
+
rb_thread_blocking_region (_SelectDataSelect, (void*)this, RUBY_UBF_IO, 0);
|
799
|
+
return nSockets;
|
800
|
+
#endif
|
801
|
+
|
802
|
+
#ifndef HAVE_TBR
|
803
|
+
return EmSelect (maxsocket+1, &fdreads, &fdwrites, &fderrors, &tv);
|
804
|
+
#endif
|
805
|
+
}
|
806
|
+
#endif
|
807
|
+
|
808
|
+
|
809
|
+
|
810
|
+
/******************************
|
811
|
+
EventMachine_t::_RunSelectOnce
|
812
|
+
******************************/
|
813
|
+
|
814
|
+
bool EventMachine_t::_RunSelectOnce()
|
815
|
+
{
|
816
|
+
// Crank the event machine once.
|
817
|
+
// If there are no descriptors to process, then sleep
|
818
|
+
// for a few hundred mills to avoid busy-looping.
|
819
|
+
// Return T/F to indicate whether we should continue.
|
820
|
+
// This is based on a select loop. Alternately provide epoll
|
821
|
+
// if we know we're running on a 2.6 kernel.
|
822
|
+
// epoll will be effective if we provide it as an alternative,
|
823
|
+
// however it has the same problem interoperating with Ruby
|
824
|
+
// threads that select does.
|
825
|
+
|
826
|
+
//cerr << "X";
|
827
|
+
|
828
|
+
/* This protection is now obsolete, because we will ALWAYS
|
829
|
+
* have at least one descriptor (the loop-breaker) to read.
|
830
|
+
*/
|
831
|
+
/*
|
832
|
+
if (Descriptors.size() == 0) {
|
833
|
+
#ifdef OS_UNIX
|
834
|
+
timeval tv = {0, 200 * 1000};
|
835
|
+
EmSelect (0, NULL, NULL, NULL, &tv);
|
836
|
+
return true;
|
837
|
+
#endif
|
838
|
+
#ifdef OS_WIN32
|
839
|
+
Sleep (200);
|
840
|
+
return true;
|
841
|
+
#endif
|
842
|
+
}
|
843
|
+
*/
|
844
|
+
|
845
|
+
SelectData_t SelectData;
|
846
|
+
/*
|
847
|
+
fd_set fdreads, fdwrites;
|
848
|
+
FD_ZERO (&fdreads);
|
849
|
+
FD_ZERO (&fdwrites);
|
850
|
+
|
851
|
+
int maxsocket = 0;
|
852
|
+
*/
|
853
|
+
|
854
|
+
// Always read the loop-breaker reader.
|
855
|
+
// Changed 23Aug06, provisionally implemented for Windows with a UDP socket
|
856
|
+
// running on localhost with a randomly-chosen port. (*Puke*)
|
857
|
+
// Windows has a version of the Unix pipe() library function, but it doesn't
|
858
|
+
// give you back descriptors that are selectable.
|
859
|
+
FD_SET (LoopBreakerReader, &(SelectData.fdreads));
|
860
|
+
if (SelectData.maxsocket < LoopBreakerReader)
|
861
|
+
SelectData.maxsocket = LoopBreakerReader;
|
862
|
+
|
863
|
+
// prepare the sockets for reading and writing
|
864
|
+
size_t i;
|
865
|
+
for (i = 0; i < Descriptors.size(); i++) {
|
866
|
+
EventableDescriptor *ed = Descriptors[i];
|
867
|
+
assert (ed);
|
868
|
+
int sd = ed->GetSocket();
|
869
|
+
if (ed->IsWatchOnly() && sd == INVALID_SOCKET)
|
870
|
+
continue;
|
871
|
+
assert (sd != INVALID_SOCKET);
|
872
|
+
|
873
|
+
if (ed->SelectForRead())
|
874
|
+
FD_SET (sd, &(SelectData.fdreads));
|
875
|
+
if (ed->SelectForWrite())
|
876
|
+
FD_SET (sd, &(SelectData.fdwrites));
|
877
|
+
|
878
|
+
#ifdef OS_WIN32
|
879
|
+
/* 21Sep09: on windows, a non-blocking connect() that fails does not come up as writable.
|
880
|
+
Instead, it is added to the error set. See http://www.mail-archive.com/openssl-users@openssl.org/msg58500.html
|
881
|
+
*/
|
882
|
+
FD_SET (sd, &(SelectData.fderrors));
|
883
|
+
#endif
|
884
|
+
|
885
|
+
if (SelectData.maxsocket < sd)
|
886
|
+
SelectData.maxsocket = sd;
|
887
|
+
}
|
888
|
+
|
889
|
+
|
890
|
+
{ // read and write the sockets
|
891
|
+
//timeval tv = {1, 0}; // Solaris fails if the microseconds member is >= 1000000.
|
892
|
+
//timeval tv = Quantum;
|
893
|
+
SelectData.tv = _TimeTilNextEvent();
|
894
|
+
int s = SelectData._Select();
|
895
|
+
//rb_thread_blocking_region(xxx,(void*)&SelectData,RUBY_UBF_IO,0);
|
896
|
+
//int s = EmSelect (SelectData.maxsocket+1, &(SelectData.fdreads), &(SelectData.fdwrites), NULL, &(SelectData.tv));
|
897
|
+
//int s = SelectData.nSockets;
|
898
|
+
if (s > 0) {
|
899
|
+
/* Changed 01Jun07. We used to handle the Loop-breaker right here.
|
900
|
+
* Now we do it AFTER all the regular descriptors. There's an
|
901
|
+
* incredibly important and subtle reason for this. Code on
|
902
|
+
* loop breakers is sometimes used to cause the reactor core to
|
903
|
+
* cycle (for example, to allow outbound network buffers to drain).
|
904
|
+
* If a loop-breaker handler reschedules itself (say, after determining
|
905
|
+
* that the write buffers are still too full), then it will execute
|
906
|
+
* IMMEDIATELY if _ReadLoopBreaker is done here instead of after
|
907
|
+
* the other descriptors are processed. That defeats the whole purpose.
|
908
|
+
*/
|
909
|
+
for (i=0; i < Descriptors.size(); i++) {
|
910
|
+
EventableDescriptor *ed = Descriptors[i];
|
911
|
+
assert (ed);
|
912
|
+
int sd = ed->GetSocket();
|
913
|
+
if (ed->IsWatchOnly() && sd == INVALID_SOCKET)
|
914
|
+
continue;
|
915
|
+
assert (sd != INVALID_SOCKET);
|
916
|
+
|
917
|
+
if (FD_ISSET (sd, &(SelectData.fdwrites)))
|
918
|
+
ed->Write();
|
919
|
+
if (FD_ISSET (sd, &(SelectData.fdreads)))
|
920
|
+
ed->Read();
|
921
|
+
if (FD_ISSET (sd, &(SelectData.fderrors)))
|
922
|
+
ed->HandleError();
|
923
|
+
}
|
924
|
+
|
925
|
+
if (FD_ISSET (LoopBreakerReader, &(SelectData.fdreads)))
|
926
|
+
_ReadLoopBreaker();
|
927
|
+
}
|
928
|
+
else if (s < 0) {
|
929
|
+
switch (errno) {
|
930
|
+
case EBADF:
|
931
|
+
_CleanBadDescriptors();
|
932
|
+
break;
|
933
|
+
case EINVAL:
|
934
|
+
throw std::runtime_error ("Somehow EM passed an invalid nfds or invalid timeout to select(2), please report this!");
|
935
|
+
break;
|
936
|
+
default:
|
937
|
+
// select can fail on error in a handful of ways.
|
938
|
+
// If this happens, then wait for a little while to avoid busy-looping.
|
939
|
+
// If the error was EINTR, we probably caught SIGCHLD or something,
|
940
|
+
// so keep the wait short.
|
941
|
+
timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
|
942
|
+
EmSelect (0, NULL, NULL, NULL, &tv);
|
943
|
+
}
|
944
|
+
}
|
945
|
+
}
|
946
|
+
|
947
|
+
return true;
|
948
|
+
}
|
949
|
+
|
950
|
+
void EventMachine_t::_CleanBadDescriptors()
|
951
|
+
{
|
952
|
+
size_t i;
|
953
|
+
|
954
|
+
for (i = 0; i < Descriptors.size(); i++) {
|
955
|
+
EventableDescriptor *ed = Descriptors[i];
|
956
|
+
if (ed->ShouldDelete())
|
957
|
+
continue;
|
958
|
+
|
959
|
+
int sd = ed->GetSocket();
|
960
|
+
|
961
|
+
struct timeval tv;
|
962
|
+
tv.tv_sec = 0;
|
963
|
+
tv.tv_usec = 0;
|
964
|
+
|
965
|
+
fd_set fds;
|
966
|
+
FD_ZERO(&fds);
|
967
|
+
FD_SET(sd, &fds);
|
968
|
+
|
969
|
+
int ret = select(sd + 1, &fds, NULL, NULL, &tv);
|
970
|
+
|
971
|
+
if (ret == -1) {
|
972
|
+
if (errno == EBADF)
|
973
|
+
ed->ScheduleClose(false);
|
974
|
+
}
|
975
|
+
}
|
976
|
+
}
|
977
|
+
|
978
|
+
/********************************
|
979
|
+
EventMachine_t::_ReadLoopBreaker
|
980
|
+
********************************/
|
981
|
+
|
982
|
+
void EventMachine_t::_ReadLoopBreaker()
|
983
|
+
{
|
984
|
+
/* The loop breaker has selected readable.
|
985
|
+
* Read it ONCE (it may block if we try to read it twice)
|
986
|
+
* and send a loop-break event back to user code.
|
987
|
+
*/
|
988
|
+
char buffer [1024];
|
989
|
+
read (LoopBreakerReader, buffer, sizeof(buffer));
|
990
|
+
if (EventCallback)
|
991
|
+
(*EventCallback)(0, EM_LOOPBREAK_SIGNAL, "", 0);
|
992
|
+
}
|
993
|
+
|
994
|
+
|
995
|
+
/**************************
|
996
|
+
EventMachine_t::_RunTimers
|
997
|
+
**************************/
|
998
|
+
|
999
|
+
bool EventMachine_t::_RunTimers()
|
1000
|
+
{
|
1001
|
+
// These are caller-defined timer handlers.
|
1002
|
+
// Return T/F to indicate whether we should continue the main loop.
|
1003
|
+
// We rely on the fact that multimaps sort by their keys to avoid
|
1004
|
+
// inspecting the whole list every time we come here.
|
1005
|
+
// Just keep inspecting and processing the list head until we hit
|
1006
|
+
// one that hasn't expired yet.
|
1007
|
+
|
1008
|
+
while (true) {
|
1009
|
+
multimap<uint64_t,Timer_t>::iterator i = Timers.begin();
|
1010
|
+
if (i == Timers.end())
|
1011
|
+
break;
|
1012
|
+
if (i->first > MyCurrentLoopTime)
|
1013
|
+
break;
|
1014
|
+
if (EventCallback)
|
1015
|
+
(*EventCallback) (0, EM_TIMER_FIRED, NULL, i->second.GetBinding());
|
1016
|
+
Timers.erase (i);
|
1017
|
+
}
|
1018
|
+
return true;
|
1019
|
+
}
|
1020
|
+
|
1021
|
+
|
1022
|
+
|
1023
|
+
/***********************************
|
1024
|
+
EventMachine_t::InstallOneshotTimer
|
1025
|
+
***********************************/
|
1026
|
+
|
1027
|
+
const unsigned long EventMachine_t::InstallOneshotTimer (int milliseconds)
|
1028
|
+
{
|
1029
|
+
if (Timers.size() > MaxOutstandingTimers)
|
1030
|
+
return false;
|
1031
|
+
// Don't use the global loop-time variable here, because we might
|
1032
|
+
// get called before the main event machine is running.
|
1033
|
+
|
1034
|
+
// XXX This should be replaced with a call to _GetRealTime(), but I don't
|
1035
|
+
// understand if this is a bug or not? For OS_UNIX we multiply the argument
|
1036
|
+
// milliseconds by 1000, but for OS_WIN32 we do not? This needs to be sorted out.
|
1037
|
+
#ifdef OS_UNIX
|
1038
|
+
struct timeval tv;
|
1039
|
+
gettimeofday (&tv, NULL);
|
1040
|
+
uint64_t fire_at = (((uint64_t)(tv.tv_sec)) * 1000000LL) + ((uint64_t)(tv.tv_usec));
|
1041
|
+
fire_at += ((uint64_t)milliseconds) * 1000LL;
|
1042
|
+
#endif
|
1043
|
+
|
1044
|
+
#ifdef OS_WIN32
|
1045
|
+
unsigned tick = GetTickCount();
|
1046
|
+
if (tick < LastTickCount)
|
1047
|
+
TickCountTickover += 1;
|
1048
|
+
LastTickCount = tick;
|
1049
|
+
|
1050
|
+
uint64_t fire_at = ((uint64_t)TickCountTickover << 32) + (uint64_t)tick;
|
1051
|
+
fire_at += (uint64_t)milliseconds;
|
1052
|
+
#endif
|
1053
|
+
|
1054
|
+
Timer_t t;
|
1055
|
+
#ifndef HAVE_MAKE_PAIR
|
1056
|
+
multimap<uint64_t,Timer_t>::iterator i = Timers.insert (multimap<uint64_t,Timer_t>::value_type (fire_at, t));
|
1057
|
+
#else
|
1058
|
+
multimap<uint64_t,Timer_t>::iterator i = Timers.insert (make_pair (fire_at, t));
|
1059
|
+
#endif
|
1060
|
+
return i->second.GetBinding();
|
1061
|
+
}
|
1062
|
+
|
1063
|
+
|
1064
|
+
+/*******************************
+EventMachine_t::ConnectToServer
+*******************************/
+
+const unsigned long EventMachine_t::ConnectToServer (const char *bind_addr, int bind_port, const char *server, int port)
+{
+	/* We want to spend no more than a few seconds waiting for a connection
+	 * to a remote host. So we use a nonblocking connect.
+	 * Linux disobeys the usual rules for nonblocking connects.
+	 * Per Stevens (UNP p.410), you expect a nonblocking connect to select
+	 * both readable and writable on error, and not to return EINPROGRESS
+	 * if the connect can be fulfilled immediately. Linux violates both
+	 * of these expectations.
+	 * Any kind of nonblocking connect on Linux returns EINPROGRESS.
+	 * The socket will then return writable when the disposition of the
+	 * connect is known, but it will not also be readable in case of
+	 * error! Weirdly, it will be readable in case there is data to read!!!
+	 * (Which can happen with protocols like SSH and SMTP.)
+	 * I suppose if you were so inclined you could consider this logical,
+	 * but it's not the way Unix has historically done it.
+	 * So we ignore the readable flag and read getsockopt to see if there
+	 * was an error connecting. A select timeout works as expected.
+	 * In regard to getsockopt: Linux does the Berkeley-style thing,
+	 * not the Solaris-style, and returns zero with the error code in
+	 * the error parameter.
+	 * Return the binding-text of the newly-created pending connection,
+	 * or NULL if there was a problem.
+	 */
+
+	if (!server || !*server || !port)
+		throw std::runtime_error ("invalid server or port");
+
+	int family, bind_size;
+	struct sockaddr bind_as, *bind_as_ptr = name2address (server, port, &family, &bind_size);
+	if (!bind_as_ptr)
+		throw std::runtime_error ("unable to resolve server address");
+	bind_as = *bind_as_ptr; // copy because name2address points to a static
+
+	int sd = socket (family, SOCK_STREAM, 0);
+	if (sd == INVALID_SOCKET) {
+		char buf [200];
+		snprintf (buf, sizeof(buf)-1, "unable to create new socket: %s", strerror(errno));
+		throw std::runtime_error (buf);
+	}
+
+	/*
+	sockaddr_in pin;
+	unsigned long HostAddr;
+
+	HostAddr = inet_addr (server);
+	if (HostAddr == INADDR_NONE) {
+		hostent *hp = gethostbyname ((char*)server); // Windows requires (char*)
+		if (!hp) {
+			// TODO: This gives the caller a fatal error. Not good.
+			// They can respond by catching RuntimeError (blecch).
+			// Possibly we need to fire an unbind event and provide
+			// a status code so user code can detect the cause of the
+			// failure.
+			return NULL;
+		}
+		HostAddr = ((in_addr*)(hp->h_addr))->s_addr;
+	}
+
+	memset (&pin, 0, sizeof(pin));
+	pin.sin_family = AF_INET;
+	pin.sin_addr.s_addr = HostAddr;
+	pin.sin_port = htons (port);
+
+	int sd = socket (AF_INET, SOCK_STREAM, 0);
+	if (sd == INVALID_SOCKET)
+		return NULL;
+	*/
+
+	// From here on, ALL error returns must close the socket.
+	// Set the new socket nonblocking.
+	if (!SetSocketNonblocking (sd)) {
+		close (sd);
+		throw std::runtime_error ("unable to set socket as non-blocking");
+	}
+	// Disable slow-start (Nagle algorithm).
+	int one = 1;
+	setsockopt (sd, IPPROTO_TCP, TCP_NODELAY, (char*) &one, sizeof(one));
+	// Set reuseaddr to improve performance on restarts
+	setsockopt (sd, SOL_SOCKET, SO_REUSEADDR, (char*) &one, sizeof(one));
+
+	if (bind_addr) {
+		int bind_to_size, bind_to_family;
+		struct sockaddr *bind_to = name2address (bind_addr, bind_port, &bind_to_family, &bind_to_size);
+		if (!bind_to) {
+			close (sd);
+			throw std::runtime_error ("invalid bind address");
+		}
+		if (bind (sd, bind_to, bind_to_size) < 0) {
+			close (sd);
+			throw std::runtime_error ("couldn't bind to address");
+		}
+	}
+
+	unsigned long out = 0;
+
+#ifdef OS_UNIX
+	//if (connect (sd, (sockaddr*)&pin, sizeof pin) == 0) {
+	if (connect (sd, &bind_as, bind_size) == 0) {
+		// This is a connect success, which Linux appears
+		// never to give when the socket is nonblocking,
+		// even if the connection is intramachine or to
+		// localhost.
+
+		/* Changed this branch 08Aug06. Evidently some kernels
+		 * (FreeBSD for example) will actually return success from
+		 * a nonblocking connect. This is a pretty simple case,
+		 * just set up the new connection and clear the pending flag.
+		 * Thanks to Chris Ochs for helping track this down.
+		 * This branch never gets taken on Linux or (oddly) OSX.
+		 * The original behavior was to throw an unimplemented,
+		 * which the user saw as a fatal exception. Very unfriendly.
+		 *
+		 * Tweaked 10Aug06. Even though the connect disposition is
+		 * known, we still set the connect-pending flag. That way
+		 * some needed initialization will happen in the ConnectionDescriptor.
+		 * (To wit, the ConnectionCompleted event gets sent to the client.)
+		 */
+		ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
+		if (!cd)
+			throw std::runtime_error ("no connection allocated");
+		cd->SetConnectPending (true);
+		Add (cd);
+		out = cd->GetBinding();
+	}
+	else if (errno == EINPROGRESS) {
+		// Errno will generally always be EINPROGRESS, but on Linux
+		// we have to look at getsockopt to be sure what really happened.
+		int error;
+		socklen_t len;
+		len = sizeof(error);
+		int o = getsockopt (sd, SOL_SOCKET, SO_ERROR, &error, &len);
+		if ((o == 0) && (error == 0)) {
+			// Here, there's no disposition.
+			// Put the connection on the stack and wait for it to complete
+			// or time out.
+			ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
+			if (!cd)
+				throw std::runtime_error ("no connection allocated");
+			cd->SetConnectPending (true);
+			Add (cd);
+			out = cd->GetBinding();
+		}
+	}
+	else {
+		// The error from connect was something other then EINPROGRESS (EHOSTDOWN, etc).
+		// Fall through to the !out case below
+	}
+
+	if (!out) {
+		/* This could be connection refused or some such thing.
+		 * We will come here on Linux if a localhost connection fails.
+		 * Changed 16Jul06: Originally this branch was a no-op, and
+		 * we'd drop down to the end of the method, close the socket,
+		 * and return NULL, which would cause the caller to GET A
+		 * FATAL EXCEPTION. Now we keep the socket around but schedule an
+		 * immediate close on it, so the caller will get a close-event
+		 * scheduled on it. This was only an issue for localhost connections
+		 * to non-listening ports. We may eventually need to revise this
+		 * revised behavior, in case it causes problems like making it hard
+		 * for people to know that a failure occurred.
+		 */
+		ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
+		if (!cd)
+			throw std::runtime_error ("no connection allocated");
+		cd->ScheduleClose (false);
+		Add (cd);
+		out = cd->GetBinding();
+	}
+#endif
+
+#ifdef OS_WIN32
+	//if (connect (sd, (sockaddr*)&pin, sizeof pin) == 0) {
+	if (connect (sd, &bind_as, bind_size) == 0) {
+		// This is a connect success, which Windows appears
+		// never to give when the socket is nonblocking,
+		// even if the connection is intramachine or to
+		// localhost.
+		throw std::runtime_error ("unimplemented");
+	}
+	else if (WSAGetLastError() == WSAEWOULDBLOCK) {
+		// Here, there's no disposition.
+		// Windows appears not to surface refused connections or
+		// such stuff at this point.
+		// Put the connection on the stack and wait for it to complete
+		// or time out.
+		ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
+		if (!cd)
+			throw std::runtime_error ("no connection allocated");
+		cd->SetConnectPending (true);
+		Add (cd);
+		out = cd->GetBinding();
+	}
+	else {
+		// The error from connect was something other then WSAEWOULDBLOCK.
+	}
+
+#endif
+
+	if (!out)
+		close (sd);
+	return out;
+}
+
+/***********************************
+EventMachine_t::ConnectToUnixServer
+***********************************/
+
+const unsigned long EventMachine_t::ConnectToUnixServer (const char *server)
+{
+	/* Connect to a Unix-domain server, which by definition is running
+	 * on the same host.
+	 * There is no meaningful implementation on Windows.
+	 * There's no need to do a nonblocking connect, since the connection
+	 * is always local and can always be fulfilled immediately.
+	 */
+
+#ifdef OS_WIN32
+	throw std::runtime_error ("unix-domain connection unavailable on this platform");
+	return NULL;
+#endif
+
+	// The whole rest of this function is only compiled on Unix systems.
+#ifdef OS_UNIX
+
+	unsigned long out = 0;
+
+	if (!server || !*server)
+		return 0;
+
+	sockaddr_un pun;
+	memset (&pun, 0, sizeof(pun));
+	pun.sun_family = AF_LOCAL;
+
+	// You ordinarily expect the server name field to be at least 1024 bytes long,
+	// but on Linux it can be MUCH shorter.
+	if (strlen(server) >= sizeof(pun.sun_path))
+		throw std::runtime_error ("unix-domain server name is too long");
+
+
+	strcpy (pun.sun_path, server);
+
+	int fd = socket (AF_LOCAL, SOCK_STREAM, 0);
+	if (fd == INVALID_SOCKET)
+		return 0;
+
+	// From here on, ALL error returns must close the socket.
+	// NOTE: At this point, the socket is still a blocking socket.
+	if (connect (fd, (struct sockaddr*)&pun, sizeof(pun)) != 0) {
+		close (fd);
+		return 0;
+	}
+
+	// Set the newly-connected socket nonblocking.
+	if (!SetSocketNonblocking (fd)) {
+		close (fd);
+		return 0;
+	}
+
+	// Set up a connection descriptor and add it to the event-machine.
+	// Observe, even though we know the connection status is connect-success,
+	// we still set the "pending" flag, so some needed initializations take
+	// place.
+	ConnectionDescriptor *cd = new ConnectionDescriptor (fd, this);
+	if (!cd)
+		throw std::runtime_error ("no connection allocated");
+	cd->SetConnectPending (true);
+	Add (cd);
+	out = cd->GetBinding();
+
+	if (!out)
+		close (fd);
+
+	return out;
+#endif
+}
+
+/************************
+EventMachine_t::AttachFD
+************************/
+
+const unsigned long EventMachine_t::AttachFD (int fd, bool watch_mode)
+{
+#ifdef OS_UNIX
+	if (fcntl(fd, F_GETFL, 0) < 0)
+		throw std::runtime_error ("invalid file descriptor");
+#endif
+
+#ifdef OS_WIN32
+	// TODO: add better check for invalid file descriptors (see ioctlsocket or getsockopt)
+	if (fd == INVALID_SOCKET)
+		throw std::runtime_error ("invalid file descriptor");
+#endif
+
+	{// Check for duplicate descriptors
+		size_t i;
+		for (i = 0; i < Descriptors.size(); i++) {
+			EventableDescriptor *ed = Descriptors[i];
+			assert (ed);
+			if (ed->GetSocket() == fd)
+				throw std::runtime_error ("adding existing descriptor");
+		}
+
+		for (i = 0; i < NewDescriptors.size(); i++) {
+			EventableDescriptor *ed = NewDescriptors[i];
+			assert (ed);
+			if (ed->GetSocket() == fd)
+				throw std::runtime_error ("adding existing new descriptor");
+		}
+	}
+
+	if (!watch_mode)
+		SetSocketNonblocking(fd);
+
+	ConnectionDescriptor *cd = new ConnectionDescriptor (fd, this);
+	if (!cd)
+		throw std::runtime_error ("no connection allocated");
+
+	cd->SetWatchOnly(watch_mode);
+	cd->SetConnectPending (false);
+
+	Add (cd);
+
+	const unsigned long out = cd->GetBinding();
+	return out;
+}
+
+/************************
+EventMachine_t::DetachFD
+************************/
+
+int EventMachine_t::DetachFD (EventableDescriptor *ed)
+{
+	if (!ed)
+		throw std::runtime_error ("detaching bad descriptor");
+
+	int fd = ed->GetSocket();
+
+#ifdef HAVE_EPOLL
+	if (bEpoll) {
+		if (ed->GetSocket() != INVALID_SOCKET) {
+			assert (epfd != -1);
+			int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
+			// ENOENT or EBADF are not errors because the socket may be already closed when we get here.
+			if (e && (errno != ENOENT) && (errno != EBADF)) {
+				char buf [200];
+				snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
+				throw std::runtime_error (buf);
+			}
+		}
+	}
+#endif
+
+#ifdef HAVE_KQUEUE
+	if (bKqueue) {
+		// remove any read/write events for this fd
+		struct kevent k;
+		EV_SET (&k, ed->GetSocket(), EVFILT_READ | EVFILT_WRITE, EV_DELETE, 0, 0, ed);
+		int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
+		if (t < 0 && (errno != ENOENT) && (errno != EBADF)) {
+			char buf [200];
+			snprintf (buf, sizeof(buf)-1, "unable to delete kqueue event: %s", strerror(errno));
+			throw std::runtime_error (buf);
+		}
+	}
+#endif
+
+	// Prevent the descriptor from being modified, in case DetachFD was called from a timer or next_tick
+	ModifiedDescriptors.erase (ed);
+
+	// Set MySocket = INVALID_SOCKET so ShouldDelete() is true (and the descriptor gets deleted and removed),
+	// and also to prevent anyone from calling close() on the detached fd
+	ed->SetSocketInvalid();
+
+	return fd;
+}
+
+/************
+name2address
+************/
+
+struct sockaddr *name2address (const char *server, int port, int *family, int *bind_size)
+{
+	// THIS IS NOT RE-ENTRANT OR THREADSAFE. Optimize for speed.
+	// Check the more-common cases first.
+	// Return NULL if no resolution.
+
+	static struct sockaddr_in in4;
+#ifndef __CYGWIN__
+	static struct sockaddr_in6 in6;
+#endif
+	struct hostent *hp;
+
+	if (!server || !*server)
+		server = "0.0.0.0";
+
+	memset (&in4, 0, sizeof(in4));
+	if ( (in4.sin_addr.s_addr = inet_addr (server)) != INADDR_NONE) {
+		if (family)
+			*family = AF_INET;
+		if (bind_size)
+			*bind_size = sizeof(in4);
+		in4.sin_family = AF_INET;
+		in4.sin_port = htons (port);
+		return (struct sockaddr*)&in4;
+	}
+
+#if defined(OS_UNIX) && !defined(__CYGWIN__)
+	memset (&in6, 0, sizeof(in6));
+	if (inet_pton (AF_INET6, server, in6.sin6_addr.s6_addr) > 0) {
+		if (family)
+			*family = AF_INET6;
+		if (bind_size)
+			*bind_size = sizeof(in6);
+		in6.sin6_family = AF_INET6;
+		in6.sin6_port = htons (port);
+		return (struct sockaddr*)&in6;
+	}
+#endif
+
+#ifdef OS_WIN32
+	// TODO, must complete this branch. Windows doesn't have inet_pton.
+	// A possible approach is to make a getaddrinfo call with the supplied
+	// server address, constraining the hints to ipv6 and seeing if we
+	// get any addresses.
+	// For the time being, Ipv6 addresses aren't supported on Windows.
+#endif
+
+	hp = gethostbyname ((char*)server); // Windows requires the cast.
+	if (hp) {
+		in4.sin_addr.s_addr = ((in_addr*)(hp->h_addr))->s_addr;
+		if (family)
+			*family = AF_INET;
+		if (bind_size)
+			*bind_size = sizeof(in4);
+		in4.sin_family = AF_INET;
+		in4.sin_port = htons (port);
+		return (struct sockaddr*)&in4;
+	}
+
+	return NULL;
+}
+
+
+/*******************************
+EventMachine_t::CreateTcpServer
+*******************************/
+
+const unsigned long EventMachine_t::CreateTcpServer (const char *server, int port)
+{
+	/* Create a TCP-acceptor (server) socket and add it to the event machine.
+	 * Return the binding of the new acceptor to the caller.
+	 * This binding will be referenced when the new acceptor sends events
+	 * to indicate accepted connections.
+	 */
+
+
+	int family, bind_size;
+	struct sockaddr *bind_here = name2address (server, port, &family, &bind_size);
+	if (!bind_here)
+		return 0;
+
+	unsigned long output_binding = 0;
+
+	//struct sockaddr_in sin;
+
+	int sd_accept = socket (family, SOCK_STREAM, 0);
+	if (sd_accept == INVALID_SOCKET) {
+		goto fail;
+	}
+
+	/*
+	memset (&sin, 0, sizeof(sin));
+	sin.sin_family = AF_INET;
+	sin.sin_addr.s_addr = INADDR_ANY;
+	sin.sin_port = htons (port);
+
+	if (server && *server) {
+		sin.sin_addr.s_addr = inet_addr (server);
+		if (sin.sin_addr.s_addr == INADDR_NONE) {
+			hostent *hp = gethostbyname ((char*)server); // Windows requires the cast.
+			if (hp == NULL) {
+				//__warning ("hostname not resolved: ", server);
+				goto fail;
+			}
+			sin.sin_addr.s_addr = ((in_addr*)(hp->h_addr))->s_addr;
+		}
+	}
+	*/
+
+	{ // set reuseaddr to improve performance on restarts.
+		int oval = 1;
+		if (setsockopt (sd_accept, SOL_SOCKET, SO_REUSEADDR, (char*)&oval, sizeof(oval)) < 0) {
+			//__warning ("setsockopt failed while creating listener","");
+			goto fail;
+		}
+	}
+
+	{ // set CLOEXEC. Only makes sense on Unix
+#ifdef OS_UNIX
+		int cloexec = fcntl (sd_accept, F_GETFD, 0);
+		assert (cloexec >= 0);
+		cloexec |= FD_CLOEXEC;
+		fcntl (sd_accept, F_SETFD, cloexec);
+#endif
+	}
+
+
+	//if (bind (sd_accept, (struct sockaddr*)&sin, sizeof(sin))) {
+	if (bind (sd_accept, bind_here, bind_size)) {
+		//__warning ("binding failed");
+		goto fail;
+	}
+
+	if (listen (sd_accept, 100)) {
+		//__warning ("listen failed");
+		goto fail;
+	}
+
+	{
+		// Set the acceptor non-blocking.
+		// THIS IS CRUCIALLY IMPORTANT because we read it in a select loop.
+		if (!SetSocketNonblocking (sd_accept)) {
+		//int val = fcntl (sd_accept, F_GETFL, 0);
+		//if (fcntl (sd_accept, F_SETFL, val | O_NONBLOCK) == -1) {
+			goto fail;
+		}
+	}
+
+	{ // Looking good.
+		AcceptorDescriptor *ad = new AcceptorDescriptor (sd_accept, this);
+		if (!ad)
+			throw std::runtime_error ("unable to allocate acceptor");
+		Add (ad);
+		output_binding = ad->GetBinding();
+	}
+
+	return output_binding;
+
+	fail:
+	if (sd_accept != INVALID_SOCKET)
+		close (sd_accept);
+	return 0;
+}
+
+
+/**********************************
+EventMachine_t::OpenDatagramSocket
+**********************************/
+
+const unsigned long EventMachine_t::OpenDatagramSocket (const char *address, int port)
+{
+	unsigned long output_binding = 0;
+
+	int sd = socket (AF_INET, SOCK_DGRAM, 0);
+	if (sd == INVALID_SOCKET)
+		goto fail;
+	// from here on, early returns must close the socket!
+
+
+	struct sockaddr_in sin;
+	memset (&sin, 0, sizeof(sin));
+	sin.sin_family = AF_INET;
+	sin.sin_port = htons (port);
+
+
+	if (address && *address) {
+		sin.sin_addr.s_addr = inet_addr (address);
+		if (sin.sin_addr.s_addr == INADDR_NONE) {
+			hostent *hp = gethostbyname ((char*)address); // Windows requires the cast.
+			if (hp == NULL)
+				goto fail;
+			sin.sin_addr.s_addr = ((in_addr*)(hp->h_addr))->s_addr;
+		}
+	}
+	else
+		sin.sin_addr.s_addr = htonl (INADDR_ANY);
+
+
+	// Set the new socket nonblocking.
+	{
+		if (!SetSocketNonblocking (sd))
+		//int val = fcntl (sd, F_GETFL, 0);
+		//if (fcntl (sd, F_SETFL, val | O_NONBLOCK) == -1)
+			goto fail;
+	}
+
+	if (bind (sd, (struct sockaddr*)&sin, sizeof(sin)) != 0)
+		goto fail;
+
+	{ // Looking good.
+		DatagramDescriptor *ds = new DatagramDescriptor (sd, this);
+		if (!ds)
+			throw std::runtime_error ("unable to allocate datagram-socket");
+		Add (ds);
+		output_binding = ds->GetBinding();
+	}
+
+	return output_binding;
+
+	fail:
+	if (sd != INVALID_SOCKET)
+		close (sd);
+	return 0;
+}
+
+
+
+/*******************
+EventMachine_t::Add
+*******************/
+
+void EventMachine_t::Add (EventableDescriptor *ed)
+{
+	if (!ed)
+		throw std::runtime_error ("added bad descriptor");
+	ed->SetEventCallback (EventCallback);
+	NewDescriptors.push_back (ed);
+}
+
+
+/*******************************
+EventMachine_t::ArmKqueueWriter
+*******************************/
+
+void EventMachine_t::ArmKqueueWriter (EventableDescriptor *ed)
+{
+#ifdef HAVE_KQUEUE
+	if (bKqueue) {
+		if (!ed)
+			throw std::runtime_error ("added bad descriptor");
+		struct kevent k;
+		EV_SET (&k, ed->GetSocket(), EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, ed);
+		int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
+		if (t < 0) {
+			char buf [200];
+			snprintf (buf, sizeof(buf)-1, "arm kqueue writer failed on %d: %s", ed->GetSocket(), strerror(errno));
+			throw std::runtime_error (buf);
+		}
+	}
+#endif
+}
+
+/*******************************
+EventMachine_t::ArmKqueueReader
+*******************************/
+
+void EventMachine_t::ArmKqueueReader (EventableDescriptor *ed)
+{
+#ifdef HAVE_KQUEUE
+	if (bKqueue) {
+		if (!ed)
+			throw std::runtime_error ("added bad descriptor");
+		struct kevent k;
+		EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
+		int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
+		if (t < 0) {
+			char buf [200];
+			snprintf (buf, sizeof(buf)-1, "arm kqueue reader failed on %d: %s", ed->GetSocket(), strerror(errno));
+			throw std::runtime_error (buf);
+		}
+	}
+#endif
+}
+
+/**********************************
+EventMachine_t::_AddNewDescriptors
+**********************************/
+
+void EventMachine_t::_AddNewDescriptors()
+{
+	/* Avoid adding descriptors to the main descriptor list
+	 * while we're actually traversing the list.
+	 * Any descriptors that are added as a result of processing timers
+	 * or acceptors should go on a temporary queue and then added
+	 * while we're not traversing the main list.
+	 * Also, it (rarely) happens that a newly-created descriptor
+	 * is immediately scheduled to close. It might be a good
+	 * idea not to bother scheduling these for I/O but if
+	 * we do that, we might bypass some important processing.
+	 */
+
+	for (size_t i = 0; i < NewDescriptors.size(); i++) {
+		EventableDescriptor *ed = NewDescriptors[i];
+		if (ed == NULL)
+			throw std::runtime_error ("adding bad descriptor");
+
+#if HAVE_EPOLL
+		if (bEpoll) {
+			assert (epfd != -1);
+			int e = epoll_ctl (epfd, EPOLL_CTL_ADD, ed->GetSocket(), ed->GetEpollEvent());
+			if (e) {
+				char buf [200];
+				snprintf (buf, sizeof(buf)-1, "unable to add new descriptor: %s", strerror(errno));
+				throw std::runtime_error (buf);
+			}
+		}
+#endif
+
+#if HAVE_KQUEUE
+		/*
+		if (bKqueue) {
+			// INCOMPLETE. Some descriptors don't want to be readable.
+			assert (kqfd != -1);
+			struct kevent k;
+			EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
+			int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
+			assert (t == 0);
+		}
+		*/
+#endif
+
+		QueueHeartbeat(ed);
+		Descriptors.push_back (ed);
+	}
+	NewDescriptors.clear();
+}
+
+
+/**********************************
+EventMachine_t::_ModifyDescriptors
+**********************************/
+
+void EventMachine_t::_ModifyDescriptors()
+{
+	/* For implementations which don't level check every descriptor on
+	 * every pass through the machine, as select does.
+	 * If we're not selecting, then descriptors need a way to signal to the
+	 * machine that their readable or writable status has changed.
+	 * That's what the ::Modify call is for. We do it this way to avoid
+	 * modifying descriptors during the loop traversal, where it can easily
+	 * happen that an object (like a UDP socket) gets data written on it by
+	 * the application during #post_init. That would take place BEFORE the
+	 * descriptor even gets added to the epoll descriptor, so the modify
+	 * operation will crash messily.
+	 * Another really messy possibility is for a descriptor to put itself
+	 * on the Modified list, and then get deleted before we get here.
+	 * Remember, deletes happen after the I/O traversal and before the
+	 * next pass through here. So we have to make sure when we delete a
+	 * descriptor to remove it from the Modified list.
+	 */
+
+#ifdef HAVE_EPOLL
+	if (bEpoll) {
+		set<EventableDescriptor*>::iterator i = ModifiedDescriptors.begin();
+		while (i != ModifiedDescriptors.end()) {
+			assert (*i);
+			_ModifyEpollEvent (*i);
+			++i;
+		}
+	}
+#endif
+
+	ModifiedDescriptors.clear();
+}
+
+
+/**********************
+EventMachine_t::Modify
+**********************/
+
+void EventMachine_t::Modify (EventableDescriptor *ed)
+{
+	if (!ed)
+		throw std::runtime_error ("modified bad descriptor");
+	ModifiedDescriptors.insert (ed);
+}
+
+
+/**************************************
+EventMachine_t::CreateUnixDomainServer
+**************************************/
+
+const unsigned long EventMachine_t::CreateUnixDomainServer (const char *filename)
+{
+	/* Create a UNIX-domain acceptor (server) socket and add it to the event machine.
+	 * Return the binding of the new acceptor to the caller.
+	 * This binding will be referenced when the new acceptor sends events
+	 * to indicate accepted connections.
+	 * THERE IS NO MEANINGFUL IMPLEMENTATION ON WINDOWS.
+	 */
+
+#ifdef OS_WIN32
+	throw std::runtime_error ("unix-domain server unavailable on this platform");
+#endif
+
+	// The whole rest of this function is only compiled on Unix systems.
+#ifdef OS_UNIX
+	unsigned long output_binding = 0;
+
+	struct sockaddr_un s_sun;
+
+	int sd_accept = socket (AF_LOCAL, SOCK_STREAM, 0);
+	if (sd_accept == INVALID_SOCKET) {
+		goto fail;
+	}
+
+	if (!filename || !*filename)
+		goto fail;
+	unlink (filename);
+
+	bzero (&s_sun, sizeof(s_sun));
+	s_sun.sun_family = AF_LOCAL;
+	strncpy (s_sun.sun_path, filename, sizeof(s_sun.sun_path)-1);
+
+	// don't bother with reuseaddr for a local socket.
+
+	{ // set CLOEXEC. Only makes sense on Unix
+#ifdef OS_UNIX
+		int cloexec = fcntl (sd_accept, F_GETFD, 0);
+		assert (cloexec >= 0);
+		cloexec |= FD_CLOEXEC;
+		fcntl (sd_accept, F_SETFD, cloexec);
+#endif
+	}
+
+	if (bind (sd_accept, (struct sockaddr*)&s_sun, sizeof(s_sun))) {
+		//__warning ("binding failed");
+		goto fail;
+	}
+
+	if (listen (sd_accept, 100)) {
+		//__warning ("listen failed");
+		goto fail;
+	}
+
+	{
+		// Set the acceptor non-blocking.
+		// THIS IS CRUCIALLY IMPORTANT because we read it in a select loop.
+		if (!SetSocketNonblocking (sd_accept)) {
+		//int val = fcntl (sd_accept, F_GETFL, 0);
+		//if (fcntl (sd_accept, F_SETFL, val | O_NONBLOCK) == -1) {
+			goto fail;
+		}
+	}
+
+	{ // Looking good.
+		AcceptorDescriptor *ad = new AcceptorDescriptor (sd_accept, this);
+		if (!ad)
+			throw std::runtime_error ("unable to allocate acceptor");
+		Add (ad);
+		output_binding = ad->GetBinding();
+	}
+
+	return output_binding;
+
+	fail:
+	if (sd_accept != INVALID_SOCKET)
+		close (sd_accept);
+	return 0;
+#endif // OS_UNIX
+}
+
+
+/*********************
+EventMachine_t::Popen
+*********************/
+#if OBSOLETE
+const char *EventMachine_t::Popen (const char *cmd, const char *mode)
+{
+#ifdef OS_WIN32
+	throw std::runtime_error ("popen is currently unavailable on this platform");
+#endif
+
+	// The whole rest of this function is only compiled on Unix systems.
+	// Eventually we need this functionality (or a full-duplex equivalent) on Windows.
+#ifdef OS_UNIX
+	const char *output_binding = NULL;
+
+	FILE *fp = popen (cmd, mode);
+	if (!fp)
+		return NULL;
+
+	// From here, all early returns must pclose the stream.
+
+	// According to the pipe(2) manpage, descriptors returned from pipe have both
+	// CLOEXEC and NONBLOCK clear. Do NOT set CLOEXEC. DO set nonblocking.
+	if (!SetSocketNonblocking (fileno (fp))) {
+		pclose (fp);
+		return NULL;
+	}
+
+	{ // Looking good.
+		PipeDescriptor *pd = new PipeDescriptor (fp, this);
+		if (!pd)
+			throw std::runtime_error ("unable to allocate pipe");
+		Add (pd);
+		output_binding = pd->GetBinding();
+	}
+
+	return output_binding;
+#endif
+}
+#endif // OBSOLETE
+
+/**************************
+EventMachine_t::Socketpair
+**************************/
+
+const unsigned long EventMachine_t::Socketpair (char * const*cmd_strings)
+{
+#ifdef OS_WIN32
+	throw std::runtime_error ("socketpair is currently unavailable on this platform");
+#endif
+
+	// The whole rest of this function is only compiled on Unix systems.
+	// Eventually we need this functionality (or a full-duplex equivalent) on Windows.
+#ifdef OS_UNIX
+	// Make sure the incoming array of command strings is sane.
+	if (!cmd_strings)
+		return 0;
+	int j;
+	for (j=0; j < 2048 && cmd_strings[j]; j++)
+		;
+	if ((j==0) || (j==2048))
+		return 0;
+
+	unsigned long output_binding = 0;
+
+	int sv[2];
+	if (socketpair (AF_LOCAL, SOCK_STREAM, 0, sv) < 0)
+		return 0;
+	// from here, all early returns must close the pair of sockets.
+
+	// Set the parent side of the socketpair nonblocking.
+	// We don't care about the child side, and most child processes will expect their
+	// stdout to be blocking. Thanks to Duane Johnson and Bill Kelly for pointing this out.
+	// Obviously DON'T set CLOEXEC.
+	if (!SetSocketNonblocking (sv[0])) {
+		close (sv[0]);
+		close (sv[1]);
+		return 0;
+	}
+
+	pid_t f = fork();
+	if (f > 0) {
+		close (sv[1]);
+		PipeDescriptor *pd = new PipeDescriptor (sv[0], f, this);
+		if (!pd)
+			throw std::runtime_error ("unable to allocate pipe");
+		Add (pd);
+		output_binding = pd->GetBinding();
+	}
+	else if (f == 0) {
+		close (sv[0]);
+		dup2 (sv[1], STDIN_FILENO);
+		close (sv[1]);
+		dup2 (STDIN_FILENO, STDOUT_FILENO);
+		execvp (cmd_strings[0], cmd_strings+1);
+		exit (-1); // end the child process if the exec doesn't work.
+	}
+	else
+		throw std::runtime_error ("no fork");
+
+	return output_binding;
+#endif
+}
+
+
+/****************************
+EventMachine_t::OpenKeyboard
+****************************/
+
+const unsigned long EventMachine_t::OpenKeyboard()
+{
+	KeyboardDescriptor *kd = new KeyboardDescriptor (this);
+	if (!kd)
+		throw std::runtime_error ("no keyboard-object allocated");
+	Add (kd);
+	return kd->GetBinding();
+}
+
+
+/**********************************
+EventMachine_t::GetConnectionCount
+**********************************/
+
+int EventMachine_t::GetConnectionCount ()
+{
+	return Descriptors.size() + NewDescriptors.size();
+}
+
+
+/************************
+EventMachine_t::WatchPid
+************************/
+
+const unsigned long EventMachine_t::WatchPid (int pid)
+{
+#ifdef HAVE_KQUEUE
+	if (!bKqueue)
+		throw std::runtime_error("must enable kqueue (EM.kqueue=true) for pid watching support");
+
+	struct kevent event;
+	int kqres;
+
+	EV_SET(&event, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT | NOTE_FORK, 0, 0);
+
+	// Attempt to register the event
+	kqres = kevent(kqfd, &event, 1, NULL, 0, NULL);
+	if (kqres == -1) {
+		char errbuf[200];
+		sprintf(errbuf, "failed to register file watch descriptor with kqueue: %s", strerror(errno));
+		throw std::runtime_error(errbuf);
+	}
+#endif
+
+#ifdef HAVE_KQUEUE
+	Bindable_t* b = new Bindable_t();
+	Pids.insert(make_pair (pid, b));
+
+	return b->GetBinding();
+#endif
+
+	throw std::runtime_error("no pid watching support on this system");
+}
+
+/**************************
+EventMachine_t::UnwatchPid
+**************************/
+
+void EventMachine_t::UnwatchPid (int pid)
+{
+	Bindable_t *b = Pids[pid];
+	assert(b);
+	Pids.erase(pid);
+
+#ifdef HAVE_KQUEUE
+	struct kevent k;
+
+	EV_SET(&k, pid, EVFILT_PROC, EV_DELETE, 0, 0, 0);
+	/*int t =*/ kevent (kqfd, &k, 1, NULL, 0, NULL);
+	// t==-1 if the process already exited; ignore this for now
+#endif
+
+	if (EventCallback)
+		(*EventCallback)(b->GetBinding(), EM_CONNECTION_UNBOUND, NULL, 0);
+
+	delete b;
+}
+
+void EventMachine_t::UnwatchPid (const unsigned long sig)
+{
+	for(map<int, Bindable_t*>::iterator i=Pids.begin(); i != Pids.end(); i++)
+	{
+		if (i->second->GetBinding() == sig) {
+			UnwatchPid (i->first);
+			return;
+		}
+	}
+
+	throw std::runtime_error("attempted to remove invalid pid signature");
+}
+
+
+/*************************
+EventMachine_t::WatchFile
+*************************/
+
+const unsigned long EventMachine_t::WatchFile (const char *fpath)
+{
+	struct stat sb;
+	int sres;
+	int wd = -1;
+
+	sres = stat(fpath, &sb);
+
+	if (sres == -1) {
+		char errbuf[300];
+		sprintf(errbuf, "error registering file %s for watching: %s", fpath, strerror(errno));
+		throw std::runtime_error(errbuf);
+	}
+
+#ifdef HAVE_INOTIFY
+	if (!inotify) {
+		inotify = new InotifyDescriptor(this);
+		assert (inotify);
+		Add(inotify);
+	}
+
+	wd = inotify_add_watch(inotify->GetSocket(), fpath,
+		IN_MODIFY | IN_DELETE_SELF | IN_MOVE_SELF | IN_CREATE | IN_DELETE | IN_MOVE) ;
+	if (wd == -1) {
+		char errbuf[300];
+		sprintf(errbuf, "failed to open file %s for registering with inotify: %s", fpath, strerror(errno));
+		throw std::runtime_error(errbuf);
+	}
+#endif
+
+#ifdef HAVE_KQUEUE
+	if (!bKqueue)
+		throw std::runtime_error("must enable kqueue (EM.kqueue=true) for file watching support");
+
+	// With kqueue we have to open the file first and use the resulting fd to register for events
+	wd = open(fpath, O_RDONLY);
+	if (wd == -1) {
+		char errbuf[300];
+		sprintf(errbuf, "failed to open file %s for registering with kqueue: %s", fpath, strerror(errno));
+		throw std::runtime_error(errbuf);
+	}
+	_RegisterKqueueFileEvent(wd);
+#endif
+
+	if (wd != -1) {
+		Bindable_t* b = new Bindable_t();
+		Files.insert(make_pair (wd, b));
+
+		return b->GetBinding();
+	}
+
+	throw std::runtime_error("no file watching support on this system"); // is this the right thing to do?
+}
+
+
+/***************************
+EventMachine_t::UnwatchFile
+***************************/
+
+void EventMachine_t::UnwatchFile (int wd)
+{
+	Bindable_t *b = Files[wd];
+	assert(b);
+	Files.erase(wd);
+
+#ifdef HAVE_INOTIFY
+	inotify_rm_watch(inotify->GetSocket(), wd);
+#elif HAVE_KQUEUE
+	// With kqueue, closing the monitored fd automatically clears all registered events for it
+	close(wd);
+#endif
+
+	if (EventCallback)
+		(*EventCallback)(b->GetBinding(), EM_CONNECTION_UNBOUND, NULL, 0);
+
+	delete b;
+}
+
+void EventMachine_t::UnwatchFile (const unsigned long sig)
+{
+	for(map<int, Bindable_t*>::iterator i=Files.begin(); i != Files.end(); i++)
+	{
+		if (i->second->GetBinding() == sig) {
+			UnwatchFile (i->first);
+			return;
+		}
+	}
+	throw std::runtime_error("attempted to remove invalid watch signature");
+}
+
+
+/***********************************
+EventMachine_t::_ReadInotify_Events
+************************************/
+
+void EventMachine_t::_ReadInotifyEvents()
+{
+#ifdef HAVE_INOTIFY
+	char buffer[1024];
+
+	assert(EventCallback);
+
+	for (;;) {
+		int returned = read(inotify->GetSocket(), buffer, sizeof(buffer));
+		assert(!(returned == 0 || returned == -1 && errno == EINVAL));
+		if (returned <= 0) {
+			break;
+		}
+		int current = 0;
+		while (current < returned) {
+			struct inotify_event* event = (struct inotify_event*)(buffer+current);
+			map<int, Bindable_t*>::const_iterator bindable = Files.find(event->wd);
+			if (bindable != Files.end()) {
+				if (event->mask & (IN_MODIFY | IN_CREATE | IN_DELETE | IN_MOVE)){
+					(*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "modified", 8);
+				}
+				if (event->mask & IN_MOVE_SELF){
+					(*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "moved", 5);
+				}
+				if (event->mask & IN_DELETE_SELF) {
+					(*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "deleted", 7);
+					UnwatchFile ((int)event->wd);
+				}
+			}
+			current += sizeof(struct inotify_event) + event->len;
+		}
+	}
+#endif
+}
+
+
+/*************************************
+EventMachine_t::_HandleKqueuePidEvent
+*************************************/
+
+#ifdef HAVE_KQUEUE
+void EventMachine_t::_HandleKqueuePidEvent(struct kevent *event)
+{
+	assert(EventCallback);
+
+	if (event->fflags & NOTE_FORK)
+		(*EventCallback)(Pids [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "fork", 4);
+	if (event->fflags & NOTE_EXIT) {
+		(*EventCallback)(Pids [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "exit", 4);
+		// stop watching the pid if it died
+		UnwatchPid ((int)event->ident);
+	}
+}
+#endif
+
+
+/**************************************
+EventMachine_t::_HandleKqueueFileEvent
+***************************************/
+
+#ifdef HAVE_KQUEUE
+void EventMachine_t::_HandleKqueueFileEvent(struct kevent *event)
+{
+	assert(EventCallback);
+
+	if (event->fflags & NOTE_WRITE)
+		(*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "modified", 8);
+	if (event->fflags & NOTE_RENAME)
+		(*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "moved", 5);
+	if (event->fflags & NOTE_DELETE) {
+		(*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "deleted", 7);
+		UnwatchFile ((int)event->ident);
+	}
+}
+#endif
+
+
+/****************************************
+EventMachine_t::_RegisterKqueueFileEvent
+*****************************************/
+
+#ifdef HAVE_KQUEUE
+void EventMachine_t::_RegisterKqueueFileEvent(int fd)
+{
+	struct kevent newevent;
+	int kqres;
+
+	// Setup the event with our fd and proper flags
+	EV_SET(&newevent, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_DELETE | NOTE_RENAME | NOTE_WRITE, 0, 0);
+
+	// Attempt to register the event
+	kqres = kevent(kqfd, &newevent, 1, NULL, 0, NULL);
+	if (kqres == -1) {
+		char errbuf[200];
+		sprintf(errbuf, "failed to register file watch descriptor with kqueue: %s", strerror(errno));
+		close(fd);
+		throw std::runtime_error(errbuf);
+	}
+}
+#endif
+
+
+/************************************
+EventMachine_t::GetHeartbeatInterval
+*************************************/
+
+float EventMachine_t::GetHeartbeatInterval()
+{
+	return ((float)HeartbeatInterval / 1000000);
+}
+
+
+/************************************
+EventMachine_t::SetHeartbeatInterval
+*************************************/
+
+int EventMachine_t::SetHeartbeatInterval(float interval)
+{
+	int iv = (int)(interval * 1000000);
+	if (iv > 0) {
+		HeartbeatInterval = iv;
+		return 1;
+	}
+	return 0;
+}
+//#endif // OS_UNIX
+