eventmachine 1.2.0.dev.2-x64-mingw32

Files changed (181)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +105 -0
  3. data/GNU +281 -0
  4. data/LICENSE +60 -0
  5. data/README.md +108 -0
  6. data/docs/DocumentationGuidesIndex.md +27 -0
  7. data/docs/GettingStarted.md +521 -0
  8. data/docs/old/ChangeLog +211 -0
  9. data/docs/old/DEFERRABLES +246 -0
  10. data/docs/old/EPOLL +141 -0
  11. data/docs/old/INSTALL +13 -0
  12. data/docs/old/KEYBOARD +42 -0
  13. data/docs/old/LEGAL +25 -0
  14. data/docs/old/LIGHTWEIGHT_CONCURRENCY +130 -0
  15. data/docs/old/PURE_RUBY +75 -0
  16. data/docs/old/RELEASE_NOTES +94 -0
  17. data/docs/old/SMTP +4 -0
  18. data/docs/old/SPAWNED_PROCESSES +148 -0
  19. data/docs/old/TODO +8 -0
  20. data/examples/guides/getting_started/01_eventmachine_echo_server.rb +18 -0
  21. data/examples/guides/getting_started/02_eventmachine_echo_server_that_recognizes_exit_command.rb +22 -0
  22. data/examples/guides/getting_started/03_simple_chat_server.rb +149 -0
  23. data/examples/guides/getting_started/04_simple_chat_server_step_one.rb +27 -0
  24. data/examples/guides/getting_started/05_simple_chat_server_step_two.rb +43 -0
  25. data/examples/guides/getting_started/06_simple_chat_server_step_three.rb +98 -0
  26. data/examples/guides/getting_started/07_simple_chat_server_step_four.rb +121 -0
  27. data/examples/guides/getting_started/08_simple_chat_server_step_five.rb +141 -0
  28. data/examples/old/ex_channel.rb +43 -0
  29. data/examples/old/ex_queue.rb +2 -0
  30. data/examples/old/ex_tick_loop_array.rb +15 -0
  31. data/examples/old/ex_tick_loop_counter.rb +32 -0
  32. data/examples/old/helper.rb +2 -0
  33. data/ext/binder.cpp +124 -0
  34. data/ext/binder.h +46 -0
  35. data/ext/cmain.cpp +988 -0
  36. data/ext/ed.cpp +2111 -0
  37. data/ext/ed.h +442 -0
  38. data/ext/em.cpp +2379 -0
  39. data/ext/em.h +308 -0
  40. data/ext/eventmachine.h +143 -0
  41. data/ext/extconf.rb +270 -0
  42. data/ext/fastfilereader/extconf.rb +110 -0
  43. data/ext/fastfilereader/mapper.cpp +216 -0
  44. data/ext/fastfilereader/mapper.h +59 -0
  45. data/ext/fastfilereader/rubymain.cpp +127 -0
  46. data/ext/kb.cpp +79 -0
  47. data/ext/page.cpp +107 -0
  48. data/ext/page.h +51 -0
  49. data/ext/pipe.cpp +354 -0
  50. data/ext/project.h +176 -0
  51. data/ext/rubymain.cpp +1504 -0
  52. data/ext/ssl.cpp +615 -0
  53. data/ext/ssl.h +103 -0
  54. data/java/.classpath +8 -0
  55. data/java/.project +17 -0
  56. data/java/src/com/rubyeventmachine/EmReactor.java +591 -0
  57. data/java/src/com/rubyeventmachine/EmReactorException.java +40 -0
  58. data/java/src/com/rubyeventmachine/EventableChannel.java +72 -0
  59. data/java/src/com/rubyeventmachine/EventableDatagramChannel.java +201 -0
  60. data/java/src/com/rubyeventmachine/EventableSocketChannel.java +415 -0
  61. data/lib/2.0/fastfilereaderext.so +0 -0
  62. data/lib/2.0/rubyeventmachine.so +0 -0
  63. data/lib/2.1/fastfilereaderext.so +0 -0
  64. data/lib/2.1/rubyeventmachine.so +0 -0
  65. data/lib/2.2/fastfilereaderext.so +0 -0
  66. data/lib/2.2/rubyeventmachine.so +0 -0
  67. data/lib/2.3/fastfilereaderext.so +0 -0
  68. data/lib/2.3/rubyeventmachine.so +0 -0
  69. data/lib/em/buftok.rb +59 -0
  70. data/lib/em/callback.rb +58 -0
  71. data/lib/em/channel.rb +69 -0
  72. data/lib/em/completion.rb +304 -0
  73. data/lib/em/connection.rb +770 -0
  74. data/lib/em/deferrable.rb +210 -0
  75. data/lib/em/deferrable/pool.rb +2 -0
  76. data/lib/em/file_watch.rb +73 -0
  77. data/lib/em/future.rb +61 -0
  78. data/lib/em/iterator.rb +252 -0
  79. data/lib/em/messages.rb +66 -0
  80. data/lib/em/pool.rb +151 -0
  81. data/lib/em/process_watch.rb +45 -0
  82. data/lib/em/processes.rb +123 -0
  83. data/lib/em/protocols.rb +37 -0
  84. data/lib/em/protocols/header_and_content.rb +138 -0
  85. data/lib/em/protocols/httpclient.rb +299 -0
  86. data/lib/em/protocols/httpclient2.rb +600 -0
  87. data/lib/em/protocols/line_and_text.rb +125 -0
  88. data/lib/em/protocols/line_protocol.rb +29 -0
  89. data/lib/em/protocols/linetext2.rb +166 -0
  90. data/lib/em/protocols/memcache.rb +331 -0
  91. data/lib/em/protocols/object_protocol.rb +46 -0
  92. data/lib/em/protocols/postgres3.rb +246 -0
  93. data/lib/em/protocols/saslauth.rb +175 -0
  94. data/lib/em/protocols/smtpclient.rb +394 -0
  95. data/lib/em/protocols/smtpserver.rb +666 -0
  96. data/lib/em/protocols/socks4.rb +66 -0
  97. data/lib/em/protocols/stomp.rb +205 -0
  98. data/lib/em/protocols/tcptest.rb +54 -0
  99. data/lib/em/pure_ruby.rb +1022 -0
  100. data/lib/em/queue.rb +80 -0
  101. data/lib/em/resolver.rb +232 -0
  102. data/lib/em/spawnable.rb +84 -0
  103. data/lib/em/streamer.rb +118 -0
  104. data/lib/em/threaded_resource.rb +90 -0
  105. data/lib/em/tick_loop.rb +85 -0
  106. data/lib/em/timers.rb +61 -0
  107. data/lib/em/version.rb +3 -0
  108. data/lib/eventmachine.rb +1584 -0
  109. data/lib/fastfilereaderext.rb +2 -0
  110. data/lib/jeventmachine.rb +301 -0
  111. data/lib/rubyeventmachine.rb +2 -0
  112. data/rakelib/package.rake +120 -0
  113. data/rakelib/test.rake +8 -0
  114. data/tests/client.crt +31 -0
  115. data/tests/client.key +51 -0
  116. data/tests/dhparam.pem +13 -0
  117. data/tests/em_test_helper.rb +151 -0
  118. data/tests/test_attach.rb +151 -0
  119. data/tests/test_basic.rb +283 -0
  120. data/tests/test_channel.rb +75 -0
  121. data/tests/test_completion.rb +178 -0
  122. data/tests/test_connection_count.rb +54 -0
  123. data/tests/test_connection_write.rb +35 -0
  124. data/tests/test_defer.rb +35 -0
  125. data/tests/test_deferrable.rb +35 -0
  126. data/tests/test_epoll.rb +142 -0
  127. data/tests/test_error_handler.rb +38 -0
  128. data/tests/test_exc.rb +28 -0
  129. data/tests/test_file_watch.rb +66 -0
  130. data/tests/test_fork.rb +75 -0
  131. data/tests/test_futures.rb +170 -0
  132. data/tests/test_get_sock_opt.rb +37 -0
  133. data/tests/test_handler_check.rb +35 -0
  134. data/tests/test_hc.rb +155 -0
  135. data/tests/test_httpclient.rb +233 -0
  136. data/tests/test_httpclient2.rb +128 -0
  137. data/tests/test_idle_connection.rb +25 -0
  138. data/tests/test_inactivity_timeout.rb +54 -0
  139. data/tests/test_ipv4.rb +125 -0
  140. data/tests/test_ipv6.rb +131 -0
  141. data/tests/test_iterator.rb +115 -0
  142. data/tests/test_kb.rb +28 -0
  143. data/tests/test_line_protocol.rb +33 -0
  144. data/tests/test_ltp.rb +138 -0
  145. data/tests/test_ltp2.rb +308 -0
  146. data/tests/test_many_fds.rb +22 -0
  147. data/tests/test_next_tick.rb +104 -0
  148. data/tests/test_object_protocol.rb +36 -0
  149. data/tests/test_pause.rb +107 -0
  150. data/tests/test_pending_connect_timeout.rb +52 -0
  151. data/tests/test_pool.rb +196 -0
  152. data/tests/test_process_watch.rb +50 -0
  153. data/tests/test_processes.rb +128 -0
  154. data/tests/test_proxy_connection.rb +180 -0
  155. data/tests/test_pure.rb +88 -0
  156. data/tests/test_queue.rb +64 -0
  157. data/tests/test_resolver.rb +104 -0
  158. data/tests/test_running.rb +14 -0
  159. data/tests/test_sasl.rb +47 -0
  160. data/tests/test_send_file.rb +217 -0
  161. data/tests/test_servers.rb +33 -0
  162. data/tests/test_set_sock_opt.rb +39 -0
  163. data/tests/test_shutdown_hooks.rb +23 -0
  164. data/tests/test_smtpclient.rb +75 -0
  165. data/tests/test_smtpserver.rb +57 -0
  166. data/tests/test_spawn.rb +293 -0
  167. data/tests/test_ssl_args.rb +78 -0
  168. data/tests/test_ssl_dhparam.rb +83 -0
  169. data/tests/test_ssl_ecdh_curve.rb +79 -0
  170. data/tests/test_ssl_extensions.rb +49 -0
  171. data/tests/test_ssl_methods.rb +65 -0
  172. data/tests/test_ssl_protocols.rb +246 -0
  173. data/tests/test_ssl_verify.rb +126 -0
  174. data/tests/test_stomp.rb +37 -0
  175. data/tests/test_system.rb +46 -0
  176. data/tests/test_threaded_resource.rb +61 -0
  177. data/tests/test_tick_loop.rb +59 -0
  178. data/tests/test_timers.rb +123 -0
  179. data/tests/test_ud.rb +8 -0
  180. data/tests/test_unbind_reason.rb +52 -0
  181. metadata +381 -0
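
Before the native sources below, the bundled guides (data/docs/GettingStarted.md and the examples under data/examples/guides/getting_started) show how this reactor core is driven from Ruby. As a quick orientation, here is a minimal echo-server sketch in the spirit of 01_eventmachine_echo_server.rb; the port, module name, and the "exit" convention are illustrative rather than copied from the gem:

require 'eventmachine'

# A tiny echo handler; EM mixes this module into an EM::Connection instance.
module EchoServer
  def receive_data(data)
    send_data(data)                                     # echo the bytes straight back
    close_connection_after_writing if data.strip == 'exit'
  end
end

EM.run do
  # Illustrative address and port.
  EM.start_server('127.0.0.1', 8081, EchoServer)
end

Any plain TCP client (another EM connection, or a telnet session) can exercise it; EM.stop shuts the reactor down from inside the loop.
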
data/ext/ed.h
@@ -0,0 +1,442 @@
+ /*****************************************************************************
+
+ $Id$
+
+ File: ed.h
+ Date: 06Apr06
+
+ Copyright (C) 2006-07 by Francis Cianfrocca. All Rights Reserved.
+ Gmail: blackhedd
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of either: 1) the GNU General Public License
+ as published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version; or 2) Ruby's License.
+
+ See the file COPYING for complete licensing information.
+
+ *****************************************************************************/
+
+ #ifndef __EventableDescriptor__H_
+ #define __EventableDescriptor__H_
+
+
+ class EventMachine_t; // forward reference
+ #ifdef WITH_SSL
+ class SslBox_t; // forward reference
+ #endif
+
+ bool SetSocketNonblocking (SOCKET);
+ bool SetFdCloexec (int);
+
+ /*************************
+ class EventableDescriptor
+ *************************/
+
+ class EventableDescriptor: public Bindable_t
+ {
+ public:
+ EventableDescriptor (SOCKET, EventMachine_t*);
+ virtual ~EventableDescriptor();
+
+ SOCKET GetSocket() {return MySocket;}
+ void SetSocketInvalid() { MySocket = INVALID_SOCKET; }
+ void Close();
+
+ virtual void Read() = 0;
+ virtual void Write() = 0;
+ virtual void Heartbeat() = 0;
+
+ // These methods tell us whether the descriptor
+ // should be selected or polled for read/write.
+ virtual bool SelectForRead() = 0;
+ virtual bool SelectForWrite() = 0;
+
+ // are we scheduled for a close, or in an error state, or already closed?
+ bool ShouldDelete();
+ // Do we have any data to write? This is used by ShouldDelete.
+ virtual int GetOutboundDataSize() {return 0;}
+ virtual bool IsWatchOnly(){ return bWatchOnly; }
+
+ virtual void ScheduleClose (bool after_writing);
+ bool IsCloseScheduled();
+ virtual void HandleError(){ ScheduleClose (false); }
+
+ void SetEventCallback (EMCallback);
+
+ virtual bool GetPeername (struct sockaddr*, socklen_t*) {return false;}
+ virtual bool GetSockname (struct sockaddr*, socklen_t*) {return false;}
+ virtual bool GetSubprocessPid (pid_t*) {return false;}
+
+ virtual void StartTls() {}
+ virtual void SetTlsParms (const char *, const char *, bool, bool, const char *, const char *, const char *, const char *, int) {}
+
+ #ifdef WITH_SSL
+ virtual X509 *GetPeerCert() {return NULL;}
+ virtual int GetCipherBits() {return -1;}
+ virtual const char *GetCipherName() {return NULL;}
+ virtual const char *GetCipherProtocol() {return NULL;}
+ virtual const char *GetSNIHostname() {return NULL;}
+ #endif
+
+ virtual uint64_t GetCommInactivityTimeout() {return 0;}
+ virtual int SetCommInactivityTimeout (uint64_t) {return 0;}
+ uint64_t GetPendingConnectTimeout();
+ int SetPendingConnectTimeout (uint64_t value);
+ uint64_t GetLastActivity() { return LastActivity; }
+
+ #ifdef HAVE_EPOLL
+ struct epoll_event *GetEpollEvent() { return &EpollEvent; }
+ #endif
+
+ #ifdef HAVE_KQUEUE
+ bool GetKqueueArmWrite() { return bKqueueArmWrite; }
+ #endif
+
+ virtual void StartProxy(const uintptr_t, const unsigned long, const unsigned long);
+ virtual void StopProxy();
+ virtual unsigned long GetProxiedBytes(){ return ProxiedBytes; };
+ virtual void SetProxiedFrom(EventableDescriptor*, const unsigned long);
+ virtual int SendOutboundData(const char*,unsigned long){ return -1; }
+ virtual bool IsPaused(){ return bPaused; }
+ virtual bool Pause(){ bPaused = true; return bPaused; }
+ virtual bool Resume(){ bPaused = false; return bPaused; }
+
+ void SetUnbindReasonCode(int code){ UnbindReasonCode = code; }
+ virtual int ReportErrorStatus(){ return 0; }
+ virtual bool IsConnectPending(){ return false; }
+ virtual uint64_t GetNextHeartbeat();
+
+ private:
+ bool bCloseNow;
+ bool bCloseAfterWriting;
+
+ protected:
+ SOCKET MySocket;
+ bool bAttached;
+ bool bWatchOnly;
+
+ EMCallback EventCallback;
+ void _GenericInboundDispatch(const char *buffer, unsigned long size);
+
+ uint64_t CreatedAt;
+ bool bCallbackUnbind;
+ int UnbindReasonCode;
+
+ unsigned long BytesToProxy;
+ EventableDescriptor *ProxyTarget;
+ EventableDescriptor *ProxiedFrom;
+ unsigned long ProxiedBytes;
+
+ unsigned long MaxOutboundBufSize;
+
+ #ifdef HAVE_EPOLL
+ struct epoll_event EpollEvent;
+ #endif
+
+ #ifdef HAVE_KQUEUE
+ bool bKqueueArmWrite;
+ #endif
+
+ EventMachine_t *MyEventMachine;
+ uint64_t PendingConnectTimeout;
+ uint64_t InactivityTimeout;
+ uint64_t LastActivity;
+ uint64_t NextHeartbeat;
+ bool bPaused;
+ };
+
+
+
+ /*************************
+ class LoopbreakDescriptor
+ *************************/
+
+ class LoopbreakDescriptor: public EventableDescriptor
+ {
+ public:
+ LoopbreakDescriptor (SOCKET, EventMachine_t*);
+ virtual ~LoopbreakDescriptor() {}
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat() {}
+
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite() {return false;}
+ };
+
+
+ /**************************
+ class ConnectionDescriptor
+ **************************/
+
+ class ConnectionDescriptor: public EventableDescriptor
+ {
+ public:
+ ConnectionDescriptor (SOCKET, EventMachine_t*);
+ virtual ~ConnectionDescriptor();
+
+ int SendOutboundData (const char*, unsigned long);
+
+ void SetConnectPending (bool f);
+ virtual void ScheduleClose (bool after_writing);
+ virtual void HandleError();
+
+ void SetNotifyReadable (bool);
+ void SetNotifyWritable (bool);
+ void SetAttached (bool);
+ void SetWatchOnly (bool);
+
+ bool Pause();
+ bool Resume();
+
+ bool IsNotifyReadable(){ return bNotifyReadable; }
+ bool IsNotifyWritable(){ return bNotifyWritable; }
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead();
+ virtual bool SelectForWrite();
+
+ // Do we have any data to write? This is used by ShouldDelete.
+ virtual int GetOutboundDataSize() {return OutboundDataSize;}
+
+ virtual void StartTls();
+ virtual void SetTlsParms (const char *, const char *, bool, bool, const char *, const char *, const char *, const char *, int);
+
+ #ifdef WITH_SSL
+ virtual X509 *GetPeerCert();
+ virtual int GetCipherBits();
+ virtual const char *GetCipherName();
+ virtual const char *GetCipherProtocol();
+ virtual const char *GetSNIHostname();
+ virtual bool VerifySslPeer(const char*);
+ virtual void AcceptSslPeer();
+ #endif
+
+ void SetServerMode() {bIsServer = true;}
+
+ virtual bool GetPeername (struct sockaddr*, socklen_t*);
+ virtual bool GetSockname (struct sockaddr*, socklen_t*);
+
+ virtual uint64_t GetCommInactivityTimeout();
+ virtual int SetCommInactivityTimeout (uint64_t value);
+
+ virtual int ReportErrorStatus();
+ virtual bool IsConnectPending(){ return bConnectPending; }
+
+ protected:
+ struct OutboundPage {
+ OutboundPage (const char *b, int l, int o=0): Buffer(b), Length(l), Offset(o) {}
+ void Free() {if (Buffer) free (const_cast<char*>(Buffer)); }
+ const char *Buffer;
+ int Length;
+ int Offset;
+ };
+
+ protected:
+ bool bConnectPending;
+
+ bool bNotifyReadable;
+ bool bNotifyWritable;
+
+ bool bReadAttemptedAfterClose;
+ bool bWriteAttemptedAfterClose;
+
+ deque<OutboundPage> OutboundPages;
+ int OutboundDataSize;
+
+ #ifdef WITH_SSL
+ SslBox_t *SslBox;
+ std::string CertChainFilename;
+ std::string PrivateKeyFilename;
+ std::string CipherList;
+ std::string EcdhCurve;
+ std::string DhParam;
+ int Protocols;
+ bool bHandshakeSignaled;
+ bool bSslVerifyPeer;
+ bool bSslFailIfNoPeerCert;
+ std::string SniHostName;
+ bool bSslPeerAccepted;
+ #endif
+
+ #ifdef HAVE_KQUEUE
+ bool bGotExtraKqueueEvent;
+ #endif
+
+ bool bIsServer;
+
+ private:
+ void _UpdateEvents();
+ void _UpdateEvents(bool, bool);
+ void _WriteOutboundData();
+ void _DispatchInboundData (const char *buffer, unsigned long size);
+ void _DispatchCiphertext();
+ int _SendRawOutboundData (const char *buffer, unsigned long size);
+ void _CheckHandshakeStatus();
+
+ };
+
+
+ /************************
+ class DatagramDescriptor
+ ************************/
+
+ class DatagramDescriptor: public EventableDescriptor
+ {
+ public:
+ DatagramDescriptor (SOCKET, EventMachine_t*);
+ virtual ~DatagramDescriptor();
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite();
+
+ int SendOutboundData (const char*, unsigned long);
+ int SendOutboundDatagram (const char*, unsigned long, const char*, int);
+
+ // Do we have any data to write? This is used by ShouldDelete.
+ virtual int GetOutboundDataSize() {return OutboundDataSize;}
+
+ virtual bool GetPeername (struct sockaddr*, socklen_t*);
+ virtual bool GetSockname (struct sockaddr*, socklen_t*);
+
+ virtual uint64_t GetCommInactivityTimeout();
+ virtual int SetCommInactivityTimeout (uint64_t value);
+
+ protected:
+ struct OutboundPage {
+ OutboundPage (const char *b, int l, struct sockaddr_in6 f, int o=0): Buffer(b), Length(l), Offset(o), From(f) {}
+ void Free() {if (Buffer) free (const_cast<char*>(Buffer)); }
+ const char *Buffer;
+ int Length;
+ int Offset;
+ struct sockaddr_in6 From;
+ };
+
+ deque<OutboundPage> OutboundPages;
+ int OutboundDataSize;
+
+ struct sockaddr_in6 ReturnAddress;
+ };
+
+
+ /************************
+ class AcceptorDescriptor
+ ************************/
+
+ class AcceptorDescriptor: public EventableDescriptor
+ {
+ public:
+ AcceptorDescriptor (SOCKET, EventMachine_t*);
+ virtual ~AcceptorDescriptor();
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite() {return false;}
+
+ virtual bool GetSockname (struct sockaddr*, socklen_t*);
+
+ static void StopAcceptor (const uintptr_t binding);
+ };
+
+ /********************
+ class PipeDescriptor
+ ********************/
+
+ #ifdef OS_UNIX
+ class PipeDescriptor: public EventableDescriptor
+ {
+ public:
+ PipeDescriptor (SOCKET, pid_t, EventMachine_t*);
+ virtual ~PipeDescriptor();
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead();
+ virtual bool SelectForWrite();
+
+ int SendOutboundData (const char*, unsigned long);
+ virtual int GetOutboundDataSize() {return OutboundDataSize;}
+
+ virtual bool GetSubprocessPid (pid_t*);
+
+ protected:
+ struct OutboundPage {
+ OutboundPage (const char *b, int l, int o=0): Buffer(b), Length(l), Offset(o) {}
+ void Free() {if (Buffer) free (const_cast<char*>(Buffer)); }
+ const char *Buffer;
+ int Length;
+ int Offset;
+ };
+
+ protected:
+ bool bReadAttemptedAfterClose;
+
+ deque<OutboundPage> OutboundPages;
+ int OutboundDataSize;
+
+ pid_t SubprocessPid;
+
+ private:
+ void _DispatchInboundData (const char *buffer, int size);
+ };
+ #endif // OS_UNIX
+
+
+ /************************
+ class KeyboardDescriptor
+ ************************/
+
+ class KeyboardDescriptor: public EventableDescriptor
+ {
+ public:
+ KeyboardDescriptor (EventMachine_t*);
+ virtual ~KeyboardDescriptor();
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite() {return false;}
+
+ protected:
+ bool bReadAttemptedAfterClose;
+
+ private:
+ void _DispatchInboundData (const char *buffer, int size);
+ };
+
+
+ /***********************
+ class InotifyDescriptor
+ ************************/
+
+ class InotifyDescriptor: public EventableDescriptor
+ {
+ public:
+ InotifyDescriptor (EventMachine_t*);
+ virtual ~InotifyDescriptor();
+
+ void Read();
+ void Write();
+
+ virtual void Heartbeat() {}
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite() {return false;}
+ };
+
+ #endif // __EventableDescriptor__H_
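
The header above declares the reactor-side descriptor types; ConnectionDescriptor's watch-only and notify flags (bWatchOnly, SetNotifyReadable, SetNotifyWritable) are what EventMachine's Ruby-level EM.watch and EM.attach toggle. A hedged sketch of that usage, with an illustrative pipe, handler name, and buffer size:

require 'eventmachine'

# Watch an existing IO for readability without letting EM read it for us.
module PipeWatcher
  def notify_readable
    # @io is the watched IO object that EM.watch stored on this connection.
    puts @io.read_nonblock(1024)
    detach      # stop watching; the IO stays open and under our control
    EM.stop
  end
end

EM.run do
  rd, wr = IO.pipe
  wr.write("hello from the watched pipe\n")
  watcher = EM.watch(rd, PipeWatcher)
  watcher.notify_readable = true
end

EM.attach works the same way but lets the reactor do the reading and writing, which on this side corresponds roughly to an attached ConnectionDescriptor that is not watch-only.
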
data/ext/em.cpp
@@ -0,0 +1,2379 @@
1
+ /*****************************************************************************
2
+
3
+ $Id$
4
+
5
+ File: em.cpp
6
+ Date: 06Apr06
7
+
8
+ Copyright (C) 2006-07 by Francis Cianfrocca. All Rights Reserved.
9
+ Gmail: blackhedd
10
+
11
+ This program is free software; you can redistribute it and/or modify
12
+ it under the terms of either: 1) the GNU General Public License
13
+ as published by the Free Software Foundation; either version 2 of the
14
+ License, or (at your option) any later version; or 2) Ruby's License.
15
+
16
+ See the file COPYING for complete licensing information.
17
+
18
+ *****************************************************************************/
19
+
20
+ // THIS ENTIRE FILE WILL EVENTUALLY BE FOR UNIX BUILDS ONLY.
21
+ //#ifdef OS_UNIX
22
+
23
+ #include "project.h"
24
+
25
+ /* The numer of max outstanding timers was once a const enum defined in em.h.
26
+ * Now we define it here so that users can change its value if necessary.
27
+ */
28
+ static unsigned int MaxOutstandingTimers = 100000;
29
+
30
+ /* The number of accept() done at once in a single tick when the acceptor
31
+ * socket becomes readable.
32
+ */
33
+ static unsigned int SimultaneousAcceptCount = 10;
34
+
35
+ /* Internal helper to create a socket with SOCK_CLOEXEC set, and fall
36
+ * back to fcntl'ing it if the headers/runtime don't support it.
37
+ */
38
+ SOCKET EmSocket (int domain, int type, int protocol)
39
+ {
40
+ SOCKET sd;
41
+ #ifdef HAVE_SOCKET_CLOEXEC
42
+ sd = socket (domain, type | SOCK_CLOEXEC, protocol);
43
+ if (sd == INVALID_SOCKET) {
44
+ sd = socket (domain, type, protocol);
45
+ if (sd < 0) {
46
+ return sd;
47
+ }
48
+ SetFdCloexec(sd);
49
+ }
50
+ #else
51
+ sd = socket (domain, type, protocol);
52
+ if (sd == INVALID_SOCKET) {
53
+ return sd;
54
+ }
55
+ SetFdCloexec(sd);
56
+ #endif
57
+ return sd;
58
+ }
59
+
60
+
61
+ /***************************************
62
+ STATIC EventMachine_t::GetMaxTimerCount
63
+ ***************************************/
64
+
65
+ int EventMachine_t::GetMaxTimerCount()
66
+ {
67
+ return MaxOutstandingTimers;
68
+ }
69
+
70
+
71
+ /***************************************
72
+ STATIC EventMachine_t::SetMaxTimerCount
73
+ ***************************************/
74
+
75
+ void EventMachine_t::SetMaxTimerCount (int count)
76
+ {
77
+ /* Allow a user to increase the maximum number of outstanding timers.
78
+ * If this gets "too high" (a metric that is of course platform dependent),
79
+ * bad things will happen like performance problems and possible overuse
80
+ * of memory.
81
+ * The actual timer mechanism is very efficient so it's hard to know what
82
+ * the practical max, but 100,000 shouldn't be too problematical.
83
+ */
84
+ if (count < 100)
85
+ count = 100;
86
+ MaxOutstandingTimers = count;
87
+ }
88
+
89
+ int EventMachine_t::GetSimultaneousAcceptCount()
90
+ {
91
+ return SimultaneousAcceptCount;
92
+ }
93
+
94
+ void EventMachine_t::SetSimultaneousAcceptCount (int count)
95
+ {
96
+ if (count < 1)
97
+ count = 1;
98
+ SimultaneousAcceptCount = count;
99
+ }
100
+
101
+
102
+ /******************************
103
+ EventMachine_t::EventMachine_t
104
+ ******************************/
105
+
106
+ EventMachine_t::EventMachine_t (EMCallback event_callback, Poller_t poller):
107
+ NumCloseScheduled (0),
108
+ HeartbeatInterval(2000000),
109
+ EventCallback (event_callback),
110
+ LoopBreakerReader (INVALID_SOCKET),
111
+ LoopBreakerWriter (INVALID_SOCKET),
112
+ bTerminateSignalReceived (false),
113
+ Poller (poller),
114
+ epfd (-1),
115
+ kqfd (-1)
116
+ #ifdef HAVE_INOTIFY
117
+ , inotify (NULL)
118
+ #endif
119
+ {
120
+ // Default time-slice is just smaller than one hundred mills.
121
+ Quantum.tv_sec = 0;
122
+ Quantum.tv_usec = 90000;
123
+
124
+ // Override the requested poller back to default if needed.
125
+ #if !defined(HAVE_EPOLL) && !defined(HAVE_KQUEUE)
126
+ Poller = Poller_Default;
127
+ #endif
128
+
129
+ /* Initialize monotonic timekeeping on OS X before the first call to GetRealTime */
130
+ #ifdef OS_DARWIN
131
+ (void) mach_timebase_info(&mach_timebase);
132
+ #endif
133
+
134
+ #ifdef OS_WIN32
135
+ TickCountTickover = 0;
136
+ LastTickCount = 0;
137
+ #endif
138
+
139
+ // Make sure the current loop time is sane, in case we do any initializations of
140
+ // objects before we start running.
141
+ _UpdateTime();
142
+
143
+ /* We initialize the network library here (only on Windows of course)
144
+ * and initialize "loop breakers." Our destructor also does some network-level
145
+ * cleanup. There's thus an implicit assumption that any given instance of EventMachine_t
146
+ * will only call ::Run once. Is that a good assumption? Should we move some of these
147
+ * inits and de-inits into ::Run?
148
+ */
149
+ #ifdef OS_WIN32
150
+ WSADATA w;
151
+ WSAStartup (MAKEWORD (1, 1), &w);
152
+ #endif
153
+
154
+ _InitializeLoopBreaker();
155
+ SelectData = new SelectData_t();
156
+ }
157
+
158
+
159
+ /*******************************
160
+ EventMachine_t::~EventMachine_t
161
+ *******************************/
162
+
163
+ EventMachine_t::~EventMachine_t()
164
+ {
165
+ // Run down descriptors
166
+ size_t i;
167
+ for (i = 0; i < NewDescriptors.size(); i++)
168
+ delete NewDescriptors[i];
169
+ for (i = 0; i < Descriptors.size(); i++)
170
+ delete Descriptors[i];
171
+
172
+ close (LoopBreakerReader);
173
+ close (LoopBreakerWriter);
174
+
175
+ // Remove any file watch descriptors
176
+ while(!Files.empty()) {
177
+ map<int, Bindable_t*>::iterator f = Files.begin();
178
+ UnwatchFile (f->first);
179
+ }
180
+
181
+ if (epfd != -1)
182
+ close (epfd);
183
+ if (kqfd != -1)
184
+ close (kqfd);
185
+
186
+ delete SelectData;
187
+ }
188
+
189
+
190
+ /****************************
191
+ EventMachine_t::ScheduleHalt
192
+ ****************************/
193
+
194
+ void EventMachine_t::ScheduleHalt()
195
+ {
196
+ /* This is how we stop the machine.
197
+ * This can be called by clients. Signal handlers will probably
198
+ * set the global flag.
199
+ * For now this means there can only be one EventMachine ever running at a time.
200
+ *
201
+ * IMPORTANT: keep this light, fast, and async-safe. Don't do anything frisky in here,
202
+ * because it may be called from signal handlers invoked from code that we don't
203
+ * control. At this writing (20Sep06), EM does NOT install any signal handlers of
204
+ * its own.
205
+ *
206
+ * We need a FAQ. And one of the questions is: how do I stop EM when Ctrl-C happens?
207
+ * The answer is to call evma_stop_machine, which calls here, from a SIGINT handler.
208
+ */
209
+ bTerminateSignalReceived = true;
210
+
211
+ /* Signal the loopbreaker so we break out of long-running select/epoll/kqueue and
212
+ * notice the halt boolean is set. Signalling the loopbreaker also uses a single
213
+ * signal-safe syscall.
214
+ */
215
+ SignalLoopBreaker();
216
+ }
217
+
218
+ bool EventMachine_t::Stopping()
219
+ {
220
+ return bTerminateSignalReceived;
221
+ }
222
+
223
+ /*******************************
224
+ EventMachine_t::SetTimerQuantum
225
+ *******************************/
226
+
227
+ void EventMachine_t::SetTimerQuantum (int interval)
228
+ {
229
+ /* We get a timer-quantum expressed in milliseconds.
230
+ */
231
+
232
+ if ((interval < 5) || (interval > 5*60*1000))
233
+ throw std::runtime_error ("invalid timer-quantum");
234
+
235
+ Quantum.tv_sec = interval / 1000;
236
+ Quantum.tv_usec = (interval % 1000) * 1000;
237
+ }
238
+
239
+
240
+ /*************************************
241
+ (STATIC) EventMachine_t::SetuidString
242
+ *************************************/
243
+
244
+ #ifdef OS_UNIX
245
+ void EventMachine_t::SetuidString (const char *username)
246
+ {
247
+ /* This method takes a caller-supplied username and tries to setuid
248
+ * to that user. There is no meaningful implementation (and no error)
249
+ * on Windows. On Unix, a failure to setuid the caller-supplied string
250
+ * causes a fatal abort, because presumably the program is calling here
251
+ * in order to fulfill a security requirement. If we fail silently,
252
+ * the user may continue to run with too much privilege.
253
+ *
254
+ * TODO, we need to decide on and document a way of generating C++ level errors
255
+ * that can be wrapped in documented Ruby exceptions, so users can catch
256
+ * and handle them. And distinguish it from errors that we WON'T let the Ruby
257
+ * user catch (like security-violations and resource-overallocation).
258
+ * A setuid failure here would be in the latter category.
259
+ */
260
+
261
+ if (!username || !*username)
262
+ throw std::runtime_error ("setuid_string failed: no username specified");
263
+
264
+ errno = 0;
265
+ struct passwd *p = getpwnam (username);
266
+ if (!p) {
267
+ if (errno) {
268
+ char buf[200];
269
+ snprintf (buf, sizeof(buf)-1, "setuid_string failed: %s", strerror(errno));
270
+ throw std::runtime_error (buf);
271
+ } else {
272
+ throw std::runtime_error ("setuid_string failed: unknown username");
273
+ }
274
+ }
275
+
276
+ if (setuid (p->pw_uid) != 0)
277
+ throw std::runtime_error ("setuid_string failed: no setuid");
278
+
279
+ // Success.
280
+ }
281
+ #else
282
+ void EventMachine_t::SetuidString (const char *username UNUSED) { }
283
+ #endif
284
+
285
+ /****************************************
286
+ (STATIC) EventMachine_t::SetRlimitNofile
287
+ ****************************************/
288
+
289
+ #ifdef OS_UNIX
290
+ int EventMachine_t::SetRlimitNofile (int nofiles)
291
+ {
292
+ struct rlimit rlim;
293
+ getrlimit (RLIMIT_NOFILE, &rlim);
294
+ if (nofiles >= 0) {
295
+ rlim.rlim_cur = nofiles;
296
+ if ((unsigned int)nofiles > rlim.rlim_max)
297
+ rlim.rlim_max = nofiles;
298
+ setrlimit (RLIMIT_NOFILE, &rlim);
299
+ // ignore the error return, for now at least.
300
+ // TODO, emit an error message someday when we have proper debug levels.
301
+ }
302
+ getrlimit (RLIMIT_NOFILE, &rlim);
303
+ return rlim.rlim_cur;
304
+ }
305
+ #else
306
+ int EventMachine_t::SetRlimitNofile (int nofiles UNUSED) { return 0; }
307
+ #endif
308
+
309
+ /*********************************
310
+ EventMachine_t::SignalLoopBreaker
311
+ *********************************/
312
+
313
+ void EventMachine_t::SignalLoopBreaker()
314
+ {
315
+ #ifdef OS_UNIX
316
+ (void)write (LoopBreakerWriter, "", 1);
317
+ #endif
318
+ #ifdef OS_WIN32
319
+ sendto (LoopBreakerReader, "", 0, 0, (struct sockaddr*)&(LoopBreakerTarget), sizeof(LoopBreakerTarget));
320
+ #endif
321
+ }
322
+
323
+
324
+ /**************************************
325
+ EventMachine_t::_InitializeLoopBreaker
326
+ **************************************/
327
+
328
+ void EventMachine_t::_InitializeLoopBreaker()
329
+ {
330
+ /* A "loop-breaker" is a socket-descriptor that we can write to in order
331
+ * to break the main select loop. Primarily useful for things running on
332
+ * threads other than the main EM thread, so they can trigger processing
333
+ * of events that arise exogenously to the EM.
334
+ * Keep the loop-breaker pipe out of the main descriptor set, otherwise
335
+ * its events will get passed on to user code.
336
+ */
337
+
338
+ #ifdef OS_UNIX
339
+ int fd[2];
340
+ #if defined (HAVE_CLOEXEC) && defined (HAVE_PIPE2)
341
+ int pipestatus = pipe2(fd, O_CLOEXEC);
342
+ if (pipestatus < 0) {
343
+ if (pipe(fd))
344
+ throw std::runtime_error (strerror(errno));
345
+ }
346
+ #else
347
+ if (pipe (fd))
348
+ throw std::runtime_error (strerror(errno));
349
+ #endif
350
+ if (!SetFdCloexec(fd[0]) || !SetFdCloexec(fd[1]))
351
+ throw std::runtime_error (strerror(errno));
352
+
353
+ LoopBreakerWriter = fd[1];
354
+ LoopBreakerReader = fd[0];
355
+
356
+ /* 16Jan11: Make sure the pipe is non-blocking, so more than 65k loopbreaks
357
+ * in one tick do not fill up the pipe and block the process on write() */
358
+ SetSocketNonblocking (LoopBreakerWriter);
359
+ #endif
360
+
361
+ #ifdef OS_WIN32
362
+ SOCKET sd = EmSocket (AF_INET, SOCK_DGRAM, 0);
363
+ if (sd == INVALID_SOCKET)
364
+ throw std::runtime_error ("no loop breaker socket");
365
+ SetSocketNonblocking (sd);
366
+
367
+ memset (&LoopBreakerTarget, 0, sizeof(LoopBreakerTarget));
368
+ LoopBreakerTarget.sin_family = AF_INET;
369
+ LoopBreakerTarget.sin_addr.s_addr = inet_addr ("127.0.0.1");
370
+
371
+ srand ((int)time(NULL));
372
+ int i;
373
+ for (i=0; i < 100; i++) {
374
+ int r = (rand() % 10000) + 20000;
375
+ LoopBreakerTarget.sin_port = htons (r);
376
+ if (bind (sd, (struct sockaddr*)&LoopBreakerTarget, sizeof(LoopBreakerTarget)) == 0)
377
+ break;
378
+ }
379
+
380
+ if (i == 100)
381
+ throw std::runtime_error ("no loop breaker");
382
+ LoopBreakerReader = sd;
383
+ #endif
384
+
385
+ #ifdef HAVE_EPOLL
386
+ if (Poller == Poller_Epoll) {
387
+ epfd = epoll_create (MaxEpollDescriptors);
388
+ if (epfd == -1) {
389
+ char buf[200];
390
+ snprintf (buf, sizeof(buf)-1, "unable to create epoll descriptor: %s", strerror(errno));
391
+ throw std::runtime_error (buf);
392
+ }
393
+ int cloexec = fcntl (epfd, F_GETFD, 0);
394
+ assert (cloexec >= 0);
395
+ cloexec |= FD_CLOEXEC;
396
+ fcntl (epfd, F_SETFD, cloexec);
397
+
398
+ assert (LoopBreakerReader >= 0);
399
+ LoopbreakDescriptor *ld = new LoopbreakDescriptor (LoopBreakerReader, this);
400
+ assert (ld);
401
+ Add (ld);
402
+ }
403
+ #endif
404
+
405
+ #ifdef HAVE_KQUEUE
406
+ if (Poller == Poller_Kqueue) {
407
+ kqfd = kqueue();
408
+ if (kqfd == -1) {
409
+ char buf[200];
410
+ snprintf (buf, sizeof(buf)-1, "unable to create kqueue descriptor: %s", strerror(errno));
411
+ throw std::runtime_error (buf);
412
+ }
413
+ // cloexec not needed. By definition, kqueues are not carried across forks.
414
+
415
+ assert (LoopBreakerReader >= 0);
416
+ LoopbreakDescriptor *ld = new LoopbreakDescriptor (LoopBreakerReader, this);
417
+ assert (ld);
418
+ Add (ld);
419
+ }
420
+ #endif
421
+ }
422
+
423
+ /***************************
424
+ EventMachine_t::_UpdateTime
425
+ ***************************/
426
+
427
+ void EventMachine_t::_UpdateTime()
428
+ {
429
+ MyCurrentLoopTime = GetRealTime();
430
+ }
431
+
432
+ /***************************
433
+ EventMachine_t::GetRealTime
434
+ ***************************/
435
+
436
+ // Two great writeups of cross-platform monotonic time are at:
437
+ // http://www.python.org/dev/peps/pep-0418
438
+ // http://nadeausoftware.com/articles/2012/04/c_c_tip_how_measure_elapsed_real_time_benchmarking
439
+ // Uncomment the #pragma messages to confirm which compile-time option was used
440
+ uint64_t EventMachine_t::GetRealTime()
441
+ {
442
+ uint64_t current_time;
443
+
444
+ #if defined(HAVE_CONST_CLOCK_MONOTONIC_RAW)
445
+ // #pragma message "GetRealTime: clock_gettime CLOCK_MONOTONIC_RAW"
446
+ // Linux 2.6.28 and above
447
+ struct timespec tv;
448
+ clock_gettime (CLOCK_MONOTONIC_RAW, &tv);
449
+ current_time = (((uint64_t)(tv.tv_sec)) * 1000000LL) + ((uint64_t)((tv.tv_nsec)/1000));
450
+
451
+ #elif defined(HAVE_CONST_CLOCK_MONOTONIC)
452
+ // #pragma message "GetRealTime: clock_gettime CLOCK_MONOTONIC"
453
+ // Linux, FreeBSD 5.0 and above, Solaris 8 and above, OpenBSD, NetBSD, DragonflyBSD
454
+ struct timespec tv;
455
+ clock_gettime (CLOCK_MONOTONIC, &tv);
456
+ current_time = (((uint64_t)(tv.tv_sec)) * 1000000LL) + ((uint64_t)((tv.tv_nsec)/1000));
457
+
458
+ #elif defined(HAVE_GETHRTIME)
459
+ // #pragma message "GetRealTime: gethrtime"
460
+ // Solaris and HP-UX
461
+ current_time = (uint64_t)gethrtime() / 1000;
462
+
463
+ #elif defined(OS_DARWIN)
464
+ // #pragma message "GetRealTime: mach_absolute_time"
465
+ // Mac OS X
466
+ // https://developer.apple.com/library/mac/qa/qa1398/_index.html
467
+ current_time = mach_absolute_time() * mach_timebase.numer / mach_timebase.denom / 1000;
468
+
469
+ #elif defined(OS_UNIX)
470
+ // #pragma message "GetRealTime: gettimeofday"
471
+ // Unix fallback
472
+ struct timeval tv;
473
+ gettimeofday (&tv, NULL);
474
+ current_time = (((uint64_t)(tv.tv_sec)) * 1000000LL) + ((uint64_t)(tv.tv_usec));
475
+
476
+ #elif defined(OS_WIN32)
477
+ // #pragma message "GetRealTime: GetTickCount"
478
+ // Future improvement: use GetTickCount64 in Windows Vista / Server 2008
479
+ unsigned tick = GetTickCount();
480
+ if (tick < LastTickCount)
481
+ TickCountTickover += 1;
482
+ LastTickCount = tick;
483
+ current_time = ((uint64_t)TickCountTickover << 32) + (uint64_t)tick;
484
+ current_time *= 1000; // convert to microseconds
485
+
486
+ #else
487
+ // #pragma message "GetRealTime: time"
488
+ // Universal fallback
489
+ current_time = (uint64_t)time(NULL) * 1000000LL;
490
+ #endif
491
+
492
+ return current_time;
493
+ }
494
+
495
+ /***********************************
496
+ EventMachine_t::_DispatchHeartbeats
497
+ ***********************************/
498
+
499
+ void EventMachine_t::_DispatchHeartbeats()
500
+ {
501
+ // Store the first processed heartbeat descriptor and bail out if
502
+ // we see it again. This fixes an infinite loop in case the system time
503
+ // is changed out from underneath MyCurrentLoopTime.
504
+ const EventableDescriptor *head = NULL;
505
+
506
+ while (true) {
507
+ multimap<uint64_t,EventableDescriptor*>::iterator i = Heartbeats.begin();
508
+ if (i == Heartbeats.end())
509
+ break;
510
+ if (i->first > MyCurrentLoopTime)
511
+ break;
512
+
513
+ EventableDescriptor *ed = i->second;
514
+ if (ed == head)
515
+ break;
516
+
517
+ ed->Heartbeat();
518
+ QueueHeartbeat(ed);
519
+
520
+ if (head == NULL)
521
+ head = ed;
522
+ }
523
+ }
524
+
525
+ /******************************
526
+ EventMachine_t::QueueHeartbeat
527
+ ******************************/
528
+
529
+ void EventMachine_t::QueueHeartbeat(EventableDescriptor *ed)
530
+ {
531
+ uint64_t heartbeat = ed->GetNextHeartbeat();
532
+
533
+ if (heartbeat) {
534
+ #ifndef HAVE_MAKE_PAIR
535
+ Heartbeats.insert (multimap<uint64_t,EventableDescriptor*>::value_type (heartbeat, ed));
536
+ #else
537
+ Heartbeats.insert (make_pair (heartbeat, ed));
538
+ #endif
539
+ }
540
+ }
541
+
542
+ /******************************
543
+ EventMachine_t::ClearHeartbeat
544
+ ******************************/
545
+
546
+ void EventMachine_t::ClearHeartbeat(uint64_t key, EventableDescriptor* ed)
547
+ {
548
+ multimap<uint64_t,EventableDescriptor*>::iterator it;
549
+ pair<multimap<uint64_t,EventableDescriptor*>::iterator,multimap<uint64_t,EventableDescriptor*>::iterator> ret;
550
+ ret = Heartbeats.equal_range (key);
551
+ for (it = ret.first; it != ret.second; ++it) {
552
+ if (it->second == ed) {
553
+ Heartbeats.erase (it);
554
+ break;
555
+ }
556
+ }
557
+ }
558
+
559
+ /*******************
560
+ EventMachine_t::Run
561
+ *******************/
562
+
563
+ void EventMachine_t::Run()
564
+ {
565
+ while (RunOnce()) ;
566
+ }
567
+
568
+ /***********************
569
+ EventMachine_t::RunOnce
570
+ ***********************/
571
+
572
+ bool EventMachine_t::RunOnce()
573
+ {
574
+ _UpdateTime();
575
+ _RunTimers();
576
+
577
+ /* _Add must precede _Modify because the same descriptor might
578
+ * be on both lists during the same pass through the machine,
579
+ * and to modify a descriptor before adding it would fail.
580
+ */
581
+ _AddNewDescriptors();
582
+ _ModifyDescriptors();
583
+
584
+ switch (Poller) {
585
+ case Poller_Epoll:
586
+ _RunEpollOnce();
587
+ break;
588
+ case Poller_Kqueue:
589
+ _RunKqueueOnce();
590
+ break;
591
+ case Poller_Default:
592
+ _RunSelectOnce();
593
+ break;
594
+ }
595
+
596
+ _DispatchHeartbeats();
597
+ _CleanupSockets();
598
+
599
+ if (bTerminateSignalReceived)
600
+ return false;
601
+
602
+ return true;
603
+ }
604
+
605
+
606
+ /*****************************
607
+ EventMachine_t::_RunEpollOnce
608
+ *****************************/
609
+
610
+ void EventMachine_t::_RunEpollOnce()
611
+ {
612
+ #ifdef HAVE_EPOLL
613
+ assert (epfd != -1);
614
+ int s;
615
+
616
+ timeval tv = _TimeTilNextEvent();
617
+
618
+ #ifdef BUILD_FOR_RUBY
619
+ int ret = 0;
620
+
621
+ #ifdef HAVE_RB_WAIT_FOR_SINGLE_FD
622
+ if ((ret = rb_wait_for_single_fd(epfd, RB_WAITFD_IN|RB_WAITFD_PRI, &tv)) < 1) {
623
+ #else
624
+ fd_set fdreads;
625
+
626
+ FD_ZERO(&fdreads);
627
+ FD_SET(epfd, &fdreads);
628
+
629
+ if ((ret = rb_thread_select(epfd + 1, &fdreads, NULL, NULL, &tv)) < 1) {
630
+ #endif
631
+ if (ret == -1) {
632
+ assert(errno != EINVAL);
633
+ assert(errno != EBADF);
634
+ }
635
+ return;
636
+ }
637
+
638
+ TRAP_BEG;
639
+ s = epoll_wait (epfd, epoll_events, MaxEvents, 0);
640
+ TRAP_END;
641
+ #else
642
+ int duration = 0;
643
+ duration = duration + (tv.tv_sec * 1000);
644
+ duration = duration + (tv.tv_usec / 1000);
645
+ s = epoll_wait (epfd, epoll_events, MaxEvents, duration);
646
+ #endif
647
+
648
+ if (s > 0) {
649
+ for (int i=0; i < s; i++) {
650
+ EventableDescriptor *ed = (EventableDescriptor*) epoll_events[i].data.ptr;
651
+
652
+ if (ed->IsWatchOnly() && ed->GetSocket() == INVALID_SOCKET)
653
+ continue;
654
+
655
+ assert(ed->GetSocket() != INVALID_SOCKET);
656
+
657
+ if (epoll_events[i].events & EPOLLIN)
658
+ ed->Read();
659
+ if (epoll_events[i].events & EPOLLOUT)
660
+ ed->Write();
661
+ if (epoll_events[i].events & (EPOLLERR | EPOLLHUP))
662
+ ed->HandleError();
663
+ }
664
+ }
665
+ else if (s < 0) {
666
+ // epoll_wait can fail on error in a handful of ways.
667
+ // If this happens, then wait for a little while to avoid busy-looping.
668
+ // If the error was EINTR, we probably caught SIGCHLD or something,
669
+ // so keep the wait short.
670
+ timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
671
+ EmSelect (0, NULL, NULL, NULL, &tv);
672
+ }
673
+ #else
674
+ throw std::runtime_error ("epoll is not implemented on this platform");
675
+ #endif
676
+ }
677
+
678
+
679
+ /******************************
680
+ EventMachine_t::_RunKqueueOnce
681
+ ******************************/
682
+
683
+ #ifdef HAVE_KQUEUE
684
+ void EventMachine_t::_RunKqueueOnce()
685
+ {
686
+ assert (kqfd != -1);
687
+ int k;
688
+
689
+ timeval tv = _TimeTilNextEvent();
690
+
691
+ struct timespec ts;
692
+ ts.tv_sec = tv.tv_sec;
693
+ ts.tv_nsec = tv.tv_usec * 1000;
694
+
695
+ #ifdef BUILD_FOR_RUBY
696
+ int ret = 0;
697
+
698
+ #ifdef HAVE_RB_WAIT_FOR_SINGLE_FD
699
+ if ((ret = rb_wait_for_single_fd(kqfd, RB_WAITFD_IN|RB_WAITFD_PRI, &tv)) < 1) {
700
+ #else
701
+ fd_set fdreads;
702
+
703
+ FD_ZERO(&fdreads);
704
+ FD_SET(kqfd, &fdreads);
705
+
706
+ if ((ret = rb_thread_select(kqfd + 1, &fdreads, NULL, NULL, &tv)) < 1) {
707
+ #endif
708
+ if (ret == -1) {
709
+ assert(errno != EINVAL);
710
+ assert(errno != EBADF);
711
+ }
712
+ return;
713
+ }
714
+
715
+ TRAP_BEG;
716
+ ts.tv_sec = ts.tv_nsec = 0;
717
+ k = kevent (kqfd, NULL, 0, Karray, MaxEvents, &ts);
718
+ TRAP_END;
719
+ #else
720
+ k = kevent (kqfd, NULL, 0, Karray, MaxEvents, &ts);
721
+ #endif
722
+
723
+ struct kevent *ke = Karray;
724
+ while (k > 0) {
725
+ switch (ke->filter)
726
+ {
727
+ case EVFILT_VNODE:
728
+ _HandleKqueueFileEvent (ke);
729
+ break;
730
+
731
+ case EVFILT_PROC:
732
+ _HandleKqueuePidEvent (ke);
733
+ break;
734
+
735
+ case EVFILT_READ:
736
+ case EVFILT_WRITE:
737
+ EventableDescriptor *ed = (EventableDescriptor*) (ke->udata);
738
+ assert (ed);
739
+
740
+ if (ed->IsWatchOnly() && ed->GetSocket() == INVALID_SOCKET)
741
+ break;
742
+
743
+ if (ke->filter == EVFILT_READ)
744
+ ed->Read();
745
+ else if (ke->filter == EVFILT_WRITE)
746
+ ed->Write();
747
+ else
748
+ cerr << "Discarding unknown kqueue event " << ke->filter << endl;
749
+
750
+ break;
751
+ }
752
+
753
+ --k;
754
+ ++ke;
755
+ }
756
+
757
+ // TODO, replace this with rb_thread_blocking_region for 1.9 builds.
758
+ #ifdef BUILD_FOR_RUBY
759
+ if (!rb_thread_alone()) {
760
+ rb_thread_schedule();
761
+ }
762
+ #endif
763
+ }
764
+ #else
765
+ void EventMachine_t::_RunKqueueOnce()
766
+ {
767
+ throw std::runtime_error ("kqueue is not implemented on this platform");
768
+ }
769
+ #endif
770
+
771
+
772
+ /*********************************
773
+ EventMachine_t::_TimeTilNextEvent
774
+ *********************************/
775
+
776
+ timeval EventMachine_t::_TimeTilNextEvent()
777
+ {
778
+ // 29jul11: Changed calculation base from MyCurrentLoopTime to the
779
+ // real time. As MyCurrentLoopTime is set at the beginning of an
780
+ // iteration and this calculation is done at the end, evenmachine
781
+ // will potentially oversleep by the amount of time the iteration
782
+ // took to execute.
783
+ uint64_t next_event = 0;
784
+ uint64_t current_time = GetRealTime();
785
+
786
+ if (!Heartbeats.empty()) {
787
+ multimap<uint64_t,EventableDescriptor*>::iterator heartbeats = Heartbeats.begin();
788
+ next_event = heartbeats->first;
789
+ }
790
+
791
+ if (!Timers.empty()) {
792
+ multimap<uint64_t,Timer_t>::iterator timers = Timers.begin();
793
+ if (next_event == 0 || timers->first < next_event)
794
+ next_event = timers->first;
795
+ }
796
+
797
+ if (!NewDescriptors.empty() || !ModifiedDescriptors.empty()) {
798
+ next_event = current_time;
799
+ }
800
+
801
+ timeval tv;
802
+
803
+ if (NumCloseScheduled > 0 || bTerminateSignalReceived) {
804
+ tv.tv_sec = tv.tv_usec = 0;
805
+ } else if (next_event == 0) {
806
+ tv = Quantum;
807
+ } else {
808
+ if (next_event > current_time) {
809
+ uint64_t duration = next_event - current_time;
810
+ tv.tv_sec = duration / 1000000;
811
+ tv.tv_usec = duration % 1000000;
812
+ } else {
813
+ tv.tv_sec = tv.tv_usec = 0;
814
+ }
815
+ }
816
+
817
+ return tv;
818
+ }
819
+
820
+ /*******************************
821
+ EventMachine_t::_CleanupSockets
822
+ *******************************/
823
+
824
+ void EventMachine_t::_CleanupSockets()
825
+ {
826
+ // TODO, rip this out and only delete the descriptors we know have died,
827
+ // rather than traversing the whole list.
828
+ // Modified 05Jan08 per suggestions by Chris Heath. It's possible that
829
+ // an EventableDescriptor will have a descriptor value of -1. That will
830
+ // happen if EventableDescriptor::Close was called on it. In that case,
831
+ // don't call epoll_ctl to remove the socket's filters from the epoll set.
832
+ // According to the epoll docs, this happens automatically when the
833
+ // descriptor is closed anyway. This is different from the case where
834
+ // the socket has already been closed but the descriptor in the ED object
835
+ // hasn't yet been set to INVALID_SOCKET.
836
+ // In kqueue, closing a descriptor automatically removes its event filters.
837
+ int i, j;
838
+ int nSockets = Descriptors.size();
839
+ for (i=0, j=0; i < nSockets; i++) {
840
+ EventableDescriptor *ed = Descriptors[i];
841
+ assert (ed);
842
+ if (ed->ShouldDelete()) {
843
+ #ifdef HAVE_EPOLL
844
+ if (Poller == Poller_Epoll) {
845
+ assert (epfd != -1);
846
+ if (ed->GetSocket() != INVALID_SOCKET) {
847
+ int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
848
+ // ENOENT or EBADF are not errors because the socket may be already closed when we get here.
849
+ if (e && (errno != ENOENT) && (errno != EBADF) && (errno != EPERM)) {
850
+ char buf [200];
851
+ snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
852
+ throw std::runtime_error (buf);
853
+ }
854
+ }
855
+ ModifiedDescriptors.erase(ed);
856
+ }
857
+ #endif
858
+ delete ed;
859
+ }
860
+ else
861
+ Descriptors [j++] = ed;
862
+ }
863
+ while ((size_t)j < Descriptors.size())
864
+ Descriptors.pop_back();
865
+ }
866
+
867
+ /*********************************
868
+ EventMachine_t::_ModifyEpollEvent
869
+ *********************************/
870
+
871
+ #ifdef HAVE_EPOLL
872
+ void EventMachine_t::_ModifyEpollEvent (EventableDescriptor *ed)
873
+ {
874
+ if (Poller == Poller_Epoll) {
875
+ assert (epfd != -1);
876
+ assert (ed);
877
+ assert (ed->GetSocket() != INVALID_SOCKET);
878
+ int e = epoll_ctl (epfd, EPOLL_CTL_MOD, ed->GetSocket(), ed->GetEpollEvent());
879
+ if (e) {
880
+ char buf [200];
881
+ snprintf (buf, sizeof(buf)-1, "unable to modify epoll event: %s", strerror(errno));
882
+ throw std::runtime_error (buf);
883
+ }
884
+ }
885
+ }
886
+ #else
887
+ void EventMachine_t::_ModifyEpollEvent (EventableDescriptor *ed UNUSED) { }
888
+ #endif
889
+
890
+
891
+ /**************************
892
+ SelectData_t::SelectData_t
893
+ **************************/
894
+
895
+ SelectData_t::SelectData_t()
896
+ {
897
+ maxsocket = 0;
898
+ rb_fd_init (&fdreads);
899
+ rb_fd_init (&fdwrites);
900
+ rb_fd_init (&fderrors);
901
+ }
902
+
903
+ SelectData_t::~SelectData_t()
904
+ {
905
+ rb_fd_term (&fdreads);
906
+ rb_fd_term (&fdwrites);
907
+ rb_fd_term (&fderrors);
908
+ }
909
+
910
+ #ifdef BUILD_FOR_RUBY
911
+ /*****************
912
+ _SelectDataSelect
913
+ *****************/
914
+
915
+ #if defined(HAVE_RB_THREAD_BLOCKING_REGION) || defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
916
+ static VALUE _SelectDataSelect (void *v)
917
+ {
918
+ SelectData_t *sd = (SelectData_t*)v;
919
+ sd->nSockets = select (sd->maxsocket+1, rb_fd_ptr(&(sd->fdreads)), rb_fd_ptr(&(sd->fdwrites)), rb_fd_ptr(&(sd->fderrors)), &(sd->tv));
920
+ return Qnil;
921
+ }
922
+ #endif
923
+
924
+ /*********************
925
+ SelectData_t::_Select
926
+ *********************/
927
+
928
+ int SelectData_t::_Select()
929
+ {
930
+ #if defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
931
+ // added in ruby 1.9.3
932
+ rb_thread_call_without_gvl ((void *(*)(void *))_SelectDataSelect, (void*)this, RUBY_UBF_IO, 0);
933
+ return nSockets;
934
+ #elif defined(HAVE_TBR)
935
+ // added in ruby 1.9.1, deprecated in ruby 2.0.0
936
+ rb_thread_blocking_region (_SelectDataSelect, (void*)this, RUBY_UBF_IO, 0);
937
+ return nSockets;
938
+ #else
939
+ return EmSelect (maxsocket+1, &fdreads, &fdwrites, &fderrors, &tv);
940
+ #endif
941
+ }
942
+ #endif
943
+
944
+ void SelectData_t::_Clear()
945
+ {
946
+ maxsocket = 0;
947
+ rb_fd_zero (&fdreads);
948
+ rb_fd_zero (&fdwrites);
949
+ rb_fd_zero (&fderrors);
950
+ }
951
+
952
+ /******************************
953
+ EventMachine_t::_RunSelectOnce
954
+ ******************************/
955
+
956
+ void EventMachine_t::_RunSelectOnce()
957
+ {
958
+ // Crank the event machine once.
959
+ // If there are no descriptors to process, then sleep
960
+ // for a few hundred mills to avoid busy-looping.
961
+ // This is based on a select loop. Alternately provide epoll
962
+ // if we know we're running on a 2.6 kernel.
963
+ // epoll will be effective if we provide it as an alternative,
964
+ // however it has the same problem interoperating with Ruby
965
+ // threads that select does.
966
+
967
+ // Get ready for select()
968
+ SelectData->_Clear();
969
+
970
+ // Always read the loop-breaker reader.
971
+ // Changed 23Aug06, provisionally implemented for Windows with a UDP socket
972
+ // running on localhost with a randomly-chosen port. (*Puke*)
973
+ // Windows has a version of the Unix pipe() library function, but it doesn't
974
+ // give you back descriptors that are selectable.
975
+ rb_fd_set (LoopBreakerReader, &(SelectData->fdreads));
976
+ if (SelectData->maxsocket < LoopBreakerReader)
977
+ SelectData->maxsocket = LoopBreakerReader;
978
+
979
+ // prepare the sockets for reading and writing
980
+ size_t i;
981
+ for (i = 0; i < Descriptors.size(); i++) {
982
+ EventableDescriptor *ed = Descriptors[i];
983
+ assert (ed);
984
+ SOCKET sd = ed->GetSocket();
985
+ if (ed->IsWatchOnly() && sd == INVALID_SOCKET)
986
+ continue;
987
+ assert (sd != INVALID_SOCKET);
988
+
989
+ if (ed->SelectForRead())
990
+ rb_fd_set (sd, &(SelectData->fdreads));
991
+ if (ed->SelectForWrite())
992
+ rb_fd_set (sd, &(SelectData->fdwrites));
993
+
994
+ #ifdef OS_WIN32
995
+ /* 21Sep09: on windows, a non-blocking connect() that fails does not come up as writable.
996
+ Instead, it is added to the error set. See http://www.mail-archive.com/openssl-users@openssl.org/msg58500.html
997
+ */
998
+ if (ed->IsConnectPending())
999
+ rb_fd_set (sd, &(SelectData->fderrors));
1000
+ #endif
1001
+
1002
+ if (SelectData->maxsocket < sd)
1003
+ SelectData->maxsocket = sd;
1004
+ }
1005
+
1006
+
1007
+ { // read and write the sockets
1008
+ //timeval tv = {1, 0}; // Solaris fails if the microseconds member is >= 1000000.
1009
+ //timeval tv = Quantum;
1010
+ SelectData->tv = _TimeTilNextEvent();
1011
+ int s = SelectData->_Select();
1012
+ //rb_thread_blocking_region(xxx,(void*)&SelectData,RUBY_UBF_IO,0);
1013
+ //int s = EmSelect (SelectData.maxsocket+1, &(SelectData.fdreads), &(SelectData.fdwrites), NULL, &(SelectData.tv));
1014
+ //int s = SelectData.nSockets;
1015
+ if (s > 0) {
1016
+ /* Changed 01Jun07. We used to handle the Loop-breaker right here.
1017
+ * Now we do it AFTER all the regular descriptors. There's an
1018
+ * incredibly important and subtle reason for this. Code on
1019
+ * loop breakers is sometimes used to cause the reactor core to
1020
+ * cycle (for example, to allow outbound network buffers to drain).
1021
+ * If a loop-breaker handler reschedules itself (say, after determining
1022
+ * that the write buffers are still too full), then it will execute
1023
+ * IMMEDIATELY if _ReadLoopBreaker is done here instead of after
1024
+ * the other descriptors are processed. That defeats the whole purpose.
1025
+ */
1026
+ for (i=0; i < Descriptors.size(); i++) {
1027
+ EventableDescriptor *ed = Descriptors[i];
1028
+ assert (ed);
1029
+ SOCKET sd = ed->GetSocket();
1030
+ if (ed->IsWatchOnly() && sd == INVALID_SOCKET)
1031
+ continue;
1032
+ assert (sd != INVALID_SOCKET);
1033
+
1034
+ if (rb_fd_isset (sd, &(SelectData->fdwrites))) {
1035
+ // Double-check SelectForWrite() still returns true. If not, one of the callbacks must have
1036
+ // modified some value since we checked SelectForWrite() earlier in this method.
1037
+ if (ed->SelectForWrite())
1038
+ ed->Write();
1039
+ }
1040
+ if (rb_fd_isset (sd, &(SelectData->fdreads)))
1041
+ ed->Read();
1042
+ if (rb_fd_isset (sd, &(SelectData->fderrors)))
1043
+ ed->HandleError();
1044
+ }
1045
+
1046
+ if (rb_fd_isset (LoopBreakerReader, &(SelectData->fdreads)))
1047
+ _ReadLoopBreaker();
1048
+ }
1049
+ else if (s < 0) {
1050
+ switch (errno) {
1051
+ case EBADF:
1052
+ _CleanBadDescriptors();
1053
+ break;
1054
+ case EINVAL:
1055
+ throw std::runtime_error ("Somehow EM passed an invalid nfds or invalid timeout to select(2), please report this!");
1056
+ break;
1057
+ default:
1058
+ // select can fail on error in a handful of ways.
1059
+ // If this happens, then wait for a little while to avoid busy-looping.
1060
+ // If the error was EINTR, we probably caught SIGCHLD or something,
1061
+ // so keep the wait short.
1062
+ timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
1063
+ EmSelect (0, NULL, NULL, NULL, &tv);
1064
+ }
1065
+ }
1066
+ }
1067
+ }
1068
+
1069
+ void EventMachine_t::_CleanBadDescriptors()
1070
+ {
1071
+ size_t i;
1072
+
1073
+ for (i = 0; i < Descriptors.size(); i++) {
1074
+ EventableDescriptor *ed = Descriptors[i];
1075
+ if (ed->ShouldDelete())
1076
+ continue;
1077
+
1078
+ SOCKET sd = ed->GetSocket();
1079
+
1080
+ struct timeval tv;
1081
+ tv.tv_sec = 0;
1082
+ tv.tv_usec = 0;
1083
+
1084
+ rb_fdset_t fds;
1085
+ rb_fd_init(&fds);
1086
+ rb_fd_set(sd, &fds);
1087
+
1088
+ int ret = rb_fd_select(sd + 1, &fds, NULL, NULL, &tv);
1089
+ rb_fd_term(&fds);
1090
+
1091
+ if (ret == -1) {
1092
+ if (errno == EBADF)
1093
+ ed->ScheduleClose(false);
1094
+ }
1095
+ }
1096
+ }
1097
+
1098
+ /********************************
1099
+ EventMachine_t::_ReadLoopBreaker
1100
+ ********************************/
1101
+
1102
+ void EventMachine_t::_ReadLoopBreaker()
1103
+ {
1104
+ /* The loop breaker has selected readable.
1105
+ * Read it ONCE (it may block if we try to read it twice)
1106
+ * and send a loop-break event back to user code.
1107
+ */
1108
+ char buffer [1024];
1109
+ (void)read (LoopBreakerReader, buffer, sizeof(buffer));
1110
+ if (EventCallback)
1111
+ (*EventCallback)(0, EM_LOOPBREAK_SIGNAL, "", 0);
1112
+ }
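
_ReadLoopBreaker pairs with a write end that other parts of em.cpp use to wake a blocked select call (and the comment inside _RunSelectOnce explains why the break is dispatched only after the regular descriptors). As a minimal, standalone illustration of the underlying idiom rather than EventMachine's own implementation (the file name, helper name, and the use of a raw pipe are assumptions of the sketch), one thread below wakes a select loop from another thread and drains the pipe exactly once:

    // loop_breaker_sketch.cc -- illustrative only, not part of em.cpp
    #include <unistd.h>
    #include <sys/select.h>
    #include <pthread.h>
    #include <cstdio>

    static int breaker_fds[2]; // [0] = read end, [1] = write end

    static void *waker (void *)
    {
        sleep (1);
        char c = '\0';
        (void)write (breaker_fds[1], &c, 1); // wake the thread blocked in select
        return NULL;
    }

    int main()
    {
        if (pipe (breaker_fds) != 0)
            return 1;

        pthread_t t;
        pthread_create (&t, NULL, waker, NULL);

        fd_set readers;
        FD_ZERO (&readers);
        FD_SET (breaker_fds[0], &readers);

        // Block until the other thread writes to the pipe.
        if (select (breaker_fds[0] + 1, &readers, NULL, NULL, NULL) > 0 &&
            FD_ISSET (breaker_fds[0], &readers)) {
            char buffer [1024];
            (void)read (breaker_fds[0], buffer, sizeof(buffer)); // drain it once
            printf ("loop break received\n");
        }

        pthread_join (t, NULL);
        return 0;
    }
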
1113
+
1114
+
1115
+ /**************************
1116
+ EventMachine_t::_RunTimers
1117
+ **************************/
1118
+
1119
+ void EventMachine_t::_RunTimers()
1120
+ {
1121
+ // These are caller-defined timer handlers.
1122
+ // We rely on the fact that multimaps sort by their keys to avoid
1123
+ // inspecting the whole list every time we come here.
1124
+ // Just keep inspecting and processing the list head until we hit
1125
+ // one that hasn't expired yet.
1126
+
1127
+ while (true) {
1128
+ multimap<uint64_t,Timer_t>::iterator i = Timers.begin();
1129
+ if (i == Timers.end())
1130
+ break;
1131
+ if (i->first > MyCurrentLoopTime)
1132
+ break;
1133
+ if (EventCallback)
1134
+ (*EventCallback) (0, EM_TIMER_FIRED, NULL, i->second.GetBinding());
1135
+ Timers.erase (i);
1136
+ }
1137
+ }
1138
+
1139
+
1140
+
1141
+ /***********************************
1142
+ EventMachine_t::InstallOneshotTimer
1143
+ ***********************************/
1144
+
1145
+ const uintptr_t EventMachine_t::InstallOneshotTimer (int milliseconds)
1146
+ {
1147
+ if (Timers.size() > MaxOutstandingTimers)
1148
+ return 0; // signals failure (too many outstanding timers)
1149
+
1150
+ uint64_t fire_at = GetRealTime();
1151
+ fire_at += ((uint64_t)milliseconds) * 1000LL;
1152
+
1153
+ Timer_t t;
1154
+ #ifndef HAVE_MAKE_PAIR
1155
+ multimap<uint64_t,Timer_t>::iterator i = Timers.insert (multimap<uint64_t,Timer_t>::value_type (fire_at, t));
1156
+ #else
1157
+ multimap<uint64_t,Timer_t>::iterator i = Timers.insert (make_pair (fire_at, t));
1158
+ #endif
1159
+ return i->second.GetBinding();
1160
+ }
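
InstallOneshotTimer and _RunTimers split one sorted-multimap technique across two methods. A condensed standalone sketch of the same idea follows; the TimerQueue and TimerCallback names are invented for the example, and plain function pointers stand in for EventMachine's binding and callback machinery:

    // timer_queue_sketch.cc -- illustrative only, not part of em.cpp
    #include <map>
    #include <utility>
    #include <stdint.h>

    typedef void (*TimerCallback)();

    class TimerQueue {
    public:
        // Schedule cb to fire at now_usec + milliseconds (microseconds, as in em.cpp).
        void install (uint64_t now_usec, int milliseconds, TimerCallback cb) {
            uint64_t fire_at = now_usec + (uint64_t)milliseconds * 1000;
            timers.insert (std::make_pair (fire_at, cb));
        }

        // Fire everything whose key is <= now_usec. Because the multimap keeps
        // its keys sorted, only the head of the container is ever inspected.
        void run (uint64_t now_usec) {
            while (true) {
                std::multimap<uint64_t, TimerCallback>::iterator i = timers.begin();
                if (i == timers.end() || i->first > now_usec)
                    break;
                TimerCallback cb = i->second;
                timers.erase (i);
                if (cb)
                    cb();
            }
        }

    private:
        std::multimap<uint64_t, TimerCallback> timers;
    };

Calling run() once per reactor pass with the current time mirrors what _RunTimers does above.
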
1161
+
1162
+
1163
+ /*******************************
1164
+ EventMachine_t::ConnectToServer
1165
+ *******************************/
1166
+
1167
+ const uintptr_t EventMachine_t::ConnectToServer (const char *bind_addr, int bind_port, const char *server, int port)
1168
+ {
1169
+ /* We want to spend no more than a few seconds waiting for a connection
1170
+ * to a remote host. So we use a nonblocking connect.
1171
+ * Linux disobeys the usual rules for nonblocking connects.
1172
+ * Per Stevens (UNP p.410), you expect a nonblocking connect to select
1173
+ * both readable and writable on error, and not to return EINPROGRESS
1174
+ * if the connect can be fulfilled immediately. Linux violates both
1175
+ * of these expectations.
1176
+ * Any kind of nonblocking connect on Linux returns EINPROGRESS.
1177
+ * The socket will then return writable when the disposition of the
1178
+ * connect is known, but it will not also be readable in case of
1179
+ * error! Weirdly, it will be readable in case there is data to read!!!
1180
+ * (Which can happen with protocols like SSH and SMTP.)
1181
+ * I suppose if you were so inclined you could consider this logical,
1182
+ * but it's not the way Unix has historically done it.
1183
+ * So we ignore the readable flag and read getsockopt to see if there
1184
+ * was an error connecting. A select timeout works as expected.
1185
+ * In regard to getsockopt: Linux does the Berkeley-style thing,
1186
+ * not the Solaris-style, and returns zero with the error code in
1187
+ * the error parameter.
1188
+ * Return the binding-text of the newly-created pending connection,
1189
+ * or NULL if there was a problem.
1190
+ */
1191
+
1192
+ if (!server || !*server || !port)
1193
+ throw std::runtime_error ("invalid server or port");
1194
+
1195
+ struct sockaddr_storage bind_as;
1196
+ size_t bind_as_len = sizeof bind_as;
1197
+ if (!name2address (server, port, (struct sockaddr *)&bind_as, &bind_as_len)) {
1198
+ char buf [200];
1199
+ snprintf (buf, sizeof(buf)-1, "unable to resolve server address: %s", strerror(errno));
1200
+ throw std::runtime_error (buf);
1201
+ }
1202
+
1203
+ SOCKET sd = EmSocket (bind_as.ss_family, SOCK_STREAM, 0);
1204
+ if (sd == INVALID_SOCKET) {
1205
+ char buf [200];
1206
+ snprintf (buf, sizeof(buf)-1, "unable to create new socket: %s", strerror(errno));
1207
+ throw std::runtime_error (buf);
1208
+ }
1209
+
1210
+ // From here on, ALL error returns must close the socket.
1211
+ // Set the new socket nonblocking.
1212
+ if (!SetSocketNonblocking (sd)) {
1213
+ close (sd);
1214
+ throw std::runtime_error ("unable to set socket as non-blocking");
1215
+ }
1216
+ // Disable Nagle's algorithm (TCP_NODELAY) so small writes are not delayed.
1217
+ int one = 1;
1218
+ setsockopt (sd, IPPROTO_TCP, TCP_NODELAY, (char*) &one, sizeof(one));
1219
+ // Set reuseaddr to improve performance on restarts
1220
+ setsockopt (sd, SOL_SOCKET, SO_REUSEADDR, (char*) &one, sizeof(one));
1221
+
1222
+ if (bind_addr) {
1223
+ struct sockaddr_storage bind_to;
1224
+ size_t bind_to_len = sizeof bind_to;
1225
+ if (!name2address (bind_addr, bind_port, (struct sockaddr *)&bind_to, &bind_to_len)) {
1226
+ close (sd);
1227
+ throw std::runtime_error ("invalid bind address");
1228
+ }
1229
+ if (bind (sd, (struct sockaddr *)&bind_to, bind_to_len) < 0) {
1230
+ close (sd);
1231
+ throw std::runtime_error ("couldn't bind to address");
1232
+ }
1233
+ }
1234
+
1235
+ uintptr_t out = 0;
1236
+
1237
+ #ifdef OS_UNIX
1238
+ int e_reason = 0;
1239
+ if (connect (sd, (struct sockaddr *)&bind_as, bind_as_len) == 0) {
1240
+ // This is a connect success, which Linux appears
1241
+ // never to give when the socket is nonblocking,
1242
+ // even if the connection is intramachine or to
1243
+ // localhost.
1244
+
1245
+ /* Changed this branch 08Aug06. Evidently some kernels
1246
+ * (FreeBSD for example) will actually return success from
1247
+ * a nonblocking connect. This is a pretty simple case,
1248
+ * just set up the new connection and clear the pending flag.
1249
+ * Thanks to Chris Ochs for helping track this down.
1250
+ * This branch never gets taken on Linux or (oddly) OSX.
1251
+ * The original behavior was to throw an unimplemented,
1252
+ * which the user saw as a fatal exception. Very unfriendly.
1253
+ *
1254
+ * Tweaked 10Aug06. Even though the connect disposition is
1255
+ * known, we still set the connect-pending flag. That way
1256
+ * some needed initialization will happen in the ConnectionDescriptor.
1257
+ * (To wit, the ConnectionCompleted event gets sent to the client.)
1258
+ */
1259
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1260
+ if (!cd)
1261
+ throw std::runtime_error ("no connection allocated");
1262
+ cd->SetConnectPending (true);
1263
+ Add (cd);
1264
+ out = cd->GetBinding();
1265
+ }
1266
+ else if (errno == EINPROGRESS) {
1267
+ // Errno will almost always be EINPROGRESS here, but on Linux
1268
+ // we have to look at getsockopt to be sure what really happened.
1269
+ int error = 0;
1270
+ socklen_t len;
1271
+ len = sizeof(error);
1272
+ int o = getsockopt (sd, SOL_SOCKET, SO_ERROR, &error, &len);
1273
+ if ((o == 0) && (error == 0)) {
1274
+ // Here, there's no disposition.
1275
+ // Put the connection on the stack and wait for it to complete
1276
+ // or time out.
1277
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1278
+ if (!cd)
1279
+ throw std::runtime_error ("no connection allocated");
1280
+ cd->SetConnectPending (true);
1281
+ Add (cd);
1282
+ out = cd->GetBinding();
1283
+ } else {
1284
+ // Fall through to the !out case below.
1285
+ e_reason = error;
1286
+ }
1287
+ }
1288
+ else {
1289
+ // The error from connect was something other than EINPROGRESS (EHOSTDOWN, etc.).
1290
+ // Fall through to the !out case below
1291
+ e_reason = errno;
1292
+ }
1293
+
1294
+ if (!out) {
1295
+ /* This could be connection refused or some such thing.
1296
+ * We will come here on Linux if a localhost connection fails.
1297
+ * Changed 16Jul06: Originally this branch was a no-op, and
1298
+ * we'd drop down to the end of the method, close the socket,
1299
+ * and return NULL, which would cause the caller to GET A
1300
+ * FATAL EXCEPTION. Now we keep the socket around but schedule an
1301
+ * immediate close on it, so the caller will get a close-event
1302
+ * scheduled on it. This was only an issue for localhost connections
1303
+ * to non-listening ports. We may eventually need to revisit this
1304
+ * behavior, in case it causes problems like making it hard
1305
+ * for people to know that a failure occurred.
1306
+ */
1307
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1308
+ if (!cd)
1309
+ throw std::runtime_error ("no connection allocated");
1310
+ cd->SetUnbindReasonCode (e_reason);
1311
+ cd->ScheduleClose (false);
1312
+ Add (cd);
1313
+ out = cd->GetBinding();
1314
+ }
1315
+ #endif
1316
+
1317
+ #ifdef OS_WIN32
1318
+ if (connect (sd, (struct sockaddr *)&bind_as, bind_as_len) == 0) {
1319
+ // This is a connect success, which Windows appears
1320
+ // never to give when the socket is nonblocking,
1321
+ // even if the connection is intramachine or to
1322
+ // localhost.
1323
+ throw std::runtime_error ("unimplemented");
1324
+ }
1325
+ else if (WSAGetLastError() == WSAEWOULDBLOCK) {
1326
+ // Here, there's no disposition.
1327
+ // Windows appears not to surface refused connections or
1328
+ // such stuff at this point.
1329
+ // Put the connection on the stack and wait for it to complete
1330
+ // or time out.
1331
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1332
+ if (!cd)
1333
+ throw std::runtime_error ("no connection allocated");
1334
+ cd->SetConnectPending (true);
1335
+ Add (cd);
1336
+ out = cd->GetBinding();
1337
+ }
1338
+ else {
1339
+ // The error from connect was something other than WSAEWOULDBLOCK.
1340
+ }
1341
+
1342
+ #endif
1343
+
1344
+ if (!out)
1345
+ close (sd);
1346
+ return out;
1347
+ }
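
The long comment at the top of ConnectToServer describes the portable nonblocking-connect recipe in prose: connect, expect EINPROGRESS, select for writability, then read the disposition with getsockopt(SO_ERROR). The sketch below is one compact standalone reading of that recipe, not a drop-in replacement for the code above; it is IPv4 only, waits synchronously in select, and uses an invented helper name with none of EventMachine's descriptor bookkeeping:

    // nonblocking_connect_sketch.cc -- illustrative only, not part of em.cpp
    #include <sys/socket.h>
    #include <sys/select.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstring>
    #include <cerrno>

    // Returns a connected fd, or -1 on failure or timeout.
    int connect_with_timeout (const char *ip, int port, int timeout_sec)
    {
        int sd = socket (AF_INET, SOCK_STREAM, 0);
        if (sd < 0)
            return -1;
        fcntl (sd, F_SETFL, fcntl (sd, F_GETFL, 0) | O_NONBLOCK);

        struct sockaddr_in addr;
        memset (&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons (port);
        if (inet_pton (AF_INET, ip, &addr.sin_addr) != 1) {
            close (sd);
            return -1;
        }

        if (connect (sd, (struct sockaddr*)&addr, sizeof(addr)) == 0)
            return sd; // immediate success; possible on some kernels

        if (errno != EINPROGRESS) {
            close (sd);
            return -1;
        }

        // Wait for writability only, then read the disposition with SO_ERROR,
        // ignoring readability, exactly as the comment above recommends.
        fd_set writers;
        FD_ZERO (&writers);
        FD_SET (sd, &writers);
        struct timeval tv = {timeout_sec, 0};
        if (select (sd + 1, NULL, &writers, NULL, &tv) <= 0) {
            close (sd);
            return -1; // timed out, or select itself failed
        }

        int err = 0;
        socklen_t len = sizeof(err);
        if (getsockopt (sd, SOL_SOCKET, SO_ERROR, &err, &len) != 0 || err != 0) {
            close (sd);
            return -1; // e.g. ECONNREFUSED, EHOSTUNREACH
        }
        return sd;
    }
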
1348
+
1349
+ /***********************************
1350
+ EventMachine_t::ConnectToUnixServer
1351
+ ***********************************/
1352
+
1353
+ #ifdef OS_UNIX
1354
+ const uintptr_t EventMachine_t::ConnectToUnixServer (const char *server)
1355
+ {
1356
+ /* Connect to a Unix-domain server, which by definition is running
1357
+ * on the same host.
1358
+ * There is no meaningful implementation on Windows.
1359
+ * There's no need to do a nonblocking connect, since the connection
1360
+ * is always local and can always be fulfilled immediately.
1361
+ */
1362
+
1363
+ uintptr_t out = 0;
1364
+
1365
+ if (!server || !*server)
1366
+ return 0;
1367
+
1368
+ sockaddr_un pun;
1369
+ memset (&pun, 0, sizeof(pun));
1370
+ pun.sun_family = AF_LOCAL;
1371
+
1372
+ // The sun_path field is often assumed to be large (1024 bytes on some systems),
1373
+ // but on Linux it is only 108 bytes, so check the length before copying.
1374
+ if (strlen(server) >= sizeof(pun.sun_path))
1375
+ throw std::runtime_error ("unix-domain server name is too long");
1376
+
1377
+
1378
+ strcpy (pun.sun_path, server);
1379
+
1380
+ SOCKET fd = EmSocket (AF_LOCAL, SOCK_STREAM, 0);
1381
+ if (fd == INVALID_SOCKET)
1382
+ return 0;
1383
+
1384
+ // From here on, ALL error returns must close the socket.
1385
+ // NOTE: At this point, the socket is still a blocking socket.
1386
+ if (connect (fd, (struct sockaddr*)&pun, sizeof(pun)) != 0) {
1387
+ close (fd);
1388
+ return 0;
1389
+ }
1390
+
1391
+ // Set the newly-connected socket nonblocking.
1392
+ if (!SetSocketNonblocking (fd)) {
1393
+ close (fd);
1394
+ return 0;
1395
+ }
1396
+
1397
+ // Set up a connection descriptor and add it to the event-machine.
1398
+ // Observe, even though we know the connection status is connect-success,
1399
+ // we still set the "pending" flag, so some needed initializations take
1400
+ // place.
1401
+ ConnectionDescriptor *cd = new ConnectionDescriptor (fd, this);
1402
+ if (!cd)
1403
+ throw std::runtime_error ("no connection allocated");
1404
+ cd->SetConnectPending (true);
1405
+ Add (cd);
1406
+ out = cd->GetBinding();
1407
+
1408
+ if (!out)
1409
+ close (fd);
1410
+
1411
+ return out;
1412
+ }
1413
+ #else
1414
+ const uintptr_t EventMachine_t::ConnectToUnixServer (const char *server UNUSED)
1415
+ {
1416
+ throw std::runtime_error ("unix-domain connection unavailable on this platform");
1417
+ }
1418
+ #endif
1419
+
1420
+ /************************
1421
+ EventMachine_t::AttachFD
1422
+ ************************/
1423
+
1424
+ const uintptr_t EventMachine_t::AttachFD (SOCKET fd, bool watch_mode)
1425
+ {
1426
+ #ifdef OS_UNIX
1427
+ if (fcntl(fd, F_GETFL, 0) < 0) {
1428
+ if (errno) {
1429
+ throw std::runtime_error (strerror(errno));
1430
+ } else {
1431
+ throw std::runtime_error ("invalid file descriptor");
1432
+ }
1433
+ }
1434
+ #endif
1435
+
1436
+ #ifdef OS_WIN32
1437
+ // TODO: add better check for invalid file descriptors (see ioctlsocket or getsockopt)
1438
+ if (fd == INVALID_SOCKET)
1439
+ throw std::runtime_error ("invalid file descriptor");
1440
+ #endif
1441
+
1442
+ {// Check for duplicate descriptors
1443
+ size_t i;
1444
+ for (i = 0; i < Descriptors.size(); i++) {
1445
+ EventableDescriptor *ed = Descriptors[i];
1446
+ assert (ed);
1447
+ if (ed->GetSocket() == fd)
1448
+ throw std::runtime_error ("adding existing descriptor");
1449
+ }
1450
+
1451
+ for (i = 0; i < NewDescriptors.size(); i++) {
1452
+ EventableDescriptor *ed = NewDescriptors[i];
1453
+ assert (ed);
1454
+ if (ed->GetSocket() == fd)
1455
+ throw std::runtime_error ("adding existing new descriptor");
1456
+ }
1457
+ }
1458
+
1459
+ if (!watch_mode)
1460
+ SetSocketNonblocking(fd);
1461
+
1462
+ ConnectionDescriptor *cd = new ConnectionDescriptor (fd, this);
1463
+ if (!cd)
1464
+ throw std::runtime_error ("no connection allocated");
1465
+
1466
+ cd->SetAttached(true);
1467
+ cd->SetWatchOnly(watch_mode);
1468
+ cd->SetConnectPending (false);
1469
+
1470
+ Add (cd);
1471
+
1472
+ const uintptr_t out = cd->GetBinding();
1473
+ return out;
1474
+ }
1475
+
1476
+ /************************
1477
+ EventMachine_t::DetachFD
1478
+ ************************/
1479
+
1480
+ int EventMachine_t::DetachFD (EventableDescriptor *ed)
1481
+ {
1482
+ if (!ed)
1483
+ throw std::runtime_error ("detaching bad descriptor");
1484
+
1485
+ SOCKET fd = ed->GetSocket();
1486
+
1487
+ #ifdef HAVE_EPOLL
1488
+ if (Poller == Poller_Epoll) {
1489
+ if (ed->GetSocket() != INVALID_SOCKET) {
1490
+ assert (epfd != -1);
1491
+ int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
1492
+ // ENOENT or EBADF are not errors because the socket may be already closed when we get here.
1493
+ if (e && (errno != ENOENT) && (errno != EBADF)) {
1494
+ char buf [200];
1495
+ snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
1496
+ throw std::runtime_error (buf);
1497
+ }
1498
+ }
1499
+ }
1500
+ #endif
1501
+
1502
+ #ifdef HAVE_KQUEUE
1503
+ if (Poller == Poller_Kqueue) {
1504
+ // remove any read/write events for this fd
1505
+ struct kevent k;
1506
+ #ifdef __NetBSD__
1507
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ | EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t)ed);
1508
+ #else
1509
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ | EVFILT_WRITE, EV_DELETE, 0, 0, ed);
1510
+ #endif
1511
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1512
+ if (t < 0 && (errno != ENOENT) && (errno != EBADF)) {
1513
+ char buf [200];
1514
+ snprintf (buf, sizeof(buf)-1, "unable to delete kqueue event: %s", strerror(errno));
1515
+ throw std::runtime_error (buf);
1516
+ }
1517
+ }
1518
+ #endif
1519
+
1520
+ // Prevent the descriptor from being modified, in case DetachFD was called from a timer or next_tick
1521
+ ModifiedDescriptors.erase (ed);
1522
+
1523
+ // Prevent the descriptor from being added, in case DetachFD was called in the same tick as AttachFD
1524
+ for (size_t i = 0; i < NewDescriptors.size(); i++) {
1525
+ if (ed == NewDescriptors[i]) {
1526
+ NewDescriptors.erase(NewDescriptors.begin() + i);
1527
+ break;
1528
+ }
1529
+ }
1530
+
1531
+ // Set MySocket = INVALID_SOCKET so ShouldDelete() is true (and the descriptor gets deleted and removed),
1532
+ // and also to prevent anyone from calling close() on the detached fd
1533
+ ed->SetSocketInvalid();
1534
+
1535
+ return fd;
1536
+ }
1537
+
1538
+ /************
1539
+ name2address
1540
+ ************/
1541
+
1542
+ bool EventMachine_t::name2address (const char *server, int port, struct sockaddr *addr, size_t *addr_len)
1543
+ {
1544
+ if (!server || !*server)
1545
+ server = "0.0.0.0";
1546
+
1547
+ struct addrinfo *ai;
1548
+ struct addrinfo hints;
1549
+ memset (&hints, 0, sizeof(hints));
1550
+ hints.ai_family = AF_UNSPEC;
1551
+ hints.ai_flags = AI_NUMERICSERV | AI_ADDRCONFIG;
1552
+
1553
+ char portstr[12];
1554
+ snprintf(portstr, sizeof(portstr), "%u", port);
1555
+
1556
+ if (getaddrinfo (server, portstr, &hints, &ai) == 0) {
1557
+ assert (ai->ai_addrlen <= *addr_len);
1558
+ memcpy (addr, ai->ai_addr, ai->ai_addrlen);
1559
+ *addr_len = ai->ai_addrlen;
1560
+
1561
+ freeaddrinfo(ai);
1562
+ return true;
1563
+ }
1564
+
1565
+ return false;
1566
+ }
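
Note that name2address copies only the first result getaddrinfo returns. When debugging resolution of dual-stack hostnames it can help to walk the whole result list; the standalone sketch below does that and prints each candidate with getnameinfo (the program is illustrative and not part of the extension):

    // resolve_sketch.cc -- illustrative only, not part of em.cpp
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <netdb.h>
    #include <cstdio>
    #include <cstring>

    int main (int argc, char **argv)
    {
        const char *host = (argc > 1) ? argv[1] : "localhost";
        const char *port = (argc > 2) ? argv[2] : "80";

        struct addrinfo hints;
        memset (&hints, 0, sizeof(hints));
        hints.ai_family = AF_UNSPEC;              // both IPv4 and IPv6
        hints.ai_socktype = SOCK_STREAM;
        hints.ai_flags = AI_NUMERICSERV | AI_ADDRCONFIG;

        struct addrinfo *res = NULL;
        int rc = getaddrinfo (host, port, &hints, &res);
        if (rc != 0) {
            fprintf (stderr, "getaddrinfo: %s\n", gai_strerror (rc));
            return 1;
        }

        // Walk every candidate address, not just the first one.
        for (struct addrinfo *ai = res; ai; ai = ai->ai_next) {
            char addrbuf [NI_MAXHOST];
            if (getnameinfo (ai->ai_addr, ai->ai_addrlen, addrbuf, sizeof(addrbuf),
                             NULL, 0, NI_NUMERICHOST) == 0)
                printf ("candidate: %s\n", addrbuf);
        }

        freeaddrinfo (res);
        return 0;
    }
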
1567
+
1568
+
1569
+ /*******************************
1570
+ EventMachine_t::CreateTcpServer
1571
+ *******************************/
1572
+
1573
+ const uintptr_t EventMachine_t::CreateTcpServer (const char *server, int port)
1574
+ {
1575
+ /* Create a TCP-acceptor (server) socket and add it to the event machine.
1576
+ * Return the binding of the new acceptor to the caller.
1577
+ * This binding will be referenced when the new acceptor sends events
1578
+ * to indicate accepted connections.
1579
+ */
1580
+
1581
+
1582
+ struct sockaddr_storage bind_here;
1583
+ size_t bind_here_len = sizeof bind_here;
1584
+ if (!name2address (server, port, (struct sockaddr *)&bind_here, &bind_here_len))
1585
+ return 0;
1586
+
1587
+ SOCKET sd_accept = EmSocket (bind_here.ss_family, SOCK_STREAM, 0);
1588
+ if (sd_accept == INVALID_SOCKET) {
1589
+ goto fail;
1590
+ }
1591
+
1592
+ { // set reuseaddr to improve performance on restarts.
1593
+ int oval = 1;
1594
+ if (setsockopt (sd_accept, SOL_SOCKET, SO_REUSEADDR, (char*)&oval, sizeof(oval)) < 0) {
1595
+ //__warning ("setsockopt failed while creating listener","");
1596
+ goto fail;
1597
+ }
1598
+ }
1599
+
1600
+ { // set CLOEXEC. Only makes sense on Unix
1601
+ #ifdef OS_UNIX
1602
+ int cloexec = fcntl (sd_accept, F_GETFD, 0);
1603
+ assert (cloexec >= 0);
1604
+ cloexec |= FD_CLOEXEC;
1605
+ fcntl (sd_accept, F_SETFD, cloexec);
1606
+ #endif
1607
+ }
1608
+
1609
+
1610
+ if (bind (sd_accept, (struct sockaddr *)&bind_here, bind_here_len)) {
1611
+ //__warning ("binding failed");
1612
+ goto fail;
1613
+ }
1614
+
1615
+ if (listen (sd_accept, 100)) {
1616
+ //__warning ("listen failed");
1617
+ goto fail;
1618
+ }
1619
+
1620
+ return AttachSD(sd_accept);
1621
+
1622
+ fail:
1623
+ if (sd_accept != INVALID_SOCKET)
1624
+ close (sd_accept);
1625
+ return 0;
1626
+ }
1627
+
1628
+
1629
+ /**********************************
1630
+ EventMachine_t::OpenDatagramSocket
1631
+ **********************************/
1632
+
1633
+ const uintptr_t EventMachine_t::OpenDatagramSocket (const char *address, int port)
1634
+ {
1635
+ uintptr_t output_binding = 0;
1636
+
1637
+ struct sockaddr_storage bind_here;
1638
+ size_t bind_here_len = sizeof bind_here;
1639
+ if (!name2address (address, port, (struct sockaddr *)&bind_here, &bind_here_len))
1640
+ return 0;
1641
+
1642
+ // from here on, early returns must close the socket!
1643
+ SOCKET sd = EmSocket (bind_here.ss_family, SOCK_DGRAM, 0);
1644
+ if (sd == INVALID_SOCKET)
1645
+ goto fail;
1646
+
1647
+ { // set the SO_REUSEADDR on the socket before we bind, otherwise it won't work for a second one
1648
+ int oval = 1;
1649
+ if (setsockopt (sd, SOL_SOCKET, SO_REUSEADDR, (char*)&oval, sizeof(oval)) < 0)
1650
+ goto fail;
1651
+ }
1652
+
1653
+ // Set the new socket nonblocking.
1654
+ if (!SetSocketNonblocking (sd))
1655
+ goto fail;
1656
+
1657
+ if (bind (sd, (struct sockaddr *)&bind_here, bind_here_len) != 0)
1658
+ goto fail;
1659
+
1660
+ { // Looking good.
1661
+ DatagramDescriptor *ds = new DatagramDescriptor (sd, this);
1662
+ if (!ds)
1663
+ throw std::runtime_error ("unable to allocate datagram-socket");
1664
+ Add (ds);
1665
+ output_binding = ds->GetBinding();
1666
+ }
1667
+
1668
+ return output_binding;
1669
+
1670
+ fail:
1671
+ if (sd != INVALID_SOCKET)
1672
+ close (sd);
1673
+ return 0;
1674
+ }
1675
+
1676
+
1677
+
1678
+ /*******************
1679
+ EventMachine_t::Add
1680
+ *******************/
1681
+
1682
+ void EventMachine_t::Add (EventableDescriptor *ed)
1683
+ {
1684
+ if (!ed)
1685
+ throw std::runtime_error ("added bad descriptor");
1686
+ ed->SetEventCallback (EventCallback);
1687
+ NewDescriptors.push_back (ed);
1688
+ }
1689
+
1690
+
1691
+ /*******************************
1692
+ EventMachine_t::ArmKqueueWriter
1693
+ *******************************/
1694
+
1695
+ #ifdef HAVE_KQUEUE
1696
+ void EventMachine_t::ArmKqueueWriter (EventableDescriptor *ed)
1697
+ {
1698
+ if (Poller == Poller_Kqueue) {
1699
+ if (!ed)
1700
+ throw std::runtime_error ("added bad descriptor");
1701
+ struct kevent k;
1702
+ #ifdef __NetBSD__
1703
+ EV_SET (&k, ed->GetSocket(), EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, (intptr_t)ed);
1704
+ #else
1705
+ EV_SET (&k, ed->GetSocket(), EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, ed);
1706
+ #endif
1707
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1708
+ if (t < 0) {
1709
+ char buf [200];
1710
+ snprintf (buf, sizeof(buf)-1, "arm kqueue writer failed on %d: %s", ed->GetSocket(), strerror(errno));
1711
+ throw std::runtime_error (buf);
1712
+ }
1713
+ }
1714
+ }
1715
+ #else
1716
+ void EventMachine_t::ArmKqueueWriter (EventableDescriptor *ed UNUSED) { }
1717
+ #endif
1718
+
1719
+ /*******************************
1720
+ EventMachine_t::ArmKqueueReader
1721
+ *******************************/
1722
+
1723
+ #ifdef HAVE_KQUEUE
1724
+ void EventMachine_t::ArmKqueueReader (EventableDescriptor *ed)
1725
+ {
1726
+ if (Poller == Poller_Kqueue) {
1727
+ if (!ed)
1728
+ throw std::runtime_error ("added bad descriptor");
1729
+ struct kevent k;
1730
+ #ifdef __NetBSD__
1731
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, (intptr_t)ed);
1732
+ #else
1733
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
1734
+ #endif
1735
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1736
+ if (t < 0) {
1737
+ char buf [200];
1738
+ snprintf (buf, sizeof(buf)-1, "arm kqueue reader failed on %d: %s", ed->GetSocket(), strerror(errno));
1739
+ throw std::runtime_error (buf);
1740
+ }
1741
+ }
1742
+ }
1743
+ #else
1744
+ void EventMachine_t::ArmKqueueReader (EventableDescriptor *ed UNUSED) { }
1745
+ #endif
1746
+
1747
+ /**********************************
1748
+ EventMachine_t::_AddNewDescriptors
1749
+ **********************************/
1750
+
1751
+ void EventMachine_t::_AddNewDescriptors()
1752
+ {
1753
+ /* Avoid adding descriptors to the main descriptor list
1754
+ * while we're actually traversing the list.
1755
+ * Any descriptors that are added as a result of processing timers
1756
+ * or acceptors should go on a temporary queue and then added
1757
+ * while we're not traversing the main list.
1758
+ * Also, it (rarely) happens that a newly-created descriptor
1759
+ * is immediately scheduled to close. It might be a good
1760
+ * idea not to bother scheduling these for I/O but if
1761
+ * we do that, we might bypass some important processing.
1762
+ */
1763
+
1764
+ for (size_t i = 0; i < NewDescriptors.size(); i++) {
1765
+ EventableDescriptor *ed = NewDescriptors[i];
1766
+ if (ed == NULL)
1767
+ throw std::runtime_error ("adding bad descriptor");
1768
+
1769
+ #if HAVE_EPOLL
1770
+ if (Poller == Poller_Epoll) {
1771
+ assert (epfd != -1);
1772
+ int e = epoll_ctl (epfd, EPOLL_CTL_ADD, ed->GetSocket(), ed->GetEpollEvent());
1773
+ if (e) {
1774
+ char buf [200];
1775
+ snprintf (buf, sizeof(buf)-1, "unable to add new descriptor: %s", strerror(errno));
1776
+ throw std::runtime_error (buf);
1777
+ }
1778
+ }
1779
+ #endif
1780
+
1781
+ #if HAVE_KQUEUE
1782
+ /*
1783
+ if (Poller == Poller_Kqueue) {
1784
+ // INCOMPLETE. Some descriptors don't want to be readable.
1785
+ assert (kqfd != -1);
1786
+ struct kevent k;
1787
+ #ifdef __NetBSD__
1788
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, (intptr_t)ed);
1789
+ #else
1790
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
1791
+ #endif
1792
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1793
+ assert (t == 0);
1794
+ }
1795
+ */
1796
+ #endif
1797
+
1798
+ QueueHeartbeat(ed);
1799
+ Descriptors.push_back (ed);
1800
+ }
1801
+ NewDescriptors.clear();
1802
+ }
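
Stripped of the poller-specific registration, _AddNewDescriptors (together with Add, which only queues) is the usual discipline of never growing a list while traversing it. A minimal sketch of that pattern, with invented names and plain ints standing in for descriptors:

    // deferred_add_sketch.cc -- illustrative only, not part of em.cpp
    #include <vector>

    static std::vector<int> items;      // the "live" list we iterate (like Descriptors)
    static std::vector<int> new_items;  // staging area (like NewDescriptors)

    // Work done during traversal never pushes onto `items` directly;
    // it queues onto the staging list instead.
    static void add_item (int value) { new_items.push_back (value); }

    static void run_one_pass()
    {
        // Traverse the live list. Anything created as a side effect of the
        // traversal (an acceptor producing a connection, a timer opening a
        // socket) lands in new_items, so nothing half-initialized is visited
        // in the middle of the same pass.
        for (size_t i = 0; i < items.size(); i++) {
            if (items[i] > 0)
                add_item (items[i] - 1);
        }

        // Only after the traversal do staged entries join the live list.
        items.insert (items.end(), new_items.begin(), new_items.end());
        new_items.clear();
    }
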
1803
+
1804
+
1805
+ /**********************************
1806
+ EventMachine_t::_ModifyDescriptors
1807
+ **********************************/
1808
+
1809
+ void EventMachine_t::_ModifyDescriptors()
1810
+ {
1811
+ /* For implementations that don't level-check every descriptor on
1812
+ * every pass through the machine, as select does.
1813
+ * If we're not selecting, then descriptors need a way to signal to the
1814
+ * machine that their readable or writable status has changed.
1815
+ * That's what the ::Modify call is for. We do it this way to avoid
1816
+ * modifying descriptors during the loop traversal, where it can easily
1817
+ * happen that an object (like a UDP socket) gets data written on it by
1818
+ * the application during #post_init. That would take place BEFORE the
1819
+ * descriptor even gets added to the epoll descriptor, so the modify
1820
+ * operation will crash messily.
1821
+ * Another really messy possibility is for a descriptor to put itself
1822
+ * on the Modified list, and then get deleted before we get here.
1823
+ * Remember, deletes happen after the I/O traversal and before the
1824
+ * next pass through here. So we have to make sure when we delete a
1825
+ * descriptor to remove it from the Modified list.
1826
+ */
1827
+
1828
+ #ifdef HAVE_EPOLL
1829
+ if (Poller == Poller_Epoll) {
1830
+ set<EventableDescriptor*>::iterator i = ModifiedDescriptors.begin();
1831
+ while (i != ModifiedDescriptors.end()) {
1832
+ assert (*i);
1833
+ _ModifyEpollEvent (*i);
1834
+ ++i;
1835
+ }
1836
+ }
1837
+ #endif
1838
+
1839
+ #ifdef HAVE_KQUEUE
1840
+ if (Poller == Poller_Kqueue) {
1841
+ set<EventableDescriptor*>::iterator i = ModifiedDescriptors.begin();
1842
+ while (i != ModifiedDescriptors.end()) {
1843
+ assert (*i);
1844
+ if ((*i)->GetKqueueArmWrite())
1845
+ ArmKqueueWriter (*i);
1846
+ ++i;
1847
+ }
1848
+ }
1849
+ #endif
1850
+
1851
+ ModifiedDescriptors.clear();
1852
+ }
1853
+
1854
+
1855
+ /**********************
1856
+ EventMachine_t::Modify
1857
+ **********************/
1858
+
1859
+ void EventMachine_t::Modify (EventableDescriptor *ed)
1860
+ {
1861
+ if (!ed)
1862
+ throw std::runtime_error ("modified bad descriptor");
1863
+ ModifiedDescriptors.insert (ed);
1864
+ }
1865
+
1866
+
1867
+ /***********************
1868
+ EventMachine_t::Deregister
1869
+ ***********************/
1870
+
1871
+ void EventMachine_t::Deregister (EventableDescriptor *ed)
1872
+ {
1873
+ if (!ed)
1874
+ throw std::runtime_error ("modified bad descriptor");
1875
+ #ifdef HAVE_EPOLL
1876
+ // cut/paste from _CleanupSockets(). The error handling could be
1877
+ // refactored out of there, but it is cut/paste all over the
1878
+ // file already.
1879
+ if (Poller == Poller_Epoll) {
1880
+ assert (epfd != -1);
1881
+ assert (ed->GetSocket() != INVALID_SOCKET);
1882
+ int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
1883
+ // ENOENT or EBADF are not errors because the socket may be already closed when we get here.
1884
+ if (e && (errno != ENOENT) && (errno != EBADF) && (errno != EPERM)) {
1885
+ char buf [200];
1886
+ snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
1887
+ throw std::runtime_error (buf);
1888
+ }
1889
+ ModifiedDescriptors.erase(ed);
1890
+ }
1891
+ #endif
1892
+ }
1893
+
1894
+
1895
+ /**************************************
1896
+ EventMachine_t::CreateUnixDomainServer
1897
+ **************************************/
1898
+
1899
+ #ifdef OS_UNIX
1900
+ const uintptr_t EventMachine_t::CreateUnixDomainServer (const char *filename)
1901
+ {
1902
+ /* Create a UNIX-domain acceptor (server) socket and add it to the event machine.
1903
+ * Return the binding of the new acceptor to the caller.
1904
+ * This binding will be referenced when the new acceptor sends events
1905
+ * to indicate accepted connections.
1906
+ * THERE IS NO MEANINGFUL IMPLEMENTATION ON WINDOWS.
1907
+ */
1908
+
1909
+ struct sockaddr_un s_sun;
1910
+
1911
+ SOCKET sd_accept = EmSocket (AF_LOCAL, SOCK_STREAM, 0);
1912
+ if (sd_accept == INVALID_SOCKET) {
1913
+ goto fail;
1914
+ }
1915
+
1916
+ if (!filename || !*filename)
1917
+ goto fail;
1918
+ unlink (filename);
1919
+
1920
+ bzero (&s_sun, sizeof(s_sun));
1921
+ s_sun.sun_family = AF_LOCAL;
1922
+ strncpy (s_sun.sun_path, filename, sizeof(s_sun.sun_path)-1);
1923
+
1924
+ // don't bother with reuseaddr for a local socket.
1925
+
1926
+ { // set CLOEXEC. Only makes sense on Unix
1927
+ #ifdef OS_UNIX
1928
+ int cloexec = fcntl (sd_accept, F_GETFD, 0);
1929
+ assert (cloexec >= 0);
1930
+ cloexec |= FD_CLOEXEC;
1931
+ fcntl (sd_accept, F_SETFD, cloexec);
1932
+ #endif
1933
+ }
1934
+
1935
+ if (bind (sd_accept, (struct sockaddr*)&s_sun, sizeof(s_sun))) {
1936
+ //__warning ("binding failed");
1937
+ goto fail;
1938
+ }
1939
+
1940
+ if (listen (sd_accept, 100)) {
1941
+ //__warning ("listen failed");
1942
+ goto fail;
1943
+ }
1944
+
1945
+ return AttachSD(sd_accept);
1946
+
1947
+ fail:
1948
+ if (sd_accept != INVALID_SOCKET)
1949
+ close (sd_accept);
1950
+ return 0;
1951
+ }
1952
+ #else
1953
+ const uintptr_t EventMachine_t::CreateUnixDomainServer (const char *filename UNUSED)
1954
+ {
1955
+ throw std::runtime_error ("unix-domain server unavailable on this platform");
1956
+ }
1957
+ #endif
1958
+
1959
+
1960
+ /**************************************
1961
+ EventMachine_t::AttachSD
1962
+ **************************************/
1963
+
1964
+ const uintptr_t EventMachine_t::AttachSD (SOCKET sd_accept)
1965
+ {
1966
+ uintptr_t output_binding = 0;
1967
+
1968
+ {
1969
+ // Set the acceptor non-blocking.
1970
+ // THIS IS CRUCIALLY IMPORTANT because we read it in a select loop.
1971
+ if (!SetSocketNonblocking (sd_accept)) {
1972
+ //int val = fcntl (sd_accept, F_GETFL, 0);
1973
+ //if (fcntl (sd_accept, F_SETFL, val | O_NONBLOCK) == -1) {
1974
+ goto fail;
1975
+ }
1976
+ }
1977
+
1978
+ { // Looking good.
1979
+ AcceptorDescriptor *ad = new AcceptorDescriptor (sd_accept, this);
1980
+ if (!ad)
1981
+ throw std::runtime_error ("unable to allocate acceptor");
1982
+ Add (ad);
1983
+ output_binding = ad->GetBinding();
1984
+ }
1985
+
1986
+ return output_binding;
1987
+
1988
+ fail:
1989
+ if (sd_accept != INVALID_SOCKET)
1990
+ close (sd_accept);
1991
+ return 0;
1992
+ }
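
The nonblocking acceptor matters because of how it is read: when select reports the listener readable, a reactor typically accepts in a loop until EAGAIN/EWOULDBLOCK instead of risking a blocking accept (EventMachine's own accept loop lives in AcceptorDescriptor in ed.cpp, not here). A standalone sketch of such a drain loop, with an invented helper name:

    // accept_drain_sketch.cc -- illustrative only, not part of em.cpp
    #include <sys/socket.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <cerrno>

    // Accept every pending connection on a listener that is already nonblocking.
    // Returns the number of connections accepted this round.
    int drain_acceptor (int listener_fd)
    {
        int accepted = 0;
        for (;;) {
            int client = accept (listener_fd, NULL, NULL);
            if (client >= 0) {
                // Make the new connection nonblocking too before handing it to
                // an event loop, mirroring what AttachSD does for the listener.
                fcntl (client, F_SETFL, fcntl (client, F_GETFL, 0) | O_NONBLOCK);
                ++accepted;
                // ... a real reactor would register `client` here ...
                close (client); // placeholder so the sketch stays self-contained
            }
            else if (errno == EAGAIN || errno == EWOULDBLOCK) {
                break; // queue drained
            }
            else if (errno == EINTR) {
                continue;
            }
            else {
                break; // ECONNABORTED, EMFILE, ...: give up for this round
            }
        }
        return accepted;
    }
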
1993
+
1994
+
1995
+ /**************************
1996
+ EventMachine_t::Socketpair
1997
+ **************************/
1998
+
1999
+ #ifdef OS_UNIX
2000
+ const uintptr_t EventMachine_t::Socketpair (char * const * cmd_strings)
2001
+ {
2002
+ // Make sure the incoming array of command strings is sane.
2003
+ if (!cmd_strings)
2004
+ return 0;
2005
+ int j;
2006
+ for (j=0; j < 2048 && cmd_strings[j]; j++)
2007
+ ;
2008
+ if ((j==0) || (j==2048))
2009
+ return 0;
2010
+
2011
+ uintptr_t output_binding = 0;
2012
+
2013
+ int sv[2];
2014
+ if (socketpair (AF_LOCAL, SOCK_STREAM, 0, sv) < 0)
2015
+ return 0;
2016
+ // from here, all early returns must close the pair of sockets.
2017
+
2018
+ // Set the parent side of the socketpair nonblocking.
2019
+ // We don't care about the child side, and most child processes will expect their
2020
+ // stdout to be blocking. Thanks to Duane Johnson and Bill Kelly for pointing this out.
2021
+ // Obviously DON'T set CLOEXEC.
2022
+ if (!SetSocketNonblocking (sv[0])) {
2023
+ close (sv[0]);
2024
+ close (sv[1]);
2025
+ return 0;
2026
+ }
2027
+
2028
+ pid_t f = fork();
2029
+ if (f > 0) {
2030
+ close (sv[1]);
2031
+ PipeDescriptor *pd = new PipeDescriptor (sv[0], f, this);
2032
+ if (!pd)
2033
+ throw std::runtime_error ("unable to allocate pipe");
2034
+ Add (pd);
2035
+ output_binding = pd->GetBinding();
2036
+ }
2037
+ else if (f == 0) {
2038
+ close (sv[0]);
2039
+ dup2 (sv[1], STDIN_FILENO);
2040
+ close (sv[1]);
2041
+ dup2 (STDIN_FILENO, STDOUT_FILENO);
2042
+ execvp (cmd_strings[0], cmd_strings+1);
2043
+ exit (-1); // end the child process if the exec doesn't work.
2044
+ }
2045
+ else
2046
+ throw std::runtime_error ("no fork");
2047
+
2048
+ return output_binding;
2049
+ }
2050
+ #else
2051
+ const uintptr_t EventMachine_t::Socketpair (char * const * cmd_strings UNUSED)
2052
+ {
2053
+ throw std::runtime_error ("socketpair is currently unavailable on this platform");
2054
+ }
2055
+ #endif
2056
+
2057
+
2058
+
2059
+ /****************************
2060
+ EventMachine_t::OpenKeyboard
2061
+ ****************************/
2062
+
2063
+ const uintptr_t EventMachine_t::OpenKeyboard()
2064
+ {
2065
+ KeyboardDescriptor *kd = new KeyboardDescriptor (this);
2066
+ if (!kd)
2067
+ throw std::runtime_error ("no keyboard-object allocated");
2068
+ Add (kd);
2069
+ return kd->GetBinding();
2070
+ }
2071
+
2072
+
2073
+ /**********************************
2074
+ EventMachine_t::GetConnectionCount
2075
+ **********************************/
2076
+
2077
+ int EventMachine_t::GetConnectionCount ()
2078
+ {
2079
+ return Descriptors.size() + NewDescriptors.size();
2080
+ }
2081
+
2082
+
2083
+ /************************
2084
+ EventMachine_t::WatchPid
2085
+ ************************/
2086
+
2087
+ #ifdef HAVE_KQUEUE
2088
+ const uintptr_t EventMachine_t::WatchPid (int pid)
2089
+ {
2090
+ if (Poller != Poller_Kqueue)
2091
+ throw std::runtime_error("must enable kqueue (EM.kqueue=true) for pid watching support");
2092
+
2093
+ struct kevent event;
2094
+ int kqres;
2095
+
2096
+ EV_SET(&event, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT | NOTE_FORK, 0, 0);
2097
+
2098
+ // Attempt to register the event
2099
+ kqres = kevent(kqfd, &event, 1, NULL, 0, NULL);
2100
+ if (kqres == -1) {
2101
+ char errbuf[200];
2102
+ sprintf(errbuf, "failed to register file watch descriptor with kqueue: %s", strerror(errno));
2103
+ throw std::runtime_error(errbuf);
2104
+ }
2105
+ Bindable_t* b = new Bindable_t();
2106
+ Pids.insert(make_pair (pid, b));
2107
+
2108
+ return b->GetBinding();
2109
+ }
2110
+ #else
2111
+ const uintptr_t EventMachine_t::WatchPid (int pid UNUSED)
2112
+ {
2113
+ throw std::runtime_error("no pid watching support on this system");
2114
+ }
2115
+ #endif
2116
+
2117
+ /**************************
2118
+ EventMachine_t::UnwatchPid
2119
+ **************************/
2120
+
2121
+ void EventMachine_t::UnwatchPid (int pid)
2122
+ {
2123
+ Bindable_t *b = Pids[pid];
2124
+ assert(b);
2125
+ Pids.erase(pid);
2126
+
2127
+ #ifdef HAVE_KQUEUE
2128
+ struct kevent k;
2129
+
2130
+ EV_SET(&k, pid, EVFILT_PROC, EV_DELETE, 0, 0, 0);
2131
+ /*int t =*/ kevent (kqfd, &k, 1, NULL, 0, NULL);
2132
+ // t==-1 if the process already exited; ignore this for now
2133
+ #endif
2134
+
2135
+ if (EventCallback)
2136
+ (*EventCallback)(b->GetBinding(), EM_CONNECTION_UNBOUND, NULL, 0);
2137
+
2138
+ delete b;
2139
+ }
2140
+
2141
+ void EventMachine_t::UnwatchPid (const uintptr_t sig)
2142
+ {
2143
+ for(map<int, Bindable_t*>::iterator i=Pids.begin(); i != Pids.end(); i++)
2144
+ {
2145
+ if (i->second->GetBinding() == sig) {
2146
+ UnwatchPid (i->first);
2147
+ return;
2148
+ }
2149
+ }
2150
+
2151
+ throw std::runtime_error("attempted to remove invalid pid signature");
2152
+ }
2153
+
2154
+
2155
+ /*************************
2156
+ EventMachine_t::WatchFile
2157
+ *************************/
2158
+
2159
+ const uintptr_t EventMachine_t::WatchFile (const char *fpath)
2160
+ {
2161
+ struct stat sb;
2162
+ int sres;
2163
+ int wd = -1;
2164
+
2165
+ sres = stat(fpath, &sb);
2166
+
2167
+ if (sres == -1) {
2168
+ char errbuf[300];
2169
+ sprintf(errbuf, "error registering file %s for watching: %s", fpath, strerror(errno));
2170
+ throw std::runtime_error(errbuf);
2171
+ }
2172
+
2173
+ #ifdef HAVE_INOTIFY
2174
+ if (!inotify) {
2175
+ inotify = new InotifyDescriptor(this);
2176
+ assert (inotify);
2177
+ Add(inotify);
2178
+ }
2179
+
2180
+ wd = inotify_add_watch(inotify->GetSocket(), fpath,
2181
+ IN_MODIFY | IN_DELETE_SELF | IN_MOVE_SELF | IN_CREATE | IN_DELETE | IN_MOVE) ;
2182
+ if (wd == -1) {
2183
+ char errbuf[300];
2184
+ sprintf(errbuf, "failed to open file %s for registering with inotify: %s", fpath, strerror(errno));
2185
+ throw std::runtime_error(errbuf);
2186
+ }
2187
+ #endif
2188
+
2189
+ #ifdef HAVE_KQUEUE
2190
+ if (Poller != Poller_Kqueue)
2191
+ throw std::runtime_error("must enable kqueue (EM.kqueue=true) for file watching support");
2192
+
2193
+ // With kqueue we have to open the file first and use the resulting fd to register for events
2194
+ wd = open(fpath, O_RDONLY);
2195
+ if (wd == -1) {
2196
+ char errbuf[300];
2197
+ sprintf(errbuf, "failed to open file %s for registering with kqueue: %s", fpath, strerror(errno));
2198
+ throw std::runtime_error(errbuf);
2199
+ }
2200
+ _RegisterKqueueFileEvent(wd);
2201
+ #endif
2202
+
2203
+ if (wd != -1) {
2204
+ Bindable_t* b = new Bindable_t();
2205
+ Files.insert(make_pair (wd, b));
2206
+
2207
+ return b->GetBinding();
2208
+ }
2209
+
2210
+ throw std::runtime_error("no file watching support on this system"); // is this the right thing to do?
2211
+ }
2212
+
2213
+
2214
+ /***************************
2215
+ EventMachine_t::UnwatchFile
2216
+ ***************************/
2217
+
2218
+ void EventMachine_t::UnwatchFile (int wd)
2219
+ {
2220
+ Bindable_t *b = Files[wd];
2221
+ assert(b);
2222
+ Files.erase(wd);
2223
+
2224
+ #ifdef HAVE_INOTIFY
2225
+ inotify_rm_watch(inotify->GetSocket(), wd);
2226
+ #elif HAVE_KQUEUE
2227
+ // With kqueue, closing the monitored fd automatically clears all registered events for it
2228
+ close(wd);
2229
+ #endif
2230
+
2231
+ if (EventCallback)
2232
+ (*EventCallback)(b->GetBinding(), EM_CONNECTION_UNBOUND, NULL, 0);
2233
+
2234
+ delete b;
2235
+ }
2236
+
2237
+ void EventMachine_t::UnwatchFile (const uintptr_t sig)
2238
+ {
2239
+ for(map<int, Bindable_t*>::iterator i=Files.begin(); i != Files.end(); i++)
2240
+ {
2241
+ if (i->second->GetBinding() == sig) {
2242
+ UnwatchFile (i->first);
2243
+ return;
2244
+ }
2245
+ }
2246
+ throw std::runtime_error("attempted to remove invalid watch signature");
2247
+ }
2248
+
2249
+
2250
+ /**********************************
2251
+ EventMachine_t::_ReadInotifyEvents
2252
+ **********************************/
2253
+
2254
+ void EventMachine_t::_ReadInotifyEvents()
2255
+ {
2256
+ #ifdef HAVE_INOTIFY
2257
+ char buffer[1024];
2258
+
2259
+ assert(EventCallback);
2260
+
2261
+ for (;;) {
2262
+ int returned = read(inotify->GetSocket(), buffer, sizeof(buffer));
2263
+ assert(!(returned == 0 || (returned == -1 && errno == EINVAL)));
2264
+ if (returned <= 0) {
2265
+ break;
2266
+ }
2267
+ int current = 0;
2268
+ while (current < returned) {
2269
+ struct inotify_event* event = (struct inotify_event*)(buffer+current);
2270
+ map<int, Bindable_t*>::const_iterator bindable = Files.find(event->wd);
2271
+ if (bindable != Files.end()) {
2272
+ if (event->mask & (IN_MODIFY | IN_CREATE | IN_DELETE | IN_MOVE)){
2273
+ (*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "modified", 8);
2274
+ }
2275
+ if (event->mask & IN_MOVE_SELF){
2276
+ (*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "moved", 5);
2277
+ }
2278
+ if (event->mask & IN_DELETE_SELF) {
2279
+ (*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "deleted", 7);
2280
+ UnwatchFile ((int)event->wd);
2281
+ }
2282
+ }
2283
+ current += sizeof(struct inotify_event) + event->len;
2284
+ }
2285
+ }
2286
+ #endif
2287
+ }
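
_ReadInotifyEvents assumes the inotify descriptor and the Files map were set up elsewhere in this file. For readers unfamiliar with inotify's variable-length event records, here is a self-contained, Linux-only sketch of the same read-and-walk loop for a single watched path:

    // inotify_sketch.cc -- illustrative only, not part of em.cpp (Linux)
    #include <sys/inotify.h>
    #include <unistd.h>
    #include <cstdio>

    int main (int argc, char **argv)
    {
        if (argc < 2) {
            fprintf (stderr, "usage: %s <path>\n", argv[0]);
            return 1;
        }

        int ifd = inotify_init();
        if (ifd == -1)
            return 1;

        int wd = inotify_add_watch (ifd, argv[1], IN_MODIFY | IN_DELETE_SELF | IN_MOVE_SELF);
        if (wd == -1)
            return 1;

        char buffer [4096];
        int returned = read (ifd, buffer, sizeof(buffer)); // blocks until an event arrives

        // Events are variable-length records: a fixed header followed by
        // event->len bytes of optional name, so advance by both.
        int current = 0;
        while (current < returned) {
            struct inotify_event *event = (struct inotify_event*)(buffer + current);
            if (event->mask & IN_MODIFY)      printf ("modified\n");
            if (event->mask & IN_MOVE_SELF)   printf ("moved\n");
            if (event->mask & IN_DELETE_SELF) printf ("deleted\n");
            current += sizeof(struct inotify_event) + event->len;
        }

        close (ifd);
        return 0;
    }
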
2288
+
2289
+
2290
+ /*************************************
2291
+ EventMachine_t::_HandleKqueuePidEvent
2292
+ *************************************/
2293
+
2294
+ #ifdef HAVE_KQUEUE
2295
+ void EventMachine_t::_HandleKqueuePidEvent(struct kevent *event)
2296
+ {
2297
+ assert(EventCallback);
2298
+
2299
+ if (event->fflags & NOTE_FORK)
2300
+ (*EventCallback)(Pids [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "fork", 4);
2301
+ if (event->fflags & NOTE_EXIT) {
2302
+ (*EventCallback)(Pids [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "exit", 4);
2303
+ // stop watching the pid if it died
2304
+ UnwatchPid ((int)event->ident);
2305
+ }
2306
+ }
2307
+ #endif
2308
+
2309
+
2310
+ /**************************************
2311
+ EventMachine_t::_HandleKqueueFileEvent
2312
+ ***************************************/
2313
+
2314
+ #ifdef HAVE_KQUEUE
2315
+ void EventMachine_t::_HandleKqueueFileEvent(struct kevent *event)
2316
+ {
2317
+ assert(EventCallback);
2318
+
2319
+ if (event->fflags & NOTE_WRITE)
2320
+ (*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "modified", 8);
2321
+ if (event->fflags & NOTE_RENAME)
2322
+ (*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "moved", 5);
2323
+ if (event->fflags & NOTE_DELETE) {
2324
+ (*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "deleted", 7);
2325
+ UnwatchFile ((int)event->ident);
2326
+ }
2327
+ }
2328
+ #endif
2329
+
2330
+
2331
+ /****************************************
2332
+ EventMachine_t::_RegisterKqueueFileEvent
2333
+ *****************************************/
2334
+
2335
+ #ifdef HAVE_KQUEUE
2336
+ void EventMachine_t::_RegisterKqueueFileEvent(int fd)
2337
+ {
2338
+ struct kevent newevent;
2339
+ int kqres;
2340
+
2341
+ // Setup the event with our fd and proper flags
2342
+ EV_SET(&newevent, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_DELETE | NOTE_RENAME | NOTE_WRITE, 0, 0);
2343
+
2344
+ // Attempt to register the event
2345
+ kqres = kevent(kqfd, &newevent, 1, NULL, 0, NULL);
2346
+ if (kqres == -1) {
2347
+ char errbuf[200];
2348
+ sprintf(errbuf, "failed to register file watch descriptor with kqueue: %s", strerror(errno));
2349
+ close(fd);
2350
+ throw std::runtime_error(errbuf);
2351
+ }
2352
+ }
2353
+ #endif
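
_RegisterKqueueFileEvent only registers the vnode filter; the kevent call that waits for the resulting events is part of the reactor loop elsewhere in em.cpp. For completeness, a self-contained BSD/macOS sketch that registers one file the same way and waits for a single event:

    // kqueue_vnode_sketch.cc -- illustrative only, not part of em.cpp (BSD/macOS)
    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <cstdio>

    int main (int argc, char **argv)
    {
        if (argc < 2) {
            fprintf (stderr, "usage: %s <path>\n", argv[0]);
            return 1;
        }

        int kq = kqueue();
        int fd = open (argv[1], O_RDONLY);
        if (kq == -1 || fd == -1)
            return 1;

        // Same registration as _RegisterKqueueFileEvent: EV_CLEAR re-arms
        // the event after each delivery.
        struct kevent change;
        EV_SET (&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
                NOTE_DELETE | NOTE_RENAME | NOTE_WRITE, 0, 0);

        // Register and wait in one call: one changelist entry in, one event out.
        struct kevent event;
        int n = kevent (kq, &change, 1, &event, 1, NULL);
        if (n > 0) {
            if (event.fflags & NOTE_WRITE)  printf ("modified\n");
            if (event.fflags & NOTE_RENAME) printf ("moved\n");
            if (event.fflags & NOTE_DELETE) printf ("deleted\n");
        }

        close (fd);
        close (kq);
        return 0;
    }
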
2354
+
2355
+
2356
+ /************************************
2357
+ EventMachine_t::GetHeartbeatInterval
2358
+ *************************************/
2359
+
2360
+ float EventMachine_t::GetHeartbeatInterval()
2361
+ {
2362
+ return ((float)HeartbeatInterval / 1000000);
2363
+ }
2364
+
2365
+
2366
+ /************************************
2367
+ EventMachine_t::SetHeartbeatInterval
2368
+ *************************************/
2369
+
2370
+ int EventMachine_t::SetHeartbeatInterval(float interval)
2371
+ {
2372
+ int iv = (int)(interval * 1000000);
2373
+ if (iv > 0) {
2374
+ HeartbeatInterval = iv;
2375
+ return 1;
2376
+ }
2377
+ return 0;
2378
+ }
2379
+ //#endif // OS_UNIX