sonixlabs-eventmachine-java 1.0.0.rc.4-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (162)
  1. data/.gitignore +22 -0
  2. data/.yardopts +7 -0
  3. data/GNU +281 -0
  4. data/Gemfile +3 -0
  5. data/LICENSE +60 -0
  6. data/README.md +109 -0
  7. data/Rakefile +20 -0
  8. data/docs/DocumentationGuidesIndex.md +27 -0
  9. data/docs/GettingStarted.md +521 -0
  10. data/docs/old/ChangeLog +211 -0
  11. data/docs/old/DEFERRABLES +246 -0
  12. data/docs/old/EPOLL +141 -0
  13. data/docs/old/INSTALL +13 -0
  14. data/docs/old/KEYBOARD +42 -0
  15. data/docs/old/LEGAL +25 -0
  16. data/docs/old/LIGHTWEIGHT_CONCURRENCY +130 -0
  17. data/docs/old/PURE_RUBY +75 -0
  18. data/docs/old/RELEASE_NOTES +94 -0
  19. data/docs/old/SMTP +4 -0
  20. data/docs/old/SPAWNED_PROCESSES +148 -0
  21. data/docs/old/TODO +8 -0
  22. data/eventmachine.gemspec +34 -0
  23. data/examples/guides/getting_started/01_eventmachine_echo_server.rb +18 -0
  24. data/examples/guides/getting_started/02_eventmachine_echo_server_that_recognizes_exit_command.rb +22 -0
  25. data/examples/guides/getting_started/03_simple_chat_server.rb +149 -0
  26. data/examples/guides/getting_started/04_simple_chat_server_step_one.rb +27 -0
  27. data/examples/guides/getting_started/05_simple_chat_server_step_two.rb +43 -0
  28. data/examples/guides/getting_started/06_simple_chat_server_step_three.rb +98 -0
  29. data/examples/guides/getting_started/07_simple_chat_server_step_four.rb +121 -0
  30. data/examples/guides/getting_started/08_simple_chat_server_step_five.rb +141 -0
  31. data/examples/old/ex_channel.rb +43 -0
  32. data/examples/old/ex_queue.rb +2 -0
  33. data/examples/old/ex_tick_loop_array.rb +15 -0
  34. data/examples/old/ex_tick_loop_counter.rb +32 -0
  35. data/examples/old/helper.rb +2 -0
  36. data/ext/binder.cpp +124 -0
  37. data/ext/binder.h +46 -0
  38. data/ext/cmain.cpp +876 -0
  39. data/ext/ed.cpp +1973 -0
  40. data/ext/ed.h +422 -0
  41. data/ext/em.cpp +2353 -0
  42. data/ext/em.h +239 -0
  43. data/ext/eventmachine.h +127 -0
  44. data/ext/extconf.rb +176 -0
  45. data/ext/fastfilereader/extconf.rb +103 -0
  46. data/ext/fastfilereader/mapper.cpp +214 -0
  47. data/ext/fastfilereader/mapper.h +59 -0
  48. data/ext/fastfilereader/rubymain.cpp +127 -0
  49. data/ext/kb.cpp +79 -0
  50. data/ext/page.cpp +107 -0
  51. data/ext/page.h +51 -0
  52. data/ext/pipe.cpp +347 -0
  53. data/ext/project.h +156 -0
  54. data/ext/rubymain.cpp +1297 -0
  55. data/ext/ssl.cpp +468 -0
  56. data/ext/ssl.h +94 -0
  57. data/java/.classpath +8 -0
  58. data/java/.project +17 -0
  59. data/java/src/com/rubyeventmachine/EmReactor.java +588 -0
  60. data/java/src/com/rubyeventmachine/EmReactorException.java +40 -0
  61. data/java/src/com/rubyeventmachine/EventableChannel.java +70 -0
  62. data/java/src/com/rubyeventmachine/EventableDatagramChannel.java +195 -0
  63. data/java/src/com/rubyeventmachine/EventableSocketChannel.java +364 -0
  64. data/lib/em/buftok.rb +110 -0
  65. data/lib/em/callback.rb +58 -0
  66. data/lib/em/channel.rb +64 -0
  67. data/lib/em/completion.rb +304 -0
  68. data/lib/em/connection.rb +712 -0
  69. data/lib/em/deferrable.rb +210 -0
  70. data/lib/em/deferrable/pool.rb +2 -0
  71. data/lib/em/file_watch.rb +73 -0
  72. data/lib/em/future.rb +61 -0
  73. data/lib/em/iterator.rb +270 -0
  74. data/lib/em/messages.rb +66 -0
  75. data/lib/em/pool.rb +151 -0
  76. data/lib/em/process_watch.rb +45 -0
  77. data/lib/em/processes.rb +123 -0
  78. data/lib/em/protocols.rb +36 -0
  79. data/lib/em/protocols/header_and_content.rb +138 -0
  80. data/lib/em/protocols/httpclient.rb +279 -0
  81. data/lib/em/protocols/httpclient2.rb +600 -0
  82. data/lib/em/protocols/line_and_text.rb +125 -0
  83. data/lib/em/protocols/line_protocol.rb +29 -0
  84. data/lib/em/protocols/linetext2.rb +161 -0
  85. data/lib/em/protocols/memcache.rb +331 -0
  86. data/lib/em/protocols/object_protocol.rb +46 -0
  87. data/lib/em/protocols/postgres3.rb +246 -0
  88. data/lib/em/protocols/saslauth.rb +175 -0
  89. data/lib/em/protocols/smtpclient.rb +365 -0
  90. data/lib/em/protocols/smtpserver.rb +640 -0
  91. data/lib/em/protocols/socks4.rb +66 -0
  92. data/lib/em/protocols/stomp.rb +202 -0
  93. data/lib/em/protocols/tcptest.rb +54 -0
  94. data/lib/em/pure_ruby.rb +1017 -0
  95. data/lib/em/queue.rb +71 -0
  96. data/lib/em/resolver.rb +192 -0
  97. data/lib/em/spawnable.rb +84 -0
  98. data/lib/em/streamer.rb +118 -0
  99. data/lib/em/threaded_resource.rb +90 -0
  100. data/lib/em/tick_loop.rb +85 -0
  101. data/lib/em/timers.rb +61 -0
  102. data/lib/em/version.rb +3 -0
  103. data/lib/eventmachine.rb +1532 -0
  104. data/lib/jeventmachine.rb +284 -0
  105. data/lib/sonixlabs-eventmachine-java.rb +1 -0
  106. data/rakelib/cpp.rake_example +77 -0
  107. data/rakelib/package.rake +98 -0
  108. data/rakelib/test.rake +8 -0
  109. data/tests/client.crt +31 -0
  110. data/tests/client.key +51 -0
  111. data/tests/em_test_helper.rb +64 -0
  112. data/tests/test_attach.rb +126 -0
  113. data/tests/test_basic.rb +294 -0
  114. data/tests/test_channel.rb +62 -0
  115. data/tests/test_completion.rb +177 -0
  116. data/tests/test_connection_count.rb +33 -0
  117. data/tests/test_defer.rb +18 -0
  118. data/tests/test_deferrable.rb +35 -0
  119. data/tests/test_epoll.rb +130 -0
  120. data/tests/test_error_handler.rb +38 -0
  121. data/tests/test_exc.rb +28 -0
  122. data/tests/test_file_watch.rb +65 -0
  123. data/tests/test_futures.rb +170 -0
  124. data/tests/test_get_sock_opt.rb +37 -0
  125. data/tests/test_handler_check.rb +35 -0
  126. data/tests/test_hc.rb +155 -0
  127. data/tests/test_httpclient.rb +190 -0
  128. data/tests/test_httpclient2.rb +128 -0
  129. data/tests/test_idle_connection.rb +23 -0
  130. data/tests/test_inactivity_timeout.rb +54 -0
  131. data/tests/test_kb.rb +34 -0
  132. data/tests/test_ltp.rb +138 -0
  133. data/tests/test_ltp2.rb +288 -0
  134. data/tests/test_next_tick.rb +104 -0
  135. data/tests/test_object_protocol.rb +36 -0
  136. data/tests/test_pause.rb +78 -0
  137. data/tests/test_pending_connect_timeout.rb +52 -0
  138. data/tests/test_pool.rb +194 -0
  139. data/tests/test_process_watch.rb +48 -0
  140. data/tests/test_processes.rb +128 -0
  141. data/tests/test_proxy_connection.rb +180 -0
  142. data/tests/test_pure.rb +88 -0
  143. data/tests/test_queue.rb +50 -0
  144. data/tests/test_resolver.rb +55 -0
  145. data/tests/test_running.rb +14 -0
  146. data/tests/test_sasl.rb +47 -0
  147. data/tests/test_send_file.rb +217 -0
  148. data/tests/test_servers.rb +33 -0
  149. data/tests/test_set_sock_opt.rb +37 -0
  150. data/tests/test_shutdown_hooks.rb +23 -0
  151. data/tests/test_smtpclient.rb +55 -0
  152. data/tests/test_smtpserver.rb +57 -0
  153. data/tests/test_spawn.rb +293 -0
  154. data/tests/test_ssl_args.rb +78 -0
  155. data/tests/test_ssl_methods.rb +48 -0
  156. data/tests/test_ssl_verify.rb +82 -0
  157. data/tests/test_threaded_resource.rb +53 -0
  158. data/tests/test_tick_loop.rb +59 -0
  159. data/tests/test_timers.rb +123 -0
  160. data/tests/test_ud.rb +8 -0
  161. data/tests/test_unbind_reason.rb +48 -0
  162. metadata +301 -0
data/ext/ed.h
@@ -0,0 +1,422 @@
+ /*****************************************************************************
+
+ $Id$
+
+ File: ed.h
+ Date: 06Apr06
+
+ Copyright (C) 2006-07 by Francis Cianfrocca. All Rights Reserved.
+ Gmail: blackhedd
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of either: 1) the GNU General Public License
+ as published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version; or 2) Ruby's License.
+
+ See the file COPYING for complete licensing information.
+
+ *****************************************************************************/
+
+ #ifndef __EventableDescriptor__H_
+ #define __EventableDescriptor__H_
+
+
+ class EventMachine_t; // forward reference
+ #ifdef WITH_SSL
+ class SslBox_t; // forward reference
+ #endif
+
+ bool SetSocketNonblocking (SOCKET);
+
+
+ /*************************
+ class EventableDescriptor
+ *************************/
+
+ class EventableDescriptor: public Bindable_t
+ {
+ public:
+ EventableDescriptor (int, EventMachine_t*);
+ virtual ~EventableDescriptor();
+
+ int GetSocket() {return MySocket;}
+ void SetSocketInvalid() { MySocket = INVALID_SOCKET; }
+ void Close();
+
+ virtual void Read() = 0;
+ virtual void Write() = 0;
+ virtual void Heartbeat() = 0;
+
+ // These methods tell us whether the descriptor
+ // should be selected or polled for read/write.
+ virtual bool SelectForRead() = 0;
+ virtual bool SelectForWrite() = 0;
+
+ // are we scheduled for a close, or in an error state, or already closed?
+ bool ShouldDelete();
+ // Do we have any data to write? This is used by ShouldDelete.
+ virtual int GetOutboundDataSize() {return 0;}
+ virtual bool IsWatchOnly(){ return bWatchOnly; }
+
+ virtual void ScheduleClose (bool after_writing);
+ bool IsCloseScheduled();
+ virtual void HandleError(){ ScheduleClose (false); }
+
+ void SetEventCallback (EMCallback);
+
+ virtual bool GetPeername (struct sockaddr*, socklen_t*) {return false;}
+ virtual bool GetSockname (struct sockaddr*, socklen_t*) {return false;}
+ virtual bool GetSubprocessPid (pid_t*) {return false;}
+
+ virtual void StartTls() {}
+ virtual void SetTlsParms (const char *privkey_filename, const char *certchain_filename, bool verify_peer) {}
+
+ #ifdef WITH_SSL
+ virtual X509 *GetPeerCert() {return NULL;}
+ #endif
+
+ virtual uint64_t GetCommInactivityTimeout() {return 0;}
+ virtual int SetCommInactivityTimeout (uint64_t value) {return 0;}
+ uint64_t GetPendingConnectTimeout();
+ int SetPendingConnectTimeout (uint64_t value);
+ uint64_t GetLastActivity() { return LastActivity; }
+
+ #ifdef HAVE_EPOLL
+ struct epoll_event *GetEpollEvent() { return &EpollEvent; }
+ #endif
+
+ virtual void StartProxy(const unsigned long, const unsigned long, const unsigned long);
+ virtual void StopProxy();
+ virtual unsigned long GetProxiedBytes(){ return ProxiedBytes; };
+ virtual void SetProxiedFrom(EventableDescriptor*, const unsigned long);
+ virtual int SendOutboundData(const char*,int){ return -1; }
+ virtual bool IsPaused(){ return bPaused; }
+ virtual bool Pause(){ bPaused = true; return bPaused; }
+ virtual bool Resume(){ bPaused = false; return bPaused; }
+
+ void SetUnbindReasonCode(int code){ UnbindReasonCode = code; }
+ virtual int ReportErrorStatus(){ return 0; }
+ virtual bool IsConnectPending(){ return false; }
+ virtual uint64_t GetNextHeartbeat();
+
+ private:
+ bool bCloseNow;
+ bool bCloseAfterWriting;
+
+ protected:
+ int MySocket;
+ bool bAttached;
+ bool bWatchOnly;
+
+ EMCallback EventCallback;
+ void _GenericInboundDispatch(const char*, int);
+
+ uint64_t CreatedAt;
+ bool bCallbackUnbind;
+ int UnbindReasonCode;
+
+ unsigned long BytesToProxy;
+ EventableDescriptor *ProxyTarget;
+ EventableDescriptor *ProxiedFrom;
+ unsigned long ProxiedBytes;
+
+ unsigned long MaxOutboundBufSize;
+
+ #ifdef HAVE_EPOLL
+ struct epoll_event EpollEvent;
+ #endif
+
+ EventMachine_t *MyEventMachine;
+ uint64_t PendingConnectTimeout;
+ uint64_t InactivityTimeout;
+ uint64_t LastActivity;
+ uint64_t NextHeartbeat;
+ bool bPaused;
+ };
+
+
+
+ /*************************
+ class LoopbreakDescriptor
+ *************************/
+
+ class LoopbreakDescriptor: public EventableDescriptor
+ {
+ public:
+ LoopbreakDescriptor (int, EventMachine_t*);
+ virtual ~LoopbreakDescriptor() {}
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat() {}
+
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite() {return false;}
+ };
+
+
+ /**************************
+ class ConnectionDescriptor
+ **************************/
+
+ class ConnectionDescriptor: public EventableDescriptor
+ {
+ public:
+ ConnectionDescriptor (int, EventMachine_t*);
+ virtual ~ConnectionDescriptor();
+
+ int SendOutboundData (const char*, int);
+
+ void SetConnectPending (bool f);
+ virtual void ScheduleClose (bool after_writing);
+ virtual void HandleError();
+
+ void SetNotifyReadable (bool);
+ void SetNotifyWritable (bool);
+ void SetAttached (bool);
+ void SetWatchOnly (bool);
+
+ bool Pause();
+ bool Resume();
+
+ bool IsNotifyReadable(){ return bNotifyReadable; }
+ bool IsNotifyWritable(){ return bNotifyWritable; }
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead();
+ virtual bool SelectForWrite();
+
+ // Do we have any data to write? This is used by ShouldDelete.
+ virtual int GetOutboundDataSize() {return OutboundDataSize;}
+
+ virtual void StartTls();
+ virtual void SetTlsParms (const char *privkey_filename, const char *certchain_filename, bool verify_peer);
+
+ #ifdef WITH_SSL
+ virtual X509 *GetPeerCert();
+ virtual bool VerifySslPeer(const char*);
+ virtual void AcceptSslPeer();
+ #endif
+
+ void SetServerMode() {bIsServer = true;}
+
+ virtual bool GetPeername (struct sockaddr*, socklen_t*);
+ virtual bool GetSockname (struct sockaddr*, socklen_t*);
+
+ virtual uint64_t GetCommInactivityTimeout();
+ virtual int SetCommInactivityTimeout (uint64_t value);
+
+ virtual int ReportErrorStatus();
+ virtual bool IsConnectPending(){ return bConnectPending; }
+
+ protected:
+ struct OutboundPage {
+ OutboundPage (const char *b, int l, int o=0): Buffer(b), Length(l), Offset(o) {}
+ void Free() {if (Buffer) free ((char*)Buffer); }
+ const char *Buffer;
+ int Length;
+ int Offset;
+ };
+
+ protected:
+ bool bConnectPending;
+
+ bool bNotifyReadable;
+ bool bNotifyWritable;
+
+ bool bReadAttemptedAfterClose;
+ bool bWriteAttemptedAfterClose;
+
+ deque<OutboundPage> OutboundPages;
+ int OutboundDataSize;
+
+ #ifdef WITH_SSL
+ SslBox_t *SslBox;
+ std::string CertChainFilename;
+ std::string PrivateKeyFilename;
+ bool bHandshakeSignaled;
+ bool bSslVerifyPeer;
+ bool bSslPeerAccepted;
+ #endif
+
+ #ifdef HAVE_KQUEUE
+ bool bGotExtraKqueueEvent;
+ #endif
+
+ bool bIsServer;
+
+ private:
+ void _UpdateEvents();
+ void _UpdateEvents(bool, bool);
+ void _WriteOutboundData();
+ void _DispatchInboundData (const char *buffer, int size);
+ void _DispatchCiphertext();
+ int _SendRawOutboundData (const char*, int);
+ void _CheckHandshakeStatus();
+
+ };
+
+
+ /************************
+ class DatagramDescriptor
+ ************************/
+
+ class DatagramDescriptor: public EventableDescriptor
+ {
+ public:
+ DatagramDescriptor (int, EventMachine_t*);
+ virtual ~DatagramDescriptor();
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite();
+
+ int SendOutboundData (const char*, int);
+ int SendOutboundDatagram (const char*, int, const char*, int);
+
+ // Do we have any data to write? This is used by ShouldDelete.
+ virtual int GetOutboundDataSize() {return OutboundDataSize;}
+
+ virtual bool GetPeername (struct sockaddr*, socklen_t*);
+ virtual bool GetSockname (struct sockaddr*, socklen_t*);
+
+ virtual uint64_t GetCommInactivityTimeout();
+ virtual int SetCommInactivityTimeout (uint64_t value);
+
+ protected:
+ struct OutboundPage {
+ OutboundPage (const char *b, int l, struct sockaddr_in f, int o=0): Buffer(b), Length(l), Offset(o), From(f) {}
+ void Free() {if (Buffer) free ((char*)Buffer); }
+ const char *Buffer;
+ int Length;
+ int Offset;
+ struct sockaddr_in From;
+ };
+
+ deque<OutboundPage> OutboundPages;
+ int OutboundDataSize;
+
+ struct sockaddr_in ReturnAddress;
+ };
+
+
+ /************************
+ class AcceptorDescriptor
+ ************************/
+
+ class AcceptorDescriptor: public EventableDescriptor
+ {
+ public:
+ AcceptorDescriptor (int, EventMachine_t*);
+ virtual ~AcceptorDescriptor();
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite() {return false;}
+
+ virtual bool GetSockname (struct sockaddr*, socklen_t*);
+
+ static void StopAcceptor (const unsigned long binding);
+ };
+
+ /********************
+ class PipeDescriptor
+ ********************/
+
+ #ifdef OS_UNIX
+ class PipeDescriptor: public EventableDescriptor
+ {
+ public:
+ PipeDescriptor (int, pid_t, EventMachine_t*);
+ virtual ~PipeDescriptor();
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead();
+ virtual bool SelectForWrite();
+
+ int SendOutboundData (const char*, int);
+ virtual int GetOutboundDataSize() {return OutboundDataSize;}
+
+ virtual bool GetSubprocessPid (pid_t*);
+
+ protected:
+ struct OutboundPage {
+ OutboundPage (const char *b, int l, int o=0): Buffer(b), Length(l), Offset(o) {}
+ void Free() {if (Buffer) free ((char*)Buffer); }
+ const char *Buffer;
+ int Length;
+ int Offset;
+ };
+
+ protected:
+ bool bReadAttemptedAfterClose;
+
+ deque<OutboundPage> OutboundPages;
+ int OutboundDataSize;
+
+ pid_t SubprocessPid;
+
+ private:
+ void _DispatchInboundData (const char *buffer, int size);
+ };
+ #endif // OS_UNIX
+
+
+ /************************
+ class KeyboardDescriptor
+ ************************/
+
+ class KeyboardDescriptor: public EventableDescriptor
+ {
+ public:
+ KeyboardDescriptor (EventMachine_t*);
+ virtual ~KeyboardDescriptor();
+
+ virtual void Read();
+ virtual void Write();
+ virtual void Heartbeat();
+
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite() {return false;}
+
+ protected:
+ bool bReadAttemptedAfterClose;
+
+ private:
+ void _DispatchInboundData (const char *buffer, int size);
+ };
+
+
+ /***********************
+ class InotifyDescriptor
+ ************************/
+
+ class InotifyDescriptor: public EventableDescriptor
+ {
+ public:
+ InotifyDescriptor (EventMachine_t*);
+ virtual ~InotifyDescriptor();
+
+ void Read();
+ void Write();
+
+ virtual void Heartbeat() {}
+ virtual bool SelectForRead() {return true;}
+ virtual bool SelectForWrite() {return false;}
+ };
+
+ #endif // __EventableDescriptor__H_
+
+
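The header above only declares the descriptor interface; the concrete Read/Write/Heartbeat implementations live in ed.cpp and the reactor in em.cpp below. For orientation, here is a minimal sketch (not part of the packaged sources) of how a subclass satisfies the pure-virtual contract; the class name EchoDescriptor and the placeholder comments are hypothetical.

// Illustrative sketch only -- not shipped in this gem.
class EchoDescriptor: public EventableDescriptor
{
	public:
		EchoDescriptor (int sd, EventMachine_t *em): EventableDescriptor (sd, em) {}
		virtual ~EchoDescriptor() {}

		// The reactor calls these when select/epoll/kqueue reports readiness.
		virtual void Read() { /* read(2) from GetSocket() and buffer the bytes */ }
		virtual void Write() { /* flush any buffered bytes back to GetSocket() */ }
		virtual void Heartbeat() { /* periodic timeout/housekeeping hook */ }

		// Tell the reactor which readiness events this descriptor wants.
		virtual bool SelectForRead() { return true; }
		virtual bool SelectForWrite() { return false; }
};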
data/ext/em.cpp
@@ -0,0 +1,2353 @@
+ /*****************************************************************************
+
+ $Id$
+
+ File: em.cpp
+ Date: 06Apr06
+
+ Copyright (C) 2006-07 by Francis Cianfrocca. All Rights Reserved.
+ Gmail: blackhedd
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of either: 1) the GNU General Public License
+ as published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version; or 2) Ruby's License.
+
+ See the file COPYING for complete licensing information.
+
+ *****************************************************************************/
+
+ // THIS ENTIRE FILE WILL EVENTUALLY BE FOR UNIX BUILDS ONLY.
+ //#ifdef OS_UNIX
+
+ #include "project.h"
+
+ /* The number of max outstanding timers was once a const enum defined in em.h.
+ * Now we define it here so that users can change its value if necessary.
+ */
+ static unsigned int MaxOutstandingTimers = 100000;
+
+
+ /* Internal helper to convert strings to internet addresses. IPv6-aware.
+ * Not reentrant or threadsafe, optimized for speed.
+ */
+ static struct sockaddr *name2address (const char *server, int port, int *family, int *bind_size);
+
+ /***************************************
+ STATIC EventMachine_t::GetMaxTimerCount
+ ***************************************/
+
+ int EventMachine_t::GetMaxTimerCount()
+ {
+ return MaxOutstandingTimers;
+ }
+
+
+ /***************************************
+ STATIC EventMachine_t::SetMaxTimerCount
+ ***************************************/
+
+ void EventMachine_t::SetMaxTimerCount (int count)
+ {
+ /* Allow a user to increase the maximum number of outstanding timers.
+ * If this gets "too high" (a metric that is of course platform dependent),
+ * bad things will happen like performance problems and possible overuse
+ * of memory.
+ * The actual timer mechanism is very efficient so it's hard to know what
+ * the practical max is, but 100,000 shouldn't be too problematic.
+ */
+ if (count < 100)
+ count = 100;
+ MaxOutstandingTimers = count;
+ }
+
+
+
+ /******************************
+ EventMachine_t::EventMachine_t
+ ******************************/
+
+ EventMachine_t::EventMachine_t (EMCallback event_callback):
+ HeartbeatInterval(2000000),
+ EventCallback (event_callback),
+ NextHeartbeatTime (0),
+ LoopBreakerReader (-1),
+ LoopBreakerWriter (-1),
+ NumCloseScheduled (0),
+ bTerminateSignalReceived (false),
+ bEpoll (false),
+ epfd (-1),
+ bKqueue (false),
+ kqfd (-1),
+ inotify (NULL)
+ {
+ // Default time-slice is just smaller than one hundred mills.
+ Quantum.tv_sec = 0;
+ Quantum.tv_usec = 90000;
+
+ // Make sure the current loop time is sane, in case we do any initializations of
+ // objects before we start running.
+ _UpdateTime();
+
+ /* We initialize the network library here (only on Windows of course)
+ * and initialize "loop breakers." Our destructor also does some network-level
+ * cleanup. There's thus an implicit assumption that any given instance of EventMachine_t
+ * will only call ::Run once. Is that a good assumption? Should we move some of these
+ * inits and de-inits into ::Run?
+ */
+ #ifdef OS_WIN32
+ WSADATA w;
+ WSAStartup (MAKEWORD (1, 1), &w);
+ #endif
+
+ _InitializeLoopBreaker();
+ }
+
106
+
107
+ /*******************************
108
+ EventMachine_t::~EventMachine_t
109
+ *******************************/
110
+
111
+ EventMachine_t::~EventMachine_t()
112
+ {
113
+ // Run down descriptors
114
+ size_t i;
115
+ for (i = 0; i < NewDescriptors.size(); i++)
116
+ delete NewDescriptors[i];
117
+ for (i = 0; i < Descriptors.size(); i++)
118
+ delete Descriptors[i];
119
+
120
+ close (LoopBreakerReader);
121
+ close (LoopBreakerWriter);
122
+
123
+ // Remove any file watch descriptors
124
+ while(!Files.empty()) {
125
+ map<int, Bindable_t*>::iterator f = Files.begin();
126
+ UnwatchFile (f->first);
127
+ }
128
+
129
+ if (epfd != -1)
130
+ close (epfd);
131
+ if (kqfd != -1)
132
+ close (kqfd);
133
+ }
134
+
135
+
136
+ /*************************
137
+ EventMachine_t::_UseEpoll
138
+ *************************/
139
+
140
+ void EventMachine_t::_UseEpoll()
141
+ {
142
+ /* Temporary.
143
+ * Use an internal flag to switch in epoll-based functionality until we determine
144
+ * how it should be integrated properly and the extent of the required changes.
145
+ * A permanent solution needs to allow the integration of additional technologies,
146
+ * like kqueue and Solaris's events.
147
+ */
148
+
149
+ #ifdef HAVE_EPOLL
150
+ bEpoll = true;
151
+ #endif
152
+ }
153
+
154
+ /**************************
155
+ EventMachine_t::_UseKqueue
156
+ **************************/
157
+
158
+ void EventMachine_t::_UseKqueue()
159
+ {
160
+ /* Temporary.
161
+ * See comments under _UseEpoll.
162
+ */
163
+
164
+ #ifdef HAVE_KQUEUE
165
+ bKqueue = true;
166
+ #endif
167
+ }
168
+
169
+
170
+ /****************************
171
+ EventMachine_t::ScheduleHalt
172
+ ****************************/
173
+
174
+ void EventMachine_t::ScheduleHalt()
175
+ {
176
+ /* This is how we stop the machine.
177
+ * This can be called by clients. Signal handlers will probably
178
+ * set the global flag.
179
+ * For now this means there can only be one EventMachine ever running at a time.
180
+ *
181
+ * IMPORTANT: keep this light, fast, and async-safe. Don't do anything frisky in here,
182
+ * because it may be called from signal handlers invoked from code that we don't
183
+ * control. At this writing (20Sep06), EM does NOT install any signal handlers of
184
+ * its own.
185
+ *
186
+ * We need a FAQ. And one of the questions is: how do I stop EM when Ctrl-C happens?
187
+ * The answer is to call evma_stop_machine, which calls here, from a SIGINT handler.
188
+ */
189
+ bTerminateSignalReceived = true;
190
+ }
191
+
192
+
193
+
194
+ /*******************************
195
+ EventMachine_t::SetTimerQuantum
196
+ *******************************/
197
+
198
+ void EventMachine_t::SetTimerQuantum (int interval)
199
+ {
200
+ /* We get a timer-quantum expressed in milliseconds.
201
+ */
202
+
203
+ if ((interval < 5) || (interval > 5*60*1000))
204
+ throw std::runtime_error ("invalid timer-quantum");
205
+
206
+ Quantum.tv_sec = interval / 1000;
207
+ Quantum.tv_usec = (interval % 1000) * 1000;
208
+ }
209
+
210
+
211
+ /*************************************
212
+ (STATIC) EventMachine_t::SetuidString
213
+ *************************************/
214
+
215
+ void EventMachine_t::SetuidString (const char *username)
216
+ {
217
+ /* This method takes a caller-supplied username and tries to setuid
218
+ * to that user. There is no meaningful implementation (and no error)
219
+ * on Windows. On Unix, a failure to setuid the caller-supplied string
220
+ * causes a fatal abort, because presumably the program is calling here
221
+ * in order to fulfill a security requirement. If we fail silently,
222
+ * the user may continue to run with too much privilege.
223
+ *
224
+ * TODO, we need to decide on and document a way of generating C++ level errors
225
+ * that can be wrapped in documented Ruby exceptions, so users can catch
226
+ * and handle them. And distinguish it from errors that we WON'T let the Ruby
227
+ * user catch (like security-violations and resource-overallocation).
228
+ * A setuid failure here would be in the latter category.
229
+ */
230
+
231
+ #ifdef OS_UNIX
232
+ if (!username || !*username)
233
+ throw std::runtime_error ("setuid_string failed: no username specified");
234
+
235
+ struct passwd *p = getpwnam (username);
236
+ if (!p)
237
+ throw std::runtime_error ("setuid_string failed: unknown username");
238
+
239
+ if (setuid (p->pw_uid) != 0)
240
+ throw std::runtime_error ("setuid_string failed: no setuid");
241
+
242
+ // Success.
243
+ #endif
244
+ }
245
+
246
+
247
+ /****************************************
248
+ (STATIC) EventMachine_t::SetRlimitNofile
249
+ ****************************************/
250
+
251
+ int EventMachine_t::SetRlimitNofile (int nofiles)
252
+ {
253
+ #ifdef OS_UNIX
254
+ struct rlimit rlim;
255
+ getrlimit (RLIMIT_NOFILE, &rlim);
256
+ if (nofiles >= 0) {
257
+ rlim.rlim_cur = nofiles;
258
+ if ((unsigned int)nofiles > rlim.rlim_max)
259
+ rlim.rlim_max = nofiles;
260
+ setrlimit (RLIMIT_NOFILE, &rlim);
261
+ // ignore the error return, for now at least.
262
+ // TODO, emit an error message someday when we have proper debug levels.
263
+ }
264
+ getrlimit (RLIMIT_NOFILE, &rlim);
265
+ return rlim.rlim_cur;
266
+ #endif
267
+
268
+ #ifdef OS_WIN32
269
+ // No meaningful implementation on Windows.
270
+ return 0;
271
+ #endif
272
+ }
273
+
274
+
275
+ /*********************************
276
+ EventMachine_t::SignalLoopBreaker
277
+ *********************************/
278
+
279
+ void EventMachine_t::SignalLoopBreaker()
280
+ {
281
+ #ifdef OS_UNIX
282
+ write (LoopBreakerWriter, "", 1);
283
+ #endif
284
+ #ifdef OS_WIN32
285
+ sendto (LoopBreakerReader, "", 0, 0, (struct sockaddr*)&(LoopBreakerTarget), sizeof(LoopBreakerTarget));
286
+ #endif
287
+ }
288
+
289
+
290
+ /**************************************
291
+ EventMachine_t::_InitializeLoopBreaker
292
+ **************************************/
293
+
294
+ void EventMachine_t::_InitializeLoopBreaker()
295
+ {
296
+ /* A "loop-breaker" is a socket-descriptor that we can write to in order
297
+ * to break the main select loop. Primarily useful for things running on
298
+ * threads other than the main EM thread, so they can trigger processing
299
+ * of events that arise exogenously to the EM.
300
+ * Keep the loop-breaker pipe out of the main descriptor set, otherwise
301
+ * its events will get passed on to user code.
302
+ */
303
+
304
+ #ifdef OS_UNIX
305
+ int fd[2];
306
+ if (pipe (fd))
307
+ throw std::runtime_error (strerror(errno));
308
+
309
+ LoopBreakerWriter = fd[1];
310
+ LoopBreakerReader = fd[0];
311
+
312
+ /* 16Jan11: Make sure the pipe is non-blocking, so more than 65k loopbreaks
313
+ * in one tick do not fill up the pipe and block the process on write() */
314
+ SetSocketNonblocking (LoopBreakerWriter);
315
+ #endif
316
+
317
+ #ifdef OS_WIN32
318
+ int sd = socket (AF_INET, SOCK_DGRAM, 0);
319
+ if (sd == INVALID_SOCKET)
320
+ throw std::runtime_error ("no loop breaker socket");
321
+ SetSocketNonblocking (sd);
322
+
323
+ memset (&LoopBreakerTarget, 0, sizeof(LoopBreakerTarget));
324
+ LoopBreakerTarget.sin_family = AF_INET;
325
+ LoopBreakerTarget.sin_addr.s_addr = inet_addr ("127.0.0.1");
326
+
327
+ srand ((int)time(NULL));
328
+ int i;
329
+ for (i=0; i < 100; i++) {
330
+ int r = (rand() % 10000) + 20000;
331
+ LoopBreakerTarget.sin_port = htons (r);
332
+ if (bind (sd, (struct sockaddr*)&LoopBreakerTarget, sizeof(LoopBreakerTarget)) == 0)
333
+ break;
334
+ }
335
+
336
+ if (i == 100)
337
+ throw std::runtime_error ("no loop breaker");
338
+ LoopBreakerReader = sd;
339
+ #endif
340
+ }
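The comment above describes the reactor's "loop breaker": a descriptor that other threads can write to in order to wake the blocking select/epoll/kqueue wait. For readers unfamiliar with the pattern, here is a free-standing sketch of the same self-pipe idea (illustrative only, not part of the packaged sources); it assumes a Unix build and a plain select() wait.

// Illustrative sketch only -- not shipped in this gem.
// A second thread writes one byte to a pipe; the select() watching the
// pipe's read end returns early, just as EventMachine's loop breaker does.
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/select.h>

static int breaker[2]; // breaker[0]: read end watched by select, breaker[1]: write end

static void *wake_reactor (void *)
{
	sleep (1);
	write (breaker[1], "", 1); // the payload doesn't matter, only the wakeup
	return 0;
}

int main()
{
	pipe (breaker);
	fcntl (breaker[1], F_SETFL, O_NONBLOCK); // keep writers from blocking, as in _InitializeLoopBreaker

	pthread_t t;
	pthread_create (&t, 0, wake_reactor, 0);

	fd_set reads;
	FD_ZERO (&reads);
	FD_SET (breaker[0], &reads);
	select (breaker[0] + 1, &reads, 0, 0, 0); // blocks until the byte arrives

	char buf [128];
	read (breaker[0], buf, sizeof(buf)); // drain it once, like _ReadLoopBreaker
	pthread_join (t, 0);
	return 0;
}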
341
+
342
+ /***************************
343
+ EventMachine_t::_UpdateTime
344
+ ***************************/
345
+
346
+ void EventMachine_t::_UpdateTime()
347
+ {
348
+ MyCurrentLoopTime = GetRealTime();
349
+ }
350
+
351
+ /***************************
352
+ EventMachine_t::GetRealTime
353
+ ***************************/
354
+
355
+ uint64_t EventMachine_t::GetRealTime()
356
+ {
357
+ uint64_t current_time;
358
+
359
+ #if defined(OS_UNIX)
360
+ struct timeval tv;
361
+ gettimeofday (&tv, NULL);
362
+ current_time = (((uint64_t)(tv.tv_sec)) * 1000000LL) + ((uint64_t)(tv.tv_usec));
363
+
364
+ #elif defined(OS_WIN32)
365
+ unsigned tick = GetTickCount();
366
+ if (tick < LastTickCount)
367
+ TickCountTickover += 1;
368
+ LastTickCount = tick;
369
+ current_time = ((uint64_t)TickCountTickover << 32) + (uint64_t)tick;
370
+ current_time *= 1000; // convert to microseconds
371
+
372
+ #else
373
+ current_time = (uint64_t)time(NULL) * 1000000LL;
374
+ #endif
375
+
376
+ return current_time;
377
+ }
378
+
379
+ /***********************************
380
+ EventMachine_t::_DispatchHeartbeats
381
+ ***********************************/
382
+
383
+ void EventMachine_t::_DispatchHeartbeats()
384
+ {
385
+ while (true) {
386
+ multimap<uint64_t,EventableDescriptor*>::iterator i = Heartbeats.begin();
387
+ if (i == Heartbeats.end())
388
+ break;
389
+ if (i->first > MyCurrentLoopTime)
390
+ break;
391
+ EventableDescriptor *ed = i->second;
392
+ ed->Heartbeat();
393
+ QueueHeartbeat(ed);
394
+ }
395
+ }
396
+
397
+ /******************************
398
+ EventMachine_t::QueueHeartbeat
399
+ ******************************/
400
+
401
+ void EventMachine_t::QueueHeartbeat(EventableDescriptor *ed)
402
+ {
403
+ uint64_t heartbeat = ed->GetNextHeartbeat();
404
+
405
+ if (heartbeat) {
406
+ #ifndef HAVE_MAKE_PAIR
407
+ Heartbeats.insert (multimap<uint64_t,EventableDescriptor*>::value_type (heartbeat, ed));
408
+ #else
409
+ Heartbeats.insert (make_pair (heartbeat, ed));
410
+ #endif
411
+ }
412
+ }
413
+
414
+ /******************************
415
+ EventMachine_t::ClearHeartbeat
416
+ ******************************/
417
+
418
+ void EventMachine_t::ClearHeartbeat(uint64_t key, EventableDescriptor* ed)
419
+ {
420
+ multimap<uint64_t,EventableDescriptor*>::iterator it;
421
+ pair<multimap<uint64_t,EventableDescriptor*>::iterator,multimap<uint64_t,EventableDescriptor*>::iterator> ret;
422
+ ret = Heartbeats.equal_range (key);
423
+ for (it = ret.first; it != ret.second; ++it) {
424
+ if (it->second == ed) {
425
+ Heartbeats.erase (it);
426
+ break;
427
+ }
428
+ }
429
+ }
430
+
431
+ /*******************
432
+ EventMachine_t::Run
433
+ *******************/
434
+
435
+ void EventMachine_t::Run()
436
+ {
437
+ #ifdef HAVE_EPOLL
438
+ if (bEpoll) {
439
+ epfd = epoll_create (MaxEpollDescriptors);
440
+ if (epfd == -1) {
441
+ char buf[200];
442
+ snprintf (buf, sizeof(buf)-1, "unable to create epoll descriptor: %s", strerror(errno));
443
+ throw std::runtime_error (buf);
444
+ }
445
+ int cloexec = fcntl (epfd, F_GETFD, 0);
446
+ assert (cloexec >= 0);
447
+ cloexec |= FD_CLOEXEC;
448
+ fcntl (epfd, F_SETFD, cloexec);
449
+
450
+ assert (LoopBreakerReader >= 0);
451
+ LoopbreakDescriptor *ld = new LoopbreakDescriptor (LoopBreakerReader, this);
452
+ assert (ld);
453
+ Add (ld);
454
+ }
455
+ #endif
456
+
457
+ #ifdef HAVE_KQUEUE
458
+ if (bKqueue) {
459
+ kqfd = kqueue();
460
+ if (kqfd == -1) {
461
+ char buf[200];
462
+ snprintf (buf, sizeof(buf)-1, "unable to create kqueue descriptor: %s", strerror(errno));
463
+ throw std::runtime_error (buf);
464
+ }
465
+ // cloexec not needed. By definition, kqueues are not carried across forks.
466
+
467
+ assert (LoopBreakerReader >= 0);
468
+ LoopbreakDescriptor *ld = new LoopbreakDescriptor (LoopBreakerReader, this);
469
+ assert (ld);
470
+ Add (ld);
471
+ }
472
+ #endif
473
+
474
+ while (true) {
475
+ _UpdateTime();
476
+ _RunTimers();
477
+
478
+ /* _Add must precede _Modify because the same descriptor might
479
+ * be on both lists during the same pass through the machine,
480
+ * and to modify a descriptor before adding it would fail.
481
+ */
482
+ _AddNewDescriptors();
483
+ _ModifyDescriptors();
484
+
485
+ if (!_RunOnce())
486
+ break;
487
+ if (bTerminateSignalReceived)
488
+ break;
489
+ }
490
+ }
491
+
492
+
493
+ /************************
494
+ EventMachine_t::_RunOnce
495
+ ************************/
496
+
497
+ bool EventMachine_t::_RunOnce()
498
+ {
499
+ bool ret;
500
+ if (bEpoll)
501
+ ret = _RunEpollOnce();
502
+ else if (bKqueue)
503
+ ret = _RunKqueueOnce();
504
+ else
505
+ ret = _RunSelectOnce();
506
+ _DispatchHeartbeats();
507
+ _CleanupSockets();
508
+ return ret;
509
+ }
510
+
511
+
512
+
513
+ /*****************************
514
+ EventMachine_t::_RunEpollOnce
515
+ *****************************/
516
+
517
+ bool EventMachine_t::_RunEpollOnce()
518
+ {
519
+ #ifdef HAVE_EPOLL
520
+ assert (epfd != -1);
521
+ int s;
522
+
523
+ timeval tv = _TimeTilNextEvent();
524
+
525
+ #ifdef BUILD_FOR_RUBY
526
+ int ret = 0;
527
+ fd_set fdreads;
528
+
529
+ FD_ZERO(&fdreads);
530
+ FD_SET(epfd, &fdreads);
531
+
532
+ if ((ret = rb_thread_select(epfd + 1, &fdreads, NULL, NULL, &tv)) < 1) {
533
+ if (ret == -1) {
534
+ assert(errno != EINVAL);
535
+ assert(errno != EBADF);
536
+ }
537
+ return true;
538
+ }
539
+
540
+ TRAP_BEG;
541
+ s = epoll_wait (epfd, epoll_events, MaxEvents, 0);
542
+ TRAP_END;
543
+ #else
544
+ int duration = 0;
545
+ duration = duration + (tv.tv_sec * 1000);
546
+ duration = duration + (tv.tv_usec / 1000);
547
+ s = epoll_wait (epfd, epoll_events, MaxEvents, duration);
548
+ #endif
549
+
550
+ if (s > 0) {
551
+ for (int i=0; i < s; i++) {
552
+ EventableDescriptor *ed = (EventableDescriptor*) epoll_events[i].data.ptr;
553
+
554
+ if (ed->IsWatchOnly() && ed->GetSocket() == INVALID_SOCKET)
555
+ continue;
556
+
557
+ assert(ed->GetSocket() != INVALID_SOCKET);
558
+
559
+ if (epoll_events[i].events & EPOLLIN)
560
+ ed->Read();
561
+ if (epoll_events[i].events & EPOLLOUT)
562
+ ed->Write();
563
+ if (epoll_events[i].events & (EPOLLERR | EPOLLHUP))
564
+ ed->HandleError();
565
+ }
566
+ }
567
+ else if (s < 0) {
568
+ // epoll_wait can fail on error in a handful of ways.
569
+ // If this happens, then wait for a little while to avoid busy-looping.
570
+ // If the error was EINTR, we probably caught SIGCHLD or something,
571
+ // so keep the wait short.
572
+ timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
573
+ EmSelect (0, NULL, NULL, NULL, &tv);
574
+ }
575
+
576
+ return true;
577
+ #else
578
+ throw std::runtime_error ("epoll is not implemented on this platform");
579
+ #endif
580
+ }
581
+
582
+
583
+ /******************************
584
+ EventMachine_t::_RunKqueueOnce
585
+ ******************************/
586
+
587
+ bool EventMachine_t::_RunKqueueOnce()
588
+ {
589
+ #ifdef HAVE_KQUEUE
590
+ assert (kqfd != -1);
591
+ int k;
592
+
593
+ timeval tv = _TimeTilNextEvent();
594
+
595
+ struct timespec ts;
596
+ ts.tv_sec = tv.tv_sec;
597
+ ts.tv_nsec = tv.tv_usec * 1000;
598
+
599
+ #ifdef BUILD_FOR_RUBY
600
+ int ret = 0;
601
+ fd_set fdreads;
602
+
603
+ FD_ZERO(&fdreads);
604
+ FD_SET(kqfd, &fdreads);
605
+
606
+ if ((ret = rb_thread_select(kqfd + 1, &fdreads, NULL, NULL, &tv)) < 1) {
607
+ if (ret == -1) {
608
+ assert(errno != EINVAL);
609
+ assert(errno != EBADF);
610
+ }
611
+ return true;
612
+ }
613
+
614
+ TRAP_BEG;
615
+ ts.tv_sec = ts.tv_nsec = 0;
616
+ k = kevent (kqfd, NULL, 0, Karray, MaxEvents, &ts);
617
+ TRAP_END;
618
+ #else
619
+ k = kevent (kqfd, NULL, 0, Karray, MaxEvents, &ts);
620
+ #endif
621
+
622
+ struct kevent *ke = Karray;
623
+ while (k > 0) {
624
+ switch (ke->filter)
625
+ {
626
+ case EVFILT_VNODE:
627
+ _HandleKqueueFileEvent (ke);
628
+ break;
629
+
630
+ case EVFILT_PROC:
631
+ _HandleKqueuePidEvent (ke);
632
+ break;
633
+
634
+ case EVFILT_READ:
635
+ case EVFILT_WRITE:
636
+ EventableDescriptor *ed = (EventableDescriptor*) (ke->udata);
637
+ assert (ed);
638
+
639
+ if (ed->IsWatchOnly() && ed->GetSocket() == INVALID_SOCKET)
640
+ break;
641
+
642
+ if (ke->filter == EVFILT_READ)
643
+ ed->Read();
644
+ else if (ke->filter == EVFILT_WRITE)
645
+ ed->Write();
646
+ else
647
+ cerr << "Discarding unknown kqueue event " << ke->filter << endl;
648
+
649
+ break;
650
+ }
651
+
652
+ --k;
653
+ ++ke;
654
+ }
655
+
656
+ // TODO, replace this with rb_thread_blocking_region for 1.9 builds.
657
+ #ifdef BUILD_FOR_RUBY
658
+ if (!rb_thread_alone()) {
659
+ rb_thread_schedule();
660
+ }
661
+ #endif
662
+
663
+ return true;
664
+ #else
665
+ throw std::runtime_error ("kqueue is not implemented on this platform");
666
+ #endif
667
+ }
668
+
669
+
670
+ /*********************************
671
+ EventMachine_t::_TimeTilNextEvent
672
+ *********************************/
673
+
674
+ timeval EventMachine_t::_TimeTilNextEvent()
675
+ {
676
+ // 29jul11: Changed calculation base from MyCurrentLoopTime to the
677
+ // real time. As MyCurrentLoopTime is set at the beginning of an
678
+ // iteration and this calculation is done at the end, eventmachine
679
+ // will potentially oversleep by the amount of time the iteration
680
+ // took to execute.
681
+ uint64_t next_event = 0;
682
+ uint64_t current_time = GetRealTime();
683
+
684
+ if (!Heartbeats.empty()) {
685
+ multimap<uint64_t,EventableDescriptor*>::iterator heartbeats = Heartbeats.begin();
686
+ next_event = heartbeats->first;
687
+ }
688
+
689
+ if (!Timers.empty()) {
690
+ multimap<uint64_t,Timer_t>::iterator timers = Timers.begin();
691
+ if (next_event == 0 || timers->first < next_event)
692
+ next_event = timers->first;
693
+ }
694
+
695
+ if (!NewDescriptors.empty() || !ModifiedDescriptors.empty()) {
696
+ next_event = current_time;
697
+ }
698
+
699
+ timeval tv;
700
+
701
+ if (next_event == 0 || NumCloseScheduled > 0) {
702
+ tv = Quantum;
703
+ } else {
704
+ if (next_event > current_time) {
705
+ uint64_t duration = next_event - current_time;
706
+ tv.tv_sec = duration / 1000000;
707
+ tv.tv_usec = duration % 1000000;
708
+ } else {
709
+ tv.tv_sec = tv.tv_usec = 0;
710
+ }
711
+ }
712
+
713
+ return tv;
714
+ }
715
+
716
+ /*******************************
717
+ EventMachine_t::_CleanupSockets
718
+ *******************************/
719
+
720
+ void EventMachine_t::_CleanupSockets()
721
+ {
722
+ // TODO, rip this out and only delete the descriptors we know have died,
723
+ // rather than traversing the whole list.
724
+ // Modified 05Jan08 per suggestions by Chris Heath. It's possible that
725
+ // an EventableDescriptor will have a descriptor value of -1. That will
726
+ // happen if EventableDescriptor::Close was called on it. In that case,
727
+ // don't call epoll_ctl to remove the socket's filters from the epoll set.
728
+ // According to the epoll docs, this happens automatically when the
729
+ // descriptor is closed anyway. This is different from the case where
730
+ // the socket has already been closed but the descriptor in the ED object
731
+ // hasn't yet been set to INVALID_SOCKET.
732
+ // In kqueue, closing a descriptor automatically removes its event filters.
733
+ int i, j;
734
+ int nSockets = Descriptors.size();
735
+ for (i=0, j=0; i < nSockets; i++) {
736
+ EventableDescriptor *ed = Descriptors[i];
737
+ assert (ed);
738
+ if (ed->ShouldDelete()) {
739
+ #ifdef HAVE_EPOLL
740
+ if (bEpoll) {
741
+ assert (epfd != -1);
742
+ if (ed->GetSocket() != INVALID_SOCKET) {
743
+ int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
744
+ // ENOENT or EBADF are not errors because the socket may be already closed when we get here.
745
+ if (e && (errno != ENOENT) && (errno != EBADF) && (errno != EPERM)) {
746
+ char buf [200];
747
+ snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
748
+ throw std::runtime_error (buf);
749
+ }
750
+ }
751
+ ModifiedDescriptors.erase(ed);
752
+ }
753
+ #endif
754
+ delete ed;
755
+ }
756
+ else
757
+ Descriptors [j++] = ed;
758
+ }
759
+ while ((size_t)j < Descriptors.size())
760
+ Descriptors.pop_back();
761
+ }
762
+
763
+ /*********************************
764
+ EventMachine_t::_ModifyEpollEvent
765
+ *********************************/
766
+
767
+ void EventMachine_t::_ModifyEpollEvent (EventableDescriptor *ed)
768
+ {
769
+ #ifdef HAVE_EPOLL
770
+ if (bEpoll) {
771
+ assert (epfd != -1);
772
+ assert (ed);
773
+ assert (ed->GetSocket() != INVALID_SOCKET);
774
+ int e = epoll_ctl (epfd, EPOLL_CTL_MOD, ed->GetSocket(), ed->GetEpollEvent());
775
+ if (e) {
776
+ char buf [200];
777
+ snprintf (buf, sizeof(buf)-1, "unable to modify epoll event: %s", strerror(errno));
778
+ throw std::runtime_error (buf);
779
+ }
780
+ }
781
+ #endif
782
+ }
783
+
784
+
785
+
786
+ /**************************
787
+ SelectData_t::SelectData_t
788
+ **************************/
789
+
790
+ SelectData_t::SelectData_t()
791
+ {
792
+ maxsocket = 0;
793
+ FD_ZERO (&fdreads);
794
+ FD_ZERO (&fdwrites);
795
+ FD_ZERO (&fderrors);
796
+ }
797
+
798
+
799
+ #ifdef BUILD_FOR_RUBY
800
+ /*****************
801
+ _SelectDataSelect
802
+ *****************/
803
+
804
+ #ifdef HAVE_TBR
805
+ static VALUE _SelectDataSelect (void *v)
806
+ {
807
+ SelectData_t *sd = (SelectData_t*)v;
808
+ sd->nSockets = select (sd->maxsocket+1, &(sd->fdreads), &(sd->fdwrites), &(sd->fderrors), &(sd->tv));
809
+ return Qnil;
810
+ }
811
+ #endif
812
+
813
+ /*********************
814
+ SelectData_t::_Select
815
+ *********************/
816
+
817
+ int SelectData_t::_Select()
818
+ {
819
+ #ifdef HAVE_TBR
820
+ rb_thread_blocking_region (_SelectDataSelect, (void*)this, RUBY_UBF_IO, 0);
821
+ return nSockets;
822
+ #endif
823
+
824
+ #ifndef HAVE_TBR
825
+ return EmSelect (maxsocket+1, &fdreads, &fdwrites, &fderrors, &tv);
826
+ #endif
827
+ }
828
+ #endif
829
+
830
+
831
+
832
+ /******************************
833
+ EventMachine_t::_RunSelectOnce
834
+ ******************************/
835
+
836
+ bool EventMachine_t::_RunSelectOnce()
837
+ {
838
+ // Crank the event machine once.
839
+ // If there are no descriptors to process, then sleep
840
+ // for a few hundred mills to avoid busy-looping.
841
+ // Return T/F to indicate whether we should continue.
842
+ // This is based on a select loop. Alternately provide epoll
843
+ // if we know we're running on a 2.6 kernel.
844
+ // epoll will be effective if we provide it as an alternative,
845
+ // however it has the same problem interoperating with Ruby
846
+ // threads that select does.
847
+
848
+ //cerr << "X";
849
+
850
+ /* This protection is now obsolete, because we will ALWAYS
851
+ * have at least one descriptor (the loop-breaker) to read.
852
+ */
853
+ /*
854
+ if (Descriptors.size() == 0) {
855
+ #ifdef OS_UNIX
856
+ timeval tv = {0, 200 * 1000};
857
+ EmSelect (0, NULL, NULL, NULL, &tv);
858
+ return true;
859
+ #endif
860
+ #ifdef OS_WIN32
861
+ Sleep (200);
862
+ return true;
863
+ #endif
864
+ }
865
+ */
866
+
867
+ SelectData_t SelectData;
868
+ /*
869
+ fd_set fdreads, fdwrites;
870
+ FD_ZERO (&fdreads);
871
+ FD_ZERO (&fdwrites);
872
+
873
+ int maxsocket = 0;
874
+ */
875
+
876
+ // Always read the loop-breaker reader.
877
+ // Changed 23Aug06, provisionally implemented for Windows with a UDP socket
878
+ // running on localhost with a randomly-chosen port. (*Puke*)
879
+ // Windows has a version of the Unix pipe() library function, but it doesn't
880
+ // give you back descriptors that are selectable.
881
+ FD_SET (LoopBreakerReader, &(SelectData.fdreads));
882
+ if (SelectData.maxsocket < LoopBreakerReader)
883
+ SelectData.maxsocket = LoopBreakerReader;
884
+
885
+ // prepare the sockets for reading and writing
886
+ size_t i;
887
+ for (i = 0; i < Descriptors.size(); i++) {
888
+ EventableDescriptor *ed = Descriptors[i];
889
+ assert (ed);
890
+ int sd = ed->GetSocket();
891
+ if (ed->IsWatchOnly() && sd == INVALID_SOCKET)
892
+ continue;
893
+ assert (sd != INVALID_SOCKET);
894
+
895
+ if (ed->SelectForRead())
896
+ FD_SET (sd, &(SelectData.fdreads));
897
+ if (ed->SelectForWrite())
898
+ FD_SET (sd, &(SelectData.fdwrites));
899
+
900
+ #ifdef OS_WIN32
901
+ /* 21Sep09: on windows, a non-blocking connect() that fails does not come up as writable.
902
+ Instead, it is added to the error set. See http://www.mail-archive.com/openssl-users@openssl.org/msg58500.html
903
+ */
904
+ FD_SET (sd, &(SelectData.fderrors));
905
+ #endif
906
+
907
+ if (SelectData.maxsocket < sd)
908
+ SelectData.maxsocket = sd;
909
+ }
910
+
911
+
912
+ { // read and write the sockets
913
+ //timeval tv = {1, 0}; // Solaris fails if the microseconds member is >= 1000000.
914
+ //timeval tv = Quantum;
915
+ SelectData.tv = _TimeTilNextEvent();
916
+ int s = SelectData._Select();
917
+ //rb_thread_blocking_region(xxx,(void*)&SelectData,RUBY_UBF_IO,0);
918
+ //int s = EmSelect (SelectData.maxsocket+1, &(SelectData.fdreads), &(SelectData.fdwrites), NULL, &(SelectData.tv));
919
+ //int s = SelectData.nSockets;
920
+ if (s > 0) {
921
+ /* Changed 01Jun07. We used to handle the Loop-breaker right here.
922
+ * Now we do it AFTER all the regular descriptors. There's an
923
+ * incredibly important and subtle reason for this. Code on
924
+ * loop breakers is sometimes used to cause the reactor core to
925
+ * cycle (for example, to allow outbound network buffers to drain).
926
+ * If a loop-breaker handler reschedules itself (say, after determining
927
+ * that the write buffers are still too full), then it will execute
928
+ * IMMEDIATELY if _ReadLoopBreaker is done here instead of after
929
+ * the other descriptors are processed. That defeats the whole purpose.
930
+ */
931
+ for (i=0; i < Descriptors.size(); i++) {
932
+ EventableDescriptor *ed = Descriptors[i];
933
+ assert (ed);
934
+ int sd = ed->GetSocket();
935
+ if (ed->IsWatchOnly() && sd == INVALID_SOCKET)
936
+ continue;
937
+ assert (sd != INVALID_SOCKET);
938
+
939
+ if (FD_ISSET (sd, &(SelectData.fdwrites)))
940
+ ed->Write();
941
+ if (FD_ISSET (sd, &(SelectData.fdreads)))
942
+ ed->Read();
943
+ if (FD_ISSET (sd, &(SelectData.fderrors)))
944
+ ed->HandleError();
945
+ }
946
+
947
+ if (FD_ISSET (LoopBreakerReader, &(SelectData.fdreads)))
948
+ _ReadLoopBreaker();
949
+ }
950
+ else if (s < 0) {
951
+ switch (errno) {
952
+ case EBADF:
953
+ _CleanBadDescriptors();
954
+ break;
955
+ case EINVAL:
956
+ throw std::runtime_error ("Somehow EM passed an invalid nfds or invalid timeout to select(2), please report this!");
957
+ break;
958
+ default:
959
+ // select can fail on error in a handful of ways.
960
+ // If this happens, then wait for a little while to avoid busy-looping.
961
+ // If the error was EINTR, we probably caught SIGCHLD or something,
962
+ // so keep the wait short.
963
+ timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
964
+ EmSelect (0, NULL, NULL, NULL, &tv);
965
+ }
966
+ }
967
+ }
968
+
969
+ return true;
970
+ }
971
+
972
+ void EventMachine_t::_CleanBadDescriptors()
973
+ {
974
+ size_t i;
975
+
976
+ for (i = 0; i < Descriptors.size(); i++) {
977
+ EventableDescriptor *ed = Descriptors[i];
978
+ if (ed->ShouldDelete())
979
+ continue;
980
+
981
+ int sd = ed->GetSocket();
982
+
983
+ struct timeval tv;
984
+ tv.tv_sec = 0;
985
+ tv.tv_usec = 0;
986
+
987
+ fd_set fds;
988
+ FD_ZERO(&fds);
989
+ FD_SET(sd, &fds);
990
+
991
+ int ret = select(sd + 1, &fds, NULL, NULL, &tv);
992
+
993
+ if (ret == -1) {
994
+ if (errno == EBADF)
995
+ ed->ScheduleClose(false);
996
+ }
997
+ }
998
+ }
999
+
1000
+ /********************************
1001
+ EventMachine_t::_ReadLoopBreaker
1002
+ ********************************/
1003
+
1004
+ void EventMachine_t::_ReadLoopBreaker()
1005
+ {
1006
+ /* The loop breaker has selected readable.
1007
+ * Read it ONCE (it may block if we try to read it twice)
1008
+ * and send a loop-break event back to user code.
1009
+ */
1010
+ char buffer [1024];
1011
+ read (LoopBreakerReader, buffer, sizeof(buffer));
1012
+ if (EventCallback)
1013
+ (*EventCallback)(0, EM_LOOPBREAK_SIGNAL, "", 0);
1014
+ }
1015
+
1016
+
1017
+ /**************************
1018
+ EventMachine_t::_RunTimers
1019
+ **************************/
1020
+
1021
+ void EventMachine_t::_RunTimers()
1022
+ {
1023
+ // These are caller-defined timer handlers.
1024
+ // We rely on the fact that multimaps sort by their keys to avoid
1025
+ // inspecting the whole list every time we come here.
1026
+ // Just keep inspecting and processing the list head until we hit
1027
+ // one that hasn't expired yet.
1028
+
1029
+ while (true) {
1030
+ multimap<uint64_t,Timer_t>::iterator i = Timers.begin();
1031
+ if (i == Timers.end())
1032
+ break;
1033
+ if (i->first > MyCurrentLoopTime)
1034
+ break;
1035
+ if (EventCallback)
1036
+ (*EventCallback) (0, EM_TIMER_FIRED, NULL, i->second.GetBinding());
1037
+ Timers.erase (i);
1038
+ }
1039
+ }
1040
+
1041
+
1042
+
1043
+ /***********************************
1044
+ EventMachine_t::InstallOneshotTimer
1045
+ ***********************************/
1046
+
1047
+ const unsigned long EventMachine_t::InstallOneshotTimer (int milliseconds)
1048
+ {
1049
+ if (Timers.size() > MaxOutstandingTimers)
1050
+ return false;
1051
+
1052
+ uint64_t fire_at = GetRealTime();
1053
+ fire_at += ((uint64_t)milliseconds) * 1000LL;
1054
+
1055
+ Timer_t t;
1056
+ #ifndef HAVE_MAKE_PAIR
1057
+ multimap<uint64_t,Timer_t>::iterator i = Timers.insert (multimap<uint64_t,Timer_t>::value_type (fire_at, t));
1058
+ #else
1059
+ multimap<uint64_t,Timer_t>::iterator i = Timers.insert (make_pair (fire_at, t));
1060
+ #endif
1061
+ return i->second.GetBinding();
1062
+ }
1063
+
1064
+
1065
+ /*******************************
1066
+ EventMachine_t::ConnectToServer
1067
+ *******************************/
1068
+
1069
+ const unsigned long EventMachine_t::ConnectToServer (const char *bind_addr, int bind_port, const char *server, int port)
1070
+ {
1071
+ /* We want to spend no more than a few seconds waiting for a connection
1072
+ * to a remote host. So we use a nonblocking connect.
1073
+ * Linux disobeys the usual rules for nonblocking connects.
1074
+ * Per Stevens (UNP p.410), you expect a nonblocking connect to select
1075
+ * both readable and writable on error, and not to return EINPROGRESS
1076
+ * if the connect can be fulfilled immediately. Linux violates both
1077
+ * of these expectations.
1078
+ * Any kind of nonblocking connect on Linux returns EINPROGRESS.
1079
+ * The socket will then return writable when the disposition of the
1080
+ * connect is known, but it will not also be readable in case of
1081
+ * error! Weirdly, it will be readable in case there is data to read!!!
1082
+ * (Which can happen with protocols like SSH and SMTP.)
1083
+ * I suppose if you were so inclined you could consider this logical,
1084
+ * but it's not the way Unix has historically done it.
1085
+ * So we ignore the readable flag and read getsockopt to see if there
1086
+ * was an error connecting. A select timeout works as expected.
1087
+ * In regard to getsockopt: Linux does the Berkeley-style thing,
1088
+ * not the Solaris-style, and returns zero with the error code in
1089
+ * the error parameter.
1090
+ * Return the binding-text of the newly-created pending connection,
1091
+ * or NULL if there was a problem.
1092
+ */
1093
+
1094
+ if (!server || !*server || !port)
1095
+ throw std::runtime_error ("invalid server or port");
1096
+
1097
+ int family, bind_size;
1098
+ struct sockaddr bind_as, *bind_as_ptr = name2address (server, port, &family, &bind_size);
1099
+ if (!bind_as_ptr)
1100
+ throw std::runtime_error ("unable to resolve server address");
1101
+ bind_as = *bind_as_ptr; // copy because name2address points to a static
1102
+
1103
+ int sd = socket (family, SOCK_STREAM, 0);
1104
+ if (sd == INVALID_SOCKET) {
1105
+ char buf [200];
1106
+ snprintf (buf, sizeof(buf)-1, "unable to create new socket: %s", strerror(errno));
1107
+ throw std::runtime_error (buf);
1108
+ }
1109
+
1110
+ // From here on, ALL error returns must close the socket.
1111
+ // Set the new socket nonblocking.
1112
+ if (!SetSocketNonblocking (sd)) {
1113
+ close (sd);
1114
+ throw std::runtime_error ("unable to set socket as non-blocking");
1115
+ }
1116
+ // Disable slow-start (Nagle algorithm).
1117
+ int one = 1;
1118
+ setsockopt (sd, IPPROTO_TCP, TCP_NODELAY, (char*) &one, sizeof(one));
1119
+ // Set reuseaddr to improve performance on restarts
1120
+ setsockopt (sd, SOL_SOCKET, SO_REUSEADDR, (char*) &one, sizeof(one));
1121
+
1122
+ if (bind_addr) {
1123
+ int bind_to_size, bind_to_family;
1124
+ struct sockaddr *bind_to = name2address (bind_addr, bind_port, &bind_to_family, &bind_to_size);
1125
+ if (!bind_to) {
1126
+ close (sd);
1127
+ throw std::runtime_error ("invalid bind address");
1128
+ }
1129
+ if (bind (sd, bind_to, bind_to_size) < 0) {
1130
+ close (sd);
1131
+ throw std::runtime_error ("couldn't bind to address");
1132
+ }
1133
+ }
1134
+
1135
+ unsigned long out = 0;
1136
+ int e = 0;
1137
+
1138
+ #ifdef OS_UNIX
1139
+ //if (connect (sd, (sockaddr*)&pin, sizeof pin) == 0) {
1140
+ if (connect (sd, &bind_as, bind_size) == 0) {
1141
+ // This is a connect success, which Linux appears
1142
+ // never to give when the socket is nonblocking,
1143
+ // even if the connection is intramachine or to
1144
+ // localhost.
1145
+
1146
+ /* Changed this branch 08Aug06. Evidently some kernels
1147
+ * (FreeBSD for example) will actually return success from
1148
+ * a nonblocking connect. This is a pretty simple case,
1149
+ * just set up the new connection and clear the pending flag.
1150
+ * Thanks to Chris Ochs for helping track this down.
1151
+ * This branch never gets taken on Linux or (oddly) OSX.
1152
+ * The original behavior was to throw an unimplemented,
1153
+ * which the user saw as a fatal exception. Very unfriendly.
1154
+ *
1155
+ * Tweaked 10Aug06. Even though the connect disposition is
1156
+ * known, we still set the connect-pending flag. That way
1157
+ * some needed initialization will happen in the ConnectionDescriptor.
1158
+ * (To wit, the ConnectionCompleted event gets sent to the client.)
1159
+ */
1160
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1161
+ if (!cd)
1162
+ throw std::runtime_error ("no connection allocated");
1163
+ cd->SetConnectPending (true);
1164
+ Add (cd);
1165
+ out = cd->GetBinding();
1166
+ }
1167
+ else if (errno == EINPROGRESS) {
1168
+ // Errno should be EINPROGRESS here, but on Linux
1169
+ // we have to look at getsockopt to be sure what really happened.
1170
+ int error = 0;
1171
+ socklen_t len;
1172
+ len = sizeof(error);
1173
+ int o = getsockopt (sd, SOL_SOCKET, SO_ERROR, &error, &len);
1174
+ if ((o == 0) && (error == 0)) {
1175
+ // Here, there's no disposition.
1176
+ // Put the connection on the stack and wait for it to complete
1177
+ // or time out.
1178
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1179
+ if (!cd)
1180
+ throw std::runtime_error ("no connection allocated");
1181
+ cd->SetConnectPending (true);
1182
+ Add (cd);
1183
+ out = cd->GetBinding();
1184
+ } else {
1185
+ // Fall through to the !out case below.
1186
+ e = error;
1187
+ }
1188
+ }
1189
+ else {
1190
+ // The error from connect was something other than EINPROGRESS (EHOSTDOWN, etc).
1191
+ // Fall through to the !out case below
1192
+ e = errno;
1193
+ }
1194
+
1195
+ if (!out) {
1196
+ /* This could be connection refused or some such thing.
1197
+ * We will come here on Linux if a localhost connection fails.
1198
+ * Changed 16Jul06: Originally this branch was a no-op, and
1199
+ * we'd drop down to the end of the method, close the socket,
1200
+ * and return NULL, which would cause the caller to GET A
1201
+ * FATAL EXCEPTION. Now we keep the socket around but schedule an
1202
+ * immediate close on it, so the caller will get a close-event
1203
+ * scheduled on it. This was only an issue for localhost connections
1204
+ * to non-listening ports. We may eventually need to revise this
1205
+ * revised behavior, in case it causes problems like making it hard
1206
+ * for people to know that a failure occurred.
1207
+ */
1208
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1209
+ if (!cd)
1210
+ throw std::runtime_error ("no connection allocated");
1211
+ cd->SetUnbindReasonCode(e);
1212
+ cd->ScheduleClose (false);
1213
+ Add (cd);
1214
+ out = cd->GetBinding();
1215
+ }
1216
+ #endif
1217
+
1218
+ #ifdef OS_WIN32
1219
+ //if (connect (sd, (sockaddr*)&pin, sizeof pin) == 0) {
1220
+ if (connect (sd, (struct sockaddr*)&bind_as, bind_size) == 0) {
1221
+ // This is a connect success, which Windows appears
1222
+ // never to give when the socket is nonblocking,
1223
+ // even if the connection is intramachine or to
1224
+ // localhost.
1225
+ throw std::runtime_error ("unimplemented");
1226
+ }
1227
+ else if (WSAGetLastError() == WSAEWOULDBLOCK) {
1228
+ // Here, there's no disposition.
1229
+ // Windows appears not to surface refused connections or
1230
+ // such stuff at this point.
1231
+ // Put the connection on the stack and wait for it to complete
1232
+ // or time out.
1233
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1234
+ if (!cd)
1235
+ throw std::runtime_error ("no connection allocated");
1236
+ cd->SetConnectPending (true);
1237
+ Add (cd);
1238
+ out = cd->GetBinding();
1239
+ }
1240
+ else {
1241
+ // The error from connect was something other than WSAEWOULDBLOCK.
1242
+ }
1243
+
1244
+ #endif
1245
+
1246
+ if (!out)
1247
+ close (sd);
1248
+ return out;
1249
+ }
1250
+
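+ /* Editor's illustrative sketch -- NOT part of em.cpp. As the comment at the
+ * top of this routine explains, once a nonblocking connect() has returned
+ * EINPROGRESS the final disposition is read with getsockopt(SO_ERROR) when
+ * the socket selects writable. A minimal, hypothetical helper for that check
+ * (the descriptor classes presumably do the equivalent internally):
+ */
+ #ifdef OS_UNIX
+ static int sketch_connect_disposition (int sd)
+ {
+ 	int err = 0;
+ 	socklen_t len = sizeof(err);
+ 	if (getsockopt (sd, SOL_SOCKET, SO_ERROR, &err, &len) != 0)
+ 		return errno; // getsockopt itself failed
+ 	return err;       // 0 means the connect succeeded; otherwise an errno-style code
+ }
+ #endif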
1251
+ /***********************************
1252
+ EventMachine_t::ConnectToUnixServer
1253
+ ***********************************/
1254
+
1255
+ const unsigned long EventMachine_t::ConnectToUnixServer (const char *server)
1256
+ {
1257
+ /* Connect to a Unix-domain server, which by definition is running
1258
+ * on the same host.
1259
+ * There is no meaningful implementation on Windows.
1260
+ * There's no need to do a nonblocking connect, since the connection
1261
+ * is always local and can always be fulfilled immediately.
1262
+ */
1263
+
1264
+ #ifdef OS_WIN32
1265
+ throw std::runtime_error ("unix-domain connection unavailable on this platform");
1266
+ return 0;
1267
+ #endif
1268
+
1269
+ // The whole rest of this function is only compiled on Unix systems.
1270
+ #ifdef OS_UNIX
1271
+
1272
+ unsigned long out = 0;
1273
+
1274
+ if (!server || !*server)
1275
+ return 0;
1276
+
1277
+ sockaddr_un pun;
1278
+ memset (&pun, 0, sizeof(pun));
1279
+ pun.sun_family = AF_LOCAL;
1280
+
1281
+ // The sun_path field is smaller than you might expect: on Linux it is only
1282
+ // about 108 bytes, so check the length before copying into it.
1283
+ if (strlen(server) >= sizeof(pun.sun_path))
1284
+ throw std::runtime_error ("unix-domain server name is too long");
1285
+
1286
+
1287
+ strcpy (pun.sun_path, server);
1288
+
1289
+ int fd = socket (AF_LOCAL, SOCK_STREAM, 0);
1290
+ if (fd == INVALID_SOCKET)
1291
+ return 0;
1292
+
1293
+ // From here on, ALL error returns must close the socket.
1294
+ // NOTE: At this point, the socket is still a blocking socket.
1295
+ if (connect (fd, (struct sockaddr*)&pun, sizeof(pun)) != 0) {
1296
+ close (fd);
1297
+ return 0;
1298
+ }
1299
+
1300
+ // Set the newly-connected socket nonblocking.
1301
+ if (!SetSocketNonblocking (fd)) {
1302
+ close (fd);
1303
+ return 0;
1304
+ }
1305
+
1306
+ // Set up a connection descriptor and add it to the event-machine.
1307
+ // Observe, even though we know the connection status is connect-success,
1308
+ // we still set the "pending" flag, so some needed initializations take
1309
+ // place.
1310
+ ConnectionDescriptor *cd = new ConnectionDescriptor (fd, this);
1311
+ if (!cd)
1312
+ throw std::runtime_error ("no connection allocated");
1313
+ cd->SetConnectPending (true);
1314
+ Add (cd);
1315
+ out = cd->GetBinding();
1316
+
1317
+ if (!out)
1318
+ close (fd);
1319
+
1320
+ return out;
1321
+ #endif
1322
+ }
1323
+
1324
+ /************************
1325
+ EventMachine_t::AttachFD
1326
+ ************************/
1327
+
1328
+ const unsigned long EventMachine_t::AttachFD (int fd, bool watch_mode)
1329
+ {
1330
+ #ifdef OS_UNIX
1331
+ if (fcntl(fd, F_GETFL, 0) < 0)
1332
+ throw std::runtime_error ("invalid file descriptor");
1333
+ #endif
1334
+
1335
+ #ifdef OS_WIN32
1336
+ // TODO: add better check for invalid file descriptors (see ioctlsocket or getsockopt)
1337
+ if (fd == INVALID_SOCKET)
1338
+ throw std::runtime_error ("invalid file descriptor");
1339
+ #endif
1340
+
1341
+ {// Check for duplicate descriptors
1342
+ size_t i;
1343
+ for (i = 0; i < Descriptors.size(); i++) {
1344
+ EventableDescriptor *ed = Descriptors[i];
1345
+ assert (ed);
1346
+ if (ed->GetSocket() == fd)
1347
+ throw std::runtime_error ("adding existing descriptor");
1348
+ }
1349
+
1350
+ for (i = 0; i < NewDescriptors.size(); i++) {
1351
+ EventableDescriptor *ed = NewDescriptors[i];
1352
+ assert (ed);
1353
+ if (ed->GetSocket() == fd)
1354
+ throw std::runtime_error ("adding existing new descriptor");
1355
+ }
1356
+ }
1357
+
1358
+ if (!watch_mode)
1359
+ SetSocketNonblocking(fd);
1360
+
1361
+ ConnectionDescriptor *cd = new ConnectionDescriptor (fd, this);
1362
+ if (!cd)
1363
+ throw std::runtime_error ("no connection allocated");
1364
+
1365
+ cd->SetAttached(true);
1366
+ cd->SetWatchOnly(watch_mode);
1367
+ cd->SetConnectPending (false);
1368
+
1369
+ Add (cd);
1370
+
1371
+ const unsigned long out = cd->GetBinding();
1372
+ return out;
1373
+ }
1374
+
1375
+ /************************
1376
+ EventMachine_t::DetachFD
1377
+ ************************/
1378
+
1379
+ int EventMachine_t::DetachFD (EventableDescriptor *ed)
1380
+ {
1381
+ if (!ed)
1382
+ throw std::runtime_error ("detaching bad descriptor");
1383
+
1384
+ int fd = ed->GetSocket();
1385
+
1386
+ #ifdef HAVE_EPOLL
1387
+ if (bEpoll) {
1388
+ if (ed->GetSocket() != INVALID_SOCKET) {
1389
+ assert (epfd != -1);
1390
+ int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
1391
+ // ENOENT or EBADF are not errors because the socket may be already closed when we get here.
1392
+ if (e && (errno != ENOENT) && (errno != EBADF)) {
1393
+ char buf [200];
1394
+ snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
1395
+ throw std::runtime_error (buf);
1396
+ }
1397
+ }
1398
+ }
1399
+ #endif
1400
+
1401
+ #ifdef HAVE_KQUEUE
1402
+ if (bKqueue) {
1403
+ // remove any read/write events for this fd
1404
+ struct kevent k;
1405
+ #ifdef __NetBSD__
1406
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ | EVFILT_WRITE, EV_DELETE, 0, 0, (intptr_t)ed);
1407
+ #else
1408
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ | EVFILT_WRITE, EV_DELETE, 0, 0, ed);
1409
+ #endif
1410
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1411
+ if (t < 0 && (errno != ENOENT) && (errno != EBADF)) {
1412
+ char buf [200];
1413
+ snprintf (buf, sizeof(buf)-1, "unable to delete kqueue event: %s", strerror(errno));
1414
+ throw std::runtime_error (buf);
1415
+ }
1416
+ }
1417
+ #endif
1418
+
1419
+ // Prevent the descriptor from being modified, in case DetachFD was called from a timer or next_tick
1420
+ ModifiedDescriptors.erase (ed);
1421
+
1422
+ // Set MySocket = INVALID_SOCKET so ShouldDelete() is true (and the descriptor gets deleted and removed),
1423
+ // and also to prevent anyone from calling close() on the detached fd
1424
+ ed->SetSocketInvalid();
1425
+
1426
+ return fd;
1427
+ }
1428
+
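+ /* Editor's illustrative sketch -- NOT part of em.cpp. DetachFD hands the raw
+ * fd back and marks the descriptor's socket invalid, so the reactor will not
+ * close it; the assumption here is that the caller takes ownership from that
+ * point on. The helper name is hypothetical.
+ */
+ static void sketch_take_fd_ownership (EventMachine_t *em, EventableDescriptor *ed)
+ {
+ 	int fd = em->DetachFD (ed); // the reactor forgets the fd without closing it
+ 	// ... the caller may now use fd directly ...
+ 	close (fd);                 // and becomes responsible for eventually closing it
+ }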
1429
+ /************
1430
+ name2address
1431
+ ************/
1432
+
1433
+ struct sockaddr *name2address (const char *server, int port, int *family, int *bind_size)
1434
+ {
1435
+ // THIS IS NOT RE-ENTRANT OR THREADSAFE. Optimize for speed.
1436
+ // Check the more-common cases first.
1437
+ // Return NULL if no resolution.
1438
+
1439
+ static struct sockaddr_in in4;
1440
+ #ifndef __CYGWIN__
1441
+ static struct sockaddr_in6 in6;
1442
+ #endif
1443
+ struct hostent *hp;
1444
+
1445
+ if (!server || !*server)
1446
+ server = "0.0.0.0";
1447
+
1448
+ memset (&in4, 0, sizeof(in4));
1449
+ if ( (in4.sin_addr.s_addr = inet_addr (server)) != INADDR_NONE) {
1450
+ if (family)
1451
+ *family = AF_INET;
1452
+ if (bind_size)
1453
+ *bind_size = sizeof(in4);
1454
+ in4.sin_family = AF_INET;
1455
+ in4.sin_port = htons (port);
1456
+ return (struct sockaddr*)&in4;
1457
+ }
1458
+
1459
+ #if defined(OS_UNIX) && !defined(__CYGWIN__)
1460
+ memset (&in6, 0, sizeof(in6));
1461
+ if (inet_pton (AF_INET6, server, in6.sin6_addr.s6_addr) > 0) {
1462
+ if (family)
1463
+ *family = AF_INET6;
1464
+ if (bind_size)
1465
+ *bind_size = sizeof(in6);
1466
+ in6.sin6_family = AF_INET6;
1467
+ in6.sin6_port = htons (port);
1468
+ return (struct sockaddr*)&in6;
1469
+ }
1470
+ #endif
1471
+
1472
+ #ifdef OS_WIN32
1473
+ // TODO, must complete this branch. Windows doesn't have inet_pton.
1474
+ // A possible approach is to make a getaddrinfo call with the supplied
1475
+ // server address, constraining the hints to ipv6 and seeing if we
1476
+ // get any addresses.
1477
+ // For the time being, IPv6 addresses aren't supported on Windows.
1478
+ #endif
1479
+
1480
+ hp = gethostbyname ((char*)server); // Windows requires the cast.
1481
+ if (hp) {
1482
+ in4.sin_addr.s_addr = ((in_addr*)(hp->h_addr))->s_addr;
1483
+ if (family)
1484
+ *family = AF_INET;
1485
+ if (bind_size)
1486
+ *bind_size = sizeof(in4);
1487
+ in4.sin_family = AF_INET;
1488
+ in4.sin_port = htons (port);
1489
+ return (struct sockaddr*)&in4;
1490
+ }
1491
+
1492
+ return NULL;
1493
+ }
1494
+
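+ /* Editor's illustrative sketch -- NOT part of em.cpp. Because name2address
+ * returns a pointer into static storage, callers that need the result to
+ * survive the next call must copy it immediately, as the connect code above
+ * does. A reentrant alternative would go through getaddrinfo, roughly like
+ * this (Unix-only here, to keep the header assumptions simple):
+ */
+ #ifdef OS_UNIX
+ static bool sketch_resolve (const char *server, int port, struct sockaddr_storage *out, socklen_t *out_len)
+ {
+ 	char portstr [12];
+ 	struct addrinfo hints, *res = NULL;
+ 	memset (&hints, 0, sizeof(hints));
+ 	hints.ai_family = AF_UNSPEC;    // accept either IPv4 or IPv6
+ 	hints.ai_socktype = SOCK_STREAM;
+ 	snprintf (portstr, sizeof(portstr), "%d", port);
+ 	if (getaddrinfo (server, portstr, &hints, &res) != 0 || !res)
+ 		return false;
+ 	memcpy (out, res->ai_addr, res->ai_addrlen); // copy before freeing the list
+ 	*out_len = (socklen_t) res->ai_addrlen;
+ 	freeaddrinfo (res);
+ 	return true;
+ }
+ #endif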
1495
+
1496
+ /*******************************
1497
+ EventMachine_t::CreateTcpServer
1498
+ *******************************/
1499
+
1500
+ const unsigned long EventMachine_t::CreateTcpServer (const char *server, int port)
1501
+ {
1502
+ /* Create a TCP-acceptor (server) socket and add it to the event machine.
1503
+ * Return the binding of the new acceptor to the caller.
1504
+ * This binding will be referenced when the new acceptor sends events
1505
+ * to indicate accepted connections.
1506
+ */
1507
+
1508
+
1509
+ int family, bind_size;
1510
+ struct sockaddr *bind_here = name2address (server, port, &family, &bind_size);
1511
+ if (!bind_here)
1512
+ return 0;
1513
+
1514
+ unsigned long output_binding = 0;
1515
+
1516
+ //struct sockaddr_in sin;
1517
+
1518
+ int sd_accept = socket (family, SOCK_STREAM, 0);
1519
+ if (sd_accept == INVALID_SOCKET) {
1520
+ goto fail;
1521
+ }
1522
+
1523
+ { // set reuseaddr to improve performance on restarts.
1524
+ int oval = 1;
1525
+ if (setsockopt (sd_accept, SOL_SOCKET, SO_REUSEADDR, (char*)&oval, sizeof(oval)) < 0) {
1526
+ //__warning ("setsockopt failed while creating listener","");
1527
+ goto fail;
1528
+ }
1529
+ }
1530
+
1531
+ { // set CLOEXEC. Only makes sense on Unix
1532
+ #ifdef OS_UNIX
1533
+ int cloexec = fcntl (sd_accept, F_GETFD, 0);
1534
+ assert (cloexec >= 0);
1535
+ cloexec |= FD_CLOEXEC;
1536
+ fcntl (sd_accept, F_SETFD, cloexec);
1537
+ #endif
1538
+ }
1539
+
1540
+
1541
+ //if (bind (sd_accept, (struct sockaddr*)&sin, sizeof(sin))) {
1542
+ if (bind (sd_accept, bind_here, bind_size)) {
1543
+ //__warning ("binding failed");
1544
+ goto fail;
1545
+ }
1546
+
1547
+ if (listen (sd_accept, 100)) {
1548
+ //__warning ("listen failed");
1549
+ goto fail;
1550
+ }
1551
+
1552
+ {
1553
+ // Set the acceptor non-blocking.
1554
+ // THIS IS CRUCIALLY IMPORTANT because we read it in a select loop.
1555
+ if (!SetSocketNonblocking (sd_accept)) {
1556
+ //int val = fcntl (sd_accept, F_GETFL, 0);
1557
+ //if (fcntl (sd_accept, F_SETFL, val | O_NONBLOCK) == -1) {
1558
+ goto fail;
1559
+ }
1560
+ }
1561
+
1562
+ { // Looking good.
1563
+ AcceptorDescriptor *ad = new AcceptorDescriptor (sd_accept, this);
1564
+ if (!ad)
1565
+ throw std::runtime_error ("unable to allocate acceptor");
1566
+ Add (ad);
1567
+ output_binding = ad->GetBinding();
1568
+ }
1569
+
1570
+ return output_binding;
1571
+
1572
+ fail:
1573
+ if (sd_accept != INVALID_SOCKET)
1574
+ close (sd_accept);
1575
+ return 0;
1576
+ }
1577
+
1578
+
1579
+ /**********************************
1580
+ EventMachine_t::OpenDatagramSocket
1581
+ **********************************/
1582
+
1583
+ const unsigned long EventMachine_t::OpenDatagramSocket (const char *address, int port)
1584
+ {
1585
+ unsigned long output_binding = 0;
1586
+
1587
+ int sd = socket (AF_INET, SOCK_DGRAM, 0);
1588
+ if (sd == INVALID_SOCKET)
1589
+ goto fail;
1590
+ // from here on, early returns must close the socket!
1591
+
1592
+
1593
+ struct sockaddr_in sin;
1594
+ memset (&sin, 0, sizeof(sin));
1595
+ sin.sin_family = AF_INET;
1596
+ sin.sin_port = htons (port);
1597
+
1598
+
1599
+ if (address && *address) {
1600
+ sin.sin_addr.s_addr = inet_addr (address);
1601
+ if (sin.sin_addr.s_addr == INADDR_NONE) {
1602
+ hostent *hp = gethostbyname ((char*)address); // Windows requires the cast.
1603
+ if (hp == NULL)
1604
+ goto fail;
1605
+ sin.sin_addr.s_addr = ((in_addr*)(hp->h_addr))->s_addr;
1606
+ }
1607
+ }
1608
+ else
1609
+ sin.sin_addr.s_addr = htonl (INADDR_ANY);
1610
+
1611
+
1612
+ // Set the new socket nonblocking.
1613
+ {
1614
+ if (!SetSocketNonblocking (sd))
1615
+ //int val = fcntl (sd, F_GETFL, 0);
1616
+ //if (fcntl (sd, F_SETFL, val | O_NONBLOCK) == -1)
1617
+ goto fail;
1618
+ }
1619
+
1620
+ if (bind (sd, (struct sockaddr*)&sin, sizeof(sin)) != 0)
1621
+ goto fail;
1622
+
1623
+ { // Looking good.
1624
+ DatagramDescriptor *ds = new DatagramDescriptor (sd, this);
1625
+ if (!ds)
1626
+ throw std::runtime_error ("unable to allocate datagram-socket");
1627
+ Add (ds);
1628
+ output_binding = ds->GetBinding();
1629
+ }
1630
+
1631
+ return output_binding;
1632
+
1633
+ fail:
1634
+ if (sd != INVALID_SOCKET)
1635
+ close (sd);
1636
+ return 0;
1637
+ }
1638
+
1639
+
1640
+
1641
+ /*******************
1642
+ EventMachine_t::Add
1643
+ *******************/
1644
+
1645
+ void EventMachine_t::Add (EventableDescriptor *ed)
1646
+ {
1647
+ if (!ed)
1648
+ throw std::runtime_error ("added bad descriptor");
1649
+ ed->SetEventCallback (EventCallback);
1650
+ NewDescriptors.push_back (ed);
1651
+ }
1652
+
1653
+
1654
+ /*******************************
1655
+ EventMachine_t::ArmKqueueWriter
1656
+ *******************************/
1657
+
1658
+ void EventMachine_t::ArmKqueueWriter (EventableDescriptor *ed)
1659
+ {
1660
+ #ifdef HAVE_KQUEUE
1661
+ if (bKqueue) {
1662
+ if (!ed)
1663
+ throw std::runtime_error ("added bad descriptor");
1664
+ struct kevent k;
1665
+ #ifdef __NetBSD__
1666
+ EV_SET (&k, ed->GetSocket(), EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, (intptr_t)ed);
1667
+ #else
1668
+ EV_SET (&k, ed->GetSocket(), EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, ed);
1669
+ #endif
1670
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1671
+ if (t < 0) {
1672
+ char buf [200];
1673
+ snprintf (buf, sizeof(buf)-1, "arm kqueue writer failed on %d: %s", ed->GetSocket(), strerror(errno));
1674
+ throw std::runtime_error (buf);
1675
+ }
1676
+ }
1677
+ #endif
1678
+ }
1679
+
1680
+ /*******************************
1681
+ EventMachine_t::ArmKqueueReader
1682
+ *******************************/
1683
+
1684
+ void EventMachine_t::ArmKqueueReader (EventableDescriptor *ed)
1685
+ {
1686
+ #ifdef HAVE_KQUEUE
1687
+ if (bKqueue) {
1688
+ if (!ed)
1689
+ throw std::runtime_error ("added bad descriptor");
1690
+ struct kevent k;
1691
+ #ifdef __NetBSD__
1692
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, (intptr_t)ed);
1693
+ #else
1694
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
1695
+ #endif
1696
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1697
+ if (t < 0) {
1698
+ char buf [200];
1699
+ snprintf (buf, sizeof(buf)-1, "arm kqueue reader failed on %d: %s", ed->GetSocket(), strerror(errno));
1700
+ throw std::runtime_error (buf);
1701
+ }
1702
+ }
1703
+ #endif
1704
+ }
1705
+
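+ /* Editor's illustrative sketch -- NOT part of em.cpp. Note the asymmetry in
+ * the two helpers above: the write filter is registered with EV_ONESHOT, so
+ * it fires once and must be re-armed for each further write, while the read
+ * filter stays armed until deleted. A minimal harvesting loop might look
+ * like this; the array size and one-second timeout are arbitrary choices.
+ */
+ #ifdef HAVE_KQUEUE
+ static void sketch_kqueue_harvest (int kq)
+ {
+ 	struct kevent fired [16];
+ 	struct timespec ts = { 1, 0 };
+ 	int n = kevent (kq, NULL, 0, fired, 16, &ts); // no changelist, just collect events
+ 	for (int i = 0; i < n; i++) {
+ 		if (fired[i].filter == EVFILT_WRITE) {
+ 			// one-shot: ArmKqueueWriter must be called again to see further writability
+ 		}
+ 		else if (fired[i].filter == EVFILT_READ) {
+ 			// persistent: remains armed until explicitly removed
+ 		}
+ 	}
+ }
+ #endif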
1706
+ /**********************************
1707
+ EventMachine_t::_AddNewDescriptors
1708
+ **********************************/
1709
+
1710
+ void EventMachine_t::_AddNewDescriptors()
1711
+ {
1712
+ /* Avoid adding descriptors to the main descriptor list
1713
+ * while we're actually traversing the list.
1714
+ * Any descriptors that are added as a result of processing timers
1715
+ * or acceptors should go on a temporary queue and then be added
1716
+ * while we're not traversing the main list.
1717
+ * Also, it (rarely) happens that a newly-created descriptor
1718
+ * is immediately scheduled to close. It might be a good
1719
+ * idea not to bother scheduling these for I/O but if
1720
+ * we do that, we might bypass some important processing.
1721
+ */
1722
+
1723
+ for (size_t i = 0; i < NewDescriptors.size(); i++) {
1724
+ EventableDescriptor *ed = NewDescriptors[i];
1725
+ if (ed == NULL)
1726
+ throw std::runtime_error ("adding bad descriptor");
1727
+
1728
+ #if HAVE_EPOLL
1729
+ if (bEpoll) {
1730
+ assert (epfd != -1);
1731
+ int e = epoll_ctl (epfd, EPOLL_CTL_ADD, ed->GetSocket(), ed->GetEpollEvent());
1732
+ if (e) {
1733
+ char buf [200];
1734
+ snprintf (buf, sizeof(buf)-1, "unable to add new descriptor: %s", strerror(errno));
1735
+ throw std::runtime_error (buf);
1736
+ }
1737
+ }
1738
+ #endif
1739
+
1740
+ #if HAVE_KQUEUE
1741
+ /*
1742
+ if (bKqueue) {
1743
+ // INCOMPLETE. Some descriptors don't want to be readable.
1744
+ assert (kqfd != -1);
1745
+ struct kevent k;
1746
+ #ifdef __NetBSD__
1747
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, (intptr_t)ed);
1748
+ #else
1749
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
1750
+ #endif
1751
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1752
+ assert (t == 0);
1753
+ }
1754
+ */
1755
+ #endif
1756
+
1757
+ QueueHeartbeat(ed);
1758
+ Descriptors.push_back (ed);
1759
+ }
1760
+ NewDescriptors.clear();
1761
+ }
1762
+
1763
+
1764
+ /**********************************
1765
+ EventMachine_t::_ModifyDescriptors
1766
+ **********************************/
1767
+
1768
+ void EventMachine_t::_ModifyDescriptors()
1769
+ {
1770
+ /* For implementations which don't level check every descriptor on
1771
+ * every pass through the machine, as select does.
1772
+ * If we're not selecting, then descriptors need a way to signal to the
1773
+ * machine that their readable or writable status has changed.
1774
+ * That's what the ::Modify call is for. We do it this way to avoid
1775
+ * modifying descriptors during the loop traversal, where it can easily
1776
+ * happen that an object (like a UDP socket) gets data written on it by
1777
+ * the application during #post_init. That would take place BEFORE the
1778
+ * descriptor even gets added to the epoll descriptor, so the modify
1779
+ * operation will crash messily.
1780
+ * Another really messy possibility is for a descriptor to put itself
1781
+ * on the Modified list, and then get deleted before we get here.
1782
+ * Remember, deletes happen after the I/O traversal and before the
1783
+ * next pass through here. So we have to make sure when we delete a
1784
+ * descriptor to remove it from the Modified list.
1785
+ */
1786
+
1787
+ #ifdef HAVE_EPOLL
1788
+ if (bEpoll) {
1789
+ set<EventableDescriptor*>::iterator i = ModifiedDescriptors.begin();
1790
+ while (i != ModifiedDescriptors.end()) {
1791
+ assert (*i);
1792
+ _ModifyEpollEvent (*i);
1793
+ ++i;
1794
+ }
1795
+ }
1796
+ #endif
1797
+
1798
+ ModifiedDescriptors.clear();
1799
+ }
1800
+
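+ /* Editor's illustrative sketch -- NOT part of em.cpp. The per-descriptor work
+ * queued above is done by _ModifyEpollEvent, which is defined elsewhere in
+ * this file; the underlying operation is presumably an epoll_ctl call with
+ * EPOLL_CTL_MOD, roughly:
+ */
+ #ifdef HAVE_EPOLL
+ static void sketch_modify_epoll_event (int epoll_fd, int sd, struct epoll_event *ev)
+ {
+ 	// ev->events is assumed to already carry the descriptor's new EPOLLIN/EPOLLOUT mask.
+ 	if (epoll_ctl (epoll_fd, EPOLL_CTL_MOD, sd, ev) != 0) {
+ 		char buf [200];
+ 		snprintf (buf, sizeof(buf)-1, "unable to modify epoll event: %s", strerror(errno));
+ 		throw std::runtime_error (buf);
+ 	}
+ }
+ #endif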
1801
+
1802
+ /**********************
1803
+ EventMachine_t::Modify
1804
+ **********************/
1805
+
1806
+ void EventMachine_t::Modify (EventableDescriptor *ed)
1807
+ {
1808
+ if (!ed)
1809
+ throw std::runtime_error ("modified bad descriptor");
1810
+ ModifiedDescriptors.insert (ed);
1811
+ }
1812
+
1813
+
1814
+ /***********************
1815
+ EventMachine_t::Deregister
1816
+ ***********************/
1817
+
1818
+ void EventMachine_t::Deregister (EventableDescriptor *ed)
1819
+ {
1820
+ if (!ed)
1821
+ throw std::runtime_error ("modified bad descriptor");
1822
+ #ifdef HAVE_EPOLL
1823
+ // cut/paste from _CleanupSockets(). The error handling could be
1824
+ // refactored out of there, but it is cut/paste all over the
1825
+ // file already.
1826
+ if (bEpoll) {
1827
+ assert (epfd != -1);
1828
+ assert (ed->GetSocket() != INVALID_SOCKET);
1829
+ int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
1830
+ // ENOENT or EBADF are not errors because the socket may be already closed when we get here.
1831
+ if (e && (errno != ENOENT) && (errno != EBADF) && (errno != EPERM)) {
1832
+ char buf [200];
1833
+ snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
1834
+ throw std::runtime_error (buf);
1835
+ }
1836
+ ModifiedDescriptors.erase(ed);
1837
+ }
1838
+ #endif
1839
+ }
1840
+
1841
+
1842
+ /**************************************
1843
+ EventMachine_t::CreateUnixDomainServer
1844
+ **************************************/
1845
+
1846
+ const unsigned long EventMachine_t::CreateUnixDomainServer (const char *filename)
1847
+ {
1848
+ /* Create a UNIX-domain acceptor (server) socket and add it to the event machine.
1849
+ * Return the binding of the new acceptor to the caller.
1850
+ * This binding will be referenced when the new acceptor sends events
1851
+ * to indicate accepted connections.
1852
+ * THERE IS NO MEANINGFUL IMPLEMENTATION ON WINDOWS.
1853
+ */
1854
+
1855
+ #ifdef OS_WIN32
1856
+ throw std::runtime_error ("unix-domain server unavailable on this platform");
1857
+ #endif
1858
+
1859
+ // The whole rest of this function is only compiled on Unix systems.
1860
+ #ifdef OS_UNIX
1861
+ unsigned long output_binding = 0;
1862
+
1863
+ struct sockaddr_un s_sun;
1864
+
1865
+ int sd_accept = socket (AF_LOCAL, SOCK_STREAM, 0);
1866
+ if (sd_accept == INVALID_SOCKET) {
1867
+ goto fail;
1868
+ }
1869
+
1870
+ if (!filename || !*filename)
1871
+ goto fail;
1872
+ unlink (filename);
1873
+
1874
+ memset (&s_sun, 0, sizeof(s_sun)); // memset rather than the legacy bzero, as elsewhere in this file
1875
+ s_sun.sun_family = AF_LOCAL;
1876
+ strncpy (s_sun.sun_path, filename, sizeof(s_sun.sun_path)-1);
1877
+
1878
+ // don't bother with reuseaddr for a local socket.
1879
+
1880
+ { // set CLOEXEC. Only makes sense on Unix
1881
+ #ifdef OS_UNIX
1882
+ int cloexec = fcntl (sd_accept, F_GETFD, 0);
1883
+ assert (cloexec >= 0);
1884
+ cloexec |= FD_CLOEXEC;
1885
+ fcntl (sd_accept, F_SETFD, cloexec);
1886
+ #endif
1887
+ }
1888
+
1889
+ if (bind (sd_accept, (struct sockaddr*)&s_sun, sizeof(s_sun))) {
1890
+ //__warning ("binding failed");
1891
+ goto fail;
1892
+ }
1893
+
1894
+ if (listen (sd_accept, 100)) {
1895
+ //__warning ("listen failed");
1896
+ goto fail;
1897
+ }
1898
+
1899
+ {
1900
+ // Set the acceptor non-blocking.
1901
+ // THIS IS CRUCIALLY IMPORTANT because we read it in a select loop.
1902
+ if (!SetSocketNonblocking (sd_accept)) {
1903
+ //int val = fcntl (sd_accept, F_GETFL, 0);
1904
+ //if (fcntl (sd_accept, F_SETFL, val | O_NONBLOCK) == -1) {
1905
+ goto fail;
1906
+ }
1907
+ }
1908
+
1909
+ { // Looking good.
1910
+ AcceptorDescriptor *ad = new AcceptorDescriptor (sd_accept, this);
1911
+ if (!ad)
1912
+ throw std::runtime_error ("unable to allocate acceptor");
1913
+ Add (ad);
1914
+ output_binding = ad->GetBinding();
1915
+ }
1916
+
1917
+ return output_binding;
1918
+
1919
+ fail:
1920
+ if (sd_accept != INVALID_SOCKET)
1921
+ close (sd_accept);
1922
+ return 0;
1923
+ #endif // OS_UNIX
1924
+ }
1925
+
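+ /* Editor's illustrative sketch -- NOT part of em.cpp. The acceptor above
+ * unlinks any stale socket file before binding, but nothing here removes the
+ * file again when the server shuts down; an application that wants a tidy
+ * filesystem would typically do that itself once the acceptor is closed:
+ */
+ #ifdef OS_UNIX
+ static void sketch_remove_unix_socket_file (const char *filename)
+ {
+ 	if (filename && *filename)
+ 		unlink (filename); // ignore errors; the file may already be gone
+ }
+ #endif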
1926
+
1927
+ /*********************
1928
+ EventMachine_t::Popen
1929
+ *********************/
1930
+ #if OBSOLETE
1931
+ const char *EventMachine_t::Popen (const char *cmd, const char *mode)
1932
+ {
1933
+ #ifdef OS_WIN32
1934
+ throw std::runtime_error ("popen is currently unavailable on this platform");
1935
+ #endif
1936
+
1937
+ // The whole rest of this function is only compiled on Unix systems.
1938
+ // Eventually we need this functionality (or a full-duplex equivalent) on Windows.
1939
+ #ifdef OS_UNIX
1940
+ const char *output_binding = NULL;
1941
+
1942
+ FILE *fp = popen (cmd, mode);
1943
+ if (!fp)
1944
+ return NULL;
1945
+
1946
+ // From here, all early returns must pclose the stream.
1947
+
1948
+ // According to the pipe(2) manpage, descriptors returned from pipe have both
1949
+ // CLOEXEC and NONBLOCK clear. Do NOT set CLOEXEC. DO set nonblocking.
1950
+ if (!SetSocketNonblocking (fileno (fp))) {
1951
+ pclose (fp);
1952
+ return NULL;
1953
+ }
1954
+
1955
+ { // Looking good.
1956
+ PipeDescriptor *pd = new PipeDescriptor (fp, this);
1957
+ if (!pd)
1958
+ throw std::runtime_error ("unable to allocate pipe");
1959
+ Add (pd);
1960
+ output_binding = pd->GetBinding();
1961
+ }
1962
+
1963
+ return output_binding;
1964
+ #endif
1965
+ }
1966
+ #endif // OBSOLETE
1967
+
1968
+ /**************************
1969
+ EventMachine_t::Socketpair
1970
+ **************************/
1971
+
1972
+ const unsigned long EventMachine_t::Socketpair (char * const*cmd_strings)
1973
+ {
1974
+ #ifdef OS_WIN32
1975
+ throw std::runtime_error ("socketpair is currently unavailable on this platform");
1976
+ #endif
1977
+
1978
+ // The whole rest of this function is only compiled on Unix systems.
1979
+ // Eventually we need this functionality (or a full-duplex equivalent) on Windows.
1980
+ #ifdef OS_UNIX
1981
+ // Make sure the incoming array of command strings is sane.
1982
+ if (!cmd_strings)
1983
+ return 0;
1984
+ int j;
1985
+ for (j=0; j < 2048 && cmd_strings[j]; j++)
1986
+ ;
1987
+ if ((j==0) || (j==2048))
1988
+ return 0;
1989
+
1990
+ unsigned long output_binding = 0;
1991
+
1992
+ int sv[2];
1993
+ if (socketpair (AF_LOCAL, SOCK_STREAM, 0, sv) < 0)
1994
+ return 0;
1995
+ // from here, all early returns must close the pair of sockets.
1996
+
1997
+ // Set the parent side of the socketpair nonblocking.
1998
+ // We don't care about the child side, and most child processes will expect their
1999
+ // stdout to be blocking. Thanks to Duane Johnson and Bill Kelly for pointing this out.
2000
+ // Obviously DON'T set CLOEXEC.
2001
+ if (!SetSocketNonblocking (sv[0])) {
2002
+ close (sv[0]);
2003
+ close (sv[1]);
2004
+ return 0;
2005
+ }
2006
+
2007
+ pid_t f = fork();
2008
+ if (f > 0) {
2009
+ close (sv[1]);
2010
+ PipeDescriptor *pd = new PipeDescriptor (sv[0], f, this);
2011
+ if (!pd)
2012
+ throw std::runtime_error ("unable to allocate pipe");
2013
+ Add (pd);
2014
+ output_binding = pd->GetBinding();
2015
+ }
2016
+ else if (f == 0) {
2017
+ close (sv[0]);
2018
+ dup2 (sv[1], STDIN_FILENO);
2019
+ close (sv[1]);
2020
+ dup2 (STDIN_FILENO, STDOUT_FILENO);
2021
+ execvp (cmd_strings[0], cmd_strings+1);
2022
+ exit (-1); // end the child process if the exec doesn't work.
2023
+ }
2024
+ else
2025
+ throw std::runtime_error ("no fork");
2026
+
2027
+ return output_binding;
2028
+ #endif
2029
+ }
2030
+
2031
+
2032
+ /****************************
2033
+ EventMachine_t::OpenKeyboard
2034
+ ****************************/
2035
+
2036
+ const unsigned long EventMachine_t::OpenKeyboard()
2037
+ {
2038
+ KeyboardDescriptor *kd = new KeyboardDescriptor (this);
2039
+ if (!kd)
2040
+ throw std::runtime_error ("no keyboard-object allocated");
2041
+ Add (kd);
2042
+ return kd->GetBinding();
2043
+ }
2044
+
2045
+
2046
+ /**********************************
2047
+ EventMachine_t::GetConnectionCount
2048
+ **********************************/
2049
+
2050
+ int EventMachine_t::GetConnectionCount ()
2051
+ {
2052
+ return Descriptors.size() + NewDescriptors.size();
2053
+ }
2054
+
2055
+
2056
+ /************************
2057
+ EventMachine_t::WatchPid
2058
+ ************************/
2059
+
2060
+ const unsigned long EventMachine_t::WatchPid (int pid)
2061
+ {
2062
+ #ifdef HAVE_KQUEUE
2063
+ if (!bKqueue)
2064
+ throw std::runtime_error("must enable kqueue (EM.kqueue=true) for pid watching support");
2065
+
2066
+ struct kevent event;
2067
+ int kqres;
2068
+
2069
+ EV_SET(&event, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT | NOTE_FORK, 0, 0);
2070
+
2071
+ // Attempt to register the event
2072
+ kqres = kevent(kqfd, &event, 1, NULL, 0, NULL);
2073
+ if (kqres == -1) {
2074
+ char errbuf[200];
2075
+ sprintf(errbuf, "failed to register file watch descriptor with kqueue: %s", strerror(errno));
2076
+ throw std::runtime_error(errbuf);
2077
+ }
2078
+ #endif
2079
+
2080
+ #ifdef HAVE_KQUEUE
2081
+ Bindable_t* b = new Bindable_t();
2082
+ Pids.insert(make_pair (pid, b));
2083
+
2084
+ return b->GetBinding();
2085
+ #endif
2086
+
2087
+ throw std::runtime_error("no pid watching support on this system");
2088
+ }
2089
+
2090
+ /**************************
2091
+ EventMachine_t::UnwatchPid
2092
+ **************************/
2093
+
2094
+ void EventMachine_t::UnwatchPid (int pid)
2095
+ {
2096
+ Bindable_t *b = Pids[pid];
2097
+ assert(b);
2098
+ Pids.erase(pid);
2099
+
2100
+ #ifdef HAVE_KQUEUE
2101
+ struct kevent k;
2102
+
2103
+ EV_SET(&k, pid, EVFILT_PROC, EV_DELETE, 0, 0, 0);
2104
+ /*int t =*/ kevent (kqfd, &k, 1, NULL, 0, NULL);
2105
+ // t==-1 if the process already exited; ignore this for now
2106
+ #endif
2107
+
2108
+ if (EventCallback)
2109
+ (*EventCallback)(b->GetBinding(), EM_CONNECTION_UNBOUND, NULL, 0);
2110
+
2111
+ delete b;
2112
+ }
2113
+
2114
+ void EventMachine_t::UnwatchPid (const unsigned long sig)
2115
+ {
2116
+ for(map<int, Bindable_t*>::iterator i=Pids.begin(); i != Pids.end(); i++)
2117
+ {
2118
+ if (i->second->GetBinding() == sig) {
2119
+ UnwatchPid (i->first);
2120
+ return;
2121
+ }
2122
+ }
2123
+
2124
+ throw std::runtime_error("attempted to remove invalid pid signature");
2125
+ }
2126
+
2127
+
2128
+ /*************************
2129
+ EventMachine_t::WatchFile
2130
+ *************************/
2131
+
2132
+ const unsigned long EventMachine_t::WatchFile (const char *fpath)
2133
+ {
2134
+ struct stat sb;
2135
+ int sres;
2136
+ int wd = -1;
2137
+
2138
+ sres = stat(fpath, &sb);
2139
+
2140
+ if (sres == -1) {
2141
+ char errbuf[300];
2142
+ sprintf(errbuf, "error registering file %s for watching: %s", fpath, strerror(errno));
2143
+ throw std::runtime_error(errbuf);
2144
+ }
2145
+
2146
+ #ifdef HAVE_INOTIFY
2147
+ if (!inotify) {
2148
+ inotify = new InotifyDescriptor(this);
2149
+ assert (inotify);
2150
+ Add(inotify);
2151
+ }
2152
+
2153
+ wd = inotify_add_watch(inotify->GetSocket(), fpath,
2154
+ IN_MODIFY | IN_DELETE_SELF | IN_MOVE_SELF | IN_CREATE | IN_DELETE | IN_MOVE) ;
2155
+ if (wd == -1) {
2156
+ char errbuf[300];
2157
+ sprintf(errbuf, "failed to open file %s for registering with inotify: %s", fpath, strerror(errno));
2158
+ throw std::runtime_error(errbuf);
2159
+ }
2160
+ #endif
2161
+
2162
+ #ifdef HAVE_KQUEUE
2163
+ if (!bKqueue)
2164
+ throw std::runtime_error("must enable kqueue (EM.kqueue=true) for file watching support");
2165
+
2166
+ // With kqueue we have to open the file first and use the resulting fd to register for events
2167
+ wd = open(fpath, O_RDONLY);
2168
+ if (wd == -1) {
2169
+ char errbuf[300];
2170
+ sprintf(errbuf, "failed to open file %s for registering with kqueue: %s", fpath, strerror(errno));
2171
+ throw std::runtime_error(errbuf);
2172
+ }
2173
+ _RegisterKqueueFileEvent(wd);
2174
+ #endif
2175
+
2176
+ if (wd != -1) {
2177
+ Bindable_t* b = new Bindable_t();
2178
+ Files.insert(make_pair (wd, b));
2179
+
2180
+ return b->GetBinding();
2181
+ }
2182
+
2183
+ throw std::runtime_error("no file watching support on this system"); // is this the right thing to do?
2184
+ }
2185
+
2186
+
2187
+ /***************************
2188
+ EventMachine_t::UnwatchFile
2189
+ ***************************/
2190
+
2191
+ void EventMachine_t::UnwatchFile (int wd)
2192
+ {
2193
+ Bindable_t *b = Files[wd];
2194
+ assert(b);
2195
+ Files.erase(wd);
2196
+
2197
+ #ifdef HAVE_INOTIFY
2198
+ inotify_rm_watch(inotify->GetSocket(), wd);
2199
+ #elif HAVE_KQUEUE
2200
+ // With kqueue, closing the monitored fd automatically clears all registered events for it
2201
+ close(wd);
2202
+ #endif
2203
+
2204
+ if (EventCallback)
2205
+ (*EventCallback)(b->GetBinding(), EM_CONNECTION_UNBOUND, NULL, 0);
2206
+
2207
+ delete b;
2208
+ }
2209
+
2210
+ void EventMachine_t::UnwatchFile (const unsigned long sig)
2211
+ {
2212
+ for(map<int, Bindable_t*>::iterator i=Files.begin(); i != Files.end(); i++)
2213
+ {
2214
+ if (i->second->GetBinding() == sig) {
2215
+ UnwatchFile (i->first);
2216
+ return;
2217
+ }
2218
+ }
2219
+ throw std::runtime_error("attempted to remove invalid watch signature");
2220
+ }
2221
+
2222
+
2223
+ /**********************************
2224
+ EventMachine_t::_ReadInotifyEvents
2225
+ **********************************/
2226
+
2227
+ void EventMachine_t::_ReadInotifyEvents()
2228
+ {
2229
+ #ifdef HAVE_INOTIFY
2230
+ char buffer[1024];
2231
+
2232
+ assert(EventCallback);
2233
+
2234
+ for (;;) {
2235
+ int returned = read(inotify->GetSocket(), buffer, sizeof(buffer));
2236
+ assert(!(returned == 0 || (returned == -1 && errno == EINVAL)));
2237
+ if (returned <= 0) {
2238
+ break;
2239
+ }
2240
+ int current = 0;
2241
+ while (current < returned) {
2242
+ struct inotify_event* event = (struct inotify_event*)(buffer+current);
2243
+ map<int, Bindable_t*>::const_iterator bindable = Files.find(event->wd);
2244
+ if (bindable != Files.end()) {
2245
+ if (event->mask & (IN_MODIFY | IN_CREATE | IN_DELETE | IN_MOVE)){
2246
+ (*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "modified", 8);
2247
+ }
2248
+ if (event->mask & IN_MOVE_SELF){
2249
+ (*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "moved", 5);
2250
+ }
2251
+ if (event->mask & IN_DELETE_SELF) {
2252
+ (*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "deleted", 7);
2253
+ UnwatchFile ((int)event->wd);
2254
+ }
2255
+ }
2256
+ current += sizeof(struct inotify_event) + event->len;
2257
+ }
2258
+ }
2259
+ #endif
2260
+ }
2261
+
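+ /* Editor's illustrative sketch -- NOT part of em.cpp. A single inotify event
+ * occupies sizeof(struct inotify_event) plus up to NAME_MAX + 1 bytes of file
+ * name, so reads are usually given a buffer sized and aligned with that in
+ * mind (see inotify(7)). A hypothetical read helper, assuming Linux/GCC:
+ */
+ #ifdef HAVE_INOTIFY
+ static ssize_t sketch_read_inotify (int inotify_fd)
+ {
+ 	// 4096 bytes comfortably exceeds one maximal event; keep it aligned for struct access.
+ 	char buf [4096] __attribute__ ((aligned (__alignof__ (struct inotify_event))));
+ 	return read (inotify_fd, buf, sizeof(buf)); // caller walks the events as _ReadInotifyEvents does
+ }
+ #endif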
2262
+
2263
+ /*************************************
2264
+ EventMachine_t::_HandleKqueuePidEvent
2265
+ *************************************/
2266
+
2267
+ #ifdef HAVE_KQUEUE
2268
+ void EventMachine_t::_HandleKqueuePidEvent(struct kevent *event)
2269
+ {
2270
+ assert(EventCallback);
2271
+
2272
+ if (event->fflags & NOTE_FORK)
2273
+ (*EventCallback)(Pids [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "fork", 4);
2274
+ if (event->fflags & NOTE_EXIT) {
2275
+ (*EventCallback)(Pids [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "exit", 4);
2276
+ // stop watching the pid if it died
2277
+ UnwatchPid ((int)event->ident);
2278
+ }
2279
+ }
2280
+ #endif
2281
+
2282
+
2283
+ /**************************************
2284
+ EventMachine_t::_HandleKqueueFileEvent
2285
+ ***************************************/
2286
+
2287
+ #ifdef HAVE_KQUEUE
2288
+ void EventMachine_t::_HandleKqueueFileEvent(struct kevent *event)
2289
+ {
2290
+ assert(EventCallback);
2291
+
2292
+ if (event->fflags & NOTE_WRITE)
2293
+ (*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "modified", 8);
2294
+ if (event->fflags & NOTE_RENAME)
2295
+ (*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "moved", 5);
2296
+ if (event->fflags & NOTE_DELETE) {
2297
+ (*EventCallback)(Files [(int) event->ident]->GetBinding(), EM_CONNECTION_READ, "deleted", 7);
2298
+ UnwatchFile ((int)event->ident);
2299
+ }
2300
+ }
2301
+ #endif
2302
+
2303
+
2304
+ /****************************************
2305
+ EventMachine_t::_RegisterKqueueFileEvent
2306
+ *****************************************/
2307
+
2308
+ #ifdef HAVE_KQUEUE
2309
+ void EventMachine_t::_RegisterKqueueFileEvent(int fd)
2310
+ {
2311
+ struct kevent newevent;
2312
+ int kqres;
2313
+
2314
+ // Setup the event with our fd and proper flags
2315
+ EV_SET(&newevent, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_DELETE | NOTE_RENAME | NOTE_WRITE, 0, 0);
2316
+
2317
+ // Attempt to register the event
2318
+ kqres = kevent(kqfd, &newevent, 1, NULL, 0, NULL);
2319
+ if (kqres == -1) {
2320
+ char errbuf[200];
2321
+ sprintf(errbuf, "failed to register file watch descriptor with kqueue: %s", strerror(errno));
2322
+ close(fd);
2323
+ throw std::runtime_error(errbuf);
2324
+ }
2325
+ }
2326
+ #endif
2327
+
2328
+
2329
+ /************************************
2330
+ EventMachine_t::GetHeartbeatInterval
2331
+ *************************************/
2332
+
2333
+ float EventMachine_t::GetHeartbeatInterval()
2334
+ {
2335
+ return ((float)HeartbeatInterval / 1000000);
2336
+ }
2337
+
2338
+
2339
+ /************************************
2340
+ EventMachine_t::SetHeartbeatInterval
2341
+ *************************************/
2342
+
2343
+ int EventMachine_t::SetHeartbeatInterval(float interval)
2344
+ {
2345
+ int iv = (int)(interval * 1000000);
2346
+ if (iv > 0) {
2347
+ HeartbeatInterval = iv;
2348
+ return 1;
2349
+ }
2350
+ return 0;
2351
+ }
2352
+ //#endif // OS_UNIX
2353
+
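+ /* Editor's illustrative sketch -- NOT part of em.cpp. The two heartbeat
+ * accessors above convert between float seconds and an integer count of
+ * microseconds: SetHeartbeatInterval(0.5) stores 500000, and a later
+ * GetHeartbeatInterval() returns 0.5. A tiny self-check of that round trip:
+ */
+ static bool sketch_heartbeat_roundtrip (EventMachine_t *em)
+ {
+ 	if (!em->SetHeartbeatInterval (0.5f))
+ 		return false;                      // zero or negative intervals are rejected
+ 	float back = em->GetHeartbeatInterval();
+ 	return (back > 0.49f && back < 0.51f); // allow for float rounding
+ }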