libc-eventmachine 0.12.5.42

Files changed (131)
  1. data/Rakefile +195 -0
  2. data/docs/COPYING +60 -0
  3. data/docs/ChangeLog +211 -0
  4. data/docs/DEFERRABLES +138 -0
  5. data/docs/EPOLL +141 -0
  6. data/docs/GNU +281 -0
  7. data/docs/INSTALL +15 -0
  8. data/docs/KEYBOARD +38 -0
  9. data/docs/LEGAL +25 -0
  10. data/docs/LIGHTWEIGHT_CONCURRENCY +72 -0
  11. data/docs/PURE_RUBY +77 -0
  12. data/docs/README +74 -0
  13. data/docs/RELEASE_NOTES +96 -0
  14. data/docs/SMTP +9 -0
  15. data/docs/SPAWNED_PROCESSES +93 -0
  16. data/docs/TODO +10 -0
  17. data/ext/binder.cpp +126 -0
  18. data/ext/binder.h +48 -0
  19. data/ext/cmain.cpp +582 -0
  20. data/ext/cplusplus.cpp +177 -0
  21. data/ext/ed.cpp +1522 -0
  22. data/ext/ed.h +380 -0
  23. data/ext/em.cpp +1947 -0
  24. data/ext/em.h +186 -0
  25. data/ext/emwin.cpp +300 -0
  26. data/ext/emwin.h +94 -0
  27. data/ext/epoll.cpp +26 -0
  28. data/ext/epoll.h +25 -0
  29. data/ext/eventmachine.h +98 -0
  30. data/ext/eventmachine_cpp.h +96 -0
  31. data/ext/extconf.rb +129 -0
  32. data/ext/fastfilereader/extconf.rb +77 -0
  33. data/ext/fastfilereader/mapper.cpp +214 -0
  34. data/ext/fastfilereader/mapper.h +59 -0
  35. data/ext/fastfilereader/rubymain.cpp +127 -0
  36. data/ext/files.cpp +94 -0
  37. data/ext/files.h +65 -0
  38. data/ext/kb.cpp +82 -0
  39. data/ext/page.cpp +107 -0
  40. data/ext/page.h +51 -0
  41. data/ext/pipe.cpp +351 -0
  42. data/ext/project.h +119 -0
  43. data/ext/rubymain.cpp +858 -0
  44. data/ext/sigs.cpp +89 -0
  45. data/ext/sigs.h +32 -0
  46. data/ext/ssl.cpp +423 -0
  47. data/ext/ssl.h +90 -0
  48. data/java/src/com/rubyeventmachine/Application.java +196 -0
  49. data/java/src/com/rubyeventmachine/Connection.java +74 -0
  50. data/java/src/com/rubyeventmachine/ConnectionFactory.java +37 -0
  51. data/java/src/com/rubyeventmachine/DefaultConnectionFactory.java +46 -0
  52. data/java/src/com/rubyeventmachine/EmReactor.java +408 -0
  53. data/java/src/com/rubyeventmachine/EmReactorException.java +40 -0
  54. data/java/src/com/rubyeventmachine/EventableChannel.java +57 -0
  55. data/java/src/com/rubyeventmachine/EventableDatagramChannel.java +171 -0
  56. data/java/src/com/rubyeventmachine/EventableSocketChannel.java +244 -0
  57. data/java/src/com/rubyeventmachine/PeriodicTimer.java +38 -0
  58. data/java/src/com/rubyeventmachine/Timer.java +54 -0
  59. data/java/src/com/rubyeventmachine/tests/ApplicationTest.java +108 -0
  60. data/java/src/com/rubyeventmachine/tests/ConnectTest.java +124 -0
  61. data/java/src/com/rubyeventmachine/tests/EMTest.java +80 -0
  62. data/java/src/com/rubyeventmachine/tests/TestDatagrams.java +53 -0
  63. data/java/src/com/rubyeventmachine/tests/TestServers.java +74 -0
  64. data/java/src/com/rubyeventmachine/tests/TestTimers.java +89 -0
  65. data/lib/em/deferrable.rb +208 -0
  66. data/lib/em/eventable.rb +39 -0
  67. data/lib/em/future.rb +62 -0
  68. data/lib/em/messages.rb +66 -0
  69. data/lib/em/processes.rb +68 -0
  70. data/lib/em/spawnable.rb +88 -0
  71. data/lib/em/streamer.rb +112 -0
  72. data/lib/eventmachine.rb +1920 -0
  73. data/lib/eventmachine_version.rb +31 -0
  74. data/lib/evma/callback.rb +32 -0
  75. data/lib/evma/container.rb +75 -0
  76. data/lib/evma/factory.rb +77 -0
  77. data/lib/evma/protocol.rb +87 -0
  78. data/lib/evma/reactor.rb +48 -0
  79. data/lib/evma.rb +32 -0
  80. data/lib/jeventmachine.rb +140 -0
  81. data/lib/pr_eventmachine.rb +1017 -0
  82. data/lib/protocols/buftok.rb +127 -0
  83. data/lib/protocols/header_and_content.rb +129 -0
  84. data/lib/protocols/httpcli2.rb +803 -0
  85. data/lib/protocols/httpclient.rb +270 -0
  86. data/lib/protocols/line_and_text.rb +126 -0
  87. data/lib/protocols/linetext2.rb +161 -0
  88. data/lib/protocols/memcache.rb +293 -0
  89. data/lib/protocols/postgres.rb +261 -0
  90. data/lib/protocols/saslauth.rb +179 -0
  91. data/lib/protocols/smtpclient.rb +308 -0
  92. data/lib/protocols/smtpserver.rb +556 -0
  93. data/lib/protocols/stomp.rb +153 -0
  94. data/lib/protocols/tcptest.rb +57 -0
  95. data/tasks/cpp.rake +77 -0
  96. data/tasks/project.rake +78 -0
  97. data/tasks/tests.rake +193 -0
  98. data/tests/test_attach.rb +83 -0
  99. data/tests/test_basic.rb +231 -0
  100. data/tests/test_bind.rb +73 -0
  101. data/tests/test_connection_count.rb +35 -0
  102. data/tests/test_defer.rb +47 -0
  103. data/tests/test_epoll.rb +163 -0
  104. data/tests/test_error_handler.rb +32 -0
  105. data/tests/test_errors.rb +82 -0
  106. data/tests/test_eventables.rb +77 -0
  107. data/tests/test_exc.rb +58 -0
  108. data/tests/test_futures.rb +214 -0
  109. data/tests/test_handler_check.rb +37 -0
  110. data/tests/test_hc.rb +218 -0
  111. data/tests/test_httpclient.rb +215 -0
  112. data/tests/test_httpclient2.rb +155 -0
  113. data/tests/test_kb.rb +61 -0
  114. data/tests/test_ltp.rb +188 -0
  115. data/tests/test_ltp2.rb +320 -0
  116. data/tests/test_next_tick.rb +109 -0
  117. data/tests/test_processes.rb +56 -0
  118. data/tests/test_pure.rb +129 -0
  119. data/tests/test_running.rb +47 -0
  120. data/tests/test_sasl.rb +74 -0
  121. data/tests/test_send_file.rb +243 -0
  122. data/tests/test_servers.rb +80 -0
  123. data/tests/test_smtpclient.rb +83 -0
  124. data/tests/test_smtpserver.rb +93 -0
  125. data/tests/test_spawn.rb +329 -0
  126. data/tests/test_ssl_args.rb +68 -0
  127. data/tests/test_ssl_methods.rb +50 -0
  128. data/tests/test_timers.rb +148 -0
  129. data/tests/test_ud.rb +43 -0
  130. data/tests/testem.rb +31 -0
  131. metadata +230 -0
data/ext/em.cpp ADDED
@@ -0,0 +1,1947 @@
1
+ /*****************************************************************************
2
+
3
+ $Id$
4
+
5
+ File: em.cpp
6
+ Date: 06Apr06
7
+
8
+ Copyright (C) 2006-07 by Francis Cianfrocca. All Rights Reserved.
9
+ Gmail: blackhedd
10
+
11
+ This program is free software; you can redistribute it and/or modify
12
+ it under the terms of either: 1) the GNU General Public License
13
+ as published by the Free Software Foundation; either version 2 of the
14
+ License, or (at your option) any later version; or 2) Ruby's License.
15
+
16
+ See the file COPYING for complete licensing information.
17
+
18
+ *****************************************************************************/
19
+
20
+ // THIS ENTIRE FILE WILL EVENTUALLY BE FOR UNIX BUILDS ONLY.
21
+ //#ifdef OS_UNIX
22
+
23
+
24
+ #include "project.h"
25
+
26
+ // Keep a global variable floating around
27
+ // with the current loop time as set by the Event Machine.
28
+ // This avoids the need for frequent expensive calls to time(NULL);
29
+ time_t gCurrentLoopTime;
30
+
31
+ #ifdef OS_WIN32
32
+ unsigned gTickCountTickover;
33
+ unsigned gLastTickCount;
34
+ #endif
35
+
36
+
37
+ /* The maximum number of outstanding timers was once a const enum defined in em.h.
38
+ * Now we define it here so that users can change its value if necessary.
39
+ */
40
+ static int MaxOutstandingTimers = 1000;
41
+
42
+
43
+ /* Internal helper to convert strings to internet addresses. IPv6-aware.
44
+ * Not reentrant or threadsafe, optimized for speed.
45
+ */
46
+ static struct sockaddr *name2address (const char *server, int port, int *family, int *bind_size);
47
+
48
+ /***************************************
49
+ STATIC EventMachine_t::GetMaxTimerCount
50
+ ***************************************/
51
+
52
+ int EventMachine_t::GetMaxTimerCount()
53
+ {
54
+ return MaxOutstandingTimers;
55
+ }
56
+
57
+
58
+ /***************************************
59
+ STATIC EventMachine_t::SetMaxTimerCount
60
+ ***************************************/
61
+
62
+ void EventMachine_t::SetMaxTimerCount (int count)
63
+ {
64
+ /* Allow a user to increase the maximum number of outstanding timers.
65
+ * If this gets "too high" (a metric that is of course platform dependent),
66
+ * bad things will happen like performance problems and possible overuse
67
+ * of memory.
68
+ * The actual timer mechanism is very efficient so it's hard to know what
69
+ * the practical max, but 100,000 shouldn't be too problematical.
70
+ */
71
+ if (count < 100)
72
+ count = 100;
73
+ MaxOutstandingTimers = count;
74
+ }
75
+
76
+
77
+
78
+ /******************************
79
+ EventMachine_t::EventMachine_t
80
+ ******************************/
81
+
82
+ EventMachine_t::EventMachine_t (void (*event_callback)(const char*, int, const char*, int)):
83
+ EventCallback (event_callback),
84
+ NextHeartbeatTime (0),
85
+ LoopBreakerReader (-1),
86
+ LoopBreakerWriter (-1),
87
+ bEpoll (false),
88
+ bKqueue (false),
89
+ epfd (-1)
90
+ {
91
+ // Default time-slice is just smaller than one hundred milliseconds.
92
+ Quantum.tv_sec = 0;
93
+ Quantum.tv_usec = 90000;
94
+
95
+ gTerminateSignalReceived = false;
96
+ // Make sure the current loop time is sane, in case we do any initializations of
97
+ // objects before we start running.
98
+ gCurrentLoopTime = time(NULL);
99
+
100
+ /* We initialize the network library here (only on Windows of course)
101
+ * and initialize "loop breakers." Our destructor also does some network-level
102
+ * cleanup. There's thus an implicit assumption that any given instance of EventMachine_t
103
+ * will only call ::Run once. Is that a good assumption? Should we move some of these
104
+ * inits and de-inits into ::Run?
105
+ */
106
+ #ifdef OS_WIN32
107
+ WSADATA w;
108
+ WSAStartup (MAKEWORD (1, 1), &w);
109
+ #endif
110
+
111
+ _InitializeLoopBreaker();
112
+ }
113
+
114
+
115
+ /*******************************
116
+ EventMachine_t::~EventMachine_t
117
+ *******************************/
118
+
119
+ EventMachine_t::~EventMachine_t()
120
+ {
121
+ // Run down descriptors
122
+ size_t i;
123
+ for (i = 0; i < NewDescriptors.size(); i++)
124
+ delete NewDescriptors[i];
125
+ for (i = 0; i < Descriptors.size(); i++)
126
+ delete Descriptors[i];
127
+
128
+ close (LoopBreakerReader);
129
+ close (LoopBreakerWriter);
130
+
131
+ if (epfd != -1)
132
+ close (epfd);
133
+ if (kqfd != -1)
134
+ close (kqfd);
135
+ }
136
+
137
+
138
+ /*************************
139
+ EventMachine_t::_UseEpoll
140
+ *************************/
141
+
142
+ void EventMachine_t::_UseEpoll()
143
+ {
144
+ /* Temporary.
145
+ * Use an internal flag to switch in epoll-based functionality until we determine
146
+ * how it should be integrated properly and the extent of the required changes.
147
+ * A permanent solution needs to allow the integration of additional technologies,
148
+ * like kqueue and Solaris's events.
149
+ */
150
+
151
+ #ifdef HAVE_EPOLL
152
+ bEpoll = true;
153
+ #endif
154
+ }
155
+
156
+ /**************************
157
+ EventMachine_t::_UseKqueue
158
+ **************************/
159
+
160
+ void EventMachine_t::_UseKqueue()
161
+ {
162
+ /* Temporary.
163
+ * See comments under _UseEpoll.
164
+ */
165
+
166
+ #ifdef HAVE_KQUEUE
167
+ bKqueue = true;
168
+ #endif
169
+ }
170
+
171
+
172
+ /****************************
173
+ EventMachine_t::ScheduleHalt
174
+ ****************************/
175
+
176
+ void EventMachine_t::ScheduleHalt()
177
+ {
178
+ /* This is how we stop the machine.
179
+ * This can be called by clients. Signal handlers will probably
180
+ * set the global flag.
181
+ * For now this means there can only be one EventMachine ever running at a time.
182
+ *
183
+ * IMPORTANT: keep this light, fast, and async-safe. Don't do anything frisky in here,
184
+ * because it may be called from signal handlers invoked from code that we don't
185
+ * control. At this writing (20Sep06), EM does NOT install any signal handlers of
186
+ * its own.
187
+ *
188
+ * We need a FAQ. And one of the questions is: how do I stop EM when Ctrl-C happens?
189
+ * The answer is to call evma_stop_machine, which calls here, from a SIGINT handler.
190
+ */
191
+ gTerminateSignalReceived = true;
192
+ }
193
+
194
+
195
+
196
+ /*******************************
197
+ EventMachine_t::SetTimerQuantum
198
+ *******************************/
199
+
200
+ void EventMachine_t::SetTimerQuantum (int interval)
201
+ {
202
+ /* We get a timer-quantum expressed in milliseconds.
203
+ * Don't set a quantum smaller than 5 or larger than 2500.
204
+ */
205
+
206
+ if ((interval < 5) || (interval > 2500))
207
+ throw std::runtime_error ("invalid timer-quantum");
208
+
209
+ Quantum.tv_sec = interval / 1000;
210
+ Quantum.tv_usec = (interval % 1000) * 1000;
211
+ }
212
+
213
+
214
+ /*************************************
215
+ (STATIC) EventMachine_t::SetuidString
216
+ *************************************/
217
+
218
+ void EventMachine_t::SetuidString (const char *username)
219
+ {
220
+ /* This method takes a caller-supplied username and tries to setuid
221
+ * to that user. There is no meaningful implementation (and no error)
222
+ * on Windows. On Unix, a failure to setuid the caller-supplied string
223
+ * causes a fatal abort, because presumably the program is calling here
224
+ * in order to fulfill a security requirement. If we fail silently,
225
+ * the user may continue to run with too much privilege.
226
+ *
227
+ * TODO, we need to decide on and document a way of generating C++ level errors
228
+ * that can be wrapped in documented Ruby exceptions, so users can catch
229
+ * and handle them. And distinguish it from errors that we WON'T let the Ruby
230
+ * user catch (like security-violations and resource-overallocation).
231
+ * A setuid failure here would be in the latter category.
232
+ */
233
+
234
+ #ifdef OS_UNIX
235
+ if (!username || !*username)
236
+ throw std::runtime_error ("setuid_string failed: no username specified");
237
+
238
+ struct passwd *p = getpwnam (username);
239
+ if (!p)
240
+ throw std::runtime_error ("setuid_string failed: unknown username");
241
+
242
+ if (setuid (p->pw_uid) != 0)
243
+ throw std::runtime_error ("setuid_string failed: no setuid");
244
+
245
+ // Success.
246
+ #endif
247
+ }
248
+
249
+
250
+ /****************************************
251
+ (STATIC) EventMachine_t::SetRlimitNofile
252
+ ****************************************/
253
+
254
+ int EventMachine_t::SetRlimitNofile (int nofiles)
255
+ {
256
+ #ifdef OS_UNIX
257
+ struct rlimit rlim;
258
+ getrlimit (RLIMIT_NOFILE, &rlim);
259
+ if (nofiles >= 0) {
260
+ rlim.rlim_cur = nofiles;
261
+ if (nofiles > rlim.rlim_max)
262
+ rlim.rlim_max = nofiles;
263
+ setrlimit (RLIMIT_NOFILE, &rlim);
264
+ // ignore the error return, for now at least.
265
+ // TODO, emit an error message someday when we have proper debug levels.
266
+ }
267
+ getrlimit (RLIMIT_NOFILE, &rlim);
268
+ return rlim.rlim_cur;
269
+ #endif
270
+
271
+ #ifdef OS_WIN32
272
+ // No meaningful implementation on Windows.
273
+ return 0;
274
+ #endif
275
+ }
276
+
277
+
278
+ /*********************************
279
+ EventMachine_t::SignalLoopBreaker
280
+ *********************************/
281
+
282
+ void EventMachine_t::SignalLoopBreaker()
283
+ {
284
+ #ifdef OS_UNIX
285
+ write (LoopBreakerWriter, "", 1);
286
+ #endif
287
+ #ifdef OS_WIN32
288
+ sendto (LoopBreakerReader, "", 0, 0, (struct sockaddr*)&(LoopBreakerTarget), sizeof(LoopBreakerTarget));
289
+ #endif
290
+ }
291
+
292
+
293
+ /**************************************
294
+ EventMachine_t::_InitializeLoopBreaker
295
+ **************************************/
296
+
297
+ void EventMachine_t::_InitializeLoopBreaker()
298
+ {
299
+ /* A "loop-breaker" is a socket-descriptor that we can write to in order
300
+ * to break the main select loop. Primarily useful for things running on
301
+ * threads other than the main EM thread, so they can trigger processing
302
+ * of events that arise exogenously to the EM.
303
+ * Keep the loop-breaker pipe out of the main descriptor set, otherwise
304
+ * its events will get passed on to user code.
305
+ */
306
+
307
+ #ifdef OS_UNIX
308
+ int fd[2];
309
+ if (pipe (fd))
310
+ throw std::runtime_error ("no loop breaker");
311
+
312
+ LoopBreakerWriter = fd[1];
313
+ LoopBreakerReader = fd[0];
314
+ #endif
315
+
316
+ #ifdef OS_WIN32
317
+ int sd = socket (AF_INET, SOCK_DGRAM, 0);
318
+ if (sd == INVALID_SOCKET)
319
+ throw std::runtime_error ("no loop breaker socket");
320
+ SetSocketNonblocking (sd);
321
+
322
+ memset (&LoopBreakerTarget, 0, sizeof(LoopBreakerTarget));
323
+ LoopBreakerTarget.sin_family = AF_INET;
324
+ LoopBreakerTarget.sin_addr.s_addr = inet_addr ("127.0.0.1");
325
+
326
+ srand ((int)time(NULL));
327
+ int i;
328
+ for (i=0; i < 100; i++) {
329
+ int r = (rand() % 10000) + 20000;
330
+ LoopBreakerTarget.sin_port = htons (r);
331
+ if (bind (sd, (struct sockaddr*)&LoopBreakerTarget, sizeof(LoopBreakerTarget)) == 0)
332
+ break;
333
+ }
334
+
335
+ if (i == 100)
336
+ throw std::runtime_error ("no loop breaker");
337
+ LoopBreakerReader = sd;
338
+ #endif
339
+ }
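/* A minimal standalone sketch of the "loop breaker" (self-pipe) technique described
 * above, assuming a POSIX system: a pipe whose read end sits in the select set, so
 * another thread can wake the event loop by writing a byte. An illustration of the
 * idea only, not code taken from EventMachine.
 */
#include <unistd.h>
#include <stdexcept>

class LoopBreaker {
public:
	LoopBreaker() {
		int fd[2];
		if (pipe (fd))
			throw std::runtime_error ("no loop breaker");
		reader = fd[0];
		writer = fd[1];
	}
	~LoopBreaker() { close (reader); close (writer); }

	// Called from any thread to wake up a loop blocked in select().
	void Signal() { write (writer, "", 1); }

	// Called by the loop after select() reports the read end readable.
	void Drain() { char buf [1024]; read (reader, buf, sizeof(buf)); }

	int Reader() const { return reader; }

private:
	int reader, writer;
};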
340
+
341
+
342
+ /*******************
343
+ EventMachine_t::Run
344
+ *******************/
345
+
346
+ void EventMachine_t::Run()
347
+ {
348
+ #ifdef OS_WIN32
349
+ HookControlC (true);
350
+ #endif
351
+
352
+ #ifdef HAVE_EPOLL
353
+ if (bEpoll) {
354
+ epfd = epoll_create (MaxEpollDescriptors);
355
+ if (epfd == -1) {
356
+ char buf[200];
357
+ snprintf (buf, sizeof(buf)-1, "unable to create epoll descriptor: %s", strerror(errno));
358
+ throw std::runtime_error (buf);
359
+ }
360
+ int cloexec = fcntl (epfd, F_GETFD, 0);
361
+ assert (cloexec >= 0);
362
+ cloexec |= FD_CLOEXEC;
363
+ fcntl (epfd, F_SETFD, cloexec);
364
+
365
+ assert (LoopBreakerReader >= 0);
366
+ LoopbreakDescriptor *ld = new LoopbreakDescriptor (LoopBreakerReader, this);
367
+ assert (ld);
368
+ Add (ld);
369
+ }
370
+ #endif
371
+
372
+ #ifdef HAVE_KQUEUE
373
+ if (bKqueue) {
374
+ kqfd = kqueue();
375
+ if (kqfd == -1) {
376
+ char buf[200];
377
+ snprintf (buf, sizeof(buf)-1, "unable to create kqueue descriptor: %s", strerror(errno));
378
+ throw std::runtime_error (buf);
379
+ }
380
+ // cloexec not needed. By definition, kqueues are not carried across forks.
381
+
382
+ assert (LoopBreakerReader >= 0);
383
+ LoopbreakDescriptor *ld = new LoopbreakDescriptor (LoopBreakerReader, this);
384
+ assert (ld);
385
+ Add (ld);
386
+ }
387
+ #endif
388
+
389
+ while (true) {
390
+ gCurrentLoopTime = time(NULL);
391
+ if (!_RunTimers())
392
+ break;
393
+
394
+ /* _Add must precede _Modify because the same descriptor might
395
+ * be on both lists during the same pass through the machine,
396
+ * and to modify a descriptor before adding it would fail.
397
+ */
398
+ _AddNewDescriptors();
399
+ _ModifyDescriptors();
400
+
401
+ if (!_RunOnce())
402
+ break;
403
+ if (gTerminateSignalReceived)
404
+ break;
405
+ }
406
+
407
+ #ifdef OS_WIN32
408
+ HookControlC (false);
409
+ #endif
410
+ }
411
+
412
+
413
+ /************************
414
+ EventMachine_t::_RunOnce
415
+ ************************/
416
+
417
+ bool EventMachine_t::_RunOnce()
418
+ {
419
+ if (bEpoll)
420
+ return _RunEpollOnce();
421
+ else if (bKqueue)
422
+ return _RunKqueueOnce();
423
+ else
424
+ return _RunSelectOnce();
425
+ }
426
+
427
+
428
+
429
+ /*****************************
430
+ EventMachine_t::_RunEpollOnce
431
+ *****************************/
432
+
433
+ bool EventMachine_t::_RunEpollOnce()
434
+ {
435
+ #ifdef HAVE_EPOLL
436
+ assert (epfd != -1);
437
+ struct epoll_event ev [MaxEpollDescriptors];
438
+ int s;
439
+
440
+ #ifdef BUILD_FOR_RUBY
441
+ TRAP_BEG;
442
+ #endif
443
+ s = epoll_wait (epfd, ev, MaxEpollDescriptors, 50);
444
+ #ifdef BUILD_FOR_RUBY
445
+ TRAP_END;
446
+ #endif
447
+
448
+ if (s > 0) {
449
+ for (int i=0; i < s; i++) {
450
+ EventableDescriptor *ed = (EventableDescriptor*) ev[i].data.ptr;
451
+
452
+ if (ev[i].events & (EPOLLERR | EPOLLHUP))
453
+ ed->ScheduleClose (false);
454
+ if (ev[i].events & EPOLLIN)
455
+ ed->Read();
456
+ if (ev[i].events & EPOLLOUT) {
457
+ ed->Write();
458
+ epoll_ctl (epfd, EPOLL_CTL_MOD, ed->GetSocket(), ed->GetEpollEvent());
459
+ // Ignoring return value
460
+ }
461
+ }
462
+ }
463
+ else if (s < 0) {
464
+ // epoll_wait can fail on error in a handful of ways.
465
+ // If this happens, then wait for a little while to avoid busy-looping.
466
+ // If the error was EINTR, we probably caught SIGCHLD or something,
467
+ // so keep the wait short.
468
+ timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
469
+ EmSelect (0, NULL, NULL, NULL, &tv);
470
+ }
471
+
472
+ { // cleanup dying sockets
473
+ // vector::pop_back works in constant time.
474
+ // TODO, rip this out and only delete the descriptors we know have died,
475
+ // rather than traversing the whole list.
476
+ // Modified 05Jan08 per suggestions by Chris Heath. It's possible that
477
+ // an EventableDescriptor will have a descriptor value of -1. That will
478
+ // happen if EventableDescriptor::Close was called on it. In that case,
479
+ // don't call epoll_ctl to remove the socket's filters from the epoll set.
480
+ // According to the epoll docs, this happens automatically when the
481
+ // descriptor is closed anyway. This is different from the case where
482
+ // the socket has already been closed but the descriptor in the ED object
483
+ // hasn't yet been set to INVALID_SOCKET.
484
+ int i, j;
485
+ int nSockets = Descriptors.size();
486
+ for (i=0, j=0; i < nSockets; i++) {
487
+ EventableDescriptor *ed = Descriptors[i];
488
+ assert (ed);
489
+ if (ed->ShouldDelete()) {
490
+ if (ed->GetSocket() != INVALID_SOCKET) {
491
+ assert (bEpoll); // wouldn't be in this method otherwise.
492
+ assert (epfd != -1);
493
+ int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
494
+ // ENOENT or EBADF are not errors because the socket may be already closed when we get here.
495
+ if (e && (errno != ENOENT) && (errno != EBADF)) {
496
+ char buf [200];
497
+ snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
498
+ throw std::runtime_error (buf);
499
+ }
500
+ }
501
+
502
+ ModifiedDescriptors.erase (ed);
503
+ delete ed;
504
+ }
505
+ else
506
+ Descriptors [j++] = ed;
507
+ }
508
+ while ((size_t)j < Descriptors.size())
509
+ Descriptors.pop_back();
510
+
511
+ }
512
+
513
+ // TODO, heartbeats.
514
+ // Added 14Sep07; its absence was noted by Brian Candler. But the comment was here, indicating
515
+ // that this got thought about and not done when EPOLL was originally written. Was there a reason
516
+ // not to do it, or was it an oversight? Certainly, running a heartbeat on 50,000 connections every
517
+ // two seconds can get to be a real bear, especially if all we're doing is timing out dead ones.
518
+ // Maybe there's a better way to do this. (Or maybe it's not that expensive after all.)
519
+ //
520
+ { // dispatch heartbeats
521
+ if (gCurrentLoopTime >= NextHeartbeatTime) {
522
+ NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;
523
+
524
+ for (int i=0; i < Descriptors.size(); i++) {
525
+ EventableDescriptor *ed = Descriptors[i];
526
+ assert (ed);
527
+ ed->Heartbeat();
528
+ }
529
+ }
530
+ }
531
+
532
+ #ifdef BUILD_FOR_RUBY
533
+ if (!rb_thread_alone()) {
534
+ rb_thread_schedule();
535
+ }
536
+ #endif
537
+
538
+ return true;
539
+ #else
540
+ throw std::runtime_error ("epoll is not implemented on this platform");
541
+ #endif
542
+ }
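/* A condensed sketch of the epoll pattern used above, assuming Linux: each descriptor
 * is registered with a pointer to its handler object stored in epoll_event.data.ptr,
 * which epoll_wait hands back verbatim, so the loop can dispatch without any lookup
 * table. Illustrative only; the Handler type is a stand-in, not EventMachine code.
 */
#include <sys/epoll.h>
#include <stdexcept>

struct Handler {
	int fd;
	virtual void Read() = 0;
	virtual void Write() = 0;
	virtual ~Handler() {}
};

inline void WatchHandler (int epfd, Handler *h)
{
	struct epoll_event ev;
	ev.events = EPOLLIN | EPOLLOUT;
	ev.data.ptr = h;                        // carried back by epoll_wait
	if (epoll_ctl (epfd, EPOLL_CTL_ADD, h->fd, &ev))
		throw std::runtime_error ("epoll_ctl failed");
}

inline void PollOnce (int epfd)
{
	struct epoll_event ev [64];
	int n = epoll_wait (epfd, ev, 64, 50);  // 50 ms timeout, mirroring the loop above
	for (int i = 0; i < n; i++) {
		Handler *h = (Handler*) ev[i].data.ptr;
		if (ev[i].events & EPOLLIN)
			h->Read();
		if (ev[i].events & EPOLLOUT)
			h->Write();
	}
}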
543
+
544
+
545
+ /******************************
546
+ EventMachine_t::_RunKqueueOnce
547
+ ******************************/
548
+
549
+ bool EventMachine_t::_RunKqueueOnce()
550
+ {
551
+ #ifdef HAVE_KQUEUE
552
+ assert (kqfd != -1);
553
+ const int maxKevents = 2000;
554
+ struct kevent Karray [maxKevents];
555
+ struct timespec ts = {0, 10000000}; // Too frequent. Use blocking_region
556
+
557
+ int k;
558
+ #ifdef BUILD_FOR_RUBY
559
+ TRAP_BEG;
560
+ #endif
561
+ k = kevent (kqfd, NULL, 0, Karray, maxKevents, &ts);
562
+ #ifdef BUILD_FOR_RUBY
563
+ TRAP_END;
564
+ #endif
565
+ struct kevent *ke = Karray;
566
+ while (k > 0) {
567
+ EventableDescriptor *ed = (EventableDescriptor*) (ke->udata);
568
+ assert (ed);
569
+
570
+ if (ke->filter == EVFILT_READ)
571
+ ed->Read();
572
+ else if (ke->filter == EVFILT_WRITE)
573
+ ed->Write();
574
+ else
575
+ cerr << "Discarding unknown kqueue event " << ke->filter << endl;
576
+
577
+ --k;
578
+ ++ke;
579
+ }
580
+
581
+ { // cleanup dying sockets
582
+ // vector::pop_back works in constant time.
583
+ // TODO, rip this out and only delete the descriptors we know have died,
584
+ // rather than traversing the whole list.
585
+ // In kqueue, closing a descriptor automatically removes its event filters.
586
+
587
+ int i, j;
588
+ int nSockets = Descriptors.size();
589
+ for (i=0, j=0; i < nSockets; i++) {
590
+ EventableDescriptor *ed = Descriptors[i];
591
+ assert (ed);
592
+ if (ed->ShouldDelete()) {
593
+ ModifiedDescriptors.erase (ed);
594
+ delete ed;
595
+ }
596
+ else
597
+ Descriptors [j++] = ed;
598
+ }
599
+ while ((size_t)j < Descriptors.size())
600
+ Descriptors.pop_back();
601
+
602
+ }
603
+
604
+ { // dispatch heartbeats
605
+ if (gCurrentLoopTime >= NextHeartbeatTime) {
606
+ NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;
607
+
608
+ for (int i=0; i < Descriptors.size(); i++) {
609
+ EventableDescriptor *ed = Descriptors[i];
610
+ assert (ed);
611
+ ed->Heartbeat();
612
+ }
613
+ }
614
+ }
615
+
616
+
617
+ // TODO, replace this with rb_thread_blocking_region for 1.9 builds.
618
+ #ifdef BUILD_FOR_RUBY
619
+ if (!rb_thread_alone()) {
620
+ rb_thread_schedule();
621
+ }
622
+ #endif
623
+
624
+ return true;
625
+ #else
626
+ throw std::runtime_error ("kqueue is not implemented on this platform");
627
+ #endif
628
+ }
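/* A condensed sketch of the kqueue pattern used above, assuming a BSD-style system:
 * the handler object is stored in the kevent udata field when the filter is
 * registered, and comes back untouched from kevent(), so dispatch needs no lookup
 * table. Illustrative only; the KqHandler type is a stand-in, not EventMachine code.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

struct KqHandler {
	int fd;
	virtual void Read() = 0;
	virtual ~KqHandler() {}
};

inline void WatchForRead (int kqfd, KqHandler *h)
{
	struct kevent k;
	EV_SET (&k, h->fd, EVFILT_READ, EV_ADD, 0, 0, h);  // h rides along in udata
	kevent (kqfd, &k, 1, NULL, 0, NULL);
}

inline void KqPollOnce (int kqfd)
{
	struct kevent out [64];
	struct timespec ts = {0, 50000000};                 // 50 ms timeout
	int n = kevent (kqfd, NULL, 0, out, 64, &ts);
	for (int i = 0; i < n; i++) {
		KqHandler *h = (KqHandler*) out[i].udata;
		if (out[i].filter == EVFILT_READ)
			h->Read();
	}
}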
629
+
630
+
631
+ /*********************************
632
+ EventMachine_t::_ModifyEpollEvent
633
+ *********************************/
634
+
635
+ void EventMachine_t::_ModifyEpollEvent (EventableDescriptor *ed)
636
+ {
637
+ #ifdef HAVE_EPOLL
638
+ if (bEpoll) {
639
+ assert (epfd != -1);
640
+ assert (ed);
641
+ int e = epoll_ctl (epfd, EPOLL_CTL_MOD, ed->GetSocket(), ed->GetEpollEvent());
642
+ if (e) {
643
+ char buf [200];
644
+ snprintf (buf, sizeof(buf)-1, "unable to modify epoll event: %s", strerror(errno));
645
+ throw std::runtime_error (buf);
646
+ }
647
+ }
648
+ #endif
649
+ }
650
+
651
+
652
+
653
+ /**************************
654
+ SelectData_t::SelectData_t
655
+ **************************/
656
+
657
+ SelectData_t::SelectData_t()
658
+ {
659
+ maxsocket = 0;
660
+ FD_ZERO (&fdreads);
661
+ FD_ZERO (&fdwrites);
662
+ }
663
+
664
+
665
+ #ifdef BUILD_FOR_RUBY
666
+ /*****************
667
+ _SelectDataSelect
668
+ *****************/
669
+
670
+ #ifdef HAVE_TBR
671
+ static VALUE _SelectDataSelect (void *v)
672
+ {
673
+ SelectData_t *sd = (SelectData_t*)v;
674
+ sd->nSockets = select (sd->maxsocket+1, &(sd->fdreads), &(sd->fdwrites), NULL, &(sd->tv));
675
+ return Qnil;
676
+ }
677
+ #endif
678
+
679
+ /*********************
680
+ SelectData_t::_Select
681
+ *********************/
682
+
683
+ int SelectData_t::_Select()
684
+ {
685
+ #ifdef HAVE_TBR
686
+ rb_thread_blocking_region (_SelectDataSelect, (void*)this, RUBY_UBF_IO, 0);
687
+ return nSockets;
688
+ #endif
689
+
690
+ #ifndef HAVE_TBR
691
+ return EmSelect (maxsocket+1, &fdreads, &fdwrites, NULL, &tv);
692
+ #endif
693
+ }
694
+ #endif
695
+
696
+
697
+
698
+ /******************************
699
+ EventMachine_t::_RunSelectOnce
700
+ ******************************/
701
+
702
+ bool EventMachine_t::_RunSelectOnce()
703
+ {
704
+ // Crank the event machine once.
705
+ // If there are no descriptors to process, then sleep
706
+ // for a few hundred milliseconds to avoid busy-looping.
707
+ // Return T/F to indicate whether we should continue.
708
+ // This is based on a select loop. Alternately provide epoll
709
+ // if we know we're running on a 2.6 kernel.
710
+ // epoll will be effective if we provide it as an alternative,
711
+ // however it has the same problem interoperating with Ruby
712
+ // threads that select does.
713
+
714
+ //cerr << "X";
715
+
716
+ /* This protection is now obsolete, because we will ALWAYS
717
+ * have at least one descriptor (the loop-breaker) to read.
718
+ */
719
+ /*
720
+ if (Descriptors.size() == 0) {
721
+ #ifdef OS_UNIX
722
+ timeval tv = {0, 200 * 1000};
723
+ EmSelect (0, NULL, NULL, NULL, &tv);
724
+ return true;
725
+ #endif
726
+ #ifdef OS_WIN32
727
+ Sleep (200);
728
+ return true;
729
+ #endif
730
+ }
731
+ */
732
+
733
+ SelectData_t SelectData;
734
+ /*
735
+ fd_set fdreads, fdwrites;
736
+ FD_ZERO (&fdreads);
737
+ FD_ZERO (&fdwrites);
738
+
739
+ int maxsocket = 0;
740
+ */
741
+
742
+ // Always read the loop-breaker reader.
743
+ // Changed 23Aug06, provisionally implemented for Windows with a UDP socket
744
+ // running on localhost with a randomly-chosen port. (*Puke*)
745
+ // Windows has a version of the Unix pipe() library function, but it doesn't
746
+ // give you back descriptors that are selectable.
747
+ FD_SET (LoopBreakerReader, &(SelectData.fdreads));
748
+ if (SelectData.maxsocket < LoopBreakerReader)
749
+ SelectData.maxsocket = LoopBreakerReader;
750
+
751
+ // prepare the sockets for reading and writing
752
+ size_t i;
753
+ for (i = 0; i < Descriptors.size(); i++) {
754
+ EventableDescriptor *ed = Descriptors[i];
755
+ assert (ed);
756
+ int sd = ed->GetSocket();
757
+ assert (sd != INVALID_SOCKET);
758
+
759
+ if (ed->SelectForRead())
760
+ FD_SET (sd, &(SelectData.fdreads));
761
+ if (ed->SelectForWrite())
762
+ FD_SET (sd, &(SelectData.fdwrites));
763
+
764
+ if (SelectData.maxsocket < sd)
765
+ SelectData.maxsocket = sd;
766
+ }
767
+
768
+
769
+ { // read and write the sockets
770
+ //timeval tv = {1, 0}; // Solaris fails if the microseconds member is >= 1000000.
771
+ //timeval tv = Quantum;
772
+ SelectData.tv = Quantum;
773
+ int s = SelectData._Select();
774
+ //rb_thread_blocking_region(xxx,(void*)&SelectData,RUBY_UBF_IO,0);
775
+ //int s = EmSelect (SelectData.maxsocket+1, &(SelectData.fdreads), &(SelectData.fdwrites), NULL, &(SelectData.tv));
776
+ //int s = SelectData.nSockets;
777
+ if (s > 0) {
778
+ /* Changed 01Jun07. We used to handle the Loop-breaker right here.
779
+ * Now we do it AFTER all the regular descriptors. There's an
780
+ * incredibly important and subtle reason for this. Code on
781
+ * loop breakers is sometimes used to cause the reactor core to
782
+ * cycle (for example, to allow outbound network buffers to drain).
783
+ * If a loop-breaker handler reschedules itself (say, after determining
784
+ * that the write buffers are still too full), then it will execute
785
+ * IMMEDIATELY if _ReadLoopBreaker is done here instead of after
786
+ * the other descriptors are processed. That defeats the whole purpose.
787
+ */
788
+ for (i=0; i < Descriptors.size(); i++) {
789
+ EventableDescriptor *ed = Descriptors[i];
790
+ assert (ed);
791
+ int sd = ed->GetSocket();
792
+ assert (sd != INVALID_SOCKET);
793
+
794
+ if (FD_ISSET (sd, &(SelectData.fdwrites)))
795
+ ed->Write();
796
+ if (FD_ISSET (sd, &(SelectData.fdreads)))
797
+ ed->Read();
798
+ }
799
+
800
+ if (FD_ISSET (LoopBreakerReader, &(SelectData.fdreads)))
801
+ _ReadLoopBreaker();
802
+ }
803
+ else if (s < 0) {
804
+ // select can fail on error in a handful of ways.
805
+ // If this happens, then wait for a little while to avoid busy-looping.
806
+ // If the error was EINTR, we probably caught SIGCHLD or something,
807
+ // so keep the wait short.
808
+ timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
809
+ EmSelect (0, NULL, NULL, NULL, &tv);
810
+ }
811
+ }
812
+
813
+
814
+ { // dispatch heartbeats
815
+ if (gCurrentLoopTime >= NextHeartbeatTime) {
816
+ NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;
817
+
818
+ for (i=0; i < Descriptors.size(); i++) {
819
+ EventableDescriptor *ed = Descriptors[i];
820
+ assert (ed);
821
+ ed->Heartbeat();
822
+ }
823
+ }
824
+ }
825
+
826
+ { // cleanup dying sockets
827
+ // vector::pop_back works in constant time.
828
+ int i, j;
829
+ int nSockets = Descriptors.size();
830
+ for (i=0, j=0; i < nSockets; i++) {
831
+ EventableDescriptor *ed = Descriptors[i];
832
+ assert (ed);
833
+ if (ed->ShouldDelete())
834
+ delete ed;
835
+ else
836
+ Descriptors [j++] = ed;
837
+ }
838
+ while ((size_t)j < Descriptors.size())
839
+ Descriptors.pop_back();
840
+
841
+ }
842
+
843
+ return true;
844
+ }
845
+
846
+
847
+ /********************************
848
+ EventMachine_t::_ReadLoopBreaker
849
+ ********************************/
850
+
851
+ void EventMachine_t::_ReadLoopBreaker()
852
+ {
853
+ /* The loop breaker has selected readable.
854
+ * Read it ONCE (it may block if we try to read it twice)
855
+ * and send a loop-break event back to user code.
856
+ */
857
+ char buffer [1024];
858
+ read (LoopBreakerReader, buffer, sizeof(buffer));
859
+ if (EventCallback)
860
+ (*EventCallback)("", EM_LOOPBREAK_SIGNAL, "", 0);
861
+ }
862
+
863
+
864
+ /**************************
865
+ EventMachine_t::_RunTimers
866
+ **************************/
867
+
868
+ bool EventMachine_t::_RunTimers()
869
+ {
870
+ // These are caller-defined timer handlers.
871
+ // Return T/F to indicate whether we should continue the main loop.
872
+ // We rely on the fact that multimaps sort by their keys to avoid
873
+ // inspecting the whole list every time we come here.
874
+ // Just keep inspecting and processing the list head until we hit
875
+ // one that hasn't expired yet.
876
+
877
+ #ifdef OS_UNIX
878
+ struct timeval tv;
879
+ gettimeofday (&tv, NULL);
880
+ Int64 now = (((Int64)(tv.tv_sec)) * 1000000LL) + ((Int64)(tv.tv_usec));
881
+ #endif
882
+
883
+ #ifdef OS_WIN32
884
+ unsigned tick = GetTickCount();
885
+ if (tick < gLastTickCount)
886
+ gTickCountTickover += 1;
887
+ gLastTickCount = tick;
888
+ Int64 now = ((Int64)gTickCountTickover << 32) + (Int64)tick;
889
+ #endif
890
+
891
+ while (true) {
892
+ multimap<Int64,Timer_t>::iterator i = Timers.begin();
893
+ if (i == Timers.end())
894
+ break;
895
+ if (i->first > now)
896
+ break;
897
+ if (EventCallback)
898
+ (*EventCallback) ("", EM_TIMER_FIRED, i->second.GetBinding().c_str(), i->second.GetBinding().length());
899
+ Timers.erase (i);
900
+ }
901
+ return true;
902
+ }
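/* A small self-contained sketch of the timer technique described above, using a
 * std::multimap keyed by absolute firing time in microseconds: because the keys are
 * kept sorted, only the head of the map needs to be examined on each pass. A sketch
 * for illustration only; the TimerProc callback type is a stand-in.
 */
#include <map>
#include <sys/time.h>

typedef long long Usec;
typedef void (*TimerProc)();

static std::multimap<Usec, TimerProc> gTimerQueue;

static Usec NowUsec()
{
	struct timeval tv;
	gettimeofday (&tv, NULL);
	return ((Usec)tv.tv_sec) * 1000000LL + (Usec)tv.tv_usec;
}

void AddTimer (int milliseconds, TimerProc proc)
{
	gTimerQueue.insert (std::make_pair (NowUsec() + (Usec)milliseconds * 1000LL, proc));
}

void RunExpiredTimers()
{
	Usec now = NowUsec();
	while (!gTimerQueue.empty()) {
		std::multimap<Usec, TimerProc>::iterator i = gTimerQueue.begin();
		if (i->first > now)
			break;                  // the head hasn't expired, so nothing later has either
		TimerProc proc = i->second;
		gTimerQueue.erase (i);
		proc();
	}
}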
903
+
904
+
905
+
906
+ /***********************************
907
+ EventMachine_t::InstallOneshotTimer
908
+ ***********************************/
909
+
910
+ const char *EventMachine_t::InstallOneshotTimer (int milliseconds)
911
+ {
912
+ if ((int)Timers.size() > MaxOutstandingTimers)
913
+ return NULL;
914
+ // Don't use the global loop-time variable here, because we might
915
+ // get called before the main event machine is running.
916
+
917
+ #ifdef OS_UNIX
918
+ struct timeval tv;
919
+ gettimeofday (&tv, NULL);
920
+ Int64 fire_at = (((Int64)(tv.tv_sec)) * 1000000LL) + ((Int64)(tv.tv_usec));
921
+ fire_at += ((Int64)milliseconds) * 1000LL;
922
+ #endif
923
+
924
+ #ifdef OS_WIN32
925
+ unsigned tick = GetTickCount();
926
+ if (tick < gLastTickCount)
927
+ gTickCountTickover += 1;
928
+ gLastTickCount = tick;
929
+
930
+ Int64 fire_at = ((Int64)gTickCountTickover << 32) + (Int64)tick;
931
+ fire_at += (Int64)milliseconds;
932
+ #endif
933
+
934
+ Timer_t t;
935
+ multimap<Int64,Timer_t>::iterator i =
936
+ Timers.insert (make_pair (fire_at, t));
937
+ return i->second.GetBindingChars();
938
+ }
939
+
940
+
941
+ /*******************************
942
+ EventMachine_t::ConnectToServer
943
+ *******************************/
944
+
945
+ const char *EventMachine_t::ConnectToServer (const char *server, int port, const char * bind_host)
946
+ {
947
+ /* We want to spend no more than a few seconds waiting for a connection
948
+ * to a remote host. So we use a nonblocking connect.
949
+ * Linux disobeys the usual rules for nonblocking connects.
950
+ * Per Stevens (UNP p.410), you expect a nonblocking connect to select
951
+ * both readable and writable on error, and not to return EINPROGRESS
952
+ * if the connect can be fulfilled immediately. Linux violates both
953
+ * of these expectations.
954
+ * Any kind of nonblocking connect on Linux returns EINPROGRESS.
955
+ * The socket will then return writable when the disposition of the
956
+ * connect is known, but it will not also be readable in case of
957
+ * error! Weirdly, it will be readable in case there is data to read!!!
958
+ * (Which can happen with protocols like SSH and SMTP.)
959
+ * I suppose if you were so inclined you could consider this logical,
960
+ * but it's not the way Unix has historically done it.
961
+ * So we ignore the readable flag and call getsockopt to see if there
962
+ * was an error connecting. A select timeout works as expected.
963
+ * In regard to getsockopt: Linux does the Berkeley-style thing,
964
+ * not the Solaris-style, and returns zero with the error code in
965
+ * the error parameter.
966
+ * Return the binding-text of the newly-created pending connection,
967
+ * or NULL if there was a problem.
968
+ */
969
+
970
+ if (!server || !*server || !port)
971
+ return NULL;
972
+
973
+ int family, bind_size, bind_here_size;
974
+ struct sockaddr *bind_as = name2address (server, port, &family, &bind_size);
975
+ if (!bind_as)
976
+ return NULL;
977
+
978
+ int sd = socket (family, SOCK_STREAM, 0);
979
+ if (sd == INVALID_SOCKET)
980
+ return NULL;
981
+
982
+ /*
983
+ sockaddr_in pin;
984
+ unsigned long HostAddr;
985
+
986
+ HostAddr = inet_addr (server);
987
+ if (HostAddr == INADDR_NONE) {
988
+ hostent *hp = gethostbyname ((char*)server); // Windows requires (char*)
989
+ if (!hp) {
990
+ // TODO: This gives the caller a fatal error. Not good.
991
+ // They can respond by catching RuntimeError (blecch).
992
+ // Possibly we need to fire an unbind event and provide
993
+ // a status code so user code can detect the cause of the
994
+ // failure.
995
+ return NULL;
996
+ }
997
+ HostAddr = ((in_addr*)(hp->h_addr))->s_addr;
998
+ }
999
+
1000
+ memset (&pin, 0, sizeof(pin));
1001
+ pin.sin_family = AF_INET;
1002
+ pin.sin_addr.s_addr = HostAddr;
1003
+ pin.sin_port = htons (port);
1004
+
1005
+ int sd = socket (AF_INET, SOCK_STREAM, 0);
1006
+ if (sd == INVALID_SOCKET)
1007
+ return NULL;
1008
+ */
1009
+
1010
+ // From here on, ALL error returns must close the socket.
1011
+ // Set the new socket nonblocking.
1012
+ if (!SetSocketNonblocking (sd)) {
1013
+ closesocket (sd);
1014
+ return NULL;
1015
+ }
1016
+ // Disable Nagle's algorithm (TCP_NODELAY).
1017
+ int one = 1;
1018
+ setsockopt (sd, IPPROTO_TCP, TCP_NODELAY, (char*) &one, sizeof(one));
1019
+
1020
+ const char *out = NULL;
1021
+
1022
+ if(bind_host) {
1023
+ int family, bind_here_size;
1024
+ char old_bind[bind_size];
1025
+ memcpy(old_bind, bind_as, bind_size);
1026
+
1027
+ struct sockaddr *bind_here = name2address (bind_host, 0, &family, &bind_here_size);
1028
+ if (!bind_here || bind(sd, bind_here, bind_here_size) != 0) {
1029
+ closesocket (sd);
1030
+ return NULL;
1031
+ }
1032
+
1033
+ memcpy(bind_as, old_bind, bind_size);
1034
+ }
1035
+
1036
+ #ifdef OS_UNIX
1037
+ //if (connect (sd, (sockaddr*)&pin, sizeof pin) == 0) {
1038
+ if (connect (sd, bind_as, bind_size) == 0) {
1039
+ // This is a connect success, which Linux appears
1040
+ // never to give when the socket is nonblocking,
1041
+ // even if the connection is intramachine or to
1042
+ // localhost.
1043
+
1044
+ /* Changed this branch 08Aug06. Evidently some kernels
1045
+ * (FreeBSD for example) will actually return success from
1046
+ * a nonblocking connect. This is a pretty simple case,
1047
+ * just set up the new connection and clear the pending flag.
1048
+ * Thanks to Chris Ochs for helping track this down.
1049
+ * This branch never gets taken on Linux or (oddly) OSX.
1050
+ * The original behavior was to throw an unimplemented,
1051
+ * which the user saw as a fatal exception. Very unfriendly.
1052
+ *
1053
+ * Tweaked 10Aug06. Even though the connect disposition is
1054
+ * known, we still set the connect-pending flag. That way
1055
+ * some needed initialization will happen in the ConnectionDescriptor.
1056
+ * (To wit, the ConnectionCompleted event gets sent to the client.)
1057
+ */
1058
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1059
+ if (!cd)
1060
+ throw std::runtime_error ("no connection allocated");
1061
+ cd->SetConnectPending (true);
1062
+ Add (cd);
1063
+ out = cd->GetBinding().c_str();
1064
+ }
1065
+ else if (errno == EINPROGRESS) {
1066
+ // Errno will generally be EINPROGRESS, but on Linux
1067
+ // we have to look at getsockopt to be sure what really happened.
1068
+ int error;
1069
+ socklen_t len;
1070
+ len = sizeof(error);
1071
+ int o = getsockopt (sd, SOL_SOCKET, SO_ERROR, &error, &len);
1072
+ if ((o == 0) && (error == 0)) {
1073
+ // Here, there's no disposition.
1074
+ // Put the connection on the stack and wait for it to complete
1075
+ // or time out.
1076
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1077
+ if (!cd)
1078
+ throw std::runtime_error ("no connection allocated");
1079
+ cd->SetConnectPending (true);
1080
+ Add (cd);
1081
+ out = cd->GetBinding().c_str();
1082
+ }
1083
+ else {
1084
+ /* This could be connection refused or some such thing.
1085
+ * We will come here on Linux if a localhost connection fails.
1086
+ * Changed 16Jul06: Originally this branch was a no-op, and
1087
+ * we'd drop down to the end of the method, close the socket,
1088
+ * and return NULL, which would cause the caller to GET A
1089
+ * FATAL EXCEPTION. Now we keep the socket around but schedule an
1090
+ * immediate close on it, so the caller will get a close-event
1091
+ * scheduled on it. This was only an issue for localhost connections
1092
+ * to non-listening ports. We may eventually need to revise this
1093
+ * revised behavior, in case it causes problems like making it hard
1094
+ * for people to know that a failure occurred.
1095
+ */
1096
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1097
+ if (!cd)
1098
+ throw std::runtime_error ("no connection allocated");
1099
+ cd->ScheduleClose (false);
1100
+ Add (cd);
1101
+ out = cd->GetBinding().c_str();
1102
+ }
1103
+ }
1104
+ else {
1105
+ // The error from connect was something other than EINPROGRESS.
1106
+ }
1107
+ #endif
1108
+
1109
+ #ifdef OS_WIN32
1110
+ //if (connect (sd, (sockaddr*)&pin, sizeof pin) == 0) {
1111
+ if (connect (sd, bind_as, bind_size) == 0) {
1112
+ // This is a connect success, which Windows appears
1113
+ // never to give when the socket is nonblocking,
1114
+ // even if the connection is intramachine or to
1115
+ // localhost.
1116
+ throw std::runtime_error ("unimplemented");
1117
+ }
1118
+ else if (WSAGetLastError() == WSAEWOULDBLOCK) {
1119
+ // Here, there's no disposition.
1120
+ // Windows appears not to surface refused connections or
1121
+ // such stuff at this point.
1122
+ // Put the connection on the stack and wait for it to complete
1123
+ // or time out.
1124
+ ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
1125
+ if (!cd)
1126
+ throw std::runtime_error ("no connection allocated");
1127
+ cd->SetConnectPending (true);
1128
+ Add (cd);
1129
+ out = cd->GetBinding().c_str();
1130
+ }
1131
+ else {
1132
+ // The error from connect was something other than WSAEWOULDBLOCK.
1133
+ }
1134
+
1135
+ #endif
1136
+
1137
+ if (out == NULL)
1138
+ closesocket (sd);
1139
+ return out;
1140
+ }
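/* A minimal sketch of the nonblocking-connect check described above, assuming POSIX
 * sockets: after connect() returns EINPROGRESS, wait for writability and then read
 * SO_ERROR with getsockopt to learn whether the connect actually succeeded. Error
 * handling is abbreviated; an illustration, not EventMachine code.
 */
#include <sys/socket.h>
#include <sys/select.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>

// Returns 0 on success, or an errno value describing the connect failure.
int NonblockingConnect (int sd, const struct sockaddr *addr, socklen_t len, int timeout_secs)
{
	fcntl (sd, F_SETFL, fcntl (sd, F_GETFL, 0) | O_NONBLOCK);

	if (connect (sd, addr, len) == 0)
		return 0;                       // immediate success (rare on Linux)
	if (errno != EINPROGRESS)
		return errno;                   // immediate, definite failure

	fd_set w;
	FD_ZERO (&w);
	FD_SET (sd, &w);
	struct timeval tv = {timeout_secs, 0};
	if (select (sd + 1, NULL, &w, NULL, &tv) <= 0)
		return ETIMEDOUT;               // timed out (or select itself failed)

	int err = 0;
	socklen_t elen = sizeof(err);
	if (getsockopt (sd, SOL_SOCKET, SO_ERROR, &err, &elen) < 0)
		return errno;
	return err;                         // 0 means the connection is established
}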
1141
+
1142
+ /***********************************
1143
+ EventMachine_t::ConnectToUnixServer
1144
+ ***********************************/
1145
+
1146
+ const char *EventMachine_t::ConnectToUnixServer (const char *server)
1147
+ {
1148
+ /* Connect to a Unix-domain server, which by definition is running
1149
+ * on the same host.
1150
+ * There is no meaningful implementation on Windows.
1151
+ * There's no need to do a nonblocking connect, since the connection
1152
+ * is always local and can always be fulfilled immediately.
1153
+ */
1154
+
1155
+ #ifdef OS_WIN32
1156
+ throw std::runtime_error ("unix-domain connection unavailable on this platform");
1157
+ return NULL;
1158
+ #endif
1159
+
1160
+ // The whole rest of this function is only compiled on Unix systems.
1161
+ #ifdef OS_UNIX
1162
+
1163
+ const char *out = NULL;
1164
+
1165
+ if (!server || !*server)
1166
+ return NULL;
1167
+
1168
+ sockaddr_un pun;
1169
+ memset (&pun, 0, sizeof(pun));
1170
+ pun.sun_family = AF_LOCAL;
1171
+
1172
+ // You ordinarily expect the server name field to be at least 1024 bytes long,
1173
+ // but on Linux it can be MUCH shorter.
1174
+ if (strlen(server) >= sizeof(pun.sun_path))
1175
+ throw std::runtime_error ("unix-domain server name is too long");
1176
+
1177
+
1178
+ strcpy (pun.sun_path, server);
1179
+
1180
+ int fd = socket (AF_LOCAL, SOCK_STREAM, 0);
1181
+ if (fd == INVALID_SOCKET)
1182
+ return NULL;
1183
+
1184
+ // From here on, ALL error returns must close the socket.
1185
+ // NOTE: At this point, the socket is still a blocking socket.
1186
+ if (connect (fd, (struct sockaddr*)&pun, sizeof(pun)) != 0) {
1187
+ closesocket (fd);
1188
+ return NULL;
1189
+ }
1190
+
1191
+ // Set the newly-connected socket nonblocking.
1192
+ if (!SetSocketNonblocking (fd)) {
1193
+ closesocket (fd);
1194
+ return NULL;
1195
+ }
1196
+
1197
+ // Set up a connection descriptor and add it to the event-machine.
1198
+ // Observe, even though we know the connection status is connect-success,
1199
+ // we still set the "pending" flag, so some needed initializations take
1200
+ // place.
1201
+ ConnectionDescriptor *cd = new ConnectionDescriptor (fd, this);
1202
+ if (!cd)
1203
+ throw std::runtime_error ("no connection allocated");
1204
+ cd->SetConnectPending (true);
1205
+ Add (cd);
1206
+ out = cd->GetBinding().c_str();
1207
+
1208
+ if (out == NULL)
1209
+ closesocket (fd);
1210
+
1211
+ return out;
1212
+ #endif
1213
+ }
1214
+
1215
+ /************************
1216
+ EventMachine_t::AttachFD
1217
+ ************************/
1218
+
1219
+ const char *EventMachine_t::AttachFD (int fd, bool notify_readable, bool notify_writable)
1220
+ {
1221
+ #ifdef OS_UNIX
1222
+ if (fcntl(fd, F_GETFL, 0) < 0)
1223
+ throw std::runtime_error ("invalid file descriptor");
1224
+ #endif
1225
+
1226
+ #ifdef OS_WIN32
1227
+ // TODO: add better check for invalid file descriptors (see ioctlsocket or getsockopt)
1228
+ if (fd == INVALID_SOCKET)
1229
+ throw std::runtime_error ("invalid file descriptor");
1230
+ #endif
1231
+
1232
+ {// Check for duplicate descriptors
1233
+ size_t i;
1234
+ for (i = 0; i < Descriptors.size(); i++) {
1235
+ EventableDescriptor *ed = Descriptors[i];
1236
+ assert (ed);
1237
+ if (ed->GetSocket() == fd)
1238
+ throw std::runtime_error ("adding existing descriptor");
1239
+ }
1240
+
1241
+ for (i = 0; i < NewDescriptors.size(); i++) {
1242
+ EventableDescriptor *ed = NewDescriptors[i];
1243
+ assert (ed);
1244
+ if (ed->GetSocket() == fd)
1245
+ throw std::runtime_error ("adding existing new descriptor");
1246
+ }
1247
+ }
1248
+
1249
+ ConnectionDescriptor *cd = new ConnectionDescriptor (fd, this);
1250
+ if (!cd)
1251
+ throw std::runtime_error ("no connection allocated");
1252
+
1253
+ cd->SetConnectPending (false);
1254
+ cd->SetNotifyReadable (notify_readable);
1255
+ cd->SetNotifyWritable (notify_writable);
1256
+
1257
+ Add (cd);
1258
+
1259
+ const char *out = NULL;
1260
+ out = cd->GetBinding().c_str();
1261
+ if (out == NULL)
1262
+ closesocket (fd);
1263
+ return out;
1264
+ }
1265
+
1266
+ /************************
1267
+ EventMachine_t::DetachFD
1268
+ ************************/
1269
+
1270
+ int EventMachine_t::DetachFD (EventableDescriptor *ed)
1271
+ {
1272
+ if (!ed)
1273
+ throw std::runtime_error ("detaching bad descriptor");
1274
+
1275
+ #ifdef HAVE_EPOLL
1276
+ if (bEpoll) {
1277
+ if (ed->GetSocket() != INVALID_SOCKET) {
1278
+ assert (bEpoll); // wouldn't be in this method otherwise.
1279
+ assert (epfd != -1);
1280
+ int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
1281
+ // ENOENT or EBADF are not errors because the socket may be already closed when we get here.
1282
+ if (e && (errno != ENOENT) && (errno != EBADF)) {
1283
+ char buf [200];
1284
+ snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
1285
+ throw std::runtime_error (buf);
1286
+ }
1287
+ }
1288
+ }
1289
+ #endif
1290
+
1291
+ #ifdef HAVE_KQUEUE
1292
+ if (bKqueue) {
1293
+ struct kevent k;
1294
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_DELETE, 0, 0, ed);
1295
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
1296
+ assert (t == 0);
1297
+ }
1298
+ #endif
1299
+
1300
+ { // remove descriptor from lists
1301
+ int i, j;
1302
+ int nSockets = Descriptors.size();
1303
+ for (i=0, j=0; i < nSockets; i++) {
1304
+ EventableDescriptor *ted = Descriptors[i];
1305
+ assert (ted);
1306
+ if (ted != ed)
1307
+ Descriptors [j++] = ted;
1308
+ }
1309
+ while ((size_t)j < Descriptors.size())
1310
+ Descriptors.pop_back();
1311
+
1312
+ ModifiedDescriptors.erase (ed);
1313
+ }
1314
+
1315
+ int fd = ed->GetSocket();
1316
+
1317
+ // We depend on ~EventableDescriptor not calling close() if the socket is invalid
1318
+ ed->SetSocketInvalid();
1319
+ delete ed;
1320
+
1321
+ return fd;
1322
+ }
1323
+
1324
+ /************
1325
+ name2address
1326
+ ************/
1327
+
1328
+ struct sockaddr *name2address (const char *server, int port, int *family, int *bind_size)
1329
+ {
1330
+ // THIS IS NOT RE-ENTRANT OR THREADSAFE. Optimize for speed.
1331
+ // Check the more-common cases first.
1332
+ // Return NULL if no resolution.
1333
+
1334
+ static struct sockaddr_in in4;
1335
+ #ifndef __CYGWIN__
1336
+ static struct sockaddr_in6 in6;
1337
+ #endif
1338
+ struct hostent *hp;
1339
+
1340
+ if (!server || !*server)
1341
+ server = "0.0.0.0";
1342
+
1343
+ memset (&in4, 0, sizeof(in4));
1344
+ if ( (in4.sin_addr.s_addr = inet_addr (server)) != INADDR_NONE) {
1345
+ if (family)
1346
+ *family = AF_INET;
1347
+ if (bind_size)
1348
+ *bind_size = sizeof(in4);
1349
+ in4.sin_family = AF_INET;
1350
+ in4.sin_port = htons (port);
1351
+ return (struct sockaddr*)&in4;
1352
+ }
1353
+
1354
+ #if defined(OS_UNIX) && !defined(__CYGWIN__)
1355
+ memset (&in6, 0, sizeof(in6));
1356
+ if (inet_pton (AF_INET6, server, in6.sin6_addr.s6_addr) > 0) {
1357
+ if (family)
1358
+ *family = AF_INET6;
1359
+ if (bind_size)
1360
+ *bind_size = sizeof(in6);
1361
+ in6.sin6_family = AF_INET6;
1362
+ in6.sin6_port = htons (port);
1363
+ return (struct sockaddr*)&in6;
1364
+ }
1365
+ #endif
1366
+
1367
+ #ifdef OS_WIN32
1368
+ // TODO, must complete this branch. Windows doesn't have inet_pton.
1369
+ // A possible approach is to make a getaddrinfo call with the supplied
1370
+ // server address, constraining the hints to ipv6 and seeing if we
1371
+ // get any addresses.
1372
+ // For the time being, IPv6 addresses aren't supported on Windows.
1373
+ #endif
1374
+
1375
+ hp = gethostbyname ((char*)server); // Windows requires the cast.
1376
+ if (hp) {
1377
+ in4.sin_addr.s_addr = ((in_addr*)(hp->h_addr))->s_addr;
1378
+ if (family)
1379
+ *family = AF_INET;
1380
+ if (bind_size)
1381
+ *bind_size = sizeof(in4);
1382
+ in4.sin_family = AF_INET;
1383
+ in4.sin_port = htons (port);
1384
+ return (struct sockaddr*)&in4;
1385
+ }
1386
+
1387
+ return NULL;
1388
+ }
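/* For comparison, a reentrant way to do the same resolution with getaddrinfo, which
 * handles both IPv4 and IPv6 and returns heap-allocated results instead of static
 * buffers. A sketch of an alternative approach, not how EventMachine does it; the
 * caller must freeaddrinfo() the result.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <stdio.h>
#include <string.h>

struct addrinfo *ResolveAddress (const char *server, int port)
{
	char portstr [16];
	snprintf (portstr, sizeof(portstr), "%d", port);

	struct addrinfo hints;
	memset (&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;        // accept IPv4 or IPv6
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_PASSIVE;        // with a NULL server, give the wildcard address

	struct addrinfo *res = NULL;
	if (getaddrinfo ((server && *server) ? server : NULL, portstr, &hints, &res) != 0)
		return NULL;                    // no resolution
	return res;                         // ai_addr / ai_addrlen are ready for bind or connect
}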
1389
+
1390
+
1391
+ /*******************************
1392
+ EventMachine_t::CreateTcpServer
1393
+ *******************************/
1394
+
1395
+ const char *EventMachine_t::CreateTcpServer (const char *server, int port)
1396
+ {
1397
+ /* Create a TCP-acceptor (server) socket and add it to the event machine.
1398
+ * Return the binding of the new acceptor to the caller.
1399
+ * This binding will be referenced when the new acceptor sends events
1400
+ * to indicate accepted connections.
1401
+ */
1402
+
1403
+
1404
+ int family, bind_size;
1405
+ struct sockaddr *bind_here = name2address (server, port, &family, &bind_size);
1406
+ if (!bind_here)
1407
+ return NULL;
1408
+
1409
+ const char *output_binding = NULL;
1410
+
1411
+ //struct sockaddr_in sin;
1412
+
1413
+ int sd_accept = socket (family, SOCK_STREAM, 0);
1414
+ if (sd_accept == INVALID_SOCKET) {
1415
+ goto fail;
1416
+ }
1417
+
1418
+ /*
1419
+ memset (&sin, 0, sizeof(sin));
1420
+ sin.sin_family = AF_INET;
1421
+ sin.sin_addr.s_addr = INADDR_ANY;
1422
+ sin.sin_port = htons (port);
1423
+
1424
+ if (server && *server) {
1425
+ sin.sin_addr.s_addr = inet_addr (server);
1426
+ if (sin.sin_addr.s_addr == INADDR_NONE) {
1427
+ hostent *hp = gethostbyname ((char*)server); // Windows requires the cast.
1428
+ if (hp == NULL) {
1429
+ //__warning ("hostname not resolved: ", server);
1430
+ goto fail;
1431
+ }
1432
+ sin.sin_addr.s_addr = ((in_addr*)(hp->h_addr))->s_addr;
1433
+ }
1434
+ }
1435
+ */
1436
+
1437
+ { // set reuseaddr to improve performance on restarts.
1438
+ int oval = 1;
1439
+ if (setsockopt (sd_accept, SOL_SOCKET, SO_REUSEADDR, (char*)&oval, sizeof(oval)) < 0) {
1440
+ //__warning ("setsockopt failed while creating listener","");
1441
+ goto fail;
1442
+ }
1443
+ }
1444
+
1445
+ { // set CLOEXEC. Only makes sense on Unix
1446
+ #ifdef OS_UNIX
1447
+ int cloexec = fcntl (sd_accept, F_GETFD, 0);
1448
+ assert (cloexec >= 0);
1449
+ cloexec |= FD_CLOEXEC;
1450
+ fcntl (sd_accept, F_SETFD, cloexec);
1451
+ #endif
1452
+ }
1453
+
1454
+
1455
+ //if (bind (sd_accept, (struct sockaddr*)&sin, sizeof(sin))) {
1456
+ if (bind (sd_accept, bind_here, bind_size)) {
1457
+ //__warning ("binding failed");
1458
+ goto fail;
1459
+ }
1460
+
1461
+ if (listen (sd_accept, 100)) {
1462
+ //__warning ("listen failed");
1463
+ goto fail;
1464
+ }
1465
+
1466
+ {
1467
+ // Set the acceptor non-blocking.
1468
+ // THIS IS CRUCIALLY IMPORTANT because we read it in a select loop.
1469
+ if (!SetSocketNonblocking (sd_accept)) {
1470
+ //int val = fcntl (sd_accept, F_GETFL, 0);
1471
+ //if (fcntl (sd_accept, F_SETFL, val | O_NONBLOCK) == -1) {
1472
+ goto fail;
1473
+ }
1474
+ }
1475
+
1476
+ { // Looking good.
1477
+ AcceptorDescriptor *ad = new AcceptorDescriptor (sd_accept, this);
1478
+ if (!ad)
1479
+ throw std::runtime_error ("unable to allocate acceptor");
1480
+ Add (ad);
1481
+ output_binding = ad->GetBinding().c_str();
1482
+ }
1483
+
1484
+ return output_binding;
1485
+
1486
+ fail:
1487
+ if (sd_accept != INVALID_SOCKET)
1488
+ closesocket (sd_accept);
1489
+ return NULL;
1490
+ }
1491
+
1492
+
1493
+ /**********************************
1494
+ EventMachine_t::OpenDatagramSocket
1495
+ **********************************/
1496
+
1497
+ const char *EventMachine_t::OpenDatagramSocket (const char *address, int port)
1498
+ {
1499
+ const char *output_binding = NULL;
1500
+
1501
+ int sd = socket (AF_INET, SOCK_DGRAM, 0);
1502
+ if (sd == INVALID_SOCKET)
1503
+ goto fail;
1504
+ // from here on, early returns must close the socket!
1505
+
1506
+
1507
+ struct sockaddr_in sin;
1508
+ memset (&sin, 0, sizeof(sin));
1509
+ sin.sin_family = AF_INET;
1510
+ sin.sin_port = htons (port);
1511
+
1512
+
1513
+ if (address && *address) {
1514
+ sin.sin_addr.s_addr = inet_addr (address);
1515
+ if (sin.sin_addr.s_addr == INADDR_NONE) {
1516
+ hostent *hp = gethostbyname ((char*)address); // Windows requires the cast.
1517
+ if (hp == NULL)
1518
+ goto fail;
1519
+ sin.sin_addr.s_addr = ((in_addr*)(hp->h_addr))->s_addr;
1520
+ }
1521
+ }
1522
+ else
1523
+ sin.sin_addr.s_addr = htonl (INADDR_ANY);
1524
+
1525
+
1526
+ // Set the new socket nonblocking.
1527
+ {
1528
+ if (!SetSocketNonblocking (sd))
1529
+ //int val = fcntl (sd, F_GETFL, 0);
1530
+ //if (fcntl (sd, F_SETFL, val | O_NONBLOCK) == -1)
1531
+ goto fail;
1532
+ }
1533
+
1534
+ if (bind (sd, (struct sockaddr*)&sin, sizeof(sin)) != 0)
1535
+ goto fail;
1536
+
1537
+ { // Looking good.
1538
+ DatagramDescriptor *ds = new DatagramDescriptor (sd, this);
1539
+ if (!ds)
1540
+ throw std::runtime_error ("unable to allocate datagram-socket");
1541
+ Add (ds);
1542
+ output_binding = ds->GetBinding().c_str();
1543
+ }
1544
+
1545
+ return output_binding;
1546
+
1547
+ fail:
1548
+ if (sd != INVALID_SOCKET)
1549
+ closesocket (sd);
1550
+ return NULL;
1551
+ }
1552
+
+
+
+ /*******************
+ EventMachine_t::Add
+ *******************/
+
+ void EventMachine_t::Add (EventableDescriptor *ed)
+ {
+ if (!ed)
+ throw std::runtime_error ("added bad descriptor");
+ ed->SetEventCallback (EventCallback);
+ NewDescriptors.push_back (ed);
+ }
+
+
+ /*******************************
+ EventMachine_t::ArmKqueueWriter
+ *******************************/
+
+ void EventMachine_t::ArmKqueueWriter (EventableDescriptor *ed)
+ {
+ #ifdef HAVE_KQUEUE
+ if (bKqueue) {
+ if (!ed)
+ throw std::runtime_error ("added bad descriptor");
+ struct kevent k;
+ EV_SET (&k, ed->GetSocket(), EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, ed);
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
+ assert (t == 0);
+ }
+ #endif
+ }
+
+ /*******************************
+ EventMachine_t::ArmKqueueReader
+ *******************************/
+
+ void EventMachine_t::ArmKqueueReader (EventableDescriptor *ed)
+ {
+ #ifdef HAVE_KQUEUE
+ if (bKqueue) {
+ if (!ed)
+ throw std::runtime_error ("added bad descriptor");
+ struct kevent k;
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
+ assert (t == 0);
+ }
+ #endif
+ }
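
ArmKqueueWriter registers write interest with EV_ADD | EV_ONESHOT, so the filter fires once and is then removed automatically; it has to be re-armed before the next write wait. A standalone sketch of that registration (kq, fd and udata are placeholders; BSD/macOS only):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>

    // Arm a one-shot writability notification for fd on an existing kqueue.
    // The call only submits the change; it does not fetch any events.
    static void arm_write_once (int kq, int fd, void *udata)
    {
        struct kevent k;
        EV_SET (&k, fd, EVFILT_WRITE, EV_ADD | EV_ONESHOT, 0, 0, udata);
        kevent (kq, &k, 1, NULL, 0, NULL);
    }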
+
+ /**********************************
+ EventMachine_t::_AddNewDescriptors
+ **********************************/
+
+ void EventMachine_t::_AddNewDescriptors()
+ {
+ /* Avoid adding descriptors to the main descriptor list
+ * while we're actually traversing the list.
+ * Any descriptors that are added as a result of processing timers
+ * or acceptors should go on a temporary queue and then be added
+ * while we're not traversing the main list.
+ * Also, it (rarely) happens that a newly-created descriptor
+ * is immediately scheduled to close. It might be a good
+ * idea not to bother scheduling these for I/O but if
+ * we do that, we might bypass some important processing.
+ */
+
+ for (size_t i = 0; i < NewDescriptors.size(); i++) {
+ EventableDescriptor *ed = NewDescriptors[i];
+ if (ed == NULL)
+ throw std::runtime_error ("adding bad descriptor");
+
+ #if HAVE_EPOLL
+ if (bEpoll) {
+ assert (epfd != -1);
+ int e = epoll_ctl (epfd, EPOLL_CTL_ADD, ed->GetSocket(), ed->GetEpollEvent());
+ if (e) {
+ char buf [200];
+ snprintf (buf, sizeof(buf)-1, "unable to add new descriptor: %s", strerror(errno));
+ throw std::runtime_error (buf);
+ }
+ }
+ #endif
+
+ #if HAVE_KQUEUE
+ /*
+ if (bKqueue) {
+ // INCOMPLETE. Some descriptors don't want to be readable.
+ assert (kqfd != -1);
+ struct kevent k;
+ EV_SET (&k, ed->GetSocket(), EVFILT_READ, EV_ADD, 0, 0, ed);
+ int t = kevent (kqfd, &k, 1, NULL, 0, NULL);
+ assert (t == 0);
+ }
+ */
+ #endif
+
+ Descriptors.push_back (ed);
+ }
+ NewDescriptors.clear();
+ }
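
Under epoll, every new descriptor is registered exactly once with EPOLL_CTL_ADD, with a pointer stored in the event so the reactor can map readiness back to the owning object. A minimal sketch of that registration (epfd, fd and obj are placeholders, and the EPOLLIN interest is illustrative; the real interest set comes from GetEpollEvent()):

    #include <sys/epoll.h>
    #include <stdio.h>

    // Register fd with an existing epoll instance, stashing obj in the
    // event's data field. Returns 0 on success, -1 on failure.
    static int add_to_epoll (int epfd, int fd, void *obj)
    {
        struct epoll_event ev;
        ev.events = EPOLLIN;
        ev.data.ptr = obj;
        if (epoll_ctl (epfd, EPOLL_CTL_ADD, fd, &ev) == -1) {
            perror ("epoll_ctl ADD");
            return -1;
        }
        return 0;
    }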
+
+
+ /**********************************
+ EventMachine_t::_ModifyDescriptors
+ **********************************/
+
+ void EventMachine_t::_ModifyDescriptors()
+ {
+ /* For implementations which don't level check every descriptor on
+ * every pass through the machine, as select does.
+ * If we're not selecting, then descriptors need a way to signal to the
+ * machine that their readable or writable status has changed.
+ * That's what the ::Modify call is for. We do it this way to avoid
+ * modifying descriptors during the loop traversal, where it can easily
+ * happen that an object (like a UDP socket) gets data written on it by
+ * the application during #post_init. That would take place BEFORE the
+ * descriptor even gets added to the epoll descriptor, so the modify
+ * operation will crash messily.
+ * Another really messy possibility is for a descriptor to put itself
+ * on the Modified list, and then get deleted before we get here.
+ * Remember, deletes happen after the I/O traversal and before the
+ * next pass through here. So we have to make sure when we delete a
+ * descriptor to remove it from the Modified list.
+ */
+
+ #ifdef HAVE_EPOLL
+ if (bEpoll) {
+ set<EventableDescriptor*>::iterator i = ModifiedDescriptors.begin();
+ while (i != ModifiedDescriptors.end()) {
+ assert (*i);
+ _ModifyEpollEvent (*i);
+ ++i;
+ }
+ }
+ #endif
+
+ ModifiedDescriptors.clear();
+ }
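
For epoll, a Modify pass boils down to re-issuing each changed descriptor's interest set with EPOLL_CTL_MOD, which is only legal after the EPOLL_CTL_ADD above has already happened. A sketch of that update (names are placeholders; the EPOLLIN/EPOLLOUT combination is illustrative):

    #include <sys/epoll.h>

    // Replace fd's registered interest set on an existing epoll instance.
    static int update_epoll_interest (int epfd, int fd, void *obj, bool want_write)
    {
        struct epoll_event ev;
        ev.events = EPOLLIN | (want_write ? EPOLLOUT : 0);
        ev.data.ptr = obj;
        return epoll_ctl (epfd, EPOLL_CTL_MOD, fd, &ev);
    }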
+
+
+ /**********************
+ EventMachine_t::Modify
+ **********************/
+
+ void EventMachine_t::Modify (EventableDescriptor *ed)
+ {
+ if (!ed)
+ throw std::runtime_error ("modified bad descriptor");
+ ModifiedDescriptors.insert (ed);
+ }
+
+
+ /***********************************
+ EventMachine_t::_OpenFileForWriting
+ ***********************************/
+
+ const char *EventMachine_t::_OpenFileForWriting (const char *filename)
+ {
+ /*
+ * Return the binding-text of the newly-opened file,
+ * or NULL if there was a problem.
+ */
+
+ if (!filename || !*filename)
+ return NULL;
+
+ int fd = open (filename, O_CREAT|O_TRUNC|O_WRONLY|O_NONBLOCK, 0644);
+
+ FileStreamDescriptor *fsd = new FileStreamDescriptor (fd, this);
+ if (!fsd)
+ throw std::runtime_error ("no file-stream allocated");
+ Add (fsd);
+ return fsd->GetBinding().c_str();
+
+ }
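
_OpenFileForWriting hands the result of open(2) straight to FileStreamDescriptor without testing it, so a failed open would pass -1 along as if it were a valid descriptor. A sketch of the same open with the failure check made explicit (the helper name is illustrative):

    #include <fcntl.h>
    #include <stdio.h>

    // Open a file for writing with the same flags as above, but report
    // failure to the caller instead of passing -1 onward.
    static int open_for_writing_checked (const char *filename)
    {
        int fd = open (filename, O_CREAT|O_TRUNC|O_WRONLY|O_NONBLOCK, 0644);
        if (fd == -1)
            perror ("open");
        return fd; // -1 means failure
    }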
+
+
+ /**************************************
+ EventMachine_t::CreateUnixDomainServer
+ **************************************/
+
+ const char *EventMachine_t::CreateUnixDomainServer (const char *filename)
+ {
+ /* Create a UNIX-domain acceptor (server) socket and add it to the event machine.
+ * Return the binding of the new acceptor to the caller.
+ * This binding will be referenced when the new acceptor sends events
+ * to indicate accepted connections.
+ * THERE IS NO MEANINGFUL IMPLEMENTATION ON WINDOWS.
+ */
+
+ #ifdef OS_WIN32
+ throw std::runtime_error ("unix-domain server unavailable on this platform");
+ #endif
+
+ // The whole rest of this function is only compiled on Unix systems.
+ #ifdef OS_UNIX
+ const char *output_binding = NULL;
+
+ struct sockaddr_un s_sun;
+
+ int sd_accept = socket (AF_LOCAL, SOCK_STREAM, 0);
+ if (sd_accept == INVALID_SOCKET) {
+ goto fail;
+ }
+
+ if (!filename || !*filename)
+ goto fail;
+ unlink (filename);
+
+ bzero (&s_sun, sizeof(s_sun));
+ s_sun.sun_family = AF_LOCAL;
+ strncpy (s_sun.sun_path, filename, sizeof(s_sun.sun_path)-1);
+
+ // don't bother with reuseaddr for a local socket.
+
+ { // set CLOEXEC. Only makes sense on Unix
+ #ifdef OS_UNIX
+ int cloexec = fcntl (sd_accept, F_GETFD, 0);
+ assert (cloexec >= 0);
+ cloexec |= FD_CLOEXEC;
+ fcntl (sd_accept, F_SETFD, cloexec);
+ #endif
+ }
+
+ if (bind (sd_accept, (struct sockaddr*)&s_sun, sizeof(s_sun))) {
+ //__warning ("binding failed");
+ goto fail;
+ }
+
+ if (listen (sd_accept, 100)) {
+ //__warning ("listen failed");
+ goto fail;
+ }
+
+ {
+ // Set the acceptor non-blocking.
+ // THIS IS CRUCIALLY IMPORTANT because we read it in a select loop.
+ if (!SetSocketNonblocking (sd_accept)) {
+ //int val = fcntl (sd_accept, F_GETFL, 0);
+ //if (fcntl (sd_accept, F_SETFL, val | O_NONBLOCK) == -1) {
+ goto fail;
+ }
+ }
+
+ { // Looking good.
+ AcceptorDescriptor *ad = new AcceptorDescriptor (sd_accept, this);
+ if (!ad)
+ throw std::runtime_error ("unable to allocate acceptor");
+ Add (ad);
+ output_binding = ad->GetBinding().c_str();
+ }
+
+ return output_binding;
+
+ fail:
+ if (sd_accept != INVALID_SOCKET)
+ closesocket (sd_accept);
+ return NULL;
+ #endif // OS_UNIX
+ }
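
The acceptor is made non-blocking so that, when it polls readable, queued connections can be drained with repeated accept(2) calls until the call would block. A generic sketch of that drain loop (this is not EventMachine's AcceptorDescriptor code; the handoff is only a placeholder):

    #include <sys/socket.h>
    #include <unistd.h>
    #include <errno.h>

    // Accept everything currently queued on a non-blocking listener.
    // One readiness event can deliver several connections this way.
    static void drain_acceptor (int listener)
    {
        for (;;) {
            int conn = accept (listener, NULL, NULL);
            if (conn == -1) {
                // EAGAIN/EWOULDBLOCK: queue is empty; anything else is an error.
                break;
            }
            // A real reactor would wrap conn in a connection object here.
            close (conn);
        }
    }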
+
+
+ /*********************
+ EventMachine_t::Popen
+ *********************/
+ #if OBSOLETE
+ const char *EventMachine_t::Popen (const char *cmd, const char *mode)
+ {
+ #ifdef OS_WIN32
+ throw std::runtime_error ("popen is currently unavailable on this platform");
+ #endif
+
+ // The whole rest of this function is only compiled on Unix systems.
+ // Eventually we need this functionality (or a full-duplex equivalent) on Windows.
+ #ifdef OS_UNIX
+ const char *output_binding = NULL;
+
+ FILE *fp = popen (cmd, mode);
+ if (!fp)
+ return NULL;
+
+ // From here, all early returns must pclose the stream.
+
+ // According to the pipe(2) manpage, descriptors returned from pipe have both
+ // CLOEXEC and NONBLOCK clear. Do NOT set CLOEXEC. DO set nonblocking.
+ if (!SetSocketNonblocking (fileno (fp))) {
+ pclose (fp);
+ return NULL;
+ }
+
+ { // Looking good.
+ PipeDescriptor *pd = new PipeDescriptor (fp, this);
+ if (!pd)
+ throw std::runtime_error ("unable to allocate pipe");
+ Add (pd);
+ output_binding = pd->GetBinding().c_str();
+ }
+
+ return output_binding;
+ #endif
+ }
+ #endif // OBSOLETE
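
Although Popen is compiled out with #if OBSOLETE, the pattern it used is still instructive: open a pipe stream with popen(3), fetch its descriptor with fileno(3), and put that descriptor into non-blocking mode before handing it to the reactor. A sketch of that sequence (the helper name is illustrative):

    #include <stdio.h>
    #include <fcntl.h>

    // Open a read pipe to a child command and make its descriptor
    // non-blocking so it can be polled without stalling.
    static FILE *open_command_nonblocking (const char *cmd)
    {
        FILE *fp = popen (cmd, "r");
        if (!fp)
            return NULL;
        int fd = fileno (fp);
        int flags = fcntl (fd, F_GETFL, 0);
        if (flags == -1 || fcntl (fd, F_SETFL, flags | O_NONBLOCK) == -1) {
            pclose (fp);
            return NULL;
        }
        return fp;
    }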
+
+ /**************************
+ EventMachine_t::Socketpair
+ **************************/
+
+ const char *EventMachine_t::Socketpair (char * const*cmd_strings)
+ {
+ #ifdef OS_WIN32
+ throw std::runtime_error ("socketpair is currently unavailable on this platform");
+ #endif
+
+ // The whole rest of this function is only compiled on Unix systems.
+ // Eventually we need this functionality (or a full-duplex equivalent) on Windows.
+ #ifdef OS_UNIX
+ // Make sure the incoming array of command strings is sane.
+ if (!cmd_strings)
+ return NULL;
+ int j;
+ for (j=0; j < 100 && cmd_strings[j]; j++)
+ ;
+ if ((j==0) || (j==100))
+ return NULL;
+
+ const char *output_binding = NULL;
+
+ int sv[2];
+ if (socketpair (AF_LOCAL, SOCK_STREAM, 0, sv) < 0)
+ return NULL;
+ // from here, all early returns must close the pair of sockets.
+
+ // Set the parent side of the socketpair nonblocking.
+ // We don't care about the child side, and most child processes will expect their
+ // stdout to be blocking. Thanks to Duane Johnson and Bill Kelly for pointing this out.
+ // Obviously DON'T set CLOEXEC.
+ if (!SetSocketNonblocking (sv[0])) {
+ close (sv[0]);
+ close (sv[1]);
+ return NULL;
+ }
+
+ pid_t f = fork();
+ if (f > 0) {
+ close (sv[1]);
+ PipeDescriptor *pd = new PipeDescriptor (sv[0], f, this);
+ if (!pd)
+ throw std::runtime_error ("unable to allocate pipe");
+ Add (pd);
+ output_binding = pd->GetBinding().c_str();
+ }
+ else if (f == 0) {
+ close (sv[0]);
+ dup2 (sv[1], STDIN_FILENO);
+ close (sv[1]);
+ dup2 (STDIN_FILENO, STDOUT_FILENO);
+ execvp (cmd_strings[0], cmd_strings+1);
+ exit (-1); // end the child process if the exec doesn't work.
+ }
+ else
+ throw std::runtime_error ("no fork");
+
+ return output_binding;
+ #endif
+ }
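
Socketpair gives the child a single socket for both stdin and stdout, while the parent keeps the other end of the pair for full-duplex traffic with the running command. A self-contained sketch of that wiring (the command "sort" and the helper name are illustrative only):

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <unistd.h>

    // Fork a child whose stdin and stdout are one end of a socketpair.
    // Returns the parent's end of the pair, or -1 on failure.
    static int spawn_with_socketpair (pid_t *child_pid)
    {
        int sv[2];
        if (socketpair (AF_LOCAL, SOCK_STREAM, 0, sv) < 0)
            return -1;
        pid_t pid = fork();
        if (pid < 0) {
            close (sv[0]);
            close (sv[1]);
            return -1;
        }
        if (pid == 0) {                    // child
            close (sv[0]);
            dup2 (sv[1], STDIN_FILENO);    // stdin  <- socket
            dup2 (sv[1], STDOUT_FILENO);   // stdout -> same socket
            close (sv[1]);
            execlp ("sort", "sort", (char*)NULL);
            _exit (127);                   // exec failed
        }
        close (sv[1]);                     // parent keeps sv[0]
        *child_pid = pid;
        return sv[0];
    }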
+
+
+ /****************************
+ EventMachine_t::OpenKeyboard
+ ****************************/
+
+ const char *EventMachine_t::OpenKeyboard()
+ {
+ KeyboardDescriptor *kd = new KeyboardDescriptor (this);
+ if (!kd)
+ throw std::runtime_error ("no keyboard-object allocated");
+ Add (kd);
+ return kd->GetBinding().c_str();
+ }
+
+
+ /**********************************
+ EventMachine_t::GetConnectionCount
+ **********************************/
+
+ int EventMachine_t::GetConnectionCount ()
+ {
+ return Descriptors.size();
+ }
+
+
+ //#endif // OS_UNIX
+