eventmachine 0.12.10 → 1.0.0.beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +2 -0
- data/Gemfile +1 -0
- data/README +1 -2
- data/Rakefile +4 -76
- data/docs/DEFERRABLES +183 -70
- data/docs/KEYBOARD +15 -11
- data/docs/LIGHTWEIGHT_CONCURRENCY +84 -24
- data/docs/SMTP +3 -1
- data/docs/SPAWNED_PROCESSES +84 -25
- data/eventmachine.gemspec +19 -26
- data/examples/ex_tick_loop_array.rb +15 -0
- data/examples/ex_tick_loop_counter.rb +32 -0
- data/ext/binder.cpp +0 -1
- data/ext/cmain.cpp +36 -11
- data/ext/cplusplus.cpp +1 -1
- data/ext/ed.cpp +104 -113
- data/ext/ed.h +24 -30
- data/ext/em.cpp +347 -248
- data/ext/em.h +23 -16
- data/ext/eventmachine.h +5 -3
- data/ext/extconf.rb +5 -3
- data/ext/fastfilereader/extconf.rb +5 -3
- data/ext/fastfilereader/mapper.cpp +1 -1
- data/ext/kb.cpp +1 -3
- data/ext/pipe.cpp +9 -11
- data/ext/project.h +12 -4
- data/ext/rubymain.cpp +138 -89
- data/java/src/com/rubyeventmachine/EmReactor.java +1 -0
- data/lib/em/channel.rb +1 -1
- data/lib/em/connection.rb +6 -1
- data/lib/em/deferrable.rb +16 -2
- data/lib/em/iterator.rb +270 -0
- data/lib/em/protocols.rb +1 -1
- data/lib/em/protocols/httpclient.rb +5 -0
- data/lib/em/protocols/line_protocol.rb +28 -0
- data/lib/em/protocols/smtpserver.rb +101 -8
- data/lib/em/protocols/stomp.rb +1 -1
- data/lib/{pr_eventmachine.rb → em/pure_ruby.rb} +1 -11
- data/lib/em/queue.rb +1 -0
- data/lib/em/streamer.rb +1 -1
- data/lib/em/tick_loop.rb +85 -0
- data/lib/em/timers.rb +2 -1
- data/lib/em/version.rb +1 -1
- data/lib/eventmachine.rb +38 -84
- data/lib/jeventmachine.rb +1 -0
- data/tests/test_attach.rb +13 -3
- data/tests/test_basic.rb +60 -95
- data/tests/test_channel.rb +3 -2
- data/tests/test_defer.rb +14 -12
- data/tests/test_deferrable.rb +35 -0
- data/tests/test_file_watch.rb +1 -1
- data/tests/test_futures.rb +1 -1
- data/tests/test_hc.rb +40 -68
- data/tests/test_httpclient.rb +15 -6
- data/tests/test_httpclient2.rb +3 -2
- data/tests/test_inactivity_timeout.rb +3 -3
- data/tests/test_ltp.rb +13 -5
- data/tests/test_next_tick.rb +1 -1
- data/tests/test_pending_connect_timeout.rb +2 -2
- data/tests/test_process_watch.rb +36 -34
- data/tests/test_proxy_connection.rb +52 -0
- data/tests/test_pure.rb +10 -1
- data/tests/test_sasl.rb +1 -1
- data/tests/test_send_file.rb +16 -7
- data/tests/test_servers.rb +1 -1
- data/tests/test_tick_loop.rb +59 -0
- data/tests/test_timers.rb +13 -15
- metadata +45 -17
- data/web/whatis +0 -7
data/ext/ed.h
CHANGED
@@ -62,7 +62,7 @@ class EventableDescriptor: public Bindable_t
 bool IsCloseScheduled();
 virtual void HandleError(){ ScheduleClose (false); }

-void SetEventCallback (
+void SetEventCallback (EMCallback);

 virtual bool GetPeername (struct sockaddr*) {return false;}
 virtual bool GetSockname (struct sockaddr*) {return false;}
@@ -75,16 +75,16 @@ class EventableDescriptor: public Bindable_t
 virtual X509 *GetPeerCert() {return NULL;}
 #endif

-virtual
-virtual int SetCommInactivityTimeout (
-
-int SetPendingConnectTimeout (
+virtual uint64_t GetCommInactivityTimeout() {return 0;}
+virtual int SetCommInactivityTimeout (uint64_t value) {return 0;}
+uint64_t GetPendingConnectTimeout();
+int SetPendingConnectTimeout (uint64_t value);

 #ifdef HAVE_EPOLL
 struct epoll_event *GetEpollEvent() { return &EpollEvent; }
 #endif

-virtual void StartProxy(const unsigned long, const unsigned long);
+virtual void StartProxy(const unsigned long, const unsigned long, const unsigned long);
 virtual void StopProxy();
 virtual void SetProxiedFrom(EventableDescriptor*, const unsigned long);
 virtual int SendOutboundData(const char*,int){ return -1; }
@@ -92,6 +92,10 @@ class EventableDescriptor: public Bindable_t
 virtual bool Pause(){ return false; }
 virtual bool Resume(){ return false; }

+virtual int ReportErrorStatus(){ return 0; }
+virtual bool IsConnectPending(){ return false; }
+virtual uint64_t GetNextHeartbeat();
+
 private:
 bool bCloseNow;
 bool bCloseAfterWriting;
@@ -99,12 +103,14 @@ class EventableDescriptor: public Bindable_t
 protected:
 int MySocket;

-
+EMCallback EventCallback;
 void _GenericInboundDispatch(const char*, int);

-
+uint64_t CreatedAt;
 bool bCallbackUnbind;
 int UnbindReasonCode;
+
+unsigned long BytesToProxy;
 EventableDescriptor *ProxyTarget;
 EventableDescriptor *ProxiedFrom;

@@ -115,7 +121,10 @@ class EventableDescriptor: public Bindable_t
 #endif

 EventMachine_t *MyEventMachine;
-
+uint64_t PendingConnectTimeout;
+uint64_t InactivityTimeout;
+uint64_t LastActivity;
+uint64_t NextHeartbeat;
 };


@@ -149,10 +158,6 @@ class ConnectionDescriptor: public EventableDescriptor
 ConnectionDescriptor (int, EventMachine_t*);
 virtual ~ConnectionDescriptor();

-static int SendDataToConnection (const unsigned long, const char*, int);
-static void CloseConnection (const unsigned long, bool);
-static int ReportErrorStatus (const unsigned long);
-
 int SendOutboundData (const char*, int);

 void SetConnectPending (bool f);
@@ -195,9 +200,11 @@ class ConnectionDescriptor: public EventableDescriptor
 virtual bool GetPeername (struct sockaddr*);
 virtual bool GetSockname (struct sockaddr*);

-virtual
-virtual int SetCommInactivityTimeout (
+virtual uint64_t GetCommInactivityTimeout();
+virtual int SetCommInactivityTimeout (uint64_t value);

+virtual int ReportErrorStatus();
+virtual bool IsConnectPending(){ return bConnectPending; }

 protected:
 struct OutboundPage {
@@ -236,8 +243,6 @@ class ConnectionDescriptor: public EventableDescriptor
 #endif

 bool bIsServer;
-Int64 LastIo;
-int InactivityTimeout;

 private:
 void _UpdateEvents();
@@ -246,7 +251,6 @@ class ConnectionDescriptor: public EventableDescriptor
 void _DispatchInboundData (const char *buffer, int size);
 void _DispatchCiphertext();
 int _SendRawOutboundData (const char*, int);
-int _ReportErrorStatus();
 void _CheckHandshakeStatus();

 };
@@ -278,11 +282,8 @@ class DatagramDescriptor: public EventableDescriptor
 virtual bool GetPeername (struct sockaddr*);
 virtual bool GetSockname (struct sockaddr*);

-
-
-
-static int SendDatagram (const unsigned long, const char*, int, const char*, int);
-
+virtual uint64_t GetCommInactivityTimeout();
+virtual int SetCommInactivityTimeout (uint64_t value);

 protected:
 struct OutboundPage {
@@ -298,9 +299,6 @@ class DatagramDescriptor: public EventableDescriptor
 int OutboundDataSize;

 struct sockaddr_in ReturnAddress;
-
-Int64 LastIo;
-int InactivityTimeout;
 };


@@ -360,8 +358,6 @@ class PipeDescriptor: public EventableDescriptor

 protected:
 bool bReadAttemptedAfterClose;
-Int64 LastIo;
-int InactivityTimeout;

 deque<OutboundPage> OutboundPages;
 int OutboundDataSize;
@@ -393,8 +389,6 @@ class KeyboardDescriptor: public EventableDescriptor

 protected:
 bool bReadAttemptedAfterClose;
-Int64 LastIo;
-int InactivityTimeout;

 private:
 void _DispatchInboundData (const char *buffer, int size);
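Note on the ed.h changes above: the per-subclass Int64 LastIo / int InactivityTimeout pairs are replaced by uint64_t microsecond fields on the base EventableDescriptor (CreatedAt, LastActivity, InactivityTimeout, PendingConnectTimeout, NextHeartbeat), and each descriptor now reports its own deadline through GetNextHeartbeat(). A minimal sketch of that idea, using hypothetical simplified names rather than the real classes:

#include <cstdint>

// Sketch only: a descriptor that derives its next heartbeat deadline from
// absolute/relative microsecond counters, as the new header fields suggest.
struct SketchDescriptor {
    bool     connect_pending;          // still waiting for connect() to finish?
    uint64_t created_at;               // when the descriptor was created (usec)
    uint64_t last_activity;            // last read/write activity (usec)
    uint64_t inactivity_timeout;       // 0 means "no inactivity timeout"
    uint64_t pending_connect_timeout;  // applies only while connect is pending

    // Returns 0 when nothing needs scheduling, otherwise the absolute time
    // (in microseconds) at which the reactor should next check this object.
    uint64_t GetNextHeartbeat() const {
        if (connect_pending)
            return created_at + pending_connect_timeout;
        if (inactivity_timeout)
            return last_activity + inactivity_timeout;
        return 0;
    }
};

The reactor (em.cpp, below) can then keep descriptors ordered by this deadline instead of sweeping every connection on a fixed two-second heartbeat.
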
data/ext/em.cpp
CHANGED
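The hunks below replace the global gCurrentLoopTime and the fixed 50 ms / 10 ms poll intervals with a 64-bit microsecond clock (GetRealTime), a heartbeat multimap keyed by absolute deadline, and a _TimeTilNextEvent() helper that tells epoll/kqueue/select how long they may sleep. An illustrative sketch of that scheduling shape, with assumed simplified types (not the real reactor):

#include <cstdint>
#include <map>
#include <sys/time.h>

// Sketch only: deadlines (absolute microseconds) mapped to descriptor ids.
// A multimap keeps the earliest deadline at begin() in O(log n) per insert.
typedef std::multimap<uint64_t, int> DeadlineQueue;

// Convert "time until the earliest deadline" into a timeval for the poller.
// 'quantum' is the fallback wait when nothing is scheduled at all.
timeval TimeTilNextEvent (const DeadlineQueue &q, uint64_t now, timeval quantum)
{
    if (q.empty())
        return quantum;                    // nothing scheduled: default wait

    uint64_t next = q.begin()->first;      // earliest deadline in the queue
    timeval tv = {0, 0};
    if (next > now) {
        uint64_t gap = next - now;         // microseconds until it fires
        tv.tv_sec  = gap / 1000000;
        tv.tv_usec = gap % 1000000;
    }
    return tv;                             // overdue -> zero wait
}

With this shape the poller wakes exactly when the earliest timer or heartbeat is due, or after the default quantum when nothing is scheduled.
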
@@ -20,24 +20,12 @@ See the file COPYING for complete licensing information.
 // THIS ENTIRE FILE WILL EVENTUALLY BE FOR UNIX BUILDS ONLY.
 //#ifdef OS_UNIX

-
 #include "project.h"

-// Keep a global variable floating around
-// with the current loop time as set by the Event Machine.
-// This avoids the need for frequent expensive calls to time(NULL);
-Int64 gCurrentLoopTime;
-
-#ifdef OS_WIN32
-unsigned gTickCountTickover;
-unsigned gLastTickCount;
-#endif
-
-
 /* The numer of max outstanding timers was once a const enum defined in em.h.
  * Now we define it here so that users can change its value if necessary.
  */
-static unsigned int MaxOutstandingTimers =
+static unsigned int MaxOutstandingTimers = 100000;


 /* Internal helper to convert strings to internet addresses. IPv6-aware.
@@ -79,7 +67,7 @@ void EventMachine_t::SetMaxTimerCount (int count)
 EventMachine_t::EventMachine_t
 ******************************/

-EventMachine_t::EventMachine_t (
+EventMachine_t::EventMachine_t (EMCallback event_callback):
 HeartbeatInterval(2000000),
 EventCallback (event_callback),
 NextHeartbeatTime (0),
@@ -316,7 +304,7 @@ void EventMachine_t::_InitializeLoopBreaker()
 #ifdef OS_UNIX
 int fd[2];
 if (pipe (fd))
-throw std::runtime_error (
+throw std::runtime_error (strerror(errno));

 LoopBreakerWriter = fd[1];
 LoopBreakerReader = fd[0];
@@ -353,21 +341,76 @@ EventMachine_t::_UpdateTime

 void EventMachine_t::_UpdateTime()
 {
+MyCurrentLoopTime = GetRealTime();
+}
+
+/***************************
+EventMachine_t::GetRealTime
+***************************/
+
+uint64_t EventMachine_t::GetRealTime()
+{
+uint64_t current_time;
 #if defined(OS_UNIX)
 struct timeval tv;
 gettimeofday (&tv, NULL);
-
+current_time = (((uint64_t)(tv.tv_sec)) * 1000000LL) + ((uint64_t)(tv.tv_usec));

 #elif defined(OS_WIN32)
 unsigned tick = GetTickCount();
-if (tick <
-
-
-
+if (tick < LastTickCount)
+TickCountTickover += 1;
+LastTickCount = tick;
+current_time = ((uint64_t)TickCountTickover << 32) + (uint64_t)tick;

 #else
-
+current_time = (uint64_t)time(NULL) * 1000000LL;
 #endif
+return current_time;
+}
+
+/***********************************
+EventMachine_t::_DispatchHeartbeats
+***********************************/
+
+void EventMachine_t::_DispatchHeartbeats()
+{
+while (true) {
+multimap<uint64_t,EventableDescriptor*>::iterator i = Heartbeats.begin();
+if (i == Heartbeats.end())
+break;
+if (i->first > MyCurrentLoopTime)
+break;
+EventableDescriptor *ed = i->second;
+ed->Heartbeat();
+QueueHeartbeat(ed);
+}
+}
+
+/******************************
+EventMachine_t::QueueHeartbeat
+******************************/
+
+void EventMachine_t::QueueHeartbeat(EventableDescriptor *ed)
+{
+uint64_t heartbeat = ed->GetNextHeartbeat();
+
+if (heartbeat) {
+#ifndef HAVE_MAKE_PAIR
+Heartbeats.insert (multimap<uint64_t,EventableDescriptor*>::value_type (heartbeat, ed));
+#else
+Heartbeats.insert (make_pair (heartbeat, ed));
+#endif
+}
+}
+
+/******************************
+EventMachine_t::ClearHeartbeat
+******************************/
+
+void EventMachine_t::ClearHeartbeat(uint64_t key)
+{
+Heartbeats.erase(key);
 }

 /*******************
@@ -447,12 +490,16 @@ EventMachine_t::_RunOnce

 bool EventMachine_t::_RunOnce()
 {
+bool ret;
 if (bEpoll)
-
+ret = _RunEpollOnce();
 else if (bKqueue)
-
+ret = _RunKqueueOnce();
 else
-
+ret = _RunSelectOnce();
+_DispatchHeartbeats();
+_CleanupSockets();
+return ret;
 }


@@ -467,12 +514,31 @@ bool EventMachine_t::_RunEpollOnce()
 assert (epfd != -1);
 int s;

+timeval tv = _TimeTilNextEvent();
+
 #ifdef BUILD_FOR_RUBY
+int ret = 0;
+fd_set fdreads;
+
+FD_ZERO(&fdreads);
+FD_SET(epfd, &fdreads);
+
+if ((ret = rb_thread_select(epfd + 1, &fdreads, NULL, NULL, &tv)) < 1) {
+if (ret == -1) {
+assert(errno != EINVAL);
+assert(errno != EBADF);
+}
+return true;
+}
+
 TRAP_BEG;
-
-s = epoll_wait (epfd, epoll_events, MaxEvents, 50);
-#ifdef BUILD_FOR_RUBY
+s = epoll_wait (epfd, epoll_events, MaxEvents, 0);
 TRAP_END;
+#else
+int duration = 0;
+duration = duration + (tv.tv_sec * 1000);
+duration = duration + (tv.tv_usec / 1000);
+s = epoll_wait (epfd, epoll_events, MaxEvents, duration);
 #endif

 if (s > 0) {
@@ -501,72 +567,6 @@ bool EventMachine_t::_RunEpollOnce()
 EmSelect (0, NULL, NULL, NULL, &tv);
 }

-{ // cleanup dying sockets
-// vector::pop_back works in constant time.
-// TODO, rip this out and only delete the descriptors we know have died,
-// rather than traversing the whole list.
-// Modified 05Jan08 per suggestions by Chris Heath. It's possible that
-// an EventableDescriptor will have a descriptor value of -1. That will
-// happen if EventableDescriptor::Close was called on it. In that case,
-// don't call epoll_ctl to remove the socket's filters from the epoll set.
-// According to the epoll docs, this happens automatically when the
-// descriptor is closed anyway. This is different from the case where
-// the socket has already been closed but the descriptor in the ED object
-// hasn't yet been set to INVALID_SOCKET.
-int i, j;
-int nSockets = Descriptors.size();
-for (i=0, j=0; i < nSockets; i++) {
-EventableDescriptor *ed = Descriptors[i];
-assert (ed);
-if (ed->ShouldDelete()) {
-if (ed->GetSocket() != INVALID_SOCKET) {
-assert (bEpoll); // wouldn't be in this method otherwise.
-assert (epfd != -1);
-int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
-// ENOENT or EBADF are not errors because the socket may be already closed when we get here.
-if (e && (errno != ENOENT) && (errno != EBADF) && (errno != EPERM)) {
-char buf [200];
-snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
-throw std::runtime_error (buf);
-}
-}
-
-ModifiedDescriptors.erase (ed);
-delete ed;
-}
-else
-Descriptors [j++] = ed;
-}
-while ((size_t)j < Descriptors.size())
-Descriptors.pop_back();
-
-}
-
-// TODO, heartbeats.
-// Added 14Sep07, its absence was noted by Brian Candler. But the comment was here, indicated
-// that this got thought about and not done when EPOLL was originally written. Was there a reason
-// not to do it, or was it an oversight? Certainly, running a heartbeat on 50,000 connections every
-// two seconds can get to be a real bear, especially if all we're doing is timing out dead ones.
-// Maybe there's a better way to do this. (Or maybe it's not that expensive after all.)
-//
-{ // dispatch heartbeats
-if (gCurrentLoopTime >= NextHeartbeatTime) {
-NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;
-
-for (int i=0; i < Descriptors.size(); i++) {
-EventableDescriptor *ed = Descriptors[i];
-assert (ed);
-ed->Heartbeat();
-}
-}
-}
-
-#ifdef BUILD_FOR_RUBY
-if (!rb_thread_alone()) {
-rb_thread_schedule();
-}
-#endif
-
 return true;
 #else
 throw std::runtime_error ("epoll is not implemented on this platform");
@@ -582,15 +582,33 @@ bool EventMachine_t::_RunKqueueOnce()
 {
 #ifdef HAVE_KQUEUE
 assert (kqfd != -1);
-struct timespec ts = {0, 10000000}; // Too frequent. Use blocking_region
-
 int k;
+
+timeval tv = _TimeTilNextEvent();
+
 #ifdef BUILD_FOR_RUBY
+int ret = 0;
+fd_set fdreads;
+
+FD_ZERO(&fdreads);
+FD_SET(kqfd, &fdreads);
+
+if ((ret = rb_thread_select(kqfd + 1, &fdreads, NULL, NULL, &tv)) < 1) {
+if (ret == -1) {
+assert(errno != EINVAL);
+assert(errno != EBADF);
+}
+return true;
+}
+
 TRAP_BEG;
-
-k = kevent (kqfd, NULL, 0, Karray, MaxEvents, &ts);
-#ifdef BUILD_FOR_RUBY
+k = kevent (kqfd, NULL, 0, Karray, MaxEvents, NULL);
 TRAP_END;
+#else
+struct timespec ts;
+ts.tv_sec = tv.tv_sec;
+ts.tv_nsec = tv.tv_usec * 1000;
+k = kevent (kqfd, NULL, 0, Karray, MaxEvents, &ts);
 #endif

 struct kevent *ke = Karray;
@@ -627,42 +645,6 @@ bool EventMachine_t::_RunKqueueOnce()
 ++ke;
 }

-{ // cleanup dying sockets
-// vector::pop_back works in constant time.
-// TODO, rip this out and only delete the descriptors we know have died,
-// rather than traversing the whole list.
-// In kqueue, closing a descriptor automatically removes its event filters.
-
-int i, j;
-int nSockets = Descriptors.size();
-for (i=0, j=0; i < nSockets; i++) {
-EventableDescriptor *ed = Descriptors[i];
-assert (ed);
-if (ed->ShouldDelete()) {
-ModifiedDescriptors.erase (ed);
-delete ed;
-}
-else
-Descriptors [j++] = ed;
-}
-while ((size_t)j < Descriptors.size())
-Descriptors.pop_back();
-
-}
-
-{ // dispatch heartbeats
-if (gCurrentLoopTime >= NextHeartbeatTime) {
-NextHeartbeatTime = gCurrentLoopTime + HeartbeatInterval;
-
-for (unsigned int i=0; i < Descriptors.size(); i++) {
-EventableDescriptor *ed = Descriptors[i];
-assert (ed);
-ed->Heartbeat();
-}
-}
-}
-
-
 // TODO, replace this with rb_thread_blocking_region for 1.9 builds.
 #ifdef BUILD_FOR_RUBY
 if (!rb_thread_alone()) {
@@ -677,6 +659,93 @@ bool EventMachine_t::_RunKqueueOnce()
 }


+/*********************************
+EventMachine_t::_TimeTilNextEvent
+*********************************/
+
+timeval EventMachine_t::_TimeTilNextEvent()
+{
+uint64_t next_event = 0;
+
+if (!Heartbeats.empty()) {
+multimap<uint64_t,EventableDescriptor*>::iterator heartbeats = Heartbeats.begin();
+next_event = heartbeats->first;
+}
+
+if (!Timers.empty()) {
+multimap<uint64_t,Timer_t>::iterator timers = Timers.begin();
+if (next_event == 0 || timers->first < next_event)
+next_event = timers->first;
+}
+
+if (!NewDescriptors.empty() || !ModifiedDescriptors.empty()) {
+next_event = MyCurrentLoopTime;
+}
+
+timeval tv;
+
+if (next_event == 0) {
+tv = Quantum;
+} else {
+if (next_event > MyCurrentLoopTime) {
+uint64_t duration = next_event - MyCurrentLoopTime;
+tv.tv_sec = duration / 1000000;
+tv.tv_usec = duration % 1000000;
+} else {
+tv.tv_sec = tv.tv_usec = 0;
+}
+}
+
+return tv;
+}
+
+/*******************************
+EventMachine_t::_CleanupSockets
+*******************************/
+
+void EventMachine_t::_CleanupSockets()
+{
+// TODO, rip this out and only delete the descriptors we know have died,
+// rather than traversing the whole list.
+// Modified 05Jan08 per suggestions by Chris Heath. It's possible that
+// an EventableDescriptor will have a descriptor value of -1. That will
+// happen if EventableDescriptor::Close was called on it. In that case,
+// don't call epoll_ctl to remove the socket's filters from the epoll set.
+// According to the epoll docs, this happens automatically when the
+// descriptor is closed anyway. This is different from the case where
+// the socket has already been closed but the descriptor in the ED object
+// hasn't yet been set to INVALID_SOCKET.
+// In kqueue, closing a descriptor automatically removes its event filters.
+int i, j;
+int nSockets = Descriptors.size();
+for (i=0, j=0; i < nSockets; i++) {
+EventableDescriptor *ed = Descriptors[i];
+assert (ed);
+if (ed->ShouldDelete()) {
+#ifdef HAVE_EPOLL
+if (bEpoll) {
+assert (epfd != -1);
+if (ed->GetSocket() != INVALID_SOCKET) {
+int e = epoll_ctl (epfd, EPOLL_CTL_DEL, ed->GetSocket(), ed->GetEpollEvent());
+// ENOENT or EBADF are not errors because the socket may be already closed when we get here.
+if (e && (errno != ENOENT) && (errno != EBADF) && (errno != EPERM)) {
+char buf [200];
+snprintf (buf, sizeof(buf)-1, "unable to delete epoll event: %s", strerror(errno));
+throw std::runtime_error (buf);
+}
+}
+ModifiedDescriptors.erase(ed);
+}
+#endif
+delete ed;
+}
+else
+Descriptors [j++] = ed;
+}
+while ((size_t)j < Descriptors.size())
+Descriptors.pop_back();
+}
+
 /*********************************
 EventMachine_t::_ModifyEpollEvent
 *********************************/
@@ -829,7 +898,7 @@ bool EventMachine_t::_RunSelectOnce()
 { // read and write the sockets
 //timeval tv = {1, 0}; // Solaris fails if the microseconds member is >= 1000000.
 //timeval tv = Quantum;
-SelectData.tv =
+SelectData.tv = _TimeTilNextEvent();
 int s = SelectData._Select();
 //rb_thread_blocking_region(xxx,(void*)&SelectData,RUBY_UBF_IO,0);
 //int s = EmSelect (SelectData.maxsocket+1, &(SelectData.fdreads), &(SelectData.fdwrites), NULL, &(SelectData.tv));
@@ -865,48 +934,54 @@ bool EventMachine_t::_RunSelectOnce()
 _ReadLoopBreaker();
 }
 else if (s < 0) {
-
-
-
-
-
-
+switch (errno) {
+case EBADF:
+_CleanBadDescriptors();
+break;
+case EINVAL:
+throw std::runtime_error ("Somehow EM passed an invalid nfds or invalid timeout to select(2), please report this!");
+break;
+default:
+// select can fail on error in a handful of ways.
+// If this happens, then wait for a little while to avoid busy-looping.
+// If the error was EINTR, we probably caught SIGCHLD or something,
+// so keep the wait short.
+timeval tv = {0, ((errno == EINTR) ? 5 : 50) * 1000};
+EmSelect (0, NULL, NULL, NULL, &tv);
+}
 }
 }

+return true;
+}

-
-
-
+void EventMachine_t::_CleanBadDescriptors()
+{
+size_t i;

-
-
-
-
-}
-}
-}
+for (i = 0; i < Descriptors.size(); i++) {
+EventableDescriptor *ed = Descriptors[i];
+if (ed->ShouldDelete())
+continue;

-
-// vector::pop_back works in constant time.
-int i, j;
-int nSockets = Descriptors.size();
-for (i=0, j=0; i < nSockets; i++) {
-EventableDescriptor *ed = Descriptors[i];
-assert (ed);
-if (ed->ShouldDelete())
-delete ed;
-else
-Descriptors [j++] = ed;
-}
-while ((size_t)j < Descriptors.size())
-Descriptors.pop_back();
+int sd = ed->GetSocket();

-
+struct timeval tv;
+tv.tv_sec = 0;
+tv.tv_usec = 0;

-
-
+fd_set fds;
+FD_ZERO(&fds);
+FD_SET(sd, &fds);
+
+int ret = select(sd + 1, &fds, NULL, NULL, &tv);

+if (ret == -1) {
+if (errno == EBADF)
+ed->ScheduleClose(false);
+}
+}
+}

 /********************************
 EventMachine_t::_ReadLoopBreaker
@@ -921,7 +996,7 @@ void EventMachine_t::_ReadLoopBreaker()
 char buffer [1024];
 read (LoopBreakerReader, buffer, sizeof(buffer));
 if (EventCallback)
-(*EventCallback)(
+(*EventCallback)(0, EM_LOOPBREAK_SIGNAL, "", 0);
 }


@@ -939,13 +1014,13 @@ bool EventMachine_t::_RunTimers()
 // one that hasn't expired yet.

 while (true) {
-multimap<
+multimap<uint64_t,Timer_t>::iterator i = Timers.begin();
 if (i == Timers.end())
 break;
-if (i->first >
+if (i->first > MyCurrentLoopTime)
 break;
 if (EventCallback)
-(*EventCallback) (
+(*EventCallback) (0, EM_TIMER_FIRED, NULL, i->second.GetBinding());
 Timers.erase (i);
 }
 return true;
@@ -964,28 +1039,31 @@ const unsigned long EventMachine_t::InstallOneshotTimer (int milliseconds)
 // Don't use the global loop-time variable here, because we might
 // get called before the main event machine is running.

+// XXX This should be replaced with a call to _GetRealTime(), but I don't
+// understand if this is a bug or not? For OS_UNIX we multiply the argument
+// milliseconds by 1000, but for OS_WIN32 we do not? This needs to be sorted out.
 #ifdef OS_UNIX
 struct timeval tv;
 gettimeofday (&tv, NULL);
-
-fire_at += ((
+uint64_t fire_at = (((uint64_t)(tv.tv_sec)) * 1000000LL) + ((uint64_t)(tv.tv_usec));
+fire_at += ((uint64_t)milliseconds) * 1000LL;
 #endif

 #ifdef OS_WIN32
 unsigned tick = GetTickCount();
-if (tick <
-
-
+if (tick < LastTickCount)
+TickCountTickover += 1;
+LastTickCount = tick;

-
-fire_at += (
+uint64_t fire_at = ((uint64_t)TickCountTickover << 32) + (uint64_t)tick;
+fire_at += (uint64_t)milliseconds;
 #endif

 Timer_t t;
 #ifndef HAVE_MAKE_PAIR
-multimap<
+multimap<uint64_t,Timer_t>::iterator i = Timers.insert (multimap<uint64_t,Timer_t>::value_type (fire_at, t));
 #else
-multimap<
+multimap<uint64_t,Timer_t>::iterator i = Timers.insert (make_pair (fire_at, t));
 #endif
 return i->second.GetBinding();
 }
@@ -1030,8 +1108,11 @@ const unsigned long EventMachine_t::ConnectToServer (const char *bind_addr, int
 bind_as = *bind_as_ptr; // copy because name2address points to a static

 int sd = socket (family, SOCK_STREAM, 0);
-if (sd == INVALID_SOCKET)
-
+if (sd == INVALID_SOCKET) {
+char buf [200];
+snprintf (buf, sizeof(buf)-1, "unable to create new socket: %s", strerror(errno));
+throw std::runtime_error (buf);
+}

 /*
 sockaddr_in pin;
@@ -1064,7 +1145,7 @@ const unsigned long EventMachine_t::ConnectToServer (const char *bind_addr, int
 // From here on, ALL error returns must close the socket.
 // Set the new socket nonblocking.
 if (!SetSocketNonblocking (sd)) {
-
+close (sd);
 throw std::runtime_error ("unable to set socket as non-blocking");
 }
 // Disable slow-start (Nagle algorithm).
@@ -1077,16 +1158,16 @@ const unsigned long EventMachine_t::ConnectToServer (const char *bind_addr, int
 int bind_to_size, bind_to_family;
 struct sockaddr *bind_to = name2address (bind_addr, bind_port, &bind_to_family, &bind_to_size);
 if (!bind_to) {
-
+close (sd);
 throw std::runtime_error ("invalid bind address");
 }
 if (bind (sd, bind_to, bind_to_size) < 0) {
-
+close (sd);
 throw std::runtime_error ("couldn't bind to address");
 }
 }

-unsigned long out =
+unsigned long out = 0;

 #ifdef OS_UNIX
 //if (connect (sd, (sockaddr*)&pin, sizeof pin) == 0) {
@@ -1135,29 +1216,31 @@ const unsigned long EventMachine_t::ConnectToServer (const char *bind_addr, int
 Add (cd);
 out = cd->GetBinding();
 }
-else {
-/* This could be connection refused or some such thing.
-* We will come here on Linux if a localhost connection fails.
-* Changed 16Jul06: Originally this branch was a no-op, and
-* we'd drop down to the end of the method, close the socket,
-* and return NULL, which would cause the caller to GET A
-* FATAL EXCEPTION. Now we keep the socket around but schedule an
-* immediate close on it, so the caller will get a close-event
-* scheduled on it. This was only an issue for localhost connections
-* to non-listening ports. We may eventually need to revise this
-* revised behavior, in case it causes problems like making it hard
-* for people to know that a failure occurred.
-*/
-ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
-if (!cd)
-throw std::runtime_error ("no connection allocated");
-cd->ScheduleClose (false);
-Add (cd);
-out = cd->GetBinding();
-}
 }
 else {
-// The error from connect was something other then EINPROGRESS.
+// The error from connect was something other then EINPROGRESS (EHOSTDOWN, etc).
+// Fall through to the !out case below
+}
+
+if (!out) {
+/* This could be connection refused or some such thing.
+* We will come here on Linux if a localhost connection fails.
+* Changed 16Jul06: Originally this branch was a no-op, and
+* we'd drop down to the end of the method, close the socket,
+* and return NULL, which would cause the caller to GET A
+* FATAL EXCEPTION. Now we keep the socket around but schedule an
+* immediate close on it, so the caller will get a close-event
+* scheduled on it. This was only an issue for localhost connections
+* to non-listening ports. We may eventually need to revise this
+* revised behavior, in case it causes problems like making it hard
+* for people to know that a failure occurred.
+*/
+ConnectionDescriptor *cd = new ConnectionDescriptor (sd, this);
+if (!cd)
+throw std::runtime_error ("no connection allocated");
+cd->ScheduleClose (false);
+Add (cd);
+out = cd->GetBinding();
 }
 #endif

@@ -1190,7 +1273,7 @@ const unsigned long EventMachine_t::ConnectToServer (const char *bind_addr, int
 #endif

 if (!out)
-
+close (sd);
 return out;
 }

@@ -1215,10 +1298,10 @@ const unsigned long EventMachine_t::ConnectToUnixServer (const char *server)
 // The whole rest of this function is only compiled on Unix systems.
 #ifdef OS_UNIX

-unsigned long out =
+unsigned long out = 0;

 if (!server || !*server)
-return
+return 0;

 sockaddr_un pun;
 memset (&pun, 0, sizeof(pun));
@@ -1234,19 +1317,19 @@ const unsigned long EventMachine_t::ConnectToUnixServer (const char *server)

 int fd = socket (AF_LOCAL, SOCK_STREAM, 0);
 if (fd == INVALID_SOCKET)
-return
+return 0;

 // From here on, ALL error returns must close the socket.
 // NOTE: At this point, the socket is still a blocking socket.
 if (connect (fd, (struct sockaddr*)&pun, sizeof(pun)) != 0) {
-
-return
+close (fd);
+return 0;
 }

 // Set the newly-connected socket nonblocking.
 if (!SetSocketNonblocking (fd)) {
-
-return
+close (fd);
+return 0;
 }

 // Set up a connection descriptor and add it to the event-machine.
@@ -1261,7 +1344,7 @@ const unsigned long EventMachine_t::ConnectToUnixServer (const char *server)
 out = cd->GetBinding();

 if (!out)
-
+close (fd);

 return out;
 #endif
@@ -1450,9 +1533,9 @@ const unsigned long EventMachine_t::CreateTcpServer (const char *server, int por
 int family, bind_size;
 struct sockaddr *bind_here = name2address (server, port, &family, &bind_size);
 if (!bind_here)
-return
+return 0;

-unsigned long output_binding =
+unsigned long output_binding = 0;

 //struct sockaddr_in sin;

@@ -1531,8 +1614,8 @@ const unsigned long EventMachine_t::CreateTcpServer (const char *server, int por

 fail:
 if (sd_accept != INVALID_SOCKET)
-
-return
+close (sd_accept);
+return 0;
 }


@@ -1542,7 +1625,7 @@ EventMachine_t::OpenDatagramSocket

 const unsigned long EventMachine_t::OpenDatagramSocket (const char *address, int port)
 {
-unsigned long output_binding =
+unsigned long output_binding = 0;

 int sd = socket (AF_INET, SOCK_DGRAM, 0);
 if (sd == INVALID_SOCKET)
@@ -1592,8 +1675,8 @@ const unsigned long EventMachine_t::OpenDatagramSocket (const char *address, int

 fail:
 if (sd != INVALID_SOCKET)
-
-return
+close (sd);
+return 0;
 }


@@ -1702,6 +1785,7 @@ void EventMachine_t::_AddNewDescriptors()
 */
 #endif

+QueueHeartbeat(ed);
 Descriptors.push_back (ed);
 }
 NewDescriptors.clear();
@@ -1770,7 +1854,7 @@ const unsigned long EventMachine_t::_OpenFileForWriting (const char *filename)
 */

 if (!filename || !*filename)
-return
+return 0;

 int fd = open (filename, O_CREAT|O_TRUNC|O_WRONLY|O_NONBLOCK, 0644);

@@ -1802,7 +1886,7 @@ const unsigned long EventMachine_t::CreateUnixDomainServer (const char *filename

 // The whole rest of this function is only compiled on Unix systems.
 #ifdef OS_UNIX
-unsigned long output_binding =
+unsigned long output_binding = 0;

 struct sockaddr_un s_sun;

@@ -1862,8 +1946,8 @@ const unsigned long EventMachine_t::CreateUnixDomainServer (const char *filename

 fail:
 if (sd_accept != INVALID_SOCKET)
-
-return
+close (sd_accept);
+return 0;
 #endif // OS_UNIX
 }

@@ -1924,18 +2008,18 @@ const unsigned long EventMachine_t::Socketpair (char * const*cmd_strings)
 #ifdef OS_UNIX
 // Make sure the incoming array of command strings is sane.
 if (!cmd_strings)
-return
+return 0;
 int j;
-for (j=0; j <
+for (j=0; j < 2048 && cmd_strings[j]; j++)
 ;
-if ((j==0) || (j==
-return
+if ((j==0) || (j==2048))
+return 0;

-unsigned long output_binding =
+unsigned long output_binding = 0;

 int sv[2];
 if (socketpair (AF_LOCAL, SOCK_STREAM, 0, sv) < 0)
-return
+return 0;
 // from here, all early returns must close the pair of sockets.

 // Set the parent side of the socketpair nonblocking.
@@ -1945,7 +2029,7 @@ const unsigned long EventMachine_t::Socketpair (char * const*cmd_strings)
 if (!SetSocketNonblocking (sv[0])) {
 close (sv[0]);
 close (sv[1]);
-return
+return 0;
 }

 pid_t f = fork();
@@ -2005,7 +2089,7 @@ const unsigned long EventMachine_t::WatchPid (int pid)
 {
 #ifdef HAVE_KQUEUE
 if (!bKqueue)
-throw std::runtime_error("must enable kqueue");
+throw std::runtime_error("must enable kqueue (EM.kqueue=true) for pid watching support");

 struct kevent event;
 int kqres;
@@ -2094,7 +2178,8 @@ const unsigned long EventMachine_t::WatchFile (const char *fpath)
 Add(inotify);
 }

-wd = inotify_add_watch(inotify->GetSocket(), fpath,
+wd = inotify_add_watch(inotify->GetSocket(), fpath,
+IN_MODIFY | IN_DELETE_SELF | IN_MOVE_SELF | IN_CREATE | IN_DELETE | IN_MOVE) ;
 if (wd == -1) {
 char errbuf[300];
 sprintf(errbuf, "failed to open file %s for registering with inotify: %s", fpath, strerror(errno));
@@ -2104,7 +2189,7 @@ const unsigned long EventMachine_t::WatchFile (const char *fpath)

 #ifdef HAVE_KQUEUE
 if (!bKqueue)
-throw std::runtime_error("must enable kqueue");
+throw std::runtime_error("must enable kqueue (EM.kqueue=true) for file watching support");

 // With kqueue we have to open the file first and use the resulting fd to register for events
 wd = open(fpath, O_RDONLY);
@@ -2170,19 +2255,33 @@ EventMachine_t::_ReadInotify_Events
 void EventMachine_t::_ReadInotifyEvents()
 {
 #ifdef HAVE_INOTIFY
-
+char buffer[1024];

 assert(EventCallback);

-
-
-
-
-
-
-
-
-
+for (;;) {
+int returned = read(inotify->GetSocket(), buffer, sizeof(buffer));
+assert(!(returned == 0 || returned == -1 && errno == EINVAL));
+if (returned <= 0) {
+break;
+}
+int current = 0;
+while (current < returned) {
+struct inotify_event* event = (struct inotify_event*)(buffer+current);
+map<int, Bindable_t*>::const_iterator bindable = Files.find(event->wd);
+if (bindable != Files.end()) {
+if (event->mask & (IN_MODIFY | IN_CREATE | IN_DELETE | IN_MOVE)){
+(*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "modified", 8);
+}
+if (event->mask & IN_MOVE_SELF){
+(*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "moved", 5);
+}
+if (event->mask & IN_DELETE_SELF) {
+(*EventCallback)(bindable->second->GetBinding(), EM_CONNECTION_READ, "deleted", 7);
+UnwatchFile ((int)event->wd);
+}
+}
+current += sizeof(struct inotify_event) + event->len;
 }
 }
 #endif
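One more note on the select() path above: when select(2) fails with EBADF it does not report which descriptor is stale, so the new _CleanBadDescriptors() probes each socket individually with a zero-timeout select and schedules the offender for close. A self-contained sketch of that probing idea (hypothetical helper, not the library's API):

#include <cerrno>
#include <sys/select.h>
#include <vector>

// Returns the indexes of the file descriptors that fail an EBADF probe.
std::vector<size_t> FindBadDescriptors (const std::vector<int> &fds)
{
    std::vector<size_t> bad;
    for (size_t i = 0; i < fds.size(); i++) {
        fd_set set;
        FD_ZERO (&set);
        FD_SET (fds[i], &set);
        timeval tv = {0, 0};                  // poll, do not block
        if (select (fds[i] + 1, &set, NULL, NULL, &tv) == -1 && errno == EBADF)
            bad.push_back (i);                // this descriptor is the stale one
    }
    return bad;
}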