rev 0.2.2 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. data/CHANGES +11 -0
  2. data/README +10 -3
  3. data/Rakefile +2 -2
  4. data/examples/echo_client.rb +35 -0
  5. data/examples/google.rb +8 -0
  6. data/examples/httpclient.rb +35 -0
  7. data/ext/http11_client/Makefile +149 -0
  8. data/ext/http11_client/http11_client.bundle +0 -0
  9. data/ext/http11_client/http11_client.o +0 -0
  10. data/ext/http11_client/http11_parser.o +0 -0
  11. data/ext/http11_client/mkmf.log +12 -0
  12. data/ext/libev/Changes +114 -1
  13. data/ext/libev/ev.c +212 -97
  14. data/ext/libev/ev.h +13 -7
  15. data/ext/libev/ev_epoll.c +44 -11
  16. data/ext/libev/ev_kqueue.c +2 -2
  17. data/ext/libev/ev_poll.c +3 -1
  18. data/ext/libev/ev_port.c +4 -4
  19. data/ext/libev/ev_select.c +58 -19
  20. data/ext/libev/ev_vars.h +5 -1
  21. data/ext/libev/ev_win32.c +32 -3
  22. data/ext/libev/ev_wrap.h +4 -0
  23. data/ext/libev/test_libev_win32.c +123 -0
  24. data/ext/libev/update_ev_wrap +0 -0
  25. data/ext/rev/Makefile +149 -0
  26. data/ext/rev/ev_wrap.h +8 -0
  27. data/ext/rev/extconf.rb +17 -0
  28. data/ext/rev/libev.c +8 -0
  29. data/ext/rev/libev.o +0 -0
  30. data/ext/rev/mkmf.log +221 -0
  31. data/ext/rev/rev.h +8 -2
  32. data/ext/rev/rev_buffer.c +2 -3
  33. data/ext/rev/rev_buffer.o +0 -0
  34. data/ext/rev/rev_ext.bundle +0 -0
  35. data/ext/rev/rev_ext.c +4 -3
  36. data/ext/rev/rev_ext.o +0 -0
  37. data/ext/rev/rev_io_watcher.c +1 -2
  38. data/ext/rev/rev_io_watcher.o +0 -0
  39. data/ext/rev/rev_loop.c +4 -4
  40. data/ext/rev/rev_loop.o +0 -0
  41. data/ext/rev/rev_ssl.o +0 -0
  42. data/ext/rev/rev_timer_watcher.c +1 -2
  43. data/ext/rev/rev_timer_watcher.o +0 -0
  44. data/ext/rev/rev_utils.c +14 -0
  45. data/ext/rev/rev_utils.o +0 -0
  46. data/ext/rev/rev_watcher.c +7 -6
  47. data/ext/rev/rev_watcher.o +0 -0
  48. data/lib/http11_client.bundle +0 -0
  49. data/lib/rev.rb +1 -1
  50. data/lib/rev/dns_resolver.rb +29 -9
  51. data/lib/rev/io.rb +6 -4
  52. data/lib/rev/listener.rb +5 -1
  53. data/lib/rev/loop.rb +8 -4
  54. data/lib/rev/server.rb +3 -2
  55. data/lib/rev/socket.rb +14 -5
  56. data/lib/rev_ext.bundle +0 -0
  57. data/lib/revem.rb +210 -0
  58. data/rev.gemspec +2 -2
  59. metadata +29 -3
data/ext/libev/ev.h
@@ -1,7 +1,7 @@
  /*
  * libev native API header
  *
- * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -90,6 +90,10 @@ typedef double ev_tstamp;
  /*****************************************************************************/

  #if EV_STAT_ENABLE
+ # ifdef _WIN32
+ # include <time.h>
+ # include <sys/types.h>
+ # endif
  # include <sys/stat.h>
  #endif

@@ -163,9 +167,10 @@ struct ev_loop;

  /*
  * struct member types:
- * private: you can look at them, but not change them, and they might not mean anything to you.
- * ro: can be read anytime, but only changed when the watcher isn't active
- * rw: can be read and modified anytime, even when the watcher is active
+ * private: you may look at them, but not change them,
+ * and they might not mean anything to you.
+ * ro: can be read anytime, but only changed when the watcher isn't active.
+ * rw: can be read and modified anytime, even when the watcher is active.
  *
  * some internal details that might be helpful for debugging:
  *
@@ -174,7 +179,7 @@ struct ev_loop;
  * or the array index + 1 (most other watchers)
  * or simply 1 for watchers that aren't in some array.
  * pending is either 0, in which case the watcher isn't,
- * or the array index + 1 in the pendings array.
+ * or the array index + 1 in the pendings array.
  */

  /* shared by all watchers */
@@ -333,7 +338,7 @@ typedef struct ev_embed
  ev_timer timer; /* unused */
  ev_periodic periodic; /* unused */
  ev_idle idle; /* unused */
- ev_fork fork; /* unused */
+ ev_fork fork; /* private */
  } ev_embed;
  #endif

@@ -452,6 +457,7 @@ void ev_loop_fork (EV_P);
  void ev_loop_verify (EV_P);

  ev_tstamp ev_now (EV_P); /* time w.r.t. timers and the eventloop, updated after each poll */
+ void ev_now_update (EV_P);

  #else

@@ -510,7 +516,7 @@ void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval); /* sleep at lea
  void ev_ref (EV_P);
  void ev_unref (EV_P);

- /* convinience function, wait for a single event, without registering an event watcher */
+ /* convenience function, wait for a single event, without registering an event watcher */
  /* if timeout is < 0, do wait indefinitely */
  void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg);
  #endif
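
The ev.h hunk above newly exports ev_now_update () next to ev_now (). A minimal sketch of why that pairing is useful (not part of the diff; the 3-second sleep merely stands in for any long-running callback): refreshing the loop's cached timestamp before arming a relative timer makes the timeout count from the real "now" rather than from the moment the last poll returned.

#include <ev.h>
#include <unistd.h>

static ev_timer work_w;

static void
work_cb (EV_P_ ev_timer *w, int revents)
{
  sleep (3);                 /* simulate a callback that blocks for a while */

  ev_now_update (EV_A);      /* resync ev_now () with the current time */
  ev_timer_set (w, 5., 0.);  /* 5 seconds measured from the refreshed timestamp */
  ev_timer_start (EV_A_ w);
}

int
main (void)
{
  struct ev_loop *loop = ev_default_loop (0);

  ev_timer_init (&work_w, work_cb, 1., 0.);
  ev_timer_start (loop, &work_w);

  ev_loop (loop, 0);         /* this libev generation spells it ev_loop (), not ev_run () */
  return 0;
}
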
data/ext/libev/ev_epoll.c
@@ -52,7 +52,9 @@
  *
  * lots of "weird code" and complication handling in this file is due
  * to these design problems with epoll, as we try very hard to avoid
- * epoll_ctl syscalls for common usage patterns.
+ * epoll_ctl syscalls for common usage patterns and handle the breakage
+ * ensuing from receiving events for closed and otherwise long gone
+ * file descriptors.
  */

  #include <sys/epoll.h>
@@ -61,17 +63,25 @@ static void
  epoll_modify (EV_P_ int fd, int oev, int nev)
  {
  struct epoll_event ev;
+ unsigned char oldmask;

  /*
  * we handle EPOLL_CTL_DEL by ignoring it here
  * on the assumption that the fd is gone anyways
  * if that is wrong, we have to handle the spurious
  * event in epoll_poll.
+ * if the fd is added again, we try to ADD it, and, if that
+ * fails, we assume it still has the same eventmask.
  */
  if (!nev)
  return;

- ev.data.u64 = fd; /* use u64 to fully initialise the struct, for nicer strace etc. */
+ oldmask = anfds [fd].emask;
+ anfds [fd].emask = nev;
+
+ /* store the generation counter in the upper 32 bits, the fd in the lower 32 bits */
+ ev.data.u64 = (uint64_t)(uint32_t)fd
+ | ((uint64_t)(uint32_t)++anfds [fd].egen << 32);
  ev.events = (nev & EV_READ ? EPOLLIN : 0)
  | (nev & EV_WRITE ? EPOLLOUT : 0);

@@ -80,21 +90,29 @@ epoll_modify (EV_P_ int fd, int oev, int nev)

  if (expect_true (errno == ENOENT))
  {
- /* on ENOENT the fd went away, so try to do the right thing */
+ /* if ENOENT then the fd went away, so try to do the right thing */
  if (!nev)
- return;
+ goto dec_egen;

  if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
  return;
  }
  else if (expect_true (errno == EEXIST))
  {
- /* on EEXIST we ignored a previous DEL */
+ /* EEXIST means we ignored a previous DEL, but the fd is still active */
+ /* if the kernel mask is the same as the new mask, we assume it hasn't changed */
+ if (oldmask == nev)
+ goto dec_egen;
+
  if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
  return;
  }

  fd_kill (EV_A_ fd);
+
+ dec_egen:
+ /* we didn't successfully call epoll_ctl, so decrement the generation counter again */
+ --anfds [fd].egen;
  }

  static void
@@ -106,7 +124,7 @@ epoll_poll (EV_P_ ev_tstamp timeout)
  if (expect_false (eventcnt < 0))
  {
  if (errno != EINTR)
- syserr ("(libev) epoll_wait");
+ ev_syserr ("(libev) epoll_wait");

  return;
  }
@@ -115,18 +133,33 @@ epoll_poll (EV_P_ ev_tstamp timeout)
  {
  struct epoll_event *ev = epoll_events + i;

- int fd = ev->data.u64;
+ int fd = (uint32_t)ev->data.u64; /* mask out the lower 32 bits */
+ int want = anfds [fd].events;
  int got = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
  | (ev->events & (EPOLLIN | EPOLLERR | EPOLLHUP) ? EV_READ : 0);
- int want = anfds [fd].events;
+
+ /* check for spurious notification */
+ if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
+ {
+ /* recreate kernel state */
+ postfork = 1;
+ continue;
+ }

  if (expect_false (got & ~want))
  {
+ anfds [fd].emask = want;
+
  /* we received an event but are not interested in it, try mod or del */
+ /* I don't think we ever need MOD, but let's handle it anyways */
  ev->events = (want & EV_READ ? EPOLLIN : 0)
  | (want & EV_WRITE ? EPOLLOUT : 0);

- epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev);
+ if (epoll_ctl (backend_fd, want ? EPOLL_CTL_MOD : EPOLL_CTL_DEL, fd, ev))
+ {
+ postfork = 1; /* an error occured, recreate kernel state */
+ continue;
+ }
  }

  fd_event (EV_A_ fd, got);
@@ -155,7 +188,7 @@ epoll_init (EV_P_ int flags)
  backend_modify = epoll_modify;
  backend_poll = epoll_poll;

- epoll_eventmax = 64; /* intiial number of events receivable per poll */
+ epoll_eventmax = 64; /* initial number of events receivable per poll */
  epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);

  return EVBACKEND_EPOLL;
@@ -173,7 +206,7 @@ epoll_fork (EV_P)
  close (backend_fd);

  while ((backend_fd = epoll_create (256)) < 0)
- syserr ("(libev) epoll_create");
+ ev_syserr ("(libev) epoll_create");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

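
A standalone sketch of the generation-counter scheme the epoll hunks above introduce (not part of the diff; fd_slot, pack_fd and is_stale are illustrative names): the fd is stored in the lower 32 bits of epoll's 64-bit user data and a per-fd counter in the upper 32 bits, so an event whose counter no longer matches the current one can be recognised as belonging to an earlier incarnation of that fd and treated as spurious.

#include <stdint.h>

struct fd_slot { uint32_t egen; };   /* stands in for the egen/emask bookkeeping in libev's anfds [] */

/* pack the fd and a freshly bumped generation counter into the u64 handed to epoll_ctl */
static uint64_t
pack_fd (struct fd_slot *slot, int fd)
{
  return (uint64_t)(uint32_t)fd
       | ((uint64_t)(uint32_t)++slot->egen << 32);
}

/* an event is stale if the generation it carries no longer matches the slot's counter */
static int
is_stale (const struct fd_slot *slot, uint64_t u64)
{
  return slot->egen != (uint32_t)(u64 >> 32);
}
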
data/ext/libev/ev_kqueue.c
@@ -101,7 +101,7 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
  if (expect_false (res < 0))
  {
  if (errno != EINTR)
- syserr ("(libev) kevent");
+ ev_syserr ("(libev) kevent");

  return;
  }
@@ -184,7 +184,7 @@ kqueue_fork (EV_P)
  close (backend_fd);

  while ((backend_fd = kqueue ()) < 0)
- syserr ("(libev) kqueue");
+ ev_syserr ("(libev) kqueue");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

data/ext/libev/ev_poll.c
@@ -42,6 +42,8 @@
  void inline_size
  pollidx_init (int *base, int count)
  {
+ /* consider using memset (.., -1, ...), which is pratically guarenteed
+ * to work on all systems implementing poll */
  while (count--)
  *base++ = -1;
  }
@@ -96,7 +98,7 @@ poll_poll (EV_P_ ev_tstamp timeout)
  else if (errno == ENOMEM && !syserr_cb)
  fd_enomem (EV_A);
  else if (errno != EINTR)
- syserr ("(libev) poll");
+ ev_syserr ("(libev) poll");
  }
  else
  for (p = polls; res; ++p)
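
The new comment in pollidx_init points at a memset-based alternative to the loop; a small sketch of it (not part of the diff, and valid only where the all-bits-set pattern of -1 survives a byte-wise fill, which is practically every system that implements poll):

#include <string.h>

static void
pollidx_init_memset (int *base, int count)
{
  /* writing 0xff into every byte makes each int read back as -1 */
  memset (base, -1, count * sizeof (int));
}
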
data/ext/libev/ev_port.c
@@ -59,7 +59,7 @@ port_associate_and_check (EV_P_ int fd, int ev)
  if (errno == EBADFD)
  fd_kill (EV_A_ fd);
  else
- syserr ("(libev) port_associate");
+ ev_syserr ("(libev) port_associate");
  }
  }

@@ -89,10 +89,10 @@ port_poll (EV_P_ ev_tstamp timeout)
  ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9;
  res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts);

- if (res < 0)
+ if (res == -1)
  {
  if (errno != EINTR && errno != ETIME)
- syserr ("(libev) port_getn");
+ ev_syserr ("(libev) port_getn");

  return;
  }
@@ -153,7 +153,7 @@ port_fork (EV_P)
  close (backend_fd);

  while ((backend_fd = port_create ()) < 0)
- syserr ("(libev) port");
+ ev_syserr ("(libev) port");

  fcntl (backend_fd, F_SETFD, FD_CLOEXEC);

data/ext/libev/ev_select.c
@@ -1,7 +1,7 @@
  /*
  * libev select fd activity backend
  *
- * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -54,6 +54,8 @@
  #if EV_SELECT_IS_WINSOCKET
  # undef EV_SELECT_USE_FD_SET
  # define EV_SELECT_USE_FD_SET 1
+ # undef NFDBITS
+ # define NFDBITS 0
  #endif

  #if !EV_SELECT_USE_FD_SET
@@ -77,15 +79,26 @@ select_modify (EV_P_ int fd, int oev, int nev)
  int handle = fd;
  #endif

- if (nev & EV_READ)
- FD_SET (handle, (fd_set *)vec_ri);
- else
- FD_CLR (handle, (fd_set *)vec_ri);
+ assert (("libev: fd >= FD_SETSIZE passed to fd_set-based select backend", fd < FD_SETSIZE));

- if (nev & EV_WRITE)
- FD_SET (handle, (fd_set *)vec_wi);
- else
- FD_CLR (handle, (fd_set *)vec_wi);
+ /* FD_SET is broken on windows (it adds the fd to a set twice or more,
+ * which eventually leads to overflows). Need to call it only on changes.
+ */
+ #if EV_SELECT_IS_WINSOCKET
+ if ((oev ^ nev) & EV_READ)
+ #endif
+ if (nev & EV_READ)
+ FD_SET (handle, (fd_set *)vec_ri);
+ else
+ FD_CLR (handle, (fd_set *)vec_ri);
+
+ #if EV_SELECT_IS_WINSOCKET
+ if ((oev ^ nev) & EV_WRITE)
+ #endif
+ if (nev & EV_WRITE)
+ FD_SET (handle, (fd_set *)vec_wi);
+ else
+ FD_CLR (handle, (fd_set *)vec_wi);

  #else

@@ -100,6 +113,9 @@ select_modify (EV_P_ int fd, int oev, int nev)
  vec_ro = ev_realloc (vec_ro, new_max * NFDBYTES); /* could free/malloc */
  vec_wi = ev_realloc (vec_wi, new_max * NFDBYTES);
  vec_wo = ev_realloc (vec_wo, new_max * NFDBYTES); /* could free/malloc */
+ #ifdef _WIN32
+ vec_eo = ev_realloc (vec_eo, new_max * NFDBYTES); /* could free/malloc */
+ #endif

  for (; vec_max < new_max; ++vec_max)
  ((fd_mask *)vec_ri) [vec_max] =
@@ -122,19 +138,34 @@ select_poll (EV_P_ ev_tstamp timeout)
  {
  struct timeval tv;
  int res;
+ int fd_setsize;
+
+ tv.tv_sec = (long)timeout;
+ tv.tv_usec = (long)((timeout - (ev_tstamp)tv.tv_sec) * 1e6);

  #if EV_SELECT_USE_FD_SET
- memcpy (vec_ro, vec_ri, sizeof (fd_set));
- memcpy (vec_wo, vec_wi, sizeof (fd_set));
+ fd_setsize = sizeof (fd_set);
  #else
- memcpy (vec_ro, vec_ri, vec_max * NFDBYTES);
- memcpy (vec_wo, vec_wi, vec_max * NFDBYTES);
+ fd_setsize = vec_max * NFDBYTES;
  #endif

- tv.tv_sec = (long)timeout;
- tv.tv_usec = (long)((timeout - (ev_tstamp)tv.tv_sec) * 1e6);
-
+ memcpy (vec_ro, vec_ri, fd_setsize);
+ memcpy (vec_wo, vec_wi, fd_setsize);
+
+ #ifdef _WIN32
+ /* pass in the write set as except set.
+ * the idea behind this is to work around a windows bug that causes
+ * errors to be reported as an exception and not by setting
+ * the writable bit. this is so uncontrollably lame.
+ */
+ memcpy (vec_eo, vec_wi, fd_setsize);
+ res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, (fd_set *)vec_eo, &tv);
+ #elif EV_SELECT_USE_FD_SET
+ fd_setsize = anfdmax < FD_SETSIZE ? anfdmax : FD_SETSIZE;
+ res = select (fd_setsize, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv);
+ #else
  res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv);
+ #endif

  if (expect_false (res < 0))
  {
@@ -153,7 +184,7 @@ select_poll (EV_P_ ev_tstamp timeout)
  #ifdef _WIN32
  /* select on windows errornously returns EINVAL when no fd sets have been
  * provided (this is documented). what microsoft doesn't tell you that this bug
- * exists even when the fd sets are provided, so we have to check for this bug
+ * exists even when the fd sets _are_ provided, so we have to check for this bug
  * here and emulate by sleeping manually.
  * we also get EINVAL when the timeout is invalid, but we ignore this case here
  * and assume that EINVAL always means: you have to wait manually.
@@ -170,7 +201,7 @@ select_poll (EV_P_ ev_tstamp timeout)
  else if (errno == ENOMEM && !syserr_cb)
  fd_enomem (EV_A);
  else if (errno != EINTR)
- syserr ("(libev) select");
+ ev_syserr ("(libev) select");

  return;
  }
@@ -192,6 +223,9 @@ select_poll (EV_P_ ev_tstamp timeout)

  if (FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ;
  if (FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE;
+ #ifdef _WIN32
+ if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE;
+ #endif

  if (expect_true (events))
  fd_event (EV_A_ fd, events);
@@ -206,6 +240,9 @@ select_poll (EV_P_ ev_tstamp timeout)
  {
  fd_mask word_r = ((fd_mask *)vec_ro) [word];
  fd_mask word_w = ((fd_mask *)vec_wo) [word];
+ #ifdef _WIN32
+ word_w |= ((fd_mask *)vec_eo) [word];
+ #endif

  if (word_r || word_w)
  for (bit = NFDBITS; bit--; )
@@ -233,11 +270,13 @@ select_init (EV_P_ int flags)
  backend_poll = select_poll;

  #if EV_SELECT_USE_FD_SET
- vec_max = FD_SETSIZE / 32;
  vec_ri = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_ri);
  vec_ro = ev_malloc (sizeof (fd_set));
  vec_wi = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_wi);
  vec_wo = ev_malloc (sizeof (fd_set));
+ #ifdef _WIN32
+ vec_eo = ev_malloc (sizeof (fd_set));
+ #endif
  #else
  vec_max = 0;
  vec_ri = 0;
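
Two of the select changes above are Windows workarounds: winsock's FD_SET appends the handle to an array without checking for duplicates, so the backend now touches the set only when the interest mask actually changed, and the write set is additionally passed as the except set because winsock reports some errors there instead of setting the writable bit. A small sketch of the change-only update (not part of the diff; MY_EV_READ and update_read_interest are illustrative names):

#ifdef _WIN32
# include <winsock2.h>
#else
# include <sys/select.h>
#endif

enum { MY_EV_READ = 0x01 };   /* stand-in for libev's EV_READ flag */

static void
update_read_interest (fd_set *set, int handle, int oev, int nev)
{
  if ((oev ^ nev) & MY_EV_READ)    /* did the READ interest change between old and new mask? */
    {
      if (nev & MY_EV_READ)
        FD_SET (handle, set);      /* now interested: add the handle exactly once */
      else
        FD_CLR (handle, set);      /* no longer interested: remove it */
    }
}
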
data/ext/libev/ev_vars.h
@@ -65,13 +65,16 @@ VARx(ev_io, pipeev)
  VARx(pid_t, curpid)
  #endif

- VARx(int, postfork) /* true if we need to recreate kernel state after fork */
+ VARx(char, postfork) /* true if we need to recreate kernel state after fork */

  #if EV_USE_SELECT || EV_GENWRAP
  VARx(void *, vec_ri)
  VARx(void *, vec_ro)
  VARx(void *, vec_wi)
  VARx(void *, vec_wo)
+ #if defined(_WIN32) || EV_GENWRAP
+ VARx(void *, vec_eo)
+ #endif
  VARx(int, vec_max)
  #endif

@@ -153,6 +156,7 @@ VARx(int, asynccnt)
  #if EV_USE_INOTIFY || EV_GENWRAP
  VARx(int, fs_fd)
  VARx(ev_io, fs_w)
+ VARx(char, fs_2625) /* whether we are running in linux 2.6.25 or newer */
  VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE])
  #endif

data/ext/libev/ev_win32.c
@@ -39,10 +39,13 @@

  #ifdef _WIN32

+ /* timeb.h is actually xsi legacy functionality */
  #include <sys/timeb.h>

  /* note: the comment below could not be substantiated, but what would I care */
  /* MSDN says this is required to handle SIGFPE */
+ /* my wild guess would be that using something floating-pointy is required */
+ /* for the crt to do something about it */
  volatile double SIGFPE_REQ = 0.0f;

  /* oh, the humanity! */
@@ -51,6 +54,8 @@ ev_pipe (int filedes [2])
  {
  struct sockaddr_in addr = { 0 };
  int addr_size = sizeof (addr);
+ struct sockaddr_in adr2;
+ int adr2_size;
  SOCKET listener;
  SOCKET sock [2] = { -1, -1 };

@@ -64,7 +69,7 @@ ev_pipe (int filedes [2])
  if (bind (listener, (struct sockaddr *)&addr, addr_size))
  goto fail;

- if (getsockname(listener, (struct sockaddr *)&addr, &addr_size))
+ if (getsockname (listener, (struct sockaddr *)&addr, &addr_size))
  goto fail;

  if (listen (listener, 1))
@@ -73,10 +78,34 @@ ev_pipe (int filedes [2])
  if ((sock [0] = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
  goto fail;

- if (connect (sock[0], (struct sockaddr *)&addr, addr_size))
+ if (connect (sock [0], (struct sockaddr *)&addr, addr_size))
  goto fail;

- if ((sock[1] = accept (listener, 0, 0)) < 0)
+ if ((sock [1] = accept (listener, 0, 0)) < 0)
+ goto fail;
+
+ /* windows vista returns fantasy port numbers for sockets:
+ * example for two interconnected tcp sockets:
+ *
+ * (Socket::unpack_sockaddr_in getsockname $sock0)[0] == 53364
+ * (Socket::unpack_sockaddr_in getpeername $sock0)[0] == 53363
+ * (Socket::unpack_sockaddr_in getsockname $sock1)[0] == 53363
+ * (Socket::unpack_sockaddr_in getpeername $sock1)[0] == 53365
+ *
+ * wow! tridirectional sockets!
+ *
+ * this way of checking ports seems to work:
+ */
+ if (getpeername (sock [0], (struct sockaddr *)&addr, &addr_size))
+ goto fail;
+
+ if (getsockname (sock [1], (struct sockaddr *)&adr2, &adr2_size))
+ goto fail;
+
+ errno = WSAEINVAL;
+ if (addr_size != adr2_size
+ || addr.sin_addr.s_addr != adr2.sin_addr.s_addr /* just to be sure, I mean, it's windows */
+ || addr.sin_port != adr2.sin_port)
  goto fail;

  closesocket (listener);