rev 0.2.0 → 0.2.1

ev.h:
@@ -1,7 +1,7 @@
 /*
  * libev native API header
  *
- * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -78,6 +78,15 @@ typedef double ev_tstamp;
 # define EV_EMBED_ENABLE 1
 #endif
 
+#ifndef EV_ASYNC_ENABLE
+# define EV_ASYNC_ENABLE 1
+#endif
+
+#ifndef EV_ATOMIC_T
+# include <signal.h>
+# define EV_ATOMIC_T sig_atomic_t volatile
+#endif
+
 /*****************************************************************************/
 
 #if EV_STAT_ENABLE
@@ -91,6 +100,8 @@ struct ev_loop;
 # define EV_P_ EV_P,
 # define EV_A loop
 # define EV_A_ EV_A,
+# define EV_DEFAULT_UC ev_default_loop_uc ()
+# define EV_DEFAULT_UC_ EV_DEFAULT_UC,
 # define EV_DEFAULT ev_default_loop (0)
 # define EV_DEFAULT_ EV_DEFAULT,
 #else
@@ -100,27 +111,37 @@ struct ev_loop;
 # define EV_A_
 # define EV_DEFAULT
 # define EV_DEFAULT_
-
+# define EV_DEFAULT_UC
+# define EV_DEFAULT_UC_
 # undef EV_EMBED_ENABLE
 #endif
 
+#if __STDC_VERSION__ >= 199901L || __GNUC__ >= 3
+# define EV_INLINE static inline
+#else
+# define EV_INLINE static
+#endif
+
+/*****************************************************************************/
+
 /* eventmask, revents, events... */
-#define EV_UNDEF -1L /* guaranteed to be invalid */
-#define EV_NONE 0x00L /* no events */
-#define EV_READ 0x01L /* ev_io detected read will not block */
-#define EV_WRITE 0x02L /* ev_io detected write will not block */
-#define EV_IOFDSET 0x80L /* internal use only */
-#define EV_TIMEOUT 0x00000100L /* timer timed out */
-#define EV_PERIODIC 0x00000200L /* periodic timer timed out */
-#define EV_SIGNAL 0x00000400L /* signal was received */
-#define EV_CHILD 0x00000800L /* child/pid had status change */
-#define EV_STAT 0x00001000L /* stat data changed */
-#define EV_IDLE 0x00002000L /* event loop is idling */
-#define EV_PREPARE 0x00004000L /* event loop about to poll */
-#define EV_CHECK 0x00008000L /* event loop finished poll */
-#define EV_EMBED 0x00010000L /* embedded event loop needs sweep */
-#define EV_FORK 0x00020000L /* event loop resumed in child */
-#define EV_ERROR 0x80000000L /* sent when an error occurs */
+#define EV_UNDEF -1 /* guaranteed to be invalid */
+#define EV_NONE 0x00 /* no events */
+#define EV_READ 0x01 /* ev_io detected read will not block */
+#define EV_WRITE 0x02 /* ev_io detected write will not block */
+#define EV_IOFDSET 0x80 /* internal use only */
+#define EV_TIMEOUT 0x00000100 /* timer timed out */
+#define EV_PERIODIC 0x00000200 /* periodic timer timed out */
+#define EV_SIGNAL 0x00000400 /* signal was received */
+#define EV_CHILD 0x00000800 /* child/pid had status change */
+#define EV_STAT 0x00001000 /* stat data changed */
+#define EV_IDLE 0x00002000 /* event loop is idling */
+#define EV_PREPARE 0x00004000 /* event loop about to poll */
+#define EV_CHECK 0x00008000 /* event loop finished poll */
+#define EV_EMBED 0x00010000 /* embedded event loop needs sweep */
+#define EV_FORK 0x00020000 /* event loop resumed in child */
+#define EV_ASYNC 0x00040000 /* async intra-loop signal */
+#define EV_ERROR 0x80000000 /* sent when an error occurs */
 
 /* can be used to add custom fields to all watchers, while losing binary compatibility */
 #ifndef EV_COMMON
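The event bits defined above are what libev passes to a watcher callback in its revents argument. A minimal standalone sketch of testing them (the stdin_cb name and the watcher are illustrative, not part of this diff):

    #include <ev.h>
    #include <stdio.h>

    /* illustrative callback: test the EV_* bits delivered in revents */
    static void stdin_cb (struct ev_loop *loop, ev_io *w, int revents)
    {
      if (revents & EV_ERROR)        /* reported even when not requested */
        ev_io_stop (loop, w);
      else if (revents & EV_READ)
        printf ("fd %d is readable\n", w->fd);
    }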
@@ -307,6 +328,19 @@ typedef struct ev_embed
 } ev_embed;
 #endif
 
+#if EV_ASYNC_ENABLE
+/* invoked when somebody calls ev_async_send on the watcher */
+/* revent EV_ASYNC */
+typedef struct ev_async
+{
+  EV_WATCHER (ev_async)
+
+  EV_ATOMIC_T sent; /* private */
+} ev_async;
+
+# define ev_async_pending(w) ((w)->sent + 0)
+#endif
+
 /* the presence of this union forces similar struct layout */
 union ev_any_watcher
 {
@@ -332,21 +366,24 @@ union ev_any_watcher
 #if EV_EMBED_ENABLE
   struct ev_embed embed;
 #endif
+#if EV_ASYNC_ENABLE
+  struct ev_async async;
+#endif
 };
 
 /* bits for ev_default_loop and ev_loop_new */
 /* the default */
-#define EVFLAG_AUTO 0x00000000UL /* not quite a mask */
+#define EVFLAG_AUTO 0x00000000U /* not quite a mask */
 /* flag bits */
-#define EVFLAG_NOENV 0x01000000UL /* do NOT consult environment */
-#define EVFLAG_FORKCHECK 0x02000000UL /* check for a fork in each iteration */
+#define EVFLAG_NOENV 0x01000000U /* do NOT consult environment */
+#define EVFLAG_FORKCHECK 0x02000000U /* check for a fork in each iteration */
 /* method bits to be ored together */
-#define EVBACKEND_SELECT 0x00000001UL /* about anywhere */
-#define EVBACKEND_POLL 0x00000002UL /* !win */
-#define EVBACKEND_EPOLL 0x00000004UL /* linux */
-#define EVBACKEND_KQUEUE 0x00000008UL /* bsd */
-#define EVBACKEND_DEVPOLL 0x00000010UL /* solaris 8 */ /* NYI */
-#define EVBACKEND_PORT 0x00000020UL /* solaris 10 */
+#define EVBACKEND_SELECT 0x00000001U /* about anywhere */
+#define EVBACKEND_POLL 0x00000002U /* !win */
+#define EVBACKEND_EPOLL 0x00000004U /* linux */
+#define EVBACKEND_KQUEUE 0x00000008U /* bsd */
+#define EVBACKEND_DEVPOLL 0x00000010U /* solaris 8 */ /* NYI */
+#define EVBACKEND_PORT 0x00000020U /* solaris 10 */
 
 #if EV_PROTOTYPES
 int ev_version_major (void);
@@ -374,18 +411,29 @@ void ev_set_allocator (void *(*cb)(void *ptr, long size));
 void ev_set_syserr_cb (void (*cb)(const char *msg));
 
 # if EV_MULTIPLICITY
+EV_INLINE struct ev_loop *
+ev_default_loop_uc (void)
+{
+  extern struct ev_loop *ev_default_loop_ptr;
+
+  return ev_default_loop_ptr;
+}
+
 /* the default loop is the only one that handles signals and child watchers */
 /* you can call this as often as you like */
-static struct ev_loop *
+EV_INLINE struct ev_loop *
 ev_default_loop (unsigned int flags)
 {
-  extern struct ev_loop *ev_default_loop_ptr;
-  extern struct ev_loop *ev_default_loop_init (unsigned int flags);
+  struct ev_loop *loop = ev_default_loop_uc ();
 
-  if (!ev_default_loop_ptr)
-    ev_default_loop_init (flags);
+  if (!loop)
+    {
+      extern struct ev_loop *ev_default_loop_init (unsigned int flags);
 
-  return ev_default_loop_ptr;
+      loop = ev_default_loop_init (flags);
+    }
+
+  return loop;
 }
 
 /* create and destroy alternative loops that don't handle signals */
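A rough illustration of how the two accessors introduced above differ, assuming EV_MULTIPLICITY (the printf messages are illustrative):

    #include <ev.h>
    #include <stdio.h>

    int main (void)
    {
      /* ev_default_loop_uc () (EV_DEFAULT_UC) only reads ev_default_loop_ptr,
         so it returns 0 until the default loop has been initialised */
      if (!ev_default_loop_uc ())
        printf ("default loop not created yet\n");

      /* ev_default_loop (0) (EV_DEFAULT) initialises it on first use ... */
      struct ev_loop *loop = ev_default_loop (0);

      /* ... and later calls return the same pointer */
      if (loop == ev_default_loop_uc ())
        printf ("same default loop\n");

      return 0;
    }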
@@ -399,7 +447,7 @@ ev_tstamp ev_now (EV_P); /* time w.r.t. timers and the eventloop, updated after
 
 int ev_default_loop (unsigned int flags); /* returns true when successful */
 
-static ev_tstamp
+EV_INLINE ev_tstamp
 ev_now (void)
 {
   extern ev_tstamp ev_rt_now;
@@ -408,6 +456,18 @@ ev_now (void)
 }
 # endif
 
+EV_INLINE int
+ev_is_default_loop (EV_P)
+{
+#if EV_MULTIPLICITY
+  extern struct ev_loop *ev_default_loop_ptr;
+
+  return !!(EV_A == ev_default_loop_ptr);
+#else
+  return 1;
+#endif
+}
+
 void ev_default_destroy (void); /* destroy the default loop */
 /* this needs to be called after fork, to duplicate the default loop */
 /* if you create alternative loops you have to call ev_loop_fork on them */
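The ev_is_default_loop helper added above is useful for guarding watcher types that only the default loop supports; a small hedged sketch (the helper name is illustrative, not part of this diff):

    #include <ev.h>

    /* illustrative: start a signal watcher only on the default loop,
       since only the default loop handles signal and child watchers */
    static void start_signal_watcher (struct ev_loop *loop, ev_signal *sigw)
    {
      if (ev_is_default_loop (loop))
        ev_signal_start (loop, sigw);
    }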
@@ -465,6 +525,7 @@ void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revent
 #define ev_check_set(ev) /* nop, yes, this is a serious in-joke */
 #define ev_embed_set(ev,other_) do { (ev)->other = (other_); } while (0)
 #define ev_fork_set(ev) /* nop, yes, this is a serious in-joke */
+#define ev_async_set(ev) do { (ev)->sent = 0; } while (0)
 
 #define ev_io_init(ev,cb,fd,events) do { ev_init ((ev), (cb)); ev_io_set ((ev),(fd),(events)); } while (0)
 #define ev_timer_init(ev,cb,after,repeat) do { ev_init ((ev), (cb)); ev_timer_set ((ev),(after),(repeat)); } while (0)
@@ -477,6 +538,7 @@ void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revent
 #define ev_check_init(ev,cb) do { ev_init ((ev), (cb)); ev_check_set ((ev)); } while (0)
 #define ev_embed_init(ev,cb,other) do { ev_init ((ev), (cb)); ev_embed_set ((ev),(other)); } while (0)
 #define ev_fork_init(ev,cb) do { ev_init ((ev), (cb)); ev_fork_set ((ev)); } while (0)
+#define ev_async_init(ev,cb) do { ev_init ((ev), (cb)); ev_async_set ((ev)); } while (0)
 
 #define ev_is_pending(ev) (0 + ((ev_watcher *)(void *)(ev))->pending) /* ro, true when watcher is waiting for callback invocation */
 #define ev_is_active(ev) (0 + ((ev_watcher *)(void *)(ev))->active) /* ro, true when the watcher has been started */
@@ -552,6 +614,12 @@ void ev_embed_stop (EV_P_ ev_embed *w);
 void ev_embed_sweep (EV_P_ ev_embed *w);
 # endif
 
+# if EV_ASYNC_ENABLE
+void ev_async_start (EV_P_ ev_async *w);
+void ev_async_stop (EV_P_ ev_async *w);
+void ev_async_send (EV_P_ ev_async *w);
+# endif
+
 #endif
 
 #ifdef __cplusplus
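Taken together, the ev_async pieces added to ev.h give a thread-safe way to wake up a loop. A minimal sketch of how the new API fits together (the wakeup_cb name and the pthread usage are illustrative and not part of rev itself):

    #include <ev.h>
    #include <pthread.h>
    #include <stdio.h>

    static ev_async wakeup_watcher;

    /* runs in the loop thread after somebody called ev_async_send */
    static void wakeup_cb (struct ev_loop *loop, ev_async *w, int revents)
    {
      printf ("woken up, pending flag: %d\n", ev_async_pending (w));
      ev_unloop (loop, EVUNLOOP_ALL);
    }

    static void *other_thread (void *arg)
    {
      /* ev_async_send is safe to call from another thread */
      ev_async_send ((struct ev_loop *)arg, &wakeup_watcher);
      return 0;
    }

    int main (void)
    {
      struct ev_loop *loop = ev_default_loop (0);
      pthread_t tid;

      ev_async_init (&wakeup_watcher, wakeup_cb);
      ev_async_start (loop, &wakeup_watcher);

      pthread_create (&tid, 0, other_thread, loop);
      ev_loop (loop, 0);   /* this libev generation still uses ev_loop, not ev_run */

      return 0;
    }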
ev_poll.c:
@@ -86,7 +86,7 @@ poll_modify (EV_P_ int fd, int oev, int nev)
 static void
 poll_poll (EV_P_ ev_tstamp timeout)
 {
-  int i;
+  struct pollfd *p;
   int res = poll (polls, pollcnt, (int)ceil (timeout * 1000.));
 
   if (expect_false (res < 0))
@@ -97,20 +97,23 @@ poll_poll (EV_P_ ev_tstamp timeout)
         fd_enomem (EV_A);
       else if (errno != EINTR)
         syserr ("(libev) poll");
-
-      return;
     }
-
-  for (i = 0; i < pollcnt; ++i)
-    if (expect_false (polls [i].revents & POLLNVAL))
-      fd_kill (EV_A_ polls [i].fd);
-    else
-      fd_event (
-        EV_A_
-        polls [i].fd,
-        (polls [i].revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
-        | (polls [i].revents & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
-      );
+  else
+    for (p = polls; res; ++p)
+      if (expect_false (p->revents)) /* this expect is debatable */
+        {
+          --res;
+
+          if (expect_false (p->revents & POLLNVAL))
+            fd_kill (EV_A_ p->fd);
+          else
+            fd_event (
+              EV_A_
+              p->fd,
+              (p->revents & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
+              | (p->revents & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
+            );
+        }
 }
 
 int inline_size
ev_select.c:
@@ -95,8 +95,8 @@ select_modify (EV_P_ int fd, int oev, int nev)
 
 #else
 
-  int word = fd / NFDBITS;
-  int mask = 1UL << (fd % NFDBITS);
+  int     word = fd / NFDBITS;
+  fd_mask mask = 1UL << (fd % NFDBITS);
 
   if (expect_false (vec_max < word + 1))
     {
@@ -108,8 +108,8 @@ select_modify (EV_P_ int fd, int oev, int nev)
       vec_wo = ev_realloc (vec_wo, new_max * NFDBYTES); /* could free/malloc */
 
       for (; vec_max < new_max; ++vec_max)
-        ((fd_mask *)vec_ri)[vec_max] =
-        ((fd_mask *)vec_wi)[vec_max] = 0;
+        ((fd_mask *)vec_ri) [vec_max] =
+        ((fd_mask *)vec_wi) [vec_max] = 0;
     }
 
   ((fd_mask *)vec_ri) [word] |= mask;
ev_vars.h:
@@ -1,7 +1,7 @@
 /*
  * loop member variable declarations
  *
- * Copyright (c) 2007 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
@@ -55,6 +55,12 @@ VARx(ev_tstamp, backend_fudge) /* assumed typical timer resolution */
 VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev))
 VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout))
 
+#if EV_USE_EVENTFD || EV_GENWRAP
+VARx(int, evfd)
+#endif
+VAR (evpipe, int evpipe [2])
+VARx(ev_io, pipeev)
+
 #if !defined(_WIN32) || EV_GENWRAP
 VARx(pid_t, curpid)
 #endif
@@ -137,6 +143,13 @@ VARx(int, forkmax)
 VARx(int, forkcnt)
 #endif
 
+VARx(EV_ATOMIC_T, gotasync)
+#if EV_ASYNC_ENABLE || EV_GENWRAP
+VARx(struct ev_async **, asyncs)
+VARx(int, asyncmax)
+VARx(int, asynccnt)
+#endif
+
 #if EV_USE_INOTIFY || EV_GENWRAP
 VARx(int, fs_fd)
 VARx(ev_io, fs_w)
ev_wrap.h:
@@ -13,6 +13,9 @@
 #define backend_fudge ((loop)->backend_fudge)
 #define backend_modify ((loop)->backend_modify)
 #define backend_poll ((loop)->backend_poll)
+#define evfd ((loop)->evfd)
+#define evpipe ((loop)->evpipe)
+#define pipeev ((loop)->pipeev)
 #define curpid ((loop)->curpid)
 #define postfork ((loop)->postfork)
 #define vec_ri ((loop)->vec_ri)
@@ -61,6 +64,10 @@
 #define forks ((loop)->forks)
 #define forkmax ((loop)->forkmax)
 #define forkcnt ((loop)->forkcnt)
+#define gotasync ((loop)->gotasync)
+#define asyncs ((loop)->asyncs)
+#define asyncmax ((loop)->asyncmax)
+#define asynccnt ((loop)->asynccnt)
 #define fs_fd ((loop)->fs_fd)
 #define fs_w ((loop)->fs_w)
 #define fs_hash ((loop)->fs_hash)
@@ -78,6 +85,9 @@
 #undef backend_fudge
 #undef backend_modify
 #undef backend_poll
+#undef evfd
+#undef evpipe
+#undef pipeev
 #undef curpid
 #undef postfork
 #undef vec_ri
@@ -126,6 +136,10 @@
 #undef forks
 #undef forkmax
 #undef forkcnt
+#undef gotasync
+#undef asyncs
+#undef asyncmax
+#undef asynccnt
 #undef fs_fd
 #undef fs_w
 #undef fs_hash
extconf.rb:
@@ -1,48 +1,52 @@
 require 'mkmf'
 
-cflags = []
 libs = []
 
+$defs << "-DRUBY_VERSION_CODE=#{RUBY_VERSION.gsub(/\D/, '')}"
+
 if have_func('rb_thread_blocking_region')
-  cflags << '-DHAVE_RB_THREAD_BLOCKING_REGION'
+  $defs << '-DHAVE_RB_THREAD_BLOCKING_REGION'
+end
+
+if have_func('rb_str_set_len')
+  $defs << '-DHAVE_RB_STR_SET_LEN'
 end
 
 if have_header('sys/select.h')
-  cflags << '-DEV_USE_SELECT'
+  $defs << '-DEV_USE_SELECT'
 end
 
 if have_header('poll.h')
-  cflags << '-DEV_USE_POLL'
+  $defs << '-DEV_USE_POLL'
 end
 
 if have_header('sys/epoll.h')
-  cflags << '-DEV_USE_EPOLL'
+  $defs << '-DEV_USE_EPOLL'
 end
 
 if have_header('sys/event.h') and have_header('sys/queue.h')
-  cflags << '-DEV_USE_KQUEUE'
+  $defs << '-DEV_USE_KQUEUE'
 end
 
 if have_header('port.h')
-  cflags << '-DEV_USE_PORT'
+  $defs << '-DEV_USE_PORT'
 end
 
 if have_header('openssl/ssl.h')
-  cflags << '-DHAVE_OPENSSL_SSL_H'
+  $defs << '-DHAVE_OPENSSL_SSL_H'
   libs << '-lssl -lcrypto'
 end
 
 # ncpu detection specifics
 case RUBY_PLATFORM
 when /linux/
-  cflags << '-DHAVE_LINUX_PROCFS'
+  $defs << '-DHAVE_LINUX_PROCFS'
 else
   if have_func('sysctlbyname', ['sys/param.h', 'sys/sysctl.h'])
-    cflags << '-DHAVE_SYSCTLBYNAME'
+    $defs << '-DHAVE_SYSCTLBYNAME'
  end
 end
 
-$CFLAGS << ' ' << cflags.join(' ')
 $LIBS << ' ' << libs.join(' ')
 
 dir_config('rev_ext')
rev_loop.c:
@@ -6,6 +6,7 @@
 
 #include <assert.h>
 #include "ruby.h"
+#include "rubysig.h"
 
 #define EV_STANDALONE 1
 #include "../libev/ev.h"
@@ -28,6 +29,10 @@ static void Rev_Loop_ev_loop_oneshot(struct Rev_Loop *loop_data);
 static void Rev_Loop_dispatch_events(struct Rev_Loop *loop_data);
 
 #define DEFAULT_EVENTBUF_SIZE 32
+#define RUN_LOOP(loop_data, options) \
+  loop_data->running = 1; \
+  ev_loop(loop_data->ev_loop, options); \
+  loop_data->running = 0;
 
 /*
  * Rev::Loop represents an event loop. Event watchers can be attached and
@@ -52,7 +57,6 @@ static VALUE Rev_Loop_allocate(VALUE klass)
   struct Rev_Loop *loop = (struct Rev_Loop *)xmalloc(sizeof(struct Rev_Loop));
 
   loop->ev_loop = 0;
-
   loop->running = 0;
   loop->events_received = 0;
   loop->eventbuf_size = DEFAULT_EVENTBUF_SIZE;
@@ -173,22 +177,19 @@ void Rev_Loop_process_event(VALUE watcher, int revents)
 static VALUE Rev_Loop_run_once(VALUE self)
 {
   struct Rev_Loop *loop_data;
+  VALUE nevents;
+
   Data_Get_Struct(self, struct Rev_Loop, loop_data);
 
-  if(loop_data->running)
-    rb_raise(rb_eRuntimeError, "cannot run loop from within a callback");
-
   assert(loop_data->ev_loop && !loop_data->events_received);
-
-  loop_data->running = 1;
-
-  Rev_Loop_ev_loop_oneshot(loop_data);
+
+  Rev_Loop_ev_loop_oneshot(loop_data);
   Rev_Loop_dispatch_events(loop_data);
+
+  nevents = INT2NUM(loop_data->events_received);
   loop_data->events_received = 0;
-
-  loop_data->running = 0;
-
-  return Qnil;
+
+  return nevents;
 }
 
 /* Ruby 1.9 supports blocking system calls through rb_thread_blocking_region() */
@@ -197,16 +198,17 @@ static VALUE Rev_Loop_run_once(VALUE self)
 static VALUE Rev_Loop_ev_loop_oneshot_blocking(void *ptr)
 {
   /* The libev loop has now escaped through the Global VM Lock unscathed! */
-  struct ev_loop *loop = (struct ev_loop *)ptr;
+  struct Rev_Loop *loop_data = (struct Rev_Loop *)ptr;
 
-  ev_loop(loop, EVLOOP_ONESHOT);
+  RUN_LOOP(loop_data, EVLOOP_ONESHOT);
+
   return Qnil;
 }
 
 static void Rev_Loop_ev_loop_oneshot(struct Rev_Loop *loop_data)
 {
   /* Use Ruby 1.9's rb_thread_blocking_region call to make a blocking system call */
-  rb_thread_blocking_region(Rev_Loop_ev_loop_oneshot_blocking, loop_data->ev_loop, RB_UBF_DFL, 0);
+  rb_thread_blocking_region(Rev_Loop_ev_loop_oneshot_blocking, loop_data, RB_UBF_DFL, 0);
 }
 #endif
 
@@ -215,11 +217,12 @@ static void Rev_Loop_ev_loop_oneshot(struct Rev_Loop *loop_data)
 #ifndef HAVE_EV_LOOP_ONESHOT
 #define BLOCKING_INTERVAL 0.01 /* Block for 10ms at a time */
 
-/* Stub callback */
+/* Stub for scheduler's ev_timer callback */
 static void timer_callback(struct ev_loop *ev_loop, struct ev_timer *timer, int revents)
 {
 }
 
+/* Run the event loop, calling rb_thread_schedule every 10ms */
 static void Rev_Loop_ev_loop_oneshot(struct Rev_Loop *loop_data)
 {
   struct ev_timer timer;
@@ -229,15 +232,14 @@ static void Rev_Loop_ev_loop_oneshot(struct Rev_Loop *loop_data)
   ev_timer_init(&timer, timer_callback, BLOCKING_INTERVAL, BLOCKING_INTERVAL);
   ev_timer_start(loop_data->ev_loop, &timer);
 
-  do {
-    /* Since blocking calls would hang the Ruby 1.8 thread scheduler, don't block */
-    ev_loop(loop_data->ev_loop, EVLOOP_ONESHOT);
+  /* Loop until we receive events */
+  while(!loop_data->events_received) {
+    TRAP_BEG;
+    RUN_LOOP(loop_data, EVLOOP_ONESHOT);
+    TRAP_END;
 
-    /* Call rb_thread_select to resume the Ruby scheduler */
-    tv.tv_sec = 0;
-    tv.tv_usec = 0;
-    rb_thread_select(0, NULL, NULL, NULL, &tv);
-  } while(!loop_data->events_received);
+    rb_thread_schedule();
+  }
 
   ev_timer_stop(loop_data->ev_loop, &timer);
 }
@@ -245,27 +247,26 @@ static void Rev_Loop_ev_loop_oneshot(struct Rev_Loop *loop_data)
 
 /**
  * call-seq:
- *   Rev::Loop.run_once -> nil
+ *   Rev::Loop.run_nonblock -> nil
  *
 * Run the Rev::Loop once, but return immediately if there are no pending events.
 */
 static VALUE Rev_Loop_run_nonblock(VALUE self)
 {
   struct Rev_Loop *loop_data;
+  VALUE nevents;
+
   Data_Get_Struct(self, struct Rev_Loop, loop_data);
 
-  if(loop_data->running)
-    rb_raise(rb_eRuntimeError, "cannot run loop from within a callback");
-
   assert(loop_data->ev_loop && !loop_data->events_received);
 
-  loop_data->running = 1;
-  ev_loop(loop_data->ev_loop, EVLOOP_NONBLOCK);
+  RUN_LOOP(loop_data, EVLOOP_NONBLOCK);
   Rev_Loop_dispatch_events(loop_data);
+
+  nevents = INT2NUM(loop_data->events_received);
   loop_data->events_received = 0;
-  loop_data->running = 0;
-
-  return Qnil;
+
+  return nevents;
 }
 
 static void Rev_Loop_dispatch_events(struct Rev_Loop *loop_data)
@@ -283,4 +284,4 @@ static void Rev_Loop_dispatch_events(struct Rev_Loop *loop_data)
     Data_Get_Struct(loop_data->eventbuf[i].watcher, struct Rev_Watcher, watcher_data);
     watcher_data->dispatch_callback(loop_data->eventbuf[i].watcher, loop_data->eventbuf[i].revents);
   }
-}
+}