sleepy_penguin 3.1.0 → 3.1.0.26.g7181

data/.gitignore CHANGED
@@ -3,6 +3,7 @@
 *.log
 *.so
 *.rbc
+/.rbx
 /.config
 /InstalledFiles
 /doc
data/.wrongdoc.yml CHANGED
@@ -2,3 +2,5 @@
 cgit_url: http://bogomips.org/sleepy_penguin.git
 git_url: git://bogomips.org/sleepy_penguin.git
 rdoc_url: http://bogomips.org/sleepy_penguin/
+private_email: sleepy.penguin@bogomips.org
+public_email: sleepy.penguin@librelist.org
data/LICENSE CHANGED
@@ -14,5 +14,4 @@ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
 General Public License for more details.
 
 You should have received a copy of the GNU Lesser General Public License
-along with this library; if not, write to the Free Software
-Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+along with this library; if not, see http://www.gnu.org/licenses/
data/README CHANGED
@@ -6,14 +6,15 @@ timerfd, inotify, and epoll interfaces are provided.
 
 == Features
 
-* Thread-safe blocking operations under both Ruby 1.8 and 1.9.
+* Thread-safe blocking operations for all versions of Ruby
 
 * IO-like objects are backwards-compatible with IO.select.
 
 * Epoll interface is fork-safe and GC-safe
 
-* Unlike portable event frameworks, the Linux-only Epoll interface
-  allows using edge-triggered I/O for possibly improved performance
+* Unlike portable event frameworks, the Linux-only epoll interfaces
+  allow using edge-triggered or one-shot notifications for possibly
+  improved performance
 
 * Fully-documented and user-friendly API
 
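As a rough illustration of the feature list above (not taken from the README itself, and assuming the high-level Epoll#set/#wait API documented elsewhere in this diff remains available through the pure-Ruby wrapper), edge-triggered use could look like:

    require 'sleepy_penguin/sp'  # SP is the SleepyPenguin shorthand module
    rd, wr = IO.pipe
    ep = SP::Epoll.new
    ep.set(rd, SP::Epoll::IN | SP::Epoll::ET)  # edge-triggered read watch
    wr.write('hi')
    ep.wait(64, 1000) do |events, io|          # maxevents, timeout in ms
      begin
        loop { io.read_nonblock(16384) }       # with ET, drain until EAGAIN
      rescue IO::WaitReadable
      end
    end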
data/ext/sleepy_penguin/epoll.c CHANGED
@@ -1,22 +1,13 @@
 #include "sleepy_penguin.h"
 #include <sys/epoll.h>
-#include <pthread.h>
+#include <unistd.h>
 #include <time.h>
 #include "missing_epoll.h"
-#ifdef HAVE_RUBY_ST_H
-# include <ruby/st.h>
-#else
-# include <st.h>
-#endif
 #include "missing_rb_thread_fd_close.h"
 #include "missing_rb_update_max_fd.h"
-#define EP_RECREATE (-2)
 
-static pthread_key_t epoll_key;
-static st_table *active;
-static const int step = 64; /* unlikely to grow unless you're huge */
-static VALUE cEpoll_IO;
 static ID id_for_fd;
+static VALUE cEpoll;
 
 static uint64_t now_ms(void)
 {
@@ -47,27 +38,38 @@ static VALUE unpack_event_data(struct epoll_event *event)
 # endif
 #endif
 
-struct rb_epoll {
-	int fd;
-	VALUE io;
-	VALUE marks;
-	VALUE flag_cache;
-	int flags;
-};
-
 struct ep_per_thread {
-	struct rb_epoll *ep;
+	VALUE io;
+	int fd;
 	int timeout;
 	int maxevents;
 	int capa;
 	struct epoll_event events[FLEX_ARRAY];
 };
 
-static struct ep_per_thread *ept_get(int maxevents)
+/* this will raise if the IO is closed */
+static int ep_fd_check(struct ep_per_thread *ept)
 {
-	struct ep_per_thread *ept = pthread_getspecific(epoll_key);
-	int err;
+	int save_errno = errno;
+
+	ept->fd = rb_sp_fileno(ept->io);
+	errno = save_errno;
+
+	return 1;
+}
+
+static struct ep_per_thread *ept_get(VALUE self, int maxevents)
+{
+	static __thread struct ep_per_thread *ept;
 	size_t size;
+	int err;
+	void *ptr;
+
+	/* error check here to prevent OOM from posix_memalign */
+	if (maxevents <= 0) {
+		errno = EINVAL;
+		rb_sys_fail("epoll_wait maxevents <= 0");
+	}
 
 	if (ept && ept->capa >= maxevents)
 		goto out;
@@ -76,264 +78,76 @@ static struct ep_per_thread *ept_get(int maxevents)
 		sizeof(struct epoll_event) * maxevents;
 
 	free(ept); /* free(NULL) is POSIX and works on glibc */
-	ept = malloc(size);
-	if (ept == NULL)
-		rb_memerror();
-	err = pthread_setspecific(epoll_key, ept);
-	if (err != 0) {
+	err = posix_memalign(&ptr, rb_sp_l1_cache_line_size, size);
+	if (err) {
 		errno = err;
-		rb_sys_fail("pthread_setspecific");
+		rb_memerror();
 	}
+	ept = ptr;
 	ept->capa = maxevents;
 out:
 	ept->maxevents = maxevents;
+	ept->io = self;
+	ept->fd = rb_sp_fileno(ept->io);
 
 	return ept;
 }
 
-static struct rb_epoll *ep_get(VALUE self)
-{
-	struct rb_epoll *ep;
-
-	Data_Get_Struct(self, struct rb_epoll, ep);
-
-	return ep;
-}
-
-static void gcmark(void *ptr)
-{
-	struct rb_epoll *ep = ptr;
-
-	rb_gc_mark(ep->io);
-	rb_gc_mark(ep->marks);
-	rb_gc_mark(ep->flag_cache);
-}
-
-static void gcfree(void *ptr)
-{
-	struct rb_epoll *ep = ptr;
-
-	if (ep->fd >= 0) {
-		st_data_t key = ep->fd;
-		st_delete(active, &key, NULL);
-	}
-	if (NIL_P(ep->io) && ep->fd >= 0) {
-		/* can't raise during GC, and close() never fails in Linux */
-		(void)close(ep->fd);
-		errno = 0;
-	}
-	/* let GC take care of the underlying IO object if there is one */
-
-	xfree(ep);
-}
-
-static VALUE alloc(VALUE klass)
-{
-	struct rb_epoll *ep;
-	VALUE self;
-
-	self = Data_Make_Struct(klass, struct rb_epoll, gcmark, gcfree, ep);
-	ep->fd = -1;
-	ep->io = Qnil;
-	ep->marks = Qnil;
-	ep->flag_cache = Qnil;
-	ep->flags = 0;
-
-	return self;
-}
-
-static void my_epoll_create(struct rb_epoll *ep)
-{
-	ep->fd = epoll_create1(ep->flags);
-
-	if (ep->fd == -1) {
-		if (errno == EMFILE || errno == ENFILE || errno == ENOMEM) {
-			rb_gc();
-			ep->fd = epoll_create1(ep->flags);
-		}
-		if (ep->fd == -1)
-			rb_sys_fail("epoll_create1");
-	}
-	rb_update_max_fd(ep->fd);
-	st_insert(active, (st_data_t)ep->fd, (st_data_t)ep);
-	ep->marks = rb_ary_new();
-	ep->flag_cache = rb_ary_new();
-}
-
-static int ep_fd_check(struct rb_epoll *ep)
-{
-	if (ep->fd == -1)
-		rb_raise(rb_eIOError, "closed epoll descriptor");
-	return 1;
-}
-
-static void ep_check(struct rb_epoll *ep)
-{
-	if (ep->fd == EP_RECREATE)
-		my_epoll_create(ep);
-	ep_fd_check(ep);
-	assert(TYPE(ep->marks) == T_ARRAY && "marks not initialized");
-	assert(TYPE(ep->flag_cache) == T_ARRAY && "flag_cache not initialized");
-}
-
 /*
  * call-seq:
- *	SleepyPenguin::Epoll.new([flags])	-> Epoll object
+ *	SleepyPenguin::Epoll::IO.new(flags)	-> Epoll::IO object
  *
- * Creates a new Epoll object with an optional +flags+ argument.
- * +flags+ may currently be +:CLOEXEC+ or +0+ (or +nil+).
+ * Creates a new Epoll::IO object with the given +flags+ argument.
+ * +flags+ may currently be +CLOEXEC+ or +0+.
  */
-static VALUE init(int argc, VALUE *argv, VALUE self)
+static VALUE s_new(VALUE klass, VALUE _flags)
 {
-	struct rb_epoll *ep = ep_get(self);
-	VALUE fl;
+	int default_flags = RB_SP_CLOEXEC(EPOLL_CLOEXEC);
+	int flags = rb_sp_get_flags(klass, _flags, default_flags);
+	int fd = epoll_create1(flags);
+	VALUE rv;
 
-	rb_scan_args(argc, argv, "01", &fl);
-	ep->flags = rb_sp_get_flags(self, fl);
-	my_epoll_create(ep);
-
-	return self;
-}
-
-static VALUE ctl(VALUE self, VALUE io, VALUE flags, int op)
-{
-	struct epoll_event event;
-	struct rb_epoll *ep = ep_get(self);
-	int fd = rb_sp_fileno(io);
-	int rv;
-
-	ep_check(ep);
-	event.events = rb_sp_get_uflags(self, flags);
-	pack_event_data(&event, io);
-
-	rv = epoll_ctl(ep->fd, op, fd, &event);
-	if (rv == -1) {
-		if (errno == ENOMEM) {
+	if (fd < 0) {
+		if (errno == EMFILE || errno == ENFILE || errno == ENOMEM) {
 			rb_gc();
-			rv = epoll_ctl(ep->fd, op, fd, &event);
+			fd = epoll_create1(flags);
 		}
-		if (rv == -1)
-			rb_sys_fail("epoll_ctl");
-	}
-	switch (op) {
-	case EPOLL_CTL_ADD:
-		rb_ary_store(ep->marks, fd, io);
-		/* fall-through */
-	case EPOLL_CTL_MOD:
-		flags = UINT2NUM(event.events);
-		rb_ary_store(ep->flag_cache, fd, flags);
-		break;
-	case EPOLL_CTL_DEL:
-		rb_ary_store(ep->marks, fd, Qnil);
-		rb_ary_store(ep->flag_cache, fd, Qnil);
+		if (fd < 0)
+			rb_sys_fail("epoll_create1");
 	}
 
-	return INT2NUM(rv);
+	rv = INT2FIX(fd);
+	return rb_call_super(1, &rv);
 }
 
 /*
  * call-seq:
- *	ep.set(io, flags)	-> 0
- *
- * Used to avoid exceptions when your app is too lazy to check
- * what state a descriptor is in, this sets the epoll descriptor
- * to watch an +io+ with the given +flags+
+ *	epoll_io.epoll_ctl(op, io, events)	-> nil
  *
- * +flags+ may be an array of symbols or an unsigned Integer bit mask:
+ * Register, modify, or remove a watch for a given +io+ for events.
  *
- *	- flags = [ :IN, :ET ]
- *	- flags = SleepyPenguin::Epoll::IN | SleepyPenguin::Epoll::ET
+ * +op+ may be one of +EPOLL_CTL_ADD+, +EPOLL_CTL_MOD+, or +EPOLL_CTL_DEL+
+ * +io+ is an IO object or one which proxies via the +to_io+ method.
+ * +events+ is an integer mask of events to watch for.
  *
- * See constants in Epoll for more information.
+ * Returns nil on success.
  */
-static VALUE set(VALUE self, VALUE io, VALUE flags)
+static VALUE epctl(VALUE self, VALUE _op, VALUE io, VALUE events)
 {
 	struct epoll_event event;
-	struct rb_epoll *ep = ep_get(self);
+	int epfd = rb_sp_fileno(self);
 	int fd = rb_sp_fileno(io);
+	int op = NUM2INT(_op);
 	int rv;
-	VALUE cur_io = rb_ary_entry(ep->marks, fd);
 
-	ep_check(ep);
-	event.events = rb_sp_get_uflags(self, flags);
+	event.events = NUM2UINT(events);
 	pack_event_data(&event, io);
 
-	if (cur_io == io) {
-		VALUE cur_flags = rb_ary_entry(ep->flag_cache, fd);
-		uint32_t cur_events;
-
-		assert(!NIL_P(cur_flags) && "cur_flags nil but cur_io is not");
-		cur_events = NUM2UINT(cur_flags);
-
-		if (!(cur_events & EPOLLONESHOT) && cur_events == event.events)
-			return Qnil;
-
-fallback_mod:
-		rv = epoll_ctl(ep->fd, EPOLL_CTL_MOD, fd, &event);
-		if (rv == -1) {
-			if (errno != ENOENT)
-				rb_sys_fail("epoll_ctl - mod");
-			errno = 0;
-			rb_warn("epoll flag_cache failed (mod -> add)");
-			goto fallback_add;
-		}
-	} else {
-fallback_add:
-		rv = epoll_ctl(ep->fd, EPOLL_CTL_ADD, fd, &event);
-		if (rv == -1) {
-			if (errno != EEXIST)
-				rb_sys_fail("epoll_ctl - add");
-			errno = 0;
-			rb_warn("epoll flag_cache failed (add -> mod)");
-			goto fallback_mod;
-		}
-		rb_ary_store(ep->marks, fd, io);
-	}
-	flags = UINT2NUM(event.events);
-	rb_ary_store(ep->flag_cache, fd, flags);
-
-	return INT2NUM(rv);
-}
-
-/*
- * call-seq:
- *	epoll.delete(io)	-> io or nil
- *
- * Stops an +io+ object from being monitored.  This is like Epoll#del
- * but returns +nil+ on ENOENT instead of raising an error.  This is
- * useful for apps that do not care to track the status of an
- * epoll object itself.
- */
-static VALUE delete(VALUE self, VALUE io)
-{
-	struct rb_epoll *ep = ep_get(self);
-	int fd = rb_sp_fileno(io);
-	int rv;
-	VALUE cur_io;
-
-	ep_check(ep);
-	if (rb_sp_io_closed(io))
-		goto out;
-
-	cur_io = rb_ary_entry(ep->marks, fd);
-	if (NIL_P(cur_io) || rb_sp_io_closed(cur_io))
-		return Qnil;
-
-	rv = epoll_ctl(ep->fd, EPOLL_CTL_DEL, fd, NULL);
-	if (rv == -1) {
-		/* beware of IO.for_fd-created descriptors */
-		if (errno == ENOENT || errno == EBADF) {
-			errno = 0;
-			io = Qnil;
-		} else {
-			rb_sys_fail("epoll_ctl - del");
-		}
-	}
-out:
-	rb_ary_store(ep->marks, fd, Qnil);
-	rb_ary_store(ep->flag_cache, fd, Qnil);
+	rv = epoll_ctl(epfd, op, fd, &event);
+	if (rv < 0)
+		rb_sys_fail("epoll_ctl");
 
-	return io;
+	return Qnil;
 }
@@ -342,7 +156,7 @@ static VALUE epwait_result(struct ep_per_thread *ept, int n)
 	struct epoll_event *epoll_event = ept->events;
 	VALUE obj_events, obj;
 
-	if (n == -1)
+	if (n < 0)
 		rb_sys_fail("epoll_wait");
 
 	for (i = n; --i >= 0; epoll_event++) {
@@ -358,7 +172,7 @@ static int epoll_resume_p(uint64_t expire_at, struct ep_per_thread *ept)
 {
 	uint64_t now;
 
-	ep_fd_check(ept->ep);
+	ep_fd_check(ept); /* may raise IOError */
 
 	if (errno != EINTR)
 		return 0;
@@ -369,326 +183,61 @@ static int epoll_resume_p(uint64_t expire_at, struct ep_per_thread *ept)
 	return 1;
 }
 
-#if defined(HAVE_RB_THREAD_BLOCKING_REGION)
 static VALUE nogvl_wait(void *args)
 {
 	struct ep_per_thread *ept = args;
-	int fd = ept->ep->fd;
-	int n = epoll_wait(fd, ept->events, ept->maxevents, ept->timeout);
+	int n = epoll_wait(ept->fd, ept->events, ept->maxevents, ept->timeout);
 
 	return (VALUE)n;
 }
 
 static VALUE real_epwait(struct ep_per_thread *ept)
 {
-	int n;
+	long n;
 	uint64_t expire_at = ept->timeout > 0 ? now_ms() + ept->timeout : 0;
 
 	do {
-		n = (int)rb_sp_fd_region(nogvl_wait, ept, ept->ep->fd);
-	} while (n == -1 && epoll_resume_p(expire_at, ept));
+		n = (long)rb_sp_fd_region(nogvl_wait, ept, ept->fd);
+	} while (n < 0 && epoll_resume_p(expire_at, ept));
 
-	return epwait_result(ept, n);
+	return epwait_result(ept, (int)n);
 }
-#else /* 1.8 Green thread compatible code */
-# include "epoll_green.h"
-#endif /* 1.8 Green thread compatibility code */
 
 /*
  * call-seq:
- *	epoll.wait([maxevents[, timeout]]) { |flags, io| ... }
+ *	ep_io.epoll_wait([maxevents[, timeout]]) { |events, io| ... }
  *
- * Calls epoll_wait(2) and yields Integer +flags+ and IO objects watched
+ * Calls epoll_wait(2) and yields Integer +events+ and IO objects watched
 * for.  +maxevents+ is the maximum number of events to process at once,
- * lower numbers may prevent starvation when used by dup-ed Epoll objects
- * in multiple threads.  +timeout+ is specified in milliseconds, +nil+
+ * lower numbers may prevent starvation when epoll_wait is called from
+ * multiple threads.  Larger +maxevents+ reduces syscall overhead for
+ * single-threaded applications.  +maxevents+ defaults to 64 events.
+ * +timeout+ is specified in milliseconds, +nil+
 * (the default) meaning it will block and wait indefinitely.
 */
 static VALUE epwait(int argc, VALUE *argv, VALUE self)
 {
 	VALUE timeout, maxevents;
-	struct rb_epoll *ep = ep_get(self);
 	struct ep_per_thread *ept;
 
-	ep_check(ep);
 	rb_need_block();
 	rb_scan_args(argc, argv, "02", &maxevents, &timeout);
-	ept = ept_get(NIL_P(maxevents) ? 64 : NUM2INT(maxevents));
+
+	ept = ept_get(self, NIL_P(maxevents) ? 64 : NUM2INT(maxevents));
 	ept->timeout = NIL_P(timeout) ? -1 : NUM2INT(timeout);
-	ept->ep = ep;
 
 	return real_epwait(ept);
 }
 
-/*
- * call-seq:
- *	epoll.add(io, flags)	-> 0
- *
- * Starts watching a given +io+ object with +flags+ which may be an Integer
- * bitmask or Array representing arrays to watch for.  Consider Epoll#set
- * instead as it is easier to use.
- */
-static VALUE add(VALUE self, VALUE io, VALUE flags)
-{
-	return ctl(self, io, flags, EPOLL_CTL_ADD);
-}
-
-/*
- * call-seq:
- *	epoll.del(io)	-> 0
- *
- * Disables an IO object from being watched.  Consider Epoll#delete as
- * it is easier to use.
- */
-static VALUE del(VALUE self, VALUE io)
-{
-	return ctl(self, io, INT2FIX(0), EPOLL_CTL_DEL);
-}
-
-/*
- * call-seq:
- *	epoll.mod(io, flags)	-> 0
- *
- * Changes the watch for an existing IO object based on +flags+.
- * Consider Epoll#set instead as it is easier to use.
- */
-static VALUE mod(VALUE self, VALUE io, VALUE flags)
+/* :nodoc: */
+static VALUE event_flags(VALUE self, VALUE flags)
 {
-	return ctl(self, io, flags, EPOLL_CTL_MOD);
-}
-
-/*
- * call-seq:
- *	epoll.to_io	-> Epoll::IO object
- *
- * Used to expose the given Epoll object as an Epoll::IO object for IO.select
- * or IO#stat.  This is unlikely to be useful directly, but is used internally
- * by IO.select.
- */
-static VALUE to_io(VALUE self)
-{
-	struct rb_epoll *ep = ep_get(self);
-
-	ep_check(ep);
-
-	if (NIL_P(ep->io))
-		ep->io = rb_funcall(cEpoll_IO, id_for_fd, 1, INT2NUM(ep->fd));
-
-	return ep->io;
-}
-
-/*
- * call-seq:
- *	epoll.close	-> nil
- *
- * Closes an existing Epoll object and returns memory back to the kernel.
- * Raises IOError if object is already closed.
- */
-static VALUE epclose(VALUE self)
-{
-	struct rb_epoll *ep = ep_get(self);
-
-	if (ep->fd >= 0) {
-		st_data_t key = ep->fd;
-		st_delete(active, &key, NULL);
-	}
-
-	if (NIL_P(ep->io)) {
-		ep_fd_check(ep);
-
-		if (ep->fd == EP_RECREATE) {
-			ep->fd = -1; /* success */
-		} else {
-			int err;
-			int fd = ep->fd;
-
-			ep->fd = -1;
-			rb_thread_fd_close(fd);
-			err = close(fd);
-			if (err == -1)
-				rb_sys_fail("close");
-		}
-	} else {
-		ep->fd = -1;
-		rb_io_close(ep->io);
-	}
-
-	return Qnil;
-}
-
-/*
- * call-seq:
- *	epoll.closed?	-> true or false
- *
- * Returns whether or not an Epoll object is closed.
- */
-static VALUE epclosed(VALUE self)
-{
-	struct rb_epoll *ep = ep_get(self);
-
-	return ep->fd == -1 ? Qtrue : Qfalse;
-}
-
-static int cloexec_dup(struct rb_epoll *ep)
-{
-#ifdef F_DUPFD_CLOEXEC
-	int flags = ep->flags & EPOLL_CLOEXEC ? F_DUPFD_CLOEXEC : F_DUPFD;
-	int fd = fcntl(ep->fd, flags, 0);
-#else /* potentially racy on GVL-free systems: */
-	int fd = dup(ep->fd);
-	if (fd >= 0)
-		(void)fcntl(fd, F_SETFD, FD_CLOEXEC);
-#endif
-	return fd;
-}
-
-/*
- * call-seq:
- *	epoll.dup	-> another Epoll object
- *
- * Duplicates an Epoll object and userspace buffers related to this library.
- * This allows the same epoll object in the Linux kernel to be safely used
- * across multiple native threads as long as there is one SleepyPenguin::Epoll
- * object per-thread.
- */
-static VALUE init_copy(VALUE copy, VALUE orig)
-{
-	struct rb_epoll *a = ep_get(orig);
-	struct rb_epoll *b = ep_get(copy);
-
-	assert(NIL_P(b->io) && "Ruby broken?");
-
-	ep_check(a);
-	assert(NIL_P(b->marks) && "mark array not nil");
-	assert(NIL_P(b->flag_cache) && "flag_cache not nil");
-	b->marks = a->marks;
-	b->flag_cache = a->flag_cache;
-	assert(TYPE(b->marks) == T_ARRAY && "mark array not initialized");
-	assert(TYPE(b->flag_cache) == T_ARRAY && "flag_cache not initialized");
-	b->flags = a->flags;
-	b->fd = cloexec_dup(a);
-	if (b->fd == -1) {
-		if (errno == ENFILE || errno == EMFILE) {
-			rb_gc();
-			b->fd = cloexec_dup(a);
-		}
-		if (b->fd == -1)
-			rb_sys_fail("dup");
-	}
-	st_insert(active, (st_data_t)b->fd, (st_data_t)b);
-
-	return copy;
-}
-
-/* occasionally it's still useful to lookup aliased IO objects
- * based on for debugging */
-static int my_fileno(VALUE obj)
-{
-	if (T_FIXNUM == TYPE(obj))
-		return FIX2INT(obj);
-	return rb_sp_fileno(obj);
-}
-
-/*
- * call-seq:
- *	epoll.io_for(io)	-> object
- *
- * Returns the given IO object currently being watched for.  Different
- * IO objects may internally refer to the same process file descriptor.
- * Mostly used for debugging.
- */
-static VALUE io_for(VALUE self, VALUE obj)
-{
-	struct rb_epoll *ep = ep_get(self);
-
-	return rb_ary_entry(ep->marks, my_fileno(obj));
-}
-
-/*
- * call-seq:
- *	epoll.flags_for(io)	-> Integer
- *
- * Returns the flags currently watched for in current Epoll object.
- * Mostly used for debugging.
- */
-static VALUE flags_for(VALUE self, VALUE obj)
-{
-	struct rb_epoll *ep = ep_get(self);
-
-	return rb_ary_entry(ep->flag_cache, my_fileno(obj));
-}
-
-/*
- * call-seq:
- *	epoll.include?(io) => true or false
- *
- * Returns whether or not a given IO is watched and prevented from being
- * garbage-collected by the current Epoll object.  This may include
- * closed IO objects.
- */
-static VALUE include_p(VALUE self, VALUE obj)
-{
-	struct rb_epoll *ep = ep_get(self);
-
-	return NIL_P(rb_ary_entry(ep->marks, my_fileno(obj))) ? Qfalse : Qtrue;
-}
-
-/*
- * we close (or lose to GC) epoll descriptors at fork to avoid leakage
- * and invalid objects being referenced later in the child
- */
-static int ep_atfork(st_data_t key, st_data_t value, void *ignored)
-{
-	struct rb_epoll *ep = (struct rb_epoll *)value;
-
-	if (NIL_P(ep->io)) {
-		if (ep->fd >= 0)
-			(void)close(ep->fd);
-	} else {
-		ep->io = Qnil; /* must let GC take care of it later :< */
-	}
-	ep->fd = EP_RECREATE;
-
-	return ST_CONTINUE;
-}
-
-static void atfork_child(void)
-{
-	st_table *old = active;
-
-	active = st_init_numtable();
-	st_foreach(old, ep_atfork, (st_data_t)NULL);
-	st_free_table(old);
-}
-
-static void epoll_once(void)
-{
-	int err = pthread_key_create(&epoll_key, free);
-
-	if (err) {
-		errno = err;
-		rb_sys_fail("pthread_key_create");
-	}
-
-	active = st_init_numtable();
-
-	if (pthread_atfork(NULL, NULL, atfork_child) != 0) {
-		rb_gc();
-		if (pthread_atfork(NULL, NULL, atfork_child) != 0)
-			rb_memerror();
-	}
+	return UINT2NUM(rb_sp_get_uflags(self, flags));
 }
 
 void sleepy_penguin_init_epoll(void)
 {
-	VALUE mSleepyPenguin, cEpoll;
-	pthread_once_t once = PTHREAD_ONCE_INIT;
-	int err = pthread_once(&once, epoll_once);
-
-	if (err) {
-		errno = err;
-		rb_sys_fail("pthread_once(.., epoll_once)");
-	}
+	VALUE mSleepyPenguin, cEpoll_IO;
 
 	/*
 	 * Document-module: SleepyPenguin
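The epoll_wait documentation above trades +maxevents+ against fairness and syscall overhead; reusing the illustrative epio object from the earlier sketch, the two extremes look like:

    # small maxevents: fairer when several threads call epoll_wait on one object
    epio.epoll_wait(8, 100) { |events, io| p [events, io] }

    # large maxevents: fewer epoll_wait(2) syscalls in a single-threaded loop
    epio.epoll_wait(1024, 100) { |events, io| p [events, io] }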
@@ -707,6 +256,7 @@ void sleepy_penguin_init_epoll(void)
 	 * And then access classes via:
 	 *
 	 * - SP::Epoll
+	 * - SP::Epoll::IO
 	 * - SP::EventFD
 	 * - SP::Inotify
 	 * - SP::TimerFD
@@ -716,36 +266,36 @@ void sleepy_penguin_init_epoll(void)
 	/*
 	 * Document-class: SleepyPenguin::Epoll
 	 *
-	 * The Epoll class provides access to epoll(7) functionality in the
-	 * Linux 2.6 kernel.  It provides fork and GC-safety for Ruby
-	 * objects stored within the IO object and may be passed as an
-	 * argument to IO.select.
+	 * The Epoll class provides high-level access to epoll(7)
+	 * functionality in the Linux 2.6 and later kernels.  It provides
+	 * fork and GC-safety for Ruby objects stored within the IO object
+	 * and may be passed as an argument to IO.select.
 	 */
 	cEpoll = rb_define_class_under(mSleepyPenguin, "Epoll", rb_cObject);
 
 	/*
 	 * Document-class: SleepyPenguin::Epoll::IO
 	 *
-	 * Epoll::IO is an internal class.  Its only purpose is to be
-	 * compatible with IO.select and related methods and should
-	 * never be used directly, use Epoll instead.
+	 * Epoll::IO is a low-level class.  It does not provide fork nor
+	 * GC-safety, so Ruby IO objects added via epoll_ctl must be retained
+	 * by the application until IO#close is called.
 	 */
 	cEpoll_IO = rb_define_class_under(cEpoll, "IO", rb_cIO);
-	rb_define_method(cEpoll, "initialize", init, -1);
-	rb_define_method(cEpoll, "initialize_copy", init_copy, 1);
-	rb_define_alloc_func(cEpoll, alloc);
-	rb_define_method(cEpoll, "to_io", to_io, 0);
-	rb_define_method(cEpoll, "close", epclose, 0);
-	rb_define_method(cEpoll, "closed?", epclosed, 0);
-	rb_define_method(cEpoll, "add", add, 2);
-	rb_define_method(cEpoll, "mod", mod, 2);
-	rb_define_method(cEpoll, "del", del, 1);
-	rb_define_method(cEpoll, "delete", delete, 1);
-	rb_define_method(cEpoll, "io_for", io_for, 1);
-	rb_define_method(cEpoll, "flags_for", flags_for, 1);
-	rb_define_method(cEpoll, "include?", include_p, 1);
-	rb_define_method(cEpoll, "set", set, 2);
-	rb_define_method(cEpoll, "wait", epwait, -1);
+	rb_define_singleton_method(cEpoll_IO, "new", s_new, 1);
+
+	rb_define_method(cEpoll_IO, "epoll_ctl", epctl, 3);
+	rb_define_method(cEpoll_IO, "epoll_wait", epwait, -1);
+
+	rb_define_method(cEpoll, "__event_flags", event_flags, 1);
+
+	/* registers an IO object via epoll_ctl */
+	rb_define_const(cEpoll, "CTL_ADD", INT2NUM(EPOLL_CTL_ADD));
+
+	/* unregisters an IO object via epoll_ctl */
+	rb_define_const(cEpoll, "CTL_DEL", INT2NUM(EPOLL_CTL_DEL));
+
+	/* modifies the registration of an IO object via epoll_ctl */
+	rb_define_const(cEpoll, "CTL_MOD", INT2NUM(EPOLL_CTL_MOD));
 
 	/* specifies whether close-on-exec flag is set for Epoll.new */
 	rb_define_const(cEpoll, "CLOEXEC", INT2NUM(EPOLL_CLOEXEC));
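Because Epoll::IO keeps no Ruby-visible reference to registered objects, the application must hold them itself until they are unregistered or closed; one possible pattern (the watched Hash is illustrative, not part of the gem):

    watched = {}                     # fd => IO; prevents GC of registered IOs
    watched[rd.fileno] = rd
    epio.epoll_ctl(Epoll::CTL_ADD, rd, Epoll::IN)

    # ...later, once the IO is no longer needed:
    epio.epoll_ctl(Epoll::CTL_DEL, rd, 0)
    watched.delete(rd.fileno)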
@@ -764,6 +314,15 @@ void sleepy_penguin_init_epoll(void)
 	rb_define_const(cEpoll, "RDHUP", UINT2NUM(EPOLLRDHUP));
 #endif
 
+#ifdef EPOLLWAKEUP
+	/*
+	 * This prevents system suspend while the event is ready.
+	 * It requires the caller to have the CAP_BLOCK_SUSPEND capability.
+	 * Available since Linux 3.5.
+	 */
+	rb_define_const(cEpoll, "WAKEUP", UINT2NUM(EPOLLWAKEUP));
+#endif
+
 	/* watch for urgent read(2) data */
 	rb_define_const(cEpoll, "PRI", UINT2NUM(EPOLLPRI));
 
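Since the WAKEUP constant only exists when the build headers define EPOLLWAKEUP (Linux 3.5+), portable callers can feature-test it at runtime; a minimal sketch:

    events = Epoll::IN
    # the process also needs CAP_BLOCK_SUSPEND for EPOLLWAKEUP to take effect
    events |= Epoll::WAKEUP if defined?(Epoll::WAKEUP)
    epio.epoll_ctl(Epoll::CTL_ADD, rd, events)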
@@ -786,4 +345,7 @@ void sleepy_penguin_init_epoll(void)
 	rb_define_const(cEpoll, "ONESHOT", UINT2NUM(EPOLLONESHOT));
 
 	id_for_fd = rb_intern("for_fd");
+
+	if (RB_SP_GREEN_THREAD)
+		rb_require("sleepy_penguin/epoll/io");
 }