sleepy_penguin 3.4.1 → 3.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.document +1 -0
- data/.olddoc.yml +3 -4
- data/GIT-VERSION-GEN +1 -1
- data/LICENSE +3 -3
- data/README +7 -4
- data/TODO +1 -0
- data/ext/sleepy_penguin/cfr.c +62 -0
- data/ext/sleepy_penguin/epoll.c +34 -24
- data/ext/sleepy_penguin/eventfd.c +6 -5
- data/ext/sleepy_penguin/extconf.rb +6 -0
- data/ext/sleepy_penguin/init.c +83 -12
- data/ext/sleepy_penguin/inotify.c +48 -36
- data/ext/sleepy_penguin/kqueue.c +22 -21
- data/ext/sleepy_penguin/sendfile.c +120 -0
- data/ext/sleepy_penguin/sleepy_penguin.h +15 -28
- data/ext/sleepy_penguin/sp_copy.h +33 -0
- data/ext/sleepy_penguin/splice.c +174 -0
- data/ext/sleepy_penguin/timerfd.c +1 -5
- data/ext/sleepy_penguin/util.c +12 -0
- data/lib/sleepy_penguin.rb +28 -0
- data/lib/sleepy_penguin/cfr.rb +29 -0
- data/lib/sleepy_penguin/epoll.rb +13 -10
- data/lib/sleepy_penguin/kqueue.rb +6 -6
- data/lib/sleepy_penguin/sp.rb +1 -1
- data/lib/sleepy_penguin/splice.rb +125 -0
- data/pkg.mk +5 -12
- data/sleepy_penguin.gemspec +13 -15
- data/test/helper.rb +2 -7
- data/test/test_cfr.rb +35 -0
- data/test/test_constants.rb +2 -4
- data/test/test_epoll.rb +35 -6
- data/test/test_epoll_gc.rb +2 -5
- data/test/test_epoll_io.rb +3 -6
- data/test/test_epoll_optimizations.rb +2 -2
- data/test/test_eventfd.rb +2 -5
- data/test/test_inotify.rb +2 -4
- data/test/test_kqueue.rb +35 -7
- data/test/test_kqueue_io.rb +2 -5
- data/test/test_pipesize.rb +22 -0
- data/test/test_sendfile.rb +26 -0
- data/test/test_splice.rb +250 -0
- data/test/test_splice_eintr.rb +31 -0
- data/test/test_timerfd.rb +2 -5
- metadata +27 -34
- data/lib/sleepy_penguin/epoll/io.rb +0 -28
- data/lib/sleepy_penguin/kqueue/io.rb +0 -30
data/ext/sleepy_penguin/inotify.c
CHANGED
@@ -26,10 +26,8 @@ static VALUE s_new(int argc, VALUE *argv, VALUE klass)
 
     fd = inotify_init1(flags);
     if (fd < 0) {
-        if (errno
-            rb_gc();
+        if (rb_sp_gc_for_fd(errno))
            fd = inotify_init1(flags);
-        }
        if (fd < 0)
            rb_sys_fail("inotify_init1");
    }
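The new rb_sp_gc_for_fd() helper, presumably defined in util.c (+12 lines in this diff, not shown in this section), centralizes the "garbage-collect, then retry once" logic that the individual *_init wrappers used to open-code. A minimal sketch of the idea, assuming it keys off the usual descriptor/memory exhaustion errno values:

#include <errno.h>
#include <ruby.h>

/* sketch only: when an fd-producing syscall failed for a resource
 * reason, run the GC (hopefully closing forgotten descriptors) and
 * tell the caller to retry the syscall once */
int rb_sp_gc_for_fd(int err)
{
    switch (err) {
    case ENOMEM:
    case EMFILE:
    case ENFILE:
        rb_gc();
        return 1;
    }
    return 0;
}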
@@ -136,8 +134,11 @@ static VALUE event_new(struct inotify_event *e)
 }
 
 struct inread_args {
+    VALUE self;
     int fd;
+    int nonblock_p;
     size_t size;
+    VALUE tmp;
     void *buf;
 };
 
@@ -160,6 +161,7 @@ static void resize_internal_buffer(struct inread_args *args)
 
     if (newlen > 0) {
        args->size = (size_t)newlen;
+       rb_sp_puttlsbuf((VALUE)args->buf);
        args->buf = rb_sp_gettlsbuf(&args->size);
    }
 
@@ -171,56 +173,35 @@ static void resize_internal_buffer(struct inread_args *args)
            newlen);
 }
 
-
- * call-seq:
- *  ino.take([nonblock]) -> Inotify::Event or nil
- *
- * Returns the next Inotify::Event processed. May return +nil+ if +nonblock+
- * is +true+.
- */
-static VALUE take(int argc, VALUE *argv, VALUE self)
+static VALUE do_take(VALUE p)
 {
-   struct inread_args args;
-   VALUE tmp = rb_ivar_get(self, id_inotify_tmp);
-   struct inotify_event *e, *end;
-   ssize_t r;
+   struct inread_args *args = (struct inread_args *)p;
    VALUE rv = Qnil;
-
-
-   if (RARRAY_LEN(tmp) > 0)
-       return rb_ary_shift(tmp);
-
-   rb_scan_args(argc, argv, "01", &nonblock);
-
-   args.fd = rb_sp_fileno(self);
-   args.size = 128;
-   args.buf = rb_sp_gettlsbuf(&args.size);
+   struct inotify_event *e, *end;
 
-
-       rb_sp_set_nonblock(args.fd);
-   else
-       blocking_io_prepare(args.fd);
+   args->buf = rb_sp_gettlsbuf(&args->size);
    do {
-       r = (ssize_t)rb_sp_fd_region(inread,
+       ssize_t r = (ssize_t)rb_sp_fd_region(inread, args, args->fd);
        if (r == 0 /* Linux < 2.6.21 */
            ||
            (r < 0 && errno == EINVAL) /* Linux >= 2.6.21 */
           ) {
-           resize_internal_buffer(
+           resize_internal_buffer(args);
        } else if (r < 0) {
-           if (errno == EAGAIN &&
+           if (errno == EAGAIN && args->nonblock_p)
                return Qnil;
-           if (!rb_sp_wait(rb_io_wait_readable, self,
+           if (!rb_sp_wait(rb_io_wait_readable, args->self,
+                           &args->fd))
                rb_sys_fail("read(inotify)");
        } else {
            /* buffer in userspace to minimize read() calls */
-           end = (struct inotify_event *)((char *)args
-           for (e = args
+           end = (struct inotify_event *)((char *)args->buf + r);
+           for (e = args->buf; e < end; ) {
                VALUE event = event_new(e);
                if (NIL_P(rv))
                    rv = event;
                else
-                   rb_ary_push(tmp, event);
+                   rb_ary_push(args->tmp, event);
                e = (struct inotify_event *)
                    ((char *)e + event_len(e));
            }
@@ -230,6 +211,37 @@ static VALUE take(int argc, VALUE *argv, VALUE self)
    return rv;
 }
 
+/*
+ * call-seq:
+ *  ino.take([nonblock]) -> Inotify::Event or nil
+ *
+ * Returns the next Inotify::Event processed. May return +nil+ if +nonblock+
+ * is +true+.
+ */
+static VALUE take(int argc, VALUE *argv, VALUE self)
+{
+   struct inread_args args;
+   VALUE nonblock;
+
+   args.tmp = rb_ivar_get(self, id_inotify_tmp);
+   if (RARRAY_LEN(args.tmp) > 0)
+       return rb_ary_shift(args.tmp);
+
+   rb_scan_args(argc, argv, "01", &nonblock);
+
+   args.self = self;
+   args.fd = rb_sp_fileno(self);
+   args.size = 128;
+   args.nonblock_p = RTEST(nonblock);
+
+   if (args.nonblock_p)
+       rb_sp_set_nonblock(args.fd);
+
+   args.buf = 0;
+   return rb_ensure(do_take, (VALUE)&args,
+            rb_sp_puttlsbuf, (VALUE)args.buf);
+}
+
 /*
  * call-seq:
  *  inotify_event.events => [ :MOVED_TO, ... ]
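The take() rework above moves the read loop into do_take() and invokes it through rb_ensure(), so the thread-local buffer obtained from rb_sp_gettlsbuf() is handed back to rb_sp_puttlsbuf() even when the loop raises. The general shape of that pattern, as an illustration rather than code from the gem:

#include <ruby.h>

/* the body may raise (e.g. via rb_sys_fail); the cleanup runs regardless */
static VALUE body(VALUE arg)
{
    /* ... work that may raise ... */
    return Qnil;
}

static VALUE cleanup(VALUE arg)
{
    /* ... release buffers, descriptors, etc. ... */
    return Qnil;
}

static VALUE call_with_cleanup(VALUE arg)
{
    return rb_ensure(body, arg, cleanup, arg);
}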
data/ext/sleepy_penguin/kqueue.c
CHANGED
@@ -43,6 +43,7 @@ static VALUE mEv, mEvFilt, mNote, mVQ;
 
 struct kq_per_thread {
    VALUE io;
+   VALUE changelist;
    int fd;
    int nchanges;
    int nevents;
@@ -72,7 +73,7 @@ static int kq_fd_check(struct kq_per_thread *kpt)
    return 1;
 }
 
-static struct kq_per_thread *kpt_get(
+static struct kq_per_thread *kpt_get(int nchanges, int nevents)
 {
    struct kq_per_thread *kpt;
    size_t size;
@@ -89,8 +90,6 @@ static struct kq_per_thread *kpt_get(VALUE self, int nchanges, int nevents)
    kpt->capa = max;
    kpt->nchanges = nchanges;
    kpt->nevents = nevents;
-   kpt->io = self;
-   kpt->fd = rb_sp_fileno(kpt->io);
 
    return kpt;
 }
@@ -102,11 +101,10 @@ static struct kq_per_thread *kpt_get(VALUE self, int nchanges, int nevents)
  * Creates a new Kqueue::IO object. This is a wrapper around the kqueue(2)
  * system call which creates a Ruby IO object around the kqueue descriptor.
  *
- * kqueue descriptors are automatically invalidated across fork,
- * must be taken when forking.
+ * kqueue descriptors are automatically invalidated by the OS across fork,
+ * so care must be taken when forking.
  * Setting IO#autoclose=false is recommended for applications which fork
- * after kqueue creation.
- * this class is not recommended under Ruby 1.8
+ * after kqueue creation.
  */
 static VALUE s_new(VALUE klass)
 {
@@ -203,11 +201,16 @@ static VALUE nogvl_kevent(void *args)
    return (VALUE)nevents;
 }
 
+static void changelist_prepare(struct kevent *, VALUE);
+
 static VALUE do_kevent(struct kq_per_thread *kpt)
 {
    long nevents;
    struct timespec expire_at;
 
+   if (kpt->nchanges)
+       changelist_prepare(kpt->events, kpt->changelist);
+
    if (kpt->ts) {
        clock_gettime(CLOCK_MONOTONIC, &expire_at);
 
@@ -333,7 +336,7 @@ static void changelist_prepare(struct kevent *events, VALUE changelist)
  */
 static VALUE sp_kevent(int argc, VALUE *argv, VALUE self)
 {
-   struct timespec ts;
+   struct timespec ts, *t;
    VALUE changelist, events, timeout;
    struct kq_per_thread *kpt;
    int nchanges, nevents;
@@ -362,12 +365,14 @@ static VALUE sp_kevent(int argc, VALUE *argv, VALUE self)
        nevents = 0;
    }
 
-
-   kpt
-
-
+   t = NIL_P(timeout) ? NULL : value2timespec(&ts, timeout);
+   kpt = kpt_get(nchanges, nevents);
+   kpt->ts = t;
+   kpt->changelist = changelist;
+   kpt->io = self;
+   kpt->fd = rb_sp_fileno(kpt->io);
 
-   return do_kevent(kpt);
+   return rb_ensure(do_kevent, (VALUE)kpt, rb_sp_puttlsbuf, (VALUE)kpt);
 }
 
 /* initialize constants in the SleepyPenguin::Ev namespace */
@@ -651,13 +656,9 @@ void sleepy_penguin_init_kqueue(void)
 
    id_for_fd = rb_intern("for_fd");
 
-
-
-
-
-   rb_require("sleepy_penguin/kqueue");
-
-   /* Kevent helper struct */
-   rb_require("sleepy_penguin/kevent");
+   /*
+    * the high-level interface is implemented in Ruby
+    * see lib/sleepy_penguin/kevent.rb
+    */
 }
 #endif /* HAVE_SYS_EVENT_H */
data/ext/sleepy_penguin/sendfile.c
ADDED
@@ -0,0 +1,120 @@
+#include "sleepy_penguin.h"
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+
+#if defined(HAVE_SYS_SENDFILE_H) && !defined(HAVE_BSD_SENDFILE)
+# include <sys/sendfile.h>
+#endif
+
+#if defined(__linux__) && defined(HAVE_SENDFILE)
+# define linux_sendfile(in_fd, out_fd, offset, count) \
+   sendfile((in_fd),(out_fd),(offset),(count))
+
+/* all good */
+#elif defined(HAVE_SENDFILE) && \
+      (defined(__FreeBSD__) || defined(__DragonFly__))
+/*
+ * make BSD sendfile look like Linux for now...
+ * we can support SF_NODISKIO later
+ */
+static ssize_t linux_sendfile(int sockfd, int filefd, off_t *off, size_t count)
+{
+   off_t sbytes = 0;
+   off_t offset = off ? *off : lseek(filefd, 0, SEEK_CUR);
+
+   int rc = sendfile(filefd, sockfd, offset, count, NULL, &sbytes, 0);
+   if (sbytes > 0) {
+       if (off)
+           *off += sbytes;
+       else
+           lseek(filefd, sbytes, SEEK_CUR);
+       return (ssize_t)sbytes;
+   }
+
+   return (ssize_t)rc;
+}
+#else /* emulate sendfile using (read|pread) + write */
+static ssize_t pread_sendfile(int sockfd, int filefd, off_t *off, size_t count)
+{
+   size_t max_read = 16384;
+   void *buf;
+   ssize_t r;
+   ssize_t w;
+
+   max_read = count > max_read ? max_read : count;
+   buf = xmalloc(max_read);
+
+   do {
+       r = off ? pread(filefd, buf, max_read, *off) :
+                 read(filefd, buf, max_read);
+   } while (r < 0 && errno == EINTR);
+
+   if (r <= 0) {
+       int err = errno;
+       xfree(buf);
+       errno = err;
+       return r;
+   }
+   w = write(sockfd, buf, r);
+   if (w > 0 && off)
+       *off += w;
+   xfree(buf);
+   return w;
+}
+# define linux_sendfile(out_fd, in_fd, offset, count) \
+   pread_sendfile((out_fd),(in_fd),(offset),(count))
+#endif
+
+struct sf_args {
+   int dst_fd;
+   int src_fd;
+   off_t *off;
+   size_t count;
+};
+
+static VALUE sym_wait_writable;
+
+static VALUE nogvl_sf(void *ptr)
+{
+   struct sf_args *a = ptr;
+
+   return (VALUE)linux_sendfile(a->dst_fd, a->src_fd, a->off, a->count);
+}
+
+static VALUE lsf(VALUE mod, VALUE dst, VALUE src, VALUE src_off, VALUE count)
+{
+   off_t off = 0;
+   struct sf_args a;
+   ssize_t bytes;
+   int retried = 0;
+
+   a.off = NIL_P(src_off) ? NULL : (off = NUM2OFFT(src_off), &off);
+   a.count = NUM2SIZET(count);
+again:
+   a.src_fd = rb_sp_fileno(src);
+   a.dst_fd = rb_sp_fileno(dst);
+   bytes = (ssize_t)rb_sp_fd_region(nogvl_sf, &a, a.dst_fd);
+   if (bytes < 0) {
+       switch (errno) {
+       case EAGAIN:
+           return sym_wait_writable;
+       case ENOMEM:
+       case ENOBUFS:
+           if (!retried) {
+               rb_gc();
+               retried = 1;
+               goto again;
+           }
+       }
+       rb_sys_fail("sendfile");
+   }
+   return SSIZET2NUM(bytes);
+}
+
+void sleepy_penguin_init_sendfile(void)
+{
+   VALUE m = rb_define_module("SleepyPenguin");
+   rb_define_singleton_method(m, "__lsf", lsf, 4);
+   sym_wait_writable = ID2SYM(rb_intern("wait_writable"));
+}
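Note that __lsf reports EAGAIN back to Ruby as :wait_writable instead of blocking inside C, leaving it to the Ruby-level wrapper (in lib/sleepy_penguin.rb, not shown here) to wait on the socket and retry. For reference, the underlying non-blocking sendfile(2) contract looks roughly like this in plain C (Linux-only illustration, not code from the gem):

#include <sys/sendfile.h>
#include <poll.h>
#include <errno.h>

/* illustration: push `count` bytes from file_fd to a non-blocking socket,
 * sleeping in poll(2) whenever the socket buffer is full */
static int send_all(int sock_fd, int file_fd, off_t off, size_t count)
{
    while (count > 0) {
        ssize_t n = sendfile(sock_fd, file_fd, &off, count);

        if (n > 0) {
            count -= (size_t)n;
        } else if (n == 0) {
            break; /* hit EOF on the source file */
        } else if (errno == EAGAIN) {
            struct pollfd pfd = { .fd = sock_fd, .events = POLLOUT };
            poll(&pfd, 1, -1); /* wait until the socket is writable again */
        } else if (errno != EINTR) {
            return -1; /* hard error */
        }
    }
    return 0;
}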
data/ext/sleepy_penguin/sleepy_penguin.h
CHANGED
@@ -19,16 +19,6 @@ int rb_sp_io_closed(VALUE io);
 int rb_sp_fileno(VALUE io);
 void rb_sp_set_nonblock(int fd);
 
-#if defined(HAVE_RB_THREAD_BLOCKING_REGION) || \
-    defined(HAVE_RB_THREAD_IO_BLOCKING_REGION) || \
-    defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL)
-# define RB_SP_GREEN_THREAD 0
-# define blocking_io_prepare(fd) ((void)(fd))
-#else
-# define RB_SP_GREEN_THREAD 1
-# define blocking_io_prepare(fd) rb_sp_set_nonblock((fd))
-#endif
-
 #ifdef HAVE_RB_THREAD_IO_BLOCKING_REGION
 /* Ruby 1.9.3 and 2.0.0 */
 VALUE rb_thread_io_blocking_region(rb_blocking_function_t *, void *, int);
@@ -46,24 +36,7 @@ VALUE rb_thread_io_blocking_region(rb_blocking_function_t *, void *, int);
 # define rb_sp_fd_region(fn,data,fd) \
    rb_thread_blocking_region((fn),(data),RUBY_UBF_IO,NULL)
 #else
-
- * Ruby 1.8 does not have a GVL, we'll just enable signal interrupts
- * here in case we make interruptible syscalls.
- *
- * Note: epoll_wait with timeout=0 was interruptible until Linux 2.6.39
- */
-# include <rubysig.h>
-static inline VALUE fake_blocking_region(VALUE (*fn)(void *), void *data)
-{
-   VALUE rv;
-
-   TRAP_BEG;
-   rv = fn(data);
-   TRAP_END;
-
-   return rv;
-}
-# define rb_sp_fd_region(fn,data,fd) fake_blocking_region((fn),(data))
+# error Ruby <= 1.8 not supported
 #endif
 
 #define NODOC_CONST(klass,name,value) \
@@ -78,6 +51,7 @@ static inline VALUE fake_blocking_region(VALUE (*fn)(void *), void *data)
 typedef int rb_sp_waitfn(int fd);
 int rb_sp_wait(rb_sp_waitfn waiter, VALUE obj, int *fd);
 void *rb_sp_gettlsbuf(size_t *size);
+VALUE rb_sp_puttlsbuf(VALUE);
 
 /* Flexible array elements are standard in C99 */
 #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
@@ -90,4 +64,17 @@ void *rb_sp_gettlsbuf(size_t *size);
 # endif
 #endif
 
+int rb_sp_gc_for_fd(int err);
+
+#ifndef HAVE_COPY_FILE_RANGE
+# include <sys/syscall.h>
+# if !defined(__NR_copy_file_range) && defined(__linux__)
+#  if defined(__x86_64__)
+#   define __NR_copy_file_range 326
+#  elif defined(__i386__)
+#   define __NR_copy_file_range 377
+#  endif /* supported arches */
+# endif /* __NR_copy_file_range */
+#endif
+
 #endif /* SLEEPY_PENGUIN_H */
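The __NR_copy_file_range numbers above exist so the new cfr.c can fall back to a raw syscall(2) when the C library has no copy_file_range(2) wrapper (glibc only gained one in 2.27). A hedged sketch of what such a fallback would look like; the actual cfr.c is not part of this section:

#include <sys/syscall.h>
#include <unistd.h>

/* sketch: invoke copy_file_range(2) directly by syscall number when the
 * HAVE_COPY_FILE_RANGE check (presumably done by extconf.rb) fails */
static ssize_t my_copy_file_range(int fd_in, off_t *off_in,
                                  int fd_out, off_t *off_out,
                                  size_t len, unsigned int flags)
{
    return syscall(__NR_copy_file_range, fd_in, off_in,
                   fd_out, off_out, len, flags);
}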
data/ext/sleepy_penguin/sp_copy.h
ADDED
@@ -0,0 +1,33 @@
+/* common splice and copy_file_range-related definitions */
+
+#ifndef SSIZET2NUM
+# define SSIZET2NUM(x) LONG2NUM(x)
+#endif
+#ifndef NUM2SIZET
+# define NUM2SIZET(x) NUM2ULONG(x)
+#endif
+
+#if defined(HAVE_RB_THREAD_CALL_WITHOUT_GVL) && defined(HAVE_RUBY_THREAD_H)
+/* Ruby 2.0+ */
+# include <ruby/thread.h>
+# define WITHOUT_GVL(fn,a,ubf,b) \
+   rb_thread_call_without_gvl((fn),(a),(ubf),(b))
+#elif defined(HAVE_RB_THREAD_BLOCKING_REGION)
+typedef VALUE (*my_blocking_fn_t)(void*);
+# define WITHOUT_GVL(fn,a,ubf,b) \
+   rb_thread_blocking_region((my_blocking_fn_t)(fn),(a),(ubf),(b))
+
+#else /* Ruby 1.8 */
+# error Ruby 1.8 not supported
+#endif /* ! HAVE_RB_THREAD_BLOCKING_REGION */
+
+#define IO_RUN(fn,data) WITHOUT_GVL((fn),(data),RUBY_UBF_IO,0)
+
+struct copy_args {
+   int fd_in;
+   int fd_out;
+   off_t *off_in;
+   off_t *off_out;
+   size_t len;
+   unsigned flags;
+};
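sp_copy.h supplies the WITHOUT_GVL/IO_RUN wrappers plus the copy_args struct shared by the new splice.c and cfr.c. As a rough, hypothetical illustration (the real splice.c/cfr.c bodies are not shown in this section), a copy helper built on these definitions would run the blocking syscall outside the GVL roughly like this:

#define _GNU_SOURCE
#include <unistd.h>
#include "sp_copy.h"

/* sketch: the function handed to IO_RUN runs without the GVL held,
 * so it must not touch Ruby objects */
static void *nogvl_cfr(void *ptr)
{
    struct copy_args *a = ptr;
    ssize_t n = copy_file_range(a->fd_in, a->off_in,
                                a->fd_out, a->off_out,
                                a->len, a->flags);
    return (void *)(long)n;
}

static ssize_t run_copy(struct copy_args *a)
{
    /* IO_RUN passes RUBY_UBF_IO so the call remains interruptible */
    return (ssize_t)(long)IO_RUN(nogvl_cfr, a);
}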