nio4r 1.2.1 → 2.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/.github/workflows/workflow.yml +43 -0
- data/.gitignore +1 -0
- data/.rspec +0 -1
- data/.rubocop.yml +70 -31
- data/CHANGES.md +190 -42
- data/Gemfile +8 -4
- data/Guardfile +10 -0
- data/README.md +102 -147
- data/Rakefile +3 -4
- data/examples/echo_server.rb +3 -2
- data/ext/libev/Changes +44 -13
- data/ext/libev/README +2 -1
- data/ext/libev/ev.c +314 -225
- data/ext/libev/ev.h +90 -88
- data/ext/libev/ev_epoll.c +30 -16
- data/ext/libev/ev_kqueue.c +19 -9
- data/ext/libev/ev_linuxaio.c +642 -0
- data/ext/libev/ev_poll.c +19 -11
- data/ext/libev/ev_port.c +13 -6
- data/ext/libev/ev_select.c +4 -2
- data/ext/libev/ev_vars.h +14 -3
- data/ext/libev/ev_wrap.h +16 -0
- data/ext/nio4r/bytebuffer.c +429 -0
- data/ext/nio4r/extconf.rb +17 -30
- data/ext/nio4r/monitor.c +113 -49
- data/ext/nio4r/nio4r.h +11 -13
- data/ext/nio4r/org/nio4r/ByteBuffer.java +293 -0
- data/ext/nio4r/org/nio4r/Monitor.java +175 -0
- data/ext/nio4r/org/nio4r/Nio4r.java +22 -391
- data/ext/nio4r/org/nio4r/Selector.java +299 -0
- data/ext/nio4r/selector.c +155 -68
- data/lib/nio.rb +4 -4
- data/lib/nio/bytebuffer.rb +229 -0
- data/lib/nio/monitor.rb +73 -11
- data/lib/nio/selector.rb +64 -21
- data/lib/nio/version.rb +1 -1
- data/nio4r.gemspec +34 -20
- data/{tasks → rakelib}/extension.rake +4 -0
- data/{tasks → rakelib}/rspec.rake +2 -0
- data/{tasks → rakelib}/rubocop.rake +2 -0
- data/spec/nio/acceptables_spec.rb +5 -5
- data/spec/nio/bytebuffer_spec.rb +354 -0
- data/spec/nio/monitor_spec.rb +128 -79
- data/spec/nio/selectables/pipe_spec.rb +12 -3
- data/spec/nio/selectables/ssl_socket_spec.rb +61 -29
- data/spec/nio/selectables/tcp_socket_spec.rb +47 -34
- data/spec/nio/selectables/udp_socket_spec.rb +24 -7
- data/spec/nio/selector_spec.rb +65 -16
- data/spec/spec_helper.rb +12 -3
- data/spec/support/selectable_examples.rb +45 -18
- metadata +33 -23
- data/.rubocop_todo.yml +0 -35
- data/.travis.yml +0 -27
- data/LICENSE.txt +0 -20
- data/ext/libev/README.embed +0 -3
- data/ext/libev/test_libev_win32.c +0 -123
data/ext/libev/ev_poll.c
CHANGED
@@ -1,7 +1,7 @@
 /*
  * libev poll fd activity backend
  *
- * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2016,2019 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -39,11 +39,14 @@
 
 #include <poll.h>
 
-static void
-pollidx_init (int *base, int count)
+inline_size
+void
+array_needsize_pollidx (int *base, int offset, int count)
 {
-  /* consider using memset (.., -1, ...), which is practically guaranteed
-   * to work on all systems implementing poll */
+  /* using memset (.., -1, ...) is tempting, we we try
+   * to be ultraportable
+   */
+  base += offset;
   while (count--)
     *base++ = -1;
 }
@@ -56,14 +59,14 @@ poll_modify (EV_P_ int fd, int oev, int nev)
   if (oev == nev)
     return;
 
-  array_needsize (int, pollidxs, pollidxmax, fd + 1, pollidx_init);
+  array_needsize (int, pollidxs, pollidxmax, fd + 1, array_needsize_pollidx);
 
   idx = pollidxs [fd];
 
   if (idx < 0) /* need to allocate a new pollfd */
     {
       pollidxs [fd] = idx = pollcnt++;
-      array_needsize (struct pollfd, polls, pollmax, pollcnt, EMPTY2);
+      array_needsize (struct pollfd, polls, pollmax, pollcnt, array_needsize_noinit);
       polls [idx].fd = fd;
     }
 
@@ -107,14 +110,17 @@ poll_poll (EV_P_ ev_tstamp timeout)
   else
     for (p = polls; res; ++p)
       {
-        assert (("libev: poll() returned illegal result, broken BSD kernel?", p < polls + pollcnt));
+        assert (("libev: poll returned illegal result, broken BSD kernel?", p < polls + pollcnt));
 
         if (expect_false (p->revents)) /* this expect is debatable */
          {
            --res;
 
            if (expect_false (p->revents & POLLNVAL))
-             fd_kill (EV_A_ p->fd);
+             {
+               assert (("libev: poll found invalid fd in poll set", 0));
+               fd_kill (EV_A_ p->fd);
+             }
            else
              fd_event (
                EV_A_
@@ -126,7 +132,8 @@ poll_poll (EV_P_ ev_tstamp timeout)
      }
 }
 
-int
+inline_size
+int
 poll_init (EV_P_ int flags)
 {
   backend_mintime = 1e-3;
@@ -139,7 +146,8 @@ poll_init (EV_P_ int flags)
   return EVBACKEND_POLL;
 }
 
-void
+inline_size
+void
 poll_destroy (EV_P)
 {
   ev_free (pollidxs);
data/ext/libev/ev_port.c
CHANGED
@@ -1,7 +1,7 @@
 /*
  * libev solaris event port backend
  *
- * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2019 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -55,7 +55,8 @@
 #include <string.h>
 #include <errno.h>
 
-static void
+inline_speed
+void
 port_associate_and_check (EV_P_ int fd, int ev)
 {
   if (0 >
@@ -68,7 +69,10 @@ port_associate_and_check (EV_P_ int fd, int ev)
      )
    {
      if (errno == EBADFD)
-       fd_kill (EV_A_ fd);
+       {
+         assert (("libev: port_associate found invalid fd", errno != EBADFD));
+         fd_kill (EV_A_ fd);
+       }
      else
        ev_syserr ("(libev) port_associate");
    }
@@ -136,7 +140,8 @@ port_poll (EV_P_ ev_tstamp timeout)
    }
 }
 
-int
+inline_size
+int
 port_init (EV_P_ int flags)
 {
   /* Initialize the kernel queue */
@@ -163,13 +168,15 @@ port_init (EV_P_ int flags)
   return EVBACKEND_PORT;
 }
 
-void
+inline_size
+void
 port_destroy (EV_P)
 {
   ev_free (port_events);
 }
 
-void
+inline_size
+void
 port_fork (EV_P)
 {
   close (backend_fd);
data/ext/libev/ev_select.c
CHANGED
@@ -271,7 +271,8 @@ select_poll (EV_P_ ev_tstamp timeout)
 #endif
 }
 
-int
+inline_size
+int
 select_init (EV_P_ int flags)
 {
   backend_mintime = 1e-6;
@@ -300,7 +301,8 @@ select_init (EV_P_ int flags)
   return EVBACKEND_SELECT;
 }
 
-void
+inline_size
+void
 select_destroy (EV_P)
 {
   ev_free (vec_ri);
data/ext/libev/ev_vars.h
CHANGED
@@ -1,7 +1,7 @@
 /*
  * loop member variable declarations
  *
- * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2019 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -107,6 +107,17 @@ VARx(int, epoll_epermcnt)
 VARx(int, epoll_epermmax)
 #endif
 
+#if EV_USE_LINUXAIO || EV_GENWRAP
+VARx(aio_context_t, linuxaio_ctx)
+VARx(int, linuxaio_iteration)
+VARx(struct aniocb **, linuxaio_iocbps)
+VARx(int, linuxaio_iocbpmax)
+VARx(struct iocb **, linuxaio_submits)
+VARx(int, linuxaio_submitcnt)
+VARx(int, linuxaio_submitmax)
+VARx(ev_io, linuxaio_epoll_w)
+#endif
+
 #if EV_USE_KQUEUE || EV_GENWRAP
 VARx(pid_t, kqueue_fd_pid)
 VARx(struct kevent *, kqueue_changes)
@@ -195,8 +206,8 @@ VARx(unsigned int, loop_depth) /* #ev_run enters - #ev_run leaves */
 
 VARx(void *, userdata)
 /* C++ doesn't support the ev_loop_callback typedef here. stinks. */
-VAR (release_cb, void (*release_cb)(EV_P))
-VAR (acquire_cb, void (*acquire_cb)(EV_P))
+VAR (release_cb, void (*release_cb)(EV_P) EV_NOEXCEPT)
+VAR (acquire_cb, void (*acquire_cb)(EV_P) EV_NOEXCEPT)
 VAR (invoke_cb , ev_loop_callback invoke_cb)
 #endif
 
data/ext/libev/ev_wrap.h
CHANGED
@@ -50,6 +50,14 @@
 #define kqueue_eventmax ((loop)->kqueue_eventmax)
 #define kqueue_events ((loop)->kqueue_events)
 #define kqueue_fd_pid ((loop)->kqueue_fd_pid)
+#define linuxaio_ctx ((loop)->linuxaio_ctx)
+#define linuxaio_epoll_w ((loop)->linuxaio_epoll_w)
+#define linuxaio_iocbpmax ((loop)->linuxaio_iocbpmax)
+#define linuxaio_iocbps ((loop)->linuxaio_iocbps)
+#define linuxaio_iteration ((loop)->linuxaio_iteration)
+#define linuxaio_submitcnt ((loop)->linuxaio_submitcnt)
+#define linuxaio_submitmax ((loop)->linuxaio_submitmax)
+#define linuxaio_submits ((loop)->linuxaio_submits)
 #define loop_count ((loop)->loop_count)
 #define loop_depth ((loop)->loop_depth)
 #define loop_done ((loop)->loop_done)
@@ -149,6 +157,14 @@
 #undef kqueue_eventmax
 #undef kqueue_events
 #undef kqueue_fd_pid
+#undef linuxaio_ctx
+#undef linuxaio_epoll_w
+#undef linuxaio_iocbpmax
+#undef linuxaio_iocbps
+#undef linuxaio_iteration
+#undef linuxaio_submitcnt
+#undef linuxaio_submitmax
+#undef linuxaio_submits
 #undef loop_count
 #undef loop_depth
 #undef loop_done
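The `linuxaio_*` loop members and wrapper macros above plumb libev's new Linux AIO backend (the 642-line `ev_linuxaio.c` added in this release) into the per-loop state. Which backend a selector actually ends up on can be checked from Ruby; a minimal sketch, assuming the `NIO::Selector#backend` introspection method documented for nio4r 2.x:

```ruby
require "nio"

# Let nio4r/libev pick the best backend available on this platform
# (e.g. epoll on Linux, kqueue on BSD/macOS, poll/select elsewhere).
selector = NIO::Selector.new

# Report the chosen backend as a Symbol, e.g. :epoll.
puts selector.backend
```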
data/ext/nio4r/bytebuffer.c
ADDED
@@ -0,0 +1,429 @@
+#include "nio4r.h"
+
+static VALUE mNIO = Qnil;
+static VALUE cNIO_ByteBuffer = Qnil;
+static VALUE cNIO_ByteBuffer_OverflowError = Qnil;
+static VALUE cNIO_ByteBuffer_UnderflowError = Qnil;
+static VALUE cNIO_ByteBuffer_MarkUnsetError = Qnil;
+
+/* Allocator/deallocator */
+static VALUE NIO_ByteBuffer_allocate(VALUE klass);
+static void NIO_ByteBuffer_gc_mark(struct NIO_ByteBuffer *byteBuffer);
+static void NIO_ByteBuffer_free(struct NIO_ByteBuffer *byteBuffer);
+
+/* Methods */
+static VALUE NIO_ByteBuffer_initialize(VALUE self, VALUE capacity);
+static VALUE NIO_ByteBuffer_clear(VALUE self);
+static VALUE NIO_ByteBuffer_get_position(VALUE self);
+static VALUE NIO_ByteBuffer_set_position(VALUE self, VALUE new_position);
+static VALUE NIO_ByteBuffer_get_limit(VALUE self);
+static VALUE NIO_ByteBuffer_set_limit(VALUE self, VALUE new_limit);
+static VALUE NIO_ByteBuffer_capacity(VALUE self);
+static VALUE NIO_ByteBuffer_remaining(VALUE self);
+static VALUE NIO_ByteBuffer_full(VALUE self);
+static VALUE NIO_ByteBuffer_get(int argc, VALUE *argv, VALUE self);
+static VALUE NIO_ByteBuffer_fetch(VALUE self, VALUE index);
+static VALUE NIO_ByteBuffer_put(VALUE self, VALUE string);
+static VALUE NIO_ByteBuffer_write_to(VALUE self, VALUE file);
+static VALUE NIO_ByteBuffer_read_from(VALUE self, VALUE file);
+static VALUE NIO_ByteBuffer_flip(VALUE self);
+static VALUE NIO_ByteBuffer_rewind(VALUE self);
+static VALUE NIO_ByteBuffer_mark(VALUE self);
+static VALUE NIO_ByteBuffer_reset(VALUE self);
+static VALUE NIO_ByteBuffer_compact(VALUE self);
+static VALUE NIO_ByteBuffer_each(VALUE self);
+static VALUE NIO_ByteBuffer_inspect(VALUE self);
+
+#define MARK_UNSET -1
+
+void Init_NIO_ByteBuffer()
+{
+    mNIO = rb_define_module("NIO");
+    cNIO_ByteBuffer = rb_define_class_under(mNIO, "ByteBuffer", rb_cObject);
+    rb_define_alloc_func(cNIO_ByteBuffer, NIO_ByteBuffer_allocate);
+
+    cNIO_ByteBuffer_OverflowError = rb_define_class_under(cNIO_ByteBuffer, "OverflowError", rb_eIOError);
+    cNIO_ByteBuffer_UnderflowError = rb_define_class_under(cNIO_ByteBuffer, "UnderflowError", rb_eIOError);
+    cNIO_ByteBuffer_MarkUnsetError = rb_define_class_under(cNIO_ByteBuffer, "MarkUnsetError", rb_eIOError);
+
+    rb_include_module(cNIO_ByteBuffer, rb_mEnumerable);
+
+    rb_define_method(cNIO_ByteBuffer, "initialize", NIO_ByteBuffer_initialize, 1);
+    rb_define_method(cNIO_ByteBuffer, "clear", NIO_ByteBuffer_clear, 0);
+    rb_define_method(cNIO_ByteBuffer, "position", NIO_ByteBuffer_get_position, 0);
+    rb_define_method(cNIO_ByteBuffer, "position=", NIO_ByteBuffer_set_position, 1);
+    rb_define_method(cNIO_ByteBuffer, "limit", NIO_ByteBuffer_get_limit, 0);
+    rb_define_method(cNIO_ByteBuffer, "limit=", NIO_ByteBuffer_set_limit, 1);
+    rb_define_method(cNIO_ByteBuffer, "capacity", NIO_ByteBuffer_capacity, 0);
+    rb_define_method(cNIO_ByteBuffer, "size", NIO_ByteBuffer_capacity, 0);
+    rb_define_method(cNIO_ByteBuffer, "remaining", NIO_ByteBuffer_remaining, 0);
+    rb_define_method(cNIO_ByteBuffer, "full?", NIO_ByteBuffer_full, 0);
+    rb_define_method(cNIO_ByteBuffer, "get", NIO_ByteBuffer_get, -1);
+    rb_define_method(cNIO_ByteBuffer, "[]", NIO_ByteBuffer_fetch, 1);
+    rb_define_method(cNIO_ByteBuffer, "<<", NIO_ByteBuffer_put, 1);
+    rb_define_method(cNIO_ByteBuffer, "read_from", NIO_ByteBuffer_read_from, 1);
+    rb_define_method(cNIO_ByteBuffer, "write_to", NIO_ByteBuffer_write_to, 1);
+    rb_define_method(cNIO_ByteBuffer, "flip", NIO_ByteBuffer_flip, 0);
+    rb_define_method(cNIO_ByteBuffer, "rewind", NIO_ByteBuffer_rewind, 0);
+    rb_define_method(cNIO_ByteBuffer, "mark", NIO_ByteBuffer_mark, 0);
+    rb_define_method(cNIO_ByteBuffer, "reset", NIO_ByteBuffer_reset, 0);
+    rb_define_method(cNIO_ByteBuffer, "compact", NIO_ByteBuffer_compact, 0);
+    rb_define_method(cNIO_ByteBuffer, "each", NIO_ByteBuffer_each, 0);
+    rb_define_method(cNIO_ByteBuffer, "inspect", NIO_ByteBuffer_inspect, 0);
+}
+
+static VALUE NIO_ByteBuffer_allocate(VALUE klass)
+{
+    struct NIO_ByteBuffer *bytebuffer = (struct NIO_ByteBuffer *)xmalloc(sizeof(struct NIO_ByteBuffer));
+    bytebuffer->buffer = NULL;
+    return Data_Wrap_Struct(klass, NIO_ByteBuffer_gc_mark, NIO_ByteBuffer_free, bytebuffer);
+}
+
+static void NIO_ByteBuffer_gc_mark(struct NIO_ByteBuffer *buffer)
+{
+}
+
+static void NIO_ByteBuffer_free(struct NIO_ByteBuffer *buffer)
+{
+    if(buffer->buffer)
+        xfree(buffer->buffer);
+    xfree(buffer);
+}
+
+static VALUE NIO_ByteBuffer_initialize(VALUE self, VALUE capacity)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    buffer->capacity = NUM2INT(capacity);
+    buffer->buffer = xmalloc(buffer->capacity);
+
+    NIO_ByteBuffer_clear(self);
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_clear(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    memset(buffer->buffer, 0, buffer->capacity);
+
+    buffer->position = 0;
+    buffer->limit = buffer->capacity;
+    buffer->mark = MARK_UNSET;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_get_position(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return INT2NUM(buffer->position);
+}
+
+static VALUE NIO_ByteBuffer_set_position(VALUE self, VALUE new_position)
+{
+    int pos;
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    pos = NUM2INT(new_position);
+
+    if(pos < 0) {
+        rb_raise(rb_eArgError, "negative position given");
+    }
+
+    if(pos > buffer->limit) {
+        rb_raise(rb_eArgError, "specified position exceeds limit");
+    }
+
+    buffer->position = pos;
+
+    if(buffer->mark > buffer->position) {
+        buffer->mark = MARK_UNSET;
+    }
+
+    return new_position;
+}
+
+static VALUE NIO_ByteBuffer_get_limit(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return INT2NUM(buffer->limit);
+}
+
+static VALUE NIO_ByteBuffer_set_limit(VALUE self, VALUE new_limit)
+{
+    int lim;
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    lim = NUM2INT(new_limit);
+
+    if(lim < 0) {
+        rb_raise(rb_eArgError, "negative limit given");
+    }
+
+    if(lim > buffer->capacity) {
+        rb_raise(rb_eArgError, "specified limit exceeds capacity");
+    }
+
+    buffer->limit = lim;
+
+    if(buffer->position > lim) {
+        buffer->position = lim;
+    }
+
+    if(buffer->mark > lim) {
+        buffer->mark = MARK_UNSET;
+    }
+
+    return new_limit;
+}
+
+static VALUE NIO_ByteBuffer_capacity(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return INT2NUM(buffer->capacity);
+}
+
+static VALUE NIO_ByteBuffer_remaining(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return INT2NUM(buffer->limit - buffer->position);
+}
+
+static VALUE NIO_ByteBuffer_full(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return buffer->position == buffer->limit ? Qtrue : Qfalse;
+}
+
+static VALUE NIO_ByteBuffer_get(int argc, VALUE *argv, VALUE self)
+{
+    int len;
+    VALUE length, result;
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    rb_scan_args(argc, argv, "01", &length);
+
+    if(length == Qnil) {
+        len = buffer->limit - buffer->position;
+    } else {
+        len = NUM2INT(length);
+    }
+
+    if(len < 0) {
+        rb_raise(rb_eArgError, "negative length given");
+    }
+
+    if(len > buffer->limit - buffer->position) {
+        rb_raise(cNIO_ByteBuffer_UnderflowError, "not enough data in buffer");
+    }
+
+    result = rb_str_new(buffer->buffer + buffer->position, len);
+    buffer->position += len;
+
+    return result;
+}
+
+static VALUE NIO_ByteBuffer_fetch(VALUE self, VALUE index)
+{
+    int i;
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    i = NUM2INT(index);
+
+    if(i < 0) {
+        rb_raise(rb_eArgError, "negative index given");
+    }
+
+    if(i >= buffer->limit) {
+        rb_raise(rb_eArgError, "specified index exceeds limit");
+    }
+
+    return INT2NUM(buffer->buffer[i]);
+}
+
+static VALUE NIO_ByteBuffer_put(VALUE self, VALUE string)
+{
+    long length;
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    StringValue(string);
+    length = RSTRING_LEN(string);
+
+    if(length > buffer->limit - buffer->position) {
+        rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
+    }
+
+    memcpy(buffer->buffer + buffer->position, StringValuePtr(string), length);
+    buffer->position += length;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_read_from(VALUE self, VALUE io)
+{
+    struct NIO_ByteBuffer *buffer;
+    rb_io_t *fptr;
+    ssize_t nbytes, bytes_read;
+
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+    GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
+    rb_io_set_nonblock(fptr);
+
+    nbytes = buffer->limit - buffer->position;
+    if(nbytes == 0) {
+        rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
+    }
+
+    bytes_read = read(FPTR_TO_FD(fptr), buffer->buffer + buffer->position, nbytes);
+
+    if(bytes_read < 0) {
+        if(errno == EAGAIN) {
+            return INT2NUM(0);
+        } else {
+            rb_sys_fail("write");
+        }
+    }
+
+    buffer->position += bytes_read;
+
+    return INT2NUM(bytes_read);
+}
+
+static VALUE NIO_ByteBuffer_write_to(VALUE self, VALUE io)
+{
+    struct NIO_ByteBuffer *buffer;
+    rb_io_t *fptr;
+    ssize_t nbytes, bytes_written;
+
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+    GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
+    rb_io_set_nonblock(fptr);
+
+    nbytes = buffer->limit - buffer->position;
+    if(nbytes == 0) {
+        rb_raise(cNIO_ByteBuffer_UnderflowError, "no data remaining in buffer");
+    }
+
+    bytes_written = write(FPTR_TO_FD(fptr), buffer->buffer + buffer->position, nbytes);
+
+    if(bytes_written < 0) {
+        if(errno == EAGAIN) {
+            return INT2NUM(0);
+        } else {
+            rb_sys_fail("write");
+        }
+    }
+
+    buffer->position += bytes_written;
+
+    return INT2NUM(bytes_written);
+}
+
+static VALUE NIO_ByteBuffer_flip(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    buffer->limit = buffer->position;
+    buffer->position = 0;
+    buffer->mark = MARK_UNSET;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_rewind(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    buffer->position = 0;
+    buffer->mark = MARK_UNSET;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_mark(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    buffer->mark = buffer->position;
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_reset(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    if(buffer->mark < 0) {
+        rb_raise(cNIO_ByteBuffer_MarkUnsetError, "mark has not been set");
+    } else {
+        buffer->position = buffer->mark;
+    }
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_compact(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    memmove(buffer->buffer, buffer->buffer + buffer->position, buffer->limit - buffer->position);
+    buffer->position = buffer->limit - buffer->position;
+    buffer->limit = buffer->capacity;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_each(VALUE self)
+{
+    int i;
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    if(rb_block_given_p()) {
+        for(i = 0; i < buffer->limit; i++) {
+            rb_yield(INT2NUM(buffer->buffer[i]));
+        }
+    } else {
+        rb_raise(rb_eArgError, "no block given");
+    }
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_inspect(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return rb_sprintf(
+        "#<%s:%p @position=%d @limit=%d @capacity=%d>",
+        rb_class2name(CLASS_OF(self)),
+        (void*)self,
+        buffer->position,
+        buffer->limit,
+        buffer->capacity
+    );
+}
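Taken together, the methods registered in `Init_NIO_ByteBuffer` define the `NIO::ByteBuffer` API introduced during this 1.2.1 → 2.5.3 upgrade: a fixed-capacity byte buffer with Java NIO style `position`/`limit`/`mark` cursors. A minimal usage sketch, with behavior inferred from the C implementation above rather than from separate documentation:

```ruby
require "nio"

buf = NIO::ByteBuffer.new(16) # capacity 16: position = 0, limit = 16
buf << "hello"                # copies bytes in, advancing position to 5;
                              # overfilling raises NIO::ByteBuffer::OverflowError
buf.remaining                 # => 11 (limit - position)
buf.flip                      # limit = 5, position = 0: switch from writing to reading
buf.get(5)                    # => "hello"; reading past the limit raises
                              # NIO::ByteBuffer::UnderflowError
buf.clear                     # zeroes the buffer, resets position, limit and mark
```

`read_from` and `write_to` perform a single nonblocking `read(2)`/`write(2)` between the cursor window and an IO object, returning 0 when the call would block.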