nio4r 1.2.1-java → 2.0.0-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rspec +0 -1
- data/.rubocop.yml +31 -38
- data/.ruby-version +1 -0
- data/.travis.yml +15 -14
- data/CHANGES.md +75 -42
- data/Gemfile +10 -5
- data/Guardfile +10 -0
- data/LICENSE.txt +1 -1
- data/README.md +57 -161
- data/Rakefile +2 -1
- data/examples/echo_server.rb +1 -0
- data/ext/libev/Changes +4 -13
- data/ext/libev/ev.c +101 -74
- data/ext/libev/ev.h +3 -3
- data/ext/libev/ev_epoll.c +6 -3
- data/ext/libev/ev_kqueue.c +8 -4
- data/ext/libev/ev_poll.c +6 -3
- data/ext/libev/ev_port.c +8 -4
- data/ext/libev/ev_select.c +4 -2
- data/ext/nio4r/bytebuffer.c +421 -0
- data/ext/nio4r/extconf.rb +2 -10
- data/ext/nio4r/monitor.c +93 -46
- data/ext/nio4r/nio4r.h +11 -13
- data/ext/nio4r/org/nio4r/ByteBuffer.java +295 -0
- data/ext/nio4r/org/nio4r/Monitor.java +164 -0
- data/ext/nio4r/org/nio4r/Nio4r.java +22 -391
- data/ext/nio4r/org/nio4r/Selector.java +278 -0
- data/ext/nio4r/selector.c +55 -53
- data/lib/nio.rb +4 -3
- data/lib/nio/bytebuffer.rb +222 -0
- data/lib/nio/monitor.rb +64 -4
- data/lib/nio/selector.rb +52 -20
- data/lib/nio/version.rb +1 -1
- data/nio4r.gemspec +25 -19
- data/spec/nio/acceptables_spec.rb +6 -4
- data/spec/nio/bytebuffer_spec.rb +349 -0
- data/spec/nio/monitor_spec.rb +122 -79
- data/spec/nio/selectables/pipe_spec.rb +5 -1
- data/spec/nio/selectables/ssl_socket_spec.rb +15 -12
- data/spec/nio/selectables/tcp_socket_spec.rb +42 -31
- data/spec/nio/selectables/udp_socket_spec.rb +2 -0
- data/spec/nio/selector_spec.rb +10 -4
- data/spec/spec_helper.rb +24 -3
- data/spec/support/selectable_examples.rb +7 -5
- data/tasks/extension.rake +2 -0
- data/tasks/rspec.rake +2 -0
- data/tasks/rubocop.rake +2 -0
- metadata +21 -14
- data/.rubocop_todo.yml +0 -35
data/ext/libev/ev.h
CHANGED
@@ -515,10 +515,10 @@ enum {
 
 /* method bits to be ored together */
 enum {
-  EVBACKEND_SELECT  = 0x00000001U, /* about anywhere */
-  EVBACKEND_POLL    = 0x00000002U, /* !win */
+  EVBACKEND_SELECT  = 0x00000001U, /* available just about anywhere */
+  EVBACKEND_POLL    = 0x00000002U, /* !win, !aix, broken on osx */
   EVBACKEND_EPOLL   = 0x00000004U, /* linux */
-  EVBACKEND_KQUEUE  = 0x00000008U, /* bsd */
+  EVBACKEND_KQUEUE  = 0x00000008U, /* bsd, broken on osx */
   EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */
   EVBACKEND_PORT    = 0x00000020U, /* solaris 10 */
   EVBACKEND_ALL     = 0x0000003FU, /* all known backends */
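For context, a standalone sketch (not part of this diff) of how the EVBACKEND_* flags above are consumed through libev's public API; ev_supported_backends(), ev_recommended_backends(), ev_loop_new(), ev_backend(), and ev_loop_destroy() are standard libev 4.x calls.

/* Query compiled-in and recommended backends, then request one explicitly. */
#include <stdio.h>
#include <ev.h>

int main(void)
{
    unsigned int supported   = ev_supported_backends();
    unsigned int recommended = ev_recommended_backends();

    /* kqueue is typically compiled in on BSDs/macOS, but -- per the
       comment change above -- not necessarily recommended there */
    if (supported & EVBACKEND_KQUEUE)
        printf("kqueue available; recommended: %s\n",
               (recommended & EVBACKEND_KQUEUE) ? "yes" : "no");

    /* request a specific backend; fall back to libev's own choice */
    struct ev_loop *loop = ev_loop_new(EVBACKEND_EPOLL);
    if (!loop)
        loop = ev_loop_new(EVFLAG_AUTO);

    printf("backend in use: 0x%x\n", ev_backend(loop));
    ev_loop_destroy(loop);
    return 0;
}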
data/ext/libev/ev_epoll.c
CHANGED
@@ -235,7 +235,8 @@ epoll_poll (EV_P_ ev_tstamp timeout)
   }
 }
 
-int inline_size
+inline_size
+int
 epoll_init (EV_P_ int flags)
 {
 #ifdef EPOLL_CLOEXEC
@@ -260,14 +261,16 @@ epoll_init (EV_P_ int flags)
   return EVBACKEND_EPOLL;
 }
 
-void inline_size
+inline_size
+void
 epoll_destroy (EV_P)
 {
   ev_free (epoll_events);
   array_free (epoll_eperm, EMPTY);
 }
 
-void inline_size
+inline_size
+void
 epoll_fork (EV_P)
 {
   close (backend_fd);
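The same reordering of the inline_size/inline_speed annotation and the return type recurs in ev_kqueue.c, ev_poll.c, ev_port.c, and ev_select.c below. A hedged sketch of the likely motivation, assuming the macro expands to storage-class keywords as in libev's ev.c (the exact definition there varies with feature flags):

/* Illustrative only: if inline_size expands roughly like this ... */
#define inline_size static inline

/* ... then the old one-line form put "static" after the return type,
 * expanding to "int static inline", which GCC flags with
 * -Wold-style-declaration:
 *
 *   int inline_size
 *   old_init (int flags) { return flags; }
 */

/* The new two-line form expands to "static inline int", which is clean C: */
inline_size
int
new_init (int flags)
{
    return flags;
}

int main(void)
{
    return new_init (0);
}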
data/ext/libev/ev_kqueue.c
CHANGED
@@ -43,7 +43,8 @@
 #include <string.h>
 #include <errno.h>
 
-void inline_speed
+inline_speed
+void
 kqueue_change (EV_P_ int fd, int filter, int flags, int fflags)
 {
   ++kqueue_changecnt;
@@ -152,7 +153,8 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
   }
 }
 
-int inline_size
+inline_size
+int
 kqueue_init (EV_P_ int flags)
 {
   /* initialize the kernel queue */
@@ -176,14 +178,16 @@ kqueue_init (EV_P_ int flags)
   return EVBACKEND_KQUEUE;
 }
 
-void inline_size
+inline_size
+void
 kqueue_destroy (EV_P)
 {
   ev_free (kqueue_events);
   ev_free (kqueue_changes);
 }
 
-void inline_size
+inline_size
+void
 kqueue_fork (EV_P)
 {
   /* some BSD kernels don't just destroy the kqueue itself,
data/ext/libev/ev_poll.c
CHANGED
@@ -39,7 +39,8 @@
 
 #include <poll.h>
 
-void inline_size
+inline_size
+void
 pollidx_init (int *base, int count)
 {
   /* consider using memset (.., -1, ...), which is practically guaranteed
@@ -126,7 +127,8 @@ poll_poll (EV_P_ ev_tstamp timeout)
   }
 }
 
-int inline_size
+inline_size
+int
 poll_init (EV_P_ int flags)
 {
   backend_mintime = 1e-3;
@@ -139,7 +141,8 @@ poll_init (EV_P_ int flags)
   return EVBACKEND_POLL;
 }
 
-void inline_size
+inline_size
+void
 poll_destroy (EV_P)
 {
   ev_free (pollidxs);
data/ext/libev/ev_port.c
CHANGED
@@ -55,7 +55,8 @@
 #include <string.h>
 #include <errno.h>
 
-void inline_speed
+inline_speed
+void
 port_associate_and_check (EV_P_ int fd, int ev)
 {
   if (0 >
@@ -136,7 +137,8 @@ port_poll (EV_P_ ev_tstamp timeout)
   }
 }
 
-int inline_size
+inline_size
+int
 port_init (EV_P_ int flags)
 {
   /* Initialize the kernel queue */
@@ -163,13 +165,15 @@ port_init (EV_P_ int flags)
   return EVBACKEND_PORT;
 }
 
-void inline_size
+inline_size
+void
 port_destroy (EV_P)
 {
   ev_free (port_events);
 }
 
-void inline_size
+inline_size
+void
 port_fork (EV_P)
 {
   close (backend_fd);
data/ext/libev/ev_select.c
CHANGED
@@ -271,7 +271,8 @@ select_poll (EV_P_ ev_tstamp timeout)
 #endif
 }
 
-int inline_size
+inline_size
+int
 select_init (EV_P_ int flags)
 {
   backend_mintime = 1e-6;
@@ -300,7 +301,8 @@ select_init (EV_P_ int flags)
   return EVBACKEND_SELECT;
 }
 
-void inline_size
+inline_size
+void
 select_destroy (EV_P)
 {
   ev_free (vec_ri);
data/ext/nio4r/bytebuffer.c
ADDED
@@ -0,0 +1,421 @@
+#include "nio4r.h"
+
+static VALUE mNIO = Qnil;
+static VALUE cNIO_ByteBuffer = Qnil;
+static VALUE cNIO_ByteBuffer_OverflowError = Qnil;
+static VALUE cNIO_ByteBuffer_UnderflowError = Qnil;
+static VALUE cNIO_ByteBuffer_MarkUnsetError = Qnil;
+
+/* Allocator/deallocator */
+static VALUE NIO_ByteBuffer_allocate(VALUE klass);
+static void NIO_ByteBuffer_gc_mark(struct NIO_ByteBuffer *byteBuffer);
+static void NIO_ByteBuffer_free(struct NIO_ByteBuffer *byteBuffer);
+
+/* Methods */
+static VALUE NIO_ByteBuffer_initialize(VALUE self, VALUE capacity);
+static VALUE NIO_ByteBuffer_clear(VALUE self);
+static VALUE NIO_ByteBuffer_get_position(VALUE self);
+static VALUE NIO_ByteBuffer_set_position(VALUE self, VALUE new_position);
+static VALUE NIO_ByteBuffer_get_limit(VALUE self);
+static VALUE NIO_ByteBuffer_set_limit(VALUE self, VALUE new_limit);
+static VALUE NIO_ByteBuffer_capacity(VALUE self);
+static VALUE NIO_ByteBuffer_remaining(VALUE self);
+static VALUE NIO_ByteBuffer_full(VALUE self);
+static VALUE NIO_ByteBuffer_get(int argc, VALUE *argv, VALUE self);
+static VALUE NIO_ByteBuffer_fetch(VALUE self, VALUE index);
+static VALUE NIO_ByteBuffer_put(VALUE self, VALUE string);
+static VALUE NIO_ByteBuffer_write_to(VALUE self, VALUE file);
+static VALUE NIO_ByteBuffer_read_from(VALUE self, VALUE file);
+static VALUE NIO_ByteBuffer_flip(VALUE self);
+static VALUE NIO_ByteBuffer_rewind(VALUE self);
+static VALUE NIO_ByteBuffer_mark(VALUE self);
+static VALUE NIO_ByteBuffer_reset(VALUE self);
+static VALUE NIO_ByteBuffer_compact(VALUE self);
+static VALUE NIO_ByteBuffer_each(VALUE self);
+static VALUE NIO_ByteBuffer_inspect(VALUE self);
+
+#define MARK_UNSET -1
+
+void Init_NIO_ByteBuffer()
+{
+    mNIO = rb_define_module("NIO");
+    cNIO_ByteBuffer = rb_define_class_under(mNIO, "ByteBuffer", rb_cObject);
+    rb_define_alloc_func(cNIO_ByteBuffer, NIO_ByteBuffer_allocate);
+
+    cNIO_ByteBuffer_OverflowError = rb_define_class_under(cNIO_ByteBuffer, "OverflowError", rb_eIOError);
+    cNIO_ByteBuffer_UnderflowError = rb_define_class_under(cNIO_ByteBuffer, "UnderflowError", rb_eIOError);
+    cNIO_ByteBuffer_MarkUnsetError = rb_define_class_under(cNIO_ByteBuffer, "MarkUnsetError", rb_eIOError);
+
+    rb_include_module(cNIO_ByteBuffer, rb_mEnumerable);
+
+    rb_define_method(cNIO_ByteBuffer, "initialize", NIO_ByteBuffer_initialize, 1);
+    rb_define_method(cNIO_ByteBuffer, "clear", NIO_ByteBuffer_clear, 0);
+    rb_define_method(cNIO_ByteBuffer, "position", NIO_ByteBuffer_get_position, 0);
+    rb_define_method(cNIO_ByteBuffer, "position=", NIO_ByteBuffer_set_position, 1);
+    rb_define_method(cNIO_ByteBuffer, "limit", NIO_ByteBuffer_get_limit, 0);
+    rb_define_method(cNIO_ByteBuffer, "limit=", NIO_ByteBuffer_set_limit, 1);
+    rb_define_method(cNIO_ByteBuffer, "capacity", NIO_ByteBuffer_capacity, 0);
+    rb_define_method(cNIO_ByteBuffer, "size", NIO_ByteBuffer_capacity, 0);
+    rb_define_method(cNIO_ByteBuffer, "remaining", NIO_ByteBuffer_remaining, 0);
+    rb_define_method(cNIO_ByteBuffer, "full?", NIO_ByteBuffer_full, 0);
+    rb_define_method(cNIO_ByteBuffer, "get", NIO_ByteBuffer_get, -1);
+    rb_define_method(cNIO_ByteBuffer, "[]", NIO_ByteBuffer_fetch, 1);
+    rb_define_method(cNIO_ByteBuffer, "<<", NIO_ByteBuffer_put, 1);
+    rb_define_method(cNIO_ByteBuffer, "read_from", NIO_ByteBuffer_read_from, 1);
+    rb_define_method(cNIO_ByteBuffer, "write_to", NIO_ByteBuffer_write_to, 1);
+    rb_define_method(cNIO_ByteBuffer, "flip", NIO_ByteBuffer_flip, 0);
+    rb_define_method(cNIO_ByteBuffer, "rewind", NIO_ByteBuffer_rewind, 0);
+    rb_define_method(cNIO_ByteBuffer, "mark", NIO_ByteBuffer_mark, 0);
+    rb_define_method(cNIO_ByteBuffer, "reset", NIO_ByteBuffer_reset, 0);
+    rb_define_method(cNIO_ByteBuffer, "compact", NIO_ByteBuffer_compact, 0);
+    rb_define_method(cNIO_ByteBuffer, "each", NIO_ByteBuffer_each, 0);
+    rb_define_method(cNIO_ByteBuffer, "inspect", NIO_ByteBuffer_inspect, 0);
+}
+
+static VALUE NIO_ByteBuffer_allocate(VALUE klass)
+{
+    struct NIO_ByteBuffer *bytebuffer = (struct NIO_ByteBuffer *)xmalloc(sizeof(struct NIO_ByteBuffer));
+    return Data_Wrap_Struct(klass, NIO_ByteBuffer_gc_mark, NIO_ByteBuffer_free, bytebuffer);
+}
+
+static void NIO_ByteBuffer_gc_mark(struct NIO_ByteBuffer *buffer)
+{
+}
+
+static void NIO_ByteBuffer_free(struct NIO_ByteBuffer *buffer)
+{
+    xfree(buffer);
+}
+
+static VALUE NIO_ByteBuffer_initialize(VALUE self, VALUE capacity)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    buffer->capacity = NUM2INT(capacity);
+    buffer->buffer = xmalloc(buffer->capacity);
+
+    NIO_ByteBuffer_clear(self);
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_clear(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    memset(buffer->buffer, 0, buffer->capacity);
+
+    buffer->position = 0;
+    buffer->limit = buffer->capacity;
+    buffer->mark = MARK_UNSET;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_get_position(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return INT2NUM(buffer->position);
+}
+
+static VALUE NIO_ByteBuffer_set_position(VALUE self, VALUE new_position)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    int pos = NUM2INT(new_position);
+
+    if(pos < 0) {
+        rb_raise(rb_eArgError, "negative position given");
+    }
+
+    if(pos > buffer->limit) {
+        rb_raise(rb_eArgError, "specified position exceeds limit");
+    }
+
+    buffer->position = pos;
+
+    if(buffer->mark > buffer->position) {
+        buffer->mark = MARK_UNSET;
+    }
+
+    return new_position;
+}
+
+static VALUE NIO_ByteBuffer_get_limit(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return INT2NUM(buffer->limit);
+}
+
+static VALUE NIO_ByteBuffer_set_limit(VALUE self, VALUE new_limit)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    int lim = NUM2INT(new_limit);
+
+    if(lim < 0) {
+        rb_raise(rb_eArgError, "negative limit given");
+    }
+
+    if(lim > buffer->capacity) {
+        rb_raise(rb_eArgError, "specified limit exceeds capacity");
+    }
+
+    buffer->limit = lim;
+
+    if(buffer->position > lim) {
+        buffer->position = lim;
+    }
+
+    if(buffer->mark > lim) {
+        buffer->mark = MARK_UNSET;
+    }
+
+    return new_limit;
+}
+
+static VALUE NIO_ByteBuffer_capacity(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return INT2NUM(buffer->capacity);
+}
+
+static VALUE NIO_ByteBuffer_remaining(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return INT2NUM(buffer->limit - buffer->position);
+}
+
+static VALUE NIO_ByteBuffer_full(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return buffer->position == buffer->limit ? Qtrue : Qfalse;
+}
+
+static VALUE NIO_ByteBuffer_get(int argc, VALUE *argv, VALUE self)
+{
+    int len;
+    VALUE length, result;
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    rb_scan_args(argc, argv, "01", &length);
+
+    if(length == Qnil) {
+        len = buffer->limit - buffer->position;
+    } else {
+        len = NUM2INT(length);
+    }
+
+    if(len < 0) {
+        rb_raise(rb_eArgError, "negative length given");
+    }
+
+    if(len > buffer->limit - buffer->position) {
+        rb_raise(cNIO_ByteBuffer_UnderflowError, "not enough data in buffer");
+    }
+
+    result = rb_str_new(buffer->buffer + buffer->position, len);
+    buffer->position += len;
+
+    return result;
+}
+
+static VALUE NIO_ByteBuffer_fetch(VALUE self, VALUE index)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    int i = NUM2INT(index);
+
+    if(i < 0) {
+        rb_raise(rb_eArgError, "negative index given");
+    }
+
+    if(i >= buffer->limit) {
+        rb_raise(rb_eArgError, "specified index exceeds limit");
+    }
+
+    return INT2NUM(buffer->buffer[i]);
+}
+
+static VALUE NIO_ByteBuffer_put(VALUE self, VALUE string)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    long length = RSTRING_LEN(string);
+
+    if(length > buffer->limit - buffer->position) {
+        rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
+    }
+
+    memcpy(buffer->buffer + buffer->position, StringValuePtr(string), length);
+    buffer->position += length;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_read_from(VALUE self, VALUE io)
+{
+    struct NIO_ByteBuffer *buffer;
+    rb_io_t *fptr;
+    ssize_t nbytes, bytes_read;
+
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+    GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
+    rb_io_set_nonblock(fptr);
+
+    nbytes = buffer->limit - buffer->position;
+    if(nbytes == 0) {
+        rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
+    }
+
+    bytes_read = read(FPTR_TO_FD(fptr), buffer->buffer + buffer->position, nbytes);
+
+    if(bytes_read < 0) {
+        if(errno == EAGAIN) {
+            return INT2NUM(0);
+        } else {
+            rb_sys_fail("write");
+        }
+    }
+
+    buffer->position += bytes_read;
+
+    return INT2NUM(bytes_read);
+}
+
+static VALUE NIO_ByteBuffer_write_to(VALUE self, VALUE io)
+{
+    struct NIO_ByteBuffer *buffer;
+    rb_io_t *fptr;
+    ssize_t nbytes, bytes_written;
+
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+    GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
+    rb_io_set_nonblock(fptr);
+
+    nbytes = buffer->limit - buffer->position;
+    if(nbytes == 0) {
+        rb_raise(cNIO_ByteBuffer_UnderflowError, "no data remaining in buffer");
+    }
+
+    bytes_written = write(FPTR_TO_FD(fptr), buffer->buffer + buffer->position, nbytes);
+
+    if(bytes_written < 0) {
+        if(errno == EAGAIN) {
+            return INT2NUM(0);
+        } else {
+            rb_sys_fail("write");
+        }
+    }
+
+    buffer->position += bytes_written;
+
+    return INT2NUM(bytes_written);
+}
+
+static VALUE NIO_ByteBuffer_flip(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    buffer->limit = buffer->position;
+    buffer->position = 0;
+    buffer->mark = MARK_UNSET;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_rewind(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    buffer->position = 0;
+    buffer->mark = MARK_UNSET;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_mark(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    buffer->mark = buffer->position;
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_reset(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    if(buffer->mark < 0) {
+        rb_raise(cNIO_ByteBuffer_MarkUnsetError, "mark has not been set");
+    } else {
+        buffer->position = buffer->mark;
+    }
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_compact(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    memmove(buffer->buffer, buffer->buffer + buffer->position, buffer->limit - buffer->position);
+    buffer->position = buffer->limit - buffer->position;
+    buffer->limit = buffer->capacity;
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_each(VALUE self)
+{
+    int i;
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    if(rb_block_given_p()) {
+        for(i = 0; i < buffer->limit; i++) {
+            rb_yield(INT2NUM(buffer->buffer[i]));
+        }
+    } else {
+        rb_raise(rb_eArgError, "no block given");
+    }
+
+    return self;
+}
+
+static VALUE NIO_ByteBuffer_inspect(VALUE self)
+{
+    struct NIO_ByteBuffer *buffer;
+    Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
+
+    return rb_sprintf(
+        "#<%s:%p @position=%d @limit=%d @capacity=%d>",
+        rb_class2name(CLASS_OF(self)),
+        (void*)self,
+        buffer->position,
+        buffer->limit,
+        buffer->capacity
+    );
+}
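A hedged sketch (not shipped in the package) of exercising the new NIO::ByteBuffer from C through the Ruby C API. The method names (new, <<, flip, get) are exactly those registered in Init_NIO_ByteBuffer above; bytebuffer_demo and Init_demo are hypothetical names chosen for illustration.

#include <ruby.h>

static VALUE bytebuffer_demo(VALUE self)
{
    /* look up the class registered by Init_NIO_ByteBuffer */
    VALUE klass = rb_path2class("NIO::ByteBuffer");

    /* NIO::ByteBuffer.new(16) -- capacity of 16 bytes */
    VALUE buf = rb_funcall(klass, rb_intern("new"), 1, INT2NUM(16));

    /* write into the buffer, then flip to switch from writing to reading:
       flip sets limit = position and position = 0 (see NIO_ByteBuffer_flip) */
    rb_funcall(buf, rb_intern("<<"), 1, rb_str_new_cstr("hello"));
    rb_funcall(buf, rb_intern("flip"), 0);

    /* get with no argument returns everything between position and limit,
       so this yields "hello" */
    return rb_funcall(buf, rb_intern("get"), 0);
}

void Init_demo(void)
{
    rb_require("nio");
    rb_define_global_function("bytebuffer_demo", bytebuffer_demo, 0);
}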