nio4r 2.4.0 → 2.5.7
This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/workflows/workflow.yml +47 -0
- data/.rubocop.yml +30 -11
- data/CHANGES.md +63 -0
- data/Gemfile +1 -1
- data/README.md +57 -30
- data/examples/echo_server.rb +2 -2
- data/ext/libev/Changes +90 -2
- data/ext/libev/README +2 -1
- data/ext/libev/ev.c +708 -247
- data/ext/libev/ev.h +33 -29
- data/ext/libev/ev_epoll.c +41 -28
- data/ext/libev/ev_iouring.c +694 -0
- data/ext/libev/ev_kqueue.c +15 -9
- data/ext/libev/ev_linuxaio.c +620 -0
- data/ext/libev/ev_poll.c +19 -14
- data/ext/libev/ev_port.c +8 -5
- data/ext/libev/ev_select.c +6 -6
- data/ext/libev/ev_vars.h +46 -1
- data/ext/libev/ev_win32.c +2 -2
- data/ext/libev/ev_wrap.h +72 -0
- data/ext/nio4r/.clang-format +16 -0
- data/ext/nio4r/bytebuffer.c +27 -28
- data/ext/nio4r/extconf.rb +9 -0
- data/ext/nio4r/libev.h +1 -3
- data/ext/nio4r/monitor.c +34 -31
- data/ext/nio4r/nio4r.h +7 -12
- data/ext/nio4r/org/nio4r/ByteBuffer.java +2 -0
- data/ext/nio4r/org/nio4r/Monitor.java +1 -0
- data/ext/nio4r/org/nio4r/Selector.java +13 -11
- data/ext/nio4r/selector.c +66 -51
- data/lib/nio.rb +20 -1
- data/lib/nio/bytebuffer.rb +4 -0
- data/lib/nio/monitor.rb +1 -1
- data/lib/nio/selector.rb +12 -10
- data/lib/nio/version.rb +1 -1
- data/nio4r.gemspec +10 -2
- data/spec/nio/bytebuffer_spec.rb +0 -1
- data/spec/nio/selectables/ssl_socket_spec.rb +3 -1
- data/spec/nio/selectables/udp_socket_spec.rb +2 -2
- data/spec/nio/selector_spec.rb +27 -5
- data/spec/spec_helper.rb +2 -0
- metadata +17 -12
- data/.travis.yml +0 -29
- data/Guardfile +0 -10
- data/LICENSE.txt +0 -20
- data/appveyor.yml +0 -40
data/ext/libev/ev_poll.c
CHANGED
@@ -1,7 +1,7 @@
 /*
  * libev poll fd activity backend
  *
- * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2016,2019 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -41,10 +41,12 @@
 
 inline_size
 void
-
+array_needsize_pollidx (int *base, int offset, int count)
 {
-  /*
-   * to
+  /* using memset (.., -1, ...) is tempting, we we try
+   * to be ultraportable
+   */
+  base += offset;
   while (count--)
     *base++ = -1;
 }
@@ -57,14 +59,14 @@ poll_modify (EV_P_ int fd, int oev, int nev)
   if (oev == nev)
     return;
 
-  array_needsize (int, pollidxs, pollidxmax, fd + 1,
+  array_needsize (int, pollidxs, pollidxmax, fd + 1, array_needsize_pollidx);
 
   idx = pollidxs [fd];
 
   if (idx < 0) /* need to allocate a new pollfd */
     {
       pollidxs [fd] = idx = pollcnt++;
-      array_needsize (struct pollfd, polls, pollmax, pollcnt,
+      array_needsize (struct pollfd, polls, pollmax, pollcnt, array_needsize_noinit);
       polls [idx].fd = fd;
     }
 
@@ -78,7 +80,7 @@ poll_modify (EV_P_ int fd, int oev, int nev)
     {
       pollidxs [fd] = -1;
 
-      if (
+      if (ecb_expect_true (idx < --pollcnt))
         {
           polls [idx] = polls [pollcnt];
           pollidxs [polls [idx].fd] = idx;
@@ -93,10 +95,10 @@ poll_poll (EV_P_ ev_tstamp timeout)
   int res;
 
   EV_RELEASE_CB;
-  res = poll (polls, pollcnt, timeout
+  res = poll (polls, pollcnt, EV_TS_TO_MSEC (timeout));
   EV_ACQUIRE_CB;
 
-  if (
+  if (ecb_expect_false (res < 0))
     {
       if (errno == EBADF)
         fd_ebadf (EV_A);
@@ -108,14 +110,17 @@ poll_poll (EV_P_ ev_tstamp timeout)
   else
     for (p = polls; res; ++p)
       {
-        assert (("libev: poll
+        assert (("libev: poll returned illegal result, broken BSD kernel?", p < polls + pollcnt));
 
-        if (
+        if (ecb_expect_false (p->revents)) /* this expect is debatable */
          {
            --res;
 
-            if (
-
+            if (ecb_expect_false (p->revents & POLLNVAL))
+              {
+                assert (("libev: poll found invalid fd in poll set", 0));
+                fd_kill (EV_A_ p->fd);
+              }
            else
              fd_event (
                EV_A_
@@ -131,7 +136,7 @@ inline_size
 int
 poll_init (EV_P_ int flags)
 {
-  backend_mintime = 1e-3;
+  backend_mintime = EV_TS_CONST (1e-3);
   backend_modify = poll_modify;
   backend_poll = poll_poll;
 
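Two families of macros recur throughout the libev hunks in this release: the ecb_expect_* branch hints that replace the older expect_true/expect_false names, and the EV_TS_* helpers that replace open-coded second-to-millisecond math. The sketch below shows roughly what they amount to; it is based on general libecb/libev conventions rather than on code contained in this diff, so treat the exact definitions as assumptions.

/* Approximate sketch of the macros referenced in the hunks above and below.
 * The authoritative definitions live in libev's ecb.h and ev.c. */
#ifdef __GNUC__
#define ecb_expect(expr, value) __builtin_expect ((expr), (value))
#else
#define ecb_expect(expr, value) (expr)
#endif
#define ecb_expect_false(expr) ecb_expect (!!(expr), 0) /* branch is unlikely */
#define ecb_expect_true(expr)  ecb_expect (!!(expr), 1) /* branch is likely   */

/* With the default double-based ev_tstamp these are thin wrappers; they keep
 * the backends ready for an integer timestamp representation. */
#define EV_TS_CONST(nv)      (nv)
#define EV_TS_TO_MSEC(a)     ((a) * 1e3 + 0.9999) /* seconds -> ms, rounded up */
#define EV_TS_FROM_USEC(us)  ((us) * 1e-6)        /* microseconds -> seconds   */

Under those assumptions, poll (polls, pollcnt, EV_TS_TO_MSEC (timeout)) passes a millisecond count rounded up, which avoids busy-looping when the remaining timeout is a fraction of a millisecond.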
data/ext/libev/ev_port.c
CHANGED
@@ -1,7 +1,7 @@
 /*
  * libev solaris event port backend
  *
- * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2019 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -69,7 +69,10 @@ port_associate_and_check (EV_P_ int fd, int ev)
     )
     {
       if (errno == EBADFD)
-
+        {
+          assert (("libev: port_associate found invalid fd", errno != EBADFD));
+          fd_kill (EV_A_ fd);
+        }
       else
         ev_syserr ("(libev) port_associate");
     }
@@ -129,7 +132,7 @@ port_poll (EV_P_ ev_tstamp timeout)
         }
     }
 
-  if (
+  if (ecb_expect_false (nget == port_eventmax))
     {
       ev_free (port_events);
       port_eventmax = array_nextsize (sizeof (port_event_t), port_eventmax, port_eventmax + 1);
@@ -151,11 +154,11 @@ port_init (EV_P_ int flags)
 
   /* if my reading of the opensolaris kernel sources are correct, then
    * opensolaris does something very stupid: it checks if the time has already
-   * elapsed and doesn't round up if that is the case,
+   * elapsed and doesn't round up if that is the case, otherwise it DOES round
    * up. Since we can't know what the case is, we need to guess by using a
    * "large enough" timeout. Normally, 1e-9 would be correct.
    */
-  backend_mintime = 1e-3; /* needed to compensate for port_getn returning early */
+  backend_mintime = EV_TS_CONST (1e-3); /* needed to compensate for port_getn returning early */
   backend_modify = port_modify;
   backend_poll = port_poll;
 
data/ext/libev/ev_select.c
CHANGED
@@ -108,7 +108,7 @@ select_modify (EV_P_ int fd, int oev, int nev)
       int word = fd / NFDBITS;
       fd_mask mask = 1UL << (fd % NFDBITS);
 
-      if (
+      if (ecb_expect_false (vec_max <= word))
        {
          int new_max = word + 1;
 
@@ -171,7 +171,7 @@ select_poll (EV_P_ ev_tstamp timeout)
 #endif
   EV_ACQUIRE_CB;
 
-  if (
+  if (ecb_expect_false (res < 0))
    {
 #if EV_SELECT_IS_WINSOCKET
      errno = WSAGetLastError ();
@@ -197,7 +197,7 @@ select_poll (EV_P_ ev_tstamp timeout)
       {
         if (timeout)
           {
-            unsigned long ms = timeout
+            unsigned long ms = EV_TS_TO_MSEC (timeout);
             Sleep (ms ? ms : 1);
           }
 
@@ -236,7 +236,7 @@ select_poll (EV_P_ ev_tstamp timeout)
         if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE;
 #endif
 
-        if (
+        if (ecb_expect_true (events))
          fd_event (EV_A_ fd, events);
       }
   }
@@ -262,7 +262,7 @@ select_poll (EV_P_ ev_tstamp timeout)
           events |= word_r & mask ? EV_READ : 0;
           events |= word_w & mask ? EV_WRITE : 0;
 
-          if (
+          if (ecb_expect_true (events))
            fd_event (EV_A_ word * NFDBITS + bit, events);
         }
     }
@@ -275,7 +275,7 @@ inline_size
 int
 select_init (EV_P_ int flags)
 {
-  backend_mintime = 1e-6;
+  backend_mintime = EV_TS_CONST (1e-6);
   backend_modify = select_modify;
   backend_poll = select_poll;
 
data/ext/libev/ev_vars.h
CHANGED
@@ -1,7 +1,7 @@
 /*
  * loop member variable declarations
  *
- * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2019 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -107,6 +107,46 @@ VARx(int, epoll_epermcnt)
 VARx(int, epoll_epermmax)
 #endif
 
+#if EV_USE_LINUXAIO || EV_GENWRAP
+VARx(aio_context_t, linuxaio_ctx)
+VARx(int, linuxaio_iteration)
+VARx(struct aniocb **, linuxaio_iocbps)
+VARx(int, linuxaio_iocbpmax)
+VARx(struct iocb **, linuxaio_submits)
+VARx(int, linuxaio_submitcnt)
+VARx(int, linuxaio_submitmax)
+VARx(ev_io, linuxaio_epoll_w)
+#endif
+
+#if EV_USE_IOURING || EV_GENWRAP
+VARx(int, iouring_fd)
+VARx(unsigned, iouring_to_submit);
+VARx(int, iouring_entries)
+VARx(int, iouring_max_entries)
+VARx(void *, iouring_sq_ring)
+VARx(void *, iouring_cq_ring)
+VARx(void *, iouring_sqes)
+VARx(uint32_t, iouring_sq_ring_size)
+VARx(uint32_t, iouring_cq_ring_size)
+VARx(uint32_t, iouring_sqes_size)
+VARx(uint32_t, iouring_sq_head)
+VARx(uint32_t, iouring_sq_tail)
+VARx(uint32_t, iouring_sq_ring_mask)
+VARx(uint32_t, iouring_sq_ring_entries)
+VARx(uint32_t, iouring_sq_flags)
+VARx(uint32_t, iouring_sq_dropped)
+VARx(uint32_t, iouring_sq_array)
+VARx(uint32_t, iouring_cq_head)
+VARx(uint32_t, iouring_cq_tail)
+VARx(uint32_t, iouring_cq_ring_mask)
+VARx(uint32_t, iouring_cq_ring_entries)
+VARx(uint32_t, iouring_cq_overflow)
+VARx(uint32_t, iouring_cq_cqes)
+VARx(ev_tstamp, iouring_tfd_to)
+VARx(int, iouring_tfd)
+VARx(ev_io, iouring_tfd_w)
+#endif
+
 #if EV_USE_KQUEUE || EV_GENWRAP
 VARx(pid_t, kqueue_fd_pid)
 VARx(struct kevent *, kqueue_changes)
@@ -187,6 +227,11 @@ VARx(ev_io, sigfd_w)
 VARx(sigset_t, sigfd_set)
 #endif
 
+#if EV_USE_TIMERFD || EV_GENWRAP
+VARx(int, timerfd) /* timerfd for time jump detection */
+VARx(ev_io, timerfd_w)
+#endif
+
 VARx(unsigned int, origflags) /* original loop flags */
 
 #if EV_FEATURE_API || EV_GENWRAP
data/ext/libev/ev_win32.c
CHANGED
@@ -154,8 +154,8 @@ ev_time (void)
   ui.u.LowPart = ft.dwLowDateTime;
   ui.u.HighPart = ft.dwHighDateTime;
 
-  /* msvc cannot convert ulonglong to double... yes, it is that sucky */
-  return (LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-
+  /* also, msvc cannot convert ulonglong to double... yes, it is that sucky */
+  return EV_TS_FROM_USEC (((LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-1));
 }
 
 #endif
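For context on the constants in the ev_win32.c hunk: FILETIME counts 100 ns ticks since 1601-01-01, so subtracting 116444736000000000 rebases the value to the Unix epoch and multiplying by 1e-1 turns ticks into microseconds, which EV_TS_FROM_USEC then scales down to libev's second-based ev_tstamp. A standalone sketch of the same arithmetic (assumed equivalent; not code from the diff):

#include <stdint.h>

/* Convert a Windows FILETIME tick count (100 ns units since 1601-01-01)
 * into seconds since the Unix epoch, mirroring the hunk above. */
static double
filetime_to_unix_seconds (uint64_t filetime_100ns)
{
  const uint64_t EPOCH_DIFF_100NS = 116444736000000000ULL; /* 1601 -> 1970 */
  double usec = (double)(int64_t)(filetime_100ns - EPOCH_DIFF_100NS) * 1e-1;
  return usec * 1e-6; /* what EV_TS_FROM_USEC amounts to for double timestamps */
}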
data/ext/libev/ev_wrap.h
CHANGED
@@ -44,12 +44,46 @@
 #define invoke_cb ((loop)->invoke_cb)
 #define io_blocktime ((loop)->io_blocktime)
 #define iocp ((loop)->iocp)
+#define iouring_cq_cqes ((loop)->iouring_cq_cqes)
+#define iouring_cq_head ((loop)->iouring_cq_head)
+#define iouring_cq_overflow ((loop)->iouring_cq_overflow)
+#define iouring_cq_ring ((loop)->iouring_cq_ring)
+#define iouring_cq_ring_entries ((loop)->iouring_cq_ring_entries)
+#define iouring_cq_ring_mask ((loop)->iouring_cq_ring_mask)
+#define iouring_cq_ring_size ((loop)->iouring_cq_ring_size)
+#define iouring_cq_tail ((loop)->iouring_cq_tail)
+#define iouring_entries ((loop)->iouring_entries)
+#define iouring_fd ((loop)->iouring_fd)
+#define iouring_max_entries ((loop)->iouring_max_entries)
+#define iouring_sq_array ((loop)->iouring_sq_array)
+#define iouring_sq_dropped ((loop)->iouring_sq_dropped)
+#define iouring_sq_flags ((loop)->iouring_sq_flags)
+#define iouring_sq_head ((loop)->iouring_sq_head)
+#define iouring_sq_ring ((loop)->iouring_sq_ring)
+#define iouring_sq_ring_entries ((loop)->iouring_sq_ring_entries)
+#define iouring_sq_ring_mask ((loop)->iouring_sq_ring_mask)
+#define iouring_sq_ring_size ((loop)->iouring_sq_ring_size)
+#define iouring_sq_tail ((loop)->iouring_sq_tail)
+#define iouring_sqes ((loop)->iouring_sqes)
+#define iouring_sqes_size ((loop)->iouring_sqes_size)
+#define iouring_tfd ((loop)->iouring_tfd)
+#define iouring_tfd_to ((loop)->iouring_tfd_to)
+#define iouring_tfd_w ((loop)->iouring_tfd_w)
+#define iouring_to_submit ((loop)->iouring_to_submit)
 #define kqueue_changecnt ((loop)->kqueue_changecnt)
 #define kqueue_changemax ((loop)->kqueue_changemax)
 #define kqueue_changes ((loop)->kqueue_changes)
 #define kqueue_eventmax ((loop)->kqueue_eventmax)
 #define kqueue_events ((loop)->kqueue_events)
 #define kqueue_fd_pid ((loop)->kqueue_fd_pid)
+#define linuxaio_ctx ((loop)->linuxaio_ctx)
+#define linuxaio_epoll_w ((loop)->linuxaio_epoll_w)
+#define linuxaio_iocbpmax ((loop)->linuxaio_iocbpmax)
+#define linuxaio_iocbps ((loop)->linuxaio_iocbps)
+#define linuxaio_iteration ((loop)->linuxaio_iteration)
+#define linuxaio_submitcnt ((loop)->linuxaio_submitcnt)
+#define linuxaio_submitmax ((loop)->linuxaio_submitmax)
+#define linuxaio_submits ((loop)->linuxaio_submits)
 #define loop_count ((loop)->loop_count)
 #define loop_depth ((loop)->loop_depth)
 #define loop_done ((loop)->loop_done)
@@ -89,6 +123,8 @@
 #define sigfd_w ((loop)->sigfd_w)
 #define timeout_blocktime ((loop)->timeout_blocktime)
 #define timercnt ((loop)->timercnt)
+#define timerfd ((loop)->timerfd)
+#define timerfd_w ((loop)->timerfd_w)
 #define timermax ((loop)->timermax)
 #define timers ((loop)->timers)
 #define userdata ((loop)->userdata)
@@ -143,12 +179,46 @@
 #undef invoke_cb
 #undef io_blocktime
 #undef iocp
+#undef iouring_cq_cqes
+#undef iouring_cq_head
+#undef iouring_cq_overflow
+#undef iouring_cq_ring
+#undef iouring_cq_ring_entries
+#undef iouring_cq_ring_mask
+#undef iouring_cq_ring_size
+#undef iouring_cq_tail
+#undef iouring_entries
+#undef iouring_fd
+#undef iouring_max_entries
+#undef iouring_sq_array
+#undef iouring_sq_dropped
+#undef iouring_sq_flags
+#undef iouring_sq_head
+#undef iouring_sq_ring
+#undef iouring_sq_ring_entries
+#undef iouring_sq_ring_mask
+#undef iouring_sq_ring_size
+#undef iouring_sq_tail
+#undef iouring_sqes
+#undef iouring_sqes_size
+#undef iouring_tfd
+#undef iouring_tfd_to
+#undef iouring_tfd_w
+#undef iouring_to_submit
 #undef kqueue_changecnt
 #undef kqueue_changemax
 #undef kqueue_changes
 #undef kqueue_eventmax
 #undef kqueue_events
 #undef kqueue_fd_pid
+#undef linuxaio_ctx
+#undef linuxaio_epoll_w
+#undef linuxaio_iocbpmax
+#undef linuxaio_iocbps
+#undef linuxaio_iteration
+#undef linuxaio_submitcnt
+#undef linuxaio_submitmax
+#undef linuxaio_submits
 #undef loop_count
 #undef loop_depth
 #undef loop_done
@@ -188,6 +258,8 @@
 #undef sigfd_w
 #undef timeout_blocktime
 #undef timercnt
+#undef timerfd
+#undef timerfd_w
 #undef timermax
 #undef timers
 #undef userdata
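ev_vars.h and ev_wrap.h are two halves of one mechanism: the variable list is written against a VARx(type, name) macro so the same file can expand either into members of struct ev_loop (multi-loop builds) or into plain globals, while ev_wrap.h lets backend code keep using the bare names by mapping each one onto ((loop)->name). A simplified sketch of how ev.c consumes them; the real code goes through an extra VAR(name, decl) indirection, so the details below are an approximation rather than libev's exact source:

/* Simplified sketch, not code from this diff. */
#if EV_MULTIPLICITY
struct ev_loop
{
  ev_tstamp ev_rt_now;
  #define VARx(type, name) type name;
  #include "ev_vars.h"     /* every VARx(...) becomes a struct member */
  #undef VARx
};
  #include "ev_wrap.h"     /* e.g. #define iouring_fd ((loop)->iouring_fd) */
#else
  #define VARx(type, name) static type name;
  #include "ev_vars.h"     /* every VARx(...) becomes a file-scope global */
  #undef VARx
#endif

That is why this release only needs to add the linuxaio, io_uring and timerfd fields in these two files for every backend to see them.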
data/ext/nio4r/.clang-format
ADDED
@@ -0,0 +1,16 @@
+---
+Language: Cpp
+BasedOnStyle: WebKit
+AllowAllParametersOfDeclarationOnNextLine: false
+BinPackArguments: false
+BinPackParameters: false
+AlignConsecutiveMacros: false
+AlignConsecutiveAssignments: false
+BreakBeforeBraces: Linux
+BraceWrapping:
+  AfterControlStatement: Never
+IndentCaseLabels: true
+PointerAlignment: Right
+SpaceBeforeParens: ControlStatements
+IndentWidth: 4
+...
data/ext/nio4r/bytebuffer.c
CHANGED
@@ -42,7 +42,7 @@ void Init_NIO_ByteBuffer()
     cNIO_ByteBuffer = rb_define_class_under(mNIO, "ByteBuffer", rb_cObject);
     rb_define_alloc_func(cNIO_ByteBuffer, NIO_ByteBuffer_allocate);
 
-    cNIO_ByteBuffer_OverflowError
+    cNIO_ByteBuffer_OverflowError = rb_define_class_under(cNIO_ByteBuffer, "OverflowError", rb_eIOError);
     cNIO_ByteBuffer_UnderflowError = rb_define_class_under(cNIO_ByteBuffer, "UnderflowError", rb_eIOError);
     cNIO_ByteBuffer_MarkUnsetError = rb_define_class_under(cNIO_ByteBuffer, "MarkUnsetError", rb_eIOError);
 
@@ -85,8 +85,8 @@ static void NIO_ByteBuffer_gc_mark(struct NIO_ByteBuffer *buffer)
 
 static void NIO_ByteBuffer_free(struct NIO_ByteBuffer *buffer)
 {
-    if(buffer->buffer)
-
+    if (buffer->buffer)
+        xfree(buffer->buffer);
     xfree(buffer);
 }
 
@@ -133,17 +133,17 @@ static VALUE NIO_ByteBuffer_set_position(VALUE self, VALUE new_position)
 
     pos = NUM2INT(new_position);
 
-    if(pos < 0) {
+    if (pos < 0) {
         rb_raise(rb_eArgError, "negative position given");
     }
 
-    if(pos > buffer->limit) {
+    if (pos > buffer->limit) {
         rb_raise(rb_eArgError, "specified position exceeds limit");
     }
 
     buffer->position = pos;
 
-    if(buffer->mark > buffer->position) {
+    if (buffer->mark > buffer->position) {
         buffer->mark = MARK_UNSET;
     }
 
@@ -166,21 +166,21 @@ static VALUE NIO_ByteBuffer_set_limit(VALUE self, VALUE new_limit)
 
     lim = NUM2INT(new_limit);
 
-    if(lim < 0) {
+    if (lim < 0) {
         rb_raise(rb_eArgError, "negative limit given");
     }
 
-    if(lim > buffer->capacity) {
+    if (lim > buffer->capacity) {
         rb_raise(rb_eArgError, "specified limit exceeds capacity");
     }
 
     buffer->limit = lim;
 
-    if(buffer->position > lim) {
+    if (buffer->position > lim) {
         buffer->position = lim;
     }
 
-    if(buffer->mark > lim) {
+    if (buffer->mark > lim) {
         buffer->mark = MARK_UNSET;
     }
 
@@ -220,17 +220,17 @@ static VALUE NIO_ByteBuffer_get(int argc, VALUE *argv, VALUE self)
 
     rb_scan_args(argc, argv, "01", &length);
 
-    if(length == Qnil) {
+    if (length == Qnil) {
         len = buffer->limit - buffer->position;
     } else {
         len = NUM2INT(length);
     }
 
-    if(len < 0) {
+    if (len < 0) {
         rb_raise(rb_eArgError, "negative length given");
     }
 
-    if(len > buffer->limit - buffer->position) {
+    if (len > buffer->limit - buffer->position) {
         rb_raise(cNIO_ByteBuffer_UnderflowError, "not enough data in buffer");
     }
 
@@ -248,11 +248,11 @@ static VALUE NIO_ByteBuffer_fetch(VALUE self, VALUE index)
 
     i = NUM2INT(index);
 
-    if(i < 0) {
+    if (i < 0) {
         rb_raise(rb_eArgError, "negative index given");
     }
 
-    if(i >= buffer->limit) {
+    if (i >= buffer->limit) {
         rb_raise(rb_eArgError, "specified index exceeds limit");
     }
 
@@ -268,7 +268,7 @@ static VALUE NIO_ByteBuffer_put(VALUE self, VALUE string)
     StringValue(string);
     length = RSTRING_LEN(string);
 
-    if(length > buffer->limit - buffer->position) {
+    if (length > buffer->limit - buffer->position) {
         rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
     }
 
@@ -289,14 +289,14 @@ static VALUE NIO_ByteBuffer_read_from(VALUE self, VALUE io)
     rb_io_set_nonblock(fptr);
 
     nbytes = buffer->limit - buffer->position;
-    if(nbytes == 0) {
+    if (nbytes == 0) {
         rb_raise(cNIO_ByteBuffer_OverflowError, "buffer is full");
     }
 
     bytes_read = read(FPTR_TO_FD(fptr), buffer->buffer + buffer->position, nbytes);
 
-    if(bytes_read < 0) {
-        if(errno == EAGAIN) {
+    if (bytes_read < 0) {
+        if (errno == EAGAIN) {
            return INT2NUM(0);
        } else {
            rb_sys_fail("write");
@@ -319,14 +319,14 @@ static VALUE NIO_ByteBuffer_write_to(VALUE self, VALUE io)
     rb_io_set_nonblock(fptr);
 
     nbytes = buffer->limit - buffer->position;
-    if(nbytes == 0) {
+    if (nbytes == 0) {
         rb_raise(cNIO_ByteBuffer_UnderflowError, "no data remaining in buffer");
     }
 
     bytes_written = write(FPTR_TO_FD(fptr), buffer->buffer + buffer->position, nbytes);
 
-    if(bytes_written < 0) {
-        if(errno == EAGAIN) {
+    if (bytes_written < 0) {
+        if (errno == EAGAIN) {
            return INT2NUM(0);
        } else {
            rb_sys_fail("write");
@@ -375,7 +375,7 @@ static VALUE NIO_ByteBuffer_reset(VALUE self)
     struct NIO_ByteBuffer *buffer;
     Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
 
-    if(buffer->mark < 0) {
+    if (buffer->mark < 0) {
         rb_raise(cNIO_ByteBuffer_MarkUnsetError, "mark has not been set");
     } else {
         buffer->position = buffer->mark;
@@ -402,8 +402,8 @@ static VALUE NIO_ByteBuffer_each(VALUE self)
     struct NIO_ByteBuffer *buffer;
     Data_Get_Struct(self, struct NIO_ByteBuffer, buffer);
 
-    if(rb_block_given_p()) {
-        for(i = 0; i < buffer->limit; i++) {
+    if (rb_block_given_p()) {
+        for (i = 0; i < buffer->limit; i++) {
            rb_yield(INT2NUM(buffer->buffer[i]));
        }
    } else {
@@ -421,9 +421,8 @@ static VALUE NIO_ByteBuffer_inspect(VALUE self)
     return rb_sprintf(
        "#<%s:%p @position=%d @limit=%d @capacity=%d>",
        rb_class2name(CLASS_OF(self)),
-        (void*)self,
+        (void *)self,
        buffer->position,
        buffer->limit,
-        buffer->capacity
-    );
+        buffer->capacity);
 }
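The whitespace-only churn in bytebuffer.c above is consistent with running clang-format using the .clang-format file added in this release. A small before/after illustration of the options doing the work here (a sketch, not lines taken from the gem):

/* Before, in the older nio4r style: */
if(len < 0) {
    rb_raise(rb_eArgError, "negative length given");
}

/* After clang-format with SpaceBeforeParens: ControlStatements,
 * BreakBeforeBraces: Linux and IndentWidth: 4: */
if (len < 0) {
    rb_raise(rb_eArgError, "negative length given");
}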