libev_scheduler 0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.github/test.yml +31 -0
- data/.gitignore +58 -0
- data/CHANGELOG.md +0 -0
- data/Gemfile +3 -0
- data/Gemfile.lock +32 -0
- data/LICENSE +21 -0
- data/README.md +5 -0
- data/Rakefile +18 -0
- data/TODO.md +0 -0
- data/examples/io.rb +31 -0
- data/examples/sleep.rb +10 -0
- data/ext/libev/Changes +548 -0
- data/ext/libev/LICENSE +37 -0
- data/ext/libev/README +59 -0
- data/ext/libev/README.embed +3 -0
- data/ext/libev/ev.c +5279 -0
- data/ext/libev/ev.h +856 -0
- data/ext/libev/ev_epoll.c +296 -0
- data/ext/libev/ev_kqueue.c +224 -0
- data/ext/libev/ev_linuxaio.c +642 -0
- data/ext/libev/ev_poll.c +156 -0
- data/ext/libev/ev_port.c +192 -0
- data/ext/libev/ev_select.c +316 -0
- data/ext/libev/ev_vars.h +215 -0
- data/ext/libev/ev_win32.c +162 -0
- data/ext/libev/ev_wrap.h +216 -0
- data/ext/libev/test_libev_win32.c +123 -0
- data/ext/libev_scheduler/extconf.rb +22 -0
- data/ext/libev_scheduler/libev.c +2 -0
- data/ext/libev_scheduler/libev.h +11 -0
- data/ext/libev_scheduler/libev_scheduler_ext.c +5 -0
- data/ext/libev_scheduler/scheduler.c +330 -0
- data/lib/libev_scheduler.rb +23 -0
- data/lib/libev_scheduler/version.rb +5 -0
- data/libev_scheduler.gemspec +26 -0
- data/test/run.rb +5 -0
- data/test/test_enumerator.rb +45 -0
- data/test/test_io.rb +57 -0
- data/test/test_mutex.rb +230 -0
- data/test/test_process.rb +38 -0
- data/test/test_sleep.rb +52 -0
- metadata +134 -0
@@ -0,0 +1,123 @@
|
|
1
|
+
// a single header file is required
|
2
|
+
#include <ev.h>
|
3
|
+
#include <stdio.h>
|
4
|
+
#include <io.h>
|
5
|
+
|
6
|
+
// every watcher type has its own typedef'd struct
|
7
|
+
// with the name ev_TYPE
|
8
|
+
ev_io stdin_watcher;
|
9
|
+
ev_timer timeout_watcher;
|
10
|
+
|
11
|
+
// all watcher callbacks have a similar signature
|
12
|
+
// this callback is called when data is readable on stdin
|
13
|
+
// Callback invoked by libev whenever the watched fd (in this test, the
// listening socket converted to a CRT fd in main) becomes readable.
static void
stdin_cb (EV_P_ ev_io *w, int revents)
{
  puts ("stdin ready or done or something");
  // for one-shot events, one must manually stop the watcher
  // with its corresponding stop function.
  //ev_io_stop (EV_A_ w);

  // this causes all nested ev_loop's to stop iterating
  //ev_unloop (EV_A_ EVUNLOOP_ALL);
}
|
24
|
+
|
25
|
+
// another callback, this time for a time-out
|
26
|
+
// Timer callback: fires once when the (currently disabled) ev_timer in main
// expires, and breaks out of the innermost event loop.
static void
timeout_cb (EV_P_ ev_timer *w, int revents)
{
  puts ("timeout");
  // this causes the innermost ev_loop to stop iterating
  ev_unloop (EV_A_ EVUNLOOP_ONE);
}
|
33
|
+
|
34
|
+
|
35
|
+
|
36
|
+
#include <winsock.h>
|
37
|
+
|
38
|
+
#include <stdlib.h>
|
39
|
+
#include <iostream>
|
40
|
+
// Creates a Winsock TCP socket listening on 127.0.0.1:4444 and returns the
// raw SOCKET handle.
// NOTE(review): on failure this returns 1, which is also a plausible SOCKET
// value, so callers cannot reliably detect errors — confirm intent (this is
// throwaway test code for libev on win32).
int get_server_fd()
{

  //----------------------
  // Initialize Winsock.
  WSADATA wsaData;
  int iResult = WSAStartup(MAKEWORD(2,2), &wsaData);
  if (iResult != NO_ERROR) {
    printf("Error at WSAStartup()\n");
    return 1;
  }

  //----------------------
  // Create a SOCKET for listening for
  // incoming connection requests.
  SOCKET ListenSocket;
  ListenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
  if (ListenSocket == INVALID_SOCKET) {
    printf("Error at socket(): %ld\n", WSAGetLastError());
    WSACleanup();
    return 1;
  }
  printf("socket returned %d\n", ListenSocket);

  //----------------------
  // The sockaddr_in structure specifies the address family,
  // IP address, and port for the socket that is being bound.
  sockaddr_in service;
  service.sin_family = AF_INET;
  service.sin_addr.s_addr = inet_addr("127.0.0.1");
  service.sin_port = htons(4444);

  if (bind( ListenSocket,
      (SOCKADDR*) &service,
      sizeof(service)) == SOCKET_ERROR) {
    printf("bind() failed.\n");
    closesocket(ListenSocket);
    WSACleanup();
    return 1;
  }

  //----------------------
  // Listen for incoming connection requests.
  // on the created socket
  if (listen( ListenSocket, 1 ) == SOCKET_ERROR) {
    printf("Error listening on socket.\n");
    closesocket(ListenSocket);
    WSACleanup();
    return 1;
  }


  printf("sock and osf handle are %d %d, error is \n", ListenSocket, _get_osfhandle (ListenSocket)); // -1 is invalid file handle: http://msdn.microsoft.com/en-us/library/ks2530z6.aspx
  printf("err was %d\n", WSAGetLastError());
  //----------------------
  return ListenSocket;
}
|
97
|
+
|
98
|
+
|
99
|
+
int
|
100
|
+
main (void)
|
101
|
+
{
|
102
|
+
struct ev_loop *loopy = ev_default_loop(0);
|
103
|
+
int fd = get_server_fd();
|
104
|
+
int fd_real = _open_osfhandle(fd, NULL);
|
105
|
+
int conv = _get_osfhandle(fd_real);
|
106
|
+
printf("got server fd %d, loop %d, fd_real %d, that converted %d\n", fd, loopy, fd_real, conv);
|
107
|
+
// accept(fd, NULL, NULL);
|
108
|
+
// initialise an io watcher, then start it
|
109
|
+
// this one will watch for stdin to become readable
|
110
|
+
ev_io_init (&stdin_watcher, stdin_cb, /*STDIN_FILENO*/ conv, EV_READ);
|
111
|
+
ev_io_start (loopy, &stdin_watcher);
|
112
|
+
|
113
|
+
// initialise a timer watcher, then start it
|
114
|
+
// simple non-repeating 5.5 second timeout
|
115
|
+
//ev_timer_init (&timeout_watcher, timeout_cb, 15.5, 0.);
|
116
|
+
//ev_timer_start (loopy, &timeout_watcher);
|
117
|
+
printf("starting loop\n");
|
118
|
+
// now wait for events to arrive
|
119
|
+
ev_loop (loopy, 0);
|
120
|
+
|
121
|
+
// unloop was called, so exit
|
122
|
+
return 0;
|
123
|
+
}
|
@@ -0,0 +1,22 @@
|
|
1
|
+
# frozen_string_literal: true

# extconf.rb for the libev_scheduler native extension: probes the platform
# for available event backends and generates the Makefile.

require 'rubygems'
require 'mkmf'

$defs << "-DPOLYPHONY_BACKEND_LIBEV"

# Enable each libev backend whose platform support is detected.
$defs << '-DEV_USE_LINUXAIO' if have_header('linux/aio_abi.h')
$defs << '-DEV_USE_SELECT' if have_header('sys/select.h')
# BUG FIX: EV_USE_POLL was previously gated on have_type('port_event_t',
# 'poll.h'). port_event_t is a Solaris event-ports type declared in port.h,
# so that check could never succeed and the poll(2) backend was never
# enabled. Key it on the poll.h header instead.
$defs << '-DEV_USE_POLL' if have_header('poll.h')
$defs << '-DEV_USE_EPOLL' if have_header('sys/epoll.h')
$defs << '-DEV_USE_KQUEUE' if have_header('sys/event.h') && have_header('sys/queue.h')
$defs << '-DEV_USE_PORT' if have_type('port_event_t', 'port.h')
$defs << '-DHAVE_SYS_RESOURCE_H' if have_header('sys/resource.h')

# Silence warnings triggered by the vendored libev sources.
$CFLAGS << " -Wno-comment"
$CFLAGS << " -Wno-unused-result"
$CFLAGS << " -Wno-dangling-else"
$CFLAGS << " -Wno-parentheses"

# libev type-puns internally; strict aliasing must be off.
CONFIG['optflags'] << ' -fno-strict-aliasing' unless RUBY_PLATFORM =~ /mswin/

dir_config 'libev_scheduler_ext'
create_makefile 'libev_scheduler_ext'
|
@@ -0,0 +1,330 @@
|
|
1
|
+
#include <netdb.h>
|
2
|
+
#include <sys/socket.h>
|
3
|
+
#include <sys/uio.h>
|
4
|
+
#include <unistd.h>
|
5
|
+
#include <fcntl.h>
|
6
|
+
#include <netinet/in.h>
|
7
|
+
#include <arpa/inet.h>
|
8
|
+
#include <stdnoreturn.h>
|
9
|
+
|
10
|
+
#include "../libev/ev.h"
|
11
|
+
#include "ruby.h"
|
12
|
+
#include "ruby/io.h"
|
13
|
+
#include "../libev/ev.h"
|
14
|
+
|
15
|
+
#define INSPECT(str, obj) { printf(str); VALUE s = rb_funcall(obj, rb_intern("inspect"), 0); printf(": %s\n", StringValueCStr(s)); }
|
16
|
+
#define TRACE_CALLER() { VALUE c = rb_funcall(rb_mKernel, rb_intern("caller"), 0); INSPECT("caller: ", c); }
|
17
|
+
|
18
|
+
|
19
|
+
// Interned instance-variable IDs; populated in Init_Scheduler.
ID ID_ivar_is_nonblocking; // :@is_nonblocking
ID ID_ivar_io;             // :@io (set on wrapper objects around a real IO)

// Cached bit values of IO::READABLE / IO::WRITABLE; set in Init_Scheduler.
int event_readable;
int event_writable;

// VALUE SYM_libev;
|
26
|
+
|
27
|
+
// Per-instance state for a Libev::Scheduler object.
typedef struct Scheduler_t {
  struct ev_loop *ev_loop;     // libev loop (the default loop on the main thread)
  struct ev_async break_async; // wakes a blocking ev_run from another thread

  unsigned int pending_count;     // fibers currently blocked on a watcher
  unsigned int currently_polling; // nonzero while inside ev_run (see Scheduler_poll)
  VALUE ready; // holds ready fibers (used only while polling)
} Scheduler_t;
|
35
|
+
|
36
|
+
static size_t Scheduler_size(const void *ptr) {
|
37
|
+
return sizeof(Scheduler_t);
|
38
|
+
}
|
39
|
+
|
40
|
+
// GC mark callback: keeps the ready-fiber array (and the fibers it holds)
// alive while the scheduler object is reachable.
static void Scheduler_mark(void *ptr) {
  Scheduler_t *scheduler = ptr;
  rb_gc_mark(scheduler->ready);
}
|
44
|
+
|
45
|
+
// TypedData descriptor for Libev::Scheduler.
// NOTE(review): the dfree slot is 0, so the ALLOC'd Scheduler_t appears
// never to be freed when the object is collected — confirm whether this
// leak is intentional.
static const rb_data_type_t Scheduler_type = {
  "LibevScheduler",
  {Scheduler_mark, 0, Scheduler_size,},
  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
|
50
|
+
|
51
|
+
/* Allocation function for Libev::Scheduler.
 *
 * BUG FIX: Ruby's GC may run (invoking Scheduler_mark) between allocation
 * and #initialize, so every GC-visible field must hold a valid VALUE before
 * the struct is handed to TypedData_Wrap_Struct. Previously `ready` was
 * left uninitialized, so Scheduler_mark could mark a garbage pointer. */
static VALUE Scheduler_allocate(VALUE klass) {
  Scheduler_t *scheduler = ALLOC(Scheduler_t);

  scheduler->ev_loop = 0;
  scheduler->pending_count = 0;
  scheduler->currently_polling = 0;
  scheduler->ready = Qnil; // rb_gc_mark(Qnil) is a safe no-op

  return TypedData_Wrap_Struct(klass, &Scheduler_type, scheduler);
}
|
56
|
+
|
57
|
+
// Extracts the Scheduler_t* wrapped inside a Libev::Scheduler object,
// raising a TypeError if obj is of the wrong type.
#define GetScheduler(obj, scheduler) \
  TypedData_Get_Struct((obj), Scheduler_t, &Scheduler_type, (scheduler))
|
59
|
+
|
60
|
+
// Callback for the break_async watcher (see Scheduler_unblock).
void break_async_callback(struct ev_loop *ev_loop, struct ev_async *ev_async, int revents) {
  // This callback does nothing, the break async is used solely for breaking out
  // of a *blocking* event loop (waking it up) in a thread-safe, signal-safe manner
}
|
64
|
+
|
65
|
+
// Libev::Scheduler#initialize. The main thread reuses libev's default loop;
// any other thread gets a fresh loop created with EVFLAG_NOSIGMASK so libev
// leaves the signal mask (and thus Ruby's signal handling) alone.
static VALUE Scheduler_initialize(VALUE self) {
  Scheduler_t *scheduler;
  VALUE thread = rb_thread_current();
  int is_main_thread = (thread == rb_thread_main());

  GetScheduler(self, scheduler);
  scheduler->ev_loop = is_main_thread ? EV_DEFAULT : ev_loop_new(EVFLAG_NOSIGMASK);

  // break_async lets Scheduler_unblock wake a blocking ev_run from another
  // thread (ev_async_send is thread- and signal-safe).
  ev_async_init(&scheduler->break_async, break_async_callback);
  ev_async_start(scheduler->ev_loop, &scheduler->break_async);
  ev_unref(scheduler->ev_loop); // don't count the break_async watcher

  scheduler->pending_count = 0;
  scheduler->currently_polling = 0;
  scheduler->ready = rb_ary_new();

  return Qnil;
}
|
83
|
+
|
84
|
+
VALUE Scheduler_poll(VALUE self);
|
85
|
+
|
86
|
+
/* Drives the event loop until no fiber is blocked on a watcher and the
 * ready queue is empty. Returns self. */
VALUE Scheduler_run(VALUE self) {
  Scheduler_t *scheduler;
  GetScheduler(self, scheduler);

  for (;;) {
    int has_pending = scheduler->pending_count > 0;
    int has_ready = RARRAY_LEN(scheduler->ready) > 0;
    if (!has_pending && !has_ready) break;
    Scheduler_poll(self);
  }

  return self;
}
|
96
|
+
|
97
|
+
// Fiber-scheduler #close hook: drain all remaining work, then tear down the
// break_async watcher and — for loops created per-thread in #initialize —
// destroy the loop (the process-wide default loop is left alone).
VALUE Scheduler_close(VALUE self) {
  Scheduler_t *scheduler;
  GetScheduler(self, scheduler);

  Scheduler_run(self);

  ev_async_stop(scheduler->ev_loop, &scheduler->break_async);
  if (!ev_is_default_loop(scheduler->ev_loop)) ev_loop_destroy(scheduler->ev_loop);
  return self;
}
|
107
|
+
|
108
|
+
// ev_timer watcher extended with resumption context. The ev_timer member
// MUST stay first: callbacks receive an ev_timer* and cast it back to this.
struct libev_timer {
  struct ev_timer timer;
  Scheduler_t *scheduler;
  VALUE fiber; // fiber to enqueue on scheduler->ready when the timer fires
};
|
113
|
+
|
114
|
+
// Timer fired: enqueue the waiting fiber; Scheduler_resume_ready will
// resume it after ev_run returns.
void Scheduler_timer_callback(EV_P_ ev_timer *w, int revents)
{
  struct libev_timer *watcher = (struct libev_timer *)w; // timer is first member
  rb_ary_push(watcher->scheduler->ready, watcher->fiber);
}
|
119
|
+
|
120
|
+
// Blocks the current fiber for `duration` seconds using a one-shot
// ev_timer. The watcher lives on this stack frame; it is always stopped
// before returning, so it cannot outlive the frame.
VALUE Scheduler_sleep(VALUE self, VALUE duration) {
  Scheduler_t *scheduler;
  struct libev_timer watcher;
  GetScheduler(self, scheduler);

  watcher.scheduler = scheduler;
  watcher.fiber = rb_fiber_current();
  ev_timer_init(&watcher.timer, Scheduler_timer_callback, NUM2DBL(duration), 0.);
  ev_timer_start(scheduler->ev_loop, &watcher.timer);
  VALUE nil = Qnil;
  scheduler->pending_count++;
  // Yield back to the scheduler; resumed by Scheduler_resume_ready after
  // the timer (or an #unblock) queued this fiber.
  VALUE ret = rb_fiber_yield(1, &nil);
  scheduler->pending_count--;
  ev_timer_stop(scheduler->ev_loop, &watcher.timer);
  return ret;
}
|
136
|
+
|
137
|
+
// Blocks the current fiber indefinitely (until #unblock re-queues it).
// ev_ref keeps the loop alive while this fiber waits without owning any
// watcher; the matching ev_unref restores the refcount on wakeup.
VALUE Scheduler_pause(VALUE self) {
  Scheduler_t *scheduler;
  GetScheduler(self, scheduler);

  ev_ref(scheduler->ev_loop);
  VALUE nil = Qnil;
  scheduler->pending_count++;
  VALUE ret = rb_fiber_yield(1, &nil);
  scheduler->pending_count--;
  ev_unref(scheduler->ev_loop);
  return ret;
}
|
149
|
+
|
150
|
+
/* Fiber-scheduler #block hook; argv is (blocker [, timeout]). With a
 * timeout the fiber sleeps for that many seconds, otherwise it pauses
 * until #unblock. Always returns Qtrue. */
VALUE Scheduler_block(int argc, VALUE *argv, VALUE self) {
  VALUE timeout = Qnil;
  if (argc == 2) timeout = argv[1];

  if (NIL_P(timeout)) {
    Scheduler_pause(self);
  } else {
    Scheduler_sleep(self, timeout);
  }

  return Qtrue;
}
|
160
|
+
|
161
|
+
// Fiber-scheduler #unblock hook (may be called from another thread): mark
// `fiber` ready and, if the loop is currently blocked inside ev_run, wake
// it via the async watcher (ev_async_send is thread- and signal-safe).
VALUE Scheduler_unblock(VALUE self, VALUE blocker, VALUE fiber) {
  Scheduler_t *scheduler;
  GetScheduler(self, scheduler);

  rb_ary_push(scheduler->ready, fiber);

  if (scheduler->currently_polling)
    ev_async_send(scheduler->ev_loop, &scheduler->break_async);

  return self;
}
|
172
|
+
|
173
|
+
// ev_io watcher extended with resumption context; the ev_io member MUST
// stay first so the callback can cast the ev_io* back to this struct.
struct libev_io {
  struct ev_io io;
  Scheduler_t *scheduler;
  VALUE fiber; // fiber to enqueue when the fd becomes ready
};
|
178
|
+
|
179
|
+
// I/O readiness: enqueue the waiting fiber for resumption.
void Scheduler_io_callback(EV_P_ ev_io *w, int revents)
{
  struct libev_io *watcher = (struct libev_io *)w; // io is first member
  rb_ary_push(watcher->scheduler->ready, watcher->fiber);
}
|
184
|
+
|
185
|
+
/* Translates a Ruby IO event bitmap (IO::READABLE / IO::WRITABLE) into the
 * equivalent libev EV_READ / EV_WRITE mask. */
int io_event_mask(VALUE events) {
  int mask = 0;
  int interest = NUM2INT(events);
  mask |= (interest & event_readable) ? EV_READ : 0;
  mask |= (interest & event_writable) ? EV_WRITE : 0;
  return mask;
}
|
192
|
+
|
193
|
+
// Fiber-scheduler #io_wait hook: blocks the current fiber until `io` is
// ready for `events` (IO::READABLE/WRITABLE bits) or `timeout` seconds
// elapse. Both watchers live on this stack frame and are stopped before
// returning.
// NOTE(review): returns self in both the ready and timeout cases, so the
// caller cannot distinguish them — confirm against the scheduler interface.
VALUE Scheduler_io_wait(VALUE self, VALUE io, VALUE events, VALUE timeout) {
  Scheduler_t *scheduler;
  struct libev_io io_watcher;
  struct libev_timer timeout_watcher;
  GetScheduler(self, scheduler);

  rb_io_t *fptr;
  // Wrapper objects keep the real IO in @io; unwrap before fetching the fd.
  VALUE underlying_io = rb_ivar_get(io, ID_ivar_io);
  if (underlying_io != Qnil) io = underlying_io;
  GetOpenFile(io, fptr);

  io_watcher.scheduler = scheduler;
  io_watcher.fiber = rb_fiber_current();
  ev_io_init(&io_watcher.io, Scheduler_io_callback, fptr->fd, io_event_mask(events));

  int use_timeout = timeout != Qnil;
  if (use_timeout) {
    // Whichever watcher fires first queues the fiber; the other is stopped
    // below before it can fire again.
    timeout_watcher.scheduler = scheduler;
    timeout_watcher.fiber = rb_fiber_current();
    ev_timer_init(&timeout_watcher.timer, Scheduler_timer_callback, NUM2DBL(timeout), 0.);
    ev_timer_start(scheduler->ev_loop, &timeout_watcher.timer);
  }

  ev_io_start(scheduler->ev_loop, &io_watcher.io);
  VALUE nil = Qnil;
  scheduler->pending_count++;
  rb_fiber_yield(1, &nil);
  scheduler->pending_count--;
  ev_io_stop(scheduler->ev_loop, &io_watcher.io);
  if (use_timeout)
    ev_timer_stop(scheduler->ev_loop, &timeout_watcher.timer);

  return self;
}
|
227
|
+
|
228
|
+
// ev_child watcher extended with resumption context; the ev_child member
// MUST stay first so the callback can cast back to this struct.
struct libev_child {
  struct ev_child child;
  Scheduler_t *scheduler;
  VALUE fiber;  // fiber to enqueue when the child exits
  VALUE status; // filled by the callback: [pid, exit_status]
};
|
234
|
+
|
235
|
+
/* Child exited: record [pid, exit_status] on the enclosing libev_child
 * watcher and enqueue its fiber for resumption.
 * (Removed a leftover debug printf that wrote to stdout on every child
 * exit.) */
void Scheduler_child_callback(EV_P_ ev_child *w, int revents)
{
  struct libev_child *watcher = (struct libev_child *)w; // child is first member
  int exit_status = WEXITSTATUS(w->rstatus);
  watcher->status = rb_ary_new_from_args(2, INT2NUM(w->rpid), INT2NUM(exit_status));
  rb_ary_push(watcher->scheduler->ready, watcher->fiber);
}
|
243
|
+
|
244
|
+
/* Fiber-scheduler #process_wait hook: blocks the current fiber until the
 * child `pid` exits, then returns the [pid, exit_status] pair recorded by
 * Scheduler_child_callback.
 *
 * BUG FIX: `result` was declared, GC-guarded and returned, but never
 * assigned from watcher.status — the method always returned nil, discarding
 * the child's exit status. */
VALUE Scheduler_process_wait(VALUE self, VALUE pid, VALUE flags) {
  Scheduler_t *scheduler;
  struct libev_child watcher;
  VALUE result = Qnil;
  GetScheduler(self, scheduler);

  watcher.scheduler = scheduler;
  watcher.fiber = rb_fiber_current();
  watcher.status = Qnil;
  ev_child_init(&watcher.child, Scheduler_child_callback, NUM2INT(pid), 0);
  ev_child_start(scheduler->ev_loop, &watcher.child);
  VALUE nil = Qnil;
  scheduler->pending_count++;
  rb_fiber_yield(1, &nil); // resumed once the child watcher fires
  scheduler->pending_count--;
  ev_child_stop(scheduler->ev_loop, &watcher.child);
  result = watcher.status; // [pid, exit_status] set by the callback
  RB_GC_GUARD(watcher.status);
  RB_GC_GUARD(result);
  return result;
}
|
264
|
+
|
265
|
+
// Resumes every fiber queued in scheduler->ready. The array is swapped out
// before iterating because resumed fibers may enqueue new ready fibers;
// the outer loop repeats until the queue stays empty.
void Scheduler_resume_ready(Scheduler_t *scheduler) {
  VALUE nil = Qnil;
  VALUE ready = Qnil;

  unsigned int ready_count = RARRAY_LEN(scheduler->ready);
  while (ready_count > 0) {
    ready = scheduler->ready;
    scheduler->ready = rb_ary_new(); // fresh queue for fibers readied below

    for (unsigned int i = 0; i < ready_count; i++) {
      VALUE fiber = RARRAY_AREF(ready, i);
      rb_fiber_resume(fiber, 1, &nil);
    }

    ready_count = RARRAY_LEN(scheduler->ready);
  }

  RB_GC_GUARD(ready); // keep the detached array alive across the resumes
}
|
284
|
+
|
285
|
+
// Runs a single (blocking) libev iteration, then resumes any fibers the
// watcher callbacks queued. currently_polling tells Scheduler_unblock
// whether a cross-thread wakeup via break_async is needed.
VALUE Scheduler_poll(VALUE self) {
  Scheduler_t *scheduler;
  GetScheduler(self, scheduler);

  scheduler->currently_polling = 1;
  ev_run(scheduler->ev_loop, EVRUN_ONCE);
  scheduler->currently_polling = 0;

  Scheduler_resume_ready(scheduler);

  return self;
}
|
297
|
+
|
298
|
+
/* Returns the number of fibers currently blocked on a libev watcher. */
VALUE Scheduler_pending_count(VALUE self) {
  Scheduler_t *sched;
  GetScheduler(self, sched);
  return INT2NUM(sched->pending_count);
}
|
304
|
+
|
305
|
+
// Extension entry point: defines Libev::Scheduler, wires the fiber
// scheduler interface methods, interns ivar IDs, and caches the
// IO::READABLE / IO::WRITABLE bit values used by io_event_mask.
void Init_Scheduler() {
  // Route libev's internal allocations through Ruby's allocator so they
  // are accounted for by the GC heuristics.
  ev_set_allocator(xrealloc);

  VALUE mLibev = rb_define_module("Libev");
  VALUE cScheduler = rb_define_class_under(mLibev, "Scheduler", rb_cObject);
  rb_define_alloc_func(cScheduler, Scheduler_allocate);

  rb_define_method(cScheduler, "initialize", Scheduler_initialize, 0);

  // fiber scheduler interface
  rb_define_method(cScheduler, "close", Scheduler_close, 0);
  rb_define_method(cScheduler, "io_wait", Scheduler_io_wait, 3);
  rb_define_method(cScheduler, "process_wait", Scheduler_process_wait, 2);
  rb_define_method(cScheduler, "block", Scheduler_block, -1);
  rb_define_method(cScheduler, "unblock", Scheduler_unblock, 2);

  rb_define_method(cScheduler, "run", Scheduler_run, 0);
  rb_define_method(cScheduler, "pending_count", Scheduler_pending_count, 0);

  ID_ivar_is_nonblocking = rb_intern("@is_nonblocking");
  ID_ivar_io = rb_intern("@io");

  event_readable = NUM2INT(rb_const_get(rb_cIO, rb_intern("READABLE")));
  event_writable = NUM2INT(rb_const_get(rb_cIO, rb_intern("WRITABLE")));
  // SYM_libev = ID2SYM(rb_intern("libev"));
}
|