event 0.6.0 → 0.7.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: '0380b6d95d8bd8f8d378820eefa77e6c6f0f1aaceb2afec54cb4d38f57747cbc'
-  data.tar.gz: 2a0bfede977154f30a402fda4ec445eedc4daf3893bae8760eceaa1e58c999be
+  metadata.gz: 71ed23db589d68545077cbf271afe2145054b040e3c8001b052366c8588a76e1
+  data.tar.gz: 4c4e1e991044cc3abfbdd388dbfc6e4254de4ac3e97bec9faab60142c1f63525
 SHA512:
-  metadata.gz: d12fab2ce89b8c22d583a7472670f8bdb5b5b3428832938f9c51ce3635f08bfaf7d107b6568f762849a4fa8b32fdb0d79823bbd4ebd795b690e885b0eff52798
-  data.tar.gz: 793cf18caeb1457cd5499ee3d0fd2108dbf46e953cbc11b80e7c800efeb6beac279395a24da420b4c751a89ee5ecd54ad53a3b3a58d4b07d75e9a89d84a8be4d
+  metadata.gz: 2f9ec25ff8d890e41589099ca347fc7c1ca1279dcc40c0065b759ac9e9c04ede4a9ad772f4beaadb794360b0af0a132db6f4e664ff36503995cc02a6ab3f7ff1
+  data.tar.gz: 6767cd4dfc6bc9c69bcfdc9ecb08e327315480919805fcca78c31c32eed19a7decbbdd2fc858897b678db06d58d40d935e77697e0a67ca1ee8408188da02861a
data/ext/event/backend.o CHANGED
Binary file
data/ext/event/backend/backend.c CHANGED
@@ -21,88 +21,158 @@
 #include "backend.h"
 #include <fcntl.h>
 
-static ID id_transfer, id_wait;
-static VALUE rb_Process_Status = Qnil;
-
-void Init_Event_Backend(VALUE Event_Backend) {
-	id_transfer = rb_intern("transfer");
-	id_wait = rb_intern("wait");
-	// id_alive_p = rb_intern("alive?");
-	rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
-}
+#ifndef HAVE__RB_FIBER_TRANSFER
+static ID id_transfer;
 
 VALUE
-Event_Backend_transfer(VALUE fiber) {
-#ifdef HAVE__RB_FIBER_TRANSFER
-	return rb_fiber_transfer(fiber, 0, NULL);
-#else
+Event_Backend_fiber_transfer(VALUE fiber) {
 	return rb_funcall(fiber, id_transfer, 0);
-#endif
 }
 
 VALUE
-Event_Backend_transfer_result(VALUE fiber, VALUE result) {
-	// if (!RTEST(rb_fiber_alive_p(fiber))) {
-	// 	return Qnil;
-	// }
-	
-#ifdef HAVE__RB_FIBER_TRANSFER
-	return rb_fiber_transfer(fiber, 1, &result);
-#else
+Event_Backend_fiber_transfer_result(VALUE fiber, VALUE result) {
 	return rb_funcall(fiber, id_transfer, 1, result);
+}
 #endif
+
+#ifndef HAVE_RB_IO_DESCRIPTOR
+static ID id_fileno;
+
+int Event_Backend_io_descriptor(VALUE io) {
+	return RB_NUM2INT(rb_funcall(io, id_fileno, 0));
 }
+#endif
+
+#ifndef HAVE_RB_PROCESS_STATUS_WAIT
+static ID id_wait;
+static VALUE rb_Process_Status = Qnil;
 
 VALUE Event_Backend_process_status_wait(rb_pid_t pid)
 {
 	return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(WNOHANG));
 }
+#endif
 
-char* Event_Backend_verify_size(VALUE buffer, size_t offset, size_t length) {
-	if ((size_t)RSTRING_LEN(buffer) < offset + length) {
-		rb_raise(rb_eRuntimeError, "invalid offset/length exceeds bounds of buffer");
+int Event_Backend_nonblock_set(int file_descriptor)
+{
+	int flags = fcntl(file_descriptor, F_GETFL, 0);
+	
+	if (!(flags & O_NONBLOCK)) {
+		fcntl(file_descriptor, F_SETFL, flags | O_NONBLOCK);
 	}
 	
-	return RSTRING_PTR(buffer);
+	return flags;
 }
 
-char* Event_Backend_resize_to_capacity(VALUE string, size_t offset, size_t length) {
-	size_t current_length = RSTRING_LEN(string);
-	long difference = (long)(offset + length) - (long)current_length;
+void Event_Backend_nonblock_restore(int file_descriptor, int flags)
+{
+	if (!(flags & O_NONBLOCK)) {
+		fcntl(file_descriptor, F_SETFL, flags & ~flags);
+	}
+}
+
+void Init_Event_Backend(VALUE Event_Backend) {
+#ifndef HAVE_RB_IO_DESCRIPTOR
+	id_fileno = rb_intern("fileno");
+#endif
+
+#ifndef HAVE__RB_FIBER_TRANSFER
+	id_transfer = rb_intern("transfer");
+#endif
 
-	difference += 1;
+#ifndef HAVE_RB_PROCESS_STATUS_WAIT
+	id_wait = rb_intern("wait");
+	rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
+#endif
+}
+
+struct wait_and_transfer_arguments {
+	struct Event_Backend *backend;
+	struct Event_Backend_Queue *waiting;
+};
+
+static void queue_pop(struct Event_Backend *backend, struct Event_Backend_Queue *waiting) {
+	if (waiting->behind) {
+		waiting->behind->infront = waiting->infront;
+	} else {
+		backend->waiting = waiting->infront;
+	}
 
-	if (difference > 0) {
-		rb_str_modify_expand(string, difference);
+	if (waiting->infront) {
+		waiting->infront->behind = waiting->behind;
+	} else {
+		backend->ready = waiting->behind;
+	}
+}
+
+static void queue_push(struct Event_Backend *backend, struct Event_Backend_Queue *waiting) {
+	if (backend->waiting) {
+		backend->waiting->behind = waiting;
+		waiting->infront = backend->waiting;
 	} else {
-		rb_str_modify(string);
+		backend->ready = waiting;
 	}
 
-	return RSTRING_PTR(string);
+	backend->waiting = waiting;
+}
+
+static VALUE wait_and_transfer(VALUE fiber) {
+	return Event_Backend_fiber_transfer(fiber);
 }
 
-void Event_Backend_resize_to_fit(VALUE string, size_t offset, size_t length) {
-	size_t current_length = RSTRING_LEN(string);
+static VALUE wait_and_transfer_ensure(VALUE _arguments) {
+	struct wait_and_transfer_arguments *arguments = (struct wait_and_transfer_arguments *)_arguments;
 
-	if (current_length < (offset + length)) {
-		rb_str_set_len(string, offset + length);
-	}
+	queue_pop(arguments->backend, arguments->waiting);
+	
+	return Qnil;
 }
 
-int Event_Backend_nonblock_set(int file_descriptor)
+void Event_Backend_wait_and_transfer(struct Event_Backend *backend, VALUE fiber)
 {
-	int flags = fcntl(file_descriptor, F_GETFL, 0);
+	struct Event_Backend_Queue waiting = {
+		.behind = NULL,
+		.infront = NULL,
+		.fiber = rb_fiber_current()
+	};
 
-	if (!(flags & O_NONBLOCK)) {
-		fcntl(file_descriptor, F_SETFL, flags | O_NONBLOCK);
-	}
+	queue_push(backend, &waiting);
 
-	return flags;
+	struct wait_and_transfer_arguments arguments = {
+		.backend = backend,
+		.waiting = &waiting,
+	};
+	
+	rb_ensure(wait_and_transfer, fiber, wait_and_transfer_ensure, (VALUE)&arguments);
 }
 
-void Event_Backend_nonblock_restore(int file_descriptor, int flags)
+void Event_Backend_ready_pop(struct Event_Backend *backend)
 {
-	if (!(flags & O_NONBLOCK)) {
-		fcntl(file_descriptor, F_SETFL, flags & ~flags);
+	// Get the current tail and head of the queue:
+	struct Event_Backend_Queue *waiting = backend->waiting;
+	
+	// Process from head to tail in order:
+	// During this, more items may be appended to tail.
+	while (backend->ready) {
+		struct Event_Backend_Queue *ready = backend->ready;
+		
+		Event_Backend_fiber_transfer(ready->fiber);
+		
+		if (ready == waiting) break;
 	}
-}
+}
+
+void Event_Backend_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration)
+{
+	if ((stop->tv_nsec - start->tv_nsec) < 0) {
+		duration->tv_sec = stop->tv_sec - start->tv_sec - 1;
+		duration->tv_nsec = stop->tv_nsec - start->tv_nsec + 1000000000;
+	} else {
+		duration->tv_sec = stop->tv_sec - start->tv_sec;
+		duration->tv_nsec = stop->tv_nsec - start->tv_nsec;
+	}
+}
+
+void Event_Backend_current_time(struct timespec *time) {
+	clock_gettime(CLOCK_MONOTONIC, time);
+}
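
The Event_Backend_elapsed_time helper added above normalizes the nanosecond field by borrowing one second whenever stop->tv_nsec is smaller than start->tv_nsec, keeping tv_nsec in [0, 1e9). A minimal standalone sketch of the same arithmetic, compiled outside the gem (the demo_* names are invented for illustration):

    #include <stdio.h>
    #include <time.h>
    
    // Same borrow-based subtraction as Event_Backend_elapsed_time above:
    static void demo_elapsed_time(struct timespec *start, struct timespec *stop, struct timespec *duration) {
    	if ((stop->tv_nsec - start->tv_nsec) < 0) {
    		// Borrow one second (1000000000 nanoseconds) so tv_nsec stays non-negative:
    		duration->tv_sec = stop->tv_sec - start->tv_sec - 1;
    		duration->tv_nsec = stop->tv_nsec - start->tv_nsec + 1000000000;
    	} else {
    		duration->tv_sec = stop->tv_sec - start->tv_sec;
    		duration->tv_nsec = stop->tv_nsec - start->tv_nsec;
    	}
    }
    
    int main(void) {
    	// 2.9s -> 4.1s exercises the borrow branch:
    	struct timespec start = {.tv_sec = 2, .tv_nsec = 900000000};
    	struct timespec stop = {.tv_sec = 4, .tv_nsec = 100000000};
    	struct timespec duration;
    	
    	demo_elapsed_time(&start, &stop, &duration);
    	printf("%lld.%.9ld\n", (long long)duration.tv_sec, duration.tv_nsec);
    	// Prints: 1.200000000
    	return 0;
    }

This is the same quantity the PRINTF_TIMESPEC macros added to backend.h below are designed to print.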
data/ext/event/backend/backend.h CHANGED
@@ -20,6 +20,13 @@
 
 #include <ruby.h>
 #include <ruby/thread.h>
+#include <ruby/io.h>
+
+#ifdef HAVE_RUBY_IO_BUFFER_H
+#include <ruby/io/buffer.h>
+#endif
+
+#include <time.h>
 
 enum Event {
 	READABLE = 1,
@@ -29,17 +36,77 @@ enum Event {
 	HANGUP = 16
 };
 
-void
-Init_Event_Backend();
+void Init_Event_Backend();
 
-VALUE Event_Backend_transfer(VALUE fiber);
-VALUE Event_Backend_transfer_result(VALUE fiber, VALUE argument);
+#ifdef HAVE__RB_FIBER_TRANSFER
+#define Event_Backend_fiber_transfer(fiber) rb_fiber_transfer(fiber, 0, NULL)
+#define Event_Backend_fiber_transfer_result(fiber, argument) rb_fiber_transfer(fiber, 1, &argument)
+#else
+VALUE Event_Backend_fiber_transfer(VALUE fiber);
+VALUE Event_Backend_fiber_transfer_result(VALUE fiber, VALUE argument);
+#endif
 
-VALUE Event_Backend_process_status_wait(rb_pid_t pid);
+#ifdef HAVE_RB_IO_DESCRIPTOR
+#define Event_Backend_io_descriptor(io) rb_io_descriptor(io)
+#else
+int Event_Backend_io_descriptor(VALUE io);
+#endif
 
-char* Event_Backend_verify_size(VALUE buffer, size_t offset, size_t length);
-char* Event_Backend_resize_to_capacity(VALUE string, size_t offset, size_t length);
-void Event_Backend_resize_to_fit(VALUE string, size_t offset, size_t length);
+#ifdef HAVE_RB_PROCESS_STATUS_WAIT
+#define Event_Backend_process_status_wait(pid) rb_process_status_wait(pid)
+#else
+VALUE Event_Backend_process_status_wait(rb_pid_t pid);
+#endif
 
 int Event_Backend_nonblock_set(int file_descriptor);
 void Event_Backend_nonblock_restore(int file_descriptor, int flags);
+
+struct Event_Backend_Queue {
+	struct Event_Backend_Queue *behind;
+	struct Event_Backend_Queue *infront;
+	
+	VALUE fiber;
+};
+
+struct Event_Backend {
+	VALUE loop;
+	
+	// Append to waiting.
+	struct Event_Backend_Queue *waiting;
+	// Process from ready.
+	struct Event_Backend_Queue *ready;
+};
+
+inline
+void Event_Backend_initialize(struct Event_Backend *backend, VALUE loop) {
+	backend->loop = loop;
+	backend->waiting = NULL;
+	backend->ready = NULL;
+}
+
+inline
+void Event_Backend_mark(struct Event_Backend *backend) {
+	rb_gc_mark(backend->loop);
+	
+	struct Event_Backend_Queue *ready = backend->ready;
+	while (ready) {
+		rb_gc_mark(ready->fiber);
+		ready = ready->behind;
+	}
+}
+
+void Event_Backend_wait_and_transfer(struct Event_Backend *backend, VALUE fiber);
+
+inline
+void Event_Backend_defer(struct Event_Backend *backend)
+{
+	Event_Backend_wait_and_transfer(backend, backend->loop);
+}
+
+void Event_Backend_ready_pop(struct Event_Backend *backend);
+
+void Event_Backend_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
+void Event_Backend_current_time(struct timespec *time);
+
+#define PRINTF_TIMESPEC "%lld.%.9ld"
+#define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
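
The struct Event_Backend added to this header keeps fibers on an intrusive doubly-linked list: queue_push in backend.c appends at the waiting end (tail), and Event_Backend_ready_pop drains from the ready end (head), so deferred fibers resume in FIFO order. A standalone model of the pointer discipline with the Ruby specifics stripped out (the struct and function names here are invented, not from the gem):

    #include <stdio.h>
    #include <stddef.h>
    
    // Simplified stand-in for struct Event_Backend_Queue: `infront` points
    // toward older entries (the ready end), `behind` toward newer ones.
    struct node {
    	struct node *behind;
    	struct node *infront;
    	int id;
    };
    
    struct list {
    	struct node *waiting; // Tail: most recently pushed.
    	struct node *ready;   // Head: oldest, processed first.
    };
    
    // Mirrors queue_push in backend.c:
    static void push(struct list *list, struct node *node) {
    	if (list->waiting) {
    		list->waiting->behind = node;
    		node->infront = list->waiting;
    	} else {
    		list->ready = node;
    	}
    	
    	list->waiting = node;
    }
    
    int main(void) {
    	struct list list = {NULL, NULL};
    	struct node a = {NULL, NULL, 1}, b = {NULL, NULL, 2}, c = {NULL, NULL, 3};
    	
    	push(&list, &a);
    	push(&list, &b);
    	push(&list, &c);
    	
    	// Walk from the ready end toward the waiting end, as
    	// Event_Backend_mark and Event_Backend_ready_pop do:
    	for (struct node *node = list.ready; node; node = node->behind) {
    		printf("%d\n", node->id);
    	}
    	
    	// Prints 1, 2, 3: the first fiber pushed is the first resumed.
    	return 0;
    }

In the real code each node lives on the waiting fiber's stack, and queue_pop runs from an rb_ensure handler so the node is unlinked even if the fiber is resumed with an exception.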
data/ext/event/backend/epoll.c CHANGED
@@ -28,19 +28,18 @@
 #include "pidfd.c"
 
 static VALUE Event_Backend_EPoll = Qnil;
-static ID id_fileno;
 
 enum {EPOLL_MAX_EVENTS = 64};
 
 struct Event_Backend_EPoll {
-	VALUE loop;
+	struct Event_Backend backend;
 	int descriptor;
 };
 
 void Event_Backend_EPoll_Type_mark(void *_data)
 {
 	struct Event_Backend_EPoll *data = _data;
-	rb_gc_mark(data->loop);
+	Event_Backend_mark(&data->backend);
 }
 
 static
@@ -80,7 +79,7 @@ VALUE Event_Backend_EPoll_allocate(VALUE self) {
 	struct Event_Backend_EPoll *data = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
 	
-	data->loop = Qnil;
+	Event_Backend_initialize(&data->backend, Qnil);
 	data->descriptor = -1;
 	
 	return instance;
@@ -90,7 +89,7 @@ VALUE Event_Backend_EPoll_initialize(VALUE self, VALUE loop) {
 	struct Event_Backend_EPoll *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
 	
-	data->loop = loop;
+	Event_Backend_initialize(&data->backend, loop);
 	int result = epoll_create1(EPOLL_CLOEXEC);
 	
 	if (result == -1) {
@@ -113,6 +112,33 @@ VALUE Event_Backend_EPoll_close(VALUE self) {
 	return Qnil;
 }
 
+VALUE Event_Backend_EPoll_transfer(VALUE self, VALUE fiber)
+{
+	struct Event_Backend_EPoll *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+	
+	Event_Backend_wait_and_transfer(&data->backend, fiber);
+	
+	return Qnil;
+}
+
+VALUE Event_Backend_EPoll_defer(VALUE self)
+{
+	struct Event_Backend_EPoll *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+	
+	Event_Backend_defer(&data->backend);
+	
+	return Qnil;
+}
+
+VALUE Event_Backend_EPoll_ready_p(VALUE self) {
+	struct Event_Backend_EPoll *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+	
+	return data->backend.ready ? Qtrue : Qfalse;
+}
+
 struct process_wait_arguments {
 	struct Event_Backend_EPoll *data;
 	pid_t pid;
@@ -124,7 +150,7 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 	
-	Event_Backend_transfer(arguments->data->loop);
+	Event_Backend_fiber_transfer(arguments->data->backend.loop);
 	
 	return Event_Backend_process_status_wait(arguments->pid);
 }
@@ -143,7 +169,7 @@ VALUE process_wait_ensure(VALUE _arguments) {
 VALUE Event_Backend_EPoll_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
 	struct Event_Backend_EPoll *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
-
+	
 	struct process_wait_arguments process_wait_arguments = {
 		.data = data,
 		.pid = NUM2PIDT(pid),
@@ -217,7 +243,7 @@ static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 	
-	VALUE result = Event_Backend_transfer(arguments->data->loop);
+	VALUE result = Event_Backend_fiber_transfer(arguments->data->backend.loop);
 	
 	return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
 };
@@ -228,7 +254,7 @@ VALUE Event_Backend_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
 
 	struct epoll_event event = {0};
 	
-	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+	int descriptor = Event_Backend_io_descriptor(io);
 	int duplicate = -1;
 	
 	event.events = epoll_flags_from_events(NUM2INT(events));
@@ -264,6 +290,8 @@ VALUE Event_Backend_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
 	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
 }
 
+#ifdef HAVE_RUBY_IO_BUFFER_H
+
 struct io_read_arguments {
 	VALUE self;
 	VALUE fiber;
@@ -274,7 +302,6 @@ struct io_read_arguments {
 	int descriptor;
 
 	VALUE buffer;
-	size_t offset;
 	size_t length;
 };
 
@@ -282,18 +309,22 @@ static
 VALUE io_read_loop(VALUE _arguments) {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 	
-	size_t offset = arguments->offset;
+	void *base;
+	size_t size;
+	rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+	
+	size_t offset = 0;
 	size_t length = arguments->length;
-	size_t total = 0;
 	
 	while (length > 0) {
-		char *buffer = Event_Backend_resize_to_capacity(arguments->buffer, offset, length);
-		ssize_t result = read(arguments->descriptor, buffer+offset, length);
+		size_t maximum_size = size - offset;
+		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
 		
-		if (result >= 0) {
+		if (result == 0) {
+			break;
+		} else if (result > 0) {
 			offset += result;
 			length -= result;
-			total += result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
 			Event_Backend_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
 		} else {
@@ -301,9 +332,7 @@ VALUE io_read_loop(VALUE _arguments) {
 		}
 	}
 	
-	Event_Backend_resize_to_fit(arguments->buffer, arguments->offset, arguments->length);
-	
-	return SIZET2NUM(total);
+	return SIZET2NUM(offset);
 }
 
 static
@@ -315,16 +344,11 @@ VALUE io_read_ensure(VALUE _arguments) {
 	return Qnil;
 }
 
-VALUE Event_Backend_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
-	struct Event_Backend_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
-	
-	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+VALUE Event_Backend_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+	int descriptor = Event_Backend_io_descriptor(io);
 	
-	size_t offset = NUM2SIZET(_offset);
 	size_t length = NUM2SIZET(_length);
 	
-
 	struct io_read_arguments io_read_arguments = {
 		.self = self,
 		.fiber = fiber,
@@ -333,7 +357,6 @@ VALUE Event_Backend_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffe
 		.flags = Event_Backend_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
-		.offset = offset,
 		.length = length,
 	};
 
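
io_read_loop above follows the standard nonblocking pattern: read until the descriptor is drained, suspend on EAGAIN/EWOULDBLOCK until it is readable again, and treat a zero return as end-of-file. A self-contained sketch of the same loop, blocking in poll(2) where the gem suspends the fiber via io_wait (names are illustrative, not from the gem):

    #include <errno.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>
    
    // Read up to `size` bytes from `descriptor` into `base`:
    static ssize_t read_exactly(int descriptor, char *base, size_t size) {
    	size_t offset = 0;
    	
    	while (offset < size) {
    		ssize_t result = read(descriptor, base + offset, size - offset);
    		
    		if (result == 0) {
    			break; // End of file.
    		} else if (result > 0) {
    			offset += result;
    		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
    			// Where io_read_loop calls Event_Backend_EPoll_io_wait(..., READABLE):
    			struct pollfd pfd = {.fd = descriptor, .events = POLLIN};
    			poll(&pfd, 1, -1);
    		} else {
    			return -1; // Any other error is reported to the caller.
    		}
    	}
    	
    	return (ssize_t)offset;
    }
    
    int main(void) {
    	int fds[2];
    	char buffer[6] = {0};
    	
    	pipe(fds);
    	write(fds[1], "hello", 5);
    	close(fds[1]);
    	
    	ssize_t result = read_exactly(fds[0], buffer, 5);
    	printf("%zd bytes: %s\n", result, buffer);
    	return 0;
    }

The write path below is symmetrical: it retries on short writes and waits for WRITABLE instead.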
@@ -350,7 +373,6 @@ struct io_write_arguments {
 	int descriptor;
 
 	VALUE buffer;
-	size_t offset;
 	size_t length;
 };
 
@@ -358,18 +380,23 @@ static
 VALUE io_write_loop(VALUE _arguments) {
 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
 	
-	size_t offset = arguments->offset;
+	const void *base;
+	size_t size;
+	rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
+	
+	size_t offset = 0;
 	size_t length = arguments->length;
-	size_t total = 0;
+	
+	if (length > size) {
+		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+	}
 	
 	while (length > 0) {
-		char *buffer = Event_Backend_verify_size(arguments->buffer, offset, length);
-		ssize_t result = write(arguments->descriptor, buffer+offset, length);
+		ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
 		
 		if (result >= 0) {
-			length -= result;
 			offset += result;
-			total += result;
+			length -= result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
 			Event_Backend_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
 		} else {
@@ -377,7 +404,7 @@ VALUE io_write_loop(VALUE _arguments) {
 		}
 	}
 	
-	return SIZET2NUM(total);
+	return SIZET2NUM(offset);
 };
 
 static
@@ -389,13 +416,9 @@ VALUE io_write_ensure(VALUE _arguments) {
 	return Qnil;
 };
 
-VALUE Event_Backend_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
-	struct Event_Backend_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+VALUE Event_Backend_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+	int descriptor = Event_Backend_io_descriptor(io);
 
-	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-
-	size_t offset = NUM2SIZET(_offset);
 	size_t length = NUM2SIZET(_length);
 	
 	struct io_write_arguments io_write_arguments = {
@@ -406,13 +429,14 @@ VALUE Event_Backend_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buff
 		.flags = Event_Backend_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
-		.offset = offset,
 		.length = length,
 	};
 	
 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
 }
 
+#endif
+
 static
 int make_timeout(VALUE duration) {
 	if (duration == Qnil) {
@@ -472,6 +496,8 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
 	struct Event_Backend_EPoll *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
 	
+	Event_Backend_ready_pop(&data->backend);
+	
 	struct select_arguments arguments = {
 		.data = data,
 		.timeout = 0
@@ -482,7 +508,7 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
 	if (arguments.count == 0) {
 		arguments.timeout = make_timeout(duration);
 		
-		if (arguments.timeout != 0) {
+		if (!data->backend.ready && arguments.timeout != 0) {
 			select_internal_without_gvl(&arguments);
 		}
 	}
@@ -493,25 +519,29 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
 		
 		// fprintf(stderr, "-> fiber=%p descriptor=%d\n", (void*)fiber, events[i].data.fd);
 		
-		Event_Backend_transfer_result(fiber, result);
+		Event_Backend_fiber_transfer_result(fiber, result);
 	}
 	
 	return INT2NUM(arguments.count);
 }
 
 void Init_Event_Backend_EPoll(VALUE Event_Backend) {
-	id_fileno = rb_intern("fileno");
-	
 	Event_Backend_EPoll = rb_define_class_under(Event_Backend, "EPoll", rb_cObject);
 	
 	rb_define_alloc_func(Event_Backend_EPoll, Event_Backend_EPoll_allocate);
 	rb_define_method(Event_Backend_EPoll, "initialize", Event_Backend_EPoll_initialize, 1);
+	rb_define_method(Event_Backend_EPoll, "transfer", Event_Backend_EPoll_transfer, 1);
+	rb_define_method(Event_Backend_EPoll, "defer", Event_Backend_EPoll_defer, 0);
+	rb_define_method(Event_Backend_EPoll, "ready?", Event_Backend_EPoll_ready_p, 0);
 	rb_define_method(Event_Backend_EPoll, "select", Event_Backend_EPoll_select, 1);
 	rb_define_method(Event_Backend_EPoll, "close", Event_Backend_EPoll_close, 0);
 	
 	rb_define_method(Event_Backend_EPoll, "io_wait", Event_Backend_EPoll_io_wait, 3);
-	rb_define_method(Event_Backend_EPoll, "io_read", Event_Backend_EPoll_io_read, 5);
-	rb_define_method(Event_Backend_EPoll, "io_write", Event_Backend_EPoll_io_write, 5);
+	
+	#ifdef HAVE_RUBY_IO_BUFFER_H
+	rb_define_method(Event_Backend_EPoll, "io_read", Event_Backend_EPoll_io_read, 4);
+	rb_define_method(Event_Backend_EPoll, "io_write", Event_Backend_EPoll_io_write, 4);
+	#endif
	
 	rb_define_method(Event_Backend_EPoll, "process_wait", Event_Backend_EPoll_process_wait, 3);
 }
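
For reference, the epoll calls this backend builds on (epoll_create1, epoll_ctl, epoll_wait) compose into the register/wait/dispatch cycle that Event_Backend_EPoll_select wraps for fibers. A minimal standalone example on a pipe, Linux only and not code from the gem:

    #include <stdio.h>
    #include <sys/epoll.h>
    #include <unistd.h>
    
    int main(void) {
    	int fds[2];
    	pipe(fds);
    	
    	// Create the epoll instance, as Event_Backend_EPoll_initialize does:
    	int descriptor = epoll_create1(EPOLL_CLOEXEC);
    	
    	// Register interest in readability, as io_wait does for READABLE:
    	struct epoll_event event = {.events = EPOLLIN, .data = {.fd = fds[0]}};
    	epoll_ctl(descriptor, EPOLL_CTL_ADD, fds[0], &event);
    	
    	write(fds[1], "x", 1);
    	
    	// Wait for up to 64 events, mirroring EPOLL_MAX_EVENTS above:
    	struct epoll_event events[64];
    	int count = epoll_wait(descriptor, events, 64, -1);
    	
    	printf("count=%d readable=%d\n", count, (events[0].events & EPOLLIN) ? 1 : 0);
    	
    	close(descriptor);
    	close(fds[0]);
    	close(fds[1]);
    	return 0;
    }

The gem's select additionally drains the ready queue first and skips blocking entirely when fibers are already ready, which is what the new ready? method and the !data->backend.ready condition implement.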