cool.io 1.1.1 → 1.2.0
Sign up to get free protection for your applications and to get access to all the features.
- data/.travis.yml +1 -1
- data/CHANGES.md +4 -2
- data/Gemfile +1 -1
- data/README.md +2 -1
- data/Rakefile +22 -3
- data/cool.io.gemspec +6 -8
- data/ext/cool.io/extconf.rb +2 -1
- data/ext/cool.io/stat_watcher.c +2 -0
- data/ext/iobuffer/extconf.rb +9 -0
- data/ext/iobuffer/iobuffer.c +765 -0
- data/ext/libev/ev.c +1186 -296
- data/ext/libev/ev.h +123 -107
- data/ext/libev/ev_epoll.c +23 -10
- data/ext/libev/ev_kqueue.c +23 -7
- data/ext/libev/ev_poll.c +4 -4
- data/ext/libev/ev_port.c +9 -3
- data/ext/libev/ev_select.c +10 -6
- data/ext/libev/ev_vars.h +18 -18
- data/ext/libev/ev_win32.c +12 -2
- data/ext/libev/ev_wrap.h +164 -160
- data/lib/cool.io.rb +4 -4
- data/lib/cool.io/custom_require.rb +9 -0
- data/lib/cool.io/dns_resolver.rb +8 -5
- data/lib/cool.io/http_client.rb +4 -3
- data/lib/cool.io/listener.rb +3 -9
- data/lib/cool.io/server.rb +1 -2
- data/lib/cool.io/version.rb +1 -1
- data/spec/async_watcher_spec.rb +1 -1
- data/spec/spec_helper.rb +8 -1
- data/spec/unix_listener_spec.rb +1 -1
- data/spec/unix_server_spec.rb +2 -4
- metadata +35 -29
- checksums.yaml +0 -7
data/.travis.yml
CHANGED
data/CHANGES.md
CHANGED
data/Gemfile
CHANGED
data/README.md
CHANGED
@@ -1,7 +1,8 @@
|
|
1
1
|
Cool.io
|
2
2
|
=======
|
3
3
|
|
4
|
-
|
4
|
+
### NOTE: cool.io is in maintenance mode only and is not being actively developed
|
5
|
+
### Please check out [Celluloid::IO](http://github.com/celluloid/celluloid-io) instead!
|
5
6
|
|
6
7
|
Cool.io is an event library for Ruby, built on the libev event library which
|
7
8
|
provides a cross-platform interface to high performance system calls. This
|
data/Rakefile
CHANGED
@@ -21,11 +21,29 @@ Rake::RDocTask.new do |rdoc|
|
|
21
21
|
end
|
22
22
|
|
23
23
|
require 'rake/extensiontask'
|
24
|
-
|
24
|
+
|
25
|
+
spec = eval(File.read("cool.io.gemspec"))
|
26
|
+
|
27
|
+
def configure_cross_compilation(ext)
|
28
|
+
unless RUBY_PLATFORM =~ /mswin|mingw/
|
29
|
+
ext.cross_compile = true
|
30
|
+
ext.cross_platform = 'i386-mingw32'#['i386-mswin32-60', 'i386-mingw32']
|
31
|
+
end
|
32
|
+
end
|
33
|
+
|
34
|
+
Rake::ExtensionTask.new('iobuffer_ext', spec) do |ext|
|
35
|
+
ext.ext_dir = 'ext/iobuffer'
|
36
|
+
configure_cross_compilation(ext)
|
37
|
+
end
|
38
|
+
|
39
|
+
Rake::ExtensionTask.new('http11_client', spec) do |ext|
|
40
|
+
ext.ext_dir = 'ext/http11_client'
|
41
|
+
configure_cross_compilation(ext)
|
25
42
|
end
|
26
43
|
|
27
|
-
Rake::ExtensionTask.new('cool.io_ext') do |ext|
|
44
|
+
Rake::ExtensionTask.new('cool.io_ext', spec) do |ext|
|
28
45
|
ext.ext_dir = 'ext/cool.io'
|
46
|
+
configure_cross_compilation(ext)
|
29
47
|
end
|
30
48
|
|
31
49
|
# Rebuild parser Ragel
|
@@ -59,4 +77,5 @@ namespace :spec do
|
|
59
77
|
end
|
60
78
|
end
|
61
79
|
|
62
|
-
CLEAN.include "**/*.rbc", "**/*.o", "**/*.so", "**/*.bundle"
|
80
|
+
CLEAN.include "**/*.rbc", "**/*.o", "**/*.so", "**/*.bundle"
|
81
|
+
CLEAN.exclude "vendor/**/*.rbc", "vendor/**/*.o", "vendor/**/*.so", "vendor/**/*.bundle"
|
data/cool.io.gemspec
CHANGED
@@ -10,21 +10,19 @@ end
|
|
10
10
|
Gem::Specification.new do |s|
|
11
11
|
s.name = "cool.io"
|
12
12
|
s.version = Coolio::VERSION
|
13
|
-
s.authors = ["Tony Arcieri"]
|
14
|
-
s.email = ["tony.arcieri@gmail.com"]
|
13
|
+
s.authors = ["Tony Arcieri", "Masahiro Nakagawa"]
|
14
|
+
s.email = ["tony.arcieri@gmail.com", "repeatedly@gmail.com"]
|
15
15
|
s.homepage = "http://coolio.github.com"
|
16
16
|
s.summary = "A cool framework for doing high performance I/O in Ruby"
|
17
17
|
s.description = "Cool.io provides a high performance event framework for Ruby which uses the libev C library"
|
18
|
-
s.extensions = ["ext/cool.io/extconf.rb", "ext/http11_client/extconf.rb"]
|
18
|
+
s.extensions = ["ext/cool.io/extconf.rb", "ext/http11_client/extconf.rb", "ext/iobuffer/extconf.rb"]
|
19
19
|
|
20
20
|
s.files = `git ls-files`.split("\n")
|
21
21
|
s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
|
22
22
|
s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
|
23
23
|
s.require_paths = ["lib"]
|
24
24
|
|
25
|
-
s.
|
26
|
-
|
27
|
-
s.add_development_dependency "rake-compiler", "~> 0.7.9"
|
28
|
-
s.add_development_dependency "rspec", ">= 2.6.0"
|
25
|
+
s.add_development_dependency "rake-compiler", "~> 0.8.3"
|
26
|
+
s.add_development_dependency "rspec", ">= 2.13.0"
|
29
27
|
s.add_development_dependency "rdoc", ">= 3.6.0"
|
30
|
-
end
|
28
|
+
end
|
data/ext/cool.io/extconf.rb
CHANGED
@@ -64,7 +64,8 @@ if RUBY_PLATFORM =~ /mingw|win32/
|
|
64
64
|
makefile_contents = File.read 'Makefile'
|
65
65
|
|
66
66
|
# "Init_cool could not be found" when loading cool.io.so.
|
67
|
-
|
67
|
+
# I'm not sure why this is needed. But this line causes "1114 A dynamic link library (DLL) initialization routine failed." So I commented out this line.
|
68
|
+
#makefile_contents.gsub! 'DLDFLAGS = ', 'DLDFLAGS = -export-all '
|
68
69
|
|
69
70
|
makefile_contents.gsub! 'LIBS = $(LIBRUBYARG_SHARED)', 'LIBS = -lws2_32 $(LIBRUBYARG_SHARED)'
|
70
71
|
File.open('Makefile', 'w') { |f| f.write makefile_contents }
|
data/ext/cool.io/stat_watcher.c
CHANGED
@@ -246,8 +246,10 @@ static VALUE Coolio_StatInfo_build(ev_statdata *statdata_struct)
|
|
246
246
|
gid = INT2NUM(statdata_struct->st_gid);
|
247
247
|
rdev = INT2NUM(statdata_struct->st_rdev);
|
248
248
|
size = INT2NUM(statdata_struct->st_size);
|
249
|
+
#ifdef HAVE_ST_BLKSIZE
|
249
250
|
blksize = INT2NUM(statdata_struct->st_blksize);
|
250
251
|
blocks = INT2NUM(statdata_struct->st_blocks);
|
252
|
+
#endif
|
251
253
|
|
252
254
|
return rb_struct_new(cCoolio_StatInfo,
|
253
255
|
mtime,
|
@@ -0,0 +1,765 @@
|
|
1
|
+
/*
|
2
|
+
* Copyright (C) 2007-12 Tony Arcieri
|
3
|
+
* You may redistribute this under the terms of the MIT license.
|
4
|
+
* See LICENSE for details
|
5
|
+
*/
|
6
|
+
|
7
|
+
#include "ruby.h"
|
8
|
+
#include "ruby/io.h"
|
9
|
+
|
10
|
+
#include <assert.h>
|
11
|
+
|
12
|
+
#include <string.h>
|
13
|
+
#include <time.h>
|
14
|
+
#include <unistd.h>
|
15
|
+
#include <errno.h>
|
16
|
+
|
17
|
+
/* 1 GiB maximum buffer size */
|
18
|
+
#define MAX_BUFFER_SIZE 0x40000000
|
19
|
+
|
20
|
+
/* Macro for retrieving the file descriptor from an FPTR */
|
21
|
+
#if !HAVE_RB_IO_T_FD
|
22
|
+
#define FPTR_TO_FD(fptr) fileno(fptr->f)
|
23
|
+
#else
|
24
|
+
#define FPTR_TO_FD(fptr) fptr->fd
|
25
|
+
#endif
|
26
|
+
|
27
|
+
/* Default number of bytes in each node's buffer. Should be >= MTU */
|
28
|
+
#define DEFAULT_NODE_SIZE 16384
|
29
|
+
static unsigned default_node_size = DEFAULT_NODE_SIZE;
|
30
|
+
|
31
|
+
struct buffer {
|
32
|
+
unsigned size, node_size;
|
33
|
+
struct buffer_node *head, *tail;
|
34
|
+
struct buffer_node *pool_head, *pool_tail;
|
35
|
+
|
36
|
+
};
|
37
|
+
|
38
|
+
struct buffer_node {
|
39
|
+
unsigned start, end;
|
40
|
+
struct buffer_node *next;
|
41
|
+
unsigned char data[0];
|
42
|
+
};
|
43
|
+
|
44
|
+
static VALUE cIO_Buffer = Qnil;
|
45
|
+
|
46
|
+
static VALUE IO_Buffer_allocate(VALUE klass);
|
47
|
+
static void IO_Buffer_mark(struct buffer *);
|
48
|
+
static void IO_Buffer_free(struct buffer *);
|
49
|
+
|
50
|
+
static VALUE IO_Buffer_default_node_size(VALUE klass);
|
51
|
+
static VALUE IO_Buffer_set_default_node_size(VALUE klass, VALUE size);
|
52
|
+
static VALUE IO_Buffer_initialize(int argc, VALUE * argv, VALUE self);
|
53
|
+
static VALUE IO_Buffer_clear(VALUE self);
|
54
|
+
static VALUE IO_Buffer_size(VALUE self);
|
55
|
+
static VALUE IO_Buffer_empty(VALUE self);
|
56
|
+
static VALUE IO_Buffer_append(VALUE self, VALUE data);
|
57
|
+
static VALUE IO_Buffer_prepend(VALUE self, VALUE data);
|
58
|
+
static VALUE IO_Buffer_read(int argc, VALUE * argv, VALUE self);
|
59
|
+
static VALUE IO_Buffer_read_frame(VALUE self, VALUE data, VALUE mark);
|
60
|
+
static VALUE IO_Buffer_to_str(VALUE self);
|
61
|
+
static VALUE IO_Buffer_read_from(VALUE self, VALUE io);
|
62
|
+
static VALUE IO_Buffer_write_to(VALUE self, VALUE io);
|
63
|
+
|
64
|
+
static struct buffer *buffer_new(void);
|
65
|
+
static void buffer_clear(struct buffer * buf);
|
66
|
+
static void buffer_free(struct buffer * buf);
|
67
|
+
static void buffer_free_pool(struct buffer * buf);
|
68
|
+
static void buffer_prepend(struct buffer * buf, char *str, unsigned len);
|
69
|
+
static void buffer_append(struct buffer * buf, char *str, unsigned len);
|
70
|
+
static void buffer_read(struct buffer * buf, char *str, unsigned len);
|
71
|
+
static int buffer_read_frame(struct buffer * buf, VALUE str, char frame_mark);
|
72
|
+
static void buffer_copy(struct buffer * buf, char *str, unsigned len);
|
73
|
+
static int buffer_read_from(struct buffer * buf, int fd);
|
74
|
+
static int buffer_write_to(struct buffer * buf, int fd);
|
75
|
+
|
76
|
+
/*
|
77
|
+
* High-performance I/O buffer intended for use in non-blocking programs
|
78
|
+
*
|
79
|
+
* Data is stored in as a memory-pooled linked list of equally sized chunks.
|
80
|
+
* Routines are provided for high speed non-blocking reads and writes from
|
81
|
+
* Ruby IO objects.
|
82
|
+
*/
|
83
|
+
void
|
84
|
+
Init_iobuffer_ext()
|
85
|
+
{
|
86
|
+
cIO_Buffer = rb_define_class_under(rb_cIO, "Buffer", rb_cObject);
|
87
|
+
rb_define_alloc_func(cIO_Buffer, IO_Buffer_allocate);
|
88
|
+
|
89
|
+
rb_define_singleton_method(cIO_Buffer, "default_node_size",
|
90
|
+
IO_Buffer_default_node_size, 0);
|
91
|
+
rb_define_singleton_method(cIO_Buffer, "default_node_size=",
|
92
|
+
IO_Buffer_set_default_node_size, 1);
|
93
|
+
|
94
|
+
rb_define_method(cIO_Buffer, "initialize", IO_Buffer_initialize, -1);
|
95
|
+
rb_define_method(cIO_Buffer, "clear", IO_Buffer_clear, 0);
|
96
|
+
rb_define_method(cIO_Buffer, "size", IO_Buffer_size, 0);
|
97
|
+
rb_define_method(cIO_Buffer, "empty?", IO_Buffer_empty, 0);
|
98
|
+
rb_define_method(cIO_Buffer, "<<", IO_Buffer_append, 1);
|
99
|
+
rb_define_method(cIO_Buffer, "append", IO_Buffer_append, 1);
|
100
|
+
rb_define_method(cIO_Buffer, "write", IO_Buffer_append, 1);
|
101
|
+
rb_define_method(cIO_Buffer, "prepend", IO_Buffer_prepend, 1);
|
102
|
+
rb_define_method(cIO_Buffer, "read", IO_Buffer_read, -1);
|
103
|
+
rb_define_method(cIO_Buffer, "read_frame", IO_Buffer_read_frame, 2);
|
104
|
+
rb_define_method(cIO_Buffer, "to_str", IO_Buffer_to_str, 0);
|
105
|
+
rb_define_method(cIO_Buffer, "read_from", IO_Buffer_read_from, 1);
|
106
|
+
rb_define_method(cIO_Buffer, "write_to", IO_Buffer_write_to, 1);
|
107
|
+
|
108
|
+
rb_define_const(cIO_Buffer, "MAX_SIZE", INT2NUM(MAX_BUFFER_SIZE));
|
109
|
+
}
|
110
|
+
|
111
|
+
static VALUE
|
112
|
+
IO_Buffer_allocate(VALUE klass)
|
113
|
+
{
|
114
|
+
return Data_Wrap_Struct(klass, IO_Buffer_mark, IO_Buffer_free, buffer_new());
|
115
|
+
}
|
116
|
+
|
117
|
+
/*
 * GC mark hook. The buffer references no Ruby objects, so marking is
 * (ab)used as an opportunity to discard the node memory pool whenever
 * Ruby garbage collects.
 */
static void
IO_Buffer_mark(struct buffer * buf)
{
    buffer_free_pool(buf);
}
|
123
|
+
|
124
|
+
/* GC free hook: release every node and the buffer struct itself. */
static void
IO_Buffer_free(struct buffer * buf)
{
    buffer_free(buf);
}
|
129
|
+
|
130
|
+
/**
|
131
|
+
* call-seq:
|
132
|
+
* IO_Buffer.default_node_size -> 4096
|
133
|
+
*
|
134
|
+
* Retrieves the current value of the default node size.
|
135
|
+
*/
|
136
|
+
static VALUE
|
137
|
+
IO_Buffer_default_node_size(VALUE klass)
|
138
|
+
{
|
139
|
+
return UINT2NUM(default_node_size);
|
140
|
+
}
|
141
|
+
|
142
|
+
/*
|
143
|
+
* safely converts node sizes from Ruby numerics to C and raising
|
144
|
+
* ArgumentError or RangeError on invalid sizes
|
145
|
+
*/
|
146
|
+
static unsigned
|
147
|
+
convert_node_size(VALUE size)
|
148
|
+
{
|
149
|
+
if (
|
150
|
+
rb_funcall(size, rb_intern("<"), 1, INT2NUM(1)) == Qtrue ||
|
151
|
+
rb_funcall(size, rb_intern(">"), 1, INT2NUM(MAX_BUFFER_SIZE)) == Qtrue
|
152
|
+
)
|
153
|
+
rb_raise(rb_eArgError, "invalid buffer size");
|
154
|
+
|
155
|
+
return (unsigned) NUM2INT(size);
|
156
|
+
}
|
157
|
+
|
158
|
+
/**
|
159
|
+
* call-seq:
|
160
|
+
* IO_Buffer.default_node_size = 16384
|
161
|
+
*
|
162
|
+
* Sets the default node size for calling IO::Buffer.new with no arguments.
|
163
|
+
*/
|
164
|
+
static VALUE
|
165
|
+
IO_Buffer_set_default_node_size(VALUE klass, VALUE size)
|
166
|
+
{
|
167
|
+
default_node_size = convert_node_size(size);
|
168
|
+
|
169
|
+
return size;
|
170
|
+
}
|
171
|
+
|
172
|
+
/**
|
173
|
+
* call-seq:
|
174
|
+
* IO_Buffer.new(size = IO::Buffer.default_node_size) -> IO_Buffer
|
175
|
+
*
|
176
|
+
* Create a new IO_Buffer with linked segments of the given size
|
177
|
+
*/
|
178
|
+
static VALUE
|
179
|
+
IO_Buffer_initialize(int argc, VALUE * argv, VALUE self)
|
180
|
+
{
|
181
|
+
VALUE node_size_obj;
|
182
|
+
struct buffer *buf;
|
183
|
+
|
184
|
+
if (rb_scan_args(argc, argv, "01", &node_size_obj) == 1) {
|
185
|
+
Data_Get_Struct(self, struct buffer, buf);
|
186
|
+
|
187
|
+
/*
|
188
|
+
* Make sure we're not changing the buffer size after data
|
189
|
+
* has been allocated
|
190
|
+
*/
|
191
|
+
assert(!buf->head);
|
192
|
+
assert(!buf->pool_head);
|
193
|
+
|
194
|
+
buf->node_size = convert_node_size(node_size_obj);
|
195
|
+
}
|
196
|
+
return Qnil;
|
197
|
+
}
|
198
|
+
|
199
|
+
/**
|
200
|
+
* call-seq:
|
201
|
+
* IO_Buffer#clear -> nil
|
202
|
+
*
|
203
|
+
* Clear all data from the IO_Buffer
|
204
|
+
*/
|
205
|
+
static VALUE
|
206
|
+
IO_Buffer_clear(VALUE self)
|
207
|
+
{
|
208
|
+
struct buffer *buf;
|
209
|
+
Data_Get_Struct(self, struct buffer, buf);
|
210
|
+
|
211
|
+
buffer_clear(buf);
|
212
|
+
|
213
|
+
return Qnil;
|
214
|
+
}
|
215
|
+
|
216
|
+
/**
|
217
|
+
* call-seq:
|
218
|
+
* IO_Buffer#size -> Integer
|
219
|
+
*
|
220
|
+
* Return the size of the buffer in bytes
|
221
|
+
*/
|
222
|
+
static VALUE
|
223
|
+
IO_Buffer_size(VALUE self)
|
224
|
+
{
|
225
|
+
struct buffer *buf;
|
226
|
+
Data_Get_Struct(self, struct buffer, buf);
|
227
|
+
|
228
|
+
return INT2NUM(buf->size);
|
229
|
+
}
|
230
|
+
|
231
|
+
/**
|
232
|
+
* call-seq:
|
233
|
+
* IO_Buffer#empty? -> Boolean
|
234
|
+
*
|
235
|
+
* Is the buffer empty?
|
236
|
+
*/
|
237
|
+
static VALUE
|
238
|
+
IO_Buffer_empty(VALUE self)
|
239
|
+
{
|
240
|
+
struct buffer *buf;
|
241
|
+
Data_Get_Struct(self, struct buffer, buf);
|
242
|
+
|
243
|
+
return buf->size > 0 ? Qfalse : Qtrue;
|
244
|
+
}
|
245
|
+
|
246
|
+
/**
|
247
|
+
* call-seq:
|
248
|
+
* IO_Buffer#append(data) -> String
|
249
|
+
*
|
250
|
+
* Append the given data to the end of the buffer
|
251
|
+
*/
|
252
|
+
static VALUE
|
253
|
+
IO_Buffer_append(VALUE self, VALUE data)
|
254
|
+
{
|
255
|
+
struct buffer *buf;
|
256
|
+
Data_Get_Struct(self, struct buffer, buf);
|
257
|
+
|
258
|
+
/* Is this needed? Never seen anyone else do it... */
|
259
|
+
data = rb_convert_type(data, T_STRING, "String", "to_str");
|
260
|
+
buffer_append(buf, RSTRING_PTR(data), RSTRING_LEN(data));
|
261
|
+
|
262
|
+
return data;
|
263
|
+
}
|
264
|
+
|
265
|
+
/**
|
266
|
+
* call-seq:
|
267
|
+
* IO_Buffer#prepend(data) -> String
|
268
|
+
*
|
269
|
+
* Prepend the given data to the beginning of the buffer
|
270
|
+
*/
|
271
|
+
static VALUE
|
272
|
+
IO_Buffer_prepend(VALUE self, VALUE data)
|
273
|
+
{
|
274
|
+
struct buffer *buf;
|
275
|
+
Data_Get_Struct(self, struct buffer, buf);
|
276
|
+
|
277
|
+
data = rb_convert_type(data, T_STRING, "String", "to_str");
|
278
|
+
buffer_prepend(buf, RSTRING_PTR(data), RSTRING_LEN(data));
|
279
|
+
|
280
|
+
return data;
|
281
|
+
}
|
282
|
+
|
283
|
+
/**
|
284
|
+
* call-seq:
|
285
|
+
* IO_Buffer#read(length = nil) -> String
|
286
|
+
*
|
287
|
+
* Read the specified abount of data from the buffer. If no value
|
288
|
+
* is given the entire contents of the buffer are returned. Any data
|
289
|
+
* read from the buffer is cleared.
|
290
|
+
* The given length must be greater than 0 or an exception would raise.
|
291
|
+
* If the buffer size is zero then an empty string is returned (regardless
|
292
|
+
* the given length).
|
293
|
+
*/
|
294
|
+
static VALUE
|
295
|
+
IO_Buffer_read(int argc, VALUE * argv, VALUE self)
|
296
|
+
{
|
297
|
+
VALUE length_obj, str;
|
298
|
+
int length;
|
299
|
+
struct buffer *buf;
|
300
|
+
|
301
|
+
Data_Get_Struct(self, struct buffer, buf);
|
302
|
+
|
303
|
+
if (rb_scan_args(argc, argv, "01", &length_obj) == 1) {
|
304
|
+
length = NUM2INT(length_obj);
|
305
|
+
if(length < 1)
|
306
|
+
rb_raise(rb_eArgError, "length must be greater than zero");
|
307
|
+
if(length > buf->size)
|
308
|
+
length = buf->size;
|
309
|
+
} else
|
310
|
+
length = buf->size;
|
311
|
+
|
312
|
+
if(buf->size == 0)
|
313
|
+
return rb_str_new2("");
|
314
|
+
|
315
|
+
str = rb_str_new(0, length);
|
316
|
+
buffer_read(buf, RSTRING_PTR(str), length);
|
317
|
+
|
318
|
+
return str;
|
319
|
+
}
|
320
|
+
|
321
|
+
/**
|
322
|
+
* call-seq:
|
323
|
+
* IO_Buffer#read_frame(str, mark) -> boolean
|
324
|
+
*
|
325
|
+
* Read up to and including the given frame marker (expressed a a
|
326
|
+
* Fixnum 0-255) byte, copying into the supplied string object. If the mark is
|
327
|
+
* not encountered before the end of the buffer, false is returned but data
|
328
|
+
* is still copied into str. True is returned if the end of a frame is reached.
|
329
|
+
*
|
330
|
+
*/
|
331
|
+
static VALUE
|
332
|
+
IO_Buffer_read_frame(VALUE self, VALUE data, VALUE mark)
|
333
|
+
{
|
334
|
+
char mark_c = (char) NUM2INT(mark);
|
335
|
+
struct buffer *buf;
|
336
|
+
|
337
|
+
Data_Get_Struct(self, struct buffer, buf);
|
338
|
+
|
339
|
+
if (buffer_read_frame(buf, data, mark_c)) {
|
340
|
+
return Qtrue;
|
341
|
+
} else {
|
342
|
+
return Qfalse;
|
343
|
+
}
|
344
|
+
}
|
345
|
+
|
346
|
+
/**
|
347
|
+
* call-seq:
|
348
|
+
* IO_Buffer#to_str -> String
|
349
|
+
*
|
350
|
+
* Convert the Buffer to a String. The original buffer is unmodified.
|
351
|
+
*/
|
352
|
+
static VALUE
|
353
|
+
IO_Buffer_to_str(VALUE self)
|
354
|
+
{
|
355
|
+
VALUE str;
|
356
|
+
struct buffer *buf;
|
357
|
+
|
358
|
+
Data_Get_Struct(self, struct buffer, buf);
|
359
|
+
|
360
|
+
str = rb_str_new(0, buf->size);
|
361
|
+
buffer_copy(buf, RSTRING_PTR(str), buf->size);
|
362
|
+
|
363
|
+
return str;
|
364
|
+
}
|
365
|
+
|
366
|
+
/**
|
367
|
+
* call-seq:
|
368
|
+
* IO_Buffer#read_from(io) -> Integer
|
369
|
+
*
|
370
|
+
* Perform a nonblocking read of the the given IO object and fill
|
371
|
+
* the buffer with any data received. The call will read as much
|
372
|
+
* data as it can until the read would block.
|
373
|
+
*/
|
374
|
+
static VALUE
|
375
|
+
IO_Buffer_read_from(VALUE self, VALUE io)
|
376
|
+
{
|
377
|
+
struct buffer *buf;
|
378
|
+
int ret;
|
379
|
+
#if HAVE_RB_IO_T
|
380
|
+
rb_io_t *fptr;
|
381
|
+
#else
|
382
|
+
OpenFile *fptr;
|
383
|
+
#endif
|
384
|
+
|
385
|
+
Data_Get_Struct(self, struct buffer, buf);
|
386
|
+
GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
|
387
|
+
rb_io_set_nonblock(fptr);
|
388
|
+
|
389
|
+
ret = buffer_read_from(buf, FPTR_TO_FD(fptr));
|
390
|
+
return ret == -1 ? Qnil : INT2NUM(ret);
|
391
|
+
}
|
392
|
+
|
393
|
+
/**
|
394
|
+
* call-seq:
|
395
|
+
* IO_Buffer#write_to(io) -> Integer
|
396
|
+
*
|
397
|
+
* Perform a nonblocking write of the buffer to the given IO object.
|
398
|
+
* As much data as possible is written until the call would block.
|
399
|
+
* Any data which is written is removed from the buffer.
|
400
|
+
*/
|
401
|
+
static VALUE
|
402
|
+
IO_Buffer_write_to(VALUE self, VALUE io)
|
403
|
+
{
|
404
|
+
struct buffer *buf;
|
405
|
+
#if HAVE_RB_IO_T
|
406
|
+
rb_io_t *fptr;
|
407
|
+
#else
|
408
|
+
OpenFile *fptr;
|
409
|
+
#endif
|
410
|
+
|
411
|
+
Data_Get_Struct(self, struct buffer, buf);
|
412
|
+
GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
|
413
|
+
rb_io_set_nonblock(fptr);
|
414
|
+
|
415
|
+
return INT2NUM(buffer_write_to(buf, FPTR_TO_FD(fptr)));
|
416
|
+
}
|
417
|
+
|
418
|
+
/*
|
419
|
+
* Ruby bindings end here. Below is the actual implementation of
|
420
|
+
* the underlying byte queue ADT
|
421
|
+
*/
|
422
|
+
|
423
|
+
/* Create a new buffer */
|
424
|
+
static struct buffer *
|
425
|
+
buffer_new(void)
|
426
|
+
{
|
427
|
+
struct buffer *buf;
|
428
|
+
|
429
|
+
buf = (struct buffer *) xmalloc(sizeof(struct buffer));
|
430
|
+
buf->head = buf->tail = buf->pool_head = buf->pool_tail = 0;
|
431
|
+
buf->size = 0;
|
432
|
+
buf->node_size = default_node_size;
|
433
|
+
|
434
|
+
return buf;
|
435
|
+
}
|
436
|
+
|
437
|
+
/* Clear all data from a buffer */
|
438
|
+
static void
|
439
|
+
buffer_clear(struct buffer * buf)
|
440
|
+
{
|
441
|
+
/* Move everything into the buffer pool */
|
442
|
+
if (!buf->pool_tail) {
|
443
|
+
buf->pool_head = buf->pool_tail = buf->head;
|
444
|
+
} else {
|
445
|
+
buf->pool_tail->next = buf->head;
|
446
|
+
}
|
447
|
+
|
448
|
+
buf->head = buf->tail = 0;
|
449
|
+
buf->size = 0;
|
450
|
+
}
|
451
|
+
|
452
|
+
/*
 * Destroy a buffer: recycle live nodes into the pool, release the
 * pool, then free the buffer struct itself.
 */
static void
buffer_free(struct buffer * buf)
{
    buffer_clear(buf);
    buffer_free_pool(buf);
    free(buf);
}
|
461
|
+
|
462
|
+
/* Free the memory pool */
|
463
|
+
static void
|
464
|
+
buffer_free_pool(struct buffer * buf)
|
465
|
+
{
|
466
|
+
struct buffer_node *tmp;
|
467
|
+
|
468
|
+
while (buf->pool_head) {
|
469
|
+
tmp = buf->pool_head;
|
470
|
+
buf->pool_head = tmp->next;
|
471
|
+
free(tmp);
|
472
|
+
}
|
473
|
+
|
474
|
+
buf->pool_tail = 0;
|
475
|
+
}
|
476
|
+
|
477
|
+
/* Create a new buffer_node (or pull one from the memory pool) */
|
478
|
+
static struct buffer_node *
|
479
|
+
buffer_node_new(struct buffer * buf)
|
480
|
+
{
|
481
|
+
struct buffer_node *node;
|
482
|
+
|
483
|
+
/* Pull from the memory pool if available */
|
484
|
+
if (buf->pool_head) {
|
485
|
+
node = buf->pool_head;
|
486
|
+
buf->pool_head = node->next;
|
487
|
+
|
488
|
+
if (node->next)
|
489
|
+
node->next = 0;
|
490
|
+
else
|
491
|
+
buf->pool_tail = 0;
|
492
|
+
} else {
|
493
|
+
node = (struct buffer_node *) xmalloc(sizeof(struct buffer_node) + buf->node_size);
|
494
|
+
node->next = 0;
|
495
|
+
}
|
496
|
+
|
497
|
+
node->start = node->end = 0;
|
498
|
+
return node;
|
499
|
+
}
|
500
|
+
|
501
|
+
/* Free a buffer node (i.e. return it to the memory pool) */
|
502
|
+
static void
|
503
|
+
buffer_node_free(struct buffer * buf, struct buffer_node * node)
|
504
|
+
{
|
505
|
+
node->next = buf->pool_head;
|
506
|
+
buf->pool_head = node;
|
507
|
+
|
508
|
+
if (!buf->pool_tail) {
|
509
|
+
buf->pool_tail = node;
|
510
|
+
}
|
511
|
+
}
|
512
|
+
|
513
|
+
/* Prepend data to the front of the buffer */
|
514
|
+
static void
|
515
|
+
buffer_prepend(struct buffer * buf, char *str, unsigned len)
|
516
|
+
{
|
517
|
+
struct buffer_node *node, *tmp;
|
518
|
+
buf->size += len;
|
519
|
+
|
520
|
+
/* If it fits in the beginning of the head */
|
521
|
+
if (buf->head && buf->head->start >= len) {
|
522
|
+
buf->head->start -= len;
|
523
|
+
memcpy(buf->head->data + buf->head->start, str, len);
|
524
|
+
} else {
|
525
|
+
node = buffer_node_new(buf);
|
526
|
+
node->next = buf->head;
|
527
|
+
buf->head = node;
|
528
|
+
if (!buf->tail)
|
529
|
+
buf->tail = node;
|
530
|
+
|
531
|
+
while (len > buf->node_size) {
|
532
|
+
memcpy(node->data, str, buf->node_size);
|
533
|
+
node->end = buf->node_size;
|
534
|
+
|
535
|
+
tmp = buffer_node_new(buf);
|
536
|
+
tmp->next = node->next;
|
537
|
+
node->next = tmp;
|
538
|
+
|
539
|
+
if (buf->tail == node)
|
540
|
+
buf->tail = tmp;
|
541
|
+
node = tmp;
|
542
|
+
|
543
|
+
str += buf->node_size;
|
544
|
+
len -= buf->node_size;
|
545
|
+
}
|
546
|
+
|
547
|
+
if (len > 0) {
|
548
|
+
memcpy(node->data, str, len);
|
549
|
+
node->end = len;
|
550
|
+
}
|
551
|
+
}
|
552
|
+
}
|
553
|
+
|
554
|
+
/* Append data to the front of the buffer */
|
555
|
+
static void
|
556
|
+
buffer_append(struct buffer * buf, char *str, unsigned len)
|
557
|
+
{
|
558
|
+
unsigned nbytes;
|
559
|
+
buf->size += len;
|
560
|
+
|
561
|
+
/* If it fits in the remaining space in the tail */
|
562
|
+
if (buf->tail && len <= buf->node_size - buf->tail->end) {
|
563
|
+
memcpy(buf->tail->data + buf->tail->end, str, len);
|
564
|
+
buf->tail->end += len;
|
565
|
+
return;
|
566
|
+
}
|
567
|
+
/* Empty list needs initialized */
|
568
|
+
if (!buf->head) {
|
569
|
+
buf->head = buffer_node_new(buf);
|
570
|
+
buf->tail = buf->head;
|
571
|
+
}
|
572
|
+
/* Build links out of the data */
|
573
|
+
while (len > 0) {
|
574
|
+
nbytes = buf->node_size - buf->tail->end;
|
575
|
+
if (len < nbytes)
|
576
|
+
nbytes = len;
|
577
|
+
|
578
|
+
memcpy(buf->tail->data + buf->tail->end, str, nbytes);
|
579
|
+
str += nbytes;
|
580
|
+
len -= nbytes;
|
581
|
+
|
582
|
+
buf->tail->end += nbytes;
|
583
|
+
|
584
|
+
if (len > 0) {
|
585
|
+
buf->tail->next = buffer_node_new(buf);
|
586
|
+
buf->tail = buf->tail->next;
|
587
|
+
}
|
588
|
+
}
|
589
|
+
}
|
590
|
+
|
591
|
+
/* Read data from the buffer (and clear what we've read) */
|
592
|
+
static void
|
593
|
+
buffer_read(struct buffer * buf, char *str, unsigned len)
|
594
|
+
{
|
595
|
+
unsigned nbytes;
|
596
|
+
struct buffer_node *tmp;
|
597
|
+
|
598
|
+
while (buf->size > 0 && len > 0) {
|
599
|
+
nbytes = buf->head->end - buf->head->start;
|
600
|
+
if (len < nbytes)
|
601
|
+
nbytes = len;
|
602
|
+
|
603
|
+
memcpy(str, buf->head->data + buf->head->start, nbytes);
|
604
|
+
str += nbytes;
|
605
|
+
len -= nbytes;
|
606
|
+
|
607
|
+
buf->head->start += nbytes;
|
608
|
+
buf->size -= nbytes;
|
609
|
+
|
610
|
+
if (buf->head->start == buf->head->end) {
|
611
|
+
tmp = buf->head;
|
612
|
+
buf->head = tmp->next;
|
613
|
+
buffer_node_free(buf, tmp);
|
614
|
+
|
615
|
+
if (!buf->head)
|
616
|
+
buf->tail = 0;
|
617
|
+
}
|
618
|
+
}
|
619
|
+
}
|
620
|
+
|
621
|
+
/*
|
622
|
+
* Read data from the buffer into str until byte frame_mark or empty. Bytes
|
623
|
+
* are copied into str and removed if a complete frame is read, a true value
|
624
|
+
* is returned
|
625
|
+
*/
|
626
|
+
static int
|
627
|
+
buffer_read_frame(struct buffer * buf, VALUE str, char frame_mark)
|
628
|
+
{
|
629
|
+
unsigned nbytes = 0;
|
630
|
+
struct buffer_node *tmp;
|
631
|
+
|
632
|
+
while (buf->size > 0) {
|
633
|
+
struct buffer_node *head = buf->head;
|
634
|
+
char *loc, *s = head->data + head->start, *e = head->data + head->end;
|
635
|
+
nbytes = e - s;
|
636
|
+
|
637
|
+
loc = memchr(s, frame_mark, nbytes);
|
638
|
+
|
639
|
+
if (loc) {
|
640
|
+
nbytes = loc - s + 1;
|
641
|
+
}
|
642
|
+
|
643
|
+
/* Copy less than everything if we found a frame byte */
|
644
|
+
rb_str_cat(str, s, nbytes);
|
645
|
+
|
646
|
+
/* Fixup the buffer pointers to indicate the bytes were consumed */
|
647
|
+
head->start += nbytes;
|
648
|
+
buf->size -= nbytes;
|
649
|
+
|
650
|
+
if (head->start == head->end) {
|
651
|
+
buf->head = head->next;
|
652
|
+
buffer_node_free(buf, head);
|
653
|
+
|
654
|
+
if (!buf->head)
|
655
|
+
buf->tail = 0;
|
656
|
+
}
|
657
|
+
|
658
|
+
if (loc) {
|
659
|
+
return 1;
|
660
|
+
}
|
661
|
+
}
|
662
|
+
|
663
|
+
return 0;
|
664
|
+
}
|
665
|
+
|
666
|
+
/* Copy data from the buffer without clearing it */
|
667
|
+
static void
|
668
|
+
buffer_copy(struct buffer * buf, char *str, unsigned len)
|
669
|
+
{
|
670
|
+
unsigned nbytes;
|
671
|
+
struct buffer_node *node;
|
672
|
+
|
673
|
+
node = buf->head;
|
674
|
+
while (node && len > 0) {
|
675
|
+
nbytes = node->end - node->start;
|
676
|
+
if (len < nbytes)
|
677
|
+
nbytes = len;
|
678
|
+
|
679
|
+
memcpy(str, node->data + node->start, nbytes);
|
680
|
+
str += nbytes;
|
681
|
+
len -= nbytes;
|
682
|
+
|
683
|
+
if (node->start + nbytes == node->end)
|
684
|
+
node = node->next;
|
685
|
+
}
|
686
|
+
}
|
687
|
+
|
688
|
+
/* Write data from the buffer to a file descriptor */
|
689
|
+
static int
|
690
|
+
buffer_write_to(struct buffer * buf, int fd)
|
691
|
+
{
|
692
|
+
int bytes_written, total_bytes_written = 0;
|
693
|
+
struct buffer_node *tmp;
|
694
|
+
|
695
|
+
while (buf->head) {
|
696
|
+
bytes_written = write(fd, buf->head->data + buf->head->start, buf->head->end - buf->head->start);
|
697
|
+
|
698
|
+
/* If the write failed... */
|
699
|
+
if (bytes_written < 0) {
|
700
|
+
if (errno != EAGAIN)
|
701
|
+
rb_sys_fail("write");
|
702
|
+
|
703
|
+
return total_bytes_written;
|
704
|
+
}
|
705
|
+
|
706
|
+
total_bytes_written += bytes_written;
|
707
|
+
buf->size -= bytes_written;
|
708
|
+
|
709
|
+
/* If the write blocked... */
|
710
|
+
if (bytes_written < buf->head->end - buf->head->start) {
|
711
|
+
buf->head->start += bytes_written;
|
712
|
+
return total_bytes_written;
|
713
|
+
}
|
714
|
+
/* Otherwise we wrote the whole buffer */
|
715
|
+
tmp = buf->head;
|
716
|
+
buf->head = tmp->next;
|
717
|
+
buffer_node_free(buf, tmp);
|
718
|
+
|
719
|
+
if (!buf->head)
|
720
|
+
buf->tail = 0;
|
721
|
+
}
|
722
|
+
|
723
|
+
return total_bytes_written;
|
724
|
+
}
|
725
|
+
|
726
|
+
/* Read data from a file descriptor to a buffer */
|
727
|
+
/* Append data to the front of the buffer */
|
728
|
+
static int
|
729
|
+
buffer_read_from(struct buffer * buf, int fd)
|
730
|
+
{
|
731
|
+
int bytes_read, total_bytes_read = 0;
|
732
|
+
unsigned nbytes;
|
733
|
+
|
734
|
+
/* Empty list needs initialized */
|
735
|
+
if (!buf->head) {
|
736
|
+
buf->head = buffer_node_new(buf);
|
737
|
+
buf->tail = buf->head;
|
738
|
+
}
|
739
|
+
|
740
|
+
do {
|
741
|
+
nbytes = buf->node_size - buf->tail->end;
|
742
|
+
bytes_read = read(fd, buf->tail->data + buf->tail->end, nbytes);
|
743
|
+
|
744
|
+
if (bytes_read == 0) {
|
745
|
+
return -1;
|
746
|
+
//When the file reaches EOF
|
747
|
+
} else if (bytes_read < 0) {
|
748
|
+
if (errno != EAGAIN)
|
749
|
+
rb_sys_fail("read");
|
750
|
+
|
751
|
+
return total_bytes_read;
|
752
|
+
}
|
753
|
+
|
754
|
+
total_bytes_read += bytes_read;
|
755
|
+
buf->tail->end += bytes_read;
|
756
|
+
buf->size += bytes_read;
|
757
|
+
|
758
|
+
if (buf->tail->end == buf->node_size) {
|
759
|
+
buf->tail->next = buffer_node_new(buf);
|
760
|
+
buf->tail = buf->tail->next;
|
761
|
+
}
|
762
|
+
} while (bytes_read == nbytes);
|
763
|
+
|
764
|
+
return total_bytes_read;
|
765
|
+
}
|