iobuffer 1.0.0 → 1.1.0
- data/README.md +96 -0
- data/ext/iobuffer.c +490 -366
- data/lib/iobuffer/version.rb +5 -0
- metadata +57 -116
- data/CHANGES +0 -18
- data/Gemfile +0 -12
- data/Gemfile.lock +0 -26
- data/LICENSE +0 -19
- data/README.rdoc +0 -83
- data/Rakefile +0 -74
- data/VERSION +0 -1
- data/ext/.gitignore +0 -4
- data/iobuffer.gemspec +0 -66
- data/lib/.gitignore +0 -1
- data/spec/buffer_spec.rb +0 -144
data/ext/iobuffer.c CHANGED
```diff
@@ -1,5 +1,5 @@
 /*
+ * Copyright (C) 2007-12 Tony Arcieri
  * You may redistribute this under the terms of the MIT license.
  * See LICENSE for details
  */
```
```diff
@@ -14,6 +14,9 @@
 #include <unistd.h>
 #include <errno.h>
 
+/* 1 GiB maximum buffer size */
+#define MAX_BUFFER_SIZE 0x40000000
+
 /* Macro for retrieving the file descriptor from an FPTR */
 #if !HAVE_RB_IO_T_FD
 #define FPTR_TO_FD(fptr) fileno(fptr->f)
```
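For reference, the new cap works out to exactly 1 GiB; a quick arithmetic check (not part of the gem, just irb):

```ruby
0x40000000             # => 1073741824
0x40000000 == 1024**3  # => true
```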
```diff
@@ -26,93 +29,102 @@
 static unsigned default_node_size = DEFAULT_NODE_SIZE;
 
 struct buffer {
+    unsigned size, node_size;
+    struct buffer_node *head, *tail;
+    struct buffer_node *pool_head, *pool_tail;
+
 };
 
 struct buffer_node {
+    unsigned start, end;
+    struct buffer_node *next;
+    unsigned char data[0];
 };
 
+static VALUE cIO_Buffer = Qnil;
+
+static VALUE IO_Buffer_allocate(VALUE klass);
+static void IO_Buffer_mark(struct buffer *);
+static void IO_Buffer_free(struct buffer *);
+
+static VALUE IO_Buffer_default_node_size(VALUE klass);
+static VALUE IO_Buffer_set_default_node_size(VALUE klass, VALUE size);
+static VALUE IO_Buffer_initialize(int argc, VALUE * argv, VALUE self);
+static VALUE IO_Buffer_clear(VALUE self);
+static VALUE IO_Buffer_size(VALUE self);
+static VALUE IO_Buffer_empty(VALUE self);
+static VALUE IO_Buffer_append(VALUE self, VALUE data);
+static VALUE IO_Buffer_prepend(VALUE self, VALUE data);
+static VALUE IO_Buffer_read(int argc, VALUE * argv, VALUE self);
+static VALUE IO_Buffer_read_frame(VALUE self, VALUE data, VALUE mark);
+static VALUE IO_Buffer_to_str(VALUE self);
+static VALUE IO_Buffer_read_from(VALUE self, VALUE io);
+static VALUE IO_Buffer_write_to(VALUE self, VALUE io);
 
 static struct buffer *buffer_new(void);
+static void buffer_clear(struct buffer * buf);
+static void buffer_free(struct buffer * buf);
+static void buffer_free_pool(struct buffer * buf);
+static void buffer_prepend(struct buffer * buf, char *str, unsigned len);
+static void buffer_append(struct buffer * buf, char *str, unsigned len);
+static void buffer_read(struct buffer * buf, char *str, unsigned len);
+static int buffer_read_frame(struct buffer * buf, VALUE str, char frame_mark);
+static void buffer_copy(struct buffer * buf, char *str, unsigned len);
+static int buffer_read_from(struct buffer * buf, int fd);
+static int buffer_write_to(struct buffer * buf, int fd);
+
+/*
  * High-performance I/O buffer intended for use in non-blocking programs
  *
+ * Data is stored in as a memory-pooled linked list of equally sized chunks.
+ * Routines are provided for high speed non-blocking reads and writes from
+ * Ruby IO objects.
  */
+void
+Init_iobuffer()
 {
+    cIO_Buffer = rb_define_class_under(rb_cIO, "Buffer", rb_cObject);
+    rb_define_alloc_func(cIO_Buffer, IO_Buffer_allocate);
+
+    rb_define_singleton_method(cIO_Buffer, "default_node_size",
+                               IO_Buffer_default_node_size, 0);
+    rb_define_singleton_method(cIO_Buffer, "default_node_size=",
+                               IO_Buffer_set_default_node_size, 1);
+
+    rb_define_method(cIO_Buffer, "initialize", IO_Buffer_initialize, -1);
+    rb_define_method(cIO_Buffer, "clear", IO_Buffer_clear, 0);
+    rb_define_method(cIO_Buffer, "size", IO_Buffer_size, 0);
+    rb_define_method(cIO_Buffer, "empty?", IO_Buffer_empty, 0);
+    rb_define_method(cIO_Buffer, "<<", IO_Buffer_append, 1);
+    rb_define_method(cIO_Buffer, "append", IO_Buffer_append, 1);
+    rb_define_method(cIO_Buffer, "write", IO_Buffer_append, 1);
+    rb_define_method(cIO_Buffer, "prepend", IO_Buffer_prepend, 1);
+    rb_define_method(cIO_Buffer, "read", IO_Buffer_read, -1);
+    rb_define_method(cIO_Buffer, "read_frame", IO_Buffer_read_frame, 2);
     rb_define_method(cIO_Buffer, "to_str", IO_Buffer_to_str, 0);
     rb_define_method(cIO_Buffer, "read_from", IO_Buffer_read_from, 1);
+    rb_define_method(cIO_Buffer, "write_to", IO_Buffer_write_to, 1);
+
+    rb_define_const(cIO_Buffer, "MAX_SIZE", INT2NUM(MAX_BUFFER_SIZE));
 }
 
+static VALUE
+IO_Buffer_allocate(VALUE klass)
 {
+    return Data_Wrap_Struct(klass, IO_Buffer_mark, IO_Buffer_free, buffer_new());
 }
 
+static void
+IO_Buffer_mark(struct buffer * buf)
 {
+    /* Naively discard the memory pool whenever Ruby garbage collects */
+    buffer_free_pool(buf);
 }
 
+static void
+IO_Buffer_free(struct buffer * buf)
 {
+    buffer_free(buf);
 }
 
 /**
```
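Init_iobuffer above is what wires the C functions into Ruby: the class is registered as IO::Buffer, the `<<`, `append` and `write` names all map to IO_Buffer_append, and 1.1.0 adds `read_frame`, `write_to` and the MAX_SIZE constant. A minimal usage sketch based only on those registrations (illustrative, not taken from the gem's docs; assumes the extension loads with `require "iobuffer"`):

```ruby
require "iobuffer"

buf = IO::Buffer.new     # node size defaults to IO::Buffer.default_node_size
buf << "foo"             # "<<", "append" and "write" are the same method
buf.append("bar")
buf.write("baz")
buf.prepend("start:")

buf.size              # => 15
buf.empty?            # => false
buf.to_str            # => "start:foobarbaz"  (buffer left intact)
buf.read(6)           # => "start:"           (read data is removed)
buf.read              # => "foobarbaz"
buf.empty?            # => true

IO::Buffer::MAX_SIZE  # => 1073741824, the new 1 GiB limit
```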
```diff
@@ -121,22 +133,26 @@ static void IO_Buffer_free(struct buffer *buf)
  *
  * Retrieves the current value of the default node size.
  */
+static VALUE
+IO_Buffer_default_node_size(VALUE klass)
 {
+    return UINT2NUM(default_node_size);
 }
 
 /*
  * safely converts node sizes from Ruby numerics to C and raising
  * ArgumentError or RangeError on invalid sizes
  */
+static unsigned
+convert_node_size(VALUE size)
 {
+    if (
+        rb_funcall(size, rb_intern("<"), 1, INT2NUM(1)) == Qtrue ||
+        rb_funcall(size, rb_intern(">"), 1, INT2NUM(MAX_BUFFER_SIZE)) == Qtrue
+        )
+        rb_raise(rb_eArgError, "invalid buffer size");
 
+    return (unsigned) NUM2INT(size);
 }
 
 /**
```
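convert_node_size is shared by IO::Buffer.new and the default_node_size= setter, so both now reject node sizes outside 1..MAX_BUFFER_SIZE. A sketch of how that surfaces in Ruby (illustrative only, not from the gem's test suite; the concrete 4096 value is just an example):

```ruby
IO::Buffer.default_node_size          # the compiled-in DEFAULT_NODE_SIZE
IO::Buffer.default_node_size = 4096   # used by IO::Buffer.new with no argument

IO::Buffer.new(16 * 1024)                 # per-buffer node size
IO::Buffer.new(0)                         # raises ArgumentError, "invalid buffer size"
IO::Buffer.new(IO::Buffer::MAX_SIZE + 1)  # raises ArgumentError as well
```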
```diff
@@ -145,496 +161,604 @@ static unsigned convert_node_size(VALUE size)
  *
  * Sets the default node size for calling IO::Buffer.new with no arguments.
  */
+static VALUE
+IO_Buffer_set_default_node_size(VALUE klass, VALUE size)
 {
+    default_node_size = convert_node_size(size);
 
+    return size;
 }
 
 /**
  * call-seq:
  *    IO_Buffer.new(size = IO::Buffer.default_node_size) -> IO_Buffer
+ *
  * Create a new IO_Buffer with linked segments of the given size
  */
+static VALUE
+IO_Buffer_initialize(int argc, VALUE * argv, VALUE self)
 {
+    VALUE node_size_obj;
+    struct buffer *buf;
 
+    if (rb_scan_args(argc, argv, "01", &node_size_obj) == 1) {
+        Data_Get_Struct(self, struct buffer, buf);
 
+        /*
+         * Make sure we're not changing the buffer size after data
+         * has been allocated
+         */
+        assert(!buf->head);
+        assert(!buf->pool_head);
 
+        buf->node_size = convert_node_size(node_size_obj);
+    }
+    return Qnil;
 }
 
 /**
  * call-seq:
  *    IO_Buffer#clear -> nil
+ *
  * Clear all data from the IO_Buffer
  */
+static VALUE
+IO_Buffer_clear(VALUE self)
 {
+    struct buffer *buf;
+    Data_Get_Struct(self, struct buffer, buf);
 
+    buffer_clear(buf);
 
+    return Qnil;
 }
 
 /**
  * call-seq:
  *    IO_Buffer#size -> Integer
+ *
  * Return the size of the buffer in bytes
  */
+static VALUE
+IO_Buffer_size(VALUE self)
 {
+    struct buffer *buf;
+    Data_Get_Struct(self, struct buffer, buf);
 
+    return INT2NUM(buf->size);
 }
 
 /**
  * call-seq:
  *    IO_Buffer#empty? -> Boolean
+ *
  * Is the buffer empty?
  */
+static VALUE
+IO_Buffer_empty(VALUE self)
 {
+    struct buffer *buf;
+    Data_Get_Struct(self, struct buffer, buf);
 
+    return buf->size > 0 ? Qfalse : Qtrue;
 }
 
 /**
  * call-seq:
  *    IO_Buffer#append(data) -> String
+ *
  * Append the given data to the end of the buffer
  */
+static VALUE
+IO_Buffer_append(VALUE self, VALUE data)
 {
+    struct buffer *buf;
+    Data_Get_Struct(self, struct buffer, buf);
 
+    /* Is this needed?  Never seen anyone else do it... */
+    data = rb_convert_type(data, T_STRING, "String", "to_str");
+    buffer_append(buf, RSTRING_PTR(data), RSTRING_LEN(data));
 
+    return data;
 }
 
 /**
  * call-seq:
  *    IO_Buffer#prepend(data) -> String
+ *
  * Prepend the given data to the beginning of the buffer
  */
+static VALUE
+IO_Buffer_prepend(VALUE self, VALUE data)
 {
+    struct buffer *buf;
+    Data_Get_Struct(self, struct buffer, buf);
 
+    data = rb_convert_type(data, T_STRING, "String", "to_str");
+    buffer_prepend(buf, RSTRING_PTR(data), RSTRING_LEN(data));
 
+    return data;
 }
 
 /**
  * call-seq:
  *    IO_Buffer#read(length = nil) -> String
+ *
  * Read the specified abount of data from the buffer.  If no value
  * is given the entire contents of the buffer are returned.  Any data
  * read from the buffer is cleared.
  */
+static VALUE
+IO_Buffer_read(int argc, VALUE * argv, VALUE self)
 {
+    VALUE length_obj, str;
+    int length;
+    struct buffer *buf;
+
+    Data_Get_Struct(self, struct buffer, buf);
+
+    if (rb_scan_args(argc, argv, "01", &length_obj) == 1) {
+        length = NUM2INT(length_obj);
+    } else {
+        if (buf->size == 0)
+            return rb_str_new2("");
 
+        length = buf->size;
+    }
 
+    if (length > buf->size)
+        length = buf->size;
 
+    if (length < 1)
+        rb_raise(rb_eArgError, "length must be greater than zero");
 
+    str = rb_str_new(0, length);
+    buffer_read(buf, RSTRING_PTR(str), length);
 
+    return str;
+}
+
+/**
+ * call-seq:
+ *    IO_Buffer#read_frame(str, mark) -> boolean
+ *
+ * Read up to and including the given frame marker (expressed a a
+ * Fixnum 0-255) byte, copying into the supplied string object. If the mark is
+ * not encountered before the end of the buffer, false is returned but data
+ * is still copied into str. True is returned if the end of a frame is reached.
+ *
+ */
+static VALUE
+IO_Buffer_read_frame(VALUE self, VALUE data, VALUE mark)
+{
+    char mark_c = (char) NUM2INT(mark);
+    struct buffer *buf;
 
+    Data_Get_Struct(self, struct buffer, buf);
 
+    if (buffer_read_frame(buf, data, mark_c)) {
+        return Qtrue;
+    } else {
+        return Qfalse;
+    }
 }
 
 /**
  * call-seq:
  *    IO_Buffer#to_str -> String
+ *
  * Convert the Buffer to a String.  The original buffer is unmodified.
  */
+static VALUE
+IO_Buffer_to_str(VALUE self)
+{
+    VALUE str;
     struct buffer *buf;
+
     Data_Get_Struct(self, struct buffer, buf);
+
     str = rb_str_new(0, buf->size);
     buffer_copy(buf, RSTRING_PTR(str), buf->size);
+
+    return str;
 }
 
 /**
  * call-seq:
  *    IO_Buffer#read_from(io) -> Integer
+ *
  * Perform a nonblocking read of the the given IO object and fill
  * the buffer with any data received.  The call will read as much
  * data as it can until the read would block.
  */
+static VALUE
+IO_Buffer_read_from(VALUE self, VALUE io)
+{
+    struct buffer *buf;
+    int ret;
 #if HAVE_RB_IO_T
+    rb_io_t *fptr;
 #else
+    OpenFile *fptr;
 #endif
 
+    Data_Get_Struct(self, struct buffer, buf);
+    GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
+    rb_io_set_nonblock(fptr);
 
+    ret = buffer_read_from(buf, FPTR_TO_FD(fptr));
+    return ret == -1 ? Qnil : INT2NUM(ret);
 }
 
 /**
  * call-seq:
  *    IO_Buffer#write_to(io) -> Integer
+ *
  * Perform a nonblocking write of the buffer to the given IO object.
  * As much data as possible is written until the call would block.
  * Any data which is written is removed from the buffer.
  */
+static VALUE
+IO_Buffer_write_to(VALUE self, VALUE io)
+{
+    struct buffer *buf;
+#if HAVE_RB_IO_T
+    rb_io_t *fptr;
 #else
+    OpenFile *fptr;
 #endif
 
+    Data_Get_Struct(self, struct buffer, buf);
+    GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
+    rb_io_set_nonblock(fptr);
 
+    return INT2NUM(buffer_write_to(buf, FPTR_TO_FD(fptr)));
 }
 
 /*
+ * Ruby bindings end here.  Below is the actual implementation of
  * the underlying byte queue ADT
  */
 
 /* Create a new buffer */
+static struct buffer *
+buffer_new(void)
 {
+    struct buffer *buf;
+
+    buf = (struct buffer *) xmalloc(sizeof(struct buffer));
+    buf->head = buf->tail = buf->pool_head = buf->pool_tail = 0;
+    buf->size = 0;
+    buf->node_size = default_node_size;
+
+    return buf;
 }
 
 /* Clear all data from a buffer */
+static void
+buffer_clear(struct buffer * buf)
 {
+    /* Move everything into the buffer pool */
+    if (!buf->pool_tail) {
+        buf->pool_head = buf->pool_tail = buf->head;
+    } else {
+        buf->pool_tail->next = buf->head;
+    }
+
+    buf->head = buf->tail = 0;
+    buf->size = 0;
 }
 
 /* Free a buffer */
+static void
+buffer_free(struct buffer * buf)
 {
+    buffer_clear(buf);
+    buffer_free_pool(buf);
 
+    free(buf);
 }
 
 /* Free the memory pool */
+static void
+buffer_free_pool(struct buffer * buf)
 {
+    struct buffer_node *tmp;
+
+    while (buf->pool_head) {
+        tmp = buf->pool_head;
+        buf->pool_head = tmp->next;
+        free(tmp);
+    }
+
     buf->pool_tail = 0;
 }
 
 /* Create a new buffer_node (or pull one from the memory pool) */
+static struct buffer_node *
+buffer_node_new(struct buffer * buf)
 {
+    struct buffer_node *node;
+
+    /* Pull from the memory pool if available */
+    if (buf->pool_head) {
+        node = buf->pool_head;
+        buf->pool_head = node->next;
+
+        if (node->next)
+            node->next = 0;
+        else
+            buf->pool_tail = 0;
+    } else {
+        node = (struct buffer_node *) xmalloc(sizeof(struct buffer_node) + buf->node_size);
+        node->next = 0;
+    }
+
+    node->start = node->end = 0;
+    return node;
 }
 
 /* Free a buffer node (i.e. return it to the memory pool) */
+static void
+buffer_node_free(struct buffer * buf, struct buffer_node * node)
 {
+    node->next = buf->pool_head;
+    buf->pool_head = node;
 
+    if (!buf->pool_tail) {
+        buf->pool_tail = node;
+    }
 }
 
 /* Prepend data to the front of the buffer */
+static void
+buffer_prepend(struct buffer * buf, char *str, unsigned len)
 {
+    struct buffer_node *node, *tmp;
+    buf->size += len;
+
+    /* If it fits in the beginning of the head */
+    if (buf->head && buf->head->start >= len) {
+        buf->head->start -= len;
+        memcpy(buf->head->data + buf->head->start, str, len);
+    } else {
+        node = buffer_node_new(buf);
+        node->next = buf->head;
+        buf->head = node;
+        if (!buf->tail)
+            buf->tail = node;
+
+        while (len > buf->node_size) {
+            memcpy(node->data, str, buf->node_size);
+            node->end = buf->node_size;
+
+            tmp = buffer_node_new(buf);
+            tmp->next = node->next;
+            node->next = tmp;
+
+            if (buf->tail == node)
+                buf->tail = tmp;
+            node = tmp;
+
+            str += buf->node_size;
+            len -= buf->node_size;
+        }
+
+        if (len > 0) {
+            memcpy(node->data, str, len);
+            node->end = len;
+        }
+    }
 }
 
 /* Append data to the front of the buffer */
+static void
+buffer_append(struct buffer * buf, char *str, unsigned len)
 {
+    unsigned nbytes;
+    buf->size += len;
+
+    /* If it fits in the remaining space in the tail */
+    if (buf->tail && len <= buf->node_size - buf->tail->end) {
+        memcpy(buf->tail->data + buf->tail->end, str, len);
+        buf->tail->end += len;
+        return;
+    }
+    /* Empty list needs initialized */
+    if (!buf->head) {
+        buf->head = buffer_node_new(buf);
+        buf->tail = buf->head;
+    }
+    /* Build links out of the data */
+    while (len > 0) {
+        nbytes = buf->node_size - buf->tail->end;
+        if (len < nbytes)
+            nbytes = len;
+
+        memcpy(buf->tail->data + buf->tail->end, str, nbytes);
+        str += nbytes;
+        len -= nbytes;
+
+        buf->tail->end += nbytes;
+
+        if (len > 0) {
+            buf->tail->next = buffer_node_new(buf);
+            buf->tail = buf->tail->next;
+        }
+    }
 }
 
 /* Read data from the buffer (and clear what we've read) */
+static void
+buffer_read(struct buffer * buf, char *str, unsigned len)
+{
+    unsigned nbytes;
+    struct buffer_node *tmp;
+
+    while (buf->size > 0 && len > 0) {
+        nbytes = buf->head->end - buf->head->start;
+        if (len < nbytes)
+            nbytes = len;
+
+        memcpy(str, buf->head->data + buf->head->start, nbytes);
+        str += nbytes;
+        len -= nbytes;
+
+        buf->head->start += nbytes;
+        buf->size -= nbytes;
+
+        if (buf->head->start == buf->head->end) {
+            tmp = buf->head;
+            buf->head = tmp->next;
+            buffer_node_free(buf, tmp);
+
+            if (!buf->head)
+                buf->tail = 0;
+        }
+    }
+}
+
+/*
+ * Read data from the buffer into str until byte frame_mark or empty. Bytes
+ * are copied into str and removed if a complete frame is read, a true value
+ * is returned
+ */
+static int
+buffer_read_frame(struct buffer * buf, VALUE str, char frame_mark)
 {
+    unsigned nbytes = 0;
+    struct buffer_node *tmp;
+
+    while (buf->size > 0) {
+        struct buffer_node *head = buf->head;
+        char *loc, *s = head->data + head->start, *e = head->data + head->end;
+        nbytes = e - s;
+
+        loc = memchr(s, frame_mark, nbytes);
+
+        if (loc) {
+            nbytes = loc - s + 1;
+        }
+
+        /* Copy less than everything if we found a frame byte */
+        rb_str_cat(str, s, nbytes);
 
+        /* Fixup the buffer pointers to indicate the bytes were consumed */
+        head->start += nbytes;
+        buf->size -= nbytes;
 
+        if (head->start == head->end) {
+            buf->head = head->next;
+            buffer_node_free(buf, head);
 
+            if (!buf->head)
+                buf->tail = 0;
+        }
 
+        if (loc) {
+            return 1;
+        }
+    }
 
+    return 0;
 }
 
 /* Copy data from the buffer without clearing it */
+static void
+buffer_copy(struct buffer * buf, char *str, unsigned len)
 {
+    unsigned nbytes;
+    struct buffer_node *node;
 
     node = buf->head;
+    while (node && len > 0) {
+        nbytes = node->end - node->start;
+        if (len < nbytes)
+            nbytes = len;
 
+        memcpy(str, node->data + node->start, nbytes);
+        str += nbytes;
+        len -= nbytes;
 
+        if (node->start + nbytes == node->end)
             node = node->next;
+    }
 }
 
 /* Write data from the buffer to a file descriptor */
+static int
+buffer_write_to(struct buffer * buf, int fd)
 {
+    int bytes_written, total_bytes_written = 0;
+    struct buffer_node *tmp;
 
+    while (buf->head) {
+        bytes_written = write(fd, buf->head->data + buf->head->start, buf->head->end - buf->head->start);
 
+        /* If the write failed... */
+        if (bytes_written < 0) {
+            if (errno != EAGAIN)
+                rb_sys_fail("write");
 
+            return total_bytes_written;
+        }
 
+        total_bytes_written += bytes_written;
+        buf->size -= bytes_written;
 
+        /* If the write blocked... */
+        if (bytes_written < buf->head->end - buf->head->start) {
+            buf->head->start += bytes_written;
+            return total_bytes_written;
+        }
+        /* Otherwise we wrote the whole buffer */
+        tmp = buf->head;
+        buf->head = tmp->next;
+        buffer_node_free(buf, tmp);
 
+        if (!buf->head)
+            buf->tail = 0;
+    }
 
+    return total_bytes_written;
 }
 
 /* Read data from a file descriptor to a buffer */
 /* Append data to the front of the buffer */
+static int
+buffer_read_from(struct buffer * buf, int fd)
 {
+    int bytes_read, total_bytes_read = 0;
+    unsigned nbytes;
 
+    /* Empty list needs initialized */
+    if (!buf->head) {
+        buf->head = buffer_node_new(buf);
+        buf->tail = buf->head;
+    }
 
     do {
+        nbytes = buf->node_size - buf->tail->end;
         bytes_read = read(fd, buf->tail->data + buf->tail->end, nbytes);
+
+        if (bytes_read == 0) {
+            return -1;
+            //When the file reaches EOF
+        } else if (bytes_read < 0) {
+            if (errno != EAGAIN)
+                rb_sys_fail("read");
+
             return total_bytes_read;
         }
+        total_bytes_read += bytes_read;
         buf->tail->end += nbytes;
         buf->size += nbytes;
+
+        if (buf->tail->end == buf->node_size) {
+            buf->tail->next = buffer_node_new(buf);
+            buf->tail = buf->tail->next;
         }
+    } while (bytes_read == nbytes);
+
     return total_bytes_read;
 }
```
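Taken together, the new read_frame plus the nonblocking read_from/write_to methods are the user-facing surface of this hunk. A usage sketch based only on the call-seq comments above (illustrative; the UNIXSocket pair and the exact return values are assumptions, not taken from the gem):

```ruby
require "iobuffer"
require "socket"

left, right = UNIXSocket.pair

out = IO::Buffer.new
out << "ping\npong\n"
out.write_to(left)        # nonblocking write; written data is removed from the buffer
out.empty?                # => true
right.read_nonblock(10)   # => "ping\npong\n"

# Frame-based reads: copy up to and including a marker byte (here "\n").
inbox = IO::Buffer.new
inbox << "first\nsecond\npartial"

line = ""
inbox.read_frame(line, "\n".ord)  # => true,  line == "first\n"
line = ""
inbox.read_frame(line, "\n".ord)  # => true,  line == "second\n"
line = ""
inbox.read_frame(line, "\n".ord)  # => false, "partial" is still copied into line

# read_from(io) works the other way around, draining a readable IO into the
# buffer until the read would block (it returns nil once the IO hits EOF).
```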