cool.io 1.4.1-x64-mingw32

Files changed (76)
  1. checksums.yaml +7 -0
  2. data/.gitignore +29 -0
  3. data/.rspec +3 -0
  4. data/.travis.yml +13 -0
  5. data/CHANGES.md +229 -0
  6. data/Gemfile +4 -0
  7. data/LICENSE +20 -0
  8. data/README.md +166 -0
  9. data/Rakefile +79 -0
  10. data/cool.io.gemspec +29 -0
  11. data/examples/callbacked_echo_server.rb +24 -0
  12. data/examples/dslified_echo_client.rb +34 -0
  13. data/examples/dslified_echo_server.rb +24 -0
  14. data/examples/echo_client.rb +38 -0
  15. data/examples/echo_server.rb +27 -0
  16. data/examples/google.rb +9 -0
  17. data/ext/cool.io/.gitignore +5 -0
  18. data/ext/cool.io/cool.io.h +59 -0
  19. data/ext/cool.io/cool.io_ext.c +25 -0
  20. data/ext/cool.io/ev_wrap.h +10 -0
  21. data/ext/cool.io/extconf.rb +61 -0
  22. data/ext/cool.io/iowatcher.c +189 -0
  23. data/ext/cool.io/libev.c +8 -0
  24. data/ext/cool.io/loop.c +261 -0
  25. data/ext/cool.io/stat_watcher.c +269 -0
  26. data/ext/cool.io/timer_watcher.c +219 -0
  27. data/ext/cool.io/utils.c +122 -0
  28. data/ext/cool.io/watcher.c +264 -0
  29. data/ext/cool.io/watcher.h +71 -0
  30. data/ext/iobuffer/extconf.rb +9 -0
  31. data/ext/iobuffer/iobuffer.c +767 -0
  32. data/ext/libev/Changes +507 -0
  33. data/ext/libev/LICENSE +37 -0
  34. data/ext/libev/README +58 -0
  35. data/ext/libev/README.embed +3 -0
  36. data/ext/libev/ev.c +5054 -0
  37. data/ext/libev/ev.h +853 -0
  38. data/ext/libev/ev_epoll.c +282 -0
  39. data/ext/libev/ev_kqueue.c +214 -0
  40. data/ext/libev/ev_poll.c +148 -0
  41. data/ext/libev/ev_port.c +185 -0
  42. data/ext/libev/ev_select.c +362 -0
  43. data/ext/libev/ev_vars.h +204 -0
  44. data/ext/libev/ev_win32.c +163 -0
  45. data/ext/libev/ev_wrap.h +200 -0
  46. data/ext/libev/ruby_gil.patch +97 -0
  47. data/ext/libev/test_libev_win32.c +123 -0
  48. data/ext/libev/win_select.patch +115 -0
  49. data/lib/.gitignore +2 -0
  50. data/lib/cool.io.rb +34 -0
  51. data/lib/cool.io/async_watcher.rb +43 -0
  52. data/lib/cool.io/custom_require.rb +9 -0
  53. data/lib/cool.io/dns_resolver.rb +219 -0
  54. data/lib/cool.io/dsl.rb +139 -0
  55. data/lib/cool.io/io.rb +194 -0
  56. data/lib/cool.io/iowatcher.rb +17 -0
  57. data/lib/cool.io/listener.rb +99 -0
  58. data/lib/cool.io/loop.rb +122 -0
  59. data/lib/cool.io/meta.rb +49 -0
  60. data/lib/cool.io/server.rb +75 -0
  61. data/lib/cool.io/socket.rb +230 -0
  62. data/lib/cool.io/timer_watcher.rb +17 -0
  63. data/lib/cool.io/version.rb +7 -0
  64. data/lib/coolio.rb +2 -0
  65. data/spec/async_watcher_spec.rb +57 -0
  66. data/spec/dns_spec.rb +43 -0
  67. data/spec/iobuffer_spec.rb +147 -0
  68. data/spec/spec_helper.rb +19 -0
  69. data/spec/stat_watcher_spec.rb +77 -0
  70. data/spec/tcp_server_spec.rb +225 -0
  71. data/spec/tcp_socket_spec.rb +185 -0
  72. data/spec/timer_watcher_spec.rb +59 -0
  73. data/spec/udp_socket_spec.rb +58 -0
  74. data/spec/unix_listener_spec.rb +25 -0
  75. data/spec/unix_server_spec.rb +27 -0
  76. metadata +182 -0
data/ext/cool.io/watcher.h
@@ -0,0 +1,71 @@
/*
 * Copyright (C) 2007-10 Tony Arcieri
 * You may redistribute this under the terms of the Ruby license.
 * See LICENSE for details
 */

#ifndef WATCHER_H
#define WATCHER_H

#define Watcher_Attach(watcher_type, detach_func, watcher, loop) \
    struct Coolio_Watcher *watcher_data; \
    struct Coolio_Loop *loop_data; \
    \
    if(!rb_obj_is_kind_of(loop, cCoolio_Loop)) \
        rb_raise(rb_eArgError, "expected loop to be an instance of Coolio::Loop"); \
    \
    Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); \
    Data_Get_Struct(loop, struct Coolio_Loop, loop_data); \
    \
    if(watcher_data->loop != Qnil) \
        detach_func(watcher); \
    \
    watcher_data->loop = loop; \
    ev_##watcher_type##_start(loop_data->ev_loop, &watcher_data->event_types.ev_##watcher_type); \
    rb_call_super(1, &loop)

#define Watcher_Detach(watcher_type, watcher) \
    struct Coolio_Watcher *watcher_data; \
    struct Coolio_Loop *loop_data; \
    \
    Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); \
    \
    if(watcher_data->loop == Qnil) \
        rb_raise(rb_eRuntimeError, "not attached to a loop"); \
    \
    Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); \
    \
    ev_##watcher_type##_stop(loop_data->ev_loop, &watcher_data->event_types.ev_##watcher_type); \
    rb_call_super(0, 0)

#define Watcher_Enable(watcher_type, watcher) \
    struct Coolio_Watcher *watcher_data; \
    struct Coolio_Loop *loop_data; \
    \
    Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); \
    \
    if(watcher_data->loop == Qnil) \
        rb_raise(rb_eRuntimeError, "not attached to a loop"); \
    \
    rb_call_super(0, 0); \
    \
    Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); \
    \
    ev_##watcher_type##_start(loop_data->ev_loop, &watcher_data->event_types.ev_##watcher_type)

#define Watcher_Disable(watcher_type, watcher) \
    struct Coolio_Watcher *watcher_data; \
    struct Coolio_Loop *loop_data; \
    \
    Data_Get_Struct(watcher, struct Coolio_Watcher, watcher_data); \
    \
    if(watcher_data->loop == Qnil) \
        rb_raise(rb_eRuntimeError, "not attached to a loop"); \
    \
    rb_call_super(0, 0); \
    \
    Data_Get_Struct(watcher_data->loop, struct Coolio_Loop, loop_data); \
    \
    ev_##watcher_type##_stop(loop_data->ev_loop, &watcher_data->event_types.ev_##watcher_type)

#endif
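The four macros above back the attach/detach/enable/disable lifecycle shared by every watcher class. A minimal sketch of that lifecycle from the Ruby side, assuming the gem is loaded with require 'cool.io' and that Coolio::TimerWatcher.new(interval, repeating) is available as suggested by the lib/ and ext/ files listed above (its exact constructor signature is not shown in this diff):

    require 'cool.io'

    # Assumed API: a one-second repeating timer; the constructor arguments
    # are not part of this diff and are shown only for illustration.
    timer = Coolio::TimerWatcher.new(1.0, true)

    event_loop = Coolio::Loop.new
    timer.attach(event_loop)   # Watcher_Attach: ev_timer_start on this loop
    timer.disable              # Watcher_Disable: ev_timer_stop, still attached
    timer.enable               # Watcher_Enable: ev_timer_start again
    timer.detach               # Watcher_Detach: removed from the loop

Attaching to anything other than a Coolio::Loop raises ArgumentError, and enable, disable, or detach on an unattached watcher raises RuntimeError ("not attached to a loop"), exactly as the macros encode.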
data/ext/iobuffer/extconf.rb
@@ -0,0 +1,9 @@
require 'mkmf'

dir_config("iobuffer")
have_library("c", "main")
if have_macro("HAVE_RB_IO_T", "rubyio.h")
  have_struct_member("rb_io_t", "fd", "rubyio.h")
end

create_makefile("iobuffer_ext")
data/ext/iobuffer/iobuffer.c
@@ -0,0 +1,767 @@
/*
 * Copyright (C) 2007-12 Tony Arcieri
 * You may redistribute this under the terms of the MIT license.
 * See LICENSE for details
 */

#include "ruby.h"
#include "ruby/io.h"

#include <assert.h>

#include <string.h>
#include <time.h>
#ifndef _MSC_VER
#include <unistd.h>
#endif
#include <errno.h>

/* 1 GiB maximum buffer size */
#define MAX_BUFFER_SIZE 0x40000000

/* Macro for retrieving the file descriptor from an FPTR */
#if !HAVE_RB_IO_T_FD
#define FPTR_TO_FD(fptr) fileno(fptr->f)
#else
#define FPTR_TO_FD(fptr) fptr->fd
#endif

/* Default number of bytes in each node's buffer. Should be >= MTU */
#define DEFAULT_NODE_SIZE 16384
static unsigned default_node_size = DEFAULT_NODE_SIZE;

struct buffer {
    unsigned size, node_size;
    struct buffer_node *head, *tail;
    struct buffer_node *pool_head, *pool_tail;

};

struct buffer_node {
    unsigned start, end;
    struct buffer_node *next;
    unsigned char data[0];
};

static VALUE cIO_Buffer = Qnil;

static VALUE IO_Buffer_allocate(VALUE klass);
static void IO_Buffer_mark(struct buffer *);
static void IO_Buffer_free(struct buffer *);

static VALUE IO_Buffer_default_node_size(VALUE klass);
static VALUE IO_Buffer_set_default_node_size(VALUE klass, VALUE size);
static VALUE IO_Buffer_initialize(int argc, VALUE * argv, VALUE self);
static VALUE IO_Buffer_clear(VALUE self);
static VALUE IO_Buffer_size(VALUE self);
static VALUE IO_Buffer_empty(VALUE self);
static VALUE IO_Buffer_append(VALUE self, VALUE data);
static VALUE IO_Buffer_prepend(VALUE self, VALUE data);
static VALUE IO_Buffer_read(int argc, VALUE * argv, VALUE self);
static VALUE IO_Buffer_read_frame(VALUE self, VALUE data, VALUE mark);
static VALUE IO_Buffer_to_str(VALUE self);
static VALUE IO_Buffer_read_from(VALUE self, VALUE io);
static VALUE IO_Buffer_write_to(VALUE self, VALUE io);

static struct buffer *buffer_new(void);
static void buffer_clear(struct buffer * buf);
static void buffer_free(struct buffer * buf);
static void buffer_free_pool(struct buffer * buf);
static void buffer_prepend(struct buffer * buf, char *str, unsigned len);
static void buffer_append(struct buffer * buf, char *str, unsigned len);
static void buffer_read(struct buffer * buf, char *str, unsigned len);
static int buffer_read_frame(struct buffer * buf, VALUE str, char frame_mark);
static void buffer_copy(struct buffer * buf, char *str, unsigned len);
static int buffer_read_from(struct buffer * buf, int fd);
static int buffer_write_to(struct buffer * buf, int fd);

/*
 * High-performance I/O buffer intended for use in non-blocking programs
 *
 * Data is stored as a memory-pooled linked list of equally sized chunks.
 * Routines are provided for high speed non-blocking reads and writes from
 * Ruby IO objects.
 */
void
Init_iobuffer_ext()
{
    cIO_Buffer = rb_define_class_under(rb_cIO, "Buffer", rb_cObject);
    rb_define_alloc_func(cIO_Buffer, IO_Buffer_allocate);

    rb_define_singleton_method(cIO_Buffer, "default_node_size",
                               IO_Buffer_default_node_size, 0);
    rb_define_singleton_method(cIO_Buffer, "default_node_size=",
                               IO_Buffer_set_default_node_size, 1);

    rb_define_method(cIO_Buffer, "initialize", IO_Buffer_initialize, -1);
    rb_define_method(cIO_Buffer, "clear", IO_Buffer_clear, 0);
    rb_define_method(cIO_Buffer, "size", IO_Buffer_size, 0);
    rb_define_method(cIO_Buffer, "empty?", IO_Buffer_empty, 0);
    rb_define_method(cIO_Buffer, "<<", IO_Buffer_append, 1);
    rb_define_method(cIO_Buffer, "append", IO_Buffer_append, 1);
    rb_define_method(cIO_Buffer, "write", IO_Buffer_append, 1);
    rb_define_method(cIO_Buffer, "prepend", IO_Buffer_prepend, 1);
    rb_define_method(cIO_Buffer, "read", IO_Buffer_read, -1);
    rb_define_method(cIO_Buffer, "read_frame", IO_Buffer_read_frame, 2);
    rb_define_method(cIO_Buffer, "to_str", IO_Buffer_to_str, 0);
    rb_define_method(cIO_Buffer, "read_from", IO_Buffer_read_from, 1);
    rb_define_method(cIO_Buffer, "write_to", IO_Buffer_write_to, 1);

    rb_define_const(cIO_Buffer, "MAX_SIZE", INT2NUM(MAX_BUFFER_SIZE));
}

static VALUE
IO_Buffer_allocate(VALUE klass)
{
    return Data_Wrap_Struct(klass, IO_Buffer_mark, IO_Buffer_free, buffer_new());
}

static void
IO_Buffer_mark(struct buffer * buf)
{
    /* Naively discard the memory pool whenever Ruby garbage collects */
    buffer_free_pool(buf);
}

static void
IO_Buffer_free(struct buffer * buf)
{
    buffer_free(buf);
}

/**
 * call-seq:
 *    IO_Buffer.default_node_size -> 16384
 *
 * Retrieves the current value of the default node size.
 */
static VALUE
IO_Buffer_default_node_size(VALUE klass)
{
    return UINT2NUM(default_node_size);
}

/*
 * Safely converts node sizes from Ruby numerics to C, raising
 * ArgumentError or RangeError on invalid sizes
 */
static unsigned
convert_node_size(VALUE size)
{
    if (
        rb_funcall(size, rb_intern("<"), 1, INT2NUM(1)) == Qtrue ||
        rb_funcall(size, rb_intern(">"), 1, INT2NUM(MAX_BUFFER_SIZE)) == Qtrue
    )
        rb_raise(rb_eArgError, "invalid buffer size");

    return (unsigned) NUM2INT(size);
}

/**
 * call-seq:
 *    IO_Buffer.default_node_size = 16384
 *
 * Sets the default node size for calling IO::Buffer.new with no arguments.
 */
static VALUE
IO_Buffer_set_default_node_size(VALUE klass, VALUE size)
{
    default_node_size = convert_node_size(size);

    return size;
}

/**
 * call-seq:
 *    IO_Buffer.new(size = IO::Buffer.default_node_size) -> IO_Buffer
 *
 * Create a new IO_Buffer with linked segments of the given size
 */
static VALUE
IO_Buffer_initialize(int argc, VALUE * argv, VALUE self)
{
    VALUE node_size_obj;
    struct buffer *buf;

    if (rb_scan_args(argc, argv, "01", &node_size_obj) == 1) {
        Data_Get_Struct(self, struct buffer, buf);

        /*
         * Make sure we're not changing the buffer size after data
         * has been allocated
         */
        assert(!buf->head);
        assert(!buf->pool_head);

        buf->node_size = convert_node_size(node_size_obj);
    }
    return Qnil;
}

/**
 * call-seq:
 *    IO_Buffer#clear -> nil
 *
 * Clear all data from the IO_Buffer
 */
static VALUE
IO_Buffer_clear(VALUE self)
{
    struct buffer *buf;
    Data_Get_Struct(self, struct buffer, buf);

    buffer_clear(buf);

    return Qnil;
}

/**
 * call-seq:
 *    IO_Buffer#size -> Integer
 *
 * Return the size of the buffer in bytes
 */
static VALUE
IO_Buffer_size(VALUE self)
{
    struct buffer *buf;
    Data_Get_Struct(self, struct buffer, buf);

    return INT2NUM(buf->size);
}

/**
 * call-seq:
 *    IO_Buffer#empty? -> Boolean
 *
 * Is the buffer empty?
 */
static VALUE
IO_Buffer_empty(VALUE self)
{
    struct buffer *buf;
    Data_Get_Struct(self, struct buffer, buf);

    return buf->size > 0 ? Qfalse : Qtrue;
}

/**
 * call-seq:
 *    IO_Buffer#append(data) -> String
 *
 * Append the given data to the end of the buffer
 */
static VALUE
IO_Buffer_append(VALUE self, VALUE data)
{
    struct buffer *buf;
    Data_Get_Struct(self, struct buffer, buf);

    /* Is this needed? Never seen anyone else do it... */
    data = rb_convert_type(data, T_STRING, "String", "to_str");
    buffer_append(buf, RSTRING_PTR(data), RSTRING_LEN(data));

    return data;
}

/**
 * call-seq:
 *    IO_Buffer#prepend(data) -> String
 *
 * Prepend the given data to the beginning of the buffer
 */
static VALUE
IO_Buffer_prepend(VALUE self, VALUE data)
{
    struct buffer *buf;
    Data_Get_Struct(self, struct buffer, buf);

    data = rb_convert_type(data, T_STRING, "String", "to_str");
    buffer_prepend(buf, RSTRING_PTR(data), RSTRING_LEN(data));

    return data;
}

/**
 * call-seq:
 *    IO_Buffer#read(length = nil) -> String
 *
 * Read the specified amount of data from the buffer. If no value
 * is given the entire contents of the buffer are returned. Any data
 * read from the buffer is cleared.
 * The given length must be greater than 0 or an exception is raised.
 * If the buffer size is zero then an empty string is returned (regardless
 * of the given length).
 */
static VALUE
IO_Buffer_read(int argc, VALUE * argv, VALUE self)
{
    VALUE length_obj, str;
    int length;
    struct buffer *buf;

    Data_Get_Struct(self, struct buffer, buf);

    if (rb_scan_args(argc, argv, "01", &length_obj) == 1) {
        length = NUM2INT(length_obj);
        if(length < 1)
            rb_raise(rb_eArgError, "length must be greater than zero");
        if(length > buf->size)
            length = buf->size;
    } else
        length = buf->size;

    if(buf->size == 0)
        return rb_str_new2("");

    str = rb_str_new(0, length);
    buffer_read(buf, RSTRING_PTR(str), length);

    return str;
}

/**
 * call-seq:
 *    IO_Buffer#read_frame(str, mark) -> boolean
 *
 * Read up to and including the given frame marker (expressed as a
 * Fixnum 0-255) byte, copying into the supplied string object. If the mark is
 * not encountered before the end of the buffer, false is returned but data
 * is still copied into str. True is returned if the end of a frame is reached.
 */
static VALUE
IO_Buffer_read_frame(VALUE self, VALUE data, VALUE mark)
{
    char mark_c = (char) NUM2INT(mark);
    struct buffer *buf;

    Data_Get_Struct(self, struct buffer, buf);

    if (buffer_read_frame(buf, data, mark_c)) {
        return Qtrue;
    } else {
        return Qfalse;
    }
}

/**
 * call-seq:
 *    IO_Buffer#to_str -> String
 *
 * Convert the Buffer to a String. The original buffer is unmodified.
 */
static VALUE
IO_Buffer_to_str(VALUE self)
{
    VALUE str;
    struct buffer *buf;

    Data_Get_Struct(self, struct buffer, buf);

    str = rb_str_new(0, buf->size);
    buffer_copy(buf, RSTRING_PTR(str), buf->size);

    return str;
}

/**
 * call-seq:
 *    IO_Buffer#read_from(io) -> Integer
 *
 * Perform a nonblocking read of the given IO object and fill
 * the buffer with any data received. The call will read as much
 * data as it can until the read would block.
 */
static VALUE
IO_Buffer_read_from(VALUE self, VALUE io)
{
    struct buffer *buf;
    int ret;
#if HAVE_RB_IO_T
    rb_io_t *fptr;
#else
    OpenFile *fptr;
#endif

    Data_Get_Struct(self, struct buffer, buf);
    GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
    rb_io_set_nonblock(fptr);

    ret = buffer_read_from(buf, FPTR_TO_FD(fptr));
    return ret == -1 ? Qnil : INT2NUM(ret);
}

/**
 * call-seq:
 *    IO_Buffer#write_to(io) -> Integer
 *
 * Perform a nonblocking write of the buffer to the given IO object.
 * As much data as possible is written until the call would block.
 * Any data which is written is removed from the buffer.
 */
static VALUE
IO_Buffer_write_to(VALUE self, VALUE io)
{
    struct buffer *buf;
#if HAVE_RB_IO_T
    rb_io_t *fptr;
#else
    OpenFile *fptr;
#endif

    Data_Get_Struct(self, struct buffer, buf);
    GetOpenFile(rb_convert_type(io, T_FILE, "IO", "to_io"), fptr);
    rb_io_set_nonblock(fptr);

    return INT2NUM(buffer_write_to(buf, FPTR_TO_FD(fptr)));
}

/*
 * Ruby bindings end here. Below is the actual implementation of
 * the underlying byte queue ADT
 */

/* Create a new buffer */
static struct buffer *
buffer_new(void)
{
    struct buffer *buf;

    buf = (struct buffer *) xmalloc(sizeof(struct buffer));
    buf->head = buf->tail = buf->pool_head = buf->pool_tail = 0;
    buf->size = 0;
    buf->node_size = default_node_size;

    return buf;
}

/* Clear all data from a buffer */
static void
buffer_clear(struct buffer * buf)
{
    /* Move everything into the buffer pool */
    if (!buf->pool_tail) {
        buf->pool_head = buf->pool_tail = buf->head;
    } else {
        buf->pool_tail->next = buf->head;
    }

    buf->head = buf->tail = 0;
    buf->size = 0;
}

/* Free a buffer */
static void
buffer_free(struct buffer * buf)
{
    buffer_clear(buf);
    buffer_free_pool(buf);

    free(buf);
}

/* Free the memory pool */
static void
buffer_free_pool(struct buffer * buf)
{
    struct buffer_node *tmp;

    while (buf->pool_head) {
        tmp = buf->pool_head;
        buf->pool_head = tmp->next;
        free(tmp);
    }

    buf->pool_tail = 0;
}

/* Create a new buffer_node (or pull one from the memory pool) */
static struct buffer_node *
buffer_node_new(struct buffer * buf)
{
    struct buffer_node *node;

    /* Pull from the memory pool if available */
    if (buf->pool_head) {
        node = buf->pool_head;
        buf->pool_head = node->next;

        if (node->next)
            node->next = 0;
        else
            buf->pool_tail = 0;
    } else {
        node = (struct buffer_node *) xmalloc(sizeof(struct buffer_node) + buf->node_size);
        node->next = 0;
    }

    node->start = node->end = 0;
    return node;
}

/* Free a buffer node (i.e. return it to the memory pool) */
static void
buffer_node_free(struct buffer * buf, struct buffer_node * node)
{
    node->next = buf->pool_head;
    buf->pool_head = node;

    if (!buf->pool_tail) {
        buf->pool_tail = node;
    }
}

/* Prepend data to the front of the buffer */
static void
buffer_prepend(struct buffer * buf, char *str, unsigned len)
{
    struct buffer_node *node, *tmp;
    buf->size += len;

    /* If it fits in the beginning of the head */
    if (buf->head && buf->head->start >= len) {
        buf->head->start -= len;
        memcpy(buf->head->data + buf->head->start, str, len);
    } else {
        node = buffer_node_new(buf);
        node->next = buf->head;
        buf->head = node;
        if (!buf->tail)
            buf->tail = node;

        while (len > buf->node_size) {
            memcpy(node->data, str, buf->node_size);
            node->end = buf->node_size;

            tmp = buffer_node_new(buf);
            tmp->next = node->next;
            node->next = tmp;

            if (buf->tail == node)
                buf->tail = tmp;
            node = tmp;

            str += buf->node_size;
            len -= buf->node_size;
        }

        if (len > 0) {
            memcpy(node->data, str, len);
            node->end = len;
        }
    }
}

/* Append data to the end of the buffer */
static void
buffer_append(struct buffer * buf, char *str, unsigned len)
{
    unsigned nbytes;
    buf->size += len;

    /* If it fits in the remaining space in the tail */
    if (buf->tail && len <= buf->node_size - buf->tail->end) {
        memcpy(buf->tail->data + buf->tail->end, str, len);
        buf->tail->end += len;
        return;
    }
    /* Empty list needs to be initialized */
    if (!buf->head) {
        buf->head = buffer_node_new(buf);
        buf->tail = buf->head;
    }
    /* Build links out of the data */
    while (len > 0) {
        nbytes = buf->node_size - buf->tail->end;
        if (len < nbytes)
            nbytes = len;

        memcpy(buf->tail->data + buf->tail->end, str, nbytes);
        str += nbytes;
        len -= nbytes;

        buf->tail->end += nbytes;

        if (len > 0) {
            buf->tail->next = buffer_node_new(buf);
            buf->tail = buf->tail->next;
        }
    }
}

/* Read data from the buffer (and clear what we've read) */
static void
buffer_read(struct buffer * buf, char *str, unsigned len)
{
    unsigned nbytes;
    struct buffer_node *tmp;

    while (buf->size > 0 && len > 0) {
        nbytes = buf->head->end - buf->head->start;
        if (len < nbytes)
            nbytes = len;

        memcpy(str, buf->head->data + buf->head->start, nbytes);
        str += nbytes;
        len -= nbytes;

        buf->head->start += nbytes;
        buf->size -= nbytes;

        if (buf->head->start == buf->head->end) {
            tmp = buf->head;
            buf->head = tmp->next;
            buffer_node_free(buf, tmp);

            if (!buf->head)
                buf->tail = 0;
        }
    }
}

/*
 * Read data from the buffer into str until byte frame_mark or empty. Bytes
 * are copied into str and removed; if a complete frame is read, a true value
 * is returned
 */
static int
buffer_read_frame(struct buffer * buf, VALUE str, char frame_mark)
{
    unsigned nbytes = 0;
    struct buffer_node *tmp;

    while (buf->size > 0) {
        struct buffer_node *head = buf->head;
        char *loc, *s = head->data + head->start, *e = head->data + head->end;
        nbytes = e - s;

        loc = memchr(s, frame_mark, nbytes);

        if (loc) {
            nbytes = loc - s + 1;
        }

        /* Copy less than everything if we found a frame byte */
        rb_str_cat(str, s, nbytes);

        /* Fixup the buffer pointers to indicate the bytes were consumed */
        head->start += nbytes;
        buf->size -= nbytes;

        if (head->start == head->end) {
            buf->head = head->next;
            buffer_node_free(buf, head);

            if (!buf->head)
                buf->tail = 0;
        }

        if (loc) {
            return 1;
        }
    }

    return 0;
}

/* Copy data from the buffer without clearing it */
static void
buffer_copy(struct buffer * buf, char *str, unsigned len)
{
    unsigned nbytes;
    struct buffer_node *node;

    node = buf->head;
    while (node && len > 0) {
        nbytes = node->end - node->start;
        if (len < nbytes)
            nbytes = len;

        memcpy(str, node->data + node->start, nbytes);
        str += nbytes;
        len -= nbytes;

        if (node->start + nbytes == node->end)
            node = node->next;
    }
}

/* Write data from the buffer to a file descriptor */
static int
buffer_write_to(struct buffer * buf, int fd)
{
    int bytes_written, total_bytes_written = 0;
    struct buffer_node *tmp;

    while (buf->head) {
        bytes_written = write(fd, buf->head->data + buf->head->start, buf->head->end - buf->head->start);

        /* If the write failed... */
        if (bytes_written < 0) {
            if (errno != EAGAIN)
                rb_sys_fail("write");

            return total_bytes_written;
        }

        total_bytes_written += bytes_written;
        buf->size -= bytes_written;

        /* If the write blocked... */
        if (bytes_written < buf->head->end - buf->head->start) {
            buf->head->start += bytes_written;
            return total_bytes_written;
        }
        /* Otherwise we wrote the whole buffer */
        tmp = buf->head;
        buf->head = tmp->next;
        buffer_node_free(buf, tmp);

        if (!buf->head)
            buf->tail = 0;
    }

    return total_bytes_written;
}

/* Read data from a file descriptor into the buffer */
static int
buffer_read_from(struct buffer * buf, int fd)
{
    int bytes_read, total_bytes_read = 0;
    unsigned nbytes;

    /* Empty list needs to be initialized */
    if (!buf->head) {
        buf->head = buffer_node_new(buf);
        buf->tail = buf->head;
    }

    do {
        nbytes = buf->node_size - buf->tail->end;
        bytes_read = read(fd, buf->tail->data + buf->tail->end, nbytes);

        if (bytes_read == 0) {
            /* The file reached EOF */
            return -1;
        } else if (bytes_read < 0) {
            if (errno != EAGAIN)
                rb_sys_fail("read");

            return total_bytes_read;
        }

        total_bytes_read += bytes_read;
        buf->tail->end += bytes_read;
        buf->size += bytes_read;

        if (buf->tail->end == buf->node_size) {
            buf->tail->next = buffer_node_new(buf);
            buf->tail = buf->tail->next;
        }
    } while (bytes_read == nbytes);

    return total_bytes_read;
}
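
The call-seq comments above document the Ruby-visible API that Init_iobuffer_ext registers on IO::Buffer. A minimal usage sketch, assuming the compiled iobuffer_ext extension is already loaded (here via require 'cool.io', which bundles it; the exact require path is an assumption, not part of this diff):

    require 'cool.io'            # assumed to load the bundled iobuffer_ext

    buf = IO::Buffer.new         # node size defaults to IO::Buffer.default_node_size (16384)
    buf << "hello\n"             # <<, append, and write are aliases
    buf.prepend(">> ")
    buf.append("world")

    buf.size                     # => 14
    buf.to_str                   # => ">> hello\nworld" (buffer left intact)
    buf.read(3)                  # => ">> " (read consumes from the front)

    # Frame-oriented read: copy up to and including the 0x0A marker into `frame`.
    frame = ""
    buf.read_frame(frame, 0x0A)  # => true; frame == "hello\n"
    buf.empty?                   # => false ("world" is still buffered)

read_from and write_to perform the nonblocking transfers described above: both take an IO object and return the number of bytes moved, with read_from returning nil at EOF.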