msgpack-ably 0.5.10

Files changed (52)
  1. checksums.yaml +7 -0
  2. data/.gitignore +20 -0
  3. data/.travis.yml +26 -0
  4. data/ChangeLog +101 -0
  5. data/README.rdoc +129 -0
  6. data/Rakefile +110 -0
  7. data/doclib/msgpack.rb +77 -0
  8. data/doclib/msgpack/buffer.rb +193 -0
  9. data/doclib/msgpack/core_ext.rb +101 -0
  10. data/doclib/msgpack/error.rb +14 -0
  11. data/doclib/msgpack/packer.rb +134 -0
  12. data/doclib/msgpack/unpacker.rb +146 -0
  13. data/ext/msgpack/buffer.c +678 -0
  14. data/ext/msgpack/buffer.h +441 -0
  15. data/ext/msgpack/buffer_class.c +507 -0
  16. data/ext/msgpack/buffer_class.h +32 -0
  17. data/ext/msgpack/compat.h +113 -0
  18. data/ext/msgpack/core_ext.c +129 -0
  19. data/ext/msgpack/core_ext.h +26 -0
  20. data/ext/msgpack/extconf.rb +28 -0
  21. data/ext/msgpack/packer.c +168 -0
  22. data/ext/msgpack/packer.h +429 -0
  23. data/ext/msgpack/packer_class.c +302 -0
  24. data/ext/msgpack/packer_class.h +30 -0
  25. data/ext/msgpack/rbinit.c +33 -0
  26. data/ext/msgpack/rmem.c +94 -0
  27. data/ext/msgpack/rmem.h +109 -0
  28. data/ext/msgpack/sysdep.h +115 -0
  29. data/ext/msgpack/sysdep_endian.h +50 -0
  30. data/ext/msgpack/sysdep_types.h +46 -0
  31. data/ext/msgpack/unpacker.c +781 -0
  32. data/ext/msgpack/unpacker.h +122 -0
  33. data/ext/msgpack/unpacker_class.c +405 -0
  34. data/ext/msgpack/unpacker_class.h +32 -0
  35. data/lib/msgpack.rb +6 -0
  36. data/lib/msgpack/version.rb +3 -0
  37. data/msgpack.gemspec +26 -0
  38. data/msgpack.org.md +49 -0
  39. data/spec/cases.json +1 -0
  40. data/spec/cases.msg +0 -0
  41. data/spec/cases_compact.msg +0 -0
  42. data/spec/cases_spec.rb +39 -0
  43. data/spec/cruby/buffer_io_spec.rb +256 -0
  44. data/spec/cruby/buffer_packer.rb +29 -0
  45. data/spec/cruby/buffer_spec.rb +572 -0
  46. data/spec/cruby/buffer_unpacker.rb +19 -0
  47. data/spec/format_spec.rb +256 -0
  48. data/spec/packer_spec.rb +120 -0
  49. data/spec/random_compat.rb +24 -0
  50. data/spec/spec_helper.rb +21 -0
  51. data/spec/unpacker_spec.rb +305 -0
  52. metadata +195 -0
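
The diff shown below is data/ext/msgpack/buffer.c, the 678-line chunked buffer implementation that the packer and unpacker build on. As a rough orientation, here is a minimal sketch of how that API can be exercised from extension code. The function names and signatures (msgpack_buffer_init, msgpack_buffer_append, msgpack_buffer_read_nonblock, msgpack_buffer_destroy) come from buffer.c/buffer.h in this gem, but the call sequence itself is illustrative and is not taken from the gem's sources.

    #include "buffer.h"

    /* Illustrative sketch only. Assumes the Ruby VM is running and that
     * msgpack_buffer_static_init() has already been called during
     * extension initialization (it sets up the rmem pool used below). */
    static void buffer_roundtrip_example(void)
    {
        msgpack_buffer_t b;
        msgpack_buffer_init(&b);                /* head initially points at the tail chunk */

        const char in[] = "\x93\x01\x02\x03";   /* arbitrary serialized bytes */
        msgpack_buffer_append(&b, in, sizeof(in) - 1);

        char out[4];
        size_t n = msgpack_buffer_read_nonblock(&b, out, sizeof(out));
        /* n == 4 here; passing NULL instead of out makes the same call skip bytes */
        (void)n;

        msgpack_buffer_destroy(&b);             /* frees the chunk list and free_list */
    }

In the gem itself this sequence is driven by the Packer and Unpacker classes (packer_class.c / unpacker_class.c above) rather than called directly.
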
@@ -0,0 +1,678 @@
+ /*
+  * MessagePack for Ruby
+  *
+  * Copyright (C) 2008-2013 Sadayuki Furuhashi
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ #include "buffer.h"
+ #include "rmem.h"
+
+ #ifdef COMPAT_HAVE_ENCODING /* see compat.h */
+ int s_enc_ascii8bit;
+ #endif
+
+ #ifndef HAVE_RB_STR_REPLACE
+ static ID s_replace;
+ #endif
+
+ #ifndef DISABLE_RMEM
+ static msgpack_rmem_t s_rmem;
+ #endif
+
+ void msgpack_buffer_static_init()
+ {
+ #ifndef DISABLE_RMEM
+     msgpack_rmem_init(&s_rmem);
+ #endif
+ #ifndef HAVE_RB_STR_REPLACE
+     s_replace = rb_intern("replace");
+ #endif
+
+ #ifdef COMPAT_HAVE_ENCODING
+     s_enc_ascii8bit = rb_ascii8bit_encindex();
+ #endif
+ }
+
+ void msgpack_buffer_static_destroy()
+ {
+ #ifndef DISABLE_RMEM
+     msgpack_rmem_destroy(&s_rmem);
+ #endif
+ }
+
+ void msgpack_buffer_init(msgpack_buffer_t* b)
+ {
+     memset(b, 0, sizeof(msgpack_buffer_t));
+
+     b->head = &b->tail;
+     b->write_reference_threshold = MSGPACK_BUFFER_STRING_WRITE_REFERENCE_DEFAULT;
+     b->read_reference_threshold = MSGPACK_BUFFER_STRING_READ_REFERENCE_DEFAULT;
+     b->io_buffer_size = MSGPACK_BUFFER_IO_BUFFER_SIZE_DEFAULT;
+     b->io = Qnil;
+     b->io_buffer = Qnil;
+ }
+
+ static void _msgpack_buffer_chunk_destroy(msgpack_buffer_chunk_t* c)
+ {
+     if(c->mem != NULL) {
+ #ifndef DISABLE_RMEM
+         if(!msgpack_rmem_free(&s_rmem, c->mem)) {
+             free(c->mem);
+         }
+         /* no needs to update rmem_owner because chunks will not be
+          * free()ed (left in free_list) and thus *rmem_owner is
+          * always valid. */
+ #else
+         free(c->mem);
+ #endif
+     }
+     c->first = NULL;
+     c->last = NULL;
+     c->mem = NULL;
+ }
+
+ void msgpack_buffer_destroy(msgpack_buffer_t* b)
+ {
+     /* head is always available */
+     msgpack_buffer_chunk_t* c = b->head;
+     while(c != &b->tail) {
+         msgpack_buffer_chunk_t* n = c->next;
+         _msgpack_buffer_chunk_destroy(c);
+         free(c);
+         c = n;
+     }
+     _msgpack_buffer_chunk_destroy(c);
+
+     c = b->free_list;
+     while(c != NULL) {
+         msgpack_buffer_chunk_t* n = c->next;
+         free(c);
+         c = n;
+     }
+ }
+
+ void msgpack_buffer_mark(msgpack_buffer_t* b)
+ {
+     /* head is always available */
+     msgpack_buffer_chunk_t* c = b->head;
+     while(c != &b->tail) {
+         rb_gc_mark(c->mapped_string);
+         c = c->next;
+     }
+     rb_gc_mark(c->mapped_string);
+
+     rb_gc_mark(b->io);
+     rb_gc_mark(b->io_buffer);
+
+     rb_gc_mark(b->owner);
+ }
+
+ bool _msgpack_buffer_shift_chunk(msgpack_buffer_t* b)
+ {
+     _msgpack_buffer_chunk_destroy(b->head);
+
+     if(b->head == &b->tail) {
+         /* list becomes empty. don't add head to free_list
+          * because head should be always available */
+         b->tail_buffer_end = NULL;
+         b->read_buffer = NULL;
+         return false;
+     }
+
+     /* add head to free_list */
+     msgpack_buffer_chunk_t* next_head = b->head->next;
+     b->head->next = b->free_list;
+     b->free_list = b->head;
+
+     b->head = next_head;
+     b->read_buffer = next_head->first;
+
+     return true;
+ }
+
+ void msgpack_buffer_clear(msgpack_buffer_t* b)
+ {
+     while(_msgpack_buffer_shift_chunk(b)) {
+         ;
+     }
+ }
+
+ size_t msgpack_buffer_read_to_string_nonblock(msgpack_buffer_t* b, VALUE string, size_t length)
+ {
+     size_t avail = msgpack_buffer_top_readable_size(b);
+
+ #ifndef DISABLE_BUFFER_READ_REFERENCE_OPTIMIZE
+     /* optimize */
+     if(length <= avail && RSTRING_LEN(string) == 0 &&
+             b->head->mapped_string != NO_MAPPED_STRING &&
+             length >= b->read_reference_threshold) {
+         VALUE s = _msgpack_buffer_refer_head_mapped_string(b, length);
+ #ifndef HAVE_RB_STR_REPLACE
+         /* TODO MRI 1.8 */
+         rb_funcall(string, s_replace, 1, s);
+ #else
+         rb_str_replace(string, s);
+ #endif
+         /* here doesn't have to call ENCODING_SET because
+          * encoding of s is always ASCII-8BIT */
+         _msgpack_buffer_consumed(b, length);
+         return length;
+     }
+ #endif
+
+     size_t const length_orig = length;
+
+     while(true) {
+         if(length <= avail) {
+             rb_str_buf_cat(string, b->read_buffer, length);
+             _msgpack_buffer_consumed(b, length);
+             return length_orig;
+         }
+
+         rb_str_buf_cat(string, b->read_buffer, avail);
+         length -= avail;
+
+         if(!_msgpack_buffer_shift_chunk(b)) {
+             return length_orig - length;
+         }
+
+         avail = msgpack_buffer_top_readable_size(b);
+     }
+ }
+
+ size_t msgpack_buffer_read_nonblock(msgpack_buffer_t* b, char* buffer, size_t length)
+ {
+     /* buffer == NULL means skip */
+     size_t const length_orig = length;
+
+     while(true) {
+         size_t avail = msgpack_buffer_top_readable_size(b);
+
+         if(length <= avail) {
+             if(buffer != NULL) {
+                 memcpy(buffer, b->read_buffer, length);
+             }
+             _msgpack_buffer_consumed(b, length);
+             return length_orig;
+         }
+
+         if(buffer != NULL) {
+             memcpy(buffer, b->read_buffer, avail);
+             buffer += avail;
+         }
+         length -= avail;
+
+         if(!_msgpack_buffer_shift_chunk(b)) {
+             return length_orig - length;
+         }
+     }
+ }
+
+ size_t msgpack_buffer_all_readable_size(const msgpack_buffer_t* b)
+ {
+     size_t sz = msgpack_buffer_top_readable_size(b);
+
+     if(b->head == &b->tail) {
+         return sz;
+     }
+
+     msgpack_buffer_chunk_t* c = b->head->next;
+
+     while(true) {
+         sz += c->last - c->first;
+         if(c == &b->tail) {
+             return sz;
+         }
+         c = c->next;
+     }
+ }
+
+ bool _msgpack_buffer_read_all2(msgpack_buffer_t* b, char* buffer, size_t length)
+ {
+     if(!msgpack_buffer_ensure_readable(b, length)) {
+         return false;
+     }
+
+     msgpack_buffer_read_nonblock(b, buffer, length);
+     return true;
+ }
+
+
+ static inline msgpack_buffer_chunk_t* _msgpack_buffer_alloc_new_chunk(msgpack_buffer_t* b)
+ {
+     msgpack_buffer_chunk_t* reuse = b->free_list;
+     if(reuse == NULL) {
+         return malloc(sizeof(msgpack_buffer_chunk_t));
+     }
+     b->free_list = b->free_list->next;
+     return reuse;
+ }
+
+ static inline void _msgpack_buffer_add_new_chunk(msgpack_buffer_t* b)
+ {
+     if(b->head == &b->tail) {
+         if(b->tail.first == NULL) {
+             /* empty buffer */
+             return;
+         }
+
+         msgpack_buffer_chunk_t* nc = _msgpack_buffer_alloc_new_chunk(b);
+
+         *nc = b->tail;
+         b->head = nc;
+         nc->next = &b->tail;
+
+     } else {
+         /* search node before tail */
+         msgpack_buffer_chunk_t* before_tail = b->head;
+         while(before_tail->next != &b->tail) {
+             before_tail = before_tail->next;
+         }
+
+         msgpack_buffer_chunk_t* nc = _msgpack_buffer_alloc_new_chunk(b);
+
+ #ifndef DISABLE_RMEM
+ #ifndef DISABLE_RMEM_REUSE_INTERNAL_FRAGMENT
+         if(b->rmem_last == b->tail_buffer_end) {
+             /* reuse unused rmem space */
+             size_t unused = b->tail_buffer_end - b->tail.last;
+             b->rmem_last -= unused;
+         }
+ #endif
+ #endif
+
+         /* rebuild tail */
+         *nc = b->tail;
+         before_tail->next = nc;
+         nc->next = &b->tail;
+     }
+ }
+
+ static inline void _msgpack_buffer_append_reference(msgpack_buffer_t* b, VALUE string)
+ {
+     VALUE mapped_string = rb_str_dup(string);
+ #ifdef COMPAT_HAVE_ENCODING
+     ENCODING_SET(mapped_string, s_enc_ascii8bit);
+ #endif
+
+     _msgpack_buffer_add_new_chunk(b);
+
+     char* data = RSTRING_PTR(mapped_string);
+     size_t length = RSTRING_LEN(mapped_string);
+
+     b->tail.first = (char*) data;
+     b->tail.last = (char*) data + length;
+     b->tail.mapped_string = mapped_string;
+     b->tail.mem = NULL;
+
+     /* msgpack_buffer_writable_size should return 0 for mapped chunk */
+     b->tail_buffer_end = b->tail.last;
+
+     /* consider read_buffer */
+     if(b->head == &b->tail) {
+         b->read_buffer = b->tail.first;
+     }
+ }
+
+ void _msgpack_buffer_append_long_string(msgpack_buffer_t* b, VALUE string)
+ {
+     size_t length = RSTRING_LEN(string);
+
+     if(b->io != Qnil) {
+         msgpack_buffer_flush(b);
+         rb_funcall(b->io, b->io_write_all_method, 1, string);
+
+     } else if(!STR_DUP_LIKELY_DOES_COPY(string)) {
+         _msgpack_buffer_append_reference(b, string);
+
+     } else {
+         msgpack_buffer_append(b, RSTRING_PTR(string), length);
+     }
+ }
+
+ static inline void* _msgpack_buffer_chunk_malloc(
+         msgpack_buffer_t* b, msgpack_buffer_chunk_t* c,
+         size_t required_size, size_t* allocated_size)
+ {
+ #ifndef DISABLE_RMEM
+     if(required_size <= MSGPACK_RMEM_PAGE_SIZE) {
+ #ifndef DISABLE_RMEM_REUSE_INTERNAL_FRAGMENT
+         if((size_t)(b->rmem_end - b->rmem_last) < required_size) {
+ #endif
+             /* alloc new rmem page */
+             *allocated_size = MSGPACK_RMEM_PAGE_SIZE;
+             char* buffer = msgpack_rmem_alloc(&s_rmem);
+             c->mem = buffer;
+
+             /* update rmem owner */
+             b->rmem_owner = &c->mem;
+             b->rmem_last = b->rmem_end = buffer + MSGPACK_RMEM_PAGE_SIZE;
+
+             return buffer;
+
+ #ifndef DISABLE_RMEM_REUSE_INTERNAL_FRAGMENT
+         } else {
+             /* reuse unused rmem */
+             *allocated_size = (size_t)(b->rmem_end - b->rmem_last);
+             char* buffer = b->rmem_last;
+             b->rmem_last = b->rmem_end;
+
+             /* update rmem owner */
+             c->mem = *b->rmem_owner;
+             *b->rmem_owner = NULL;
+             b->rmem_owner = &c->mem;
+
+             return buffer;
+         }
+ #endif
+     }
+ #else
+     if(required_size < 72) {
+         required_size = 72;
+     }
+ #endif
+
+     // TODO alignment?
+     *allocated_size = required_size;
+     void* mem = malloc(required_size);
+     c->mem = mem;
+     return mem;
+ }
+
+ static inline void* _msgpack_buffer_chunk_realloc(
+         msgpack_buffer_t* b, msgpack_buffer_chunk_t* c,
+         void* mem, size_t required_size, size_t* current_size)
+ {
+     if(mem == NULL) {
+         return _msgpack_buffer_chunk_malloc(b, c, required_size, current_size);
+     }
+
+     size_t next_size = *current_size * 2;
+     while(next_size < required_size) {
+         next_size *= 2;
+     }
+     *current_size = next_size;
+     mem = realloc(mem, next_size);
+
+     c->mem = mem;
+     return mem;
+ }
+
+ void _msgpack_buffer_expand(msgpack_buffer_t* b, const char* data, size_t length, bool flush_to_io)
+ {
+     if(flush_to_io && b->io != Qnil) {
+         msgpack_buffer_flush(b);
+         if(msgpack_buffer_writable_size(b) >= length) {
+             /* data == NULL means ensure_writable */
+             if(data != NULL) {
+                 size_t tail_avail = msgpack_buffer_writable_size(b);
+                 memcpy(b->tail.last, data, length);
+                 b->tail.last += tail_avail;
+             }
+             return;
+         }
+     }
+
+     /* data == NULL means ensure_writable */
+     if(data != NULL) {
+         size_t tail_avail = msgpack_buffer_writable_size(b);
+         memcpy(b->tail.last, data, tail_avail);
+         b->tail.last += tail_avail;
+         data += tail_avail;
+         length -= tail_avail;
+     }
+
+     size_t capacity = b->tail.last - b->tail.first;
+
+     /* can't realloc mapped chunk or rmem page */
+     if(b->tail.mapped_string != NO_MAPPED_STRING
+ #ifndef DISABLE_RMEM
+             || capacity <= MSGPACK_RMEM_PAGE_SIZE
+ #endif
+             ) {
+         /* allocate new chunk */
+         _msgpack_buffer_add_new_chunk(b);
+
+         char* mem = _msgpack_buffer_chunk_malloc(b, &b->tail, length, &capacity);
+
+         char* last = mem;
+         if(data != NULL) {
+             memcpy(mem, data, length);
+             last += length;
+         }
+
+         /* rebuild tail chunk */
+         b->tail.first = mem;
+         b->tail.last = last;
+         b->tail.mapped_string = NO_MAPPED_STRING;
+         b->tail_buffer_end = mem + capacity;
+
+         /* consider read_buffer */
+         if(b->head == &b->tail) {
+             b->read_buffer = b->tail.first;
+         }
+
+     } else {
+         /* realloc malloc()ed chunk or NULL */
+         size_t tail_filled = b->tail.last - b->tail.first;
+         char* mem = _msgpack_buffer_chunk_realloc(b, &b->tail,
+                 b->tail.first, tail_filled+length, &capacity);
+
+         char* last = mem + tail_filled;
+         if(data != NULL) {
+             memcpy(last, data, length);
+             last += length;
+         }
+
+         /* consider read_buffer */
+         if(b->head == &b->tail) {
+             size_t read_offset = b->read_buffer - b->head->first;
+             b->read_buffer = mem + read_offset;
+         }
+
+         /* rebuild tail chunk */
+         b->tail.first = mem;
+         b->tail.last = last;
+         b->tail_buffer_end = mem + capacity;
+     }
+ }
+
+ static inline VALUE _msgpack_buffer_head_chunk_as_string(msgpack_buffer_t* b)
+ {
+     size_t length = b->head->last - b->read_buffer;
+     if(length == 0) {
+         return rb_str_buf_new(0);
+     }
+
+     if(b->head->mapped_string != NO_MAPPED_STRING) {
+         return _msgpack_buffer_refer_head_mapped_string(b, length);
+     }
+
+     return rb_str_new(b->read_buffer, length);
+ }
+
+ static inline VALUE _msgpack_buffer_chunk_as_string(msgpack_buffer_chunk_t* c)
+ {
+     size_t chunk_size = c->last - c->first;
+     if(chunk_size == 0) {
+         return rb_str_buf_new(0);
+     }
+
+     if(c->mapped_string != NO_MAPPED_STRING) {
+         return rb_str_dup(c->mapped_string);
+     }
+
+     return rb_str_new(c->first, chunk_size);
+ }
+
+ VALUE msgpack_buffer_all_as_string(msgpack_buffer_t* b)
+ {
+     if(b->head == &b->tail) {
+         return _msgpack_buffer_head_chunk_as_string(b);
+     }
+
+     size_t length = msgpack_buffer_all_readable_size(b);
+     VALUE string = rb_str_new(NULL, length);
+     char* buffer = RSTRING_PTR(string);
+
+     size_t avail = msgpack_buffer_top_readable_size(b);
+     memcpy(buffer, b->read_buffer, avail);
+     buffer += avail;
+     length -= avail;
+
+     msgpack_buffer_chunk_t* c = b->head->next;
+
+     while(true) {
+         avail = c->last - c->first;
+         memcpy(buffer, c->first, avail);
+
+         if(length <= avail) {
+             return string;
+         }
+         buffer += avail;
+         length -= avail;
+
+         c = c->next;
+     }
+ }
+
+ VALUE msgpack_buffer_all_as_string_array(msgpack_buffer_t* b)
+ {
+     if(b->head == &b->tail) {
+         VALUE s = msgpack_buffer_all_as_string(b);
+         VALUE ary = rb_ary_new3(1, s);
+         return ary;
+     }
+
+     /* TODO optimize ary construction */
+     VALUE ary = rb_ary_new();
+
+     VALUE s = _msgpack_buffer_head_chunk_as_string(b);
+     rb_ary_push(ary, s);
+
+     msgpack_buffer_chunk_t* c = b->head->next;
+
+     while(true) {
+         s = _msgpack_buffer_chunk_as_string(c);
+         rb_ary_push(ary, s);
+         if(c == &b->tail) {
+             return ary;
+         }
+         c = c->next;
+     }
+
+     return ary;
+ }
+
+ size_t msgpack_buffer_flush_to_io(msgpack_buffer_t* b, VALUE io, ID write_method, bool consume)
+ {
+     if(msgpack_buffer_top_readable_size(b) == 0) {
+         return 0;
+     }
+
+     VALUE s = _msgpack_buffer_head_chunk_as_string(b);
+     rb_funcall(io, write_method, 1, s);
+     size_t sz = RSTRING_LEN(s);
+
+     if(consume) {
+         while(_msgpack_buffer_shift_chunk(b)) {
+             s = _msgpack_buffer_chunk_as_string(b->head);
+             rb_funcall(io, write_method, 1, s);
+             sz += RSTRING_LEN(s);
+         }
+         return sz;
+
+     } else {
+         if(b->head == &b->tail) {
+             return sz;
+         }
+         msgpack_buffer_chunk_t* c = b->head->next;
+         while(true) {
+             s = _msgpack_buffer_chunk_as_string(c);
+             rb_funcall(io, write_method, 1, s);
+             sz += RSTRING_LEN(s);
+             if(c == &b->tail) {
+                 return sz;
+             }
+             c = c->next;
+         }
+     }
+ }
+
+ size_t _msgpack_buffer_feed_from_io(msgpack_buffer_t* b)
+ {
+     if(b->io_buffer == Qnil) {
+         b->io_buffer = rb_funcall(b->io, b->io_partial_read_method, 1, LONG2NUM(b->io_buffer_size));
+         if(b->io_buffer == Qnil) {
+             rb_raise(rb_eEOFError, "IO reached end of file");
+         }
+         StringValue(b->io_buffer);
+     } else {
+         VALUE ret = rb_funcall(b->io, b->io_partial_read_method, 2, LONG2NUM(b->io_buffer_size), b->io_buffer);
+         if(ret == Qnil) {
+             rb_raise(rb_eEOFError, "IO reached end of file");
+         }
+     }
+
+     size_t len = RSTRING_LEN(b->io_buffer);
+     if(len == 0) {
+         rb_raise(rb_eEOFError, "IO reached end of file");
+     }
+
+     /* TODO zero-copy optimize? */
+     msgpack_buffer_append_nonblock(b, RSTRING_PTR(b->io_buffer), len);
+
+     return len;
+ }
+
+ size_t _msgpack_buffer_read_from_io_to_string(msgpack_buffer_t* b, VALUE string, size_t length)
+ {
+     if(RSTRING_LEN(string) == 0) {
+         /* direct read */
+         VALUE ret = rb_funcall(b->io, b->io_partial_read_method, 2, LONG2NUM(length), string);
+         if(ret == Qnil) {
+             return 0;
+         }
+         return RSTRING_LEN(string);
+     }
+
+     /* copy via io_buffer */
+     if(b->io_buffer == Qnil) {
+         b->io_buffer = rb_str_buf_new(0);
+     }
+
+     VALUE ret = rb_funcall(b->io, b->io_partial_read_method, 2, LONG2NUM(length), b->io_buffer);
+     if(ret == Qnil) {
+         return 0;
+     }
+     size_t rl = RSTRING_LEN(b->io_buffer);
+
+     rb_str_buf_cat(string, (const void*)RSTRING_PTR(b->io_buffer), rl);
+     return rl;
+ }
+
+ size_t _msgpack_buffer_skip_from_io(msgpack_buffer_t* b, size_t length)
+ {
+     if(b->io_buffer == Qnil) {
+         b->io_buffer = rb_str_buf_new(0);
+     }
+
+     VALUE ret = rb_funcall(b->io, b->io_partial_read_method, 2, LONG2NUM(length), b->io_buffer);
+     if(ret == Qnil) {
+         return 0;
+     }
+     return RSTRING_LEN(b->io_buffer);
+ }
+