cbor 0.5.6.2

Files changed (55)
  1. checksums.yaml +7 -0
  2. data/.gitignore +22 -0
  3. data/.travis.yml +5 -0
  4. data/ChangeLog +87 -0
  5. data/README.rdoc +180 -0
  6. data/Rakefile +94 -0
  7. data/cbor.gemspec +26 -0
  8. data/doclib/cbor.rb +80 -0
  9. data/doclib/cbor/buffer.rb +193 -0
  10. data/doclib/cbor/core_ext.rb +133 -0
  11. data/doclib/cbor/error.rb +14 -0
  12. data/doclib/cbor/packer.rb +133 -0
  13. data/doclib/cbor/simple.rb +15 -0
  14. data/doclib/cbor/tagged.rb +16 -0
  15. data/doclib/cbor/unpacker.rb +138 -0
  16. data/ext/cbor/buffer.c +693 -0
  17. data/ext/cbor/buffer.h +469 -0
  18. data/ext/cbor/buffer_class.c +516 -0
  19. data/ext/cbor/buffer_class.h +41 -0
  20. data/ext/cbor/cbor.h +69 -0
  21. data/ext/cbor/compat.h +136 -0
  22. data/ext/cbor/core_ext.c +181 -0
  23. data/ext/cbor/core_ext.h +35 -0
  24. data/ext/cbor/extconf.rb +25 -0
  25. data/ext/cbor/packer.c +169 -0
  26. data/ext/cbor/packer.h +337 -0
  27. data/ext/cbor/packer_class.c +304 -0
  28. data/ext/cbor/packer_class.h +39 -0
  29. data/ext/cbor/rbinit.c +51 -0
  30. data/ext/cbor/renamer.h +56 -0
  31. data/ext/cbor/rmem.c +103 -0
  32. data/ext/cbor/rmem.h +118 -0
  33. data/ext/cbor/sysdep.h +135 -0
  34. data/ext/cbor/sysdep_endian.h +59 -0
  35. data/ext/cbor/sysdep_types.h +55 -0
  36. data/ext/cbor/unpacker.c +735 -0
  37. data/ext/cbor/unpacker.h +133 -0
  38. data/ext/cbor/unpacker_class.c +417 -0
  39. data/ext/cbor/unpacker_class.h +39 -0
  40. data/lib/cbor.rb +9 -0
  41. data/lib/cbor/version.rb +3 -0
  42. data/spec/buffer_io_spec.rb +260 -0
  43. data/spec/buffer_spec.rb +576 -0
  44. data/spec/cases.cbor +0 -0
  45. data/spec/cases.cbor_stream +0 -0
  46. data/spec/cases.json +1 -0
  47. data/spec/cases.msg +0 -0
  48. data/spec/cases_compact.msg +0 -0
  49. data/spec/cases_spec.rb +39 -0
  50. data/spec/format_spec.rb +445 -0
  51. data/spec/packer_spec.rb +127 -0
  52. data/spec/random_compat.rb +24 -0
  53. data/spec/spec_helper.rb +45 -0
  54. data/spec/unpacker_spec.rb +238 -0
  55. metadata +196 -0
data/doclib/cbor/simple.rb ADDED
@@ -0,0 +1,15 @@
+ module CBOR
+
+   #
+   # CBOR::Simple is used to carry around simple values that don't map
+   # into Ruby classes (false, true, and nil are the ones that do).
+   class Simple
+
+     # @param value [Integer] The integer number of the simple value
+     def initialize(value)
+     end
+
+     attr_reader :value
+
+   end
+ end
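
As a quick illustration of how this stub is meant to be used, the sketch below round-trips a CBOR simple value. It assumes the gem's top-level CBOR.encode/CBOR.decode helpers (documented in data/doclib/cbor.rb) and plain RFC 7049 encoding rules; it is an illustrative sketch, not an excerpt from the gem's specs.

    require 'cbor'

    # simple(42) has no native Ruby counterpart (unlike false/true/nil),
    # so the decoder is expected to wrap it in CBOR::Simple.
    # "\xf8\x2a" is the two-byte encoding of simple(42).
    decoded = CBOR.decode("\xf8\x2a".b)
    p decoded.class   # expected: CBOR::Simple
    p decoded.value   # expected: 42

    # Wrapping the number in CBOR::Simple should encode it back as a
    # simple value rather than as the integer 42 (0x18 0x2a).
    p CBOR.encode(CBOR::Simple.new(42)) == "\xf8\x2a".b
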
data/doclib/cbor/tagged.rb ADDED
@@ -0,0 +1,16 @@
+ module CBOR
+
+   #
+   # CBOR::Tagged is used to carry around tagged values that don't map
+   # into Ruby classes.
+   class Tagged
+
+     # @param tag [Integer] The integer number of the tag
+     # @param value [Object] The value that is being tagged
+     def initialize(tag, value)
+     end
+
+     attr_reader :tag, :value
+
+   end
+ end
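
A similar hedged sketch for tagged values: tag 42 is not given a dedicated Ruby mapping by this gem, so decoding it should yield a CBOR::Tagged wrapper, and re-encoding the wrapper should preserve the tag. CBOR.encode/CBOR.decode are assumed as above.

    require 'cbor'

    # "\xd8\x2a" introduces tag 42; "\x01" is the tagged integer 1.
    t = CBOR.decode("\xd8\x2a\x01".b)
    p t.tag     # expected: 42
    p t.value   # expected: 1

    # Building the wrapper by hand and encoding it should give the
    # same tagged bytes back.
    p CBOR.encode(CBOR::Tagged.new(42, 1)) == "\xd8\x2a\x01".b
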
data/doclib/cbor/unpacker.rb ADDED
@@ -0,0 +1,138 @@
+ module CBOR
+
+   #
+   # CBOR::Unpacker is an interface to deserialize objects from an internal buffer,
+   # which is a CBOR::Buffer.
+   #
+   class Unpacker
+     #
+     # Creates a CBOR::Unpacker instance.
+     #
+     # @overload initialize(options={})
+     #   @param options [Hash]
+     #
+     # @overload initialize(io, options={})
+     #   @param io [IO]
+     #   @param options [Hash]
+     #   This unpacker reads data from the _io_ to fill the internal buffer.
+     #   _io_ must respond to the readpartial(length [,string]) or read(length [,string]) method.
+     #
+     # See Buffer#initialize for supported options.
+     #
+     def initialize(*args)
+     end
+
+     #
+     # Internal buffer
+     #
+     # @return [CBOR::Buffer]
+     #
+     attr_reader :buffer
+
+     #
+     # Deserializes an object from the internal buffer and returns it.
+     #
+     # If there is not enough data in the buffer, this method raises EOFError.
+     # If the data format is invalid, this method raises CBOR::MalformedFormatError.
+     # If the stack is too deep, this method raises CBOR::StackError.
+     #
+     # @return [Object] deserialized object
+     #
+     def read
+     end
+
+     alias unpack read
+
+     #
+     # Deserializes an object and ignores it. This method is faster than _read_.
+     #
+     # This method can raise the same errors as _read_.
+     #
+     # @return nil
+     #
+     def skip
+     end
+
+     #
+     # Deserializes a nil value if one is next and returns _true_.
+     # Otherwise, if a byte exists but does not represent a nil value,
+     # returns _false_.
+     #
+     # If there is not enough data in the buffer, this method raises EOFError.
+     #
+     # @return [Boolean]
+     #
+     def skip_nil
+     end
+
+     #
+     # Reads the header of an array and returns its size.
+     # It converts a serialized array into a stream of elements.
+     #
+     # If the serialized object is not an array, it raises CBOR::TypeError.
+     # If there is not enough data in the buffer, this method raises EOFError.
+     #
+     # @return [Integer] size of the array
+     #
+     def read_array_header
+     end
+
+     #
+     # Reads the header of a map and returns its size.
+     # It converts a serialized map into a stream of key-value pairs.
+     #
+     # If the serialized object is not a map, it raises CBOR::TypeError.
+     # If there is not enough data in the buffer, this method raises EOFError.
+     #
+     # @return [Integer] size of the map
+     #
+     def read_map_header
+     end
+
+     #
+     # Appends data into the internal buffer.
+     # This method calls buffer.append(data).
+     #
+     # @param data [String]
+     # @return [Unpacker] self
+     #
+     def feed(data)
+     end
+
+     #
+     # Repeatedly deserializes objects.
+     #
+     # It repeats until the internal buffer no longer contains a complete object.
+     #
+     # If an IO is set, it keeps reading data from the IO whenever the buffer
+     # becomes empty, until the IO raises EOFError.
+     #
+     # This method can raise the same errors as _read_, except EOFError.
+     #
+     # @yieldparam object [Object] deserialized object
+     # @return nil
+     #
+     def each(&block)
+     end
+
+     #
+     # Appends data into the internal buffer and repeatedly deserializes objects.
+     # This method is equivalent to calling feed(data) followed by each.
+     #
+     # @param data [String]
+     # @yieldparam object [Object] deserialized object
+     # @return nil
+     #
+     def feed_each(data, &block)
+     end
+
+     #
+     # Resets the deserialization state of the unpacker and clears the internal buffer.
+     #
+     # @return nil
+     #
+     def reset
+     end
+   end
+
+ end
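
The feed/each/feed_each trio documented above is the streaming interface. A minimal sketch, using standard CBOR byte strings and assuming the behaviour described in the comments (each yields only complete top-level items and leaves partial input buffered):

    require 'cbor'

    unpacker = CBOR::Unpacker.new

    # Two complete items in one chunk: the array [1, 2] and the text string "hi".
    unpacker.feed_each("\x82\x01\x02\x62hi".b) do |obj|
      p obj                       # expected: [1, 2], then "hi"
    end

    # Partial input stays buffered until the item is complete.
    unpacker.feed("\x82\x01".b)   # array header plus first element only
    unpacker.each { |obj| p obj } # yields nothing yet
    unpacker.feed("\x02".b)
    unpacker.each { |obj| p obj } # expected: [1, 2]
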
data/ext/cbor/buffer.c ADDED
@@ -0,0 +1,693 @@
+ /*
+  * CBOR for Ruby
+  *
+  * Copyright (C) 2013 Carsten Bormann
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License").
+  *
+  * Based on:
+  ***********/
+ /*
+  * MessagePack for Ruby
+  *
+  * Copyright (C) 2008-2013 Sadayuki Furuhashi
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+ #include "buffer.h"
+ #include "rmem.h"
+
+ #ifdef COMPAT_HAVE_ENCODING /* see compat.h */
+ int s_enc_ascii8bit;
+ int s_enc_usascii;
+ int s_enc_utf8;
+ VALUE s_enc_utf8_value;
+ #endif
+
+ #ifndef HAVE_RB_STR_REPLACE
+ static ID s_replace;
+ #endif
+
+ #ifndef DISABLE_RMEM
+ static msgpack_rmem_t s_rmem;
+ #endif
+
+ void msgpack_buffer_static_init()
+ {
+ #ifndef DISABLE_RMEM
+     msgpack_rmem_init(&s_rmem);
+ #endif
+ #ifndef HAVE_RB_STR_REPLACE
+     s_replace = rb_intern("replace");
+ #endif
+
+ #ifdef COMPAT_HAVE_ENCODING
+     s_enc_ascii8bit = rb_ascii8bit_encindex();
+     s_enc_usascii = rb_usascii_encindex();
+     s_enc_utf8 = rb_utf8_encindex();
+     s_enc_utf8_value = rb_enc_from_encoding(rb_utf8_encoding());
+ #endif
+ }
+
+ void msgpack_buffer_static_destroy()
+ {
+ #ifndef DISABLE_RMEM
+     msgpack_rmem_destroy(&s_rmem);
+ #endif
+ }
+
+ void msgpack_buffer_init(msgpack_buffer_t* b)
+ {
+     memset(b, 0, sizeof(msgpack_buffer_t));
+
+     b->head = &b->tail;
+     b->write_reference_threshold = MSGPACK_BUFFER_STRING_WRITE_REFERENCE_DEFAULT;
+     b->read_reference_threshold = MSGPACK_BUFFER_STRING_READ_REFERENCE_DEFAULT;
+     b->io_buffer_size = MSGPACK_BUFFER_IO_BUFFER_SIZE_DEFAULT;
+     b->io = Qnil;
+     b->io_buffer = Qnil;
+ }
+
+ static void _msgpack_buffer_chunk_destroy(msgpack_buffer_chunk_t* c)
+ {
+     if(c->mem != NULL) {
+ #ifndef DISABLE_RMEM
+         if(!msgpack_rmem_free(&s_rmem, c->mem)) {
+             free(c->mem);
+         }
+         /* no needs to update rmem_owner because chunks will not be
+          * free()ed (left in free_list) and thus *rmem_owner is
+          * always valid. */
+ #else
+         free(c->mem);
+ #endif
+     }
+     c->first = NULL;
+     c->last = NULL;
+     c->mem = NULL;
+ }
+
+ void msgpack_buffer_destroy(msgpack_buffer_t* b)
+ {
+     /* head is always available */
+     msgpack_buffer_chunk_t* c = b->head;
+     while(c != &b->tail) {
+         msgpack_buffer_chunk_t* n = c->next;
+         _msgpack_buffer_chunk_destroy(c);
+         free(c);
+         c = n;
+     }
+     _msgpack_buffer_chunk_destroy(c);
+
+     c = b->free_list;
+     while(c != NULL) {
+         msgpack_buffer_chunk_t* n = c->next;
+         free(c);
+         c = n;
+     }
+ }
+
+ void msgpack_buffer_mark(msgpack_buffer_t* b)
+ {
+     /* head is always available */
+     msgpack_buffer_chunk_t* c = b->head;
+     while(c != &b->tail) {
+         rb_gc_mark(c->mapped_string);
+         c = c->next;
+     }
+     rb_gc_mark(c->mapped_string);
+
+     rb_gc_mark(b->io);
+     rb_gc_mark(b->io_buffer);
+
+     rb_gc_mark(b->owner);
+ }
+
+ bool _msgpack_buffer_shift_chunk(msgpack_buffer_t* b)
+ {
+     _msgpack_buffer_chunk_destroy(b->head);
+
+     if(b->head == &b->tail) {
+         /* list becomes empty. don't add head to free_list
+          * because head should be always available */
+         b->tail_buffer_end = NULL;
+         b->read_buffer = NULL;
+         return false;
+     }
+
+     /* add head to free_list */
+     msgpack_buffer_chunk_t* next_head = b->head->next;
+     b->head->next = b->free_list;
+     b->free_list = b->head;
+
+     b->head = next_head;
+     b->read_buffer = next_head->first;
+
+     return true;
+ }
+
+ void msgpack_buffer_clear(msgpack_buffer_t* b)
+ {
+     while(_msgpack_buffer_shift_chunk(b)) {
+         ;
+     }
+ }
+
+ size_t msgpack_buffer_read_to_string_nonblock(msgpack_buffer_t* b, VALUE string, size_t length)
+ {
+     size_t avail = msgpack_buffer_top_readable_size(b);
+
+ #ifndef DISABLE_BUFFER_READ_REFERENCE_OPTIMIZE
+     /* optimize */
+     if(length <= avail && RSTRING_LEN(string) == 0 &&
+             b->head->mapped_string != NO_MAPPED_STRING &&
+             length >= b->read_reference_threshold) {
+         VALUE s = _msgpack_buffer_refer_head_mapped_string(b, length);
+ #ifndef HAVE_RB_STR_REPLACE
+         /* TODO MRI 1.8 */
+         rb_funcall(string, s_replace, 1, s);
+ #else
+         rb_str_replace(string, s);
+ #endif
+         /* here doesn't have to call ENCODING_SET because
+          * encoding of s is always ASCII-8BIT */
+         _msgpack_buffer_consumed(b, length);
+         return length;
+     }
+ #endif
+
+     size_t const length_orig = length;
+
+     while(true) {
+         if(length <= avail) {
+             rb_str_buf_cat(string, b->read_buffer, length);
+             _msgpack_buffer_consumed(b, length);
+             return length_orig;
+         }
+
+         rb_str_buf_cat(string, b->read_buffer, avail);
+         length -= avail;
+
+         if(!_msgpack_buffer_shift_chunk(b)) {
+             return length_orig - length;
+         }
+
+         avail = msgpack_buffer_top_readable_size(b);
+     }
+ }
+
+ size_t msgpack_buffer_read_nonblock(msgpack_buffer_t* b, char* buffer, size_t length)
+ {
+     /* buffer == NULL means skip */
+     size_t const length_orig = length;
+
+     while(true) {
+         size_t avail = msgpack_buffer_top_readable_size(b);
+
+         if(length <= avail) {
+             if(buffer != NULL) {
+                 memcpy(buffer, b->read_buffer, length);
+             }
+             _msgpack_buffer_consumed(b, length);
+             return length_orig;
+         }
+
+         if(buffer != NULL) {
+             memcpy(buffer, b->read_buffer, avail);
+             buffer += avail;
+         }
+         length -= avail;
+
+         if(!_msgpack_buffer_shift_chunk(b)) {
+             return length_orig - length;
+         }
+     }
+ }
+
+ size_t msgpack_buffer_all_readable_size(const msgpack_buffer_t* b)
+ {
+     size_t sz = msgpack_buffer_top_readable_size(b);
+
+     if(b->head == &b->tail) {
+         return sz;
+     }
+
+     msgpack_buffer_chunk_t* c = b->head->next;
+
+     while(true) {
+         sz += c->last - c->first;
+         if(c == &b->tail) {
+             return sz;
+         }
+         c = c->next;
+     }
+ }
+
+ bool _msgpack_buffer_read_all2(msgpack_buffer_t* b, char* buffer, size_t length)
+ {
+     if(!msgpack_buffer_ensure_readable(b, length)) {
+         return false;
+     }
+
+     msgpack_buffer_read_nonblock(b, buffer, length);
+     return true;
+ }
+
+
+ static inline msgpack_buffer_chunk_t* _msgpack_buffer_alloc_new_chunk(msgpack_buffer_t* b)
+ {
+     msgpack_buffer_chunk_t* reuse = b->free_list;
+     if(reuse == NULL) {
+         return malloc(sizeof(msgpack_buffer_chunk_t));
+     }
+     b->free_list = b->free_list->next;
+     return reuse;
+ }
+
+ static inline void _msgpack_buffer_add_new_chunk(msgpack_buffer_t* b)
+ {
+     if(b->head == &b->tail) {
+         if(b->tail.first == NULL) {
+             /* empty buffer */
+             return;
+         }
+
+         msgpack_buffer_chunk_t* nc = _msgpack_buffer_alloc_new_chunk(b);
+
+         *nc = b->tail;
+         b->head = nc;
+         nc->next = &b->tail;
+
+     } else {
+         /* search node before tail */
+         msgpack_buffer_chunk_t* before_tail = b->head;
+         while(before_tail->next != &b->tail) {
+             before_tail = before_tail->next;
+         }
+
+         msgpack_buffer_chunk_t* nc = _msgpack_buffer_alloc_new_chunk(b);
+
+ #ifndef DISABLE_RMEM
+ #ifndef DISABLE_RMEM_REUSE_INTERNAL_FRAGMENT
+         if(b->rmem_last == b->tail_buffer_end) {
+             /* reuse unused rmem space */
+             size_t unused = b->tail_buffer_end - b->tail.last;
+             b->rmem_last -= unused;
+         }
+ #endif
+ #endif
+
+         /* rebuild tail */
+         *nc = b->tail;
+         before_tail->next = nc;
+         nc->next = &b->tail;
+     }
+ }
+
+ static inline void _msgpack_buffer_append_reference(msgpack_buffer_t* b, VALUE string)
+ {
+     VALUE mapped_string = rb_str_dup(string);
+ #ifdef COMPAT_HAVE_ENCODING
+     ENCODING_SET(mapped_string, s_enc_ascii8bit);
+ #endif
+
+     _msgpack_buffer_add_new_chunk(b);
+
+     char* data = RSTRING_PTR(mapped_string);
+     size_t length = RSTRING_LEN(mapped_string);
+
+     b->tail.first = (char*) data;
+     b->tail.last = (char*) data + length;
+     b->tail.mapped_string = mapped_string;
+     b->tail.mem = NULL;
+
+     /* msgpack_buffer_writable_size should return 0 for mapped chunk */
+     b->tail_buffer_end = b->tail.last;
+
+     /* consider read_buffer */
+     if(b->head == &b->tail) {
+         b->read_buffer = b->tail.first;
+     }
+ }
+
+ void _msgpack_buffer_append_long_string(msgpack_buffer_t* b, VALUE string)
+ {
+     size_t length = RSTRING_LEN(string);
+
+     if(b->io != Qnil) {
+         msgpack_buffer_flush(b);
+         rb_funcall(b->io, b->io_write_all_method, 1, string);
+
+     } else if(!STR_DUP_LIKELY_DOES_COPY(string)) {
+         _msgpack_buffer_append_reference(b, string);
+
+     } else {
+         msgpack_buffer_append(b, RSTRING_PTR(string), length);
+     }
+ }
+
+ static inline void* _msgpack_buffer_chunk_malloc(
+         msgpack_buffer_t* b, msgpack_buffer_chunk_t* c,
+         size_t required_size, size_t* allocated_size)
+ {
+ #ifndef DISABLE_RMEM
+     if(required_size <= MSGPACK_RMEM_PAGE_SIZE) {
+ #ifndef DISABLE_RMEM_REUSE_INTERNAL_FRAGMENT
+         if((size_t)(b->rmem_end - b->rmem_last) < required_size) {
+ #endif
+             /* alloc new rmem page */
+             *allocated_size = MSGPACK_RMEM_PAGE_SIZE;
+             char* buffer = msgpack_rmem_alloc(&s_rmem);
+             c->mem = buffer;
+
+             /* update rmem owner */
+             b->rmem_owner = &c->mem;
+             b->rmem_last = b->rmem_end = buffer + MSGPACK_RMEM_PAGE_SIZE;
+
+             return buffer;
+
+ #ifndef DISABLE_RMEM_REUSE_INTERNAL_FRAGMENT
+         } else {
+             /* reuse unused rmem */
+             *allocated_size = (size_t)(b->rmem_end - b->rmem_last);
+             char* buffer = b->rmem_last;
+             b->rmem_last = b->rmem_end;
+
+             /* update rmem owner */
+             c->mem = *b->rmem_owner;
+             *b->rmem_owner = NULL;
+             b->rmem_owner = &c->mem;
+
+             return buffer;
+         }
+ #endif
+     }
+ #else
+     if(required_size < 72) {
+         required_size = 72;
+     }
+ #endif
+
+     // TODO alignment?
+     *allocated_size = required_size;
+     void* mem = malloc(required_size);
+     c->mem = mem;
+     return mem;
+ }
+
+ static inline void* _msgpack_buffer_chunk_realloc(
+         msgpack_buffer_t* b, msgpack_buffer_chunk_t* c,
+         void* mem, size_t required_size, size_t* current_size)
+ {
+     if(mem == NULL) {
+         return _msgpack_buffer_chunk_malloc(b, c, required_size, current_size);
+     }
+
+     size_t next_size = *current_size * 2;
+     while(next_size < required_size) {
+         next_size *= 2;
+     }
+     *current_size = next_size;
+     mem = realloc(mem, next_size);
+
+     c->mem = mem;
+     return mem;
+ }
+
+ void _msgpack_buffer_expand(msgpack_buffer_t* b, const char* data, size_t length, bool flush_to_io)
+ {
+     if(flush_to_io && b->io != Qnil) {
+         msgpack_buffer_flush(b);
+         if(msgpack_buffer_writable_size(b) >= length) {
+             /* data == NULL means ensure_writable */
+             if(data != NULL) {
+                 size_t tail_avail = msgpack_buffer_writable_size(b);
+                 memcpy(b->tail.last, data, length);
+                 b->tail.last += tail_avail;
+             }
+             return;
+         }
+     }
+
+     /* data == NULL means ensure_writable */
+     if(data != NULL) {
+         size_t tail_avail = msgpack_buffer_writable_size(b);
+         memcpy(b->tail.last, data, tail_avail);
+         b->tail.last += tail_avail;
+         data += tail_avail;
+         length -= tail_avail;
+     }
+
+     size_t capacity = b->tail.last - b->tail.first;
+
+     /* can't realloc mapped chunk or rmem page */
+     if(b->tail.mapped_string != NO_MAPPED_STRING
+ #ifndef DISABLE_RMEM
+             || capacity <= MSGPACK_RMEM_PAGE_SIZE
+ #endif
+             ) {
+         /* allocate new chunk */
+         _msgpack_buffer_add_new_chunk(b);
+
+         char* mem = _msgpack_buffer_chunk_malloc(b, &b->tail, length, &capacity);
+
+         char* last = mem;
+         if(data != NULL) {
+             memcpy(mem, data, length);
+             last += length;
+         }
+
+         /* rebuild tail chunk */
+         b->tail.first = mem;
+         b->tail.last = last;
+         b->tail.mapped_string = NO_MAPPED_STRING;
+         b->tail_buffer_end = mem + capacity;
+
+         /* consider read_buffer */
+         if(b->head == &b->tail) {
+             b->read_buffer = b->tail.first;
+         }
+
+     } else {
+         /* realloc malloc()ed chunk or NULL */
+         size_t tail_filled = b->tail.last - b->tail.first;
+         char* mem = _msgpack_buffer_chunk_realloc(b, &b->tail,
+                 b->tail.first, tail_filled+length, &capacity);
+
+         char* last = mem + tail_filled;
+         if(data != NULL) {
+             memcpy(last, data, length);
+             last += length;
+         }
+
+         /* consider read_buffer */
+         if(b->head == &b->tail) {
+             size_t read_offset = b->read_buffer - b->head->first;
+             b->read_buffer = mem + read_offset;
+         }
+
+         /* rebuild tail chunk */
+         b->tail.first = mem;
+         b->tail.last = last;
+         b->tail_buffer_end = mem + capacity;
+     }
+ }
+
+ static inline VALUE _msgpack_buffer_head_chunk_as_string(msgpack_buffer_t* b)
+ {
+     size_t length = b->head->last - b->read_buffer;
+     if(length == 0) {
+         return rb_str_buf_new(0);
+     }
+
+     if(b->head->mapped_string != NO_MAPPED_STRING) {
+         return _msgpack_buffer_refer_head_mapped_string(b, length);
+     }
+
+     return rb_str_new(b->read_buffer, length);
+ }
+
+ static inline VALUE _msgpack_buffer_chunk_as_string(msgpack_buffer_chunk_t* c)
+ {
+     size_t chunk_size = c->last - c->first;
+     if(chunk_size == 0) {
+         return rb_str_buf_new(0);
+     }
+
+     if(c->mapped_string != NO_MAPPED_STRING) {
+         return rb_str_dup(c->mapped_string);
+     }
+
+     return rb_str_new(c->first, chunk_size);
+ }
+
+ VALUE msgpack_buffer_all_as_string(msgpack_buffer_t* b)
+ {
+     if(b->head == &b->tail) {
+         return _msgpack_buffer_head_chunk_as_string(b);
+     }
+
+     size_t length = msgpack_buffer_all_readable_size(b);
+     VALUE string = rb_str_new(NULL, length);
+     char* buffer = RSTRING_PTR(string);
+
+     size_t avail = msgpack_buffer_top_readable_size(b);
+     memcpy(buffer, b->read_buffer, avail);
+     buffer += avail;
+     length -= avail;
+
+     msgpack_buffer_chunk_t* c = b->head->next;
+
+     while(true) {
+         avail = c->last - c->first;
+         memcpy(buffer, c->first, avail);
+
+         if(length <= avail) {
+             return string;
+         }
+         buffer += avail;
+         length -= avail;
+
+         c = c->next;
+     }
+ }
+
+ VALUE msgpack_buffer_all_as_string_array(msgpack_buffer_t* b)
+ {
+     if(b->head == &b->tail) {
+         VALUE s = msgpack_buffer_all_as_string(b);
+         VALUE ary = rb_ary_new3(1, s);
+         return ary;
+     }
+
+     /* TODO optimize ary construction */
+     VALUE ary = rb_ary_new();
+
+     VALUE s = _msgpack_buffer_head_chunk_as_string(b);
+     rb_ary_push(ary, s);
+
+     msgpack_buffer_chunk_t* c = b->head->next;
+
+     while(true) {
+         s = _msgpack_buffer_chunk_as_string(c);
+         rb_ary_push(ary, s);
+         if(c == &b->tail) {
+             return ary;
+         }
+         c = c->next;
+     }
+
+     return ary;
+ }
+
+ size_t msgpack_buffer_flush_to_io(msgpack_buffer_t* b, VALUE io, ID write_method, bool consume)
+ {
+     if(msgpack_buffer_top_readable_size(b) == 0) {
+         return 0;
+     }
+
+     VALUE s = _msgpack_buffer_head_chunk_as_string(b);
+     rb_funcall(io, write_method, 1, s);
+     size_t sz = RSTRING_LEN(s);
+
+     if(consume) {
+         while(_msgpack_buffer_shift_chunk(b)) {
+             s = _msgpack_buffer_chunk_as_string(b->head);
+             rb_funcall(io, write_method, 1, s);
+             sz += RSTRING_LEN(s);
+         }
+         return sz;
+
+     } else {
+         if(b->head == &b->tail) {
+             return sz;
+         }
+         msgpack_buffer_chunk_t* c = b->head->next;
+         while(true) {
+             s = _msgpack_buffer_chunk_as_string(c);
+             rb_funcall(io, write_method, 1, s);
+             sz += RSTRING_LEN(s);
+             if(c == &b->tail) {
+                 return sz;
+             }
+             c = c->next;
+         }
+     }
+ }
+
+ size_t _msgpack_buffer_feed_from_io(msgpack_buffer_t* b)
+ {
+     if(b->io_buffer == Qnil) {
+         b->io_buffer = rb_funcall(b->io, b->io_partial_read_method, 1, LONG2NUM(b->io_buffer_size));
+         if(b->io_buffer == Qnil) {
+             rb_raise(rb_eEOFError, "IO reached end of file");
+         }
+         StringValue(b->io_buffer);
+     } else {
+         VALUE ret = rb_funcall(b->io, b->io_partial_read_method, 2, LONG2NUM(b->io_buffer_size), b->io_buffer);
+         if(ret == Qnil) {
+             rb_raise(rb_eEOFError, "IO reached end of file");
+         }
+     }
+
+     size_t len = RSTRING_LEN(b->io_buffer);
+     if(len == 0) {
+         rb_raise(rb_eEOFError, "IO reached end of file");
+     }
+
+     /* TODO zero-copy optimize? */
+     msgpack_buffer_append_nonblock(b, RSTRING_PTR(b->io_buffer), len);
+
+     return len;
+ }
+
+ size_t _msgpack_buffer_read_from_io_to_string(msgpack_buffer_t* b, VALUE string, size_t length)
+ {
+     if(RSTRING_LEN(string) == 0) {
+         /* direct read */
+         VALUE ret = rb_funcall(b->io, b->io_partial_read_method, 2, LONG2NUM(length), string);
+         if(ret == Qnil) {
+             return 0;
+         }
+         return RSTRING_LEN(string);
+     }
+
+     /* copy via io_buffer */
+     if(b->io_buffer == Qnil) {
+         b->io_buffer = rb_str_buf_new(0);
+     }
+
+     VALUE ret = rb_funcall(b->io, b->io_partial_read_method, 2, LONG2NUM(length), b->io_buffer);
+     if(ret == Qnil) {
+         return 0;
+     }
+     size_t rl = RSTRING_LEN(b->io_buffer);
+
+     rb_str_buf_cat(string, (const void*)RSTRING_PTR(b->io_buffer), rl);
+     return rl;
+ }
+
+ size_t _msgpack_buffer_skip_from_io(msgpack_buffer_t* b, size_t length)
+ {
+     if(b->io_buffer == Qnil) {
+         b->io_buffer = rb_str_buf_new(0);
+     }
+
+     VALUE ret = rb_funcall(b->io, b->io_partial_read_method, 2, LONG2NUM(length), b->io_buffer);
+     if(ret == Qnil) {
+         return 0;
+     }
+     return RSTRING_LEN(b->io_buffer);
+ }
+
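
The _msgpack_buffer_feed_from_io / _msgpack_buffer_read_from_io_to_string pair above is what services an unpacker constructed around an IO: whenever the internal CBOR::Buffer runs out of bytes, the buffer refills itself via the IO's readpartial (or read) method and raises EOFError at end of input. A hedged Ruby sketch of that path, using StringIO as the IO:

    require 'cbor'
    require 'stringio'

    # Two CBOR items: the text string "foo" (0x63 'f' 'o' 'o'),
    # then the integer 42 (0x18 0x2a).
    io = StringIO.new("\x63foo\x18\x2a".b)

    # The unpacker pulls from the IO on demand; each terminates once the
    # IO signals end of file.
    unpacker = CBOR::Unpacker.new(io)
    unpacker.each do |obj|
      p obj   # expected: "foo", then 42
    end
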