google-protobuf 3.0.0.alpha.3.1.pre → 3.0.0.alpha.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of google-protobuf might be problematic.
- checksums.yaml +4 -4
- data/ext/google/protobuf_c/defs.c +160 -83
- data/ext/google/protobuf_c/encode_decode.c +177 -144
- data/ext/google/protobuf_c/extconf.rb +1 -1
- data/ext/google/protobuf_c/map.c +15 -12
- data/ext/google/protobuf_c/message.c +104 -40
- data/ext/google/protobuf_c/protobuf.c +16 -6
- data/ext/google/protobuf_c/protobuf.h +11 -8
- data/ext/google/protobuf_c/repeated_field.c +138 -85
- data/ext/google/protobuf_c/storage.c +35 -20
- data/ext/google/protobuf_c/upb.c +3443 -2673
- data/ext/google/protobuf_c/upb.h +5086 -4919
- data/lib/google/protobuf.rb +36 -0
- data/lib/google/protobuf/message_exts.rb +53 -0
- data/lib/google/protobuf/repeated_field.rb +188 -0
- data/tests/basic.rb +119 -9
- metadata +6 -4

data/ext/google/protobuf_c/protobuf.c

@@ -39,6 +39,9 @@
 // Ruby integers) to MessageDef/EnumDef instances (as Ruby values).
 VALUE upb_def_to_ruby_obj_map;
 
+VALUE cError;
+VALUE cParseError;
+
 void add_def_obj(const void* def, VALUE value) {
   rb_hash_aset(upb_def_to_ruby_obj_map, ULL2NUM((intptr_t)def), value);
 }
@@ -64,6 +67,15 @@ rb_encoding* kRubyStringUtf8Encoding;
 rb_encoding* kRubyStringASCIIEncoding;
 rb_encoding* kRubyString8bitEncoding;
 
+// Ruby-interned string: "descriptor". We use this identifier to store an
+// instance variable on message classes we create in order to link them back to
+// their descriptors.
+//
+// We intern this once at module load time then use the interned identifier at
+// runtime in order to avoid the cost of repeatedly interning in hot paths.
+const char* kDescriptorInstanceVar = "descriptor";
+ID descriptor_instancevar_interned;
+
 // -----------------------------------------------------------------------------
 // Initialization/entry point.
 // -----------------------------------------------------------------------------
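
For illustration only (a Ruby sketch, not part of this diff): the interned "descriptor" instance variable is what later lets validate_type_class tell DescriptorPool-generated classes apart from plain Ruby classes. The message definition below is a made-up example using the builder API exercised in data/tests/basic.rb.

  require 'google/protobuf'

  pool = Google::Protobuf::DescriptorPool.new
  pool.build do
    add_message "example.Widget" do
      optional :name, :string, 1
    end
  end
  Widget = pool.lookup("example.Widget").msgclass

  # Classes built by the DescriptorPool carry their Descriptor in the
  # "descriptor" instance variable, so they are accepted as element types:
  list = Google::Protobuf::RepeatedField.new(:message, Widget)
  list << Widget.new(name: "gadget")

  # A plain Ruby class has no descriptor and is rejected:
  begin
    Google::Protobuf::RepeatedField.new(:message, String)
  rescue ArgumentError => e
    puts e.message  # "Type class has no descriptor. ..."
  end
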
@@ -74,6 +86,8 @@ void Init_protobuf_c() {
   VALUE google = rb_define_module("Google");
   VALUE protobuf = rb_define_module_under(google, "Protobuf");
   VALUE internal = rb_define_module_under(protobuf, "Internal");
+
+  descriptor_instancevar_interned = rb_intern(kDescriptorInstanceVar);
   DescriptorPool_register(protobuf);
   Descriptor_register(protobuf);
   FieldDescriptor_register(protobuf);
@@ -86,12 +100,8 @@ void Init_protobuf_c() {
   RepeatedField_register(protobuf);
   Map_register(protobuf);
 
-
-
-  rb_define_singleton_method(protobuf, "encode_json",
-                             Google_Protobuf_encode_json, 1);
-  rb_define_singleton_method(protobuf, "decode_json",
-                             Google_Protobuf_decode_json, 2);
+  cError = rb_const_get(protobuf, rb_intern("Error"));
+  cParseError = rb_const_get(protobuf, rb_intern("ParseError"));
 
   rb_define_singleton_method(protobuf, "deep_copy",
                              Google_Protobuf_deep_copy, 1);
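
A hedged usage sketch (not from this diff): with cError and cParseError now resolved from the Ruby-defined Google::Protobuf module, malformed input is expected to surface as Google::Protobuf::ParseError. The message type below is assumed purely for illustration.

  require 'google/protobuf'

  pool = Google::Protobuf::DescriptorPool.new
  pool.build do
    add_message "example.Ping" do
      optional :id, :int32, 1
    end
  end
  Ping = pool.lookup("example.Ping").msgclass

  begin
    Ping.decode("\xff\xff\xff\xff".b)  # arbitrary bytes, not valid wire format
  rescue Google::Protobuf::ParseError => e
    puts "rejected: #{e.message}"
  end
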

data/ext/google/protobuf_c/protobuf.h

@@ -161,7 +161,8 @@ extern VALUE cOneofBuilderContext;
 extern VALUE cEnumBuilderContext;
 extern VALUE cBuilder;
 
-extern
+extern VALUE cError;
+extern VALUE cParseError;
 
 // We forward-declare all of the Ruby method implementations here because we
 // sometimes call the methods directly across .c files, rather than going
@@ -361,19 +362,20 @@ extern VALUE cRepeatedField;
 RepeatedField* ruby_to_RepeatedField(VALUE value);
 
 VALUE RepeatedField_each(VALUE _self);
-VALUE RepeatedField_index(VALUE
+VALUE RepeatedField_index(int argc, VALUE* argv, VALUE _self);
 void* RepeatedField_index_native(VALUE _self, int index);
 VALUE RepeatedField_index_set(VALUE _self, VALUE _index, VALUE val);
 void RepeatedField_reserve(RepeatedField* self, int new_size);
 VALUE RepeatedField_push(VALUE _self, VALUE val);
 void RepeatedField_push_native(VALUE _self, void* data);
-VALUE
+VALUE RepeatedField_pop_one(VALUE _self);
 VALUE RepeatedField_insert(int argc, VALUE* argv, VALUE _self);
 VALUE RepeatedField_replace(VALUE _self, VALUE list);
 VALUE RepeatedField_clear(VALUE _self);
 VALUE RepeatedField_length(VALUE _self);
 VALUE RepeatedField_dup(VALUE _self);
 VALUE RepeatedField_deep_copy(VALUE _self);
+VALUE RepeatedField_to_ary(VALUE _self);
 VALUE RepeatedField_eq(VALUE _self, VALUE _other);
 VALUE RepeatedField_hash(VALUE _self);
 VALUE RepeatedField_inspect(VALUE _self);
@@ -497,11 +499,6 @@ VALUE Message_encode(VALUE klass, VALUE msg_rb);
 VALUE Message_decode_json(VALUE klass, VALUE data);
 VALUE Message_encode_json(VALUE klass, VALUE msg_rb);
 
-VALUE Google_Protobuf_encode(VALUE self, VALUE msg_rb);
-VALUE Google_Protobuf_decode(VALUE self, VALUE klass, VALUE msg_rb);
-VALUE Google_Protobuf_encode_json(VALUE self, VALUE msg_rb);
-VALUE Google_Protobuf_decode_json(VALUE self, VALUE klass, VALUE msg_rb);
-
 VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj);
 
 VALUE build_module_from_enumdesc(EnumDescriptor* enumdef);
@@ -511,6 +508,10 @@ VALUE enum_resolve(VALUE self, VALUE sym);
 const upb_pbdecodermethod *new_fillmsg_decodermethod(
     Descriptor* descriptor, const void *owner);
 
+// Maximum depth allowed during encoding, to avoid stack overflows due to
+// cycles.
+#define ENCODE_MAX_NESTING 63
+
 // -----------------------------------------------------------------------------
 // Global map from upb {msg,enum}defs to wrapper Descriptor/EnumDescriptor
 // instances.
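
A sketch of what this limit guards against, under two assumptions not confirmed by this diff: that a self-referential message type can be declared as below, and that exceeding the limit makes encoding fail with an error rather than overflowing the stack (the exact exception class is not shown here).

  require 'google/protobuf'

  pool = Google::Protobuf::DescriptorPool.new
  pool.build do
    add_message "example.Node" do
      optional :child, :message, 1, "example.Node"
    end
  end
  Node = pool.lookup("example.Node").msgclass

  # Build a chain deeper than ENCODE_MAX_NESTING (63).
  root = Node.new
  tip = root
  100.times { tip.child = Node.new; tip = tip.child }

  begin
    Node.encode(root)
  rescue => e
    puts "encoding aborted: #{e.class}"  # deep nesting is rejected instead of recursing forever
  end
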
@@ -530,4 +531,6 @@ void check_upb_status(const upb_status* status, const char* msg);
     check_upb_status(&status, msg); \
   } while (0)
 
+extern ID descriptor_instancevar_interned;
+
 #endif // __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__

data/ext/google/protobuf_c/repeated_field.c

@@ -47,6 +47,34 @@ RepeatedField* ruby_to_RepeatedField(VALUE _self) {
   return self;
 }
 
+void* RepeatedField_memoryat(RepeatedField* self, int index, int element_size) {
+  return ((uint8_t *)self->elements) + index * element_size;
+}
+
+static int index_position(VALUE _index, RepeatedField* repeated_field) {
+  int index = NUM2INT(_index);
+  if (index < 0 && repeated_field->size > 0) {
+    index = repeated_field->size + index;
+  }
+  return index;
+}
+
+VALUE RepeatedField_subarray(VALUE _self, long beg, long len) {
+  RepeatedField* self = ruby_to_RepeatedField(_self);
+  int element_size = native_slot_size(self->field_type);
+  upb_fieldtype_t field_type = self->field_type;
+  VALUE field_type_class = self->field_type_class;
+
+  size_t off = beg * element_size;
+  VALUE ary = rb_ary_new2(len);
+  for (int i = beg; i < beg + len; i++, off += element_size) {
+    void* mem = ((uint8_t *)self->elements) + off;
+    VALUE elem = native_slot_get(field_type, field_type_class, mem);
+    rb_ary_push(ary, elem);
+  }
+  return ary;
+}
+
 /*
  * call-seq:
  *     RepeatedField.each(&block)
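
These helpers back the Ruby-visible indexing changes that follow. An illustrative sketch (not from this diff) of what index_position and RepeatedField_subarray amount to from Ruby:

  require 'google/protobuf'

  list = Google::Protobuf::RepeatedField.new(:int32)
  list << 10 << 20 << 30

  list[-1]    # => 30        (index_position maps -1 to size - 1)
  list[0, 2]  # => [10, 20]  (RepeatedField_subarray copies a slice into a plain Array)
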
@@ -67,29 +95,57 @@ VALUE RepeatedField_each(VALUE _self) {
     VALUE val = native_slot_get(field_type, field_type_class, memory);
     rb_yield(val);
   }
-  return
+  return _self;
 }
 
+
 /*
  * call-seq:
  *     RepeatedField.[](index) => value
  *
- * Accesses the element at the given index.
- * errors.
+ * Accesses the element at the given index. Returns nil on out-of-bounds
  */
-VALUE RepeatedField_index(VALUE
+VALUE RepeatedField_index(int argc, VALUE* argv, VALUE _self) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   int element_size = native_slot_size(self->field_type);
   upb_fieldtype_t field_type = self->field_type;
   VALUE field_type_class = self->field_type_class;
 
-
-
-
+  VALUE arg = argv[0];
+  long beg, len;
+
+  if (argc == 1){
+    if (FIXNUM_P(arg)) {
+      /* standard case */
+      void* memory;
+      int index = index_position(argv[0], self);
+      if (index < 0 || index >= self->size) {
+        return Qnil;
+      }
+      memory = RepeatedField_memoryat(self, index, element_size);
+      return native_slot_get(field_type, field_type_class, memory);
+    }else{
+      /* check if idx is Range */
+      switch (rb_range_beg_len(arg, &beg, &len, self->size, 0)) {
+        case Qfalse:
+          break;
+        case Qnil:
+          return Qnil;
+        default:
+          return RepeatedField_subarray(_self, beg, len);
+      }
+    }
   }
-
-
-
+  /* assume 2 arguments */
+  beg = NUM2LONG(argv[0]);
+  len = NUM2LONG(argv[1]);
+  if (beg < 0) {
+    beg += self->size;
+  }
+  if (beg >= self->size) {
+    return Qnil;
+  }
+  return RepeatedField_subarray(_self, beg, len);
 }
 
 /*
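
In Ruby terms, [] now mirrors Array#[]: a single integer takes the fast path, a Range or a (start, length) pair goes through RepeatedField_subarray, and out-of-range access returns nil. An illustrative sketch, not part of the diff:

  require 'google/protobuf'

  list = Google::Protobuf::RepeatedField.new(:string)
  list << "a" << "b" << "c" << "d"

  list[1]     # => "b"          (argc == 1, Fixnum)
  list[1..2]  # => ["b", "c"]   (argc == 1, Range via rb_range_beg_len)
  list[1, 2]  # => ["b", "c"]   (argc == 2, start and length)
  list[10]    # => nil
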
@@ -104,23 +160,24 @@ VALUE RepeatedField_index_set(VALUE _self, VALUE _index, VALUE val) {
   upb_fieldtype_t field_type = self->field_type;
   VALUE field_type_class = self->field_type_class;
   int element_size = native_slot_size(field_type);
+  void* memory;
 
-  int index =
+  int index = index_position(_index, self);
   if (index < 0 || index >= (INT_MAX - 1)) {
-
+    return Qnil;
   }
   if (index >= self->size) {
-    RepeatedField_reserve(self, index + 1);
     upb_fieldtype_t field_type = self->field_type;
     int element_size = native_slot_size(field_type);
+    RepeatedField_reserve(self, index + 1);
     for (int i = self->size; i <= index; i++) {
-      void* elem = (
+      void* elem = RepeatedField_memoryat(self, i, element_size);
       native_slot_init(field_type, elem);
     }
     self->size = index + 1;
   }
 
-
+  memory = RepeatedField_memoryat(self, index, element_size);
   native_slot_set(field_type, field_type_class, memory, val);
   return Qnil;
 }
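
An illustrative sketch (not from this diff) of the Ruby-visible effect: assigning past the end grows the field, filling the gap with the element type's default via native_slot_init, and negative indices are resolved by index_position.

  require 'google/protobuf'

  list = Google::Protobuf::RepeatedField.new(:int32)
  list << 1 << 2

  list[4] = 9
  list.to_a   # => [1, 2, 0, 0, 9]  (intermediate slots default to 0 for :int32)

  list[-1] = 7
  list.to_a   # => [1, 2, 0, 0, 7]
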
@@ -128,6 +185,8 @@ VALUE RepeatedField_index_set(VALUE _self, VALUE _index, VALUE val) {
 static int kInitialSize = 8;
 
 void RepeatedField_reserve(RepeatedField* self, int new_size) {
+  void* old_elems = self->elements;
+  int elem_size = native_slot_size(self->field_type);
   if (new_size <= self->capacity) {
     return;
   }
@@ -137,8 +196,6 @@ void RepeatedField_reserve(RepeatedField* self, int new_size) {
   while (self->capacity < new_size) {
     self->capacity *= 2;
   }
-  void* old_elems = self->elements;
-  int elem_size = native_slot_size(self->field_type);
   self->elements = ALLOC_N(uint8_t, elem_size * self->capacity);
   if (old_elems != NULL) {
     memcpy(self->elements, old_elems, self->size * elem_size);
@@ -156,23 +213,26 @@ VALUE RepeatedField_push(VALUE _self, VALUE val) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   upb_fieldtype_t field_type = self->field_type;
   int element_size = native_slot_size(field_type);
+  void* memory;
+
   RepeatedField_reserve(self, self->size + 1);
-
-  void* memory = (void *) (((uint8_t *)self->elements) + index * element_size);
+  memory = (void *) (((uint8_t *)self->elements) + self->size * element_size);
   native_slot_set(field_type, self->field_type_class, memory, val);
-  // native_slot_set may raise an error; bump
+  // native_slot_set may raise an error; bump size only after set.
   self->size++;
   return _self;
 }
 
+
 // Used by parsing handlers.
 void RepeatedField_push_native(VALUE _self, void* data) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   upb_fieldtype_t field_type = self->field_type;
   int element_size = native_slot_size(field_type);
+  void* memory;
+
   RepeatedField_reserve(self, self->size + 1);
-
-  void* memory = (void *) (((uint8_t *)self->elements) + index * element_size);
+  memory = (void *) (((uint8_t *)self->elements) + self->size * element_size);
   memcpy(memory, data, element_size);
   self->size++;
 }
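
Because RepeatedField_push returns _self, pushes chain from Ruby. A small illustrative sketch:

  require 'google/protobuf'

  list = Google::Protobuf::RepeatedField.new(:int32)
  list.push(1).push(2) << 3 << 4   # both push and << return the field itself
  list.to_a                        # => [1, 2, 3, 4]
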
@@ -181,44 +241,31 @@ void* RepeatedField_index_native(VALUE _self, int index) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   upb_fieldtype_t field_type = self->field_type;
   int element_size = native_slot_size(field_type);
-  return (
+  return RepeatedField_memoryat(self, index, element_size);
 }
 
 /*
- *
- *     RepeatedField.pop => value
- *
- * Removes the last element and returns it. Throws an exception if the repeated
- * field is empty.
+ * Private ruby method, used by RepeatedField.pop
  */
-VALUE
+VALUE RepeatedField_pop_one(VALUE _self) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   upb_fieldtype_t field_type = self->field_type;
   VALUE field_type_class = self->field_type_class;
   int element_size = native_slot_size(field_type);
+  int index;
+  void* memory;
+  VALUE ret;
+
   if (self->size == 0) {
-
+    return Qnil;
   }
-
-
-
+  index = self->size - 1;
+  memory = RepeatedField_memoryat(self, index, element_size);
+  ret = native_slot_get(field_type, field_type_class, memory);
   self->size--;
   return ret;
 }
 
-/*
- * call-seq:
- *     RepeatedField.insert(*args)
- *
- * Pushes each arg in turn onto the end of the repeated field.
- */
-VALUE RepeatedField_insert(int argc, VALUE* argv, VALUE _self) {
-  for (int i = 0; i < argc; i++) {
-    RepeatedField_push(_self, argv[i]);
-  }
-  return Qnil;
-}
-
 /*
  * call-seq:
  *     RepeatedField.replace(list)
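
The public pop is expected to live in the new data/lib/google/protobuf/repeated_field.rb and is assumed here to delegate to this private hook; under that assumption the behavior looks like the sketch below.

  require 'google/protobuf'

  list = Google::Protobuf::RepeatedField.new(:int32)
  list << 1 << 2

  list.pop   # => 2
  list.pop   # => 1
  list.pop   # => nil  (pop_one returns nil on an empty field instead of raising)
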
@@ -232,7 +279,7 @@ VALUE RepeatedField_replace(VALUE _self, VALUE list) {
   for (int i = 0; i < RARRAY_LEN(list); i++) {
     RepeatedField_push(_self, rb_ary_entry(list, i));
   }
-  return
+  return list;
 }
 
 /*
@@ -244,7 +291,7 @@ VALUE RepeatedField_replace(VALUE _self, VALUE list) {
 VALUE RepeatedField_clear(VALUE _self) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   self->size = 0;
-  return
+  return _self;
 }
 
 /*
@@ -283,10 +330,10 @@ VALUE RepeatedField_dup(VALUE _self) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   VALUE new_rptfield = RepeatedField_new_this_type(_self);
   RepeatedField* new_rptfield_self = ruby_to_RepeatedField(new_rptfield);
-  RepeatedField_reserve(new_rptfield_self, self->size);
   upb_fieldtype_t field_type = self->field_type;
   size_t elem_size = native_slot_size(field_type);
   size_t off = 0;
+  RepeatedField_reserve(new_rptfield_self, self->size);
   for (int i = 0; i < self->size; i++, off += elem_size) {
     void* to_mem = (uint8_t *)new_rptfield_self->elements + off;
     void* from_mem = (uint8_t *)self->elements + off;
@@ -302,10 +349,10 @@ VALUE RepeatedField_deep_copy(VALUE _self) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   VALUE new_rptfield = RepeatedField_new_this_type(_self);
   RepeatedField* new_rptfield_self = ruby_to_RepeatedField(new_rptfield);
-  RepeatedField_reserve(new_rptfield_self, self->size);
   upb_fieldtype_t field_type = self->field_type;
   size_t elem_size = native_slot_size(field_type);
   size_t off = 0;
+  RepeatedField_reserve(new_rptfield_self, self->size);
   for (int i = 0; i < self->size; i++, off += elem_size) {
     void* to_mem = (uint8_t *)new_rptfield_self->elements + off;
     void* from_mem = (uint8_t *)self->elements + off;
@@ -333,7 +380,6 @@ VALUE RepeatedField_to_ary(VALUE _self) {
   for (int i = 0; i < self->size; i++, off += elem_size) {
     void* mem = ((uint8_t *)self->elements) + off;
     VALUE elem = native_slot_get(field_type, self->field_type_class, mem);
-
     rb_ary_push(ary, elem);
   }
   return ary;
@@ -353,34 +399,39 @@ VALUE RepeatedField_to_ary(VALUE _self) {
  * indicated that every element has equal value.
  */
 VALUE RepeatedField_eq(VALUE _self, VALUE _other) {
+  RepeatedField* self;
+  RepeatedField* other;
+
   if (_self == _other) {
     return Qtrue;
   }
-  RepeatedField* self = ruby_to_RepeatedField(_self);
 
   if (TYPE(_other) == T_ARRAY) {
     VALUE self_ary = RepeatedField_to_ary(_self);
     return rb_equal(self_ary, _other);
   }
 
-
+  self = ruby_to_RepeatedField(_self);
+  other = ruby_to_RepeatedField(_other);
   if (self->field_type != other->field_type ||
       self->field_type_class != other->field_type_class ||
       self->size != other->size) {
     return Qfalse;
   }
 
-
-
-
-
-
-
-
-
+  {
+    upb_fieldtype_t field_type = self->field_type;
+    size_t elem_size = native_slot_size(field_type);
+    size_t off = 0;
+    for (int i = 0; i < self->size; i++, off += elem_size) {
+      void* self_mem = ((uint8_t *)self->elements) + off;
+      void* other_mem = ((uint8_t *)other->elements) + off;
+      if (!native_slot_eq(field_type, self_mem, other_mem)) {
+        return Qfalse;
+      }
     }
+    return Qtrue;
   }
-  return Qtrue;
 }
 
 /*
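
From Ruby, the comparison short-circuits on identity, compares plain Arrays via to_ary, and otherwise requires matching type, size, and element-wise native_slot_eq. An illustrative sketch:

  require 'google/protobuf'

  a = Google::Protobuf::RepeatedField.new(:int32)
  b = Google::Protobuf::RepeatedField.new(:int32)
  [a, b].each { |l| l << 1 << 2 << 3 }

  a == b           # => true   (same type, size, and elements)
  a == [1, 2, 3]   # => true   (plain Arrays are compared via to_ary)
  a == [1, 2]      # => false
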
@@ -409,19 +460,6 @@ VALUE RepeatedField_hash(VALUE _self) {
   return hash;
 }
 
-/*
- * call-seq:
- *     RepeatedField.inspect => string
- *
- * Returns a string representing this repeated field's elements. It will be
- * formated as "[<element>, <element>, ...]", with each element's string
- * representation computed by its own #inspect method.
- */
-VALUE RepeatedField_inspect(VALUE _self) {
-  VALUE self_ary = RepeatedField_to_ary(_self);
-  return rb_funcall(self_ary, rb_intern("inspect"), 0);
-}
-
 /*
  * call-seq:
  *     RepeatedField.+(other) => repeated field
@@ -458,14 +496,29 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
   return dupped;
 }
 
+/*
+ * call-seq:
+ *     RepeatedField.concat(other) => self
+ *
+ * concats the passed in array to self. Returns a Ruby array.
+ */
+VALUE RepeatedField_concat(VALUE _self, VALUE list) {
+  Check_Type(list, T_ARRAY);
+  for (int i = 0; i < RARRAY_LEN(list); i++) {
+    RepeatedField_push(_self, rb_ary_entry(list, i));
+  }
+  return _self;
+}
+
+
 void validate_type_class(upb_fieldtype_t type, VALUE klass) {
-  if (
+  if (rb_ivar_get(klass, descriptor_instancevar_interned) == Qnil) {
     rb_raise(rb_eArgError,
              "Type class has no descriptor. Please pass a "
             "class or enum as returned by the DescriptorPool.");
   }
   if (type == UPB_TYPE_MESSAGE) {
-    VALUE desc =
+    VALUE desc = rb_ivar_get(klass, descriptor_instancevar_interned);
     if (!RB_TYPE_P(desc, T_DATA) || !RTYPEDDATA_P(desc) ||
         RTYPEDDATA_TYPE(desc) != &_Descriptor_type) {
       rb_raise(rb_eArgError, "Descriptor has an incorrect type.");
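
Note the call-seq comment says concat returns a Ruby array while the body returns _self; as written, the method type-checks its argument and funnels each element through RepeatedField_push. An illustrative sketch (not from this diff):

  require 'google/protobuf'

  list = Google::Protobuf::RepeatedField.new(:int32)
  list << 1

  list.concat([2, 3])
  list.to_a            # => [1, 2, 3]

  begin
    list.concat("oops")
  rescue TypeError     # Check_Type(list, T_ARRAY) rejects non-Array arguments
    puts "only Arrays can be concatenated"
  end
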
@@ -475,7 +528,7 @@ void validate_type_class(upb_fieldtype_t type, VALUE klass) {
                "Message class was not returned by the DescriptorPool.");
     }
   } else if (type == UPB_TYPE_ENUM) {
-    VALUE enumdesc =
+    VALUE enumdesc = rb_ivar_get(klass, descriptor_instancevar_interned);
     if (!RB_TYPE_P(enumdesc, T_DATA) || !RTYPEDDATA_P(enumdesc) ||
         RTYPEDDATA_TYPE(enumdesc) != &_EnumDescriptor_type) {
       rb_raise(rb_eArgError, "Descriptor has an incorrect type.");
@@ -525,9 +578,9 @@ void RepeatedField_init_args(int argc, VALUE* argv,
 
 void RepeatedField_mark(void* _self) {
   RepeatedField* self = (RepeatedField*)_self;
-  rb_gc_mark(self->field_type_class);
   upb_fieldtype_t field_type = self->field_type;
   int element_size = native_slot_size(field_type);
+  rb_gc_mark(self->field_type_class);
   for (int i = 0; i < self->size; i++) {
     void* memory = (((uint8_t *)self->elements) + i * element_size);
     native_slot_mark(self->field_type, memory);
@@ -558,8 +611,7 @@ VALUE RepeatedField_alloc(VALUE klass) {
   self->capacity = 0;
   self->field_type = -1;
   self->field_type_class = Qnil;
-
-  return ret;
+  return TypedData_Wrap_Struct(klass, &RepeatedField_type, self);
 }
 
 VALUE RepeatedField_init(int argc, VALUE* argv, VALUE self) {
@@ -577,22 +629,23 @@ void RepeatedField_register(VALUE module) {
   rb_define_method(klass, "initialize",
                    RepeatedField_init, -1);
   rb_define_method(klass, "each", RepeatedField_each, 0);
-  rb_define_method(klass, "[]", RepeatedField_index, 1);
+  rb_define_method(klass, "[]", RepeatedField_index, -1);
+  rb_define_method(klass, "at", RepeatedField_index, -1);
   rb_define_method(klass, "[]=", RepeatedField_index_set, 2);
   rb_define_method(klass, "push", RepeatedField_push, 1);
   rb_define_method(klass, "<<", RepeatedField_push, 1);
-
-  rb_define_method(klass, "insert", RepeatedField_insert, -1);
+  rb_define_private_method(klass, "pop_one", RepeatedField_pop_one, 0);
   rb_define_method(klass, "replace", RepeatedField_replace, 1);
   rb_define_method(klass, "clear", RepeatedField_clear, 0);
   rb_define_method(klass, "length", RepeatedField_length, 0);
+  rb_define_method(klass, "size", RepeatedField_length, 0);
   rb_define_method(klass, "dup", RepeatedField_dup, 0);
   // Also define #clone so that we don't inherit Object#clone.
   rb_define_method(klass, "clone", RepeatedField_dup, 0);
   rb_define_method(klass, "==", RepeatedField_eq, 1);
   rb_define_method(klass, "to_ary", RepeatedField_to_ary, 0);
   rb_define_method(klass, "hash", RepeatedField_hash, 0);
-  rb_define_method(klass, "inspect", RepeatedField_inspect, 0);
   rb_define_method(klass, "+", RepeatedField_plus, 1);
+  rb_define_method(klass, "concat", RepeatedField_concat, 1);
   rb_include_module(klass, rb_mEnumerable);
 }
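
Taken together, the registration changes are visible from Ruby roughly as below (illustrative sketch; at shares RepeatedField_index with [], and size is a new alias for length):

  require 'google/protobuf'

  list = Google::Protobuf::RepeatedField.new(:string)
  list << "x" << "y"

  list.at(0)    # => "x"
  list.at(-1)   # => "y"
  list[0, 1]    # => ["x"]
  list.size     # => 2
  list.length   # => 2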