google-protobuf 3.7.0 → 3.13.0

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Note: this release of google-protobuf has been flagged as potentially problematic.
@@ -59,6 +59,7 @@ typedef struct OneofDescriptor OneofDescriptor;
  typedef struct EnumDescriptor EnumDescriptor;
  typedef struct MessageLayout MessageLayout;
  typedef struct MessageField MessageField;
+ typedef struct MessageOneof MessageOneof;
  typedef struct MessageHeader MessageHeader;
  typedef struct MessageBuilderContext MessageBuilderContext;
  typedef struct OneofBuilderContext OneofBuilderContext;
@@ -107,62 +108,68 @@ typedef struct Builder Builder;
  // -----------------------------------------------------------------------------

  struct DescriptorPool {
+ VALUE def_to_descriptor; // Hash table of def* -> Ruby descriptor.
  upb_symtab* symtab;
+ upb_handlercache* fill_handler_cache;
+ upb_handlercache* pb_serialize_handler_cache;
+ upb_handlercache* json_serialize_handler_cache;
+ upb_handlercache* json_serialize_handler_preserve_cache;
+ upb_pbcodecache* fill_method_cache;
+ upb_json_codecache* json_fill_method_cache;
  };

  struct Descriptor {
  const upb_msgdef* msgdef;
  MessageLayout* layout;
- VALUE klass; // begins as nil
- const upb_handlers* fill_handlers;
- const upb_pbdecodermethod* fill_method;
- const upb_json_parsermethod* json_fill_method;
- const upb_handlers* pb_serialize_handlers;
- const upb_handlers* json_serialize_handlers;
- const upb_handlers* json_serialize_handlers_preserve;
+ VALUE klass;
+ VALUE descriptor_pool;
  };

  struct FileDescriptor {
  const upb_filedef* filedef;
+ VALUE descriptor_pool; // Owns the upb_filedef.
  };

  struct FieldDescriptor {
  const upb_fielddef* fielddef;
+ VALUE descriptor_pool; // Owns the upb_fielddef.
  };

  struct OneofDescriptor {
  const upb_oneofdef* oneofdef;
+ VALUE descriptor_pool; // Owns the upb_oneofdef.
  };

  struct EnumDescriptor {
  const upb_enumdef* enumdef;
  VALUE module; // begins as nil
+ VALUE descriptor_pool; // Owns the upb_enumdef.
  };

  struct MessageBuilderContext {
- VALUE descriptor;
- VALUE builder;
+ google_protobuf_DescriptorProto* msg_proto;
+ VALUE file_builder;
  };

  struct OneofBuilderContext {
- VALUE descriptor;
- VALUE builder;
+ int oneof_index;
+ VALUE message_builder;
  };

  struct EnumBuilderContext {
- VALUE enumdesc;
+ google_protobuf_EnumDescriptorProto* enum_proto;
+ VALUE file_builder;
  };

  struct FileBuilderContext {
- VALUE pending_list;
- VALUE file_descriptor;
- VALUE builder;
+ upb_arena *arena;
+ google_protobuf_FileDescriptorProto* file_proto;
+ VALUE descriptor_pool;
  };

  struct Builder {
- VALUE pending_list;
- VALUE default_file_descriptor;
- upb_def** defs; // used only while finalizing
+ VALUE descriptor_pool;
+ VALUE default_file_builder;
  };

  extern VALUE cDescriptorPool;
@@ -191,7 +198,6 @@ void DescriptorPool_free(void* _self);
  VALUE DescriptorPool_alloc(VALUE klass);
  void DescriptorPool_register(VALUE module);
  DescriptorPool* ruby_to_DescriptorPool(VALUE value);
- VALUE DescriptorPool_add(VALUE _self, VALUE def);
  VALUE DescriptorPool_build(int argc, VALUE* argv, VALUE _self);
  VALUE DescriptorPool_lookup(VALUE _self, VALUE name);
  VALUE DescriptorPool_generated_pool(VALUE _self);
@@ -203,13 +209,11 @@ void Descriptor_free(void* _self);
  VALUE Descriptor_alloc(VALUE klass);
  void Descriptor_register(VALUE module);
  Descriptor* ruby_to_Descriptor(VALUE value);
- VALUE Descriptor_initialize(VALUE _self, VALUE file_descriptor_rb);
+ VALUE Descriptor_initialize(VALUE _self, VALUE cookie, VALUE descriptor_pool,
+ VALUE ptr);
  VALUE Descriptor_name(VALUE _self);
- VALUE Descriptor_name_set(VALUE _self, VALUE str);
  VALUE Descriptor_each(VALUE _self);
  VALUE Descriptor_lookup(VALUE _self, VALUE name);
- VALUE Descriptor_add_field(VALUE _self, VALUE obj);
- VALUE Descriptor_add_oneof(VALUE _self, VALUE obj);
  VALUE Descriptor_each_oneof(VALUE _self);
  VALUE Descriptor_lookup_oneof(VALUE _self, VALUE name);
  VALUE Descriptor_msgclass(VALUE _self);
@@ -221,28 +225,24 @@ void FileDescriptor_free(void* _self);
  VALUE FileDescriptor_alloc(VALUE klass);
  void FileDescriptor_register(VALUE module);
  FileDescriptor* ruby_to_FileDescriptor(VALUE value);
- VALUE FileDescriptor_initialize(int argc, VALUE* argv, VALUE _self);
+ VALUE FileDescriptor_initialize(VALUE _self, VALUE cookie,
+ VALUE descriptor_pool, VALUE ptr);
  VALUE FileDescriptor_name(VALUE _self);
  VALUE FileDescriptor_syntax(VALUE _self);
- VALUE FileDescriptor_syntax_set(VALUE _self, VALUE syntax);

  void FieldDescriptor_mark(void* _self);
  void FieldDescriptor_free(void* _self);
  VALUE FieldDescriptor_alloc(VALUE klass);
  void FieldDescriptor_register(VALUE module);
  FieldDescriptor* ruby_to_FieldDescriptor(VALUE value);
+ VALUE FieldDescriptor_initialize(VALUE _self, VALUE cookie,
+ VALUE descriptor_pool, VALUE ptr);
  VALUE FieldDescriptor_name(VALUE _self);
- VALUE FieldDescriptor_name_set(VALUE _self, VALUE str);
  VALUE FieldDescriptor_type(VALUE _self);
- VALUE FieldDescriptor_type_set(VALUE _self, VALUE type);
  VALUE FieldDescriptor_default(VALUE _self);
- VALUE FieldDescriptor_default_set(VALUE _self, VALUE default_value);
  VALUE FieldDescriptor_label(VALUE _self);
- VALUE FieldDescriptor_label_set(VALUE _self, VALUE label);
  VALUE FieldDescriptor_number(VALUE _self);
- VALUE FieldDescriptor_number_set(VALUE _self, VALUE number);
  VALUE FieldDescriptor_submsg_name(VALUE _self);
- VALUE FieldDescriptor_submsg_name_set(VALUE _self, VALUE value);
  VALUE FieldDescriptor_subtype(VALUE _self);
  VALUE FieldDescriptor_has(VALUE _self, VALUE msg_rb);
  VALUE FieldDescriptor_clear(VALUE _self, VALUE msg_rb);
@@ -256,21 +256,20 @@ void OneofDescriptor_free(void* _self);
  VALUE OneofDescriptor_alloc(VALUE klass);
  void OneofDescriptor_register(VALUE module);
  OneofDescriptor* ruby_to_OneofDescriptor(VALUE value);
+ VALUE OneofDescriptor_initialize(VALUE _self, VALUE cookie,
+ VALUE descriptor_pool, VALUE ptr);
  VALUE OneofDescriptor_name(VALUE _self);
- VALUE OneofDescriptor_name_set(VALUE _self, VALUE value);
- VALUE OneofDescriptor_add_field(VALUE _self, VALUE field);
- VALUE OneofDescriptor_each(VALUE _self, VALUE field);
+ VALUE OneofDescriptor_each(VALUE _self);

  void EnumDescriptor_mark(void* _self);
  void EnumDescriptor_free(void* _self);
  VALUE EnumDescriptor_alloc(VALUE klass);
+ VALUE EnumDescriptor_initialize(VALUE _self, VALUE cookie,
+ VALUE descriptor_pool, VALUE ptr);
  void EnumDescriptor_register(VALUE module);
  EnumDescriptor* ruby_to_EnumDescriptor(VALUE value);
- VALUE EnumDescriptor_initialize(VALUE _self, VALUE file_descriptor_rb);
  VALUE EnumDescriptor_file_descriptor(VALUE _self);
  VALUE EnumDescriptor_name(VALUE _self);
- VALUE EnumDescriptor_name_set(VALUE _self, VALUE str);
- VALUE EnumDescriptor_add_value(VALUE _self, VALUE name, VALUE number);
  VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name);
  VALUE EnumDescriptor_lookup_value(VALUE _self, VALUE number);
  VALUE EnumDescriptor_each(VALUE _self);
@@ -283,9 +282,10 @@ VALUE MessageBuilderContext_alloc(VALUE klass);
  void MessageBuilderContext_register(VALUE module);
  MessageBuilderContext* ruby_to_MessageBuilderContext(VALUE value);
  VALUE MessageBuilderContext_initialize(VALUE _self,
- VALUE descriptor,
- VALUE builder);
+ VALUE _file_builder,
+ VALUE name);
  VALUE MessageBuilderContext_optional(int argc, VALUE* argv, VALUE _self);
+ VALUE MessageBuilderContext_proto3_optional(int argc, VALUE* argv, VALUE _self);
  VALUE MessageBuilderContext_required(int argc, VALUE* argv, VALUE _self);
  VALUE MessageBuilderContext_repeated(int argc, VALUE* argv, VALUE _self);
  VALUE MessageBuilderContext_map(int argc, VALUE* argv, VALUE _self);
@@ -306,15 +306,20 @@ void EnumBuilderContext_free(void* _self);
  VALUE EnumBuilderContext_alloc(VALUE klass);
  void EnumBuilderContext_register(VALUE module);
  EnumBuilderContext* ruby_to_EnumBuilderContext(VALUE value);
- VALUE EnumBuilderContext_initialize(VALUE _self, VALUE enumdesc);
+ VALUE EnumBuilderContext_initialize(VALUE _self, VALUE _file_builder,
+ VALUE name);
  VALUE EnumBuilderContext_value(VALUE _self, VALUE name, VALUE number);

  void FileBuilderContext_mark(void* _self);
  void FileBuilderContext_free(void* _self);
  VALUE FileBuilderContext_alloc(VALUE klass);
  void FileBuilderContext_register(VALUE module);
- VALUE FileBuilderContext_initialize(VALUE _self, VALUE file_descriptor,
- VALUE builder);
+ FileBuilderContext* ruby_to_FileBuilderContext(VALUE _self);
+ upb_strview FileBuilderContext_strdup(VALUE _self, VALUE rb_str);
+ upb_strview FileBuilderContext_strdup_name(VALUE _self, VALUE rb_str);
+ upb_strview FileBuilderContext_strdup_sym(VALUE _self, VALUE rb_sym);
+ VALUE FileBuilderContext_initialize(VALUE _self, VALUE descriptor_pool,
+ VALUE name, VALUE options);
  VALUE FileBuilderContext_add_message(VALUE _self, VALUE name);
  VALUE FileBuilderContext_add_enum(VALUE _self, VALUE name);
  VALUE FileBuilderContext_pending_descriptors(VALUE _self);
@@ -324,7 +329,8 @@ void Builder_free(void* _self);
  VALUE Builder_alloc(VALUE klass);
  void Builder_register(VALUE module);
  Builder* ruby_to_Builder(VALUE value);
- VALUE Builder_initialize(VALUE _self);
+ VALUE Builder_build(VALUE _self);
+ VALUE Builder_initialize(VALUE _self, VALUE descriptor_pool);
  VALUE Builder_add_file(int argc, VALUE *argv, VALUE _self);
  VALUE Builder_add_message(VALUE _self, VALUE name);
  VALUE Builder_add_enum(VALUE _self, VALUE name);
@@ -337,14 +343,16 @@ VALUE Builder_finalize_to_pool(VALUE _self, VALUE pool_rb);
  #define NATIVE_SLOT_MAX_SIZE sizeof(uint64_t)

  size_t native_slot_size(upb_fieldtype_t type);
- void native_slot_set(upb_fieldtype_t type,
+ void native_slot_set(const char* name,
+ upb_fieldtype_t type,
  VALUE type_class,
  void* memory,
  VALUE value);
  // Atomically (with respect to Ruby VM calls) either update the value and set a
  // oneof case, or do neither. If |case_memory| is null, then no case value is
  // set.
- void native_slot_set_value_and_case(upb_fieldtype_t type,
+ void native_slot_set_value_and_case(const char* name,
+ upb_fieldtype_t type,
  VALUE type_class,
  void* memory,
  VALUE value,
@@ -356,17 +364,22 @@ VALUE native_slot_get(upb_fieldtype_t type,
  void native_slot_init(upb_fieldtype_t type, void* memory);
  void native_slot_mark(upb_fieldtype_t type, void* memory);
  void native_slot_dup(upb_fieldtype_t type, void* to, void* from);
- void native_slot_deep_copy(upb_fieldtype_t type, void* to, void* from);
- bool native_slot_eq(upb_fieldtype_t type, void* mem1, void* mem2);
+ void native_slot_deep_copy(upb_fieldtype_t type, VALUE type_class, void* to,
+ void* from);
+ bool native_slot_eq(upb_fieldtype_t type, VALUE type_class, void* mem1,
+ void* mem2);

  VALUE native_slot_encode_and_freeze_string(upb_fieldtype_t type, VALUE value);
- void native_slot_check_int_range_precision(upb_fieldtype_t type, VALUE value);
+ void native_slot_check_int_range_precision(const char* name, upb_fieldtype_t type, VALUE value);
+ uint32_t slot_read_oneof_case(MessageLayout* layout, const void* storage,
+ const upb_oneofdef* oneof);
+ bool is_value_field(const upb_fielddef* f);

  extern rb_encoding* kRubyStringUtf8Encoding;
  extern rb_encoding* kRubyStringASCIIEncoding;
  extern rb_encoding* kRubyString8bitEncoding;

- VALUE field_type_class(const upb_fielddef* field);
+ VALUE field_type_class(const MessageLayout* layout, const upb_fielddef* field);

  #define MAP_KEY_FIELD 1
  #define MAP_VALUE_FIELD 2
@@ -409,6 +422,7 @@ extern VALUE cRepeatedField;

  RepeatedField* ruby_to_RepeatedField(VALUE value);

+ VALUE RepeatedField_new_this_type(VALUE _self);
  VALUE RepeatedField_each(VALUE _self);
  VALUE RepeatedField_index(int argc, VALUE* argv, VALUE _self);
  void* RepeatedField_index_native(VALUE _self, int index);
@@ -457,6 +471,7 @@ extern VALUE cMap;

  Map* ruby_to_Map(VALUE value);

+ VALUE Map_new_this_type(VALUE _self);
  VALUE Map_each(VALUE _self);
  VALUE Map_keys(VALUE _self);
  VALUE Map_values(VALUE _self);
@@ -490,22 +505,35 @@ VALUE Map_iter_value(Map_iter* iter);
  // Message layout / storage.
  // -----------------------------------------------------------------------------

- #define MESSAGE_FIELD_NO_CASE ((size_t)-1)
- #define MESSAGE_FIELD_NO_HASBIT ((size_t)-1)
+ #define MESSAGE_FIELD_NO_HASBIT ((uint32_t)-1)

  struct MessageField {
- size_t offset;
- size_t case_offset; // for oneofs, a uint32. Else, MESSAGE_FIELD_NO_CASE.
- size_t hasbit;
+ uint32_t offset;
+ uint32_t hasbit;
  };

+ struct MessageOneof {
+ uint32_t offset;
+ uint32_t case_offset;
+ };
+
+ // MessageLayout is owned by the enclosing Descriptor, which must outlive us.
  struct MessageLayout {
+ const Descriptor* desc;
  const upb_msgdef* msgdef;
+ void* empty_template; // Can memcpy() onto a layout to clear it.
  MessageField* fields;
- size_t size;
+ MessageOneof* oneofs;
+ uint32_t size;
+ uint32_t value_offset;
+ int value_count;
+ int repeated_count;
+ int map_count;
  };

- MessageLayout* create_layout(const upb_msgdef* msgdef);
+ #define ONEOF_CASE_MASK 0x80000000
+
+ void create_layout(Descriptor* desc);
  void free_layout(MessageLayout* layout);
  bool field_contains_hasbit(MessageLayout* layout,
  const upb_fielddef* field);
@@ -531,6 +559,9 @@ VALUE layout_eq(MessageLayout* layout, void* msg1, void* msg2);
  VALUE layout_hash(MessageLayout* layout, void* storage);
  VALUE layout_inspect(MessageLayout* layout, void* storage);

+ bool is_wrapper_type_field(const upb_fielddef* field);
+ VALUE ruby_wrapper_type(VALUE type_class, VALUE value);
+
  // -----------------------------------------------------------------------------
  // Message class creation.
  // -----------------------------------------------------------------------------
@@ -554,7 +585,7 @@ struct MessageHeader {

  extern rb_data_type_t Message_type;

- VALUE build_class_from_descriptor(Descriptor* descriptor);
+ VALUE build_class_from_descriptor(VALUE descriptor);
  void* Message_data(void* msg);
  void Message_mark(void* self);
  void Message_free(void* self);
@@ -578,23 +609,33 @@ VALUE Message_encode_json(int argc, VALUE* argv, VALUE klass);
  VALUE Google_Protobuf_discard_unknown(VALUE self, VALUE msg_rb);
  VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj);

- VALUE build_module_from_enumdesc(EnumDescriptor* enumdef);
+ VALUE build_module_from_enumdesc(VALUE _enumdesc);
  VALUE enum_lookup(VALUE self, VALUE number);
  VALUE enum_resolve(VALUE self, VALUE sym);
+ VALUE enum_descriptor(VALUE self);

  const upb_pbdecodermethod *new_fillmsg_decodermethod(
  Descriptor* descriptor, const void *owner);
+ void add_handlers_for_message(const void *closure, upb_handlers *h);

  // Maximum depth allowed during encoding, to avoid stack overflows due to
  // cycles.
  #define ENCODE_MAX_NESTING 63

+ // -----------------------------------------------------------------------------
+ // A cache of frozen string objects to use as field defaults.
+ // -----------------------------------------------------------------------------
+ VALUE get_frozen_string(const char* data, size_t size, bool binary);
+
  // -----------------------------------------------------------------------------
  // Global map from upb {msg,enum}defs to wrapper Descriptor/EnumDescriptor
  // instances.
  // -----------------------------------------------------------------------------
- void add_def_obj(const void* def, VALUE value);
- VALUE get_def_obj(const void* def);
+ VALUE get_msgdef_obj(VALUE descriptor_pool, const upb_msgdef* def);
+ VALUE get_enumdef_obj(VALUE descriptor_pool, const upb_enumdef* def);
+ VALUE get_fielddef_obj(VALUE descriptor_pool, const upb_fielddef* def);
+ VALUE get_filedef_obj(VALUE descriptor_pool, const upb_filedef* def);
+ VALUE get_oneofdef_obj(VALUE descriptor_pool, const upb_oneofdef* def);

  // -----------------------------------------------------------------------------
  // Utilities.
@@ -610,4 +651,17 @@ void check_upb_status(const upb_status* status, const char* msg);

  extern ID descriptor_instancevar_interned;

+ // A distinct object that is not accessible from Ruby. We use this as a
+ // constructor argument to enforce that certain objects cannot be created from
+ // Ruby.
+ extern VALUE c_only_cookie;
+
+ #ifdef NDEBUG
+ #define UPB_ASSERT(expr) do {} while (false && (expr))
+ #else
+ #define UPB_ASSERT(expr) assert(expr)
+ #endif
+
+ #define UPB_UNUSED(var) (void)var
+
  #endif // __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
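One detail worth noting in the new UPB_ASSERT definition just above: in NDEBUG builds it expands to do {} while (false && (expr)) rather than to nothing. Below is a minimal standalone sketch (plain C, nothing gem-specific assumed) of what that shape buys: because expr still appears in the code, variables and functions referenced only inside asserts are not flagged as unused in release builds, yet the false && short-circuit guarantees the expression is never evaluated, and the empty loop compiles away.

#include <stdbool.h>
#include <stdio.h>

/* The NDEBUG variant copied from the diff above. */
#define UPB_ASSERT(expr) do {} while (false && (expr))

static int expensive_check(void) {
  puts("side effect");   /* never printed: the assert body is dead code */
  return 1;
}

int main(void) {
  int only_used_in_asserts = 42;
  /* Compiled and type-checked, but never executed in this build. */
  UPB_ASSERT(expensive_check() == only_used_in_asserts);
  return 0;
}

The companion macro UPB_UNUSED(var), defined as (void)var, suppresses unused-variable and unused-parameter warnings in the same spirit for names that are only consumed in debug code.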
@@ -64,10 +64,11 @@ VALUE RepeatedField_subarray(VALUE _self, long beg, long len) {
  int element_size = native_slot_size(self->field_type);
  upb_fieldtype_t field_type = self->field_type;
  VALUE field_type_class = self->field_type_class;
-
  size_t off = beg * element_size;
  VALUE ary = rb_ary_new2(len);
- for (int i = beg; i < beg + len; i++, off += element_size) {
+ int i;
+
+ for (i = beg; i < beg + len; i++, off += element_size) {
  void* mem = ((uint8_t *)self->elements) + off;
  VALUE elem = native_slot_get(field_type, field_type_class, mem);
  rb_ary_push(ary, elem);
@@ -88,9 +89,10 @@ VALUE RepeatedField_each(VALUE _self) {
  upb_fieldtype_t field_type = self->field_type;
  VALUE field_type_class = self->field_type_class;
  int element_size = native_slot_size(field_type);
-
  size_t off = 0;
- for (int i = 0; i < self->size; i++, off += element_size) {
+ int i;
+
+ for (i = 0; i < self->size; i++, off += element_size) {
  void* memory = (void *) (((uint8_t *)self->elements) + off);
  VALUE val = native_slot_get(field_type, field_type_class, memory);
  rb_yield(val);
@@ -169,8 +171,10 @@ VALUE RepeatedField_index_set(VALUE _self, VALUE _index, VALUE val) {
  if (index >= self->size) {
  upb_fieldtype_t field_type = self->field_type;
  int element_size = native_slot_size(field_type);
+ int i;
+
  RepeatedField_reserve(self, index + 1);
- for (int i = self->size; i <= index; i++) {
+ for (i = self->size; i <= index; i++) {
  void* elem = RepeatedField_memoryat(self, i, element_size);
  native_slot_init(field_type, elem);
  }
@@ -178,7 +182,7 @@ VALUE RepeatedField_index_set(VALUE _self, VALUE _index, VALUE val) {
  }

  memory = RepeatedField_memoryat(self, index, element_size);
- native_slot_set(field_type, field_type_class, memory, val);
+ native_slot_set("", field_type, field_type_class, memory, val);
  return Qnil;
  }

@@ -217,12 +221,19 @@ VALUE RepeatedField_push(VALUE _self, VALUE val) {

  RepeatedField_reserve(self, self->size + 1);
  memory = (void *) (((uint8_t *)self->elements) + self->size * element_size);
- native_slot_set(field_type, self->field_type_class, memory, val);
+ native_slot_set("", field_type, self->field_type_class, memory, val);
  // native_slot_set may raise an error; bump size only after set.
  self->size++;
  return _self;
  }

+ VALUE RepeatedField_push_vararg(VALUE _self, VALUE args) {
+ int i;
+ for (i = 0; i < RARRAY_LEN(args); i++) {
+ RepeatedField_push(_self, rb_ary_entry(args, i));
+ }
+ return _self;
+ }

  // Used by parsing handlers.
  void RepeatedField_push_native(VALUE _self, void* data) {
@@ -279,9 +290,11 @@ VALUE RepeatedField_pop_one(VALUE _self) {
  */
  VALUE RepeatedField_replace(VALUE _self, VALUE list) {
  RepeatedField* self = ruby_to_RepeatedField(_self);
+ int i;
+
  Check_Type(list, T_ARRAY);
  self->size = 0;
- for (int i = 0; i < RARRAY_LEN(list); i++) {
+ for (i = 0; i < RARRAY_LEN(list); i++) {
  RepeatedField_push(_self, rb_ary_entry(list, i));
  }
  return list;
@@ -310,7 +323,7 @@ VALUE RepeatedField_length(VALUE _self) {
  return INT2NUM(self->size);
  }

- static VALUE RepeatedField_new_this_type(VALUE _self) {
+ VALUE RepeatedField_new_this_type(VALUE _self) {
  RepeatedField* self = ruby_to_RepeatedField(_self);
  VALUE new_rptfield = Qnil;
  VALUE element_type = fieldtype_to_ruby(self->field_type);
@@ -338,8 +351,10 @@ VALUE RepeatedField_dup(VALUE _self) {
  upb_fieldtype_t field_type = self->field_type;
  size_t elem_size = native_slot_size(field_type);
  size_t off = 0;
+ int i;
+
  RepeatedField_reserve(new_rptfield_self, self->size);
- for (int i = 0; i < self->size; i++, off += elem_size) {
+ for (i = 0; i < self->size; i++, off += elem_size) {
  void* to_mem = (uint8_t *)new_rptfield_self->elements + off;
  void* from_mem = (uint8_t *)self->elements + off;
  native_slot_dup(field_type, to_mem, from_mem);
@@ -357,11 +372,13 @@ VALUE RepeatedField_deep_copy(VALUE _self) {
  upb_fieldtype_t field_type = self->field_type;
  size_t elem_size = native_slot_size(field_type);
  size_t off = 0;
+ int i;
+
  RepeatedField_reserve(new_rptfield_self, self->size);
- for (int i = 0; i < self->size; i++, off += elem_size) {
+ for (i = 0; i < self->size; i++, off += elem_size) {
  void* to_mem = (uint8_t *)new_rptfield_self->elements + off;
  void* from_mem = (uint8_t *)self->elements + off;
- native_slot_deep_copy(field_type, to_mem, from_mem);
+ native_slot_deep_copy(field_type, self->field_type_class, to_mem, from_mem);
  new_rptfield_self->size++;
  }

@@ -378,11 +395,12 @@ VALUE RepeatedField_deep_copy(VALUE _self) {
  VALUE RepeatedField_to_ary(VALUE _self) {
  RepeatedField* self = ruby_to_RepeatedField(_self);
  upb_fieldtype_t field_type = self->field_type;
-
  size_t elem_size = native_slot_size(field_type);
  size_t off = 0;
  VALUE ary = rb_ary_new2(self->size);
- for (int i = 0; i < self->size; i++, off += elem_size) {
+ int i;
+
+ for (i = 0; i < self->size; i++, off += elem_size) {
  void* mem = ((uint8_t *)self->elements) + off;
  VALUE elem = native_slot_get(field_type, self->field_type_class, mem);
  rb_ary_push(ary, elem);
@@ -428,10 +446,13 @@ VALUE RepeatedField_eq(VALUE _self, VALUE _other) {
  upb_fieldtype_t field_type = self->field_type;
  size_t elem_size = native_slot_size(field_type);
  size_t off = 0;
- for (int i = 0; i < self->size; i++, off += elem_size) {
+ int i;
+
+ for (i = 0; i < self->size; i++, off += elem_size) {
  void* self_mem = ((uint8_t *)self->elements) + off;
  void* other_mem = ((uint8_t *)other->elements) + off;
- if (!native_slot_eq(field_type, self_mem, other_mem)) {
+ if (!native_slot_eq(field_type, self->field_type_class, self_mem,
+ other_mem)) {
  return Qfalse;
  }
  }
@@ -453,7 +474,9 @@ VALUE RepeatedField_hash(VALUE _self) {
  VALUE field_type_class = self->field_type_class;
  size_t elem_size = native_slot_size(field_type);
  size_t off = 0;
- for (int i = 0; i < self->size; i++, off += elem_size) {
+ int i;
+
+ for (i = 0; i < self->size; i++, off += elem_size) {
  void* mem = ((uint8_t *)self->elements) + off;
  VALUE elem = native_slot_get(field_type, field_type_class, mem);
  h = rb_hash_uint(h, NUM2LONG(rb_funcall(elem, hash_sym, 0)));
@@ -475,7 +498,8 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
  VALUE dupped = RepeatedField_dup(_self);

  if (TYPE(list) == T_ARRAY) {
- for (int i = 0; i < RARRAY_LEN(list); i++) {
+ int i;
+ for (i = 0; i < RARRAY_LEN(list); i++) {
  VALUE elem = rb_ary_entry(list, i);
  RepeatedField_push(dupped, elem);
  }
@@ -483,12 +507,14 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
  RTYPEDDATA_TYPE(list) == &RepeatedField_type) {
  RepeatedField* self = ruby_to_RepeatedField(_self);
  RepeatedField* list_rptfield = ruby_to_RepeatedField(list);
+ int i;
+
  if (self->field_type != list_rptfield->field_type ||
  self->field_type_class != list_rptfield->field_type_class) {
  rb_raise(rb_eArgError,
  "Attempt to append RepeatedField with different element type.");
  }
- for (int i = 0; i < list_rptfield->size; i++) {
+ for (i = 0; i < list_rptfield->size; i++) {
  void* mem = RepeatedField_index_native(list, i);
  RepeatedField_push_native(dupped, mem);
  }
@@ -506,8 +532,10 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
  * concats the passed in array to self. Returns a Ruby array.
  */
  VALUE RepeatedField_concat(VALUE _self, VALUE list) {
+ int i;
+
  Check_Type(list, T_ARRAY);
- for (int i = 0; i < RARRAY_LEN(list); i++) {
+ for (i = 0; i < RARRAY_LEN(list); i++) {
  RepeatedField_push(_self, rb_ary_entry(list, i));
  }
  return _self;
@@ -568,10 +596,12 @@ void RepeatedField_init_args(int argc, VALUE* argv,
  }

  if (ary != Qnil) {
+ int i;
+
  if (!RB_TYPE_P(ary, T_ARRAY)) {
  rb_raise(rb_eArgError, "Expected array as initialize argument");
  }
- for (int i = 0; i < RARRAY_LEN(ary); i++) {
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
  RepeatedField_push(_self, rb_ary_entry(ary, i));
  }
  }
@@ -583,8 +613,10 @@ void RepeatedField_mark(void* _self) {
  RepeatedField* self = (RepeatedField*)_self;
  upb_fieldtype_t field_type = self->field_type;
  int element_size = native_slot_size(field_type);
+ int i;
+
  rb_gc_mark(self->field_type_class);
- for (int i = 0; i < self->size; i++) {
+ for (i = 0; i < self->size; i++) {
  void* memory = (((uint8_t *)self->elements) + i * element_size);
  native_slot_mark(self->field_type, memory);
  }
@@ -635,7 +667,7 @@ void RepeatedField_register(VALUE module) {
  rb_define_method(klass, "[]", RepeatedField_index, -1);
  rb_define_method(klass, "at", RepeatedField_index, -1);
  rb_define_method(klass, "[]=", RepeatedField_index_set, 2);
- rb_define_method(klass, "push", RepeatedField_push, 1);
+ rb_define_method(klass, "push", RepeatedField_push_vararg, -2);
  rb_define_method(klass, "<<", RepeatedField_push, 1);
  rb_define_private_method(klass, "pop_one", RepeatedField_pop_one, 0);
  rb_define_method(klass, "replace", RepeatedField_replace, 1);
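A note on the push registration change in the last hunk: in Ruby's C API, passing an arity of -2 to rb_define_method means the method receives all of its call arguments packed into a single Ruby Array, which is how the RepeatedField_push_vararg function added earlier in this diff can back a variadic push while << keeps the original one-argument RepeatedField_push. A minimal, hypothetical extension sketch of that convention (the Demo class and Init_demo entry point are illustrative only, not part of the gem):

#include <ruby.h>

/* Arity -2: Ruby collects every argument into one Array and passes it as args. */
static VALUE Demo_push(VALUE self, VALUE args) {
  long i;
  VALUE items = rb_iv_get(self, "@items");
  if (NIL_P(items)) {
    items = rb_ary_new();
    rb_iv_set(self, "@items", items);
  }
  for (i = 0; i < RARRAY_LEN(args); i++) {
    rb_ary_push(items, rb_ary_entry(args, i));  /* append each argument in order */
  }
  return self;
}

void Init_demo(void) {
  VALUE cDemo = rb_define_class("Demo", rb_cObject);
  rb_define_method(cDemo, "push", Demo_push, -2);  /* Demo#push(*values) */
}

At the Ruby level, this is what lets 3.13.0 accept calls like list.push(1, 2, 3) on a RepeatedField, where 3.7.0's push took exactly one value per call.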