google-protobuf 3.9.2-x64-mingw32 → 3.10.0.rc.1-x64-mingw32

This diff shows the changes between publicly available package versions as published to their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of google-protobuf has been flagged as potentially problematic.

@@ -30,25 +30,35 @@
 
 #include "protobuf.h"
 
-// -----------------------------------------------------------------------------
-// Global map from upb {msg,enum}defs to wrapper Descriptor/EnumDescriptor
-// instances.
-// -----------------------------------------------------------------------------
-
-// This is a hash table from def objects (encoded by converting pointers to
-// Ruby integers) to MessageDef/EnumDef instances (as Ruby values).
-VALUE upb_def_to_ruby_obj_map;
-
 VALUE cError;
 VALUE cParseError;
 VALUE cTypeError;
+VALUE c_only_cookie = Qnil;
 
-void add_def_obj(const void* def, VALUE value) {
-  rb_hash_aset(upb_def_to_ruby_obj_map, ULL2NUM((intptr_t)def), value);
+static VALUE cached_empty_string = Qnil;
+static VALUE cached_empty_bytes = Qnil;
+
+static VALUE create_frozen_string(const char* str, size_t size, bool binary) {
+  VALUE str_rb = rb_str_new(str, size);
+
+  rb_enc_associate(str_rb,
+                   binary ? kRubyString8bitEncoding : kRubyStringUtf8Encoding);
+  rb_obj_freeze(str_rb);
+  return str_rb;
 }
 
-VALUE get_def_obj(const void* def) {
-  return rb_hash_aref(upb_def_to_ruby_obj_map, ULL2NUM((intptr_t)def));
+VALUE get_frozen_string(const char* str, size_t size, bool binary) {
+  if (size == 0) {
+    return binary ? cached_empty_bytes : cached_empty_string;
+  } else {
+    // It is harder to memoize non-empty strings. The obvious approach would be
+    // to use a Ruby hash keyed by string as memo table, but looking up in such a table
+    // requires constructing a string (the very thing we're trying to avoid).
+    //
+    // Since few fields have defaults, we will just optimize the empty string
+    // case for now.
+    return create_frozen_string(str, size, binary);
+  }
 }
 
 // -----------------------------------------------------------------------------
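The empty-string cache above only pays off for zero-length defaults, which the comment argues is the common case. As a rough illustration of the intended call site, a string field's default could be returned as shown below; the `string_field_default` helper is hypothetical and not part of this diff, and `upb_fielddef_defaultstr` is assumed to be available from upb.

// Hypothetical caller: return a string/bytes field's default as a frozen Ruby
// string, hitting the cached empty object in the common zero-length case.
static VALUE string_field_default(const upb_fielddef* field) {
  size_t len;
  const char* ptr = upb_fielddef_defaultstr(field, &len);
  bool binary = (upb_fielddef_type(field) == UPB_TYPE_BYTES);
  return get_frozen_string(ptr, len, binary);
}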
@@ -116,6 +126,11 @@ void Init_protobuf_c() {
   kRubyStringASCIIEncoding = rb_usascii_encoding();
   kRubyString8bitEncoding = rb_ascii8bit_encoding();
 
-  rb_gc_register_address(&upb_def_to_ruby_obj_map);
-  upb_def_to_ruby_obj_map = rb_hash_new();
+  rb_gc_register_address(&c_only_cookie);
+  c_only_cookie = rb_class_new_instance(0, NULL, rb_cObject);
+
+  rb_gc_register_address(&cached_empty_string);
+  rb_gc_register_address(&cached_empty_bytes);
+  cached_empty_string = create_frozen_string("", 0, false);
+  cached_empty_bytes = create_frozen_string("", 0, true);
 }
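Each cached VALUE above is registered with rb_gc_register_address before it is assigned, which makes the C global a GC root; without this the frozen strings and the cookie object could be collected, since nothing on the Ruby side references them. The same pattern applies to any long-lived C-side cache; a minimal sketch follows (the `cached_label` name is illustrative only, not from this diff).

static VALUE cached_label = Qnil;

static void init_label_cache(void) {
  rb_gc_register_address(&cached_label);  // treat this C global as a GC root
  cached_label = rb_obj_freeze(rb_str_new_cstr("label"));
}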
@@ -59,6 +59,7 @@ typedef struct OneofDescriptor OneofDescriptor;
 typedef struct EnumDescriptor EnumDescriptor;
 typedef struct MessageLayout MessageLayout;
 typedef struct MessageField MessageField;
+typedef struct MessageOneof MessageOneof;
 typedef struct MessageHeader MessageHeader;
 typedef struct MessageBuilderContext MessageBuilderContext;
 typedef struct OneofBuilderContext OneofBuilderContext;
@@ -107,62 +108,68 @@ typedef struct Builder Builder;
 // -----------------------------------------------------------------------------
 
 struct DescriptorPool {
+  VALUE def_to_descriptor;  // Hash table of def* -> Ruby descriptor.
   upb_symtab* symtab;
+  upb_handlercache* fill_handler_cache;
+  upb_handlercache* pb_serialize_handler_cache;
+  upb_handlercache* json_serialize_handler_cache;
+  upb_handlercache* json_serialize_handler_preserve_cache;
+  upb_pbcodecache* fill_method_cache;
+  upb_json_codecache* json_fill_method_cache;
 };
 
 struct Descriptor {
   const upb_msgdef* msgdef;
   MessageLayout* layout;
-  VALUE klass;  // begins as nil
-  const upb_handlers* fill_handlers;
-  const upb_pbdecodermethod* fill_method;
-  const upb_json_parsermethod* json_fill_method;
-  const upb_handlers* pb_serialize_handlers;
-  const upb_handlers* json_serialize_handlers;
-  const upb_handlers* json_serialize_handlers_preserve;
+  VALUE klass;
+  VALUE descriptor_pool;
 };
 
 struct FileDescriptor {
   const upb_filedef* filedef;
+  VALUE descriptor_pool;  // Owns the upb_filedef.
 };
 
 struct FieldDescriptor {
   const upb_fielddef* fielddef;
+  VALUE descriptor_pool;  // Owns the upb_fielddef.
 };
 
 struct OneofDescriptor {
   const upb_oneofdef* oneofdef;
+  VALUE descriptor_pool;  // Owns the upb_oneofdef.
 };
 
 struct EnumDescriptor {
   const upb_enumdef* enumdef;
   VALUE module;  // begins as nil
+  VALUE descriptor_pool;  // Owns the upb_enumdef.
 };
 
 struct MessageBuilderContext {
-  VALUE descriptor;
-  VALUE builder;
+  google_protobuf_DescriptorProto* msg_proto;
+  VALUE file_builder;
 };
 
 struct OneofBuilderContext {
-  VALUE descriptor;
-  VALUE builder;
+  int oneof_index;
+  VALUE message_builder;
 };
 
 struct EnumBuilderContext {
-  VALUE enumdesc;
+  google_protobuf_EnumDescriptorProto* enum_proto;
+  VALUE file_builder;
 };
 
 struct FileBuilderContext {
-  VALUE pending_list;
-  VALUE file_descriptor;
-  VALUE builder;
+  upb_arena *arena;
+  google_protobuf_FileDescriptorProto* file_proto;
+  VALUE descriptor_pool;
 };
 
 struct Builder {
-  VALUE pending_list;
-  VALUE default_file_descriptor;
-  upb_def** defs;  // used only while finalizing
+  VALUE descriptor_pool;
+  VALUE default_file_builder;
 };
 
 extern VALUE cDescriptorPool;
@@ -191,7 +198,6 @@ void DescriptorPool_free(void* _self);
 VALUE DescriptorPool_alloc(VALUE klass);
 void DescriptorPool_register(VALUE module);
 DescriptorPool* ruby_to_DescriptorPool(VALUE value);
-VALUE DescriptorPool_add(VALUE _self, VALUE def);
 VALUE DescriptorPool_build(int argc, VALUE* argv, VALUE _self);
 VALUE DescriptorPool_lookup(VALUE _self, VALUE name);
 VALUE DescriptorPool_generated_pool(VALUE _self);
@@ -203,13 +209,11 @@ void Descriptor_free(void* _self);
 VALUE Descriptor_alloc(VALUE klass);
 void Descriptor_register(VALUE module);
 Descriptor* ruby_to_Descriptor(VALUE value);
-VALUE Descriptor_initialize(VALUE _self, VALUE file_descriptor_rb);
+VALUE Descriptor_initialize(VALUE _self, VALUE cookie, VALUE descriptor_pool,
+                            VALUE ptr);
 VALUE Descriptor_name(VALUE _self);
-VALUE Descriptor_name_set(VALUE _self, VALUE str);
 VALUE Descriptor_each(VALUE _self);
 VALUE Descriptor_lookup(VALUE _self, VALUE name);
-VALUE Descriptor_add_field(VALUE _self, VALUE obj);
-VALUE Descriptor_add_oneof(VALUE _self, VALUE obj);
 VALUE Descriptor_each_oneof(VALUE _self);
 VALUE Descriptor_lookup_oneof(VALUE _self, VALUE name);
 VALUE Descriptor_msgclass(VALUE _self);
@@ -221,28 +225,24 @@ void FileDescriptor_free(void* _self);
 VALUE FileDescriptor_alloc(VALUE klass);
 void FileDescriptor_register(VALUE module);
 FileDescriptor* ruby_to_FileDescriptor(VALUE value);
-VALUE FileDescriptor_initialize(int argc, VALUE* argv, VALUE _self);
+VALUE FileDescriptor_initialize(VALUE _self, VALUE cookie,
+                                VALUE descriptor_pool, VALUE ptr);
 VALUE FileDescriptor_name(VALUE _self);
 VALUE FileDescriptor_syntax(VALUE _self);
-VALUE FileDescriptor_syntax_set(VALUE _self, VALUE syntax);
 
 void FieldDescriptor_mark(void* _self);
 void FieldDescriptor_free(void* _self);
 VALUE FieldDescriptor_alloc(VALUE klass);
 void FieldDescriptor_register(VALUE module);
 FieldDescriptor* ruby_to_FieldDescriptor(VALUE value);
+VALUE FieldDescriptor_initialize(VALUE _self, VALUE cookie,
+                                 VALUE descriptor_pool, VALUE ptr);
 VALUE FieldDescriptor_name(VALUE _self);
-VALUE FieldDescriptor_name_set(VALUE _self, VALUE str);
 VALUE FieldDescriptor_type(VALUE _self);
-VALUE FieldDescriptor_type_set(VALUE _self, VALUE type);
 VALUE FieldDescriptor_default(VALUE _self);
-VALUE FieldDescriptor_default_set(VALUE _self, VALUE default_value);
 VALUE FieldDescriptor_label(VALUE _self);
-VALUE FieldDescriptor_label_set(VALUE _self, VALUE label);
 VALUE FieldDescriptor_number(VALUE _self);
-VALUE FieldDescriptor_number_set(VALUE _self, VALUE number);
 VALUE FieldDescriptor_submsg_name(VALUE _self);
-VALUE FieldDescriptor_submsg_name_set(VALUE _self, VALUE value);
 VALUE FieldDescriptor_subtype(VALUE _self);
 VALUE FieldDescriptor_has(VALUE _self, VALUE msg_rb);
 VALUE FieldDescriptor_clear(VALUE _self, VALUE msg_rb);
@@ -256,21 +256,20 @@ void OneofDescriptor_free(void* _self);
 VALUE OneofDescriptor_alloc(VALUE klass);
 void OneofDescriptor_register(VALUE module);
 OneofDescriptor* ruby_to_OneofDescriptor(VALUE value);
+VALUE OneofDescriptor_initialize(VALUE _self, VALUE cookie,
+                                 VALUE descriptor_pool, VALUE ptr);
 VALUE OneofDescriptor_name(VALUE _self);
-VALUE OneofDescriptor_name_set(VALUE _self, VALUE value);
-VALUE OneofDescriptor_add_field(VALUE _self, VALUE field);
 VALUE OneofDescriptor_each(VALUE _self, VALUE field);
 
 void EnumDescriptor_mark(void* _self);
 void EnumDescriptor_free(void* _self);
 VALUE EnumDescriptor_alloc(VALUE klass);
+VALUE EnumDescriptor_initialize(VALUE _self, VALUE cookie,
+                                VALUE descriptor_pool, VALUE ptr);
 void EnumDescriptor_register(VALUE module);
 EnumDescriptor* ruby_to_EnumDescriptor(VALUE value);
-VALUE EnumDescriptor_initialize(VALUE _self, VALUE file_descriptor_rb);
 VALUE EnumDescriptor_file_descriptor(VALUE _self);
 VALUE EnumDescriptor_name(VALUE _self);
-VALUE EnumDescriptor_name_set(VALUE _self, VALUE str);
-VALUE EnumDescriptor_add_value(VALUE _self, VALUE name, VALUE number);
 VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name);
 VALUE EnumDescriptor_lookup_value(VALUE _self, VALUE number);
 VALUE EnumDescriptor_each(VALUE _self);
@@ -283,8 +282,8 @@ VALUE MessageBuilderContext_alloc(VALUE klass);
 void MessageBuilderContext_register(VALUE module);
 MessageBuilderContext* ruby_to_MessageBuilderContext(VALUE value);
 VALUE MessageBuilderContext_initialize(VALUE _self,
-                                       VALUE descriptor,
-                                       VALUE builder);
+                                       VALUE _file_builder,
+                                       VALUE name);
 VALUE MessageBuilderContext_optional(int argc, VALUE* argv, VALUE _self);
 VALUE MessageBuilderContext_required(int argc, VALUE* argv, VALUE _self);
 VALUE MessageBuilderContext_repeated(int argc, VALUE* argv, VALUE _self);
@@ -306,15 +305,20 @@ void EnumBuilderContext_free(void* _self);
 VALUE EnumBuilderContext_alloc(VALUE klass);
 void EnumBuilderContext_register(VALUE module);
 EnumBuilderContext* ruby_to_EnumBuilderContext(VALUE value);
-VALUE EnumBuilderContext_initialize(VALUE _self, VALUE enumdesc);
+VALUE EnumBuilderContext_initialize(VALUE _self, VALUE _file_builder,
+                                    VALUE name);
 VALUE EnumBuilderContext_value(VALUE _self, VALUE name, VALUE number);
 
 void FileBuilderContext_mark(void* _self);
 void FileBuilderContext_free(void* _self);
 VALUE FileBuilderContext_alloc(VALUE klass);
 void FileBuilderContext_register(VALUE module);
-VALUE FileBuilderContext_initialize(VALUE _self, VALUE file_descriptor,
-                                    VALUE builder);
+FileBuilderContext* ruby_to_FileBuilderContext(VALUE _self);
+upb_strview FileBuilderContext_strdup(VALUE _self, VALUE rb_str);
+upb_strview FileBuilderContext_strdup_name(VALUE _self, VALUE rb_str);
+upb_strview FileBuilderContext_strdup_sym(VALUE _self, VALUE rb_sym);
+VALUE FileBuilderContext_initialize(VALUE _self, VALUE descriptor_pool,
+                                    VALUE name, VALUE options);
 VALUE FileBuilderContext_add_message(VALUE _self, VALUE name);
 VALUE FileBuilderContext_add_enum(VALUE _self, VALUE name);
 VALUE FileBuilderContext_pending_descriptors(VALUE _self);
@@ -324,7 +328,8 @@ void Builder_free(void* _self);
 VALUE Builder_alloc(VALUE klass);
 void Builder_register(VALUE module);
 Builder* ruby_to_Builder(VALUE value);
-VALUE Builder_initialize(VALUE _self);
+VALUE Builder_build(VALUE _self);
+VALUE Builder_initialize(VALUE _self, VALUE descriptor_pool);
 VALUE Builder_add_file(int argc, VALUE *argv, VALUE _self);
 VALUE Builder_add_message(VALUE _self, VALUE name);
 VALUE Builder_add_enum(VALUE _self, VALUE name);
@@ -363,12 +368,15 @@ bool native_slot_eq(upb_fieldtype_t type, void* mem1, void* mem2);
 
 VALUE native_slot_encode_and_freeze_string(upb_fieldtype_t type, VALUE value);
 void native_slot_check_int_range_precision(const char* name, upb_fieldtype_t type, VALUE value);
+uint32_t slot_read_oneof_case(MessageLayout* layout, const void* storage,
+                              const upb_oneofdef* oneof);
+bool is_value_field(const upb_fielddef* f);
 
 extern rb_encoding* kRubyStringUtf8Encoding;
 extern rb_encoding* kRubyStringASCIIEncoding;
 extern rb_encoding* kRubyString8bitEncoding;
 
-VALUE field_type_class(const upb_fielddef* field);
+VALUE field_type_class(const MessageLayout* layout, const upb_fielddef* field);
 
 #define MAP_KEY_FIELD 1
 #define MAP_VALUE_FIELD 2
@@ -411,6 +419,7 @@ extern VALUE cRepeatedField;
 
 RepeatedField* ruby_to_RepeatedField(VALUE value);
 
+VALUE RepeatedField_new_this_type(VALUE _self);
 VALUE RepeatedField_each(VALUE _self);
 VALUE RepeatedField_index(int argc, VALUE* argv, VALUE _self);
 void* RepeatedField_index_native(VALUE _self, int index);
@@ -459,6 +468,7 @@ extern VALUE cMap;
 
 Map* ruby_to_Map(VALUE value);
 
+VALUE Map_new_this_type(VALUE _self);
 VALUE Map_each(VALUE _self);
 VALUE Map_keys(VALUE _self);
 VALUE Map_values(VALUE _self);
@@ -492,22 +502,35 @@ VALUE Map_iter_value(Map_iter* iter);
 // Message layout / storage.
 // -----------------------------------------------------------------------------
 
-#define MESSAGE_FIELD_NO_CASE ((size_t)-1)
-#define MESSAGE_FIELD_NO_HASBIT ((size_t)-1)
+#define MESSAGE_FIELD_NO_HASBIT ((uint32_t)-1)
 
 struct MessageField {
-  size_t offset;
-  size_t case_offset;  // for oneofs, a uint32. Else, MESSAGE_FIELD_NO_CASE.
-  size_t hasbit;
+  uint32_t offset;
+  uint32_t hasbit;
 };
 
+struct MessageOneof {
+  uint32_t offset;
+  uint32_t case_offset;
+};
+
+// MessageLayout is owned by the enclosing Descriptor, which must outlive us.
 struct MessageLayout {
+  const Descriptor* desc;
   const upb_msgdef* msgdef;
+  void* empty_template;  // Can memcpy() onto a layout to clear it.
   MessageField* fields;
-  size_t size;
+  MessageOneof* oneofs;
+  uint32_t size;
+  uint32_t value_offset;
+  int value_count;
+  int repeated_count;
+  int map_count;
 };
 
-MessageLayout* create_layout(const upb_msgdef* msgdef);
+#define ONEOF_CASE_MASK 0x80000000
+
+void create_layout(Descriptor* desc);
 void free_layout(MessageLayout* layout);
 bool field_contains_hasbit(MessageLayout* layout,
                            const upb_fielddef* field);
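The new empty_template member follows from its own comment: clearing a message's layout-managed storage becomes one memcpy of a pre-built block of `size` bytes rather than per-field initialization. A minimal sketch of that idea, assuming a `storage` pointer to the message's layout-managed memory (the function name is illustrative, not from this diff):

// Illustrative only: reset layout-managed storage to its default (empty) state
// by copying the pre-built empty template over it. Requires <string.h>.
static void layout_clear_storage(const MessageLayout* layout, void* storage) {
  memcpy(storage, layout->empty_template, layout->size);
}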
@@ -556,7 +579,7 @@ struct MessageHeader {
 
 extern rb_data_type_t Message_type;
 
-VALUE build_class_from_descriptor(Descriptor* descriptor);
+VALUE build_class_from_descriptor(VALUE descriptor);
 void* Message_data(void* msg);
 void Message_mark(void* self);
 void Message_free(void* self);
@@ -580,23 +603,33 @@ VALUE Message_encode_json(int argc, VALUE* argv, VALUE klass);
 VALUE Google_Protobuf_discard_unknown(VALUE self, VALUE msg_rb);
 VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj);
 
-VALUE build_module_from_enumdesc(EnumDescriptor* enumdef);
+VALUE build_module_from_enumdesc(VALUE _enumdesc);
 VALUE enum_lookup(VALUE self, VALUE number);
 VALUE enum_resolve(VALUE self, VALUE sym);
+VALUE enum_descriptor(VALUE self);
 
 const upb_pbdecodermethod *new_fillmsg_decodermethod(
     Descriptor* descriptor, const void *owner);
+void add_handlers_for_message(const void *closure, upb_handlers *h);
 
 // Maximum depth allowed during encoding, to avoid stack overflows due to
 // cycles.
 #define ENCODE_MAX_NESTING 63
 
+// -----------------------------------------------------------------------------
+// A cache of frozen string objects to use as field defaults.
+// -----------------------------------------------------------------------------
+VALUE get_frozen_string(const char* data, size_t size, bool binary);
+
 // -----------------------------------------------------------------------------
 // Global map from upb {msg,enum}defs to wrapper Descriptor/EnumDescriptor
 // instances.
 // -----------------------------------------------------------------------------
-void add_def_obj(const void* def, VALUE value);
-VALUE get_def_obj(const void* def);
+VALUE get_msgdef_obj(VALUE descriptor_pool, const upb_msgdef* def);
+VALUE get_enumdef_obj(VALUE descriptor_pool, const upb_enumdef* def);
+VALUE get_fielddef_obj(VALUE descriptor_pool, const upb_fielddef* def);
+VALUE get_filedef_obj(VALUE descriptor_pool, const upb_filedef* def);
+VALUE get_oneofdef_obj(VALUE descriptor_pool, const upb_oneofdef* def);
 
 // -----------------------------------------------------------------------------
 // Utilities.
@@ -612,4 +645,17 @@ void check_upb_status(const upb_status* status, const char* msg);
 
 extern ID descriptor_instancevar_interned;
 
+// A distinct object that is not accessible from Ruby. We use this as a
+// constructor argument to enforce that certain objects cannot be created from
+// Ruby.
+extern VALUE c_only_cookie;
+
+#ifdef NDEBUG
+#define UPB_ASSERT(expr) do {} while (false && (expr))
+#else
+#define UPB_ASSERT(expr) assert(expr)
+#endif
+
+#define UPB_UNUSED(var) (void)var
+
 #endif  // __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
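The c_only_cookie comment describes a simple capability check: constructors that require the cookie can only be called with an object Ruby code can never obtain, so the descriptor wrapper types cannot be instantiated directly from Ruby. A sketch of the pattern, assuming the four-argument initializer signatures declared earlier in this diff (the type name and error message below are assumptions):

// Illustrative only: reject any call that does not supply the C-only cookie.
static VALUE SomeDescriptor_initialize(VALUE _self, VALUE cookie,
                                       VALUE descriptor_pool, VALUE ptr) {
  if (cookie != c_only_cookie) {
    rb_raise(rb_eRuntimeError,
             "Descriptor objects may not be created from Ruby.");
  }
  // ... normal initialization using descriptor_pool and ptr ...
  return Qnil;
}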
@@ -64,10 +64,11 @@ VALUE RepeatedField_subarray(VALUE _self, long beg, long len) {
   int element_size = native_slot_size(self->field_type);
   upb_fieldtype_t field_type = self->field_type;
   VALUE field_type_class = self->field_type_class;
-
   size_t off = beg * element_size;
   VALUE ary = rb_ary_new2(len);
-  for (int i = beg; i < beg + len; i++, off += element_size) {
+  int i;
+
+  for (i = beg; i < beg + len; i++, off += element_size) {
     void* mem = ((uint8_t *)self->elements) + off;
     VALUE elem = native_slot_get(field_type, field_type_class, mem);
     rb_ary_push(ary, elem);
@@ -88,9 +89,10 @@ VALUE RepeatedField_each(VALUE _self) {
   upb_fieldtype_t field_type = self->field_type;
   VALUE field_type_class = self->field_type_class;
   int element_size = native_slot_size(field_type);
-
   size_t off = 0;
-  for (int i = 0; i < self->size; i++, off += element_size) {
+  int i;
+
+  for (i = 0; i < self->size; i++, off += element_size) {
     void* memory = (void *) (((uint8_t *)self->elements) + off);
     VALUE val = native_slot_get(field_type, field_type_class, memory);
     rb_yield(val);
@@ -169,8 +171,10 @@ VALUE RepeatedField_index_set(VALUE _self, VALUE _index, VALUE val) {
   if (index >= self->size) {
     upb_fieldtype_t field_type = self->field_type;
     int element_size = native_slot_size(field_type);
+    int i;
+
     RepeatedField_reserve(self, index + 1);
-    for (int i = self->size; i <= index; i++) {
+    for (i = self->size; i <= index; i++) {
       void* elem = RepeatedField_memoryat(self, i, element_size);
       native_slot_init(field_type, elem);
     }
@@ -224,7 +228,8 @@ VALUE RepeatedField_push(VALUE _self, VALUE val) {
 }
 
 VALUE RepeatedField_push_vararg(VALUE _self, VALUE args) {
-  for (int i = 0; i < RARRAY_LEN(args); i++) {
+  int i;
+  for (i = 0; i < RARRAY_LEN(args); i++) {
     RepeatedField_push(_self, rb_ary_entry(args, i));
   }
   return _self;
@@ -285,9 +290,11 @@ VALUE RepeatedField_pop_one(VALUE _self) {
  */
 VALUE RepeatedField_replace(VALUE _self, VALUE list) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
+  int i;
+
   Check_Type(list, T_ARRAY);
   self->size = 0;
-  for (int i = 0; i < RARRAY_LEN(list); i++) {
+  for (i = 0; i < RARRAY_LEN(list); i++) {
     RepeatedField_push(_self, rb_ary_entry(list, i));
   }
   return list;
@@ -316,7 +323,7 @@ VALUE RepeatedField_length(VALUE _self) {
   return INT2NUM(self->size);
 }
 
-static VALUE RepeatedField_new_this_type(VALUE _self) {
+VALUE RepeatedField_new_this_type(VALUE _self) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   VALUE new_rptfield = Qnil;
   VALUE element_type = fieldtype_to_ruby(self->field_type);
@@ -344,8 +351,10 @@ VALUE RepeatedField_dup(VALUE _self) {
   upb_fieldtype_t field_type = self->field_type;
   size_t elem_size = native_slot_size(field_type);
   size_t off = 0;
+  int i;
+
   RepeatedField_reserve(new_rptfield_self, self->size);
-  for (int i = 0; i < self->size; i++, off += elem_size) {
+  for (i = 0; i < self->size; i++, off += elem_size) {
     void* to_mem = (uint8_t *)new_rptfield_self->elements + off;
     void* from_mem = (uint8_t *)self->elements + off;
     native_slot_dup(field_type, to_mem, from_mem);
@@ -363,8 +372,10 @@ VALUE RepeatedField_deep_copy(VALUE _self) {
   upb_fieldtype_t field_type = self->field_type;
   size_t elem_size = native_slot_size(field_type);
   size_t off = 0;
+  int i;
+
   RepeatedField_reserve(new_rptfield_self, self->size);
-  for (int i = 0; i < self->size; i++, off += elem_size) {
+  for (i = 0; i < self->size; i++, off += elem_size) {
     void* to_mem = (uint8_t *)new_rptfield_self->elements + off;
     void* from_mem = (uint8_t *)self->elements + off;
     native_slot_deep_copy(field_type, to_mem, from_mem);
@@ -384,11 +395,12 @@ VALUE RepeatedField_deep_copy(VALUE _self) {
 VALUE RepeatedField_to_ary(VALUE _self) {
   RepeatedField* self = ruby_to_RepeatedField(_self);
   upb_fieldtype_t field_type = self->field_type;
-
   size_t elem_size = native_slot_size(field_type);
   size_t off = 0;
   VALUE ary = rb_ary_new2(self->size);
-  for (int i = 0; i < self->size; i++, off += elem_size) {
+  int i;
+
+  for (i = 0; i < self->size; i++, off += elem_size) {
     void* mem = ((uint8_t *)self->elements) + off;
     VALUE elem = native_slot_get(field_type, self->field_type_class, mem);
     rb_ary_push(ary, elem);
@@ -434,7 +446,9 @@ VALUE RepeatedField_eq(VALUE _self, VALUE _other) {
   upb_fieldtype_t field_type = self->field_type;
   size_t elem_size = native_slot_size(field_type);
   size_t off = 0;
-  for (int i = 0; i < self->size; i++, off += elem_size) {
+  int i;
+
+  for (i = 0; i < self->size; i++, off += elem_size) {
     void* self_mem = ((uint8_t *)self->elements) + off;
     void* other_mem = ((uint8_t *)other->elements) + off;
     if (!native_slot_eq(field_type, self_mem, other_mem)) {
@@ -459,7 +473,9 @@ VALUE RepeatedField_hash(VALUE _self) {
   VALUE field_type_class = self->field_type_class;
   size_t elem_size = native_slot_size(field_type);
   size_t off = 0;
-  for (int i = 0; i < self->size; i++, off += elem_size) {
+  int i;
+
+  for (i = 0; i < self->size; i++, off += elem_size) {
     void* mem = ((uint8_t *)self->elements) + off;
     VALUE elem = native_slot_get(field_type, field_type_class, mem);
     h = rb_hash_uint(h, NUM2LONG(rb_funcall(elem, hash_sym, 0)));
@@ -481,7 +497,8 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
   VALUE dupped = RepeatedField_dup(_self);
 
   if (TYPE(list) == T_ARRAY) {
-    for (int i = 0; i < RARRAY_LEN(list); i++) {
+    int i;
+    for (i = 0; i < RARRAY_LEN(list); i++) {
       VALUE elem = rb_ary_entry(list, i);
       RepeatedField_push(dupped, elem);
     }
@@ -489,12 +506,14 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
              RTYPEDDATA_TYPE(list) == &RepeatedField_type) {
     RepeatedField* self = ruby_to_RepeatedField(_self);
     RepeatedField* list_rptfield = ruby_to_RepeatedField(list);
+    int i;
+
     if (self->field_type != list_rptfield->field_type ||
         self->field_type_class != list_rptfield->field_type_class) {
       rb_raise(rb_eArgError,
                "Attempt to append RepeatedField with different element type.");
     }
-    for (int i = 0; i < list_rptfield->size; i++) {
+    for (i = 0; i < list_rptfield->size; i++) {
       void* mem = RepeatedField_index_native(list, i);
      RepeatedField_push_native(dupped, mem);
    }
@@ -512,8 +531,10 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
  * concats the passed in array to self. Returns a Ruby array.
  */
 VALUE RepeatedField_concat(VALUE _self, VALUE list) {
+  int i;
+
   Check_Type(list, T_ARRAY);
-  for (int i = 0; i < RARRAY_LEN(list); i++) {
+  for (i = 0; i < RARRAY_LEN(list); i++) {
     RepeatedField_push(_self, rb_ary_entry(list, i));
   }
   return _self;
@@ -574,10 +595,12 @@ void RepeatedField_init_args(int argc, VALUE* argv,
   }
 
   if (ary != Qnil) {
+    int i;
+
     if (!RB_TYPE_P(ary, T_ARRAY)) {
       rb_raise(rb_eArgError, "Expected array as initialize argument");
     }
-    for (int i = 0; i < RARRAY_LEN(ary); i++) {
+    for (i = 0; i < RARRAY_LEN(ary); i++) {
      RepeatedField_push(_self, rb_ary_entry(ary, i));
    }
  }
@@ -589,8 +612,10 @@ void RepeatedField_mark(void* _self) {
   RepeatedField* self = (RepeatedField*)_self;
   upb_fieldtype_t field_type = self->field_type;
   int element_size = native_slot_size(field_type);
+  int i;
+
   rb_gc_mark(self->field_type_class);
-  for (int i = 0; i < self->size; i++) {
+  for (i = 0; i < self->size; i++) {
     void* memory = (((uint8_t *)self->elements) + i * element_size);
     native_slot_mark(self->field_type, memory);
   }