google-protobuf 3.8.0 → 3.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of google-protobuf might be problematic. See the package registry's advisory page for more details.

@@ -30,25 +30,35 @@
30
30
 
31
31
  #include "protobuf.h"
32
32
 
33
- // -----------------------------------------------------------------------------
34
- // Global map from upb {msg,enum}defs to wrapper Descriptor/EnumDescriptor
35
- // instances.
36
- // -----------------------------------------------------------------------------
37
-
38
- // This is a hash table from def objects (encoded by converting pointers to
39
- // Ruby integers) to MessageDef/EnumDef instances (as Ruby values).
40
- VALUE upb_def_to_ruby_obj_map;
41
-
42
33
  VALUE cError;
43
34
  VALUE cParseError;
44
35
  VALUE cTypeError;
36
+ VALUE c_only_cookie = Qnil;
45
37
 
46
- void add_def_obj(const void* def, VALUE value) {
47
- rb_hash_aset(upb_def_to_ruby_obj_map, ULL2NUM((intptr_t)def), value);
38
+ static VALUE cached_empty_string = Qnil;
39
+ static VALUE cached_empty_bytes = Qnil;
40
+
41
+ static VALUE create_frozen_string(const char* str, size_t size, bool binary) {
42
+ VALUE str_rb = rb_str_new(str, size);
43
+
44
+ rb_enc_associate(str_rb,
45
+ binary ? kRubyString8bitEncoding : kRubyStringUtf8Encoding);
46
+ rb_obj_freeze(str_rb);
47
+ return str_rb;
48
48
  }
49
49
 
50
- VALUE get_def_obj(const void* def) {
51
- return rb_hash_aref(upb_def_to_ruby_obj_map, ULL2NUM((intptr_t)def));
50
+ VALUE get_frozen_string(const char* str, size_t size, bool binary) {
51
+ if (size == 0) {
52
+ return binary ? cached_empty_bytes : cached_empty_string;
53
+ } else {
54
+ // It is harder to memoize non-empty strings. The obvious approach would be
55
+ // to use a Ruby hash keyed by string as memo table, but looking up in such a table
56
+ // requires constructing a string (the very thing we're trying to avoid).
57
+ //
58
+ // Since few fields have defaults, we will just optimize the empty string
59
+ // case for now.
60
+ return create_frozen_string(str, size, binary);
61
+ }
52
62
  }
53
63
 
54
64
  // -----------------------------------------------------------------------------
@@ -116,6 +126,11 @@ void Init_protobuf_c() {
116
126
  kRubyStringASCIIEncoding = rb_usascii_encoding();
117
127
  kRubyString8bitEncoding = rb_ascii8bit_encoding();
118
128
 
119
- rb_gc_register_address(&upb_def_to_ruby_obj_map);
120
- upb_def_to_ruby_obj_map = rb_hash_new();
129
+ rb_gc_register_address(&c_only_cookie);
130
+ c_only_cookie = rb_class_new_instance(0, NULL, rb_cObject);
131
+
132
+ rb_gc_register_address(&cached_empty_string);
133
+ rb_gc_register_address(&cached_empty_bytes);
134
+ cached_empty_string = create_frozen_string("", 0, false);
135
+ cached_empty_bytes = create_frozen_string("", 0, true);
121
136
  }
@@ -59,6 +59,7 @@ typedef struct OneofDescriptor OneofDescriptor;
59
59
  typedef struct EnumDescriptor EnumDescriptor;
60
60
  typedef struct MessageLayout MessageLayout;
61
61
  typedef struct MessageField MessageField;
62
+ typedef struct MessageOneof MessageOneof;
62
63
  typedef struct MessageHeader MessageHeader;
63
64
  typedef struct MessageBuilderContext MessageBuilderContext;
64
65
  typedef struct OneofBuilderContext OneofBuilderContext;
@@ -107,62 +108,68 @@ typedef struct Builder Builder;
107
108
  // -----------------------------------------------------------------------------
108
109
 
109
110
  struct DescriptorPool {
111
+ VALUE def_to_descriptor; // Hash table of def* -> Ruby descriptor.
110
112
  upb_symtab* symtab;
113
+ upb_handlercache* fill_handler_cache;
114
+ upb_handlercache* pb_serialize_handler_cache;
115
+ upb_handlercache* json_serialize_handler_cache;
116
+ upb_handlercache* json_serialize_handler_preserve_cache;
117
+ upb_pbcodecache* fill_method_cache;
118
+ upb_json_codecache* json_fill_method_cache;
111
119
  };
112
120
 
113
121
  struct Descriptor {
114
122
  const upb_msgdef* msgdef;
115
123
  MessageLayout* layout;
116
- VALUE klass; // begins as nil
117
- const upb_handlers* fill_handlers;
118
- const upb_pbdecodermethod* fill_method;
119
- const upb_json_parsermethod* json_fill_method;
120
- const upb_handlers* pb_serialize_handlers;
121
- const upb_handlers* json_serialize_handlers;
122
- const upb_handlers* json_serialize_handlers_preserve;
124
+ VALUE klass;
125
+ VALUE descriptor_pool;
123
126
  };
124
127
 
125
128
  struct FileDescriptor {
126
129
  const upb_filedef* filedef;
130
+ VALUE descriptor_pool; // Owns the upb_filedef.
127
131
  };
128
132
 
129
133
  struct FieldDescriptor {
130
134
  const upb_fielddef* fielddef;
135
+ VALUE descriptor_pool; // Owns the upb_fielddef.
131
136
  };
132
137
 
133
138
  struct OneofDescriptor {
134
139
  const upb_oneofdef* oneofdef;
140
+ VALUE descriptor_pool; // Owns the upb_oneofdef.
135
141
  };
136
142
 
137
143
  struct EnumDescriptor {
138
144
  const upb_enumdef* enumdef;
139
145
  VALUE module; // begins as nil
146
+ VALUE descriptor_pool; // Owns the upb_enumdef.
140
147
  };
141
148
 
142
149
  struct MessageBuilderContext {
143
- VALUE descriptor;
144
- VALUE builder;
150
+ google_protobuf_DescriptorProto* msg_proto;
151
+ VALUE file_builder;
145
152
  };
146
153
 
147
154
  struct OneofBuilderContext {
148
- VALUE descriptor;
149
- VALUE builder;
155
+ int oneof_index;
156
+ VALUE message_builder;
150
157
  };
151
158
 
152
159
  struct EnumBuilderContext {
153
- VALUE enumdesc;
160
+ google_protobuf_EnumDescriptorProto* enum_proto;
161
+ VALUE file_builder;
154
162
  };
155
163
 
156
164
  struct FileBuilderContext {
157
- VALUE pending_list;
158
- VALUE file_descriptor;
159
- VALUE builder;
165
+ upb_arena *arena;
166
+ google_protobuf_FileDescriptorProto* file_proto;
167
+ VALUE descriptor_pool;
160
168
  };
161
169
 
162
170
  struct Builder {
163
- VALUE pending_list;
164
- VALUE default_file_descriptor;
165
- upb_def** defs; // used only while finalizing
171
+ VALUE descriptor_pool;
172
+ VALUE default_file_builder;
166
173
  };
167
174
 
168
175
  extern VALUE cDescriptorPool;
@@ -191,7 +198,6 @@ void DescriptorPool_free(void* _self);
191
198
  VALUE DescriptorPool_alloc(VALUE klass);
192
199
  void DescriptorPool_register(VALUE module);
193
200
  DescriptorPool* ruby_to_DescriptorPool(VALUE value);
194
- VALUE DescriptorPool_add(VALUE _self, VALUE def);
195
201
  VALUE DescriptorPool_build(int argc, VALUE* argv, VALUE _self);
196
202
  VALUE DescriptorPool_lookup(VALUE _self, VALUE name);
197
203
  VALUE DescriptorPool_generated_pool(VALUE _self);
@@ -203,13 +209,11 @@ void Descriptor_free(void* _self);
203
209
  VALUE Descriptor_alloc(VALUE klass);
204
210
  void Descriptor_register(VALUE module);
205
211
  Descriptor* ruby_to_Descriptor(VALUE value);
206
- VALUE Descriptor_initialize(VALUE _self, VALUE file_descriptor_rb);
212
+ VALUE Descriptor_initialize(VALUE _self, VALUE cookie, VALUE descriptor_pool,
213
+ VALUE ptr);
207
214
  VALUE Descriptor_name(VALUE _self);
208
- VALUE Descriptor_name_set(VALUE _self, VALUE str);
209
215
  VALUE Descriptor_each(VALUE _self);
210
216
  VALUE Descriptor_lookup(VALUE _self, VALUE name);
211
- VALUE Descriptor_add_field(VALUE _self, VALUE obj);
212
- VALUE Descriptor_add_oneof(VALUE _self, VALUE obj);
213
217
  VALUE Descriptor_each_oneof(VALUE _self);
214
218
  VALUE Descriptor_lookup_oneof(VALUE _self, VALUE name);
215
219
  VALUE Descriptor_msgclass(VALUE _self);
@@ -221,28 +225,24 @@ void FileDescriptor_free(void* _self);
221
225
  VALUE FileDescriptor_alloc(VALUE klass);
222
226
  void FileDescriptor_register(VALUE module);
223
227
  FileDescriptor* ruby_to_FileDescriptor(VALUE value);
224
- VALUE FileDescriptor_initialize(int argc, VALUE* argv, VALUE _self);
228
+ VALUE FileDescriptor_initialize(VALUE _self, VALUE cookie,
229
+ VALUE descriptor_pool, VALUE ptr);
225
230
  VALUE FileDescriptor_name(VALUE _self);
226
231
  VALUE FileDescriptor_syntax(VALUE _self);
227
- VALUE FileDescriptor_syntax_set(VALUE _self, VALUE syntax);
228
232
 
229
233
  void FieldDescriptor_mark(void* _self);
230
234
  void FieldDescriptor_free(void* _self);
231
235
  VALUE FieldDescriptor_alloc(VALUE klass);
232
236
  void FieldDescriptor_register(VALUE module);
233
237
  FieldDescriptor* ruby_to_FieldDescriptor(VALUE value);
238
+ VALUE FieldDescriptor_initialize(VALUE _self, VALUE cookie,
239
+ VALUE descriptor_pool, VALUE ptr);
234
240
  VALUE FieldDescriptor_name(VALUE _self);
235
- VALUE FieldDescriptor_name_set(VALUE _self, VALUE str);
236
241
  VALUE FieldDescriptor_type(VALUE _self);
237
- VALUE FieldDescriptor_type_set(VALUE _self, VALUE type);
238
242
  VALUE FieldDescriptor_default(VALUE _self);
239
- VALUE FieldDescriptor_default_set(VALUE _self, VALUE default_value);
240
243
  VALUE FieldDescriptor_label(VALUE _self);
241
- VALUE FieldDescriptor_label_set(VALUE _self, VALUE label);
242
244
  VALUE FieldDescriptor_number(VALUE _self);
243
- VALUE FieldDescriptor_number_set(VALUE _self, VALUE number);
244
245
  VALUE FieldDescriptor_submsg_name(VALUE _self);
245
- VALUE FieldDescriptor_submsg_name_set(VALUE _self, VALUE value);
246
246
  VALUE FieldDescriptor_subtype(VALUE _self);
247
247
  VALUE FieldDescriptor_has(VALUE _self, VALUE msg_rb);
248
248
  VALUE FieldDescriptor_clear(VALUE _self, VALUE msg_rb);
@@ -256,21 +256,20 @@ void OneofDescriptor_free(void* _self);
256
256
  VALUE OneofDescriptor_alloc(VALUE klass);
257
257
  void OneofDescriptor_register(VALUE module);
258
258
  OneofDescriptor* ruby_to_OneofDescriptor(VALUE value);
259
+ VALUE OneofDescriptor_initialize(VALUE _self, VALUE cookie,
260
+ VALUE descriptor_pool, VALUE ptr);
259
261
  VALUE OneofDescriptor_name(VALUE _self);
260
- VALUE OneofDescriptor_name_set(VALUE _self, VALUE value);
261
- VALUE OneofDescriptor_add_field(VALUE _self, VALUE field);
262
- VALUE OneofDescriptor_each(VALUE _self, VALUE field);
262
+ VALUE OneofDescriptor_each(VALUE _self);
263
263
 
264
264
  void EnumDescriptor_mark(void* _self);
265
265
  void EnumDescriptor_free(void* _self);
266
266
  VALUE EnumDescriptor_alloc(VALUE klass);
267
+ VALUE EnumDescriptor_initialize(VALUE _self, VALUE cookie,
268
+ VALUE descriptor_pool, VALUE ptr);
267
269
  void EnumDescriptor_register(VALUE module);
268
270
  EnumDescriptor* ruby_to_EnumDescriptor(VALUE value);
269
- VALUE EnumDescriptor_initialize(VALUE _self, VALUE file_descriptor_rb);
270
271
  VALUE EnumDescriptor_file_descriptor(VALUE _self);
271
272
  VALUE EnumDescriptor_name(VALUE _self);
272
- VALUE EnumDescriptor_name_set(VALUE _self, VALUE str);
273
- VALUE EnumDescriptor_add_value(VALUE _self, VALUE name, VALUE number);
274
273
  VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name);
275
274
  VALUE EnumDescriptor_lookup_value(VALUE _self, VALUE number);
276
275
  VALUE EnumDescriptor_each(VALUE _self);
@@ -283,9 +282,10 @@ VALUE MessageBuilderContext_alloc(VALUE klass);
283
282
  void MessageBuilderContext_register(VALUE module);
284
283
  MessageBuilderContext* ruby_to_MessageBuilderContext(VALUE value);
285
284
  VALUE MessageBuilderContext_initialize(VALUE _self,
286
- VALUE descriptor,
287
- VALUE builder);
285
+ VALUE _file_builder,
286
+ VALUE name);
288
287
  VALUE MessageBuilderContext_optional(int argc, VALUE* argv, VALUE _self);
288
+ VALUE MessageBuilderContext_proto3_optional(int argc, VALUE* argv, VALUE _self);
289
289
  VALUE MessageBuilderContext_required(int argc, VALUE* argv, VALUE _self);
290
290
  VALUE MessageBuilderContext_repeated(int argc, VALUE* argv, VALUE _self);
291
291
  VALUE MessageBuilderContext_map(int argc, VALUE* argv, VALUE _self);
@@ -306,15 +306,20 @@ void EnumBuilderContext_free(void* _self);
306
306
  VALUE EnumBuilderContext_alloc(VALUE klass);
307
307
  void EnumBuilderContext_register(VALUE module);
308
308
  EnumBuilderContext* ruby_to_EnumBuilderContext(VALUE value);
309
- VALUE EnumBuilderContext_initialize(VALUE _self, VALUE enumdesc);
309
+ VALUE EnumBuilderContext_initialize(VALUE _self, VALUE _file_builder,
310
+ VALUE name);
310
311
  VALUE EnumBuilderContext_value(VALUE _self, VALUE name, VALUE number);
311
312
 
312
313
  void FileBuilderContext_mark(void* _self);
313
314
  void FileBuilderContext_free(void* _self);
314
315
  VALUE FileBuilderContext_alloc(VALUE klass);
315
316
  void FileBuilderContext_register(VALUE module);
316
- VALUE FileBuilderContext_initialize(VALUE _self, VALUE file_descriptor,
317
- VALUE builder);
317
+ FileBuilderContext* ruby_to_FileBuilderContext(VALUE _self);
318
+ upb_strview FileBuilderContext_strdup(VALUE _self, VALUE rb_str);
319
+ upb_strview FileBuilderContext_strdup_name(VALUE _self, VALUE rb_str);
320
+ upb_strview FileBuilderContext_strdup_sym(VALUE _self, VALUE rb_sym);
321
+ VALUE FileBuilderContext_initialize(VALUE _self, VALUE descriptor_pool,
322
+ VALUE name, VALUE options);
318
323
  VALUE FileBuilderContext_add_message(VALUE _self, VALUE name);
319
324
  VALUE FileBuilderContext_add_enum(VALUE _self, VALUE name);
320
325
  VALUE FileBuilderContext_pending_descriptors(VALUE _self);
@@ -324,7 +329,8 @@ void Builder_free(void* _self);
324
329
  VALUE Builder_alloc(VALUE klass);
325
330
  void Builder_register(VALUE module);
326
331
  Builder* ruby_to_Builder(VALUE value);
327
- VALUE Builder_initialize(VALUE _self);
332
+ VALUE Builder_build(VALUE _self);
333
+ VALUE Builder_initialize(VALUE _self, VALUE descriptor_pool);
328
334
  VALUE Builder_add_file(int argc, VALUE *argv, VALUE _self);
329
335
  VALUE Builder_add_message(VALUE _self, VALUE name);
330
336
  VALUE Builder_add_enum(VALUE _self, VALUE name);
@@ -358,17 +364,22 @@ VALUE native_slot_get(upb_fieldtype_t type,
358
364
  void native_slot_init(upb_fieldtype_t type, void* memory);
359
365
  void native_slot_mark(upb_fieldtype_t type, void* memory);
360
366
  void native_slot_dup(upb_fieldtype_t type, void* to, void* from);
361
- void native_slot_deep_copy(upb_fieldtype_t type, void* to, void* from);
362
- bool native_slot_eq(upb_fieldtype_t type, void* mem1, void* mem2);
367
+ void native_slot_deep_copy(upb_fieldtype_t type, VALUE type_class, void* to,
368
+ void* from);
369
+ bool native_slot_eq(upb_fieldtype_t type, VALUE type_class, void* mem1,
370
+ void* mem2);
363
371
 
364
372
  VALUE native_slot_encode_and_freeze_string(upb_fieldtype_t type, VALUE value);
365
373
  void native_slot_check_int_range_precision(const char* name, upb_fieldtype_t type, VALUE value);
374
+ uint32_t slot_read_oneof_case(MessageLayout* layout, const void* storage,
375
+ const upb_oneofdef* oneof);
376
+ bool is_value_field(const upb_fielddef* f);
366
377
 
367
378
  extern rb_encoding* kRubyStringUtf8Encoding;
368
379
  extern rb_encoding* kRubyStringASCIIEncoding;
369
380
  extern rb_encoding* kRubyString8bitEncoding;
370
381
 
371
- VALUE field_type_class(const upb_fielddef* field);
382
+ VALUE field_type_class(const MessageLayout* layout, const upb_fielddef* field);
372
383
 
373
384
  #define MAP_KEY_FIELD 1
374
385
  #define MAP_VALUE_FIELD 2
@@ -411,6 +422,7 @@ extern VALUE cRepeatedField;
411
422
 
412
423
  RepeatedField* ruby_to_RepeatedField(VALUE value);
413
424
 
425
+ VALUE RepeatedField_new_this_type(VALUE _self);
414
426
  VALUE RepeatedField_each(VALUE _self);
415
427
  VALUE RepeatedField_index(int argc, VALUE* argv, VALUE _self);
416
428
  void* RepeatedField_index_native(VALUE _self, int index);
@@ -459,6 +471,7 @@ extern VALUE cMap;
459
471
 
460
472
  Map* ruby_to_Map(VALUE value);
461
473
 
474
+ VALUE Map_new_this_type(VALUE _self);
462
475
  VALUE Map_each(VALUE _self);
463
476
  VALUE Map_keys(VALUE _self);
464
477
  VALUE Map_values(VALUE _self);
@@ -492,22 +505,35 @@ VALUE Map_iter_value(Map_iter* iter);
492
505
  // Message layout / storage.
493
506
  // -----------------------------------------------------------------------------
494
507
 
495
- #define MESSAGE_FIELD_NO_CASE ((size_t)-1)
496
- #define MESSAGE_FIELD_NO_HASBIT ((size_t)-1)
508
+ #define MESSAGE_FIELD_NO_HASBIT ((uint32_t)-1)
497
509
 
498
510
  struct MessageField {
499
- size_t offset;
500
- size_t case_offset; // for oneofs, a uint32. Else, MESSAGE_FIELD_NO_CASE.
501
- size_t hasbit;
511
+ uint32_t offset;
512
+ uint32_t hasbit;
502
513
  };
503
514
 
515
+ struct MessageOneof {
516
+ uint32_t offset;
517
+ uint32_t case_offset;
518
+ };
519
+
520
+ // MessageLayout is owned by the enclosing Descriptor, which must outlive us.
504
521
  struct MessageLayout {
522
+ const Descriptor* desc;
505
523
  const upb_msgdef* msgdef;
524
+ void* empty_template; // Can memcpy() onto a layout to clear it.
506
525
  MessageField* fields;
507
- size_t size;
526
+ MessageOneof* oneofs;
527
+ uint32_t size;
528
+ uint32_t value_offset;
529
+ int value_count;
530
+ int repeated_count;
531
+ int map_count;
508
532
  };
509
533
 
510
- MessageLayout* create_layout(const upb_msgdef* msgdef);
534
+ #define ONEOF_CASE_MASK 0x80000000
535
+
536
+ void create_layout(Descriptor* desc);
511
537
  void free_layout(MessageLayout* layout);
512
538
  bool field_contains_hasbit(MessageLayout* layout,
513
539
  const upb_fielddef* field);
@@ -533,6 +559,9 @@ VALUE layout_eq(MessageLayout* layout, void* msg1, void* msg2);
533
559
  VALUE layout_hash(MessageLayout* layout, void* storage);
534
560
  VALUE layout_inspect(MessageLayout* layout, void* storage);
535
561
 
562
+ bool is_wrapper_type_field(const upb_fielddef* field);
563
+ VALUE ruby_wrapper_type(VALUE type_class, VALUE value);
564
+
536
565
  // -----------------------------------------------------------------------------
537
566
  // Message class creation.
538
567
  // -----------------------------------------------------------------------------
@@ -556,7 +585,7 @@ struct MessageHeader {
556
585
 
557
586
  extern rb_data_type_t Message_type;
558
587
 
559
- VALUE build_class_from_descriptor(Descriptor* descriptor);
588
+ VALUE build_class_from_descriptor(VALUE descriptor);
560
589
  void* Message_data(void* msg);
561
590
  void Message_mark(void* self);
562
591
  void Message_free(void* self);
@@ -580,23 +609,33 @@ VALUE Message_encode_json(int argc, VALUE* argv, VALUE klass);
580
609
  VALUE Google_Protobuf_discard_unknown(VALUE self, VALUE msg_rb);
581
610
  VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj);
582
611
 
583
- VALUE build_module_from_enumdesc(EnumDescriptor* enumdef);
612
+ VALUE build_module_from_enumdesc(VALUE _enumdesc);
584
613
  VALUE enum_lookup(VALUE self, VALUE number);
585
614
  VALUE enum_resolve(VALUE self, VALUE sym);
615
+ VALUE enum_descriptor(VALUE self);
586
616
 
587
617
  const upb_pbdecodermethod *new_fillmsg_decodermethod(
588
618
  Descriptor* descriptor, const void *owner);
619
+ void add_handlers_for_message(const void *closure, upb_handlers *h);
589
620
 
590
621
  // Maximum depth allowed during encoding, to avoid stack overflows due to
591
622
  // cycles.
592
623
  #define ENCODE_MAX_NESTING 63
593
624
 
625
+ // -----------------------------------------------------------------------------
626
+ // A cache of frozen string objects to use as field defaults.
627
+ // -----------------------------------------------------------------------------
628
+ VALUE get_frozen_string(const char* data, size_t size, bool binary);
629
+
594
630
  // -----------------------------------------------------------------------------
595
631
  // Global map from upb {msg,enum}defs to wrapper Descriptor/EnumDescriptor
596
632
  // instances.
597
633
  // -----------------------------------------------------------------------------
598
- void add_def_obj(const void* def, VALUE value);
599
- VALUE get_def_obj(const void* def);
634
+ VALUE get_msgdef_obj(VALUE descriptor_pool, const upb_msgdef* def);
635
+ VALUE get_enumdef_obj(VALUE descriptor_pool, const upb_enumdef* def);
636
+ VALUE get_fielddef_obj(VALUE descriptor_pool, const upb_fielddef* def);
637
+ VALUE get_filedef_obj(VALUE descriptor_pool, const upb_filedef* def);
638
+ VALUE get_oneofdef_obj(VALUE descriptor_pool, const upb_oneofdef* def);
600
639
 
601
640
  // -----------------------------------------------------------------------------
602
641
  // Utilities.
@@ -612,4 +651,17 @@ void check_upb_status(const upb_status* status, const char* msg);
612
651
 
613
652
  extern ID descriptor_instancevar_interned;
614
653
 
654
+ // A distinct object that is not accessible from Ruby. We use this as a
655
+ // constructor argument to enforce that certain objects cannot be created from
656
+ // Ruby.
657
+ extern VALUE c_only_cookie;
658
+
659
+ #ifdef NDEBUG
660
+ #define UPB_ASSERT(expr) do {} while (false && (expr))
661
+ #else
662
+ #define UPB_ASSERT(expr) assert(expr)
663
+ #endif
664
+
665
+ #define UPB_UNUSED(var) (void)var
666
+
615
667
  #endif // __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
@@ -64,10 +64,11 @@ VALUE RepeatedField_subarray(VALUE _self, long beg, long len) {
64
64
  int element_size = native_slot_size(self->field_type);
65
65
  upb_fieldtype_t field_type = self->field_type;
66
66
  VALUE field_type_class = self->field_type_class;
67
-
68
67
  size_t off = beg * element_size;
69
68
  VALUE ary = rb_ary_new2(len);
70
- for (int i = beg; i < beg + len; i++, off += element_size) {
69
+ int i;
70
+
71
+ for (i = beg; i < beg + len; i++, off += element_size) {
71
72
  void* mem = ((uint8_t *)self->elements) + off;
72
73
  VALUE elem = native_slot_get(field_type, field_type_class, mem);
73
74
  rb_ary_push(ary, elem);
@@ -88,9 +89,10 @@ VALUE RepeatedField_each(VALUE _self) {
88
89
  upb_fieldtype_t field_type = self->field_type;
89
90
  VALUE field_type_class = self->field_type_class;
90
91
  int element_size = native_slot_size(field_type);
91
-
92
92
  size_t off = 0;
93
- for (int i = 0; i < self->size; i++, off += element_size) {
93
+ int i;
94
+
95
+ for (i = 0; i < self->size; i++, off += element_size) {
94
96
  void* memory = (void *) (((uint8_t *)self->elements) + off);
95
97
  VALUE val = native_slot_get(field_type, field_type_class, memory);
96
98
  rb_yield(val);
@@ -169,8 +171,10 @@ VALUE RepeatedField_index_set(VALUE _self, VALUE _index, VALUE val) {
169
171
  if (index >= self->size) {
170
172
  upb_fieldtype_t field_type = self->field_type;
171
173
  int element_size = native_slot_size(field_type);
174
+ int i;
175
+
172
176
  RepeatedField_reserve(self, index + 1);
173
- for (int i = self->size; i <= index; i++) {
177
+ for (i = self->size; i <= index; i++) {
174
178
  void* elem = RepeatedField_memoryat(self, i, element_size);
175
179
  native_slot_init(field_type, elem);
176
180
  }
@@ -224,7 +228,8 @@ VALUE RepeatedField_push(VALUE _self, VALUE val) {
224
228
  }
225
229
 
226
230
  VALUE RepeatedField_push_vararg(VALUE _self, VALUE args) {
227
- for (int i = 0; i < RARRAY_LEN(args); i++) {
231
+ int i;
232
+ for (i = 0; i < RARRAY_LEN(args); i++) {
228
233
  RepeatedField_push(_self, rb_ary_entry(args, i));
229
234
  }
230
235
  return _self;
@@ -285,9 +290,11 @@ VALUE RepeatedField_pop_one(VALUE _self) {
285
290
  */
286
291
  VALUE RepeatedField_replace(VALUE _self, VALUE list) {
287
292
  RepeatedField* self = ruby_to_RepeatedField(_self);
293
+ int i;
294
+
288
295
  Check_Type(list, T_ARRAY);
289
296
  self->size = 0;
290
- for (int i = 0; i < RARRAY_LEN(list); i++) {
297
+ for (i = 0; i < RARRAY_LEN(list); i++) {
291
298
  RepeatedField_push(_self, rb_ary_entry(list, i));
292
299
  }
293
300
  return list;
@@ -316,7 +323,7 @@ VALUE RepeatedField_length(VALUE _self) {
316
323
  return INT2NUM(self->size);
317
324
  }
318
325
 
319
- static VALUE RepeatedField_new_this_type(VALUE _self) {
326
+ VALUE RepeatedField_new_this_type(VALUE _self) {
320
327
  RepeatedField* self = ruby_to_RepeatedField(_self);
321
328
  VALUE new_rptfield = Qnil;
322
329
  VALUE element_type = fieldtype_to_ruby(self->field_type);
@@ -344,8 +351,10 @@ VALUE RepeatedField_dup(VALUE _self) {
344
351
  upb_fieldtype_t field_type = self->field_type;
345
352
  size_t elem_size = native_slot_size(field_type);
346
353
  size_t off = 0;
354
+ int i;
355
+
347
356
  RepeatedField_reserve(new_rptfield_self, self->size);
348
- for (int i = 0; i < self->size; i++, off += elem_size) {
357
+ for (i = 0; i < self->size; i++, off += elem_size) {
349
358
  void* to_mem = (uint8_t *)new_rptfield_self->elements + off;
350
359
  void* from_mem = (uint8_t *)self->elements + off;
351
360
  native_slot_dup(field_type, to_mem, from_mem);
@@ -363,11 +372,13 @@ VALUE RepeatedField_deep_copy(VALUE _self) {
363
372
  upb_fieldtype_t field_type = self->field_type;
364
373
  size_t elem_size = native_slot_size(field_type);
365
374
  size_t off = 0;
375
+ int i;
376
+
366
377
  RepeatedField_reserve(new_rptfield_self, self->size);
367
- for (int i = 0; i < self->size; i++, off += elem_size) {
378
+ for (i = 0; i < self->size; i++, off += elem_size) {
368
379
  void* to_mem = (uint8_t *)new_rptfield_self->elements + off;
369
380
  void* from_mem = (uint8_t *)self->elements + off;
370
- native_slot_deep_copy(field_type, to_mem, from_mem);
381
+ native_slot_deep_copy(field_type, self->field_type_class, to_mem, from_mem);
371
382
  new_rptfield_self->size++;
372
383
  }
373
384
 
@@ -384,11 +395,12 @@ VALUE RepeatedField_deep_copy(VALUE _self) {
384
395
  VALUE RepeatedField_to_ary(VALUE _self) {
385
396
  RepeatedField* self = ruby_to_RepeatedField(_self);
386
397
  upb_fieldtype_t field_type = self->field_type;
387
-
388
398
  size_t elem_size = native_slot_size(field_type);
389
399
  size_t off = 0;
390
400
  VALUE ary = rb_ary_new2(self->size);
391
- for (int i = 0; i < self->size; i++, off += elem_size) {
401
+ int i;
402
+
403
+ for (i = 0; i < self->size; i++, off += elem_size) {
392
404
  void* mem = ((uint8_t *)self->elements) + off;
393
405
  VALUE elem = native_slot_get(field_type, self->field_type_class, mem);
394
406
  rb_ary_push(ary, elem);
@@ -434,10 +446,13 @@ VALUE RepeatedField_eq(VALUE _self, VALUE _other) {
434
446
  upb_fieldtype_t field_type = self->field_type;
435
447
  size_t elem_size = native_slot_size(field_type);
436
448
  size_t off = 0;
437
- for (int i = 0; i < self->size; i++, off += elem_size) {
449
+ int i;
450
+
451
+ for (i = 0; i < self->size; i++, off += elem_size) {
438
452
  void* self_mem = ((uint8_t *)self->elements) + off;
439
453
  void* other_mem = ((uint8_t *)other->elements) + off;
440
- if (!native_slot_eq(field_type, self_mem, other_mem)) {
454
+ if (!native_slot_eq(field_type, self->field_type_class, self_mem,
455
+ other_mem)) {
441
456
  return Qfalse;
442
457
  }
443
458
  }
@@ -459,7 +474,9 @@ VALUE RepeatedField_hash(VALUE _self) {
459
474
  VALUE field_type_class = self->field_type_class;
460
475
  size_t elem_size = native_slot_size(field_type);
461
476
  size_t off = 0;
462
- for (int i = 0; i < self->size; i++, off += elem_size) {
477
+ int i;
478
+
479
+ for (i = 0; i < self->size; i++, off += elem_size) {
463
480
  void* mem = ((uint8_t *)self->elements) + off;
464
481
  VALUE elem = native_slot_get(field_type, field_type_class, mem);
465
482
  h = rb_hash_uint(h, NUM2LONG(rb_funcall(elem, hash_sym, 0)));
@@ -481,7 +498,8 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
481
498
  VALUE dupped = RepeatedField_dup(_self);
482
499
 
483
500
  if (TYPE(list) == T_ARRAY) {
484
- for (int i = 0; i < RARRAY_LEN(list); i++) {
501
+ int i;
502
+ for (i = 0; i < RARRAY_LEN(list); i++) {
485
503
  VALUE elem = rb_ary_entry(list, i);
486
504
  RepeatedField_push(dupped, elem);
487
505
  }
@@ -489,12 +507,14 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
489
507
  RTYPEDDATA_TYPE(list) == &RepeatedField_type) {
490
508
  RepeatedField* self = ruby_to_RepeatedField(_self);
491
509
  RepeatedField* list_rptfield = ruby_to_RepeatedField(list);
510
+ int i;
511
+
492
512
  if (self->field_type != list_rptfield->field_type ||
493
513
  self->field_type_class != list_rptfield->field_type_class) {
494
514
  rb_raise(rb_eArgError,
495
515
  "Attempt to append RepeatedField with different element type.");
496
516
  }
497
- for (int i = 0; i < list_rptfield->size; i++) {
517
+ for (i = 0; i < list_rptfield->size; i++) {
498
518
  void* mem = RepeatedField_index_native(list, i);
499
519
  RepeatedField_push_native(dupped, mem);
500
520
  }
@@ -512,8 +532,10 @@ VALUE RepeatedField_plus(VALUE _self, VALUE list) {
512
532
  * concats the passed in array to self. Returns a Ruby array.
513
533
  */
514
534
  VALUE RepeatedField_concat(VALUE _self, VALUE list) {
535
+ int i;
536
+
515
537
  Check_Type(list, T_ARRAY);
516
- for (int i = 0; i < RARRAY_LEN(list); i++) {
538
+ for (i = 0; i < RARRAY_LEN(list); i++) {
517
539
  RepeatedField_push(_self, rb_ary_entry(list, i));
518
540
  }
519
541
  return _self;
@@ -574,10 +596,12 @@ void RepeatedField_init_args(int argc, VALUE* argv,
574
596
  }
575
597
 
576
598
  if (ary != Qnil) {
599
+ int i;
600
+
577
601
  if (!RB_TYPE_P(ary, T_ARRAY)) {
578
602
  rb_raise(rb_eArgError, "Expected array as initialize argument");
579
603
  }
580
- for (int i = 0; i < RARRAY_LEN(ary); i++) {
604
+ for (i = 0; i < RARRAY_LEN(ary); i++) {
581
605
  RepeatedField_push(_self, rb_ary_entry(ary, i));
582
606
  }
583
607
  }
@@ -589,8 +613,10 @@ void RepeatedField_mark(void* _self) {
589
613
  RepeatedField* self = (RepeatedField*)_self;
590
614
  upb_fieldtype_t field_type = self->field_type;
591
615
  int element_size = native_slot_size(field_type);
616
+ int i;
617
+
592
618
  rb_gc_mark(self->field_type_class);
593
- for (int i = 0; i < self->size; i++) {
619
+ for (i = 0; i < self->size; i++) {
594
620
  void* memory = (((uint8_t *)self->elements) + i * element_size);
595
621
  native_slot_mark(self->field_type, memory);
596
622
  }