google-protobuf 3.23.4-aarch64-linux → 3.24.0-aarch64-linux

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of google-protobuf was flagged as potentially problematic.

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: df503ccc629dd0c0f3857f64d6030cf1fb811cd447bae2c95d4e089149e98a7f
-  data.tar.gz: 2c24c94b2de6f6a224f41da1e09877d3b5db76d998b779f83d6fb427a5e3d1dd
+  metadata.gz: 1f40992d2395597e0a01961c0d4c774260b62e6d9f6cefeb5ad37b16682c7f52
+  data.tar.gz: 0cb0d1a1afae6c85598c3d774a9dba8587c2c62852d72501058d8966463b26d8
 SHA512:
-  metadata.gz: e0646febd544906a8e9ced3c708a98293ca8a989162044033ed756251a548705d5161ee4d812b872e17feda5fc9ffb86b7d01982669d1c9dadd4463d488d3071
-  data.tar.gz: 8626286a05a40f1cb30844428449af0f7e593243a04e85109de05924371b62d56f633c9abd91f1d8649a7ceae5c248c5c802acd166a183fd7f195a6a7c0b58b6
+  metadata.gz: 9df3257f567180569e5a45e9db856944485abbfc622bb40d5458631d114da585c0358565931082b11f201b22df39c3ed0b56d11a58405dd347d13e78b789e2ae
+  data.tar.gz: 28699811726ea9a397da62460fe2c4e28577082be607b641e55fc0bfd5766f8591afb98508937f4fc8263a3fd1f872d66de371a2d4eaee76503f7861a325cefa

ext/google/protobuf_c/convert.h CHANGED
@@ -31,8 +31,6 @@
 #ifndef RUBY_PROTOBUF_CONVERT_H_
 #define RUBY_PROTOBUF_CONVERT_H_
 
-#include <ruby/ruby.h>
-
 #include "protobuf.h"
 #include "ruby-upb.h"
 
ext/google/protobuf_c/defs.c CHANGED
@@ -73,6 +73,8 @@ static VALUE rb_str_maybe_null(const char* s) {
 // -----------------------------------------------------------------------------
 
 typedef struct {
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE def_to_descriptor;  // Hash table of def* -> Ruby descriptor.
   upb_DefPool* symtab;
 } DescriptorPool;
@@ -97,7 +99,7 @@ static void DescriptorPool_free(void* _self) {
 static const rb_data_type_t DescriptorPool_type = {
     "Google::Protobuf::DescriptorPool",
     {DescriptorPool_mark, DescriptorPool_free, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static DescriptorPool* ruby_to_DescriptorPool(VALUE val) {
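
The two hunks above work together: declaring the type RUBY_TYPED_WB_PROTECTED promises Ruby's generational GC that every VALUE stored into the wrapped struct goes through a write barrier, which is why the struct comment insists on RB_OBJ_WRITE(). A minimal sketch of the pattern, using a hypothetical Wrapper type rather than anything from this gem:

    #include <ruby.h>

    typedef struct {
      VALUE child;  // Ruby object referenced from C-managed memory.
    } Wrapper;

    static void Wrapper_mark(void* self) { rb_gc_mark(((Wrapper*)self)->child); }

    static const rb_data_type_t Wrapper_type = {
        "Wrapper",
        {Wrapper_mark, RUBY_DEFAULT_FREE, NULL},
        .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
    };

    static void Wrapper_set_child(VALUE obj, VALUE child) {
      Wrapper* self;
      TypedData_Get_Struct(obj, Wrapper, &Wrapper_type, self);
      // Correct for a WB_PROTECTED object; a bare "self->child = child;"
      // would skip the write barrier and risk a prematurely-collected child.
      RB_OBJ_WRITE(obj, &self->child, child);
    }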
@@ -125,11 +127,9 @@ static VALUE DescriptorPool_alloc(VALUE klass) {
   self->def_to_descriptor = Qnil;
   ret = TypedData_Wrap_Struct(klass, &DescriptorPool_type, self);
 
-  self->def_to_descriptor = rb_hash_new();
+  RB_OBJ_WRITE(ret, &self->def_to_descriptor, rb_hash_new());
   self->symtab = upb_DefPool_New();
-  ObjectCache_Add(self->symtab, ret);
-
-  return ret;
+  return ObjectCache_TryAdd(self->symtab, ret);
 }
 
 /*
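
Note the shape of the new API: the allocator returns whatever ObjectCache_TryAdd returns, so if two threads race to wrap the same upb_DefPool, both converge on the single wrapper that won the insertion. The resulting get-or-add idiom, sketched with a hypothetical helper (AllocWrapper is illustrative, not from the gem):

    VALUE GetOrCreateWrapper(const void* ptr) {
      VALUE val = ObjectCache_Get(ptr);      // Qnil when nothing is cached yet
      if (val == Qnil) {
        val = AllocWrapper(ptr);             // hypothetical allocation helper
        val = ObjectCache_TryAdd(ptr, val);  // returns the cached winner,
      }                                      // which may not be ours
      return val;
    }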
@@ -601,7 +601,7 @@ upb_CType ruby_to_fieldtype(VALUE type) {
 
 #define CONVERT(upb, ruby)                \
   if (SYM2ID(type) == rb_intern(#ruby)) { \
-    return kUpb_CType_##upb; \
+    return kUpb_CType_##upb;              \
   }
 
   CONVERT(Float, float);
@@ -624,7 +624,7 @@ upb_CType ruby_to_fieldtype(VALUE type) {
 
 static VALUE descriptortype_to_ruby(upb_FieldType type) {
   switch (type) {
-#define CONVERT(upb, ruby) \
+#define CONVERT(upb, ruby)   \
   case kUpb_FieldType_##upb: \
     return ID2SYM(rb_intern(#ruby));
     CONVERT(Float, float);
@@ -709,7 +709,7 @@ static VALUE FieldDescriptor_label(VALUE _self) {
   FieldDescriptor* self = ruby_to_FieldDescriptor(_self);
   switch (upb_FieldDef_Label(self->fielddef)) {
 #define CONVERT(upb, ruby) \
-  case kUpb_Label_##upb: \
+  case kUpb_Label_##upb:   \
     return ID2SYM(rb_intern(#ruby));
 
     CONVERT(Optional, optional);
@@ -1091,7 +1091,7 @@ static VALUE EnumDescriptor_name(VALUE _self) {
 static VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name) {
   EnumDescriptor* self = ruby_to_EnumDescriptor(_self);
   const char* name_str = rb_id2name(SYM2ID(name));
-  const upb_EnumValueDef *ev =
+  const upb_EnumValueDef* ev =
       upb_EnumDef_FindValueByName(self->enumdef, name_str);
   if (ev) {
     return INT2NUM(upb_EnumValueDef_Number(ev));
@@ -1110,7 +1110,8 @@ static VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name) {
 static VALUE EnumDescriptor_lookup_value(VALUE _self, VALUE number) {
   EnumDescriptor* self = ruby_to_EnumDescriptor(_self);
   int32_t val = NUM2INT(number);
-  const upb_EnumValueDef* ev = upb_EnumDef_FindValueByNumber(self->enumdef, val);
+  const upb_EnumValueDef* ev =
+      upb_EnumDef_FindValueByNumber(self->enumdef, val);
   if (ev) {
     return ID2SYM(rb_intern(upb_EnumValueDef_Name(ev)));
   } else {

ext/google/protobuf_c/defs.h CHANGED
@@ -31,8 +31,6 @@
 #ifndef RUBY_PROTOBUF_DEFS_H_
 #define RUBY_PROTOBUF_DEFS_H_
 
-#include <ruby/ruby.h>
-
 #include "protobuf.h"
 #include "ruby-upb.h"
 
ext/google/protobuf_c/map.c CHANGED
@@ -93,7 +93,6 @@ VALUE Map_GetRubyWrapper(upb_Map* map, upb_CType key_type, TypeInfo value_type,
   if (val == Qnil) {
     val = Map_alloc(cMap);
     Map* self;
-    ObjectCache_Add(map, val);
     TypedData_Get_Struct(val, Map, &Map_type, self);
     self->map = map;
     self->arena = arena;
@@ -103,6 +102,7 @@ VALUE Map_GetRubyWrapper(upb_Map* map, upb_CType key_type, TypeInfo value_type,
       const upb_MessageDef* val_m = self->value_type_info.def.msgdef;
       self->value_type_class = Descriptor_DefToClass(val_m);
     }
+    return ObjectCache_TryAdd(map, val);
   }
 
   return val;
@@ -319,7 +319,9 @@ static VALUE Map_init(int argc, VALUE* argv, VALUE _self) {
 
   self->map = upb_Map_New(Arena_get(self->arena), self->key_type,
                           self->value_type_info.type);
-  ObjectCache_Add(self->map, _self);
+  VALUE stored = ObjectCache_TryAdd(self->map, _self);
+  (void)stored;
+  PBRUBY_ASSERT(stored == _self);
 
   if (init_arg != Qnil) {
     Map_merge_into_self(_self, init_arg);
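
The "(void)stored;" line looks redundant but is deliberate: PBRUBY_ASSERT (redefined later in this diff) only aborts in debug builds, and the cast documents that the value is intentionally discarded in release builds. What the two lines reduce to, per the macro definitions shown further down:

    VALUE stored = ObjectCache_TryAdd(self->map, _self);
    (void)stored;
    // Debug builds:   if (!(stored == _self))
    //                   rb_bug("Assertion failed at %s:%d, expr: %s", ...);
    // NDEBUG builds:  do { } while (false && (stored == _self));  // no-op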

ext/google/protobuf_c/map.h CHANGED
@@ -31,8 +31,6 @@
 #ifndef RUBY_PROTOBUF_MAP_H_
 #define RUBY_PROTOBUF_MAP_H_
 
-#include <ruby/ruby.h>
-
 #include "protobuf.h"
 #include "ruby-upb.h"
 
ext/google/protobuf_c/message.c CHANGED
@@ -108,7 +108,9 @@ void Message_InitPtr(VALUE self_, upb_Message* msg, VALUE arena) {
   Message* self = ruby_to_Message(self_);
   self->msg = msg;
   RB_OBJ_WRITE(self_, &self->arena, arena);
-  ObjectCache_Add(msg, self_);
+  VALUE stored = ObjectCache_TryAdd(msg, self_);
+  (void)stored;
+  PBRUBY_ASSERT(stored == self_);
 }
 
 VALUE Message_GetArena(VALUE msg_rb) {

ext/google/protobuf_c/message.h CHANGED
@@ -31,8 +31,6 @@
 #ifndef RUBY_PROTOBUF_MESSAGE_H_
 #define RUBY_PROTOBUF_MESSAGE_H_
 
-#include <ruby/ruby.h>
-
 #include "protobuf.h"
 #include "ruby-upb.h"
 
ext/google/protobuf_c/protobuf.c CHANGED
@@ -195,7 +195,8 @@ const rb_data_type_t Arena_type = {
     .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
-static void* ruby_upb_allocfunc(upb_alloc* alloc, void* ptr, size_t oldsize, size_t size) {
+static void *ruby_upb_allocfunc(upb_alloc *alloc, void *ptr, size_t oldsize,
+                                size_t size) {
   if (size == 0) {
     xfree(ptr);
     return NULL;
@@ -252,164 +253,40 @@ void Arena_register(VALUE module) {
 // Object Cache
 // -----------------------------------------------------------------------------
 
-// A pointer -> Ruby Object cache that keeps references to Ruby wrapper
-// objects. This allows us to look up any Ruby wrapper object by the address
-// of the object it is wrapping. That way we can avoid ever creating two
-// different wrapper objects for the same C object, which saves memory and
-// preserves object identity.
-//
-// We use WeakMap for the cache. For Ruby <2.7 we also need a secondary Hash
-// to store WeakMap keys because Ruby <2.7 WeakMap doesn't allow non-finalizable
-// keys.
-//
-// We also need the secondary Hash if sizeof(long) < sizeof(VALUE), because this
-// means it may not be possible to fit a pointer into a Fixnum. Keys are
-// pointers, and if they fit into a Fixnum, Ruby doesn't collect them, but if
-// they overflow and require allocating a Bignum, they could get collected
-// prematurely, thus removing the cache entry. This happens on 64-bit Windows,
-// on which pointers are 64 bits but longs are 32 bits. In this case, we enable
-// the secondary Hash to hold the keys and prevent them from being collected.
-
-#if RUBY_API_VERSION_CODE >= 20700 && SIZEOF_LONG >= SIZEOF_VALUE
-#define USE_SECONDARY_MAP 0
-#else
-#define USE_SECONDARY_MAP 1
-#endif
-
-#if USE_SECONDARY_MAP
-
-// Maps Numeric -> Object. The object is then used as a key into the WeakMap.
-// This is needed for Ruby <2.7 where a number cannot be a key to WeakMap.
-// The object is used only for its identity; it does not contain any data.
-VALUE secondary_map = Qnil;
-
-// Mutations to the map are under a mutex, because SeconaryMap_MaybeGC()
-// iterates over the map which cannot happen in parallel with insertions, or
-// Ruby will throw:
-//   can't add a new key into hash during iteration (RuntimeError)
-VALUE secondary_map_mutex = Qnil;
-
-// Lambda that will GC entries from the secondary map that are no longer present
-// in the primary map.
-VALUE gc_secondary_map_lambda = Qnil;
-ID length;
-
-extern VALUE weak_obj_cache;
-
-static void SecondaryMap_Init() {
-  rb_gc_register_address(&secondary_map);
-  rb_gc_register_address(&gc_secondary_map_lambda);
-  rb_gc_register_address(&secondary_map_mutex);
-  secondary_map = rb_hash_new();
-  gc_secondary_map_lambda = rb_eval_string(
-      "->(secondary, weak) {\n"
-      "  secondary.delete_if { |k, v| !weak.key?(v) }\n"
-      "}\n");
-  secondary_map_mutex = rb_mutex_new();
-  length = rb_intern("length");
-}
-
-// The secondary map is a regular Hash, and will never shrink on its own.
-// The main object cache is a WeakMap that will automatically remove entries
-// when the target object is no longer reachable, but unless we manually
-// remove the corresponding entries from the secondary map, it will grow
-// without bound.
-//
-// To avoid this unbounded growth we periodically remove entries from the
-// secondary map that are no longer present in the WeakMap. The logic of
-// how often to perform this GC is an artbirary tuning parameter that
-// represents a straightforward CPU/memory tradeoff.
-//
-// Requires: secondary_map_mutex is held.
-static void SecondaryMap_MaybeGC() {
-  PBRUBY_ASSERT(rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
-  size_t weak_len = NUM2ULL(rb_funcall(weak_obj_cache, length, 0));
-  size_t secondary_len = RHASH_SIZE(secondary_map);
-  if (secondary_len < weak_len) {
-    // Logically this case should not be possible: a valid entry cannot exist in
-    // the weak table unless there is a corresponding entry in the secondary
-    // table. It should *always* be the case that secondary_len >= weak_len.
-    //
-    // However ObjectSpace::WeakMap#length (and therefore weak_len) is
-    // unreliable: it overreports its true length by including non-live objects.
-    // However these non-live objects are not yielded in iteration, so we may
-    // have previously deleted them from the secondary map in a previous
-    // invocation of SecondaryMap_MaybeGC().
-    //
-    // In this case, we can't measure any waste, so we just return.
-    return;
-  }
-  size_t waste = secondary_len - weak_len;
-  // GC if we could remove at least 2000 entries or 20% of the table size
-  // (whichever is greater). Since the cost of the GC pass is O(N), we
-  // want to make sure that we condition this on overall table size, to
-  // avoid O(N^2) CPU costs.
-  size_t threshold = PBRUBY_MAX(secondary_len * 0.2, 2000);
-  if (waste > threshold) {
-    rb_funcall(gc_secondary_map_lambda, rb_intern("call"), 2, secondary_map,
-               weak_obj_cache);
-  }
-}
-
-// Requires: secondary_map_mutex is held by this thread iff create == true.
-static VALUE SecondaryMap_Get(VALUE key, bool create) {
-  PBRUBY_ASSERT(!create || rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
-  VALUE ret = rb_hash_lookup(secondary_map, key);
-  if (ret == Qnil && create) {
-    SecondaryMap_MaybeGC();
-    ret = rb_class_new_instance(0, NULL, rb_cObject);
-    rb_hash_aset(secondary_map, key, ret);
-  }
-  return ret;
-}
-
-#endif
-
-// Requires: secondary_map_mutex is held by this thread iff create == true.
-static VALUE ObjectCache_GetKey(const void *key, bool create) {
-  VALUE key_val = (VALUE)key;
-  PBRUBY_ASSERT((key_val & 3) == 0);
-  VALUE ret = LL2NUM(key_val >> 2);
-#if USE_SECONDARY_MAP
-  ret = SecondaryMap_Get(ret, create);
-#endif
-  return ret;
-}
-
 // Public ObjectCache API.
 
 VALUE weak_obj_cache = Qnil;
 ID item_get;
-ID item_set;
+ID item_try_add;
+
+static void ObjectCache_Init(VALUE protobuf) {
+  item_get = rb_intern("get");
+  item_try_add = rb_intern("try_add");
 
-static void ObjectCache_Init() {
   rb_gc_register_address(&weak_obj_cache);
-  VALUE klass = rb_eval_string("ObjectSpace::WeakMap");
-  weak_obj_cache = rb_class_new_instance(0, NULL, klass);
-  item_get = rb_intern("[]");
-  item_set = rb_intern("[]=");
-#if USE_SECONDARY_MAP
-  SecondaryMap_Init();
+#if SIZEOF_LONG >= SIZEOF_VALUE
+  VALUE cache_class = rb_const_get(protobuf, rb_intern("ObjectCache"));
+#else
+  VALUE cache_class = rb_const_get(protobuf, rb_intern("LegacyObjectCache"));
 #endif
+
+  weak_obj_cache = rb_class_new_instance(0, NULL, cache_class);
+  rb_const_set(protobuf, rb_intern("OBJECT_CACHE"), weak_obj_cache);
+  rb_const_set(protobuf, rb_intern("SIZEOF_LONG"), INT2NUM(SIZEOF_LONG));
+  rb_const_set(protobuf, rb_intern("SIZEOF_VALUE"), INT2NUM(SIZEOF_VALUE));
 }
 
-void ObjectCache_Add(const void *key, VALUE val) {
-  PBRUBY_ASSERT(ObjectCache_Get(key) == Qnil);
-#if USE_SECONDARY_MAP
-  rb_mutex_lock(secondary_map_mutex);
-#endif
-  VALUE key_rb = ObjectCache_GetKey(key, true);
-  rb_funcall(weak_obj_cache, item_set, 2, key_rb, val);
-#if USE_SECONDARY_MAP
-  rb_mutex_unlock(secondary_map_mutex);
-#endif
-  PBRUBY_ASSERT(ObjectCache_Get(key) == val);
+VALUE ObjectCache_TryAdd(const void *key, VALUE val) {
+  VALUE key_val = (VALUE)key;
+  PBRUBY_ASSERT((key_val & 3) == 0);
+  return rb_funcall(weak_obj_cache, item_try_add, 2, LL2NUM(key_val), val);
 }
 
 // Returns the cached object for this key, if any. Otherwise returns Qnil.
 VALUE ObjectCache_Get(const void *key) {
-  VALUE key_rb = ObjectCache_GetKey(key, false);
-  return rb_funcall(weak_obj_cache, item_get, 1, key_rb);
+  VALUE key_val = (VALUE)key;
+  PBRUBY_ASSERT((key_val & 3) == 0);
  return rb_funcall(weak_obj_cache, item_get, 1, LL2NUM(key_val));
 }
 
 /*
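
The rewrite drops the secondary map entirely: the cache is now keyed directly on the wrapped pointer converted to a Ruby Integer with LL2NUM, and the (key_val & 3) == 0 assertion leans on pointer alignment. The SIZEOF_LONG >= SIZEOF_VALUE test selects the Ruby-side ObjectCache class only when a pointer is guaranteed to fit in an immediate Fixnum, which (as the removed comment explained) Ruby never collects; otherwise LegacyObjectCache is used. An illustrative check of that reasoning, not part of the gem:

    #include <ruby.h>
    #include <stdio.h>

    static void show_key(const void* ptr) {
      VALUE key = LL2NUM((VALUE)ptr);  // same conversion ObjectCache_TryAdd uses
      printf("pointer %p -> %s\n", ptr,
             FIXNUM_P(key) ? "Fixnum (immediate, safe as a weak-map key)"
                           : "Bignum (heap-allocated, could be collected)");
    }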
@@ -459,11 +336,10 @@ VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj) {
 // This must be named "Init_protobuf_c" because the Ruby module is named
 // "protobuf_c" -- the VM looks for this symbol in our .so.
 __attribute__((visibility("default"))) void Init_protobuf_c() {
-  ObjectCache_Init();
-
   VALUE google = rb_define_module("Google");
   VALUE protobuf = rb_define_module_under(google, "Protobuf");
 
+  ObjectCache_Init(protobuf);
   Arena_register(protobuf);
   Defs_register(protobuf);
   RepeatedField_register(protobuf);

ext/google/protobuf_c/protobuf.h CHANGED
@@ -31,8 +31,22 @@
 #ifndef __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
 #define __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
 
+// Ruby 3+ defines NDEBUG itself, see: https://bugs.ruby-lang.org/issues/18777
+#ifdef NDEBUG
+#include <ruby.h>
+#else
+#include <ruby.h>
+#undef NDEBUG
+#endif
+
+#include <ruby/version.h>
+
+#if RUBY_API_VERSION_CODE < 20700
+#error Protobuf requires Ruby >= 2.7
+#endif
+
+#include <assert.h>  // Must be included after the NDEBUG logic above.
 #include <ruby/encoding.h>
-#include <ruby/ruby.h>
 #include <ruby/vm.h>
 
 #include "defs.h"
@@ -76,10 +90,9 @@ void Arena_Pin(VALUE arena, VALUE obj);
 // being collected (though in Ruby <2.7 is it effectively strong, due to
 // implementation limitations).
 
-// Adds an entry to the cache. The "arena" parameter must give the arena that
-// "key" was allocated from. In Ruby <2.7.0, it will be used to remove the key
-// from the cache when the arena is destroyed.
-void ObjectCache_Add(const void* key, VALUE val);
+// Tries to add a new entry to the cache, returning the newly installed value or
+// the pre-existing entry.
+VALUE ObjectCache_TryAdd(const void* key, VALUE val);
 
 // Returns the cached object for this key, if any. Otherwise returns Qnil.
 VALUE ObjectCache_Get(const void* key);
@@ -110,7 +123,9 @@ extern VALUE cTypeError;
   do {                    \
   } while (false && (expr))
 #else
-#define PBRUBY_ASSERT(expr) assert(expr)
+#define PBRUBY_ASSERT(expr) \
+  if (!(expr))              \
+    rb_bug("Assertion failed at %s:%d, expr: %s", __FILE__, __LINE__, #expr)
 #endif
 
 #define PBRUBY_MAX(x, y) (((x) > (y)) ? (x) : (y))
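
Debug builds now route assertion failures through rb_bug() instead of plain assert(), so a violated invariant produces Ruby's full crash report with the failing file, line, and expression, independent of how the C library handles NDEBUG. Hypothetical usage, not from the gem:

    static void check_alignment(const void* ptr) {
      // Expands (debug builds) to:
      //   if (!(((VALUE)ptr & 3) == 0))
      //     rb_bug("Assertion failed at %s:%d, expr: %s",
      //            __FILE__, __LINE__, "((VALUE)ptr & 3) == 0");
      PBRUBY_ASSERT(((VALUE)ptr & 3) == 0);
    }

One quirk of the if-based definition is that, unlike a do { } while (0) wrapper, it can bind to a following else; the callers in this diff always use it as a standalone statement, so that hazard does not arise here.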
ext/google/protobuf_c/repeated_field.c CHANGED
@@ -87,7 +87,6 @@ VALUE RepeatedField_GetRubyWrapper(upb_Array* array, TypeInfo type_info,
   if (val == Qnil) {
     val = RepeatedField_alloc(cRepeatedField);
     RepeatedField* self;
-    ObjectCache_Add(array, val);
     TypedData_Get_Struct(val, RepeatedField, &RepeatedField_type, self);
     self->array = array;
     self->arena = arena;
@@ -95,11 +94,14 @@ VALUE RepeatedField_GetRubyWrapper(upb_Array* array, TypeInfo type_info,
     if (self->type_info.type == kUpb_CType_Message) {
       self->type_class = Descriptor_DefToClass(type_info.def.msgdef);
     }
+    val = ObjectCache_TryAdd(array, val);
   }
 
   PBRUBY_ASSERT(ruby_to_RepeatedField(val)->type_info.type == type_info.type);
   PBRUBY_ASSERT(ruby_to_RepeatedField(val)->type_info.def.msgdef ==
                 type_info.def.msgdef);
+  PBRUBY_ASSERT(ruby_to_RepeatedField(val)->array == array);
+
   return val;
 }
 
@@ -613,7 +615,8 @@ VALUE RepeatedField_init(int argc, VALUE* argv, VALUE _self) {
 
   self->type_info = TypeInfo_FromClass(argc, argv, 0, &self->type_class, &ary);
   self->array = upb_Array_New(arena, self->type_info.type);
-  ObjectCache_Add(self->array, _self);
+  VALUE stored_val = ObjectCache_TryAdd(self->array, _self);
+  PBRUBY_ASSERT(stored_val == _self);
 
   if (ary != Qnil) {
     if (!RB_TYPE_P(ary, T_ARRAY)) {

ext/google/protobuf_c/repeated_field.h CHANGED
@@ -31,8 +31,6 @@
 #ifndef RUBY_PROTOBUF_REPEATED_FIELD_H_
 #define RUBY_PROTOBUF_REPEATED_FIELD_H_
 
-#include <ruby/ruby.h>
-
 #include "protobuf.h"
 #include "ruby-upb.h"
 