google-protobuf 3.15.2 → 3.15.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of google-protobuf might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/ext/google/protobuf_c/convert.c +1 -1
- data/ext/google/protobuf_c/defs.c +1 -1
- data/ext/google/protobuf_c/map.c +6 -5
- data/ext/google/protobuf_c/message.c +11 -10
- data/ext/google/protobuf_c/protobuf.c +149 -94
- data/ext/google/protobuf_c/protobuf.h +10 -9
- data/ext/google/protobuf_c/repeated_field.c +6 -5
- data/ext/google/protobuf_c/ruby-upb.c +1 -2
- data/tests/basic.rb +8 -3
- metadata +4 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 4756c4b6ddbf7341827bd33ac53b92833e1c122da77146cc7a9bcadeb43c704f
|
4
|
+
data.tar.gz: 813aeeed854daa71d558f9b4e44dfb04a99e4d3fce8b74d1d4c33a89cdebe1b9
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 167cb6e1ff5ade9115a0f73a3f02743bf8428a9ad4716c6477f10ac6d5047ef379fbad27b1bf2b3bd3d97f550296d9c48a9a3f21e9670e67c61f4cb8f129ae9d
|
7
|
+
data.tar.gz: a9aed646593493f4940b3dca5eb93b0fa662f90263901f25f9fe744668787b0ec19eafb85c33b60ae36e7cb1fc7ad7d8ea575458b7f8304f4b86e07556113d7e
|
@@ -315,7 +315,7 @@ bool Msgval_IsEqual(upb_msgval val1, upb_msgval val2, TypeInfo type_info) {
|
|
315
315
|
return memcmp(&val1, &val2, 8) == 0;
|
316
316
|
case UPB_TYPE_STRING:
|
317
317
|
case UPB_TYPE_BYTES:
|
318
|
-
return val1.str_val.size
|
318
|
+
return val1.str_val.size == val2.str_val.size &&
|
319
319
|
memcmp(val1.str_val.data, val2.str_val.data,
|
320
320
|
val1.str_val.size) == 0;
|
321
321
|
case UPB_TYPE_MESSAGE:
|
@@ -295,7 +295,7 @@ static VALUE DescriptorPool_alloc(VALUE klass) {
|
|
295
295
|
|
296
296
|
self->def_to_descriptor = rb_hash_new();
|
297
297
|
self->symtab = upb_symtab_new();
|
298
|
-
ObjectCache_Add(self->symtab, ret
|
298
|
+
ObjectCache_Add(self->symtab, ret);
|
299
299
|
|
300
300
|
return ret;
|
301
301
|
}
|
data/ext/google/protobuf_c/map.c
CHANGED
@@ -93,7 +93,7 @@ VALUE Map_GetRubyWrapper(upb_map* map, upb_fieldtype_t key_type,
|
|
93
93
|
if (val == Qnil) {
|
94
94
|
val = Map_alloc(cMap);
|
95
95
|
Map* self;
|
96
|
-
ObjectCache_Add(map, val
|
96
|
+
ObjectCache_Add(map, val);
|
97
97
|
TypedData_Get_Struct(val, Map, &Map_type, self);
|
98
98
|
self->map = map;
|
99
99
|
self->arena = arena;
|
@@ -318,7 +318,7 @@ static VALUE Map_init(int argc, VALUE* argv, VALUE _self) {
|
|
318
318
|
|
319
319
|
self->map = upb_map_new(Arena_get(self->arena), self->key_type,
|
320
320
|
self->value_type_info.type);
|
321
|
-
ObjectCache_Add(self->map, _self
|
321
|
+
ObjectCache_Add(self->map, _self);
|
322
322
|
|
323
323
|
if (init_arg != Qnil) {
|
324
324
|
Map_merge_into_self(_self, init_arg);
|
@@ -590,9 +590,10 @@ VALUE Map_eq(VALUE _self, VALUE _other) {
|
|
590
590
|
*/
|
591
591
|
static VALUE Map_freeze(VALUE _self) {
|
592
592
|
Map* self = ruby_to_Map(_self);
|
593
|
-
|
594
|
-
|
595
|
-
|
593
|
+
if (!RB_OBJ_FROZEN(_self)) {
|
594
|
+
Arena_Pin(self->arena, _self);
|
595
|
+
RB_OBJ_FREEZE(_self);
|
596
|
+
}
|
596
597
|
return _self;
|
597
598
|
}
|
598
599
|
|
@@ -105,7 +105,7 @@ void Message_InitPtr(VALUE self_, upb_msg *msg, VALUE arena) {
|
|
105
105
|
Message* self = ruby_to_Message(self_);
|
106
106
|
self->msg = msg;
|
107
107
|
self->arena = arena;
|
108
|
-
ObjectCache_Add(msg, self_
|
108
|
+
ObjectCache_Add(msg, self_);
|
109
109
|
}
|
110
110
|
|
111
111
|
VALUE Message_GetArena(VALUE msg_rb) {
|
@@ -697,16 +697,13 @@ bool Message_Equal(const upb_msg *m1, const upb_msg *m2, const upb_msgdef *m) {
|
|
697
697
|
* field is of a primitive type).
|
698
698
|
*/
|
699
699
|
static VALUE Message_eq(VALUE _self, VALUE _other) {
|
700
|
-
if (
|
701
|
-
return Qfalse;
|
702
|
-
}
|
700
|
+
if (CLASS_OF(_self) != CLASS_OF(_other)) return Qfalse;
|
703
701
|
|
704
702
|
Message* self = ruby_to_Message(_self);
|
705
703
|
Message* other = ruby_to_Message(_other);
|
704
|
+
assert(self->msgdef == other->msgdef);
|
706
705
|
|
707
|
-
return Message_Equal(self->msg, other->msg, self->msgdef)
|
708
|
-
? Qtrue
|
709
|
-
: Qfalse;
|
706
|
+
return Message_Equal(self->msg, other->msg, self->msgdef) ? Qtrue : Qfalse;
|
710
707
|
}
|
711
708
|
|
712
709
|
uint64_t Message_Hash(const upb_msg* msg, const upb_msgdef* m, uint64_t seed) {
|
@@ -855,8 +852,10 @@ static VALUE Message_to_h(VALUE _self) {
|
|
855
852
|
*/
|
856
853
|
static VALUE Message_freeze(VALUE _self) {
|
857
854
|
Message* self = ruby_to_Message(_self);
|
858
|
-
|
859
|
-
|
855
|
+
if (!RB_OBJ_FROZEN(_self)) {
|
856
|
+
Arena_Pin(self->arena, _self);
|
857
|
+
RB_OBJ_FREEZE(_self);
|
858
|
+
}
|
860
859
|
return _self;
|
861
860
|
}
|
862
861
|
|
@@ -1248,7 +1247,9 @@ upb_msg* Message_deep_copy(const upb_msg* msg, const upb_msgdef* m,
|
|
1248
1247
|
|
1249
1248
|
const upb_msg* Message_GetUpbMessage(VALUE value, const upb_msgdef* m,
|
1250
1249
|
const char* name, upb_arena* arena) {
|
1251
|
-
if (value == Qnil)
|
1250
|
+
if (value == Qnil) {
|
1251
|
+
rb_raise(cTypeError, "nil message not allowed here.");
|
1252
|
+
}
|
1252
1253
|
|
1253
1254
|
VALUE klass = CLASS_OF(value);
|
1254
1255
|
VALUE desc_rb = rb_ivar_get(klass, descriptor_instancevar_interned);
|
@@ -167,30 +167,55 @@ void StringBuilder_PrintMsgval(StringBuilder* b, upb_msgval val,
|
|
167
167
|
// Arena
|
168
168
|
// -----------------------------------------------------------------------------
|
169
169
|
|
170
|
-
|
170
|
+
typedef struct {
|
171
|
+
upb_arena *arena;
|
172
|
+
VALUE pinned_objs;
|
173
|
+
} Arena;
|
174
|
+
|
175
|
+
static void Arena_mark(void *data) {
|
176
|
+
Arena *arena = data;
|
177
|
+
rb_gc_mark(arena->pinned_objs);
|
178
|
+
}
|
179
|
+
|
180
|
+
static void Arena_free(void *data) {
|
181
|
+
Arena *arena = data;
|
182
|
+
upb_arena_free(arena->arena);
|
183
|
+
}
|
171
184
|
|
172
185
|
static VALUE cArena;
|
173
186
|
|
174
187
|
const rb_data_type_t Arena_type = {
|
175
188
|
"Google::Protobuf::Internal::Arena",
|
176
|
-
{
|
189
|
+
{ Arena_mark, Arena_free, NULL },
|
190
|
+
.flags = RUBY_TYPED_FREE_IMMEDIATELY,
|
177
191
|
};
|
178
192
|
|
179
193
|
static VALUE Arena_alloc(VALUE klass) {
|
180
|
-
|
194
|
+
Arena *arena = ALLOC(Arena);
|
195
|
+
arena->arena = upb_arena_new();
|
196
|
+
arena->pinned_objs = Qnil;
|
181
197
|
return TypedData_Wrap_Struct(klass, &Arena_type, arena);
|
182
198
|
}
|
183
199
|
|
184
200
|
upb_arena *Arena_get(VALUE _arena) {
|
185
|
-
|
186
|
-
TypedData_Get_Struct(_arena,
|
187
|
-
return arena;
|
201
|
+
Arena *arena;
|
202
|
+
TypedData_Get_Struct(_arena, Arena, &Arena_type, arena);
|
203
|
+
return arena->arena;
|
188
204
|
}
|
189
205
|
|
190
206
|
VALUE Arena_new() {
|
191
207
|
return Arena_alloc(cArena);
|
192
208
|
}
|
193
209
|
|
210
|
+
void Arena_Pin(VALUE _arena, VALUE obj) {
|
211
|
+
Arena *arena;
|
212
|
+
TypedData_Get_Struct(_arena, Arena, &Arena_type, arena);
|
213
|
+
if (arena->pinned_objs == Qnil) {
|
214
|
+
arena->pinned_objs = rb_ary_new();
|
215
|
+
}
|
216
|
+
rb_ary_push(arena->pinned_objs, obj);
|
217
|
+
}
|
218
|
+
|
194
219
|
void Arena_register(VALUE module) {
|
195
220
|
VALUE internal = rb_define_module_under(module, "Internal");
|
196
221
|
VALUE klass = rb_define_class_under(internal, "Arena", rb_cObject);
|
@@ -209,122 +234,152 @@ void Arena_register(VALUE module) {
|
|
209
234
|
// different wrapper objects for the same C object, which saves memory and
|
210
235
|
// preserves object identity.
|
211
236
|
//
|
212
|
-
// We use
|
213
|
-
//
|
214
|
-
//
|
215
|
-
// need to GC-root the object (notably when the object has been frozen).
|
237
|
+
// We use WeakMap for the cache. For Ruby <2.7 we also need a secondary Hash
|
238
|
+
// to store WeakMap keys because Ruby <2.7 WeakMap doesn't allow non-finalizable
|
239
|
+
// keys.
|
216
240
|
|
217
241
|
#if RUBY_API_VERSION_CODE >= 20700
|
218
|
-
#define
|
242
|
+
#define USE_SECONDARY_MAP 0
|
219
243
|
#else
|
220
|
-
#define
|
244
|
+
#define USE_SECONDARY_MAP 1
|
221
245
|
#endif
|
222
246
|
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
247
|
+
#if USE_SECONDARY_MAP
|
248
|
+
|
249
|
+
// Maps Numeric -> Object. The object is then used as a key into the WeakMap.
|
250
|
+
// This is needed for Ruby <2.7 where a number cannot be a key to WeakMap.
|
251
|
+
// The object is used only for its identity; it does not contain any data.
|
252
|
+
VALUE secondary_map = Qnil;
|
253
|
+
|
254
|
+
// Mutations to the map are under a mutex, because SecondaryMap_MaybeGC()
|
255
|
+
// iterates over the map which cannot happen in parallel with insertions, or
|
256
|
+
// Ruby will throw:
|
257
|
+
// can't add a new key into hash during iteration (RuntimeError)
|
258
|
+
VALUE secondary_map_mutex = Qnil;
|
259
|
+
|
260
|
+
// Lambda that will GC entries from the secondary map that are no longer present
|
261
|
+
// in the primary map.
|
262
|
+
VALUE gc_secondary_map_lambda = Qnil;
|
263
|
+
ID length;
|
264
|
+
|
265
|
+
extern VALUE weak_obj_cache;
|
266
|
+
|
267
|
+
static void SecondaryMap_Init() {
|
268
|
+
rb_gc_register_address(&secondary_map);
|
269
|
+
rb_gc_register_address(&gc_secondary_map_lambda);
|
270
|
+
rb_gc_register_address(&secondary_map_mutex);
|
271
|
+
secondary_map = rb_hash_new();
|
272
|
+
gc_secondary_map_lambda = rb_eval_string(
|
273
|
+
"->(secondary, weak) {\n"
|
274
|
+
" secondary.delete_if { |k, v| !weak.key?(v) }\n"
|
275
|
+
"}\n");
|
276
|
+
secondary_map_mutex = rb_mutex_new();
|
277
|
+
length = rb_intern("length");
|
229
278
|
}
|
230
279
|
|
231
|
-
//
|
232
|
-
//
|
233
|
-
//
|
234
|
-
//
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
280
|
+
// The secondary map is a regular Hash, and will never shrink on its own.
|
281
|
+
// The main object cache is a WeakMap that will automatically remove entries
|
282
|
+
// when the target object is no longer reachable, but unless we manually
|
283
|
+
// remove the corresponding entries from the secondary map, it will grow
|
284
|
+
// without bound.
|
285
|
+
//
|
286
|
+
// To avoid this unbounded growth we periodically remove entries from the
|
287
|
+
// secondary map that are no longer present in the WeakMap. The logic of
|
288
|
+
// how often to perform this GC is an arbitrary tuning parameter that
|
289
|
+
// represents a straightforward CPU/memory tradeoff.
|
290
|
+
//
|
291
|
+
// Requires: secondary_map_mutex is held.
|
292
|
+
static void SecondaryMap_MaybeGC() {
|
293
|
+
PBRUBY_ASSERT(rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
|
294
|
+
size_t weak_len = NUM2ULL(rb_funcall(weak_obj_cache, length, 0));
|
295
|
+
size_t secondary_len = RHASH_SIZE(secondary_map);
|
296
|
+
if (secondary_len < weak_len) {
|
297
|
+
// Logically this case should not be possible: a valid entry cannot exist in
|
298
|
+
// the weak table unless there is a corresponding entry in the secondary
|
299
|
+
// table. It should *always* be the case that secondary_len >= weak_len.
|
300
|
+
//
|
301
|
+
// However ObjectSpace::WeakMap#length (and therefore weak_len) is
|
302
|
+
// unreliable: it overreports its true length by including non-live objects.
|
303
|
+
// However these non-live objects are not yielded in iteration, so we may
|
304
|
+
// have previously deleted them from the secondary map in a previous
|
305
|
+
// invocation of SecondaryMap_MaybeGC().
|
306
|
+
//
|
307
|
+
// In this case, we can't measure any waste, so we just return.
|
308
|
+
return;
|
309
|
+
}
|
310
|
+
size_t waste = secondary_len - weak_len;
|
311
|
+
// GC if we could remove at least 2000 entries or 20% of the table size
|
312
|
+
// (whichever is greater). Since the cost of the GC pass is O(N), we
|
313
|
+
// want to make sure that we condition this on overall table size, to
|
314
|
+
// avoid O(N^2) CPU costs.
|
315
|
+
size_t threshold = PBRUBY_MAX(secondary_len * 0.2, 2000);
|
316
|
+
if (waste > threshold) {
|
317
|
+
rb_funcall(gc_secondary_map_lambda, rb_intern("call"), 2,
|
318
|
+
secondary_map, weak_obj_cache);
|
319
|
+
}
|
241
320
|
}
|
242
321
|
|
243
|
-
|
244
|
-
|
245
|
-
PBRUBY_ASSERT(
|
246
|
-
|
322
|
+
// Requires: secondary_map_mutex is held by this thread iff create == true.
|
323
|
+
static VALUE SecondaryMap_Get(VALUE key, bool create) {
|
324
|
+
PBRUBY_ASSERT(!create || rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
|
325
|
+
VALUE ret = rb_hash_lookup(secondary_map, key);
|
326
|
+
if (ret == Qnil && create) {
|
327
|
+
SecondaryMap_MaybeGC();
|
328
|
+
ret = rb_eval_string("Object.new");
|
329
|
+
rb_hash_aset(secondary_map, key, ret);
|
330
|
+
}
|
331
|
+
return ret;
|
247
332
|
}
|
248
333
|
|
249
|
-
|
250
|
-
VALUE key_rb = ObjectCache_GetKey(key);
|
251
|
-
return rb_hash_lookup(strong_obj_cache, key_rb);
|
252
|
-
}
|
334
|
+
#endif
|
253
335
|
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
336
|
+
// Requires: secondary_map_mutex is held by this thread iff create == true.
|
337
|
+
static VALUE ObjectCache_GetKey(const void* key, bool create) {
|
338
|
+
char buf[sizeof(key)];
|
339
|
+
memcpy(&buf, &key, sizeof(key));
|
340
|
+
intptr_t key_int = (intptr_t)key;
|
341
|
+
PBRUBY_ASSERT((key_int & 3) == 0);
|
342
|
+
VALUE ret = LL2NUM(key_int >> 2);
|
343
|
+
#if USE_SECONDARY_MAP
|
344
|
+
ret = SecondaryMap_Get(ret, create);
|
345
|
+
#endif
|
346
|
+
return ret;
|
260
347
|
}
|
261
348
|
|
262
|
-
//
|
263
|
-
// presume it speeds up real code also. However we can only use it in Ruby
|
264
|
-
// >=2.7 due to:
|
265
|
-
// https://bugs.ruby-lang.org/issues/16035
|
266
|
-
|
267
|
-
#if USE_WEAK_MAP
|
349
|
+
// Public ObjectCache API.
|
268
350
|
|
269
351
|
VALUE weak_obj_cache = Qnil;
|
352
|
+
ID item_get;
|
353
|
+
ID item_set;
|
270
354
|
|
271
|
-
static void
|
355
|
+
static void ObjectCache_Init() {
|
272
356
|
rb_gc_register_address(&weak_obj_cache);
|
273
357
|
VALUE klass = rb_eval_string("ObjectSpace::WeakMap");
|
274
358
|
weak_obj_cache = rb_class_new_instance(0, NULL, klass);
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
VALUE ret = rb_funcall(weak_obj_cache, rb_intern("[]"), 1, key_rb);
|
280
|
-
return ret;
|
281
|
-
}
|
282
|
-
|
283
|
-
static void WeakObjectCache_Add(const void* key, VALUE val) {
|
284
|
-
PBRUBY_ASSERT(WeakObjectCache_Get(key) == Qnil);
|
285
|
-
VALUE key_rb = ObjectCache_GetKey(key);
|
286
|
-
rb_funcall(weak_obj_cache, rb_intern("[]="), 2, key_rb, val);
|
287
|
-
PBRUBY_ASSERT(WeakObjectCache_Get(key) == val);
|
288
|
-
}
|
289
|
-
|
290
|
-
#endif
|
291
|
-
|
292
|
-
// Public ObjectCache API.
|
293
|
-
|
294
|
-
static void ObjectCache_Init() {
|
295
|
-
StrongObjectCache_Init();
|
296
|
-
#if USE_WEAK_MAP
|
297
|
-
WeakObjectCache_Init();
|
359
|
+
item_get = rb_intern("[]");
|
360
|
+
item_set = rb_intern("[]=");
|
361
|
+
#if USE_SECONDARY_MAP
|
362
|
+
SecondaryMap_Init();
|
298
363
|
#endif
|
299
364
|
}
|
300
365
|
|
301
|
-
void ObjectCache_Add(const void* key, VALUE val
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
#
|
306
|
-
|
366
|
+
void ObjectCache_Add(const void* key, VALUE val) {
|
367
|
+
PBRUBY_ASSERT(ObjectCache_Get(key) == Qnil);
|
368
|
+
#if USE_SECONDARY_MAP
|
369
|
+
rb_mutex_lock(secondary_map_mutex);
|
370
|
+
#endif
|
371
|
+
VALUE key_rb = ObjectCache_GetKey(key, true);
|
372
|
+
rb_funcall(weak_obj_cache, item_set, 2, key_rb, val);
|
373
|
+
#if USE_SECONDARY_MAP
|
374
|
+
rb_mutex_unlock(secondary_map_mutex);
|
307
375
|
#endif
|
376
|
+
PBRUBY_ASSERT(ObjectCache_Get(key) == val);
|
308
377
|
}
|
309
378
|
|
310
379
|
// Returns the cached object for this key, if any. Otherwise returns Qnil.
|
311
380
|
VALUE ObjectCache_Get(const void* key) {
|
312
|
-
|
313
|
-
return
|
314
|
-
#else
|
315
|
-
return StrongObjectCache_Get(key);
|
316
|
-
#endif
|
317
|
-
}
|
318
|
-
|
319
|
-
void ObjectCache_Pin(const void* key, VALUE val, upb_arena *arena) {
|
320
|
-
#if USE_WEAK_MAP
|
321
|
-
PBRUBY_ASSERT(WeakObjectCache_Get(key) == val);
|
322
|
-
// This will GC-root the object, but we'll still use the weak map for
|
323
|
-
// actual lookup.
|
324
|
-
StrongObjectCache_Add(key, val, arena);
|
325
|
-
#else
|
326
|
-
// Value is already pinned, nothing to do.
|
327
|
-
#endif
|
381
|
+
VALUE key_rb = ObjectCache_GetKey(key, false);
|
382
|
+
return rb_funcall(weak_obj_cache, item_get, 1, key_rb);
|
328
383
|
}
|
329
384
|
|
330
385
|
/*
|
@@ -55,6 +55,13 @@ const upb_fielddef* map_field_value(const upb_fielddef* field);
|
|
55
55
|
VALUE Arena_new();
|
56
56
|
upb_arena *Arena_get(VALUE arena);
|
57
57
|
|
58
|
+
// Pins this Ruby object to the lifetime of this arena, so that as long as the
|
59
|
+
// arena is alive this object will not be collected.
|
60
|
+
//
|
61
|
+
// We use this to guarantee that the "frozen" bit on the object will be
|
62
|
+
// remembered, even if the user drops their reference to this precise object.
|
63
|
+
void Arena_Pin(VALUE arena, VALUE obj);
|
64
|
+
|
58
65
|
// -----------------------------------------------------------------------------
|
59
66
|
// ObjectCache
|
60
67
|
// -----------------------------------------------------------------------------
|
@@ -68,19 +75,11 @@ upb_arena *Arena_get(VALUE arena);
|
|
68
75
|
// Adds an entry to the cache. The "arena" parameter must give the arena that
|
69
76
|
// "key" was allocated from. In Ruby <2.7.0, it will be used to remove the key
|
70
77
|
// from the cache when the arena is destroyed.
|
71
|
-
void ObjectCache_Add(const void* key, VALUE val
|
78
|
+
void ObjectCache_Add(const void* key, VALUE val);
|
72
79
|
|
73
80
|
// Returns the cached object for this key, if any. Otherwise returns Qnil.
|
74
81
|
VALUE ObjectCache_Get(const void* key);
|
75
82
|
|
76
|
-
// Pins the previously added object so it is GC-rooted. This turns the
|
77
|
-
// reference to "val" from weak to strong. We use this to guarantee that the
|
78
|
-
// "frozen" bit on the object will be remembered, even if the user drops their
|
79
|
-
// reference to this precise object.
|
80
|
-
//
|
81
|
-
// The "arena" parameter must give the arena that "key" was allocated from.
|
82
|
-
void ObjectCache_Pin(const void* key, VALUE val, upb_arena *arena);
|
83
|
-
|
84
83
|
// -----------------------------------------------------------------------------
|
85
84
|
// StringBuilder, for inspect
|
86
85
|
// -----------------------------------------------------------------------------
|
@@ -107,6 +106,8 @@ extern VALUE cTypeError;
|
|
107
106
|
#define PBRUBY_ASSERT(expr) assert(expr)
|
108
107
|
#endif
|
109
108
|
|
109
|
+
#define PBRUBY_MAX(x, y) (((x) > (y)) ? (x) : (y))
|
110
|
+
|
110
111
|
#define UPB_UNUSED(var) (void)var
|
111
112
|
|
112
113
|
#endif // __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
|
@@ -88,7 +88,7 @@ VALUE RepeatedField_GetRubyWrapper(upb_array* array, TypeInfo type_info,
|
|
88
88
|
if (val == Qnil) {
|
89
89
|
val = RepeatedField_alloc(cRepeatedField);
|
90
90
|
RepeatedField* self;
|
91
|
-
ObjectCache_Add(array, val
|
91
|
+
ObjectCache_Add(array, val);
|
92
92
|
TypedData_Get_Struct(val, RepeatedField, &RepeatedField_type, self);
|
93
93
|
self->array = array;
|
94
94
|
self->arena = arena;
|
@@ -500,9 +500,10 @@ VALUE RepeatedField_eq(VALUE _self, VALUE _other) {
|
|
500
500
|
*/
|
501
501
|
static VALUE RepeatedField_freeze(VALUE _self) {
|
502
502
|
RepeatedField* self = ruby_to_RepeatedField(_self);
|
503
|
-
|
504
|
-
|
505
|
-
|
503
|
+
if (!RB_OBJ_FROZEN(_self)) {
|
504
|
+
Arena_Pin(self->arena, _self);
|
505
|
+
RB_OBJ_FREEZE(_self);
|
506
|
+
}
|
506
507
|
return _self;
|
507
508
|
}
|
508
509
|
|
@@ -610,7 +611,7 @@ VALUE RepeatedField_init(int argc, VALUE* argv, VALUE _self) {
|
|
610
611
|
|
611
612
|
self->type_info = TypeInfo_FromClass(argc, argv, 0, &self->type_class, &ary);
|
612
613
|
self->array = upb_array_new(arena, self->type_info.type);
|
613
|
-
ObjectCache_Add(self->array, _self
|
614
|
+
ObjectCache_Add(self->array, _self);
|
614
615
|
|
615
616
|
if (ary != Qnil) {
|
616
617
|
if (!RB_TYPE_P(ary, T_ARRAY)) {
|
@@ -6663,10 +6663,9 @@ void upb_array_set(upb_array *arr, size_t i, upb_msgval val) {
|
|
6663
6663
|
}
|
6664
6664
|
|
6665
6665
|
bool upb_array_append(upb_array *arr, upb_msgval val, upb_arena *arena) {
|
6666
|
-
if (!
|
6666
|
+
if (!upb_array_resize(arr, arr->len + 1, arena)) {
|
6667
6667
|
return false;
|
6668
6668
|
}
|
6669
|
-
arr->len++;
|
6670
6669
|
upb_array_set(arr, arr->len - 1, val);
|
6671
6670
|
return true;
|
6672
6671
|
}
|
data/tests/basic.rb
CHANGED
@@ -52,10 +52,15 @@ module BasicTest
|
|
52
52
|
|
53
53
|
outer = ::Google::Protobuf::DescriptorPool.generated_pool.lookup("Outer").msgclass
|
54
54
|
|
55
|
-
|
55
|
+
outer.new(
|
56
56
|
inners: []
|
57
|
-
)
|
58
|
-
|
57
|
+
)['inners'].to_s
|
58
|
+
|
59
|
+
assert_raise Google::Protobuf::TypeError do
|
60
|
+
outer.new(
|
61
|
+
inners: [nil]
|
62
|
+
).to_s
|
63
|
+
end
|
59
64
|
end
|
60
65
|
|
61
66
|
def test_has_field
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: google-protobuf
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 3.15.
|
4
|
+
version: 3.15.7
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Protobuf Authors
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date: 2021-02
|
11
|
+
date: 2021-04-02 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: rake-compiler-dock
|
@@ -123,7 +123,7 @@ homepage: https://developers.google.com/protocol-buffers
|
|
123
123
|
licenses:
|
124
124
|
- BSD-3-Clause
|
125
125
|
metadata:
|
126
|
-
source_code_uri: https://github.com/protocolbuffers/protobuf/tree/v3.15.
|
126
|
+
source_code_uri: https://github.com/protocolbuffers/protobuf/tree/v3.15.7/ruby
|
127
127
|
post_install_message:
|
128
128
|
rdoc_options: []
|
129
129
|
require_paths:
|
@@ -139,7 +139,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
|
|
139
139
|
- !ruby/object:Gem::Version
|
140
140
|
version: '0'
|
141
141
|
requirements: []
|
142
|
-
rubygems_version: 3.2.
|
142
|
+
rubygems_version: 3.2.15
|
143
143
|
signing_key:
|
144
144
|
specification_version: 4
|
145
145
|
summary: Protocol Buffers
|