google-protobuf 3.22.5 → 3.24.3
This diff shows the changes between the publicly released contents of these two package versions, as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: the registry flags this version of google-protobuf as possibly problematic.
- checksums.yaml +4 -4
- data/ext/google/protobuf_c/convert.h +0 -2
- data/ext/google/protobuf_c/defs.c +33 -22
- data/ext/google/protobuf_c/defs.h +0 -2
- data/ext/google/protobuf_c/map.c +4 -2
- data/ext/google/protobuf_c/map.h +0 -2
- data/ext/google/protobuf_c/message.c +16 -6
- data/ext/google/protobuf_c/message.h +0 -2
- data/ext/google/protobuf_c/protobuf.c +36 -150
- data/ext/google/protobuf_c/protobuf.h +21 -6
- data/ext/google/protobuf_c/repeated_field.c +5 -2
- data/ext/google/protobuf_c/repeated_field.h +0 -2
- data/ext/google/protobuf_c/ruby-upb.c +7604 -7307
- data/ext/google/protobuf_c/ruby-upb.h +4734 -2656
- data/lib/google/protobuf/any_pb.rb +24 -5
- data/lib/google/protobuf/api_pb.rb +26 -23
- data/lib/google/protobuf/descriptor_pb.rb +37 -252
- data/lib/google/protobuf/duration_pb.rb +24 -5
- data/lib/google/protobuf/empty_pb.rb +24 -3
- data/lib/google/protobuf/field_mask_pb.rb +24 -4
- data/lib/google/protobuf/object_cache.rb +120 -0
- data/lib/google/protobuf/plugin_pb.rb +25 -28
- data/lib/google/protobuf/source_context_pb.rb +24 -4
- data/lib/google/protobuf/struct_pb.rb +24 -20
- data/lib/google/protobuf/timestamp_pb.rb +24 -5
- data/lib/google/protobuf/type_pb.rb +26 -68
- data/lib/google/protobuf/well_known_types.rb +2 -8
- data/lib/google/protobuf/wrappers_pb.rb +24 -28
- data/lib/google/protobuf.rb +1 -0
- metadata +5 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 802cae742cd0aa258920ff9bde883ab655092d76cde65b4b047dd60a5a6ebd24
+  data.tar.gz: 55ca2a83f2a774be049602510b19b1bd53fcc7a5b10988c8a5020cf38aa354c3
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: fd69b0c91d8bf5597c5ec98b77b597af7182386d99d78f4a84b4fde0c352a930005ad8bbba83650b5d54069cbb5503be4b302622229edee04d7e2fe8ee790804
+  data.tar.gz: 74c4c1057144c20b2e5efc49f665fd066e7507a32b77222154b3a4bd1dc7c6c1439a12398363a95be45244cc59461b651563c456c1c3916a51ee506de01d447f
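Both inner archives of the gem are re-hashed here (the pre-3.24.3 digest values are truncated by the diff viewer). To verify a downloaded copy against the new digests, the members of a .gem file can be hashed with Ruby's standard library; a sketch, assuming the .gem has been unpacked with tar -xf google-protobuf-3.24.3.gem:

    require "digest"

    # A .gem is a tar archive whose members include metadata.gz and data.tar.gz.
    puts Digest::SHA256.file("data.tar.gz").hexdigest   # compare with the SHA256 entry above
    puts Digest::SHA512.file("metadata.gz").hexdigest   # compare with the SHA512 entry above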
data/ext/google/protobuf_c/defs.c
CHANGED

@@ -73,6 +73,8 @@ static VALUE rb_str_maybe_null(const char* s) {
 // -----------------------------------------------------------------------------
 
 typedef struct {
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE def_to_descriptor;  // Hash table of def* -> Ruby descriptor.
   upb_DefPool* symtab;
 } DescriptorPool;
@@ -97,7 +99,7 @@ static void DescriptorPool_free(void* _self) {
 static const rb_data_type_t DescriptorPool_type = {
     "Google::Protobuf::DescriptorPool",
     {DescriptorPool_mark, DescriptorPool_free, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static DescriptorPool* ruby_to_DescriptorPool(VALUE val) {
@@ -125,11 +127,9 @@ static VALUE DescriptorPool_alloc(VALUE klass) {
   self->def_to_descriptor = Qnil;
   ret = TypedData_Wrap_Struct(klass, &DescriptorPool_type, self);
 
-  self->def_to_descriptor = rb_hash_new();
+  RB_OBJ_WRITE(ret, &self->def_to_descriptor, rb_hash_new());
   self->symtab = upb_DefPool_New();
-
-
-  return ret;
+  return ObjectCache_TryAdd(self->symtab, ret);
 }
 
 /*
@@ -223,6 +223,8 @@ static void DescriptorPool_register(VALUE module) {
 
 typedef struct {
   const upb_MessageDef* msgdef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE klass;
   VALUE descriptor_pool;
 } Descriptor;
@@ -238,7 +240,7 @@ static void Descriptor_mark(void* _self) {
 static const rb_data_type_t Descriptor_type = {
     "Google::Protobuf::Descriptor",
     {Descriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static Descriptor* ruby_to_Descriptor(VALUE val) {
@@ -280,7 +282,7 @@ static VALUE Descriptor_initialize(VALUE _self, VALUE cookie,
                "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->msgdef = (const upb_MessageDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -390,7 +392,7 @@ static VALUE Descriptor_lookup_oneof(VALUE _self, VALUE name) {
 static VALUE Descriptor_msgclass(VALUE _self) {
   Descriptor* self = ruby_to_Descriptor(_self);
   if (self->klass == Qnil) {
-    self->klass = build_class_from_descriptor(_self);
+    RB_OBJ_WRITE(_self, &self->klass, build_class_from_descriptor(_self));
   }
   return self->klass;
 }
@@ -417,6 +419,8 @@ static void Descriptor_register(VALUE module) {
 
 typedef struct {
   const upb_FileDef* filedef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE descriptor_pool;  // Owns the upb_FileDef.
 } FileDescriptor;
 
@@ -430,7 +434,7 @@ static void FileDescriptor_mark(void* _self) {
 static const rb_data_type_t FileDescriptor_type = {
     "Google::Protobuf::FileDescriptor",
     {FileDescriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static FileDescriptor* ruby_to_FileDescriptor(VALUE val) {
@@ -463,7 +467,7 @@ static VALUE FileDescriptor_initialize(VALUE _self, VALUE cookie,
                "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->filedef = (const upb_FileDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -519,6 +523,8 @@ static void FileDescriptor_register(VALUE module) {
 
 typedef struct {
   const upb_FieldDef* fielddef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE descriptor_pool;  // Owns the upb_FieldDef.
 } FieldDescriptor;
 
@@ -532,7 +538,7 @@ static void FieldDescriptor_mark(void* _self) {
 static const rb_data_type_t FieldDescriptor_type = {
     "Google::Protobuf::FieldDescriptor",
     {FieldDescriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static FieldDescriptor* ruby_to_FieldDescriptor(VALUE val) {
@@ -570,7 +576,7 @@ static VALUE FieldDescriptor_initialize(VALUE _self, VALUE cookie,
                "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->fielddef = (const upb_FieldDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -595,7 +601,7 @@ upb_CType ruby_to_fieldtype(VALUE type) {
 
 #define CONVERT(upb, ruby)                \
   if (SYM2ID(type) == rb_intern(#ruby)) { \
-    return kUpb_CType_##upb;
+    return kUpb_CType_##upb;              \
   }
 
   CONVERT(Float, float);
@@ -618,7 +624,7 @@ upb_CType ruby_to_fieldtype(VALUE type) {
 
 static VALUE descriptortype_to_ruby(upb_FieldType type) {
   switch (type) {
-#define CONVERT(upb, ruby)
+#define CONVERT(upb, ruby)   \
   case kUpb_FieldType_##upb: \
     return ID2SYM(rb_intern(#ruby));
     CONVERT(Float, float);
@@ -703,7 +709,7 @@ static VALUE FieldDescriptor_label(VALUE _self) {
   FieldDescriptor* self = ruby_to_FieldDescriptor(_self);
   switch (upb_FieldDef_Label(self->fielddef)) {
 #define CONVERT(upb, ruby) \
-  case kUpb_Label_##upb:
+  case kUpb_Label_##upb:   \
     return ID2SYM(rb_intern(#ruby));
 
     CONVERT(Optional, optional);
@@ -884,6 +890,8 @@ static void FieldDescriptor_register(VALUE module) {
 
 typedef struct {
   const upb_OneofDef* oneofdef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE descriptor_pool;  // Owns the upb_OneofDef.
 } OneofDescriptor;
 
@@ -897,7 +905,7 @@ static void OneofDescriptor_mark(void* _self) {
 static const rb_data_type_t OneofDescriptor_type = {
     "Google::Protobuf::OneofDescriptor",
     {OneofDescriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static OneofDescriptor* ruby_to_OneofDescriptor(VALUE val) {
@@ -936,7 +944,7 @@ static VALUE OneofDescriptor_initialize(VALUE _self, VALUE cookie,
                "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->oneofdef = (const upb_OneofDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -988,6 +996,8 @@ static void OneofDescriptor_register(VALUE module) {
 
 typedef struct {
   const upb_EnumDef* enumdef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE module;  // begins as nil
   VALUE descriptor_pool;  // Owns the upb_EnumDef.
 } EnumDescriptor;
@@ -1003,7 +1013,7 @@ static void EnumDescriptor_mark(void* _self) {
 static const rb_data_type_t EnumDescriptor_type = {
     "Google::Protobuf::EnumDescriptor",
     {EnumDescriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static EnumDescriptor* ruby_to_EnumDescriptor(VALUE val) {
@@ -1042,7 +1052,7 @@ static VALUE EnumDescriptor_initialize(VALUE _self, VALUE cookie,
                "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->enumdef = (const upb_EnumDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -1081,7 +1091,7 @@ static VALUE EnumDescriptor_name(VALUE _self) {
 static VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name) {
   EnumDescriptor* self = ruby_to_EnumDescriptor(_self);
   const char* name_str = rb_id2name(SYM2ID(name));
-  const upb_EnumValueDef
+  const upb_EnumValueDef* ev =
       upb_EnumDef_FindValueByName(self->enumdef, name_str);
   if (ev) {
     return INT2NUM(upb_EnumValueDef_Number(ev));
@@ -1100,7 +1110,8 @@ static VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name) {
 static VALUE EnumDescriptor_lookup_value(VALUE _self, VALUE number) {
   EnumDescriptor* self = ruby_to_EnumDescriptor(_self);
   int32_t val = NUM2INT(number);
-  const upb_EnumValueDef* ev =
+  const upb_EnumValueDef* ev =
+      upb_EnumDef_FindValueByNumber(self->enumdef, val);
   if (ev) {
     return ID2SYM(rb_intern(upb_EnumValueDef_Name(ev)));
   } else {
@@ -1138,7 +1149,7 @@ static VALUE EnumDescriptor_each(VALUE _self) {
 static VALUE EnumDescriptor_enummodule(VALUE _self) {
   EnumDescriptor* self = ruby_to_EnumDescriptor(_self);
   if (self->module == Qnil) {
-    self->module = build_module_from_enumdesc(_self);
+    RB_OBJ_WRITE(_self, &self->module, build_module_from_enumdesc(_self));
  }
   return self->module;
 }
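All of the defs.c changes follow one pattern: the descriptor wrapper structs become write-barrier protected (RUBY_TYPED_WB_PROTECTED), so every VALUE field they own must be assigned through RB_OBJ_WRITE(). None of this changes the public API, but the cached references it guards are observable from Ruby; for example, Descriptor#msgclass builds the message class once and then returns the cached object. A small sketch against the public descriptor API:

    require "google/protobuf"
    require "google/protobuf/duration_pb"

    pool  = Google::Protobuf::DescriptorPool.generated_pool
    desc  = pool.lookup("google.protobuf.Duration")
    klass = desc.msgclass   # built on first call, cached in the descriptor afterwards
    raise "not memoized" unless klass.equal?(desc.msgclass)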
data/ext/google/protobuf_c/map.c
CHANGED
@@ -93,7 +93,6 @@ VALUE Map_GetRubyWrapper(upb_Map* map, upb_CType key_type, TypeInfo value_type,
   if (val == Qnil) {
     val = Map_alloc(cMap);
     Map* self;
-    ObjectCache_Add(map, val);
     TypedData_Get_Struct(val, Map, &Map_type, self);
     self->map = map;
     self->arena = arena;
@@ -103,6 +102,7 @@ VALUE Map_GetRubyWrapper(upb_Map* map, upb_CType key_type, TypeInfo value_type,
       const upb_MessageDef* val_m = self->value_type_info.def.msgdef;
       self->value_type_class = Descriptor_DefToClass(val_m);
     }
+    return ObjectCache_TryAdd(map, val);
   }
 
   return val;
@@ -319,7 +319,9 @@ static VALUE Map_init(int argc, VALUE* argv, VALUE _self) {
 
   self->map = upb_Map_New(Arena_get(self->arena), self->key_type,
                           self->value_type_info.type);
-
+  VALUE stored = ObjectCache_TryAdd(self->map, _self);
+  (void)stored;
+  PBRUBY_ASSERT(stored == _self);
 
   if (init_arg != Qnil) {
     Map_merge_into_self(_self, init_arg);
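Replacing the unconditional ObjectCache_Add with ObjectCache_TryAdd makes wrapper creation race-safe: whichever thread installs the entry first wins, and every caller receives that same wrapper. The visible effect is that one underlying upb_Map has exactly one Ruby wrapper. A sketch, where MyMessage is a hypothetical generated class with a map<string, int32> field named counts:

    msg = MyMessage.new
    a = msg.counts
    b = msg.counts
    raise "distinct wrappers" unless a.equal?(b)   # same cached wrapper object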
data/ext/google/protobuf_c/message.c
CHANGED
@@ -53,6 +53,8 @@ VALUE MessageOrEnum_GetDescriptor(VALUE klass) {
 // -----------------------------------------------------------------------------
 
 typedef struct {
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE arena;
   const upb_Message* msg;  // Can get as mutable when non-frozen.
   const upb_MessageDef*
@@ -65,9 +67,9 @@ static void Message_mark(void* _self) {
 }
 
 static rb_data_type_t Message_type = {
-    "Message",
+    "Google::Protobuf::Message",
     {Message_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static Message* ruby_to_Message(VALUE msg_rb) {
@@ -105,8 +107,10 @@ upb_Message* Message_GetMutable(VALUE msg_rb, const upb_MessageDef** m) {
 void Message_InitPtr(VALUE self_, upb_Message* msg, VALUE arena) {
   Message* self = ruby_to_Message(self_);
   self->msg = msg;
-  self->arena = arena;
-  ObjectCache_Add(msg, self_);
+  RB_OBJ_WRITE(self_, &self->arena, arena);
+  VALUE stored = ObjectCache_TryAdd(msg, self_);
+  (void)stored;
+  PBRUBY_ASSERT(stored == self_);
 }
 
 VALUE Message_GetArena(VALUE msg_rb) {
@@ -978,7 +982,7 @@ static VALUE Message_decode(int argc, VALUE* argv, VALUE klass) {
         rb_hash_lookup(hash_args, ID2SYM(rb_intern("recursion_limit")));
 
     if (depth != Qnil && TYPE(depth) == T_FIXNUM) {
-      options |=
+      options |= upb_DecodeOptions_MaxDepth(FIX2INT(depth));
     }
   }
 
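From Ruby, the limit is supplied in the options hash accepted by Message.decode; a sketch (MyMessage is a stand-in for any generated message class):

    bytes = MyMessage.encode(MyMessage.new)
    msg   = MyMessage.decode(bytes, recursion_limit: 64)   # caps nested-message depth while parsing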
@@ -1096,7 +1100,7 @@ static VALUE Message_encode(int argc, VALUE* argv, VALUE klass) {
         rb_hash_lookup(hash_args, ID2SYM(rb_intern("recursion_limit")));
 
     if (depth != Qnil && TYPE(depth) == T_FIXNUM) {
-      options |=
+      options |= upb_DecodeOptions_MaxDepth(FIX2INT(depth));
     }
   }
 
@@ -1162,6 +1166,12 @@ static VALUE Message_encode_json(int argc, VALUE* argv, VALUE klass) {
                          Qfalse))) {
       options |= upb_JsonEncode_EmitDefaults;
     }
+
+    if (RTEST(rb_hash_lookup2(hash_args,
+                              ID2SYM(rb_intern("format_enums_as_integers")),
+                              Qfalse))) {
+      options |= upb_JsonEncode_FormatEnumsAsIntegers;
+    }
   }
 
   upb_Status_Clear(&status);
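The new flag travels in the same options hash that already carries emit_defaults; a sketch (MyMessage again hypothetical):

    json = MyMessage.encode_json(
      msg,
      emit_defaults: true,              # include fields that still hold default values
      format_enums_as_integers: true    # new: emit enum fields as numbers rather than names
    )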
data/ext/google/protobuf_c/protobuf.c
CHANGED

@@ -171,6 +171,8 @@ void StringBuilder_PrintMsgval(StringBuilder *b, upb_MessageValue val,
 
 typedef struct {
   upb_Arena *arena;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE pinned_objs;
 } Arena;
 
@@ -190,10 +192,11 @@ static VALUE cArena;
 const rb_data_type_t Arena_type = {
     "Google::Protobuf::Internal::Arena",
     {Arena_mark, Arena_free, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
-static void *ruby_upb_allocfunc(upb_alloc *alloc, void *ptr, size_t oldsize, size_t size) {
+static void *ruby_upb_allocfunc(upb_alloc *alloc, void *ptr, size_t oldsize,
+                                size_t size) {
   if (size == 0) {
     xfree(ptr);
     return NULL;
@@ -233,7 +236,7 @@ void Arena_Pin(VALUE _arena, VALUE obj) {
   Arena *arena;
   TypedData_Get_Struct(_arena, Arena, &Arena_type, arena);
   if (arena->pinned_objs == Qnil) {
-    arena->pinned_objs = rb_ary_new();
+    RB_OBJ_WRITE(_arena, &arena->pinned_objs, rb_ary_new());
   }
   rb_ary_push(arena->pinned_objs, obj);
 }
@@ -250,164 +253,48 @@ void Arena_register(VALUE module) {
 // Object Cache
 // -----------------------------------------------------------------------------
 
-// A pointer -> Ruby Object cache that keeps references to Ruby wrapper
-// objects. This allows us to look up any Ruby wrapper object by the address
-// of the object it is wrapping. That way we can avoid ever creating two
-// different wrapper objects for the same C object, which saves memory and
-// preserves object identity.
-//
-// We use WeakMap for the cache. For Ruby <2.7 we also need a secondary Hash
-// to store WeakMap keys because Ruby <2.7 WeakMap doesn't allow non-finalizable
-// keys.
-//
-// We also need the secondary Hash if sizeof(long) < sizeof(VALUE), because this
-// means it may not be possible to fit a pointer into a Fixnum. Keys are
-// pointers, and if they fit into a Fixnum, Ruby doesn't collect them, but if
-// they overflow and require allocating a Bignum, they could get collected
-// prematurely, thus removing the cache entry. This happens on 64-bit Windows,
-// on which pointers are 64 bits but longs are 32 bits. In this case, we enable
-// the secondary Hash to hold the keys and prevent them from being collected.
-
-#if RUBY_API_VERSION_CODE >= 20700 && SIZEOF_LONG >= SIZEOF_VALUE
-#define USE_SECONDARY_MAP 0
-#else
-#define USE_SECONDARY_MAP 1
-#endif
-
-#if USE_SECONDARY_MAP
-
-// Maps Numeric -> Object. The object is then used as a key into the WeakMap.
-// This is needed for Ruby <2.7 where a number cannot be a key to WeakMap.
-// The object is used only for its identity; it does not contain any data.
-VALUE secondary_map = Qnil;
-
-// Mutations to the map are under a mutex, because SeconaryMap_MaybeGC()
-// iterates over the map which cannot happen in parallel with insertions, or
-// Ruby will throw:
-//   can't add a new key into hash during iteration (RuntimeError)
-VALUE secondary_map_mutex = Qnil;
-
-// Lambda that will GC entries from the secondary map that are no longer present
-// in the primary map.
-VALUE gc_secondary_map_lambda = Qnil;
-ID length;
-
-extern VALUE weak_obj_cache;
-
-static void SecondaryMap_Init() {
-  rb_gc_register_address(&secondary_map);
-  rb_gc_register_address(&gc_secondary_map_lambda);
-  rb_gc_register_address(&secondary_map_mutex);
-  secondary_map = rb_hash_new();
-  gc_secondary_map_lambda = rb_eval_string(
-      "->(secondary, weak) {\n"
-      "  secondary.delete_if { |k, v| !weak.key?(v) }\n"
-      "}\n");
-  secondary_map_mutex = rb_mutex_new();
-  length = rb_intern("length");
-}
-
-// The secondary map is a regular Hash, and will never shrink on its own.
-// The main object cache is a WeakMap that will automatically remove entries
-// when the target object is no longer reachable, but unless we manually
-// remove the corresponding entries from the secondary map, it will grow
-// without bound.
-//
-// To avoid this unbounded growth we periodically remove entries from the
-// secondary map that are no longer present in the WeakMap. The logic of
-// how often to perform this GC is an artbirary tuning parameter that
-// represents a straightforward CPU/memory tradeoff.
-//
-// Requires: secondary_map_mutex is held.
-static void SecondaryMap_MaybeGC() {
-  PBRUBY_ASSERT(rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
-  size_t weak_len = NUM2ULL(rb_funcall(weak_obj_cache, length, 0));
-  size_t secondary_len = RHASH_SIZE(secondary_map);
-  if (secondary_len < weak_len) {
-    // Logically this case should not be possible: a valid entry cannot exist in
-    // the weak table unless there is a corresponding entry in the secondary
-    // table. It should *always* be the case that secondary_len >= weak_len.
-    //
-    // However ObjectSpace::WeakMap#length (and therefore weak_len) is
-    // unreliable: it overreports its true length by including non-live objects.
-    // However these non-live objects are not yielded in iteration, so we may
-    // have previously deleted them from the secondary map in a previous
-    // invocation of SecondaryMap_MaybeGC().
-    //
-    // In this case, we can't measure any waste, so we just return.
-    return;
-  }
-  size_t waste = secondary_len - weak_len;
-  // GC if we could remove at least 2000 entries or 20% of the table size
-  // (whichever is greater). Since the cost of the GC pass is O(N), we
-  // want to make sure that we condition this on overall table size, to
-  // avoid O(N^2) CPU costs.
-  size_t threshold = PBRUBY_MAX(secondary_len * 0.2, 2000);
-  if (waste > threshold) {
-    rb_funcall(gc_secondary_map_lambda, rb_intern("call"), 2, secondary_map,
-               weak_obj_cache);
-  }
-}
-
-// Requires: secondary_map_mutex is held by this thread iff create == true.
-static VALUE SecondaryMap_Get(VALUE key, bool create) {
-  PBRUBY_ASSERT(!create || rb_mutex_locked_p(secondary_map_mutex) == Qtrue);
-  VALUE ret = rb_hash_lookup(secondary_map, key);
-  if (ret == Qnil && create) {
-    SecondaryMap_MaybeGC();
-    ret = rb_class_new_instance(0, NULL, rb_cObject);
-    rb_hash_aset(secondary_map, key, ret);
-  }
-  return ret;
-}
-
-#endif
-
-// Requires: secondary_map_mutex is held by this thread iff create == true.
-static VALUE ObjectCache_GetKey(const void *key, bool create) {
-  VALUE key_val = (VALUE)key;
-  PBRUBY_ASSERT((key_val & 3) == 0);
-  VALUE ret = LL2NUM(key_val >> 2);
-#if USE_SECONDARY_MAP
-  ret = SecondaryMap_Get(ret, create);
-#endif
-  return ret;
-}
-
 // Public ObjectCache API.
 
 VALUE weak_obj_cache = Qnil;
 ID item_get;
-ID item_add;
+ID item_try_add;
+
+static void ObjectCache_Init(VALUE protobuf) {
+  item_get = rb_intern("get");
+  item_try_add = rb_intern("try_add");
 
-static void ObjectCache_Init() {
   rb_gc_register_address(&weak_obj_cache);
-
-
-
-
-#if USE_SECONDARY_MAP
-  SecondaryMap_Init();
+#if SIZEOF_LONG >= SIZEOF_VALUE
+  VALUE cache_class = rb_const_get(protobuf, rb_intern("ObjectCache"));
+#else
+  VALUE cache_class = rb_const_get(protobuf, rb_intern("LegacyObjectCache"));
 #endif
+
+  weak_obj_cache = rb_class_new_instance(0, NULL, cache_class);
+  rb_const_set(protobuf, rb_intern("OBJECT_CACHE"), weak_obj_cache);
+  rb_const_set(protobuf, rb_intern("SIZEOF_LONG"), INT2NUM(SIZEOF_LONG));
+  rb_const_set(protobuf, rb_intern("SIZEOF_VALUE"), INT2NUM(SIZEOF_VALUE));
 }
 
-
-
-
-
-
-VALUE
-
-
-
-
-
+static VALUE ObjectCache_GetKey(const void *key) {
+  VALUE key_val = (VALUE)key;
+  PBRUBY_ASSERT((key_val & 3) == 0);
+  // Ensure the key can be stored as a Fixnum since 1 bit is needed for
+  // FIXNUM_FLAG and 1 bit is needed for the sign bit.
+  VALUE new_key = LL2NUM(key_val >> 2);
+  PBRUBY_ASSERT(FIXNUM_P(new_key));
+  return new_key;
+}
+
+VALUE ObjectCache_TryAdd(const void *key, VALUE val) {
+  VALUE key_val = ObjectCache_GetKey(key);
+  return rb_funcall(weak_obj_cache, item_try_add, 2, key_val, val);
 }
 
 // Returns the cached object for this key, if any. Otherwise returns Qnil.
 VALUE ObjectCache_Get(const void *key) {
-  VALUE key_val = ObjectCache_GetKey(key, false);
-  return rb_funcall(weak_obj_cache, item_get, 1, key_val);
+  VALUE key_val = ObjectCache_GetKey(key);
+  return rb_funcall(weak_obj_cache, item_get, 1, key_val);
 }
 
 /*
@@ -457,11 +344,10 @@ VALUE Google_Protobuf_deep_copy(VALUE self, VALUE obj) {
 // This must be named "Init_protobuf_c" because the Ruby module is named
 // "protobuf_c" -- the VM looks for this symbol in our .so.
 __attribute__((visibility("default"))) void Init_protobuf_c() {
-  ObjectCache_Init();
-
   VALUE google = rb_define_module("Google");
   VALUE protobuf = rb_define_module_under(google, "Protobuf");
 
+  ObjectCache_Init(protobuf);
   Arena_register(protobuf);
   Defs_register(protobuf);
   RepeatedField_register(protobuf);
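The cache itself now lives in Ruby: ObjectCache_Init instantiates Google::Protobuf::ObjectCache (or LegacyObjectCache where a shifted pointer cannot fit in a Fixnum) from the new data/lib/google/protobuf/object_cache.rb, whose 120 lines are not shown in this diff. A minimal sketch of a cache satisfying the get/try_add contract the C code relies on, assuming internals not visible here:

    module Google
      module Protobuf
        # Illustrative only -- the shipped class is ObjectCache in
        # lib/google/protobuf/object_cache.rb; its exact internals are not shown.
        class ObjectCacheSketch
          def initialize
            @map   = ObjectSpace::WeakMap.new  # entries disappear once a value is GC'd
            @mutex = Mutex.new
          end

          def get(key)
            @map[key]
          end

          # Mirrors ObjectCache_TryAdd: return the existing entry when present,
          # otherwise install `value` and return it.
          def try_add(key, value)
            @map[key] || @mutex.synchronize { @map[key] ||= value }
          end
        end
      end
    end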
data/ext/google/protobuf_c/protobuf.h
CHANGED

@@ -31,8 +31,22 @@
 #ifndef __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
 #define __GOOGLE_PROTOBUF_RUBY_PROTOBUF_H__
 
+// Ruby 3+ defines NDEBUG itself, see: https://bugs.ruby-lang.org/issues/18777
+#ifdef NDEBUG
+#include <ruby.h>
+#else
+#include <ruby.h>
+#undef NDEBUG
+#endif
+
+#include <ruby/version.h>
+
+#if RUBY_API_VERSION_CODE < 20700
+#error Protobuf requires Ruby >= 2.7
+#endif
+
+#include <assert.h>  // Must be included after the NDEBUG logic above.
 #include <ruby/encoding.h>
-#include <ruby/ruby.h>
 #include <ruby/vm.h>
 
 #include "defs.h"
@@ -76,10 +90,9 @@ void Arena_Pin(VALUE arena, VALUE obj);
 // being collected (though in Ruby <2.7 is it effectively strong, due to
 // implementation limitations).
 
-//
-//
-
-void ObjectCache_Add(const void* key, VALUE val);
+// Tries to add a new entry to the cache, returning the newly installed value or
+// the pre-existing entry.
+VALUE ObjectCache_TryAdd(const void* key, VALUE val);
 
 // Returns the cached object for this key, if any. Otherwise returns Qnil.
 VALUE ObjectCache_Get(const void* key);
@@ -110,7 +123,9 @@ extern VALUE cTypeError;
   do {                      \
   } while (false && (expr))
 #else
-#define PBRUBY_ASSERT(expr) assert(expr)
+#define PBRUBY_ASSERT(expr) \
+  if (!(expr))              \
+  rb_bug("Assertion failed at %s:%d, expr: %s", __FILE__, __LINE__, #expr)
 #endif
 
 #define PBRUBY_MAX(x, y) (((x) > (y)) ? (x) : (y))
|