google-protobuf 3.20.3 → 3.24.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of google-protobuf might be problematic.
- checksums.yaml +4 -4
- data/ext/google/protobuf_c/convert.c +0 -0
- data/ext/google/protobuf_c/convert.h +0 -2
- data/ext/google/protobuf_c/defs.c +36 -25
- data/ext/google/protobuf_c/defs.h +0 -2
- data/ext/google/protobuf_c/extconf.rb +2 -3
- data/ext/google/protobuf_c/map.c +31 -44
- data/ext/google/protobuf_c/map.h +0 -2
- data/ext/google/protobuf_c/message.c +140 -106
- data/ext/google/protobuf_c/message.h +0 -2
- data/ext/google/protobuf_c/protobuf.c +36 -150
- data/ext/google/protobuf_c/protobuf.h +21 -6
- data/ext/google/protobuf_c/repeated_field.c +5 -2
- data/ext/google/protobuf_c/repeated_field.h +0 -2
- data/ext/google/protobuf_c/ruby-upb.c +11686 -8578
- data/ext/google/protobuf_c/ruby-upb.h +11727 -4588
- data/ext/google/protobuf_c/third_party/utf8_range/LICENSE +1 -0
- data/ext/google/protobuf_c/third_party/utf8_range/naive.c +0 -0
- data/ext/google/protobuf_c/third_party/utf8_range/range2-neon.c +1 -1
- data/ext/google/protobuf_c/third_party/utf8_range/range2-sse.c +0 -0
- data/ext/google/protobuf_c/third_party/utf8_range/utf8_range.h +12 -0
- data/ext/google/protobuf_c/wrap_memcpy.c +0 -0
- data/lib/google/protobuf/any_pb.rb +24 -5
- data/lib/google/protobuf/api_pb.rb +26 -23
- data/lib/google/protobuf/descriptor_dsl.rb +0 -0
- data/lib/google/protobuf/descriptor_pb.rb +40 -226
- data/lib/google/protobuf/duration_pb.rb +24 -5
- data/lib/google/protobuf/empty_pb.rb +24 -3
- data/lib/google/protobuf/field_mask_pb.rb +24 -4
- data/lib/google/protobuf/message_exts.rb +5 -0
- data/lib/google/protobuf/object_cache.rb +120 -0
- data/lib/google/protobuf/plugin_pb.rb +47 -0
- data/lib/google/protobuf/repeated_field.rb +15 -2
- data/lib/google/protobuf/source_context_pb.rb +24 -4
- data/lib/google/protobuf/struct_pb.rb +24 -20
- data/lib/google/protobuf/timestamp_pb.rb +24 -5
- data/lib/google/protobuf/type_pb.rb +26 -68
- data/lib/google/protobuf/well_known_types.rb +2 -8
- data/lib/google/protobuf/wrappers_pb.rb +24 -28
- data/lib/google/protobuf.rb +1 -0
- metadata +8 -12
- data/tests/basic.rb +0 -739
- data/tests/generated_code_test.rb +0 -23
- data/tests/stress.rb +0 -38
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ab19f3d9a087b8f4b288ba5a610f1d85d5fc71f1939b34891bea8e8be9736069
+  data.tar.gz: 26e9e6b1108d10b1a1098af1820bfd6b4e7a6c892ae9bb946897d78ca765f624
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: ff7e8795da8bc005ccf210891a7ef2e325a5e372ca1e6f66c00611ae2743680c7f276e2352502bdf20c23a74a0e8f6910f35b03165baf43860da797044594826
+  data.tar.gz: 8c0d6011a2b6f407e06fec6779f0dbdc5f6400a4afa04edc9cc63023785ab51eac053159c7c5c0297726e6ff83a1a71c0fdf17c3b2b7ef644c5623f72f560778
data/ext/google/protobuf_c/defs.c
CHANGED
@@ -73,6 +73,8 @@ static VALUE rb_str_maybe_null(const char* s) {
 // -----------------------------------------------------------------------------
 
 typedef struct {
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE def_to_descriptor;  // Hash table of def* -> Ruby descriptor.
   upb_DefPool* symtab;
 } DescriptorPool;
@@ -97,7 +99,7 @@ static void DescriptorPool_free(void* _self) {
 static const rb_data_type_t DescriptorPool_type = {
     "Google::Protobuf::DescriptorPool",
     {DescriptorPool_mark, DescriptorPool_free, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static DescriptorPool* ruby_to_DescriptorPool(VALUE val) {
@@ -125,11 +127,9 @@ static VALUE DescriptorPool_alloc(VALUE klass) {
   self->def_to_descriptor = Qnil;
   ret = TypedData_Wrap_Struct(klass, &DescriptorPool_type, self);
 
-  self->def_to_descriptor = rb_hash_new();
+  RB_OBJ_WRITE(ret, &self->def_to_descriptor, rb_hash_new());
   self->symtab = upb_DefPool_New();
-
-
-  return ret;
+  return ObjectCache_TryAdd(self->symtab, ret);
 }
 
 /*
@@ -223,6 +223,8 @@ static void DescriptorPool_register(VALUE module) {
 
 typedef struct {
   const upb_MessageDef* msgdef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE klass;
   VALUE descriptor_pool;
 } Descriptor;
@@ -238,7 +240,7 @@ static void Descriptor_mark(void* _self) {
 static const rb_data_type_t Descriptor_type = {
     "Google::Protobuf::Descriptor",
     {Descriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static Descriptor* ruby_to_Descriptor(VALUE val) {
@@ -280,7 +282,7 @@ static VALUE Descriptor_initialize(VALUE _self, VALUE cookie,
              "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->msgdef = (const upb_MessageDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -390,7 +392,7 @@ static VALUE Descriptor_lookup_oneof(VALUE _self, VALUE name) {
 static VALUE Descriptor_msgclass(VALUE _self) {
   Descriptor* self = ruby_to_Descriptor(_self);
   if (self->klass == Qnil) {
-    self->klass = build_class_from_descriptor(_self);
+    RB_OBJ_WRITE(_self, &self->klass, build_class_from_descriptor(_self));
   }
   return self->klass;
 }
@@ -417,6 +419,8 @@ static void Descriptor_register(VALUE module) {
 
 typedef struct {
   const upb_FileDef* filedef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE descriptor_pool;  // Owns the upb_FileDef.
 } FileDescriptor;
 
@@ -430,7 +434,7 @@ static void FileDescriptor_mark(void* _self) {
 static const rb_data_type_t FileDescriptor_type = {
     "Google::Protobuf::FileDescriptor",
     {FileDescriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static FileDescriptor* ruby_to_FileDescriptor(VALUE val) {
@@ -463,7 +467,7 @@ static VALUE FileDescriptor_initialize(VALUE _self, VALUE cookie,
             "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->filedef = (const upb_FileDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -519,6 +523,8 @@ static void FileDescriptor_register(VALUE module) {
 
 typedef struct {
   const upb_FieldDef* fielddef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE descriptor_pool;  // Owns the upb_FieldDef.
 } FieldDescriptor;
 
@@ -532,7 +538,7 @@ static void FieldDescriptor_mark(void* _self) {
 static const rb_data_type_t FieldDescriptor_type = {
     "Google::Protobuf::FieldDescriptor",
     {FieldDescriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static FieldDescriptor* ruby_to_FieldDescriptor(VALUE val) {
@@ -570,7 +576,7 @@ static VALUE FieldDescriptor_initialize(VALUE _self, VALUE cookie,
            "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->fielddef = (const upb_FieldDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -595,7 +601,7 @@ upb_CType ruby_to_fieldtype(VALUE type) {
 
 #define CONVERT(upb, ruby) \
   if (SYM2ID(type) == rb_intern(#ruby)) { \
-    return kUpb_CType_##upb;
+    return kUpb_CType_##upb; \
   }
 
   CONVERT(Float, float);
@@ -618,7 +624,7 @@ upb_CType ruby_to_fieldtype(VALUE type) {
 
 static VALUE descriptortype_to_ruby(upb_FieldType type) {
   switch (type) {
-#define CONVERT(upb, ruby)
+#define CONVERT(upb, ruby) \
     case kUpb_FieldType_##upb: \
       return ID2SYM(rb_intern(#ruby));
     CONVERT(Float, float);
@@ -703,7 +709,7 @@ static VALUE FieldDescriptor_label(VALUE _self) {
   FieldDescriptor* self = ruby_to_FieldDescriptor(_self);
   switch (upb_FieldDef_Label(self->fielddef)) {
 #define CONVERT(upb, ruby) \
-  case kUpb_Label_##upb:
+  case kUpb_Label_##upb: \
     return ID2SYM(rb_intern(#ruby));
 
     CONVERT(Optional, optional);
@@ -811,7 +817,7 @@ static VALUE FieldDescriptor_has(VALUE _self, VALUE msg_rb) {
     rb_raise(rb_eArgError, "does not track presence");
   }
 
-  return
+  return upb_Message_HasFieldByDef(msg, self->fielddef) ? Qtrue : Qfalse;
 }
 
 /*
@@ -829,7 +835,7 @@ static VALUE FieldDescriptor_clear(VALUE _self, VALUE msg_rb) {
     rb_raise(cTypeError, "has method called on wrong message type");
   }
 
-
+  upb_Message_ClearFieldByDef(msg, self->fielddef);
   return Qnil;
 }
 
@@ -854,7 +860,7 @@ static VALUE FieldDescriptor_set(VALUE _self, VALUE msg_rb, VALUE value) {
 
   msgval = Convert_RubyToUpb(value, upb_FieldDef_Name(self->fielddef),
                              TypeInfo_get(self->fielddef), arena);
-
+  upb_Message_SetFieldByDef(msg, self->fielddef, msgval, arena);
   return Qnil;
 }
 
@@ -884,6 +890,8 @@ static void FieldDescriptor_register(VALUE module) {
 
 typedef struct {
   const upb_OneofDef* oneofdef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE descriptor_pool;  // Owns the upb_OneofDef.
 } OneofDescriptor;
 
@@ -897,7 +905,7 @@ static void OneofDescriptor_mark(void* _self) {
 static const rb_data_type_t OneofDescriptor_type = {
     "Google::Protobuf::OneofDescriptor",
     {OneofDescriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static OneofDescriptor* ruby_to_OneofDescriptor(VALUE val) {
@@ -936,7 +944,7 @@ static VALUE OneofDescriptor_initialize(VALUE _self, VALUE cookie,
            "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->oneofdef = (const upb_OneofDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -988,6 +996,8 @@ static void OneofDescriptor_register(VALUE module) {
 
 typedef struct {
   const upb_EnumDef* enumdef;
+  // IMPORTANT: WB_PROTECTED objects must only use the RB_OBJ_WRITE()
+  // macro to update VALUE references, as to trigger write barriers.
   VALUE module;  // begins as nil
   VALUE descriptor_pool;  // Owns the upb_EnumDef.
 } EnumDescriptor;
@@ -1003,7 +1013,7 @@ static void EnumDescriptor_mark(void* _self) {
 static const rb_data_type_t EnumDescriptor_type = {
     "Google::Protobuf::EnumDescriptor",
     {EnumDescriptor_mark, RUBY_DEFAULT_FREE, NULL},
-    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 };
 
 static EnumDescriptor* ruby_to_EnumDescriptor(VALUE val) {
@@ -1042,7 +1052,7 @@ static VALUE EnumDescriptor_initialize(VALUE _self, VALUE cookie,
            "Descriptor objects may not be created from Ruby.");
   }
 
-  self->descriptor_pool = descriptor_pool;
+  RB_OBJ_WRITE(_self, &self->descriptor_pool, descriptor_pool);
   self->enumdef = (const upb_EnumDef*)NUM2ULL(ptr);
 
   return Qnil;
@@ -1081,7 +1091,7 @@ static VALUE EnumDescriptor_name(VALUE _self) {
 static VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name) {
   EnumDescriptor* self = ruby_to_EnumDescriptor(_self);
   const char* name_str = rb_id2name(SYM2ID(name));
-  const upb_EnumValueDef
+  const upb_EnumValueDef* ev =
       upb_EnumDef_FindValueByName(self->enumdef, name_str);
   if (ev) {
     return INT2NUM(upb_EnumValueDef_Number(ev));
@@ -1100,7 +1110,8 @@ static VALUE EnumDescriptor_lookup_name(VALUE _self, VALUE name) {
 static VALUE EnumDescriptor_lookup_value(VALUE _self, VALUE number) {
   EnumDescriptor* self = ruby_to_EnumDescriptor(_self);
   int32_t val = NUM2INT(number);
-  const upb_EnumValueDef* ev =
+  const upb_EnumValueDef* ev =
+      upb_EnumDef_FindValueByNumber(self->enumdef, val);
   if (ev) {
     return ID2SYM(rb_intern(upb_EnumValueDef_Name(ev)));
   } else {
@@ -1138,7 +1149,7 @@ static VALUE EnumDescriptor_each(VALUE _self) {
 static VALUE EnumDescriptor_enummodule(VALUE _self) {
   EnumDescriptor* self = ruby_to_EnumDescriptor(_self);
   if (self->module == Qnil) {
-    self->module = build_module_from_enumdesc(_self);
+    RB_OBJ_WRITE(_self, &self->module, build_module_from_enumdesc(_self));
   }
   return self->module;
 }
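The defs.c hunks above all apply one pattern: each descriptor wrapper type adds RUBY_TYPED_WB_PROTECTED to its rb_data_type_t flags, and every store into a VALUE member goes through RB_OBJ_WRITE() so that Ruby's generational-GC write barrier fires. A minimal sketch of that pattern for a generic C extension follows; the Wrapper struct and all function names are hypothetical, not taken from the gem.

/* Sketch of the RUBY_TYPED_WB_PROTECTED + RB_OBJ_WRITE() pattern.
 * Everything named Wrapper_* here is invented for illustration. */
#include <ruby.h>

typedef struct {
  /* Once the type is WB_PROTECTED, this member must only be updated
   * through RB_OBJ_WRITE() so the write barrier is triggered. */
  VALUE cached;
} Wrapper;

static void Wrapper_mark(void* _self) {
  rb_gc_mark(((Wrapper*)_self)->cached);
}

static const rb_data_type_t Wrapper_type = {
    "Wrapper",
    {Wrapper_mark, RUBY_DEFAULT_FREE, NULL},
    .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
};

static VALUE Wrapper_alloc(VALUE klass) {
  Wrapper* self = ALLOC(Wrapper);
  self->cached = Qnil;  /* plain store is fine before the object is wrapped */
  return TypedData_Wrap_Struct(klass, &Wrapper_type, self);
}

static VALUE Wrapper_set_cached(VALUE obj, VALUE val) {
  Wrapper* self;
  TypedData_Get_Struct(obj, Wrapper, &Wrapper_type, self);
  RB_OBJ_WRITE(obj, &self->cached, val);  /* not: self->cached = val; */
  return val;
}

void Init_wrapper_demo(void) {
  VALUE klass = rb_define_class("WrapperDemo", rb_cObject);
  rb_define_alloc_func(klass, Wrapper_alloc);
  rb_define_method(klass, "cached=", Wrapper_set_cached, 1);
}

Without the flag, objects of this type are never considered write-barrier protected and the RB_OBJ_WRITE calls are merely harmless; with the flag but with plain assignments, the GC can miss new references, which is the combination the defs.c comments warn about.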
data/ext/google/protobuf_c/extconf.rb
CHANGED
@@ -6,20 +6,19 @@ ext_name = "google/protobuf_c"
 
 dir_config(ext_name)
 
-if RUBY_PLATFORM =~ /darwin/ || RUBY_PLATFORM =~ /linux/
+if RUBY_PLATFORM =~ /darwin/ || RUBY_PLATFORM =~ /linux/ || RUBY_PLATFORM =~ /freebsd/
   $CFLAGS += " -std=gnu99 -O3 -DNDEBUG -fvisibility=hidden -Wall -Wsign-compare -Wno-declaration-after-statement"
 else
   $CFLAGS += " -std=gnu99 -O3 -DNDEBUG"
 end
 
-
 if RUBY_PLATFORM =~ /linux/
   # Instruct the linker to point memcpy calls at our __wrap_memcpy wrapper.
   $LDFLAGS += " -Wl,-wrap,memcpy"
 end
 
 $VPATH << "$(srcdir)/third_party/utf8_range"
-$INCFLAGS
+$INCFLAGS += " -I$(srcdir)/third_party/utf8_range"
 
 $srcs = ["protobuf.c", "convert.c", "defs.c", "message.c",
          "repeated_field.c", "map.c", "ruby-upb.c", "wrap_memcpy.c",
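Another recurring change is the switch from ObjectCache_Add to ObjectCache_TryAdd, seen in DescriptorPool_alloc above and again in the map.c hunks that follow: the caller must now use the returned VALUE, which is the wrapper that actually ended up in the cache. The sketch below only illustrates that calling convention; ObjectCache_TryAdd is internal to the gem, its prototype here is an assumption inferred from this diff, and the Thing type is invented for illustration.

/* Calling pattern for ObjectCache_TryAdd as used in this diff.
 * The prototype and the Thing/Thing_wrap names are assumptions. */
#include <ruby.h>

VALUE ObjectCache_TryAdd(const void* key, VALUE val);  /* assumed prototype */

typedef struct {
  void* handle;  /* the native object this Ruby object wraps */
} Thing;

extern const rb_data_type_t Thing_type;  /* assumed to be defined elsewhere */

static VALUE Thing_wrap(VALUE klass, void* handle) {
  Thing* self = ALLOC(Thing);
  self->handle = handle;
  VALUE val = TypedData_Wrap_Struct(klass, &Thing_type, self);
  /* Unlike the old ObjectCache_Add, TryAdd returns the wrapper stored in the
   * cache: if another wrapper for `handle` already existed, that one comes
   * back and must be used instead of `val`. */
  return ObjectCache_TryAdd(handle, val);
}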
data/ext/google/protobuf_c/map.c
CHANGED
@@ -93,7 +93,6 @@ VALUE Map_GetRubyWrapper(upb_Map* map, upb_CType key_type, TypeInfo value_type,
   if (val == Qnil) {
     val = Map_alloc(cMap);
     Map* self;
-    ObjectCache_Add(map, val);
     TypedData_Get_Struct(val, Map, &Map_type, self);
     self->map = map;
     self->arena = arena;
@@ -103,6 +102,7 @@ VALUE Map_GetRubyWrapper(upb_Map* map, upb_CType key_type, TypeInfo value_type,
       const upb_MessageDef* val_m = self->value_type_info.def.msgdef;
       self->value_type_class = Descriptor_DefToClass(val_m);
     }
+    return ObjectCache_TryAdd(map, val);
   }
 
   return val;
@@ -133,14 +133,13 @@ static upb_Map* Map_GetMutable(VALUE _self) {
 VALUE Map_CreateHash(const upb_Map* map, upb_CType key_type,
                      TypeInfo val_info) {
   VALUE hash = rb_hash_new();
-  size_t iter = kUpb_Map_Begin;
   TypeInfo key_info = TypeInfo_from_type(key_type);
 
   if (!map) return hash;
 
-
-
-
+  size_t iter = kUpb_Map_Begin;
+  upb_MessageValue key, val;
+  while (upb_Map_Next(map, &key, &val, &iter)) {
     VALUE key_val = Convert_UpbToRuby(key, key_info, Qnil);
     VALUE val_val = Scalar_CreateHash(val, val_info);
     rb_hash_aset(hash, key_val, val_val);
@@ -156,9 +155,8 @@ VALUE Map_deep_copy(VALUE obj) {
   upb_Map* new_map =
       upb_Map_New(arena, self->key_type, self->value_type_info.type);
   size_t iter = kUpb_Map_Begin;
-
-
-    upb_MessageValue val = upb_MapIterator_Value(self->map, iter);
+  upb_MessageValue key, val;
+  while (upb_Map_Next(self->map, &key, &val, &iter)) {
     upb_MessageValue val_copy =
         Msgval_DeepCopy(val, self->value_type_info, arena);
     upb_Map_Set(new_map, key, val_copy, arena);
@@ -202,9 +200,8 @@ void Map_Inspect(StringBuilder* b, const upb_Map* map, upb_CType key_type,
   StringBuilder_Printf(b, "{");
   if (map) {
     size_t iter = kUpb_Map_Begin;
-
-
-      upb_MessageValue val = upb_MapIterator_Value(map, iter);
+    upb_MessageValue key, val;
+    while (upb_Map_Next(map, &key, &val, &iter)) {
       if (first) {
         first = false;
       } else {
@@ -239,7 +236,6 @@ static VALUE Map_merge_into_self(VALUE _self, VALUE hashmap) {
     Map* other = ruby_to_Map(hashmap);
     upb_Arena* arena = Arena_get(self->arena);
     upb_Message* self_msg = Map_GetMutable(_self);
-    size_t iter = kUpb_Map_Begin;
 
     Arena_fuse(other->arena, arena);
 
@@ -249,9 +245,9 @@ static VALUE Map_merge_into_self(VALUE _self, VALUE hashmap) {
       rb_raise(rb_eArgError, "Attempt to merge Map with mismatching types");
     }
 
-
-
-
+    size_t iter = kUpb_Map_Begin;
+    upb_MessageValue key, val;
+    while (upb_Map_Next(other->map, &key, &val, &iter)) {
       upb_Map_Set(self_msg, key, val, arena);
     }
   } else {
@@ -323,7 +319,9 @@ static VALUE Map_init(int argc, VALUE* argv, VALUE _self) {
 
   self->map = upb_Map_New(Arena_get(self->arena), self->key_type,
                           self->value_type_info.type);
-
+  VALUE stored = ObjectCache_TryAdd(self->map, _self);
+  (void)stored;
+  PBRUBY_ASSERT(stored == _self);
 
   if (init_arg != Qnil) {
     Map_merge_into_self(_self, init_arg);
@@ -343,10 +341,9 @@ static VALUE Map_init(int argc, VALUE* argv, VALUE _self) {
 static VALUE Map_each(VALUE _self) {
   Map* self = ruby_to_Map(_self);
   size_t iter = kUpb_Map_Begin;
+  upb_MessageValue key, val;
 
-  while (
-    upb_MessageValue key = upb_MapIterator_Key(self->map, iter);
-    upb_MessageValue val = upb_MapIterator_Value(self->map, iter);
+  while (upb_Map_Next(self->map, &key, &val, &iter)) {
     VALUE key_val = Convert_UpbToRuby(key, Map_keyinfo(self), self->arena);
     VALUE val_val = Convert_UpbToRuby(val, self->value_type_info, self->arena);
     rb_yield_values(2, key_val, val_val);
@@ -365,9 +362,9 @@ static VALUE Map_keys(VALUE _self) {
   Map* self = ruby_to_Map(_self);
   size_t iter = kUpb_Map_Begin;
   VALUE ret = rb_ary_new();
+  upb_MessageValue key, val;
 
-  while (
-    upb_MessageValue key = upb_MapIterator_Key(self->map, iter);
+  while (upb_Map_Next(self->map, &key, &val, &iter)) {
     VALUE key_val = Convert_UpbToRuby(key, Map_keyinfo(self), self->arena);
     rb_ary_push(ret, key_val);
   }
@@ -385,9 +382,9 @@ static VALUE Map_values(VALUE _self) {
   Map* self = ruby_to_Map(_self);
   size_t iter = kUpb_Map_Begin;
   VALUE ret = rb_ary_new();
+  upb_MessageValue key, val;
 
-  while (
-    upb_MessageValue val = upb_MapIterator_Value(self->map, iter);
+  while (upb_Map_Next(self->map, &key, &val, &iter)) {
     VALUE val_val = Convert_UpbToRuby(val, self->value_type_info, self->arena);
     rb_ary_push(ret, val_val);
   }
@@ -464,24 +461,17 @@ static VALUE Map_has_key(VALUE _self, VALUE key) {
  */
 static VALUE Map_delete(VALUE _self, VALUE key) {
   Map* self = ruby_to_Map(_self);
+  rb_check_frozen(_self);
+
   upb_MessageValue key_upb =
       Convert_RubyToUpb(key, "", Map_keyinfo(self), NULL);
   upb_MessageValue val_upb;
-  VALUE ret;
 
-
-
-  // TODO(haberman): make upb_Map_Delete() also capable of returning the deleted
-  // value.
-  if (upb_Map_Get(self->map, key_upb, &val_upb)) {
-    ret = Convert_UpbToRuby(val_upb, self->value_type_info, self->arena);
+  if (upb_Map_Delete(self->map, key_upb, &val_upb)) {
+    return Convert_UpbToRuby(val_upb, self->value_type_info, self->arena);
   } else {
-
+    return Qnil;
   }
-
-  upb_Map_Delete(Map_GetMutable(_self), key_upb);
-
-  return ret;
 }
 
 /*
@@ -523,9 +513,8 @@ static VALUE Map_dup(VALUE _self) {
 
   Arena_fuse(self->arena, arena);
 
-
-
-    upb_MessageValue val = upb_MapIterator_Value(self->map, iter);
+  upb_MessageValue key, val;
+  while (upb_Map_Next(self->map, &key, &val, &iter)) {
     upb_Map_Set(new_map, key, val, arena);
   }
 
@@ -574,9 +563,8 @@ VALUE Map_eq(VALUE _self, VALUE _other) {
   // For each member of self, check that an equal member exists at the same key
   // in other.
   size_t iter = kUpb_Map_Begin;
-
-
-    upb_MessageValue val = upb_MapIterator_Value(self->map, iter);
+  upb_MessageValue key, val;
+  while (upb_Map_Next(self->map, &key, &val, &iter)) {
     upb_MessageValue other_val;
     if (!upb_Map_Get(other->map, key, &other_val)) {
       // Not present in other map.
@@ -619,9 +607,8 @@ VALUE Map_hash(VALUE _self) {
 
   size_t iter = kUpb_Map_Begin;
   TypeInfo key_info = {self->key_type};
-
-
-    upb_MessageValue val = upb_MapIterator_Value(self->map, iter);
+  upb_MessageValue key, val;
+  while (upb_Map_Next(self->map, &key, &val, &iter)) {
     hash = Msgval_GetHash(key, key_info, hash);
     hash = Msgval_GetHash(val, self->value_type_info, hash);
   }
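Throughout map.c the old upb_MapIterator_Next/Key/Value iteration is replaced by upb_Map_Next, which advances the iterator and returns the key and value in one call, and Map_delete now takes the removed value straight from upb_Map_Delete instead of doing a prior upb_Map_Get. A small standalone sketch of those two calls, assuming the bundled ruby-upb.h header; the helper function names here are illustrative and only calls that appear verbatim in the hunks are used.

/* Sketch of the upb map iteration and deletion pattern used in this release.
 * The include path reflects the bundled ruby-upb.h; count_entries and
 * remove_entry are invented names. */
#include <stdbool.h>
#include "ruby-upb.h"

/* upb_Map_Next() replaces the old upb_MapIterator_Next/Key/Value trio:
 * it advances `iter` and fills in both key and value in one call,
 * returning false once the map is exhausted. */
static size_t count_entries(const upb_Map* map) {
  size_t iter = kUpb_Map_Begin;
  upb_MessageValue key, val;
  size_t n = 0;
  while (upb_Map_Next(map, &key, &val, &iter)) {
    n++;
  }
  return n;
}

/* upb_Map_Delete() now reports the removed value through its out-parameter,
 * which is what lets Map_delete drop its separate upb_Map_Get() lookup. */
static bool remove_entry(upb_Map* map, upb_MessageValue key,
                         upb_MessageValue* removed) {
  return upb_Map_Delete(map, key, removed);
}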