google-protobuf 4.34.1-java → 4.35.0.rc.2-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ext/google/protobuf_c/message.c +1 -1
- data/ext/google/protobuf_c/message.h +1 -1
- data/ext/google/protobuf_c/ruby-upb.c +425 -254
- data/ext/google/protobuf_c/ruby-upb.h +165 -32
- data/ext/google/protobuf_c/third_party/utf8_range/utf8_range.c +1 -1
- data/ext/google/protobuf_c/third_party/utf8_range/utf8_range.h +2 -1
- data/lib/google/protobuf/descriptor_pb.rb +1 -1
- data/lib/google/protobuf_java.jar +0 -0
- metadata +5 -5
@@ -558,7 +558,15 @@ Error, UINTPTR_MAX is undefined
 // }
 // }
 
+#if defined(__GNUC__) && !defined(__clang__)
+// GCC can't handle mismatched retain attributes in the same section:
+// https://github.com/protocolbuffers/protobuf/issues/26385
+// To work around this, we retain all linker array elements, even though this
+// effectively disables tree-shaking of unused extensions when using GCC.
+#define UPB_LINKARR_ATTR UPB_RETAIN
+#else
 #define UPB_LINKARR_ATTR
+#endif
 
 #define UPB_LINKARR_SENTINEL UPB_RETAIN __attribute__((weak, used))
 
@@ -1962,7 +1970,7 @@ const upb_MiniTable google__protobuf__GeneratedCodeInfo__Annotation_msg_init = {
 
 const upb_MiniTableEnum google__protobuf__Edition_enum_init = {
     64,
-    10,
+    11,
     {
         0x7,
         0x0,
@@ -1971,6 +1979,7 @@ const upb_MiniTableEnum google__protobuf__Edition_enum_init = {
         0x3e7,
         0x3e8,
         0x3e9,
+        0x3ea,
         0x270f,
         0x1869d,
         0x1869e,
@@ -1992,7 +2001,7 @@ const upb_MiniTableEnum google__protobuf__FeatureSet__EnforceNamingStyle_enum_in
     64,
     0,
     {
-        0x7,
+        0xf,
        0x0,
     },
 };
@@ -2338,10 +2347,15 @@ static int log2ceil(uint64_t v) {
   return UPB_MIN(UPB_MAXARRSIZE, ret);
 }
 
-/* A type to represent the lookup key of either a strtable or an inttable. */
+/* A type to represent the lookup key of either a strtable, inttable or
+ * exttable. */
 typedef union {
   uintptr_t num;
   upb_StringView str;
+  struct {
+    const void* ptr;
+    uint32_t ext_num;
+  } ext;
 } lookupkey_t;
 
 static lookupkey_t strkey2(const char* str, size_t len) {
@@ -2350,16 +2364,24 @@ static lookupkey_t strkey2(const char* str, size_t len) {
 
 static lookupkey_t intkey(uintptr_t key) { return (lookupkey_t){.num = key}; }
 
-typedef uint32_t hashfunc_t(upb_key key);
-typedef bool eqlfunc_t(upb_key k1, lookupkey_t k2);
+static lookupkey_t extkey(const void* ptr, uint32_t ext_num) {
+  return (lookupkey_t){.ext = {ptr, ext_num}};
+}
+
+// Conceptually the hash and equal functions should only take the key, not the
+// value, but the extension table stores part of its logical key in the value
+// slot. This is a sign that we have outgrown the original architecture.
+typedef uint32_t hashfunc_t(upb_key key, upb_value val);
+typedef bool eqlfunc_t(upb_key k1, upb_value v1, lookupkey_t k2);
 
 /* Base table (shared code) ***************************************************/
 
 static uint32_t upb_inthash(uintptr_t key) {
+  UPB_STATIC_ASSERT(sizeof(uintptr_t) == 4 || sizeof(uintptr_t) == 8,
+                    "Pointers don't fit");
   if (sizeof(uintptr_t) == 8) {
     return (uint32_t)key ^ (uint32_t)(key >> 32);
   } else {
-    UPB_ASSERT(sizeof(uintptr_t) == 4);
     return (uint32_t)key;
   }
 }
@@ -2420,7 +2442,7 @@ static const upb_tabent* findentry(const upb_table* t, lookupkey_t key,
   e = upb_getentry(t, hash);
   if (upb_tabent_isempty(e)) return NULL;
   while (1) {
-    if (eql(e->key, key)) return e;
+    if (eql(e->key, e->val, key)) return e;
     if ((e = e->next) == NULL) return NULL;
   }
 }
@@ -2460,7 +2482,8 @@ static void insert(upb_table* t, lookupkey_t key, upb_key tabkey, upb_value val,
     /* Collision. */
     upb_tabent* new_e = emptyent(t, mainpos_e);
     /* Head of collider's chain. */
-    upb_tabent* chain = getentry_mutable(t, hashfunc(mainpos_e->key));
+    upb_tabent* chain =
+        getentry_mutable(t, hashfunc(mainpos_e->key, mainpos_e->val));
     if (chain == mainpos_e) {
       /* Existing ent is in its main position (it has the same hash as us, and
        * is the head of our chain). Insert to new ent and append to this chain.
@@ -2491,7 +2514,7 @@ static bool rm(upb_table* t, lookupkey_t key, upb_value* val, uint32_t hash,
                eqlfunc_t* eql) {
   upb_tabent* chain = getentry_mutable(t, hash);
   if (upb_tabent_isempty(chain)) return false;
-  if (eql(chain->key, key)) {
+  if (eql(chain->key, chain->val, key)) {
     /* Element to remove is at the head of its chain. */
     t->count--;
     if (val) *val = chain->val;
@@ -2506,7 +2529,7 @@ static bool rm(upb_table* t, lookupkey_t key, upb_value* val, uint32_t hash,
   } else {
     /* Element to remove is either in a non-head position or not in the
      * table. */
-    while (chain->next && !eql(chain->next->key, key)) {
+    while (chain->next && !eql(chain->next->key, chain->next->val, key)) {
       chain = (upb_tabent*)chain->next;
     }
     if (chain->next) {
@@ -2705,11 +2728,13 @@ static uint32_t _upb_Hash_NoSeed(const char* p, size_t n) {
   return _upb_Hash(p, n, _upb_Seed());
 }
 
-static uint32_t strhash(upb_key key) {
+static uint32_t strhash(upb_key key, upb_value val) {
+  UPB_UNUSED(val);
   return _upb_Hash_NoSeed(key.str->data, key.str->size);
 }
 
-static bool streql(upb_key k1, lookupkey_t k2) {
+static bool streql(upb_key k1, upb_value v1, lookupkey_t k2) {
+  UPB_UNUSED(v1);
   const upb_SizePrefixString* k1s = k1.str;
   const upb_StringView k2s = k2.str;
   return k1s->size == k2s.size &&
@@ -2873,6 +2898,98 @@ void upb_strtable_setentryvalue(upb_strtable* t, intptr_t iter, upb_value v) {
   t->t.entries[iter].val = v;
 }
 
+/* upb_exttable ***************************************************************/
+
+static uint32_t _upb_exttable_hash(const void* ptr, uint32_t ext_num) {
+  uint64_t a = (uintptr_t)ptr;
+  uint64_t b = ext_num;
+  return (uint32_t)WyhashMix(a ^ kWyhashSalt[1], b ^ _upb_Seed());
+}
+
+static uint32_t exthash(upb_key key, upb_value val) {
+  const void* ptr = (const void*)key.num;
+  uint32_t ext_num = *(const uint32_t*)upb_value_getconstptr(val);
+  return _upb_exttable_hash(ptr, ext_num);
+}
+
+static bool exteql(upb_key k1, upb_value v1, lookupkey_t k2) {
+  if ((const void*)k1.num == k2.ext.ptr) {
+    uint32_t ext_num1 = *(const uint32_t*)upb_value_getconstptr(v1);
+    return ext_num1 == k2.ext.ext_num;
+  }
+  return false;
+}
+
+bool upb_exttable_init(upb_exttable* t, size_t expected_size, upb_Arena* a) {
+  int size_lg2 = upb_Log2Ceiling(_upb_entries_needed_for(expected_size));
+  return init(&t->t, size_lg2, a);
+}
+
+void upb_exttable_clear(upb_exttable* t) {
+  size_t bytes = upb_table_size(&t->t) * sizeof(upb_tabent);
+  t->t.count = 0;
+  memset((char*)t->t.entries, 0, bytes);
+}
+
+bool upb_exttable_resize(upb_exttable* t, size_t size_lg2, upb_Arena* a) {
+  upb_exttable new_table;
+  if (!init(&new_table.t, size_lg2, a)) return false;
+
+  size_t i;
+  for (i = begin(&t->t); i < upb_table_size(&t->t); i = next(&t->t, i)) {
+    const upb_tabent* e = &t->t.entries[i];
+    uint32_t hash = exthash(e->key, e->val);
+    uint32_t ext_num = *(const uint32_t*)upb_value_getconstptr(e->val);
+    lookupkey_t lookupkey = extkey((const void*)e->key.num, ext_num);
+    insert(&new_table.t, lookupkey, e->key, e->val, hash, &exthash, &exteql);
+  }
+
+  *t = new_table;
+  return true;
+}
+
+bool upb_exttable_insert(upb_exttable* t, const void* k, const uint32_t* v,
+                         upb_Arena* a) {
+  UPB_ASSERT(k != NULL);
+  UPB_ASSERT(v != NULL);
+  UPB_ASSERT(*v != 0);
+
+  if (isfull(&t->t)) {
+    if (!upb_exttable_resize(t, _upb_log2_table_size(&t->t) + 1, a)) {
+      return false;
+    }
+  }
+
+  lookupkey_t lookupkey = extkey(k, *v);
+  upb_key key = {.num = (uintptr_t)k};
+  upb_value val = upb_value_constptr(v);
+  uint32_t hash = _upb_exttable_hash(k, *v);
+  insert(&t->t, lookupkey, key, val, hash, &exthash, &exteql);
+  return true;
+}
+
+const uint32_t* upb_exttable_lookup(const upb_exttable* t, const void* k,
+                                    uint32_t ext_number) {
+  uint32_t hash = _upb_exttable_hash(k, ext_number);
+  upb_value val;
+  if (lookup(&t->t, extkey(k, ext_number), &val, hash, &exteql)) {
+    return (const uint32_t*)upb_value_getconstptr(val);
+  }
+  return NULL;
+}
+
+const uint32_t* upb_exttable_remove(upb_exttable* t, const void* k,
+                                    uint32_t ext_number) {
+  uint32_t hash = _upb_exttable_hash(k, ext_number);
+  upb_value val;
+  if (rm(&t->t, extkey(k, ext_number), &val, hash, &exteql)) {
+    return (const uint32_t*)upb_value_getconstptr(val);
+  }
+  return NULL;
+}
+
+size_t upb_exttable_size(const upb_exttable* t) { return t->t.count; }
+
 /* upb_inttable ***************************************************************/
 
 /* For inttables we use a hybrid structure where small keys are kept in an
@@ -2887,9 +3004,15 @@ static uint32_t presence_mask_arr_size(uint32_t array_size) {
   return (array_size + 7) / 8;  // sizeof(uint8_t) is always 1.
 }
 
-static uint32_t inthash(upb_key key) { return upb_inthash(key.num); }
+static uint32_t inthash(upb_key key, upb_value val) {
+  UPB_UNUSED(val);
+  return upb_inthash(key.num);
+}
 
-static bool inteql(upb_key k1, lookupkey_t k2) { return k1.num == k2.num; }
+static bool inteql(upb_key k1, upb_value v1, lookupkey_t k2) {
+  UPB_UNUSED(v1);
+  return k1.num == k2.num;
+}
 
 static upb_value* mutable_array(upb_inttable* t) {
   return (upb_value*)t->array;
@@ -2995,8 +3118,8 @@ bool upb_inttable_insert(upb_inttable* t, uintptr_t key, upb_value val,
 
   for (i = begin(&t->t); i < upb_table_size(&t->t); i = next(&t->t, i)) {
     const upb_tabent* e = &t->t.entries[i];
-    insert(&new_table, intkey(e->key.num), e->key, e->val,
-           upb_inthash(e->key.num), &inthash, &inteql);
+    insert(&new_table, intkey(e->key.num), e->key, e->val,
+           inthash(e->key, e->val), &inthash, &inteql);
   }
 
   UPB_ASSERT(t->t.count == new_table.count);
@@ -5655,24 +5778,33 @@ static int GetLocaleRadix(char *data, size_t capacity) {
   const int size = snprintf(temp, sizeof(temp), "%.1f", 1.5);
   UPB_ASSERT(temp[0] == '1');
   UPB_ASSERT(temp[size - 1] == '5');
-  UPB_ASSERT(size < capacity);
+  if (size < capacity) {
+    return 0;
+  }
   temp[size - 1] = '\0';
-  strcpy(data, temp + 1);
+  strncpy(data, temp + 1, size);
   return size - 2;
 }
 
 // Populates a string identical to *input except that the character pointed to
 // by pos (which should be '.') is replaced with the locale-specific radix.
 
-static void LocalizeRadix(const char *input, const char *pos, char *output) {
+static void LocalizeRadix(const char *input, const char *pos, char *output,
+                          int output_size) {
   const int len1 = pos - input;
 
   char radix[8];
   const int len2 = GetLocaleRadix(radix, sizeof(radix));
 
+  const int n = output_size - len1 - len2 - 1;
+  if (n < 0) {
+    return;
+  }
+
   memcpy(output, input, len1);
   memcpy(output + len1, radix, len2);
-  strcpy(output + len1 + len2, input + len1 + 1);
+  strncpy(output + len1 + len2, input + len1 + 1, n);
+  output[output_size - 1] = '\0';
 }
 
 double _upb_NoLocaleStrtod(const char *str, char **endptr) {
@@ -5692,7 +5824,7 @@ double _upb_NoLocaleStrtod(const char *str, char **endptr) {
   // try again.
 
   char localized[80];
-  LocalizeRadix(str, temp_endptr, localized);
+  LocalizeRadix(str, temp_endptr, localized, sizeof localized);
   char *localized_endptr;
   result = strtod(localized, &localized_endptr);
   if ((localized_endptr - &localized[0]) > (temp_endptr - str)) {
@@ -5774,6 +5906,7 @@ upb_alloc upb_alloc_global = {&upb_global_allocfunc};
 static UPB_ATOMIC(size_t) g_max_block_size = UPB_DEFAULT_MAX_BLOCK_SIZE;
 
 void upb_Arena_SetMaxBlockSize(size_t max) {
+  UPB_ASSERT(max <= UINT32_MAX);
   upb_Atomic_Store(&g_max_block_size, max, memory_order_relaxed);
 }
 
@@ -5810,8 +5943,14 @@ typedef struct upb_ArenaInternal {
   UPB_ATOMIC(const upb_ArenaRef*) refs;
 #endif
 
-  //
-  uint32_t size_hint;
+  // Size of the last block we allocated in the normal exponential scheme.
+  uint32_t last_block_size;
+
+  // A hint that grows whenever we perform a "one-off" allocation into a a
+  // dedicated block. This helps us determine if these outlier blocks are
+  // actually common enough that we should switch back to the normal exponential
+  // scheme at the larger size.
+  uint32_t size_hint;
 
   // All non atomic members used during allocation must be above this point, and
   // are used by _SwapIn/_SwapOut
@@ -6107,23 +6246,60 @@ bool upb_Arena_HasRefChain(const upb_Arena* from, const upb_Arena* to) {
 
 #endif
 
-
-static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t offset,
-                                size_t block_size) {
+static upb_MemBlock* _upb_Arena_AllocBlockInternal(upb_alloc* alloc,
+                                                   size_t size) {
+  UPB_ASSERT(size >= kUpb_MemblockReserve);
+  upb_SizedPtr alloc_result = upb_SizeReturningMalloc(alloc, size);
+  if (!alloc_result.p) return NULL;
+  upb_MemBlock* block = alloc_result.p;
+  block->size = alloc_result.n;
+  return block;
+}
+
+static upb_MemBlock* _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
+  upb_ArenaInternal* ai = upb_Arena_Internal(a);
+  return _upb_Arena_AllocBlockInternal(_upb_ArenaInternal_BlockAlloc(ai), size);
+}
+
+static void _upb_Arena_AddBlock(upb_Arena* a, upb_MemBlock* block) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
-  upb_MemBlock* block = ptr;
 
-
-
-
-
-
+  // Atomic add not required here, as threads won't race allocating blocks, plus
+  // atomic fetch-add is slower than load/add/store on arm devices compiled
+  // targeting pre-v8.1. Relaxed order is safe as nothing depends on order of
+  // size allocated.
+  uintptr_t old_space_allocated =
+      upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
+  upb_Atomic_Store(&ai->space_allocated, old_space_allocated + block->size,
+                   memory_order_relaxed);
+
+  block->next = ai->blocks;
   ai->blocks = block;
-
+}
+
+static void _upb_Arena_UseBlockInternal(upb_Arena* a, upb_MemBlock* block,
+                                        size_t offset) {
+  size_t block_size = block->size;
+  char* start = UPB_PTR_AT(block, kUpb_MemblockReserve + offset, char);
   a->UPB_PRIVATE(ptr) = start;
   a->UPB_PRIVATE(end) = UPB_PTR_AT(block, block_size, char);
   UPB_PRIVATE(upb_Xsan_PoisonRegion)(start, a->UPB_PRIVATE(end) - start);
-
+  UPB_PRIVATE(upb_Xsan_Init)(UPB_XSAN(a));
+  UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >=
+             block_size - kUpb_MemblockReserve - offset);
+}
+
+static void _upb_Arena_UseBlock(upb_Arena* a, upb_MemBlock* block) {
+  _upb_Arena_UseBlockInternal(a, block, 0);
+}
+
+static bool _upb_Arena_WouldReduceFreeSpace(upb_Arena* a, size_t size,
+                                            size_t block_size) {
+  upb_ArenaInternal* ai = upb_Arena_Internal(a);
+  size_t current_free =
+      ai->blocks ? a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr) : 0;
+  size_t future_free = block_size - kUpb_MemblockReserve - size;
+  return current_free >= future_free;
 }
 
 // Fulfills the allocation request by allocating a new block. Returns NULL on
@@ -6131,84 +6307,60 @@ static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t offset,
 void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   if (!ai->block_alloc) return NULL;
-
-
-
-
-
-
-
+
+  // Whether to satisfy the allocation from a one-off block which is right-sized
+  // for the current allocation. We do this if we suspect that the current
+  // allocation is an outlier that does not represent the typical size of
+  // allocations from this arena, or if we would reduce free space by
+  // using exponential growth.
+  bool one_off = false;
 
   // Relaxed order is safe here as we don't need any ordering with the setter.
   size_t max_block_size =
       upb_Atomic_Load(&g_max_block_size, memory_order_relaxed);
-
-
-
-
-
-
-
-
-
-      target_size < max_block_size) {
-    last_size = ai->size_hint;
-    // Recalculate sizes with possibly larger last_size
-    target_size = UPB_MIN(last_size * 2, max_block_size);
-    future_free = UPB_MAX(size, target_size - kUpb_MemblockReserve) - size;
-  }
-  bool insert_after_head = false;
-  // Only insert after head if an allocated block is present; we don't want to
-  // continue allocating out of the initial block because we'll have no way of
-  // restoring the size of our allocated block if we add another.
-  if (last_block && current_free >= future_free) {
-    // If we're still going to net reduce free space with this new block, then
-    // only allocate the precise size requested and keep the current last block
-    // as the active block for future allocations.
-    insert_after_head = true;
-    target_size = size + kUpb_MemblockReserve;
-    // Add something to our previous size each time, so that eventually we
-    // will reach the max block size. Allocations larger than the max block size
-    // will always get their own backing allocation, so don't include them.
-    if (target_size <= max_block_size) {
-      ai->size_hint = UPB_MIN(ai->size_hint + (size >> 1), max_block_size >> 1);
+  size_t block_size = UPB_MIN(ai->last_block_size * 2, max_block_size);
+
+  if (size + kUpb_MemblockReserve > block_size) {
+    // A regular doubling would not yield a large enough block. Does size_hint
+    // indicate that we have consistently needed large blocks?
+    block_size = UPB_MIN(ai->size_hint * 2, max_block_size);
+    if (size + kUpb_MemblockReserve > block_size) {
+      // Even size_hint is not large enough, we will have to do a one-off.
+      one_off = true;
     }
   }
-  // We may need to exceed the max block size if the user requested a large
-  // allocation.
-  size_t block_size = UPB_MAX(kUpb_MemblockReserve + size, target_size);
-  upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
-  upb_SizedPtr alloc_result = upb_SizeReturningMalloc(block_alloc, block_size);
 
-
+  // If switching to a block of this size would *reduce* available free space,
+  // we might as well make a one-off block instead.
+  one_off = one_off || _upb_Arena_WouldReduceFreeSpace(a, size, block_size);
 
-
-
+  if (one_off) {
+    // Note: this may exceed the max block size, but that's okay.
+    block_size = size + kUpb_MemblockReserve;
+  }
 
-
-
-  // targetting pre-v8.1. Relaxed order is safe as nothing depends on order of
-  // size allocated.
+  upb_MemBlock* block = _upb_Arena_AllocBlock(a, block_size);
+  if (!block) return NULL;
 
-
-      upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
-  upb_Atomic_Store(&ai->space_allocated,
-                   old_space_allocated + actual_block_size,
-                   memory_order_relaxed);
-  if (UPB_UNLIKELY(insert_after_head)) {
-    upb_ArenaInternal* ai = upb_Arena_Internal(a);
-    block->size = actual_block_size;
-    upb_MemBlock* head = ai->blocks;
-    block->next = head->next;
-    head->next = block;
+  _upb_Arena_AddBlock(a, block);
 
+  // Recheck size, in case the allocator gave us a much larger block than we
+  // requested and we want to make it the new allocating region.
+  if (UPB_UNLIKELY(one_off) &&
+      _upb_Arena_WouldReduceFreeSpace(a, size, block->size)) {
+    // Increase size_hint, so that a series of one-off allocations will
+    // eventually convince us to switch to exponential growth at the larger
+    // size.
+    ai->size_hint = UPB_MIN(ai->size_hint + (size >> 1), max_block_size >> 1);
     char* allocated = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
-
-
+    char* poison_start = allocated + size - UPB_PRIVATE(kUpb_Asan_GuardSize);
+    UPB_PRIVATE(upb_Xsan_PoisonRegion)(
+        poison_start, UPB_PTR_AT(block, block->size, char) - poison_start);
     return allocated;
   } else {
-    ai->
-
+    ai->last_block_size = UPB_MIN(block->size, UINT32_MAX);
+    ai->size_hint = ai->last_block_size;
+    _upb_Arena_UseBlock(a, block);
     UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >= size);
     return upb_Arena_Malloc(a, size - UPB_PRIVATE(kUpb_Asan_GuardSize));
   }
@@ -6219,27 +6371,26 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc, size_t first_size) {
       UPB_ALIGN_MALLOC(kUpb_MemblockReserve + sizeof(upb_ArenaState));
   upb_ArenaState* a;
 
-
+  if (!alloc) return NULL;
 
+  // We need to malloc the initial block.
   size_t block_size =
       first_block_overhead + UPB_MAX(256, UPB_ALIGN_MALLOC(first_size) +
                                               UPB_PRIVATE(kUpb_Asan_GuardSize));
-  upb_SizedPtr alloc_result;
-  if (!alloc ||
-      !(alloc_result = upb_SizeReturningMalloc(alloc, block_size)).p) {
-    return NULL;
-  }
-  char* mem = alloc_result.p;
-  size_t actual_block_size = alloc_result.n;
+  upb_MemBlock* block = _upb_Arena_AllocBlockInternal(alloc, block_size);
+  if (!block) return NULL;
 
-  a = UPB_PTR_AT(mem, kUpb_MemblockReserve, upb_ArenaState);
+  // Initialize the arena state in the first block. We "borrow" the memory from
+  // the block, because we can't yet call upb_Arena_Malloc.
+  a = UPB_PTR_AT(block, kUpb_MemblockReserve, upb_ArenaState);
   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 0);
-  a->body.
+  a->body.last_block_size = UPB_MIN(block->size, UINT32_MAX);
+  a->body.size_hint = UPB_MIN(block->size, UINT32_MAX);
   upb_Atomic_Init(&a->body.parent_or_count, _upb_Arena_TaggedFromRefcount(1));
   upb_Atomic_Init(&a->body.next, NULL);
   upb_Atomic_Init(&a->body.previous_or_tail,
                   _upb_Arena_TaggedFromTail(&a->body));
-  upb_Atomic_Init(&a->body.space_allocated,
+  upb_Atomic_Init(&a->body.space_allocated, 0);
   a->body.blocks = NULL;
 #ifndef NDEBUG
   a->body.refs = NULL;
@@ -6247,15 +6398,16 @@ static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc, size_t first_size) {
   a->body.upb_alloc_cleanup = NULL;
   UPB_PRIVATE(upb_Xsan_Init)(UPB_XSAN(&a->body));
 
-  _upb_Arena_AddBlock(&a->head,
+  _upb_Arena_AddBlock(&a->head, block);
+  _upb_Arena_UseBlockInternal(&a->head, block,
+                              UPB_ALIGN_MALLOC(sizeof(upb_ArenaState)));
 
   return &a->head;
 }
 
 upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
-  UPB_STATIC_ASSERT(
-
-      "Need to update UPB_ARENA_SIZE_HACK");
+  UPB_STATIC_ASSERT(UPB_ARENA_SIZE_HACK >= sizeof(upb_ArenaState),
+                    "Need to update UPB_ARENA_SIZE_HACK");
   upb_ArenaState* a;
 
   if (mem) {
@@ -6285,6 +6437,7 @@ upb_Arena* upb_Arena_Init(void* mem, size_t n, upb_alloc* alloc) {
   a->body.refs = NULL;
 #endif
   a->body.size_hint = 128;
+  a->body.last_block_size = 128;
   a->body.upb_alloc_cleanup = NULL;
   a->body.block_alloc = _upb_Arena_MakeBlockAlloc(alloc, 1);
   a->head.UPB_PRIVATE(ptr) = (void*)UPB_ALIGN_MALLOC((uintptr_t)(a + 1));
@@ -6731,14 +6884,11 @@ void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
 bool _upb_Arena_WasLastAlloc(struct upb_Arena* a, void* ptr, size_t oldsize) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   upb_MemBlock* block = ai->blocks;
-  if (block == NULL) return false;
   // Skip any arena refs.
   while (block != NULL && block->size == 0) {
     block = block->next;
   }
   if (block == NULL) return false;
-  block = block->next;
-  if (block == NULL) return false;
   char* start = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
   return UPB_PRIVATE(upb_Xsan_PtrEq)(ptr, start) &&
          UPB_PRIVATE(_upb_Arena_AllocSpan)(oldsize) ==
@@ -6820,6 +6970,46 @@ bool upb_Array_Append(upb_Array* arr, upb_MessageValue val, upb_Arena* arena) {
   return true;
 }
 
+bool upb_Array_Copy(upb_Array* dst, const upb_Array* src, upb_Arena* arena) {
+  UPB_ASSERT(dst);
+  UPB_ASSERT(src);
+  UPB_ASSERT(!upb_Array_IsFrozen(dst));
+  if (dst == src) return true;
+  size_t len = upb_Array_Size(src);
+  if (!UPB_PRIVATE(_upb_Array_ResizeUninitialized)(dst, len, arena)) {
+    return false;
+  }
+  if (len == 0) return true;
+  const int lg2 = UPB_PRIVATE(_upb_Array_ElemSizeLg2)(dst);
+  const int src_lg2 = UPB_PRIVATE(_upb_Array_ElemSizeLg2)(src);
+  UPB_ASSERT(lg2 == src_lg2);
+  char* dst_data = upb_Array_MutableDataPtr(dst);
+  const char* src_data = upb_Array_DataPtr(src);
+  memcpy(dst_data, src_data, len << lg2);
+  return true;
+}
+
+bool upb_Array_AppendAll(upb_Array* dst, const upb_Array* src,
+                         upb_Arena* arena) {
+  UPB_ASSERT(!upb_Array_IsFrozen(dst));
+  UPB_ASSERT(src);
+  size_t src_len = upb_Array_Size(src);
+  if (src_len == 0) return true;
+  size_t dst_len = upb_Array_Size(dst);
+  size_t len = dst_len + src_len;
+  if (UPB_UNLIKELY(len < dst_len)) return false;
+  if (!UPB_PRIVATE(_upb_Array_ResizeUninitialized)(dst, len, arena)) {
+    return false;
+  }
+  const int lg2 = UPB_PRIVATE(_upb_Array_ElemSizeLg2)(dst);
+  const int src_lg2 = UPB_PRIVATE(_upb_Array_ElemSizeLg2)(src);
+  UPB_ASSERT(lg2 == src_lg2);
+  char* dst_data = upb_Array_MutableDataPtr(dst);
+  const char* src_data = upb_Array_DataPtr(src);
+  memcpy(dst_data + (dst_len << lg2), src_data, src_len << lg2);
+  return true;
+}
+
 void upb_Array_Move(upb_Array* arr, size_t dst_idx, size_t src_idx,
                     size_t count) {
   UPB_ASSERT(!upb_Array_IsFrozen(arr));
@@ -6880,9 +7070,17 @@ bool UPB_PRIVATE(_upb_Array_Realloc)(upb_Array* array, size_t min_capacity,
   void* ptr = upb_Array_MutableDataPtr(array);
 
   // Log2 ceiling of size.
-  while (new_capacity < min_capacity) new_capacity *= 2;
+  while (new_capacity < min_capacity) {
+    if (upb_ShlOverflow(&new_capacity, 1)) {
+      new_capacity = SIZE_MAX;
+      break;
+    }
+  }
 
-  size_t new_bytes = new_capacity << lg2;
+  size_t new_bytes = new_capacity;
+  if (upb_ShlOverflow(&new_bytes, lg2)) {
+    return false;
+  }
   ptr = upb_Arena_Realloc(arena, ptr, old_bytes, new_bytes);
   if (!ptr) return false;
 
@@ -7340,6 +7538,7 @@ UPB_NOINLINE bool UPB_PRIVATE(_upb_Message_AddUnknownSlowPath)(upb_Message* msg,
     if (!view) return false;
     view->data = data;
   } else {
+    if (SIZE_MAX - sizeof(upb_StringView) < len) return false;
    view = upb_Arena_Malloc(arena, sizeof(upb_StringView) + len);
    if (!view) return false;
    char* copy = UPB_PTR_AT(view, sizeof(upb_StringView), char);
@@ -8116,7 +8315,7 @@ static bool upb_Clone_MessageValue(void* value, upb_CType value_type,
     case kUpb_CType_String:
     case kUpb_CType_Bytes: {
       upb_StringView source = *(upb_StringView*)value;
-
+      size_t size = source.size;
       void* cloned_data = upb_Arena_Malloc(arena, size);
       if (cloned_data == NULL) {
         return false;
@@ -8496,8 +8695,15 @@ bool UPB_PRIVATE(_upb_Message_ReserveSlot)(struct upb_Message* msg,
     in->capacity = capacity;
     UPB_PRIVATE(_upb_Message_SetInternal)(msg, in);
   } else if (in->capacity == in->size) {
+    if (in->size == UINT32_MAX) return false;
     // Internal data is too small, reallocate.
-
+    size_t needed_pow2 = upb_RoundUpToPowerOfTwo(in->size + 1);
+    if (needed_pow2 > UINT32_MAX) return false;
+    uint32_t new_capacity = needed_pow2;
+    if (UPB_SIZEOF_FLEX_WOULD_OVERFLOW(upb_Message_Internal, aux_data,
+                                       new_capacity)) {
+      return false;
+    }
     in = upb_Arena_Realloc(a, in, _upb_Message_SizeOfInternal(in->capacity),
                            _upb_Message_SizeOfInternal(new_capacity));
     if (!in) return false;
@@ -8702,7 +8908,6 @@ upb_MiniTableEnum* upb_MiniTableEnum_Build(const char* data, size_t len,
 
 
 #include <inttypes.h>
-#include <stdalign.h>
 #include <stddef.h>
 #include <stdint.h>
 #include <stdlib.h>
@@ -10109,28 +10314,25 @@ char* upb_MtDataEncoder_EndEnum(upb_MtDataEncoder* e, char* ptr) {
 
 // Must be last.
 
-#define EXTREG_KEY_SIZE (sizeof(upb_MiniTable*) + sizeof(uint32_t))
-
 struct upb_ExtensionRegistry {
+  upb_exttable exts;
   upb_Arena* arena;
-  upb_strtable exts;  // Key is upb_MiniTable* concatenated with fieldnum.
 };
 
-static void extreg_key(char* buf, const upb_MiniTable* l, uint32_t fieldnum) {
-  memcpy(buf, &l, sizeof(l));
-  memcpy(buf + sizeof(l), &fieldnum, sizeof(fieldnum));
-}
-
 upb_ExtensionRegistry* upb_ExtensionRegistry_New(upb_Arena* arena) {
   upb_ExtensionRegistry* r = upb_Arena_Malloc(arena, sizeof(*r));
   if (!r) return NULL;
   r->arena = arena;
-  if (!upb_strtable_init(&r->exts, 8, arena)) return NULL;
+  if (!upb_exttable_init(&r->exts, 8, arena)) return NULL;
   return r;
 }
 
 UPB_API upb_ExtensionRegistryStatus upb_ExtensionRegistry_Add(
     upb_ExtensionRegistry* r, const upb_MiniTableExtension* e) {
+  UPB_STATIC_ASSERT(
+      offsetof(upb_MiniTableExtension,
+               UPB_PRIVATE(field).UPB_PRIVATE(number)) == 0,
+      "Extension must be first-member-of-struct convertable with uint32_t");
   uint32_t fieldnum = upb_MiniTableExtension_Number(e);
   const upb_MiniTable* extendee = upb_MiniTableExtension_Extendee(e);
 
@@ -10140,15 +10342,11 @@ UPB_API upb_ExtensionRegistryStatus upb_ExtensionRegistry_Add(
     return kUpb_ExtensionRegistryStatus_InvalidExtension;
   }
 
-  char buf[EXTREG_KEY_SIZE];
-  extreg_key(buf, extendee, fieldnum);
-
-  if (upb_strtable_lookup2(&r->exts, buf, EXTREG_KEY_SIZE, NULL)) {
+  if (upb_exttable_lookup(&r->exts, extendee, fieldnum) != NULL) {
     return kUpb_ExtensionRegistryStatus_DuplicateEntry;
   }
 
-  if (!upb_strtable_insert(&r->exts, buf, EXTREG_KEY_SIZE,
-                           upb_value_constptr(e), r->arena)) {
+  if (!upb_exttable_insert(&r->exts, extendee, (const uint32_t*)e, r->arena)) {
     return kUpb_ExtensionRegistryStatus_OutOfMemory;
   }
   return kUpb_ExtensionRegistryStatus_Ok;
@@ -10169,10 +10367,8 @@ failure:
   // Back out the entries previously added.
   for (end = e, e = start; e < end; e++) {
     const upb_MiniTableExtension* ext = *e;
-    char buf[EXTREG_KEY_SIZE];
-    extreg_key(buf, upb_MiniTableExtension_Extendee(ext),
-               upb_MiniTableExtension_Number(ext));
-    upb_strtable_remove2(&r->exts, buf, EXTREG_KEY_SIZE, NULL);
+    upb_exttable_remove(&r->exts, upb_MiniTableExtension_Extendee(ext),
+                        upb_MiniTableExtension_Number(ext));
   }
   UPB_ASSERT(status != kUpb_ExtensionRegistryStatus_Ok);
   return status;
@@ -10180,17 +10376,16 @@ failure:
 
 const upb_MiniTableExtension* upb_ExtensionRegistry_Lookup(
     const upb_ExtensionRegistry* r, const upb_MiniTable* t, uint32_t num) {
-  char buf[EXTREG_KEY_SIZE];
-  upb_value v;
-  extreg_key(buf, t, num);
-  if (upb_strtable_lookup2(&r->exts, buf, EXTREG_KEY_SIZE, &v)) {
-    return upb_value_getconstptr(v);
-  } else {
-    return NULL;
-  }
+  const uint32_t* v = upb_exttable_lookup(&r->exts, t, num);
+  return (const upb_MiniTableExtension*)v;
+}
+
+size_t upb_ExtensionRegistry_Size(const upb_ExtensionRegistry* r) {
+  return upb_exttable_size(&r->exts);
 }
 
 
+#include <stddef.h>
 #include <stdint.h>
 
 
@@ -10218,21 +10413,17 @@ static bool _upb_GeneratedRegistry_AddAllLinkedExtensions(
   const UPB_PRIVATE(upb_GeneratedExtensionListEntry)* entry =
       UPB_PRIVATE(upb_generated_extension_list);
   while (entry != NULL) {
-
-
-    uintptr_t begin = (uintptr_t)entry->start;
-    uintptr_t end = (uintptr_t)entry->stop;
-    uintptr_t current = begin;
-    while (current < end) {
-      const upb_MiniTableExtension* ext =
-          (const upb_MiniTableExtension*)current;
+    const upb_MiniTableExtension** current = entry->start;
+    for (current = entry->start; current != entry->stop; ++current) {
+      const upb_MiniTableExtension* ext = *current;
       // Sentinels and padding introduced by the linker can result in zeroed
       // entries, so simply skip them.
-      if (
+      if (*current == NULL) {
         // MSVC introduces padding that might not be sized exactly the same as
-        //
-        //
-
+        // the linker array element, but it should be properly aligned, so just
+        // skipping empty elements should be safe. (If the size and align of
+        // the array elements was different, we'd have to do something more
+        // complicated).
         continue;
       }
 
@@ -10240,7 +10431,6 @@ static bool _upb_GeneratedRegistry_AddAllLinkedExtensions(
           kUpb_ExtensionRegistryStatus_Ok) {
         return false;
       }
-      current += sizeof(upb_MiniTableExtension);
     }
     entry = entry->next;
   }
@@ -10440,6 +10630,7 @@ struct upb_DefPool {
   size_t scratch_size;
   size_t bytes_loaded;
   bool disable_closed_enum_checking;
+  bool disable_implicit_field_presence;
 };
 
 void upb_DefPool_Free(upb_DefPool* s) {
@@ -10458,6 +10649,7 @@ upb_DefPool* upb_DefPool_New(void) {
   s->arena = upb_Arena_New();
   s->bytes_loaded = 0;
   s->disable_closed_enum_checking = false;
+  s->disable_implicit_field_presence = false;
 
   s->scratch_size = 240;
   s->scratch_data = upb_gmalloc(s->scratch_size);
@@ -10499,6 +10691,15 @@ bool upb_DefPool_ClosedEnumCheckingDisabled(const upb_DefPool* s) {
   return s->disable_closed_enum_checking;
 }
 
+void upb_DefPool_DisableImplicitFieldPresence(upb_DefPool* s) {
+  UPB_ASSERT(upb_strtable_count(&s->files) == 0);
+  s->disable_implicit_field_presence = true;
+}
+
+bool upb_DefPool_ImplicitFieldPresenceDisabled(const upb_DefPool* s) {
+  return s->disable_implicit_field_presence;
+}
+
 const google_protobuf_FeatureSetDefaults* upb_DefPool_FeatureSetDefaults(
     const upb_DefPool* s) {
   return s->feature_set_defaults;
@@ -10608,7 +10809,7 @@ void _upb_DefPool_SetPlatform(upb_DefPool* s, upb_MiniTablePlatform platform) {
 
 const upb_MessageDef* upb_DefPool_FindMessageByName(const upb_DefPool* s,
                                                     const char* sym) {
-  return _upb_DefPool_Unpack(s, sym, strlen(sym), UPB_DEFTYPE_MSG);
+  return upb_DefPool_FindMessageByNameWithSize(s, sym, strlen(sym));
 }
 
 const upb_MessageDef* upb_DefPool_FindMessageByNameWithSize(
@@ -10618,12 +10819,23 @@ const upb_MessageDef* upb_DefPool_FindMessageByNameWithSize(
 
 const upb_EnumDef* upb_DefPool_FindEnumByName(const upb_DefPool* s,
                                               const char* sym) {
-  return _upb_DefPool_Unpack(s, sym, strlen(sym), UPB_DEFTYPE_ENUM);
+  return upb_DefPool_FindEnumByNameWithSize(s, sym, strlen(sym));
+}
+
+const upb_EnumDef* upb_DefPool_FindEnumByNameWithSize(const upb_DefPool* s,
+                                                      const char* sym,
+                                                      size_t len) {
+  return _upb_DefPool_Unpack(s, sym, len, UPB_DEFTYPE_ENUM);
+}
+
+const upb_EnumValueDef* upb_DefPool_FindEnumValueByName(const upb_DefPool* s,
+                                                        const char* sym) {
+  return upb_DefPool_FindEnumValueByNameWithSize(s, sym, strlen(sym));
 }
 
-const upb_EnumValueDef* upb_DefPool_FindEnumValueByName(const upb_DefPool* s,
-                                                        const char* sym) {
-  return _upb_DefPool_Unpack(s, sym, strlen(sym), UPB_DEFTYPE_ENUMVAL);
+const upb_EnumValueDef* upb_DefPool_FindEnumValueByNameWithSize(
+    const upb_DefPool* s, const char* sym, size_t len) {
+  return _upb_DefPool_Unpack(s, sym, len, UPB_DEFTYPE_ENUMVAL);
 }
 
 const upb_FileDef* upb_DefPool_FindFileByName(const upb_DefPool* s,
@@ -12240,7 +12452,7 @@ static void _upb_FieldDef_Create(upb_DefBuilder* ctx, const char* prefix,
 
   f->has_presence =
       (!upb_FieldDef_IsRepeated(f)) &&
-      (f->is_extension ||
+      (f->is_extension || _upb_FileDef_ImplicitFieldPresenceDisabled(f->file) ||
       (f->type_ == kUpb_FieldType_Message ||
        f->type_ == kUpb_FieldType_Group || upb_FieldDef_ContainingOneof(f) ||
        google_protobuf_FeatureSet_field_presence(f->resolved_features) !=
@@ -12644,6 +12856,10 @@ bool _upb_FileDef_ClosedEnumCheckingDisabled(const upb_FileDef* f) {
   return upb_DefPool_ClosedEnumCheckingDisabled(f->symtab);
 }
 
+bool _upb_FileDef_ImplicitFieldPresenceDisabled(const upb_FileDef* f) {
+  return upb_DefPool_ImplicitFieldPresenceDisabled(f->symtab);
+}
+
 int upb_FileDef_TopLevelEnumCount(const upb_FileDef* f) {
   return f->top_lvl_enum_count;
 }
@@ -14532,6 +14748,7 @@ static void create_method(upb_DefBuilder* ctx,
   m->output_type = _upb_DefBuilder_Resolve(
       ctx, m->full_name, m->full_name,
       google_protobuf_MethodDescriptorProto_output_type(method_proto), UPB_DEFTYPE_MSG);
+  _upb_ServiceDef_InsertMethod(ctx, s, m);
 }
 
 // Allocate and initialize an array of |n| method defs belonging to |s|.
@@ -14763,6 +14980,7 @@ struct upb_ServiceDef {
   upb_MethodDef* methods;
   int method_count;
   int index;
+  upb_strtable ntom;
 };
 
 upb_ServiceDef* _upb_ServiceDef_At(const upb_ServiceDef* s, int index) {
@@ -14807,13 +15025,18 @@ const upb_MethodDef* upb_ServiceDef_Method(const upb_ServiceDef* s, int i) {
 
 const upb_MethodDef* upb_ServiceDef_FindMethodByName(const upb_ServiceDef* s,
                                                      const char* name) {
-  for (int i = 0; i < s->method_count; i++) {
-    const upb_MethodDef* m = &s->methods[i];
-    if (strcmp(upb_MethodDef_Name(m), name) == 0) {
-      return m;
-    }
+  return upb_ServiceDef_FindMethodByNameWithSize(s, name, strlen(name));
+}
+
+const upb_MethodDef* upb_ServiceDef_FindMethodByNameWithSize(
+    const upb_ServiceDef* s, const char* name, size_t len) {
+  upb_value val;
+
+  if (!upb_strtable_lookup2(&s->ntom, name, len, &val)) {
+    return NULL;
   }
-  return NULL;
+
+  return _upb_DefType_Unpack(val, UPB_DEFTYPE_METHOD);
 }
 
 static void create_service(upb_DefBuilder* ctx,
@@ -14838,6 +15061,8 @@ static void create_service(upb_DefBuilder* ctx,
   const google_protobuf_MethodDescriptorProto* const* methods =
       google_protobuf_ServiceDescriptorProto_method(svc_proto, &n);
   s->method_count = n;
+  bool ok = upb_strtable_init(&s->ntom, n, ctx->arena);
+  if (!ok) _upb_DefBuilder_OomErr(ctx);
   s->methods = _upb_MethodDefs_New(ctx, n, methods, s->resolved_features, s);
 }
 
@@ -14855,6 +15080,20 @@ upb_ServiceDef* _upb_ServiceDefs_New(
   return s;
 }
 
+void _upb_ServiceDef_InsertMethod(upb_DefBuilder* ctx, upb_ServiceDef* s,
+                                  const upb_MethodDef* m) {
+  const char* shortname = upb_MethodDef_Name(m);
+  const size_t shortnamelen = strlen(shortname);
+  upb_value existing_v;
+  if (upb_strtable_lookup(&s->ntom, shortname, &existing_v)) {
+    _upb_DefBuilder_Errf(ctx, "duplicate method name (%s)", shortname);
+  }
+  const upb_value method_v = _upb_DefType_Pack(m, UPB_DEFTYPE_METHOD);
+  bool ok = upb_strtable_insert(&s->ntom, shortname, shortnamelen, method_v,
+                                ctx->arena);
+  if (!ok) _upb_DefBuilder_OomErr(ctx);
+}
+
 
 #include <inttypes.h>
 #include <math.h>
@@ -16462,6 +16701,7 @@ const char* _upb_Decoder_DecodeWireValue(upb_Decoder* d, const char* ptr,
       *op = kUpb_DecodeOp_UnknownField;
       return ptr;
     }
+    _upb_Decoder_MungeInt32(val);
   } else {
     _upb_Decoder_Munge(field, val);
   }
@@ -16504,7 +16744,6 @@ const char* _upb_Decoder_DecodeWireValue(upb_Decoder* d, const char* ptr,
 UPB_FORCEINLINE
 const char* _upb_Decoder_DecodeKnownField(upb_Decoder* d, const char* ptr,
                                           upb_Message* msg,
-                                          const upb_MiniTable* layout,
                                           const upb_MiniTableField* field,
                                           int op, wireval* val) {
   uint8_t mode = field->UPB_PRIVATE(mode);
@@ -16533,67 +16772,13 @@ const char* _upb_Decoder_DecodeKnownField(upb_Decoder* d, const char* ptr,
   }
 }
 
-static const char* _upb_Decoder_FindFieldStart(upb_Decoder* d, const char* ptr,
-                                               uint32_t field_number,
-                                               uint32_t wire_type) {
-  // Since unknown fields are the uncommon case, we do a little extra work here
-  // to walk backwards through the buffer to find the field start. This frees
-  // up a register in the fast paths (when the field is known), which leads to
-  // significant speedups in benchmarks. Note that ptr may point into the slop
-  // space, beyond the normal end of the input buffer.
-  const char* start = ptr;
-
-  switch (wire_type) {
-    case kUpb_WireType_Varint:
-    case kUpb_WireType_Delimited:
-      // Skip the last byte
-      start--;
-      // Skip bytes until we encounter the final byte of the tag varint.
-      while (start[-1] & 0x80) start--;
-      break;
-    case kUpb_WireType_32Bit:
-      start -= 4;
-      break;
-    case kUpb_WireType_64Bit:
-      start -= 8;
-      break;
-    default:
-      break;
-  }
-  assert(start == d->debug_valstart);
-
-  {
-    // The varint parser does not enforce that integers are encoded with their
-    // minimum size; for example the value 1 could be encoded with three
-    // bytes: 0x81, 0x80, 0x00. These unnecessary trailing zeroes mean that we
-    // cannot skip backwards by the minimum encoded size of the tag; and
-    // unlike the loop for delimited or varint fields, we can't stop at a
-    // sentinel value because anything can precede a tag. Instead, parse back
-    // one byte at a time until we read the same tag value that was parsed
-    // earlier.
-    uint32_t tag = ((uint32_t)field_number << 3) | wire_type;
-    uint32_t seen = 0;
-    do {
-      start--;
-      seen <<= 7;
-      seen |= *start & 0x7f;
-    } while (seen != tag);
-  }
-  assert(start == d->debug_tagstart);
-
-  return start;
-}
-
 static const char* _upb_Decoder_DecodeUnknownField(
     upb_Decoder* d, const char* ptr, upb_Message* msg, uint32_t field_number,
-    uint32_t wire_type, wireval val) {
+    uint32_t wire_type, wireval val, const char* start) {
   if (field_number == 0) {
     upb_ErrorHandler_ThrowError(&d->err, kUpb_DecodeStatus_Malformed);
   }
 
-  const char* start =
-      _upb_Decoder_FindFieldStart(d, ptr, field_number, wire_type);
-
   upb_EpsCopyInputStream_StartCapture(&d->input, start);
 
   if (wire_type == kUpb_WireType_Delimited) {
|
|
|
16632
16817
|
const char* _upb_Decoder_DecodeFieldTag(upb_Decoder* d, const char* ptr,
|
|
16633
16818
|
uint32_t* field_number,
|
|
16634
16819
|
uint32_t* wire_type) {
|
|
16635
|
-
#ifndef NDEBUG
|
|
16636
|
-
d->debug_tagstart = ptr;
|
|
16637
|
-
#endif
|
|
16638
|
-
|
|
16639
16820
|
uint32_t tag;
|
|
16640
16821
|
UPB_ASSERT(ptr < d->input.limit_ptr);
|
|
16641
16822
|
ptr = upb_WireReader_ReadTag(ptr, &tag, EPS(d));
|
|
@@ -16645,15 +16826,9 @@ const char* _upb_Decoder_DecodeFieldTag(upb_Decoder* d, const char* ptr,
 }
 
 UPB_FORCEINLINE
-const char* _upb_Decoder_DecodeFieldData(upb_Decoder* d, const char* ptr,
-                                         upb_Message* msg,
-                                         const upb_MiniTable* mt,
-                                         uint32_t field_number,
-                                         uint32_t wire_type) {
-#ifndef NDEBUG
-  d->debug_valstart = ptr;
-#endif
-
+const char* _upb_Decoder_DecodeFieldData(
+    upb_Decoder* d, const char* ptr, upb_Message* msg, const upb_MiniTable* mt,
+    uint32_t field_number, uint32_t wire_type, const char* start) {
   int op;
   wireval val;
 
|
|
|
16662
16837
|
ptr = _upb_Decoder_DecodeWireValue(d, ptr, mt, field, wire_type, &val, &op);
|
|
16663
16838
|
|
|
16664
16839
|
if (op >= 0) {
|
|
16665
|
-
return _upb_Decoder_DecodeKnownField(d, ptr, msg,
|
|
16840
|
+
return _upb_Decoder_DecodeKnownField(d, ptr, msg, field, op, &val);
|
|
16666
16841
|
} else {
|
|
16667
16842
|
switch (op) {
|
|
16668
16843
|
case kUpb_DecodeOp_UnknownField:
|
|
16669
16844
|
return _upb_Decoder_DecodeUnknownField(d, ptr, msg, field_number,
|
|
16670
|
-
wire_type, val);
|
|
16845
|
+
wire_type, val, start);
|
|
16671
16846
|
case kUpb_DecodeOp_MessageSetItem:
|
|
16672
16847
|
return upb_Decoder_DecodeMessageSetItem(d, ptr, msg, mt);
|
|
16673
16848
|
default:
|
|
@@ -16688,6 +16863,7 @@ const char* _upb_Decoder_DecodeFieldNoFast(upb_Decoder* d, const char* ptr,
|
|
|
16688
16863
|
uint32_t field_number;
|
|
16689
16864
|
uint32_t wire_type;
|
|
16690
16865
|
|
|
16866
|
+
const char* start = ptr;
|
|
16691
16867
|
ptr = _upb_Decoder_DecodeFieldTag(d, ptr, &field_number, &wire_type);
|
|
16692
16868
|
|
|
16693
16869
|
if (wire_type == kUpb_WireType_EndGroup) {
|
|
@@ -16695,7 +16871,8 @@ const char* _upb_Decoder_DecodeFieldNoFast(upb_Decoder* d, const char* ptr,
|
|
|
16695
16871
|
return _upb_Decoder_EndMessage(d, ptr);
|
|
16696
16872
|
}
|
|
16697
16873
|
|
|
16698
|
-
ptr = _upb_Decoder_DecodeFieldData(d, ptr, msg, mt, field_number, wire_type
|
|
16874
|
+
ptr = _upb_Decoder_DecodeFieldData(d, ptr, msg, mt, field_number, wire_type,
|
|
16875
|
+
start);
|
|
16699
16876
|
_upb_Decoder_Trace(d, 'M');
|
|
16700
16877
|
return ptr;
|
|
16701
16878
|
}
|
|
@@ -16914,14 +17091,6 @@ typedef struct {
|
|
|
16914
17091
|
_upb_mapsorter sorter;
|
|
16915
17092
|
} upb_encstate;
|
|
16916
17093
|
|
|
16917
|
-
static size_t upb_roundup_pow2(size_t bytes) {
|
|
16918
|
-
size_t ret = 128;
|
|
16919
|
-
while (ret < bytes) {
|
|
16920
|
-
ret *= 2;
|
|
16921
|
-
}
|
|
16922
|
-
return ret;
|
|
16923
|
-
}
|
|
16924
|
-
|
|
16925
17094
|
UPB_NORETURN static void encode_err(upb_encstate* e, upb_EncodeStatus s) {
|
|
16926
17095
|
UPB_ASSERT(s != kUpb_EncodeStatus_Ok);
|
|
16927
17096
|
e->status = s;
|
|
@@ -16937,7 +17106,9 @@ UPB_NOINLINE static char* encode_growbuffer(char* ptr, upb_encstate* e,
|
|
|
16937
17106
|
size_t bytes) {
|
|
16938
17107
|
size_t old_size = e->limit - e->buf;
|
|
16939
17108
|
size_t needed_size = bytes + (e->limit - ptr);
|
|
16940
|
-
|
|
17109
|
+
if (needed_size < bytes) encode_err(e, kUpb_EncodeStatus_OutOfMemory);
|
|
17110
|
+
size_t new_size = upb_RoundUpToPowerOfTwo(UPB_MAX(128, needed_size));
|
|
17111
|
+
if (new_size == old_size) encode_err(e, kUpb_EncodeStatus_OutOfMemory);
|
|
16941
17112
|
void* old_buf = e->buf == &initial_buf_sentinel ? NULL : (void*)e->buf;
|
|
16942
17113
|
char* new_buf = upb_Arena_Realloc(e->arena, old_buf, old_size, new_size);
|
|
16943
17114
|
|
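The hardened `encode_growbuffer` guards two failure modes: `needed_size` wrapping past `SIZE_MAX` (caught by `needed_size < bytes`) and the rounded size making no progress (`new_size == old_size`). A worked example: with `old_size` 128, 70 bytes pending, and a 100-byte request, `needed_size` is 170 and the next power of two is 256, so the buffer doubles. A hypothetical standalone mirror of the growth computation:

```c
#include <stddef.h>
#include <stdint.h>

// Hypothetical mirror of the growth math above: 128-byte floor, next power
// of two, saturating instead of wrapping on huge requests.
static size_t grow_size(size_t needed) {
  size_t n = 128;
  while (n < needed && n <= SIZE_MAX / 2) n *= 2;
  return n < needed ? 0 : n;  // 0 signals "cannot grow"
}
```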