google-protobuf 4.30.2 → 4.31.0.rc.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ext/google/protobuf_c/defs.c +32 -0
- data/ext/google/protobuf_c/extconf.rb +6 -2
- data/ext/google/protobuf_c/map.c +2 -2
- data/ext/google/protobuf_c/message.c +2 -2
- data/ext/google/protobuf_c/protobuf.h +0 -6
- data/ext/google/protobuf_c/ruby-upb.c +1013 -478
- data/ext/google/protobuf_c/ruby-upb.h +787 -385
- data/lib/google/protobuf/any_pb.rb +1 -1
- data/lib/google/protobuf/api_pb.rb +1 -1
- data/lib/google/protobuf/descriptor_pb.rb +5 -2
- data/lib/google/protobuf/duration_pb.rb +1 -1
- data/lib/google/protobuf/empty_pb.rb +1 -1
- data/lib/google/protobuf/ffi/descriptor_pool.rb +3 -1
- data/lib/google/protobuf/ffi/field_descriptor.rb +6 -0
- data/lib/google/protobuf/ffi/file_descriptor.rb +26 -0
- data/lib/google/protobuf/ffi/internal/pointer_helper.rb +2 -1
- data/lib/google/protobuf/ffi/map.rb +2 -2
- data/lib/google/protobuf/field_mask_pb.rb +1 -1
- data/lib/google/protobuf/message_exts.rb +4 -0
- data/lib/google/protobuf/plugin_pb.rb +1 -1
- data/lib/google/protobuf/source_context_pb.rb +1 -1
- data/lib/google/protobuf/struct_pb.rb +1 -1
- data/lib/google/protobuf/timestamp_pb.rb +1 -1
- data/lib/google/protobuf/type_pb.rb +1 -1
- data/lib/google/protobuf/wrappers_pb.rb +1 -1
- data/lib/google/protobuf_ffi.rb +1 -1
- metadata +20 -20
@@ -67,6 +67,10 @@ Error, UINTPTR_MAX is undefined
 #define UPB_SIZEOF_FLEX(type, member, count) \
   UPB_MAX(sizeof(type), offsetof(type, member[count]))
 
+#define UPB_SIZEOF_FLEX_WOULD_OVERFLOW(type, member, count) \
+  (((SIZE_MAX - offsetof(type, member[0])) / \
+    (offsetof(type, member[1]) - offsetof(type, member[0]))) < (size_t)count)
+
 #define UPB_MAPTYPE_STRING 0
 
 // UPB_EXPORT: always generate a public symbol.
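Note: the new guard is meant to be checked before UPB_SIZEOF_FLEX computes an allocation size for a flexible-array struct. A minimal sketch of the intended usage pattern (the struct and function below are illustrative, not from the diff):

typedef struct {
  uint32_t len;
  uint32_t vals[];  /* flexible array member */
} example_table;    /* hypothetical type, for illustration only */

static example_table* example_table_new(upb_Arena* a, size_t count) {
  // Reject counts whose flexible-array byte size would overflow size_t.
  if (UPB_SIZEOF_FLEX_WOULD_OVERFLOW(example_table, vals, count)) return NULL;
  return upb_Arena_Malloc(a, UPB_SIZEOF_FLEX(example_table, vals, count));
}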
@@ -125,6 +129,8 @@ Error, UINTPTR_MAX is undefined
 #ifdef _MSC_VER
 // Some versions of our Windows compiler don't support the C11 syntax.
 #define UPB_ALIGN_AS(x) __declspec(align(x))
+#elif defined(__GNUC__)
+#define UPB_ALIGN_AS(x) __attribute__((aligned(x)))
 #else
 #define UPB_ALIGN_AS(x) _Alignas(x)
 #endif
@@ -443,6 +449,10 @@ Error, UINTPTR_MAX is undefined
     (!defined(UPB_BOOTSTRAP_STAGE) || UPB_BOOTSTRAP_STAGE != 0)
 #define UPB_DESC(sym) proto2_##sym
 #define UPB_DESC_MINITABLE(sym) &proto2__##sym##_msg_init
+#elif defined(UPB_IS_GOOGLE3) && defined(UPB_BOOTSTRAP_STAGE) && \
+    UPB_BOOTSTRAP_STAGE == 0
+#define UPB_DESC(sym) proto2_##sym
+#define UPB_DESC_MINITABLE(sym) proto2__##sym##_msg_init()
 #elif defined(UPB_BOOTSTRAP_STAGE) && UPB_BOOTSTRAP_STAGE == 0
 #define UPB_DESC(sym) google_protobuf_##sym
 #define UPB_DESC_MINITABLE(sym) google__protobuf__##sym##_msg_init()
@@ -453,6 +463,12 @@ Error, UINTPTR_MAX is undefined
 
 #undef UPB_IS_GOOGLE3
 
+#ifdef __clang__
+#define UPB_NO_SANITIZE_ADDRESS __attribute__((no_sanitize("address")))
+#else
+#define UPB_NO_SANITIZE_ADDRESS
+#endif
+
 // Linker arrays combine elements from multiple translation units into a single
 // array that can be iterated over at runtime.
 //
@@ -479,12 +495,12 @@ Error, UINTPTR_MAX is undefined
 
 #if defined(__ELF__) || defined(__wasm__)
 
-#define UPB_LINKARR_APPEND(name) \
-  __attribute__((retain, used, \
-                 section("linkarr_" #name), no_sanitize("address")))
-#define UPB_LINKARR_DECLARE(name, type) \
-  extern type __start_linkarr_##name; \
-  extern type __stop_linkarr_##name; \
+#define UPB_LINKARR_APPEND(name) \
+  __attribute__((retain, used, \
+                 section("linkarr_" #name))) UPB_NO_SANITIZE_ADDRESS
+#define UPB_LINKARR_DECLARE(name, type) \
+  extern type __start_linkarr_##name; \
+  extern type __stop_linkarr_##name; \
   UPB_LINKARR_APPEND(name) type UPB_linkarr_internal_empty_##name[1]
 #define UPB_LINKARR_START(name) (&__start_linkarr_##name)
 #define UPB_LINKARR_STOP(name) (&__stop_linkarr_##name)
@@ -492,15 +508,15 @@ Error, UINTPTR_MAX is undefined
 #elif defined(__MACH__)
 
 /* As described in: https://stackoverflow.com/a/22366882 */
-#define UPB_LINKARR_APPEND(name) \
-  __attribute__((retain, used, \
-                 section("__DATA,__la_" #name), no_sanitize("address")))
-#define UPB_LINKARR_DECLARE(name, type) \
-  extern type __start_linkarr_##name __asm( \
-      "section$start$__DATA$__la_" #name); \
-  extern type __stop_linkarr_##name __asm( \
-      "section$end$__DATA$" \
-      "__la_" #name); \
+#define UPB_LINKARR_APPEND(name) \
+  __attribute__((retain, used, \
+                 section("__DATA,__la_" #name))) UPB_NO_SANITIZE_ADDRESS
+#define UPB_LINKARR_DECLARE(name, type) \
+  extern type __start_linkarr_##name __asm( \
+      "section$start$__DATA$__la_" #name); \
+  extern type __stop_linkarr_##name __asm( \
+      "section$end$__DATA$" \
+      "__la_" #name); \
   UPB_LINKARR_APPEND(name) type UPB_linkarr_internal_empty_##name[1]
 #define UPB_LINKARR_START(name) (&__start_linkarr_##name)
 #define UPB_LINKARR_STOP(name) (&__stop_linkarr_##name)
@@ -516,7 +532,7 @@ Error, UINTPTR_MAX is undefined
 // not work on MSVC.
 #define UPB_LINKARR_APPEND(name) \
   __declspec(allocate("la_" #name "$j")) \
-  __attribute__((retain, used, no_sanitize("address")))
+  __attribute__((retain, used)) UPB_NO_SANITIZE_ADDRESS
 #define UPB_LINKARR_DECLARE(name, type) \
   __declspec(allocate("la_" #name "$a")) type __start_linkarr_##name; \
   __declspec(allocate("la_" #name "$z")) type __stop_linkarr_##name; \
@@ -2969,7 +2985,9 @@ void upb_Arena_SetMaxBlockSize(size_t max) {
 
 typedef struct upb_MemBlock {
   struct upb_MemBlock* next;
-  size_t size;
+  // If this block is the head of the list, tracks a growing hint of what the
+  // *next* block should be; otherwise tracks the size of the actual allocation.
+  size_t size_or_hint;
   // Data follows.
 } upb_MemBlock;
 
@@ -2995,10 +3013,13 @@ typedef struct upb_ArenaInternal {
   // == NULL at end of list.
   UPB_ATOMIC(struct upb_ArenaInternal*) next;
 
-  // If the low bit is set, is a pointer to the tail of the list (populated
-  // roots, set to self for roots with no fused arenas).
-  //
-  // a
+  // - If the low bit is set, is a pointer to the tail of the list (populated
+  //   for roots, set to self for roots with no fused arenas). This is best
+  //   effort, and it may not always reflect the true tail, but it will always
+  //   be a valid node in the list. This is useful for finding the list tail
+  //   without having to walk the entire list.
+  // - If the low bit is not set, is a pointer to the previous node in the list,
+  //   such that a->previous_or_tail->next == a.
   UPB_ATOMIC(uintptr_t) previous_or_tail;
 
   // Linked list of blocks to free/cleanup.
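Note: the real tag helpers (_upb_Arena_IsTaggedTail() and _upb_Arena_TailFromTagged()) appear further down in this diff; the snippet below is only an illustrative sketch of how such a low-bit-tagged uintptr_t is typically decoded, and is not part of the package:

// Illustrative only: a tagged uintptr_t stores an aligned pointer plus a
// 1-bit role in the low bit.
static bool example_is_tail(uintptr_t v) { return (v & 1) != 0; }
static upb_ArenaInternal* example_untag(uintptr_t v) {
  return (upb_ArenaInternal*)(v & ~(uintptr_t)1);
}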
@@ -3029,6 +3050,12 @@ static upb_ArenaInternal* upb_Arena_Internal(const upb_Arena* a) {
   return &((upb_ArenaState*)a)->body;
 }
 
+// Extracts the (upb_Arena*) from a (upb_ArenaInternal*)
+static upb_Arena* upb_Arena_FromInternal(const upb_ArenaInternal* ai) {
+  ptrdiff_t offset = -offsetof(upb_ArenaState, body);
+  return UPB_PTR_AT(ai, offset, upb_Arena);
+}
+
 static bool _upb_Arena_IsTaggedRefcount(uintptr_t parent_or_count) {
   return (parent_or_count & 1) == 1;
 }
@@ -3223,31 +3250,39 @@ uint32_t upb_Arena_DebugRefCount(const upb_Arena* a) {
   return (uint32_t)_upb_Arena_RefCountFromTagged(tagged);
 }
 
+// Adds an allocated block to the head of the list.
 static void _upb_Arena_AddBlock(upb_Arena* a, void* ptr, size_t offset,
                                 size_t block_size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
   upb_MemBlock* block = ptr;
 
-  block->size = block_size;
-  // Insert into linked list.
-  block->next = ai->blocks;
-  ai->blocks = block;
-
+  block->size_or_hint = block_size;
   UPB_ASSERT(offset >= kUpb_MemblockReserve);
-  a->UPB_PRIVATE(ptr) = UPB_PTR_AT(block, offset, char);
+  char* start = UPB_PTR_AT(block, offset, char);
+  upb_MemBlock* head = ai->blocks;
+  if (head && head->next) {
+    // Fix up size to match actual allocation size
+    head->size_or_hint = a->UPB_PRIVATE(end) - (char*)head;
+  }
+  block->next = head;
+  ai->blocks = block;
+  a->UPB_PRIVATE(ptr) = start;
   a->UPB_PRIVATE(end) = UPB_PTR_AT(block, block_size, char);
-
-  UPB_POISON_MEMORY_REGION(a->UPB_PRIVATE(ptr),
-                           a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr));
+  UPB_POISON_MEMORY_REGION(start, a->UPB_PRIVATE(end) - start);
+  UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >= block_size - offset);
 }
 
-static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
+// Fulfills the allocation request by allocating a new block. Returns NULL on
+// allocation failure.
+void* UPB_PRIVATE(_upb_Arena_SlowMalloc)(upb_Arena* a, size_t size) {
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
-  if (!ai->block_alloc) return false;
+  if (!ai->block_alloc) return NULL;
   size_t last_size = 128;
+  size_t current_free = 0;
   upb_MemBlock* last_block = ai->blocks;
   if (last_block) {
     last_size = a->UPB_PRIVATE(end) - (char*)last_block;
+    current_free = a->UPB_PRIVATE(end) - a->UPB_PRIVATE(ptr);
   }
 
   // Relaxed order is safe here as we don't need any ordering with the setter.
@@ -3255,17 +3290,45 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
       upb_Atomic_Load(&g_max_block_size, memory_order_relaxed);
 
   // Don't naturally grow beyond the max block size.
-  size_t
-
+  size_t target_size = UPB_MIN(last_size * 2, max_block_size);
+  size_t future_free = UPB_MAX(size, target_size - kUpb_MemblockReserve) - size;
+  // We want to preserve exponential growth in block size without wasting too
+  // much unused space at the end of blocks. Once the head of our blocks list is
+  // large enough to always trigger a max-sized block for all subsequent
+  // allocations, allocate blocks that would net reduce free space behind it.
+  if (last_block && current_free > future_free &&
+      target_size < max_block_size) {
+    last_size = last_block->size_or_hint;
+    // Recalculate sizes with possibly larger last_size
+    target_size = UPB_MIN(last_size * 2, max_block_size);
+    future_free = UPB_MAX(size, target_size - kUpb_MemblockReserve) - size;
+  }
+  bool insert_after_head = false;
+  // Only insert after head if an allocated block is present; we don't want to
+  // continue allocating out of the initial block because we'll have no way of
+  // restoring the size of our allocated block if we add another.
+  if (last_block && current_free >= future_free) {
+    // If we're still going to net reduce free space with this new block, then
+    // only allocate the precise size requested and keep the current last block
+    // as the active block for future allocations.
+    insert_after_head = true;
+    target_size = size + kUpb_MemblockReserve;
+    // Add something to our previous size each time, so that eventually we
+    // will reach the max block size. Allocations larger than the max block size
+    // will always get their own backing allocation, so don't include them.
+    if (target_size <= max_block_size) {
+      last_block->size_or_hint =
+          UPB_MIN(last_block->size_or_hint + (size >> 1), max_block_size >> 1);
+    }
+  }
   // We may need to exceed the max block size if the user requested a large
   // allocation.
-  size_t block_size = UPB_MAX(kUpb_MemblockReserve + size,
+  size_t block_size = UPB_MAX(kUpb_MemblockReserve + size, target_size);
 
   upb_MemBlock* block =
       upb_malloc(_upb_ArenaInternal_BlockAlloc(ai), block_size);
 
-  if (!block) return false;
-  _upb_Arena_AddBlock(a, block, kUpb_MemblockReserve, block_size);
+  if (!block) return NULL;
   // Atomic add not required here, as threads won't race allocating blocks, plus
   // atomic fetch-add is slower than load/add/store on arm devices compiled
   // targetting pre-v8.1. Relaxed order is safe as nothing depends on order of
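Note: as a rough worked illustration of the sizing above (values assumed, not taken from the diff), with last_size = 128 and a request of size = 40:

// Illustrative arithmetic only, mirroring the expressions in the new code:
//   target_size = UPB_MIN(128 * 2, max_block_size);                  // 256
//   future_free = UPB_MAX(40, 256 - kUpb_MemblockReserve) - 40;
// i.e. future_free is the free space the candidate block would leave behind
// after serving this request, which is compared against current_free to decide
// whether to grow normally or to insert a precisely-sized block after the head.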
@@ -3275,13 +3338,21 @@ static bool _upb_Arena_AllocBlock(upb_Arena* a, size_t size) {
       upb_Atomic_Load(&ai->space_allocated, memory_order_relaxed);
   upb_Atomic_Store(&ai->space_allocated, old_space_allocated + block_size,
                    memory_order_relaxed);
-
-
-
-
-
-
-
+  if (UPB_UNLIKELY(insert_after_head)) {
+    upb_ArenaInternal* ai = upb_Arena_Internal(a);
+    block->size_or_hint = block_size;
+    upb_MemBlock* head = ai->blocks;
+    block->next = head->next;
+    head->next = block;
+
+    char* allocated = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
+    UPB_POISON_MEMORY_REGION(allocated + size, UPB_ASAN_GUARD_SIZE);
+    return allocated;
+  } else {
+    _upb_Arena_AddBlock(a, block, kUpb_MemblockReserve, block_size);
+    UPB_ASSERT(UPB_PRIVATE(_upb_ArenaHas)(a) >= size);
+    return upb_Arena_Malloc(a, size - UPB_ASAN_GUARD_SIZE);
+  }
 }
 
 static upb_Arena* _upb_Arena_InitSlow(upb_alloc* alloc, size_t first_size) {
@@ -3360,18 +3431,22 @@ static void _upb_Arena_DoFree(upb_ArenaInternal* ai) {
     // Load first since arena itself is likely from one of its blocks.
     upb_ArenaInternal* next_arena =
         (upb_ArenaInternal*)upb_Atomic_Load(&ai->next, memory_order_acquire);
-    // Freeing may have memory barriers that confuse tsan, so assert
+    // Freeing may have memory barriers that confuse tsan, so assert immediately
     // after load here
     if (next_arena) {
       UPB_TSAN_CHECK_PUBLISHED(next_arena);
     }
     upb_alloc* block_alloc = _upb_ArenaInternal_BlockAlloc(ai);
     upb_MemBlock* block = ai->blocks;
+    if (block && block->next) {
+      block->size_or_hint =
+          upb_Arena_FromInternal(ai)->UPB_PRIVATE(end) - (char*)block;
+    }
     upb_AllocCleanupFunc* alloc_cleanup = *ai->upb_alloc_cleanup;
     while (block != NULL) {
       // Load first since we are deleting block.
       upb_MemBlock* next_block = block->next;
-      upb_free_sized(block_alloc, block, block->size);
+      upb_free_sized(block_alloc, block, block->size_or_hint);
       block = next_block;
     }
     if (alloc_cleanup != NULL) {
|
|
3417
3492
|
goto retry;
|
3418
3493
|
}
|
3419
3494
|
|
3420
|
-
|
3421
|
-
|
3495
|
+
// Logically performs the following operation, in a way that is safe against
|
3496
|
+
// racing fuses:
|
3497
|
+
// ret = TAIL(parent)
|
3498
|
+
// ret->next = child
|
3499
|
+
// return ret
|
3500
|
+
//
|
3501
|
+
// The caller is therefore guaranteed that ret->next == child.
|
3502
|
+
static upb_ArenaInternal* _upb_Arena_LinkForward(
|
3503
|
+
upb_ArenaInternal* const parent, upb_ArenaInternal* child) {
|
3422
3504
|
UPB_TSAN_CHECK_PUBLISHED(parent);
|
3423
3505
|
uintptr_t parent_previous_or_tail =
|
3424
3506
|
upb_Atomic_Load(&parent->previous_or_tail, memory_order_acquire);
|
3425
|
-
upb_ArenaInternal* parent_tail = parent;
|
3426
|
-
if (_upb_Arena_IsTaggedTail(parent_previous_or_tail)) {
|
3427
|
-
// Our tail might be stale, but it will always converge to the true tail.
|
3428
|
-
parent_tail = _upb_Arena_TailFromTagged(parent_previous_or_tail);
|
3429
|
-
}
|
3430
3507
|
|
3431
|
-
//
|
3432
|
-
|
3433
|
-
|
3434
|
-
|
3435
|
-
|
3508
|
+
// Optimization: use parent->previous_or_tail to skip to TAIL(parent) in O(1)
|
3509
|
+
// time when possible. This is the common case because we just fused into
|
3510
|
+
// parent, suggesting that it should be a root with a cached tail.
|
3511
|
+
//
|
3512
|
+
// However, if there was a racing fuse, parent may no longer be a root, in
|
3513
|
+
// which case we need to walk the entire list to find the tail. The tail
|
3514
|
+
// pointer is also not guaranteed to be the true tail, so even when the
|
3515
|
+
// optimization is taken, we still need to walk list nodes to find the true
|
3516
|
+
// tail.
|
3517
|
+
upb_ArenaInternal* parent_tail =
|
3518
|
+
_upb_Arena_IsTaggedTail(parent_previous_or_tail)
|
3519
|
+
? _upb_Arena_TailFromTagged(parent_previous_or_tail)
|
3520
|
+
: parent;
|
3521
|
+
|
3522
|
+
UPB_TSAN_CHECK_PUBLISHED(parent_tail);
|
3523
|
+
upb_ArenaInternal* parent_tail_next =
|
3524
|
+
upb_Atomic_Load(&parent_tail->next, memory_order_acquire);
|
3525
|
+
|
3526
|
+
do {
|
3527
|
+
// Walk the list to find the true tail (a node with next == NULL).
|
3436
3528
|
while (parent_tail_next != NULL) {
|
3437
3529
|
parent_tail = parent_tail_next;
|
3438
3530
|
UPB_TSAN_CHECK_PUBLISHED(parent_tail);
|
3439
3531
|
parent_tail_next =
|
3440
3532
|
upb_Atomic_Load(&parent_tail->next, memory_order_acquire);
|
3441
3533
|
}
|
3442
|
-
|
3443
|
-
|
3444
|
-
|
3445
|
-
|
3446
|
-
|
3447
|
-
|
3448
|
-
parent_tail = parent_tail_next;
|
3449
|
-
}
|
3450
|
-
}
|
3534
|
+
} while (!upb_Atomic_CompareExchangeWeak( // Replace a NULL next with child.
|
3535
|
+
&parent_tail->next, &parent_tail_next, child, memory_order_release,
|
3536
|
+
memory_order_acquire));
|
3537
|
+
|
3538
|
+
return parent_tail;
|
3539
|
+
}
|
3451
3540
|
|
3452
|
-
|
3541
|
+
// Updates parent->previous_or_tail = child->previous_or_tail in hopes that the
|
3542
|
+
// latter represents the true tail of the newly-combined list.
|
3543
|
+
//
|
3544
|
+
// This is a best-effort operation that may set the tail to a stale value, and
|
3545
|
+
// may fail to update the tail at all.
|
3546
|
+
void _upb_Arena_UpdateParentTail(upb_ArenaInternal* parent,
|
3547
|
+
upb_ArenaInternal* child) {
|
3548
|
+
// We are guaranteed that child->previous_or_tail is tagged, because we have
|
3549
|
+
// just transitioned child from root -> non-root, which is an exclusive
|
3550
|
+
// operation that can only happen once. So we are the exclusive updater of
|
3551
|
+
// child->previous_or_tail that can transition it from tagged to untagged.
|
3552
|
+
//
|
3553
|
+
// However, we are not guaranteed that child->previous_or_tail is the true
|
3554
|
+
// tail. A racing fuse may have appended to child's list but not yet updated
|
3555
|
+
// child->previous_or_tail.
|
3453
3556
|
uintptr_t child_previous_or_tail =
|
3454
3557
|
upb_Atomic_Load(&child->previous_or_tail, memory_order_acquire);
|
3455
3558
|
upb_ArenaInternal* new_parent_tail =
|
3456
3559
|
_upb_Arena_TailFromTagged(child_previous_or_tail);
|
3457
3560
|
UPB_TSAN_CHECK_PUBLISHED(new_parent_tail);
|
3458
3561
|
|
3459
|
-
// If another thread fused with
|
3460
|
-
// with our tail. Relaxed order is fine
|
3461
|
-
|
3562
|
+
// If another thread fused with parent, such that it is no longer a root,
|
3563
|
+
// don't overwrite their previous pointer with our tail. Relaxed order is fine
|
3564
|
+
// here as we only inspect the tag bit.
|
3565
|
+
uintptr_t parent_previous_or_tail =
|
3462
3566
|
upb_Atomic_Load(&parent->previous_or_tail, memory_order_relaxed);
|
3463
3567
|
if (_upb_Arena_IsTaggedTail(parent_previous_or_tail)) {
|
3464
3568
|
upb_Atomic_CompareExchangeStrong(
|
@@ -3466,13 +3570,38 @@ static void _upb_Arena_DoFuseArenaLists(upb_ArenaInternal* const parent,
         _upb_Arena_TaggedFromTail(new_parent_tail), memory_order_release,
         memory_order_relaxed);
   }
+}
 
-
+static void _upb_Arena_LinkBackward(upb_ArenaInternal* child,
+                                    upb_ArenaInternal* old_parent_tail) {
+  // Link child to parent going backwards, for SpaceAllocated. This transitions
+  // child->previous_or_tail from tail (tagged) to previous (untagged), after
+  // which its value is immutable.
+  //
+  // - We are guaranteed that no other threads are also attempting to perform
+  //   this transition (tail -> previous), because we just updated
+  //   old_parent_tail->next from NULL to non-NULL, an exclusive operation that
+  //   can only happen once.
+  //
+  // - _upb_Arena_UpdateParentTail() uses CAS to ensure that it
+  //   does not perform the reverse transition (previous -> tail).
+  //
+  // - We are guaranteed that old_parent_tail is the correct "previous" pointer,
+  //   even in the presence of racing fuses that are adding more nodes to the
+  //   list, because _upb_Arena_LinkForward() guarantees that:
+  //     old_parent_tail->next == child.
   upb_Atomic_Store(&child->previous_or_tail,
-                   _upb_Arena_TaggedFromPrevious(
+                   _upb_Arena_TaggedFromPrevious(old_parent_tail),
                    memory_order_release);
 }
 
+static void _upb_Arena_DoFuseArenaLists(upb_ArenaInternal* const parent,
+                                        upb_ArenaInternal* child) {
+  upb_ArenaInternal* old_parent_tail = _upb_Arena_LinkForward(parent, child);
+  _upb_Arena_UpdateParentTail(parent, child);
+  _upb_Arena_LinkBackward(child, old_parent_tail);
+}
+
 void upb_Arena_SetAllocCleanup(upb_Arena* a, upb_AllocCleanupFunc* func) {
   UPB_TSAN_CHECK_READ(a->UPB_ONLYBITS(ptr));
   upb_ArenaInternal* ai = upb_Arena_Internal(a);
@@ -3664,8 +3793,16 @@ void UPB_PRIVATE(_upb_Arena_SwapOut)(upb_Arena* des, const upb_Arena* src) {
   desi->blocks = srci->blocks;
 }
 
+bool _upb_Arena_WasLastAlloc(struct upb_Arena* a, void* ptr, size_t oldsize) {
+  upb_ArenaInternal* ai = upb_Arena_Internal(a);
+  upb_MemBlock* block = ai->blocks;
+  if (block == NULL) return false;
+  block = block->next;
+  if (block == NULL) return false;
+  char* start = UPB_PTR_AT(block, kUpb_MemblockReserve, char);
+  return ptr == start && oldsize == block->size_or_hint - kUpb_MemblockReserve;
+}
 
-#include <string.h>
 
 
 // Must be last.
@@ -3683,8 +3820,7 @@ bool upb_Message_SetMapEntry(upb_Map* map, const upb_MiniTable* m,
       upb_MiniTable_MapValue(map_entry_mini_table);
   // Map key/value cannot have explicit defaults,
   // hence assuming a zero default is valid.
-  upb_MessageValue default_val;
-  memset(&default_val, 0, sizeof(upb_MessageValue));
+  upb_MessageValue default_val = upb_MessageValue_Zero();
   upb_MessageValue map_entry_key =
       upb_Message_GetField(map_entry_message, map_entry_key_field, default_val);
   upb_MessageValue map_entry_value = upb_Message_GetField(
@@ -3713,12 +3849,14 @@ upb_MessageValue upb_Array_Get(const upb_Array* arr, size_t i) {
   return ret;
 }
 
-
+upb_Message* upb_Array_GetMutable(upb_Array* arr, size_t i) {
   UPB_ASSERT(i < upb_Array_Size(arr));
-
+  size_t elem_size = sizeof(upb_Message*);
+  UPB_ASSERT(elem_size == (1 << UPB_PRIVATE(_upb_Array_ElemSizeLg2)(arr)));
   char* data = upb_Array_MutableDataPtr(arr);
-
-  memcpy(&ret, data + (i
+  upb_Message* ret;
+  memcpy(&ret, data + (i * elem_size), elem_size);
+  UPB_ASSERT(!upb_Message_IsFrozen(ret));
   return ret;
 }
 
@@ -3885,6 +4023,16 @@ bool upb_Map_Get(const upb_Map* map, upb_MessageValue key,
   return _upb_Map_Get(map, &key, map->key_size, val, map->val_size);
 }
 
+struct upb_Message* upb_Map_GetMutable(upb_Map* map, upb_MessageValue key) {
+  UPB_ASSERT(map->val_size == sizeof(upb_Message*));
+  upb_Message* val = NULL;
+  if (_upb_Map_Get(map, &key, map->key_size, &val, sizeof(upb_Message*))) {
+    return val;
+  } else {
+    return NULL;
+  }
+}
+
 void upb_Map_Clear(upb_Map* map) { _upb_Map_Clear(map); }
 
 upb_MapInsertStatus upb_Map_Insert(upb_Map* map, upb_MessageValue key,
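Note: a hedged usage sketch for the new accessor above; it applies only to message-valued maps (as the assert indicates), and the map and key below are assumed to already exist:

// Assumes 'map' holds message values and 'key' matches its declared key type.
upb_MessageValue key = {.int32_val = 42};
upb_Message* entry_msg = upb_Map_GetMutable(map, key);
if (entry_msg != NULL) {
  // Mutate entry_msg in place; a NULL return means the key is absent.
}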
@@ -3903,21 +4051,36 @@ bool upb_Map_Delete(upb_Map* map, upb_MessageValue key, upb_MessageValue* val) {
 
 bool upb_Map_Next(const upb_Map* map, upb_MessageValue* key,
                   upb_MessageValue* val, size_t* iter) {
-  upb_StringView k;
   upb_value v;
-
-  if (
-
+  bool ret;
+  if (map->UPB_PRIVATE(is_strtable)) {
+    upb_StringView strkey;
+    ret = upb_strtable_next2(&map->t.strtable, &strkey, &v, (intptr_t*)iter);
+    if (ret) {
+      _upb_map_fromkey(strkey, key, map->key_size);
+    }
+  } else {
+    uintptr_t intkey;
+    ret = upb_inttable_next(&map->t.inttable, &intkey, &v, (intptr_t*)iter);
+    if (ret) {
+      memcpy(key, &intkey, map->key_size);
+    }
+  }
+  if (ret) {
     _upb_map_fromvalue(v, val, map->val_size);
   }
-  return
+  return ret;
 }
 
 UPB_API void upb_Map_SetEntryValue(upb_Map* map, size_t iter,
                                    upb_MessageValue val) {
   upb_value v;
   _upb_map_tovalue(&val, map->val_size, &v, NULL);
-
+  if (map->UPB_PRIVATE(is_strtable)) {
+    upb_strtable_setentryvalue(&map->t.strtable, iter, v);
+  } else {
+    upb_inttable_setentryvalue(&map->t.inttable, iter, v);
+  }
 }
 
 bool upb_MapIterator_Next(const upb_Map* map, size_t* iter) {
@@ -3925,29 +4088,45 @@ bool upb_MapIterator_Next(const upb_Map* map, size_t* iter) {
 }
 
 bool upb_MapIterator_Done(const upb_Map* map, size_t iter) {
-  upb_strtable_iter i;
   UPB_ASSERT(iter != kUpb_Map_Begin);
-
-
-
+  if (map->UPB_PRIVATE(is_strtable)) {
+    upb_strtable_iter i;
+    i.t = &map->t.strtable;
+    i.index = iter;
+    return upb_strtable_done(&i);
+  } else {
+    return upb_inttable_done(&map->t.inttable, iter);
+  }
 }
 
 // Returns the key and value for this entry of the map.
 upb_MessageValue upb_MapIterator_Key(const upb_Map* map, size_t iter) {
-  upb_strtable_iter i;
   upb_MessageValue ret;
-
-
-
+  if (map->UPB_PRIVATE(is_strtable)) {
+    upb_strtable_iter i;
+    i.t = &map->t.strtable;
+    i.index = iter;
+    _upb_map_fromkey(upb_strtable_iter_key(&i), &ret, map->key_size);
+  } else {
+    uintptr_t intkey = upb_inttable_iter_key(&map->t.inttable, iter);
+    memcpy(&ret, &intkey, map->key_size);
+  }
   return ret;
 }
 
 upb_MessageValue upb_MapIterator_Value(const upb_Map* map, size_t iter) {
-
+  upb_value v;
+  if (map->UPB_PRIVATE(is_strtable)) {
+    upb_strtable_iter i;
+    i.t = &map->t.strtable;
+    i.index = iter;
+    v = upb_strtable_iter_value(&i);
+  } else {
+    v = upb_inttable_iter_value(&map->t.inttable, iter);
+  }
+
   upb_MessageValue ret;
-
-  i.index = iter;
-  _upb_map_fromvalue(upb_strtable_iter_value(&i), &ret, map->val_size);
+  _upb_map_fromvalue(v, &ret, map->val_size);
   return ret;
 }
 
@@ -3971,7 +4150,13 @@ upb_Map* _upb_Map_New(upb_Arena* a, size_t key_size, size_t value_size) {
   upb_Map* map = upb_Arena_Malloc(a, sizeof(upb_Map));
   if (!map) return NULL;
 
-
+  if (key_size <= sizeof(uintptr_t) && key_size != UPB_MAPTYPE_STRING) {
+    if (!upb_inttable_init(&map->t.inttable, a)) return NULL;
+    map->UPB_PRIVATE(is_strtable) = false;
+  } else {
+    if (!upb_strtable_init(&map->t.strtable, 4, a)) return NULL;
+    map->UPB_PRIVATE(is_strtable) = true;
+  }
   map->key_size = key_size;
   map->val_size = value_size;
   map->UPB_PRIVATE(is_frozen) = false;
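Note: the table choice above keys off the declared key size: fixed-width integer/bool keys (key_size <= sizeof(uintptr_t), and not the string sentinel) use the inttable, everything else the strtable. A minimal illustration of the same predicate, outside the diff:

// Illustrative only: mirrors the predicate used in _upb_Map_New().
static bool example_uses_inttable(size_t key_size) {
  return key_size <= sizeof(uintptr_t) && key_size != UPB_MAPTYPE_STRING;
}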
@@ -3987,12 +4172,20 @@ upb_Map* _upb_Map_New(upb_Arena* a, size_t key_size, size_t value_size) {
 
 // Must be last.
 
+static int _upb_mapsorter_intkeys(const void* _a, const void* _b) {
+  const upb_tabent* const* a = _a;
+  const upb_tabent* const* b = _b;
+  uintptr_t a_key = (*a)->key.num;
+  uintptr_t b_key = (*b)->key.num;
+  return a_key < b_key ? -1 : a_key > b_key;
+}
+
 static void _upb_mapsorter_getkeys(const void* _a, const void* _b, void* a_key,
                                    void* b_key, size_t size) {
   const upb_tabent* const* a = _a;
   const upb_tabent* const* b = _b;
-  upb_StringView a_tabkey =
-  upb_StringView b_tabkey =
+  upb_StringView a_tabkey = upb_key_strview((*a)->key);
+  upb_StringView b_tabkey = upb_key_strview((*b)->key);
   _upb_map_fromkey(a_tabkey, a_key, size);
   _upb_map_fromkey(b_tabkey, b_key, size);
 }
@@ -4078,15 +4271,30 @@ static bool _upb_mapsorter_resize(_upb_mapsorter* s, _upb_sortedmap* sorted,
 
 bool _upb_mapsorter_pushmap(_upb_mapsorter* s, upb_FieldType key_type,
                             const upb_Map* map, _upb_sortedmap* sorted) {
-  int map_size
-
+  int map_size;
+  if (map->UPB_PRIVATE(is_strtable)) {
+    map_size = _upb_Map_Size(map);
+  } else {
+    // For inttable, only sort the table entries, since the array part is
+    // already in a sorted order.
+    map_size = map->t.inttable.t.count;
+  }
 
   if (!_upb_mapsorter_resize(s, sorted, map_size)) return false;
 
   // Copy non-empty entries from the table to s->entries.
   const void** dst = &s->entries[sorted->start];
-  const upb_tabent* src
-  const upb_tabent* end
+  const upb_tabent* src;
+  const upb_tabent* end;
+  if (map->UPB_PRIVATE(is_strtable)) {
+    src = map->t.strtable.t.entries;
+    end = src + upb_table_size(&map->t.strtable.t);
+  } else {
+    // For inttable, only sort the table entries, since the array part is
+    // already in a sorted order.
+    src = map->t.inttable.t.entries;
+    end = src + upb_table_size(&map->t.inttable.t);
+  }
   for (; src < end; src++) {
     if (!upb_tabent_isempty(src)) {
       *dst = src;
|
|
4097
4305
|
|
4098
4306
|
// Sort entries according to the key type.
|
4099
4307
|
qsort(&s->entries[sorted->start], map_size, sizeof(*s->entries),
|
4100
|
-
compar[key_type]
|
4308
|
+
map->UPB_PRIVATE(is_strtable) ? compar[key_type]
|
4309
|
+
: _upb_mapsorter_intkeys);
|
4101
4310
|
return true;
|
4102
4311
|
}
|
4103
4312
|
|
@@ -4106,7 +4315,7 @@ static int _upb_mapsorter_cmpext(const void* _a, const void* _b) {
   const upb_Extension* const* b = _b;
   uint32_t a_num = upb_MiniTableExtension_Number((*a)->ext);
   uint32_t b_num = upb_MiniTableExtension_Number((*b)->ext);
-
+  UPB_ASSERT(a_num != b_num);
   return a_num < b_num ? -1 : 1;
 }
 
@@ -4144,10 +4353,41 @@ upb_Message* upb_Message_New(const upb_MiniTable* m, upb_Arena* a) {
|
|
4144
4353
|
return _upb_Message_New(m, a);
|
4145
4354
|
}
|
4146
4355
|
|
4147
|
-
bool UPB_PRIVATE(
|
4148
|
-
|
4149
|
-
|
4150
|
-
|
4356
|
+
UPB_NOINLINE bool UPB_PRIVATE(_upb_Message_AddUnknownSlowPath)(upb_Message* msg,
|
4357
|
+
const char* data,
|
4358
|
+
size_t len,
|
4359
|
+
upb_Arena* arena,
|
4360
|
+
bool alias) {
|
4361
|
+
{
|
4362
|
+
upb_Message_Internal* in = UPB_PRIVATE(_upb_Message_GetInternal)(msg);
|
4363
|
+
// Alias fast path was already checked in the inline function that calls
|
4364
|
+
// this one
|
4365
|
+
if (!alias && in && in->size) {
|
4366
|
+
upb_TaggedAuxPtr ptr = in->aux_data[in->size - 1];
|
4367
|
+
if (upb_TaggedAuxPtr_IsUnknown(ptr)) {
|
4368
|
+
upb_StringView* existing = upb_TaggedAuxPtr_UnknownData(ptr);
|
4369
|
+
if (!upb_TaggedAuxPtr_IsUnknownAliased(ptr)) {
|
4370
|
+
// If part of the existing field was deleted at the beginning, we can
|
4371
|
+
// reconstruct it by comparing the address of the end with the address
|
4372
|
+
// of the entry itself; having the non-aliased tag means that the
|
4373
|
+
// string_view and the data it points to are part of the same original
|
4374
|
+
// upb_Arena_Malloc allocation, and the end of the string view
|
4375
|
+
// represents the end of that allocation.
|
4376
|
+
size_t prev_alloc_size =
|
4377
|
+
(existing->data + existing->size) - (char*)existing;
|
4378
|
+
if (SIZE_MAX - prev_alloc_size >= len) {
|
4379
|
+
size_t new_alloc_size = prev_alloc_size + len;
|
4380
|
+
if (upb_Arena_TryExtend(arena, existing, prev_alloc_size,
|
4381
|
+
new_alloc_size)) {
|
4382
|
+
memcpy(UPB_PTR_AT(existing, prev_alloc_size, void), data, len);
|
4383
|
+
existing->size += len;
|
4384
|
+
return true;
|
4385
|
+
}
|
4386
|
+
}
|
4387
|
+
}
|
4388
|
+
}
|
4389
|
+
}
|
4390
|
+
}
|
4151
4391
|
// TODO: b/376969853 - Add debug check that the unknown field is an overall
|
4152
4392
|
// valid proto field
|
4153
4393
|
if (!UPB_PRIVATE(_upb_Message_ReserveSlot)(msg, arena)) {
|
@@ -4167,7 +4407,9 @@ bool UPB_PRIVATE(_upb_Message_AddUnknown)(upb_Message* msg, const char* data,
|
|
4167
4407
|
}
|
4168
4408
|
view->size = len;
|
4169
4409
|
upb_Message_Internal* in = UPB_PRIVATE(_upb_Message_GetInternal)(msg);
|
4170
|
-
in->aux_data[in->size++] =
|
4410
|
+
in->aux_data[in->size++] = alias
|
4411
|
+
? upb_TaggedAuxPtr_MakeUnknownDataAliased(view)
|
4412
|
+
: upb_TaggedAuxPtr_MakeUnknownData(view);
|
4171
4413
|
return true;
|
4172
4414
|
}
|
4173
4415
|
|
@@ -4179,8 +4421,40 @@ bool UPB_PRIVATE(_upb_Message_AddUnknownV)(struct upb_Message* msg,
|
|
4179
4421
|
UPB_ASSERT(count > 0);
|
4180
4422
|
size_t total_len = 0;
|
4181
4423
|
for (size_t i = 0; i < count; i++) {
|
4424
|
+
if (SIZE_MAX - total_len < data[i].size) {
|
4425
|
+
return false;
|
4426
|
+
}
|
4182
4427
|
total_len += data[i].size;
|
4183
4428
|
}
|
4429
|
+
|
4430
|
+
{
|
4431
|
+
upb_Message_Internal* in = UPB_PRIVATE(_upb_Message_GetInternal)(msg);
|
4432
|
+
if (in && in->size) {
|
4433
|
+
upb_TaggedAuxPtr ptr = in->aux_data[in->size - 1];
|
4434
|
+
if (upb_TaggedAuxPtr_IsUnknown(ptr)) {
|
4435
|
+
upb_StringView* existing = upb_TaggedAuxPtr_UnknownData(ptr);
|
4436
|
+
if (!upb_TaggedAuxPtr_IsUnknownAliased(ptr)) {
|
4437
|
+
size_t prev_alloc_size =
|
4438
|
+
(existing->data + existing->size) - (char*)existing;
|
4439
|
+
if (SIZE_MAX - prev_alloc_size >= total_len) {
|
4440
|
+
size_t new_alloc_size = prev_alloc_size + total_len;
|
4441
|
+
if (upb_Arena_TryExtend(arena, existing, prev_alloc_size,
|
4442
|
+
new_alloc_size)) {
|
4443
|
+
char* copy = UPB_PTR_AT(existing, prev_alloc_size, char);
|
4444
|
+
for (size_t i = 0; i < count; i++) {
|
4445
|
+
memcpy(copy, data[i].data, data[i].size);
|
4446
|
+
copy += data[i].size;
|
4447
|
+
}
|
4448
|
+
existing->size += total_len;
|
4449
|
+
return true;
|
4450
|
+
}
|
4451
|
+
}
|
4452
|
+
}
|
4453
|
+
}
|
4454
|
+
}
|
4455
|
+
}
|
4456
|
+
|
4457
|
+
if (SIZE_MAX - sizeof(upb_StringView) < total_len) return false;
|
4184
4458
|
if (!UPB_PRIVATE(_upb_Message_ReserveSlot)(msg, arena)) return false;
|
4185
4459
|
|
4186
4460
|
upb_StringView* view =
|
@@ -4214,23 +4488,64 @@ void _upb_Message_DiscardUnknown_shallow(upb_Message* msg) {
|
|
4214
4488
|
in->size = size;
|
4215
4489
|
}
|
4216
4490
|
|
4217
|
-
|
4218
|
-
|
4491
|
+
upb_Message_DeleteUnknownStatus upb_Message_DeleteUnknown(upb_Message* msg,
|
4492
|
+
upb_StringView* data,
|
4493
|
+
uintptr_t* iter,
|
4494
|
+
upb_Arena* arena) {
|
4219
4495
|
UPB_ASSERT(!upb_Message_IsFrozen(msg));
|
4220
4496
|
UPB_ASSERT(*iter != kUpb_Message_UnknownBegin);
|
4221
4497
|
upb_Message_Internal* in = UPB_PRIVATE(_upb_Message_GetInternal)(msg);
|
4222
4498
|
UPB_ASSERT(in);
|
4223
4499
|
UPB_ASSERT(*iter <= in->size);
|
4224
|
-
#ifndef NDEBUG
|
4225
4500
|
upb_TaggedAuxPtr unknown_ptr = in->aux_data[*iter - 1];
|
4226
4501
|
UPB_ASSERT(upb_TaggedAuxPtr_IsUnknown(unknown_ptr));
|
4227
4502
|
upb_StringView* unknown = upb_TaggedAuxPtr_UnknownData(unknown_ptr);
|
4228
|
-
|
4229
|
-
|
4230
|
-
|
4231
|
-
|
4232
|
-
|
4233
|
-
|
4503
|
+
if (unknown->data == data->data && unknown->size == data->size) {
|
4504
|
+
// Remove whole field
|
4505
|
+
in->aux_data[*iter - 1] = upb_TaggedAuxPtr_Null();
|
4506
|
+
} else if (unknown->data == data->data) {
|
4507
|
+
// Strip prefix
|
4508
|
+
unknown->data += data->size;
|
4509
|
+
unknown->size -= data->size;
|
4510
|
+
*data = *unknown;
|
4511
|
+
return kUpb_DeleteUnknown_IterUpdated;
|
4512
|
+
} else if (unknown->data + unknown->size == data->data + data->size) {
|
4513
|
+
// Truncate existing field
|
4514
|
+
unknown->size -= data->size;
|
4515
|
+
if (!upb_TaggedAuxPtr_IsUnknownAliased(unknown_ptr)) {
|
4516
|
+
in->aux_data[*iter - 1] =
|
4517
|
+
upb_TaggedAuxPtr_MakeUnknownDataAliased(unknown);
|
4518
|
+
}
|
4519
|
+
} else {
|
4520
|
+
UPB_ASSERT(unknown->data < data->data &&
|
4521
|
+
unknown->data + unknown->size > data->data + data->size);
|
4522
|
+
// Split in the middle
|
4523
|
+
upb_StringView* prefix = unknown;
|
4524
|
+
upb_StringView* suffix = upb_Arena_Malloc(arena, sizeof(upb_StringView));
|
4525
|
+
if (!suffix) {
|
4526
|
+
return kUpb_DeleteUnknown_AllocFail;
|
4527
|
+
}
|
4528
|
+
if (!UPB_PRIVATE(_upb_Message_ReserveSlot)(msg, arena)) {
|
4529
|
+
return kUpb_DeleteUnknown_AllocFail;
|
4530
|
+
}
|
4531
|
+
in = UPB_PRIVATE(_upb_Message_GetInternal)(msg);
|
4532
|
+
if (*iter != in->size) {
|
4533
|
+
// Shift later entries down so that unknown field ordering is preserved
|
4534
|
+
memmove(&in->aux_data[*iter + 1], &in->aux_data[*iter],
|
4535
|
+
sizeof(upb_TaggedAuxPtr) * (in->size - *iter));
|
4536
|
+
}
|
4537
|
+
in->aux_data[*iter] = upb_TaggedAuxPtr_MakeUnknownDataAliased(suffix);
|
4538
|
+
if (!upb_TaggedAuxPtr_IsUnknownAliased(unknown_ptr)) {
|
4539
|
+
in->aux_data[*iter - 1] = upb_TaggedAuxPtr_MakeUnknownDataAliased(prefix);
|
4540
|
+
}
|
4541
|
+
in->size++;
|
4542
|
+
suffix->data = data->data + data->size;
|
4543
|
+
suffix->size = (prefix->data + prefix->size) - suffix->data;
|
4544
|
+
prefix->size = data->data - prefix->data;
|
4545
|
+
}
|
4546
|
+
return upb_Message_NextUnknown(msg, data, iter)
|
4547
|
+
? kUpb_DeleteUnknown_IterUpdated
|
4548
|
+
: kUpb_DeleteUnknown_DeletedLast;
|
4234
4549
|
}
|
4235
4550
|
|
4236
4551
|
size_t upb_Message_ExtensionCount(const upb_Message* msg) {
|
@@ -5164,7 +5479,7 @@ typedef struct {
|
|
5164
5479
|
uint32_t enum_data_capacity;
|
5165
5480
|
} upb_MdEnumDecoder;
|
5166
5481
|
|
5167
|
-
static size_t upb_MiniTableEnum_Size(
|
5482
|
+
static size_t upb_MiniTableEnum_Size(uint32_t count) {
|
5168
5483
|
return UPB_SIZEOF_FLEX(upb_MiniTableEnum, UPB_PRIVATE(data), count);
|
5169
5484
|
}
|
5170
5485
|
|
@@ -5172,10 +5487,18 @@ static upb_MiniTableEnum* _upb_MiniTable_AddEnumDataMember(upb_MdEnumDecoder* d,
                                                            uint32_t val) {
   if (d->enum_data_count == d->enum_data_capacity) {
     size_t old_sz = upb_MiniTableEnum_Size(d->enum_data_capacity);
-    d->enum_data_capacity
-
+    if (d->enum_data_capacity > UINT32_MAX / 2) {
+      upb_MdDecoder_ErrorJmp(&d->base, "Out of memory");
+    }
+    uint32_t new_capacity = UPB_MAX(2, d->enum_data_capacity * 2);
+    if (UPB_SIZEOF_FLEX_WOULD_OVERFLOW(upb_MiniTableEnum, UPB_PRIVATE(data),
+                                       new_capacity)) {
+      upb_MdDecoder_ErrorJmp(&d->base, "Out of memory");
+    }
+    size_t new_sz = upb_MiniTableEnum_Size(new_capacity);
     d->enum_table = upb_Arena_Realloc(d->arena, d->enum_table, old_sz, new_sz);
     upb_MdDecoder_CheckOutOfMemory(&d->base, d->enum_table);
+    d->enum_data_capacity = new_capacity;
   }
   d->enum_table->UPB_PRIVATE(data)[d->enum_data_count++] = val;
   return d->enum_table;
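Note: the growth path above combines capped doubling with the UPB_SIZEOF_FLEX_WOULD_OVERFLOW guard introduced earlier in this diff. A generic sketch of the same pattern (function name illustrative, not from the package):

// Illustrative only: double a capacity while guarding both the doubling
// itself and the resulting flexible-array byte size against overflow.
static bool example_grow_capacity(uint32_t* capacity) {
  if (*capacity > UINT32_MAX / 2) return false;  // doubling would wrap
  uint32_t next = UPB_MAX(2, *capacity * 2);
  if (UPB_SIZEOF_FLEX_WOULD_OVERFLOW(upb_MiniTableEnum, UPB_PRIVATE(data),
                                     next)) {
    return false;  // byte size would exceed SIZE_MAX
  }
  *capacity = next;
  return true;
}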
@@ -5256,6 +5579,7 @@ static upb_MiniTableEnum* upb_MtDecoder_BuildMiniTableEnum(
 upb_MiniTableEnum* upb_MiniTableEnum_Build(const char* data, size_t len,
                                            upb_Arena* arena,
                                            upb_Status* status) {
+  uint32_t initial_capacity = 2;
   upb_MdEnumDecoder decoder = {
       .base =
           {
@@ -5263,10 +5587,11 @@ upb_MiniTableEnum* upb_MiniTableEnum_Build(const char* data, size_t len,
               .status = status,
           },
       .arena = arena,
-      .enum_table =
+      .enum_table =
+          upb_Arena_Malloc(arena, upb_MiniTableEnum_Size(initial_capacity)),
       .enum_value_count = 0,
       .enum_data_count = 0,
-      .enum_data_capacity =
+      .enum_data_capacity = initial_capacity,
   };
 
   return upb_MtDecoder_BuildMiniTableEnum(&decoder, data, len);
@@ -5589,9 +5914,17 @@ static const char* upb_MtDecoder_DecodeOneofField(upb_MtDecoder* d,
 
   // Oneof storage must be large enough to accommodate the largest member.
   int rep = f->UPB_PRIVATE(mode) >> kUpb_FieldRep_Shift;
-
-
+  size_t new_size = upb_MtDecoder_SizeOfRep(rep, d->platform);
+  size_t new_align = upb_MtDecoder_AlignOfRep(rep, d->platform);
+  size_t current_size = upb_MtDecoder_SizeOfRep(item->rep, d->platform);
+  size_t current_align = upb_MtDecoder_AlignOfRep(item->rep, d->platform);
+
+  if (new_size > current_size ||
+      (new_size == current_size && new_align > current_align)) {
+    UPB_ASSERT(new_align >= current_align);
     item->rep = rep;
+  } else {
+    UPB_ASSERT(current_align >= new_align);
   }
   // Prepend this field to the linked list.
   f->UPB_PRIVATE(offset) = item->field_index;
@@ -5735,6 +6068,9 @@ static void upb_MtDecoder_ParseMessage(upb_MtDecoder* d, const char* data,
                                        size_t len) {
   // Buffer length is an upper bound on the number of fields. We will return
   // what we don't use.
+  if (SIZE_MAX / sizeof(*d->fields) < len) {
+    upb_MdDecoder_ErrorJmp(&d->base, "Out of memory");
+  }
   d->fields = upb_Arena_Malloc(d->arena, sizeof(*d->fields) * len);
   upb_MdDecoder_CheckOutOfMemory(&d->base, d->fields);
 
@@ -6027,6 +6363,11 @@ static const char* upb_MtDecoder_DoBuildMiniTableExtension(
     upb_MtDecoder* decoder, const char* data, size_t len,
     upb_MiniTableExtension* ext, const upb_MiniTable* extendee,
     upb_MiniTableSub sub) {
+  if (!(extendee->UPB_PRIVATE(ext) &
+        (kUpb_ExtMode_Extendable | kUpb_ExtMode_IsMessageSet))) {
+    upb_MdDecoder_ErrorJmp(&decoder->base, "Extendee is not extendable");
+  }
+
   // If the string is non-empty then it must begin with a version tag.
   if (len) {
     if (*data != kUpb_EncodedVersion_ExtensionV1) {
@@ -6169,6 +6510,10 @@ bool upb_MiniTable_SetSubEnum(upb_MiniTable* table, upb_MiniTableField* field,
                                   table->UPB_PRIVATE(field_count)));
   UPB_ASSERT(sub);
 
+  if (field->UPB_PRIVATE(descriptortype) != kUpb_FieldType_Enum) {
+    return false;
+  }
+
   upb_MiniTableSub* table_sub =
       (void*)&table->UPB_PRIVATE(subs)[field->UPB_PRIVATE(submsg_index)];
   *table_sub = upb_MiniTableSub_FromEnum(sub);
@@ -6304,7 +6649,7 @@ failure:
 
 #ifdef UPB_LINKARR_DECLARE
 
-UPB_LINKARR_DECLARE(upb_AllExts, upb_MiniTableExtension);
+UPB_LINKARR_DECLARE(upb_AllExts, const upb_MiniTableExtension);
 
 bool upb_ExtensionRegistry_AddAllLinkedExtensions(upb_ExtensionRegistry* r) {
   const upb_MiniTableExtension* start = UPB_LINKARR_START(upb_AllExts);
@@ -7097,6 +7442,32 @@ static upb_Map* _upb_Decoder_CreateMap(upb_Decoder* d,
   return ret;
 }
 
+UPB_NOINLINE static void _upb_Decoder_AddMapEntryUnknown(
+    upb_Decoder* d, upb_Message* msg, const upb_MiniTableField* field,
+    upb_Message* ent_msg, const upb_MiniTable* entry) {
+  char* buf;
+  size_t size;
+  upb_EncodeStatus status =
+      upb_Encode(ent_msg, entry, 0, &d->arena, &buf, &size);
+  if (status != kUpb_EncodeStatus_Ok) {
+    _upb_Decoder_ErrorJmp(d, kUpb_DecodeStatus_OutOfMemory);
+  }
+  char delim_buf[2 * kUpb_Decoder_EncodeVarint32MaxSize];
+  char* delim_end = delim_buf;
+  uint32_t tag =
+      ((uint32_t)field->UPB_PRIVATE(number) << 3) | kUpb_WireType_Delimited;
+  delim_end = upb_Decoder_EncodeVarint32(tag, delim_end);
+  delim_end = upb_Decoder_EncodeVarint32(size, delim_end);
+  upb_StringView unknown[] = {
+      {delim_buf, delim_end - delim_buf},
+      {buf, size},
+  };
+
+  if (!UPB_PRIVATE(_upb_Message_AddUnknownV)(msg, &d->arena, unknown, 2)) {
+    _upb_Decoder_ErrorJmp(d, kUpb_DecodeStatus_OutOfMemory);
+  }
+}
+
 static const char* _upb_Decoder_DecodeToMap(
     upb_Decoder* d, const char* ptr, upb_Message* msg,
     const upb_MiniTableSubInternal* subs, const upb_MiniTableField* field,
@@ -7134,27 +7505,7 @@ static const char* _upb_Decoder_DecodeToMap(
     ptr = _upb_Decoder_DecodeSubMessage(d, ptr, &ent.message, subs, field,
                                         val->size);
     if (upb_Message_HasUnknown(&ent.message)) {
-      char* buf;
-      size_t size;
-      uint32_t tag =
-          ((uint32_t)field->UPB_PRIVATE(number) << 3) | kUpb_WireType_Delimited;
-      upb_EncodeStatus status =
-          upb_Encode(&ent.message, entry, 0, &d->arena, &buf, &size);
-      if (status != kUpb_EncodeStatus_Ok) {
-        _upb_Decoder_ErrorJmp(d, kUpb_DecodeStatus_OutOfMemory);
-      }
-      char delim_buf[2 * kUpb_Decoder_EncodeVarint32MaxSize];
-      char* delim_end = delim_buf;
-      delim_end = upb_Decoder_EncodeVarint32(tag, delim_end);
-      delim_end = upb_Decoder_EncodeVarint32(size, delim_end);
-      upb_StringView unknown[] = {
-          {delim_buf, delim_end - delim_buf},
-          {buf, size},
-      };
-
-      if (!UPB_PRIVATE(_upb_Message_AddUnknownV)(msg, &d->arena, unknown, 2)) {
-        _upb_Decoder_ErrorJmp(d, kUpb_DecodeStatus_OutOfMemory);
-      }
+      _upb_Decoder_AddMapEntryUnknown(d, msg, field, &ent.message, entry);
     } else {
       if (_upb_Map_Insert(map, &ent.k, map->key_size, &ent.v, map->val_size,
                           &d->arena) == kUpb_MapInsertStatus_OutOfMemory) {
@@ -7398,56 +7749,75 @@ static const char* upb_Decoder_DecodeMessageSetItem(
|
|
7398
7749
|
_upb_Decoder_ErrorJmp(d, kUpb_DecodeStatus_Malformed);
|
7399
7750
|
}
|
7400
7751
|
|
7401
|
-
static
|
7402
|
-
|
7403
|
-
|
7404
|
-
|
7405
|
-
|
7406
|
-
|
7407
|
-
if
|
7752
|
+
static upb_MiniTableField upb_Decoder_FieldNotFoundField = {
|
7753
|
+
0, 0, 0, 0, kUpb_FakeFieldType_FieldNotFound, 0};
|
7754
|
+
|
7755
|
+
UPB_NOINLINE const upb_MiniTableField* _upb_Decoder_FindExtensionField(
|
7756
|
+
upb_Decoder* d, const upb_MiniTable* t, uint32_t field_number, int ext_mode,
|
7757
|
+
int wire_type) {
|
7758
|
+
// Treat a message set as an extendable message if it is a delimited field.
|
7759
|
+
// This provides compatibility with encoders that are unaware of message
|
7760
|
+
// sets and serialize them as normal extensions.
|
7761
|
+
if (ext_mode == kUpb_ExtMode_Extendable ||
|
7762
|
+
(ext_mode == kUpb_ExtMode_IsMessageSet &&
|
7763
|
+
wire_type == kUpb_WireType_Delimited)) {
|
7764
|
+
const upb_MiniTableExtension* ext =
|
7765
|
+
upb_ExtensionRegistry_Lookup(d->extreg, t, field_number);
|
7766
|
+
if (ext) return &ext->UPB_PRIVATE(field);
|
7767
|
+
} else if (ext_mode == kUpb_ExtMode_IsMessageSet) {
|
7768
|
+
if (field_number == kUpb_MsgSet_Item) {
|
7769
|
+
static upb_MiniTableField item = {
|
7770
|
+
0, 0, 0, 0, kUpb_FakeFieldType_MessageSetItem, 0};
|
7771
|
+
return &item;
|
7772
|
+
}
|
7773
|
+
}
|
7774
|
+
return &upb_Decoder_FieldNotFoundField;
|
7775
|
+
}
|
7408
7776
|
|
7409
|
-
|
7777
|
+
static const upb_MiniTableField* _upb_Decoder_FindField(
|
7778
|
+
upb_Decoder* d, const upb_MiniTable* t, uint32_t field_number,
|
7779
|
+
uint32_t* last_field_index, int wire_type) {
|
7780
|
+
if (t == NULL) return &upb_Decoder_FieldNotFoundField;
|
7781
|
+
|
7782
|
+
uint32_t idx = ((uint32_t)field_number) - 1; // 0 wraps to UINT32_MAX
|
7410
7783
|
if (idx < t->UPB_PRIVATE(dense_below)) {
|
7411
|
-
// Fastest case: index into dense fields.
|
7412
|
-
|
7784
|
+
// Fastest case: index into dense fields, and don't update last_field_index.
|
7785
|
+
return &t->UPB_PRIVATE(fields)[idx];
|
7413
7786
|
}
|
7414
7787
|
|
7415
|
-
|
7788
|
+
uint32_t field_count = t->UPB_PRIVATE(field_count);
|
7789
|
+
if (t->UPB_PRIVATE(dense_below) < field_count) {
|
7416
7790
|
// Linear search non-dense fields. Resume scanning from last_field_index
|
7417
7791
|
// since fields are usually in order.
|
7418
|
-
|
7419
|
-
|
7420
|
-
|
7792
|
+
idx = *last_field_index;
|
7793
|
+
uint32_t candidate;
|
7794
|
+
do {
|
7795
|
+
candidate = t->UPB_PRIVATE(fields)[idx].UPB_PRIVATE(number);
|
7796
|
+
if (candidate == field_number) {
|
7421
7797
|
goto found;
|
7422
7798
|
}
|
7423
|
-
}
|
7799
|
+
} while (++idx < field_count);
|
7424
7800
|
|
7425
|
-
|
7426
|
-
|
7427
|
-
|
7801
|
+
if (UPB_LIKELY(field_number > candidate)) {
|
7802
|
+
// The field number we encountered is larger than any of our known fields,
|
7803
|
+
// so it's likely that subsequent ones will be too.
|
7804
|
+
*last_field_index = idx - 1;
|
7805
|
+
} else {
|
7806
|
+
// Fields not in tag order - scan from beginning
|
7807
|
+
for (idx = t->UPB_PRIVATE(dense_below); idx < *last_field_index; idx++) {
|
7808
|
+
if (t->UPB_PRIVATE(fields)[idx].UPB_PRIVATE(number) == field_number) {
|
7809
|
+
goto found;
|
7810
|
+
}
|
7428
7811
|
}
|
7429
7812
|
}
|
7430
7813
|
}
|
7431
7814
|
|
7432
|
-
if (d->extreg) {
|
7433
|
-
|
7434
|
-
|
7435
|
-
const upb_MiniTableExtension* ext =
|
7436
|
-
upb_ExtensionRegistry_Lookup(d->extreg, t, field_number);
|
7437
|
-
if (ext) return &ext->UPB_PRIVATE(field);
|
7438
|
-
break;
|
7439
|
-
}
|
7440
|
-
case kUpb_ExtMode_IsMessageSet:
|
7441
|
-
if (field_number == kUpb_MsgSet_Item) {
|
7442
|
-
static upb_MiniTableField item = {
|
7443
|
-
0, 0, 0, 0, kUpb_FakeFieldType_MessageSetItem, 0};
|
7444
|
-
return &item;
|
7445
|
-
}
|
7446
|
-
break;
|
7447
|
-
}
|
7815
|
+
if (d->extreg && t->UPB_PRIVATE(ext)) {
|
7816
|
+
return _upb_Decoder_FindExtensionField(d, t, field_number,
|
7817
|
+
t->UPB_PRIVATE(ext), wire_type);
|
7448
7818
|
}
|
7449
7819
|
|
7450
|
-
return &
|
7820
|
+
return &upb_Decoder_FieldNotFoundField; // Unknown field.
|
7451
7821
|
|
7452
7822
|
found:
|
7453
7823
|
UPB_ASSERT(t->UPB_PRIVATE(fields)[idx].UPB_PRIVATE(number) == field_number);
|
@@ -7544,7 +7914,7 @@ static int _upb_Decoder_GetDelimitedOp(upb_Decoder* d, const upb_MiniTable* mt,
       [kUpb_FieldType_SFixed64] = kUpb_DecodeOp_UnknownField,
       [kUpb_FieldType_SInt32] = kUpb_DecodeOp_UnknownField,
       [kUpb_FieldType_SInt64] = kUpb_DecodeOp_UnknownField,
-      [kUpb_FakeFieldType_MessageSetItem] =
+      [kUpb_FakeFieldType_MessageSetItem] = kUpb_DecodeOp_SubMessage,
       // For repeated field type.
       [kRepeatedBase + kUpb_FieldType_Double] = OP_FIXPCK_LG2(3),
       [kRepeatedBase + kUpb_FieldType_Float] = OP_FIXPCK_LG2(2),
@@ -7676,17 +8046,6 @@ const char* _upb_Decoder_DecodeKnownField(upb_Decoder* d, const char* ptr,
   }
 }
 
-static const char* _upb_Decoder_ReverseSkipVarint(const char* ptr,
-                                                  uint32_t val) {
-  uint32_t seen = 0;
-  do {
-    ptr--;
-    seen <<= 7;
-    seen |= *ptr & 0x7f;
-  } while (seen != val);
-  return ptr;
-}
-
 static const char* _upb_Decoder_DecodeUnknownField(upb_Decoder* d,
                                                    const char* ptr,
                                                    upb_Message* msg,
@@ -7706,7 +8065,9 @@ static const char* _upb_Decoder_DecodeUnknownField(upb_Decoder* d,
   switch (wire_type) {
     case kUpb_WireType_Varint:
     case kUpb_WireType_Delimited:
+      // Skip the last byte
       start--;
+      // Skip bytes until we encounter the final byte of the tag varint.
       while (start[-1] & 0x80) start--;
       break;
     case kUpb_WireType_32Bit:
@@ -7720,8 +8081,23 @@ static const char* _upb_Decoder_DecodeUnknownField(upb_Decoder* d,
   }
 
   assert(start == d->debug_valstart);
-
-
+  {
+    // The varint parser does not enforce that integers are encoded with their
+    // minimum size; for example the value 1 could be encoded with three
+    // bytes: 0x81, 0x80, 0x00. These unnecessary trailing zeroes mean that we
+    // cannot skip backwards by the minimum encoded size of the tag; and
+    // unlike the loop for delimited or varint fields, we can't stop at a
+    // sentinel value because anything can precede a tag. Instead, parse back
+    // one byte at a time until we read the same tag value that was parsed
+    // earlier.
+    uint32_t tag = ((uint32_t)field_number << 3) | wire_type;
+    uint32_t seen = 0;
+    do {
+      start--;
+      seen <<= 7;
+      seen |= *start & 0x7f;
+    } while (seen != tag);
+  }
   assert(start == d->debug_tagstart);
 
   const char* input_start =
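Note: to make the reverse scan above concrete, here is a tiny standalone illustration (not part of the diff): scanning backwards accumulates 7-bit groups most-significant-first, so for a tag value of 150 (varint bytes 0x96 0x01) the loop reads 0x01, then 0x96, and stops once the accumulated value equals the tag parsed earlier.

// Illustrative only: reverse-scan a buffer until the accumulated 7-bit
// groups equal a known varint value (here the tag 150, encoded 0x96 0x01).
const char buf[] = {0x08, (char)0x96, 0x01};  // some byte, then the tag varint
const char* p = buf + sizeof(buf);            // one past the varint
uint32_t seen = 0;
do {
  p--;
  seen = (seen << 7) | (*p & 0x7f);
} while (seen != 150);
// p now points at the first byte of the tag varint (buf + 1).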
@@ -7751,7 +8127,7 @@ UPB_NOINLINE
 static const char* _upb_Decoder_DecodeMessage(upb_Decoder* d, const char* ptr,
                                               upb_Message* msg,
                                               const upb_MiniTable* layout) {
-
+  uint32_t last_field_index = 0;
 
 #if UPB_FASTTABLE
   // The first time we want to skip fast dispatch, because we may have just been
@@ -7791,7 +8167,8 @@ static const char* _upb_Decoder_DecodeMessage(upb_Decoder* d, const char* ptr,
       return ptr;
     }
 
-    field = _upb_Decoder_FindField(d, layout, field_number, &last_field_index);
+    field = _upb_Decoder_FindField(d, layout, field_number, &last_field_index,
+                                   wire_type);
     ptr = _upb_Decoder_DecodeWireValue(d, ptr, layout, field, wire_type, &val,
                                        &op);
 
@@ -8017,7 +8394,8 @@ UPB_NORETURN static void encode_err(upb_encstate* e, upb_EncodeStatus s) {
 UPB_NOINLINE
 static void encode_growbuffer(upb_encstate* e, size_t bytes) {
   size_t old_size = e->limit - e->buf;
-  size_t
+  size_t needed_size = bytes + (e->limit - e->ptr);
+  size_t new_size = upb_roundup_pow2(needed_size);
   char* new_buf = upb_Arena_Realloc(e->arena, e->buf, old_size, new_size);
 
   if (!new_buf) encode_err(e, kUpb_EncodeStatus_OutOfMemory);
@@ -8026,14 +8404,12 @@ static void encode_growbuffer(upb_encstate* e, size_t bytes) {
   // TODO: This is somewhat inefficient since we are copying twice.
   // Maybe create a realloc() that copies to the end of the new buffer?
   if (old_size > 0) {
-    memmove(new_buf + new_size - old_size,
+    memmove(new_buf + new_size - old_size, new_buf, old_size);
   }
 
-  e->ptr = new_buf + new_size - (e->limit - e->ptr);
-  e->limit = new_buf + new_size;
   e->buf = new_buf;
-
-  e->ptr
+  e->limit = new_buf + new_size;
+  e->ptr = new_buf + new_size - needed_size;
 }
 
 /* Call to ensure that at least "bytes" bytes are available for writing at
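The rewritten encode_growbuffer keeps upb's back-to-front encoding invariant: encoded bytes occupy [ptr, limit) at the tail of the buffer, so after reallocating, the old contents are moved to the end of the new allocation and ptr is rewound by needed_size, which already accounts for the bytes just requested. A small malloc-based sketch of the same pointer bookkeeping (tailbuf, grow_tail_buffer and roundup_pow2 are stand-in names, not the upb symbols):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    // A tail-allocated buffer: live bytes sit in [ptr, limit) at the end.
    typedef struct { char *buf, *ptr, *limit; } tailbuf;

    static size_t roundup_pow2(size_t v) {
      size_t r = 1;
      while (r < v) r <<= 1;
      return r;
    }

    // After a successful grow, 'ptr' is already rewound by 'bytes', matching
    // the arithmetic in the hunk above (needed = request + pending tail).
    static int grow_tail_buffer(tailbuf* e, size_t bytes) {
      size_t old_size = e->buf ? (size_t)(e->limit - e->buf) : 0;
      size_t pending = e->buf ? (size_t)(e->limit - e->ptr) : 0;
      size_t needed_size = bytes + pending;
      size_t new_size = roundup_pow2(needed_size);
      char* new_buf = realloc(e->buf, new_size);
      if (!new_buf) return 0;
      // Move the already-encoded tail to the end of the new allocation.
      if (old_size > 0) memmove(new_buf + new_size - old_size, new_buf, old_size);
      e->buf = new_buf;
      e->limit = new_buf + new_size;
      e->ptr = new_buf + new_size - needed_size;
      return 1;
    }

    int main(void) {
      tailbuf e = {0};
      assert(grow_tail_buffer(&e, 3));    // reserve 3 bytes
      memcpy(e.ptr, "abc", 3);            // write them at the reserved spot
      assert(grow_tail_buffer(&e, 100));  // grow again; tail must survive
      assert(memcmp(e.limit - 3, "abc", 3) == 0);
      free(e.buf);
      return 0;
    }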
@@ -8363,6 +8739,19 @@ static void encode_map(upb_encstate* e, const upb_Message* msg,
   if (!map || !upb_Map_Size(map)) return;
 
   if (e->options & kUpb_EncodeOption_Deterministic) {
+    if (!map->UPB_PRIVATE(is_strtable)) {
+      // For inttable, first encode the array part, then sort the table entries.
+      intptr_t iter = UPB_INTTABLE_BEGIN;
+      while ((size_t)++iter < map->t.inttable.array_size) {
+        upb_value value = map->t.inttable.array[iter];
+        if (upb_inttable_arrhas(&map->t.inttable, iter)) {
+          upb_MapEntry ent;
+          memcpy(&ent.k, &iter, sizeof(iter));
+          _upb_map_fromvalue(value, &ent.v, map->val_size);
+          encode_mapentry(e, upb_MiniTableField_Number(f), layout, &ent);
+        }
+      }
+    }
     _upb_sortedmap sorted;
     _upb_mapsorter_pushmap(
         &e->sorter, layout->UPB_PRIVATE(fields)[0].UPB_PRIVATE(descriptortype),
@@ -8373,14 +8762,25 @@ static void encode_map(upb_encstate* e, const upb_Message* msg,
     }
     _upb_mapsorter_popmap(&e->sorter, &sorted);
   } else {
-    intptr_t iter = UPB_STRTABLE_BEGIN;
-    upb_StringView key;
     upb_value val;
-
-
-
-
-
+    if (map->UPB_PRIVATE(is_strtable)) {
+      intptr_t iter = UPB_STRTABLE_BEGIN;
+      upb_StringView strkey;
+      while (upb_strtable_next2(&map->t.strtable, &strkey, &val, &iter)) {
+        upb_MapEntry ent;
+        _upb_map_fromkey(strkey, &ent.k, map->key_size);
+        _upb_map_fromvalue(val, &ent.v, map->val_size);
+        encode_mapentry(e, upb_MiniTableField_Number(f), layout, &ent);
+      }
+    } else {
+      intptr_t iter = UPB_INTTABLE_BEGIN;
+      uintptr_t intkey = 0;
+      while (upb_inttable_next(&map->t.inttable, &intkey, &val, &iter)) {
+        upb_MapEntry ent;
+        memcpy(&ent.k, &intkey, map->key_size);
+        _upb_map_fromvalue(val, &ent.v, map->val_size);
+        encode_mapentry(e, upb_MiniTableField_Number(f), layout, &ent);
+      }
+    }
   }
 }
 }
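encode_map now distinguishes the two backing stores of a upb map: string-keyed maps sit in a strtable and are walked with upb_strtable_next2, while integer-keyed maps sit in an inttable whose small keys live in a dense array part that is already in ascending key order, which is why deterministic encoding can emit the array part directly and only push the hash-table entries through the sorter. A toy illustration of that "array part first" split, using plain C types rather than the upb ones:

    #include <assert.h>
    #include <stddef.h>

    // Toy stand-in for a hybrid int-keyed map: small keys live in a dense
    // array (inherently in ascending key order), large keys in an unordered
    // overflow part. Names here are illustrative, not the upb API.
    #define ARRAY_PART 8
    typedef struct {
      int present[ARRAY_PART];
      int arr[ARRAY_PART];
      long big_keys[4];
      int big_vals[4];
      int nbig;
    } toymap;

    // Deterministic emission order: the sorted-by-construction array part goes
    // first; only the overflow part would still need an explicit sort (omitted).
    static int walk(const toymap* m, long* keys_out, int* vals_out) {
      int n = 0;
      for (long k = 0; k < ARRAY_PART; k++) {
        if (m->present[k]) { keys_out[n] = k; vals_out[n] = m->arr[k]; n++; }
      }
      for (int i = 0; i < m->nbig; i++) {
        keys_out[n] = m->big_keys[i]; vals_out[n] = m->big_vals[i]; n++;
      }
      return n;
    }

    int main(void) {
      toymap m = {0};
      m.present[3] = 1; m.arr[3] = 30;
      m.present[1] = 1; m.arr[1] = 10;
      m.big_keys[0] = 1000; m.big_vals[0] = 99; m.nbig = 1;
      long keys[12]; int vals[12];
      int n = walk(&m, keys, vals);
      assert(n == 3 && keys[0] == 1 && keys[1] == 3 && keys[2] == 1000);
      return 0;
    }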
@@ -9547,8 +9947,8 @@ const char* fastdecode_tosubmsg(upb_EpsCopyInputStream* e, const char* ptr,
|
|
9547
9947
|
upb_Message** dst; \
|
9548
9948
|
uint32_t submsg_idx = (data >> 16) & 0xff; \
|
9549
9949
|
const upb_MiniTable* tablep = decode_totablep(table); \
|
9550
|
-
const upb_MiniTable* subtablep =
|
9551
|
-
|
9950
|
+
const upb_MiniTable* subtablep = \
|
9951
|
+
UPB_PRIVATE(_upb_MiniTable_GetSubTableByIndex)(tablep, submsg_idx); \
|
9552
9952
|
fastdecode_submsgdata submsg = {decode_totable(subtablep)}; \
|
9553
9953
|
fastdecode_arr farr; \
|
9554
9954
|
\
|
@@ -9716,9 +10116,9 @@ static const upb_MiniTableSubInternal google_protobuf_FileDescriptorProto__subms
|
|
9716
10116
|
{.UPB_PRIVATE(subenum) = &google__protobuf__Edition_enum_init},
|
9717
10117
|
};
|
9718
10118
|
|
9719
|
-
static const upb_MiniTableField google_protobuf_FileDescriptorProto__fields[
|
9720
|
-
{1, UPB_SIZE(
|
9721
|
-
{2, UPB_SIZE(
|
10119
|
+
static const upb_MiniTableField google_protobuf_FileDescriptorProto__fields[14] = {
|
10120
|
+
{1, UPB_SIZE(56, 16), 64, kUpb_NoSub, 12, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
|
10121
|
+
{2, UPB_SIZE(64, 32), 65, kUpb_NoSub, 12, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
|
9722
10122
|
{3, UPB_SIZE(12, 64), 0, kUpb_NoSub, 12, (int)kUpb_FieldMode_Array | (int)kUpb_LabelFlags_IsAlternate | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9723
10123
|
{4, UPB_SIZE(16, 72), 0, 0, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9724
10124
|
{5, UPB_SIZE(20, 80), 0, 1, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
@@ -9728,14 +10128,15 @@ static const upb_MiniTableField google_protobuf_FileDescriptorProto__fields[13]
|
|
9728
10128
|
{9, UPB_SIZE(36, 112), 67, 5, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9729
10129
|
{10, UPB_SIZE(40, 120), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9730
10130
|
{11, UPB_SIZE(44, 128), 0, kUpb_NoSub, 5, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9731
|
-
{12, UPB_SIZE(
|
10131
|
+
{12, UPB_SIZE(72, 48), 68, kUpb_NoSub, 12, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
|
9732
10132
|
{14, UPB_SIZE(48, 12), 69, 6, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
10133
|
+
{15, UPB_SIZE(52, 136), 0, kUpb_NoSub, 12, (int)kUpb_FieldMode_Array | (int)kUpb_LabelFlags_IsAlternate | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9733
10134
|
};
|
9734
10135
|
|
9735
10136
|
const upb_MiniTable google__protobuf__FileDescriptorProto_msg_init = {
|
9736
10137
|
&google_protobuf_FileDescriptorProto__submsgs[0],
|
9737
10138
|
&google_protobuf_FileDescriptorProto__fields[0],
|
9738
|
-
UPB_SIZE(80,
|
10139
|
+
UPB_SIZE(80, 144), 14, kUpb_ExtMode_NonExtendable, 12, UPB_FASTTABLE_MASK(120), 0,
|
9739
10140
|
#ifdef UPB_TRACING_ENABLED
|
9740
10141
|
"google.protobuf.FileDescriptorProto",
|
9741
10142
|
#endif
|
@@ -9755,12 +10156,12 @@ const upb_MiniTable google__protobuf__FileDescriptorProto_msg_init = {
|
|
9755
10156
|
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
9756
10157
|
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
9757
10158
|
{0x0000000000000000, &_upb_FastDecoder_DecodeGeneric},
|
9758
|
-
{
|
10159
|
+
{0x008800003f00007a, &upb_prs_1bt},
|
9759
10160
|
})
|
9760
10161
|
};
|
9761
10162
|
|
9762
10163
|
const upb_MiniTable* google__protobuf__FileDescriptorProto_msg_init_ptr = &google__protobuf__FileDescriptorProto_msg_init;
|
9763
|
-
static const upb_MiniTableSubInternal google_protobuf_DescriptorProto__submsgs[
|
10164
|
+
static const upb_MiniTableSubInternal google_protobuf_DescriptorProto__submsgs[9] = {
|
9764
10165
|
{.UPB_PRIVATE(submsg) = &google__protobuf__FieldDescriptorProto_msg_init_ptr},
|
9765
10166
|
{.UPB_PRIVATE(submsg) = &google__protobuf__DescriptorProto_msg_init_ptr},
|
9766
10167
|
{.UPB_PRIVATE(submsg) = &google__protobuf__EnumDescriptorProto_msg_init_ptr},
|
@@ -9769,10 +10170,11 @@ static const upb_MiniTableSubInternal google_protobuf_DescriptorProto__submsgs[8
|
|
9769
10170
|
{.UPB_PRIVATE(submsg) = &google__protobuf__MessageOptions_msg_init_ptr},
|
9770
10171
|
{.UPB_PRIVATE(submsg) = &google__protobuf__OneofDescriptorProto_msg_init_ptr},
|
9771
10172
|
{.UPB_PRIVATE(submsg) = &google__protobuf__DescriptorProto__ReservedRange_msg_init_ptr},
|
10173
|
+
{.UPB_PRIVATE(subenum) = &google__protobuf__SymbolVisibility_enum_init},
|
9772
10174
|
};
|
9773
10175
|
|
9774
|
-
static const upb_MiniTableField google_protobuf_DescriptorProto__fields[
|
9775
|
-
{1, UPB_SIZE(
|
10176
|
+
static const upb_MiniTableField google_protobuf_DescriptorProto__fields[11] = {
|
10177
|
+
{1, UPB_SIZE(52, 16), 64, kUpb_NoSub, 12, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
|
9776
10178
|
{2, UPB_SIZE(12, 32), 0, 0, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9777
10179
|
{3, UPB_SIZE(16, 40), 0, 1, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9778
10180
|
{4, UPB_SIZE(20, 48), 0, 2, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
@@ -9782,12 +10184,13 @@ static const upb_MiniTableField google_protobuf_DescriptorProto__fields[10] = {
|
|
9782
10184
|
{8, UPB_SIZE(36, 80), 0, 6, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9783
10185
|
{9, UPB_SIZE(40, 88), 0, 7, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9784
10186
|
{10, UPB_SIZE(44, 96), 0, kUpb_NoSub, 12, (int)kUpb_FieldMode_Array | (int)kUpb_LabelFlags_IsAlternate | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
10187
|
+
{11, UPB_SIZE(48, 12), 66, 8, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
9785
10188
|
};
|
9786
10189
|
|
9787
10190
|
const upb_MiniTable google__protobuf__DescriptorProto_msg_init = {
|
9788
10191
|
&google_protobuf_DescriptorProto__submsgs[0],
|
9789
10192
|
&google_protobuf_DescriptorProto__fields[0],
|
9790
|
-
UPB_SIZE(
|
10193
|
+
UPB_SIZE(64, 104), 11, kUpb_ExtMode_NonExtendable, 11, UPB_FASTTABLE_MASK(120), 0,
|
9791
10194
|
#ifdef UPB_TRACING_ENABLED
|
9792
10195
|
"google.protobuf.DescriptorProto",
|
9793
10196
|
#endif
|
@@ -9972,24 +10375,26 @@ const upb_MiniTable google__protobuf__OneofDescriptorProto_msg_init = {
|
|
9972
10375
|
};
|
9973
10376
|
|
9974
10377
|
const upb_MiniTable* google__protobuf__OneofDescriptorProto_msg_init_ptr = &google__protobuf__OneofDescriptorProto_msg_init;
|
9975
|
-
static const upb_MiniTableSubInternal google_protobuf_EnumDescriptorProto__submsgs[
|
10378
|
+
static const upb_MiniTableSubInternal google_protobuf_EnumDescriptorProto__submsgs[4] = {
|
9976
10379
|
{.UPB_PRIVATE(submsg) = &google__protobuf__EnumValueDescriptorProto_msg_init_ptr},
|
9977
10380
|
{.UPB_PRIVATE(submsg) = &google__protobuf__EnumOptions_msg_init_ptr},
|
9978
10381
|
{.UPB_PRIVATE(submsg) = &google__protobuf__EnumDescriptorProto__EnumReservedRange_msg_init_ptr},
|
10382
|
+
{.UPB_PRIVATE(subenum) = &google__protobuf__SymbolVisibility_enum_init},
|
9979
10383
|
};
|
9980
10384
|
|
9981
|
-
static const upb_MiniTableField google_protobuf_EnumDescriptorProto__fields[
|
9982
|
-
{1, UPB_SIZE(
|
10385
|
+
static const upb_MiniTableField google_protobuf_EnumDescriptorProto__fields[6] = {
|
10386
|
+
{1, UPB_SIZE(32, 16), 64, kUpb_NoSub, 12, (int)kUpb_FieldMode_Scalar | (int)kUpb_LabelFlags_IsAlternate | ((int)kUpb_FieldRep_StringView << kUpb_FieldRep_Shift)},
|
9983
10387
|
{2, UPB_SIZE(12, 32), 0, 0, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9984
10388
|
{3, UPB_SIZE(16, 40), 65, 1, 11, (int)kUpb_FieldMode_Scalar | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9985
10389
|
{4, UPB_SIZE(20, 48), 0, 2, 11, (int)kUpb_FieldMode_Array | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
9986
10390
|
{5, UPB_SIZE(24, 56), 0, kUpb_NoSub, 12, (int)kUpb_FieldMode_Array | (int)kUpb_LabelFlags_IsAlternate | ((int)UPB_SIZE(kUpb_FieldRep_4Byte, kUpb_FieldRep_8Byte) << kUpb_FieldRep_Shift)},
|
10391
|
+
{6, UPB_SIZE(28, 12), 66, 3, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
9987
10392
|
};
|
9988
10393
|
|
9989
10394
|
const upb_MiniTable google__protobuf__EnumDescriptorProto_msg_init = {
|
9990
10395
|
&google_protobuf_EnumDescriptorProto__submsgs[0],
|
9991
10396
|
&google_protobuf_EnumDescriptorProto__fields[0],
|
9992
|
-
UPB_SIZE(40, 64),
|
10397
|
+
UPB_SIZE(40, 64), 6, kUpb_ExtMode_NonExtendable, 6, UPB_FASTTABLE_MASK(56), 0,
|
9993
10398
|
#ifdef UPB_TRACING_ENABLED
|
9994
10399
|
"google.protobuf.EnumDescriptorProto",
|
9995
10400
|
#endif
|
@@ -10664,7 +11069,7 @@ const upb_MiniTable google__protobuf__UninterpretedOption__NamePart_msg_init = {
|
|
10664
11069
|
};
|
10665
11070
|
|
10666
11071
|
const upb_MiniTable* google__protobuf__UninterpretedOption__NamePart_msg_init_ptr = &google__protobuf__UninterpretedOption__NamePart_msg_init;
|
10667
|
-
static const upb_MiniTableSubInternal google_protobuf_FeatureSet__submsgs[
|
11072
|
+
static const upb_MiniTableSubInternal google_protobuf_FeatureSet__submsgs[8] = {
|
10668
11073
|
{.UPB_PRIVATE(subenum) = &google__protobuf__FeatureSet__FieldPresence_enum_init},
|
10669
11074
|
{.UPB_PRIVATE(subenum) = &google__protobuf__FeatureSet__EnumType_enum_init},
|
10670
11075
|
{.UPB_PRIVATE(subenum) = &google__protobuf__FeatureSet__RepeatedFieldEncoding_enum_init},
|
@@ -10672,9 +11077,10 @@ static const upb_MiniTableSubInternal google_protobuf_FeatureSet__submsgs[7] = {
|
|
10672
11077
|
{.UPB_PRIVATE(subenum) = &google__protobuf__FeatureSet__MessageEncoding_enum_init},
|
10673
11078
|
{.UPB_PRIVATE(subenum) = &google__protobuf__FeatureSet__JsonFormat_enum_init},
|
10674
11079
|
{.UPB_PRIVATE(subenum) = &google__protobuf__FeatureSet__EnforceNamingStyle_enum_init},
|
11080
|
+
{.UPB_PRIVATE(subenum) = &google__protobuf__FeatureSet__VisibilityFeature__DefaultSymbolVisibility_enum_init},
|
10675
11081
|
};
|
10676
11082
|
|
10677
|
-
static const upb_MiniTableField google_protobuf_FeatureSet__fields[
|
11083
|
+
static const upb_MiniTableField google_protobuf_FeatureSet__fields[8] = {
|
10678
11084
|
{1, 12, 64, 0, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
10679
11085
|
{2, 16, 65, 1, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
10680
11086
|
{3, 20, 66, 2, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
@@ -10682,18 +11088,29 @@ static const upb_MiniTableField google_protobuf_FeatureSet__fields[7] = {
|
|
10682
11088
|
{5, 28, 68, 4, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
10683
11089
|
{6, 32, 69, 5, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
10684
11090
|
{7, 36, 70, 6, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
11091
|
+
{8, 40, 71, 7, 14, (int)kUpb_FieldMode_Scalar | ((int)kUpb_FieldRep_4Byte << kUpb_FieldRep_Shift)},
|
10685
11092
|
};
|
10686
11093
|
|
10687
11094
|
const upb_MiniTable google__protobuf__FeatureSet_msg_init = {
|
10688
11095
|
&google_protobuf_FeatureSet__submsgs[0],
|
10689
11096
|
&google_protobuf_FeatureSet__fields[0],
|
10690
|
-
|
11097
|
+
48, 8, kUpb_ExtMode_Extendable, 8, UPB_FASTTABLE_MASK(255), 0,
|
10691
11098
|
#ifdef UPB_TRACING_ENABLED
|
10692
11099
|
"google.protobuf.FeatureSet",
|
10693
11100
|
#endif
|
10694
11101
|
};
|
10695
11102
|
|
10696
11103
|
const upb_MiniTable* google__protobuf__FeatureSet_msg_init_ptr = &google__protobuf__FeatureSet_msg_init;
|
11104
|
+
const upb_MiniTable google__protobuf__FeatureSet__VisibilityFeature_msg_init = {
|
11105
|
+
NULL,
|
11106
|
+
NULL,
|
11107
|
+
8, 0, kUpb_ExtMode_NonExtendable, 0, UPB_FASTTABLE_MASK(255), 0,
|
11108
|
+
#ifdef UPB_TRACING_ENABLED
|
11109
|
+
"google.protobuf.FeatureSet.VisibilityFeature",
|
11110
|
+
#endif
|
11111
|
+
};
|
11112
|
+
|
11113
|
+
const upb_MiniTable* google__protobuf__FeatureSet__VisibilityFeature_msg_init_ptr = &google__protobuf__FeatureSet__VisibilityFeature_msg_init;
|
10697
11114
|
static const upb_MiniTableSubInternal google_protobuf_FeatureSetDefaults__submsgs[3] = {
|
10698
11115
|
{.UPB_PRIVATE(submsg) = &google__protobuf__FeatureSetDefaults__FeatureSetEditionDefault_msg_init_ptr},
|
10699
11116
|
{.UPB_PRIVATE(subenum) = &google__protobuf__Edition_enum_init},
|
@@ -10930,6 +11347,15 @@ const upb_MiniTableEnum google__protobuf__FeatureSet__Utf8Validation_enum_init =
|
|
10930
11347
|
},
|
10931
11348
|
};
|
10932
11349
|
|
11350
|
+
const upb_MiniTableEnum google__protobuf__FeatureSet__VisibilityFeature__DefaultSymbolVisibility_enum_init = {
|
11351
|
+
64,
|
11352
|
+
0,
|
11353
|
+
{
|
11354
|
+
0x1f,
|
11355
|
+
0x0,
|
11356
|
+
},
|
11357
|
+
};
|
11358
|
+
|
10933
11359
|
const upb_MiniTableEnum google__protobuf__FieldDescriptorProto__Label_enum_init = {
|
10934
11360
|
64,
|
10935
11361
|
0,
|
@@ -11011,7 +11437,16 @@ const upb_MiniTableEnum google__protobuf__MethodOptions__IdempotencyLevel_enum_i
|
|
11011
11437
|
},
|
11012
11438
|
};
|
11013
11439
|
|
11014
|
-
|
11440
|
+
const upb_MiniTableEnum google__protobuf__SymbolVisibility_enum_init = {
|
11441
|
+
64,
|
11442
|
+
0,
|
11443
|
+
{
|
11444
|
+
0x7,
|
11445
|
+
0x0,
|
11446
|
+
},
|
11447
|
+
};
|
11448
|
+
|
11449
|
+
static const upb_MiniTable *messages_layout[34] = {
|
11015
11450
|
&google__protobuf__FileDescriptorSet_msg_init,
|
11016
11451
|
&google__protobuf__FileDescriptorProto_msg_init,
|
11017
11452
|
&google__protobuf__DescriptorProto_msg_init,
|
@@ -11039,6 +11474,7 @@ static const upb_MiniTable *messages_layout[33] = {
|
|
11039
11474
|
&google__protobuf__UninterpretedOption_msg_init,
|
11040
11475
|
&google__protobuf__UninterpretedOption__NamePart_msg_init,
|
11041
11476
|
&google__protobuf__FeatureSet_msg_init,
|
11477
|
+
&google__protobuf__FeatureSet__VisibilityFeature_msg_init,
|
11042
11478
|
&google__protobuf__FeatureSetDefaults_msg_init,
|
11043
11479
|
&google__protobuf__FeatureSetDefaults__FeatureSetEditionDefault_msg_init,
|
11044
11480
|
&google__protobuf__SourceCodeInfo_msg_init,
|
@@ -11047,7 +11483,7 @@ static const upb_MiniTable *messages_layout[33] = {
|
|
11047
11483
|
&google__protobuf__GeneratedCodeInfo__Annotation_msg_init,
|
11048
11484
|
};
|
11049
11485
|
|
11050
|
-
static const upb_MiniTableEnum *enums_layout[
|
11486
|
+
static const upb_MiniTableEnum *enums_layout[20] = {
|
11051
11487
|
&google__protobuf__Edition_enum_init,
|
11052
11488
|
&google__protobuf__ExtensionRangeOptions__VerificationState_enum_init,
|
11053
11489
|
&google__protobuf__FeatureSet__EnforceNamingStyle_enum_init,
|
@@ -11057,6 +11493,7 @@ static const upb_MiniTableEnum *enums_layout[18] = {
|
|
11057
11493
|
&google__protobuf__FeatureSet__MessageEncoding_enum_init,
|
11058
11494
|
&google__protobuf__FeatureSet__RepeatedFieldEncoding_enum_init,
|
11059
11495
|
&google__protobuf__FeatureSet__Utf8Validation_enum_init,
|
11496
|
+
&google__protobuf__FeatureSet__VisibilityFeature__DefaultSymbolVisibility_enum_init,
|
11060
11497
|
&google__protobuf__FieldDescriptorProto__Label_enum_init,
|
11061
11498
|
&google__protobuf__FieldDescriptorProto__Type_enum_init,
|
11062
11499
|
&google__protobuf__FieldOptions__CType_enum_init,
|
@@ -11066,14 +11503,15 @@ static const upb_MiniTableEnum *enums_layout[18] = {
|
|
11066
11503
|
&google__protobuf__FileOptions__OptimizeMode_enum_init,
|
11067
11504
|
&google__protobuf__GeneratedCodeInfo__Annotation__Semantic_enum_init,
|
11068
11505
|
&google__protobuf__MethodOptions__IdempotencyLevel_enum_init,
|
11506
|
+
&google__protobuf__SymbolVisibility_enum_init,
|
11069
11507
|
};
|
11070
11508
|
|
11071
11509
|
const upb_MiniTableFile google_protobuf_descriptor_proto_upb_file_layout = {
|
11072
11510
|
messages_layout,
|
11073
11511
|
enums_layout,
|
11074
11512
|
NULL,
|
11075
|
-
|
11076
|
-
|
11513
|
+
34,
|
11514
|
+
20,
|
11077
11515
|
0,
|
11078
11516
|
};
|
11079
11517
|
|
@@ -11085,6 +11523,8 @@ const upb_MiniTableFile google_protobuf_descriptor_proto_upb_file_layout = {
|
|
11085
11523
|
* Implementation is heavily inspired by Lua's ltable.c.
|
11086
11524
|
*/
|
11087
11525
|
|
11526
|
+
|
11527
|
+
#include <stdint.h>
|
11088
11528
|
#include <string.h>
|
11089
11529
|
|
11090
11530
|
|
@@ -11096,21 +11536,42 @@ const upb_MiniTableFile google_protobuf_descriptor_proto_upb_file_layout = {
|
|
11096
11536
|
#define ARRAY_SIZE(x) \
|
11097
11537
|
((sizeof(x) / sizeof(0 [x])) / ((size_t)(!(sizeof(x) % sizeof(0 [x])))))
|
11098
11538
|
|
11099
|
-
static const double MAX_LOAD = 0.85;
|
11100
|
-
|
11101
11539
|
/* The minimum utilization of the array part of a mixed hash/array table. This
|
11102
11540
|
* is a speed/memory-usage tradeoff (though it's not straightforward because of
|
11103
11541
|
* cache effects). The lower this is, the more memory we'll use. */
|
11104
11542
|
static const double MIN_DENSITY = 0.1;
|
11105
11543
|
|
11106
|
-
|
11544
|
+
#if defined(__has_builtin)
|
11545
|
+
#if __has_builtin(__builtin_popcount)
|
11546
|
+
#define UPB_FAST_POPCOUNT32(i) __builtin_popcount(i)
|
11547
|
+
#endif
|
11548
|
+
#elif defined(__GNUC__)
|
11549
|
+
#define UPB_FAST_POPCOUNT32(i) __builtin_popcount(i)
|
11550
|
+
#elif defined(_MSC_VER)
|
11551
|
+
#define UPB_FAST_POPCOUNT32(i) __popcnt(i)
|
11552
|
+
#endif
|
11107
11553
|
|
11108
|
-
|
11109
|
-
|
11110
|
-
|
11111
|
-
|
11554
|
+
UPB_INLINE int _upb_popcnt32(uint32_t i) {
|
11555
|
+
#ifdef UPB_FAST_POPCOUNT32
|
11556
|
+
return UPB_FAST_POPCOUNT32(i);
|
11557
|
+
#else
|
11558
|
+
int count = 0;
|
11559
|
+
while (i != 0) {
|
11560
|
+
count += i & 1;
|
11561
|
+
i >>= 1;
|
11562
|
+
}
|
11563
|
+
return count;
|
11564
|
+
#endif
|
11112
11565
|
}
|
11113
11566
|
|
11567
|
+
#undef UPB_FAST_POPCOUNT32
|
11568
|
+
|
11569
|
+
UPB_INLINE uint8_t _upb_log2_table_size(upb_table* t) {
|
11570
|
+
return _upb_popcnt32(t->mask);
|
11571
|
+
}
|
11572
|
+
|
11573
|
+
static bool is_pow2(uint64_t v) { return v == 0 || (v & (v - 1)) == 0; }
|
11574
|
+
|
11114
11575
|
static int log2ceil(uint64_t v) {
|
11115
11576
|
int ret = 0;
|
11116
11577
|
bool pow2 = is_pow2(v);
|
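The hunk above replaces a stored size with a popcount of the table mask: the mask is always size - 1 for a power-of-two bucket count, so the number of set bits in the mask is exactly log2(size), computed with a compiler builtin when available and with the bit loop otherwise. A quick standalone check of that identity (log2_from_mask is an illustrative name):

    #include <assert.h>
    #include <stdint.h>

    // Portable popcount with the same shape as the fallback in the hunk above.
    static int popcnt32(uint32_t i) {
      int count = 0;
      while (i != 0) {
        count += i & 1;
        i >>= 1;
      }
      return count;
    }

    // A power-of-two table keeps mask == size - 1, so counting the set bits of
    // the mask recovers log2(size).
    static int log2_from_mask(uint32_t mask) { return popcnt32(mask); }

    int main(void) {
      for (int lg2 = 0; lg2 < 32; lg2++) {
        uint32_t size = (uint32_t)1 << lg2;
        assert(log2_from_mask(size - 1) == lg2);
      }
      return 0;
    }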
@@ -11122,48 +11583,50 @@ static int log2ceil(uint64_t v) {
|
|
11122
11583
|
/* A type to represent the lookup key of either a strtable or an inttable. */
|
11123
11584
|
typedef union {
|
11124
11585
|
uintptr_t num;
|
11125
|
-
|
11126
|
-
const char* str;
|
11127
|
-
size_t len;
|
11128
|
-
} str;
|
11586
|
+
upb_StringView str;
|
11129
11587
|
} lookupkey_t;
|
11130
11588
|
|
11131
11589
|
static lookupkey_t strkey2(const char* str, size_t len) {
|
11132
|
-
lookupkey_t
|
11133
|
-
k.str.str = str;
|
11134
|
-
k.str.len = len;
|
11135
|
-
return k;
|
11590
|
+
return (lookupkey_t){.str = upb_StringView_FromDataAndSize(str, len)};
|
11136
11591
|
}
|
11137
11592
|
|
11138
|
-
static lookupkey_t intkey(uintptr_t key) {
|
11139
|
-
lookupkey_t k;
|
11140
|
-
k.num = key;
|
11141
|
-
return k;
|
11142
|
-
}
|
11593
|
+
static lookupkey_t intkey(uintptr_t key) { return (lookupkey_t){.num = key}; }
|
11143
11594
|
|
11144
|
-
typedef uint32_t hashfunc_t(
|
11145
|
-
typedef bool eqlfunc_t(
|
11595
|
+
typedef uint32_t hashfunc_t(upb_key key);
|
11596
|
+
typedef bool eqlfunc_t(upb_key k1, lookupkey_t k2);
|
11146
11597
|
|
11147
11598
|
/* Base table (shared code) ***************************************************/
|
11148
11599
|
|
11149
|
-
static uint32_t upb_inthash(uintptr_t key) {
|
11600
|
+
static uint32_t upb_inthash(uintptr_t key) {
|
11601
|
+
if (sizeof(uintptr_t) == 8) {
|
11602
|
+
return (uint32_t)key ^ (uint32_t)(key >> 32);
|
11603
|
+
} else {
|
11604
|
+
UPB_ASSERT(sizeof(uintptr_t) == 4);
|
11605
|
+
return (uint32_t)key;
|
11606
|
+
}
|
11607
|
+
}
|
11150
11608
|
|
11151
11609
|
static const upb_tabent* upb_getentry(const upb_table* t, uint32_t hash) {
|
11152
11610
|
return t->entries + (hash & t->mask);
|
11153
11611
|
}
|
11154
11612
|
|
11155
|
-
static bool
|
11156
|
-
|
11157
|
-
|
11613
|
+
static bool isfull(upb_table* t) {
|
11614
|
+
uint32_t size = upb_table_size(t);
|
11615
|
+
// 0.875 load factor
|
11616
|
+
return t->count == (size - (size >> 3));
|
11617
|
+
}
|
11158
11618
|
|
11159
11619
|
static bool init(upb_table* t, uint8_t size_lg2, upb_Arena* a) {
|
11160
|
-
|
11161
|
-
|
11620
|
+
if (size_lg2 >= 32) {
|
11621
|
+
return false;
|
11622
|
+
}
|
11162
11623
|
t->count = 0;
|
11163
|
-
|
11164
|
-
t->mask =
|
11165
|
-
|
11166
|
-
|
11624
|
+
uint32_t size = 1 << size_lg2;
|
11625
|
+
t->mask = size - 1; // 0 mask if size_lg2 is 0
|
11626
|
+
if (upb_table_size(t) > (SIZE_MAX / sizeof(upb_tabent))) {
|
11627
|
+
return false;
|
11628
|
+
}
|
11629
|
+
size_t bytes = upb_table_size(t) * sizeof(upb_tabent);
|
11167
11630
|
if (bytes > 0) {
|
11168
11631
|
t->entries = upb_Arena_Malloc(a, bytes);
|
11169
11632
|
if (!t->entries) return false;
|
@@ -11195,7 +11658,7 @@ static const upb_tabent* findentry(const upb_table* t, lookupkey_t key,
|
|
11195
11658
|
uint32_t hash, eqlfunc_t* eql) {
|
11196
11659
|
const upb_tabent* e;
|
11197
11660
|
|
11198
|
-
if (t->
|
11661
|
+
if (t->count == 0) return NULL;
|
11199
11662
|
e = upb_getentry(t, hash);
|
11200
11663
|
if (upb_tabent_isempty(e)) return NULL;
|
11201
11664
|
while (1) {
|
@@ -11213,9 +11676,7 @@ static bool lookup(const upb_table* t, lookupkey_t key, upb_value* v,
|
|
11213
11676
|
uint32_t hash, eqlfunc_t* eql) {
|
11214
11677
|
const upb_tabent* e = findentry(t, key, hash, eql);
|
11215
11678
|
if (e) {
|
11216
|
-
if (v)
|
11217
|
-
_upb_value_setval(v, e->val.val);
|
11218
|
-
}
|
11679
|
+
if (v) *v = e->val;
|
11219
11680
|
return true;
|
11220
11681
|
} else {
|
11221
11682
|
return false;
|
@@ -11223,9 +11684,8 @@ static bool lookup(const upb_table* t, lookupkey_t key, upb_value* v,
|
|
11223
11684
|
}
|
11224
11685
|
|
11225
11686
|
/* The given key must not already exist in the table. */
|
11226
|
-
static void insert(upb_table* t, lookupkey_t key,
|
11227
|
-
|
11228
|
-
eqlfunc_t* eql) {
|
11687
|
+
static void insert(upb_table* t, lookupkey_t key, upb_key tabkey, upb_value val,
|
11688
|
+
uint32_t hash, hashfunc_t* hashfunc, eqlfunc_t* eql) {
|
11229
11689
|
upb_tabent* mainpos_e;
|
11230
11690
|
upb_tabent* our_e;
|
11231
11691
|
|
@@ -11265,25 +11725,24 @@ static void insert(upb_table* t, lookupkey_t key, upb_tabkey tabkey,
|
|
11265
11725
|
}
|
11266
11726
|
}
|
11267
11727
|
our_e->key = tabkey;
|
11268
|
-
our_e->val
|
11728
|
+
our_e->val = val;
|
11269
11729
|
UPB_ASSERT(findentry(t, key, hash, eql) == our_e);
|
11270
11730
|
}
|
11271
11731
|
|
11272
|
-
static bool rm(upb_table* t, lookupkey_t key, upb_value* val,
|
11273
|
-
|
11732
|
+
static bool rm(upb_table* t, lookupkey_t key, upb_value* val, uint32_t hash,
|
11733
|
+
eqlfunc_t* eql) {
|
11274
11734
|
upb_tabent* chain = getentry_mutable(t, hash);
|
11275
11735
|
if (upb_tabent_isempty(chain)) return false;
|
11276
11736
|
if (eql(chain->key, key)) {
|
11277
11737
|
/* Element to remove is at the head of its chain. */
|
11278
11738
|
t->count--;
|
11279
|
-
if (val)
|
11280
|
-
if (removed) *removed = chain->key;
|
11739
|
+
if (val) *val = chain->val;
|
11281
11740
|
if (chain->next) {
|
11282
11741
|
upb_tabent* move = (upb_tabent*)chain->next;
|
11283
11742
|
*chain = *move;
|
11284
|
-
move->key =
|
11743
|
+
move->key = upb_key_empty();
|
11285
11744
|
} else {
|
11286
|
-
chain->key =
|
11745
|
+
chain->key = upb_key_empty();
|
11287
11746
|
}
|
11288
11747
|
return true;
|
11289
11748
|
} else {
|
@@ -11296,9 +11755,8 @@ static bool rm(upb_table* t, lookupkey_t key, upb_value* val,
|
|
11296
11755
|
/* Found element to remove. */
|
11297
11756
|
upb_tabent* rm = (upb_tabent*)chain->next;
|
11298
11757
|
t->count--;
|
11299
|
-
if (val)
|
11300
|
-
|
11301
|
-
rm->key = 0; /* Make the slot empty. */
|
11758
|
+
if (val) *val = chain->next->val;
|
11759
|
+
rm->key = upb_key_empty();
|
11302
11760
|
chain->next = rm->next;
|
11303
11761
|
return true;
|
11304
11762
|
} else {
|
@@ -11320,17 +11778,21 @@ static size_t begin(const upb_table* t) { return next(t, -1); }
|
|
11320
11778
|
|
11321
11779
|
/* upb_strtable ***************************************************************/
|
11322
11780
|
|
11323
|
-
|
11324
|
-
*/
|
11781
|
+
// A simple "subclass" of upb_table that only adds a hash function for strings.
|
11325
11782
|
|
11326
|
-
static
|
11327
|
-
|
11328
|
-
|
11329
|
-
|
11330
|
-
|
11331
|
-
|
11332
|
-
|
11333
|
-
|
11783
|
+
static upb_SizePrefixString* upb_SizePrefixString_Copy(upb_StringView s,
|
11784
|
+
upb_Arena* a) {
|
11785
|
+
// A 2GB string will fail at serialization time, but we accept up to 4GB in
|
11786
|
+
// memory here.
|
11787
|
+
if (s.size > UINT32_MAX) return NULL;
|
11788
|
+
upb_SizePrefixString* str =
|
11789
|
+
upb_Arena_Malloc(a, sizeof(uint32_t) + s.size + 1);
|
11790
|
+
if (str == NULL) return NULL;
|
11791
|
+
str->size = s.size;
|
11792
|
+
char* data = (char*)str->data;
|
11793
|
+
if (s.size) memcpy(data, s.data, s.size);
|
11794
|
+
data[s.size] = '\0';
|
11795
|
+
return str;
|
11334
11796
|
}
|
11335
11797
|
|
11336
11798
|
/* Adapted from ABSL's wyhash. */
|
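upb_SizePrefixString_Copy above stores each string key as a 32-bit length immediately followed by the NUL-terminated bytes, all in one arena allocation, and rejects sizes over UINT32_MAX. A malloc-based sketch of the same layout (sp_string is a stand-in; upb's real struct is defined in its headers):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    // Stand-in for a size-prefixed string: a 32-bit length, then the bytes.
    typedef struct {
      uint32_t size;
      char data[];  // flexible array member, NUL-terminated for convenience
    } sp_string;

    static sp_string* sp_string_copy(const char* s, size_t len) {
      if (len > UINT32_MAX) return NULL;  // 4 GiB cap, as in the hunk above
      sp_string* str = malloc(sizeof(uint32_t) + len + 1);
      if (!str) return NULL;
      str->size = (uint32_t)len;
      if (len) memcpy(str->data, s, len);
      str->data[len] = '\0';
      return str;
    }

    int main(void) {
      sp_string* s = sp_string_copy("hello", 5);
      assert(s && s->size == 5 && strcmp(s->data, "hello") == 0);
      // The table can recover the prefix from the data pointer by backing up
      // sizeof(uint32_t) bytes, which is what the resize path relies on.
      assert((const char*)s + sizeof(uint32_t) == s->data);
      free(s);
      return 0;
    }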
@@ -11472,32 +11934,40 @@ uint32_t _upb_Hash(const void* p, size_t n, uint64_t seed) {
|
|
11472
11934
|
return Wyhash(p, n, seed, kWyhashSalt);
|
11473
11935
|
}
|
11474
11936
|
|
11475
|
-
|
11476
|
-
|
11477
|
-
|
11937
|
+
static const void* const _upb_seed;
|
11938
|
+
|
11939
|
+
// Returns a random seed for upb's hash function. This does not provide
|
11940
|
+
// high-quality randomness, but it should be enough to prevent unit tests from
|
11941
|
+
// relying on a deterministic map ordering. By returning the address of a
|
11942
|
+
// variable, we are able to get some randomness for free provided that ASLR is
|
11943
|
+
// enabled.
|
11944
|
+
static uint64_t _upb_Seed(void) { return (uint64_t)&_upb_seed; }
|
11478
11945
|
|
11479
11946
|
static uint32_t _upb_Hash_NoSeed(const char* p, size_t n) {
|
11480
11947
|
return _upb_Hash(p, n, _upb_Seed());
|
11481
11948
|
}
|
11482
11949
|
|
11483
|
-
static uint32_t strhash(
|
11484
|
-
|
11485
|
-
|
11486
|
-
|
11950
|
+
static uint32_t strhash(upb_key key) {
|
11951
|
+
return _upb_Hash_NoSeed(key.str->data, key.str->size);
|
11952
|
+
}
|
11953
|
+
|
11954
|
+
static bool streql(upb_key k1, lookupkey_t k2) {
|
11955
|
+
const upb_SizePrefixString* k1s = k1.str;
|
11956
|
+
const upb_StringView k2s = k2.str;
|
11957
|
+
return k1s->size == k2s.size &&
|
11958
|
+
(k1s->size == 0 || memcmp(k1s->data, k2s.data, k1s->size) == 0);
|
11487
11959
|
}
|
11488
11960
|
|
11489
|
-
|
11490
|
-
|
11491
|
-
|
11492
|
-
|
11961
|
+
/** Calculates the number of entries required to hold an expected number of
|
11962
|
+
* values, within the table's load factor. */
|
11963
|
+
static size_t _upb_entries_needed_for(size_t expected_size) {
|
11964
|
+
size_t need_entries = expected_size + 1 + expected_size / 7;
|
11965
|
+
UPB_ASSERT(need_entries - (need_entries >> 3) >= expected_size);
|
11966
|
+
return need_entries;
|
11493
11967
|
}
|
11494
11968
|
|
11495
11969
|
bool upb_strtable_init(upb_strtable* t, size_t expected_size, upb_Arena* a) {
|
11496
|
-
|
11497
|
-
// denominator.
|
11498
|
-
size_t need_entries = (expected_size + 1) * 1204 / 1024;
|
11499
|
-
UPB_ASSERT(need_entries >= expected_size * 0.85);
|
11500
|
-
int size_lg2 = upb_Log2Ceiling(need_entries);
|
11970
|
+
int size_lg2 = upb_Log2Ceiling(_upb_entries_needed_for(expected_size));
|
11501
11971
|
return init(&t->t, size_lg2, a);
|
11502
11972
|
}
|
11503
11973
|
|
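_upb_entries_needed_for above sizes a table for an expected element count under the 7/8 load factor by asking for expected + 1 + expected/7 buckets before rounding up to a power of two; the assert confirms that this many buckets minus one eighth still covers the expected count. A quick numeric check (entries_needed_for repeats the same arithmetic, it is not the upb function):

    #include <assert.h>
    #include <stddef.h>

    // Buckets needed so that 'expected' entries stay below a 7/8 load factor.
    static size_t entries_needed_for(size_t expected) {
      size_t need = expected + 1 + expected / 7;
      assert(need - (need >> 3) >= expected);  // same invariant as upb's assert
      return need;
    }

    int main(void) {
      assert(entries_needed_for(0) == 1);
      assert(entries_needed_for(7) == 9);    // 9 - 1 == 8 >= 7
      assert(entries_needed_for(64) == 74);  // 74 - 9 == 65 >= 64
      return 0;
    }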
@@ -11512,10 +11982,23 @@ bool upb_strtable_resize(upb_strtable* t, size_t size_lg2, upb_Arena* a) {
|
|
11512
11982
|
if (!init(&new_table.t, size_lg2, a)) return false;
|
11513
11983
|
|
11514
11984
|
intptr_t iter = UPB_STRTABLE_BEGIN;
|
11515
|
-
upb_StringView
|
11985
|
+
upb_StringView sv;
|
11516
11986
|
upb_value val;
|
11517
|
-
while (upb_strtable_next2(t, &
|
11518
|
-
|
11987
|
+
while (upb_strtable_next2(t, &sv, &val, &iter)) {
|
11988
|
+
// Unlike normal insert, does not copy string data or possibly reallocate
|
11989
|
+
// the table
|
11990
|
+
// The data pointer used in the table is guaranteed to point at a
|
11991
|
+
// upb_SizePrefixString, we just need to back up by the size of the uint32_t
|
11992
|
+
// length prefix.
|
11993
|
+
const upb_SizePrefixString* keystr =
|
11994
|
+
(const upb_SizePrefixString*)(sv.data - sizeof(uint32_t));
|
11995
|
+
UPB_ASSERT(keystr->data == sv.data);
|
11996
|
+
UPB_ASSERT(keystr->size == sv.size);
|
11997
|
+
|
11998
|
+
lookupkey_t lookupkey = {.str = sv};
|
11999
|
+
upb_key tabkey = {.str = keystr};
|
12000
|
+
uint32_t hash = _upb_Hash_NoSeed(sv.data, sv.size);
|
12001
|
+
insert(&new_table.t, lookupkey, tabkey, val, hash, &strhash, &streql);
|
11519
12002
|
}
|
11520
12003
|
*t = new_table;
|
11521
12004
|
return true;
|
@@ -11523,23 +12006,21 @@ bool upb_strtable_resize(upb_strtable* t, size_t size_lg2, upb_Arena* a) {
|
|
11523
12006
|
|
11524
12007
|
bool upb_strtable_insert(upb_strtable* t, const char* k, size_t len,
|
11525
12008
|
upb_value v, upb_Arena* a) {
|
11526
|
-
lookupkey_t key;
|
11527
|
-
upb_tabkey tabkey;
|
11528
|
-
uint32_t hash;
|
11529
|
-
|
11530
12009
|
if (isfull(&t->t)) {
|
11531
12010
|
/* Need to resize. New table of double the size, add old elements to it. */
|
11532
|
-
if (!upb_strtable_resize(t, t->t
|
12011
|
+
if (!upb_strtable_resize(t, _upb_log2_table_size(&t->t) + 1, a)) {
|
11533
12012
|
return false;
|
11534
12013
|
}
|
11535
12014
|
}
|
11536
12015
|
|
11537
|
-
|
11538
|
-
|
11539
|
-
if (
|
12016
|
+
upb_StringView sv = upb_StringView_FromDataAndSize(k, len);
|
12017
|
+
upb_SizePrefixString* size_prefix_string = upb_SizePrefixString_Copy(sv, a);
|
12018
|
+
if (!size_prefix_string) return false;
|
11540
12019
|
|
11541
|
-
|
11542
|
-
|
12020
|
+
lookupkey_t lookupkey = {.str = sv};
|
12021
|
+
upb_key key = {.str = size_prefix_string};
|
12022
|
+
uint32_t hash = _upb_Hash_NoSeed(k, len);
|
12023
|
+
insert(&t->t, lookupkey, key, v, hash, &strhash, &streql);
|
11543
12024
|
return true;
|
11544
12025
|
}
|
11545
12026
|
|
@@ -11552,8 +12033,7 @@ bool upb_strtable_lookup2(const upb_strtable* t, const char* key, size_t len,
|
|
11552
12033
|
bool upb_strtable_remove2(upb_strtable* t, const char* key, size_t len,
|
11553
12034
|
upb_value* val) {
|
11554
12035
|
uint32_t hash = _upb_Hash_NoSeed(key, len);
|
11555
|
-
|
11556
|
-
return rm(&t->t, strkey2(key, len), val, &tabkey, hash, &streql);
|
12036
|
+
return rm(&t->t, strkey2(key, len), val, hash, &streql);
|
11557
12037
|
}
|
11558
12038
|
|
11559
12039
|
/* Iteration */
|
@@ -11574,17 +12054,13 @@ bool upb_strtable_done(const upb_strtable_iter* i) {
|
|
11574
12054
|
}
|
11575
12055
|
|
11576
12056
|
upb_StringView upb_strtable_iter_key(const upb_strtable_iter* i) {
|
11577
|
-
upb_StringView key;
|
11578
|
-
uint32_t len;
|
11579
12057
|
UPB_ASSERT(!upb_strtable_done(i));
|
11580
|
-
|
11581
|
-
key.size = len;
|
11582
|
-
return key;
|
12058
|
+
return upb_key_strview(str_tabent(i)->key);
|
11583
12059
|
}
|
11584
12060
|
|
11585
12061
|
upb_value upb_strtable_iter_value(const upb_strtable_iter* i) {
|
11586
12062
|
UPB_ASSERT(!upb_strtable_done(i));
|
11587
|
-
return
|
12063
|
+
return str_tabent(i)->val;
|
11588
12064
|
}
|
11589
12065
|
|
11590
12066
|
void upb_strtable_iter_setdone(upb_strtable_iter* i) {
|
@@ -11598,22 +12074,79 @@ bool upb_strtable_iter_isequal(const upb_strtable_iter* i1,
|
|
11598
12074
|
return i1->t == i2->t && i1->index == i2->index;
|
11599
12075
|
}
|
11600
12076
|
|
12077
|
+
bool upb_strtable_next2(const upb_strtable* t, upb_StringView* key,
|
12078
|
+
upb_value* val, intptr_t* iter) {
|
12079
|
+
size_t tab_idx = next(&t->t, *iter);
|
12080
|
+
if (tab_idx < upb_table_size(&t->t)) {
|
12081
|
+
upb_tabent* ent = &t->t.entries[tab_idx];
|
12082
|
+
*key = upb_key_strview(ent->key);
|
12083
|
+
*val = ent->val;
|
12084
|
+
*iter = tab_idx;
|
12085
|
+
return true;
|
12086
|
+
}
|
12087
|
+
|
12088
|
+
return false;
|
12089
|
+
}
|
12090
|
+
|
12091
|
+
void upb_strtable_removeiter(upb_strtable* t, intptr_t* iter) {
|
12092
|
+
intptr_t i = *iter;
|
12093
|
+
upb_tabent* ent = &t->t.entries[i];
|
12094
|
+
upb_tabent* prev = NULL;
|
12095
|
+
|
12096
|
+
// Linear search, not great.
|
12097
|
+
upb_tabent* end = &t->t.entries[upb_table_size(&t->t)];
|
12098
|
+
for (upb_tabent* e = t->t.entries; e != end; e++) {
|
12099
|
+
if (e->next == ent) {
|
12100
|
+
prev = e;
|
12101
|
+
break;
|
12102
|
+
}
|
12103
|
+
}
|
12104
|
+
|
12105
|
+
if (prev) {
|
12106
|
+
prev->next = ent->next;
|
12107
|
+
}
|
12108
|
+
|
12109
|
+
t->t.count--;
|
12110
|
+
ent->key = upb_key_empty();
|
12111
|
+
ent->next = NULL;
|
12112
|
+
}
|
12113
|
+
|
12114
|
+
void upb_strtable_setentryvalue(upb_strtable* t, intptr_t iter, upb_value v) {
|
12115
|
+
t->t.entries[iter].val = v;
|
12116
|
+
}
|
12117
|
+
|
11601
12118
|
/* upb_inttable ***************************************************************/
|
11602
12119
|
|
11603
12120
|
/* For inttables we use a hybrid structure where small keys are kept in an
|
11604
12121
|
* array and large keys are put in the hash table. */
|
11605
12122
|
|
11606
|
-
|
12123
|
+
// The sentinel value used in the dense array part. Note that callers must
|
12124
|
+
// ensure that inttable is never used with a value of this sentinel type
|
12125
|
+
// (pointers and u32 values will never be; i32 needs to be handled carefully
|
12126
|
+
// to avoid sign-extending into this value).
|
12127
|
+
static const upb_value kInttableSentinel = {.val = UINT64_MAX};
|
12128
|
+
static uint32_t presence_mask_arr_size(uint32_t array_size) {
|
12129
|
+
return (array_size + 7) / 8; // sizeof(uint8_t) is always 1.
|
12130
|
+
}
|
12131
|
+
|
12132
|
+
static uint32_t inthash(upb_key key) { return upb_inthash(key.num); }
|
12133
|
+
|
12134
|
+
static bool inteql(upb_key k1, lookupkey_t k2) { return k1.num == k2.num; }
|
11607
12135
|
|
11608
|
-
static
|
12136
|
+
static upb_value* mutable_array(upb_inttable* t) {
|
12137
|
+
return (upb_value*)t->array;
|
12138
|
+
}
|
11609
12139
|
|
11610
|
-
static
|
11611
|
-
|
12140
|
+
static const upb_value* inttable_array_get(const upb_inttable* t,
|
12141
|
+
uintptr_t key) {
|
12142
|
+
UPB_ASSERT(key < t->array_size);
|
12143
|
+
const upb_value* val = &t->array[key];
|
12144
|
+
return upb_inttable_arrhas(t, key) ? val : NULL;
|
11612
12145
|
}
|
11613
12146
|
|
11614
|
-
static
|
12147
|
+
static upb_value* inttable_val(upb_inttable* t, uintptr_t key) {
|
11615
12148
|
if (key < t->array_size) {
|
11616
|
-
return
|
12149
|
+
return (upb_value*)inttable_array_get(t, key);
|
11617
12150
|
} else {
|
11618
12151
|
upb_tabent* e =
|
11619
12152
|
findentry_mutable(&t->t, intkey(key), upb_inthash(key), &inteql);
|
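The inttable changes above introduce a presence bitmask for the dense array part: (array_size + 7) / 8 bytes, one bit per slot, set on insert and cleared on removal in the later hunks, so the UINT64_MAX sentinel stored in empty slots no longer has to be the only emptiness signal. A self-contained sketch of that bit bookkeeping (names are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    // One presence bit per array slot, rounded up to whole bytes.
    static uint32_t presence_bytes(uint32_t array_size) {
      return (array_size + 7) / 8;
    }

    static void bit_set(uint8_t* mask, uint32_t key) {
      mask[key / 8] |= (uint8_t)(1 << (key % 8));
    }
    static void bit_clear(uint8_t* mask, uint32_t key) {
      mask[key / 8] &= (uint8_t)~(1 << (key % 8));
    }
    static int bit_test(const uint8_t* mask, uint32_t key) {
      return (mask[key / 8] >> (key % 8)) & 1;
    }

    int main(void) {
      assert(presence_bytes(1) == 1);
      assert(presence_bytes(8) == 1);
      assert(presence_bytes(9) == 2);

      uint8_t mask[2];
      memset(mask, 0, sizeof(mask));
      bit_set(mask, 10);
      assert(bit_test(mask, 10) && !bit_test(mask, 9));
      bit_clear(mask, 10);
      assert(!bit_test(mask, 10));
      return 0;
    }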
@@ -11621,8 +12154,8 @@ static upb_tabval* inttable_val(upb_inttable* t, uintptr_t key) {
|
|
11621
12154
|
}
|
11622
12155
|
}
|
11623
12156
|
|
11624
|
-
static const
|
11625
|
-
|
12157
|
+
static const upb_value* inttable_val_const(const upb_inttable* t,
|
12158
|
+
uintptr_t key) {
|
11626
12159
|
return inttable_val((upb_inttable*)t, key);
|
11627
12160
|
}
|
11628
12161
|
|
@@ -11647,108 +12180,117 @@ static void check(upb_inttable* t) {
|
|
11647
12180
|
#endif
|
11648
12181
|
}
|
11649
12182
|
|
11650
|
-
bool upb_inttable_sizedinit(upb_inttable* t,
|
12183
|
+
bool upb_inttable_sizedinit(upb_inttable* t, uint32_t asize, int hsize_lg2,
|
11651
12184
|
upb_Arena* a) {
|
11652
|
-
size_t array_bytes;
|
11653
|
-
|
11654
12185
|
if (!init(&t->t, hsize_lg2, a)) return false;
|
11655
12186
|
/* Always make the array part at least 1 long, so that we know key 0
|
11656
12187
|
* won't be in the hash part, which simplifies things. */
|
11657
12188
|
t->array_size = UPB_MAX(1, asize);
|
11658
12189
|
t->array_count = 0;
|
11659
|
-
|
11660
|
-
|
11661
|
-
|
12190
|
+
#if UINT32_MAX >= SIZE_MAX
|
12191
|
+
if (UPB_UNLIKELY(SIZE_MAX / sizeof(upb_value) < t->array_size)) {
|
12192
|
+
return false;
|
12193
|
+
}
|
12194
|
+
#endif
|
12195
|
+
|
12196
|
+
// Allocate the array part and the presence mask array in one allocation.
|
12197
|
+
size_t array_bytes = t->array_size * sizeof(upb_value);
|
12198
|
+
uint32_t presence_bytes = presence_mask_arr_size(t->array_size);
|
12199
|
+
uintptr_t total_bytes = array_bytes + presence_bytes;
|
12200
|
+
if (UPB_UNLIKELY(total_bytes > SIZE_MAX)) {
|
12201
|
+
return false;
|
12202
|
+
}
|
12203
|
+
void* alloc = upb_Arena_Malloc(a, total_bytes);
|
12204
|
+
if (!alloc) {
|
11662
12205
|
return false;
|
11663
12206
|
}
|
12207
|
+
t->array = alloc;
|
11664
12208
|
memset(mutable_array(t), 0xff, array_bytes);
|
12209
|
+
t->presence_mask = (uint8_t*)alloc + array_bytes;
|
12210
|
+
memset((uint8_t*)t->presence_mask, 0, presence_bytes);
|
12211
|
+
|
11665
12212
|
check(t);
|
11666
12213
|
return true;
|
11667
12214
|
}
|
11668
12215
|
|
11669
12216
|
bool upb_inttable_init(upb_inttable* t, upb_Arena* a) {
|
11670
|
-
|
12217
|
+
// The init size of the table part to match that of strtable.
|
12218
|
+
return upb_inttable_sizedinit(t, 0, 3, a);
|
11671
12219
|
}
|
11672
12220
|
|
11673
12221
|
bool upb_inttable_insert(upb_inttable* t, uintptr_t key, upb_value val,
|
11674
12222
|
upb_Arena* a) {
|
11675
|
-
upb_tabval tabval;
|
11676
|
-
tabval.val = val.val;
|
11677
|
-
UPB_ASSERT(
|
11678
|
-
upb_arrhas(tabval)); /* This will reject (uint64_t)-1. Fix this. */
|
11679
|
-
|
11680
12223
|
if (key < t->array_size) {
|
11681
|
-
UPB_ASSERT(!
|
12224
|
+
UPB_ASSERT(!upb_inttable_arrhas(t, key));
|
11682
12225
|
t->array_count++;
|
11683
|
-
mutable_array(t)[key]
|
12226
|
+
mutable_array(t)[key] = val;
|
12227
|
+
((uint8_t*)t->presence_mask)[key / 8] |= (1 << (key % 8));
|
11684
12228
|
} else {
|
11685
12229
|
if (isfull(&t->t)) {
|
11686
12230
|
/* Need to resize the hash part, but we re-use the array part. */
|
11687
12231
|
size_t i;
|
11688
12232
|
upb_table new_table;
|
11689
12233
|
|
11690
|
-
if (!init(&new_table, t->t
|
12234
|
+
if (!init(&new_table, _upb_log2_table_size(&t->t) + 1, a)) {
|
11691
12235
|
return false;
|
11692
12236
|
}
|
11693
12237
|
|
11694
12238
|
for (i = begin(&t->t); i < upb_table_size(&t->t); i = next(&t->t, i)) {
|
11695
12239
|
const upb_tabent* e = &t->t.entries[i];
|
11696
|
-
|
11697
|
-
|
11698
|
-
|
11699
|
-
_upb_value_setval(&v, e->val.val);
|
11700
|
-
hash = upb_inthash(e->key);
|
11701
|
-
insert(&new_table, intkey(e->key), e->key, v, hash, &inthash, &inteql);
|
12240
|
+
insert(&new_table, intkey(e->key.num), e->key, e->val, inthash(e->key),
|
12241
|
+
&inthash, &inteql);
|
11702
12242
|
}
|
11703
12243
|
|
11704
12244
|
UPB_ASSERT(t->t.count == new_table.count);
|
11705
12245
|
|
11706
12246
|
t->t = new_table;
|
11707
12247
|
}
|
11708
|
-
|
12248
|
+
upb_key tabkey = {.num = key};
|
12249
|
+
insert(&t->t, intkey(key), tabkey, val, upb_inthash(key), &inthash,
|
12250
|
+
&inteql);
|
11709
12251
|
}
|
11710
12252
|
check(t);
|
11711
12253
|
return true;
|
11712
12254
|
}
|
11713
12255
|
|
11714
12256
|
bool upb_inttable_lookup(const upb_inttable* t, uintptr_t key, upb_value* v) {
|
11715
|
-
const
|
12257
|
+
const upb_value* table_v = inttable_val_const(t, key);
|
11716
12258
|
if (!table_v) return false;
|
11717
|
-
if (v)
|
12259
|
+
if (v) *v = *table_v;
|
11718
12260
|
return true;
|
11719
12261
|
}
|
11720
12262
|
|
11721
12263
|
bool upb_inttable_replace(upb_inttable* t, uintptr_t key, upb_value val) {
|
11722
|
-
|
12264
|
+
upb_value* table_v = inttable_val(t, key);
|
11723
12265
|
if (!table_v) return false;
|
11724
|
-
table_v
|
12266
|
+
*table_v = val;
|
11725
12267
|
return true;
|
11726
12268
|
}
|
11727
12269
|
|
11728
12270
|
bool upb_inttable_remove(upb_inttable* t, uintptr_t key, upb_value* val) {
|
11729
12271
|
bool success;
|
11730
12272
|
if (key < t->array_size) {
|
11731
|
-
if (
|
11732
|
-
upb_tabval empty = UPB_TABVALUE_EMPTY_INIT;
|
12273
|
+
if (upb_inttable_arrhas(t, key)) {
|
11733
12274
|
t->array_count--;
|
11734
12275
|
if (val) {
|
11735
|
-
|
12276
|
+
*val = t->array[key];
|
11736
12277
|
}
|
11737
|
-
mutable_array(t)[key] =
|
12278
|
+
mutable_array(t)[key] = kInttableSentinel;
|
12279
|
+
((uint8_t*)t->presence_mask)[key / 8] &= ~(1 << (key % 8));
|
11738
12280
|
success = true;
|
11739
12281
|
} else {
|
11740
12282
|
success = false;
|
11741
12283
|
}
|
11742
12284
|
} else {
|
11743
|
-
success = rm(&t->t, intkey(key), val,
|
12285
|
+
success = rm(&t->t, intkey(key), val, upb_inthash(key), &inteql);
|
11744
12286
|
}
|
11745
12287
|
check(t);
|
11746
12288
|
return success;
|
11747
12289
|
}
|
11748
12290
|
|
11749
|
-
|
12291
|
+
bool upb_inttable_compact(upb_inttable* t, upb_Arena* a) {
|
11750
12292
|
/* A power-of-two histogram of the table keys. */
|
11751
|
-
|
12293
|
+
uint32_t counts[UPB_MAXARRSIZE + 1] = {0};
|
11752
12294
|
|
11753
12295
|
/* The max key in each bucket. */
|
11754
12296
|
uintptr_t max[UPB_MAXARRSIZE + 1] = {0};
|
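upb_inttable_sizedinit in the hunk above now checks the size computations for overflow and carves the value array and the presence mask out of a single arena allocation: the first array_size * sizeof(upb_value) bytes are sentinel-filled with 0xff, and the zeroed mask bytes follow immediately after. A malloc-based sketch of that single-allocation layout (the struct and function names are stand-ins, not the upb API):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct {
      uint64_t* array;    // value slots; 0xff.. is the "empty" sentinel
      uint8_t* presence;  // one bit per slot, 0 means empty
      uint32_t array_size;
    } int_array_part;

    static int array_part_init(int_array_part* t, uint32_t asize) {
      t->array_size = asize ? asize : 1;  // always keep at least one slot
      if (SIZE_MAX / sizeof(uint64_t) < t->array_size) return 0;  // overflow guard
      size_t array_bytes = t->array_size * sizeof(uint64_t);
      size_t presence_bytes = (t->array_size + 7) / 8;
      void* alloc = malloc(array_bytes + presence_bytes);
      if (!alloc) return 0;
      t->array = alloc;
      memset(t->array, 0xff, array_bytes);          // sentinel-fill the values
      t->presence = (uint8_t*)alloc + array_bytes;  // mask lives right after
      memset(t->presence, 0, presence_bytes);
      return 1;
    }

    int main(void) {
      int_array_part t;
      assert(array_part_init(&t, 10));
      assert(t.array[0] == UINT64_MAX);  // sentinel in place
      assert(t.presence[0] == 0);        // nothing marked present yet
      assert((uint8_t*)t.array + 10 * sizeof(uint64_t) == t.presence);
      free(t.array);
      return 0;
    }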
@@ -11766,11 +12308,11 @@ void upb_inttable_compact(upb_inttable* t, upb_Arena* a) {
|
|
11766
12308
|
|
11767
12309
|
/* Find the largest power of two that satisfies the MIN_DENSITY
|
11768
12310
|
* definition (while actually having some keys). */
|
11769
|
-
|
11770
|
-
int size_lg2;
|
11771
|
-
upb_inttable new_t;
|
12311
|
+
uint32_t arr_count = upb_inttable_count(t);
|
11772
12312
|
|
11773
|
-
|
12313
|
+
// Scan all buckets except capped bucket
|
12314
|
+
int size_lg2 = ARRAY_SIZE(counts) - 1;
|
12315
|
+
for (; size_lg2 > 0; size_lg2--) {
|
11774
12316
|
if (counts[size_lg2] == 0) {
|
11775
12317
|
/* We can halve again without losing any entries. */
|
11776
12318
|
continue;
|
@@ -11783,14 +12325,17 @@ void upb_inttable_compact(upb_inttable* t, upb_Arena* a) {
|
|
11783
12325
|
|
11784
12326
|
UPB_ASSERT(arr_count <= upb_inttable_count(t));
|
11785
12327
|
|
12328
|
+
upb_inttable new_t;
|
11786
12329
|
{
|
11787
12330
|
/* Insert all elements into new, perfectly-sized table. */
|
11788
|
-
|
11789
|
-
|
11790
|
-
size_t hash_size = hash_count ? (hash_count
|
12331
|
+
uintptr_t arr_size = max[size_lg2] + 1; /* +1 so arr[max] will fit. */
|
12332
|
+
uint32_t hash_count = upb_inttable_count(t) - arr_count;
|
12333
|
+
size_t hash_size = hash_count ? _upb_entries_needed_for(hash_count) : 0;
|
11791
12334
|
int hashsize_lg2 = log2ceil(hash_size);
|
11792
12335
|
|
11793
|
-
upb_inttable_sizedinit(&new_t, arr_size, hashsize_lg2, a)
|
12336
|
+
if (!upb_inttable_sizedinit(&new_t, arr_size, hashsize_lg2, a)) {
|
12337
|
+
return false;
|
12338
|
+
}
|
11794
12339
|
|
11795
12340
|
{
|
11796
12341
|
intptr_t iter = UPB_INTTABLE_BEGIN;
|
@@ -11802,9 +12347,24 @@ void upb_inttable_compact(upb_inttable* t, upb_Arena* a) {
|
|
11802
12347
|
}
|
11803
12348
|
|
11804
12349
|
UPB_ASSERT(new_t.array_size == arr_size);
|
11805
|
-
UPB_ASSERT(new_t.t.size_lg2 == hashsize_lg2);
|
11806
12350
|
}
|
11807
12351
|
*t = new_t;
|
12352
|
+
return true;
|
12353
|
+
}
|
12354
|
+
|
12355
|
+
void upb_inttable_clear(upb_inttable* t) {
|
12356
|
+
// Clear the array part.
|
12357
|
+
size_t array_bytes = t->array_size * sizeof(upb_value);
|
12358
|
+
t->array_count = 0;
|
12359
|
+
// Clear the array by setting all bits to 1, as UINT64_MAX is the sentinel
|
12360
|
+
// value for an empty array.
|
12361
|
+
memset(mutable_array(t), 0xff, array_bytes);
|
12362
|
+
// Clear the presence mask array.
|
12363
|
+
memset((uint8_t*)t->presence_mask, 0, presence_mask_arr_size(t->array_size));
|
12364
|
+
// Clear the table part.
|
12365
|
+
size_t bytes = upb_table_size(&t->t) * sizeof(upb_tabent);
|
12366
|
+
t->t.count = 0;
|
12367
|
+
memset((char*)t->t.entries, 0, bytes);
|
11808
12368
|
}
|
11809
12369
|
|
11810
12370
|
// Iteration.
|
@@ -11814,10 +12374,10 @@ bool upb_inttable_next(const upb_inttable* t, uintptr_t* key, upb_value* val,
|
|
11814
12374
|
intptr_t i = *iter;
|
11815
12375
|
if ((size_t)(i + 1) <= t->array_size) {
|
11816
12376
|
while ((size_t)++i < t->array_size) {
|
11817
|
-
|
11818
|
-
if (
|
12377
|
+
const upb_value* ent = inttable_array_get(t, i);
|
12378
|
+
if (ent) {
|
11819
12379
|
*key = i;
|
11820
|
-
*val =
|
12380
|
+
*val = *ent;
|
11821
12381
|
*iter = i;
|
11822
12382
|
return true;
|
11823
12383
|
}
|
@@ -11828,13 +12388,18 @@ bool upb_inttable_next(const upb_inttable* t, uintptr_t* key, upb_value* val,
|
|
11828
12388
|
size_t tab_idx = next(&t->t, i - t->array_size);
|
11829
12389
|
if (tab_idx < upb_table_size(&t->t)) {
|
11830
12390
|
upb_tabent* ent = &t->t.entries[tab_idx];
|
11831
|
-
*key = ent->key;
|
11832
|
-
*val =
|
12391
|
+
*key = ent->key.num;
|
12392
|
+
*val = ent->val;
|
11833
12393
|
*iter = tab_idx + t->array_size;
|
11834
12394
|
return true;
|
12395
|
+
} else {
|
12396
|
+
// We should set the iterator any way. When we are done, the iterator value
|
12397
|
+
// is invalidated. `upb_inttable_done` will check on the iterator value to
|
12398
|
+
// determine if the iteration is done.
|
12399
|
+
*iter = INTPTR_MAX - 1; // To disambiguate from UPB_INTTABLE_BEGIN, to
|
12400
|
+
// match the behavior of `upb_strtable_iter`.
|
12401
|
+
return false;
|
11835
12402
|
}
|
11836
|
-
|
11837
|
-
return false;
|
11838
12403
|
}
|
11839
12404
|
|
11840
12405
|
void upb_inttable_removeiter(upb_inttable* t, intptr_t* iter) {
|
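upb_inttable_next above now parks the iterator at INTPTR_MAX - 1 once iteration is exhausted, so it can no longer be confused with UPB_INTTABLE_BEGIN, and upb_inttable_done (in the next hunk) reports any position at or past array_size plus the hash-table size as finished. A toy iterator with the same parked-when-done contract (not compiled against upb; all names here are illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    // Toy container with 6 slots standing in for "array part plus hash part".
    #define TOTAL 6
    #define TOY_BEGIN (-1)

    static const int present[TOTAL] = {0, 1, 0, 1, 1, 0};
    static const int values[TOTAL] = {0, 10, 0, 30, 40, 0};

    static int toy_next(intptr_t* iter, int* val) {
      intptr_t i = *iter;
      while ((size_t)(i + 1) < TOTAL) {
        i++;
        if (present[i]) {
          *val = values[i];
          *iter = i;
          return 1;
        }
      }
      *iter = INTPTR_MAX - 1;  // park the iterator so it can't look like BEGIN
      return 0;
    }

    static int toy_done(intptr_t iter) { return (uintptr_t)iter >= TOTAL; }

    int main(void) {
      intptr_t it = TOY_BEGIN;
      int v, sum = 0;
      while (toy_next(&it, &v)) sum += v;
      assert(sum == 80);
      assert(toy_done(it));
      return 0;
    }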
@@ -11860,53 +12425,44 @@ void upb_inttable_removeiter(upb_inttable* t, intptr_t* iter) {
|
|
11860
12425
|
}
|
11861
12426
|
|
11862
12427
|
t->t.count--;
|
11863
|
-
ent->key =
|
12428
|
+
ent->key = upb_key_empty();
|
11864
12429
|
ent->next = NULL;
|
11865
12430
|
}
|
11866
12431
|
}
|
11867
12432
|
|
11868
|
-
|
11869
|
-
|
11870
|
-
|
11871
|
-
|
11872
|
-
upb_tabent* ent = &t->t.entries[
|
11873
|
-
|
11874
|
-
key->data = upb_tabstr(ent->key, &len);
|
11875
|
-
key->size = len;
|
11876
|
-
*val = _upb_value_val(ent->val.val);
|
11877
|
-
*iter = tab_idx;
|
11878
|
-
return true;
|
12433
|
+
void upb_inttable_setentryvalue(upb_inttable* t, intptr_t iter, upb_value v) {
|
12434
|
+
if ((size_t)iter < t->array_size) {
|
12435
|
+
mutable_array(t)[iter] = v;
|
12436
|
+
} else {
|
12437
|
+
upb_tabent* ent = &t->t.entries[iter - t->array_size];
|
12438
|
+
ent->val = v;
|
11879
12439
|
}
|
11880
|
-
|
11881
|
-
return false;
|
11882
12440
|
}
|
11883
12441
|
|
11884
|
-
|
11885
|
-
|
11886
|
-
|
11887
|
-
|
11888
|
-
|
11889
|
-
|
11890
|
-
|
11891
|
-
for (upb_tabent* e = t->t.entries; e != end; e++) {
|
11892
|
-
if (e->next == ent) {
|
11893
|
-
prev = e;
|
11894
|
-
break;
|
11895
|
-
}
|
11896
|
-
}
|
11897
|
-
|
11898
|
-
if (prev) {
|
11899
|
-
prev->next = ent->next;
|
12442
|
+
bool upb_inttable_done(const upb_inttable* t, intptr_t iter) {
|
12443
|
+
if ((uintptr_t)iter >= t->array_size + upb_table_size(&t->t)) {
|
12444
|
+
return true;
|
12445
|
+
} else if ((size_t)iter < t->array_size) {
|
12446
|
+
return !upb_inttable_arrhas(t, iter);
|
12447
|
+
} else {
|
12448
|
+
return upb_tabent_isempty(&t->t.entries[iter - t->array_size]);
|
11900
12449
|
}
|
12450
|
+
}
|
11901
12451
|
|
11902
|
-
|
11903
|
-
|
11904
|
-
|
12452
|
+
uintptr_t upb_inttable_iter_key(const upb_inttable* t, intptr_t iter) {
|
12453
|
+
UPB_ASSERT(!upb_inttable_done(t, iter));
|
12454
|
+
return (size_t)iter < t->array_size
|
12455
|
+
? iter
|
12456
|
+
: t->t.entries[iter - t->array_size].key.num;
|
11905
12457
|
}
|
11906
12458
|
|
11907
|
-
|
11908
|
-
|
11909
|
-
|
12459
|
+
upb_value upb_inttable_iter_value(const upb_inttable* t, intptr_t iter) {
|
12460
|
+
UPB_ASSERT(!upb_inttable_done(t, iter));
|
12461
|
+
if ((size_t)iter < t->array_size) {
|
12462
|
+
return t->array[iter];
|
12463
|
+
} else {
|
12464
|
+
return t->t.entries[iter - t->array_size].val;
|
12465
|
+
}
|
11910
12466
|
}
|
11911
12467
|
|
11912
12468
|
|
@@ -12137,17 +12693,6 @@ upb_Extension* UPB_PRIVATE(_upb_Message_GetOrCreateExtension)(
|
|
12137
12693
|
return ext;
|
12138
12694
|
}
|
12139
12695
|
|
12140
|
-
void upb_Message_ReplaceUnknownWithExtension(struct upb_Message* msg,
|
12141
|
-
uintptr_t iter,
|
12142
|
-
const upb_Extension* ext) {
|
12143
|
-
UPB_ASSERT(iter != 0);
|
12144
|
-
upb_Message_Internal* in = UPB_PRIVATE(_upb_Message_GetInternal)(msg);
|
12145
|
-
UPB_ASSERT(in);
|
12146
|
-
size_t index = iter - 1;
|
12147
|
-
upb_TaggedAuxPtr tagged_ptr = in->aux_data[index];
|
12148
|
-
UPB_ASSERT(upb_TaggedAuxPtr_IsUnknown(tagged_ptr));
|
12149
|
-
in->aux_data[index] = upb_TaggedAuxPtr_MakeExtension(ext);
|
12150
|
-
}
|
12151
12696
|
|
12152
12697
|
#include <math.h>
|
12153
12698
|
#include <stddef.h>
|
@@ -13203,7 +13748,7 @@ bool _upb_DescState_Grow(upb_DescState* d, upb_Arena* a) {
|
|
13203
13748
|
// Must be last.
|
13204
13749
|
|
13205
13750
|
struct upb_EnumDef {
|
13206
|
-
const UPB_DESC(EnumOptions*) opts;
|
13751
|
+
UPB_ALIGN_AS(8) const UPB_DESC(EnumOptions*) opts;
|
13207
13752
|
const UPB_DESC(FeatureSet*) resolved_features;
|
13208
13753
|
const upb_MiniTableEnum* layout; // Only for proto2.
|
13209
13754
|
const upb_FileDef* file;
|
@@ -13394,7 +13939,7 @@ static upb_MiniTableEnum* create_enumlayout(upb_DefBuilder* ctx,
|
|
13394
13939
|
|
13395
13940
|
static upb_StringView* _upb_EnumReservedNames_New(
|
13396
13941
|
upb_DefBuilder* ctx, int n, const upb_StringView* protos) {
|
13397
|
-
upb_StringView* sv =
|
13942
|
+
upb_StringView* sv = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_StringView, n);
|
13398
13943
|
for (int i = 0; i < n; i++) {
|
13399
13944
|
sv[i].data =
|
13400
13945
|
upb_strdup2(protos[i].data, protos[i].size, _upb_DefBuilder_Arena(ctx));
|
@@ -13454,7 +13999,7 @@ static void create_enumdef(upb_DefBuilder* ctx, const char* prefix,
|
|
13454
13999
|
e->res_name_count = n_res_name;
|
13455
14000
|
e->res_names = _upb_EnumReservedNames_New(ctx, n_res_name, res_names);
|
13456
14001
|
|
13457
|
-
upb_inttable_compact(&e->iton, ctx->arena);
|
14002
|
+
if (!upb_inttable_compact(&e->iton, ctx->arena)) _upb_DefBuilder_OomErr(ctx);
|
13458
14003
|
|
13459
14004
|
if (upb_EnumDef_IsClosed(e)) {
|
13460
14005
|
if (ctx->layout) {
|
@@ -13479,7 +14024,7 @@ upb_EnumDef* _upb_EnumDefs_New(upb_DefBuilder* ctx, int n,
|
|
13479
14024
|
const char* name = containing_type ? upb_MessageDef_FullName(containing_type)
|
13480
14025
|
: _upb_FileDef_RawPackage(ctx->file);
|
13481
14026
|
|
13482
|
-
upb_EnumDef* e =
|
14027
|
+
upb_EnumDef* e = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_EnumDef, n);
|
13483
14028
|
for (int i = 0; i < n; i++) {
|
13484
14029
|
create_enumdef(ctx, name, protos[i], parent_features, &e[i]);
|
13485
14030
|
e[i].containing_type = containing_type;
|
@@ -13513,7 +14058,7 @@ upb_EnumReservedRange* _upb_EnumReservedRanges_New(
|
|
13513
14058
|
const UPB_DESC(EnumDescriptorProto_EnumReservedRange) * const* protos,
|
13514
14059
|
const upb_EnumDef* e) {
|
13515
14060
|
upb_EnumReservedRange* r =
|
13516
|
-
|
14061
|
+
UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_EnumReservedRange, n);
|
13517
14062
|
|
13518
14063
|
for (int i = 0; i < n; i++) {
|
13519
14064
|
const int32_t start =
|
@@ -13540,20 +14085,19 @@ upb_EnumReservedRange* _upb_EnumReservedRanges_New(
 }
 
 
+#include <stddef.h>
 #include <stdint.h>
+#include <stdlib.h>
 
 
 // Must be last.
 
 struct upb_EnumValueDef {
-  const UPB_DESC(EnumValueOptions*) opts;
+  UPB_ALIGN_AS(8) const UPB_DESC(EnumValueOptions*) opts;
   const UPB_DESC(FeatureSet*) resolved_features;
   const upb_EnumDef* parent;
   const char* full_name;
   int32_t number;
-#if UINTPTR_MAX == 0xffffffff
-  uint32_t padding;  // Increase size to a multiple of 8.
-#endif
 };
 
 upb_EnumValueDef* _upb_EnumValueDef_At(const upb_EnumValueDef* v, int i) {
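Note: this hunk, like the parallel ones for upb_EnumDef, upb_FieldDef, upb_MessageDef, upb_OneofDef and upb_ServiceDef, trades the 32-bit-only uint32_t padding member for UPB_ALIGN_AS(8) on the first field, so the compiler itself rounds the struct size up to a multiple of 8 on every target (the _upb_DefType_CheckPadding(sizeof(...)) calls seen in later hunks appear to assert exactly that). A self-contained C11 illustration of the effect, using plain _Alignas rather than the upb macro:

    #include <stdalign.h>
    #include <stdint.h>
    #include <stdio.h>

    /* On a 32-bit target this struct may be only 4-byte aligned, so its size
     * can land on a value that is not a multiple of 8 (e.g. 12 bytes). */
    struct plain {
      const void* opts;
      const void* parent;
      int32_t number;
    };

    /* Forcing 8-byte alignment on the first member makes the whole struct
     * 8-byte aligned; sizeof is then padded to a multiple of 8 everywhere. */
    struct aligned {
      _Alignas(8) const void* opts;
      const void* parent;
      int32_t number;
    };

    int main(void) {
      printf("plain:   size=%zu align=%zu\n", sizeof(struct plain),
             alignof(struct plain));
      printf("aligned: size=%zu align=%zu\n", sizeof(struct aligned),
             alignof(struct aligned));
      return 0;
    }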
@@ -13567,7 +14111,8 @@ static int _upb_EnumValueDef_Compare(const void* p1, const void* p2) {
 }
 
 const upb_EnumValueDef** _upb_EnumValueDefs_Sorted(const upb_EnumValueDef* v,
-                                                   int n, upb_Arena* a) {
+                                                   size_t n, upb_Arena* a) {
+  if (SIZE_MAX / sizeof(void*) < n) return NULL;
   // TODO: Try to replace this arena alloc with a persistent scratch buffer.
   upb_EnumValueDef** out =
       (upb_EnumValueDef**)upb_Arena_Malloc(a, n * sizeof(void*));
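Note: the added guard in _upb_EnumValueDefs_Sorted, together with the widened size_t n parameter, rejects any count for which n * sizeof(void*) cannot be represented in a size_t before that product ever reaches upb_Arena_Malloc. Dividing instead of multiplying keeps the test itself overflow-free: SIZE_MAX / sizeof(void*) is the largest count that can be multiplied by the element size without wrapping. A small stand-alone demonstration of the same predicate; would_overflow is an illustrative name only.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* True when count * elem_size cannot be represented in a size_t. */
    static bool would_overflow(size_t count, size_t elem_size) {
      return elem_size != 0 && count > SIZE_MAX / elem_size;
    }

    int main(void) {
      assert(!would_overflow(16, sizeof(void*)));
      /* SIZE_MAX pointers can never fit in memory; the predicate rejects the
       * request without ever performing the wrapping multiplication. */
      assert(would_overflow(SIZE_MAX, sizeof(void*)));
      return 0;
    }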
@@ -13656,8 +14201,7 @@ upb_EnumValueDef* _upb_EnumValueDefs_New(
     bool* is_sorted) {
   _upb_DefType_CheckPadding(sizeof(upb_EnumValueDef));
 
-  upb_EnumValueDef* v =
-      _upb_DefBuilder_Alloc(ctx, sizeof(upb_EnumValueDef) * n);
+  upb_EnumValueDef* v = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_EnumValueDef, n);
 
   *is_sorted = true;
   uint32_t previous = 0;
@@ -13710,8 +14254,7 @@ upb_ExtensionRange* _upb_ExtensionRanges_New(
     upb_DefBuilder* ctx, int n,
     const UPB_DESC(DescriptorProto_ExtensionRange*) const* protos,
     const UPB_DESC(FeatureSet*) parent_features, const upb_MessageDef* m) {
-  upb_ExtensionRange* r =
-      _upb_DefBuilder_Alloc(ctx, sizeof(upb_ExtensionRange) * n);
+  upb_ExtensionRange* r = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_ExtensionRange, n);
 
   for (int i = 0; i < n; i++) {
     UPB_DEF_SET_OPTIONS(r[i].opts, DescriptorProto_ExtensionRange,
@@ -13763,7 +14306,7 @@ typedef struct {
 } str_t;
 
 struct upb_FieldDef {
-  const UPB_DESC(FieldOptions*) opts;
+  UPB_ALIGN_AS(8) const UPB_DESC(FieldOptions*) opts;
   const UPB_DESC(FeatureSet*) resolved_features;
   const upb_FileDef* file;
   const upb_MessageDef* msgdef;
@@ -14495,8 +15038,7 @@ upb_FieldDef* _upb_Extensions_New(upb_DefBuilder* ctx, int n,
                                   const UPB_DESC(FeatureSet*) parent_features,
                                   const char* prefix, upb_MessageDef* m) {
   _upb_DefType_CheckPadding(sizeof(upb_FieldDef));
-  upb_FieldDef* defs =
-      (upb_FieldDef*)_upb_DefBuilder_Alloc(ctx, sizeof(upb_FieldDef) * n);
+  upb_FieldDef* defs = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_FieldDef, n);
 
   for (int i = 0; i < n; i++) {
     upb_FieldDef* f = &defs[i];
@@ -14515,8 +15057,7 @@ upb_FieldDef* _upb_FieldDefs_New(upb_DefBuilder* ctx, int n,
                                  const char* prefix, upb_MessageDef* m,
                                  bool* is_sorted) {
   _upb_DefType_CheckPadding(sizeof(upb_FieldDef));
-  upb_FieldDef* defs =
-      (upb_FieldDef*)_upb_DefBuilder_Alloc(ctx, sizeof(upb_FieldDef) * n);
+  upb_FieldDef* defs = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_FieldDef, n);
 
   uint32_t previous = 0;
   for (int i = 0; i < n; i++) {
@@ -15018,7 +15559,7 @@ void _upb_FileDef_Create(upb_DefBuilder* ctx,
   file->ext_layouts = _upb_DefBuilder_Alloc(
       ctx, sizeof(*file->ext_layouts) * file->ext_count);
   upb_MiniTableExtension* ext =
-      _upb_DefBuilder_Alloc(ctx, sizeof(*ext) * file->ext_count);
+      UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_MiniTableExtension, file->ext_count);
   for (int i = 0; i < file->ext_count; i++) {
     file->ext_layouts[i] = &ext[i];
   }
@@ -15077,7 +15618,7 @@ void _upb_FileDef_Create(upb_DefBuilder* ctx,
   // Verify dependencies.
   strs = UPB_DESC(FileDescriptorProto_dependency)(file_proto, &n);
   file->dep_count = n;
-  file->deps = _upb_DefBuilder_Alloc(ctx, sizeof(*file->deps) * n);
+  file->deps = UPB_DEFBUILDER_ALLOCARRAY(ctx, const upb_FileDef*, n);
 
   for (size_t i = 0; i < n; i++) {
     upb_StringView str = strs[i];
@@ -15093,8 +15634,7 @@ void _upb_FileDef_Create(upb_DefBuilder* ctx,
 
   public_deps = UPB_DESC(FileDescriptorProto_public_dependency)(file_proto, &n);
   file->public_dep_count = n;
-  file->public_deps =
-      _upb_DefBuilder_Alloc(ctx, sizeof(*file->public_deps) * n);
+  file->public_deps = UPB_DEFBUILDER_ALLOCARRAY(ctx, int32_t, n);
   int32_t* mutable_public_deps = (int32_t*)file->public_deps;
   for (size_t i = 0; i < n; i++) {
     if (public_deps[i] >= file->dep_count) {
@@ -15106,7 +15646,7 @@ void _upb_FileDef_Create(upb_DefBuilder* ctx,
 
   weak_deps = UPB_DESC(FileDescriptorProto_weak_dependency)(file_proto, &n);
   file->weak_dep_count = n;
-  file->weak_deps = _upb_DefBuilder_Alloc(ctx, sizeof(*file->weak_deps) * n);
+  file->weak_deps = UPB_DEFBUILDER_ALLOCARRAY(ctx, const int32_t, n);
   int32_t* mutable_weak_deps = (int32_t*)file->weak_deps;
   for (size_t i = 0; i < n; i++) {
     if (weak_deps[i] >= file->dep_count) {
@@ -15837,7 +16377,7 @@ bool upb_Message_DiscardUnknown(upb_Message* msg, const upb_MessageDef* m,
 // Must be last.
 
 struct upb_MessageDef {
-  const UPB_DESC(MessageOptions*) opts;
+  UPB_ALIGN_AS(8) const UPB_DESC(MessageOptions*) opts;
   const UPB_DESC(FeatureSet*) resolved_features;
   const upb_MiniTable* layout;
   const upb_FileDef* file;
@@ -15876,9 +16416,6 @@ struct upb_MessageDef {
   bool in_message_set;
   bool is_sorted;
   upb_WellKnown well_known_type;
-#if UINTPTR_MAX == 0xffffffff
-  uint32_t padding;  // Increase size to a multiple of 8.
-#endif
 };
 
 static void assign_msg_wellknowntype(upb_MessageDef* m) {
@@ -16444,7 +16981,7 @@ bool upb_MessageDef_MiniDescriptorEncode(const upb_MessageDef* m, upb_Arena* a,
 
 static upb_StringView* _upb_ReservedNames_New(upb_DefBuilder* ctx, int n,
                                               const upb_StringView* protos) {
-  upb_StringView* sv = _upb_DefBuilder_Alloc(ctx, sizeof(upb_StringView) * n);
+  upb_StringView* sv = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_StringView, n);
   for (int i = 0; i < n; i++) {
     sv[i].data =
         upb_strdup2(protos[i].data, protos[i].size, _upb_DefBuilder_Arena(ctx));
@@ -16528,7 +17065,7 @@ static void create_msgdef(upb_DefBuilder* ctx, const char* prefix,
   m->real_oneof_count = m->oneof_count - synthetic_count;
 
   assign_msg_wellknowntype(m);
-  upb_inttable_compact(&m->itof, ctx->arena);
+  if (!upb_inttable_compact(&m->itof, ctx->arena)) _upb_DefBuilder_OomErr(ctx);
 
   const UPB_DESC(EnumDescriptorProto)* const* enums =
       UPB_DESC(DescriptorProto_enum_type)(msg_proto, &n_enum);
@@ -16561,7 +17098,7 @@ upb_MessageDef* _upb_MessageDefs_New(upb_DefBuilder* ctx, int n,
   const char* name = containing_type ? containing_type->full_name
                                      : _upb_FileDef_RawPackage(ctx->file);
 
-  upb_MessageDef* m = _upb_DefBuilder_Alloc(ctx, sizeof(upb_MessageDef) * n);
+  upb_MessageDef* m = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_MessageDef, n);
   for (int i = 0; i < n; i++) {
     create_msgdef(ctx, name, protos[i], parent_features, containing_type,
                   &m[i]);
@@ -16594,7 +17131,7 @@ upb_MessageReservedRange* _upb_MessageReservedRanges_New(
     const UPB_DESC(DescriptorProto_ReservedRange) * const* protos,
     const upb_MessageDef* m) {
   upb_MessageReservedRange* r =
-      _upb_DefBuilder_Alloc(ctx, sizeof(upb_MessageReservedRange) * n);
+      UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_MessageReservedRange, n);
 
   for (int i = 0; i < n; i++) {
     const int32_t start =
@@ -16715,7 +17252,7 @@ upb_MethodDef* _upb_MethodDefs_New(upb_DefBuilder* ctx, int n,
                                        const* protos,
                                    const UPB_DESC(FeatureSet*) parent_features,
                                    upb_ServiceDef* s) {
-  upb_MethodDef* m = _upb_DefBuilder_Alloc(ctx, sizeof(upb_MethodDef) * n);
+  upb_MethodDef* m = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_MethodDef, n);
   for (int i = 0; i < n; i++) {
     create_method(ctx, protos[i], parent_features, s, &m[i]);
     m[i].index = i;
@@ -16732,7 +17269,7 @@ upb_MethodDef* _upb_MethodDefs_New(upb_DefBuilder* ctx, int n,
 // Must be last.
 
 struct upb_OneofDef {
-  const UPB_DESC(OneofOptions*) opts;
+  UPB_ALIGN_AS(8) const UPB_DESC(OneofOptions*) opts;
   const UPB_DESC(FeatureSet*) resolved_features;
   const upb_MessageDef* parent;
   const char* full_name;
@@ -16863,7 +17400,7 @@ size_t _upb_OneofDefs_Finalize(upb_DefBuilder* ctx, upb_MessageDef* m) {
     }
 
     o->fields =
-        _upb_DefBuilder_Alloc(ctx, sizeof(upb_FieldDef*) * o->field_count);
+        UPB_DEFBUILDER_ALLOCARRAY(ctx, const upb_FieldDef*, o->field_count);
     o->field_count = 0;
   }
 
@@ -16919,7 +17456,7 @@ upb_OneofDef* _upb_OneofDefs_New(upb_DefBuilder* ctx, int n,
                                  upb_MessageDef* m) {
   _upb_DefType_CheckPadding(sizeof(upb_OneofDef));
 
-  upb_OneofDef* o = _upb_DefBuilder_Alloc(ctx, sizeof(upb_OneofDef) * n);
+  upb_OneofDef* o = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_OneofDef, n);
   for (int i = 0; i < n; i++) {
     create_oneofdef(ctx, m, protos[i], parent_features, &o[i]);
   }
@@ -16931,16 +17468,13 @@ upb_OneofDef* _upb_OneofDefs_New(upb_DefBuilder* ctx, int n,
 // Must be last.
 
 struct upb_ServiceDef {
-  const UPB_DESC(ServiceOptions*) opts;
+  UPB_ALIGN_AS(8) const UPB_DESC(ServiceOptions*) opts;
   const UPB_DESC(FeatureSet*) resolved_features;
   const upb_FileDef* file;
   const char* full_name;
   upb_MethodDef* methods;
   int method_count;
   int index;
-#if UINTPTR_MAX == 0xffffffff
-  uint32_t padding;  // Increase size to a multiple of 8.
-#endif
 };
 
 upb_ServiceDef* _upb_ServiceDef_At(const upb_ServiceDef* s, int index) {
@@ -17027,7 +17561,7 @@ upb_ServiceDef* _upb_ServiceDefs_New(upb_DefBuilder* ctx, int n,
                                          parent_features) {
   _upb_DefType_CheckPadding(sizeof(upb_ServiceDef));
 
-  upb_ServiceDef* s = _upb_DefBuilder_Alloc(ctx, sizeof(upb_ServiceDef) * n);
+  upb_ServiceDef* s = UPB_DEFBUILDER_ALLOCARRAY(ctx, upb_ServiceDef, n);
   for (int i = 0; i < n; i++) {
     create_service(ctx, protos[i], parent_features, &s[i]);
     s[i].index = i;
@@ -17705,6 +18239,7 @@ google_protobuf_ServiceDescriptorProto* upb_ServiceDef_ToProto(const upb_Service
 #undef UPB_SIZE
 #undef UPB_PTR_AT
 #undef UPB_SIZEOF_FLEX
+#undef UPB_SIZEOF_FLEX_WOULD_OVERFLOW
 #undef UPB_MAPTYPE_STRING
 #undef UPB_EXPORT
 #undef UPB_INLINE