@lodestar/beacon-node 1.42.0-dev.d73eccfa56 → 1.42.0-dev.e3f53019a0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -15,12 +15,12 @@ export async function validateGossipSyncCommittee(
15
15
  chain: IBeaconChain,
16
16
  syncCommittee: altair.SyncCommitteeMessage,
17
17
  subnet: SubnetID
18
- ): Promise<{indexInSubcommittee: IndexInSubcommittee}> {
18
+ ): Promise<{indicesInSubcommittee: IndexInSubcommittee[]}> {
19
19
  const {slot, validatorIndex, beaconBlockRoot} = syncCommittee;
20
20
  const messageRoot = toRootHex(beaconBlockRoot);
21
21
 
22
22
  const headState = chain.getHeadState();
23
- const indexInSubcommittee = validateGossipSyncCommitteeExceptSig(chain, headState, subnet, syncCommittee);
23
+ const indicesInSubcommittee = validateGossipSyncCommitteeExceptSig(chain, headState, subnet, syncCommittee);
24
24
 
25
25
  // [IGNORE] The signature's slot is for the current slot, i.e. sync_committee_signature.slot == current_slot.
26
26
  // > Checked in validateGossipSyncCommitteeExceptSig()
@@ -68,7 +68,7 @@ export async function validateGossipSyncCommittee(
68
68
  // Register this valid item as seen
69
69
  chain.seenSyncCommitteeMessages.add(slot, subnet, validatorIndex, messageRoot);
70
70
 
71
- return {indexInSubcommittee};
71
+ return {indicesInSubcommittee};
72
72
  }
73
73
 
74
74
  export async function validateApiSyncCommittee(
@@ -105,7 +105,7 @@ export function validateGossipSyncCommitteeExceptSig(
105
105
  headState: CachedBeaconStateAllForks,
106
106
  subnet: SubnetID,
107
107
  data: Pick<altair.SyncCommitteeMessage, "slot" | "validatorIndex">
108
- ): IndexInSubcommittee {
108
+ ): IndexInSubcommittee[] {
109
109
  const {slot, validatorIndex} = data;
110
110
  // [IGNORE] The signature's slot is for the current slot, i.e. sync_committee_signature.slot == current_slot.
111
111
  // (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
@@ -127,26 +127,27 @@ export function validateGossipSyncCommitteeExceptSig(
127
127
 
128
128
  // [REJECT] The subnet_id is valid for the given validator, i.e. subnet_id in compute_subnets_for_sync_committee(state, sync_committee_signature.validator_index).
129
129
  // Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee.
130
- const indexInSubcommittee = getIndexInSubcommittee(headState, subnet, data);
131
- if (indexInSubcommittee === null) {
130
+ const indicesInSubcommittee = getIndicesInSubcommittee(headState, subnet, data);
131
+ if (indicesInSubcommittee === null) {
132
132
  throw new SyncCommitteeError(GossipAction.REJECT, {
133
133
  code: SyncCommitteeErrorCode.VALIDATOR_NOT_IN_SYNC_COMMITTEE,
134
134
  validatorIndex,
135
135
  });
136
136
  }
137
137
 
138
- return indexInSubcommittee;
138
+ return indicesInSubcommittee;
139
139
  }
140
140
 
141
141
  /**
142
- * Returns the IndexInSubcommittee of the given `subnet`.
143
- * Returns `null` if not part of the sync committee or not part of the given `subnet`
142
+ * Returns all IndexInSubcommittee positions of the given `subnet`.
143
+ * Returns `null` if not part of the sync committee or not part of the given `subnet`.
144
+ * A validator may appear multiple times in the same subcommittee.
144
145
  */
145
- function getIndexInSubcommittee(
146
+ function getIndicesInSubcommittee(
146
147
  headState: CachedBeaconStateAllForks,
147
148
  subnet: SubnetID,
148
149
  data: Pick<altair.SyncCommitteeMessage, "slot" | "validatorIndex">
149
- ): IndexInSubcommittee | null {
150
+ ): IndexInSubcommittee[] | null {
150
151
  const syncCommittee = headState.epochCtx.getIndexedSyncCommittee(data.slot);
151
152
  const indexesInCommittee = syncCommittee.validatorIndexMap.get(data.validatorIndex);
152
153
  if (indexesInCommittee === undefined) {
@@ -154,12 +155,12 @@ function getIndexInSubcommittee(
154
155
  return null;
155
156
  }
156
157
 
158
+ const indices: IndexInSubcommittee[] = [];
157
159
  for (const indexInCommittee of indexesInCommittee) {
158
160
  if (Math.floor(indexInCommittee / SYNC_COMMITTEE_SUBNET_SIZE) === subnet) {
159
- return indexInCommittee % SYNC_COMMITTEE_SUBNET_SIZE;
161
+ indices.push(indexInCommittee % SYNC_COMMITTEE_SUBNET_SIZE);
160
162
  }
161
163
  }
162
164
 
163
- // Not part of this specific subnet
164
- return null;
165
+ return indices.length > 0 ? indices : null;
165
166
  }
@@ -24,12 +24,28 @@ const decoder = new snappyWasm.Decoder();
24
24
  // Shared buffer to convert msgId to string
25
25
  const sharedMsgIdBuf = Buffer.alloc(20);
26
26
 
27
+ // Cache topic -> seed to avoid per-message allocations on the hot path.
28
+ // Topics are a fixed set per fork (changes only at fork boundaries).
29
+ const topicSeedCache = new Map<string, bigint>();
30
+
27
31
  /**
28
32
  * The function used to generate a gossipsub message id
29
33
  * We use the first 8 bytes of SHA256(data) for content addressing
30
34
  */
31
35
  export function fastMsgIdFn(rpcMsg: RPC.Message): string {
32
36
  if (rpcMsg.data) {
37
+ if (rpcMsg.topic) {
38
+ // Use topic-derived seed to prevent cross-topic deduplication of identical messages.
39
+ // SyncCommitteeMessages are published to multiple sync_committee_{subnet} topics with
40
+ // identical data, so hashing only the data incorrectly deduplicates across subnets.
41
+ // See https://github.com/ChainSafe/lodestar/issues/8294
42
+ let topicSeed = topicSeedCache.get(rpcMsg.topic);
43
+ if (topicSeed === undefined) {
44
+ topicSeed = xxhash.h64Raw(Buffer.from(rpcMsg.topic), h64Seed);
45
+ topicSeedCache.set(rpcMsg.topic, topicSeed);
46
+ }
47
+ return xxhash.h64Raw(rpcMsg.data, topicSeed).toString(16);
48
+ }
33
49
  return xxhash.h64Raw(rpcMsg.data, h64Seed).toString(16);
34
50
  }
35
51
  return "0000000000000000";
@@ -777,9 +777,9 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
777
777
  const {serializedData} = gossipData;
778
778
  const syncCommittee = sszDeserialize(topic, serializedData);
779
779
  const {subnet} = topic;
780
- let indexInSubcommittee = 0;
780
+ let indicesInSubcommittee: number[] = [0];
781
781
  try {
782
- indexInSubcommittee = (await validateGossipSyncCommittee(chain, syncCommittee, subnet)).indexInSubcommittee;
782
+ indicesInSubcommittee = (await validateGossipSyncCommittee(chain, syncCommittee, subnet)).indicesInSubcommittee;
783
783
  } catch (e) {
784
784
  if (e instanceof SyncCommitteeError && e.action === GossipAction.REJECT) {
785
785
  chain.persistInvalidSszValue(ssz.altair.SyncCommitteeMessage, syncCommittee, "gossip_reject");
@@ -787,11 +787,12 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
787
787
  throw e;
788
788
  }
789
789
 
790
- // Handler
791
-
790
+ // Handler — add for ALL positions this validator holds in the subcommittee
792
791
  try {
793
- const insertOutcome = chain.syncCommitteeMessagePool.add(subnet, syncCommittee, indexInSubcommittee);
794
- metrics?.opPool.syncCommitteeMessagePoolInsertOutcome.inc({insertOutcome});
792
+ for (const indexInSubcommittee of indicesInSubcommittee) {
793
+ const insertOutcome = chain.syncCommitteeMessagePool.add(subnet, syncCommittee, indexInSubcommittee);
794
+ metrics?.opPool.syncCommitteeMessagePoolInsertOutcome.inc({insertOutcome});
795
+ }
795
796
  } catch (e) {
796
797
  logger.debug("Error adding to syncCommittee pool", {subnet}, e as Error);
797
798
  }
@@ -854,7 +855,7 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
854
855
 
855
856
  if (!payloadInput) {
856
857
  // This shouldn't happen because beacon block should have been imported and thus payload input should have been created.
857
- throw new ExecutionPayloadEnvelopeError(GossipAction.REJECT, {
858
+ throw new ExecutionPayloadEnvelopeError(GossipAction.IGNORE, {
858
859
  code: ExecutionPayloadEnvelopeErrorCode.PAYLOAD_ENVELOPE_INPUT_MISSING,
859
860
  blockRoot: blockRootHex,
860
861
  });
@@ -481,7 +481,7 @@ export class BlockInputSync {
481
481
  * From a set of shuffled peers:
482
482
  * - fetch the block
483
483
  * - from deneb, fetch all missing blobs
484
- * - from peerDAS, fetch sampled colmns
484
+ * - from peerDAS, fetch sampled columns
485
485
  * TODO: this means we only have block root, and nothing else. Consider to reflect this in the function name
486
486
  * prefulu, will attempt a max of `MAX_ATTEMPTS_PER_BLOCK` on different peers, postfulu we may attempt more as defined in `getMaxDownloadAttempts()` function
487
487
  * Also verifies the received block root + returns the peer that provided the block for future downscoring.
@@ -489,10 +489,7 @@ export class BlockInputSync {
489
489
  private async fetchBlockInput(cacheItem: BlockInputSyncCacheItem): Promise<PendingBlockInput> {
490
490
  const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem);
491
491
  const excludedPeers = new Set<PeerIdStr>();
492
- const defaultPendingColumns =
493
- this.config.getForkSeq(this.chain.clock.currentSlot) >= ForkSeq.fulu
494
- ? new Set(this.network.custodyConfig.sampledColumns)
495
- : null;
492
+ const defaultPendingColumns = new Set(this.network.custodyConfig.sampledColumns);
496
493
 
497
494
  const fetchStartSec = Date.now() / 1000;
498
495
  let slot = isPendingBlockInput(cacheItem) ? cacheItem.blockInput.slot : undefined;
@@ -506,14 +503,10 @@ export class BlockInputSync {
506
503
  isPendingBlockInput(cacheItem) && isBlockInputColumns(cacheItem.blockInput)
507
504
  ? new Set(cacheItem.blockInput.getMissingSampledColumnMeta().missing)
508
505
  : defaultPendingColumns;
509
- // pendingDataColumns is null pre-fulu
510
506
  const peerMeta = this.peerBalancer.bestPeerForPendingColumns(pendingColumns, excludedPeers);
511
507
  if (peerMeta === null) {
512
508
  // no more peer with needed columns to try, throw error
513
- let message = `Error fetching UnknownBlockRoot slot=${slot} root=${rootHex} after ${i}: cannot find peer`;
514
- if (pendingColumns) {
515
- message += ` with needed columns=${prettyPrintIndices(Array.from(pendingColumns))}`;
516
- }
509
+ const message = `Error fetching UnknownBlockRoot slot=${slot} root=${rootHex} after ${i}: cannot find peer with needed columns=${prettyPrintIndices(Array.from(pendingColumns))}`;
517
510
  this.metrics?.blockInputSync.fetchTimeSec.observe(
518
511
  {result: FetchResult.FailureTriedAllPeers},
519
512
  Date.now() / 1000 - fetchStartSec
@@ -650,7 +643,7 @@ export class BlockInputSync {
650
643
  // TODO(fulu): why is this commented out here?
651
644
  //
652
645
  // this.knownBadBlocks.add(block.blockRootHex);
653
- // for (const peerIdStr of block.peerIdStrs) {
646
+ // for (const peerIdStr of block.peerIdStrings) {
654
647
  // // TODO: Refactor peerRpcScores to work with peerIdStr only
655
648
  // this.network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "BadBlockByRoot");
656
649
  // }
@@ -729,11 +722,11 @@ export class UnknownBlockPeerBalancer {
729
722
  }
730
723
 
731
724
  /**
732
- * called from fetchUnknownBlockRoot() where we only have block root and nothing else
725
+ * called from fetchBlockInput() where we only have block root and nothing else
733
726
  * excludedPeers are the peers that we requested already so we don't want to try again
734
727
  pendingColumns is empty for prefulu, or the 1st time we download a block by root
735
728
  */
736
- bestPeerForPendingColumns(pendingColumns: Set<number> | null, excludedPeers: Set<PeerIdStr>): PeerSyncMeta | null {
729
+ bestPeerForPendingColumns(pendingColumns: Set<number>, excludedPeers: Set<PeerIdStr>): PeerSyncMeta | null {
737
730
  const eligiblePeers = this.filterPeers(pendingColumns, excludedPeers);
738
731
  if (eligiblePeers.length === 0) {
739
732
  return null;
@@ -750,37 +743,6 @@ export class UnknownBlockPeerBalancer {
750
743
  return this.peersMeta.get(bestPeerId) ?? null;
751
744
  }
752
745
 
753
- /**
754
- * called from fetchUnavailableBlockInput() where we have either BlockInput or NullBlockInput
755
- * excludedPeers are the peers that we requested already so we don't want to try again
756
- */
757
- bestPeerForBlockInput(blockInput: IBlockInput, excludedPeers: Set<PeerIdStr>): PeerSyncMeta | null {
758
- const eligiblePeers: PeerIdStr[] = [];
759
-
760
- if (isBlockInputColumns(blockInput)) {
761
- const pendingDataColumns: Set<number> = new Set(blockInput.getMissingSampledColumnMeta().missing);
762
- // there could be no pending column in case when block is still missing
763
- eligiblePeers.push(...this.filterPeers(pendingDataColumns, excludedPeers));
764
- } else {
765
- // prefulu
766
- eligiblePeers.push(...this.filterPeers(null, excludedPeers));
767
- }
768
-
769
- if (eligiblePeers.length === 0) {
770
- return null;
771
- }
772
-
773
- const sortedEligiblePeers = sortBy(
774
- shuffle(eligiblePeers),
775
- // prefer peers with least active req
776
- (peerId) => this.activeRequests.get(peerId) ?? 0
777
- );
778
-
779
- const bestPeerId = sortedEligiblePeers[0];
780
- this.onRequest(bestPeerId);
781
- return this.peersMeta.get(bestPeerId) ?? null;
782
- }
783
-
784
746
  /**
785
747
  * Consumers don't need to call this method directly, it is called internally by bestPeer*() methods
786
748
  * make this public for testing
@@ -804,8 +766,7 @@ export class UnknownBlockPeerBalancer {
804
766
  return totalActiveRequests;
805
767
  }
806
768
 
807
- // pendingDataColumns could be null for prefulu
808
- private filterPeers(pendingDataColumns: Set<number> | null, excludedPeers: Set<PeerIdStr>): PeerIdStr[] {
769
+ private filterPeers(pendingDataColumns: Set<number>, excludedPeers: Set<PeerIdStr>): PeerIdStr[] {
809
770
  let maxColumnCount = 0;
810
771
  const considerPeers: {peerId: PeerIdStr; columnCount: number}[] = [];
811
772
  for (const [peerId, syncMeta] of this.peersMeta.entries()) {
@@ -820,13 +781,12 @@ export class UnknownBlockPeerBalancer {
820
781
  continue;
821
782
  }
822
783
 
823
- if (pendingDataColumns === null || pendingDataColumns.size === 0) {
824
- // prefulu, no pending columns
784
+ if (pendingDataColumns.size === 0) {
825
785
  considerPeers.push({peerId, columnCount: 0});
826
786
  continue;
827
787
  }
828
788
 
829
- // postfulu, find peers that have custody columns that we need
789
+ // find peers that have custody columns that we need
830
790
  const {custodyColumns: peerColumns} = syncMeta;
831
791
  // check if the peer has all needed columns
832
792
  // get match