@lodestar/beacon-node 1.42.0-dev.1d50253953 → 1.42.0-dev.5007d8c6d6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -24,12 +24,28 @@ const decoder = new snappyWasm.Decoder();
24
24
  // Shared buffer to convert msgId to string
25
25
  const sharedMsgIdBuf = Buffer.alloc(20);
26
26
 
27
+ // Cache topic -> seed to avoid per-message allocations on the hot path.
28
+ // Topics are a fixed set per fork (changes only at fork boundaries).
29
+ const topicSeedCache = new Map<string, bigint>();
30
+
27
31
  /**
28
32
  * The function used to generate a gossipsub message id
29
33
  * We use the first 8 bytes of SHA256(data) for content addressing
30
34
  */
31
35
  export function fastMsgIdFn(rpcMsg: RPC.Message): string {
32
36
  if (rpcMsg.data) {
37
+ if (rpcMsg.topic) {
38
+ // Use topic-derived seed to prevent cross-topic deduplication of identical messages.
39
+ // SyncCommitteeMessages are published to multiple sync_committee_{subnet} topics with
40
+ // identical data, so hashing only the data incorrectly deduplicates across subnets.
41
+ // See https://github.com/ChainSafe/lodestar/issues/8294
42
+ let topicSeed = topicSeedCache.get(rpcMsg.topic);
43
+ if (topicSeed === undefined) {
44
+ topicSeed = xxhash.h64Raw(Buffer.from(rpcMsg.topic), h64Seed);
45
+ topicSeedCache.set(rpcMsg.topic, topicSeed);
46
+ }
47
+ return xxhash.h64Raw(rpcMsg.data, topicSeed).toString(16);
48
+ }
33
49
  return xxhash.h64Raw(rpcMsg.data, h64Seed).toString(16);
34
50
  }
35
51
  return "0000000000000000";
@@ -777,9 +777,9 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
777
777
  const {serializedData} = gossipData;
778
778
  const syncCommittee = sszDeserialize(topic, serializedData);
779
779
  const {subnet} = topic;
780
- let indexInSubcommittee = 0;
780
+ let indicesInSubcommittee: number[] = [0];
781
781
  try {
782
- indexInSubcommittee = (await validateGossipSyncCommittee(chain, syncCommittee, subnet)).indexInSubcommittee;
782
+ indicesInSubcommittee = (await validateGossipSyncCommittee(chain, syncCommittee, subnet)).indicesInSubcommittee;
783
783
  } catch (e) {
784
784
  if (e instanceof SyncCommitteeError && e.action === GossipAction.REJECT) {
785
785
  chain.persistInvalidSszValue(ssz.altair.SyncCommitteeMessage, syncCommittee, "gossip_reject");
@@ -787,11 +787,12 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
787
787
  throw e;
788
788
  }
789
789
 
790
- // Handler
791
-
790
+ // Handler — add for ALL positions this validator holds in the subcommittee
792
791
  try {
793
- const insertOutcome = chain.syncCommitteeMessagePool.add(subnet, syncCommittee, indexInSubcommittee);
794
- metrics?.opPool.syncCommitteeMessagePoolInsertOutcome.inc({insertOutcome});
792
+ for (const indexInSubcommittee of indicesInSubcommittee) {
793
+ const insertOutcome = chain.syncCommitteeMessagePool.add(subnet, syncCommittee, indexInSubcommittee);
794
+ metrics?.opPool.syncCommitteeMessagePoolInsertOutcome.inc({insertOutcome});
795
+ }
795
796
  } catch (e) {
796
797
  logger.debug("Error adding to syncCommittee pool", {subnet}, e as Error);
797
798
  }
@@ -481,7 +481,7 @@ export class BlockInputSync {
481
481
  * From a set of shuffled peers:
482
482
  * - fetch the block
483
483
  * - from deneb, fetch all missing blobs
484
- * - from peerDAS, fetch sampled colmns
484
+ * - from peerDAS, fetch sampled columns
485
485
  * TODO: this means we only have block root, and nothing else. Consider to reflect this in the function name
486
486
  * prefulu, will attempt a max of `MAX_ATTEMPTS_PER_BLOCK` on different peers, postfulu we may attempt more as defined in `getMaxDownloadAttempts()` function
487
487
  * Also verifies the received block root + returns the peer that provided the block for future downscoring.
@@ -489,10 +489,7 @@ export class BlockInputSync {
489
489
  private async fetchBlockInput(cacheItem: BlockInputSyncCacheItem): Promise<PendingBlockInput> {
490
490
  const rootHex = getBlockInputSyncCacheItemRootHex(cacheItem);
491
491
  const excludedPeers = new Set<PeerIdStr>();
492
- const defaultPendingColumns =
493
- this.config.getForkSeq(this.chain.clock.currentSlot) >= ForkSeq.fulu
494
- ? new Set(this.network.custodyConfig.sampledColumns)
495
- : null;
492
+ const defaultPendingColumns = new Set(this.network.custodyConfig.sampledColumns);
496
493
 
497
494
  const fetchStartSec = Date.now() / 1000;
498
495
  let slot = isPendingBlockInput(cacheItem) ? cacheItem.blockInput.slot : undefined;
@@ -506,14 +503,10 @@ export class BlockInputSync {
506
503
  isPendingBlockInput(cacheItem) && isBlockInputColumns(cacheItem.blockInput)
507
504
  ? new Set(cacheItem.blockInput.getMissingSampledColumnMeta().missing)
508
505
  : defaultPendingColumns;
509
- // pendingDataColumns is null pre-fulu
510
506
  const peerMeta = this.peerBalancer.bestPeerForPendingColumns(pendingColumns, excludedPeers);
511
507
  if (peerMeta === null) {
512
508
  // no more peer with needed columns to try, throw error
513
- let message = `Error fetching UnknownBlockRoot slot=${slot} root=${rootHex} after ${i}: cannot find peer`;
514
- if (pendingColumns) {
515
- message += ` with needed columns=${prettyPrintIndices(Array.from(pendingColumns))}`;
516
- }
509
+ const message = `Error fetching UnknownBlockRoot slot=${slot} root=${rootHex} after ${i}: cannot find peer with needed columns=${prettyPrintIndices(Array.from(pendingColumns))}`;
517
510
  this.metrics?.blockInputSync.fetchTimeSec.observe(
518
511
  {result: FetchResult.FailureTriedAllPeers},
519
512
  Date.now() / 1000 - fetchStartSec
@@ -650,7 +643,7 @@ export class BlockInputSync {
650
643
  // TODO(fulu): why is this commented out here?
651
644
  //
652
645
  // this.knownBadBlocks.add(block.blockRootHex);
653
- // for (const peerIdStr of block.peerIdStrs) {
646
+ // for (const peerIdStr of block.peerIdStrings) {
654
647
  // // TODO: Refactor peerRpcScores to work with peerIdStr only
655
648
  // this.network.reportPeer(peerIdStr, PeerAction.LowToleranceError, "BadBlockByRoot");
656
649
  // }
@@ -729,11 +722,11 @@ export class UnknownBlockPeerBalancer {
729
722
  }
730
723
 
731
724
  /**
732
- * called from fetchUnknownBlockRoot() where we only have block root and nothing else
725
+ * called from fetchBlockInput() where we only have block root and nothing else
733
726
  * excludedPeers are the peers that we requested already so we don't want to try again
734
727
  * pendingColumns is empty for prefulu, or the 1st time we download a block by root
735
728
  */
736
- bestPeerForPendingColumns(pendingColumns: Set<number> | null, excludedPeers: Set<PeerIdStr>): PeerSyncMeta | null {
729
+ bestPeerForPendingColumns(pendingColumns: Set<number>, excludedPeers: Set<PeerIdStr>): PeerSyncMeta | null {
737
730
  const eligiblePeers = this.filterPeers(pendingColumns, excludedPeers);
738
731
  if (eligiblePeers.length === 0) {
739
732
  return null;
@@ -750,37 +743,6 @@ export class UnknownBlockPeerBalancer {
750
743
  return this.peersMeta.get(bestPeerId) ?? null;
751
744
  }
752
745
 
753
- /**
754
- * called from fetchUnavailableBlockInput() where we have either BlockInput or NullBlockInput
755
- * excludedPeers are the peers that we requested already so we don't want to try again
756
- */
757
- bestPeerForBlockInput(blockInput: IBlockInput, excludedPeers: Set<PeerIdStr>): PeerSyncMeta | null {
758
- const eligiblePeers: PeerIdStr[] = [];
759
-
760
- if (isBlockInputColumns(blockInput)) {
761
- const pendingDataColumns: Set<number> = new Set(blockInput.getMissingSampledColumnMeta().missing);
762
- // there could be no pending column in case when block is still missing
763
- eligiblePeers.push(...this.filterPeers(pendingDataColumns, excludedPeers));
764
- } else {
765
- // prefulu
766
- eligiblePeers.push(...this.filterPeers(null, excludedPeers));
767
- }
768
-
769
- if (eligiblePeers.length === 0) {
770
- return null;
771
- }
772
-
773
- const sortedEligiblePeers = sortBy(
774
- shuffle(eligiblePeers),
775
- // prefer peers with least active req
776
- (peerId) => this.activeRequests.get(peerId) ?? 0
777
- );
778
-
779
- const bestPeerId = sortedEligiblePeers[0];
780
- this.onRequest(bestPeerId);
781
- return this.peersMeta.get(bestPeerId) ?? null;
782
- }
783
-
784
746
  /**
785
747
  * Consumers don't need to call this method directly, it is called internally by bestPeer*() methods
786
748
  * make this public for testing
@@ -804,8 +766,7 @@ export class UnknownBlockPeerBalancer {
804
766
  return totalActiveRequests;
805
767
  }
806
768
 
807
- // pendingDataColumns could be null for prefulu
808
- private filterPeers(pendingDataColumns: Set<number> | null, excludedPeers: Set<PeerIdStr>): PeerIdStr[] {
769
+ private filterPeers(pendingDataColumns: Set<number>, excludedPeers: Set<PeerIdStr>): PeerIdStr[] {
809
770
  let maxColumnCount = 0;
810
771
  const considerPeers: {peerId: PeerIdStr; columnCount: number}[] = [];
811
772
  for (const [peerId, syncMeta] of this.peersMeta.entries()) {
@@ -820,13 +781,12 @@ export class UnknownBlockPeerBalancer {
820
781
  continue;
821
782
  }
822
783
 
823
- if (pendingDataColumns === null || pendingDataColumns.size === 0) {
824
- // prefulu, no pending columns
784
+ if (pendingDataColumns.size === 0) {
825
785
  considerPeers.push({peerId, columnCount: 0});
826
786
  continue;
827
787
  }
828
788
 
829
- // postfulu, find peers that have custody columns that we need
789
+ // find peers that have custody columns that we need
830
790
  const {custodyColumns: peerColumns} = syncMeta;
831
791
  // check if the peer has all needed columns
832
792
  // get match