@lodestar/beacon-node 1.43.0-dev.d166e3b6f7 → 1.43.0-dev.dfb984e779

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/lib/api/impl/beacon/blocks/index.d.ts.map +1 -1
  2. package/lib/api/impl/beacon/blocks/index.js +3 -2
  3. package/lib/api/impl/beacon/blocks/index.js.map +1 -1
  4. package/lib/api/impl/lodestar/index.js +1 -1
  5. package/lib/api/impl/lodestar/index.js.map +1 -1
  6. package/lib/chain/blocks/importBlock.d.ts.map +1 -1
  7. package/lib/chain/blocks/importBlock.js +6 -3
  8. package/lib/chain/blocks/importBlock.js.map +1 -1
  9. package/lib/chain/blocks/importExecutionPayload.d.ts +19 -8
  10. package/lib/chain/blocks/importExecutionPayload.d.ts.map +1 -1
  11. package/lib/chain/blocks/importExecutionPayload.js +31 -20
  12. package/lib/chain/blocks/importExecutionPayload.js.map +1 -1
  13. package/lib/chain/blocks/index.d.ts +5 -3
  14. package/lib/chain/blocks/index.d.ts.map +1 -1
  15. package/lib/chain/blocks/index.js +28 -9
  16. package/lib/chain/blocks/index.js.map +1 -1
  17. package/lib/chain/blocks/payloadEnvelopeProcessor.js +2 -2
  18. package/lib/chain/blocks/payloadEnvelopeProcessor.js.map +1 -1
  19. package/lib/chain/blocks/types.d.ts +2 -2
  20. package/lib/chain/blocks/types.d.ts.map +1 -1
  21. package/lib/chain/blocks/utils/chainSegment.d.ts +23 -2
  22. package/lib/chain/blocks/utils/chainSegment.d.ts.map +1 -1
  23. package/lib/chain/blocks/utils/chainSegment.js +81 -12
  24. package/lib/chain/blocks/utils/chainSegment.js.map +1 -1
  25. package/lib/chain/blocks/verifyBlock.d.ts +3 -2
  26. package/lib/chain/blocks/verifyBlock.d.ts.map +1 -1
  27. package/lib/chain/blocks/verifyBlock.js +30 -5
  28. package/lib/chain/blocks/verifyBlock.js.map +1 -1
  29. package/lib/chain/blocks/verifyBlocksSanityChecks.d.ts.map +1 -1
  30. package/lib/chain/blocks/verifyBlocksSanityChecks.js +15 -4
  31. package/lib/chain/blocks/verifyBlocksSanityChecks.js.map +1 -1
  32. package/lib/chain/blocks/verifyExecutionPayloadEnvelope.js +2 -2
  33. package/lib/chain/blocks/verifyExecutionPayloadEnvelope.js.map +1 -1
  34. package/lib/chain/chain.d.ts +1 -1
  35. package/lib/chain/chain.d.ts.map +1 -1
  36. package/lib/chain/chain.js +7 -3
  37. package/lib/chain/chain.js.map +1 -1
  38. package/lib/chain/errors/blockError.d.ts +8 -1
  39. package/lib/chain/errors/blockError.d.ts.map +1 -1
  40. package/lib/chain/errors/blockError.js +2 -0
  41. package/lib/chain/errors/blockError.js.map +1 -1
  42. package/lib/chain/interface.d.ts +1 -1
  43. package/lib/chain/interface.d.ts.map +1 -1
  44. package/lib/chain/produceBlock/produceBlockBody.d.ts.map +1 -1
  45. package/lib/chain/produceBlock/produceBlockBody.js +8 -2
  46. package/lib/chain/produceBlock/produceBlockBody.js.map +1 -1
  47. package/lib/chain/validation/block.d.ts.map +1 -1
  48. package/lib/chain/validation/block.js +1 -0
  49. package/lib/chain/validation/block.js.map +1 -1
  50. package/lib/metrics/metrics/lodestar.d.ts +1 -0
  51. package/lib/metrics/metrics/lodestar.d.ts.map +1 -1
  52. package/lib/metrics/metrics/lodestar.js +4 -0
  53. package/lib/metrics/metrics/lodestar.js.map +1 -1
  54. package/lib/network/processor/gossipHandlers.js +4 -6
  55. package/lib/network/processor/gossipHandlers.js.map +1 -1
  56. package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts.map +1 -1
  57. package/lib/network/reqresp/handlers/beaconBlocksByRange.js +14 -6
  58. package/lib/network/reqresp/handlers/beaconBlocksByRange.js.map +1 -1
  59. package/lib/network/reqresp/handlers/blobSidecarsByRange.d.ts.map +1 -1
  60. package/lib/network/reqresp/handlers/blobSidecarsByRange.js +11 -5
  61. package/lib/network/reqresp/handlers/blobSidecarsByRange.js.map +1 -1
  62. package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts.map +1 -1
  63. package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js +17 -5
  64. package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js.map +1 -1
  65. package/lib/network/reqresp/handlers/executionPayloadEnvelopesByRange.d.ts.map +1 -1
  66. package/lib/network/reqresp/handlers/executionPayloadEnvelopesByRange.js +7 -4
  67. package/lib/network/reqresp/handlers/executionPayloadEnvelopesByRange.js.map +1 -1
  68. package/lib/node/notifier.js +7 -1
  69. package/lib/node/notifier.js.map +1 -1
  70. package/lib/sync/range/batch.d.ts +12 -2
  71. package/lib/sync/range/batch.d.ts.map +1 -1
  72. package/lib/sync/range/batch.js +56 -30
  73. package/lib/sync/range/batch.js.map +1 -1
  74. package/lib/sync/range/chain.d.ts +6 -2
  75. package/lib/sync/range/chain.d.ts.map +1 -1
  76. package/lib/sync/range/chain.js +4 -3
  77. package/lib/sync/range/chain.js.map +1 -1
  78. package/lib/sync/range/range.d.ts.map +1 -1
  79. package/lib/sync/range/range.js +17 -6
  80. package/lib/sync/range/range.js.map +1 -1
  81. package/lib/sync/types.d.ts +34 -0
  82. package/lib/sync/types.d.ts.map +1 -1
  83. package/lib/sync/types.js +34 -0
  84. package/lib/sync/types.js.map +1 -1
  85. package/lib/sync/unknownBlock.d.ts +24 -1
  86. package/lib/sync/unknownBlock.d.ts.map +1 -1
  87. package/lib/sync/unknownBlock.js +649 -53
  88. package/lib/sync/unknownBlock.js.map +1 -1
  89. package/lib/sync/utils/downloadByRange.d.ts +46 -10
  90. package/lib/sync/utils/downloadByRange.d.ts.map +1 -1
  91. package/lib/sync/utils/downloadByRange.js +147 -24
  92. package/lib/sync/utils/downloadByRange.js.map +1 -1
  93. package/lib/sync/utils/downloadByRoot.d.ts.map +1 -1
  94. package/lib/sync/utils/downloadByRoot.js +6 -2
  95. package/lib/sync/utils/downloadByRoot.js.map +1 -1
  96. package/lib/sync/utils/pendingBlocksTree.d.ts +0 -1
  97. package/lib/sync/utils/pendingBlocksTree.d.ts.map +1 -1
  98. package/lib/sync/utils/pendingBlocksTree.js +0 -9
  99. package/lib/sync/utils/pendingBlocksTree.js.map +1 -1
  100. package/package.json +15 -15
  101. package/src/api/impl/beacon/blocks/index.ts +5 -2
  102. package/src/api/impl/lodestar/index.ts +1 -1
  103. package/src/chain/blocks/importBlock.ts +4 -2
  104. package/src/chain/blocks/importExecutionPayload.ts +36 -21
  105. package/src/chain/blocks/index.ts +44 -12
  106. package/src/chain/blocks/payloadEnvelopeProcessor.ts +2 -2
  107. package/src/chain/blocks/types.ts +2 -2
  108. package/src/chain/blocks/utils/chainSegment.ts +106 -17
  109. package/src/chain/blocks/verifyBlock.ts +35 -6
  110. package/src/chain/blocks/verifyBlocksSanityChecks.ts +16 -7
  111. package/src/chain/blocks/verifyExecutionPayloadEnvelope.ts +2 -2
  112. package/src/chain/chain.ts +11 -3
  113. package/src/chain/errors/blockError.ts +4 -1
  114. package/src/chain/interface.ts +5 -1
  115. package/src/chain/produceBlock/produceBlockBody.ts +8 -2
  116. package/src/chain/validation/block.ts +1 -0
  117. package/src/metrics/metrics/lodestar.ts +4 -0
  118. package/src/network/processor/gossipHandlers.ts +6 -6
  119. package/src/network/reqresp/handlers/beaconBlocksByRange.ts +14 -6
  120. package/src/network/reqresp/handlers/blobSidecarsByRange.ts +11 -5
  121. package/src/network/reqresp/handlers/dataColumnSidecarsByRange.ts +17 -5
  122. package/src/network/reqresp/handlers/executionPayloadEnvelopesByRange.ts +7 -4
  123. package/src/node/notifier.ts +8 -1
  124. package/src/sync/range/batch.ts +90 -35
  125. package/src/sync/range/chain.ts +13 -5
  126. package/src/sync/range/range.ts +18 -6
  127. package/src/sync/types.ts +72 -0
  128. package/src/sync/unknownBlock.ts +810 -57
  129. package/src/sync/utils/downloadByRange.ts +256 -39
  130. package/src/sync/utils/downloadByRoot.ts +12 -2
  131. package/src/sync/utils/pendingBlocksTree.ts +0 -15
@@ -49,7 +49,7 @@ import {
49
49
  ssz,
50
50
  } from "@lodestar/types";
51
51
  import {Logger, byteArrayEquals, fromHex, sleep, toHex, toPubkeyHex, toRootHex} from "@lodestar/utils";
52
- import {ZERO_HASH_HEX} from "../../constants/index.js";
52
+ import {ZERO_HASH, ZERO_HASH_HEX} from "../../constants/index.js";
53
53
  import {numToQuantity} from "../../execution/engine/utils.js";
54
54
  import {
55
55
  IExecutionBuilder,
@@ -215,9 +215,15 @@ export async function produceBlockBody<T extends BlockType>(
215
215
 
216
216
  // Get execution payload from EL
217
217
  const isExtendingPayload = this.forkChoice.shouldExtendPayload(toRootHex(parentBlockRoot));
218
- const parentBlockHash = isExtendingPayload
218
+ let parentBlockHash = isExtendingPayload
219
219
  ? currentState.latestExecutionPayloadBid.blockHash
220
220
  : currentState.latestExecutionPayloadBid.parentBlockHash;
221
+ // At gloas genesis the committed bid has no prior EL block to reference
222
+ // (`bid.parentBlockHash` is zero). Fall back to `bid.blockHash` (= eth1 genesis hash) so the
223
+ // FCU to the EL carries a valid head. Post-genesis bids always reference a non-zero parent.
224
+ if (isStatePostGloas(currentState) && byteArrayEquals(parentBlockHash, ZERO_HASH)) {
225
+ parentBlockHash = currentState.latestExecutionPayloadBid.blockHash;
226
+ }
221
227
  const parentExecutionRequests = isExtendingPayload
222
228
  ? await this.getParentExecutionRequests(parentBlock.slot, parentBlock.blockRoot)
223
229
  : ssz.electra.ExecutionRequests.defaultValue();
@@ -103,6 +103,7 @@ export async function validateGossipBlock(
103
103
  if (chain.forkChoice.getBlockHexAndBlockHash(parentRoot, parentBlockHashHex) === null) {
104
104
  throw new BlockGossipError(GossipAction.IGNORE, {
105
105
  code: BlockErrorCode.PARENT_PAYLOAD_UNKNOWN,
106
+ parentRoot,
106
107
  parentBlockHash: parentBlockHashHex,
107
108
  });
108
109
  }
@@ -613,6 +613,10 @@ export function createLodestarMetrics(
613
613
  name: "lodestar_sync_unknown_block_pending_blocks_size",
614
614
  help: "Current size of UnknownBlockSync pending blocks cache",
615
615
  }),
616
+ pendingPayloads: register.gauge({
617
+ name: "lodestar_sync_unknown_block_pending_payloads_size",
618
+ help: "Current size of UnknownBlockSync pending payloads cache",
619
+ }),
616
620
  knownBadBlocks: register.gauge({
617
621
  name: "lodestar_sync_unknown_block_known_bad_blocks_size",
618
622
  help: "Current size of UnknownBlockSync known bad blocks cache",
@@ -198,7 +198,10 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
198
198
  } catch (e) {
199
199
  if (e instanceof BlockGossipError) {
200
200
  logger.debug("Gossip block has error", {slot, root: blockShortHex, code: e.type.code});
201
- if (e.type.code === BlockErrorCode.PARENT_UNKNOWN && blockInput) {
201
+ if (
202
+ (e.type.code === BlockErrorCode.PARENT_UNKNOWN || e.type.code === BlockErrorCode.PARENT_PAYLOAD_UNKNOWN) &&
203
+ blockInput
204
+ ) {
202
205
  chain.emitter.emit(ChainEvent.blockUnknownParent, {
203
206
  blockInput,
204
207
  peer: peerIdStr,
@@ -1057,10 +1060,8 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
1057
1060
  const signedEnvelope = sszDeserialize(topic, serializedData);
1058
1061
  const envelope = signedEnvelope.message;
1059
1062
 
1060
- // TODO GLOAS: consider optimistically create PayloadEnvelopeInput here similar to how we do that for beacon_block
1061
- // so that UnknownBlockSync can handle backward sync
1062
- // the problem now is we cannot create a PayloadEnvelopeInput without the beacon block being known, we need at least the proposer index
1063
- // we can achieve that by looking into the EpochCache
1063
+ // unlike BlockInput, we send the envelope into UnknownBlockInput sync
1064
+ // inside the sync it'll reconcile into PayloadEnvelopeInput and share the same cache with gossip
1064
1065
  try {
1065
1066
  await validateGossipExecutionPayloadEnvelope(chain, signedEnvelope);
1066
1067
  } catch (e) {
@@ -1069,7 +1070,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
1069
1070
  const slot = signedEnvelope.message.payload.slotNumber;
1070
1071
  logger.debug("Gossip envelope has error", {slot, root: toRootHex(beaconBlockRoot), code: e.type.code});
1071
1072
  if (e.type.code === ExecutionPayloadEnvelopeErrorCode.BLOCK_ROOT_UNKNOWN) {
1072
- // TODO GLOAS: UnknownBlockSync to handle this
1073
1073
  chain.emitter.emit(ChainEvent.envelopeUnknownBlock, {
1074
1074
  envelope: signedEnvelope,
1075
1075
  peer: peerIdStr,
@@ -24,6 +24,10 @@ export async function* onBeaconBlocksByRange(
24
24
  // in the case of initializing from a non-finalized state, we don't have the finalized block so this api does not work
25
25
  // chain.forkChoice.getFinalizeBlock().slot
26
26
  const finalizedSlot = chain.forkChoice.getFinalizedCheckpointSlot();
27
+ // Blocks are migrated to blockArchive at finalization (including the finalized block itself),
28
+ // so the archive loop serves up to AND INCLUDING finalizedSlot and the headChain loop
29
+ // starts above it to avoid duplicate yields. See archiveBlocks.ts for the migration logic.
30
+ const archiveMaxSlot = finalizedSlot;
27
31
 
28
32
  const forkName = chain.config.getForkName(startSlot);
29
33
  if (isForkPostFulu(forkName) && startSlot < chain.earliestAvailableSlot) {
@@ -35,9 +39,12 @@ export async function* onBeaconBlocksByRange(
35
39
  }
36
40
 
37
41
  // Finalized range of blocks
38
- if (startSlot <= finalizedSlot) {
42
+ if (startSlot <= archiveMaxSlot) {
39
43
  // Chain of blobs won't change
40
- for await (const {key, value} of finalized.binaryEntriesStream({gte: startSlot, lt: endSlot})) {
44
+ for await (const {key, value} of finalized.binaryEntriesStream({
45
+ gte: startSlot,
46
+ lt: Math.min(endSlot, archiveMaxSlot + 1),
47
+ })) {
41
48
  yield {
42
49
  data: value,
43
50
  boundary: chain.config.getForkBoundaryAtEpoch(computeEpochAtSlot(finalized.decodeKey(key))),
@@ -46,19 +53,20 @@ export async function* onBeaconBlocksByRange(
46
53
  }
47
54
 
48
55
  // Non-finalized range of blocks
49
- if (endSlot > finalizedSlot) {
56
+ if (endSlot > archiveMaxSlot) {
50
57
  const headBlock = chain.forkChoice.getHead();
51
58
  const headRoot = headBlock.blockRoot;
52
59
  // TODO DENEB: forkChoice should maintain an array of canonical blocks, and change only on reorg
53
60
  const headChain = chain.forkChoice.getAllAncestorBlocks(headRoot, headBlock.payloadStatus);
54
- // getAllAncestorBlocks response includes the head node, so it's the full chain.
61
+ // `getAllAncestorBlocks` includes both the head and the previous-finalized boundary.
55
62
 
56
63
  // Iterate head chain with ascending block numbers
57
64
  for (let i = headChain.length - 1; i >= 0; i--) {
58
65
  const block = headChain[i];
59
66
 
60
- // Must include only blocks in the range requested
61
- if (block.slot >= startSlot && block.slot < endSlot) {
67
+ // Must include only blocks in the range requested, and skip anything the archive loop
68
+ // above already served via the block.slot > archiveMaxSlot filter.
69
+ if (block.slot > archiveMaxSlot && block.slot >= startSlot && block.slot < endSlot) {
62
70
  // Note: Here the forkChoice head may change due to a re-org, so the headChain reflects the canonical chain
63
71
  // at the time of the start of the request. Spec is clear the chain of blobs must be consistent, but on
64
72
  // re-org there's no need to abort the request
@@ -20,31 +20,37 @@ export async function* onBlobSidecarsByRange(
20
20
  const finalized = db.blobSidecarsArchive;
21
21
  const unfinalized = db.blobSidecars;
22
22
  const finalizedSlot = chain.forkChoice.getFinalizedBlock().slot;
23
+ // Blobs are migrated to blobSidecarsArchive at finalization (including the finalized block
24
+ // itself), so the archive loop serves up to AND INCLUDING finalizedSlot and the headChain
25
+ // loop starts above it to avoid duplicate yields. See archiveBlocks.ts for the migration logic.
26
+ const archiveMaxSlot = finalizedSlot;
23
27
 
24
28
  // Finalized range of blobs
25
- if (startSlot <= finalizedSlot) {
29
+ if (startSlot <= archiveMaxSlot) {
26
30
  // Chain of blobs won't change
27
31
  for await (const {key, value: blobSideCarsBytesWrapped} of finalized.binaryEntriesStream({
28
32
  gte: startSlot,
29
- lt: endSlot,
33
+ lt: Math.min(endSlot, archiveMaxSlot + 1),
30
34
  })) {
31
35
  yield* iterateBlobBytesFromWrapper(chain, blobSideCarsBytesWrapped, finalized.decodeKey(key));
32
36
  }
33
37
  }
34
38
 
35
39
  // Non-finalized range of blobs
36
- if (endSlot > finalizedSlot) {
40
+ if (endSlot > archiveMaxSlot) {
37
41
  const headBlock = chain.forkChoice.getHead();
38
42
  const headRoot = headBlock.blockRoot;
39
43
  // TODO DENEB: forkChoice should maintain an array of canonical blocks, and change only on reorg
40
44
  const headChain = chain.forkChoice.getAllAncestorBlocks(headRoot, headBlock.payloadStatus);
45
+ // `getAllAncestorBlocks` includes both the head and the previous-finalized boundary.
41
46
 
42
47
  // Iterate head chain with ascending block numbers
43
48
  for (let i = headChain.length - 1; i >= 0; i--) {
44
49
  const block = headChain[i];
45
50
 
46
- // Must include only blobs in the range requested
47
- if (block.slot >= startSlot && block.slot < endSlot) {
51
+ // Must include only blobs in the range requested, and skip anything the archive loop
52
+ // above already served via the block.slot > archiveMaxSlot filter.
53
+ if (block.slot > archiveMaxSlot && block.slot >= startSlot && block.slot < endSlot) {
48
54
  // Note: Here the forkChoice head may change due to a re-org, so the headChain reflects the canonical chain
49
55
  // at the time of the start of the request. Spec is clear the chain of blobs must be consistent, but on
50
56
  // re-org there's no need to abort the request
@@ -1,6 +1,6 @@
1
1
  import {PeerId} from "@libp2p/interface";
2
2
  import {ChainConfig} from "@lodestar/config";
3
- import {GENESIS_SLOT} from "@lodestar/params";
3
+ import {ForkSeq, GENESIS_SLOT} from "@lodestar/params";
4
4
  import {RespStatus, ResponseError, ResponseOutgoing} from "@lodestar/reqresp";
5
5
  import {computeEpochAtSlot} from "@lodestar/state-transition";
6
6
  import {ColumnIndex, Epoch, fulu} from "@lodestar/types";
@@ -43,10 +43,19 @@ export async function* onDataColumnSidecarsByRange(
43
43
 
44
44
  const finalized = db.dataColumnSidecarArchive;
45
45
  const finalizedSlot = chain.forkChoice.getFinalizedBlock().slot;
46
+ // Columns of the last finalized block live in different DBs depending on fork:
47
+ // - Pre-gloas (fulu): migrated to dataColumnSidecarArchive in the same finalization run.
48
+ // - Post-gloas: stay in the hot db (db.dataColumnSidecar) until the next finalization run,
49
+ // because the migration filter requires payloadStatus === FULL for gloas blocks.
50
+ // archiveMaxSlot is the last slot whose columns are served by the archive loop below;
51
+ // anything above it is served by the headChain loop.
52
+ const isPostGloasFinalized = chain.config.getForkSeq(finalizedSlot) >= ForkSeq.gloas;
53
+ const archiveMaxSlot = isPostGloasFinalized ? finalizedSlot - 1 : finalizedSlot;
46
54
 
47
55
  // Finalized range of columns
48
- if (startSlot <= finalizedSlot) {
49
- for (let slot = startSlot; slot < endSlot; slot++) {
56
+ if (startSlot <= archiveMaxSlot) {
57
+ const archiveEnd = Math.min(endSlot, archiveMaxSlot + 1);
58
+ for (let slot = startSlot; slot < archiveEnd; slot++) {
50
59
  const dataColumnSidecars = await finalized.getManyBinary(slot, availableColumns);
51
60
 
52
61
  const unavailableColumnIndices: ColumnIndex[] = [];
@@ -81,9 +90,12 @@ export async function* onDataColumnSidecarsByRange(
81
90
  }
82
91
 
83
92
  // Non-finalized range of columns
84
- if (endSlot > finalizedSlot) {
93
+ if (endSlot > archiveMaxSlot) {
85
94
  const headBlock = chain.forkChoice.getHead();
86
95
  const headRoot = headBlock.blockRoot;
96
+ // getAllAncestorBlocks includes the last finalized block as its final element.
97
+ // Skip anything the archive loop above already served via the block.slot > archiveMaxSlot
98
+ // filter below (pre-gloas this skips finalizedSlot, post-gloas it keeps it).
87
99
  const headChain = chain.forkChoice.getAllAncestorBlocks(headRoot, headBlock.payloadStatus);
88
100
 
89
101
  // Iterate head chain with ascending block numbers
@@ -91,7 +103,7 @@ export async function* onDataColumnSidecarsByRange(
91
103
  const block = headChain[i];
92
104
 
93
105
  // Must include only columns in the range requested
94
- if (block.slot >= startSlot && block.slot < endSlot) {
106
+ if (block.slot > archiveMaxSlot && block.slot >= startSlot && block.slot < endSlot) {
95
107
  // Note: Here the forkChoice head may change due to a re-org, so the headChain reflects the canonical chain
96
108
  // at the time of the start of the request. Spec is clear the chain of columns must be consistent, but on
97
109
  // re-org there's no need to abort the request
@@ -21,12 +21,15 @@ export async function* onExecutionPayloadEnvelopesByRange(
21
21
 
22
22
  const finalized = db.executionPayloadEnvelopeArchive;
23
23
  const finalizedSlot = chain.forkChoice.getFinalizedCheckpointSlot();
24
+ // The current finalized block's envelope is still in the hot db; archive migration happens
25
+ // in the next finalization run (see migrateExecutionPayloadEnvelopesFromHotToColdDb).
26
+ const archiveMaxSlot = finalizedSlot - 1;
24
27
 
25
28
  // Finalized range of envelopes
26
- if (startSlot <= finalizedSlot) {
29
+ if (startSlot <= archiveMaxSlot) {
27
30
  for await (const {key, value: envelopeBytes} of finalized.binaryEntriesStream({
28
31
  gte: startSlot,
29
- lt: endSlot,
32
+ lt: Math.min(endSlot, archiveMaxSlot + 1),
30
33
  })) {
31
34
  const slot = finalized.decodeKey(key);
32
35
  yield {
@@ -37,7 +40,7 @@ export async function* onExecutionPayloadEnvelopesByRange(
37
40
  }
38
41
 
39
42
  // Non-finalized range of envelopes
40
- if (endSlot > finalizedSlot) {
43
+ if (endSlot > archiveMaxSlot) {
41
44
  const headBlock = chain.forkChoice.getHead();
42
45
  const headRoot = headBlock.blockRoot;
43
46
  const headChain = chain.forkChoice.getAllAncestorBlocks(headRoot, headBlock.payloadStatus);
@@ -46,7 +49,7 @@ export async function* onExecutionPayloadEnvelopesByRange(
46
49
  for (let i = headChain.length - 1; i >= 0; i--) {
47
50
  const block = headChain[i];
48
51
 
49
- if (block.slot >= startSlot && block.slot < endSlot) {
52
+ if (block.slot > archiveMaxSlot && block.slot >= startSlot && block.slot < endSlot) {
50
53
  // Skip EMPTY blocks
51
54
  if (block.payloadStatus !== PayloadStatus.FULL) {
52
55
  continue;
@@ -167,7 +167,14 @@ function getHeadExecutionInfo(
167
167
  return [];
168
168
  }
169
169
 
170
- const executionStatusStr = headInfo.executionStatus.toLowerCase();
170
+ // A PayloadSeparated head is a gloas beacon block imported before its payload envelope
171
+ // arrives, in that case the exec-block row surfaces the inherited parent anchor (from the
172
+ // bid), which is already validated. Normalize to "valid" to avoid leaking internal
173
+ // fork-choice bookkeeping into the log. Once the payload envelope arrives and the FULL
174
+ // variant becomes head, executionStatus is Valid/Syncing naturally.
175
+ // TODO GLOAS: revisit once optimistic sync is implemented
176
+ const executionStatusStr =
177
+ headInfo.executionStatus === ExecutionStatus.PayloadSeparated ? "valid" : headInfo.executionStatus.toLowerCase();
171
178
 
172
179
  // Add execution status to notifier only if head is on/post bellatrix
173
180
  if (isStatePostBellatrix(headState) && headState.isExecutionStateType) {
@@ -1,10 +1,11 @@
1
1
  import {ChainForkConfig} from "@lodestar/config";
2
- import {ForkName, isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
2
+ import {ForkName, isForkPostDeneb, isForkPostFulu, isForkPostGloas} from "@lodestar/params";
3
3
  import {Epoch, RootHex, Slot, phase0} from "@lodestar/types";
4
4
  import {LodestarError} from "@lodestar/utils";
5
5
  import {isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js";
6
6
  import {IBlockInput} from "../../chain/blocks/blockInput/types.js";
7
7
  import {isDaOutOfRange} from "../../chain/blocks/blockInput/utils.js";
8
+ import {PayloadEnvelopeInput} from "../../chain/blocks/payloadEnvelopeInput/payloadEnvelopeInput.js";
8
9
  import {BlockError, BlockErrorCode} from "../../chain/errors/index.js";
9
10
  import {PeerSyncMeta} from "../../network/peers/peersData.js";
10
11
  import {IClock} from "../../util/clock.js";
@@ -46,19 +47,36 @@ export type Attempt = {
46
47
  export type AwaitingDownloadState = {
47
48
  status: BatchStatus.AwaitingDownload;
48
49
  blocks: IBlockInput[];
50
+ payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null;
49
51
  };
50
52
 
51
53
  export type DownloadSuccessState = {
52
54
  status: BatchStatus.AwaitingProcessing;
53
55
  blocks: IBlockInput[];
56
+ payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null;
54
57
  };
55
58
 
56
59
  export type BatchState =
57
60
  | AwaitingDownloadState
58
- | {status: BatchStatus.Downloading; peer: PeerIdStr; blocks: IBlockInput[]}
61
+ | {
62
+ status: BatchStatus.Downloading;
63
+ peer: PeerIdStr;
64
+ blocks: IBlockInput[];
65
+ payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null;
66
+ }
59
67
  | DownloadSuccessState
60
- | {status: BatchStatus.Processing; blocks: IBlockInput[]; attempt: Attempt}
61
- | {status: BatchStatus.AwaitingValidation; blocks: IBlockInput[]; attempt: Attempt};
68
+ | {
69
+ status: BatchStatus.Processing;
70
+ blocks: IBlockInput[];
71
+ payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null;
72
+ attempt: Attempt;
73
+ }
74
+ | {
75
+ status: BatchStatus.AwaitingValidation;
76
+ blocks: IBlockInput[];
77
+ payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null;
78
+ attempt: Attempt;
79
+ };
62
80
 
63
81
  export type BatchMetadata = {
64
82
  startEpoch: Epoch;
@@ -85,7 +103,7 @@ export class Batch {
85
103
  /** Block, blob and column requests that are used to determine the best peer and are used in downloadByRange */
86
104
  requests: DownloadByRangeRequests;
87
105
  /** State of the batch. */
88
- state: BatchState = {status: BatchStatus.AwaitingDownload, blocks: []};
106
+ state: BatchState = {status: BatchStatus.AwaitingDownload, blocks: [], payloadEnvelopes: null};
89
107
  /** Peers that provided good data */
90
108
  goodPeers: PeerIdStr[] = [];
91
109
  /** The `Attempts` that have been made and failed to send us this batch. */
@@ -129,35 +147,33 @@ export class Batch {
129
147
  count: this.count,
130
148
  step: 1,
131
149
  };
132
- if (isForkPostFulu(this.forkName) && withinValidRequestWindow) {
133
- return {
134
- blocksRequest,
135
- columnsRequest: {
136
- startSlot: this.startSlot,
137
- count: this.count,
138
- columns: this.custodyConfig.sampledColumns,
139
- },
140
- };
150
+ const requests: DownloadByRangeRequests = {blocksRequest};
151
+
152
+ // Post-Gloas envelopes are required for block processing, independent of DA retention window.
153
+ if (isForkPostGloas(this.forkName)) {
154
+ requests.envelopesRequest = {startSlot: this.startSlot, count: this.count};
141
155
  }
142
- if (isForkPostDeneb(this.forkName) && withinValidRequestWindow) {
143
- return {
144
- blocksRequest,
145
- blobsRequest: {
146
- startSlot: this.startSlot,
147
- count: this.count,
148
- },
156
+
157
+ if (isForkPostFulu(this.forkName) && withinValidRequestWindow) {
158
+ requests.columnsRequest = {
159
+ startSlot: this.startSlot,
160
+ count: this.count,
161
+ columns: this.custodyConfig.sampledColumns,
149
162
  };
163
+ } else if (isForkPostDeneb(this.forkName) && withinValidRequestWindow) {
164
+ requests.blobsRequest = {startSlot: this.startSlot, count: this.count};
150
165
  }
151
- return {
152
- blocksRequest,
153
- };
166
+
167
+ return requests;
154
168
  }
155
169
 
156
170
  // subsequent request where part of the epoch has already been downloaded. Need to figure out what is the beginning
157
171
  // of the range where download needs to resume
158
172
  let blockStartSlot = this.startSlot;
159
173
  let dataStartSlot = this.startSlot;
174
+ let envelopeStartSlot = this.startSlot;
160
175
  const neededColumns = new Set<number>();
176
+ const envelopesBySlot = this.state.payloadEnvelopes ?? new Map<Slot, PayloadEnvelopeInput>();
161
177
 
162
178
  // ensure blocks are in slot-wise order
163
179
  for (const blockInput of blocks) {
@@ -175,6 +191,13 @@ export class Batch {
175
191
  if (blockInput.hasBlock() && blockStartSlot === blockSlot) {
176
192
  blockStartSlot = blockSlot + 1;
177
193
  }
194
+ if (
195
+ blockInput.hasBlock() &&
196
+ envelopeStartSlot === blockSlot &&
197
+ envelopesBySlot.get(blockSlot)?.hasPayloadEnvelope()
198
+ ) {
199
+ envelopeStartSlot = blockSlot + 1;
200
+ }
178
201
  if (!blockInput.hasAllData()) {
179
202
  if (isBlockInputColumns(blockInput)) {
180
203
  for (const index of blockInput.getMissingSampledColumnMeta().missing) {
@@ -216,6 +239,13 @@ export class Batch {
216
239
  // dataSlot will still have a value but do not create a request for preDeneb forks
217
240
  }
218
241
 
242
+ if (isForkPostGloas(this.forkName) && envelopeStartSlot <= endSlot) {
243
+ requests.envelopesRequest = {
244
+ startSlot: envelopeStartSlot,
245
+ count: endSlot - envelopeStartSlot + 1,
246
+ };
247
+ }
248
+
219
249
  return requests;
220
250
  }
221
251
 
@@ -263,6 +293,10 @@ export class Batch {
263
293
  return this.state.blocks;
264
294
  }
265
295
 
296
+ getPayloadEnvelopes(): Map<Slot, PayloadEnvelopeInput> | null {
297
+ return this.state.payloadEnvelopes;
298
+ }
299
+
266
300
  /**
267
301
  * AwaitingDownload -> Downloading
268
302
  */
@@ -271,13 +305,22 @@ export class Batch {
271
305
  throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingDownload));
272
306
  }
273
307
 
274
- this.state = {status: BatchStatus.Downloading, peer, blocks: this.state.blocks};
308
+ this.state = {
309
+ status: BatchStatus.Downloading,
310
+ peer,
311
+ blocks: this.state.blocks,
312
+ payloadEnvelopes: this.state.payloadEnvelopes,
313
+ };
275
314
  }
276
315
 
277
316
  /**
278
317
  * Downloading -> AwaitingProcessing
279
318
  */
280
- downloadingSuccess(peer: PeerIdStr, blocks: IBlockInput[]): DownloadSuccessState {
319
+ downloadingSuccess(
320
+ peer: PeerIdStr,
321
+ blocks: IBlockInput[],
322
+ payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null
323
+ ): DownloadSuccessState {
281
324
  if (this.state.status !== BatchStatus.Downloading) {
282
325
  throw new BatchError(this.wrongStatusErrorType(BatchStatus.Downloading));
283
326
  }
@@ -305,11 +348,13 @@ export class Batch {
305
348
  status: this.state.status,
306
349
  });
307
350
  }
351
+ const newPayloadEnvelopes = payloadEnvelopes ?? this.state.payloadEnvelopes;
352
+
308
353
  if (allComplete) {
309
- this.state = {status: BatchStatus.AwaitingProcessing, blocks};
354
+ this.state = {status: BatchStatus.AwaitingProcessing, blocks, payloadEnvelopes: newPayloadEnvelopes};
310
355
  } else {
311
356
  this.requests = this.getRequests(blocks);
312
- this.state = {status: BatchStatus.AwaitingDownload, blocks};
357
+ this.state = {status: BatchStatus.AwaitingDownload, blocks, payloadEnvelopes: newPayloadEnvelopes};
313
358
  }
314
359
 
315
360
  return this.state as DownloadSuccessState;
@@ -328,25 +373,30 @@ export class Batch {
328
373
  throw new BatchError(this.errorType({code: BatchErrorCode.MAX_DOWNLOAD_ATTEMPTS}));
329
374
  }
330
375
 
331
- this.state = {status: BatchStatus.AwaitingDownload, blocks: this.state.blocks};
376
+ this.state = {
377
+ status: BatchStatus.AwaitingDownload,
378
+ blocks: this.state.blocks,
379
+ payloadEnvelopes: this.state.payloadEnvelopes,
380
+ };
332
381
  }
333
382
 
334
383
  /**
335
384
  * AwaitingProcessing -> Processing
336
385
  */
337
- startProcessing(): IBlockInput[] {
386
+ startProcessing(): {blocks: IBlockInput[]; payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null} {
338
387
  if (this.state.status !== BatchStatus.AwaitingProcessing) {
339
388
  throw new BatchError(this.wrongStatusErrorType(BatchStatus.AwaitingProcessing));
340
389
  }
341
390
 
342
391
  const blocks = this.state.blocks;
392
+ const payloadEnvelopes = this.state.payloadEnvelopes;
343
393
  const hash = hashBlocks(blocks, this.config); // tracks blocks to report peer on processing error
344
394
  // Reset goodPeers in case another download attempt needs to be made. When Attempt is successful or not the peers
345
395
  // that the data came from will be handled by the Attempt that goes for processing
346
396
  const peers = this.goodPeers;
347
397
  this.goodPeers = [];
348
- this.state = {status: BatchStatus.Processing, blocks, attempt: {peers, hash}};
349
- return blocks;
398
+ this.state = {status: BatchStatus.Processing, blocks, payloadEnvelopes, attempt: {peers, hash}};
399
+ return {blocks, payloadEnvelopes};
350
400
  }
351
401
 
352
402
  /**
@@ -357,7 +407,12 @@ export class Batch {
357
407
  throw new BatchError(this.wrongStatusErrorType(BatchStatus.Processing));
358
408
  }
359
409
 
360
- this.state = {status: BatchStatus.AwaitingValidation, blocks: this.state.blocks, attempt: this.state.attempt};
410
+ this.state = {
411
+ status: BatchStatus.AwaitingValidation,
412
+ blocks: this.state.blocks,
413
+ payloadEnvelopes: this.state.payloadEnvelopes,
414
+ attempt: this.state.attempt,
415
+ };
361
416
  }
362
417
 
363
418
  /**
@@ -408,7 +463,7 @@ export class Batch {
408
463
 
409
464
  // remove any downloaded blocks and re-attempt
410
465
  // TODO(fulu): need to remove the bad blocks from the SeenBlockInputCache
411
- this.state = {status: BatchStatus.AwaitingDownload, blocks: []};
466
+ this.state = {status: BatchStatus.AwaitingDownload, blocks: [], payloadEnvelopes: null};
412
467
  }
413
468
 
414
469
  private onProcessingError(attempt: Attempt): void {
@@ -419,7 +474,7 @@ export class Batch {
419
474
 
420
475
  // remove any downloaded blocks and re-attempt
421
476
  // TODO(fulu): need to remove the bad blocks from the SeenBlockInputCache
422
- this.state = {status: BatchStatus.AwaitingDownload, blocks: []};
477
+ this.state = {status: BatchStatus.AwaitingDownload, blocks: [], payloadEnvelopes: null};
423
478
  }
424
479
 
425
480
  /** Helper to construct typed BatchError. Stack traces are correct as the error is thrown above */
@@ -4,6 +4,7 @@ import {ErrorAborted, LodestarError, Logger, toRootHex} from "@lodestar/utils";
4
4
  import {isBlockInputBlobs, isBlockInputColumns} from "../../chain/blocks/blockInput/blockInput.js";
5
5
  import {BlockInputErrorCode} from "../../chain/blocks/blockInput/errors.js";
6
6
  import {IBlockInput} from "../../chain/blocks/blockInput/types.js";
7
+ import {PayloadEnvelopeInput} from "../../chain/blocks/payloadEnvelopeInput/payloadEnvelopeInput.js";
7
8
  import {BlobSidecarErrorCode} from "../../chain/errors/blobSidecarError.js";
8
9
  import {DataColumnSidecarErrorCode} from "../../chain/errors/dataColumnSidecarError.js";
9
10
  import {Metrics} from "../../metrics/metrics.js";
@@ -44,13 +45,19 @@ export type SyncChainFns = {
44
45
  * Must return if ALL blocks are processed successfully
45
46
  * If SOME blocks are processed must throw BlockProcessorError()
46
47
  */
47
- processChainSegment: (blocks: IBlockInput[], syncType: RangeSyncType) => Promise<void>;
48
+ processChainSegment: (
49
+ blocks: IBlockInput[],
50
+ payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null,
51
+ syncType: RangeSyncType
52
+ ) => Promise<void>;
48
53
  /** Must download blocks, and validate their range */
49
54
  downloadByRange: (
50
55
  peer: PeerSyncMeta,
51
56
  batch: Batch,
52
57
  syncType: RangeSyncType
53
- ) => Promise<WarnResult<IBlockInput[], DownloadByRangeError>>;
58
+ ) => Promise<
59
+ WarnResult<{blocks: IBlockInput[]; payloadEnvelopes: Map<Slot, PayloadEnvelopeInput> | null}, DownloadByRangeError>
60
+ >;
54
61
  /** Report peer for negative actions. Decouples from the full network instance */
55
62
  reportPeer: (peer: PeerIdStr, action: PeerAction, actionName: string) => void;
56
63
  /** Gets current peer custodyColumns and earliestAvailableSlot */
@@ -516,7 +523,8 @@ export class SyncChain {
516
523
  });
517
524
  this.metrics?.syncRange.downloadByRange.success.inc();
518
525
  const {warnings, result} = res.result;
519
- const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, result);
526
+ const {blocks: downloadedBlocks, payloadEnvelopes} = result;
527
+ const downloadSuccessOutput = batch.downloadingSuccess(peer.peerId, downloadedBlocks, payloadEnvelopes);
520
528
  const logMeta: Record<string, number> = {
521
529
  blockCount: downloadSuccessOutput.blocks.length,
522
530
  };
@@ -578,10 +586,10 @@ export class SyncChain {
578
586
  * Sends `batch` to the processor. Note: batch may be empty
579
587
  */
580
588
  private async processBatch(batch: Batch): Promise<void> {
581
- const blocks = batch.startProcessing();
589
+ const {blocks, payloadEnvelopes} = batch.startProcessing();
582
590
 
583
591
  // wrapError ensures to never call both batch success() and batch error()
584
- const res = await wrapError(this.processChainSegment(blocks, this.syncType));
592
+ const res = await wrapError(this.processChainSegment(blocks, payloadEnvelopes, this.syncType));
585
593
 
586
594
  if (!res.err) {
587
595
  batch.processingSuccess();
@@ -172,7 +172,7 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) {
172
172
  }
173
173
 
174
174
  /** Convenience method for `SyncChain` */
175
- private processChainSegment: SyncChainFns["processChainSegment"] = async (blocks, syncType) => {
175
+ private processChainSegment: SyncChainFns["processChainSegment"] = async (blocks, payloadEnvelopes, syncType) => {
176
176
  // Not trusted, verify signatures
177
177
  const flags: ImportBlockOpts = {
178
178
  // Only skip importing attestations for finalized sync. For head sync attestation are valuable.
@@ -192,9 +192,15 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) {
192
192
 
193
193
  if (this.opts?.disableProcessAsChainSegment) {
194
194
  // Should only be used for debugging or testing
195
- for (const block of blocks) await this.chain.processBlock(block, flags);
195
+ for (const block of blocks) {
196
+ await this.chain.processBlock(block, flags);
197
+ const payloadEnvelope = payloadEnvelopes?.get(block.slot);
198
+ if (payloadEnvelope) {
199
+ await this.chain.processExecutionPayload(payloadEnvelope);
200
+ }
201
+ }
196
202
  } else {
197
- await this.chain.processChainSegment(blocks, flags);
203
+ await this.chain.processChainSegment(blocks, payloadEnvelopes, flags);
198
204
  }
199
205
  };
200
206
 
@@ -209,13 +215,19 @@ export class RangeSync extends (EventEmitter as {new (): RangeSyncEmitter}) {
209
215
  peerDasMetrics: this.chain.metrics?.peerDas,
210
216
  ...batch.getRequestsForPeer(peer),
211
217
  });
212
- const cached = cacheByRangeResponses({
218
+ const {responses, payloadEnvelopes: downloadedPayloadEnvelopes} = result;
219
+ const {blocks, payloadEnvelopes} = cacheByRangeResponses({
213
220
  cache: this.chain.seenBlockInputCache,
221
+ seenPayloadEnvelopeInputCache: this.chain.seenPayloadEnvelopeInputCache,
214
222
  peerIdStr: peer.peerId,
215
- responses: result,
223
+ responses,
216
224
  batchBlocks,
225
+ downloadedPayloadEnvelopes,
226
+ existingPayloadEnvelopes: batch.getPayloadEnvelopes(),
227
+ custodyConfig: this.chain.custodyConfig,
228
+ seenTimestampSec: Date.now() / 1000,
217
229
  });
218
- return {result: cached, warnings};
230
+ return {result: {blocks, payloadEnvelopes}, warnings};
219
231
  };
220
232
 
221
233
  private pruneBlockInputs: SyncChainFns["pruneBlockInputs"] = (blocks: IBlockInput[]) => {