@lodestar/beacon-node 1.36.0-dev.6d67a79656 → 1.36.0-dev.6f46a8bd20
- package/lib/api/impl/lodestar/index.d.ts.map +1 -1
- package/lib/api/impl/lodestar/index.js +6 -9
- package/lib/api/impl/lodestar/index.js.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js +6 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.d.ts +7 -0
- package/lib/chain/errors/dataColumnSidecarError.d.ts.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.js +1 -0
- package/lib/chain/errors/dataColumnSidecarError.js.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.d.ts.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.js +14 -3
- package/lib/chain/validation/dataColumnSidecar.js.map +1 -1
- package/lib/metrics/metrics/lodestar.js +1 -1
- package/lib/metrics/metrics/lodestar.js.map +1 -1
- package/lib/network/core/networkCoreWorker.js +2 -7
- package/lib/network/core/networkCoreWorker.js.map +1 -1
- package/lib/network/discv5/worker.js +2 -7
- package/lib/network/discv5/worker.js.map +1 -1
- package/lib/network/processor/gossipHandlers.d.ts.map +1 -1
- package/lib/network/processor/gossipHandlers.js +14 -8
- package/lib/network/processor/gossipHandlers.js.map +1 -1
- package/lib/sync/utils/remoteSyncType.js +2 -2
- package/lib/sync/utils/remoteSyncType.js.map +1 -1
- package/lib/util/profile.d.ts +6 -4
- package/lib/util/profile.d.ts.map +1 -1
- package/lib/util/profile.js +40 -3
- package/lib/util/profile.js.map +1 -1
- package/package.json +26 -24
- package/src/api/impl/lodestar/index.ts +6 -9
- package/src/chain/archiveStore/strategies/frequencyStateArchiveStrategy.ts +5 -1
- package/src/chain/errors/dataColumnSidecarError.ts +8 -0
- package/src/chain/validation/dataColumnSidecar.ts +21 -4
- package/src/metrics/metrics/lodestar.ts +1 -1
- package/src/network/core/networkCoreWorker.ts +2 -7
- package/src/network/discv5/worker.ts +2 -7
- package/src/network/processor/gossipHandlers.ts +16 -7
- package/src/sync/utils/remoteSyncType.ts +2 -2
- package/src/util/profile.ts +45 -3

package/src/chain/validation/dataColumnSidecar.ts
CHANGED

@@ -1,10 +1,14 @@
-import {ChainConfig} from "@lodestar/config";
+import {ChainConfig, ChainForkConfig} from "@lodestar/config";
 import {
   KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH,
   KZG_COMMITMENTS_SUBTREE_INDEX,
   NUMBER_OF_COLUMNS,
 } from "@lodestar/params";
-import {
+import {
+  computeEpochAtSlot,
+  computeStartSlotAtEpoch,
+  getBlockHeaderProposerSignatureSet,
+} from "@lodestar/state-transition";
 import {Root, Slot, SubnetID, fulu, ssz} from "@lodestar/types";
 import {toRootHex, verifyMerkleBranch} from "@lodestar/utils";
 import {Metrics} from "../../metrics/metrics.js";
@@ -29,7 +33,7 @@ export async function validateGossipDataColumnSidecar(
   const blockHeader = dataColumnSidecar.signedBlockHeader.message;
 
   // 1) [REJECT] The sidecar is valid as verified by verify_data_column_sidecar
-  verifyDataColumnSidecar(dataColumnSidecar);
+  verifyDataColumnSidecar(chain.config, dataColumnSidecar);
 
   // 2) [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id
   if (computeSubnetForDataColumnSidecar(chain.config, dataColumnSidecar) !== gossipSubnet) {
@@ -184,7 +188,7 @@ export async function validateGossipDataColumnSidecar(
  * SPEC FUNCTION
  * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar
  */
-function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void {
+function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: fulu.DataColumnSidecar): void {
   if (dataColumnSidecar.index >= NUMBER_OF_COLUMNS) {
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_INDEX,
@@ -201,6 +205,19 @@ function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): voi
     });
   }
 
+  const epoch = computeEpochAtSlot(dataColumnSidecar.signedBlockHeader.message.slot);
+  const maxBlobsPerBlock = config.getMaxBlobsPerBlock(epoch);
+
+  if (dataColumnSidecar.kzgCommitments.length > maxBlobsPerBlock) {
+    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
+      code: DataColumnSidecarErrorCode.TOO_MANY_KZG_COMMITMENTS,
+      slot: dataColumnSidecar.signedBlockHeader.message.slot,
+      columnIdx: dataColumnSidecar.index,
+      count: dataColumnSidecar.kzgCommitments.length,
+      limit: maxBlobsPerBlock,
+    });
+  }
+
   if (
     dataColumnSidecar.column.length !== dataColumnSidecar.kzgCommitments.length ||
     dataColumnSidecar.column.length !== dataColumnSidecar.kzgProofs.length
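
The added guard ties the KZG-commitment count to the epoch's blob limit rather than a fixed constant. A minimal standalone sketch of the same check, with simplified config and sidecar shapes as assumptions and a plain Error standing in for DataColumnSidecarGossipError:

import {computeEpochAtSlot} from "@lodestar/state-transition";

// Simplified stand-ins for the real types (assumptions, not the package's definitions)
type BlobLimitConfig = {getMaxBlobsPerBlock(epoch: number): number};
type SidecarLike = {signedBlockHeader: {message: {slot: number}}; index: number; kzgCommitments: unknown[]};

function assertCommitmentCountWithinLimit(config: BlobLimitConfig, sidecar: SidecarLike): void {
  const slot = sidecar.signedBlockHeader.message.slot;
  // The blob limit is epoch-dependent (blob schedule), so it must be resolved per sidecar
  const limit = config.getMaxBlobsPerBlock(computeEpochAtSlot(slot));
  if (sidecar.kzgCommitments.length > limit) {
    throw new Error(
      `too many kzg commitments: slot=${slot} columnIdx=${sidecar.index} count=${sidecar.kzgCommitments.length} limit=${limit}`
    );
  }
}
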

package/src/metrics/metrics/lodestar.ts
CHANGED

@@ -1878,7 +1878,7 @@ export function createLodestarMetrics(
     fetchKeys: register.histogram({
       name: "lodestar_prune_history_fetch_keys_time_seconds",
       help: "Time to fetch keys in seconds",
-      buckets: [0.001, 0.01, 0.1, 1],
+      buckets: [0.001, 0.01, 0.1, 0.3, 0.5, 1],
     }),
 
     pruneKeys: register.histogram({
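
The two extra buckets add resolution between 100 ms and 1 s. For illustration only, the same histogram expressed directly with prom-client (Lodestar registers it through its own RegistryMetricCreator wrapper, so the wiring below is an assumption):

import {Histogram, Registry} from "prom-client";

const register = new Registry();
const fetchKeysHistogram = new Histogram({
  name: "lodestar_prune_history_fetch_keys_time_seconds",
  help: "Time to fetch keys in seconds",
  // 0.3 and 0.5 split the previously wide 0.1s-1s range
  buckets: [0.001, 0.01, 0.1, 0.3, 0.5, 1],
  registers: [register],
});

fetchKeysHistogram.observe(0.42); // now falls in the le=0.5 bucket instead of le=1
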

package/src/network/core/networkCoreWorker.ts
CHANGED

@@ -1,5 +1,3 @@
-import fs from "node:fs";
-import path from "node:path";
 import worker from "node:worker_threads";
 import {privateKeyFromProtobuf} from "@libp2p/crypto/keys";
 import {peerIdFromPrivateKey} from "@libp2p/peer-id";
@@ -11,7 +9,7 @@ import {RegistryMetricCreator, collectNodeJSMetrics} from "../../metrics/index.j
 import {AsyncIterableBridgeCaller, AsyncIterableBridgeHandler} from "../../util/asyncIterableToEvents.js";
 import {Clock} from "../../util/clock.js";
 import {peerIdToString} from "../../util/peerId.js";
-import {
+import {ProfileThread, profileThread, writeHeapSnapshot} from "../../util/profile.js";
 import {wireEventsOnWorkerThread} from "../../util/workerEvents.js";
 import {NetworkEventBus, NetworkEventData, networkEventDirection} from "../events.js";
 import {
@@ -157,10 +155,7 @@ const libp2pWorkerApi: NetworkWorkerApi = {
   dumpDiscv5KadValues: () => core.dumpDiscv5KadValues(),
   dumpMeshPeers: () => core.dumpMeshPeers(),
   writeProfile: async (durationMs: number, dirpath: string) => {
-
-    const filePath = path.join(dirpath, `network_thread_${new Date().toISOString()}.cpuprofile`);
-    fs.writeFileSync(filePath, profile);
-    return filePath;
+    return profileThread(ProfileThread.NETWORK, durationMs, dirpath);
   },
   writeDiscv5Profile: async (durationMs: number, dirpath: string) => {
     return core.writeDiscv5Profile(durationMs, dirpath);
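
Both worker entry points now hand profiling off to the shared util instead of writing the .cpuprofile themselves. A hypothetical caller-side sketch; only the `writeProfile(durationMs, dirpath)` signature comes from the diff, the `api` variable and its wiring are assumptions:

// api: whatever proxy exposes the NetworkWorkerApi methods across the worker boundary (assumed)
async function dumpNetworkThreadProfile(api: {writeProfile(durationMs: number, dirpath: string): Promise<string>}): Promise<void> {
  // On Node.js this resolves to the path of the written .cpuprofile file;
  // on Bun (see util/profile.ts below) it resolves to a status message instead.
  const result = await api.writeProfile(10_000, "/tmp/lodestar-profiles");
  console.log(result);
}
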

package/src/network/discv5/worker.ts
CHANGED

@@ -1,5 +1,3 @@
-import fs from "node:fs";
-import path from "node:path";
 import worker from "node:worker_threads";
 import {privateKeyFromProtobuf} from "@libp2p/crypto/keys";
 import {peerIdFromPrivateKey} from "@libp2p/peer-id";
@@ -14,7 +12,7 @@ import {Gauge} from "@lodestar/utils";
 import {RegistryMetricCreator} from "../../metrics/index.js";
 import {collectNodeJSMetrics} from "../../metrics/nodeJsMetrics.js";
 import {Clock} from "../../util/clock.js";
-import {
+import {ProfileThread, profileThread, writeHeapSnapshot} from "../../util/profile.js";
 import {Discv5WorkerApi, Discv5WorkerData} from "./types.js";
 import {ENRRelevance, enrRelevance} from "./utils.js";
 
@@ -108,10 +106,7 @@ const module: Discv5WorkerApi = {
     return (await metricsRegistry?.metrics()) ?? "";
   },
   writeProfile: async (durationMs: number, dirpath: string) => {
-
-    const filePath = path.join(dirpath, `discv5_thread_${new Date().toISOString()}.cpuprofile`);
-    fs.writeFileSync(filePath, profile);
-    return filePath;
+    return profileThread(ProfileThread.DISC5, durationMs, dirpath);
   },
   writeHeapSnapshot: async (prefix: string, dirpath: string) => {
     return writeHeapSnapshot(prefix, dirpath);

package/src/network/processor/gossipHandlers.ts
CHANGED

@@ -39,6 +39,7 @@ import {
   BlockError,
   BlockErrorCode,
   BlockGossipError,
+  DataColumnSidecarErrorCode,
   DataColumnSidecarGossipError,
   GossipAction,
   GossipActionError,
@@ -304,7 +305,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
         ...blockInput.getLogMeta(),
         index: dataColumnSidecar.index,
       });
-
+      throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
+        code: DataColumnSidecarErrorCode.ALREADY_KNOWN,
+        columnIdx: dataColumnSidecar.index,
+        slot,
+      });
     }
   }
 
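
A duplicate data column now surfaces as a typed IGNORE-level gossip error instead of falling through after the debug log. A condensed sketch of that pattern with simplified stand-in classes (the real DataColumnSidecarGossipError extends the package's own error hierarchy and carries more context):

enum GossipAction {
  IGNORE = "IGNORE",
  REJECT = "REJECT",
}

class GossipActionError<T extends {code: string}> extends Error {
  constructor(
    readonly action: GossipAction,
    readonly type: T
  ) {
    super(type.code);
  }
}

// Duplicates are not a peer fault, so they map to IGNORE rather than REJECT
function onAlreadyKnownColumn(slot: number, columnIdx: number): never {
  throw new GossipActionError(GossipAction.IGNORE, {code: "ALREADY_KNOWN", slot, columnIdx});
}
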
@@ -556,6 +561,16 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
         metrics?.dataColumns.elapsedTimeTillReceived.observe({receivedOrder: receivedColumns}, delaySec);
         break;
       }
+
+      if (!blockInput.hasAllData()) {
+        // immediately attempt fetch of data columns from execution engine
+        chain.getBlobsTracker.triggerGetBlobs(blockInput);
+        // if we've received at least half of the columns, trigger reconstruction of the rest
+        if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
+          chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
+        }
+      }
+
       if (!blockInput.hasBlockAndAllData()) {
         const cutoffTimeMs = getCutoffTimeMs(chain, dataColumnSlot, BLOCK_AVAILABILITY_CUTOFF_MS);
         chain.logger.debug("Received gossip data column, waiting for full data availability", {
@@ -578,12 +593,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
             source: BlockInputSource.gossip,
           });
         });
-        // immediately attempt fetch of data columns from execution engine
-        chain.getBlobsTracker.triggerGetBlobs(blockInput);
-        // if we've received at least half of the columns, trigger reconstruction of the rest
-        if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
-          chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
-        }
       }
     },
 
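
The recovery triggers move from the `hasBlockAndAllData()` branch up to their own `hasAllData()` check, so they fire even when the block itself has already arrived. A condensed sketch of the resulting control flow under assumed minimal interfaces:

import {NUMBER_OF_COLUMNS} from "@lodestar/params"; // 128 columns in fulu/PeerDAS

// Minimal shapes for illustration; the real BlockInput and chain interfaces are richer
interface BlockInputLike {
  hasAllData(): boolean;
  columnCount: number;
}
interface ChainLike {
  getBlobsTracker: {triggerGetBlobs(blockInput: BlockInputLike): void};
  columnReconstructionTracker: {triggerColumnReconstruction(blockInput: BlockInputLike): void};
}

function maybeTriggerColumnRecovery(chain: ChainLike, blockInput: BlockInputLike): void {
  if (blockInput.hasAllData()) return; // nothing left to fetch or reconstruct
  // always try the execution engine first
  chain.getBlobsTracker.triggerGetBlobs(blockInput);
  // with at least half of the columns, the rest can be reconstructed locally
  if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
    chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
  }
}
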

package/src/sync/utils/remoteSyncType.ts
CHANGED

@@ -134,8 +134,8 @@ export function getRangeSyncTarget(
     return {
       syncType: RangeSyncType.Head,
       // The new peer has the same finalized (earlier filters should prevent a peer with an
-      // earlier finalized chain from reaching here).
-      startEpoch:
+      // earlier finalized chain from reaching here) and local head will always be >= local finalized.
+      startEpoch: computeEpochAtSlot(local.headSlot),
       target: {
         slot: remote.headSlot,
         root: remote.headRoot,
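
The head-sync start epoch is now derived from the local head slot. A small illustration of the helper involved (the slot value is made up):

import {computeEpochAtSlot} from "@lodestar/state-transition";

// With mainnet SLOTS_PER_EPOCH = 32: slot 12_345 -> epoch 385 (Math.floor(12_345 / 32))
const startEpoch = computeEpochAtSlot(12_345);
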
package/src/util/profile.ts
CHANGED

@@ -1,13 +1,32 @@
+import fs from "node:fs";
+import path from "node:path";
 import {sleep} from "@lodestar/utils";
 
+export enum ProfileThread {
+  MAIN = "main",
+  NETWORK = "network",
+  DISC5 = "discv5",
+}
+
+/**
+ * The time to take a Bun profile.
+ * If we increase this time it'll potentiall cause the app to crash.
+ * If we decrease this time, profile recorded will be fragmented and hard to analyze.
+ */
+const BUN_PROFILE_MS = 3 * 1000;
+
+export async function profileThread(thread: ProfileThread, durationMs: number, dirpath: string): Promise<string> {
+  return globalThis.Bun ? profileBun(thread, durationMs) : profileNodeJS(thread, durationMs, dirpath);
+}
+
 /**
- * Take
+ * Take `durationMs` profile of the current thread and return the persisted file path.
  */
-
+async function profileNodeJS(thread: ProfileThread, durationMs: number, dirpath: string): Promise<string> {
   const inspector = await import("node:inspector");
 
   // due to some typing issues, not able to use promisify here
-
+  const profile = await new Promise<string>((resolve, reject) => {
     // Start the inspector and connect to it
     const session = new inspector.Session();
     session.connect();
@@ -29,6 +48,29 @@ export async function profileNodeJS(durationMs: number): Promise<string> {
       });
     });
   });
+
+  const filePath = path.join(dirpath, `${thread}_thread_${new Date().toISOString()}.cpuprofile`);
+  fs.writeFileSync(filePath, profile);
+  return filePath;
+}
+
+/**
+ * Unlike NodeJS, Bun console.profile() api flush data to the inspector,
+ * so this api returns ms taken of this profile instead of file path.
+ */
+async function profileBun(thread: ProfileThread, durationMs: number): Promise<string> {
+  const start = Date.now();
+  let now = Date.now();
+  while (now - start < durationMs) {
+    // biome-ignore lint/suspicious/noConsole: need to use console api to profile in Bun
+    console.profile(String(now));
+    await sleep(BUN_PROFILE_MS);
+    // biome-ignore lint/suspicious/noConsole: need to use console api to profile in Bun
+    console.profileEnd(String(now));
+    now = Date.now();
+  }
+
+  return `Successfully take Bun ${thread} thread profile in ${now - start}ms. Check your inspector to see the profile.`;
 }
 
 /**
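
A quick usage sketch of the new helper (the import path is an assumption; per the code above, the promise resolves to the .cpuprofile path on Node.js and to a status string on Bun):

import {ProfileThread, profileThread} from "./util/profile.js"; // path assumed

const result = await profileThread(ProfileThread.MAIN, 10_000, "/tmp/lodestar-profiles");
console.log(result);
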