@lodestar/beacon-node 1.36.0-dev.fe5f423da3 → 1.36.0-rc.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/api/impl/beacon/blocks/index.d.ts.map +1 -1
- package/lib/api/impl/beacon/blocks/index.js +41 -22
- package/lib/api/impl/beacon/blocks/index.js.map +1 -1
- package/lib/api/impl/lodestar/index.d.ts +5 -0
- package/lib/api/impl/lodestar/index.d.ts.map +1 -1
- package/lib/api/impl/lodestar/index.js +35 -10
- package/lib/api/impl/lodestar/index.js.map +1 -1
- package/lib/api/impl/node/utils.js +1 -1
- package/lib/api/impl/node/utils.js.map +1 -1
- package/lib/chain/chain.d.ts +5 -2
- package/lib/chain/chain.d.ts.map +1 -1
- package/lib/chain/chain.js +32 -16
- package/lib/chain/chain.js.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.d.ts +17 -14
- package/lib/chain/errors/dataColumnSidecarError.d.ts.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.js +4 -0
- package/lib/chain/errors/dataColumnSidecarError.js.map +1 -1
- package/lib/chain/forkChoice/index.d.ts +9 -1
- package/lib/chain/forkChoice/index.d.ts.map +1 -1
- package/lib/chain/forkChoice/index.js +109 -4
- package/lib/chain/forkChoice/index.js.map +1 -1
- package/lib/chain/interface.d.ts +2 -0
- package/lib/chain/interface.d.ts.map +1 -1
- package/lib/chain/options.d.ts +0 -2
- package/lib/chain/options.d.ts.map +1 -1
- package/lib/chain/options.js +2 -2
- package/lib/chain/options.js.map +1 -1
- package/lib/chain/stateCache/datastore/db.d.ts +12 -0
- package/lib/chain/stateCache/datastore/db.d.ts.map +1 -1
- package/lib/chain/stateCache/datastore/db.js +70 -0
- package/lib/chain/stateCache/datastore/db.js.map +1 -1
- package/lib/chain/stateCache/datastore/file.d.ts +1 -0
- package/lib/chain/stateCache/datastore/file.d.ts.map +1 -1
- package/lib/chain/stateCache/datastore/file.js +7 -0
- package/lib/chain/stateCache/datastore/file.js.map +1 -1
- package/lib/chain/stateCache/datastore/types.d.ts +1 -0
- package/lib/chain/stateCache/datastore/types.d.ts.map +1 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.d.ts +16 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.d.ts.map +1 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.js +31 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.js.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.d.ts.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.js +32 -15
- package/lib/chain/validation/dataColumnSidecar.js.map +1 -1
- package/lib/index.d.ts +2 -0
- package/lib/index.d.ts.map +1 -1
- package/lib/index.js +2 -0
- package/lib/index.js.map +1 -1
- package/lib/metrics/metrics/lodestar.js +1 -1
- package/lib/metrics/metrics/lodestar.js.map +1 -1
- package/lib/network/core/networkCore.d.ts.map +1 -1
- package/lib/network/core/networkCore.js +5 -1
- package/lib/network/core/networkCore.js.map +1 -1
- package/lib/network/core/networkCoreWorker.js +8 -8
- package/lib/network/core/networkCoreWorker.js.map +1 -1
- package/lib/network/core/networkCoreWorkerHandler.js +1 -1
- package/lib/network/core/networkCoreWorkerHandler.js.map +1 -1
- package/lib/network/discv5/worker.js +2 -7
- package/lib/network/discv5/worker.js.map +1 -1
- package/lib/network/events.d.ts +1 -0
- package/lib/network/events.d.ts.map +1 -1
- package/lib/network/gossip/encoding.js +1 -1
- package/lib/network/gossip/encoding.js.map +1 -1
- package/lib/network/gossip/snappy_bun.d.ts +3 -0
- package/lib/network/gossip/snappy_bun.d.ts.map +1 -0
- package/lib/network/gossip/snappy_bun.js +3 -0
- package/lib/network/gossip/snappy_bun.js.map +1 -0
- package/lib/network/metadata.d.ts +1 -1
- package/lib/network/metadata.d.ts.map +1 -1
- package/lib/network/metadata.js +1 -0
- package/lib/network/metadata.js.map +1 -1
- package/lib/network/options.d.ts +0 -1
- package/lib/network/options.d.ts.map +1 -1
- package/lib/network/options.js.map +1 -1
- package/lib/network/peers/discover.js +2 -2
- package/lib/network/peers/discover.js.map +1 -1
- package/lib/network/processor/gossipHandlers.d.ts.map +1 -1
- package/lib/network/processor/gossipHandlers.js +6 -2
- package/lib/network/processor/gossipHandlers.js.map +1 -1
- package/lib/network/reqresp/ReqRespBeaconNode.d.ts.map +1 -1
- package/lib/network/reqresp/ReqRespBeaconNode.js +3 -1
- package/lib/network/reqresp/ReqRespBeaconNode.js.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts +2 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js +14 -3
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts +2 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js +9 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.d.ts +2 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.js +9 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.js.map +1 -1
- package/lib/network/reqresp/handlers/index.js +6 -6
- package/lib/network/reqresp/handlers/index.js.map +1 -1
- package/lib/network/reqresp/types.d.ts +1 -0
- package/lib/network/reqresp/types.d.ts.map +1 -1
- package/lib/node/nodejs.d.ts +2 -1
- package/lib/node/nodejs.d.ts.map +1 -1
- package/lib/node/nodejs.js +2 -1
- package/lib/node/nodejs.js.map +1 -1
- package/lib/sync/range/range.d.ts.map +1 -1
- package/lib/sync/range/range.js +2 -1
- package/lib/sync/range/range.js.map +1 -1
- package/lib/sync/utils/downloadByRange.d.ts +58 -13
- package/lib/sync/utils/downloadByRange.d.ts.map +1 -1
- package/lib/sync/utils/downloadByRange.js +201 -82
- package/lib/sync/utils/downloadByRange.js.map +1 -1
- package/lib/sync/utils/remoteSyncType.d.ts +2 -1
- package/lib/sync/utils/remoteSyncType.d.ts.map +1 -1
- package/lib/sync/utils/remoteSyncType.js +19 -4
- package/lib/sync/utils/remoteSyncType.js.map +1 -1
- package/lib/util/blobs.d.ts +1 -1
- package/lib/util/blobs.d.ts.map +1 -1
- package/lib/util/blobs.js +53 -20
- package/lib/util/blobs.js.map +1 -1
- package/lib/util/profile.d.ts +6 -4
- package/lib/util/profile.d.ts.map +1 -1
- package/lib/util/profile.js +40 -3
- package/lib/util/profile.js.map +1 -1
- package/lib/util/sszBytes.d.ts +2 -0
- package/lib/util/sszBytes.d.ts.map +1 -1
- package/lib/util/sszBytes.js +25 -0
- package/lib/util/sszBytes.js.map +1 -1
- package/package.json +32 -25
- package/src/api/impl/beacon/blocks/index.ts +47 -25
- package/src/api/impl/lodestar/index.ts +42 -10
- package/src/api/impl/node/utils.ts +1 -1
- package/src/chain/chain.ts +48 -23
- package/src/chain/errors/dataColumnSidecarError.ts +20 -14
- package/src/chain/forkChoice/index.ts +178 -2
- package/src/chain/interface.ts +2 -0
- package/src/chain/options.ts +2 -3
- package/src/chain/stateCache/datastore/db.ts +89 -1
- package/src/chain/stateCache/datastore/file.ts +8 -0
- package/src/chain/stateCache/datastore/types.ts +1 -0
- package/src/chain/stateCache/persistentCheckpointsCache.ts +45 -2
- package/src/chain/validation/dataColumnSidecar.ts +34 -16
- package/src/index.ts +2 -0
- package/src/metrics/metrics/lodestar.ts +1 -1
- package/src/network/core/networkCore.ts +5 -1
- package/src/network/core/networkCoreWorker.ts +9 -9
- package/src/network/core/networkCoreWorkerHandler.ts +1 -1
- package/src/network/discv5/worker.ts +2 -7
- package/src/network/events.ts +1 -1
- package/src/network/gossip/encoding.ts +1 -1
- package/src/network/gossip/snappy_bun.ts +2 -0
- package/src/network/metadata.ts +3 -1
- package/src/network/options.ts +0 -1
- package/src/network/peers/discover.ts +2 -2
- package/src/network/processor/gossipHandlers.ts +6 -1
- package/src/network/reqresp/ReqRespBeaconNode.ts +3 -1
- package/src/network/reqresp/handlers/beaconBlocksByRange.ts +18 -3
- package/src/network/reqresp/handlers/dataColumnSidecarsByRange.ts +13 -1
- package/src/network/reqresp/handlers/dataColumnSidecarsByRoot.ts +13 -1
- package/src/network/reqresp/handlers/index.ts +6 -6
- package/src/network/reqresp/types.ts +1 -0
- package/src/node/nodejs.ts +3 -0
- package/src/sync/range/range.ts +2 -1
- package/src/sync/utils/downloadByRange.ts +259 -103
- package/src/sync/utils/remoteSyncType.ts +23 -4
- package/src/util/blobs.ts +64 -20
- package/src/util/profile.ts +45 -3
- package/src/util/sszBytes.ts +30 -0
@@ -1,5 +1,11 @@
-import {
+import {SLOTS_PER_EPOCH} from "@lodestar/params";
+import {Epoch, phase0, ssz} from "@lodestar/types";
+import {MapDef} from "@lodestar/utils";
 import {IBeaconDb} from "../../../db/interface.js";
+import {
+  getLastProcessedSlotFromBeaconStateSerialized,
+  getSlotFromBeaconStateSerialized,
+} from "../../../util/sszBytes.js";
 import {CPStateDatastore, DatastoreKey} from "./types.js";
 
 /**
@@ -22,6 +28,13 @@ export class DbCPStateDatastore implements CPStateDatastore {
     return this.db.checkpointState.getBinary(serializedCheckpoint);
   }
 
+  async readLatestSafe(): Promise<Uint8Array | null> {
+    const allKeys = await this.readKeys();
+    if (allKeys.length === 0) return null;
+
+    return getLatestSafeDatastoreKey(allKeys, this.read.bind(this));
+  }
+
   async readKeys(): Promise<DatastoreKey[]> {
     return this.db.checkpointState.keys();
   }
@@ -34,3 +47,78 @@ export function datastoreKeyToCheckpoint(key: DatastoreKey): phase0.Checkpoint {
 export function checkpointToDatastoreKey(cp: phase0.Checkpoint): DatastoreKey {
   return ssz.phase0.Checkpoint.serialize(cp);
 }
+
+/**
+ * Get the latest safe checkpoint state the node can use to boot from
+ * - it should be the checkpoint state that's unique in its epoch
+ * - its last processed block slot should be at epoch boundary or last slot of previous epoch
+ * - state slot should be at epoch boundary
+ * - state slot should be equal to epoch * SLOTS_PER_EPOCH
+ *
+ * return the serialized data of Current Root Checkpoint State (CRCS) or Previous Root Checkpoint State (PRCS)
+ *
+ */
+export async function getLatestSafeDatastoreKey(
+  allKeys: DatastoreKey[],
+  readFn: (key: DatastoreKey) => Promise<Uint8Array | null>
+): Promise<Uint8Array | null> {
+  const checkpointsByEpoch = new MapDef<Epoch, DatastoreKey[]>(() => []);
+  for (const key of allKeys) {
+    const cp = datastoreKeyToCheckpoint(key);
+    checkpointsByEpoch.getOrDefault(cp.epoch).push(key);
+  }
+
+  const dataStoreKeyByEpoch: Map<Epoch, DatastoreKey> = new Map();
+  for (const [epoch, keys] of checkpointsByEpoch.entries()) {
+    // only consider epochs with a single checkpoint to avoid ambiguity from forks
+    if (keys.length === 1) {
+      dataStoreKeyByEpoch.set(epoch, keys[0]);
+    }
+  }
+
+  const epochsDesc = Array.from(dataStoreKeyByEpoch.keys()).sort((a, b) => b - a);
+  for (const epoch of epochsDesc) {
+    const datastoreKey = dataStoreKeyByEpoch.get(epoch);
+    if (datastoreKey == null) {
+      // should not happen
+      continue;
+    }
+
+    const stateBytes = await readFn(datastoreKey);
+    if (stateBytes == null) {
+      // should not happen
+      continue;
+    }
+
+    const lastProcessedSlot = getLastProcessedSlotFromBeaconStateSerialized(stateBytes);
+    if (lastProcessedSlot == null) {
+      // cannot extract last processed slot from serialized state, skip
+      continue;
+    }
+
+    const stateSlot = getSlotFromBeaconStateSerialized(stateBytes);
+    if (stateSlot == null) {
+      // cannot extract slot from serialized state, skip
+      continue;
+    }
+
+    if (lastProcessedSlot !== stateSlot && lastProcessedSlot !== stateSlot - 1) {
+      // not CRCS or PRCS, skip
+      continue;
+    }
+
+    if (stateSlot % SLOTS_PER_EPOCH !== 0) {
+      // not at epoch boundary, skip
+      continue;
+    }
+
+    if (stateSlot !== SLOTS_PER_EPOCH * epoch) {
+      // should not happen after above checks, but just to be safe
+      continue;
+    }
+
+    return stateBytes;
+  }
+
+  return null;
+}
@@ -2,6 +2,7 @@ import path from "node:path";
 import {phase0, ssz} from "@lodestar/types";
 import {fromHex, toHex} from "@lodestar/utils";
 import {ensureDir, readFile, readFileNames, removeFile, writeIfNotExist} from "../../../util/file.js";
+import {getLatestSafeDatastoreKey} from "./db.js";
 import {CPStateDatastore, DatastoreKey} from "./types.js";
 
 const CHECKPOINT_STATES_FOLDER = "checkpoint_states";
@@ -44,6 +45,13 @@ export class FileCPStateDatastore implements CPStateDatastore {
     return readFile(filePath);
   }
 
+  async readLatestSafe(): Promise<Uint8Array | null> {
+    const allKeys = await this.readKeys();
+    if (allKeys.length === 0) return null;
+
+    return getLatestSafeDatastoreKey(allKeys, this.read.bind(this));
+  }
+
   async readKeys(): Promise<DatastoreKey[]> {
     const fileNames = await readFileNames(this.folderPath);
     return fileNames
@@ -8,6 +8,7 @@ export interface CPStateDatastore {
   write: (cpKey: phase0.Checkpoint, stateBytes: Uint8Array) => Promise<DatastoreKey>;
   remove: (key: DatastoreKey) => Promise<void>;
   read: (key: DatastoreKey) => Promise<Uint8Array | null>;
+  readLatestSafe: () => Promise<Uint8Array | null>;
   readKeys: () => Promise<DatastoreKey[]>;
   init?: () => Promise<void>;
 }
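
The safety conditions enforced by getLatestSafeDatastoreKey above reduce to a small predicate. A condensed, illustrative restatement (using the mainnet preset value SLOTS_PER_EPOCH = 32; this is not code from the package):

const SLOTS_PER_EPOCH = 32;

// A checkpoint state is safe to boot from when it sits exactly on its epoch boundary and its
// last processed block slot is either the boundary slot (CRCS) or the slot before it (PRCS).
function isSafeBootState(epoch: number, stateSlot: number, lastProcessedSlot: number): boolean {
  return (
    stateSlot === epoch * SLOTS_PER_EPOCH &&
    (lastProcessedSlot === stateSlot || lastProcessedSlot === stateSlot - 1)
  );
}

isSafeBootState(100, 3200, 3200); // true: Current Root Checkpoint State
isSafeBootState(100, 3200, 3199); // true: Previous Root Checkpoint State
isSafeBootState(100, 3201, 3201); // false: state not at the epoch boundary
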
@@ -17,8 +17,10 @@ import {MapTracker} from "./mapMetrics.js";
 import {BlockStateCache, CacheItemType, CheckpointHex, CheckpointStateCache} from "./types.js";
 
 export type PersistentCheckpointStateCacheOpts = {
-  /** Keep max n
+  /** Keep max n state epochs in memory, persist the rest to disk */
   maxCPStateEpochsInMemory?: number;
+  /** Keep max n state epochs on disk */
+  maxCPStateEpochsOnDisk?: number;
 };
 
 type PersistentCheckpointStateCacheModules = {
@@ -58,6 +60,14 @@ type LoadedStateBytesData = {persistedKey: DatastoreKey; stateBytes: Uint8Array}
  */
 export const DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY = 3;
 
+/**
+ * By default we don't prune any persistent checkpoint states as it's not safe to delete them during
+ * long non-finality as we don't know the state of the chain and there could be a deep (hundreds of epochs) reorg
+ * if there two competing chains with similar weight but we wouldn't have a close enough state to pivot to this chain
+ * and instead require a resync from last finalized checkpoint state which could be very far in the past.
+ */
+export const DEFAULT_MAX_CP_STATE_ON_DISK = Infinity;
+
 // TODO GLOAS: re-evaluate this timing
 const PROCESS_CHECKPOINT_STATES_BPS = 6667;
 
@@ -104,6 +114,7 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
   private preComputedCheckpoint: string | null = null;
   private preComputedCheckpointHits: number | null = null;
   private readonly maxEpochsInMemory: number;
+  private readonly maxEpochsOnDisk: number;
   private readonly datastore: CPStateDatastore;
   private readonly blockStateCache: BlockStateCache;
   private readonly bufferPool?: BufferPool | null;
@@ -139,10 +150,16 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
     this.logger = logger;
     this.clock = clock;
     this.signal = signal;
+
     if (opts.maxCPStateEpochsInMemory !== undefined && opts.maxCPStateEpochsInMemory < 0) {
       throw new Error("maxEpochsInMemory must be >= 0");
     }
+    if (opts.maxCPStateEpochsOnDisk !== undefined && opts.maxCPStateEpochsOnDisk < 0) {
+      throw new Error("maxCPStateEpochsOnDisk must be >= 0");
+    }
+
     this.maxEpochsInMemory = opts.maxCPStateEpochsInMemory ?? DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY;
+    this.maxEpochsOnDisk = opts.maxCPStateEpochsOnDisk ?? DEFAULT_MAX_CP_STATE_ON_DISK;
     // Specify different datastore for testing
     this.datastore = datastore;
     this.blockStateCache = blockStateCache;
@@ -324,6 +341,7 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
       this.logger.verbose("Added checkpoint state to memory", {epoch: cp.epoch, rootHex: cpHex.rootHex});
     }
     this.epochIndex.getOrDefault(cp.epoch).add(cpHex.rootHex);
+    this.prunePersistedStates();
   }
 
   /**
@@ -766,13 +784,38 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
       this.cache.delete(key);
     }
     this.epochIndex.delete(epoch);
-    this.logger.verbose("Pruned
+    this.logger.verbose("Pruned checkpoint states for epoch", {
      epoch,
      persistCount,
      rootHexes: Array.from(rootHexes).join(","),
    });
  }

+  /**
+   * Prune persisted checkpoint states from disk.
+   * Note that this should handle all possible errors and not throw.
+   */
+  private prunePersistedStates(): void {
+    //                         epochsOnDisk                                 epochsInMemory
+    // |----------------------------------------------------------|----------------------|
+    const maxTrackedEpochs = this.maxEpochsOnDisk + this.maxEpochsInMemory;
+    if (this.epochIndex.size <= maxTrackedEpochs) {
+      return;
+    }
+
+    const sortedEpochs = Array.from(this.epochIndex.keys()).sort((a, b) => a - b);
+    const pruneEpochs = sortedEpochs.slice(0, sortedEpochs.length - maxTrackedEpochs);
+    for (const epoch of pruneEpochs) {
+      this.deleteAllEpochItems(epoch).catch((e) =>
+        this.logger.debug(
+          "Error delete all epoch items",
+          {epoch, maxEpochsOnDisk: this.maxEpochsOnDisk, maxEpochsInMemory: this.maxEpochsInMemory},
+          e as Error
+        )
+      );
+    }
+  }
+
   /**
    * Serialize validators to bytes leveraging the buffer pool to save memory allocation.
    * - As monitored on holesky as of Jan 2024, it helps save ~500ms state reload time (4.3s vs 3.8s)
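
The new maxCPStateEpochsOnDisk option bounds how many epochs of checkpoint states stay persisted; together with maxCPStateEpochsInMemory it defines the window that prunePersistedStates() enforces. A small sketch with hypothetical numbers (by default the on-disk limit is Infinity, so nothing is pruned unless the option is set):

const maxCPStateEpochsInMemory = 3;
const maxCPStateEpochsOnDisk = 10; // hypothetical; defaults to Infinity
const maxTrackedEpochs = maxCPStateEpochsOnDisk + maxCPStateEpochsInMemory; // 13

// Epochs currently tracked by the cache's epochIndex, oldest first
const trackedEpochs = Array.from({length: 16}, (_, i) => 1000 + i); // 1000..1015

// Everything older than the newest maxTrackedEpochs epochs is scheduled for deletion from disk
const pruneEpochs = trackedEpochs.slice(0, Math.max(0, trackedEpochs.length - maxTrackedEpochs));
console.log(pruneEpochs); // [1000, 1001, 1002]
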
@@ -39,7 +39,7 @@ export async function validateGossipDataColumnSidecar(
   if (computeSubnetForDataColumnSidecar(chain.config, dataColumnSidecar) !== gossipSubnet) {
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_SUBNET,
-
+      columnIndex: dataColumnSidecar.index,
       gossipSubnet: gossipSubnet,
     });
   }
@@ -156,7 +156,7 @@ export async function validateGossipDataColumnSidecar(
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   }
 
@@ -173,7 +173,7 @@ export async function validateGossipDataColumnSidecar(
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF,
       slot: blockHeader.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   } finally {
     kzgProofTimer?.();
@@ -193,7 +193,7 @@ function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: ful
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_INDEX,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   }
 
@@ -201,7 +201,7 @@ function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: ful
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.NO_COMMITMENTS,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   }
 
@@ -212,7 +212,7 @@ function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: ful
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
      code: DataColumnSidecarErrorCode.TOO_MANY_KZG_COMMITMENTS,
      slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
      count: dataColumnSidecar.kzgCommitments.length,
      limit: maxBlobsPerBlock,
    });
@@ -293,7 +293,6 @@ export async function validateBlockDataColumnSidecars(
       "Block has no blob commitments but data column sidecars were provided"
     );
   }
-
   // Hash the first sidecar block header and compare the rest via (cheaper) equality
   const firstSidecarBlockHeader = dataColumnSidecars[0].signedBlockHeader.message;
   const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader);
@@ -302,7 +301,7 @@ export async function validateBlockDataColumnSidecars(
       {
         code: DataColumnSidecarErrorCode.INCORRECT_BLOCK,
         slot: blockSlot,
-
+        columnIndex: 0,
         expected: toRootHex(blockRoot),
         actual: toRootHex(firstBlockRoot),
       },
@@ -317,33 +316,52 @@ export async function validateBlockDataColumnSidecars(
   for (let i = 0; i < dataColumnSidecars.length; i++) {
     const columnSidecar = dataColumnSidecars[i];
 
+    if (!ssz.phase0.BeaconBlockHeader.equals(firstSidecarBlockHeader, columnSidecar.signedBlockHeader.message)) {
+      throw new DataColumnSidecarValidationError({
+        code: DataColumnSidecarErrorCode.INCORRECT_HEADER_ROOT,
+        slot: blockSlot,
+        expected: toRootHex(blockRoot),
+        actual: toRootHex(ssz.phase0.BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message)),
+      });
+    }
+
     if (columnSidecar.index >= NUMBER_OF_COLUMNS) {
       throw new DataColumnSidecarValidationError(
         {
           code: DataColumnSidecarErrorCode.INVALID_INDEX,
           slot: blockSlot,
-
+          columnIndex: columnSidecar.index,
         },
         "DataColumnSidecar has invalid index"
       );
     }
 
-    if (columnSidecar.
+    if (columnSidecar.column.length !== blockBlobCount) {
       throw new DataColumnSidecarValidationError({
-        code: DataColumnSidecarErrorCode.
+        code: DataColumnSidecarErrorCode.INCORRECT_CELL_COUNT,
         slot: blockSlot,
-
+        columnIndex: columnSidecar.index,
        expected: blockBlobCount,
+        actual: columnSidecar.column.length,
+      });
+    }
+
+    if (columnSidecar.column.length !== columnSidecar.kzgCommitments.length) {
+      throw new DataColumnSidecarValidationError({
+        code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT,
+        slot: blockSlot,
+        columnIndex: columnSidecar.index,
+        expected: columnSidecar.column.length,
        actual: columnSidecar.kzgCommitments.length,
      });
    }

-    if (columnSidecar.
+    if (columnSidecar.column.length !== columnSidecar.kzgProofs.length) {
       throw new DataColumnSidecarValidationError({
        code: DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT,
        slot: blockSlot,
-
-        expected: columnSidecar.
+        columnIndex: columnSidecar.index,
+        expected: columnSidecar.column.length,
        actual: columnSidecar.kzgProofs.length,
      });
    }
@@ -353,7 +371,7 @@ export async function validateBlockDataColumnSidecars(
         {
           code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID,
           slot: blockSlot,
-
+          columnIndex: columnSidecar.index,
         },
         "DataColumnSidecar has invalid inclusion proof"
       );
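
For reference, the per-sidecar shape checks added to validateBlockDataColumnSidecars above (alongside the new header-equality check against the first sidecar) boil down to four invariants. The helper below is illustrative only, with simplified types; it is not package code:

type DataColumnSidecarLike = {
  index: number;
  column: unknown[];
  kzgCommitments: unknown[];
  kzgProofs: unknown[];
};

function sidecarShapeErrors(sidecar: DataColumnSidecarLike, blockBlobCount: number, numberOfColumns: number): string[] {
  const errors: string[] = [];
  if (sidecar.index >= numberOfColumns) errors.push("INVALID_INDEX");
  if (sidecar.column.length !== blockBlobCount) errors.push("INCORRECT_CELL_COUNT");
  if (sidecar.column.length !== sidecar.kzgCommitments.length) errors.push("INCORRECT_KZG_COMMITMENTS_COUNT");
  if (sidecar.column.length !== sidecar.kzgProofs.length) errors.push("INCORRECT_KZG_PROOF_COUNT");
  return errors;
}
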

package/src/index.ts CHANGED
@@ -3,6 +3,8 @@
 export type {RestApiServerMetrics, RestApiServerModules, RestApiServerOpts} from "./api/rest/base.js";
 export {RestApiServer} from "./api/rest/base.js";
 export {checkAndPersistAnchorState, initStateFromDb, initStateFromEth1} from "./chain/index.js";
+export {DbCPStateDatastore} from "./chain/stateCache/datastore/db.js";
+export {FileCPStateDatastore} from "./chain/stateCache/datastore/file.js";
 export {BeaconDb, type IBeaconDb} from "./db/index.js";
 export {Eth1Provider, type IEth1Provider} from "./eth1/index.js";
 // Export metrics utilities to de-duplicate validator metrics
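
With these exports, downstream consumers can reach the checkpoint-state datastores from the package root. A minimal illustrative usage (constructor arguments are not shown in this diff, so the instance is taken as a parameter):

import {DbCPStateDatastore, FileCPStateDatastore} from "@lodestar/beacon-node";

// Both classes implement CPStateDatastore (see the types.ts hunk above), so either works here.
async function latestSafeStateBytes(datastore: DbCPStateDatastore | FileCPStateDatastore): Promise<Uint8Array | null> {
  return datastore.readLatestSafe();
}
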
@@ -1878,7 +1878,7 @@ export function createLodestarMetrics(
     fetchKeys: register.histogram({
       name: "lodestar_prune_history_fetch_keys_time_seconds",
       help: "Time to fetch keys in seconds",
-      buckets: [0.001, 0.01, 0.1, 1],
+      buckets: [0.001, 0.01, 0.1, 0.3, 0.5, 1],
     }),
 
     pruneKeys: register.histogram({
@@ -553,7 +553,11 @@ export class NetworkCore implements INetworkCore {
     // On fork boundary transition
     if (epoch === nextBoundaryEpoch) {
       // updateEth2Field() MUST be called with clock epoch, onEpoch event is emitted in response to clock events
-      this.metadata.updateEth2Field(epoch);
+      const {forkDigest} = this.metadata.updateEth2Field(epoch);
+      // Update local status to reflect the new fork digest, otherwise we will disconnect peers that re-status us
+      // right after the fork transition due to incompatible forks as our fork digest is stale since we only
+      // update it once we import a new head or when emitting update status event.
+      this.statusCache.update({...this.statusCache.get(), forkDigest});
       this.reqResp.registerProtocolsAtBoundary(nextBoundary);
     }
 
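
This works because MetadataController.updateEth2Field now returns the ENRForkID (see the metadata.ts hunk later in this diff). A simplified sketch of the pattern, with illustrative types that are not the package's own:

type ForkDigest = Uint8Array;
type Status = {forkDigest: ForkDigest; headSlot: number};

interface StatusCacheLike {
  get(): Status;
  update(status: Status): void;
}

// On the fork boundary epoch: recompute the fork digest and refresh the cached Status in the
// same step, so peers that re-status us right after the transition see the new digest.
function onForkBoundary(
  epoch: number,
  updateEth2Field: (epoch: number) => {forkDigest: ForkDigest},
  statusCache: StatusCacheLike
): void {
  const {forkDigest} = updateEth2Field(epoch);
  statusCache.update({...statusCache.get(), forkDigest});
}
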
@@ -1,5 +1,3 @@
-import fs from "node:fs";
-import path from "node:path";
 import worker from "node:worker_threads";
 import {privateKeyFromProtobuf} from "@libp2p/crypto/keys";
 import {peerIdFromPrivateKey} from "@libp2p/peer-id";
@@ -11,7 +9,7 @@ import {RegistryMetricCreator, collectNodeJSMetrics} from "../../metrics/index.j
 import {AsyncIterableBridgeCaller, AsyncIterableBridgeHandler} from "../../util/asyncIterableToEvents.js";
 import {Clock} from "../../util/clock.js";
 import {peerIdToString} from "../../util/peerId.js";
-import {
+import {ProfileThread, profileThread, writeHeapSnapshot} from "../../util/profile.js";
 import {wireEventsOnWorkerThread} from "../../util/workerEvents.js";
 import {NetworkEventBus, NetworkEventData, networkEventDirection} from "../events.js";
 import {
@@ -100,8 +98,13 @@ const core = await NetworkCore.init({
   metricsRegistry: metricsRegister,
   events,
   clock,
-  getReqRespHandler: (method) => (req, peerId) =>
-    reqRespBridgeRespCaller.getAsyncIterable({
+  getReqRespHandler: (method) => (req, peerId, peerClient) =>
+    reqRespBridgeRespCaller.getAsyncIterable({
+      method,
+      req,
+      peerId: peerIdToString(peerId),
+      peerClient,
+    }),
   activeValidatorCount: workerData.activeValidatorCount,
   initialStatus: workerData.initialStatus,
   initialCustodyGroupCount: workerData.initialCustodyGroupCount,
@@ -157,10 +160,7 @@ const libp2pWorkerApi: NetworkWorkerApi = {
   dumpDiscv5KadValues: () => core.dumpDiscv5KadValues(),
   dumpMeshPeers: () => core.dumpMeshPeers(),
   writeProfile: async (durationMs: number, dirpath: string) => {
-
-    const filePath = path.join(dirpath, `network_thread_${new Date().toISOString()}.cpuprofile`);
-    fs.writeFileSync(filePath, profile);
-    return filePath;
+    return profileThread(ProfileThread.NETWORK, durationMs, dirpath);
   },
   writeDiscv5Profile: async (durationMs: number, dirpath: string) => {
     return core.writeDiscv5Profile(durationMs, dirpath);
@@ -73,7 +73,7 @@ export class WorkerNetworkCore implements INetworkCore {
     // Handles ReqResp response from worker and calls async generator in main thread
     this.reqRespBridgeRespHandler = new AsyncIterableBridgeHandler(
       getReqRespBridgeRespEvents(this.reqRespBridgeEventBus),
-      (data) => modules.getReqRespHandler(data.method)(data.req, peerIdFromString(data.peerId))
+      (data) => modules.getReqRespHandler(data.method)(data.req, peerIdFromString(data.peerId), data.peerClient)
     );
 
     wireEventsOnMainThread<NetworkEventData>(
@@ -1,5 +1,3 @@
-import fs from "node:fs";
-import path from "node:path";
 import worker from "node:worker_threads";
 import {privateKeyFromProtobuf} from "@libp2p/crypto/keys";
 import {peerIdFromPrivateKey} from "@libp2p/peer-id";
@@ -14,7 +12,7 @@ import {Gauge} from "@lodestar/utils";
 import {RegistryMetricCreator} from "../../metrics/index.js";
 import {collectNodeJSMetrics} from "../../metrics/nodeJsMetrics.js";
 import {Clock} from "../../util/clock.js";
-import {
+import {ProfileThread, profileThread, writeHeapSnapshot} from "../../util/profile.js";
 import {Discv5WorkerApi, Discv5WorkerData} from "./types.js";
 import {ENRRelevance, enrRelevance} from "./utils.js";
 
@@ -108,10 +106,7 @@ const module: Discv5WorkerApi = {
     return (await metricsRegistry?.metrics()) ?? "";
   },
   writeProfile: async (durationMs: number, dirpath: string) => {
-
-    const filePath = path.join(dirpath, `discv5_thread_${new Date().toISOString()}.cpuprofile`);
-    fs.writeFileSync(filePath, profile);
-    return filePath;
+    return profileThread(ProfileThread.DISC5, durationMs, dirpath);
   },
   writeHeapSnapshot: async (prefix: string, dirpath: string) => {
     return writeHeapSnapshot(prefix, dirpath);
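
Both worker threads now delegate CPU profiling to a shared profileThread helper in util/profile.ts. That file's contents are not shown in this diff; the sketch below only approximates the shape the two call sites rely on (the enum string values and the inspector-based sampling are assumptions, not the actual implementation):

import fs from "node:fs";
import path from "node:path";
import inspector from "node:inspector/promises";

export enum ProfileThread {
  NETWORK = "network_thread",
  DISC5 = "discv5_thread",
}

export async function profileThread(thread: ProfileThread, durationMs: number, dirpath: string): Promise<string> {
  // Sample the current thread's CPU profile for durationMs, then write it next to the other profiles
  const session = new inspector.Session();
  session.connect();
  await session.post("Profiler.enable");
  await session.post("Profiler.start");
  await new Promise((resolve) => setTimeout(resolve, durationMs));
  const {profile} = await session.post("Profiler.stop");
  session.disconnect();

  const filePath = path.join(dirpath, `${thread}_${new Date().toISOString()}.cpuprofile`);
  fs.writeFileSync(filePath, JSON.stringify(profile));
  return filePath;
}
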

package/src/network/events.ts CHANGED
@@ -29,7 +29,7 @@ export type NetworkEventData = {
     clientAgent: string;
   };
   [NetworkEvent.peerDisconnected]: {peer: PeerIdStr};
-  [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId};
+  [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId; peerClient: string};
   [NetworkEvent.pendingGossipsubMessage]: PendingGossipsubMessage;
   [NetworkEvent.gossipMessageValidationResult]: {
     msgId: string;
@@ -1,6 +1,6 @@
 import {Message} from "@libp2p/interface";
-import {compress, uncompress} from "snappyjs";
 import xxhashFactory from "xxhash-wasm";
+import {compress, uncompress} from "#snappy";
 import {digest} from "@chainsafe/as-sha256";
 import {RPC} from "@chainsafe/libp2p-gossipsub/message";
 import {DataTransform} from "@chainsafe/libp2p-gossipsub/types";
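
"#snappy" is a Node.js subpath import, so its concrete target is defined in package.json (which also changed in this release but is not shown here); the new network/gossip/snappy_bun module in lib suggests a runtime-conditional mapping. Whatever it resolves to must keep the compress/uncompress surface previously imported from snappyjs. An illustrative round-trip, written as if inside the package since subpath imports only resolve internally:

import {compress, uncompress} from "#snappy";

const payload = new TextEncoder().encode("hello gossip");
const roundTrip = uncompress(compress(payload));
console.log(roundTrip.byteLength === payload.byteLength); // true
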

package/src/network/metadata.ts CHANGED
@@ -126,7 +126,7 @@ export class MetadataController {
    * 2. Network MUST call this method on fork transition.
    * Current Clock implementation ensures no race conditions, epoch is correct if re-fetched
    */
-  updateEth2Field(epoch: Epoch):
+  updateEth2Field(epoch: Epoch): phase0.ENRForkID {
     const config = this.networkConfig.config;
     const enrForkId = getENRForkID(config, epoch);
     const {forkDigest, nextForkVersion, nextForkEpoch} = enrForkId;
@@ -143,6 +143,8 @@ export class MetadataController {
       : ssz.ForkDigest.defaultValue();
     this.onSetValue(ENRKey.nfd, nextForkDigest);
     this.logger.debug("Updated nfd field in ENR", {nextForkDigest: toHex(nextForkDigest)});
+
+    return enrForkId;
   }
 }
 

package/src/network/options.ts CHANGED

@@ -391,8 +391,8 @@ export class PeerDiscovery {
     // tcp multiaddr is known to be be present, checked inside the worker
     const multiaddrTCP = enr.getLocationMultiaddr(ENRKey.tcp);
     if (!multiaddrTCP) {
-      this.logger.
-      this.metrics?.discovery.discoveredStatus.inc({status: DiscoveredPeerStatus.
+      this.logger.warn("Discv5 worker sent enr without tcp multiaddr", {enr: enr.encodeTxt()});
+      this.metrics?.discovery.discoveredStatus.inc({status: DiscoveredPeerStatus.no_multiaddrs});
       return;
     }
     // Are this fields mandatory?
@@ -39,6 +39,7 @@ import {
   BlockError,
   BlockErrorCode,
   BlockGossipError,
+  DataColumnSidecarErrorCode,
   DataColumnSidecarGossipError,
   GossipAction,
   GossipActionError,
@@ -304,7 +305,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
         ...blockInput.getLogMeta(),
         index: dataColumnSidecar.index,
       });
-
+      throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
+        code: DataColumnSidecarErrorCode.ALREADY_KNOWN,
+        columnIndex: dataColumnSidecar.index,
+        slot,
+      });
     }
   }
 
@@ -19,6 +19,7 @@ import {callInNextEventLoop} from "../../util/eventLoop.js";
 import {NetworkCoreMetrics} from "../core/metrics.js";
 import {INetworkEventBus, NetworkEvent} from "../events.js";
 import {MetadataController} from "../metadata.js";
+import {ClientKind} from "../peers/client.ts";
 import {PeersData} from "../peers/peersData.js";
 import {IPeerRpcScoreStore, PeerAction} from "../peers/score/index.js";
 import {StatusCache} from "../statusCache.js";
@@ -300,10 +301,11 @@ export class ReqRespBeaconNode extends ReqResp {
   }
 
   protected onIncomingRequestBody(request: RequestTypedContainer, peer: PeerId): void {
+    const peerClient = this.peersData.getPeerKind(peer.toString()) ?? ClientKind.Unknown;
     // Allow onRequest to return and close the stream
     // For Goodbye there may be a race condition where the listener of `receivedGoodbye`
     // disconnects in the same synchronous call, preventing the stream from ending cleanly
-    callInNextEventLoop(() => this.networkEventBus.emit(NetworkEvent.reqRespRequest, {request, peer}));
+    callInNextEventLoop(() => this.networkEventBus.emit(NetworkEvent.reqRespRequest, {request, peer, peerClient}));
   }
 
   protected onIncomingRequest(peerId: PeerId, protocol: ProtocolDescriptor): void {
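
Combined with the events.ts and worker-bridge changes above, reqRespRequest events now carry the requesting peer's client kind end to end. An illustrative listener (shapes simplified; the handler body is a sketch, not package code):

import type {PeerId} from "@libp2p/interface";

type ReqRespRequestEvent = {request: {method: string}; peer: PeerId; peerClient: string};

declare const networkEventBus: {
  on(event: "reqRespRequest", handler: (data: ReqRespRequestEvent) => void): void;
};

networkEventBus.on("reqRespRequest", ({request, peer, peerClient}) => {
  console.log("Incoming req/resp request", {
    method: request.method,
    peer: peer.toString(),
    client: peerClient, // e.g. "Lighthouse", "Prysm", or ClientKind.Unknown
  });
});
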
@@ -1,25 +1,40 @@
+import {PeerId} from "@libp2p/interface";
 import {BeaconConfig} from "@lodestar/config";
-import {GENESIS_SLOT, isForkPostDeneb} from "@lodestar/params";
+import {GENESIS_SLOT, isForkPostDeneb, isForkPostFulu} from "@lodestar/params";
 import {RespStatus, ResponseError, ResponseOutgoing} from "@lodestar/reqresp";
 import {computeEpochAtSlot} from "@lodestar/state-transition";
 import {deneb, phase0} from "@lodestar/types";
 import {fromHex} from "@lodestar/utils";
 import {IBeaconChain} from "../../../chain/index.js";
 import {IBeaconDb} from "../../../db/index.js";
+import {prettyPrintPeerId} from "../../util.ts";
 
 // TODO: Unit test
 
 export async function* onBeaconBlocksByRange(
   request: phase0.BeaconBlocksByRangeRequest,
   chain: IBeaconChain,
-  db: IBeaconDb
+  db: IBeaconDb,
+  peerId: PeerId,
+  peerClient: string
 ): AsyncIterable<ResponseOutgoing> {
   const {startSlot, count} = validateBeaconBlocksByRangeRequest(chain.config, request);
   const endSlot = startSlot + count;
 
   const finalized = db.blockArchive;
   const unfinalized = db.block;
-
+  // in the case of initializing from a non-finalized state, we don't have the finalized block so this api does not work
+  // chain.forkChoice.getFinalizeBlock().slot
+  const finalizedSlot = chain.forkChoice.getFinalizedCheckpointSlot();
+
+  const forkName = chain.config.getForkName(startSlot);
+  if (isForkPostFulu(forkName) && startSlot < chain.earliestAvailableSlot) {
+    chain.logger.verbose("Peer did not respect earliestAvailableSlot for BeaconBlocksByRange", {
+      peer: prettyPrintPeerId(peerId),
+      client: peerClient,
+    });
+    return;
+  }
 
   // Finalized range of blocks
   if (startSlot <= finalizedSlot) {
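
A condensed sketch of the post-Fulu serving rule the handler above now applies; the helper is illustrative, not package code:

// Post-Fulu, a node only guarantees data back to its advertised earliestAvailableSlot;
// requests starting before it are not answered (the handler simply returns no chunks).
function shouldServeByRange(isPostFulu: boolean, startSlot: number, earliestAvailableSlot: number): boolean {
  return !isPostFulu || startSlot >= earliestAvailableSlot;
}

shouldServeByRange(true, 100, 200); // false: peer asked below earliestAvailableSlot
shouldServeByRange(true, 250, 200); // true
shouldServeByRange(false, 100, 200); // true: pre-Fulu, no restriction
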