@lodestar/beacon-node 1.36.0-dev.598c1ec54e → 1.36.0-dev.6832b029e7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/api/impl/beacon/blocks/index.d.ts.map +1 -1
- package/lib/api/impl/beacon/blocks/index.js +41 -22
- package/lib/api/impl/beacon/blocks/index.js.map +1 -1
- package/lib/api/impl/lodestar/index.d.ts +5 -0
- package/lib/api/impl/lodestar/index.d.ts.map +1 -1
- package/lib/api/impl/lodestar/index.js +35 -10
- package/lib/api/impl/lodestar/index.js.map +1 -1
- package/lib/api/impl/node/utils.js +1 -1
- package/lib/api/impl/node/utils.js.map +1 -1
- package/lib/chain/archiveStore/archiveStore.d.ts +9 -0
- package/lib/chain/archiveStore/archiveStore.d.ts.map +1 -1
- package/lib/chain/archiveStore/archiveStore.js +24 -0
- package/lib/chain/archiveStore/archiveStore.js.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts +7 -0
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.d.ts.map +1 -1
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js +31 -5
- package/lib/chain/archiveStore/strategies/frequencyStateArchiveStrategy.js.map +1 -1
- package/lib/chain/beaconProposerCache.d.ts +3 -0
- package/lib/chain/beaconProposerCache.d.ts.map +1 -1
- package/lib/chain/beaconProposerCache.js +4 -6
- package/lib/chain/beaconProposerCache.js.map +1 -1
- package/lib/chain/chain.d.ts +5 -2
- package/lib/chain/chain.d.ts.map +1 -1
- package/lib/chain/chain.js +32 -16
- package/lib/chain/chain.js.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.d.ts +23 -13
- package/lib/chain/errors/dataColumnSidecarError.d.ts.map +1 -1
- package/lib/chain/errors/dataColumnSidecarError.js +5 -0
- package/lib/chain/errors/dataColumnSidecarError.js.map +1 -1
- package/lib/chain/errors/voluntaryExitError.d.ts +16 -2
- package/lib/chain/errors/voluntaryExitError.d.ts.map +1 -1
- package/lib/chain/errors/voluntaryExitError.js +22 -1
- package/lib/chain/errors/voluntaryExitError.js.map +1 -1
- package/lib/chain/forkChoice/index.d.ts +9 -1
- package/lib/chain/forkChoice/index.d.ts.map +1 -1
- package/lib/chain/forkChoice/index.js +109 -4
- package/lib/chain/forkChoice/index.js.map +1 -1
- package/lib/chain/interface.d.ts +2 -0
- package/lib/chain/interface.d.ts.map +1 -1
- package/lib/chain/options.d.ts +0 -2
- package/lib/chain/options.d.ts.map +1 -1
- package/lib/chain/options.js +2 -2
- package/lib/chain/options.js.map +1 -1
- package/lib/chain/stateCache/datastore/db.d.ts +12 -0
- package/lib/chain/stateCache/datastore/db.d.ts.map +1 -1
- package/lib/chain/stateCache/datastore/db.js +70 -0
- package/lib/chain/stateCache/datastore/db.js.map +1 -1
- package/lib/chain/stateCache/datastore/file.d.ts +1 -0
- package/lib/chain/stateCache/datastore/file.d.ts.map +1 -1
- package/lib/chain/stateCache/datastore/file.js +7 -0
- package/lib/chain/stateCache/datastore/file.js.map +1 -1
- package/lib/chain/stateCache/datastore/types.d.ts +1 -0
- package/lib/chain/stateCache/datastore/types.d.ts.map +1 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.d.ts +16 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.d.ts.map +1 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.js +31 -1
- package/lib/chain/stateCache/persistentCheckpointsCache.js.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.d.ts.map +1 -1
- package/lib/chain/validation/dataColumnSidecar.js +45 -17
- package/lib/chain/validation/dataColumnSidecar.js.map +1 -1
- package/lib/chain/validation/voluntaryExit.d.ts.map +1 -1
- package/lib/chain/validation/voluntaryExit.js +5 -4
- package/lib/chain/validation/voluntaryExit.js.map +1 -1
- package/lib/index.d.ts +2 -0
- package/lib/index.d.ts.map +1 -1
- package/lib/index.js +2 -0
- package/lib/index.js.map +1 -1
- package/lib/metrics/metrics/lodestar.d.ts +10 -0
- package/lib/metrics/metrics/lodestar.d.ts.map +1 -1
- package/lib/metrics/metrics/lodestar.js +15 -1
- package/lib/metrics/metrics/lodestar.js.map +1 -1
- package/lib/network/core/networkCore.d.ts.map +1 -1
- package/lib/network/core/networkCore.js +5 -1
- package/lib/network/core/networkCore.js.map +1 -1
- package/lib/network/core/networkCoreWorker.js +8 -8
- package/lib/network/core/networkCoreWorker.js.map +1 -1
- package/lib/network/core/networkCoreWorkerHandler.js +1 -1
- package/lib/network/core/networkCoreWorkerHandler.js.map +1 -1
- package/lib/network/discv5/worker.js +2 -7
- package/lib/network/discv5/worker.js.map +1 -1
- package/lib/network/events.d.ts +1 -0
- package/lib/network/events.d.ts.map +1 -1
- package/lib/network/gossip/encoding.js +1 -1
- package/lib/network/gossip/encoding.js.map +1 -1
- package/lib/network/gossip/gossipsub.d.ts.map +1 -1
- package/lib/network/gossip/gossipsub.js +6 -1
- package/lib/network/gossip/gossipsub.js.map +1 -1
- package/lib/network/gossip/interface.d.ts +2 -0
- package/lib/network/gossip/interface.d.ts.map +1 -1
- package/lib/network/gossip/snappy_bun.d.ts +3 -0
- package/lib/network/gossip/snappy_bun.d.ts.map +1 -0
- package/lib/network/gossip/snappy_bun.js +3 -0
- package/lib/network/gossip/snappy_bun.js.map +1 -0
- package/lib/network/metadata.d.ts +1 -1
- package/lib/network/metadata.d.ts.map +1 -1
- package/lib/network/metadata.js +1 -0
- package/lib/network/metadata.js.map +1 -1
- package/lib/network/options.d.ts +0 -1
- package/lib/network/options.d.ts.map +1 -1
- package/lib/network/options.js.map +1 -1
- package/lib/network/peers/discover.js +2 -2
- package/lib/network/peers/discover.js.map +1 -1
- package/lib/network/processor/gossipHandlers.d.ts.map +1 -1
- package/lib/network/processor/gossipHandlers.js +28 -8
- package/lib/network/processor/gossipHandlers.js.map +1 -1
- package/lib/network/processor/gossipValidatorFn.d.ts.map +1 -1
- package/lib/network/processor/gossipValidatorFn.js +3 -2
- package/lib/network/processor/gossipValidatorFn.js.map +1 -1
- package/lib/network/processor/types.d.ts +2 -0
- package/lib/network/processor/types.d.ts.map +1 -1
- package/lib/network/reqresp/ReqRespBeaconNode.d.ts.map +1 -1
- package/lib/network/reqresp/ReqRespBeaconNode.js +3 -1
- package/lib/network/reqresp/ReqRespBeaconNode.js.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts +2 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js +14 -3
- package/lib/network/reqresp/handlers/beaconBlocksByRange.js.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts +2 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js +9 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRange.js.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.d.ts +2 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.d.ts.map +1 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.js +9 -1
- package/lib/network/reqresp/handlers/dataColumnSidecarsByRoot.js.map +1 -1
- package/lib/network/reqresp/handlers/index.js +6 -6
- package/lib/network/reqresp/handlers/index.js.map +1 -1
- package/lib/network/reqresp/types.d.ts +1 -0
- package/lib/network/reqresp/types.d.ts.map +1 -1
- package/lib/node/nodejs.d.ts +2 -1
- package/lib/node/nodejs.d.ts.map +1 -1
- package/lib/node/nodejs.js +2 -1
- package/lib/node/nodejs.js.map +1 -1
- package/lib/sync/range/range.d.ts.map +1 -1
- package/lib/sync/range/range.js +2 -1
- package/lib/sync/range/range.js.map +1 -1
- package/lib/sync/utils/downloadByRange.d.ts +58 -13
- package/lib/sync/utils/downloadByRange.d.ts.map +1 -1
- package/lib/sync/utils/downloadByRange.js +201 -82
- package/lib/sync/utils/downloadByRange.js.map +1 -1
- package/lib/sync/utils/downloadByRoot.d.ts.map +1 -1
- package/lib/sync/utils/downloadByRoot.js +7 -7
- package/lib/sync/utils/downloadByRoot.js.map +1 -1
- package/lib/sync/utils/remoteSyncType.d.ts +2 -1
- package/lib/sync/utils/remoteSyncType.d.ts.map +1 -1
- package/lib/sync/utils/remoteSyncType.js +19 -4
- package/lib/sync/utils/remoteSyncType.js.map +1 -1
- package/lib/util/blobs.d.ts +1 -1
- package/lib/util/blobs.d.ts.map +1 -1
- package/lib/util/blobs.js +53 -20
- package/lib/util/blobs.js.map +1 -1
- package/lib/util/profile.d.ts +6 -4
- package/lib/util/profile.d.ts.map +1 -1
- package/lib/util/profile.js +40 -3
- package/lib/util/profile.js.map +1 -1
- package/lib/util/sszBytes.d.ts +2 -0
- package/lib/util/sszBytes.d.ts.map +1 -1
- package/lib/util/sszBytes.js +25 -0
- package/lib/util/sszBytes.js.map +1 -1
- package/package.json +32 -25
- package/src/api/impl/beacon/blocks/index.ts +47 -25
- package/src/api/impl/lodestar/index.ts +42 -10
- package/src/api/impl/node/utils.ts +1 -1
- package/src/chain/archiveStore/archiveStore.ts +27 -0
- package/src/chain/archiveStore/strategies/frequencyStateArchiveStrategy.ts +32 -5
- package/src/chain/beaconProposerCache.ts +4 -8
- package/src/chain/chain.ts +48 -23
- package/src/chain/errors/dataColumnSidecarError.ts +27 -13
- package/src/chain/errors/voluntaryExitError.ts +30 -2
- package/src/chain/forkChoice/index.ts +178 -2
- package/src/chain/interface.ts +2 -0
- package/src/chain/options.ts +2 -3
- package/src/chain/stateCache/datastore/db.ts +89 -1
- package/src/chain/stateCache/datastore/file.ts +8 -0
- package/src/chain/stateCache/datastore/types.ts +1 -0
- package/src/chain/stateCache/persistentCheckpointsCache.ts +45 -2
- package/src/chain/validation/dataColumnSidecar.ts +54 -19
- package/src/chain/validation/voluntaryExit.ts +14 -4
- package/src/index.ts +2 -0
- package/src/metrics/metrics/lodestar.ts +18 -1
- package/src/network/core/networkCore.ts +5 -1
- package/src/network/core/networkCoreWorker.ts +9 -9
- package/src/network/core/networkCoreWorkerHandler.ts +1 -1
- package/src/network/discv5/worker.ts +2 -7
- package/src/network/events.ts +1 -1
- package/src/network/gossip/encoding.ts +1 -1
- package/src/network/gossip/gossipsub.ts +7 -1
- package/src/network/gossip/interface.ts +2 -0
- package/src/network/gossip/snappy_bun.ts +2 -0
- package/src/network/metadata.ts +3 -1
- package/src/network/options.ts +0 -1
- package/src/network/peers/discover.ts +2 -2
- package/src/network/processor/gossipHandlers.ts +31 -7
- package/src/network/processor/gossipValidatorFn.ts +15 -2
- package/src/network/processor/types.ts +2 -0
- package/src/network/reqresp/ReqRespBeaconNode.ts +3 -1
- package/src/network/reqresp/handlers/beaconBlocksByRange.ts +18 -3
- package/src/network/reqresp/handlers/dataColumnSidecarsByRange.ts +13 -1
- package/src/network/reqresp/handlers/dataColumnSidecarsByRoot.ts +13 -1
- package/src/network/reqresp/handlers/index.ts +6 -6
- package/src/network/reqresp/types.ts +1 -0
- package/src/node/nodejs.ts +3 -0
- package/src/sync/range/range.ts +2 -1
- package/src/sync/utils/downloadByRange.ts +259 -103
- package/src/sync/utils/downloadByRoot.ts +7 -7
- package/src/sync/utils/remoteSyncType.ts +23 -4
- package/src/util/blobs.ts +64 -20
- package/src/util/profile.ts +45 -3
- package/src/util/sszBytes.ts +30 -0
package/src/chain/stateCache/persistentCheckpointsCache.ts
CHANGED
@@ -17,8 +17,10 @@ import {MapTracker} from "./mapMetrics.js";
 import {BlockStateCache, CacheItemType, CheckpointHex, CheckpointStateCache} from "./types.js";

 export type PersistentCheckpointStateCacheOpts = {
-  /** Keep max n
+  /** Keep max n state epochs in memory, persist the rest to disk */
   maxCPStateEpochsInMemory?: number;
+  /** Keep max n state epochs on disk */
+  maxCPStateEpochsOnDisk?: number;
 };

 type PersistentCheckpointStateCacheModules = {
@@ -58,6 +60,14 @@ type LoadedStateBytesData = {persistedKey: DatastoreKey; stateBytes: Uint8Array}
  */
 export const DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY = 3;

+/**
+ * By default we don't prune any persistent checkpoint states as it's not safe to delete them during
+ * long non-finality as we don't know the state of the chain and there could be a deep (hundreds of epochs) reorg
+ * if there two competing chains with similar weight but we wouldn't have a close enough state to pivot to this chain
+ * and instead require a resync from last finalized checkpoint state which could be very far in the past.
+ */
+export const DEFAULT_MAX_CP_STATE_ON_DISK = Infinity;
+
 // TODO GLOAS: re-evaluate this timing
 const PROCESS_CHECKPOINT_STATES_BPS = 6667;

@@ -104,6 +114,7 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
   private preComputedCheckpoint: string | null = null;
   private preComputedCheckpointHits: number | null = null;
   private readonly maxEpochsInMemory: number;
+  private readonly maxEpochsOnDisk: number;
   private readonly datastore: CPStateDatastore;
   private readonly blockStateCache: BlockStateCache;
   private readonly bufferPool?: BufferPool | null;
@@ -139,10 +150,16 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
     this.logger = logger;
     this.clock = clock;
     this.signal = signal;
+
     if (opts.maxCPStateEpochsInMemory !== undefined && opts.maxCPStateEpochsInMemory < 0) {
       throw new Error("maxEpochsInMemory must be >= 0");
     }
+    if (opts.maxCPStateEpochsOnDisk !== undefined && opts.maxCPStateEpochsOnDisk < 0) {
+      throw new Error("maxCPStateEpochsOnDisk must be >= 0");
+    }
+
     this.maxEpochsInMemory = opts.maxCPStateEpochsInMemory ?? DEFAULT_MAX_CP_STATE_EPOCHS_IN_MEMORY;
+    this.maxEpochsOnDisk = opts.maxCPStateEpochsOnDisk ?? DEFAULT_MAX_CP_STATE_ON_DISK;
     // Specify different datastore for testing
     this.datastore = datastore;
     this.blockStateCache = blockStateCache;
@@ -324,6 +341,7 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
       this.logger.verbose("Added checkpoint state to memory", {epoch: cp.epoch, rootHex: cpHex.rootHex});
     }
     this.epochIndex.getOrDefault(cp.epoch).add(cpHex.rootHex);
+    this.prunePersistedStates();
   }

   /**
@@ -766,13 +784,38 @@ export class PersistentCheckpointStateCache implements CheckpointStateCache {
       this.cache.delete(key);
     }
     this.epochIndex.delete(epoch);
-    this.logger.verbose("Pruned
+    this.logger.verbose("Pruned checkpoint states for epoch", {
       epoch,
       persistCount,
       rootHexes: Array.from(rootHexes).join(","),
     });
   }

+  /**
+   * Prune persisted checkpoint states from disk.
+   * Note that this should handle all possible errors and not throw.
+   */
+  private prunePersistedStates(): void {
+    //                          epochsOnDisk                                epochsInMemory
+    // |----------------------------------------------------------|----------------------|
+    const maxTrackedEpochs = this.maxEpochsOnDisk + this.maxEpochsInMemory;
+    if (this.epochIndex.size <= maxTrackedEpochs) {
+      return;
+    }
+
+    const sortedEpochs = Array.from(this.epochIndex.keys()).sort((a, b) => a - b);
+    const pruneEpochs = sortedEpochs.slice(0, sortedEpochs.length - maxTrackedEpochs);
+    for (const epoch of pruneEpochs) {
+      this.deleteAllEpochItems(epoch).catch((e) =>
+        this.logger.debug(
+          "Error delete all epoch items",
+          {epoch, maxEpochsOnDisk: this.maxEpochsOnDisk, maxEpochsInMemory: this.maxEpochsInMemory},
+          e as Error
+        )
+      );
+    }
+  }
+
   /**
    * Serialize validators to bytes leveraging the buffer pool to save memory allocation.
    * - As monitored on holesky as of Jan 2024, it helps save ~500ms state reload time (4.3s vs 3.8s)
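The on-disk cap composes with the in-memory cap: the cache now tracks at most maxEpochsInMemory + maxEpochsOnDisk epochs and, once epochIndex grows beyond that window, deletes the oldest epochs from the datastore. A minimal standalone sketch of that selection logic, with epochIndex modeled as a plain Map of epoch to checkpoint root hexes (mirroring how it is used above):

// Sketch only: mirrors the window arithmetic of prunePersistedStates() in the diff above.
function selectEpochsToPrune(
  epochIndex: Map<number, Set<string>>,
  maxEpochsInMemory: number,
  maxEpochsOnDisk: number
): number[] {
  // |------------------------ epochsOnDisk ------------------------|-- epochsInMemory --|
  const maxTrackedEpochs = maxEpochsOnDisk + maxEpochsInMemory;
  if (epochIndex.size <= maxTrackedEpochs) {
    return [];
  }
  // Oldest epochs fall out of the tracked window first
  const sortedEpochs = Array.from(epochIndex.keys()).sort((a, b) => a - b);
  return sortedEpochs.slice(0, sortedEpochs.length - maxTrackedEpochs);
}

// With the default DEFAULT_MAX_CP_STATE_ON_DISK = Infinity the window never fills,
// so nothing is selected and the previous never-prune behavior is preserved.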
package/src/chain/validation/dataColumnSidecar.ts
CHANGED
@@ -1,10 +1,14 @@
-import {ChainConfig} from "@lodestar/config";
+import {ChainConfig, ChainForkConfig} from "@lodestar/config";
 import {
   KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH,
   KZG_COMMITMENTS_SUBTREE_INDEX,
   NUMBER_OF_COLUMNS,
 } from "@lodestar/params";
-import {
+import {
+  computeEpochAtSlot,
+  computeStartSlotAtEpoch,
+  getBlockHeaderProposerSignatureSet,
+} from "@lodestar/state-transition";
 import {Root, Slot, SubnetID, fulu, ssz} from "@lodestar/types";
 import {toRootHex, verifyMerkleBranch} from "@lodestar/utils";
 import {Metrics} from "../../metrics/metrics.js";
@@ -29,13 +33,13 @@ export async function validateGossipDataColumnSidecar(
   const blockHeader = dataColumnSidecar.signedBlockHeader.message;

   // 1) [REJECT] The sidecar is valid as verified by verify_data_column_sidecar
-  verifyDataColumnSidecar(dataColumnSidecar);
+  verifyDataColumnSidecar(chain.config, dataColumnSidecar);

   // 2) [REJECT] The sidecar is for the correct subnet -- i.e. compute_subnet_for_data_column_sidecar(sidecar.index) == subnet_id
   if (computeSubnetForDataColumnSidecar(chain.config, dataColumnSidecar) !== gossipSubnet) {
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_SUBNET,
-
+      columnIndex: dataColumnSidecar.index,
       gossipSubnet: gossipSubnet,
     });
   }
@@ -152,7 +156,7 @@ export async function validateGossipDataColumnSidecar(
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   }

@@ -169,7 +173,7 @@ export async function validateGossipDataColumnSidecar(
       throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
         code: DataColumnSidecarErrorCode.INVALID_KZG_PROOF,
         slot: blockHeader.slot,
-
+        columnIndex: dataColumnSidecar.index,
       });
     } finally {
       kzgProofTimer?.();
@@ -184,12 +188,12 @@ export async function validateGossipDataColumnSidecar(
  * SPEC FUNCTION
  * https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.4/specs/fulu/p2p-interface.md#verify_data_column_sidecar
  */
-function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): void {
+function verifyDataColumnSidecar(config: ChainForkConfig, dataColumnSidecar: fulu.DataColumnSidecar): void {
   if (dataColumnSidecar.index >= NUMBER_OF_COLUMNS) {
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.INVALID_INDEX,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
     });
   }

@@ -197,7 +201,20 @@ function verifyDataColumnSidecar(dataColumnSidecar: fulu.DataColumnSidecar): voi
     throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
       code: DataColumnSidecarErrorCode.NO_COMMITMENTS,
       slot: dataColumnSidecar.signedBlockHeader.message.slot,
-
+      columnIndex: dataColumnSidecar.index,
+    });
+  }
+
+  const epoch = computeEpochAtSlot(dataColumnSidecar.signedBlockHeader.message.slot);
+  const maxBlobsPerBlock = config.getMaxBlobsPerBlock(epoch);
+
+  if (dataColumnSidecar.kzgCommitments.length > maxBlobsPerBlock) {
+    throw new DataColumnSidecarGossipError(GossipAction.REJECT, {
+      code: DataColumnSidecarErrorCode.TOO_MANY_KZG_COMMITMENTS,
+      slot: dataColumnSidecar.signedBlockHeader.message.slot,
+      columnIndex: dataColumnSidecar.index,
+      count: dataColumnSidecar.kzgCommitments.length,
+      limit: maxBlobsPerBlock,
     });
   }

@@ -276,7 +293,6 @@ export async function validateBlockDataColumnSidecars(
       "Block has no blob commitments but data column sidecars were provided"
     );
   }
-
   // Hash the first sidecar block header and compare the rest via (cheaper) equality
   const firstSidecarBlockHeader = dataColumnSidecars[0].signedBlockHeader.message;
   const firstBlockRoot = ssz.phase0.BeaconBlockHeader.hashTreeRoot(firstSidecarBlockHeader);
@@ -285,7 +301,7 @@ export async function validateBlockDataColumnSidecars(
       {
         code: DataColumnSidecarErrorCode.INCORRECT_BLOCK,
         slot: blockSlot,
-
+        columnIndex: 0,
         expected: toRootHex(blockRoot),
         actual: toRootHex(firstBlockRoot),
       },
@@ -300,33 +316,52 @@ export async function validateBlockDataColumnSidecars(
   for (let i = 0; i < dataColumnSidecars.length; i++) {
     const columnSidecar = dataColumnSidecars[i];

+    if (!ssz.phase0.BeaconBlockHeader.equals(firstSidecarBlockHeader, columnSidecar.signedBlockHeader.message)) {
+      throw new DataColumnSidecarValidationError({
+        code: DataColumnSidecarErrorCode.INCORRECT_HEADER_ROOT,
+        slot: blockSlot,
+        expected: toRootHex(blockRoot),
+        actual: toRootHex(ssz.phase0.BeaconBlockHeader.hashTreeRoot(columnSidecar.signedBlockHeader.message)),
+      });
+    }
+
     if (columnSidecar.index >= NUMBER_OF_COLUMNS) {
       throw new DataColumnSidecarValidationError(
         {
           code: DataColumnSidecarErrorCode.INVALID_INDEX,
           slot: blockSlot,
-
+          columnIndex: columnSidecar.index,
         },
         "DataColumnSidecar has invalid index"
       );
     }

-    if (columnSidecar.
+    if (columnSidecar.column.length !== blockBlobCount) {
       throw new DataColumnSidecarValidationError({
-        code: DataColumnSidecarErrorCode.
+        code: DataColumnSidecarErrorCode.INCORRECT_CELL_COUNT,
         slot: blockSlot,
-
+        columnIndex: columnSidecar.index,
         expected: blockBlobCount,
+        actual: columnSidecar.column.length,
+      });
+    }
+
+    if (columnSidecar.column.length !== columnSidecar.kzgCommitments.length) {
+      throw new DataColumnSidecarValidationError({
+        code: DataColumnSidecarErrorCode.INCORRECT_KZG_COMMITMENTS_COUNT,
+        slot: blockSlot,
+        columnIndex: columnSidecar.index,
+        expected: columnSidecar.column.length,
         actual: columnSidecar.kzgCommitments.length,
       });
     }

-    if (columnSidecar.
+    if (columnSidecar.column.length !== columnSidecar.kzgProofs.length) {
       throw new DataColumnSidecarValidationError({
         code: DataColumnSidecarErrorCode.INCORRECT_KZG_PROOF_COUNT,
         slot: blockSlot,
-
-        expected: columnSidecar.
+        columnIndex: columnSidecar.index,
+        expected: columnSidecar.column.length,
         actual: columnSidecar.kzgProofs.length,
       });
     }
@@ -336,7 +371,7 @@ export async function validateBlockDataColumnSidecars(
         {
           code: DataColumnSidecarErrorCode.INCLUSION_PROOF_INVALID,
           slot: blockSlot,
-
+          columnIndex: columnSidecar.index,
         },
         "DataColumnSidecar has invalid inclusion proof"
       );
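verifyDataColumnSidecar now also bounds the sidecar's commitment count by the blob limit of the fork active at the sidecar's slot, which is why it gained the config argument. A reduced sketch of that bound; SLOTS_PER_EPOCH = 32 is the mainnet preset and is assumed here only to keep the example standalone, and the real check throws a TOO_MANY_KZG_COMMITMENTS gossip error instead of returning a boolean:

// Reduced sketch of the new commitment-count bound in verifyDataColumnSidecar().
const SLOTS_PER_EPOCH = 32; // assumption for this standalone example

function exceedsBlobLimit(
  config: {getMaxBlobsPerBlock(epoch: number): number},
  kzgCommitmentsLength: number,
  slot: number
): boolean {
  const epoch = Math.floor(slot / SLOTS_PER_EPOCH); // stand-in for computeEpochAtSlot(slot)
  return kzgCommitmentsLength > config.getMaxBlobsPerBlock(epoch);
}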
package/src/chain/validation/voluntaryExit.ts
CHANGED
@@ -1,6 +1,15 @@
-import {
+import {
+  VoluntaryExitValidity,
+  getVoluntaryExitSignatureSet,
+  getVoluntaryExitValidity,
+} from "@lodestar/state-transition";
 import {phase0} from "@lodestar/types";
-import {
+import {
+  GossipAction,
+  VoluntaryExitError,
+  VoluntaryExitErrorCode,
+  voluntaryExitValidityToErrorCode,
+} from "../errors/index.js";
 import {IBeaconChain} from "../index.js";
 import {RegenCaller} from "../regen/index.js";

@@ -43,9 +52,10 @@ async function validateVoluntaryExit(

   // [REJECT] All of the conditions within process_voluntary_exit pass validation.
   // verifySignature = false, verified in batch below
-
+  const validity = getVoluntaryExitValidity(chain.config.getForkSeq(state.slot), state, voluntaryExit, false);
+  if (validity !== VoluntaryExitValidity.valid) {
     throw new VoluntaryExitError(GossipAction.REJECT, {
-      code:
+      code: voluntaryExitValidityToErrorCode(validity),
     });
   }

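The gossip handler now delegates the process_voluntary_exit conditions to getVoluntaryExitValidity and converts any non-valid verdict into a REJECT via voluntaryExitValidityToErrorCode. A hedged sketch of what that mapping could look like; the enum members other than valid and the error-code names below are placeholders, not the actual values added in voluntaryExitError.ts:

// Placeholder enums for illustration only; the real ones live in
// @lodestar/state-transition and ../errors/voluntaryExitError.ts.
enum VoluntaryExitValidity {
  valid = "valid",
  alreadyExited = "alreadyExited", // hypothetical member
  validatorNotActive = "validatorNotActive", // hypothetical member
}

enum VoluntaryExitErrorCode {
  ALREADY_EXISTS = "VOLUNTARY_EXIT_ALREADY_EXISTS", // hypothetical code
  INVALID = "VOLUNTARY_EXIT_INVALID", // hypothetical code
}

function voluntaryExitValidityToErrorCode(validity: VoluntaryExitValidity): VoluntaryExitErrorCode {
  switch (validity) {
    case VoluntaryExitValidity.alreadyExited:
      return VoluntaryExitErrorCode.ALREADY_EXISTS;
    default:
      return VoluntaryExitErrorCode.INVALID;
  }
}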
package/src/index.ts
CHANGED
@@ -3,6 +3,8 @@
 export type {RestApiServerMetrics, RestApiServerModules, RestApiServerOpts} from "./api/rest/base.js";
 export {RestApiServer} from "./api/rest/base.js";
 export {checkAndPersistAnchorState, initStateFromDb, initStateFromEth1} from "./chain/index.js";
+export {DbCPStateDatastore} from "./chain/stateCache/datastore/db.js";
+export {FileCPStateDatastore} from "./chain/stateCache/datastore/file.js";
 export {BeaconDb, type IBeaconDb} from "./db/index.js";
 export {Eth1Provider, type IEth1Provider} from "./eth1/index.js";
 // Export metrics utilities to de-duplicate validator metrics
package/src/metrics/metrics/lodestar.ts
CHANGED
@@ -1,5 +1,7 @@
 /** biome-ignore-all lint/suspicious/noTemplateCurlyInString: The metric templates requires to have `${}` in a normal string */
 import {NotReorgedReason} from "@lodestar/fork-choice";
+import {ArchiveStoreTask} from "../../chain/archiveStore/archiveStore.js";
+import {FrequencyStateArchiveStep} from "../../chain/archiveStore/strategies/frequencyStateArchiveStrategy.js";
 import {BlockInputSource} from "../../chain/blocks/blockInput/index.js";
 import {JobQueueItemType} from "../../chain/bls/index.js";
 import {AttestationErrorCode, BlockErrorCode} from "../../chain/errors/index.js";
@@ -1420,6 +1422,21 @@ export function createLodestarMetrics(
       },
     },

+    processFinalizedCheckpoint: {
+      durationByTask: register.histogram<{source: ArchiveStoreTask}>({
+        name: "lodestar_process_finalized_checkpoint_seconds",
+        help: "Histogram of time to process finalized checkpoint",
+        buckets: [0.1, 0.5, 1, 2, 4, 8],
+        labelNames: ["source"],
+      }),
+      frequencyStateArchive: register.histogram<{step: FrequencyStateArchiveStep}>({
+        name: "lodestar_process_finalized_checkpoint_frequency_state_archive_seconds",
+        help: "Histogram of FrequencyStateArchive duration by step",
+        buckets: [0.1, 0.5, 1, 2, 4, 8],
+        labelNames: ["step"],
+      }),
+    },
+
     regenFnCallTotal: register.gauge<{entrypoint: RegenFnName; caller: RegenCaller}>({
       name: "lodestar_regen_fn_call_total",
       help: "Total number of calls for regen functions",
@@ -1861,7 +1878,7 @@ export function createLodestarMetrics(
     fetchKeys: register.histogram({
       name: "lodestar_prune_history_fetch_keys_time_seconds",
      help: "Time to fetch keys in seconds",
-      buckets: [0.001, 0.01, 0.1, 1],
+      buckets: [0.001, 0.01, 0.1, 0.3, 0.5, 1],
     }),

     pruneKeys: register.histogram({
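Both new histograms follow the existing labeled register.histogram pattern, so call sites time a task and record the duration under the relevant label. A hedged usage sketch; the startTimer shape and the "archiveState" label value are assumptions for illustration, not taken from this diff:

// Hedged usage sketch for the new processFinalizedCheckpoint metrics.
type Histogram<L extends string> = {startTimer(labels: Record<L, string>): () => void};

async function timeArchiveTask(
  durationByTask: Histogram<"source">,
  task: () => Promise<void>
): Promise<void> {
  // "archiveState" is a hypothetical ArchiveStoreTask value
  const endTimer = durationByTask.startTimer({source: "archiveState"});
  try {
    await task();
  } finally {
    // observes elapsed seconds into lodestar_process_finalized_checkpoint_seconds{source="archiveState"}
    endTimer();
  }
}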
package/src/network/core/networkCore.ts
CHANGED
@@ -553,7 +553,11 @@ export class NetworkCore implements INetworkCore {
     // On fork boundary transition
     if (epoch === nextBoundaryEpoch) {
       // updateEth2Field() MUST be called with clock epoch, onEpoch event is emitted in response to clock events
-      this.metadata.updateEth2Field(epoch);
+      const {forkDigest} = this.metadata.updateEth2Field(epoch);
+      // Update local status to reflect the new fork digest, otherwise we will disconnect peers that re-status us
+      // right after the fork transition due to incompatible forks as our fork digest is stale since we only
+      // update it once we import a new head or when emitting update status event.
+      this.statusCache.update({...this.statusCache.get(), forkDigest});
       this.reqResp.registerProtocolsAtBoundary(nextBoundary);
     }

package/src/network/core/networkCoreWorker.ts
CHANGED
@@ -1,5 +1,3 @@
-import fs from "node:fs";
-import path from "node:path";
 import worker from "node:worker_threads";
 import {privateKeyFromProtobuf} from "@libp2p/crypto/keys";
 import {peerIdFromPrivateKey} from "@libp2p/peer-id";
@@ -11,7 +9,7 @@ import {RegistryMetricCreator, collectNodeJSMetrics} from "../../metrics/index.j
 import {AsyncIterableBridgeCaller, AsyncIterableBridgeHandler} from "../../util/asyncIterableToEvents.js";
 import {Clock} from "../../util/clock.js";
 import {peerIdToString} from "../../util/peerId.js";
-import {
+import {ProfileThread, profileThread, writeHeapSnapshot} from "../../util/profile.js";
 import {wireEventsOnWorkerThread} from "../../util/workerEvents.js";
 import {NetworkEventBus, NetworkEventData, networkEventDirection} from "../events.js";
 import {
@@ -100,8 +98,13 @@ const core = await NetworkCore.init({
   metricsRegistry: metricsRegister,
   events,
   clock,
-  getReqRespHandler: (method) => (req, peerId) =>
-    reqRespBridgeRespCaller.getAsyncIterable({
+  getReqRespHandler: (method) => (req, peerId, peerClient) =>
+    reqRespBridgeRespCaller.getAsyncIterable({
+      method,
+      req,
+      peerId: peerIdToString(peerId),
+      peerClient,
+    }),
   activeValidatorCount: workerData.activeValidatorCount,
   initialStatus: workerData.initialStatus,
   initialCustodyGroupCount: workerData.initialCustodyGroupCount,
@@ -157,10 +160,7 @@ const libp2pWorkerApi: NetworkWorkerApi = {
   dumpDiscv5KadValues: () => core.dumpDiscv5KadValues(),
   dumpMeshPeers: () => core.dumpMeshPeers(),
   writeProfile: async (durationMs: number, dirpath: string) => {
-
-    const filePath = path.join(dirpath, `network_thread_${new Date().toISOString()}.cpuprofile`);
-    fs.writeFileSync(filePath, profile);
-    return filePath;
+    return profileThread(ProfileThread.NETWORK, durationMs, dirpath);
   },
   writeDiscv5Profile: async (durationMs: number, dirpath: string) => {
     return core.writeDiscv5Profile(durationMs, dirpath);
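Both the network worker and the discv5 worker (below) now delegate CPU profiling to a shared profileThread(thread, durationMs, dirpath) helper in util/profile.ts, replacing the duplicated file-writing code removed here. The helper's implementation is not part of this diff; the following is a minimal sketch of what such a helper could look like using node:inspector, keeping the removed <thread>_thread_<timestamp>.cpuprofile naming. The enum string values are assumptions:

import fs from "node:fs";
import path from "node:path";
import {Session} from "node:inspector";

// Names mirror the diff; the string values are assumed for this sketch
export enum ProfileThread {
  NETWORK = "network",
  DISC5 = "discv5",
}

export async function profileThread(thread: ProfileThread, durationMs: number, dirpath: string): Promise<string> {
  const session = new Session();
  session.connect();
  // Promisify the callback-based inspector API
  const post = (method: string, params?: object): Promise<unknown> =>
    new Promise((resolve, reject) =>
      session.post(method, params, (err, result) => (err ? reject(err) : resolve(result)))
    );

  await post("Profiler.enable");
  await post("Profiler.start");
  await new Promise((resolve) => setTimeout(resolve, durationMs));
  const {profile} = (await post("Profiler.stop")) as {profile: object};
  session.disconnect();

  const filePath = path.join(dirpath, `${thread}_thread_${new Date().toISOString()}.cpuprofile`);
  fs.writeFileSync(filePath, JSON.stringify(profile));
  return filePath;
}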
package/src/network/core/networkCoreWorkerHandler.ts
CHANGED
@@ -73,7 +73,7 @@ export class WorkerNetworkCore implements INetworkCore {
     // Handles ReqResp response from worker and calls async generator in main thread
     this.reqRespBridgeRespHandler = new AsyncIterableBridgeHandler(
       getReqRespBridgeRespEvents(this.reqRespBridgeEventBus),
-      (data) => modules.getReqRespHandler(data.method)(data.req, peerIdFromString(data.peerId))
+      (data) => modules.getReqRespHandler(data.method)(data.req, peerIdFromString(data.peerId), data.peerClient)
     );

     wireEventsOnMainThread<NetworkEventData>(
package/src/network/discv5/worker.ts
CHANGED
@@ -1,5 +1,3 @@
-import fs from "node:fs";
-import path from "node:path";
 import worker from "node:worker_threads";
 import {privateKeyFromProtobuf} from "@libp2p/crypto/keys";
 import {peerIdFromPrivateKey} from "@libp2p/peer-id";
@@ -14,7 +12,7 @@ import {Gauge} from "@lodestar/utils";
 import {RegistryMetricCreator} from "../../metrics/index.js";
 import {collectNodeJSMetrics} from "../../metrics/nodeJsMetrics.js";
 import {Clock} from "../../util/clock.js";
-import {
+import {ProfileThread, profileThread, writeHeapSnapshot} from "../../util/profile.js";
 import {Discv5WorkerApi, Discv5WorkerData} from "./types.js";
 import {ENRRelevance, enrRelevance} from "./utils.js";

@@ -108,10 +106,7 @@ const module: Discv5WorkerApi = {
     return (await metricsRegistry?.metrics()) ?? "";
   },
   writeProfile: async (durationMs: number, dirpath: string) => {
-
-    const filePath = path.join(dirpath, `discv5_thread_${new Date().toISOString()}.cpuprofile`);
-    fs.writeFileSync(filePath, profile);
-    return filePath;
+    return profileThread(ProfileThread.DISC5, durationMs, dirpath);
   },
   writeHeapSnapshot: async (prefix: string, dirpath: string) => {
     return writeHeapSnapshot(prefix, dirpath);
package/src/network/events.ts
CHANGED
@@ -29,7 +29,7 @@ export type NetworkEventData = {
     clientAgent: string;
   };
   [NetworkEvent.peerDisconnected]: {peer: PeerIdStr};
-  [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId};
+  [NetworkEvent.reqRespRequest]: {request: RequestTypedContainer; peer: PeerId; peerClient: string};
   [NetworkEvent.pendingGossipsubMessage]: PendingGossipsubMessage;
   [NetworkEvent.gossipMessageValidationResult]: {
     msgId: string;
package/src/network/gossip/encoding.ts
CHANGED
@@ -1,6 +1,6 @@
 import {Message} from "@libp2p/interface";
-import {compress, uncompress} from "snappyjs";
 import xxhashFactory from "xxhash-wasm";
+import {compress, uncompress} from "#snappy";
 import {digest} from "@chainsafe/as-sha256";
 import {RPC} from "@chainsafe/libp2p-gossipsub/message";
 import {DataTransform} from "@chainsafe/libp2p-gossipsub/types";
package/src/network/gossip/gossipsub.ts
CHANGED
@@ -296,6 +296,10 @@ export class Eth2Gossipsub extends GossipSub {
     // Get seenTimestamp before adding the message to the queue or add async delays
     const seenTimestampSec = Date.now() / 1000;

+    const peerIdStr = propagationSource.toString();
+    const clientAgent = this.peersData.getPeerKind(peerIdStr) ?? "Unknown";
+    const clientVersion = this.peersData.getAgentVersion(peerIdStr);
+
     // Use setTimeout to yield to the macro queue
     // Without this we'll have huge event loop lag
     // See https://github.com/ChainSafe/lodestar/issues/5604
@@ -305,7 +309,9 @@ export class Eth2Gossipsub extends GossipSub {
         msg,
         msgId,
         // Hot path, use cached .toString() version
-        propagationSource:
+        propagationSource: peerIdStr,
+        clientVersion,
+        clientAgent,
         seenTimestampSec,
         startProcessUnixSec: null,
       });
package/src/network/metadata.ts
CHANGED
@@ -126,7 +126,7 @@ export class MetadataController {
    * 2. Network MUST call this method on fork transition.
    * Current Clock implementation ensures no race conditions, epoch is correct if re-fetched
    */
-  updateEth2Field(epoch: Epoch):
+  updateEth2Field(epoch: Epoch): phase0.ENRForkID {
     const config = this.networkConfig.config;
     const enrForkId = getENRForkID(config, epoch);
     const {forkDigest, nextForkVersion, nextForkEpoch} = enrForkId;
@@ -143,6 +143,8 @@ export class MetadataController {
       : ssz.ForkDigest.defaultValue();
     this.onSetValue(ENRKey.nfd, nextForkDigest);
     this.logger.debug("Updated nfd field in ENR", {nextForkDigest: toHex(nextForkDigest)});
+
+    return enrForkId;
   }
 }

package/src/network/peers/discover.ts
CHANGED
@@ -391,8 +391,8 @@ export class PeerDiscovery {
     // tcp multiaddr is known to be be present, checked inside the worker
     const multiaddrTCP = enr.getLocationMultiaddr(ENRKey.tcp);
     if (!multiaddrTCP) {
-      this.logger.
-      this.metrics?.discovery.discoveredStatus.inc({status: DiscoveredPeerStatus.
+      this.logger.warn("Discv5 worker sent enr without tcp multiaddr", {enr: enr.encodeTxt()});
+      this.metrics?.discovery.discoveredStatus.inc({status: DiscoveredPeerStatus.no_multiaddrs});
       return;
     }
     // Are this fields mandatory?
package/src/network/processor/gossipHandlers.ts
CHANGED
@@ -39,6 +39,7 @@ import {
   BlockError,
   BlockErrorCode,
   BlockGossipError,
+  DataColumnSidecarErrorCode,
   DataColumnSidecarGossipError,
   GossipAction,
   GossipActionError,
@@ -295,6 +296,21 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
       const slot = dataColumnBlockHeader.slot;
       const blockRootHex = toRootHex(ssz.phase0.BeaconBlockHeader.hashTreeRoot(dataColumnBlockHeader));

+      // check to see if block has already been processed and BlockInput has been deleted (column received via reqresp or other means)
+      if (chain.forkChoice.hasBlockHex(blockRootHex)) {
+        metrics?.peerDas.dataColumnSidecarProcessingSkip.inc();
+        logger.debug("Already processed block for column sidecar, skipping processing", {
+          slot,
+          blockRoot: blockRootHex,
+          index: dataColumnSidecar.index,
+        });
+        throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
+          code: DataColumnSidecarErrorCode.ALREADY_KNOWN,
+          columnIndex: dataColumnSidecar.index,
+          slot,
+        });
+      }
+
       // first check if we should even process this column (we may have already processed it via getBlobsV2)
       {
         const blockInput = chain.seenBlockInputCache.get(blockRootHex);
@@ -304,7 +320,11 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
             ...blockInput.getLogMeta(),
             index: dataColumnSidecar.index,
           });
-
+          throw new DataColumnSidecarGossipError(GossipAction.IGNORE, {
+            code: DataColumnSidecarErrorCode.ALREADY_KNOWN,
+            columnIndex: dataColumnSidecar.index,
+            slot,
+          });
         }
       }

@@ -556,6 +576,16 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
           metrics?.dataColumns.elapsedTimeTillReceived.observe({receivedOrder: receivedColumns}, delaySec);
           break;
        }
+
+      if (!blockInput.hasAllData()) {
+        // immediately attempt fetch of data columns from execution engine
+        chain.getBlobsTracker.triggerGetBlobs(blockInput);
+        // if we've received at least half of the columns, trigger reconstruction of the rest
+        if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
+          chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
+        }
+      }
+
       if (!blockInput.hasBlockAndAllData()) {
         const cutoffTimeMs = getCutoffTimeMs(chain, dataColumnSlot, BLOCK_AVAILABILITY_CUTOFF_MS);
         chain.logger.debug("Received gossip data column, waiting for full data availability", {
@@ -578,12 +608,6 @@ function getSequentialHandlers(modules: ValidatorFnsModules, options: GossipHand
            source: BlockInputSource.gossip,
          });
        });
-        // immediately attempt fetch of data columns from execution engine
-        chain.getBlobsTracker.triggerGetBlobs(blockInput);
-        // if we've received at least half of the columns, trigger reconstruction of the rest
-        if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
-          chain.columnReconstructionTracker.triggerColumnReconstruction(blockInput);
-        }
       }
     },

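With this move the recovery triggers run for every gossip column that arrives while data is still missing, not only on the availability-timeout path that previously contained them: the handler always asks the execution engine for missing blobs, and once at least half of NUMBER_OF_COLUMNS are present it starts reconstructing the rest. A condensed sketch of that decision; the tracker interfaces are reduced to the two methods used above, and NUMBER_OF_COLUMNS = 128 is the fulu preset value assumed here to keep the example standalone:

// Condensed sketch of the recovery triggers moved ahead of the availability wait.
const NUMBER_OF_COLUMNS = 128; // assumed preset value for this standalone example

interface BlockInputLike {
  hasAllData(): boolean;
  columnCount: number;
}

function triggerColumnRecovery(
  blockInput: BlockInputLike,
  getBlobsTracker: {triggerGetBlobs(bi: BlockInputLike): void},
  columnReconstructionTracker: {triggerColumnReconstruction(bi: BlockInputLike): void}
): void {
  if (blockInput.hasAllData()) return;
  // Always try to fetch missing columns from the execution engine first
  getBlobsTracker.triggerGetBlobs(blockInput);
  // With >= 50% of the columns the rest can be recovered by erasure-coding reconstruction
  if (blockInput.columnCount >= NUMBER_OF_COLUMNS / 2) {
    columnReconstructionTracker.triggerColumnReconstruction(blockInput);
  }
}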
|
|
@@ -11,6 +11,7 @@ import {
|
|
|
11
11
|
GossipValidatorBatchFn,
|
|
12
12
|
GossipValidatorFn,
|
|
13
13
|
} from "../gossip/interface.js";
|
|
14
|
+
import {prettyPrintPeerIdStr} from "../util.ts";
|
|
14
15
|
|
|
15
16
|
export type ValidatorFnModules = {
|
|
16
17
|
config: ChainForkConfig;
|
|
@@ -99,7 +100,15 @@ export function getGossipValidatorBatchFn(
|
|
|
99
100
|
export function getGossipValidatorFn(gossipHandlers: GossipHandlers, modules: ValidatorFnModules): GossipValidatorFn {
|
|
100
101
|
const {logger, metrics} = modules;
|
|
101
102
|
|
|
102
|
-
return async function gossipValidatorFn({
|
|
103
|
+
return async function gossipValidatorFn({
|
|
104
|
+
topic,
|
|
105
|
+
msg,
|
|
106
|
+
propagationSource,
|
|
107
|
+
clientAgent,
|
|
108
|
+
clientVersion,
|
|
109
|
+
seenTimestampSec,
|
|
110
|
+
msgSlot,
|
|
111
|
+
}) {
|
|
103
112
|
const type = topic.type;
|
|
104
113
|
|
|
105
114
|
try {
|
|
@@ -134,7 +143,11 @@ export function getGossipValidatorFn(gossipHandlers: GossipHandlers, modules: Va
|
|
|
134
143
|
|
|
135
144
|
case GossipAction.REJECT:
|
|
136
145
|
metrics?.networkProcessor.gossipValidationReject.inc({topic: type});
|
|
137
|
-
logger.debug(
|
|
146
|
+
logger.debug(
|
|
147
|
+
`Gossip validation ${type} rejected`,
|
|
148
|
+
{peerId: prettyPrintPeerIdStr(propagationSource), clientAgent, clientVersion},
|
|
149
|
+
e
|
|
150
|
+
);
|
|
138
151
|
return TopicValidatorResult.Reject;
|
|
139
152
|
}
|
|
140
153
|
}
|
|
@@ -15,6 +15,8 @@ export type PendingGossipsubMessage = {
|
|
|
15
15
|
msgSlot?: Slot;
|
|
16
16
|
msgId: string;
|
|
17
17
|
propagationSource: PeerIdStr;
|
|
18
|
+
clientAgent: string;
|
|
19
|
+
clientVersion: string;
|
|
18
20
|
seenTimestampSec: number;
|
|
19
21
|
startProcessUnixSec: number | null;
|
|
20
22
|
// specific properties for IndexedGossipQueueMinSize, for beacon_attestation topic only
|