@lodestar/beacon-node 1.39.0-dev.10b6636b92 → 1.39.0-dev.219c3c247e
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/chain/blocks/importBlock.d.ts.map +1 -1
- package/lib/chain/blocks/importBlock.js +6 -0
- package/lib/chain/blocks/importBlock.js.map +1 -1
- package/lib/chain/chain.d.ts +1 -0
- package/lib/chain/chain.d.ts.map +1 -1
- package/lib/chain/chain.js +11 -2
- package/lib/chain/chain.js.map +1 -1
- package/lib/chain/opPools/aggregatedAttestationPool.d.ts +1 -2
- package/lib/chain/opPools/aggregatedAttestationPool.d.ts.map +1 -1
- package/lib/chain/opPools/aggregatedAttestationPool.js +4 -129
- package/lib/chain/opPools/aggregatedAttestationPool.js.map +1 -1
- package/lib/chain/prepareNextSlot.d.ts.map +1 -1
- package/lib/chain/prepareNextSlot.js +1 -6
- package/lib/chain/prepareNextSlot.js.map +1 -1
- package/lib/chain/regen/interface.d.ts +0 -4
- package/lib/chain/regen/interface.d.ts.map +1 -1
- package/lib/chain/shufflingCache.d.ts +5 -12
- package/lib/chain/shufflingCache.d.ts.map +1 -1
- package/lib/chain/shufflingCache.js +12 -50
- package/lib/chain/shufflingCache.js.map +1 -1
- package/lib/chain/validation/attesterSlashing.d.ts.map +1 -1
- package/lib/chain/validation/attesterSlashing.js +1 -1
- package/lib/chain/validation/attesterSlashing.js.map +1 -1
- package/lib/chain/validation/blsToExecutionChange.d.ts.map +1 -1
- package/lib/chain/validation/blsToExecutionChange.js +9 -2
- package/lib/chain/validation/blsToExecutionChange.js.map +1 -1
- package/lib/chain/validation/proposerSlashing.js +2 -1
- package/lib/chain/validation/proposerSlashing.js.map +1 -1
- package/lib/execution/engine/mock.d.ts +9 -6
- package/lib/execution/engine/mock.d.ts.map +1 -1
- package/lib/execution/engine/mock.js +34 -7
- package/lib/execution/engine/mock.js.map +1 -1
- package/lib/metrics/metrics/lodestar.d.ts +1 -6
- package/lib/metrics/metrics/lodestar.d.ts.map +1 -1
- package/lib/metrics/metrics/lodestar.js +3 -17
- package/lib/metrics/metrics/lodestar.js.map +1 -1
- package/lib/node/nodejs.d.ts.map +1 -1
- package/lib/node/nodejs.js +17 -2
- package/lib/node/nodejs.js.map +1 -1
- package/package.json +15 -15
- package/src/chain/blocks/importBlock.ts +7 -0
- package/src/chain/chain.ts +13 -3
- package/src/chain/opPools/aggregatedAttestationPool.ts +3 -178
- package/src/chain/prepareNextSlot.ts +1 -6
- package/src/chain/regen/interface.ts +0 -4
- package/src/chain/shufflingCache.ts +15 -61
- package/src/chain/validation/attesterSlashing.ts +8 -1
- package/src/chain/validation/blsToExecutionChange.ts +9 -7
- package/src/chain/validation/proposerSlashing.ts +2 -1
- package/src/execution/engine/mock.ts +40 -13
- package/src/metrics/metrics/lodestar.ts +3 -17
- package/src/node/nodejs.ts +18 -3

package/src/chain/opPools/aggregatedAttestationPool.ts
CHANGED

@@ -5,7 +5,6 @@ import {IForkChoice} from "@lodestar/fork-choice";
 import {
   ForkName,
   ForkSeq,
-  MAX_ATTESTATIONS,
   MAX_ATTESTATIONS_ELECTRA,
   MAX_COMMITTEES_PER_SLOT,
   MIN_ATTESTATION_INCLUSION_DELAY,
@@ -23,7 +22,6 @@ import {
   CachedBeaconStateAllForks,
   CachedBeaconStateAltair,
   CachedBeaconStateGloas,
-  CachedBeaconStatePhase0,
   EffectiveBalanceIncrements,
   RootCache,
   computeEpochAtSlot,
@@ -32,17 +30,7 @@ import {
   getAttestationParticipationStatus,
   getBlockRootAtSlot,
 } from "@lodestar/state-transition";
-import {
-  Attestation,
-  Epoch,
-  RootHex,
-  Slot,
-  ValidatorIndex,
-  electra,
-  isElectraAttestation,
-  phase0,
-  ssz,
-} from "@lodestar/types";
+import {Attestation, Epoch, RootHex, Slot, electra, isElectraAttestation, phase0, ssz} from "@lodestar/types";
 import {MapDef, assert, toRootHex} from "@lodestar/utils";
 import {Metrics} from "../../metrics/metrics.js";
 import {IntersectResult, intersectUint8Arrays} from "../../util/bitArray.js";
@@ -54,8 +42,6 @@ type DataRootHex = string;
 
 type CommitteeIndex = number;
 
-// for pre-electra
-type AttestationWithScore = {attestation: Attestation; score: number};
 /**
  * for electra, this is to consolidate aggregated attestations of the same attestation data into a single attestation to be included in block
  * note that this is local definition in this file and it's NOT validator consolidation
@@ -110,15 +96,6 @@ const MAX_RETAINED_ATTESTATIONS_PER_GROUP = 4;
  */
 const MAX_RETAINED_ATTESTATIONS_PER_GROUP_ELECTRA = 8;
 
-/**
- * Pre-electra, each slot has 64 committees, and each block has 128 attestations max so in average
- * we get 2 attestation per groups.
- * Starting from Jan 2024, we have a performance issue getting attestations for a block. Based on the
- * fact that lot of groups will have only 1 full participation attestation, increase this number
- * a bit higher than average. This also help decrease number of slots to search for attestations.
- */
-const MAX_ATTESTATIONS_PER_GROUP = 3;
-
 /**
  * For electra, there is on chain aggregation of attestations across committees, so we can just pick up to 8
  * attestations per group, sort by scores to get first 8.
@@ -245,108 +222,7 @@ export class AggregatedAttestationPool {
     forkChoice: IForkChoice,
     state: CachedBeaconStateAllForks
   ): phase0.Attestation[] {
-
-    const stateEpoch = state.epochCtx.epoch;
-    const statePrevEpoch = stateEpoch - 1;
-
-    const notSeenValidatorsFn = getNotSeenValidatorsFn(this.config, state);
-    const validateAttestationDataFn = getValidateAttestationDataFn(forkChoice, state);
-
-    const attestationsByScore: AttestationWithScore[] = [];
-
-    const slots = Array.from(this.attestationGroupByIndexByDataHexBySlot.keys()).sort((a, b) => b - a);
-    let minScore = Number.MAX_SAFE_INTEGER;
-    let slotCount = 0;
-    slot: for (const slot of slots) {
-      slotCount++;
-      const attestationGroupByIndexByDataHash = this.attestationGroupByIndexByDataHexBySlot.get(slot);
-      // should not happen
-      if (!attestationGroupByIndexByDataHash) {
-        throw Error(`No aggregated attestation pool for slot=${slot}`);
-      }
-
-      const epoch = computeEpochAtSlot(slot);
-      // validateAttestation condition: Attestation target epoch not in previous or current epoch
-      if (!(epoch === stateEpoch || epoch === statePrevEpoch)) {
-        continue; // Invalid attestations
-      }
-      // validateAttestation condition: Attestation slot not within inclusion window
-      if (
-        !(
-          slot + MIN_ATTESTATION_INCLUSION_DELAY <= stateSlot &&
-          // Post deneb, attestations are valid for current and previous epoch
-          (ForkSeq[fork] >= ForkSeq.deneb || stateSlot <= slot + SLOTS_PER_EPOCH)
-        )
-      ) {
-        continue; // Invalid attestations
-      }
-
-      const inclusionDistance = stateSlot - slot;
-      for (const attestationGroupByIndex of attestationGroupByIndexByDataHash.values()) {
-        for (const [committeeIndex, attestationGroup] of attestationGroupByIndex.entries()) {
-          const notSeenCommitteeMembers = notSeenValidatorsFn(epoch, slot, committeeIndex);
-          if (notSeenCommitteeMembers === null || notSeenCommitteeMembers.size === 0) {
-            continue;
-          }
-
-          if (
-            slotCount > 2 &&
-            attestationsByScore.length >= MAX_ATTESTATIONS &&
-            notSeenCommitteeMembers.size / inclusionDistance < minScore
-          ) {
-            // after 2 slots, there are a good chance that we have 2 * MAX_ATTESTATIONS attestations and break the for loop early
-            // if not, we may have to scan all slots in the pool
-            // if we have enough attestations and the max possible score is lower than scores of `attestationsByScore`, we should skip
-            // otherwise it takes time to check attestation, add it and remove it later after the sort by score
-            continue;
-          }
-
-          if (validateAttestationDataFn(attestationGroup.data) !== null) {
-            continue;
-          }
-
-          // TODO: Is it necessary to validateAttestation for:
-          // - Attestation committee index not within current committee count
-          // - Attestation aggregation bits length does not match committee length
-          //
-          // These properties should not change after being validate in gossip
-          // IF they have to be validated, do it only with one attestation per group since same data
-          // The committeeCountPerSlot can be precomputed once per slot
-          const getAttestationsResult = attestationGroup.getAttestationsForBlock(
-            fork,
-            state.epochCtx.effectiveBalanceIncrements,
-            notSeenCommitteeMembers,
-            MAX_ATTESTATIONS_PER_GROUP
-          );
-          for (const {attestation, newSeenEffectiveBalance} of getAttestationsResult.result) {
-            const score = newSeenEffectiveBalance / inclusionDistance;
-            if (score < minScore) {
-              minScore = score;
-            }
-            attestationsByScore.push({
-              attestation,
-              score,
-            });
-          }
-
-          // Stop accumulating attestations there are enough that may have good scoring
-          if (attestationsByScore.length >= MAX_ATTESTATIONS * 2) {
-            break slot;
-          }
-        }
-      }
-    }
-
-    const sortedAttestationsByScore = attestationsByScore.sort((a, b) => b.score - a.score);
-    const attestationsForBlock: phase0.Attestation[] = [];
-    for (const [i, attestationWithScore] of sortedAttestationsByScore.entries()) {
-      if (i >= MAX_ATTESTATIONS) {
-        break;
-      }
-      // attestations could be modified in this op pool, so we need to clone for block
-      attestationsForBlock.push(ssz.phase0.Attestation.clone(attestationWithScore.attestation));
-    }
-    return attestationsForBlock;
+    throw new Error("Does not support producing blocks for pre-electra forks anymore");
   }
 
   /**
@@ -867,38 +743,7 @@ export function aggregateConsolidation({byCommittee, attData}: AttestationsConsolidation
 export function getNotSeenValidatorsFn(config: BeaconConfig, state: CachedBeaconStateAllForks): GetNotSeenValidatorsFn {
   const stateSlot = state.slot;
   if (config.getForkName(stateSlot) === ForkName.phase0) {
-
-    // As we are close to altair, this is not really important, it's mainly for e2e.
-    // The performance is not great due to the different BeaconState data structure to altair.
-    // check for phase0 block already
-    const phase0State = state as CachedBeaconStatePhase0;
-    const stateEpoch = computeEpochAtSlot(stateSlot);
-
-    const previousEpochParticipants = extractParticipationPhase0(
-      phase0State.previousEpochAttestations.getAllReadonly(),
-      state
-    );
-    const currentEpochParticipants = extractParticipationPhase0(
-      phase0State.currentEpochAttestations.getAllReadonly(),
-      state
-    );
-
-    return (epoch: Epoch, slot: Slot, committeeIndex: number) => {
-      const participants =
-        epoch === stateEpoch ? currentEpochParticipants : epoch === stateEpoch - 1 ? previousEpochParticipants : null;
-      if (participants === null) {
-        return null;
-      }
-      const committee = state.epochCtx.getBeaconCommittee(slot, committeeIndex);
-
-      const notSeenCommitteeMembers = new Set<number>();
-      for (const [i, validatorIndex] of committee.entries()) {
-        if (!participants.has(validatorIndex)) {
-          notSeenCommitteeMembers.add(i);
-        }
-      }
-      return notSeenCommitteeMembers.size === 0 ? null : notSeenCommitteeMembers;
-    };
+    throw new Error("getNotSeenValidatorsFn is not supported phase0 state");
   }
 
   // altair and future forks
@@ -942,26 +787,6 @@ export function getNotSeenValidatorsFn(config: BeaconConfig, state: CachedBeacon
   };
 }
 
-export function extractParticipationPhase0(
-  attestations: phase0.PendingAttestation[],
-  state: CachedBeaconStateAllForks
-): Set<ValidatorIndex> {
-  const {epochCtx} = state;
-  const allParticipants = new Set<ValidatorIndex>();
-  for (const att of attestations) {
-    const aggregationBits = att.aggregationBits;
-    const attData = att.data;
-    const attSlot = attData.slot;
-    const committeeIndex = attData.index;
-    const committee = epochCtx.getBeaconCommittee(attSlot, committeeIndex);
-    const participants = aggregationBits.intersectValues(committee);
-    for (const participant of participants) {
-      allParticipants.add(participant);
-    }
-  }
-  return allParticipants;
-}
-
 /**
  * This returns a function to validate if an attestation data is compatible to a state.
  *
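
For reference, the heuristic that the deleted pre-electra packing used can be distilled into a few lines. This is a sketch derived from the removed code above, not an export of the package:

```ts
// Sketch of the scoring used by the removed pre-electra packing (distilled from the deleted
// lines above). An aggregate is worth the not-yet-seen effective balance it adds, divided by
// how many slots old it is at inclusion time; the block then keeps the MAX_ATTESTATIONS
// highest-scoring aggregates after sorting.
function attestationScore(newSeenEffectiveBalance: number, stateSlot: number, attestationSlot: number): number {
  const inclusionDistance = stateSlot - attestationSlot;
  return newSeenEffectiveBalance / inclusionDistance;
}
```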

package/src/chain/prepareNextSlot.ts
CHANGED

@@ -117,12 +117,7 @@ export class PrepareNextSlotScheduler {
         // the slot 0 of next epoch will likely use this Previous Root Checkpoint state for state transition so we transfer cache here
         // the resulting state with cache will be cached in Checkpoint State Cache which is used for the upcoming block processing
         // for other slots dontTransferCached=true because we don't run state transition on this state
-
-        // Shuffling calculation will be done asynchronously when passing asyncShufflingCalculation=true. Shuffling will be queued in
-        // beforeProcessEpoch and should theoretically be ready immediately after the synchronous epoch transition finished and the
-        // event loop is free. In long periods of non-finality too many forks will cause the shufflingCache to throw an error for
-        // too many queued shufflings so only run async during normal epoch transition. See issue ChainSafe/lodestar#7244
-        {dontTransferCache: !isEpochTransition, asyncShufflingCalculation: true},
+        {dontTransferCache: !isEpochTransition},
         RegenCaller.precomputeEpoch
       );
 

package/src/chain/regen/interface.ts
CHANGED

@@ -31,10 +31,6 @@ export enum RegenFnName {
 
 export type StateRegenerationOpts = {
   dontTransferCache: boolean;
-  /**
-   * Do not queue shuffling calculation async. Forces sync JIT calculation in afterProcessEpoch if not passed as `true`
-   */
-  asyncShufflingCalculation?: boolean;
 };
 
 export interface IStateRegenerator extends IStateRegeneratorInternal {

package/src/chain/shufflingCache.ts
CHANGED

@@ -1,11 +1,4 @@
-import {
-  BeaconStateAllForks,
-  EpochShuffling,
-  IShufflingCache,
-  ShufflingBuildProps,
-  computeEpochShuffling,
-  computeEpochShufflingAsync,
-} from "@lodestar/state-transition";
+import {CachedBeaconStateAllForks, EpochShuffling} from "@lodestar/state-transition";
 import {Epoch, RootHex} from "@lodestar/types";
 import {LodestarError, Logger, MapDef, pruneSetToMax} from "@lodestar/utils";
 import {Metrics} from "../metrics/metrics.js";
@@ -53,7 +46,7 @@ export type ShufflingCacheOpts = {
  * - if a shuffling is not available (which does not happen with default chain option of maxSkipSlots = 32), track a promise to make sure we don't compute the same shuffling twice
  * - skip computing shuffling when loading state bytes from disk
  */
-export class ShufflingCache implements IShufflingCache {
+export class ShufflingCache {
   /** LRU cache implemented as a map, pruned every time we add an item */
   private readonly itemsByDecisionRootByEpoch: MapDef<Epoch, Map<RootHex, CacheItem>> = new MapDef(
     () => new Map<RootHex, CacheItem>()
@@ -136,60 +129,20 @@ export class ShufflingCache implements IShufflingCache {
   }
 
   /**
-   *
-   *
-   *
-   * NOTE: If a shuffling is already queued and not calculated it will build and resolve
-   * the promise but the already queued build will happen at some later time
+   * Process a state to extract and cache all shufflings (previous, current, next).
+   * Uses the stored decision roots from epochCtx.
    */
-
-
-    decisionRoot: RootHex,
-    buildProps?: T
-  ): T extends ShufflingBuildProps ? EpochShuffling : EpochShuffling | null {
-    const cacheItem = this.itemsByDecisionRootByEpoch.getOrDefault(epoch).get(decisionRoot);
-    if (!cacheItem) {
-      this.metrics?.shufflingCache.miss.inc();
-    } else if (isShufflingCacheItem(cacheItem)) {
-      this.metrics?.shufflingCache.hit.inc();
-      return cacheItem.shuffling;
-    } else if (buildProps) {
-      // TODO: (@matthewkeil) This should possible log a warning??
-      this.metrics?.shufflingCache.shufflingPromiseNotResolvedAndThrownAway.inc();
-    } else {
-      this.metrics?.shufflingCache.shufflingPromiseNotResolved.inc();
-    }
+  processState(state: CachedBeaconStateAllForks): void {
+    const {epochCtx} = state;
 
-
-
-      const timer = this.metrics?.shufflingCache.shufflingCalculationTime.startTimer({source: "getSync"});
-      shuffling = computeEpochShuffling(buildProps.state, buildProps.activeIndices, epoch);
-      timer?.();
-      this.set(shuffling, decisionRoot);
-    }
-    return shuffling as T extends ShufflingBuildProps ? EpochShuffling : EpochShuffling | null;
-  }
+    // Cache previous shuffling
+    this.set(epochCtx.previousShuffling, epochCtx.previousDecisionRoot);
 
-
-
-
-
-    this.
-    /**
-     * TODO: (@matthewkeil) This will get replaced by a proper build queue and a worker to do calculations
-     * on a NICE thread
-     */
-    const timer = this.metrics?.shufflingCache.shufflingCalculationTime.startTimer({source: "build"});
-    computeEpochShufflingAsync(state, activeIndices, epoch)
-      .then((shuffling) => {
-        this.set(shuffling, decisionRoot);
-      })
-      .catch((err) =>
-        this.logger?.error(`error building shuffling for epoch ${epoch} at decisionRoot ${decisionRoot}`, {}, err)
-      )
-      .finally(() => {
-        timer?.();
-      });
+    // Cache current shuffling
+    this.set(epochCtx.currentShuffling, epochCtx.currentDecisionRoot);
+
+    // Cache next shuffling
+    this.set(epochCtx.nextShuffling, epochCtx.nextDecisionRoot);
   }
 
   /**
@@ -207,7 +160,8 @@ export class ShufflingCache implements IShufflingCache {
         (Date.now() - cacheItem.timeInsertedMs) / 1000
       );
     } else {
-      this.metrics?.shufflingCache.
+      this.metrics?.shufflingCache.shufflingSetMultipleTimes.inc();
+      return;
     }
   }
   // set the shuffling
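
The `getSync`/async-build machinery above is replaced by a single `processState` call. A minimal usage sketch follows; the call site is assumed, only the method itself appears in this diff:

```ts
import type {CachedBeaconStateAllForks} from "@lodestar/state-transition";
// ShufflingCache is the class defined in this file; the import path at a real call site may differ.

// Assumed call site: once a state has been advanced across an epoch boundary, the previous,
// current and next shufflings (and their decision roots) already live on state.epochCtx,
// so caching them is a copy of references rather than a recomputation.
function cacheShufflings(shufflingCache: ShufflingCache, state: CachedBeaconStateAllForks): void {
  shufflingCache.processState(state); // stores previous, current and next shuffling keyed by decision root
}
```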

package/src/chain/validation/attesterSlashing.ts
CHANGED

@@ -43,7 +43,14 @@ export async function validateAttesterSlashing(
   // [REJECT] All of the conditions within process_attester_slashing pass validation.
   try {
     // verifySignature = false, verified in batch below
-    assertValidAttesterSlashing(
+    assertValidAttesterSlashing(
+      chain.config,
+      chain.index2pubkey,
+      state.slot,
+      state.validators.length,
+      attesterSlashing,
+      false
+    );
   } catch (e) {
     throw new AttesterSlashingError(GossipAction.REJECT, {
       code: AttesterSlashingErrorCode.INVALID,

package/src/chain/validation/blsToExecutionChange.ts
CHANGED

@@ -1,8 +1,4 @@
-import {
-  CachedBeaconStateCapella,
-  getBlsToExecutionChangeSignatureSet,
-  isValidBlsToExecutionChange,
-} from "@lodestar/state-transition";
+import {getBlsToExecutionChangeSignatureSet, isValidBlsToExecutionChange} from "@lodestar/state-transition";
 import {capella} from "@lodestar/types";
 import {BlsToExecutionChangeError, BlsToExecutionChangeErrorCode, GossipAction} from "../errors/index.js";
 import {IBeaconChain} from "../index.js";
@@ -42,10 +38,16 @@ async function validateBlsToExecutionChange(
   // and chanes relevant to `isValidBlsToExecutionChange()` happen only on processBlock(), not processEpoch()
   const state = chain.getHeadState();
   const {config} = chain;
-
+  const addressChange = blsToExecutionChange.message;
+  if (addressChange.validatorIndex >= state.validators.length) {
+    throw new BlsToExecutionChangeError(GossipAction.REJECT, {
+      code: BlsToExecutionChangeErrorCode.INVALID,
+    });
+  }
+  const validator = state.validators.getReadonly(addressChange.validatorIndex);
   // [REJECT] All of the conditions within process_bls_to_execution_change pass validation.
   // verifySignature = false, verified in batch below
-  const {valid} = isValidBlsToExecutionChange(
+  const {valid} = isValidBlsToExecutionChange(config, validator, blsToExecutionChange, false);
   if (!valid) {
     throw new BlsToExecutionChangeError(GossipAction.REJECT, {
       code: BlsToExecutionChangeErrorCode.INVALID,
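
The new bounds check rejects messages that reference a validator index outside the registry before reading from the validators tree. A hypothetical helper expressing the same guard (the helper name and shape are illustrative; the error classes and state accessors are the ones imported above):

```ts
import type {CachedBeaconStateAllForks} from "@lodestar/state-transition";
import {BlsToExecutionChangeError, BlsToExecutionChangeErrorCode, GossipAction} from "../errors/index.js";

// Hypothetical helper mirroring the guard added above: reject gossip objects whose validator
// index falls outside the current registry, otherwise return a read-only view of the validator.
function getValidatorOrReject(state: CachedBeaconStateAllForks, validatorIndex: number) {
  if (validatorIndex >= state.validators.length) {
    throw new BlsToExecutionChangeError(GossipAction.REJECT, {code: BlsToExecutionChangeErrorCode.INVALID});
  }
  return state.validators.getReadonly(validatorIndex);
}
```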

package/src/chain/validation/proposerSlashing.ts
CHANGED

@@ -35,8 +35,9 @@ async function validateProposerSlashing(
 
   // [REJECT] All of the conditions within process_proposer_slashing pass validation.
   try {
+    const proposer = state.validators.getReadonly(proposerSlashing.signedHeader1.message.proposerIndex);
     // verifySignature = false, verified in batch below
-    assertValidProposerSlashing(state, proposerSlashing, false);
+    assertValidProposerSlashing(chain.config, chain.index2pubkey, state.slot, proposerSlashing, proposer, false);
   } catch (e) {
     throw new ProposerSlashingError(GossipAction.REJECT, {
       code: ProposerSlashingErrorCode.INVALID,

package/src/execution/engine/mock.ts
CHANGED

@@ -1,4 +1,5 @@
 import crypto from "node:crypto";
+import {ChainConfig} from "@lodestar/config";
 import {
   BLOB_TX_TYPE,
   BYTES_PER_FIELD_ELEMENT,
@@ -7,7 +8,9 @@ import {
   ForkPostBellatrix,
   ForkPostCapella,
   ForkSeq,
+  SLOTS_PER_EPOCH,
 } from "@lodestar/params";
+import {computeTimeAtSlot} from "@lodestar/state-transition";
 import {ExecutionPayload, RootHex, bellatrix, deneb, ssz} from "@lodestar/types";
 import {fromHex, toRootHex} from "@lodestar/utils";
 import {ZERO_HASH_HEX} from "../../constants/index.js";
@@ -34,14 +37,11 @@ const INTEROP_GAS_LIMIT = 30e6;
 const PRUNE_PAYLOAD_ID_AFTER_MS = 5000;
 
 export type ExecutionEngineMockOpts = {
-  genesisBlockHash
+  genesisBlockHash?: string;
   eth1BlockHash?: string;
   onlyPredefinedResponses?: boolean;
-
-
-  electraForkTimestamp?: number;
-  fuluForkTimestamp?: number;
-  gloasForkTimestamp?: number;
+  genesisTime?: number;
+  config?: ChainConfig;
 };
 
 type ExecutionBlock = {
@@ -74,17 +74,21 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
   /** Preparing payloads to be retrieved via engine_getPayloadV1 */
   private readonly preparingPayloads = new Map<number, PreparedPayload>();
   private readonly payloadsForDeletion = new Map<number, number>();
-
   private readonly predefinedPayloadStatuses = new Map<RootHex, PayloadStatus>();
 
   private payloadId = 0;
+  private capellaForkTimestamp: number;
+  private denebForkTimestamp: number;
+  private electraForkTimestamp: number;
+  private fuluForkTimestamp: number;
+  private gloasForkTimestamp: number;
 
   readonly handlers: {
     [K in keyof EngineApiRpcParamTypes]: (...args: EngineApiRpcParamTypes[K]) => EngineApiRpcReturnTypes[K];
   };
 
   constructor(private readonly opts: ExecutionEngineMockOpts) {
-    this.validBlocks.set(opts.genesisBlockHash, {
+    this.validBlocks.set(opts.genesisBlockHash ?? ZERO_HASH_HEX, {
       parentHash: ZERO_HASH_HEX,
       blockHash: ZERO_HASH_HEX,
       timestamp: 0,
@@ -100,6 +104,29 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
       blockNumber: 1,
     });
 
+    const {config} = opts;
+
+    this.capellaForkTimestamp =
+      opts.genesisTime && config
+        ? computeTimeAtSlot(config, config.CAPELLA_FORK_EPOCH * SLOTS_PER_EPOCH, opts.genesisTime)
+        : Infinity;
+    this.denebForkTimestamp =
+      opts.genesisTime && config
+        ? computeTimeAtSlot(config, config.DENEB_FORK_EPOCH * SLOTS_PER_EPOCH, opts.genesisTime)
+        : Infinity;
+    this.electraForkTimestamp =
+      opts.genesisTime && config
+        ? computeTimeAtSlot(config, config.ELECTRA_FORK_EPOCH * SLOTS_PER_EPOCH, opts.genesisTime)
+        : Infinity;
+    this.fuluForkTimestamp =
+      opts.genesisTime && config
+        ? computeTimeAtSlot(config, config.FULU_FORK_EPOCH * SLOTS_PER_EPOCH, opts.genesisTime)
+        : Infinity;
+    this.gloasForkTimestamp =
+      opts.genesisTime && config
+        ? computeTimeAtSlot(config, config.GLOAS_FORK_EPOCH * SLOTS_PER_EPOCH, opts.genesisTime)
+        : Infinity;
+
     this.handlers = {
       engine_newPayloadV1: this.notifyNewPayload.bind(this),
       engine_newPayloadV2: this.notifyNewPayload.bind(this),
@@ -448,11 +475,11 @@ export class ExecutionEngineMockBackend implements JsonRpcBackend {
   }
 
   private timestampToFork(timestamp: number): ForkPostBellatrix {
-    if (timestamp >=
-    if (timestamp >=
-    if (timestamp >=
-    if (timestamp >=
-    if (timestamp >=
+    if (timestamp >= this.gloasForkTimestamp) return ForkName.gloas;
+    if (timestamp >= this.fuluForkTimestamp) return ForkName.fulu;
+    if (timestamp >= this.electraForkTimestamp) return ForkName.electra;
+    if (timestamp >= this.denebForkTimestamp) return ForkName.deneb;
+    if (timestamp >= this.capellaForkTimestamp) return ForkName.capella;
     return ForkName.bellatrix;
   }
 }
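
The mock no longer accepts per-fork timestamps: callers pass `genesisTime` plus a `ChainConfig` and the backend derives the capella through gloas activation timestamps itself, falling back to `Infinity` when either value is missing. A construction sketch under assumed inputs:

```ts
import {ChainConfig} from "@lodestar/config";
// ExecutionEngineMockBackend is the class defined in this file.

// Assumed inputs: a chain config carrying *_FORK_EPOCH values and the genesis time of the anchor state.
declare const config: ChainConfig;
declare const genesisTime: number;

const backend = new ExecutionEngineMockBackend({
  onlyPredefinedResponses: false,
  // genesisBlockHash is omitted: the constructor falls back to ZERO_HASH_HEX
  genesisTime,
  config,
});
```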

package/src/metrics/metrics/lodestar.ts
CHANGED

@@ -1308,33 +1308,19 @@ export function createLodestarMetrics(
       name: "lodestar_shuffling_cache_miss_count",
       help: "Count of shuffling cache miss",
     }),
-
-      name: "
-      help: "Count of shuffling that were
-    }),
-    shufflingPromiseNotResolvedAndThrownAway: register.gauge({
-      name: "lodestar_shuffling_cache_promise_not_resolved_and_thrown_away_count",
-      help: "Count of shuffling cache promises that were discarded and the shuffling was built synchronously",
+    shufflingSetMultipleTimes: register.gauge({
+      name: "lodestar_shuffling_cache_set_multiple_times_count",
+      help: "Count of shuffling that were set multiple times",
     }),
     shufflingPromiseNotResolved: register.gauge({
       name: "lodestar_shuffling_cache_promise_not_resolved_count",
       help: "Count of shuffling cache promises that were requested before the promise was resolved",
     }),
-    nextShufflingNotOnEpochCache: register.gauge({
-      name: "lodestar_shuffling_cache_next_shuffling_not_on_epoch_cache",
-      help: "The next shuffling was not on the epoch cache before the epoch transition",
-    }),
     shufflingPromiseResolutionTime: register.histogram({
       name: "lodestar_shuffling_cache_promise_resolution_time_seconds",
       help: "Time from promise insertion until promise resolution when shuffling was ready in seconds",
       buckets: [0.5, 1, 1.5, 2],
     }),
-    shufflingCalculationTime: register.histogram<{source: "build" | "getSync"}>({
-      name: "lodestar_shuffling_cache_shuffling_calculation_time_seconds",
-      help: "Run time of shuffling calculation",
-      buckets: [0.5, 0.75, 1, 1.25, 1.5],
-      labelNames: ["source"],
-    }),
   },
 
   seenCache: {

package/src/node/nodejs.ts
CHANGED

@@ -6,9 +6,10 @@ import {PubkeyIndexMap} from "@chainsafe/pubkey-index-map";
 import {BeaconApiMethods} from "@lodestar/api/beacon/server";
 import {BeaconConfig} from "@lodestar/config";
 import type {LoggerNode} from "@lodestar/logger/node";
-import {
+import {ZERO_HASH_HEX} from "@lodestar/params";
+import {CachedBeaconStateAllForks, Index2PubkeyCache, isExecutionCachedStateType} from "@lodestar/state-transition";
 import {phase0} from "@lodestar/types";
-import {sleep} from "@lodestar/utils";
+import {sleep, toRootHex} from "@lodestar/utils";
 import {ProcessShutdownCallback} from "@lodestar/validator";
 import {BeaconRestApiServer, getApi} from "../api/index.js";
 import {BeaconChain, IBeaconChain, initBeaconMetrics} from "../chain/index.js";
@@ -221,6 +222,20 @@ export class BeaconNode {
         )
       : null;
 
+    let executionEngineOpts = opts.executionEngine;
+    if (opts.executionEngine.mode === "mock") {
+      const eth1BlockHash = isExecutionCachedStateType(anchorState)
+        ? toRootHex(anchorState.latestExecutionPayloadHeader.blockHash)
+        : undefined;
+      executionEngineOpts = {
+        ...opts.executionEngine,
+        genesisBlockHash: ZERO_HASH_HEX,
+        eth1BlockHash,
+        genesisTime: anchorState.genesisTime,
+        config,
+      };
+    }
+
     const chain = new BeaconChain(opts.chain, {
       privateKey,
       config,
@@ -236,7 +251,7 @@ export class BeaconNode {
       validatorMonitor,
       anchorState,
       isAnchorStateFinalized,
-      executionEngine: initializeExecutionEngine(
+      executionEngine: initializeExecutionEngine(executionEngineOpts, {
        metrics,
        signal,
        logger: logger.child({module: LoggerModule.execution}),