@lodestar/validator 1.35.0-rc.0 → 1.35.0-rc.2
- package/lib/metrics.js +14 -14
- package/lib/metrics.js.map +1 -1
- package/lib/services/attestation.d.ts.map +1 -1
- package/lib/services/attestation.js +35 -27
- package/lib/services/attestation.js.map +1 -1
- package/lib/services/attestationDuties.d.ts.map +1 -1
- package/lib/services/attestationDuties.js +1 -0
- package/lib/services/attestationDuties.js.map +1 -1
- package/lib/services/blockDuties.d.ts +2 -2
- package/lib/services/blockDuties.d.ts.map +1 -1
- package/lib/services/blockDuties.js +4 -3
- package/lib/services/blockDuties.js.map +1 -1
- package/lib/services/externalSignerSync.js +1 -1
- package/lib/services/externalSignerSync.js.map +1 -1
- package/lib/services/syncCommittee.d.ts.map +1 -1
- package/lib/services/syncCommittee.js +30 -22
- package/lib/services/syncCommittee.js.map +1 -1
- package/lib/util/clock.d.ts +3 -0
- package/lib/util/clock.d.ts.map +1 -1
- package/lib/util/clock.js +11 -8
- package/lib/util/clock.js.map +1 -1
- package/lib/util/params.js +8 -1
- package/lib/util/params.js.map +1 -1
- package/lib/validator.js +1 -1
- package/lib/validator.js.map +1 -1
- package/package.json +10 -10
- package/src/metrics.ts +14 -14
- package/src/services/attestation.ts +37 -24
- package/src/services/attestationDuties.ts +1 -0
- package/src/services/blockDuties.ts +7 -5
- package/src/services/externalSignerSync.ts +1 -1
- package/src/services/syncCommittee.ts +43 -22
- package/src/util/clock.ts +13 -8
- package/src/util/params.ts +8 -1
- package/src/validator.ts +1 -1
package/src/services/attestation.ts
CHANGED

@@ -1,6 +1,6 @@
 import {ApiClient, routes} from "@lodestar/api";
 import {ChainForkConfig} from "@lodestar/config";
-import {
+import {ForkName, isForkPostElectra} from "@lodestar/params";
 import {computeEpochAtSlot, isAggregatorFromCommitteeLength} from "@lodestar/state-transition";
 import {BLSSignature, SignedAggregateAndProof, SingleAttestation, Slot, phase0, ssz} from "@lodestar/types";
 import {prettyBytes, sleep, toRootHex} from "@lodestar/utils";
@@ -73,6 +73,7 @@ export class AttestationService
 if (duties.length === 0) {
 return;
 }
+const fork = this.config.getForkName(slot);

 if (this.opts?.distributedAggregationSelection) {
 // Validator in distributed cluster only has a key share, not the full private key.
@@ -81,16 +82,20 @@ export class AttestationService
 // This will run in parallel to other attestation tasks but must be finished before starting
 // attestation aggregation as it is required to correctly determine if validator is aggregator
 // and to produce a AggregateAndProof that can be threshold aggregated by the middleware client.
-this.runDistributedAggregationSelectionTasks(duties, slot, signal).catch((e) =>
+this.runDistributedAggregationSelectionTasks(fork, duties, slot, signal).catch((e) =>
 this.logger.error("Error on attestation aggregation selection", {slot}, e)
 );
 }

 // A validator should create and broadcast the attestation to the associated attestation subnet when either
 // (a) the validator has received a valid block from the expected block proposer for the assigned slot or
-// (b)
-
-
+// (b) ATTESTATION_DUE_BPS of the slot has transpired -- whichever comes first.
+const attestationDueMs = this.config.getAttestationDueMs(fork);
+await Promise.race([
+sleep(attestationDueMs - this.clock.msFromSlot(slot), signal),
+this.emitter.waitForBlockSlot(slot),
+]);
+this.metrics?.attesterStepCallProduceAttestation.observe(this.clock.secFromSlot(slot) - attestationDueMs / 1000);

 // Beacon node's endpoint produceAttestationData return data is not dependent on committeeIndex.
 // Produce a single attestation for all committees and submit unaggregated attestations in one go.
@@ -99,22 +104,25 @@
 const attestationNoCommittee = await this.produceAttestation(0, slot);

 // Step 1. Mutate, and sign `Attestation` for each validator. Then publish all `Attestations` in one go
-await this.signAndPublishAttestations(slot, attestationNoCommittee, duties);
+await this.signAndPublishAttestations(fork, slot, attestationNoCommittee, duties);

 // Step 2. after all attestations are submitted, make an aggregate.
-// First, wait until the `aggregation_production_instant` (
-
-
+// First, wait until the `aggregation_production_instant` (AGGREGATE_DUE_BPS of the way through the slot)
+const aggregateDueMs = this.config.getAggregateDueMs(fork);
+await sleep(aggregateDueMs - this.clock.msFromSlot(slot), signal);
+this.metrics?.attesterStepCallProduceAggregate.observe(this.clock.secFromSlot(slot) - aggregateDueMs / 1000);

 const dutiesByCommitteeIndex = groupAttDutiesByCommitteeIndex(duties);
-const isPostElectra = this.config.getForkSeq(slot) >= ForkSeq.electra;

 // Then download, sign and publish a `SignedAggregateAndProof` for each
 // validator that is elected to aggregate for this `slot` and `committeeIndex`.
 await Promise.all(
 Array.from(dutiesByCommitteeIndex.entries()).map(([index, dutiesSameCommittee]) => {
-const attestationData: phase0.AttestationData = {
-
+const attestationData: phase0.AttestationData = {
+...attestationNoCommittee,
+index: isForkPostElectra(fork) ? 0 : index,
+};
+return this.produceAndPublishAggregates(fork, attestationData, index, dutiesSameCommittee);
 })
 );
 } catch (e) {
@@ -138,6 +146,7 @@ export class AttestationService
 * validator and the list of individually-signed `Attestation` objects is returned to the BN.
 */
 private async signAndPublishAttestations(
+fork: ForkName,
 slot: Slot,
 attestationNoCommittee: phase0.AttestationData,
 duties: AttDutyAndProof[]
@@ -145,11 +154,10 @@ export class AttestationService
 const signedAttestations: SingleAttestation[] = [];
 const headRootHex = toRootHex(attestationNoCommittee.beaconBlockRoot);
 const currentEpoch = computeEpochAtSlot(slot);
-const isPostElectra = this.config.getForkSeq(slot) >= ForkSeq.electra;

 await Promise.all(
 duties.map(async ({duty}) => {
-const index =
+const index = isForkPostElectra(fork) ? 0 : duty.committeeIndex;
 const attestationData: phase0.AttestationData = {...attestationNoCommittee, index};
 const logCtxValidator = {slot, index, head: headRootHex, validatorIndex: duty.validatorIndex};

@@ -163,20 +171,21 @@ export class AttestationService
 })
 );

-// signAndPublishAttestations() may be called before the
+// signAndPublishAttestations() may be called before the ATTESTATION_DUE_BPS cutoff time if the block was received early.
 // If we produced the block or we got the block sooner than our peers, our attestations can be dropped because
 // they reach our peers before the block. To prevent that, we wait 2 extra seconds AFTER block arrival, but
-// never beyond the
+// never beyond the ATTESTATION_DUE_BPS cutoff time.
 // https://github.com/status-im/nimbus-eth2/blob/7b64c1dce4392731a4a59ee3a36caef2e0a8357a/beacon_chain/validators/validator_duties.nim#L1123
-const
-
+const attestationDueMs = this.config.getAttestationDueMs(fork);
+const msToCutoffTime = attestationDueMs - this.clock.msFromSlot(slot);
+// submitting attestations asap to avoid busy time at around ATTESTATION_DUE_BPS of slot
 const afterBlockDelayMs =
 1000 *
 this.clock.secondsPerSlot *
 (this.opts?.afterBlockDelaySlotFraction ?? DEFAULT_AFTER_BLOCK_DELAY_SLOT_FRACTION);
-await sleep(Math.min(
+await sleep(Math.min(msToCutoffTime, afterBlockDelayMs));

-this.metrics?.attesterStepCallPublishAttestation.observe(this.clock.secFromSlot(slot
+this.metrics?.attesterStepCallPublishAttestation.observe(this.clock.secFromSlot(slot) - attestationDueMs / 1000);

 // Step 2. Publish all `Attestations` in one go
 try {
@@ -205,6 +214,7 @@ export class AttestationService
 * returned to the BN.
 */
 private async produceAndPublishAggregates(
+fork: ForkName,
 attestation: phase0.AttestationData,
 committeeIndex: number,
 duties: AttDutyAndProof[]
@@ -246,7 +256,9 @@ export class AttestationService
 })
 );

-this.metrics?.attesterStepCallPublishAggregate.observe(
+this.metrics?.attesterStepCallPublishAggregate.observe(
+this.clock.secFromSlot(attestation.slot) - this.config.getAggregateDueMs(fork) / 1000
+);

 if (signedAggregateAndProofs.length > 0) {
 try {
@@ -274,6 +286,7 @@ export class AttestationService
 * See https://docs.google.com/document/d/1q9jOTPcYQa-3L8luRvQJ-M0eegtba4Nmon3dpO79TMk/mobilebasic
 */
 private async runDistributedAggregationSelectionTasks(
+fork: ForkName,
 duties: AttDutyAndProof[],
 slot: number,
 signal: AbortSignal
@@ -290,16 +303,16 @@ export class AttestationService

 const res = await Promise.race([
 this.api.validator.submitBeaconCommitteeSelections({selections: partialSelections}),
-// Exit attestation aggregation flow if there is no response after
+// Exit attestation aggregation flow if there is no response after ATTESTATION_DUE_BPS of the slot as
 // beacon node would likely not have enough time to prepare an aggregate attestation.
 // Note that the aggregations flow is not explicitly exited but rather will be skipped
 // due to the fact that calculation of `is_aggregator` in AttestationDutiesService is not done
 // and selectionProof is set to null, meaning no validator will be considered an aggregator.
-sleep(this.clock.
+sleep(this.config.getAttestationDueMs(fork) - this.clock.msFromSlot(slot), signal),
 ]);

 if (!res) {
-throw new Error("Failed to receive combined selection proofs before
+throw new Error("Failed to receive combined selection proofs before ATTESTATION_DUE_BPS of the slot");
 }

 const combinedSelections = res.value();
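The attestation timing in this file moves from hard-coded second offsets to per-fork millisecond deadlines derived from basis-point parameters (ATTESTATION_DUE_BPS, AGGREGATE_DUE_BPS) and SLOT_DURATION_MS. As a rough illustration of that relationship — the helper below and the 3333 value are assumptions for the example, not lodestar's actual implementation or configuration:

// Sketch only: assumed mapping from a *_DUE_BPS parameter to a millisecond
// deadline within the slot (10_000 basis points = one full slot).
const BPS_DENOMINATOR = 10_000;

function dueMsFromBps(slotDurationMs: number, dueBps: number): number {
  return Math.floor((slotDurationMs * dueBps) / BPS_DENOMINATOR);
}

// With a 12_000 ms slot and a hypothetical ATTESTATION_DUE_BPS of 3333 the
// deadline lands near 4000 ms, i.e. the familiar "1/3 of the slot" attestation cutoff.
const exampleAttestationDueMs = dueMsFromBps(12_000, 3333); // 3999
// The service then waits for whichever happens first: block arrival for the slot,
// or `attestationDueMs - clock.msFromSlot(slot)` milliseconds.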
package/src/services/blockDuties.ts
CHANGED

@@ -1,5 +1,5 @@
 import {ApiClient, routes} from "@lodestar/api";
-import {
+import {ChainForkConfig} from "@lodestar/config";
 import {computeEpochAtSlot, computeStartSlotAtEpoch} from "@lodestar/state-transition";
 import {BLSPubkey, Epoch, RootHex, Slot} from "@lodestar/types";
 import {sleep, toPubkeyHex} from "@lodestar/utils";
@@ -9,10 +9,11 @@ import {IClock, LoggerVc, differenceHex} from "../util/index.js";
 import {ValidatorStore} from "./validatorStore.js";

 /** This polls block duties 1s before the next epoch */
-// TODO: change to 6 to do it 2s before the next epoch
+// TODO: change to 8333 (5/6 of slot) to do it 2s before the next epoch
 // once we have some improvement on epoch transition time
 // see https://github.com/ChainSafe/lodestar/issues/5792#issuecomment-1647457442
-
+// TODO GLOAS: re-evaluate timing
+const BLOCK_DUTIES_LOOKAHEAD_BPS = 9167;
 /** Only retain `HISTORICAL_DUTIES_EPOCHS` duties prior to the current epoch */
 const HISTORICAL_DUTIES_EPOCHS = 2;
 // Re-declaring to not have to depend on `lodestar-params` just for this 0
@@ -30,7 +31,7 @@ export class BlockDutiesService {
 private readonly proposers = new Map<Epoch, BlockDutyAtEpoch>();

 constructor(
-private readonly config:
+private readonly config: ChainForkConfig,
 private readonly logger: LoggerVc,
 private readonly api: ApiClient,
 private readonly clock: IClock,
@@ -169,7 +170,8 @@ export class BlockDutiesService {
 */
 private async pollBeaconProposersNextEpoch(currentSlot: Slot, nextEpoch: Epoch, signal: AbortSignal): Promise<void> {
 const nextSlot = currentSlot + 1;
-const lookAheadMs =
+const lookAheadMs =
+this.config.SLOT_DURATION_MS - this.config.getSlotComponentDurationMs(BLOCK_DUTIES_LOOKAHEAD_BPS);
 await sleep(this.clock.msToSlot(nextSlot) - lookAheadMs, signal);
 this.logger.debug("Polling proposers for next epoch", {nextEpoch, nextSlot});
 // Poll proposers for the next epoch
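BLOCK_DUTIES_LOOKAHEAD_BPS = 9167 expresses the polling point as a fraction of the slot in basis points rather than a second offset. Assuming getSlotComponentDurationMs scales SLOT_DURATION_MS by that fraction, the lead time works out to roughly one second before the next slot on a 12-second slot, matching the "polls block duties 1s before the next epoch" comment; a worked sketch with example values:

// Worked example with assumed semantics for getSlotComponentDurationMs
// (scale SLOT_DURATION_MS by a basis-point fraction); values are illustrative.
const SLOT_DURATION_MS = 12_000;
const BLOCK_DUTIES_LOOKAHEAD_BPS = 9167;

function getSlotComponentDurationMs(bps: number): number {
  return Math.floor((SLOT_DURATION_MS * bps) / 10_000);
}

// 12_000 - 11_000 = 1000 ms: poll proposers for the next epoch ~1s before the next slot.
const lookAheadMs = SLOT_DURATION_MS - getSlotComponentDurationMs(BLOCK_DUTIES_LOOKAHEAD_BPS);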
package/src/services/externalSignerSync.ts
CHANGED

@@ -68,7 +68,7 @@ export function pollExternalSignerPubkeys(
 fetchExternalSignerPubkeys,
 externalSigner.fetchInterval ??
 // Once per epoch by default
-SLOTS_PER_EPOCH * config.
+SLOTS_PER_EPOCH * config.SLOT_DURATION_MS
 );
 signal.addEventListener("abort", () => clearInterval(interval), {once: true});
 }
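With the interval expressed directly in milliseconds, the default external-signer refresh cadence stays at once per epoch. A worked example with mainnet-like values (not read from an actual config):

// Example only: default fetch interval for external signer pubkeys.
const SLOTS_PER_EPOCH = 32;
const SLOT_DURATION_MS = 12_000;
const fetchIntervalMs = SLOTS_PER_EPOCH * SLOT_DURATION_MS; // 384_000 ms ≈ 6.4 minutes, i.e. one epoch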
package/src/services/syncCommittee.ts
CHANGED

@@ -1,6 +1,7 @@
 import {ApiClient, routes} from "@lodestar/api";
 import {ChainForkConfig} from "@lodestar/config";
-import {
+import {ForkName, isForkPostAltair} from "@lodestar/params";
+import {isSyncCommitteeAggregator} from "@lodestar/state-transition";
 import {BLSSignature, CommitteeIndex, Root, Slot, altair} from "@lodestar/types";
 import {sleep} from "@lodestar/utils";
 import {Metrics} from "../metrics.js";
@@ -58,9 +59,11 @@ export class SyncCommitteeService {
 }

 private runSyncCommitteeTasks = async (slot: Slot, signal: AbortSignal): Promise<void> => {
+const fork = this.config.getForkName(slot);
+
 try {
 // Before altair fork no need to check duties
-if (
+if (!isForkPostAltair(fork)) {
 return;
 }

@@ -77,25 +80,32 @@ export class SyncCommitteeService {
 // This will run in parallel to other sync committee tasks but must be finished before starting
 // sync committee contributions as it is required to correctly determine if validator is aggregator
 // and to produce a ContributionAndProof that can be threshold aggregated by the middleware client.
-this.runDistributedAggregationSelectionTasks(dutiesAtSlot, slot, signal).catch((e) =>
+this.runDistributedAggregationSelectionTasks(fork, dutiesAtSlot, slot, signal).catch((e) =>
 this.logger.error("Error on sync committee aggregation selection", {slot}, e)
 );
 }

 // unlike Attestation, SyncCommitteeSignature could be published asap
-// especially with lodestar, it's very busy at
+// especially with lodestar, it's very busy at ATTESTATION_DUE_BPS of the slot
 // see https://github.com/ChainSafe/lodestar/issues/4608
-
-
+const syncMessageDueMs = this.config.getSyncMessageDueMs(fork);
+await Promise.race([
+sleep(syncMessageDueMs - this.clock.msFromSlot(slot), signal),
+this.emitter.waitForBlockSlot(slot),
+]);
+this.metrics?.syncCommitteeStepCallProduceMessage.observe(this.clock.secFromSlot(slot) - syncMessageDueMs / 1000);

 // Step 1. Download, sign and publish an `SyncCommitteeMessage` for each validator.
 // Differs from AttestationService, `SyncCommitteeMessage` are equal for all
-const beaconBlockRoot = await this.produceAndPublishSyncCommittees(slot, dutiesAtSlot);
+const beaconBlockRoot = await this.produceAndPublishSyncCommittees(fork, slot, dutiesAtSlot);

 // Step 2. If an attestation was produced, make an aggregate.
-// First, wait until the `
-
-
+// First, wait until the `CONTRIBUTION_DUE_BPS` of the slot
+const syncContributionDueMs = this.config.getSyncContributionDueMs(fork);
+await sleep(syncContributionDueMs - this.clock.msFromSlot(slot), signal);
+this.metrics?.syncCommitteeStepCallProduceAggregate.observe(
+this.clock.secFromSlot(slot) - syncContributionDueMs / 1000
+);

 // await for all so if the Beacon node is overloaded it auto-throttles
 // TODO: This approach is conservative to reduce the node's load, review
@@ -105,9 +115,11 @@ export class SyncCommitteeService {
 if (duties.length === 0) return;
 // Then download, sign and publish a `SignedAggregateAndProof` for each
 // validator that is elected to aggregate for this `slot` and `subcommitteeIndex`.
-await this.produceAndPublishAggregates(slot, subcommitteeIndex, beaconBlockRoot, duties).catch(
-
-
+await this.produceAndPublishAggregates(fork, slot, subcommitteeIndex, beaconBlockRoot, duties).catch(
+(e: Error) => {
+this.logger.error("Error on SyncCommitteeContribution", {slot, index: subcommitteeIndex}, e);
+}
+);
 })
 );
 } catch (e) {
@@ -124,7 +136,11 @@ export class SyncCommitteeService {
 * Only one `SyncCommittee` is downloaded from the BN. It is then signed by each
 * validator and the list of individually-signed `SyncCommittee` objects is returned to the BN.
 */
-private async produceAndPublishSyncCommittees(
+private async produceAndPublishSyncCommittees(
+fork: ForkName,
+slot: Slot,
+duties: SyncDutyAndProofs[]
+): Promise<Root> {
 const logCtx = {slot};

 // /eth/v1/beacon/blocks/:blockId/root -> at slot -1
@@ -156,14 +172,15 @@ export class SyncCommitteeService {
 // by default we want to submit SyncCommitteeSignature asap after we receive block
 // provide a delay option just in case any client implementation validate the existence of block in
 // SyncCommitteeSignature gossip validation.
-const
+const syncMessageDueMs = this.config.getSyncMessageDueMs(fork);
+const msToCutoffTime = syncMessageDueMs - this.clock.msFromSlot(slot);
 const afterBlockDelayMs = 1000 * this.clock.secondsPerSlot * (this.opts?.scAfterBlockDelaySlotFraction ?? 0);
-const toDelayMs = Math.min(
+const toDelayMs = Math.min(msToCutoffTime, afterBlockDelayMs);
 if (toDelayMs > 0) {
 await sleep(toDelayMs);
 }

-this.metrics?.syncCommitteeStepCallPublishMessage.observe(this.clock.secFromSlot(slot
+this.metrics?.syncCommitteeStepCallPublishMessage.observe(this.clock.secFromSlot(slot) - syncMessageDueMs / 1000);

 if (signatures.length > 0) {
 try {
@@ -189,6 +206,7 @@ export class SyncCommitteeService {
 * returned to the BN.
 */
 private async produceAndPublishAggregates(
+fork: ForkName,
 slot: Slot,
 subcommitteeIndex: CommitteeIndex,
 beaconBlockRoot: Root,
@@ -223,7 +241,9 @@ export class SyncCommitteeService {
 })
 );

-this.metrics?.syncCommitteeStepCallPublishAggregate.observe(
+this.metrics?.syncCommitteeStepCallPublishAggregate.observe(
+this.clock.secFromSlot(slot) - this.config.getSyncContributionDueMs(fork) / 1000
+);

 if (signedContributions.length > 0) {
 try {
@@ -248,6 +268,7 @@ export class SyncCommitteeService {
 * See https://docs.google.com/document/d/1q9jOTPcYQa-3L8luRvQJ-M0eegtba4Nmon3dpO79TMk/mobilebasic
 */
 private async runDistributedAggregationSelectionTasks(
+fork: ForkName,
 duties: SyncDutyAndProofs[],
 slot: number,
 signal: AbortSignal
@@ -270,18 +291,18 @@ export class SyncCommitteeService {

 const res = await Promise.race([
 this.api.validator.submitSyncCommitteeSelections({selections: partialSelections}),
-// Exit sync committee contributions flow if there is no response after
-// This is in contrast to attestations aggregations flow which is already exited at
+// Exit sync committee contributions flow if there is no response after CONTRIBUTION_DUE_BPS of the slot.
+// This is in contrast to attestations aggregations flow which is already exited at ATTESTATION_DUE_BPS of the slot
 // because for sync committee is not required to resubscribe to subnets as beacon node will assume
 // validator always aggregates. This allows us to wait until we have to produce sync committee contributions.
 // Note that the sync committee contributions flow is not explicitly exited but rather will be skipped
 // due to the fact that calculation of `is_sync_committee_aggregator` in SyncCommitteeDutiesService is not done
 // and selectionProof is set to null, meaning no validator will be considered an aggregator.
-sleep(this.clock.
+sleep(this.config.getSyncContributionDueMs(fork) - this.clock.msFromSlot(slot), signal),
 ]);

 if (!res) {
-throw new Error("Failed to receive combined selection proofs before
+throw new Error("Failed to receive combined selection proofs before CONTRIBUTION_DUE_BPS of the slot");
 }

 const combinedSelections = res.value();
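The sync committee flow mirrors the attestation changes: publish SyncCommitteeMessages as soon as the block arrives or SYNC_MESSAGE_DUE_BPS of the slot has elapsed, then wait for CONTRIBUTION_DUE_BPS before aggregating. A condensed, dependency-injected sketch of that two-phase timing (helper names follow the diff above; everything else is simplified and omits duties, metrics, and error handling):

// Condensed sketch of the per-slot timing in runSyncCommitteeTasks (not the real code).
interface TimingDeps {
  msFromSlot(slot: number): number;
  getSyncMessageDueMs(): number;
  getSyncContributionDueMs(): number;
  waitForBlockSlot(slot: number): Promise<void>;
  sleep(ms: number, signal?: AbortSignal): Promise<void>;
}

async function syncCommitteeSlotTiming(slot: number, signal: AbortSignal, deps: TimingDeps): Promise<void> {
  // Phase 1: messages go out at block arrival or the SYNC_MESSAGE_DUE_BPS deadline, whichever is first.
  await Promise.race([
    deps.sleep(deps.getSyncMessageDueMs() - deps.msFromSlot(slot), signal),
    deps.waitForBlockSlot(slot),
  ]);
  // ...produce and publish SyncCommitteeMessage here...

  // Phase 2: contributions always wait for the CONTRIBUTION_DUE_BPS deadline.
  await deps.sleep(deps.getSyncContributionDueMs() - deps.msFromSlot(slot), signal);
  // ...produce and publish SignedContributionAndProof here...
}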
package/src/util/clock.ts
CHANGED
@@ -1,6 +1,6 @@
 import {ChainForkConfig} from "@lodestar/config";
 import {GENESIS_SLOT, SLOTS_PER_EPOCH} from "@lodestar/params";
-import {computeEpochAtSlot, getCurrentSlot} from "@lodestar/state-transition";
+import {computeEpochAtSlot, computeTimeAtSlot, getCurrentSlot} from "@lodestar/state-transition";
 import {Epoch, Slot, TimeSeconds} from "@lodestar/types";
 import {ErrorAborted, Logger, isErrorAborted, sleep} from "@lodestar/utils";

@@ -16,6 +16,7 @@ export interface IClock {
 runEverySlot(fn: (slot: Slot, signal: AbortSignal) => Promise<void>): void;
 runEveryEpoch(fn: (epoch: Epoch, signal: AbortSignal) => Promise<void>): void;
 msToSlot(slot: Slot): number;
+msFromSlot(slot: Slot): number;
 secFromSlot(slot: Slot): number;
 getCurrentSlot(): Slot;
 getCurrentEpoch(): Epoch;
@@ -35,7 +36,7 @@ export class Clock implements IClock {

 constructor(config: ChainForkConfig, logger: Logger, opts: {genesisTime: number}) {
 this.genesisTime = opts.genesisTime;
-this.secondsPerSlot = config.
+this.secondsPerSlot = config.SLOT_DURATION_MS / 1000;
 this.config = config;
 this.logger = logger;
 }
@@ -72,13 +73,17 @@ export class Clock implements IClock {

 /** Milliseconds from now to a specific slot */
 msToSlot(slot: Slot): number {
-
-
+return computeTimeAtSlot(this.config, slot, this.genesisTime) * 1000 - Date.now();
+}
+
+/** Milliseconds elapsed from a specific slot to now */
+msFromSlot(slot: Slot): number {
+return Date.now() - computeTimeAtSlot(this.config, slot, this.genesisTime) * 1000;
 }

 /** Seconds elapsed from a specific slot to now */
 secFromSlot(slot: Slot): number {
-return Date.now() / 1000 - (this.
+return Date.now() / 1000 - computeTimeAtSlot(this.config, slot, this.genesisTime);
 }

 /**
@@ -106,7 +111,7 @@ export class Clock implements IClock {
 if (timeItem === TimeItem.Slot) {
 if (nextSlot > slot + 1) {
 // It's not very likely that we skip more than one slot as HTTP timeout is set
-// to
+// to SLOT_DURATION_MS so we will fail task before skipping another slot.
 this.logger.warn("Skipped slot due to task taking more than one slot to run", {
 skippedSlot: slot + 1,
 });
@@ -126,7 +131,7 @@ export class Clock implements IClock {
 }

 private timeUntilNext(timeItem: TimeItem): number {
-const milliSecondsPerSlot = this.config.
+const milliSecondsPerSlot = this.config.SLOT_DURATION_MS;
 const msFromGenesis = Date.now() - this.genesisTime * 1000;

 if (timeItem === TimeItem.Slot) {
@@ -148,7 +153,7 @@ export class Clock implements IClock {
 */
 export function getCurrentSlotAround(config: ChainForkConfig, genesisTime: TimeSeconds): Slot {
 const diffInSeconds = Date.now() / 1000 - genesisTime;
-const slotsSinceGenesis = Math.round(diffInSeconds / config.
+const slotsSinceGenesis = Math.round((diffInSeconds * 1000) / config.SLOT_DURATION_MS);
 return GENESIS_SLOT + slotsSinceGenesis;
 }
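The new msFromSlot complements msToSlot: one measures time remaining until a slot starts, the other time elapsed since it started, and every deadline wait in the services above is expressed as `dueMs - clock.msFromSlot(slot)`. A standalone sketch of that arithmetic, assuming genesis time in seconds and an example slot duration (not copied from lodestar):

// Standalone sketch of the clock arithmetic (example values, not lodestar's implementation).
const SLOT_DURATION_MS = 12_000;
const genesisTimeSec = 1_606_824_023; // mainnet genesis, used purely as an example

function timeAtSlotMs(slot: number): number {
  return genesisTimeSec * 1000 + slot * SLOT_DURATION_MS;
}

function msToSlot(slot: number): number {
  return timeAtSlotMs(slot) - Date.now(); // positive before the slot starts
}

function msFromSlot(slot: number): number {
  return Date.now() - timeAtSlotMs(slot); // positive once the slot has started
}

// A wait like `sleep(attestationDueMs - msFromSlot(slot))` therefore sleeps for the time
// left until the intra-slot deadline, or not at all if the deadline has already passed
// (assuming sleep resolves immediately for non-positive durations).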
package/src/util/params.ts
CHANGED
@@ -146,11 +146,18 @@ function getSpecCriticalParams(localConfig: ChainConfig): Record<keyof ConfigWit
 GLOAS_FORK_EPOCH: gloasForkRelevant,

 // Time parameters
-SECONDS_PER_SLOT:
+SECONDS_PER_SLOT: false, // Deprecated
+SLOT_DURATION_MS: true,
 SECONDS_PER_ETH1_BLOCK: false, // Legacy
 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: true,
 SHARD_COMMITTEE_PERIOD: true,
 ETH1_FOLLOW_DISTANCE: true,
+PROPOSER_REORG_CUTOFF_BPS: true,
+ATTESTATION_DUE_BPS: true,
+AGGREGATE_DUE_BPS: true,
+// Altair
+SYNC_MESSAGE_DUE_BPS: altairForkRelevant,
+CONTRIBUTION_DUE_BPS: altairForkRelevant,

 // Validator cycle
 INACTIVITY_SCORE_BIAS: true,
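Marking the new basis-point timing parameters as spec-critical means a mismatch between the validator's local config and the beacon node's advertised spec for these keys should be surfaced rather than ignored. A hypothetical sketch of how such a flag map could be consumed (comparison logic and names are assumptions, not lodestar's actual check):

// Illustrative only: compare local vs. remote spec for the keys flagged as critical.
type Spec = Record<string, unknown>;

function findCriticalMismatches(critical: Record<string, boolean>, local: Spec, remote: Spec): string[] {
  return Object.entries(critical)
    .filter(([key, isCritical]) => isCritical && String(local[key]) !== String(remote[key]))
    .map(([key]) => key);
}

// e.g. a beacon node that only advertises SECONDS_PER_SLOT while the validator expects
// SLOT_DURATION_MS and ATTESTATION_DUE_BPS would report those keys as mismatches.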
package/src/validator.ts
CHANGED
@@ -178,7 +178,7 @@ export class Validator {
 urls: typeof clientOrUrls === "string" ? [clientOrUrls] : clientOrUrls,
 // Validator would need the beacon to respond within the slot
 // See https://github.com/ChainSafe/lodestar/issues/5315 for rationale
-globalInit: {timeoutMs: config.
+globalInit: {timeoutMs: config.SLOT_DURATION_MS, signal: controller.signal, ...globalInit},
 },
 {config, logger, metrics: metrics?.restApiClient}
 );