@lodestar/validator 1.35.0-dev.f80d2d52da → 1.35.0-dev.fd1dac853d
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/buckets.d.ts.map +1 -0
- package/lib/defaults.d.ts.map +1 -0
- package/lib/genesis.d.ts.map +1 -0
- package/lib/index.d.ts +7 -7
- package/lib/index.d.ts.map +1 -0
- package/lib/index.js +5 -5
- package/lib/index.js.map +1 -1
- package/lib/metrics.d.ts.map +1 -0
- package/lib/metrics.js +14 -14
- package/lib/metrics.js.map +1 -1
- package/lib/repositories/index.d.ts.map +1 -0
- package/lib/repositories/metaDataRepository.d.ts.map +1 -0
- package/lib/repositories/metaDataRepository.js +4 -3
- package/lib/repositories/metaDataRepository.js.map +1 -1
- package/lib/services/attestation.d.ts.map +1 -0
- package/lib/services/attestation.js +77 -60
- package/lib/services/attestation.js.map +1 -1
- package/lib/services/attestationDuties.d.ts.map +1 -0
- package/lib/services/attestationDuties.js +105 -98
- package/lib/services/attestationDuties.js.map +1 -1
- package/lib/services/block.d.ts.map +1 -0
- package/lib/services/block.js +64 -56
- package/lib/services/block.js.map +1 -1
- package/lib/services/blockDuties.d.ts +2 -2
- package/lib/services/blockDuties.d.ts.map +1 -0
- package/lib/services/blockDuties.js +35 -26
- package/lib/services/blockDuties.js.map +1 -1
- package/lib/services/chainHeaderTracker.d.ts.map +1 -0
- package/lib/services/chainHeaderTracker.js +30 -27
- package/lib/services/chainHeaderTracker.js.map +1 -1
- package/lib/services/doppelgangerService.d.ts.map +1 -0
- package/lib/services/doppelgangerService.js +52 -45
- package/lib/services/doppelgangerService.js.map +1 -1
- package/lib/services/emitter.d.ts +1 -1
- package/lib/services/emitter.d.ts.map +1 -0
- package/lib/services/externalSignerSync.d.ts.map +1 -0
- package/lib/services/externalSignerSync.js +1 -1
- package/lib/services/externalSignerSync.js.map +1 -1
- package/lib/services/indices.d.ts.map +1 -0
- package/lib/services/indices.js +8 -5
- package/lib/services/indices.js.map +1 -1
- package/lib/services/prepareBeaconProposer.d.ts.map +1 -0
- package/lib/services/prepareBeaconProposer.js.map +1 -1
- package/lib/services/syncCommittee.d.ts.map +1 -0
- package/lib/services/syncCommittee.js +80 -61
- package/lib/services/syncCommittee.js.map +1 -1
- package/lib/services/syncCommitteeDuties.d.ts.map +1 -0
- package/lib/services/syncCommitteeDuties.js +28 -23
- package/lib/services/syncCommitteeDuties.js.map +1 -1
- package/lib/services/syncingStatusTracker.d.ts.map +1 -0
- package/lib/services/syncingStatusTracker.js +32 -27
- package/lib/services/syncingStatusTracker.js.map +1 -1
- package/lib/services/utils.d.ts.map +1 -0
- package/lib/services/validatorStore.d.ts.map +1 -0
- package/lib/services/validatorStore.js +9 -3
- package/lib/services/validatorStore.js.map +1 -1
- package/lib/slashingProtection/attestation/attestationByTargetRepository.d.ts.map +1 -0
- package/lib/slashingProtection/attestation/attestationByTargetRepository.js +7 -3
- package/lib/slashingProtection/attestation/attestationByTargetRepository.js.map +1 -1
- package/lib/slashingProtection/attestation/attestationLowerBoundRepository.d.ts.map +1 -0
- package/lib/slashingProtection/attestation/attestationLowerBoundRepository.js +5 -3
- package/lib/slashingProtection/attestation/attestationLowerBoundRepository.js.map +1 -1
- package/lib/slashingProtection/attestation/errors.d.ts.map +1 -0
- package/lib/slashingProtection/attestation/index.d.ts.map +1 -0
- package/lib/slashingProtection/attestation/index.js +3 -0
- package/lib/slashingProtection/attestation/index.js.map +1 -1
- package/lib/slashingProtection/block/blockBySlotRepository.d.ts.map +1 -0
- package/lib/slashingProtection/block/blockBySlotRepository.js +7 -3
- package/lib/slashingProtection/block/blockBySlotRepository.js.map +1 -1
- package/lib/slashingProtection/block/errors.d.ts.map +1 -0
- package/lib/slashingProtection/block/index.d.ts.map +1 -0
- package/lib/slashingProtection/block/index.js +1 -0
- package/lib/slashingProtection/block/index.js.map +1 -1
- package/lib/slashingProtection/index.d.ts +1 -1
- package/lib/slashingProtection/index.d.ts.map +1 -0
- package/lib/slashingProtection/index.js +3 -0
- package/lib/slashingProtection/index.js.map +1 -1
- package/lib/slashingProtection/interchange/errors.d.ts.map +1 -0
- package/lib/slashingProtection/interchange/formats/completeV4.d.ts.map +1 -0
- package/lib/slashingProtection/interchange/formats/index.d.ts.map +1 -0
- package/lib/slashingProtection/interchange/formats/v5.d.ts.map +1 -0
- package/lib/slashingProtection/interchange/index.d.ts.map +1 -0
- package/lib/slashingProtection/interchange/parseInterchange.d.ts.map +1 -0
- package/lib/slashingProtection/interchange/serializeInterchange.d.ts.map +1 -0
- package/lib/slashingProtection/interchange/types.d.ts.map +1 -0
- package/lib/slashingProtection/interface.d.ts.map +1 -0
- package/lib/slashingProtection/minMaxSurround/distanceStoreRepository.d.ts.map +1 -0
- package/lib/slashingProtection/minMaxSurround/distanceStoreRepository.js +8 -0
- package/lib/slashingProtection/minMaxSurround/distanceStoreRepository.js.map +1 -1
- package/lib/slashingProtection/minMaxSurround/errors.d.ts.map +1 -0
- package/lib/slashingProtection/minMaxSurround/index.d.ts.map +1 -0
- package/lib/slashingProtection/minMaxSurround/interface.d.ts.map +1 -0
- package/lib/slashingProtection/minMaxSurround/minMaxSurround.d.ts.map +1 -0
- package/lib/slashingProtection/minMaxSurround/minMaxSurround.js +2 -0
- package/lib/slashingProtection/minMaxSurround/minMaxSurround.js.map +1 -1
- package/lib/slashingProtection/types.d.ts.map +1 -0
- package/lib/slashingProtection/utils.d.ts +1 -1
- package/lib/slashingProtection/utils.d.ts.map +1 -0
- package/lib/types.d.ts.map +1 -0
- package/lib/util/batch.d.ts.map +1 -0
- package/lib/util/clock.d.ts +3 -0
- package/lib/util/clock.d.ts.map +1 -0
- package/lib/util/clock.js +16 -9
- package/lib/util/clock.js.map +1 -1
- package/lib/util/difference.d.ts.map +1 -0
- package/lib/util/externalSignerClient.d.ts.map +1 -0
- package/lib/util/format.d.ts.map +1 -0
- package/lib/util/index.d.ts.map +1 -0
- package/lib/util/logger.d.ts.map +1 -0
- package/lib/util/params.d.ts.map +1 -0
- package/lib/util/params.js +18 -2
- package/lib/util/params.js.map +1 -1
- package/lib/util/url.d.ts.map +1 -0
- package/lib/validator.d.ts.map +1 -0
- package/lib/validator.js +16 -1
- package/lib/validator.js.map +1 -1
- package/package.json +19 -16
- package/src/buckets.ts +30 -0
- package/src/defaults.ts +8 -0
- package/src/genesis.ts +19 -0
- package/src/index.ts +22 -0
- package/src/metrics.ts +417 -0
- package/src/repositories/index.ts +1 -0
- package/src/repositories/metaDataRepository.ts +42 -0
- package/src/services/attestation.ts +362 -0
- package/src/services/attestationDuties.ts +406 -0
- package/src/services/block.ts +261 -0
- package/src/services/blockDuties.ts +217 -0
- package/src/services/chainHeaderTracker.ts +89 -0
- package/src/services/doppelgangerService.ts +286 -0
- package/src/services/emitter.ts +43 -0
- package/src/services/externalSignerSync.ts +81 -0
- package/src/services/indices.ts +165 -0
- package/src/services/prepareBeaconProposer.ts +119 -0
- package/src/services/syncCommittee.ts +338 -0
- package/src/services/syncCommitteeDuties.ts +337 -0
- package/src/services/syncingStatusTracker.ts +74 -0
- package/src/services/utils.ts +58 -0
- package/src/services/validatorStore.ts +830 -0
- package/src/slashingProtection/attestation/attestationByTargetRepository.ts +77 -0
- package/src/slashingProtection/attestation/attestationLowerBoundRepository.ts +44 -0
- package/src/slashingProtection/attestation/errors.ts +66 -0
- package/src/slashingProtection/attestation/index.ts +171 -0
- package/src/slashingProtection/block/blockBySlotRepository.ts +78 -0
- package/src/slashingProtection/block/errors.ts +28 -0
- package/src/slashingProtection/block/index.ts +94 -0
- package/src/slashingProtection/index.ts +95 -0
- package/src/slashingProtection/interchange/errors.ts +15 -0
- package/src/slashingProtection/interchange/formats/completeV4.ts +125 -0
- package/src/slashingProtection/interchange/formats/index.ts +7 -0
- package/src/slashingProtection/interchange/formats/v5.ts +120 -0
- package/src/slashingProtection/interchange/index.ts +5 -0
- package/src/slashingProtection/interchange/parseInterchange.ts +55 -0
- package/src/slashingProtection/interchange/serializeInterchange.ts +35 -0
- package/src/slashingProtection/interchange/types.ts +18 -0
- package/src/slashingProtection/interface.ts +28 -0
- package/src/slashingProtection/minMaxSurround/distanceStoreRepository.ts +57 -0
- package/src/slashingProtection/minMaxSurround/errors.ts +27 -0
- package/src/slashingProtection/minMaxSurround/index.ts +4 -0
- package/src/slashingProtection/minMaxSurround/interface.ts +23 -0
- package/src/slashingProtection/minMaxSurround/minMaxSurround.ts +104 -0
- package/src/slashingProtection/types.ts +12 -0
- package/src/slashingProtection/utils.ts +42 -0
- package/src/types.ts +31 -0
- package/src/util/batch.ts +15 -0
- package/src/util/clock.ts +169 -0
- package/src/util/difference.ts +10 -0
- package/src/util/externalSignerClient.ts +277 -0
- package/src/util/format.ts +3 -0
- package/src/util/index.ts +6 -0
- package/src/util/logger.ts +51 -0
- package/src/util/params.ts +320 -0
- package/src/util/url.ts +16 -0
- package/src/validator.ts +418 -0
|
@@ -0,0 +1,338 @@
|
|
|
1
|
+
import {ApiClient, routes} from "@lodestar/api";
|
|
2
|
+
import {ChainForkConfig} from "@lodestar/config";
|
|
3
|
+
import {ForkName, isForkPostAltair} from "@lodestar/params";
|
|
4
|
+
import {isSyncCommitteeAggregator} from "@lodestar/state-transition";
|
|
5
|
+
import {BLSSignature, CommitteeIndex, Root, Slot, altair} from "@lodestar/types";
|
|
6
|
+
import {sleep} from "@lodestar/utils";
|
|
7
|
+
import {Metrics} from "../metrics.js";
|
|
8
|
+
import {PubkeyHex} from "../types.js";
|
|
9
|
+
import {IClock, LoggerVc} from "../util/index.js";
|
|
10
|
+
import {ChainHeaderTracker} from "./chainHeaderTracker.js";
|
|
11
|
+
import {ValidatorEventEmitter} from "./emitter.js";
|
|
12
|
+
import {SyncCommitteeDutiesService, SyncDutyAndProofs} from "./syncCommitteeDuties.js";
|
|
13
|
+
import {SyncingStatusTracker} from "./syncingStatusTracker.js";
|
|
14
|
+
import {SubcommitteeDuty, groupSyncDutiesBySubcommitteeIndex} from "./utils.js";
|
|
15
|
+
import {ValidatorStore} from "./validatorStore.js";
|
|
16
|
+
|
|
17
|
+
export type SyncCommitteeServiceOpts = {
|
|
18
|
+
scAfterBlockDelaySlotFraction?: number;
|
|
19
|
+
distributedAggregationSelection?: boolean;
|
|
20
|
+
};
|
|
21
|
+
|
|
22
|
+
/**
|
|
23
|
+
* Service that sets up and handles validator sync duties.
|
|
24
|
+
*/
|
|
25
|
+
export class SyncCommitteeService {
|
|
26
|
+
private readonly dutiesService: SyncCommitteeDutiesService;
|
|
27
|
+
|
|
28
|
+
constructor(
|
|
29
|
+
private readonly config: ChainForkConfig,
|
|
30
|
+
private readonly logger: LoggerVc,
|
|
31
|
+
private readonly api: ApiClient,
|
|
32
|
+
private readonly clock: IClock,
|
|
33
|
+
private readonly validatorStore: ValidatorStore,
|
|
34
|
+
private readonly emitter: ValidatorEventEmitter,
|
|
35
|
+
private readonly chainHeaderTracker: ChainHeaderTracker,
|
|
36
|
+
readonly syncingStatusTracker: SyncingStatusTracker,
|
|
37
|
+
private readonly metrics: Metrics | null,
|
|
38
|
+
private readonly opts?: SyncCommitteeServiceOpts
|
|
39
|
+
) {
|
|
40
|
+
this.dutiesService = new SyncCommitteeDutiesService(
|
|
41
|
+
config,
|
|
42
|
+
logger,
|
|
43
|
+
api,
|
|
44
|
+
clock,
|
|
45
|
+
validatorStore,
|
|
46
|
+
syncingStatusTracker,
|
|
47
|
+
metrics,
|
|
48
|
+
{
|
|
49
|
+
distributedAggregationSelection: opts?.distributedAggregationSelection,
|
|
50
|
+
}
|
|
51
|
+
);
|
|
52
|
+
|
|
53
|
+
// At most every slot, check existing duties from SyncCommitteeDutiesService and run tasks
|
|
54
|
+
clock.runEverySlot(this.runSyncCommitteeTasks);
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
removeDutiesForKey(pubkey: PubkeyHex): void {
|
|
58
|
+
this.dutiesService.removeDutiesForKey(pubkey);
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
private runSyncCommitteeTasks = async (slot: Slot, signal: AbortSignal): Promise<void> => {
|
|
62
|
+
const fork = this.config.getForkName(slot);
|
|
63
|
+
|
|
64
|
+
try {
|
|
65
|
+
// Before altair fork no need to check duties
|
|
66
|
+
if (!isForkPostAltair(fork)) {
|
|
67
|
+
return;
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
// Fetch info first so a potential delay is absorbed by the sleep() below
|
|
71
|
+
const dutiesAtSlot = await this.dutiesService.getDutiesAtSlot(slot);
|
|
72
|
+
if (dutiesAtSlot.length === 0) {
|
|
73
|
+
return;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
if (this.opts?.distributedAggregationSelection) {
|
|
77
|
+
// Validator in distributed cluster only has a key share, not the full private key.
|
|
78
|
+
// The partial selection proofs must be exchanged for combined selection proofs by
|
|
79
|
+
// calling submitSyncCommitteeSelections on the distributed validator middleware client.
|
|
80
|
+
// This will run in parallel to other sync committee tasks but must be finished before starting
|
|
81
|
+
// sync committee contributions as it is required to correctly determine if validator is aggregator
|
|
82
|
+
// and to produce a ContributionAndProof that can be threshold aggregated by the middleware client.
|
|
83
|
+
this.runDistributedAggregationSelectionTasks(fork, dutiesAtSlot, slot, signal).catch((e) =>
|
|
84
|
+
this.logger.error("Error on sync committee aggregation selection", {slot}, e)
|
|
85
|
+
);
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
// unlike Attestation, SyncCommitteeSignature could be published asap
|
|
89
|
+
// especially with lodestar, it's very busy at ATTESTATION_DUE_BPS of the slot
|
|
90
|
+
// see https://github.com/ChainSafe/lodestar/issues/4608
|
|
91
|
+
const syncMessageDueMs = this.config.getSyncMessageDueMs(fork);
|
|
92
|
+
await Promise.race([
|
|
93
|
+
sleep(syncMessageDueMs - this.clock.msFromSlot(slot), signal),
|
|
94
|
+
this.emitter.waitForBlockSlot(slot),
|
|
95
|
+
]);
|
|
96
|
+
this.metrics?.syncCommitteeStepCallProduceMessage.observe(this.clock.secFromSlot(slot) - syncMessageDueMs / 1000);
|
|
97
|
+
|
|
98
|
+
// Step 1. Download, sign and publish an `SyncCommitteeMessage` for each validator.
|
|
99
|
+
// Differs from AttestationService, `SyncCommitteeMessage` are equal for all
|
|
100
|
+
const beaconBlockRoot = await this.produceAndPublishSyncCommittees(fork, slot, dutiesAtSlot);
|
|
101
|
+
|
|
102
|
+
// Step 2. If an attestation was produced, make an aggregate.
|
|
103
|
+
// First, wait until the `CONTRIBUTION_DUE_BPS` of the slot
|
|
104
|
+
const syncContributionDueMs = this.config.getSyncContributionDueMs(fork);
|
|
105
|
+
await sleep(syncContributionDueMs - this.clock.msFromSlot(slot), signal);
|
|
106
|
+
this.metrics?.syncCommitteeStepCallProduceAggregate.observe(
|
|
107
|
+
this.clock.secFromSlot(slot) - syncContributionDueMs / 1000
|
|
108
|
+
);
|
|
109
|
+
|
|
110
|
+
// await for all so if the Beacon node is overloaded it auto-throttles
|
|
111
|
+
// TODO: This approach is conservative to reduce the node's load, review
|
|
112
|
+
const dutiesBySubcommitteeIndex = groupSyncDutiesBySubcommitteeIndex(dutiesAtSlot);
|
|
113
|
+
await Promise.all(
|
|
114
|
+
Array.from(dutiesBySubcommitteeIndex.entries()).map(async ([subcommitteeIndex, duties]) => {
|
|
115
|
+
if (duties.length === 0) return;
|
|
116
|
+
// Then download, sign and publish a `SignedAggregateAndProof` for each
|
|
117
|
+
// validator that is elected to aggregate for this `slot` and `subcommitteeIndex`.
|
|
118
|
+
await this.produceAndPublishAggregates(fork, slot, subcommitteeIndex, beaconBlockRoot, duties).catch(
|
|
119
|
+
(e: Error) => {
|
|
120
|
+
this.logger.error("Error on SyncCommitteeContribution", {slot, index: subcommitteeIndex}, e);
|
|
121
|
+
}
|
|
122
|
+
);
|
|
123
|
+
})
|
|
124
|
+
);
|
|
125
|
+
} catch (e) {
|
|
126
|
+
this.logger.error("Error on runSyncCommitteeTasks", {slot}, e as Error);
|
|
127
|
+
}
|
|
128
|
+
};
|
|
129
|
+
|
|
130
|
+
/**
|
|
131
|
+
* Performs the first step of the attesting process: downloading `SyncCommittee` objects,
|
|
132
|
+
* signing them and returning them to the validator.
|
|
133
|
+
*
|
|
134
|
+
* https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/altair/validator.md#sync-committee-messages
|
|
135
|
+
*
|
|
136
|
+
* Only one `SyncCommittee` is downloaded from the BN. It is then signed by each
|
|
137
|
+
* validator and the list of individually-signed `SyncCommittee` objects is returned to the BN.
|
|
138
|
+
*/
|
|
139
|
+
private async produceAndPublishSyncCommittees(
|
|
140
|
+
fork: ForkName,
|
|
141
|
+
slot: Slot,
|
|
142
|
+
duties: SyncDutyAndProofs[]
|
|
143
|
+
): Promise<Root> {
|
|
144
|
+
const logCtx = {slot};
|
|
145
|
+
|
|
146
|
+
// /eth/v1/beacon/blocks/:blockId/root -> at slot -1
|
|
147
|
+
|
|
148
|
+
// Produce one attestation data per slot and subcommitteeIndex
|
|
149
|
+
// Spec: the validator should prepare a SyncCommitteeMessage for the previous slot (slot - 1)
|
|
150
|
+
// as soon as they have determined the head block of slot - 1
|
|
151
|
+
|
|
152
|
+
const blockRoot: Uint8Array =
|
|
153
|
+
this.chainHeaderTracker.getCurrentChainHead(slot) ??
|
|
154
|
+
(await this.api.beacon.getBlockRoot({blockId: "head"})).value().root;
|
|
155
|
+
|
|
156
|
+
const signatures: altair.SyncCommitteeMessage[] = [];
|
|
157
|
+
|
|
158
|
+
await Promise.all(
|
|
159
|
+
duties.map(async ({duty}) => {
|
|
160
|
+
const logCtxValidator = {...logCtx, validatorIndex: duty.validatorIndex};
|
|
161
|
+
try {
|
|
162
|
+
signatures.push(
|
|
163
|
+
await this.validatorStore.signSyncCommitteeSignature(duty.pubkey, duty.validatorIndex, slot, blockRoot)
|
|
164
|
+
);
|
|
165
|
+
this.logger.debug("Signed SyncCommitteeMessage", logCtxValidator);
|
|
166
|
+
} catch (e) {
|
|
167
|
+
this.logger.error("Error signing SyncCommitteeMessage", logCtxValidator, e as Error);
|
|
168
|
+
}
|
|
169
|
+
})
|
|
170
|
+
);
|
|
171
|
+
|
|
172
|
+
// by default we want to submit SyncCommitteeSignature asap after we receive block
|
|
173
|
+
// provide a delay option just in case any client implementation validate the existence of block in
|
|
174
|
+
// SyncCommitteeSignature gossip validation.
|
|
175
|
+
const syncMessageDueMs = this.config.getSyncMessageDueMs(fork);
|
|
176
|
+
const msToCutoffTime = syncMessageDueMs - this.clock.msFromSlot(slot);
|
|
177
|
+
const afterBlockDelayMs = 1000 * this.clock.secondsPerSlot * (this.opts?.scAfterBlockDelaySlotFraction ?? 0);
|
|
178
|
+
const toDelayMs = Math.min(msToCutoffTime, afterBlockDelayMs);
|
|
179
|
+
if (toDelayMs > 0) {
|
|
180
|
+
await sleep(toDelayMs);
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
this.metrics?.syncCommitteeStepCallPublishMessage.observe(this.clock.secFromSlot(slot) - syncMessageDueMs / 1000);
|
|
184
|
+
|
|
185
|
+
if (signatures.length > 0) {
|
|
186
|
+
try {
|
|
187
|
+
(await this.api.beacon.submitPoolSyncCommitteeSignatures({signatures})).assertOk();
|
|
188
|
+
this.logger.info("Published SyncCommitteeMessage", {...logCtx, count: signatures.length});
|
|
189
|
+
this.metrics?.publishedSyncCommitteeMessage.inc(signatures.length);
|
|
190
|
+
} catch (e) {
|
|
191
|
+
this.logger.error("Error publishing SyncCommitteeMessage", logCtx, e as Error);
|
|
192
|
+
}
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
return blockRoot;
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
/**
|
|
199
|
+
* Performs the second step of the attesting process: downloading an aggregated `SyncCommittee`,
|
|
200
|
+
* converting it into a `SignedAggregateAndProof` and returning it to the BN.
|
|
201
|
+
*
|
|
202
|
+
* https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/altair/validator.md#sync-committee-contributions
|
|
203
|
+
*
|
|
204
|
+
* Only one aggregated `SyncCommittee` is downloaded from the BN. It is then signed
|
|
205
|
+
* by each validator and the list of individually-signed `SignedAggregateAndProof` objects is
|
|
206
|
+
* returned to the BN.
|
|
207
|
+
*/
|
|
208
|
+
private async produceAndPublishAggregates(
|
|
209
|
+
fork: ForkName,
|
|
210
|
+
slot: Slot,
|
|
211
|
+
subcommitteeIndex: CommitteeIndex,
|
|
212
|
+
beaconBlockRoot: Root,
|
|
213
|
+
duties: SubcommitteeDuty[]
|
|
214
|
+
): Promise<void> {
|
|
215
|
+
const logCtx = {slot, index: subcommitteeIndex};
|
|
216
|
+
|
|
217
|
+
// No validator is aggregator, skip
|
|
218
|
+
if (duties.every(({selectionProof}) => selectionProof === null)) {
|
|
219
|
+
return;
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
this.logger.verbose("Producing SyncCommitteeContribution", logCtx);
|
|
223
|
+
const res = await this.api.validator.produceSyncCommitteeContribution({slot, subcommitteeIndex, beaconBlockRoot});
|
|
224
|
+
|
|
225
|
+
const signedContributions: altair.SignedContributionAndProof[] = [];
|
|
226
|
+
|
|
227
|
+
await Promise.all(
|
|
228
|
+
duties.map(async ({duty, selectionProof}) => {
|
|
229
|
+
const logCtxValidator = {...logCtx, validatorIndex: duty.validatorIndex};
|
|
230
|
+
try {
|
|
231
|
+
// Produce signed contributions only for validators that are subscribed aggregators.
|
|
232
|
+
if (selectionProof !== null) {
|
|
233
|
+
signedContributions.push(
|
|
234
|
+
await this.validatorStore.signContributionAndProof(duty, selectionProof, res.value())
|
|
235
|
+
);
|
|
236
|
+
this.logger.debug("Signed SyncCommitteeContribution", logCtxValidator);
|
|
237
|
+
}
|
|
238
|
+
} catch (e) {
|
|
239
|
+
this.logger.error("Error signing SyncCommitteeContribution", logCtxValidator, e as Error);
|
|
240
|
+
}
|
|
241
|
+
})
|
|
242
|
+
);
|
|
243
|
+
|
|
244
|
+
this.metrics?.syncCommitteeStepCallPublishAggregate.observe(
|
|
245
|
+
this.clock.secFromSlot(slot) - this.config.getSyncContributionDueMs(fork) / 1000
|
|
246
|
+
);
|
|
247
|
+
|
|
248
|
+
if (signedContributions.length > 0) {
|
|
249
|
+
try {
|
|
250
|
+
(
|
|
251
|
+
await this.api.validator.publishContributionAndProofs({contributionAndProofs: signedContributions})
|
|
252
|
+
).assertOk();
|
|
253
|
+
this.logger.info("Published SyncCommitteeContribution", {...logCtx, count: signedContributions.length});
|
|
254
|
+
this.metrics?.publishedSyncCommitteeContribution.inc(signedContributions.length);
|
|
255
|
+
} catch (e) {
|
|
256
|
+
this.logger.error("Error publishing SyncCommitteeContribution", logCtx, e as Error);
|
|
257
|
+
}
|
|
258
|
+
}
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
/**
|
|
262
|
+
* Performs additional sync committee contribution tasks required if validator is part of distributed cluster
|
|
263
|
+
*
|
|
264
|
+
* 1. Exchange partial for combined selection proofs
|
|
265
|
+
* 2. Determine validators that should produce sync committee contribution
|
|
266
|
+
* 3. Mutate duty objects to set selection proofs for aggregators
|
|
267
|
+
*
|
|
268
|
+
* See https://docs.google.com/document/d/1q9jOTPcYQa-3L8luRvQJ-M0eegtba4Nmon3dpO79TMk/mobilebasic
|
|
269
|
+
*/
|
|
270
|
+
private async runDistributedAggregationSelectionTasks(
|
|
271
|
+
fork: ForkName,
|
|
272
|
+
duties: SyncDutyAndProofs[],
|
|
273
|
+
slot: number,
|
|
274
|
+
signal: AbortSignal
|
|
275
|
+
): Promise<void> {
|
|
276
|
+
const partialSelections: routes.validator.SyncCommitteeSelection[] = [];
|
|
277
|
+
|
|
278
|
+
for (const {duty, selectionProofs} of duties) {
|
|
279
|
+
const validatorSelections: routes.validator.SyncCommitteeSelection[] = selectionProofs.map(
|
|
280
|
+
({subcommitteeIndex, partialSelectionProof}) => ({
|
|
281
|
+
validatorIndex: duty.validatorIndex,
|
|
282
|
+
slot,
|
|
283
|
+
subcommitteeIndex,
|
|
284
|
+
selectionProof: partialSelectionProof as BLSSignature,
|
|
285
|
+
})
|
|
286
|
+
);
|
|
287
|
+
partialSelections.push(...validatorSelections);
|
|
288
|
+
}
|
|
289
|
+
|
|
290
|
+
this.logger.debug("Submitting partial sync committee selection proofs", {slot, count: partialSelections.length});
|
|
291
|
+
|
|
292
|
+
const res = await Promise.race([
|
|
293
|
+
this.api.validator.submitSyncCommitteeSelections({selections: partialSelections}),
|
|
294
|
+
// Exit sync committee contributions flow if there is no response after CONTRIBUTION_DUE_BPS of the slot.
|
|
295
|
+
// This is in contrast to attestations aggregations flow which is already exited at ATTESTATION_DUE_BPS of the slot
|
|
296
|
+
// because for sync committee is not required to resubscribe to subnets as beacon node will assume
|
|
297
|
+
// validator always aggregates. This allows us to wait until we have to produce sync committee contributions.
|
|
298
|
+
// Note that the sync committee contributions flow is not explicitly exited but rather will be skipped
|
|
299
|
+
// due to the fact that calculation of `is_sync_committee_aggregator` in SyncCommitteeDutiesService is not done
|
|
300
|
+
// and selectionProof is set to null, meaning no validator will be considered an aggregator.
|
|
301
|
+
sleep(this.config.getSyncContributionDueMs(fork) - this.clock.msFromSlot(slot), signal),
|
|
302
|
+
]);
|
|
303
|
+
|
|
304
|
+
if (!res) {
|
|
305
|
+
throw new Error("Failed to receive combined selection proofs before CONTRIBUTION_DUE_BPS of the slot");
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
const combinedSelections = res.value();
|
|
309
|
+
this.logger.debug("Received combined sync committee selection proofs", {slot, count: combinedSelections.length});
|
|
310
|
+
|
|
311
|
+
for (const dutyAndProofs of duties) {
|
|
312
|
+
const {validatorIndex, subnets} = dutyAndProofs.duty;
|
|
313
|
+
|
|
314
|
+
for (const subnet of subnets) {
|
|
315
|
+
const logCtxValidator = {slot, index: subnet, validatorIndex};
|
|
316
|
+
|
|
317
|
+
const combinedSelection = combinedSelections.find(
|
|
318
|
+
(s) => s.validatorIndex === validatorIndex && s.slot === slot && s.subcommitteeIndex === subnet
|
|
319
|
+
);
|
|
320
|
+
|
|
321
|
+
if (!combinedSelection) {
|
|
322
|
+
this.logger.warn("Did not receive combined sync committee selection proof", logCtxValidator);
|
|
323
|
+
continue;
|
|
324
|
+
}
|
|
325
|
+
|
|
326
|
+
const isAggregator = isSyncCommitteeAggregator(combinedSelection.selectionProof);
|
|
327
|
+
|
|
328
|
+
if (isAggregator) {
|
|
329
|
+
const selectionProofObject = dutyAndProofs.selectionProofs.find((p) => p.subcommitteeIndex === subnet);
|
|
330
|
+
if (selectionProofObject) {
|
|
331
|
+
// Update selection proof by mutating proof objects in duty object
|
|
332
|
+
selectionProofObject.selectionProof = combinedSelection.selectionProof;
|
|
333
|
+
}
|
|
334
|
+
}
|
|
335
|
+
}
|
|
336
|
+
}
|
|
337
|
+
}
|
|
338
|
+
}
|
|
@@ -0,0 +1,337 @@
|
|
|
1
|
+
import {ApiClient, routes} from "@lodestar/api";
|
|
2
|
+
import {ChainForkConfig} from "@lodestar/config";
|
|
3
|
+
import {EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SYNC_COMMITTEE_SUBNET_SIZE} from "@lodestar/params";
|
|
4
|
+
import {
|
|
5
|
+
computeEpochAtSlot,
|
|
6
|
+
computeSyncPeriodAtEpoch,
|
|
7
|
+
computeSyncPeriodAtSlot,
|
|
8
|
+
isStartSlotOfEpoch,
|
|
9
|
+
isSyncCommitteeAggregator,
|
|
10
|
+
} from "@lodestar/state-transition";
|
|
11
|
+
import {BLSSignature, Epoch, Slot, SyncPeriod, ValidatorIndex} from "@lodestar/types";
|
|
12
|
+
import {toPubkeyHex} from "@lodestar/utils";
|
|
13
|
+
import {Metrics} from "../metrics.js";
|
|
14
|
+
import {PubkeyHex} from "../types.js";
|
|
15
|
+
import {IClock, LoggerVc} from "../util/index.js";
|
|
16
|
+
import {SyncingStatusTracker} from "./syncingStatusTracker.js";
|
|
17
|
+
import {syncCommitteeIndicesToSubnets} from "./utils.js";
|
|
18
|
+
import {ValidatorStore} from "./validatorStore.js";
|
|
19
|
+
|
|
20
|
+
/** Only retain `HISTORICAL_DUTIES_PERIODS` duties prior to the current periods. */
|
|
21
|
+
const HISTORICAL_DUTIES_PERIODS = 2;
|
|
22
|
+
/**
|
|
23
|
+
* Epochs prior to `ALTAIR_FORK_EPOCH` to start fetching duties
|
|
24
|
+
*
|
|
25
|
+
* UPDATE: Setting it to 0 from 1, because looking ahead caused an "Empty SyncCommitteeCache"
|
|
26
|
+
* error (https://github.com/ChainSafe/lodestar/issues/3752) as currently the lodestar
|
|
27
|
+
* beacon's pre-altair placeholder object SyncCommitteeCacheEmpty just throws on
|
|
28
|
+
* any getter.
|
|
29
|
+
* This can be updated back to 1, once SyncCommitteeCacheEmpty supports the duties
|
|
30
|
+
* look-ahead. It can also be later turned as a cli param to interface with another
|
|
31
|
+
* client's beacon, which supports look-ahead of duties.
|
|
32
|
+
*/
|
|
33
|
+
const ALTAIR_FORK_LOOKAHEAD_EPOCHS = 0;
|
|
34
|
+
/** How many epochs prior from a subscription starting, ask the node to subscribe */
|
|
35
|
+
const SUBSCRIPTIONS_LOOKAHEAD_EPOCHS = 2;
|
|
36
|
+
|
|
37
|
+
export type SyncDutySubnet = {
|
|
38
|
+
pubkey: string;
|
|
39
|
+
/** Index of validator in validator registry. */
|
|
40
|
+
validatorIndex: ValidatorIndex;
|
|
41
|
+
/**
|
|
42
|
+
* The indices of the validator in the sync committee.
|
|
43
|
+
* The same validator can appear multiples in the sync committee. Given how sync messages are constructor, the
|
|
44
|
+
* validator client only cares in which subnets the validator is in, not the specific index.
|
|
45
|
+
*/
|
|
46
|
+
subnets: number[];
|
|
47
|
+
};
|
|
48
|
+
|
|
49
|
+
export type SyncSelectionProof = {
|
|
50
|
+
/** This value is only set to not null if the proof indicates that the validator is an aggregator. */
|
|
51
|
+
selectionProof: BLSSignature | null;
|
|
52
|
+
/** This value will only be set if validator is part of distributed cluster and only has a key share */
|
|
53
|
+
partialSelectionProof?: BLSSignature;
|
|
54
|
+
subcommitteeIndex: number;
|
|
55
|
+
};
|
|
56
|
+
|
|
57
|
+
/** Neatly joins SyncDuty with the locally-generated `selectionProof`. */
|
|
58
|
+
export type SyncDutyAndProofs = {
|
|
59
|
+
duty: SyncDutySubnet;
|
|
60
|
+
/**
|
|
61
|
+
* Array because the same validator can appear multiple times in the sync committee.
|
|
62
|
+
* `routes.validator.SyncDuty` `.validatorSyncCommitteeIndices` is an array for that reason.
|
|
63
|
+
* SelectionProof signs over slot + index in committee, so the length of `.selectionProofs` equals
|
|
64
|
+
* `.validatorSyncCommitteeIndices`.
|
|
65
|
+
*/
|
|
66
|
+
selectionProofs: SyncSelectionProof[];
|
|
67
|
+
};
|
|
68
|
+
|
|
69
|
+
// To assist with readability
|
|
70
|
+
type DutyAtPeriod = {duty: SyncDutySubnet};
|
|
71
|
+
|
|
72
|
+
type SyncCommitteeDutiesServiceOpts = {
|
|
73
|
+
distributedAggregationSelection?: boolean;
|
|
74
|
+
};
|
|
75
|
+
/**
 * Validators are part of a static long (~27h) sync committee, and part of static subnets.
 * However, the isAggregator role changes per slot.
 */
export class SyncCommitteeDutiesService {
  /** Sync period -> validator index -> duty. A duty is static for the entire period. */
  private readonly dutiesByIndexByPeriod = new Map<SyncPeriod, Map<ValidatorIndex, DutyAtPeriod>>();

  constructor(
    private readonly config: ChainForkConfig,
    private readonly logger: LoggerVc,
    private readonly api: ApiClient,
    clock: IClock,
    private readonly validatorStore: ValidatorStore,
    syncingStatusTracker: SyncingStatusTracker,
    metrics: Metrics | null,
    private readonly opts?: SyncCommitteeDutiesServiceOpts
  ) {
    // Running this task every epoch is safe since a re-org of many epochs is very unlikely
    // TODO: If the re-org event is reliable consider re-running then
    clock.runEveryEpoch(this.runDutiesTasks);
    // Re-fetch duties as soon as the node reports it is back in sync, mid-epoch
    syncingStatusTracker.runOnResynced(async (slot) => {
      // Skip on first slot of epoch since tasks are already scheduled
      if (!isStartSlotOfEpoch(slot)) {
        return this.runDutiesTasks(computeEpochAtSlot(slot));
      }
    });

    if (metrics) {
      // Gauge collection: total duty count across all cached periods, plus cached period count
      metrics.syncCommitteeDutiesCount.addCollect(() => {
        let duties = 0;
        for (const dutiesByIndex of this.dutiesByIndexByPeriod.values()) {
          duties += dutiesByIndex.size;
        }
        metrics.syncCommitteeDutiesCount.set(duties);
        metrics.syncCommitteeDutiesEpochCount.set(this.dutiesByIndexByPeriod.size);
      });
    }
  }

  /**
   * Returns all `ValidatorDuty` for the given `slot`
   *
   * Note: The range of slots a validator has to perform duties is off by one.
   * The previous slot wording means that if your validator is in a sync committee for a period that runs from slot
   * 100 to 200, then you would actually produce signatures in slot 99 - 199.
   * https://github.com/ethereum/consensus-specs/pull/2400
   */
  async getDutiesAtSlot(slot: Slot): Promise<SyncDutyAndProofs[]> {
    const period = computeSyncPeriodAtSlot(slot + 1); // See note above for the +1 offset
    const duties: SyncDutyAndProofs[] = [];

    const dutiesByIndex = this.dutiesByIndexByPeriod.get(period);
    if (dutiesByIndex) {
      for (const dutyAtPeriod of dutiesByIndex.values()) {
        // Validator always has a duty during the entire period
        duties.push({
          duty: dutyAtPeriod.duty,
          // Selection proofs are slot-specific, so they are signed lazily here rather than cached
          selectionProofs: await this.getSelectionProofs(slot, dutyAtPeriod.duty),
        });
      }
    }

    return duties;
  }

  /** Drop all cached duties for `pubkey` (e.g. when the key is removed from this validator client) */
  removeDutiesForKey(pubkey: PubkeyHex): void {
    for (const [syncPeriod, validatorDutyAtPeriodMap] of this.dutiesByIndexByPeriod) {
      for (const [validatorIndex, dutyAtPeriod] of validatorDutyAtPeriodMap) {
        if (dutyAtPeriod.duty.pubkey === pubkey) {
          validatorDutyAtPeriodMap.delete(validatorIndex);
          // Also drop the period entry once it becomes empty to keep the outer map tidy
          if (validatorDutyAtPeriodMap.size === 0) {
            this.dutiesByIndexByPeriod.delete(syncPeriod);
          }
        }
      }
    }
  }

  /** Per-epoch task: poll sync duties for known and newly-discovered validator indices, then prune. */
  private runDutiesTasks = async (currentEpoch: Epoch): Promise<void> => {
    // Before altair fork (+ lookahead) no need to check duties
    if (currentEpoch < this.config.ALTAIR_FORK_EPOCH - ALTAIR_FORK_LOOKAHEAD_EPOCHS) {
      return;
    }

    await Promise.all([
      // Run pollSyncCommittees immediately for all known local indices
      this.pollSyncCommittees(currentEpoch, this.validatorStore.getAllLocalIndices()).catch((e: Error) => {
        this.logger.error("Error on poll SyncDuties", {epoch: currentEpoch}, e);
      }),

      // At the same time fetch any remaining unknown validator indices, then poll duties for those newIndices only
      this.validatorStore
        .pollValidatorIndices()
        .then((newIndices) => this.pollSyncCommittees(currentEpoch, newIndices))
        .catch((e: Error) => {
          this.logger.error("Error on poll indices and SyncDuties", {epoch: currentEpoch}, e);
        }),
    ]);

    // After both, prune
    this.pruneOldDuties(currentEpoch);
  };

  /**
   * Query the beacon node for SyncDuties for any known validators.
   *
   * This function will perform (in the following order):
   *
   * 1. Poll for current-period duties and update the local duties map.
   * 2. As above, but for the next-period.
   * 3. Push out any Sync subnet subscriptions to the BN.
   *
   * (Pruning of old entries is handled by the caller, `runDutiesTasks`.)
   */
  private async pollSyncCommittees(currentEpoch: Epoch, indexArr: ValidatorIndex[]): Promise<void> {
    // No need to bother the BN if we don't have any validators.
    if (indexArr.length === 0) {
      return;
    }

    const nextPeriodEpoch = currentEpoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD;
    for (const epoch of [currentEpoch, nextPeriodEpoch]) {
      // Download the duties and update the duties for the current and next period.
      // Errors are logged, not rethrown, so a failure for one epoch doesn't block the other.
      await this.pollSyncCommitteesForEpoch(epoch, indexArr).catch((e: Error) => {
        this.logger.error("Failed to download SyncDuties", {epoch}, e);
      });
    }

    const currentPeriod = computeSyncPeriodAtEpoch(currentEpoch);
    const syncCommitteeSubscriptions: routes.validator.SyncCommitteeSubscription[] = [];

    // For this and the next period, produce any beacon committee subscriptions.
    //
    // We are *always* pushing out subscriptions, even if we've subscribed before. This is
    // potentially excessive on the BN in normal cases, but it will help with fast re-subscriptions
    // if the BN goes offline or we swap to a different one.
    const indexSet = new Set(indexArr);
    for (const period of [currentPeriod, currentPeriod + 1]) {
      const dutiesByIndex = this.dutiesByIndexByPeriod.get(period);
      if (dutiesByIndex) {
        for (const [validatorIndex, dutyAtEpoch] of dutiesByIndex.entries()) {
          // Only subscribe for the indices this poll was asked about
          if (indexSet.has(validatorIndex)) {
            const fromEpoch = period * EPOCHS_PER_SYNC_COMMITTEE_PERIOD;
            const untilEpoch = (period + 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD;
            // Don't subscribe too early to save node's resources
            if (currentEpoch >= fromEpoch - SUBSCRIPTIONS_LOOKAHEAD_EPOCHS) {
              syncCommitteeSubscriptions.push({
                validatorIndex,
                // prepareSyncCommitteeSubnets does not care about which specific index in the sync committee the
                // validator is, but at what subnets is it participating.
                syncCommitteeIndices: dutyAtEpoch.duty.subnets.map((subnet) => subnet * SYNC_COMMITTEE_SUBNET_SIZE),
                untilEpoch,
                // No need to send isAggregator here since the beacon node will assume validator always aggregates
              });
            }
          }
        }
      }
    }

    // If there are any subscriptions, push them out to the beacon node.
    if (syncCommitteeSubscriptions.length > 0) {
      // TODO: Should log or throw?
      (await this.api.validator.prepareSyncCommitteeSubnets({subscriptions: syncCommitteeSubscriptions})).assertOk();
    }
  }

  /**
   * For the given `indexArr`, download the duties for the given `period` and store them in duties.
   */
  private async pollSyncCommitteesForEpoch(epoch: Epoch, indexArr: ValidatorIndex[]): Promise<void> {
    // Don't fetch duties for periods before genesis. However, should fetch period 0 duties at period -1
    if (epoch < 0) {
      return;
    }

    const duties = (await this.api.validator.getSyncCommitteeDuties({epoch, indices: indexArr})).value();

    const dutiesByIndex = new Map<ValidatorIndex, DutyAtPeriod>();
    let count = 0;

    for (const duty of duties) {
      const {validatorIndex} = duty;
      // Skip duties for indices that are no longer tracked locally (key may have been removed mid-request)
      if (!this.validatorStore.hasValidatorIndex(validatorIndex)) {
        continue;
      }
      count++;

      // Note: For networks where `state.validators.length < SYNC_COMMITTEE_SIZE` the same validator can appear
      // multiple times in the sync committee. So `routes.validator.SyncDuty` `.validatorSyncCommitteeIndices`
      // is an array, with all of those appearances.
      //
      // Validator signs two messages:
      // `SyncCommitteeMessage`:
      // - depends on slot, blockRoot, and validatorIndex.
      // - Validator signs and publishes only one message regardless of validatorSyncCommitteeIndices length
      // `SyncCommitteeContribution`:
      // - depends on slot, blockRoot, validatorIndex, and subnet.
      // - Validator must sign and publish only one message per subnet MAX. Regardless of validatorSyncCommitteeIndices
      const subnets = syncCommitteeIndicesToSubnets(duty.validatorSyncCommitteeIndices);

      // TODO: Enable dependentRoot functionality
      // Meanwhile just overwrite them, since the latest duty will be older and less likely to re-org
      //
      // Only update the duties if either is true:
      //
      // - There were no known duties for this period.
      // - The dependent root has changed, signalling a re-org.
      //
      // if (reorg) this.metrics?.syncCommitteeDutiesReorg.inc()
      //
      // Using `alreadyWarnedReorg` avoids excessive logs.

      // TODO: Use memory-efficient toHexString()
      const pubkeyHex = toPubkeyHex(duty.pubkey);
      dutiesByIndex.set(validatorIndex, {duty: {pubkey: pubkeyHex, validatorIndex, subnets}});
    }

    // these could be redundant duties due to the state of next period query reorged
    // see https://github.com/ChainSafe/lodestar/issues/3572
    // so we always overwrite duties
    const period = computeSyncPeriodAtEpoch(epoch);
    this.dutiesByIndexByPeriod.set(period, dutiesByIndex);

    this.logger.debug("Downloaded SyncDuties", {epoch, count});
  }

  /** Sign a slot-specific selection proof for each subnet of `duty`; see `SyncDutyAndProofs.selectionProofs`. */
  private async getSelectionProofs(slot: Slot, duty: SyncDutySubnet): Promise<SyncSelectionProof[]> {
    const dutiesAndProofs: SyncSelectionProof[] = [];
    for (const subnet of duty.subnets) {
      const selectionProof = await this.validatorStore.signSyncCommitteeSelectionProof(duty.pubkey, slot, subnet);
      if (this.opts?.distributedAggregationSelection) {
        // Validator in distributed cluster only has a key share, not the full private key.
        // Passing a partial selection proof to `is_sync_committee_aggregator` would produce incorrect result.
        // SyncCommitteeService will exchange partial for combined selection proofs retrieved from
        // distributed validator middleware client and determine aggregators at beginning of every slot.
        dutiesAndProofs.push({
          selectionProof: null,
          partialSelectionProof: selectionProof,
          subcommitteeIndex: subnet,
        });
      } else {
        dutiesAndProofs.push({
          // selectionProof === null is used to check if is aggregator
          selectionProof: isSyncCommitteeAggregator(selectionProof) ? selectionProof : null,
          subcommitteeIndex: subnet,
        });
      }
    }
    return dutiesAndProofs;
  }

  /** Run at least once per period to prune duties map */
  private pruneOldDuties(currentEpoch: Epoch): void {
    const currentPeriod = computeSyncPeriodAtEpoch(currentEpoch);
    // Keep HISTORICAL_DUTIES_PERIODS worth of past periods; delete anything older
    for (const period of this.dutiesByIndexByPeriod.keys()) {
      if (period + HISTORICAL_DUTIES_PERIODS < currentPeriod) {
        this.dutiesByIndexByPeriod.delete(period);
      }
    }
  }
}
|