@lodestar/validator 1.35.0-dev.8689cc3545 → 1.35.0-dev.8b45b1e978
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +15 -13
- package/lib/buckets.d.ts.map +0 -1
- package/lib/defaults.d.ts.map +0 -1
- package/lib/genesis.d.ts.map +0 -1
- package/lib/index.d.ts.map +0 -1
- package/lib/metrics.d.ts.map +0 -1
- package/lib/repositories/index.d.ts.map +0 -1
- package/lib/repositories/metaDataRepository.d.ts.map +0 -1
- package/lib/services/attestation.d.ts.map +0 -1
- package/lib/services/attestationDuties.d.ts.map +0 -1
- package/lib/services/block.d.ts.map +0 -1
- package/lib/services/blockDuties.d.ts.map +0 -1
- package/lib/services/chainHeaderTracker.d.ts.map +0 -1
- package/lib/services/doppelgangerService.d.ts.map +0 -1
- package/lib/services/emitter.d.ts.map +0 -1
- package/lib/services/externalSignerSync.d.ts.map +0 -1
- package/lib/services/indices.d.ts.map +0 -1
- package/lib/services/prepareBeaconProposer.d.ts.map +0 -1
- package/lib/services/syncCommittee.d.ts.map +0 -1
- package/lib/services/syncCommitteeDuties.d.ts.map +0 -1
- package/lib/services/syncingStatusTracker.d.ts.map +0 -1
- package/lib/services/utils.d.ts.map +0 -1
- package/lib/services/validatorStore.d.ts.map +0 -1
- package/lib/slashingProtection/attestation/attestationByTargetRepository.d.ts.map +0 -1
- package/lib/slashingProtection/attestation/attestationLowerBoundRepository.d.ts.map +0 -1
- package/lib/slashingProtection/attestation/errors.d.ts.map +0 -1
- package/lib/slashingProtection/attestation/index.d.ts.map +0 -1
- package/lib/slashingProtection/block/blockBySlotRepository.d.ts.map +0 -1
- package/lib/slashingProtection/block/errors.d.ts.map +0 -1
- package/lib/slashingProtection/block/index.d.ts.map +0 -1
- package/lib/slashingProtection/index.d.ts.map +0 -1
- package/lib/slashingProtection/interchange/errors.d.ts.map +0 -1
- package/lib/slashingProtection/interchange/formats/completeV4.d.ts.map +0 -1
- package/lib/slashingProtection/interchange/formats/index.d.ts.map +0 -1
- package/lib/slashingProtection/interchange/formats/v5.d.ts.map +0 -1
- package/lib/slashingProtection/interchange/index.d.ts.map +0 -1
- package/lib/slashingProtection/interchange/parseInterchange.d.ts.map +0 -1
- package/lib/slashingProtection/interchange/serializeInterchange.d.ts.map +0 -1
- package/lib/slashingProtection/interchange/types.d.ts.map +0 -1
- package/lib/slashingProtection/interface.d.ts.map +0 -1
- package/lib/slashingProtection/minMaxSurround/distanceStoreRepository.d.ts.map +0 -1
- package/lib/slashingProtection/minMaxSurround/errors.d.ts.map +0 -1
- package/lib/slashingProtection/minMaxSurround/index.d.ts.map +0 -1
- package/lib/slashingProtection/minMaxSurround/interface.d.ts.map +0 -1
- package/lib/slashingProtection/minMaxSurround/minMaxSurround.d.ts.map +0 -1
- package/lib/slashingProtection/types.d.ts.map +0 -1
- package/lib/slashingProtection/utils.d.ts.map +0 -1
- package/lib/types.d.ts.map +0 -1
- package/lib/util/batch.d.ts.map +0 -1
- package/lib/util/clock.d.ts.map +0 -1
- package/lib/util/difference.d.ts.map +0 -1
- package/lib/util/externalSignerClient.d.ts.map +0 -1
- package/lib/util/format.d.ts.map +0 -1
- package/lib/util/index.d.ts.map +0 -1
- package/lib/util/logger.d.ts.map +0 -1
- package/lib/util/params.d.ts.map +0 -1
- package/lib/util/url.d.ts.map +0 -1
- package/lib/validator.d.ts.map +0 -1
- package/src/buckets.ts +0 -30
- package/src/defaults.ts +0 -8
- package/src/genesis.ts +0 -19
- package/src/index.ts +0 -22
- package/src/metrics.ts +0 -417
- package/src/repositories/index.ts +0 -1
- package/src/repositories/metaDataRepository.ts +0 -42
- package/src/services/attestation.ts +0 -349
- package/src/services/attestationDuties.ts +0 -405
- package/src/services/block.ts +0 -261
- package/src/services/blockDuties.ts +0 -215
- package/src/services/chainHeaderTracker.ts +0 -89
- package/src/services/doppelgangerService.ts +0 -286
- package/src/services/emitter.ts +0 -43
- package/src/services/externalSignerSync.ts +0 -81
- package/src/services/indices.ts +0 -165
- package/src/services/prepareBeaconProposer.ts +0 -119
- package/src/services/syncCommittee.ts +0 -317
- package/src/services/syncCommitteeDuties.ts +0 -337
- package/src/services/syncingStatusTracker.ts +0 -74
- package/src/services/utils.ts +0 -58
- package/src/services/validatorStore.ts +0 -830
- package/src/slashingProtection/attestation/attestationByTargetRepository.ts +0 -77
- package/src/slashingProtection/attestation/attestationLowerBoundRepository.ts +0 -44
- package/src/slashingProtection/attestation/errors.ts +0 -66
- package/src/slashingProtection/attestation/index.ts +0 -171
- package/src/slashingProtection/block/blockBySlotRepository.ts +0 -78
- package/src/slashingProtection/block/errors.ts +0 -28
- package/src/slashingProtection/block/index.ts +0 -94
- package/src/slashingProtection/index.ts +0 -95
- package/src/slashingProtection/interchange/errors.ts +0 -15
- package/src/slashingProtection/interchange/formats/completeV4.ts +0 -125
- package/src/slashingProtection/interchange/formats/index.ts +0 -7
- package/src/slashingProtection/interchange/formats/v5.ts +0 -120
- package/src/slashingProtection/interchange/index.ts +0 -5
- package/src/slashingProtection/interchange/parseInterchange.ts +0 -55
- package/src/slashingProtection/interchange/serializeInterchange.ts +0 -35
- package/src/slashingProtection/interchange/types.ts +0 -18
- package/src/slashingProtection/interface.ts +0 -28
- package/src/slashingProtection/minMaxSurround/distanceStoreRepository.ts +0 -57
- package/src/slashingProtection/minMaxSurround/errors.ts +0 -27
- package/src/slashingProtection/minMaxSurround/index.ts +0 -4
- package/src/slashingProtection/minMaxSurround/interface.ts +0 -23
- package/src/slashingProtection/minMaxSurround/minMaxSurround.ts +0 -104
- package/src/slashingProtection/types.ts +0 -12
- package/src/slashingProtection/utils.ts +0 -42
- package/src/types.ts +0 -31
- package/src/util/batch.ts +0 -15
- package/src/util/clock.ts +0 -164
- package/src/util/difference.ts +0 -10
- package/src/util/externalSignerClient.ts +0 -277
- package/src/util/format.ts +0 -3
- package/src/util/index.ts +0 -6
- package/src/util/logger.ts +0 -51
- package/src/util/params.ts +0 -313
- package/src/util/url.ts +0 -16
- package/src/validator.ts +0 -418
|
@@ -1,337 +0,0 @@
|
|
|
1
|
-
import {ApiClient, routes} from "@lodestar/api";
|
|
2
|
-
import {ChainForkConfig} from "@lodestar/config";
|
|
3
|
-
import {EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SYNC_COMMITTEE_SUBNET_SIZE} from "@lodestar/params";
|
|
4
|
-
import {
|
|
5
|
-
computeEpochAtSlot,
|
|
6
|
-
computeSyncPeriodAtEpoch,
|
|
7
|
-
computeSyncPeriodAtSlot,
|
|
8
|
-
isStartSlotOfEpoch,
|
|
9
|
-
isSyncCommitteeAggregator,
|
|
10
|
-
} from "@lodestar/state-transition";
|
|
11
|
-
import {BLSSignature, Epoch, Slot, SyncPeriod, ValidatorIndex} from "@lodestar/types";
|
|
12
|
-
import {toPubkeyHex} from "@lodestar/utils";
|
|
13
|
-
import {Metrics} from "../metrics.js";
|
|
14
|
-
import {PubkeyHex} from "../types.js";
|
|
15
|
-
import {IClock, LoggerVc} from "../util/index.js";
|
|
16
|
-
import {SyncingStatusTracker} from "./syncingStatusTracker.js";
|
|
17
|
-
import {syncCommitteeIndicesToSubnets} from "./utils.js";
|
|
18
|
-
import {ValidatorStore} from "./validatorStore.js";
|
|
19
|
-
|
|
20
|
-
/** Only retain `HISTORICAL_DUTIES_PERIODS` duties prior to the current periods. */
const HISTORICAL_DUTIES_PERIODS = 2;
/**
 * Epochs prior to `ALTAIR_FORK_EPOCH` to start fetching duties
 *
 * UPDATE: Setting it to 0 from 1, because looking ahead caused an "Empty SyncCommitteeCache"
 * error (https://github.com/ChainSafe/lodestar/issues/3752) as currently the lodestar
 * beacon's pre-altair placeholder object SyncCommitteeCacheEmpty just throws on
 * any getter.
 * This can be updated back to 1, once SyncCommitteeCacheEmpty supports the duties
 * look-ahead. It can also be later turned as a cli param to interface with another
 * client's beacon, which supports look-ahead of duties.
 */
const ALTAIR_FORK_LOOKAHEAD_EPOCHS = 0;
/** How many epochs prior from a subscription starting, ask the node to subscribe */
const SUBSCRIPTIONS_LOOKAHEAD_EPOCHS = 2;
|
|
36
|
-
|
|
37
|
-
/** A validator's sync-committee membership reduced to the subnets it must publish on. */
export type SyncDutySubnet = {
  pubkey: string;
  /** Index of validator in validator registry. */
  validatorIndex: ValidatorIndex;
  /**
   * The indices of the validator in the sync committee.
   * The same validator can appear multiple times in the sync committee. Given how sync messages are constructed, the
   * validator client only cares in which subnets the validator is in, not the specific index.
   */
  subnets: number[];
};

/** Selection proof for one subcommittee, used to decide the aggregator role. */
export type SyncSelectionProof = {
  /** This value is only set to not null if the proof indicates that the validator is an aggregator. */
  selectionProof: BLSSignature | null;
  /** This value will only be set if validator is part of distributed cluster and only has a key share */
  partialSelectionProof?: BLSSignature;
  subcommitteeIndex: number;
};

/** Neatly joins SyncDuty with the locally-generated `selectionProof`. */
export type SyncDutyAndProofs = {
  duty: SyncDutySubnet;
  /**
   * Array because the same validator can appear multiple times in the sync committee.
   * `routes.validator.SyncDuty` `.validatorSyncCommitteeIndices` is an array for that reason.
   * SelectionProof signs over slot + index in committee, so the length of `.selectionProofs` equals
   * `.validatorSyncCommitteeIndices`.
   */
  selectionProofs: SyncSelectionProof[];
};

// To assist with readability
type DutyAtPeriod = {duty: SyncDutySubnet};

/** Constructor options for `SyncCommitteeDutiesService`. */
type SyncCommitteeDutiesServiceOpts = {
  distributedAggregationSelection?: boolean;
};
|
|
75
|
-
|
|
76
|
-
/**
 * Validators are part of a static long (~27h) sync committee, and part of static subnets.
 * However, the isAggregator role changes per slot.
 *
 * Polls the beacon node every epoch for sync-committee duties of all known validators,
 * caches them per sync period, and pushes subnet subscriptions to the beacon node.
 */
export class SyncCommitteeDutiesService {
  /** Duties cached per sync period, then keyed by validator index within that period. */
  private readonly dutiesByIndexByPeriod = new Map<SyncPeriod, Map<ValidatorIndex, DutyAtPeriod>>();

  constructor(
    private readonly config: ChainForkConfig,
    private readonly logger: LoggerVc,
    private readonly api: ApiClient,
    clock: IClock,
    private readonly validatorStore: ValidatorStore,
    syncingStatusTracker: SyncingStatusTracker,
    metrics: Metrics | null,
    private readonly opts?: SyncCommitteeDutiesServiceOpts
  ) {
    // Running this task every epoch is safe since a re-org of many epochs is very unlikely
    // TODO: If the re-org event is reliable consider re-running then
    clock.runEveryEpoch(this.runDutiesTasks);
    syncingStatusTracker.runOnResynced(async (slot) => {
      // Skip on first slot of epoch since tasks are already scheduled
      if (!isStartSlotOfEpoch(slot)) {
        return this.runDutiesTasks(computeEpochAtSlot(slot));
      }
    });

    if (metrics) {
      // Report total cached duties and number of cached periods on each metrics scrape
      metrics.syncCommitteeDutiesCount.addCollect(() => {
        let duties = 0;
        for (const dutiesByIndex of this.dutiesByIndexByPeriod.values()) {
          duties += dutiesByIndex.size;
        }
        metrics.syncCommitteeDutiesCount.set(duties);
        metrics.syncCommitteeDutiesEpochCount.set(this.dutiesByIndexByPeriod.size);
      });
    }
  }

  /**
   * Returns all `ValidatorDuty` for the given `slot`
   *
   * Note: The range of slots a validator has to perform duties is off by one.
   * The previous slot wording means that if your validator is in a sync committee for a period that runs from slot
   * 100 to 200, then you would actually produce signatures in slot 99 - 199.
   * https://github.com/ethereum/consensus-specs/pull/2400
   */
  async getDutiesAtSlot(slot: Slot): Promise<SyncDutyAndProofs[]> {
    const period = computeSyncPeriodAtSlot(slot + 1); // See note above for the +1 offset
    const duties: SyncDutyAndProofs[] = [];

    const dutiesByIndex = this.dutiesByIndexByPeriod.get(period);
    if (dutiesByIndex) {
      for (const dutyAtPeriod of dutiesByIndex.values()) {
        // Validator always has a duty during the entire period
        duties.push({
          duty: dutyAtPeriod.duty,
          selectionProofs: await this.getSelectionProofs(slot, dutyAtPeriod.duty),
        });
      }
    }

    return duties;
  }

  /** Drop all cached duties for `pubkey` (e.g. when the key is removed from the validator store). */
  removeDutiesForKey(pubkey: PubkeyHex): void {
    for (const [syncPeriod, validatorDutyAtPeriodMap] of this.dutiesByIndexByPeriod) {
      for (const [validatorIndex, dutyAtPeriod] of validatorDutyAtPeriodMap) {
        if (dutyAtPeriod.duty.pubkey === pubkey) {
          validatorDutyAtPeriodMap.delete(validatorIndex);
          // Drop the period entry entirely once its last duty is removed
          if (validatorDutyAtPeriodMap.size === 0) {
            this.dutiesByIndexByPeriod.delete(syncPeriod);
          }
        }
      }
    }
  }

  private runDutiesTasks = async (currentEpoch: Epoch): Promise<void> => {
    // Before altair fork (+ lookahead) no need to check duties
    if (currentEpoch < this.config.ALTAIR_FORK_EPOCH - ALTAIR_FORK_LOOKAHEAD_EPOCHS) {
      return;
    }

    await Promise.all([
      // Run pollSyncCommittees immediately for all known local indices
      this.pollSyncCommittees(currentEpoch, this.validatorStore.getAllLocalIndices()).catch((e: Error) => {
        this.logger.error("Error on poll SyncDuties", {epoch: currentEpoch}, e);
      }),

      // At the same time fetch any remaining unknown validator indices, then poll duties for those newIndices only
      this.validatorStore
        .pollValidatorIndices()
        .then((newIndices) => this.pollSyncCommittees(currentEpoch, newIndices))
        .catch((e: Error) => {
          this.logger.error("Error on poll indices and SyncDuties", {epoch: currentEpoch}, e);
        }),
    ]);

    // After both, prune
    this.pruneOldDuties(currentEpoch);
  };

  /**
   * Query the beacon node for SyncDuties for any known validators.
   *
   * This function will perform (in the following order):
   *
   * 1. Poll for current-period duties and update the local duties map.
   * 2. As above, but for the next-period.
   * 3. Push out any Sync subnet subscriptions to the BN.
   * 4. Prune old entries from duties.
   */
  private async pollSyncCommittees(currentEpoch: Epoch, indexArr: ValidatorIndex[]): Promise<void> {
    // No need to bother the BN if we don't have any validators.
    if (indexArr.length === 0) {
      return;
    }

    const nextPeriodEpoch = currentEpoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD;
    for (const epoch of [currentEpoch, nextPeriodEpoch]) {
      // Download the duties and update the duties for the current and next period.
      await this.pollSyncCommitteesForEpoch(epoch, indexArr).catch((e: Error) => {
        this.logger.error("Failed to download SyncDuties", {epoch}, e);
      });
    }

    const currentPeriod = computeSyncPeriodAtEpoch(currentEpoch);
    const syncCommitteeSubscriptions: routes.validator.SyncCommitteeSubscription[] = [];

    // For this and the next period, produce any beacon committee subscriptions.
    //
    // We are *always* pushing out subscriptions, even if we've subscribed before. This is
    // potentially excessive on the BN in normal cases, but it will help with fast re-subscriptions
    // if the BN goes offline or we swap to a different one.
    const indexSet = new Set(indexArr);
    for (const period of [currentPeriod, currentPeriod + 1]) {
      const dutiesByIndex = this.dutiesByIndexByPeriod.get(period);
      if (dutiesByIndex) {
        for (const [validatorIndex, dutyAtEpoch] of dutiesByIndex.entries()) {
          if (indexSet.has(validatorIndex)) {
            const fromEpoch = period * EPOCHS_PER_SYNC_COMMITTEE_PERIOD;
            const untilEpoch = (period + 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD;
            // Don't subscribe too early to save node's resources
            if (currentEpoch >= fromEpoch - SUBSCRIPTIONS_LOOKAHEAD_EPOCHS) {
              syncCommitteeSubscriptions.push({
                validatorIndex,
                // prepareSyncCommitteeSubnets does not care about which specific index in the sync committee the
                // validator is, but at what subnets is it participating.
                syncCommitteeIndices: dutyAtEpoch.duty.subnets.map((subnet) => subnet * SYNC_COMMITTEE_SUBNET_SIZE),
                untilEpoch,
                // No need to send isAggregator here since the beacon node will assume validator always aggregates
              });
            }
          }
        }
      }
    }

    // If there are any subscriptions, push them out to the beacon node.
    if (syncCommitteeSubscriptions.length > 0) {
      // TODO: Should log or throw?
      (await this.api.validator.prepareSyncCommitteeSubnets({subscriptions: syncCommitteeSubscriptions})).assertOk();
    }
  }

  /**
   * For the given `indexArr`, download the duties for the given `period` and store them in duties.
   */
  private async pollSyncCommitteesForEpoch(epoch: Epoch, indexArr: ValidatorIndex[]): Promise<void> {
    // Don't fetch duties for periods before genesis. However, should fetch period 0 duties at period -1
    if (epoch < 0) {
      return;
    }

    const duties = (await this.api.validator.getSyncCommitteeDuties({epoch, indices: indexArr})).value();

    const dutiesByIndex = new Map<ValidatorIndex, DutyAtPeriod>();
    let count = 0;

    for (const duty of duties) {
      const {validatorIndex} = duty;
      // Skip duties for validators no longer tracked by this validator store
      if (!this.validatorStore.hasValidatorIndex(validatorIndex)) {
        continue;
      }
      count++;

      // Note: For networks where `state.validators.length < SYNC_COMMITTEE_SIZE` the same validator can appear
      // multiple times in the sync committee. So `routes.validator.SyncDuty` `.validatorSyncCommitteeIndices`
      // is an array, with all of those appearances.
      //
      // Validator signs two messages:
      // `SyncCommitteeMessage`:
      // - depends on slot, blockRoot, and validatorIndex.
      // - Validator signs and publishes only one message regardless of validatorSyncCommitteeIndices length
      // `SyncCommitteeContribution`:
      // - depends on slot, blockRoot, validatorIndex, and subnet.
      // - Validator must sign and publish only one message per subnet MAX. Regardless of validatorSyncCommitteeIndices
      const subnets = syncCommitteeIndicesToSubnets(duty.validatorSyncCommitteeIndices);

      // TODO: Enable dependentRoot functionality
      // Meanwhile just overwrite them, since the latest duty will be older and less likely to re-org
      //
      // Only update the duties if either is true:
      //
      // - There were no known duties for this period.
      // - The dependent root has changed, signalling a re-org.
      //
      // if (reorg) this.metrics?.syncCommitteeDutiesReorg.inc()
      //
      // Using `alreadyWarnedReorg` avoids excessive logs.

      // TODO: Use memory-efficient toHexString()
      const pubkeyHex = toPubkeyHex(duty.pubkey);
      dutiesByIndex.set(validatorIndex, {duty: {pubkey: pubkeyHex, validatorIndex, subnets}});
    }

    // these could be redundant duties due to the state of next period query reorged
    // see https://github.com/ChainSafe/lodestar/issues/3572
    // so we always overwrite duties
    const period = computeSyncPeriodAtEpoch(epoch);
    this.dutiesByIndexByPeriod.set(period, dutiesByIndex);

    this.logger.debug("Downloaded SyncDuties", {epoch, count});
  }

  /** Build one selection proof per subnet the duty participates in; see `SyncSelectionProof`. */
  private async getSelectionProofs(slot: Slot, duty: SyncDutySubnet): Promise<SyncSelectionProof[]> {
    const dutiesAndProofs: SyncSelectionProof[] = [];
    for (const subnet of duty.subnets) {
      const selectionProof = await this.validatorStore.signSyncCommitteeSelectionProof(duty.pubkey, slot, subnet);
      if (this.opts?.distributedAggregationSelection) {
        // Validator in distributed cluster only has a key share, not the full private key.
        // Passing a partial selection proof to `is_sync_committee_aggregator` would produce incorrect result.
        // SyncCommitteeService will exchange partial for combined selection proofs retrieved from
        // distributed validator middleware client and determine aggregators at beginning of every slot.
        dutiesAndProofs.push({
          selectionProof: null,
          partialSelectionProof: selectionProof,
          subcommitteeIndex: subnet,
        });
      } else {
        dutiesAndProofs.push({
          // selectionProof === null is used to check if is aggregator
          selectionProof: isSyncCommitteeAggregator(selectionProof) ? selectionProof : null,
          subcommitteeIndex: subnet,
        });
      }
    }
    return dutiesAndProofs;
  }

  /** Run at least once per period to prune duties map */
  private pruneOldDuties(currentEpoch: Epoch): void {
    const currentPeriod = computeSyncPeriodAtEpoch(currentEpoch);
    for (const period of this.dutiesByIndexByPeriod.keys()) {
      // Keep up to HISTORICAL_DUTIES_PERIODS periods behind the current one
      if (period + HISTORICAL_DUTIES_PERIODS < currentPeriod) {
        this.dutiesByIndexByPeriod.delete(period);
      }
    }
  }
}
|
|
@@ -1,74 +0,0 @@
|
|
|
1
|
-
import {ApiClient, routes} from "@lodestar/api";
|
|
2
|
-
import {Slot} from "@lodestar/types";
|
|
3
|
-
import {Logger} from "@lodestar/utils";
|
|
4
|
-
import {BeaconHealth, Metrics} from "../metrics.js";
|
|
5
|
-
import {IClock} from "../util/clock.js";
|
|
6
|
-
|
|
7
|
-
/** Re-export of the beacon API syncing status shape. */
export type SyncingStatus = routes.node.SyncingStatus;

/** Callback invoked when the node transitions from syncing/errored back to synced. */
type RunOnResyncedFn = (slot: Slot, signal: AbortSignal) => Promise<void>;
|
|
10
|
-
|
|
11
|
-
/**
 * Track the syncing status of connected beacon node(s)
 *
 * Polls `node.getSyncingStatus` every slot, logs status transitions, reports
 * beacon health metrics, and fires registered callbacks on resync.
 */
export class SyncingStatusTracker {
  // Last observed status; an Error value means the previous check itself failed
  // (e.g. node offline) and is treated the same as "was syncing".
  private prevSyncingStatus?: SyncingStatus | Error;

  private readonly fns: RunOnResyncedFn[] = [];

  constructor(
    private readonly logger: Logger,
    private readonly api: ApiClient,
    private readonly clock: IClock,
    private readonly metrics: Metrics | null
  ) {
    this.clock.runEverySlot(this.checkSyncingStatus);
  }

  /**
   * Run function when node status changes from syncing to synced
   *
   * Note: does not consider if execution client is offline or syncing and
   * hence it is not useful to schedule tasks that require a non-optimistic node.
   */
  runOnResynced(fn: RunOnResyncedFn): void {
    this.fns.push(fn);
  }

  private checkSyncingStatus = async (slot: Slot, signal: AbortSignal): Promise<void> => {
    try {
      const syncingStatus = (await this.api.node.getSyncingStatus()).value();
      const {isSyncing, headSlot, syncDistance, isOptimistic, elOffline} = syncingStatus;
      // True if the previous check errored or reported the node as syncing
      const prevErrorOrSyncing = this.prevSyncingStatus instanceof Error || this.prevSyncingStatus?.isSyncing === true;

      if (isSyncing === true) {
        this.logger.warn("Node is syncing", {slot, headSlot, syncDistance});
      } else if (this.prevSyncingStatus === undefined || prevErrorOrSyncing) {
        // Log "synced" only on first check or on a transition back to synced
        this.logger.info("Node is synced", {slot, headSlot, isOptimistic, elOffline});
      }
      this.logger.verbose("Node syncing status", {slot, ...syncingStatus});

      this.prevSyncingStatus = syncingStatus;

      this.metrics?.beaconHealth.set(
        !isSyncing && !isOptimistic && !elOffline ? BeaconHealth.READY : BeaconHealth.SYNCING
      );

      // Fire resynced callbacks on the syncing->synced (or error->synced) transition
      if (prevErrorOrSyncing && isSyncing === false) {
        await Promise.all(
          this.fns.map((fn) =>
            fn(slot, signal).catch((e) => this.logger.error("Error calling resynced event handler", e))
          )
        );
      }
    } catch (e) {
      // Error likely due to node being offline. In any case, handle failure to
      // check syncing status the same way as if node was previously syncing
      this.prevSyncingStatus = e as Error;

      this.metrics?.beaconHealth.set(BeaconHealth.ERROR);

      this.logger.error("Failed to check syncing status", {slot}, this.prevSyncingStatus);
    }
  };
}
|
package/src/services/utils.ts
DELETED
|
@@ -1,58 +0,0 @@
|
|
|
1
|
-
import {SYNC_COMMITTEE_SUBNET_SIZE} from "@lodestar/params";
|
|
2
|
-
import {CommitteeIndex, SubcommitteeIndex} from "@lodestar/types";
|
|
3
|
-
import {AttDutyAndProof} from "./attestationDuties.js";
|
|
4
|
-
import {SyncDutyAndProofs, SyncDutySubnet, SyncSelectionProof} from "./syncCommitteeDuties.js";
|
|
5
|
-
|
|
6
|
-
/** Sync committee duty associated to a single sub committee subnet */
export type SubcommitteeDuty = {
  duty: SyncDutySubnet;
  selectionProof: SyncSelectionProof["selectionProof"];
};
|
|
11
|
-
|
|
12
|
-
export function groupAttDutiesByCommitteeIndex(duties: AttDutyAndProof[]): Map<CommitteeIndex, AttDutyAndProof[]> {
|
|
13
|
-
const dutiesByCommitteeIndex = new Map<CommitteeIndex, AttDutyAndProof[]>();
|
|
14
|
-
|
|
15
|
-
for (const dutyAndProof of duties) {
|
|
16
|
-
const {committeeIndex} = dutyAndProof.duty;
|
|
17
|
-
let dutyAndProofArr = dutiesByCommitteeIndex.get(committeeIndex);
|
|
18
|
-
if (!dutyAndProofArr) {
|
|
19
|
-
dutyAndProofArr = [];
|
|
20
|
-
dutiesByCommitteeIndex.set(committeeIndex, dutyAndProofArr);
|
|
21
|
-
}
|
|
22
|
-
dutyAndProofArr.push(dutyAndProof);
|
|
23
|
-
}
|
|
24
|
-
|
|
25
|
-
return dutiesByCommitteeIndex;
|
|
26
|
-
}
|
|
27
|
-
|
|
28
|
-
export function groupSyncDutiesBySubcommitteeIndex(
|
|
29
|
-
duties: SyncDutyAndProofs[]
|
|
30
|
-
): Map<SubcommitteeIndex, SubcommitteeDuty[]> {
|
|
31
|
-
const dutiesBySubcommitteeIndex = new Map<SubcommitteeIndex, SubcommitteeDuty[]>();
|
|
32
|
-
|
|
33
|
-
for (const validatorDuty of duties) {
|
|
34
|
-
for (const {selectionProof, subcommitteeIndex} of validatorDuty.selectionProofs) {
|
|
35
|
-
let dutyAndProofArr = dutiesBySubcommitteeIndex.get(subcommitteeIndex);
|
|
36
|
-
if (!dutyAndProofArr) {
|
|
37
|
-
dutyAndProofArr = [];
|
|
38
|
-
dutiesBySubcommitteeIndex.set(subcommitteeIndex, dutyAndProofArr);
|
|
39
|
-
}
|
|
40
|
-
dutyAndProofArr.push({duty: validatorDuty.duty, selectionProof: selectionProof});
|
|
41
|
-
}
|
|
42
|
-
}
|
|
43
|
-
|
|
44
|
-
return dutiesBySubcommitteeIndex;
|
|
45
|
-
}
|
|
46
|
-
|
|
47
|
-
/**
|
|
48
|
-
* Given a list of indexes of a sync committee returns the list of unique subnet numbers the indexes are part of
|
|
49
|
-
*/
|
|
50
|
-
export function syncCommitteeIndicesToSubnets(indexesInCommittee: number[]): number[] {
|
|
51
|
-
const subnets = new Set<number>();
|
|
52
|
-
|
|
53
|
-
for (const indexInCommittee of indexesInCommittee) {
|
|
54
|
-
subnets.add(Math.floor(indexInCommittee / SYNC_COMMITTEE_SUBNET_SIZE));
|
|
55
|
-
}
|
|
56
|
-
|
|
57
|
-
return Array.from(subnets);
|
|
58
|
-
}
|