@aztec/aztec-node 0.0.0-test.1 → 0.0.1-commit.b655e406
- package/dest/aztec-node/config.d.ts +14 -9
- package/dest/aztec-node/config.d.ts.map +1 -1
- package/dest/aztec-node/config.js +75 -14
- package/dest/aztec-node/node_metrics.d.ts +4 -0
- package/dest/aztec-node/node_metrics.d.ts.map +1 -1
- package/dest/aztec-node/node_metrics.js +21 -0
- package/dest/aztec-node/server.d.ts +90 -50
- package/dest/aztec-node/server.d.ts.map +1 -1
- package/dest/aztec-node/server.js +526 -218
- package/dest/bin/index.js +4 -2
- package/dest/index.d.ts +0 -1
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +0 -1
- package/dest/sentinel/config.d.ts +8 -0
- package/dest/sentinel/config.d.ts.map +1 -0
- package/dest/sentinel/config.js +29 -0
- package/dest/sentinel/factory.d.ts +9 -0
- package/dest/sentinel/factory.d.ts.map +1 -0
- package/dest/sentinel/factory.js +17 -0
- package/dest/sentinel/index.d.ts +3 -0
- package/dest/sentinel/index.d.ts.map +1 -0
- package/dest/sentinel/index.js +1 -0
- package/dest/sentinel/sentinel.d.ts +91 -0
- package/dest/sentinel/sentinel.d.ts.map +1 -0
- package/dest/sentinel/sentinel.js +391 -0
- package/dest/sentinel/store.d.ts +34 -0
- package/dest/sentinel/store.d.ts.map +1 -0
- package/dest/sentinel/store.js +169 -0
- package/dest/test/index.d.ts +31 -0
- package/dest/test/index.d.ts.map +1 -0
- package/dest/test/index.js +1 -0
- package/package.json +42 -32
- package/src/aztec-node/config.ts +128 -25
- package/src/aztec-node/node_metrics.ts +28 -0
- package/src/aztec-node/server.ts +684 -278
- package/src/bin/index.ts +4 -2
- package/src/index.ts +0 -1
- package/src/sentinel/config.ts +37 -0
- package/src/sentinel/factory.ts +36 -0
- package/src/sentinel/index.ts +8 -0
- package/src/sentinel/sentinel.ts +489 -0
- package/src/sentinel/store.ts +184 -0
- package/src/test/index.ts +32 -0
- package/dest/aztec-node/http_rpc_server.d.ts +0 -8
- package/dest/aztec-node/http_rpc_server.d.ts.map +0 -1
- package/dest/aztec-node/http_rpc_server.js +0 -9
- package/src/aztec-node/http_rpc_server.ts +0 -11
package/src/bin/index.ts
CHANGED
@@ -1,9 +1,11 @@
 #!/usr/bin/env -S node --no-warnings
 import { createLogger } from '@aztec/foundation/log';
+import { AztecNodeApiSchema } from '@aztec/stdlib/interfaces/client';
+import { createTracedJsonRpcServer } from '@aztec/telemetry-client';
 
 import http from 'http';
 
-import { type AztecNodeConfig, AztecNodeService,
+import { type AztecNodeConfig, AztecNodeService, getConfigEnvVars } from '../index.js';
 
 const { AZTEC_NODE_PORT = 8081, API_PREFIX = '' } = process.env;
 
@@ -37,7 +39,7 @@ async function main() {
   // eslint-disable-next-line @typescript-eslint/no-misused-promises
   process.once('SIGTERM', shutdown);
 
-  const rpcServer =
+  const rpcServer = createTracedJsonRpcServer(aztecNode, AztecNodeApiSchema);
   const app = rpcServer.getApp(API_PREFIX);
 
   // eslint-disable-next-line @typescript-eslint/no-misused-promises
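The change above swaps a hand-rolled RPC server for the traced variant from @aztec/telemetry-client. A minimal sketch of the resulting wiring, under stated assumptions: only createTracedJsonRpcServer, AztecNodeApiSchema, getApp, and getConfigEnvVars appear in the diff; the AztecNodeService.createAndSync entrypoint and a Koa-style app exposing callback() are assumptions, not confirmed by this diff.

#!/usr/bin/env -S node --no-warnings
import { AztecNodeApiSchema } from '@aztec/stdlib/interfaces/client';
import { createTracedJsonRpcServer } from '@aztec/telemetry-client';

import http from 'http';

import { AztecNodeService, getConfigEnvVars } from '../index.js';

const { AZTEC_NODE_PORT = 8081, API_PREFIX = '' } = process.env;

// Assumption: createAndSync builds and syncs the node from env-derived config.
const aztecNode = await AztecNodeService.createAndSync(getConfigEnvVars());

// Every RPC method declared in AztecNodeApiSchema is now served inside a telemetry span.
const rpcServer = createTracedJsonRpcServer(aztecNode, AztecNodeApiSchema);
const app = rpcServer.getApp(API_PREFIX);

// Assumption: getApp returns a Koa-style app, so callback() adapts it to node's http server.
http.createServer(app.callback()).listen(+AZTEC_NODE_PORT);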
package/src/sentinel/config.ts
ADDED
@@ -0,0 +1,37 @@
+import { type ConfigMappingsType, booleanConfigHelper, numberConfigHelper } from '@aztec/foundation/config';
+
+export type SentinelConfig = {
+  sentinelHistoryLengthInEpochs: number;
+  sentinelHistoricProvenPerformanceLengthInEpochs: number;
+  sentinelEnabled: boolean;
+};
+
+export const sentinelConfigMappings: ConfigMappingsType<SentinelConfig> = {
+  sentinelHistoryLengthInEpochs: {
+    description: 'The number of L2 epochs kept of history for each validator for computing their stats.',
+    env: 'SENTINEL_HISTORY_LENGTH_IN_EPOCHS',
+    ...numberConfigHelper(24),
+  },
+  /**
+   * The number of L2 epochs kept of proven performance history for each validator.
+   * This value must be large enough so that we have proven performance for every validator
+   * for at least slashInactivityConsecutiveEpochThreshold epochs. Assuming that threshold is 3,
+   * the committee size is 48, and we have 10k validators, then we pick 48 out of 10k each draw.
+   * For any fixed element, per-draw prob = 48/10000 = 0.0048.
+   * After n draws, count ~ Binomial(n, 0.0048). We want P(X >= 3).
+   * Results (exact binomial):
+   * - 90% chance: n = 1108
+   * - 95% chance: n = 1310
+   * - 99% chance: n = 1749
+   */
+  sentinelHistoricProvenPerformanceLengthInEpochs: {
+    description: 'The number of L2 epochs kept of proven performance history for each validator.',
+    env: 'SENTINEL_HISTORIC_PROVEN_PERFORMANCE_LENGTH_IN_EPOCHS',
+    ...numberConfigHelper(2000),
+  },
+  sentinelEnabled: {
+    description: 'Whether the sentinel is enabled or not.',
+    env: 'SENTINEL_ENABLED',
+    ...booleanConfigHelper(false),
+  },
+};
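The exact-binomial figures quoted in the comment above are easy to check. A self-contained verification, assuming only the draw model stated there (a fixed validator is picked with probability 48/10000 per epoch); the helper below is illustrative and not part of the package:

// P(X >= k) for X ~ Binomial(n, p), computed iteratively to avoid large factorials.
function probAtLeast(k: number, n: number, p: number): number {
  let term = (1 - p) ** n; // P(X = 0)
  let cdf = term;
  for (let i = 1; i < k; i++) {
    term *= ((n - i + 1) / i) * (p / (1 - p)); // P(X = i) from P(X = i - 1)
    cdf += term;
  }
  return 1 - cdf;
}

const p = 48 / 10_000; // per-draw probability of a fixed validator being picked
for (const n of [1108, 1310, 1749]) {
  console.log(n, probAtLeast(3, n, p).toFixed(3)); // 0.900, 0.950, 0.990
}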
package/src/sentinel/factory.ts
ADDED
@@ -0,0 +1,36 @@
+import type { EpochCache } from '@aztec/epoch-cache';
+import { createLogger } from '@aztec/foundation/log';
+import type { DataStoreConfig } from '@aztec/kv-store/config';
+import { createStore } from '@aztec/kv-store/lmdb-v2';
+import type { P2PClient } from '@aztec/p2p';
+import type { L2BlockSource } from '@aztec/stdlib/block';
+import type { SlasherConfig } from '@aztec/stdlib/interfaces/server';
+
+import type { SentinelConfig } from './config.js';
+import { Sentinel } from './sentinel.js';
+import { SentinelStore } from './store.js';
+
+export async function createSentinel(
+  epochCache: EpochCache,
+  archiver: L2BlockSource,
+  p2p: P2PClient,
+  config: SentinelConfig & DataStoreConfig & SlasherConfig,
+  logger = createLogger('node:sentinel'),
+): Promise<Sentinel | undefined> {
+  if (!config.sentinelEnabled) {
+    return undefined;
+  }
+  const kvStore = await createStore(
+    'sentinel',
+    SentinelStore.SCHEMA_VERSION,
+    config,
+    createLogger('node:sentinel:lmdb'),
+  );
+  const storeHistoryLength = config.sentinelHistoryLengthInEpochs * epochCache.getL1Constants().epochDuration;
+  const storeHistoricProvenPerformanceLength = config.sentinelHistoricProvenPerformanceLengthInEpochs;
+  const sentinelStore = new SentinelStore(kvStore, {
+    historyLength: storeHistoryLength,
+    historicProvenPerformanceLength: storeHistoricProvenPerformanceLength,
+  });
+  return new Sentinel(epochCache, archiver, p2p, sentinelStore, config, logger);
+}
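Note the asymmetry in the two bounds built above: the status history is converted from epochs into slots via the L1 epochDuration constant, while the proven-performance history stays denominated in epochs. A worked sketch of that arithmetic with the config defaults, assuming an illustrative epochDuration of 32 slots per epoch (an assumed value, not one taken from this package):

// Assumed for illustration; the factory reads the real value from
// epochCache.getL1Constants().epochDuration.
const epochDuration = 32; // slots per L2 epoch (assumption)

const sentinelHistoryLengthInEpochs = 24; // default from sentinelConfigMappings
const sentinelHistoricProvenPerformanceLengthInEpochs = 2000; // default

const historyLength = sentinelHistoryLengthInEpochs * epochDuration; // 768 slot entries per validator
const historicProvenPerformanceLength = sentinelHistoricProvenPerformanceLengthInEpochs; // 2000 epoch entries

console.log({ historyLength, historicProvenPerformanceLength });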
package/src/sentinel/sentinel.ts
ADDED
@@ -0,0 +1,489 @@
+import type { EpochCache } from '@aztec/epoch-cache';
+import { countWhile, filterAsync, fromEntries, getEntries, mapValues } from '@aztec/foundation/collection';
+import { EthAddress } from '@aztec/foundation/eth-address';
+import { createLogger } from '@aztec/foundation/log';
+import { RunningPromise } from '@aztec/foundation/running-promise';
+import { L2TipsMemoryStore, type L2TipsStore } from '@aztec/kv-store/stores';
+import type { P2PClient } from '@aztec/p2p';
+import {
+  OffenseType,
+  WANT_TO_SLASH_EVENT,
+  type WantToSlashArgs,
+  type Watcher,
+  type WatcherEmitter,
+} from '@aztec/slasher';
+import type { SlasherConfig } from '@aztec/slasher/config';
+import {
+  type L2BlockSource,
+  L2BlockStream,
+  type L2BlockStreamEvent,
+  type L2BlockStreamEventHandler,
+  getAttestationInfoFromPublishedL2Block,
+} from '@aztec/stdlib/block';
+import { getEpochAtSlot, getSlotRangeForEpoch, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers';
+import type {
+  SingleValidatorStats,
+  ValidatorStats,
+  ValidatorStatusHistory,
+  ValidatorStatusInSlot,
+  ValidatorStatusType,
+  ValidatorsEpochPerformance,
+  ValidatorsStats,
+} from '@aztec/stdlib/validators';
+
+import EventEmitter from 'node:events';
+
+import { SentinelStore } from './store.js';
+
+export class Sentinel extends (EventEmitter as new () => WatcherEmitter) implements L2BlockStreamEventHandler, Watcher {
+  protected runningPromise: RunningPromise;
+  protected blockStream!: L2BlockStream;
+  protected l2TipsStore: L2TipsStore;
+
+  protected initialSlot: bigint | undefined;
+  protected lastProcessedSlot: bigint | undefined;
+  protected slotNumberToBlock: Map<bigint, { blockNumber: number; archive: string; attestors: EthAddress[] }> =
+    new Map();
+
+  constructor(
+    protected epochCache: EpochCache,
+    protected archiver: L2BlockSource,
+    protected p2p: P2PClient,
+    protected store: SentinelStore,
+    protected config: Pick<
+      SlasherConfig,
+      'slashInactivityTargetPercentage' | 'slashInactivityPenalty' | 'slashInactivityConsecutiveEpochThreshold'
+    >,
+    protected logger = createLogger('node:sentinel'),
+  ) {
+    super();
+    this.l2TipsStore = new L2TipsMemoryStore();
+    const interval = (epochCache.getL1Constants().ethereumSlotDuration * 1000) / 4;
+    this.runningPromise = new RunningPromise(this.work.bind(this), logger, interval);
+  }
+
+  public updateConfig(config: Partial<SlasherConfig>) {
+    this.config = { ...this.config, ...config };
+  }
+
+  public async start() {
+    await this.init();
+    this.runningPromise.start();
+  }
+
+  /** Loads initial slot and initializes blockstream. We will not process anything at or before the initial slot. */
+  protected async init() {
+    this.initialSlot = this.epochCache.getEpochAndSlotNow().slot;
+    const startingBlock = await this.archiver.getBlockNumber();
+    this.logger.info(`Starting validator sentinel with initial slot ${this.initialSlot} and block ${startingBlock}`);
+    this.blockStream = new L2BlockStream(this.archiver, this.l2TipsStore, this, this.logger, { startingBlock });
+  }
+
+  public stop() {
+    return this.runningPromise.stop();
+  }
+
+  public async handleBlockStreamEvent(event: L2BlockStreamEvent): Promise<void> {
+    await this.l2TipsStore.handleBlockStreamEvent(event);
+    if (event.type === 'blocks-added') {
+      // Store mapping from slot to archive, block number, and attestors
+      for (const block of event.blocks) {
+        this.slotNumberToBlock.set(block.block.header.getSlot(), {
+          blockNumber: block.block.number,
+          archive: block.block.archive.root.toString(),
+          attestors: getAttestationInfoFromPublishedL2Block(block)
+            .filter(a => a.status === 'recovered-from-signature')
+            .map(a => a.address!),
+        });
+      }
+
+      // Prune the archive map to only keep at most N entries
+      const historyLength = this.store.getHistoryLength();
+      if (this.slotNumberToBlock.size > historyLength) {
+        const toDelete = Array.from(this.slotNumberToBlock.keys())
+          .sort((a, b) => Number(a - b))
+          .slice(0, this.slotNumberToBlock.size - historyLength);
+        for (const key of toDelete) {
+          this.slotNumberToBlock.delete(key);
+        }
+      }
+    } else if (event.type === 'chain-proven') {
+      await this.handleChainProven(event);
+    }
+  }
+
+  protected async handleChainProven(event: L2BlockStreamEvent) {
+    if (event.type !== 'chain-proven') {
+      return;
+    }
+    const blockNumber = event.block.number;
+    const block = await this.archiver.getBlock(blockNumber);
+    if (!block) {
+      this.logger.error(`Failed to get block ${blockNumber}`, { block });
+      return;
+    }
+
+    // TODO(palla/slash): We should only be computing proven performance if this is
+    // a full proof epoch and not a partial one, otherwise we'll end up with skewed stats.
+    const epoch = getEpochAtSlot(block.header.getSlot(), this.epochCache.getL1Constants());
+    this.logger.debug(`Computing proven performance for epoch ${epoch}`);
+    const performance = await this.computeProvenPerformance(epoch);
+    this.logger.info(`Computed proven performance for epoch ${epoch}`, performance);
+
+    await this.store.updateProvenPerformance(epoch, performance);
+    await this.handleProvenPerformance(epoch, performance);
+  }
+
+  protected async computeProvenPerformance(epoch: bigint): Promise<ValidatorsEpochPerformance> {
+    const [fromSlot, toSlot] = getSlotRangeForEpoch(epoch, this.epochCache.getL1Constants());
+    const { committee } = await this.epochCache.getCommittee(fromSlot);
+    if (!committee) {
+      this.logger.trace(`No committee found for slot ${fromSlot}`);
+      return {};
+    }
+
+    const stats = await this.computeStats({ fromSlot, toSlot, validators: committee });
+    this.logger.debug(`Stats for epoch ${epoch}`, { ...stats, fromSlot, toSlot, epoch });
+
+    // Note that we are NOT using the total slots in the epoch as `total` here, since we only
+    // compute missed attestations over the blocks that had a proposal in them. So, let's say
+    // we have an epoch with 10 slots, but only 5 had a block proposal. A validator that was
+    // offline, assuming they were not picked as proposer, will then be reported as having missed
+    // 5/5 attestations. If we used the total, they'd be reported as 5/10, which would probably
+    // allow them to avoid being slashed.
+    return mapValues(stats.stats, stat => ({
+      missed: stat.missedAttestations.count + stat.missedProposals.count,
+      total: stat.missedAttestations.total + stat.missedProposals.total,
+    }));
+  }
+
+  /**
+   * Checks if a validator has been inactive for the specified number of consecutive epochs for which we have data on it.
+   * @param validator The validator address to check
+   * @param currentEpoch Only epochs strictly before this one are evaluated
+   * @param requiredConsecutiveEpochs Number of consecutive epochs required for slashing
+   */
+  protected async checkPastInactivity(
+    validator: EthAddress,
+    currentEpoch: bigint,
+    requiredConsecutiveEpochs: number,
+  ): Promise<boolean> {
+    if (requiredConsecutiveEpochs === 0) {
+      return true;
+    }
+
+    // Get all historical performance for this validator
+    const allPerformance = await this.store.getProvenPerformance(validator);
+
+    // If we don't have enough historical data, don't slash
+    if (allPerformance.length < requiredConsecutiveEpochs) {
+      this.logger.debug(
+        `Not enough historical data for slashing ${validator} for inactivity (${allPerformance.length} epochs < ${requiredConsecutiveEpochs} required)`,
+      );
+      return false;
+    }
+
+    // Sort by epoch descending to get most recent first, keep only epochs strictly before the current one, and get the first N
+    return allPerformance
+      .sort((a, b) => Number(b.epoch - a.epoch))
+      .filter(p => p.epoch < currentEpoch)
+      .slice(0, requiredConsecutiveEpochs)
+      .every(p => p.missed / p.total >= this.config.slashInactivityTargetPercentage);
+  }
+
+  protected async handleProvenPerformance(epoch: bigint, performance: ValidatorsEpochPerformance) {
+    if (this.config.slashInactivityPenalty === 0n) {
+      return;
+    }
+
+    const inactiveValidators = getEntries(performance)
+      .filter(([_, { missed, total }]) => missed / total >= this.config.slashInactivityTargetPercentage)
+      .map(([address]) => address);
+
+    this.logger.debug(`Found ${inactiveValidators.length} inactive validators in epoch ${epoch}`, {
+      inactiveValidators,
+      epoch,
+      inactivityTargetPercentage: this.config.slashInactivityTargetPercentage,
+    });
+
+    const epochThreshold = this.config.slashInactivityConsecutiveEpochThreshold;
+    const criminals: string[] = await filterAsync(inactiveValidators, address =>
+      this.checkPastInactivity(EthAddress.fromString(address), epoch, epochThreshold - 1),
+    );
+
+    const args: WantToSlashArgs[] = criminals.map(address => ({
+      validator: EthAddress.fromString(address),
+      amount: this.config.slashInactivityPenalty,
+      offenseType: OffenseType.INACTIVITY,
+      epochOrSlot: epoch,
+    }));
+
+    if (criminals.length > 0) {
+      this.logger.verbose(
+        `Identified ${criminals.length} validators to slash due to inactivity in at least ${epochThreshold} consecutive epochs`,
+        { ...args, epochThreshold },
+      );
+      this.emit(WANT_TO_SLASH_EVENT, args);
+    }
+  }
+
+  /**
+   * Process data for two L2 slots ago.
+   * Note that we do not process historical data, since we rely on p2p data for processing,
+   * and we don't have that data if we were offline during the period.
+   */
+  public async work() {
+    const { slot: currentSlot } = this.epochCache.getEpochAndSlotNow();
+    try {
+      // Manually sync the block stream to ensure we have the latest data.
+      // Note we never `start` the blockstream, so it loops at the same pace as we do.
+      await this.blockStream.sync();
+
+      // Check if we are ready to process data for two L2 slots ago.
+      const targetSlot = await this.isReadyToProcess(currentSlot);
+
+      // And process it if we are.
+      if (targetSlot !== false) {
+        await this.processSlot(targetSlot);
+      }
+    } catch (err) {
+      this.logger.error(`Failed to process slot ${currentSlot}`, err);
+    }
+  }
+
+  /**
+   * Check if we are ready to process data for two L2 slots ago, so we allow plenty of time for p2p to process all in-flight attestations.
+   * We also don't move past the archiver last synced L2 slot, as we don't want to process data that is not yet available.
+   * Last, we check the p2p is synced with the archiver, so it has pulled all attestations from it.
+   */
+  protected async isReadyToProcess(currentSlot: bigint) {
+    const targetSlot = currentSlot - 2n;
+    if (this.lastProcessedSlot && this.lastProcessedSlot >= targetSlot) {
+      this.logger.trace(`Already processed slot ${targetSlot}`, { lastProcessedSlot: this.lastProcessedSlot });
+      return false;
+    }
+
+    if (this.initialSlot === undefined) {
+      this.logger.error(`Initial slot not loaded.`);
+      return false;
+    }
+
+    if (targetSlot <= this.initialSlot) {
+      this.logger.trace(`Refusing to process slot ${targetSlot} given initial slot ${this.initialSlot}`);
+      return false;
+    }
+
+    const archiverSlot = await this.archiver.getL2SlotNumber();
+    if (archiverSlot === undefined || archiverSlot < targetSlot) {
+      this.logger.debug(`Waiting for archiver to sync with L2 slot ${targetSlot}`, { archiverSlot, targetSlot });
+      return false;
+    }
+
+    const archiverLastBlockHash = await this.l2TipsStore.getL2Tips().then(tip => tip.latest.hash);
+    const p2pLastBlockHash = await this.p2p.getL2Tips().then(tips => tips.latest.hash);
+    const isP2pSynced = archiverLastBlockHash === p2pLastBlockHash;
+    if (!isP2pSynced) {
+      this.logger.debug(`Waiting for P2P client to sync with archiver`, { archiverLastBlockHash, p2pLastBlockHash });
+      return false;
+    }
+
+    return targetSlot;
+  }
+
+  /**
+   * Gathers committee and proposer data for a given slot, computes slot stats,
+   * and updates overall stats.
+   */
+  protected async processSlot(slot: bigint) {
+    const { epoch, seed, committee } = await this.epochCache.getCommittee(slot);
+    if (!committee || committee.length === 0) {
+      this.logger.trace(`No committee found for slot ${slot} at epoch ${epoch}`);
+      this.lastProcessedSlot = slot;
+      return;
+    }
+    const proposerIndex = this.epochCache.computeProposerIndex(slot, epoch, seed, BigInt(committee.length));
+    const proposer = committee[Number(proposerIndex)];
+    const stats = await this.getSlotActivity(slot, epoch, proposer, committee);
+    this.logger.verbose(`Updating L2 slot ${slot} observed activity`, stats);
+    await this.updateValidators(slot, stats);
+    this.lastProcessedSlot = slot;
+  }
+
+  /** Computes activity for a given slot. */
+  protected async getSlotActivity(slot: bigint, epoch: bigint, proposer: EthAddress, committee: EthAddress[]) {
+    this.logger.debug(`Computing stats for slot ${slot} at epoch ${epoch}`, { slot, epoch, proposer, committee });
+
+    // Check if there is an L2 block in L1 for this L2 slot
+
+    // Here we get all attestations for the block mined at the given slot,
+    // or all attestations for all proposals in the slot if no block was mined.
+    // We gather from both p2p (contains the ones seen on the p2p layer) and archiver
+    // (contains the ones synced from mined blocks, which we may have missed from p2p).
+    const block = this.slotNumberToBlock.get(slot);
+    const p2pAttested = await this.p2p.getAttestationsForSlot(slot, block?.archive);
+    // Filter out attestations with invalid signatures
+    const p2pAttestors = p2pAttested.map(a => a.getSender()).filter((s): s is EthAddress => s !== undefined);
+    const attestors = new Set(
+      [...p2pAttestors.map(a => a.toString()), ...(block?.attestors.map(a => a.toString()) ?? [])].filter(
+        addr => proposer.toString() !== addr, // Exclude the proposer from the attestors
+      ),
+    );
+
+    // We assume that there was a block proposal if at least one of the validators (other than the proposer) attested to it.
+    // It could be the case that every single validator failed, and we could differentiate it by having
+    // this node re-execute every block proposal it sees and storing it in the attestation pool.
+    // But we'll leave that corner case out to reduce pressure on the node.
+    // TODO(palla/slash): This breaks if a given node has more than one validator in the current committee,
+    // since they will attest to their own proposal even if it's not re-executable.
+    const blockStatus = block ? 'mined' : attestors.size > 0 ? 'proposed' : 'missed';
+    this.logger.debug(`Block for slot ${slot} was ${blockStatus}`, { ...block, slot });
+
+    // Get attestors that failed their duties for this block, but only if there was a block proposed
+    const missedAttestors = new Set(
+      blockStatus === 'missed'
+        ? []
+        : committee.filter(v => !attestors.has(v.toString()) && !proposer.equals(v)).map(v => v.toString()),
+    );
+
+    this.logger.debug(`Retrieved ${attestors.size} attestors out of ${committee.length} for slot ${slot}`, {
+      blockStatus,
+      proposer: proposer.toString(),
+      ...block,
+      slot,
+      attestors: [...attestors],
+      missedAttestors: [...missedAttestors],
+      committee: committee.map(c => c.toString()),
+    });
+
+    // Compute the status for each validator in the committee
+    const statusFor = (who: `0x${string}`): ValidatorStatusInSlot | undefined => {
+      if (who === proposer.toString()) {
+        return `block-${blockStatus}`;
+      } else if (attestors.has(who)) {
+        return 'attestation-sent';
+      } else if (missedAttestors.has(who)) {
+        return 'attestation-missed';
+      } else {
+        return undefined;
+      }
+    };
+
+    return Object.fromEntries(committee.map(v => v.toString()).map(who => [who, statusFor(who)]));
+  }
+
+  /** Push the status for each slot for each validator. */
+  protected updateValidators(slot: bigint, stats: Record<`0x${string}`, ValidatorStatusInSlot | undefined>) {
+    return this.store.updateValidators(slot, stats);
+  }
+
+  /** Computes stats to be returned based on stored data. */
+  public async computeStats({
+    fromSlot,
+    toSlot,
+    validators,
+  }: { fromSlot?: bigint; toSlot?: bigint; validators?: EthAddress[] } = {}): Promise<ValidatorsStats> {
+    const histories = validators
+      ? fromEntries(await Promise.all(validators.map(async v => [v.toString(), await this.store.getHistory(v)])))
+      : await this.store.getHistories();
+
+    const slotNow = this.epochCache.getEpochAndSlotNow().slot;
+    fromSlot ??= (this.lastProcessedSlot ?? slotNow) - BigInt(this.store.getHistoryLength());
+    toSlot ??= this.lastProcessedSlot ?? slotNow;
+
+    const stats = mapValues(histories, (history, address) =>
+      this.computeStatsForValidator(address, history ?? [], fromSlot, toSlot),
+    );
+
+    return {
+      stats,
+      lastProcessedSlot: this.lastProcessedSlot,
+      initialSlot: this.initialSlot,
+      slotWindow: this.store.getHistoryLength(),
+    };
+  }
+
+  /** Computes stats for a single validator. */
+  public async getValidatorStats(
+    validatorAddress: EthAddress,
+    fromSlot?: bigint,
+    toSlot?: bigint,
+  ): Promise<SingleValidatorStats | undefined> {
+    const history = await this.store.getHistory(validatorAddress);
+
+    if (!history || history.length === 0) {
+      return undefined;
+    }
+
+    const slotNow = this.epochCache.getEpochAndSlotNow().slot;
+    const effectiveFromSlot = fromSlot ?? (this.lastProcessedSlot ?? slotNow) - BigInt(this.store.getHistoryLength());
+    const effectiveToSlot = toSlot ?? this.lastProcessedSlot ?? slotNow;
+
+    const historyLength = BigInt(this.store.getHistoryLength());
+    if (effectiveToSlot - effectiveFromSlot > historyLength) {
+      throw new Error(
+        `Slot range (${effectiveToSlot - effectiveFromSlot}) exceeds history length (${historyLength}). ` +
+          `Requested range: ${effectiveFromSlot} to ${effectiveToSlot}.`,
+      );
+    }
+
+    const validator = this.computeStatsForValidator(
+      validatorAddress.toString(),
+      history,
+      effectiveFromSlot,
+      effectiveToSlot,
+    );
+    const allTimeProvenPerformance = await this.store.getProvenPerformance(validatorAddress);
+
+    return {
+      validator,
+      allTimeProvenPerformance,
+      lastProcessedSlot: this.lastProcessedSlot,
+      initialSlot: this.initialSlot,
+      slotWindow: this.store.getHistoryLength(),
+    };
+  }
+
+  protected computeStatsForValidator(
+    address: `0x${string}`,
+    allHistory: ValidatorStatusHistory,
+    fromSlot?: bigint,
+    toSlot?: bigint,
+  ): ValidatorStats {
+    let history = fromSlot ? allHistory.filter(h => h.slot >= fromSlot) : allHistory;
+    history = toSlot ? history.filter(h => h.slot <= toSlot) : history;
+    const lastProposal = history.filter(h => h.status === 'block-proposed' || h.status === 'block-mined').at(-1);
+    const lastAttestation = history.filter(h => h.status === 'attestation-sent').at(-1);
+    return {
+      address: EthAddress.fromString(address),
+      lastProposal: this.computeFromSlot(lastProposal?.slot),
+      lastAttestation: this.computeFromSlot(lastAttestation?.slot),
+      totalSlots: history.length,
+      missedProposals: this.computeMissed(history, 'block', ['block-missed']),
+      missedAttestations: this.computeMissed(history, 'attestation', ['attestation-missed']),
+      history,
+    };
+  }
+
+  protected computeMissed(
+    history: ValidatorStatusHistory,
+    computeOverPrefix: ValidatorStatusType | undefined,
+    filter: ValidatorStatusInSlot[],
+  ) {
+    const relevantHistory = history.filter(h => !computeOverPrefix || h.status.startsWith(computeOverPrefix));
+    const filteredHistory = relevantHistory.filter(h => filter.includes(h.status));
+    return {
+      currentStreak: countWhile([...relevantHistory].reverse(), h => filter.includes(h.status)),
+      rate: relevantHistory.length === 0 ? undefined : filteredHistory.length / relevantHistory.length,
+      count: filteredHistory.length,
+      total: relevantHistory.length,
+    };
+  }
+
+  protected computeFromSlot(slot: bigint | undefined) {
+    if (slot === undefined) {
+      return undefined;
+    }
+    const timestamp = getTimestampForSlot(slot, this.epochCache.getL1Constants());
+    return { timestamp, slot, date: new Date(Number(timestamp) * 1000).toISOString() };
+  }
+}
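Taken together, handleProvenPerformance and checkPastInactivity implement the sentinel's inactivity rule: an epoch counts as inactive when missed/total reaches slashInactivityTargetPercentage, and a slash is only proposed when the just-proven epoch plus the threshold - 1 most recent prior epochs are all inactive. A detached sketch of that rule under stated assumptions (the type and data below are made up for illustration; the real code reads proven performance from SentinelStore):

type EpochPerf = { epoch: bigint; missed: number; total: number };

function isSlashable(
  current: EpochPerf,
  priorHistory: EpochPerf[], // proven performance for earlier epochs
  targetPercentage: number, // slashInactivityTargetPercentage
  consecutiveEpochs: number, // slashInactivityConsecutiveEpochThreshold
): boolean {
  const inactive = (p: EpochPerf) => p.missed / p.total >= targetPercentage;
  if (!inactive(current)) {
    return false; // only validators inactive in the epoch just proven are considered
  }
  const required = consecutiveEpochs - 1;
  if (required === 0) {
    return true; // a single inactive epoch suffices at threshold 1
  }
  if (priorHistory.length < required) {
    return false; // mirrors the "not enough historical data" guard
  }
  return [...priorHistory]
    .sort((a, b) => Number(b.epoch - a.epoch)) // most recent first
    .filter(p => p.epoch < current.epoch) // strictly before the current epoch
    .slice(0, required)
    .every(inactive);
}

// With a threshold of 3, epoch 12 is slashable only because epochs 10 and 11 were also inactive.
const prior = [
  { epoch: 10n, missed: 5, total: 5 },
  { epoch: 11n, missed: 4, total: 5 },
];
console.log(isSlashable({ epoch: 12n, missed: 5, total: 5 }, prior, 0.8, 3)); // true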