@aztec/aztec-node 3.0.0-canary.a9708bd → 3.0.0-devnet.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,14 +11,12 @@ import {
11
11
  } from '@aztec/constants';
12
12
  import { EpochCache, type EpochCacheInterface } from '@aztec/epoch-cache';
13
13
  import {
14
- type EthSigner,
15
14
  type L1ContractAddresses,
16
15
  RegistryContract,
17
16
  RollupContract,
18
17
  createEthereumChain,
19
18
  getPublicClient,
20
19
  } from '@aztec/ethereum';
21
- import { createL1TxUtilsWithBlobsFromEthSigner } from '@aztec/ethereum/l1-tx-utils-with-blobs';
22
20
  import { compactArray, pick } from '@aztec/foundation/collection';
23
21
  import { EthAddress } from '@aztec/foundation/eth-address';
24
22
  import { Fr } from '@aztec/foundation/fields';
@@ -30,6 +28,7 @@ import { DateProvider, Timer } from '@aztec/foundation/timer';
30
28
  import { MembershipWitness, SiblingPath } from '@aztec/foundation/trees';
31
29
  import { KeystoreManager, loadKeystores, mergeKeystores } from '@aztec/node-keystore';
32
30
  import { trySnapshotSync, uploadSnapshot } from '@aztec/node-lib/actions';
31
+ import { createL1TxUtilsWithBlobsFromEthSigner } from '@aztec/node-lib/factories';
33
32
  import { type P2P, type P2PClientDeps, createP2PClient, getDefaultAllowedSetupFunctions } from '@aztec/p2p';
34
33
  import { ProtocolContractAddress } from '@aztec/protocol-contracts';
35
34
  import {
@@ -74,6 +73,7 @@ import {
74
73
  type GetPublicLogsResponse,
75
74
  } from '@aztec/stdlib/interfaces/client';
76
75
  import {
76
+ type AllowedElement,
77
77
  type ClientProtocolCircuitVerifier,
78
78
  type L2LogsSource,
79
79
  type Service,
@@ -82,7 +82,7 @@ import {
82
82
  tryStop,
83
83
  } from '@aztec/stdlib/interfaces/server';
84
84
  import type { LogFilter, PrivateLog, TxScopedL2Log } from '@aztec/stdlib/logs';
85
- import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging';
85
+ import { InboxLeaf, type L1ToL2MessageSource } from '@aztec/stdlib/messaging';
86
86
  import { P2PClientType } from '@aztec/stdlib/p2p';
87
87
  import type { Offense, SlashPayloadRound } from '@aztec/stdlib/slashing';
88
88
  import type { NullifierLeafPreimage, PublicDataTreeLeaf, PublicDataTreeLeafPreimage } from '@aztec/stdlib/trees';
@@ -108,7 +108,12 @@ import {
108
108
  getTelemetryClient,
109
109
  trackSpan,
110
110
  } from '@aztec/telemetry-client';
111
- import { NodeKeystoreAdapter, ValidatorClient, createValidatorClient } from '@aztec/validator-client';
111
+ import {
112
+ NodeKeystoreAdapter,
113
+ ValidatorClient,
114
+ createBlockProposalHandler,
115
+ createValidatorClient,
116
+ } from '@aztec/validator-client';
112
117
  import { createWorldStateSynchronizer } from '@aztec/world-state';
113
118
 
114
119
  import { createPublicClient, fallback, http } from 'viem';
@@ -212,6 +217,8 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
212
217
  }
213
218
  }
214
219
 
220
+ await keyStoreManager?.validateSigners();
221
+
215
222
  // If we are a validator, verify our configuration before doing too much more.
216
223
  if (!config.disableValidator) {
217
224
  if (keyStoreManager === undefined) {
@@ -222,7 +229,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
222
229
  'KEY STORE CREATED FROM ENVIRONMENT, IT IS RECOMMENDED TO USE A FILE-BASED KEY STORE IN PRODUCTION ENVIRONMENTS',
223
230
  );
224
231
  }
225
- ValidatorClient.validateKeyStoreConfiguration(keyStoreManager);
232
+ ValidatorClient.validateKeyStoreConfiguration(keyStoreManager, log);
226
233
  }
227
234
 
228
235
  // validate that the actual chain id matches that specified in configuration
@@ -300,12 +307,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
300
307
  deps.p2pClientDeps,
301
308
  );
302
309
 
303
- // Start world state and wait for it to sync to the archiver.
304
- await worldStateSynchronizer.start();
305
-
306
- // Start p2p. Note that it depends on world state to be running.
307
- await p2pClient.start();
308
-
310
+ // We should really not be modifying the config object
309
311
  config.txPublicSetupAllowList = config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions());
310
312
 
311
313
  const blockBuilder = new BlockBuilder(
@@ -316,8 +318,52 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
316
318
  telemetry,
317
319
  );
318
320
 
321
+ // We'll accumulate sentinel watchers here
319
322
  const watchers: Watcher[] = [];
320
323
 
324
+ // Create validator client if required
325
+ const validatorClient = createValidatorClient(config, {
326
+ p2pClient,
327
+ telemetry,
328
+ dateProvider,
329
+ epochCache,
330
+ blockBuilder,
331
+ blockSource: archiver,
332
+ l1ToL2MessageSource: archiver,
333
+ keyStoreManager,
334
+ });
335
+
336
+ // If we have a validator client, register it as a source of offenses for the slasher,
337
+ // and have it register callbacks on the p2p client *before* we start it, otherwise messages
338
+ // like attestations or auths will fail.
339
+ if (validatorClient) {
340
+ watchers.push(validatorClient);
341
+ if (!options.dontStartSequencer) {
342
+ await validatorClient.registerHandlers();
343
+ }
344
+ }
345
+
346
+ // If there's no validator client but alwaysReexecuteBlockProposals is enabled,
347
+ // create a BlockProposalHandler to reexecute block proposals for monitoring
348
+ if (!validatorClient && config.alwaysReexecuteBlockProposals) {
349
+ log.info('Setting up block proposal reexecution for monitoring');
350
+ createBlockProposalHandler(config, {
351
+ blockBuilder,
352
+ epochCache,
353
+ blockSource: archiver,
354
+ l1ToL2MessageSource: archiver,
355
+ p2pClient,
356
+ dateProvider,
357
+ telemetry,
358
+ }).registerForReexecution(p2pClient);
359
+ }
360
+
361
+ // Start world state and wait for it to sync to the archiver.
362
+ await worldStateSynchronizer.start();
363
+
364
+ // Start p2p. Note that it depends on world state to be running.
365
+ await p2pClient.start();
366
+
321
367
  const validatorsSentinel = await createSentinel(epochCache, archiver, p2pClient, config);
322
368
  if (validatorsSentinel) {
323
369
  // we can run a sentinel without trying to slash.
@@ -349,21 +395,6 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
349
395
  watchers.push(attestationsBlockWatcher);
350
396
  }
351
397
 
352
- const validatorClient = createValidatorClient(config, {
353
- p2pClient,
354
- telemetry,
355
- dateProvider,
356
- epochCache,
357
- blockBuilder,
358
- blockSource: archiver,
359
- l1ToL2MessageSource: archiver,
360
- keyStoreManager,
361
- });
362
-
363
- if (validatorClient) {
364
- watchers.push(validatorClient);
365
- }
366
-
367
398
  log.verbose(`All Aztec Node subsystems synced`);
368
399
 
369
400
  // Validator enabled, create/start relevant service
@@ -389,9 +420,12 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
389
420
  );
390
421
  await slasherClient.start();
391
422
 
392
- const l1TxUtils = keyStoreManager!.createAllValidatorPublisherSigners().map((signer: EthSigner) => {
393
- return createL1TxUtilsWithBlobsFromEthSigner(publicClient, signer, log, dateProvider, config);
394
- });
423
+ const l1TxUtils = await createL1TxUtilsWithBlobsFromEthSigner(
424
+ publicClient,
425
+ keyStoreManager!.createAllValidatorPublisherSigners(),
426
+ { ...config, scope: 'sequencer' },
427
+ { telemetry, logger: log.createChild('l1-tx-utils'), dateProvider },
428
+ );
395
429
 
396
430
  sequencer = await SequencerClient.new(config, {
397
431
  // if deps were provided, they should override the defaults,
@@ -416,6 +450,8 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
416
450
  if (!options.dontStartSequencer && sequencer) {
417
451
  await sequencer.start();
418
452
  log.verbose(`Sequencer started`);
453
+ } else if (sequencer) {
454
+ log.warn(`Sequencer created but not started`);
419
455
  }
420
456
 
421
457
  return new AztecNodeService(
@@ -473,6 +509,10 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
473
509
  return Promise.resolve(this.p2pClient.getEnr()?.encodeTxt());
474
510
  }
475
511
 
512
+ public async getAllowedPublicSetup(): Promise<AllowedElement[]> {
513
+ return this.config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions());
514
+ }
515
+
476
516
  /**
477
517
  * Method to determine if the node is ready to accept transactions.
478
518
  * @returns - Flag indicating the readiness for tx submission.
@@ -508,8 +548,29 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
508
548
  * @param number - The block number being requested.
509
549
  * @returns The requested block.
510
550
  */
511
- public async getBlock(number: number): Promise<L2Block | undefined> {
512
- return await this.blockSource.getBlock(number);
551
+ public async getBlock(number: L2BlockNumber): Promise<L2Block | undefined> {
552
+ const blockNumber = number === 'latest' ? await this.getBlockNumber() : number;
553
+ return await this.blockSource.getBlock(blockNumber);
554
+ }
555
+
556
+ /**
557
+ * Get a block specified by its hash.
558
+ * @param blockHash - The block hash being requested.
559
+ * @returns The requested block.
560
+ */
561
+ public async getBlockByHash(blockHash: Fr): Promise<L2Block | undefined> {
562
+ const publishedBlock = await this.blockSource.getPublishedBlockByHash(blockHash);
563
+ return publishedBlock?.block;
564
+ }
565
+
566
+ /**
567
+ * Get a block specified by its archive root.
568
+ * @param archive - The archive root being requested.
569
+ * @returns The requested block.
570
+ */
571
+ public async getBlockByArchive(archive: Fr): Promise<L2Block | undefined> {
572
+ const publishedBlock = await this.blockSource.getPublishedBlockByArchive(archive);
573
+ return publishedBlock?.block;
513
574
  }
514
575
 
515
576
  /**
@@ -858,13 +919,19 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
858
919
  return [witness.index, witness.path];
859
920
  }
860
921
 
922
+ public async getL1ToL2MessageBlock(l1ToL2Message: Fr): Promise<number | undefined> {
923
+ const messageIndex = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
924
+ return messageIndex ? InboxLeaf.l2BlockFromIndex(messageIndex) : undefined;
925
+ }
926
+
861
927
  /**
862
928
  * Returns whether an L1 to L2 message is synced by archiver and if it's ready to be included in a block.
863
929
  * @param l1ToL2Message - The L1 to L2 message to check.
864
930
  * @returns Whether the message is synced and ready to be included in a block.
865
931
  */
866
932
  public async isL1ToL2MessageSynced(l1ToL2Message: Fr): Promise<boolean> {
867
- return (await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message)) !== undefined;
933
+ const messageIndex = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
934
+ return messageIndex !== undefined;
868
935
  }
869
936
 
870
937
  /**
@@ -1014,6 +1081,24 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
1014
1081
  : this.blockSource.getBlockHeader(blockNumber);
1015
1082
  }
1016
1083
 
1084
+ /**
1085
+ * Get a block header specified by its hash.
1086
+ * @param blockHash - The block hash being requested.
1087
+ * @returns The requested block header.
1088
+ */
1089
+ public async getBlockHeaderByHash(blockHash: Fr): Promise<BlockHeader | undefined> {
1090
+ return await this.blockSource.getBlockHeaderByHash(blockHash);
1091
+ }
1092
+
1093
+ /**
1094
+ * Get a block header specified by its archive root.
1095
+ * @param archive - The archive root being requested.
1096
+ * @returns The requested block header.
1097
+ */
1098
+ public async getBlockHeaderByArchive(archive: Fr): Promise<BlockHeader | undefined> {
1099
+ return await this.blockSource.getBlockHeaderByArchive(archive);
1100
+ }
1101
+
1017
1102
  /**
1018
1103
  * Simulates the public part of a transaction with the current state.
1019
1104
  * @param tx - The transaction to simulate.
@@ -1062,12 +1147,11 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
1062
1147
 
1063
1148
  const merkleTreeFork = await this.worldStateSynchronizer.fork();
1064
1149
  try {
1065
- const processor = publicProcessorFactory.create(
1066
- merkleTreeFork,
1067
- newGlobalVariables,
1150
+ const processor = publicProcessorFactory.create(merkleTreeFork, newGlobalVariables, {
1068
1151
  skipFeeEnforcement,
1069
- /*clientInitiatedSimulation*/ true,
1070
- );
1152
+ clientInitiatedSimulation: true,
1153
+ maxDebugLogMemoryReads: this.config.rpcSimulatePublicMaxDebugLogMemoryReads,
1154
+ });
1071
1155
 
1072
1156
  // REFACTOR: Consider merging ProcessReturnValues into ProcessedTx
1073
1157
  const [processedTxs, failedTxs, _usedTxs, returns] = await processor.process([tx]);
@@ -1122,12 +1206,14 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
1122
1206
 
1123
1207
  public async setConfig(config: Partial<AztecNodeAdminConfig>): Promise<void> {
1124
1208
  const newConfig = { ...this.config, ...config };
1125
- this.sequencer?.updateSequencerConfig(config);
1209
+ this.sequencer?.updateConfig(config);
1126
1210
  this.slasherClient?.updateConfig(config);
1127
1211
  this.validatorsSentinel?.updateConfig(config);
1128
- // this.blockBuilder.updateConfig(config); // TODO: Spyros has a PR to add the builder to `this`, so we can do this
1129
1212
  await this.p2pClient.updateP2PConfig(config);
1130
-
1213
+ const archiver = this.blockSource as Archiver;
1214
+ if ('updateConfig' in archiver) {
1215
+ archiver.updateConfig(config);
1216
+ }
1131
1217
  if (newConfig.realProofs !== this.config.realProofs) {
1132
1218
  this.proofVerifier = config.realProofs ? await BBCircuitVerifier.new(newConfig) : new TestCircuitVerifier();
1133
1219
  }
@@ -2,6 +2,7 @@ import { type ConfigMappingsType, booleanConfigHelper, numberConfigHelper } from
2
2
 
3
3
  export type SentinelConfig = {
4
4
  sentinelHistoryLengthInEpochs: number;
5
+ sentinelHistoricProvenPerformanceLengthInEpochs: number;
5
6
  sentinelEnabled: boolean;
6
7
  };
7
8
 
@@ -11,6 +12,23 @@ export const sentinelConfigMappings: ConfigMappingsType<SentinelConfig> = {
11
12
  env: 'SENTINEL_HISTORY_LENGTH_IN_EPOCHS',
12
13
  ...numberConfigHelper(24),
13
14
  },
15
+ /**
16
+ * The number of L2 epochs kept of proven performance history for each validator.
17
+ * This value must be large enough so that we have proven performance for every validator
18
+ * for at least slashInactivityConsecutiveEpochThreshold. Assuming this value is 3,
19
+ * and the committee size is 48, and we have 10k validators, then we pick 48 out of 10k each draw.
20
+ * For any fixed element, per-draw prob = 48/10000 = 0.0048.
21
+ * After n draws, count ~ Binomial(n, 0.0048). We want P(X >= 3).
22
+ * Results (exact binomial):
23
+ * - 90% chance: n = 1108
24
+ * - 95% chance: n = 1310
25
+ * - 99% chance: n = 1749
26
+ */
27
+ sentinelHistoricProvenPerformanceLengthInEpochs: {
28
+ description: 'The number of L2 epochs kept of proven performance history for each validator.',
29
+ env: 'SENTINEL_HISTORIC_PROVEN_PERFORMANCE_LENGTH_IN_EPOCHS',
30
+ ...numberConfigHelper(2000),
31
+ },
14
32
  sentinelEnabled: {
15
33
  description: 'Whether the sentinel is enabled or not.',
16
34
  env: 'SENTINEL_ENABLED',
@@ -27,6 +27,10 @@ export async function createSentinel(
27
27
  createLogger('node:sentinel:lmdb'),
28
28
  );
29
29
  const storeHistoryLength = config.sentinelHistoryLengthInEpochs * epochCache.getL1Constants().epochDuration;
30
- const sentinelStore = new SentinelStore(kvStore, { historyLength: storeHistoryLength });
30
+ const storeHistoricProvenPerformanceLength = config.sentinelHistoricProvenPerformanceLengthInEpochs;
31
+ const sentinelStore = new SentinelStore(kvStore, {
32
+ historyLength: storeHistoryLength,
33
+ historicProvenPerformanceLength: storeHistoricProvenPerformanceLength,
34
+ });
31
35
  return new Sentinel(epochCache, archiver, p2p, sentinelStore, config, logger);
32
36
  }
@@ -1,20 +1,26 @@
1
1
  import type { EpochCache } from '@aztec/epoch-cache';
2
- import { countWhile, filterAsync } from '@aztec/foundation/collection';
2
+ import { countWhile, filterAsync, fromEntries, getEntries, mapValues } from '@aztec/foundation/collection';
3
3
  import { EthAddress } from '@aztec/foundation/eth-address';
4
4
  import { createLogger } from '@aztec/foundation/log';
5
5
  import { RunningPromise } from '@aztec/foundation/running-promise';
6
6
  import { L2TipsMemoryStore, type L2TipsStore } from '@aztec/kv-store/stores';
7
7
  import type { P2PClient } from '@aztec/p2p';
8
- import { OffenseType, WANT_TO_SLASH_EVENT, type Watcher, type WatcherEmitter } from '@aztec/slasher';
8
+ import {
9
+ OffenseType,
10
+ WANT_TO_SLASH_EVENT,
11
+ type WantToSlashArgs,
12
+ type Watcher,
13
+ type WatcherEmitter,
14
+ } from '@aztec/slasher';
9
15
  import type { SlasherConfig } from '@aztec/slasher/config';
10
16
  import {
11
17
  type L2BlockSource,
12
18
  L2BlockStream,
13
19
  type L2BlockStreamEvent,
14
20
  type L2BlockStreamEventHandler,
15
- getAttestationsFromPublishedL2Block,
21
+ getAttestationInfoFromPublishedL2Block,
16
22
  } from '@aztec/stdlib/block';
17
- import { getEpochAtSlot, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers';
23
+ import { getEpochAtSlot, getSlotRangeForEpoch, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers';
18
24
  import type {
19
25
  SingleValidatorStats,
20
26
  ValidatorStats,
@@ -85,7 +91,9 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme
85
91
  this.slotNumberToBlock.set(block.block.header.getSlot(), {
86
92
  blockNumber: block.block.number,
87
93
  archive: block.block.archive.root.toString(),
88
- attestors: getAttestationsFromPublishedL2Block(block).map(att => att.getSender()),
94
+ attestors: getAttestationInfoFromPublishedL2Block(block)
95
+ .filter(a => a.status === 'recovered-from-signature')
96
+ .map(a => a.address!),
89
97
  });
90
98
  }
91
99
 
@@ -115,53 +123,38 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme
115
123
  return;
116
124
  }
117
125
 
118
- const epoch = getEpochAtSlot(block.header.getSlot(), await this.archiver.getL1Constants());
126
+ // TODO(palla/slash): We should only be computing proven performance if this is
127
+ // a full proof epoch and not a partial one, otherwise we'll end up with skewed stats.
128
+ const epoch = getEpochAtSlot(block.header.getSlot(), this.epochCache.getL1Constants());
119
129
  this.logger.debug(`Computing proven performance for epoch ${epoch}`);
120
130
  const performance = await this.computeProvenPerformance(epoch);
121
131
  this.logger.info(`Computed proven performance for epoch ${epoch}`, performance);
122
132
 
123
- await this.updateProvenPerformance(epoch, performance);
133
+ await this.store.updateProvenPerformance(epoch, performance);
124
134
  await this.handleProvenPerformance(epoch, performance);
125
135
  }
126
136
 
127
- protected async computeProvenPerformance(epoch: bigint) {
128
- const headers = await this.archiver.getBlockHeadersForEpoch(epoch);
129
- const provenSlots = headers.map(h => h.getSlot());
130
- const fromSlot = provenSlots[0];
131
- const toSlot = provenSlots[provenSlots.length - 1];
137
+ protected async computeProvenPerformance(epoch: bigint): Promise<ValidatorsEpochPerformance> {
138
+ const [fromSlot, toSlot] = getSlotRangeForEpoch(epoch, this.epochCache.getL1Constants());
132
139
  const { committee } = await this.epochCache.getCommittee(fromSlot);
133
140
  if (!committee) {
134
141
  this.logger.trace(`No committee found for slot ${fromSlot}`);
135
142
  return {};
136
143
  }
137
- const stats = await this.computeStats({ fromSlot, toSlot });
138
- this.logger.debug(`Stats for epoch ${epoch}`, stats);
139
-
140
- const performance: ValidatorsEpochPerformance = {};
141
- for (const validator of Object.keys(stats.stats)) {
142
- let address;
143
- try {
144
- address = EthAddress.fromString(validator);
145
- } catch (e) {
146
- this.logger.error(`Invalid validator address ${validator}`, e);
147
- continue;
148
- }
149
- if (!committee.find(v => v.equals(address))) {
150
- continue;
151
- }
152
- let missed = 0;
153
- for (const history of stats.stats[validator].history) {
154
- if (provenSlots.includes(history.slot) && history.status === 'attestation-missed') {
155
- missed++;
156
- }
157
- }
158
- performance[address.toString()] = { missed, total: provenSlots.length };
159
- }
160
- return performance;
161
- }
162
144
 
163
- protected updateProvenPerformance(epoch: bigint, performance: ValidatorsEpochPerformance) {
164
- return this.store.updateProvenPerformance(epoch, performance);
145
+ const stats = await this.computeStats({ fromSlot, toSlot, validators: committee });
146
+ this.logger.debug(`Stats for epoch ${epoch}`, { ...stats, fromSlot, toSlot, epoch });
147
+
148
+ // Note that we are NOT using the total slots in the epoch as `total` here, since we only
149
+ // compute missed attestations over the blocks that had a proposal in them. So, let's say
150
+ // we have an epoch with 10 slots, but only 5 had a block proposal. A validator that was
151
+ // offline, assuming they were not picked as proposer, will then be reported as having missed
152
+ // 5/5 attestations. If we used the total, they'd be reported as 5/10, which would probably
153
+ // allow them to avoid being slashed.
154
+ return mapValues(stats.stats, stat => ({
155
+ missed: stat.missedAttestations.count + stat.missedProposals.count,
156
+ total: stat.missedAttestations.total + stat.missedProposals.total,
157
+ }));
165
158
  }
166
159
 
167
160
  /**
@@ -199,11 +192,13 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme
199
192
  }
200
193
 
201
194
  protected async handleProvenPerformance(epoch: bigint, performance: ValidatorsEpochPerformance) {
202
- const inactiveValidators = Object.entries(performance)
203
- .filter(([_, { missed, total }]) => {
204
- return missed / total >= this.config.slashInactivityTargetPercentage;
205
- })
206
- .map(([address]) => address as `0x${string}`);
195
+ if (this.config.slashInactivityPenalty === 0n) {
196
+ return;
197
+ }
198
+
199
+ const inactiveValidators = getEntries(performance)
200
+ .filter(([_, { missed, total }]) => missed / total >= this.config.slashInactivityTargetPercentage)
201
+ .map(([address]) => address);
207
202
 
208
203
  this.logger.debug(`Found ${inactiveValidators.length} inactive validators in epoch ${epoch}`, {
209
204
  inactiveValidators,
@@ -216,7 +211,7 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme
216
211
  this.checkPastInactivity(EthAddress.fromString(address), epoch, epochThreshold - 1),
217
212
  );
218
213
 
219
- const args = criminals.map(address => ({
214
+ const args: WantToSlashArgs[] = criminals.map(address => ({
220
215
  validator: EthAddress.fromString(address),
221
216
  amount: this.config.slashInactivityPenalty,
222
217
  offenseType: OffenseType.INACTIVITY,
@@ -224,7 +219,7 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme
224
219
  }));
225
220
 
226
221
  if (criminals.length > 0) {
227
- this.logger.info(
222
+ this.logger.verbose(
228
223
  `Identified ${criminals.length} validators to slash due to inactivity in at least ${epochThreshold} consecutive epochs`,
229
224
  { ...args, epochThreshold },
230
225
  );
@@ -326,15 +321,20 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme
326
321
  // (contains the ones synced from mined blocks, which we may have missed from p2p).
327
322
  const block = this.slotNumberToBlock.get(slot);
328
323
  const p2pAttested = await this.p2p.getAttestationsForSlot(slot, block?.archive);
329
- const attestors = new Set([
330
- ...p2pAttested.map(a => a.getSender().toString()),
331
- ...(block?.attestors.map(a => a.toString()) ?? []),
332
- ]);
324
+ // Filter out attestations with invalid signatures
325
+ const p2pAttestors = p2pAttested.map(a => a.getSender()).filter((s): s is EthAddress => s !== undefined);
326
+ const attestors = new Set(
327
+ [...p2pAttestors.map(a => a.toString()), ...(block?.attestors.map(a => a.toString()) ?? [])].filter(
328
+ addr => proposer.toString() !== addr, // Exclude the proposer from the attestors
329
+ ),
330
+ );
333
331
 
334
- // We assume that there was a block proposal if at least one of the validators attested to it.
332
+ // We assume that there was a block proposal if at least one of the validators (other than the proposer) attested to it.
335
333
  // It could be the case that every single validator failed, and we could differentiate it by having
336
334
  // this node re-execute every block proposal it sees and store it in the attestation pool.
337
335
  // But we'll leave that corner case out to reduce pressure on the node.
336
+ // TODO(palla/slash): This breaks if a given node has more than one validator in the current committee,
337
+ // since they will attest to their own proposal even if it's not re-executable.
338
338
  const blockStatus = block ? 'mined' : attestors.size > 0 ? 'proposed' : 'missed';
339
339
  this.logger.debug(`Block for slot ${slot} was ${blockStatus}`, { ...block, slot });
340
340
 
@@ -378,20 +378,24 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme
378
378
 
379
379
  /** Computes stats to be returned based on stored data. */
380
380
  public async computeStats({
381
- fromSlot: _fromSlot,
382
- toSlot: _toSlot,
383
- }: { fromSlot?: bigint; toSlot?: bigint } = {}): Promise<ValidatorsStats> {
384
- const histories = await this.store.getHistories();
381
+ fromSlot,
382
+ toSlot,
383
+ validators,
384
+ }: { fromSlot?: bigint; toSlot?: bigint; validators?: EthAddress[] } = {}): Promise<ValidatorsStats> {
385
+ const histories = validators
386
+ ? fromEntries(await Promise.all(validators.map(async v => [v.toString(), await this.store.getHistory(v)])))
387
+ : await this.store.getHistories();
388
+
385
389
  const slotNow = this.epochCache.getEpochAndSlotNow().slot;
386
- const fromSlot = _fromSlot ?? (this.lastProcessedSlot ?? slotNow) - BigInt(this.store.getHistoryLength());
387
- const toSlot = _toSlot ?? this.lastProcessedSlot ?? slotNow;
388
- const result: Record<`0x${string}`, ValidatorStats> = {};
389
- for (const [address, history] of Object.entries(histories)) {
390
- const validatorAddress = address as `0x${string}`;
391
- result[validatorAddress] = this.computeStatsForValidator(validatorAddress, history, fromSlot, toSlot);
392
- }
390
+ fromSlot ??= (this.lastProcessedSlot ?? slotNow) - BigInt(this.store.getHistoryLength());
391
+ toSlot ??= this.lastProcessedSlot ?? slotNow;
392
+
393
+ const stats = mapValues(histories, (history, address) =>
394
+ this.computeStatsForValidator(address, history ?? [], fromSlot, toSlot),
395
+ );
396
+
393
397
  return {
394
- stats: result,
398
+ stats,
395
399
  lastProcessedSlot: this.lastProcessedSlot,
396
400
  initialSlot: this.initialSlot,
397
401
  slotWindow: this.store.getHistoryLength(),
@@ -447,30 +451,31 @@ export class Sentinel extends (EventEmitter as new () => WatcherEmitter) impleme
447
451
  ): ValidatorStats {
448
452
  let history = fromSlot ? allHistory.filter(h => h.slot >= fromSlot) : allHistory;
449
453
  history = toSlot ? history.filter(h => h.slot <= toSlot) : history;
454
+ const lastProposal = history.filter(h => h.status === 'block-proposed' || h.status === 'block-mined').at(-1);
455
+ const lastAttestation = history.filter(h => h.status === 'attestation-sent').at(-1);
450
456
  return {
451
457
  address: EthAddress.fromString(address),
452
- lastProposal: this.computeFromSlot(
453
- history.filter(h => h.status === 'block-proposed' || h.status === 'block-mined').at(-1)?.slot,
454
- ),
455
- lastAttestation: this.computeFromSlot(history.filter(h => h.status === 'attestation-sent').at(-1)?.slot),
458
+ lastProposal: this.computeFromSlot(lastProposal?.slot),
459
+ lastAttestation: this.computeFromSlot(lastAttestation?.slot),
456
460
  totalSlots: history.length,
457
- missedProposals: this.computeMissed(history, 'block', 'block-missed'),
458
- missedAttestations: this.computeMissed(history, 'attestation', 'attestation-missed'),
461
+ missedProposals: this.computeMissed(history, 'block', ['block-missed']),
462
+ missedAttestations: this.computeMissed(history, 'attestation', ['attestation-missed']),
459
463
  history,
460
464
  };
461
465
  }
462
466
 
463
467
  protected computeMissed(
464
468
  history: ValidatorStatusHistory,
465
- computeOverPrefix: ValidatorStatusType,
466
- filter: ValidatorStatusInSlot,
469
+ computeOverPrefix: ValidatorStatusType | undefined,
470
+ filter: ValidatorStatusInSlot[],
467
471
  ) {
468
- const relevantHistory = history.filter(h => h.status.startsWith(computeOverPrefix));
469
- const filteredHistory = relevantHistory.filter(h => h.status === filter);
472
+ const relevantHistory = history.filter(h => !computeOverPrefix || h.status.startsWith(computeOverPrefix));
473
+ const filteredHistory = relevantHistory.filter(h => filter.includes(h.status));
470
474
  return {
471
- currentStreak: countWhile([...relevantHistory].reverse(), h => h.status === filter),
475
+ currentStreak: countWhile([...relevantHistory].reverse(), h => filter.includes(h.status)),
472
476
  rate: relevantHistory.length === 0 ? undefined : filteredHistory.length / relevantHistory.length,
473
477
  count: filteredHistory.length,
478
+ total: relevantHistory.length,
474
479
  };
475
480
  }
476
481
 
@@ -19,7 +19,7 @@ export class SentinelStore {
19
19
 
20
20
  constructor(
21
21
  private store: AztecAsyncKVStore,
22
- private config: { historyLength: number },
22
+ private config: { historyLength: number; historicProvenPerformanceLength: number },
23
23
  ) {
24
24
  this.historyMap = store.openMap('sentinel-validator-status');
25
25
  this.provenMap = store.openMap('sentinel-validator-proven');
@@ -29,6 +29,10 @@ export class SentinelStore {
29
29
  return this.config.historyLength;
30
30
  }
31
31
 
32
+ public getHistoricProvenPerformanceLength() {
33
+ return this.config.historicProvenPerformanceLength;
34
+ }
35
+
32
36
  public async updateProvenPerformance(epoch: bigint, performance: ValidatorsEpochPerformance) {
33
37
  await this.store.transactionAsync(async () => {
34
38
  for (const [who, { missed, total }] of Object.entries(performance)) {
@@ -65,8 +69,8 @@ export class SentinelStore {
65
69
  // Since we keep the size small, this is not a big deal.
66
70
  currentPerformance.sort((a, b) => Number(a.epoch - b.epoch));
67
71
 
68
- // keep the most recent `historyLength` entries.
69
- const performanceToKeep = currentPerformance.slice(-this.config.historyLength);
72
+ // keep the most recent `historicProvenPerformanceLength` entries.
73
+ const performanceToKeep = currentPerformance.slice(-this.config.historicProvenPerformanceLength);
70
74
 
71
75
  await this.provenMap.set(who.toString(), this.serializePerformance(performanceToKeep));
72
76
  }