@aztec/aztec-node 0.0.0-test.0 → 0.0.1-commit.001888fc

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. package/dest/aztec-node/config.d.ts +22 -11
  2. package/dest/aztec-node/config.d.ts.map +1 -1
  3. package/dest/aztec-node/config.js +90 -15
  4. package/dest/aztec-node/node_metrics.d.ts +5 -1
  5. package/dest/aztec-node/node_metrics.d.ts.map +1 -1
  6. package/dest/aztec-node/node_metrics.js +20 -6
  7. package/dest/aztec-node/server.d.ts +132 -154
  8. package/dest/aztec-node/server.d.ts.map +1 -1
  9. package/dest/aztec-node/server.js +1292 -371
  10. package/dest/bin/index.d.ts +1 -1
  11. package/dest/bin/index.js +4 -2
  12. package/dest/index.d.ts +1 -2
  13. package/dest/index.d.ts.map +1 -1
  14. package/dest/index.js +0 -1
  15. package/dest/sentinel/config.d.ts +8 -0
  16. package/dest/sentinel/config.d.ts.map +1 -0
  17. package/dest/sentinel/config.js +29 -0
  18. package/dest/sentinel/factory.d.ts +9 -0
  19. package/dest/sentinel/factory.d.ts.map +1 -0
  20. package/dest/sentinel/factory.js +17 -0
  21. package/dest/sentinel/index.d.ts +3 -0
  22. package/dest/sentinel/index.d.ts.map +1 -0
  23. package/dest/sentinel/index.js +1 -0
  24. package/dest/sentinel/sentinel.d.ts +93 -0
  25. package/dest/sentinel/sentinel.d.ts.map +1 -0
  26. package/dest/sentinel/sentinel.js +429 -0
  27. package/dest/sentinel/store.d.ts +35 -0
  28. package/dest/sentinel/store.d.ts.map +1 -0
  29. package/dest/sentinel/store.js +174 -0
  30. package/dest/test/index.d.ts +31 -0
  31. package/dest/test/index.d.ts.map +1 -0
  32. package/dest/test/index.js +1 -0
  33. package/package.json +47 -35
  34. package/src/aztec-node/config.ts +149 -26
  35. package/src/aztec-node/node_metrics.ts +23 -6
  36. package/src/aztec-node/server.ts +1162 -467
  37. package/src/bin/index.ts +4 -2
  38. package/src/index.ts +0 -1
  39. package/src/sentinel/config.ts +37 -0
  40. package/src/sentinel/factory.ts +31 -0
  41. package/src/sentinel/index.ts +8 -0
  42. package/src/sentinel/sentinel.ts +543 -0
  43. package/src/sentinel/store.ts +185 -0
  44. package/src/test/index.ts +32 -0
  45. package/dest/aztec-node/http_rpc_server.d.ts +0 -8
  46. package/dest/aztec-node/http_rpc_server.d.ts.map +0 -1
  47. package/dest/aztec-node/http_rpc_server.js +0 -9
  48. package/src/aztec-node/http_rpc_server.ts +0 -11
@@ -1,39 +1,55 @@
1
- import { createArchiver } from '@aztec/archiver';
2
- import { BBCircuitVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
3
- import { type BlobSinkClientInterface, createBlobSinkClient } from '@aztec/blob-sink/client';
4
- import {
5
- type ARCHIVE_HEIGHT,
6
- INITIAL_L2_BLOCK_NUM,
7
- type L1_TO_L2_MSG_TREE_HEIGHT,
8
- type NOTE_HASH_TREE_HEIGHT,
9
- type NULLIFIER_TREE_HEIGHT,
10
- type PUBLIC_DATA_TREE_HEIGHT,
11
- REGISTERER_CONTRACT_ADDRESS,
12
- } from '@aztec/constants';
13
- import { EpochCache } from '@aztec/epoch-cache';
14
- import { type L1ContractAddresses, createEthereumChain } from '@aztec/ethereum';
15
- import { compactArray } from '@aztec/foundation/collection';
1
+ import { Archiver, createArchiver } from '@aztec/archiver';
2
+ import { BBCircuitVerifier, QueuedIVCVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
3
+ import { type BlobClientInterface, createBlobClientWithFileStores } from '@aztec/blob-client/client';
4
+ import { Blob } from '@aztec/blob-lib';
5
+ import { ARCHIVE_HEIGHT, type L1_TO_L2_MSG_TREE_HEIGHT, type NOTE_HASH_TREE_HEIGHT } from '@aztec/constants';
6
+ import { EpochCache, type EpochCacheInterface } from '@aztec/epoch-cache';
7
+ import { createEthereumChain } from '@aztec/ethereum/chain';
8
+ import { getPublicClient, makeL1HttpTransport } from '@aztec/ethereum/client';
9
+ import { RegistryContract, RollupContract } from '@aztec/ethereum/contracts';
10
+ import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
11
+ import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types';
12
+ import { chunkBy, compactArray, pick, unique } from '@aztec/foundation/collection';
13
+ import { Fr } from '@aztec/foundation/curves/bn254';
16
14
  import { EthAddress } from '@aztec/foundation/eth-address';
17
- import { Fr } from '@aztec/foundation/fields';
15
+ import { BadRequestError } from '@aztec/foundation/json-rpc';
18
16
  import { type Logger, createLogger } from '@aztec/foundation/log';
17
+ import { count } from '@aztec/foundation/string';
19
18
  import { DateProvider, Timer } from '@aztec/foundation/timer';
20
- import { SiblingPath } from '@aztec/foundation/trees';
21
- import type { AztecKVStore } from '@aztec/kv-store';
22
- import { openTmpStore } from '@aztec/kv-store/lmdb';
23
- import { SHA256Trunc, StandardTree, UnbalancedTree } from '@aztec/merkle-tree';
24
- import { type P2P, createP2PClient } from '@aztec/p2p';
25
- import { ProtocolContractAddress } from '@aztec/protocol-contracts';
19
+ import { MembershipWitness, SiblingPath } from '@aztec/foundation/trees';
20
+ import { type KeyStore, KeystoreManager, loadKeystores, mergeKeystores } from '@aztec/node-keystore';
21
+ import { trySnapshotSync, uploadSnapshot } from '@aztec/node-lib/actions';
22
+ import { createForwarderL1TxUtilsFromSigners, createL1TxUtilsFromSigners } from '@aztec/node-lib/factories';
26
23
  import {
27
- GlobalVariableBuilder,
28
- SequencerClient,
29
- type SequencerPublisher,
30
- createSlasherClient,
31
- createValidatorForAcceptingTxs,
24
+ type P2P,
25
+ type P2PClientDeps,
26
+ createP2PClient,
27
+ createTxValidatorForAcceptingTxsOverRPC,
32
28
  getDefaultAllowedSetupFunctions,
33
- } from '@aztec/sequencer-client';
29
+ } from '@aztec/p2p';
30
+ import { ProtocolContractAddress } from '@aztec/protocol-contracts';
31
+ import { type ProverNode, type ProverNodeDeps, createProverNode } from '@aztec/prover-node';
32
+ import { createKeyStoreForProver } from '@aztec/prover-node/config';
33
+ import { GlobalVariableBuilder, SequencerClient, type SequencerPublisher } from '@aztec/sequencer-client';
34
34
  import { PublicProcessorFactory } from '@aztec/simulator/server';
35
+ import {
36
+ AttestationsBlockWatcher,
37
+ EpochPruneWatcher,
38
+ type SlasherClientInterface,
39
+ type Watcher,
40
+ createSlasher,
41
+ } from '@aztec/slasher';
42
+ import { CollectionLimitsConfig, PublicSimulatorConfig } from '@aztec/stdlib/avm';
35
43
  import { AztecAddress } from '@aztec/stdlib/aztec-address';
36
- import type { InBlock, L2Block, L2BlockNumber, L2BlockSource, NullifierWithBlockSource } from '@aztec/stdlib/block';
44
+ import {
45
+ type BlockData,
46
+ BlockHash,
47
+ type BlockParameter,
48
+ type DataInBlock,
49
+ L2Block,
50
+ type L2BlockSource,
51
+ } from '@aztec/stdlib/block';
52
+ import type { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
37
53
  import type {
38
54
  ContractClassPublic,
39
55
  ContractDataSource,
@@ -41,34 +57,44 @@ import type {
41
57
  NodeInfo,
42
58
  ProtocolContractAddresses,
43
59
  } from '@aztec/stdlib/contract';
44
- import type { GasFees } from '@aztec/stdlib/gas';
45
- import { computePublicDataTreeLeafSlot, siloNullifier } from '@aztec/stdlib/hash';
46
- import type { AztecNode, GetContractClassLogsResponse, GetPublicLogsResponse } from '@aztec/stdlib/interfaces/client';
60
+ import { GasFees } from '@aztec/stdlib/gas';
61
+ import { computePublicDataTreeLeafSlot } from '@aztec/stdlib/hash';
47
62
  import {
63
+ type AztecNode,
64
+ type AztecNodeAdmin,
65
+ type AztecNodeAdminConfig,
66
+ AztecNodeAdminConfigSchema,
67
+ type GetContractClassLogsResponse,
68
+ type GetPublicLogsResponse,
69
+ } from '@aztec/stdlib/interfaces/client';
70
+ import {
71
+ type AllowedElement,
48
72
  type ClientProtocolCircuitVerifier,
49
73
  type L2LogsSource,
50
- type ProverConfig,
51
- type SequencerConfig,
52
74
  type Service,
53
75
  type WorldStateSyncStatus,
54
76
  type WorldStateSynchronizer,
55
77
  tryStop,
56
78
  } from '@aztec/stdlib/interfaces/server';
57
- import type { LogFilter, PrivateLog, TxScopedL2Log } from '@aztec/stdlib/logs';
58
- import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging';
59
- import { P2PClientType } from '@aztec/stdlib/p2p';
60
- import { MerkleTreeId, NullifierMembershipWitness, PublicDataWitness } from '@aztec/stdlib/trees';
79
+ import type { DebugLogStore, LogFilter, SiloedTag, Tag, TxScopedL2Log } from '@aztec/stdlib/logs';
80
+ import { InMemoryDebugLogStore, NullDebugLogStore } from '@aztec/stdlib/logs';
81
+ import { InboxLeaf, type L1ToL2MessageSource } from '@aztec/stdlib/messaging';
82
+ import type { Offense, SlashPayloadRound } from '@aztec/stdlib/slashing';
61
83
  import type { NullifierLeafPreimage, PublicDataTreeLeaf, PublicDataTreeLeafPreimage } from '@aztec/stdlib/trees';
84
+ import { MerkleTreeId, NullifierMembershipWitness, PublicDataWitness } from '@aztec/stdlib/trees';
62
85
  import {
63
86
  type BlockHeader,
87
+ type GlobalVariableBuilder as GlobalVariableBuilderInterface,
88
+ type IndexedTxEffect,
64
89
  PublicSimulationOutput,
65
90
  Tx,
66
- TxEffect,
67
91
  type TxHash,
68
92
  TxReceipt,
69
93
  TxStatus,
70
94
  type TxValidationResult,
71
95
  } from '@aztec/stdlib/tx';
96
+ import { getPackageVersion } from '@aztec/stdlib/update-checker';
97
+ import type { SingleValidatorStats, ValidatorsStats } from '@aztec/stdlib/validators';
72
98
  import {
73
99
  Attributes,
74
100
  type TelemetryClient,
@@ -77,18 +103,32 @@ import {
77
103
  getTelemetryClient,
78
104
  trackSpan,
79
105
  } from '@aztec/telemetry-client';
80
- import { createValidatorClient } from '@aztec/validator-client';
106
+ import {
107
+ FullNodeCheckpointsBuilder as CheckpointsBuilder,
108
+ FullNodeCheckpointsBuilder,
109
+ NodeKeystoreAdapter,
110
+ ValidatorClient,
111
+ createBlockProposalHandler,
112
+ createValidatorClient,
113
+ } from '@aztec/validator-client';
81
114
  import { createWorldStateSynchronizer } from '@aztec/world-state';
82
115
 
83
- import { type AztecNodeConfig, getPackageVersion } from './config.js';
116
+ import { createPublicClient } from 'viem';
117
+
118
+ import { createSentinel } from '../sentinel/factory.js';
119
+ import { Sentinel } from '../sentinel/sentinel.js';
120
+ import { type AztecNodeConfig, createKeyStoreForValidator } from './config.js';
84
121
  import { NodeMetrics } from './node_metrics.js';
85
122
 
86
123
  /**
87
124
  * The aztec node.
88
125
  */
89
- export class AztecNodeService implements AztecNode, Traceable {
90
- private packageVersion: string;
126
+ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
91
127
  private metrics: NodeMetrics;
128
+ private initialHeaderHashPromise: Promise<BlockHash> | undefined = undefined;
129
+
130
+ // Prevent two snapshot operations to happen simultaneously
131
+ private isUploadingSnapshot = false;
92
132
 
93
133
  public readonly tracer: Tracer;
94
134
 
@@ -99,22 +139,37 @@ export class AztecNodeService implements AztecNode, Traceable {
99
139
  protected readonly logsSource: L2LogsSource,
100
140
  protected readonly contractDataSource: ContractDataSource,
101
141
  protected readonly l1ToL2MessageSource: L1ToL2MessageSource,
102
- protected readonly nullifierSource: NullifierWithBlockSource,
103
142
  protected readonly worldStateSynchronizer: WorldStateSynchronizer,
104
143
  protected readonly sequencer: SequencerClient | undefined,
144
+ protected readonly proverNode: ProverNode | undefined,
145
+ protected readonly slasherClient: SlasherClientInterface | undefined,
146
+ protected readonly validatorsSentinel: Sentinel | undefined,
147
+ protected readonly epochPruneWatcher: EpochPruneWatcher | undefined,
105
148
  protected readonly l1ChainId: number,
106
149
  protected readonly version: number,
107
- protected readonly globalVariableBuilder: GlobalVariableBuilder,
150
+ protected readonly globalVariableBuilder: GlobalVariableBuilderInterface,
151
+ protected readonly epochCache: EpochCacheInterface,
152
+ protected readonly packageVersion: string,
108
153
  private proofVerifier: ClientProtocolCircuitVerifier,
109
154
  private telemetry: TelemetryClient = getTelemetryClient(),
110
155
  private log = createLogger('node'),
156
+ private blobClient?: BlobClientInterface,
157
+ private validatorClient?: ValidatorClient,
158
+ private keyStoreManager?: KeystoreManager,
159
+ private debugLogStore: DebugLogStore = new NullDebugLogStore(),
111
160
  ) {
112
- this.packageVersion = getPackageVersion();
113
161
  this.metrics = new NodeMetrics(telemetry, 'AztecNodeService');
114
162
  this.tracer = telemetry.getTracer('AztecNodeService');
115
163
 
116
164
  this.log.info(`Aztec Node version: ${this.packageVersion}`);
117
165
  this.log.info(`Aztec Node started on chain 0x${l1ChainId.toString(16)}`, config.l1Contracts);
166
+
167
+ // A defensive check that protects us against introducing a bug in the complex `createAndSync` function. We must
168
+ // never have debugLogStore enabled when not in test mode because then we would be accumulating debug logs in
169
+ // memory which could be a DoS vector on the sequencer (since no fees are paid for debug logs).
170
+ if (debugLogStore.isEnabled && config.realProofs) {
171
+ throw new Error('debugLogStore should never be enabled when realProofs are set');
172
+ }
118
173
  }
119
174
 
120
175
  public async getWorldStateSyncStatus(): Promise<WorldStateSyncStatus> {
@@ -132,31 +187,117 @@ export class AztecNodeService implements AztecNode, Traceable {
132
187
  * @returns - A fully synced Aztec Node for use in development/testing.
133
188
  */
134
189
  public static async createAndSync(
135
- config: AztecNodeConfig,
190
+ inputConfig: AztecNodeConfig,
136
191
  deps: {
137
192
  telemetry?: TelemetryClient;
138
193
  logger?: Logger;
139
194
  publisher?: SequencerPublisher;
140
195
  dateProvider?: DateProvider;
141
- blobSinkClient?: BlobSinkClientInterface;
196
+ p2pClientDeps?: P2PClientDeps;
197
+ proverNodeDeps?: Partial<ProverNodeDeps>;
142
198
  } = {},
143
199
  options: {
144
200
  prefilledPublicData?: PublicDataTreeLeaf[];
201
+ dontStartSequencer?: boolean;
202
+ dontStartProverNode?: boolean;
145
203
  } = {},
146
204
  ): Promise<AztecNodeService> {
147
- const telemetry = deps.telemetry ?? getTelemetryClient();
205
+ const config = { ...inputConfig }; // Copy the config so we dont mutate the input object
148
206
  const log = deps.logger ?? createLogger('node');
207
+ const packageVersion = getPackageVersion() ?? '';
208
+ const telemetry = deps.telemetry ?? getTelemetryClient();
149
209
  const dateProvider = deps.dateProvider ?? new DateProvider();
150
- const blobSinkClient = deps.blobSinkClient ?? createBlobSinkClient(config);
151
210
  const ethereumChain = createEthereumChain(config.l1RpcUrls, config.l1ChainId);
152
- //validate that the actual chain id matches that specified in configuration
211
+
212
+ // Build a key store from file if given or from environment otherwise.
213
+ // We keep the raw KeyStore available so we can merge with prover keys if enableProverNode is set.
214
+ let keyStoreManager: KeystoreManager | undefined;
215
+ const keyStoreProvided = config.keyStoreDirectory !== undefined && config.keyStoreDirectory.length > 0;
216
+ if (keyStoreProvided) {
217
+ const keyStores = loadKeystores(config.keyStoreDirectory!);
218
+ keyStoreManager = new KeystoreManager(mergeKeystores(keyStores));
219
+ } else {
220
+ const rawKeyStores: KeyStore[] = [];
221
+ const validatorKeyStore = createKeyStoreForValidator(config);
222
+ if (validatorKeyStore) {
223
+ rawKeyStores.push(validatorKeyStore);
224
+ }
225
+ if (config.enableProverNode) {
226
+ const proverKeyStore = createKeyStoreForProver(config);
227
+ if (proverKeyStore) {
228
+ rawKeyStores.push(proverKeyStore);
229
+ }
230
+ }
231
+ if (rawKeyStores.length > 0) {
232
+ keyStoreManager = new KeystoreManager(
233
+ rawKeyStores.length === 1 ? rawKeyStores[0] : mergeKeystores(rawKeyStores),
234
+ );
235
+ }
236
+ }
237
+
238
+ await keyStoreManager?.validateSigners();
239
+
240
+ // If we are a validator, verify our configuration before doing too much more.
241
+ if (!config.disableValidator) {
242
+ if (keyStoreManager === undefined) {
243
+ throw new Error('Failed to create key store, a requirement for running a validator');
244
+ }
245
+ if (!keyStoreProvided && process.env.NODE_ENV !== 'test') {
246
+ log.warn("Keystore created from env: it's recommended to use a file-based key store for production");
247
+ }
248
+ ValidatorClient.validateKeyStoreConfiguration(keyStoreManager, log);
249
+ }
250
+
251
+ // validate that the actual chain id matches that specified in configuration
153
252
  if (config.l1ChainId !== ethereumChain.chainInfo.id) {
154
253
  throw new Error(
155
254
  `RPC URL configured for chain id ${ethereumChain.chainInfo.id} but expected id ${config.l1ChainId}`,
156
255
  );
157
256
  }
158
257
 
159
- const archiver = await createArchiver(config, blobSinkClient, { blockUntilSync: true }, telemetry);
258
+ const publicClient = createPublicClient({
259
+ chain: ethereumChain.chainInfo,
260
+ transport: makeL1HttpTransport(config.l1RpcUrls, { timeout: config.l1HttpTimeoutMS }),
261
+ pollingInterval: config.viemPollingIntervalMS,
262
+ });
263
+
264
+ const l1ContractsAddresses = await RegistryContract.collectAddresses(
265
+ publicClient,
266
+ config.l1Contracts.registryAddress,
267
+ config.rollupVersion ?? 'canonical',
268
+ );
269
+
270
+ // Overwrite the passed in vars.
271
+ config.l1Contracts = { ...config.l1Contracts, ...l1ContractsAddresses };
272
+
273
+ const rollupContract = new RollupContract(publicClient, config.l1Contracts.rollupAddress.toString());
274
+ const [l1GenesisTime, slotDuration, rollupVersionFromRollup, rollupManaLimit] = await Promise.all([
275
+ rollupContract.getL1GenesisTime(),
276
+ rollupContract.getSlotDuration(),
277
+ rollupContract.getVersion(),
278
+ rollupContract.getManaLimit().then(Number),
279
+ ] as const);
280
+
281
+ config.rollupVersion ??= Number(rollupVersionFromRollup);
282
+
283
+ if (config.rollupVersion !== Number(rollupVersionFromRollup)) {
284
+ log.warn(
285
+ `Registry looked up and returned a rollup with version (${config.rollupVersion}), but this does not match with version detected from the rollup directly: (${rollupVersionFromRollup}).`,
286
+ );
287
+ }
288
+
289
+ const blobClient = await createBlobClientWithFileStores(config, log.createChild('blob-client'));
290
+
291
+ // attempt snapshot sync if possible
292
+ await trySnapshotSync(config, log);
293
+
294
+ const epochCache = await EpochCache.create(config.l1Contracts.rollupAddress, config, { dateProvider });
295
+
296
+ const archiver = await createArchiver(
297
+ config,
298
+ { blobClient, epochCache, telemetry, dateProvider },
299
+ { blockUntilSync: !config.skipArchiverInitialSync },
300
+ );
160
301
 
161
302
  // now create the merkle trees and the world state synchronizer
162
303
  const worldStateSynchronizer = await createWorldStateSynchronizer(
@@ -165,66 +306,285 @@ export class AztecNodeService implements AztecNode, Traceable {
165
306
  options.prefilledPublicData,
166
307
  telemetry,
167
308
  );
168
- const proofVerifier = config.realProofs ? await BBCircuitVerifier.new(config) : new TestCircuitVerifier();
309
+ const circuitVerifier =
310
+ config.realProofs || config.debugForceTxProofVerification
311
+ ? await BBCircuitVerifier.new(config)
312
+ : new TestCircuitVerifier(config.proverTestVerificationDelayMs);
313
+
314
+ let debugLogStore: DebugLogStore;
169
315
  if (!config.realProofs) {
170
316
  log.warn(`Aztec node is accepting fake proofs`);
317
+
318
+ debugLogStore = new InMemoryDebugLogStore();
319
+ log.info(
320
+ 'Aztec node started in test mode (realProofs set to false) hence debug logs from public functions will be collected and served',
321
+ );
322
+ } else {
323
+ debugLogStore = new NullDebugLogStore();
171
324
  }
172
325
 
173
- const epochCache = await EpochCache.create(config.l1Contracts.rollupAddress, config, { dateProvider });
326
+ const proofVerifier = new QueuedIVCVerifier(config, circuitVerifier);
327
+
328
+ const proverOnly = config.enableProverNode && config.disableValidator;
329
+ if (proverOnly) {
330
+ log.info('Starting in prover-only mode: skipping validator, sequencer, sentinel, and slasher subsystems');
331
+ }
174
332
 
175
333
  // create the tx pool and the p2p client, which will need the l2 block source
176
334
  const p2pClient = await createP2PClient(
177
- P2PClientType.Full,
178
335
  config,
179
336
  archiver,
180
337
  proofVerifier,
181
338
  worldStateSynchronizer,
182
339
  epochCache,
340
+ packageVersion,
341
+ dateProvider,
183
342
  telemetry,
343
+ deps.p2pClientDeps,
184
344
  );
185
345
 
186
- const slasherClient = createSlasherClient(config, archiver, telemetry);
346
+ // We'll accumulate sentinel watchers here
347
+ const watchers: Watcher[] = [];
348
+
349
+ // Create FullNodeCheckpointsBuilder for block proposal handling and tx validation.
350
+ // Override maxTxsPerCheckpoint with the validator-specific limit if set.
351
+ const validatorCheckpointsBuilder = new FullNodeCheckpointsBuilder(
352
+ {
353
+ ...config,
354
+ l1GenesisTime,
355
+ slotDuration: Number(slotDuration),
356
+ rollupManaLimit,
357
+ maxTxsPerCheckpoint: config.validateMaxTxsPerCheckpoint,
358
+ },
359
+ worldStateSynchronizer,
360
+ archiver,
361
+ dateProvider,
362
+ telemetry,
363
+ );
364
+
365
+ let validatorClient: ValidatorClient | undefined;
366
+
367
+ if (!proverOnly) {
368
+ // Create validator client if required
369
+ validatorClient = await createValidatorClient(config, {
370
+ checkpointsBuilder: validatorCheckpointsBuilder,
371
+ worldState: worldStateSynchronizer,
372
+ p2pClient,
373
+ telemetry,
374
+ dateProvider,
375
+ epochCache,
376
+ blockSource: archiver,
377
+ l1ToL2MessageSource: archiver,
378
+ keyStoreManager,
379
+ blobClient,
380
+ });
381
+
382
+ // If we have a validator client, register it as a source of offenses for the slasher,
383
+ // and have it register callbacks on the p2p client *before* we start it, otherwise messages
384
+ // like attestations or auths will fail.
385
+ if (validatorClient) {
386
+ watchers.push(validatorClient);
387
+ if (!options.dontStartSequencer) {
388
+ await validatorClient.registerHandlers();
389
+ }
390
+ }
391
+ }
187
392
 
188
- // start both and wait for them to sync from the block source
189
- await Promise.all([p2pClient.start(), worldStateSynchronizer.start(), slasherClient.start()]);
190
- log.verbose(`All Aztec Node subsystems synced`);
393
+ // If there's no validator client, create a BlockProposalHandler to handle block proposals
394
+ // for monitoring or reexecution. Reexecution (default) allows us to follow the pending chain,
395
+ // while non-reexecution is used for validating the proposals and collecting their txs.
396
+ if (!validatorClient) {
397
+ const reexecute = !!config.alwaysReexecuteBlockProposals;
398
+ log.info(`Setting up block proposal handler` + (reexecute ? ' with reexecution of proposals' : ''));
399
+ createBlockProposalHandler(config, {
400
+ checkpointsBuilder: validatorCheckpointsBuilder,
401
+ worldState: worldStateSynchronizer,
402
+ epochCache,
403
+ blockSource: archiver,
404
+ l1ToL2MessageSource: archiver,
405
+ p2pClient,
406
+ dateProvider,
407
+ telemetry,
408
+ }).register(p2pClient, reexecute);
409
+ }
191
410
 
192
- const validatorClient = createValidatorClient(config, { p2pClient, telemetry, dateProvider, epochCache });
411
+ // Start world state and wait for it to sync to the archiver.
412
+ await worldStateSynchronizer.start();
193
413
 
194
- // now create the sequencer
195
- const sequencer = config.disableValidator
196
- ? undefined
197
- : await SequencerClient.new(config, {
198
- ...deps,
199
- validatorClient,
200
- p2pClient,
201
- worldStateSynchronizer,
202
- slasherClient,
203
- contractDataSource: archiver,
204
- l2BlockSource: archiver,
205
- l1ToL2MessageSource: archiver,
206
- telemetry,
207
- dateProvider,
208
- blobSinkClient,
209
- });
210
-
211
- return new AztecNodeService(
414
+ // Start p2p. Note that it depends on world state to be running.
415
+ await p2pClient.start();
416
+
417
+ let validatorsSentinel: Awaited<ReturnType<typeof createSentinel>> | undefined;
418
+ let epochPruneWatcher: EpochPruneWatcher | undefined;
419
+ let attestationsBlockWatcher: AttestationsBlockWatcher | undefined;
420
+
421
+ if (!proverOnly) {
422
+ validatorsSentinel = await createSentinel(epochCache, archiver, p2pClient, config);
423
+ if (validatorsSentinel && config.slashInactivityPenalty > 0n) {
424
+ watchers.push(validatorsSentinel);
425
+ }
426
+
427
+ if (config.slashPrunePenalty > 0n || config.slashDataWithholdingPenalty > 0n) {
428
+ epochPruneWatcher = new EpochPruneWatcher(
429
+ archiver,
430
+ archiver,
431
+ epochCache,
432
+ p2pClient.getTxProvider(),
433
+ validatorCheckpointsBuilder,
434
+ config,
435
+ );
436
+ watchers.push(epochPruneWatcher);
437
+ }
438
+
439
+ // We assume we want to slash for invalid attestations unless all max penalties are set to 0
440
+ if (config.slashProposeInvalidAttestationsPenalty > 0n || config.slashAttestDescendantOfInvalidPenalty > 0n) {
441
+ attestationsBlockWatcher = new AttestationsBlockWatcher(archiver, epochCache, config);
442
+ watchers.push(attestationsBlockWatcher);
443
+ }
444
+ }
445
+
446
+ // Start p2p-related services once the archiver has completed sync
447
+ void archiver
448
+ .waitForInitialSync()
449
+ .then(async () => {
450
+ await p2pClient.start();
451
+ await validatorsSentinel?.start();
452
+ await epochPruneWatcher?.start();
453
+ await attestationsBlockWatcher?.start();
454
+ log.info(`All p2p services started`);
455
+ })
456
+ .catch(err => log.error('Failed to start p2p services after archiver sync', err));
457
+
458
+ // Validator enabled, create/start relevant service
459
+ let sequencer: SequencerClient | undefined;
460
+ let slasherClient: SlasherClientInterface | undefined;
461
+ if (!config.disableValidator && validatorClient) {
462
+ // We create a slasher only if we have a sequencer, since all slashing actions go through the sequencer publisher
463
+ // as they are executed when the node is selected as proposer.
464
+ const validatorAddresses = keyStoreManager
465
+ ? NodeKeystoreAdapter.fromKeyStoreManager(keyStoreManager).getAddresses()
466
+ : [];
467
+
468
+ slasherClient = await createSlasher(
469
+ config,
470
+ config.l1Contracts,
471
+ getPublicClient(config),
472
+ watchers,
473
+ dateProvider,
474
+ epochCache,
475
+ validatorAddresses,
476
+ undefined, // logger
477
+ );
478
+ await slasherClient.start();
479
+
480
+ const l1TxUtils = config.sequencerPublisherForwarderAddress
481
+ ? await createForwarderL1TxUtilsFromSigners(
482
+ publicClient,
483
+ keyStoreManager!.createAllValidatorPublisherSigners(),
484
+ config.sequencerPublisherForwarderAddress,
485
+ { ...config, scope: 'sequencer' },
486
+ { telemetry, logger: log.createChild('l1-tx-utils'), dateProvider, kzg: Blob.getViemKzgInstance() },
487
+ )
488
+ : await createL1TxUtilsFromSigners(
489
+ publicClient,
490
+ keyStoreManager!.createAllValidatorPublisherSigners(),
491
+ { ...config, scope: 'sequencer' },
492
+ { telemetry, logger: log.createChild('l1-tx-utils'), dateProvider, kzg: Blob.getViemKzgInstance() },
493
+ );
494
+
495
+ // Create and start the sequencer client
496
+ const checkpointsBuilder = new CheckpointsBuilder(
497
+ { ...config, l1GenesisTime, slotDuration: Number(slotDuration), rollupManaLimit },
498
+ worldStateSynchronizer,
499
+ archiver,
500
+ dateProvider,
501
+ telemetry,
502
+ debugLogStore,
503
+ );
504
+
505
+ sequencer = await SequencerClient.new(config, {
506
+ ...deps,
507
+ epochCache,
508
+ l1TxUtils,
509
+ validatorClient,
510
+ p2pClient,
511
+ worldStateSynchronizer,
512
+ slasherClient,
513
+ checkpointsBuilder,
514
+ l2BlockSource: archiver,
515
+ l1ToL2MessageSource: archiver,
516
+ telemetry,
517
+ dateProvider,
518
+ blobClient,
519
+ nodeKeyStore: keyStoreManager!,
520
+ });
521
+ }
522
+
523
+ if (!options.dontStartSequencer && sequencer) {
524
+ await sequencer.start();
525
+ log.verbose(`Sequencer started`);
526
+ } else if (sequencer) {
527
+ log.warn(`Sequencer created but not started`);
528
+ }
529
+
530
+ // Create prover node subsystem if enabled
531
+ let proverNode: ProverNode | undefined;
532
+ if (config.enableProverNode) {
533
+ proverNode = await createProverNode(config, {
534
+ ...deps.proverNodeDeps,
535
+ telemetry,
536
+ dateProvider,
537
+ archiver,
538
+ worldStateSynchronizer,
539
+ p2pClient,
540
+ epochCache,
541
+ blobClient,
542
+ keyStoreManager,
543
+ });
544
+
545
+ if (!options.dontStartProverNode) {
546
+ await proverNode.start();
547
+ log.info(`Prover node subsystem started`);
548
+ } else {
549
+ log.info(`Prover node subsystem created but not started`);
550
+ }
551
+ }
552
+
553
+ const globalVariableBuilder = new GlobalVariableBuilder({
554
+ ...config,
555
+ rollupVersion: BigInt(config.rollupVersion),
556
+ l1GenesisTime,
557
+ slotDuration: Number(slotDuration),
558
+ });
559
+
560
+ const node = new AztecNodeService(
212
561
  config,
213
562
  p2pClient,
214
563
  archiver,
215
564
  archiver,
216
565
  archiver,
217
566
  archiver,
218
- archiver,
219
567
  worldStateSynchronizer,
220
568
  sequencer,
569
+ proverNode,
570
+ slasherClient,
571
+ validatorsSentinel,
572
+ epochPruneWatcher,
221
573
  ethereumChain.chainInfo.id,
222
- config.version,
223
- new GlobalVariableBuilder(config),
574
+ config.rollupVersion,
575
+ globalVariableBuilder,
576
+ epochCache,
577
+ packageVersion,
224
578
  proofVerifier,
225
579
  telemetry,
226
580
  log,
581
+ blobClient,
582
+ validatorClient,
583
+ keyStoreManager,
584
+ debugLogStore,
227
585
  );
586
+
587
+ return node;
228
588
  }
229
589
 
230
590
  /**
@@ -235,6 +595,11 @@ export class AztecNodeService implements AztecNode, Traceable {
235
595
  return this.sequencer;
236
596
  }
237
597
 
598
+ /** Returns the prover node subsystem, if enabled. */
599
+ public getProverNode(): ProverNode | undefined {
600
+ return this.proverNode;
601
+ }
602
+
238
603
  public getBlockSource(): L2BlockSource {
239
604
  return this.blockSource;
240
605
  }
@@ -259,6 +624,10 @@ export class AztecNodeService implements AztecNode, Traceable {
259
624
  return Promise.resolve(this.p2pClient.getEnr()?.encodeTxt());
260
625
  }
261
626
 
627
+ public async getAllowedPublicSetup(): Promise<AllowedElement[]> {
628
+ return [...(await getDefaultAllowedSetupFunctions()), ...(this.config.txPublicSetupAllowListExtend ?? [])];
629
+ }
630
+
262
631
  /**
263
632
  * Method to determine if the node is ready to accept transactions.
264
633
  * @returns - Flag indicating the readiness for tx submission.
@@ -268,35 +637,69 @@ export class AztecNodeService implements AztecNode, Traceable {
268
637
  }
269
638
 
270
639
  public async getNodeInfo(): Promise<NodeInfo> {
271
- const [nodeVersion, protocolVersion, chainId, enr, contractAddresses, protocolContractAddresses] =
272
- await Promise.all([
273
- this.getNodeVersion(),
274
- this.getVersion(),
275
- this.getChainId(),
276
- this.getEncodedEnr(),
277
- this.getL1ContractAddresses(),
278
- this.getProtocolContractAddresses(),
279
- ]);
640
+ const [nodeVersion, rollupVersion, chainId, enr, contractAddresses, protocolContractAddresses] = await Promise.all([
641
+ this.getNodeVersion(),
642
+ this.getVersion(),
643
+ this.getChainId(),
644
+ this.getEncodedEnr(),
645
+ this.getL1ContractAddresses(),
646
+ this.getProtocolContractAddresses(),
647
+ ]);
280
648
 
281
649
  const nodeInfo: NodeInfo = {
282
650
  nodeVersion,
283
651
  l1ChainId: chainId,
284
- protocolVersion,
652
+ rollupVersion,
285
653
  enr,
286
654
  l1ContractAddresses: contractAddresses,
287
655
  protocolContractAddresses: protocolContractAddresses,
656
+ realProofs: !!this.config.realProofs,
288
657
  };
289
658
 
290
659
  return nodeInfo;
291
660
  }
292
661
 
293
662
  /**
294
- * Get a block specified by its number.
295
- * @param number - The block number being requested.
663
+ * Get a block specified by its block number, block hash, or 'latest'.
664
+ * @param block - The block parameter (block number, block hash, or 'latest').
665
+ * @returns The requested block.
666
+ */
667
+ public async getBlock(block: BlockParameter): Promise<L2Block | undefined> {
668
+ if (BlockHash.isBlockHash(block)) {
669
+ return this.getBlockByHash(block);
670
+ }
671
+ const blockNumber = block === 'latest' ? await this.getBlockNumber() : (block as BlockNumber);
672
+ if (blockNumber === BlockNumber.ZERO) {
673
+ return this.buildInitialBlock();
674
+ }
675
+ return await this.blockSource.getL2Block(blockNumber);
676
+ }
677
+
678
+ /**
679
+ * Get a block specified by its hash.
680
+ * @param blockHash - The block hash being requested.
681
+ * @returns The requested block.
682
+ */
683
+ public async getBlockByHash(blockHash: BlockHash): Promise<L2Block | undefined> {
684
+ const initialBlockHash = await this.#getInitialHeaderHash();
685
+ if (blockHash.equals(initialBlockHash)) {
686
+ return this.buildInitialBlock();
687
+ }
688
+ return await this.blockSource.getL2BlockByHash(blockHash);
689
+ }
690
+
691
+ private buildInitialBlock(): L2Block {
692
+ const initialHeader = this.worldStateSynchronizer.getCommitted().getInitialHeader();
693
+ return L2Block.empty(initialHeader);
694
+ }
695
+
696
+ /**
697
+ * Get a block specified by its archive root.
698
+ * @param archive - The archive root being requested.
296
699
  * @returns The requested block.
297
700
  */
298
- public async getBlock(number: number): Promise<L2Block | undefined> {
299
- return await this.blockSource.getBlock(number);
701
+ public async getBlockByArchive(archive: Fr): Promise<L2Block | undefined> {
702
+ return await this.blockSource.getL2BlockByArchive(archive);
300
703
  }
301
704
 
302
705
  /**
@@ -305,30 +708,58 @@ export class AztecNodeService implements AztecNode, Traceable {
305
708
  * @param limit - The maximum number of blocks to obtain.
306
709
  * @returns The blocks requested.
307
710
  */
308
- public async getBlocks(from: number, limit: number): Promise<L2Block[]> {
309
- return (await this.blockSource.getBlocks(from, limit)) ?? [];
711
+ public async getBlocks(from: BlockNumber, limit: number): Promise<L2Block[]> {
712
+ return (await this.blockSource.getBlocks(from, BlockNumber(limit))) ?? [];
713
+ }
714
+
715
+ public async getCheckpoints(from: CheckpointNumber, limit: number): Promise<PublishedCheckpoint[]> {
716
+ return (await this.blockSource.getCheckpoints(from, limit)) ?? [];
717
+ }
718
+
719
+ public async getCheckpointedBlocks(from: BlockNumber, limit: number) {
720
+ return (await this.blockSource.getCheckpointedBlocks(from, limit)) ?? [];
721
+ }
722
+
723
+ public getCheckpointsDataForEpoch(epochNumber: EpochNumber) {
724
+ return this.blockSource.getCheckpointsDataForEpoch(epochNumber);
310
725
  }
311
726
 
312
727
  /**
313
- * Method to fetch the current base fees.
314
- * @returns The current base fees.
728
+ * Method to fetch the current min L2 fees.
729
+ * @returns The current min L2 fees.
315
730
  */
316
- public async getCurrentBaseFees(): Promise<GasFees> {
317
- return await this.globalVariableBuilder.getCurrentBaseFees();
731
+ public async getCurrentMinFees(): Promise<GasFees> {
732
+ return await this.globalVariableBuilder.getCurrentMinFees();
733
+ }
734
+
735
+ public async getMaxPriorityFees(): Promise<GasFees> {
736
+ for await (const tx of this.p2pClient.iteratePendingTxs()) {
737
+ return tx.getGasSettings().maxPriorityFeesPerGas;
738
+ }
739
+
740
+ return GasFees.from({ feePerDaGas: 0n, feePerL2Gas: 0n });
318
741
  }
319
742
 
320
743
  /**
321
- * Method to fetch the current block number.
744
+ * Method to fetch the latest block number synchronized by the node.
322
745
  * @returns The block number.
323
746
  */
324
- public async getBlockNumber(): Promise<number> {
747
+ public async getBlockNumber(): Promise<BlockNumber> {
325
748
  return await this.blockSource.getBlockNumber();
326
749
  }
327
750
 
328
- public async getProvenBlockNumber(): Promise<number> {
751
+ public async getProvenBlockNumber(): Promise<BlockNumber> {
329
752
  return await this.blockSource.getProvenBlockNumber();
330
753
  }
331
754
 
755
+ public async getCheckpointedBlockNumber(): Promise<BlockNumber> {
756
+ return await this.blockSource.getCheckpointedL2BlockNumber();
757
+ }
758
+
759
+ public getCheckpointNumber(): Promise<CheckpointNumber> {
760
+ return this.blockSource.getCheckpointNumber();
761
+ }
762
+
332
763
  /**
333
764
  * Method to fetch the version of the package.
334
765
  * @returns The node package version
@@ -353,49 +784,59 @@ export class AztecNodeService implements AztecNode, Traceable {
353
784
  return Promise.resolve(this.l1ChainId);
354
785
  }
355
786
 
356
- public async getContractClass(id: Fr): Promise<ContractClassPublic | undefined> {
357
- const klazz = await this.contractDataSource.getContractClass(id);
358
-
359
- // TODO(#10007): Remove this check. This is needed only because we're manually registering
360
- // some contracts in the archiver so they are available to all nodes (see `registerCommonContracts`
361
- // in `archiver/src/factory.ts`), but we still want clients to send the registration tx in order
362
- // to emit the corresponding nullifier, which is now being checked. Note that this method
363
- // is only called by the PXE to check if a contract is publicly registered.
364
- if (klazz) {
365
- const classNullifier = await siloNullifier(AztecAddress.fromNumber(REGISTERER_CONTRACT_ADDRESS), id);
366
- const worldState = await this.#getWorldState('latest');
367
- const [index] = await worldState.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, [classNullifier.toBuffer()]);
368
- this.log.debug(`Registration nullifier ${classNullifier} for contract class ${id} found at index ${index}`);
369
- if (index === undefined) {
370
- return undefined;
371
- }
372
- }
373
-
374
- return klazz;
787
+ public getContractClass(id: Fr): Promise<ContractClassPublic | undefined> {
788
+ return this.contractDataSource.getContractClass(id);
375
789
  }
376
790
 
377
791
  public getContract(address: AztecAddress): Promise<ContractInstanceWithAddress | undefined> {
378
792
  return this.contractDataSource.getContract(address);
379
793
  }
380
794
 
381
- /**
382
- * Retrieves all private logs from up to `limit` blocks, starting from the block number `from`.
383
- * @param from - The block number from which to begin retrieving logs.
384
- * @param limit - The maximum number of blocks to retrieve logs from.
385
- * @returns An array of private logs from the specified range of blocks.
386
- */
387
- public getPrivateLogs(from: number, limit: number): Promise<PrivateLog[]> {
388
- return this.logsSource.getPrivateLogs(from, limit);
795
+ public async getPrivateLogsByTags(
796
+ tags: SiloedTag[],
797
+ page?: number,
798
+ referenceBlock?: BlockHash,
799
+ ): Promise<TxScopedL2Log[][]> {
800
+ let upToBlockNumber: BlockNumber | undefined;
801
+ if (referenceBlock) {
802
+ const initialBlockHash = await this.#getInitialHeaderHash();
803
+ if (referenceBlock.equals(initialBlockHash)) {
804
+ upToBlockNumber = BlockNumber(0);
805
+ } else {
806
+ const header = await this.blockSource.getBlockHeaderByHash(referenceBlock);
807
+ if (!header) {
808
+ throw new Error(
809
+ `Block ${referenceBlock.toString()} not found in the node. This might indicate a reorg has occurred.`,
810
+ );
811
+ }
812
+ upToBlockNumber = header.globalVariables.blockNumber;
813
+ }
814
+ }
815
+ return this.logsSource.getPrivateLogsByTags(tags, page, upToBlockNumber);
389
816
  }
390
817
 
391
- /**
392
- * Gets all logs that match any of the received tags (i.e. logs with their first field equal to a tag).
393
- * @param tags - The tags to filter the logs by.
394
- * @returns For each received tag, an array of matching logs is returned. An empty array implies no logs match
395
- * that tag.
396
- */
397
- public getLogsByTags(tags: Fr[]): Promise<TxScopedL2Log[][]> {
398
- return this.logsSource.getLogsByTags(tags);
818
+ public async getPublicLogsByTagsFromContract(
819
+ contractAddress: AztecAddress,
820
+ tags: Tag[],
821
+ page?: number,
822
+ referenceBlock?: BlockHash,
823
+ ): Promise<TxScopedL2Log[][]> {
824
+ let upToBlockNumber: BlockNumber | undefined;
825
+ if (referenceBlock) {
826
+ const initialBlockHash = await this.#getInitialHeaderHash();
827
+ if (referenceBlock.equals(initialBlockHash)) {
828
+ upToBlockNumber = BlockNumber(0);
829
+ } else {
830
+ const header = await this.blockSource.getBlockHeaderByHash(referenceBlock);
831
+ if (!header) {
832
+ throw new Error(
833
+ `Block ${referenceBlock.toString()} not found in the node. This might indicate a reorg has occurred.`,
834
+ );
835
+ }
836
+ upToBlockNumber = header.globalVariables.blockNumber;
837
+ }
838
+ }
839
+ return this.logsSource.getPublicLogsByTagsFromContract(contractAddress, tags, page, upToBlockNumber);
399
840
  }
400
841
 
401
842
  /**
@@ -421,43 +862,55 @@ export class AztecNodeService implements AztecNode, Traceable {
421
862
  * @param tx - The transaction to be submitted.
422
863
  */
423
864
  public async sendTx(tx: Tx) {
865
+ await this.#sendTx(tx);
866
+ }
867
+
868
+ async #sendTx(tx: Tx) {
424
869
  const timer = new Timer();
425
- const txHash = (await tx.getTxHash()).toString();
870
+ const txHash = tx.getTxHash().toString();
426
871
 
427
872
  const valid = await this.isValidTx(tx);
428
873
  if (valid.result !== 'valid') {
429
874
  const reason = valid.reason.join(', ');
430
875
  this.metrics.receivedTx(timer.ms(), false);
431
- this.log.warn(`Invalid tx ${txHash}: ${reason}`, { txHash });
432
- // TODO(#10967): Throw when receiving an invalid tx instead of just returning
433
- // throw new Error(`Invalid tx: ${reason}`);
434
- return;
876
+ this.log.warn(`Received invalid tx ${txHash}: ${reason}`, { txHash });
877
+ throw new Error(`Invalid tx: ${reason}`);
435
878
  }
436
879
 
437
880
  await this.p2pClient!.sendTx(tx);
438
- this.metrics.receivedTx(timer.ms(), true);
439
- this.log.info(`Received tx ${txHash}`, { txHash });
881
+ const duration = timer.ms();
882
+ this.metrics.receivedTx(duration, true);
883
+ this.log.info(`Received tx ${txHash} in ${duration}ms`, { txHash });
440
884
  }
441
885
 
442
886
  public async getTxReceipt(txHash: TxHash): Promise<TxReceipt> {
443
- let txReceipt = new TxReceipt(txHash, TxStatus.DROPPED, 'Tx dropped by P2P node.');
444
-
445
- // We first check if the tx is in pending (instead of first checking if it is mined) because if we first check
446
- // for mined and then for pending there could be a race condition where the tx is mined between the two checks
447
- // and we would incorrectly return a TxReceipt with status DROPPED
448
- if ((await this.p2pClient.getTxStatus(txHash)) === 'pending') {
449
- txReceipt = new TxReceipt(txHash, TxStatus.PENDING, '');
450
- }
887
+ // Check the tx pool status first. If the tx is known to the pool (pending or mined), we'll use that
888
+ // as a fallback if we don't find a settled receipt in the archiver.
889
+ const txPoolStatus = await this.p2pClient.getTxStatus(txHash);
890
+ const isKnownToPool = txPoolStatus === 'pending' || txPoolStatus === 'mined';
451
891
 
892
+ // Then get the actual tx from the archiver, which tracks every tx in a mined block.
452
893
  const settledTxReceipt = await this.blockSource.getSettledTxReceipt(txHash);
894
+
895
+ let receipt: TxReceipt;
453
896
  if (settledTxReceipt) {
454
- txReceipt = settledTxReceipt;
897
+ receipt = settledTxReceipt;
898
+ } else if (isKnownToPool) {
899
+ // If the tx is in the pool but not in the archiver, it's pending.
900
+ // This handles race conditions between archiver and p2p, where the archiver
901
+ // has pruned the block in which a tx was mined, but p2p has not caught up yet.
902
+ receipt = new TxReceipt(txHash, TxStatus.PENDING, undefined, undefined);
903
+ } else {
904
+ // Otherwise, if we don't know the tx, we consider it dropped.
905
+ receipt = new TxReceipt(txHash, TxStatus.DROPPED, undefined, 'Tx dropped by P2P node');
455
906
  }
456
907
 
457
- return txReceipt;
908
+ this.debugLogStore.decorateReceiptWithLogs(txHash.toString(), receipt);
909
+
910
+ return receipt;
458
911
  }
459
912
 
460
- public getTxEffect(txHash: TxHash): Promise<InBlock<TxEffect> | undefined> {
913
+ public getTxEffect(txHash: TxHash): Promise<IndexedTxEffect | undefined> {
461
914
  return this.blockSource.getTxEffect(txHash);
462
915
  }
463
916
 
@@ -465,139 +918,176 @@ export class AztecNodeService implements AztecNode, Traceable {
465
918
  * Method to stop the aztec node.
466
919
  */
467
920
  public async stop() {
468
- this.log.info(`Stopping`);
469
- await this.sequencer?.stop();
470
- await this.p2pClient.stop();
471
- await this.worldStateSynchronizer.stop();
921
+ this.log.info(`Stopping Aztec Node`);
922
+ await tryStop(this.validatorsSentinel);
923
+ await tryStop(this.epochPruneWatcher);
924
+ await tryStop(this.slasherClient);
925
+ await tryStop(this.proofVerifier);
926
+ await tryStop(this.sequencer);
927
+ await tryStop(this.proverNode);
928
+ await tryStop(this.p2pClient);
929
+ await tryStop(this.worldStateSynchronizer);
472
930
  await tryStop(this.blockSource);
473
- await this.telemetry.stop();
474
- this.log.info(`Stopped`);
931
+ await tryStop(this.blobClient);
932
+ await tryStop(this.telemetry);
933
+ this.log.info(`Stopped Aztec Node`);
934
+ }
935
+
936
+ /**
937
+ * Returns the blob client used by this node.
938
+ * @internal - Exposed for testing purposes only.
939
+ */
940
+ public getBlobClient(): BlobClientInterface | undefined {
941
+ return this.blobClient;
475
942
  }
476
943
 
477
944
  /**
478
945
  * Method to retrieve pending txs.
946
+ * @param limit - The number of items to returns
947
+ * @param after - The last known pending tx. Used for pagination
479
948
  * @returns - The pending txs.
480
949
  */
481
- public getPendingTxs() {
482
- return this.p2pClient!.getPendingTxs();
950
+ public getPendingTxs(limit?: number, after?: TxHash): Promise<Tx[]> {
951
+ return this.p2pClient!.getPendingTxs(limit, after);
483
952
  }
484
953
 
485
- public async getPendingTxCount() {
486
- const pendingTxs = await this.getPendingTxs();
487
- return pendingTxs.length;
954
+ public getPendingTxCount(): Promise<number> {
955
+ return this.p2pClient!.getPendingTxCount();
488
956
  }
489
957
 
490
958
  /**
491
- * Method to retrieve a single tx from the mempool or unfinalised chain.
959
+ * Method to retrieve a single tx from the mempool or unfinalized chain.
492
960
  * @param txHash - The transaction hash to return.
493
961
  * @returns - The tx if it exists.
494
962
  */
495
- public getTxByHash(txHash: TxHash) {
963
+ public getTxByHash(txHash: TxHash): Promise<Tx | undefined> {
496
964
  return Promise.resolve(this.p2pClient!.getTxByHashFromPool(txHash));
497
965
  }
498
966
 
499
967
  /**
500
- * Method to retrieve txs from the mempool or unfinalised chain.
968
+ * Method to retrieve txs from the mempool or unfinalized chain.
501
969
  * @param txHash - The transaction hash to return.
502
970
  * @returns - The txs if it exists.
503
971
  */
504
- public async getTxsByHash(txHashes: TxHash[]) {
972
+ public async getTxsByHash(txHashes: TxHash[]): Promise<Tx[]> {
505
973
  return compactArray(await Promise.all(txHashes.map(txHash => this.getTxByHash(txHash))));
506
974
  }
507
975
 
508
- /**
509
- * Find the indexes of the given leaves in the given tree.
510
- * @param blockNumber - The block number at which to get the data or 'latest' for latest data
511
- * @param treeId - The tree to search in.
512
- * @param leafValue - The values to search for
513
- * @returns The indexes of the given leaves in the given tree or undefined if not found.
514
- */
515
976
  public async findLeavesIndexes(
516
- blockNumber: L2BlockNumber,
977
+ referenceBlock: BlockParameter,
517
978
  treeId: MerkleTreeId,
518
979
  leafValues: Fr[],
519
- ): Promise<(bigint | undefined)[]> {
520
- const committedDb = await this.#getWorldState(blockNumber);
521
- return await committedDb.findLeafIndices(
980
+ ): Promise<(DataInBlock<bigint> | undefined)[]> {
981
+ const committedDb = await this.getWorldState(referenceBlock);
982
+ const maybeIndices = await committedDb.findLeafIndices(
522
983
  treeId,
523
984
  leafValues.map(x => x.toBuffer()),
524
985
  );
525
- }
986
+ // Filter out undefined values to query block numbers only for found leaves
987
+ const definedIndices = maybeIndices.filter(x => x !== undefined);
988
+
989
+ // Now we find the block numbers for the defined indices
990
+ const blockNumbers = await committedDb.getBlockNumbersForLeafIndices(treeId, definedIndices);
991
+
992
+ // Build a map from leaf index to block number
993
+ const indexToBlockNumber = new Map<bigint, BlockNumber>();
994
+ for (let i = 0; i < definedIndices.length; i++) {
995
+ const blockNumber = blockNumbers[i];
996
+ if (blockNumber === undefined) {
997
+ throw new Error(
998
+ `Block number is undefined for leaf index ${definedIndices[i]} in tree ${MerkleTreeId[treeId]}`,
999
+ );
1000
+ }
1001
+ indexToBlockNumber.set(definedIndices[i], blockNumber);
1002
+ }
526
1003
 
527
- /**
528
- * Find the block numbers of the given leaf indices in the given tree.
529
- * @param blockNumber - The block number at which to get the data or 'latest' for latest data
530
- * @param treeId - The tree to search in.
531
- * @param leafIndices - The values to search for
532
- * @returns The indexes of the given leaves in the given tree or undefined if not found.
533
- */
534
- public async findBlockNumbersForIndexes(
535
- blockNumber: L2BlockNumber,
536
- treeId: MerkleTreeId,
537
- leafIndices: bigint[],
538
- ): Promise<(bigint | undefined)[]> {
539
- const committedDb = await this.#getWorldState(blockNumber);
540
- return await committedDb.getBlockNumbersForLeafIndices(treeId, leafIndices);
541
- }
1004
+ // Get unique block numbers in order to optimize num calls to getLeafValue function.
1005
+ const uniqueBlockNumbers = [...new Set(indexToBlockNumber.values())];
542
1006
 
543
- public async findNullifiersIndexesWithBlock(
544
- blockNumber: L2BlockNumber,
545
- nullifiers: Fr[],
546
- ): Promise<(InBlock<bigint> | undefined)[]> {
547
- if (blockNumber === 'latest') {
548
- blockNumber = await this.getBlockNumber();
1007
+ // Now we obtain the block hashes from the archive tree (block number = leaf index in archive tree).
1008
+ const blockHashes = await Promise.all(
1009
+ uniqueBlockNumbers.map(blockNumber => {
1010
+ return committedDb.getLeafValue(MerkleTreeId.ARCHIVE, BigInt(blockNumber));
1011
+ }),
1012
+ );
1013
+
1014
+ // Build a map from block number to block hash
1015
+ const blockNumberToHash = new Map<BlockNumber, Fr>();
1016
+ for (let i = 0; i < uniqueBlockNumbers.length; i++) {
1017
+ const blockHash = blockHashes[i];
1018
+ if (blockHash === undefined) {
1019
+ throw new Error(`Block hash is undefined for block number ${uniqueBlockNumbers[i]}`);
1020
+ }
1021
+ blockNumberToHash.set(uniqueBlockNumbers[i], blockHash);
549
1022
  }
550
- return this.nullifierSource.findNullifiersIndexesWithBlock(blockNumber, nullifiers);
1023
+
1024
+ // Create DataInBlock objects by combining indices, blockNumbers and blockHashes and return them.
1025
+ return maybeIndices.map(index => {
1026
+ if (index === undefined) {
1027
+ return undefined;
1028
+ }
1029
+ const blockNumber = indexToBlockNumber.get(index);
1030
+ if (blockNumber === undefined) {
1031
+ throw new Error(`Block number not found for leaf index ${index} in tree ${MerkleTreeId[treeId]}`);
1032
+ }
1033
+ const blockHash = blockNumberToHash.get(blockNumber);
1034
+ if (blockHash === undefined) {
1035
+ throw new Error(`Block hash not found for block number ${blockNumber}`);
1036
+ }
1037
+ return {
1038
+ l2BlockNumber: blockNumber,
1039
+ l2BlockHash: new BlockHash(blockHash),
1040
+ data: index,
1041
+ };
1042
+ });
551
1043
  }
552
1044
 
553
- /**
554
- * Returns a sibling path for the given index in the nullifier tree.
555
- * @param blockNumber - The block number at which to get the data.
556
- * @param leafIndex - The index of the leaf for which the sibling path is required.
557
- * @returns The sibling path for the leaf index.
558
- */
559
- public async getNullifierSiblingPath(
560
- blockNumber: L2BlockNumber,
561
- leafIndex: bigint,
562
- ): Promise<SiblingPath<typeof NULLIFIER_TREE_HEIGHT>> {
563
- const committedDb = await this.#getWorldState(blockNumber);
564
- return committedDb.getSiblingPath(MerkleTreeId.NULLIFIER_TREE, leafIndex);
1045
+ public async getBlockHashMembershipWitness(
1046
+ referenceBlock: BlockParameter,
1047
+ blockHash: BlockHash,
1048
+ ): Promise<MembershipWitness<typeof ARCHIVE_HEIGHT> | undefined> {
1049
+ // The Noir circuit checks the archive membership proof against `anchor_block_header.last_archive.root`,
1050
+ // which is the archive tree root BEFORE the anchor block was added (i.e. the state after block N-1).
1051
+ // So we need the world state at block N-1, not block N, to produce a sibling path matching that root.
1052
+ const referenceBlockNumber = await this.resolveBlockNumber(referenceBlock);
1053
+ const committedDb = await this.getWorldState(BlockNumber(referenceBlockNumber - 1));
1054
+ const [pathAndIndex] = await committedDb.findSiblingPaths<MerkleTreeId.ARCHIVE>(MerkleTreeId.ARCHIVE, [blockHash]);
1055
+ return pathAndIndex === undefined
1056
+ ? undefined
1057
+ : MembershipWitness.fromSiblingPath(pathAndIndex.index, pathAndIndex.path);
565
1058
  }
566
1059
 
567
- /**
568
- * Returns a sibling path for the given index in the data tree.
569
- * @param blockNumber - The block number at which to get the data.
570
- * @param leafIndex - The index of the leaf for which the sibling path is required.
571
- * @returns The sibling path for the leaf index.
572
- */
573
- public async getNoteHashSiblingPath(
574
- blockNumber: L2BlockNumber,
575
- leafIndex: bigint,
576
- ): Promise<SiblingPath<typeof NOTE_HASH_TREE_HEIGHT>> {
577
- const committedDb = await this.#getWorldState(blockNumber);
578
- return committedDb.getSiblingPath(MerkleTreeId.NOTE_HASH_TREE, leafIndex);
1060
+ public async getNoteHashMembershipWitness(
1061
+ referenceBlock: BlockParameter,
1062
+ noteHash: Fr,
1063
+ ): Promise<MembershipWitness<typeof NOTE_HASH_TREE_HEIGHT> | undefined> {
1064
+ const committedDb = await this.getWorldState(referenceBlock);
1065
+ const [pathAndIndex] = await committedDb.findSiblingPaths<MerkleTreeId.NOTE_HASH_TREE>(
1066
+ MerkleTreeId.NOTE_HASH_TREE,
1067
+ [noteHash],
1068
+ );
1069
+ return pathAndIndex === undefined
1070
+ ? undefined
1071
+ : MembershipWitness.fromSiblingPath(pathAndIndex.index, pathAndIndex.path);
579
1072
  }
580
1073
 
581
- /**
582
- * Returns the index and a sibling path for a leaf in the committed l1 to l2 data tree.
583
- * @param blockNumber - The block number at which to get the data.
584
- * @param l1ToL2Message - The l1ToL2Message to get the index / sibling path for.
585
- * @returns A tuple of the index and the sibling path of the L1ToL2Message (undefined if not found).
586
- */
587
1074
  public async getL1ToL2MessageMembershipWitness(
588
- blockNumber: L2BlockNumber,
1075
+ referenceBlock: BlockParameter,
589
1076
  l1ToL2Message: Fr,
590
1077
  ): Promise<[bigint, SiblingPath<typeof L1_TO_L2_MSG_TREE_HEIGHT>] | undefined> {
591
- const index = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
592
- if (index === undefined) {
1078
+ const db = await this.getWorldState(referenceBlock);
1079
+ const [witness] = await db.findSiblingPaths(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, [l1ToL2Message]);
1080
+ if (!witness) {
593
1081
  return undefined;
594
1082
  }
595
- const committedDb = await this.#getWorldState(blockNumber);
596
- const siblingPath = await committedDb.getSiblingPath<typeof L1_TO_L2_MSG_TREE_HEIGHT>(
597
- MerkleTreeId.L1_TO_L2_MESSAGE_TREE,
598
- index,
599
- );
600
- return [index, siblingPath];
1083
+
1084
+ // REFACTOR: Return a MembershipWitness object
1085
+ return [witness.index, witness.path];
1086
+ }
1087
+
1088
+ public async getL1ToL2MessageCheckpoint(l1ToL2Message: Fr): Promise<CheckpointNumber | undefined> {
1089
+ const messageIndex = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
1090
+ return messageIndex ? InboxLeaf.checkpointNumberFromIndex(messageIndex) : undefined;
601
1091
  }
602
1092
 
603
1093
  /**
@@ -606,188 +1096,68 @@ export class AztecNodeService implements AztecNode, Traceable {
606
1096
  * @returns Whether the message is synced and ready to be included in a block.
607
1097
  */
608
1098
  public async isL1ToL2MessageSynced(l1ToL2Message: Fr): Promise<boolean> {
609
- return (await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message)) !== undefined;
1099
+ const messageIndex = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
1100
+ return messageIndex !== undefined;
610
1101
  }
611
1102
 
612
1103
  /**
613
- * Returns the index of a l2ToL1Message in a ephemeral l2 to l1 data tree as well as its sibling path.
614
- * @remarks This tree is considered ephemeral because it is created on-demand by: taking all the l2ToL1 messages
615
- * in a single block, and then using them to make a variable depth append-only tree with these messages as leaves.
616
- * The tree is discarded immediately after calculating what we need from it.
617
- * TODO: Handle the case where two messages in the same tx have the same hash.
618
- * @param blockNumber - The block number at which to get the data.
619
- * @param l2ToL1Message - The l2ToL1Message get the index / sibling path for.
620
- * @returns A tuple of the index and the sibling path of the L2ToL1Message.
1104
+ * Returns all the L2 to L1 messages in an epoch.
1105
+ * @param epoch - The epoch at which to get the data.
1106
+ * @returns The L2 to L1 messages (empty array if the epoch is not found).
621
1107
  */
622
- public async getL2ToL1MessageMembershipWitness(
623
- blockNumber: L2BlockNumber,
624
- l2ToL1Message: Fr,
625
- ): Promise<[bigint, SiblingPath<number>]> {
626
- const block = await this.blockSource.getBlock(blockNumber === 'latest' ? await this.getBlockNumber() : blockNumber);
627
-
628
- if (block === undefined) {
629
- throw new Error('Block is not defined');
630
- }
631
-
632
- const l2ToL1Messages = block.body.txEffects.map(txEffect => txEffect.l2ToL1Msgs);
633
-
634
- // Find index of message
635
- let indexOfMsgInSubtree = -1;
636
- const indexOfMsgTx = l2ToL1Messages.findIndex(msgs => {
637
- const idx = msgs.findIndex(msg => msg.equals(l2ToL1Message));
638
- indexOfMsgInSubtree = Math.max(indexOfMsgInSubtree, idx);
639
- return idx !== -1;
640
- });
641
-
642
- if (indexOfMsgTx === -1) {
643
- throw new Error('The L2ToL1Message you are trying to prove inclusion of does not exist');
644
- }
645
-
646
- const tempStores: AztecKVStore[] = [];
647
-
648
- // Construct message subtrees
649
- const l2toL1Subtrees = await Promise.all(
650
- l2ToL1Messages.map(async (msgs, i) => {
651
- const store = openTmpStore(true);
652
- tempStores.push(store);
653
- const treeHeight = msgs.length <= 1 ? 1 : Math.ceil(Math.log2(msgs.length));
654
- const tree = new StandardTree(store, new SHA256Trunc(), `temp_msgs_subtrees_${i}`, treeHeight, 0n, Fr);
655
- await tree.appendLeaves(msgs);
656
- return tree;
657
- }),
1108
+ public async getL2ToL1Messages(epoch: EpochNumber): Promise<Fr[][][][]> {
1109
+ // Assumes `getCheckpointedBlocksForEpoch` returns blocks in ascending order of block number.
1110
+ const checkpointedBlocks = await this.blockSource.getCheckpointedBlocksForEpoch(epoch);
1111
+ const blocksInCheckpoints = chunkBy(checkpointedBlocks, cb => cb.block.header.globalVariables.slotNumber).map(
1112
+ group => group.map(cb => cb.block),
658
1113
  );
659
-
660
- // path of the input msg from leaf -> first out hash calculated in base rolllup
661
- const subtreePathOfL2ToL1Message = await l2toL1Subtrees[indexOfMsgTx].getSiblingPath(
662
- BigInt(indexOfMsgInSubtree),
663
- true,
1114
+ return blocksInCheckpoints.map(blocks =>
1115
+ blocks.map(block => block.body.txEffects.map(txEffect => txEffect.l2ToL1Msgs)),
664
1116
  );
665
-
666
- const numTxs = block.body.txEffects.length;
667
- if (numTxs === 1) {
668
- return [BigInt(indexOfMsgInSubtree), subtreePathOfL2ToL1Message];
669
- }
670
-
671
- const l2toL1SubtreeRoots = l2toL1Subtrees.map(t => Fr.fromBuffer(t.getRoot(true)));
672
- const maxTreeHeight = Math.ceil(Math.log2(l2toL1SubtreeRoots.length));
673
- // The root of this tree is the out_hash calculated in Noir => we truncate to match Noir's SHA
674
- const outHashTree = new UnbalancedTree(new SHA256Trunc(), 'temp_outhash_sibling_path', maxTreeHeight, Fr);
675
- await outHashTree.appendLeaves(l2toL1SubtreeRoots);
676
-
677
- const pathOfTxInOutHashTree = await outHashTree.getSiblingPath(l2toL1SubtreeRoots[indexOfMsgTx].toBigInt());
678
- // Append subtree path to out hash tree path
679
- const mergedPath = subtreePathOfL2ToL1Message.toBufferArray().concat(pathOfTxInOutHashTree.toBufferArray());
680
- // Append binary index of subtree path to binary index of out hash tree path
681
- const mergedIndex = parseInt(
682
- indexOfMsgTx
683
- .toString(2)
684
- .concat(indexOfMsgInSubtree.toString(2).padStart(l2toL1Subtrees[indexOfMsgTx].getDepth(), '0')),
685
- 2,
686
- );
687
-
688
- // clear the tmp stores
689
- await Promise.all(tempStores.map(store => store.delete()));
690
-
691
- return [BigInt(mergedIndex), new SiblingPath(mergedPath.length, mergedPath)];
692
- }
693
-
694
- /**
695
- * Returns a sibling path for a leaf in the committed blocks tree.
696
- * @param blockNumber - The block number at which to get the data.
697
- * @param leafIndex - Index of the leaf in the tree.
698
- * @returns The sibling path.
699
- */
700
- public async getArchiveSiblingPath(
701
- blockNumber: L2BlockNumber,
702
- leafIndex: bigint,
703
- ): Promise<SiblingPath<typeof ARCHIVE_HEIGHT>> {
704
- const committedDb = await this.#getWorldState(blockNumber);
705
- return committedDb.getSiblingPath(MerkleTreeId.ARCHIVE, leafIndex);
706
1117
  }
707
1118
 
708
- /**
709
- * Returns a sibling path for a leaf in the committed public data tree.
710
- * @param blockNumber - The block number at which to get the data.
711
- * @param leafIndex - Index of the leaf in the tree.
712
- * @returns The sibling path.
713
- */
714
- public async getPublicDataSiblingPath(
715
- blockNumber: L2BlockNumber,
716
- leafIndex: bigint,
717
- ): Promise<SiblingPath<typeof PUBLIC_DATA_TREE_HEIGHT>> {
718
- const committedDb = await this.#getWorldState(blockNumber);
719
- return committedDb.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, leafIndex);
720
- }
721
-
722
- /**
723
- * Returns a nullifier membership witness for a given nullifier at a given block.
724
- * @param blockNumber - The block number at which to get the index.
725
- * @param nullifier - Nullifier we try to find witness for.
726
- * @returns The nullifier membership witness (if found).
727
- */
728
1119
  public async getNullifierMembershipWitness(
729
- blockNumber: L2BlockNumber,
1120
+ referenceBlock: BlockParameter,
730
1121
  nullifier: Fr,
731
1122
  ): Promise<NullifierMembershipWitness | undefined> {
732
- const db = await this.#getWorldState(blockNumber);
733
- const index = (await db.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, [nullifier.toBuffer()]))[0];
734
- if (!index) {
1123
+ const db = await this.getWorldState(referenceBlock);
1124
+ const [witness] = await db.findSiblingPaths(MerkleTreeId.NULLIFIER_TREE, [nullifier.toBuffer()]);
1125
+ if (!witness) {
735
1126
  return undefined;
736
1127
  }
737
1128
 
738
- const leafPreimagePromise = db.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index);
739
- const siblingPathPromise = db.getSiblingPath<typeof NULLIFIER_TREE_HEIGHT>(
740
- MerkleTreeId.NULLIFIER_TREE,
741
- BigInt(index),
742
- );
743
-
744
- const [leafPreimage, siblingPath] = await Promise.all([leafPreimagePromise, siblingPathPromise]);
745
-
1129
+ const { index, path } = witness;
1130
+ const leafPreimage = await db.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index);
746
1131
  if (!leafPreimage) {
747
1132
  return undefined;
748
1133
  }
749
1134
 
750
- return new NullifierMembershipWitness(BigInt(index), leafPreimage as NullifierLeafPreimage, siblingPath);
1135
+ return new NullifierMembershipWitness(index, leafPreimage as NullifierLeafPreimage, path);
751
1136
  }
752
1137
 
753
- /**
754
- * Returns a low nullifier membership witness for a given nullifier at a given block.
755
- * @param blockNumber - The block number at which to get the index.
756
- * @param nullifier - Nullifier we try to find the low nullifier witness for.
757
- * @returns The low nullifier membership witness (if found).
758
- * @remarks Low nullifier witness can be used to perform a nullifier non-inclusion proof by leveraging the "linked
759
- * list structure" of leaves and proving that a lower nullifier is pointing to a bigger next value than the nullifier
760
- * we are trying to prove non-inclusion for.
761
- *
762
- * Note: This function returns the membership witness of the nullifier itself and not the low nullifier when
763
- * the nullifier already exists in the tree. This is because the `getPreviousValueIndex` function returns the
764
- * index of the nullifier itself when it already exists in the tree.
765
- * TODO: This is a confusing behavior and we should eventually address that.
766
- */
767
1138
  public async getLowNullifierMembershipWitness(
768
- blockNumber: L2BlockNumber,
1139
+ referenceBlock: BlockParameter,
769
1140
  nullifier: Fr,
770
1141
  ): Promise<NullifierMembershipWitness | undefined> {
771
- const committedDb = await this.#getWorldState(blockNumber);
1142
+ const committedDb = await this.getWorldState(referenceBlock);
772
1143
  const findResult = await committedDb.getPreviousValueIndex(MerkleTreeId.NULLIFIER_TREE, nullifier.toBigInt());
773
1144
  if (!findResult) {
774
1145
  return undefined;
775
1146
  }
776
1147
  const { index, alreadyPresent } = findResult;
777
1148
  if (alreadyPresent) {
778
- this.log.warn(`Nullifier ${nullifier.toBigInt()} already exists in the tree`);
1149
+ throw new Error(
1150
+ `Cannot prove nullifier non-inclusion: nullifier ${nullifier.toBigInt()} already exists in the tree`,
1151
+ );
779
1152
  }
780
1153
  const preimageData = (await committedDb.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index))!;
781
1154
 
782
- const siblingPath = await committedDb.getSiblingPath<typeof NULLIFIER_TREE_HEIGHT>(
783
- MerkleTreeId.NULLIFIER_TREE,
784
- BigInt(index),
785
- );
1155
+ const siblingPath = await committedDb.getSiblingPath(MerkleTreeId.NULLIFIER_TREE, BigInt(index));
786
1156
  return new NullifierMembershipWitness(BigInt(index), preimageData as NullifierLeafPreimage, siblingPath);
787
1157
  }
788
1158
 
789
- async getPublicDataTreeWitness(blockNumber: L2BlockNumber, leafSlot: Fr): Promise<PublicDataWitness | undefined> {
790
- const committedDb = await this.#getWorldState(blockNumber);
1159
+ async getPublicDataWitness(referenceBlock: BlockParameter, leafSlot: Fr): Promise<PublicDataWitness | undefined> {
1160
+ const committedDb = await this.getWorldState(referenceBlock);
791
1161
  const lowLeafResult = await committedDb.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot.toBigInt());
792
1162
  if (!lowLeafResult) {
793
1163
  return undefined;
@@ -796,27 +1166,13 @@ export class AztecNodeService implements AztecNode, Traceable {
796
1166
  MerkleTreeId.PUBLIC_DATA_TREE,
797
1167
  lowLeafResult.index,
798
1168
  )) as PublicDataTreeLeafPreimage;
799
- const path = await committedDb.getSiblingPath<typeof PUBLIC_DATA_TREE_HEIGHT>(
800
- MerkleTreeId.PUBLIC_DATA_TREE,
801
- lowLeafResult.index,
802
- );
1169
+ const path = await committedDb.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, lowLeafResult.index);
803
1170
  return new PublicDataWitness(lowLeafResult.index, preimage, path);
804
1171
  }
805
1172
  }
806
1173
 
807
- /**
808
- * Gets the storage value at the given contract storage slot.
809
- *
810
- * @remarks The storage slot here refers to the slot as it is defined in Noir not the index in the merkle tree.
811
- * Aztec's version of `eth_getStorageAt`.
812
- *
813
- * @param contract - Address of the contract to query.
814
- * @param slot - Slot to query.
815
- * @param blockNumber - The block number at which to get the data or 'latest'.
816
- * @returns Storage value at the given contract slot.
817
- */
818
- public async getPublicStorageAt(blockNumber: L2BlockNumber, contract: AztecAddress, slot: Fr): Promise<Fr> {
819
- const committedDb = await this.#getWorldState(blockNumber);
1174
+ public async getPublicStorageAt(referenceBlock: BlockParameter, contract: AztecAddress, slot: Fr): Promise<Fr> {
1175
+ const committedDb = await this.getWorldState(referenceBlock);
820
1176
  const leafSlot = await computePublicDataTreeLeafSlot(contract, slot);
821
1177
 
822
1178
  const lowLeafResult = await committedDb.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot.toBigInt());
@@ -827,36 +1183,76 @@ export class AztecNodeService implements AztecNode, Traceable {
827
1183
  MerkleTreeId.PUBLIC_DATA_TREE,
828
1184
  lowLeafResult.index,
829
1185
  )) as PublicDataTreeLeafPreimage;
830
- return preimage.value;
1186
+ return preimage.leaf.value;
1187
+ }
1188
+
1189
+ public async getBlockHeader(block: BlockParameter = 'latest'): Promise<BlockHeader | undefined> {
1190
+ if (BlockHash.isBlockHash(block)) {
1191
+ const initialBlockHash = await this.#getInitialHeaderHash();
1192
+ if (block.equals(initialBlockHash)) {
1193
+ // Block source doesn't handle initial header so we need to handle the case separately.
1194
+ return this.worldStateSynchronizer.getCommitted().getInitialHeader();
1195
+ }
1196
+ return this.blockSource.getBlockHeaderByHash(block);
1197
+ } else {
1198
+ // Block source doesn't handle initial header so we need to handle the case separately.
1199
+ const blockNumber = block === 'latest' ? await this.getBlockNumber() : (block as BlockNumber);
1200
+ if (blockNumber === BlockNumber.ZERO) {
1201
+ return this.worldStateSynchronizer.getCommitted().getInitialHeader();
1202
+ }
1203
+ return this.blockSource.getBlockHeader(block);
1204
+ }
831
1205
  }
832
1206
 
833
1207
  /**
834
- * Returns the currently committed block header, or the initial header if no blocks have been produced.
835
- * @returns The current committed block header.
1208
+ * Get a block header specified by its archive root.
1209
+ * @param archive - The archive root being requested.
1210
+ * @returns The requested block header.
836
1211
  */
837
- public async getBlockHeader(blockNumber: L2BlockNumber = 'latest'): Promise<BlockHeader | undefined> {
838
- return blockNumber === 0 || (blockNumber === 'latest' && (await this.blockSource.getBlockNumber()) === 0)
839
- ? this.worldStateSynchronizer.getCommitted().getInitialHeader()
840
- : this.blockSource.getBlockHeader(blockNumber);
1212
+ public async getBlockHeaderByArchive(archive: Fr): Promise<BlockHeader | undefined> {
1213
+ return await this.blockSource.getBlockHeaderByArchive(archive);
1214
+ }
1215
+
1216
+ public getBlockData(number: BlockNumber): Promise<BlockData | undefined> {
1217
+ return this.blockSource.getBlockData(number);
1218
+ }
1219
+
1220
+ public getBlockDataByArchive(archive: Fr): Promise<BlockData | undefined> {
1221
+ return this.blockSource.getBlockDataByArchive(archive);
841
1222
  }
842
1223
 
843
1224
  /**
844
1225
  * Simulates the public part of a transaction with the current state.
845
1226
  * @param tx - The transaction to simulate.
846
1227
  **/
847
- @trackSpan('AztecNodeService.simulatePublicCalls', async (tx: Tx) => ({
848
- [Attributes.TX_HASH]: (await tx.getTxHash()).toString(),
1228
+ @trackSpan('AztecNodeService.simulatePublicCalls', (tx: Tx) => ({
1229
+ [Attributes.TX_HASH]: tx.getTxHash().toString(),
849
1230
  }))
850
1231
  public async simulatePublicCalls(tx: Tx, skipFeeEnforcement = false): Promise<PublicSimulationOutput> {
851
- const txHash = await tx.getTxHash();
852
- const blockNumber = (await this.blockSource.getBlockNumber()) + 1;
1232
+ // Check total gas limit for simulation
1233
+ const gasSettings = tx.data.constants.txContext.gasSettings;
1234
+ const txGasLimit = gasSettings.gasLimits.l2Gas;
1235
+ const teardownGasLimit = gasSettings.teardownGasLimits.l2Gas;
1236
+ if (txGasLimit + teardownGasLimit > this.config.rpcSimulatePublicMaxGasLimit) {
1237
+ throw new BadRequestError(
1238
+ `Transaction total gas limit ${
1239
+ txGasLimit + teardownGasLimit
1240
+ } (${txGasLimit} + ${teardownGasLimit}) exceeds maximum gas limit ${
1241
+ this.config.rpcSimulatePublicMaxGasLimit
1242
+ } for simulation`,
1243
+ );
1244
+ }
1245
+
1246
+ const txHash = tx.getTxHash();
1247
+ const latestBlockNumber = await this.blockSource.getBlockNumber();
1248
+ const blockNumber = BlockNumber.add(latestBlockNumber, 1);
853
1249
 
854
1250
  // If sequencer is not initialized, we just set these values to zero for simulation.
855
- const coinbase = this.sequencer?.coinbase || EthAddress.ZERO;
856
- const feeRecipient = this.sequencer?.feeRecipient || AztecAddress.ZERO;
1251
+ const coinbase = EthAddress.ZERO;
1252
+ const feeRecipient = AztecAddress.ZERO;
857
1253
 
858
1254
  const newGlobalVariables = await this.globalVariableBuilder.buildGlobalVariables(
859
- new Fr(blockNumber),
1255
+ blockNumber,
860
1256
  coinbase,
861
1257
  feeRecipient,
862
1258
  );
@@ -864,8 +1260,8 @@ export class AztecNodeService implements AztecNode, Traceable {
864
1260
  this.contractDataSource,
865
1261
  new DateProvider(),
866
1262
  this.telemetry,
1263
+ this.log.getBindings(),
867
1264
  );
868
- const fork = await this.worldStateSynchronizer.fork();
869
1265
 
870
1266
  this.log.verbose(`Simulating public calls for tx ${txHash}`, {
871
1267
  globalVariables: newGlobalVariables.toInspect(),
@@ -873,11 +1269,24 @@ export class AztecNodeService implements AztecNode, Traceable {
873
1269
  blockNumber,
874
1270
  });
875
1271
 
1272
+ // Ensure world-state has caught up with the latest block we loaded from the archiver
1273
+ await this.worldStateSynchronizer.syncImmediate(latestBlockNumber);
1274
+ const merkleTreeFork = await this.worldStateSynchronizer.fork();
876
1275
  try {
877
- const processor = publicProcessorFactory.create(fork, newGlobalVariables, skipFeeEnforcement);
1276
+ const config = PublicSimulatorConfig.from({
1277
+ skipFeeEnforcement,
1278
+ collectDebugLogs: true,
1279
+ collectHints: false,
1280
+ collectCallMetadata: true,
1281
+ collectStatistics: false,
1282
+ collectionLimits: CollectionLimitsConfig.from({
1283
+ maxDebugLogMemoryReads: this.config.rpcSimulatePublicMaxDebugLogMemoryReads,
1284
+ }),
1285
+ });
1286
+ const processor = publicProcessorFactory.create(merkleTreeFork, newGlobalVariables, config);
878
1287
 
879
1288
  // REFACTOR: Consider merging ProcessReturnValues into ProcessedTx
880
- const [processedTxs, failedTxs, returns] = await processor.process([tx]);
1289
+ const [processedTxs, failedTxs, _usedTxs, returns, debugLogs] = await processor.process([tx]);
881
1290
  // REFACTOR: Consider returning the error rather than throwing
882
1291
  if (failedTxs.length) {
883
1292
  this.log.warn(`Simulated tx ${txHash} fails: ${failedTxs[0].error}`, { txHash });
@@ -887,13 +1296,14 @@ export class AztecNodeService implements AztecNode, Traceable {
887
1296
  const [processedTx] = processedTxs;
888
1297
  return new PublicSimulationOutput(
889
1298
  processedTx.revertReason,
890
- processedTx.constants,
1299
+ processedTx.globalVariables,
891
1300
  processedTx.txEffect,
892
1301
  returns,
893
1302
  processedTx.gasUsed,
1303
+ debugLogs,
894
1304
  );
895
1305
  } finally {
896
- await fork.close();
1306
+ await merkleTreeFork.close();
897
1307
  }
898
1308
  }
899
1309
 
@@ -901,24 +1311,55 @@ export class AztecNodeService implements AztecNode, Traceable {
901
1311
  tx: Tx,
902
1312
  { isSimulation, skipFeeEnforcement }: { isSimulation?: boolean; skipFeeEnforcement?: boolean } = {},
903
1313
  ): Promise<TxValidationResult> {
904
- const blockNumber = (await this.blockSource.getBlockNumber()) + 1;
905
1314
  const db = this.worldStateSynchronizer.getCommitted();
906
1315
  const verifier = isSimulation ? undefined : this.proofVerifier;
907
- const validator = createValidatorForAcceptingTxs(db, this.contractDataSource, verifier, {
908
- blockNumber,
909
- l1ChainId: this.l1ChainId,
910
- setupAllowList: this.config.allowedInSetup ?? (await getDefaultAllowedSetupFunctions()),
911
- gasFees: await this.getCurrentBaseFees(),
912
- skipFeeEnforcement,
913
- });
1316
+
1317
+ // We accept transactions if they are not expired by the next slot (checked based on the ExpirationTimestamp field)
1318
+ const { ts: nextSlotTimestamp } = this.epochCache.getEpochAndSlotInNextL1Slot();
1319
+ const blockNumber = BlockNumber((await this.blockSource.getBlockNumber()) + 1);
1320
+ const l1Constants = await this.blockSource.getL1Constants();
1321
+ const validator = createTxValidatorForAcceptingTxsOverRPC(
1322
+ db,
1323
+ this.contractDataSource,
1324
+ verifier,
1325
+ {
1326
+ timestamp: nextSlotTimestamp,
1327
+ blockNumber,
1328
+ l1ChainId: this.l1ChainId,
1329
+ rollupVersion: this.version,
1330
+ setupAllowList: [
1331
+ ...(await getDefaultAllowedSetupFunctions()),
1332
+ ...(this.config.txPublicSetupAllowListExtend ?? []),
1333
+ ],
1334
+ gasFees: await this.getCurrentMinFees(),
1335
+ skipFeeEnforcement,
1336
+ txsPermitted: !this.config.disableTransactions,
1337
+ rollupManaLimit: l1Constants.rollupManaLimit,
1338
+ maxBlockL2Gas: this.config.validateMaxL2BlockGas,
1339
+ maxBlockDAGas: this.config.validateMaxDABlockGas,
1340
+ },
1341
+ this.log.getBindings(),
1342
+ );
914
1343
 
915
1344
  return await validator.validateTx(tx);
916
1345
  }
917
1346
 
918
- public async setConfig(config: Partial<SequencerConfig & ProverConfig>): Promise<void> {
919
- const newConfig = { ...this.config, ...config };
920
- await this.sequencer?.updateSequencerConfig(config);
1347
+ public getConfig(): Promise<AztecNodeAdminConfig> {
1348
+ const schema = AztecNodeAdminConfigSchema;
1349
+ const keys = schema.keyof().options;
1350
+ return Promise.resolve(pick(this.config, ...keys));
1351
+ }
921
1352
 
1353
+ public async setConfig(config: Partial<AztecNodeAdminConfig>): Promise<void> {
1354
+ const newConfig = { ...this.config, ...config };
1355
+ this.sequencer?.updateConfig(config);
1356
+ this.slasherClient?.updateConfig(config);
1357
+ this.validatorsSentinel?.updateConfig(config);
1358
+ await this.p2pClient.updateP2PConfig(config);
1359
+ const archiver = this.blockSource as Archiver;
1360
+ if ('updateConfig' in archiver) {
1361
+ archiver.updateConfig(config);
1362
+ }
922
1363
  if (newConfig.realProofs !== this.config.realProofs) {
923
1364
  this.proofVerifier = config.realProofs ? await BBCircuitVerifier.new(newConfig) : new TestCircuitVerifier();
924
1365
  }
@@ -928,42 +1369,242 @@ export class AztecNodeService implements AztecNode, Traceable {
928
1369
 
929
1370
  public getProtocolContractAddresses(): Promise<ProtocolContractAddresses> {
930
1371
  return Promise.resolve({
931
- classRegisterer: ProtocolContractAddress.ContractClassRegisterer,
1372
+ classRegistry: ProtocolContractAddress.ContractClassRegistry,
932
1373
  feeJuice: ProtocolContractAddress.FeeJuice,
933
- instanceDeployer: ProtocolContractAddress.ContractInstanceDeployer,
1374
+ instanceRegistry: ProtocolContractAddress.ContractInstanceRegistry,
934
1375
  multiCallEntrypoint: ProtocolContractAddress.MultiCallEntrypoint,
935
1376
  });
936
1377
  }
937
1378
 
938
- // TODO(#10007): Remove this method
939
- public addContractClass(contractClass: ContractClassPublic): Promise<void> {
940
- this.log.info(`Adding contract class via API ${contractClass.id}`);
941
- return this.contractDataSource.addContractClass(contractClass);
1379
+ public registerContractFunctionSignatures(signatures: string[]): Promise<void> {
1380
+ return this.contractDataSource.registerContractFunctionSignatures(signatures);
1381
+ }
1382
+
1383
+ public getValidatorsStats(): Promise<ValidatorsStats> {
1384
+ return this.validatorsSentinel?.computeStats() ?? Promise.resolve({ stats: {}, slotWindow: 0 });
942
1385
  }
943
1386
 
944
- public registerContractFunctionSignatures(_address: AztecAddress, signatures: string[]): Promise<void> {
945
- return this.contractDataSource.registerContractFunctionSignatures(_address, signatures);
1387
+ public getValidatorStats(
1388
+ validatorAddress: EthAddress,
1389
+ fromSlot?: SlotNumber,
1390
+ toSlot?: SlotNumber,
1391
+ ): Promise<SingleValidatorStats | undefined> {
1392
+ return this.validatorsSentinel?.getValidatorStats(validatorAddress, fromSlot, toSlot) ?? Promise.resolve(undefined);
946
1393
  }
947
1394
 
948
- public flushTxs(): Promise<void> {
949
- if (!this.sequencer) {
950
- throw new Error(`Sequencer is not initialized`);
1395
+ public async startSnapshotUpload(location: string): Promise<void> {
1396
+ // Note that we are forcefully casting the blocksource as an archiver
1397
+ // We break support for archiver running remotely to the node
1398
+ const archiver = this.blockSource as Archiver;
1399
+ if (!('backupTo' in archiver)) {
1400
+ this.metrics.recordSnapshotError();
1401
+ throw new Error('Archiver implementation does not support backups. Cannot generate snapshot.');
951
1402
  }
952
- this.sequencer.flush();
1403
+
1404
+ // Test that the archiver has done an initial sync.
1405
+ if (!archiver.isInitialSyncComplete()) {
1406
+ this.metrics.recordSnapshotError();
1407
+ throw new Error(`Archiver initial sync not complete. Cannot start snapshot.`);
1408
+ }
1409
+
1410
+ // And it has an L2 block hash
1411
+ const l2BlockHash = await archiver.getL2Tips().then(tips => tips.proposed.hash);
1412
+ if (!l2BlockHash) {
1413
+ this.metrics.recordSnapshotError();
1414
+ throw new Error(`Archiver has no latest L2 block hash downloaded. Cannot start snapshot.`);
1415
+ }
1416
+
1417
+ if (this.isUploadingSnapshot) {
1418
+ this.metrics.recordSnapshotError();
1419
+ throw new Error(`Snapshot upload already in progress. Cannot start another one until complete.`);
1420
+ }
1421
+
1422
+ // Do not wait for the upload to be complete to return to the caller, but flag that an operation is in progress
1423
+ this.isUploadingSnapshot = true;
1424
+ const timer = new Timer();
1425
+ void uploadSnapshot(location, this.blockSource as Archiver, this.worldStateSynchronizer, this.config, this.log)
1426
+ .then(() => {
1427
+ this.isUploadingSnapshot = false;
1428
+ this.metrics.recordSnapshot(timer.ms());
1429
+ })
1430
+ .catch(err => {
1431
+ this.isUploadingSnapshot = false;
1432
+ this.metrics.recordSnapshotError();
1433
+ this.log.error(`Error uploading snapshot: ${err}`);
1434
+ });
1435
+
1436
+ return Promise.resolve();
1437
+ }
1438
+
1439
+ public async rollbackTo(targetBlock: BlockNumber, force?: boolean): Promise<void> {
1440
+ const archiver = this.blockSource as Archiver;
1441
+ if (!('rollbackTo' in archiver)) {
1442
+ throw new Error('Archiver implementation does not support rollbacks.');
1443
+ }
1444
+
1445
+ const finalizedBlock = await archiver.getL2Tips().then(tips => tips.finalized.block.number);
1446
+ if (targetBlock < finalizedBlock) {
1447
+ if (force) {
1448
+ this.log.warn(`Clearing world state database to allow rolling back behind finalized block ${finalizedBlock}`);
1449
+ await this.worldStateSynchronizer.clear();
1450
+ await this.p2pClient.clear();
1451
+ } else {
1452
+ throw new Error(`Cannot rollback to block ${targetBlock} as it is before finalized ${finalizedBlock}`);
1453
+ }
1454
+ }
1455
+
1456
+ try {
1457
+ this.log.info(`Pausing archiver and world state sync to start rollback`);
1458
+ await archiver.stop();
1459
+ await this.worldStateSynchronizer.stopSync();
1460
+ const currentBlock = await archiver.getBlockNumber();
1461
+ const blocksToUnwind = currentBlock - targetBlock;
1462
+ this.log.info(`Unwinding ${count(blocksToUnwind, 'block')} from L2 block ${currentBlock} to ${targetBlock}`);
1463
+ await archiver.rollbackTo(targetBlock);
1464
+ this.log.info(`Unwinding complete.`);
1465
+ } catch (err) {
1466
+ this.log.error(`Error during rollback`, err);
1467
+ throw err;
1468
+ } finally {
1469
+ this.log.info(`Resuming world state and archiver sync.`);
1470
+ this.worldStateSynchronizer.resumeSync();
1471
+ archiver.resume();
1472
+ }
1473
+ }
1474
+
1475
+ public async pauseSync(): Promise<void> {
1476
+ this.log.info(`Pausing archiver and world state sync`);
1477
+ await (this.blockSource as Archiver).stop();
1478
+ await this.worldStateSynchronizer.stopSync();
1479
+ }
1480
+
1481
+ public resumeSync(): Promise<void> {
1482
+ this.log.info(`Resuming world state and archiver sync.`);
1483
+ this.worldStateSynchronizer.resumeSync();
1484
+ (this.blockSource as Archiver).resume();
953
1485
  return Promise.resolve();
954
1486
  }
955
1487
 
1488
+ public getSlashPayloads(): Promise<SlashPayloadRound[]> {
1489
+ if (!this.slasherClient) {
1490
+ throw new Error(`Slasher client not enabled`);
1491
+ }
1492
+ return this.slasherClient.getSlashPayloads();
1493
+ }
1494
+
1495
+ public getSlashOffenses(round: bigint | 'all' | 'current'): Promise<Offense[]> {
1496
+ if (!this.slasherClient) {
1497
+ throw new Error(`Slasher client not enabled`);
1498
+ }
1499
+ if (round === 'all') {
1500
+ return this.slasherClient.getPendingOffenses();
1501
+ } else {
1502
+ return this.slasherClient.gatherOffensesForRound(round === 'current' ? undefined : BigInt(round));
1503
+ }
1504
+ }
1505
+
1506
+ public async reloadKeystore(): Promise<void> {
1507
+ if (!this.config.keyStoreDirectory?.length) {
1508
+ throw new BadRequestError(
1509
+ 'Cannot reload keystore: node is not using a file-based keystore. ' +
1510
+ 'Set KEY_STORE_DIRECTORY to use file-based keystores.',
1511
+ );
1512
+ }
1513
+ if (!this.validatorClient) {
1514
+ throw new BadRequestError('Cannot reload keystore: validator is not enabled.');
1515
+ }
1516
+
1517
+ this.log.info('Reloading keystore from disk');
1518
+
1519
+ // Re-read and validate keystore files
1520
+ const keyStores = loadKeystores(this.config.keyStoreDirectory);
1521
+ const newManager = new KeystoreManager(mergeKeystores(keyStores));
1522
+ await newManager.validateSigners();
1523
+ ValidatorClient.validateKeyStoreConfiguration(newManager, this.log);
1524
+
1525
+ // Validate that every validator's publisher keys overlap with the L1 signers
1526
+ // that were initialized at startup. Publishers cannot be hot-reloaded, so a
1527
+ // validator with a publisher key that doesn't match any existing L1 signer
1528
+ // would silently fail on every proposer slot.
1529
+ if (this.keyStoreManager && this.sequencer) {
1530
+ const oldAdapter = NodeKeystoreAdapter.fromKeyStoreManager(this.keyStoreManager);
1531
+ const availablePublishers = new Set(
1532
+ oldAdapter
1533
+ .getAttesterAddresses()
1534
+ .flatMap(a => oldAdapter.getPublisherAddresses(a).map(p => p.toString().toLowerCase())),
1535
+ );
1536
+
1537
+ const newAdapter = NodeKeystoreAdapter.fromKeyStoreManager(newManager);
1538
+ for (const attester of newAdapter.getAttesterAddresses()) {
1539
+ const pubs = newAdapter.getPublisherAddresses(attester);
1540
+ if (pubs.length > 0 && !pubs.some(p => availablePublishers.has(p.toString().toLowerCase()))) {
1541
+ throw new BadRequestError(
1542
+ `Cannot reload keystore: validator ${attester} has publisher keys ` +
1543
+ `[${pubs.map(p => p.toString()).join(', ')}] but none match the L1 signers initialized at startup ` +
1544
+ `[${[...availablePublishers].join(', ')}]. Publishers cannot be hot-reloaded — ` +
1545
+ `use an existing publisher key or restart the node.`,
1546
+ );
1547
+ }
1548
+ }
1549
+ }
1550
+
1551
+ // Build adapters for old and new keystores to compute diff
1552
+ const newAdapter = NodeKeystoreAdapter.fromKeyStoreManager(newManager);
1553
+ const newAddresses = newAdapter.getAttesterAddresses();
1554
+ const oldAddresses = this.keyStoreManager
1555
+ ? NodeKeystoreAdapter.fromKeyStoreManager(this.keyStoreManager).getAttesterAddresses()
1556
+ : [];
1557
+
1558
+ const oldSet = new Set(oldAddresses.map(a => a.toString()));
1559
+ const newSet = new Set(newAddresses.map(a => a.toString()));
1560
+ const added = newAddresses.filter(a => !oldSet.has(a.toString()));
1561
+ const removed = oldAddresses.filter(a => !newSet.has(a.toString()));
1562
+
1563
+ if (added.length > 0) {
1564
+ this.log.info(`Keystore reload: adding attester keys: ${added.map(a => a.toString()).join(', ')}`);
1565
+ }
1566
+ if (removed.length > 0) {
1567
+ this.log.info(`Keystore reload: removing attester keys: ${removed.map(a => a.toString()).join(', ')}`);
1568
+ }
1569
+ if (added.length === 0 && removed.length === 0) {
1570
+ this.log.info('Keystore reload: attester keys unchanged');
1571
+ }
1572
+
1573
+ // Update the validator client (coinbase, feeRecipient, attester keys)
1574
+ this.validatorClient.reloadKeystore(newManager);
1575
+
1576
+ // Update the publisher factory's keystore so newly-added validators
1577
+ // can be matched to existing publisher keys when proposing blocks.
1578
+ if (this.sequencer) {
1579
+ this.sequencer.updatePublisherNodeKeyStore(newAdapter);
1580
+ }
1581
+
1582
+ // Update slasher's "don't-slash-self" list with new validator addresses
1583
+ if (this.slasherClient && !this.config.slashSelfAllowed) {
1584
+ const slashValidatorsNever = unique(
1585
+ [...(this.config.slashValidatorsNever ?? []), ...newAddresses].map(a => a.toString()),
1586
+ ).map(EthAddress.fromString);
1587
+ this.slasherClient.updateConfig({ slashValidatorsNever });
1588
+ }
1589
+
1590
+ this.keyStoreManager = newManager;
1591
+ this.log.info('Keystore reloaded: coinbase, feeRecipient, and attester keys updated');
1592
+ }
1593
+
1594
+ #getInitialHeaderHash(): Promise<BlockHash> {
1595
+ if (!this.initialHeaderHashPromise) {
1596
+ this.initialHeaderHashPromise = this.worldStateSynchronizer.getCommitted().getInitialHeader().hash();
1597
+ }
1598
+ return this.initialHeaderHashPromise;
1599
+ }
1600
+
956
1601
  /**
   * Returns an instance of MerkleTreeOperations having first ensured the world state is fully synched
   * @param block - The block parameter (block number, block hash, or 'latest') at which to get the data.
   * @returns An instance of a committed MerkleTreeOperations
   * @throws If the requested block is ahead of the synced tip, or if a requested block hash cannot be
   *   resolved (or no longer matches world state), e.g. after a reorg.
   */
  protected async getWorldState(block: BlockParameter) {
    let blockSyncedTo: BlockNumber = BlockNumber.ZERO;
    try {
      // Attempt to sync the world state if necessary
      blockSyncedTo = await this.#syncWorldState();
    } catch (err) {
      // NOTE(review): sync failure is logged and swallowed, leaving blockSyncedTo at ZERO —
      // deliberate best-effort so a 'latest' query can still serve the committed db below.
      this.log.error(`Error getting world state: ${err}`);
    }

    if (block === 'latest') {
      this.log.debug(`Using committed db for block 'latest', world state synced upto ${blockSyncedTo}`);
      return this.worldStateSynchronizer.getCommitted();
    }

    // Get the block number, either directly from the parameter or by querying the archiver with the block hash
    let blockNumber: BlockNumber;
    if (BlockHash.isBlockHash(block)) {
      const initialBlockHash = await this.#getInitialHeaderHash();
      if (block.equals(initialBlockHash)) {
        // Block source doesn't handle initial header so we need to handle the case separately.
        return this.worldStateSynchronizer.getSnapshot(BlockNumber.ZERO);
      }

      const header = await this.blockSource.getBlockHeaderByHash(block);
      if (!header) {
        throw new Error(
          `Block hash ${block.toString()} not found when querying world state. If the node API has been queried with anchor block hash possibly a reorg has occurred.`,
        );
      }

      blockNumber = header.getBlockNumber();
    } else {
      blockNumber = block as BlockNumber;
    }

    // Check it's within world state sync range
    if (blockNumber > blockSyncedTo) {
      throw new Error(`Queried block ${block} not yet synced by the node (node is synced upto ${blockSyncedTo}).`);
    }
    this.log.debug(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`);

    const snapshot = this.worldStateSynchronizer.getSnapshot(blockNumber);

    // Double-check world-state synced to the same block hash as was requested: the archive tree leaf
    // at this block number must still equal the caller-supplied hash (guards against a reorg between
    // the header lookup above and taking the snapshot).
    if (BlockHash.isBlockHash(block)) {
      const blockHash = await snapshot.getLeafValue(MerkleTreeId.ARCHIVE, BigInt(blockNumber));
      if (!blockHash || !new BlockHash(blockHash).equals(block)) {
        throw new Error(
          `Block hash ${block.toString()} not found in world state at block number ${blockNumber}. If the node API has been queried with anchor block hash possibly a reorg has occurred.`,
        );
      }
    }

    return snapshot;
  }
1661
+
1662
+ /** Resolves a block parameter to a block number. */
1663
+ protected async resolveBlockNumber(block: BlockParameter): Promise<BlockNumber> {
1664
+ if (block === 'latest') {
1665
+ return BlockNumber(await this.blockSource.getBlockNumber());
1666
+ }
1667
+ if (BlockHash.isBlockHash(block)) {
1668
+ const initialBlockHash = await this.#getInitialHeaderHash();
1669
+ if (block.equals(initialBlockHash)) {
1670
+ return BlockNumber.ZERO;
1671
+ }
1672
+ const header = await this.blockSource.getBlockHeaderByHash(block);
1673
+ if (!header) {
1674
+ throw new Error(`Block hash ${block.toString()} not found.`);
1675
+ }
1676
+ return header.getBlockNumber();
983
1677
  }
1678
+ return block as BlockNumber;
984
1679
  }
985
1680
 
986
1681
  /**
987
1682
  * Ensure we fully sync the world state
988
1683
  * @returns A promise that fulfils once the world state is synced
989
1684
  */
990
- async #syncWorldState(): Promise<number> {
1685
+ async #syncWorldState(): Promise<BlockNumber> {
991
1686
  const blockSourceHeight = await this.blockSource.getBlockNumber();
992
- return this.worldStateSynchronizer.syncImmediate(blockSourceHeight);
1687
+ return await this.worldStateSynchronizer.syncImmediate(blockSourceHeight);
993
1688
  }
994
1689
  }