@aztec/aztec-node 0.0.0-test.1 → 0.0.1-commit.0b941701

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/dest/aztec-node/config.d.ts +18 -10
  2. package/dest/aztec-node/config.d.ts.map +1 -1
  3. package/dest/aztec-node/config.js +81 -14
  4. package/dest/aztec-node/node_metrics.d.ts +5 -1
  5. package/dest/aztec-node/node_metrics.d.ts.map +1 -1
  6. package/dest/aztec-node/node_metrics.js +17 -7
  7. package/dest/aztec-node/server.d.ts +114 -141
  8. package/dest/aztec-node/server.d.ts.map +1 -1
  9. package/dest/aztec-node/server.js +1093 -339
  10. package/dest/bin/index.d.ts +1 -1
  11. package/dest/bin/index.js +4 -2
  12. package/dest/index.d.ts +1 -2
  13. package/dest/index.d.ts.map +1 -1
  14. package/dest/index.js +0 -1
  15. package/dest/sentinel/config.d.ts +8 -0
  16. package/dest/sentinel/config.d.ts.map +1 -0
  17. package/dest/sentinel/config.js +29 -0
  18. package/dest/sentinel/factory.d.ts +9 -0
  19. package/dest/sentinel/factory.d.ts.map +1 -0
  20. package/dest/sentinel/factory.js +17 -0
  21. package/dest/sentinel/index.d.ts +3 -0
  22. package/dest/sentinel/index.d.ts.map +1 -0
  23. package/dest/sentinel/index.js +1 -0
  24. package/dest/sentinel/sentinel.d.ts +93 -0
  25. package/dest/sentinel/sentinel.d.ts.map +1 -0
  26. package/dest/sentinel/sentinel.js +403 -0
  27. package/dest/sentinel/store.d.ts +35 -0
  28. package/dest/sentinel/store.d.ts.map +1 -0
  29. package/dest/sentinel/store.js +170 -0
  30. package/dest/test/index.d.ts +31 -0
  31. package/dest/test/index.d.ts.map +1 -0
  32. package/dest/test/index.js +1 -0
  33. package/package.json +46 -35
  34. package/src/aztec-node/config.ts +132 -25
  35. package/src/aztec-node/node_metrics.ts +24 -14
  36. package/src/aztec-node/server.ts +902 -418
  37. package/src/bin/index.ts +4 -2
  38. package/src/index.ts +0 -1
  39. package/src/sentinel/config.ts +37 -0
  40. package/src/sentinel/factory.ts +36 -0
  41. package/src/sentinel/index.ts +8 -0
  42. package/src/sentinel/sentinel.ts +510 -0
  43. package/src/sentinel/store.ts +185 -0
  44. package/src/test/index.ts +32 -0
  45. package/dest/aztec-node/http_rpc_server.d.ts +0 -8
  46. package/dest/aztec-node/http_rpc_server.d.ts.map +0 -1
  47. package/dest/aztec-node/http_rpc_server.js +0 -9
  48. package/src/aztec-node/http_rpc_server.ts +0 -11
@@ -1,39 +1,48 @@
1
- import { createArchiver } from '@aztec/archiver';
2
- import { BBCircuitVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
3
- import { type BlobSinkClientInterface, createBlobSinkClient } from '@aztec/blob-sink/client';
1
+ import { Archiver, createArchiver } from '@aztec/archiver';
2
+ import { BBCircuitVerifier, QueuedIVCVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
3
+ import { type BlobClientInterface, createBlobClientWithFileStores } from '@aztec/blob-client/client';
4
4
  import {
5
- type ARCHIVE_HEIGHT,
6
- INITIAL_L2_BLOCK_NUM,
5
+ ARCHIVE_HEIGHT,
7
6
  type L1_TO_L2_MSG_TREE_HEIGHT,
8
7
  type NOTE_HASH_TREE_HEIGHT,
9
8
  type NULLIFIER_TREE_HEIGHT,
10
9
  type PUBLIC_DATA_TREE_HEIGHT,
11
- REGISTERER_CONTRACT_ADDRESS,
12
10
  } from '@aztec/constants';
13
- import { EpochCache } from '@aztec/epoch-cache';
14
- import { type L1ContractAddresses, createEthereumChain } from '@aztec/ethereum';
15
- import { compactArray } from '@aztec/foundation/collection';
11
+ import { EpochCache, type EpochCacheInterface } from '@aztec/epoch-cache';
12
+ import { createEthereumChain } from '@aztec/ethereum/chain';
13
+ import { getPublicClient } from '@aztec/ethereum/client';
14
+ import { RegistryContract, RollupContract } from '@aztec/ethereum/contracts';
15
+ import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
16
+ import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types';
17
+ import { compactArray, pick } from '@aztec/foundation/collection';
18
+ import { Fr } from '@aztec/foundation/curves/bn254';
16
19
  import { EthAddress } from '@aztec/foundation/eth-address';
17
- import { Fr } from '@aztec/foundation/fields';
20
+ import { BadRequestError } from '@aztec/foundation/json-rpc';
18
21
  import { type Logger, createLogger } from '@aztec/foundation/log';
22
+ import { count } from '@aztec/foundation/string';
19
23
  import { DateProvider, Timer } from '@aztec/foundation/timer';
20
- import { SiblingPath } from '@aztec/foundation/trees';
21
- import type { AztecKVStore } from '@aztec/kv-store';
22
- import { openTmpStore } from '@aztec/kv-store/lmdb';
23
- import { SHA256Trunc, StandardTree, UnbalancedTree } from '@aztec/merkle-tree';
24
- import { type P2P, createP2PClient } from '@aztec/p2p';
25
- import { ProtocolContractAddress } from '@aztec/protocol-contracts';
24
+ import { MembershipWitness, SiblingPath } from '@aztec/foundation/trees';
25
+ import { KeystoreManager, loadKeystores, mergeKeystores } from '@aztec/node-keystore';
26
+ import { trySnapshotSync, uploadSnapshot } from '@aztec/node-lib/actions';
26
27
  import {
27
- GlobalVariableBuilder,
28
- SequencerClient,
29
- type SequencerPublisher,
30
- createSlasherClient,
31
- createValidatorForAcceptingTxs,
32
- getDefaultAllowedSetupFunctions,
33
- } from '@aztec/sequencer-client';
28
+ createForwarderL1TxUtilsFromEthSigner,
29
+ createL1TxUtilsWithBlobsFromEthSigner,
30
+ } from '@aztec/node-lib/factories';
31
+ import { type P2P, type P2PClientDeps, createP2PClient, getDefaultAllowedSetupFunctions } from '@aztec/p2p';
32
+ import { ProtocolContractAddress } from '@aztec/protocol-contracts';
33
+ import { GlobalVariableBuilder, SequencerClient, type SequencerPublisher } from '@aztec/sequencer-client';
34
34
  import { PublicProcessorFactory } from '@aztec/simulator/server';
35
+ import {
36
+ AttestationsBlockWatcher,
37
+ EpochPruneWatcher,
38
+ type SlasherClientInterface,
39
+ type Watcher,
40
+ createSlasher,
41
+ } from '@aztec/slasher';
42
+ import { CollectionLimitsConfig, PublicSimulatorConfig } from '@aztec/stdlib/avm';
35
43
  import { AztecAddress } from '@aztec/stdlib/aztec-address';
36
- import type { InBlock, L2Block, L2BlockNumber, L2BlockSource, NullifierWithBlockSource } from '@aztec/stdlib/block';
44
+ import { type BlockParameter, type DataInBlock, L2Block, L2BlockHash, type L2BlockSource } from '@aztec/stdlib/block';
45
+ import type { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
37
46
  import type {
38
47
  ContractClassPublic,
39
48
  ContractDataSource,
@@ -41,34 +50,44 @@ import type {
41
50
  NodeInfo,
42
51
  ProtocolContractAddresses,
43
52
  } from '@aztec/stdlib/contract';
44
- import type { GasFees } from '@aztec/stdlib/gas';
45
- import { computePublicDataTreeLeafSlot, siloNullifier } from '@aztec/stdlib/hash';
46
- import type { AztecNode, GetContractClassLogsResponse, GetPublicLogsResponse } from '@aztec/stdlib/interfaces/client';
53
+ import { GasFees } from '@aztec/stdlib/gas';
54
+ import { computePublicDataTreeLeafSlot } from '@aztec/stdlib/hash';
55
+ import {
56
+ type AztecNode,
57
+ type AztecNodeAdmin,
58
+ type AztecNodeAdminConfig,
59
+ AztecNodeAdminConfigSchema,
60
+ type GetContractClassLogsResponse,
61
+ type GetPublicLogsResponse,
62
+ } from '@aztec/stdlib/interfaces/client';
47
63
  import {
64
+ type AllowedElement,
48
65
  type ClientProtocolCircuitVerifier,
49
66
  type L2LogsSource,
50
- type ProverConfig,
51
- type SequencerConfig,
52
67
  type Service,
53
68
  type WorldStateSyncStatus,
54
69
  type WorldStateSynchronizer,
55
70
  tryStop,
56
71
  } from '@aztec/stdlib/interfaces/server';
57
- import type { LogFilter, PrivateLog, TxScopedL2Log } from '@aztec/stdlib/logs';
58
- import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging';
72
+ import type { LogFilter, SiloedTag, Tag, TxScopedL2Log } from '@aztec/stdlib/logs';
73
+ import { InboxLeaf, type L1ToL2MessageSource } from '@aztec/stdlib/messaging';
59
74
  import { P2PClientType } from '@aztec/stdlib/p2p';
60
- import { MerkleTreeId, NullifierMembershipWitness, PublicDataWitness } from '@aztec/stdlib/trees';
75
+ import type { Offense, SlashPayloadRound } from '@aztec/stdlib/slashing';
61
76
  import type { NullifierLeafPreimage, PublicDataTreeLeaf, PublicDataTreeLeafPreimage } from '@aztec/stdlib/trees';
77
+ import { MerkleTreeId, NullifierMembershipWitness, PublicDataWitness } from '@aztec/stdlib/trees';
62
78
  import {
63
79
  type BlockHeader,
80
+ type GlobalVariableBuilder as GlobalVariableBuilderInterface,
81
+ type IndexedTxEffect,
64
82
  PublicSimulationOutput,
65
83
  Tx,
66
- TxEffect,
67
84
  type TxHash,
68
85
  TxReceipt,
69
86
  TxStatus,
70
87
  type TxValidationResult,
71
88
  } from '@aztec/stdlib/tx';
89
+ import { getPackageVersion } from '@aztec/stdlib/update-checker';
90
+ import type { SingleValidatorStats, ValidatorsStats } from '@aztec/stdlib/validators';
72
91
  import {
73
92
  Attributes,
74
93
  type TelemetryClient,
@@ -77,18 +96,33 @@ import {
77
96
  getTelemetryClient,
78
97
  trackSpan,
79
98
  } from '@aztec/telemetry-client';
80
- import { createValidatorClient } from '@aztec/validator-client';
99
+ import {
100
+ FullNodeCheckpointsBuilder as CheckpointsBuilder,
101
+ FullNodeCheckpointsBuilder,
102
+ NodeKeystoreAdapter,
103
+ ValidatorClient,
104
+ createBlockProposalHandler,
105
+ createValidatorClient,
106
+ createValidatorForAcceptingTxs,
107
+ } from '@aztec/validator-client';
81
108
  import { createWorldStateSynchronizer } from '@aztec/world-state';
82
109
 
83
- import { type AztecNodeConfig, getPackageVersion } from './config.js';
110
+ import { createPublicClient, fallback, http } from 'viem';
111
+
112
+ import { createSentinel } from '../sentinel/factory.js';
113
+ import { Sentinel } from '../sentinel/sentinel.js';
114
+ import { type AztecNodeConfig, createKeyStoreForValidator } from './config.js';
84
115
  import { NodeMetrics } from './node_metrics.js';
85
116
 
86
117
  /**
87
118
  * The aztec node.
88
119
  */
89
- export class AztecNodeService implements AztecNode, Traceable {
90
- private packageVersion: string;
120
+ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
91
121
  private metrics: NodeMetrics;
122
+ private initialHeaderHashPromise: Promise<L2BlockHash> | undefined = undefined;
123
+
124
+ // Prevent two snapshot operations to happen simultaneously
125
+ private isUploadingSnapshot = false;
92
126
 
93
127
  public readonly tracer: Tracer;
94
128
 
@@ -99,17 +133,21 @@ export class AztecNodeService implements AztecNode, Traceable {
99
133
  protected readonly logsSource: L2LogsSource,
100
134
  protected readonly contractDataSource: ContractDataSource,
101
135
  protected readonly l1ToL2MessageSource: L1ToL2MessageSource,
102
- protected readonly nullifierSource: NullifierWithBlockSource,
103
136
  protected readonly worldStateSynchronizer: WorldStateSynchronizer,
104
137
  protected readonly sequencer: SequencerClient | undefined,
138
+ protected readonly slasherClient: SlasherClientInterface | undefined,
139
+ protected readonly validatorsSentinel: Sentinel | undefined,
140
+ protected readonly epochPruneWatcher: EpochPruneWatcher | undefined,
105
141
  protected readonly l1ChainId: number,
106
142
  protected readonly version: number,
107
- protected readonly globalVariableBuilder: GlobalVariableBuilder,
143
+ protected readonly globalVariableBuilder: GlobalVariableBuilderInterface,
144
+ protected readonly epochCache: EpochCacheInterface,
145
+ protected readonly packageVersion: string,
108
146
  private proofVerifier: ClientProtocolCircuitVerifier,
109
147
  private telemetry: TelemetryClient = getTelemetryClient(),
110
148
  private log = createLogger('node'),
149
+ private blobClient?: BlobClientInterface,
111
150
  ) {
112
- this.packageVersion = getPackageVersion();
113
151
  this.metrics = new NodeMetrics(telemetry, 'AztecNodeService');
114
152
  this.tracer = telemetry.getTracer('AztecNodeService');
115
153
 
@@ -132,31 +170,103 @@ export class AztecNodeService implements AztecNode, Traceable {
132
170
  * @returns - A fully synced Aztec Node for use in development/testing.
133
171
  */
134
172
  public static async createAndSync(
135
- config: AztecNodeConfig,
173
+ inputConfig: AztecNodeConfig,
136
174
  deps: {
137
175
  telemetry?: TelemetryClient;
138
176
  logger?: Logger;
139
177
  publisher?: SequencerPublisher;
140
178
  dateProvider?: DateProvider;
141
- blobSinkClient?: BlobSinkClientInterface;
179
+ p2pClientDeps?: P2PClientDeps<P2PClientType.Full>;
142
180
  } = {},
143
181
  options: {
144
182
  prefilledPublicData?: PublicDataTreeLeaf[];
183
+ dontStartSequencer?: boolean;
145
184
  } = {},
146
185
  ): Promise<AztecNodeService> {
147
- const telemetry = deps.telemetry ?? getTelemetryClient();
186
+ const config = { ...inputConfig }; // Copy the config so we dont mutate the input object
148
187
  const log = deps.logger ?? createLogger('node');
188
+ const packageVersion = getPackageVersion() ?? '';
189
+ const telemetry = deps.telemetry ?? getTelemetryClient();
149
190
  const dateProvider = deps.dateProvider ?? new DateProvider();
150
- const blobSinkClient = deps.blobSinkClient ?? createBlobSinkClient(config);
151
191
  const ethereumChain = createEthereumChain(config.l1RpcUrls, config.l1ChainId);
152
- //validate that the actual chain id matches that specified in configuration
192
+
193
+ // Build a key store from file if given or from environment otherwise
194
+ let keyStoreManager: KeystoreManager | undefined;
195
+ const keyStoreProvided = config.keyStoreDirectory !== undefined && config.keyStoreDirectory.length > 0;
196
+ if (keyStoreProvided) {
197
+ const keyStores = loadKeystores(config.keyStoreDirectory!);
198
+ keyStoreManager = new KeystoreManager(mergeKeystores(keyStores));
199
+ } else {
200
+ const keyStore = createKeyStoreForValidator(config);
201
+ if (keyStore) {
202
+ keyStoreManager = new KeystoreManager(keyStore);
203
+ }
204
+ }
205
+
206
+ await keyStoreManager?.validateSigners();
207
+
208
+ // If we are a validator, verify our configuration before doing too much more.
209
+ if (!config.disableValidator) {
210
+ if (keyStoreManager === undefined) {
211
+ throw new Error('Failed to create key store, a requirement for running a validator');
212
+ }
213
+ if (!keyStoreProvided) {
214
+ log.warn(
215
+ 'KEY STORE CREATED FROM ENVIRONMENT, IT IS RECOMMENDED TO USE A FILE-BASED KEY STORE IN PRODUCTION ENVIRONMENTS',
216
+ );
217
+ }
218
+ ValidatorClient.validateKeyStoreConfiguration(keyStoreManager, log);
219
+ }
220
+
221
+ // validate that the actual chain id matches that specified in configuration
153
222
  if (config.l1ChainId !== ethereumChain.chainInfo.id) {
154
223
  throw new Error(
155
224
  `RPC URL configured for chain id ${ethereumChain.chainInfo.id} but expected id ${config.l1ChainId}`,
156
225
  );
157
226
  }
158
227
 
159
- const archiver = await createArchiver(config, blobSinkClient, { blockUntilSync: true }, telemetry);
228
+ const publicClient = createPublicClient({
229
+ chain: ethereumChain.chainInfo,
230
+ transport: fallback(config.l1RpcUrls.map((url: string) => http(url, { batch: false }))),
231
+ pollingInterval: config.viemPollingIntervalMS,
232
+ });
233
+
234
+ const l1ContractsAddresses = await RegistryContract.collectAddresses(
235
+ publicClient,
236
+ config.l1Contracts.registryAddress,
237
+ config.rollupVersion ?? 'canonical',
238
+ );
239
+
240
+ // Overwrite the passed in vars.
241
+ config.l1Contracts = { ...config.l1Contracts, ...l1ContractsAddresses };
242
+
243
+ const rollupContract = new RollupContract(publicClient, config.l1Contracts.rollupAddress.toString());
244
+ const [l1GenesisTime, slotDuration, rollupVersionFromRollup] = await Promise.all([
245
+ rollupContract.getL1GenesisTime(),
246
+ rollupContract.getSlotDuration(),
247
+ rollupContract.getVersion(),
248
+ ] as const);
249
+
250
+ config.rollupVersion ??= Number(rollupVersionFromRollup);
251
+
252
+ if (config.rollupVersion !== Number(rollupVersionFromRollup)) {
253
+ log.warn(
254
+ `Registry looked up and returned a rollup with version (${config.rollupVersion}), but this does not match with version detected from the rollup directly: (${rollupVersionFromRollup}).`,
255
+ );
256
+ }
257
+
258
+ const blobClient = await createBlobClientWithFileStores(config, createLogger('node:blob-client:client'));
259
+
260
+ // attempt snapshot sync if possible
261
+ await trySnapshotSync(config, log);
262
+
263
+ const epochCache = await EpochCache.create(config.l1Contracts.rollupAddress, config, { dateProvider });
264
+
265
+ const archiver = await createArchiver(
266
+ config,
267
+ { blobClient, epochCache, telemetry, dateProvider },
268
+ { blockUntilSync: !config.skipArchiverInitialSync },
269
+ );
160
270
 
161
271
  // now create the merkle trees and the world state synchronizer
162
272
  const worldStateSynchronizer = await createWorldStateSynchronizer(
@@ -165,12 +275,14 @@ export class AztecNodeService implements AztecNode, Traceable {
165
275
  options.prefilledPublicData,
166
276
  telemetry,
167
277
  );
168
- const proofVerifier = config.realProofs ? await BBCircuitVerifier.new(config) : new TestCircuitVerifier();
278
+ const circuitVerifier =
279
+ config.realProofs || config.debugForceTxProofVerification
280
+ ? await BBCircuitVerifier.new(config)
281
+ : new TestCircuitVerifier(config.proverTestVerificationDelayMs);
169
282
  if (!config.realProofs) {
170
283
  log.warn(`Aztec node is accepting fake proofs`);
171
284
  }
172
-
173
- const epochCache = await EpochCache.create(config.l1Contracts.rollupAddress, config, { dateProvider });
285
+ const proofVerifier = new QueuedIVCVerifier(config, circuitVerifier);
174
286
 
175
287
  // create the tx pool and the p2p client, which will need the l2 block source
176
288
  const p2pClient = await createP2PClient(
@@ -180,33 +292,187 @@ export class AztecNodeService implements AztecNode, Traceable {
180
292
  proofVerifier,
181
293
  worldStateSynchronizer,
182
294
  epochCache,
295
+ packageVersion,
296
+ dateProvider,
183
297
  telemetry,
298
+ deps.p2pClientDeps,
184
299
  );
185
300
 
186
- const slasherClient = createSlasherClient(config, archiver, telemetry);
301
+ // We should really not be modifying the config object
302
+ config.txPublicSetupAllowList = config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions());
187
303
 
188
- // start both and wait for them to sync from the block source
189
- await Promise.all([p2pClient.start(), worldStateSynchronizer.start(), slasherClient.start()]);
190
- log.verbose(`All Aztec Node subsystems synced`);
304
+ // Create FullNodeCheckpointsBuilder for validator and non-validator block proposal handling
305
+ const validatorCheckpointsBuilder = new FullNodeCheckpointsBuilder(
306
+ { ...config, l1GenesisTime, slotDuration: Number(slotDuration) },
307
+ worldStateSynchronizer,
308
+ archiver,
309
+ dateProvider,
310
+ telemetry,
311
+ );
191
312
 
192
- const validatorClient = createValidatorClient(config, { p2pClient, telemetry, dateProvider, epochCache });
313
+ // We'll accumulate sentinel watchers here
314
+ const watchers: Watcher[] = [];
193
315
 
194
- // now create the sequencer
195
- const sequencer = config.disableValidator
196
- ? undefined
197
- : await SequencerClient.new(config, {
198
- ...deps,
199
- validatorClient,
200
- p2pClient,
201
- worldStateSynchronizer,
202
- slasherClient,
203
- contractDataSource: archiver,
204
- l2BlockSource: archiver,
205
- l1ToL2MessageSource: archiver,
206
- telemetry,
207
- dateProvider,
208
- blobSinkClient,
209
- });
316
+ // Create validator client if required
317
+ const validatorClient = await createValidatorClient(config, {
318
+ checkpointsBuilder: validatorCheckpointsBuilder,
319
+ worldState: worldStateSynchronizer,
320
+ p2pClient,
321
+ telemetry,
322
+ dateProvider,
323
+ epochCache,
324
+ blockSource: archiver,
325
+ l1ToL2MessageSource: archiver,
326
+ keyStoreManager,
327
+ blobClient,
328
+ });
329
+
330
+ // If we have a validator client, register it as a source of offenses for the slasher,
331
+ // and have it register callbacks on the p2p client *before* we start it, otherwise messages
332
+ // like attestations or auths will fail.
333
+ if (validatorClient) {
334
+ watchers.push(validatorClient);
335
+ if (!options.dontStartSequencer) {
336
+ await validatorClient.registerHandlers();
337
+ }
338
+ }
339
+
340
+ // If there's no validator client but alwaysReexecuteBlockProposals is enabled,
341
+ // create a BlockProposalHandler to reexecute block proposals for monitoring
342
+ if (!validatorClient && config.alwaysReexecuteBlockProposals) {
343
+ log.info('Setting up block proposal reexecution for monitoring');
344
+ createBlockProposalHandler(config, {
345
+ checkpointsBuilder: validatorCheckpointsBuilder,
346
+ worldState: worldStateSynchronizer,
347
+ epochCache,
348
+ blockSource: archiver,
349
+ l1ToL2MessageSource: archiver,
350
+ p2pClient,
351
+ dateProvider,
352
+ telemetry,
353
+ }).registerForReexecution(p2pClient);
354
+ }
355
+
356
+ // Start world state and wait for it to sync to the archiver.
357
+ await worldStateSynchronizer.start();
358
+
359
+ // Start p2p. Note that it depends on world state to be running.
360
+ await p2pClient.start();
361
+
362
+ const validatorsSentinel = await createSentinel(epochCache, archiver, p2pClient, config);
363
+ if (validatorsSentinel && config.slashInactivityPenalty > 0n) {
364
+ watchers.push(validatorsSentinel);
365
+ }
366
+
367
+ let epochPruneWatcher: EpochPruneWatcher | undefined;
368
+ if (config.slashPrunePenalty > 0n || config.slashDataWithholdingPenalty > 0n) {
369
+ epochPruneWatcher = new EpochPruneWatcher(
370
+ archiver,
371
+ archiver,
372
+ epochCache,
373
+ p2pClient.getTxProvider(),
374
+ validatorCheckpointsBuilder,
375
+ config,
376
+ );
377
+ watchers.push(epochPruneWatcher);
378
+ }
379
+
380
+ // We assume we want to slash for invalid attestations unless all max penalties are set to 0
381
+ let attestationsBlockWatcher: AttestationsBlockWatcher | undefined;
382
+ if (config.slashProposeInvalidAttestationsPenalty > 0n || config.slashAttestDescendantOfInvalidPenalty > 0n) {
383
+ attestationsBlockWatcher = new AttestationsBlockWatcher(archiver, epochCache, config);
384
+ watchers.push(attestationsBlockWatcher);
385
+ }
386
+
387
+ // Start p2p-related services once the archiver has completed sync
388
+ void archiver
389
+ .waitForInitialSync()
390
+ .then(async () => {
391
+ await p2pClient.start();
392
+ await validatorsSentinel?.start();
393
+ await epochPruneWatcher?.start();
394
+ await attestationsBlockWatcher?.start();
395
+ log.info(`All p2p services started`);
396
+ })
397
+ .catch(err => log.error('Failed to start p2p services after archiver sync', err));
398
+
399
+ // Validator enabled, create/start relevant service
400
+ let sequencer: SequencerClient | undefined;
401
+ let slasherClient: SlasherClientInterface | undefined;
402
+ if (!config.disableValidator && validatorClient) {
403
+ // We create a slasher only if we have a sequencer, since all slashing actions go through the sequencer publisher
404
+ // as they are executed when the node is selected as proposer.
405
+ const validatorAddresses = keyStoreManager
406
+ ? NodeKeystoreAdapter.fromKeyStoreManager(keyStoreManager).getAddresses()
407
+ : [];
408
+
409
+ slasherClient = await createSlasher(
410
+ config,
411
+ config.l1Contracts,
412
+ getPublicClient(config),
413
+ watchers,
414
+ dateProvider,
415
+ epochCache,
416
+ validatorAddresses,
417
+ undefined, // logger
418
+ );
419
+ await slasherClient.start();
420
+
421
+ const l1TxUtils = config.publisherForwarderAddress
422
+ ? await createForwarderL1TxUtilsFromEthSigner(
423
+ publicClient,
424
+ keyStoreManager!.createAllValidatorPublisherSigners(),
425
+ config.publisherForwarderAddress,
426
+ { ...config, scope: 'sequencer' },
427
+ { telemetry, logger: log.createChild('l1-tx-utils'), dateProvider },
428
+ )
429
+ : await createL1TxUtilsWithBlobsFromEthSigner(
430
+ publicClient,
431
+ keyStoreManager!.createAllValidatorPublisherSigners(),
432
+ { ...config, scope: 'sequencer' },
433
+ { telemetry, logger: log.createChild('l1-tx-utils'), dateProvider },
434
+ );
435
+
436
+ // Create and start the sequencer client
437
+ const checkpointsBuilder = new CheckpointsBuilder(
438
+ { ...config, l1GenesisTime, slotDuration: Number(slotDuration) },
439
+ worldStateSynchronizer,
440
+ archiver,
441
+ dateProvider,
442
+ telemetry,
443
+ );
444
+
445
+ sequencer = await SequencerClient.new(config, {
446
+ ...deps,
447
+ epochCache,
448
+ l1TxUtils,
449
+ validatorClient,
450
+ p2pClient,
451
+ worldStateSynchronizer,
452
+ slasherClient,
453
+ checkpointsBuilder,
454
+ l2BlockSource: archiver,
455
+ l1ToL2MessageSource: archiver,
456
+ telemetry,
457
+ dateProvider,
458
+ blobClient,
459
+ nodeKeyStore: keyStoreManager!,
460
+ });
461
+ }
462
+
463
+ if (!options.dontStartSequencer && sequencer) {
464
+ await sequencer.start();
465
+ log.verbose(`Sequencer started`);
466
+ } else if (sequencer) {
467
+ log.warn(`Sequencer created but not started`);
468
+ }
469
+
470
+ const globalVariableBuilder = new GlobalVariableBuilder({
471
+ ...config,
472
+ rollupVersion: BigInt(config.rollupVersion),
473
+ l1GenesisTime,
474
+ slotDuration: Number(slotDuration),
475
+ });
210
476
 
211
477
  return new AztecNodeService(
212
478
  config,
@@ -215,15 +481,20 @@ export class AztecNodeService implements AztecNode, Traceable {
215
481
  archiver,
216
482
  archiver,
217
483
  archiver,
218
- archiver,
219
484
  worldStateSynchronizer,
220
485
  sequencer,
486
+ slasherClient,
487
+ validatorsSentinel,
488
+ epochPruneWatcher,
221
489
  ethereumChain.chainInfo.id,
222
- config.version,
223
- new GlobalVariableBuilder(config),
490
+ config.rollupVersion,
491
+ globalVariableBuilder,
492
+ epochCache,
493
+ packageVersion,
224
494
  proofVerifier,
225
495
  telemetry,
226
496
  log,
497
+ blobClient,
227
498
  );
228
499
  }
229
500
 
@@ -259,6 +530,10 @@ export class AztecNodeService implements AztecNode, Traceable {
259
530
  return Promise.resolve(this.p2pClient.getEnr()?.encodeTxt());
260
531
  }
261
532
 
533
+ public async getAllowedPublicSetup(): Promise<AllowedElement[]> {
534
+ return this.config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions());
535
+ }
536
+
262
537
  /**
263
538
  * Method to determine if the node is ready to accept transactions.
264
539
  * @returns - Flag indicating the readiness for tx submission.
@@ -268,20 +543,19 @@ export class AztecNodeService implements AztecNode, Traceable {
268
543
  }
269
544
 
270
545
  public async getNodeInfo(): Promise<NodeInfo> {
271
- const [nodeVersion, protocolVersion, chainId, enr, contractAddresses, protocolContractAddresses] =
272
- await Promise.all([
273
- this.getNodeVersion(),
274
- this.getVersion(),
275
- this.getChainId(),
276
- this.getEncodedEnr(),
277
- this.getL1ContractAddresses(),
278
- this.getProtocolContractAddresses(),
279
- ]);
546
+ const [nodeVersion, rollupVersion, chainId, enr, contractAddresses, protocolContractAddresses] = await Promise.all([
547
+ this.getNodeVersion(),
548
+ this.getVersion(),
549
+ this.getChainId(),
550
+ this.getEncodedEnr(),
551
+ this.getL1ContractAddresses(),
552
+ this.getProtocolContractAddresses(),
553
+ ]);
280
554
 
281
555
  const nodeInfo: NodeInfo = {
282
556
  nodeVersion,
283
557
  l1ChainId: chainId,
284
- protocolVersion,
558
+ rollupVersion,
285
559
  enr,
286
560
  l1ContractAddresses: contractAddresses,
287
561
  protocolContractAddresses: protocolContractAddresses,
@@ -291,12 +565,46 @@ export class AztecNodeService implements AztecNode, Traceable {
291
565
  }
292
566
 
293
567
  /**
294
- * Get a block specified by its number.
295
- * @param number - The block number being requested.
568
+ * Get a block specified by its block number, block hash, or 'latest'.
569
+ * @param block - The block parameter (block number, block hash, or 'latest').
570
+ * @returns The requested block.
571
+ */
572
+ public async getBlock(block: BlockParameter): Promise<L2Block | undefined> {
573
+ if (L2BlockHash.isL2BlockHash(block)) {
574
+ return this.getBlockByHash(Fr.fromBuffer(block.toBuffer()));
575
+ }
576
+ const blockNumber = block === 'latest' ? await this.getBlockNumber() : (block as BlockNumber);
577
+ if (blockNumber === BlockNumber.ZERO) {
578
+ return this.buildInitialBlock();
579
+ }
580
+ return await this.blockSource.getL2Block(blockNumber);
581
+ }
582
+
583
+ /**
584
+ * Get a block specified by its hash.
585
+ * @param blockHash - The block hash being requested.
586
+ * @returns The requested block.
587
+ */
588
+ public async getBlockByHash(blockHash: Fr): Promise<L2Block | undefined> {
589
+ const initialBlockHash = await this.#getInitialHeaderHash();
590
+ if (blockHash.equals(Fr.fromBuffer(initialBlockHash.toBuffer()))) {
591
+ return this.buildInitialBlock();
592
+ }
593
+ return await this.blockSource.getL2BlockByHash(blockHash);
594
+ }
595
+
596
+ private buildInitialBlock(): L2Block {
597
+ const initialHeader = this.worldStateSynchronizer.getCommitted().getInitialHeader();
598
+ return L2Block.empty(initialHeader);
599
+ }
600
+
601
+ /**
602
+ * Get a block specified by its archive root.
603
+ * @param archive - The archive root being requested.
296
604
  * @returns The requested block.
297
605
  */
298
- public async getBlock(number: number): Promise<L2Block | undefined> {
299
- return await this.blockSource.getBlock(number);
606
+ public async getBlockByArchive(archive: Fr): Promise<L2Block | undefined> {
607
+ return await this.blockSource.getL2BlockByArchive(archive);
300
608
  }
301
609
 
302
610
  /**
@@ -305,30 +613,50 @@ export class AztecNodeService implements AztecNode, Traceable {
305
613
  * @param limit - The maximum number of blocks to obtain.
306
614
  * @returns The blocks requested.
307
615
  */
308
- public async getBlocks(from: number, limit: number): Promise<L2Block[]> {
309
- return (await this.blockSource.getBlocks(from, limit)) ?? [];
616
+ public async getBlocks(from: BlockNumber, limit: number): Promise<L2Block[]> {
617
+ return (await this.blockSource.getBlocks(from, BlockNumber(limit))) ?? [];
618
+ }
619
+
620
+ public async getCheckpoints(from: CheckpointNumber, limit: number): Promise<PublishedCheckpoint[]> {
621
+ return (await this.blockSource.getCheckpoints(from, limit)) ?? [];
622
+ }
623
+
624
+ public async getCheckpointedBlocks(from: BlockNumber, limit: number) {
625
+ return (await this.blockSource.getCheckpointedBlocks(from, limit)) ?? [];
310
626
  }
311
627
 
312
628
  /**
313
- * Method to fetch the current base fees.
314
- * @returns The current base fees.
629
+ * Method to fetch the current min L2 fees.
630
+ * @returns The current min L2 fees.
315
631
  */
316
- public async getCurrentBaseFees(): Promise<GasFees> {
317
- return await this.globalVariableBuilder.getCurrentBaseFees();
632
+ public async getCurrentMinFees(): Promise<GasFees> {
633
+ return await this.globalVariableBuilder.getCurrentMinFees();
634
+ }
635
+
636
+ public async getMaxPriorityFees(): Promise<GasFees> {
637
+ for await (const tx of this.p2pClient.iteratePendingTxs()) {
638
+ return tx.getGasSettings().maxPriorityFeesPerGas;
639
+ }
640
+
641
+ return GasFees.from({ feePerDaGas: 0n, feePerL2Gas: 0n });
318
642
  }
319
643
 
320
644
  /**
321
- * Method to fetch the current block number.
645
+ * Method to fetch the latest block number synchronized by the node.
322
646
  * @returns The block number.
323
647
  */
324
- public async getBlockNumber(): Promise<number> {
648
+ public async getBlockNumber(): Promise<BlockNumber> {
325
649
  return await this.blockSource.getBlockNumber();
326
650
  }
327
651
 
328
- public async getProvenBlockNumber(): Promise<number> {
652
+ public async getProvenBlockNumber(): Promise<BlockNumber> {
329
653
  return await this.blockSource.getProvenBlockNumber();
330
654
  }
331
655
 
656
+ public async getCheckpointedBlockNumber(): Promise<BlockNumber> {
657
+ return await this.blockSource.getCheckpointedL2BlockNumber();
658
+ }
659
+
332
660
  /**
333
661
  * Method to fetch the version of the package.
334
662
  * @returns The node package version
@@ -353,49 +681,53 @@ export class AztecNodeService implements AztecNode, Traceable {
353
681
  return Promise.resolve(this.l1ChainId);
354
682
  }
355
683
 
356
- public async getContractClass(id: Fr): Promise<ContractClassPublic | undefined> {
357
- const klazz = await this.contractDataSource.getContractClass(id);
358
-
359
- // TODO(#10007): Remove this check. This is needed only because we're manually registering
360
- // some contracts in the archiver so they are available to all nodes (see `registerCommonContracts`
361
- // in `archiver/src/factory.ts`), but we still want clients to send the registration tx in order
362
- // to emit the corresponding nullifier, which is now being checked. Note that this method
363
- // is only called by the PXE to check if a contract is publicly registered.
364
- if (klazz) {
365
- const classNullifier = await siloNullifier(AztecAddress.fromNumber(REGISTERER_CONTRACT_ADDRESS), id);
366
- const worldState = await this.#getWorldState('latest');
367
- const [index] = await worldState.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, [classNullifier.toBuffer()]);
368
- this.log.debug(`Registration nullifier ${classNullifier} for contract class ${id} found at index ${index}`);
369
- if (index === undefined) {
370
- return undefined;
371
- }
372
- }
373
-
374
- return klazz;
684
+ public getContractClass(id: Fr): Promise<ContractClassPublic | undefined> {
685
+ return this.contractDataSource.getContractClass(id);
375
686
  }
376
687
 
377
688
  public getContract(address: AztecAddress): Promise<ContractInstanceWithAddress | undefined> {
378
689
  return this.contractDataSource.getContract(address);
379
690
  }
380
691
 
381
- /**
382
- * Retrieves all private logs from up to `limit` blocks, starting from the block number `from`.
383
- * @param from - The block number from which to begin retrieving logs.
384
- * @param limit - The maximum number of blocks to retrieve logs from.
385
- * @returns An array of private logs from the specified range of blocks.
386
- */
387
- public getPrivateLogs(from: number, limit: number): Promise<PrivateLog[]> {
388
- return this.logsSource.getPrivateLogs(from, limit);
692
+ public async getPrivateLogsByTags(
693
+ tags: SiloedTag[],
694
+ page?: number,
695
+ referenceBlock?: L2BlockHash,
696
+ ): Promise<TxScopedL2Log[][]> {
697
+ if (referenceBlock) {
698
+ const initialBlockHash = await this.#getInitialHeaderHash();
699
+ if (!referenceBlock.equals(initialBlockHash)) {
700
+ const blockHashFr = Fr.fromBuffer(referenceBlock.toBuffer());
701
+ const header = await this.blockSource.getBlockHeaderByHash(blockHashFr);
702
+ if (!header) {
703
+ throw new Error(
704
+ `Block ${referenceBlock.toString()} not found in the node. This might indicate a reorg has occurred.`,
705
+ );
706
+ }
707
+ }
708
+ }
709
+ return this.logsSource.getPrivateLogsByTags(tags, page);
389
710
  }
390
711
 
391
- /**
392
- * Gets all logs that match any of the received tags (i.e. logs with their first field equal to a tag).
393
- * @param tags - The tags to filter the logs by.
394
- * @returns For each received tag, an array of matching logs is returned. An empty array implies no logs match
395
- * that tag.
396
- */
397
- public getLogsByTags(tags: Fr[]): Promise<TxScopedL2Log[][]> {
398
- return this.logsSource.getLogsByTags(tags);
712
+ public async getPublicLogsByTagsFromContract(
713
+ contractAddress: AztecAddress,
714
+ tags: Tag[],
715
+ page?: number,
716
+ referenceBlock?: L2BlockHash,
717
+ ): Promise<TxScopedL2Log[][]> {
718
+ if (referenceBlock) {
719
+ const initialBlockHash = await this.#getInitialHeaderHash();
720
+ if (!referenceBlock.equals(initialBlockHash)) {
721
+ const blockHashFr = Fr.fromBuffer(referenceBlock.toBuffer());
722
+ const header = await this.blockSource.getBlockHeaderByHash(blockHashFr);
723
+ if (!header) {
724
+ throw new Error(
725
+ `Block ${referenceBlock.toString()} not found in the node. This might indicate a reorg has occurred.`,
726
+ );
727
+ }
728
+ }
729
+ }
730
+ return this.logsSource.getPublicLogsByTagsFromContract(contractAddress, tags, page);
399
731
  }
400
732
 
401
733
  /**
@@ -421,17 +753,19 @@ export class AztecNodeService implements AztecNode, Traceable {
421
753
  * @param tx - The transaction to be submitted.
422
754
  */
423
755
  public async sendTx(tx: Tx) {
756
+ await this.#sendTx(tx);
757
+ }
758
+
759
+ async #sendTx(tx: Tx) {
424
760
  const timer = new Timer();
425
- const txHash = (await tx.getTxHash()).toString();
761
+ const txHash = tx.getTxHash().toString();
426
762
 
427
763
  const valid = await this.isValidTx(tx);
428
764
  if (valid.result !== 'valid') {
429
765
  const reason = valid.reason.join(', ');
430
766
  this.metrics.receivedTx(timer.ms(), false);
431
- this.log.warn(`Invalid tx ${txHash}: ${reason}`, { txHash });
432
- // TODO(#10967): Throw when receiving an invalid tx instead of just returning
433
- // throw new Error(`Invalid tx: ${reason}`);
434
- return;
767
+ this.log.warn(`Received invalid tx ${txHash}: ${reason}`, { txHash });
768
+ throw new Error(`Invalid tx: ${reason}`);
435
769
  }
436
770
 
437
771
  await this.p2pClient!.sendTx(tx);
@@ -440,24 +774,29 @@ export class AztecNodeService implements AztecNode, Traceable {
440
774
  }
441
775
 
442
776
  public async getTxReceipt(txHash: TxHash): Promise<TxReceipt> {
443
- let txReceipt = new TxReceipt(txHash, TxStatus.DROPPED, 'Tx dropped by P2P node.');
444
-
445
- // We first check if the tx is in pending (instead of first checking if it is mined) because if we first check
446
- // for mined and then for pending there could be a race condition where the tx is mined between the two checks
447
- // and we would incorrectly return a TxReceipt with status DROPPED
448
- if ((await this.p2pClient.getTxStatus(txHash)) === 'pending') {
449
- txReceipt = new TxReceipt(txHash, TxStatus.PENDING, '');
450
- }
777
+ // Check the tx pool status first. If the tx is known to the pool (pending or mined), we'll use that
778
+ // as a fallback if we don't find a settled receipt in the archiver.
779
+ const txPoolStatus = await this.p2pClient.getTxStatus(txHash);
780
+ const isKnownToPool = txPoolStatus === 'pending' || txPoolStatus === 'mined';
451
781
 
782
+ // Then get the actual tx from the archiver, which tracks every tx in a mined block.
452
783
  const settledTxReceipt = await this.blockSource.getSettledTxReceipt(txHash);
784
+
453
785
  if (settledTxReceipt) {
454
- txReceipt = settledTxReceipt;
786
+ // If the archiver has the receipt then return it.
787
+ return settledTxReceipt;
788
+ } else if (isKnownToPool) {
789
+ // If the tx is in the pool but not in the archiver, it's pending.
790
+ // This handles race conditions between archiver and p2p, where the archiver
791
+ // has pruned the block in which a tx was mined, but p2p has not caught up yet.
792
+ return new TxReceipt(txHash, TxStatus.PENDING, undefined, undefined);
793
+ } else {
794
+ // Otherwise, if we don't know the tx, we consider it dropped.
795
+ return new TxReceipt(txHash, TxStatus.DROPPED, undefined, 'Tx dropped by P2P node');
455
796
  }
456
-
457
- return txReceipt;
458
797
  }
459
798
 
460
- public getTxEffect(txHash: TxHash): Promise<InBlock<TxEffect> | undefined> {
799
+ public getTxEffect(txHash: TxHash): Promise<IndexedTxEffect | undefined> {
461
800
  return this.blockSource.getTxEffect(txHash);
462
801
  }
463
802
 
@@ -465,139 +804,183 @@ export class AztecNodeService implements AztecNode, Traceable {
465
804
  * Method to stop the aztec node.
466
805
  */
467
806
  public async stop() {
468
- this.log.info(`Stopping`);
469
- await this.sequencer?.stop();
470
- await this.p2pClient.stop();
471
- await this.worldStateSynchronizer.stop();
807
+ this.log.info(`Stopping Aztec Node`);
808
+ await tryStop(this.validatorsSentinel);
809
+ await tryStop(this.epochPruneWatcher);
810
+ await tryStop(this.slasherClient);
811
+ await tryStop(this.proofVerifier);
812
+ await tryStop(this.sequencer);
813
+ await tryStop(this.p2pClient);
814
+ await tryStop(this.worldStateSynchronizer);
472
815
  await tryStop(this.blockSource);
473
- await this.telemetry.stop();
474
- this.log.info(`Stopped`);
816
+ await tryStop(this.blobClient);
817
+ await tryStop(this.telemetry);
818
+ this.log.info(`Stopped Aztec Node`);
819
+ }
820
+
821
+ /**
822
+ * Returns the blob client used by this node.
823
+ * @internal - Exposed for testing purposes only.
824
+ */
825
+ public getBlobClient(): BlobClientInterface | undefined {
826
+ return this.blobClient;
475
827
  }
476
828
 
477
829
  /**
478
830
  * Method to retrieve pending txs.
831
+ * @param limit - The number of items to returns
832
+ * @param after - The last known pending tx. Used for pagination
479
833
  * @returns - The pending txs.
480
834
  */
481
- public getPendingTxs() {
482
- return this.p2pClient!.getPendingTxs();
835
+ public getPendingTxs(limit?: number, after?: TxHash): Promise<Tx[]> {
836
+ return this.p2pClient!.getPendingTxs(limit, after);
483
837
  }
484
838
 
485
- public async getPendingTxCount() {
486
- const pendingTxs = await this.getPendingTxs();
487
- return pendingTxs.length;
839
+ public getPendingTxCount(): Promise<number> {
840
+ return this.p2pClient!.getPendingTxCount();
488
841
  }
489
842
 
490
843
  /**
491
- * Method to retrieve a single tx from the mempool or unfinalised chain.
844
+ * Method to retrieve a single tx from the mempool or unfinalized chain.
492
845
  * @param txHash - The transaction hash to return.
493
846
  * @returns - The tx if it exists.
494
847
  */
495
- public getTxByHash(txHash: TxHash) {
848
+ public getTxByHash(txHash: TxHash): Promise<Tx | undefined> {
496
849
  return Promise.resolve(this.p2pClient!.getTxByHashFromPool(txHash));
497
850
  }
498
851
 
499
852
  /**
500
- * Method to retrieve txs from the mempool or unfinalised chain.
853
+ * Method to retrieve txs from the mempool or unfinalized chain.
501
854
  * @param txHash - The transaction hash to return.
502
855
  * @returns - The txs if it exists.
503
856
  */
504
- public async getTxsByHash(txHashes: TxHash[]) {
857
+ public async getTxsByHash(txHashes: TxHash[]): Promise<Tx[]> {
505
858
  return compactArray(await Promise.all(txHashes.map(txHash => this.getTxByHash(txHash))));
506
859
  }
507
860
 
508
- /**
509
- * Find the indexes of the given leaves in the given tree.
510
- * @param blockNumber - The block number at which to get the data or 'latest' for latest data
511
- * @param treeId - The tree to search in.
512
- * @param leafValue - The values to search for
513
- * @returns The indexes of the given leaves in the given tree or undefined if not found.
514
- */
515
861
  public async findLeavesIndexes(
516
- blockNumber: L2BlockNumber,
862
+ block: BlockParameter,
517
863
  treeId: MerkleTreeId,
518
864
  leafValues: Fr[],
519
- ): Promise<(bigint | undefined)[]> {
520
- const committedDb = await this.#getWorldState(blockNumber);
521
- return await committedDb.findLeafIndices(
865
+ ): Promise<(DataInBlock<bigint> | undefined)[]> {
866
+ const committedDb = await this.#getWorldState(block);
867
+ const maybeIndices = await committedDb.findLeafIndices(
522
868
  treeId,
523
869
  leafValues.map(x => x.toBuffer()),
524
870
  );
525
- }
871
+ // We filter out undefined values
872
+ const indices = maybeIndices.filter(x => x !== undefined) as bigint[];
526
873
 
527
- /**
528
- * Find the block numbers of the given leaf indices in the given tree.
529
- * @param blockNumber - The block number at which to get the data or 'latest' for latest data
530
- * @param treeId - The tree to search in.
531
- * @param leafIndices - The values to search for
532
- * @returns The indexes of the given leaves in the given tree or undefined if not found.
533
- */
534
- public async findBlockNumbersForIndexes(
535
- blockNumber: L2BlockNumber,
536
- treeId: MerkleTreeId,
537
- leafIndices: bigint[],
538
- ): Promise<(bigint | undefined)[]> {
539
- const committedDb = await this.#getWorldState(blockNumber);
540
- return await committedDb.getBlockNumbersForLeafIndices(treeId, leafIndices);
541
- }
874
+ // Now we find the block numbers for the indices
875
+ const blockNumbers = await committedDb.getBlockNumbersForLeafIndices(treeId, indices);
542
876
 
543
- public async findNullifiersIndexesWithBlock(
544
- blockNumber: L2BlockNumber,
545
- nullifiers: Fr[],
546
- ): Promise<(InBlock<bigint> | undefined)[]> {
547
- if (blockNumber === 'latest') {
548
- blockNumber = await this.getBlockNumber();
877
+ // If any of the block numbers are undefined, we throw an error.
878
+ for (let i = 0; i < indices.length; i++) {
879
+ if (blockNumbers[i] === undefined) {
880
+ throw new Error(`Block number is undefined for leaf index ${indices[i]} in tree ${MerkleTreeId[treeId]}`);
881
+ }
549
882
  }
550
- return this.nullifierSource.findNullifiersIndexesWithBlock(blockNumber, nullifiers);
883
+
884
+ // Get unique block numbers in order to optimize num calls to getLeafValue function.
885
+ const uniqueBlockNumbers = [...new Set(blockNumbers.filter(x => x !== undefined))];
886
+
887
+ // Now we obtain the block hashes from the archive tree by calling await `committedDb.getLeafValue(treeId, index)`
888
+ // (note that block number corresponds to the leaf index in the archive tree).
889
+ const blockHashes = await Promise.all(
890
+ uniqueBlockNumbers.map(blockNumber => {
891
+ return committedDb.getLeafValue(MerkleTreeId.ARCHIVE, BigInt(blockNumber));
892
+ }),
893
+ );
894
+
895
+ // If any of the block hashes are undefined, we throw an error.
896
+ for (let i = 0; i < uniqueBlockNumbers.length; i++) {
897
+ if (blockHashes[i] === undefined) {
898
+ throw new Error(`Block hash is undefined for block number ${uniqueBlockNumbers[i]}`);
899
+ }
900
+ }
901
+
902
+ // Create DataInBlock objects by combining indices, blockNumbers and blockHashes and return them.
903
+ return maybeIndices.map((index, i) => {
904
+ if (index === undefined) {
905
+ return undefined;
906
+ }
907
+ const blockNumber = blockNumbers[i];
908
+ if (blockNumber === undefined) {
909
+ return undefined;
910
+ }
911
+ const blockHashIndex = uniqueBlockNumbers.indexOf(blockNumber);
912
+ const blockHash = blockHashes[blockHashIndex];
913
+ if (!blockHash) {
914
+ return undefined;
915
+ }
916
+ return {
917
+ l2BlockNumber: BlockNumber(Number(blockNumber)),
918
+ l2BlockHash: L2BlockHash.fromField(blockHash),
919
+ data: index,
920
+ };
921
+ });
551
922
  }
552
923
 
553
- /**
554
- * Returns a sibling path for the given index in the nullifier tree.
555
- * @param blockNumber - The block number at which to get the data.
556
- * @param leafIndex - The index of the leaf for which the sibling path is required.
557
- * @returns The sibling path for the leaf index.
558
- */
559
924
  public async getNullifierSiblingPath(
560
- blockNumber: L2BlockNumber,
925
+ block: BlockParameter,
561
926
  leafIndex: bigint,
562
927
  ): Promise<SiblingPath<typeof NULLIFIER_TREE_HEIGHT>> {
563
- const committedDb = await this.#getWorldState(blockNumber);
928
+ const committedDb = await this.#getWorldState(block);
564
929
  return committedDb.getSiblingPath(MerkleTreeId.NULLIFIER_TREE, leafIndex);
565
930
  }
566
931
 
567
- /**
568
- * Returns a sibling path for the given index in the data tree.
569
- * @param blockNumber - The block number at which to get the data.
570
- * @param leafIndex - The index of the leaf for which the sibling path is required.
571
- * @returns The sibling path for the leaf index.
572
- */
573
932
  public async getNoteHashSiblingPath(
574
- blockNumber: L2BlockNumber,
933
+ block: BlockParameter,
575
934
  leafIndex: bigint,
576
935
  ): Promise<SiblingPath<typeof NOTE_HASH_TREE_HEIGHT>> {
577
- const committedDb = await this.#getWorldState(blockNumber);
936
+ const committedDb = await this.#getWorldState(block);
578
937
  return committedDb.getSiblingPath(MerkleTreeId.NOTE_HASH_TREE, leafIndex);
579
938
  }
580
939
 
581
- /**
582
- * Returns the index and a sibling path for a leaf in the committed l1 to l2 data tree.
583
- * @param blockNumber - The block number at which to get the data.
584
- * @param l1ToL2Message - The l1ToL2Message to get the index / sibling path for.
585
- * @returns A tuple of the index and the sibling path of the L1ToL2Message (undefined if not found).
586
- */
940
+ public async getArchiveMembershipWitness(
941
+ block: BlockParameter,
942
+ archive: Fr,
943
+ ): Promise<MembershipWitness<typeof ARCHIVE_HEIGHT> | undefined> {
944
+ const committedDb = await this.#getWorldState(block);
945
+ const [pathAndIndex] = await committedDb.findSiblingPaths<MerkleTreeId.ARCHIVE>(MerkleTreeId.ARCHIVE, [archive]);
946
+ return pathAndIndex === undefined
947
+ ? undefined
948
+ : MembershipWitness.fromSiblingPath(pathAndIndex.index, pathAndIndex.path);
949
+ }
950
+
951
+ public async getNoteHashMembershipWitness(
952
+ block: BlockParameter,
953
+ noteHash: Fr,
954
+ ): Promise<MembershipWitness<typeof NOTE_HASH_TREE_HEIGHT> | undefined> {
955
+ const committedDb = await this.#getWorldState(block);
956
+ const [pathAndIndex] = await committedDb.findSiblingPaths<MerkleTreeId.NOTE_HASH_TREE>(
957
+ MerkleTreeId.NOTE_HASH_TREE,
958
+ [noteHash],
959
+ );
960
+ return pathAndIndex === undefined
961
+ ? undefined
962
+ : MembershipWitness.fromSiblingPath(pathAndIndex.index, pathAndIndex.path);
963
+ }
964
+
587
965
  public async getL1ToL2MessageMembershipWitness(
588
- blockNumber: L2BlockNumber,
966
+ block: BlockParameter,
589
967
  l1ToL2Message: Fr,
590
968
  ): Promise<[bigint, SiblingPath<typeof L1_TO_L2_MSG_TREE_HEIGHT>] | undefined> {
591
- const index = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
592
- if (index === undefined) {
969
+ const db = await this.#getWorldState(block);
970
+ const [witness] = await db.findSiblingPaths(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, [l1ToL2Message]);
971
+ if (!witness) {
593
972
  return undefined;
594
973
  }
595
- const committedDb = await this.#getWorldState(blockNumber);
596
- const siblingPath = await committedDb.getSiblingPath<typeof L1_TO_L2_MSG_TREE_HEIGHT>(
597
- MerkleTreeId.L1_TO_L2_MESSAGE_TREE,
598
- index,
599
- );
600
- return [index, siblingPath];
974
+
975
+ // REFACTOR: Return a MembershipWitness object
976
+ return [witness.index, witness.path];
977
+ }
978
+
979
+ public async getL1ToL2MessageBlock(l1ToL2Message: Fr): Promise<BlockNumber | undefined> {
980
+ const messageIndex = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
981
+ return messageIndex
982
+ ? BlockNumber.fromCheckpointNumber(InboxLeaf.checkpointNumberFromIndex(messageIndex))
983
+ : undefined;
601
984
  }
602
985
 
603
986
  /**
@@ -606,153 +989,74 @@ export class AztecNodeService implements AztecNode, Traceable {
606
989
  * @returns Whether the message is synced and ready to be included in a block.
607
990
  */
608
991
  public async isL1ToL2MessageSynced(l1ToL2Message: Fr): Promise<boolean> {
609
- return (await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message)) !== undefined;
992
+ const messageIndex = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
993
+ return messageIndex !== undefined;
610
994
  }
611
995
 
612
996
  /**
613
- * Returns the index of a l2ToL1Message in a ephemeral l2 to l1 data tree as well as its sibling path.
614
- * @remarks This tree is considered ephemeral because it is created on-demand by: taking all the l2ToL1 messages
615
- * in a single block, and then using them to make a variable depth append-only tree with these messages as leaves.
616
- * The tree is discarded immediately after calculating what we need from it.
617
- * TODO: Handle the case where two messages in the same tx have the same hash.
618
- * @param blockNumber - The block number at which to get the data.
619
- * @param l2ToL1Message - The l2ToL1Message get the index / sibling path for.
620
- * @returns A tuple of the index and the sibling path of the L2ToL1Message.
997
+ * Returns all the L2 to L1 messages in an epoch.
998
+ * @param epoch - The epoch at which to get the data.
999
+ * @returns The L2 to L1 messages (empty array if the epoch is not found).
621
1000
  */
622
- public async getL2ToL1MessageMembershipWitness(
623
- blockNumber: L2BlockNumber,
624
- l2ToL1Message: Fr,
625
- ): Promise<[bigint, SiblingPath<number>]> {
626
- const block = await this.blockSource.getBlock(blockNumber === 'latest' ? await this.getBlockNumber() : blockNumber);
627
-
628
- if (block === undefined) {
629
- throw new Error('Block is not defined');
630
- }
631
-
632
- const l2ToL1Messages = block.body.txEffects.map(txEffect => txEffect.l2ToL1Msgs);
633
-
634
- // Find index of message
635
- let indexOfMsgInSubtree = -1;
636
- const indexOfMsgTx = l2ToL1Messages.findIndex(msgs => {
637
- const idx = msgs.findIndex(msg => msg.equals(l2ToL1Message));
638
- indexOfMsgInSubtree = Math.max(indexOfMsgInSubtree, idx);
639
- return idx !== -1;
640
- });
641
-
642
- if (indexOfMsgTx === -1) {
643
- throw new Error('The L2ToL1Message you are trying to prove inclusion of does not exist');
1001
+ public async getL2ToL1Messages(epoch: EpochNumber): Promise<Fr[][][][]> {
1002
+ // Assumes `getCheckpointedBlocksForEpoch` returns blocks in ascending order of block number.
1003
+ const checkpointedBlocks = await this.blockSource.getCheckpointedBlocksForEpoch(epoch);
1004
+ const blocksInCheckpoints: L2Block[][] = [];
1005
+ let previousSlotNumber = SlotNumber.ZERO;
1006
+ let checkpointIndex = -1;
1007
+ for (const checkpointedBlock of checkpointedBlocks) {
1008
+ const block = checkpointedBlock.block;
1009
+ const slotNumber = block.header.globalVariables.slotNumber;
1010
+ if (slotNumber !== previousSlotNumber) {
1011
+ checkpointIndex++;
1012
+ blocksInCheckpoints.push([]);
1013
+ previousSlotNumber = slotNumber;
1014
+ }
1015
+ blocksInCheckpoints[checkpointIndex].push(block);
644
1016
  }
645
-
646
- const tempStores: AztecKVStore[] = [];
647
-
648
- // Construct message subtrees
649
- const l2toL1Subtrees = await Promise.all(
650
- l2ToL1Messages.map(async (msgs, i) => {
651
- const store = openTmpStore(true);
652
- tempStores.push(store);
653
- const treeHeight = msgs.length <= 1 ? 1 : Math.ceil(Math.log2(msgs.length));
654
- const tree = new StandardTree(store, new SHA256Trunc(), `temp_msgs_subtrees_${i}`, treeHeight, 0n, Fr);
655
- await tree.appendLeaves(msgs);
656
- return tree;
657
- }),
1017
+ return blocksInCheckpoints.map(blocks =>
1018
+ blocks.map(block => block.body.txEffects.map(txEffect => txEffect.l2ToL1Msgs)),
658
1019
  );
659
-
660
- // path of the input msg from leaf -> first out hash calculated in base rolllup
661
- const subtreePathOfL2ToL1Message = await l2toL1Subtrees[indexOfMsgTx].getSiblingPath(
662
- BigInt(indexOfMsgInSubtree),
663
- true,
664
- );
665
-
666
- const numTxs = block.body.txEffects.length;
667
- if (numTxs === 1) {
668
- return [BigInt(indexOfMsgInSubtree), subtreePathOfL2ToL1Message];
669
- }
670
-
671
- const l2toL1SubtreeRoots = l2toL1Subtrees.map(t => Fr.fromBuffer(t.getRoot(true)));
672
- const maxTreeHeight = Math.ceil(Math.log2(l2toL1SubtreeRoots.length));
673
- // The root of this tree is the out_hash calculated in Noir => we truncate to match Noir's SHA
674
- const outHashTree = new UnbalancedTree(new SHA256Trunc(), 'temp_outhash_sibling_path', maxTreeHeight, Fr);
675
- await outHashTree.appendLeaves(l2toL1SubtreeRoots);
676
-
677
- const pathOfTxInOutHashTree = await outHashTree.getSiblingPath(l2toL1SubtreeRoots[indexOfMsgTx].toBigInt());
678
- // Append subtree path to out hash tree path
679
- const mergedPath = subtreePathOfL2ToL1Message.toBufferArray().concat(pathOfTxInOutHashTree.toBufferArray());
680
- // Append binary index of subtree path to binary index of out hash tree path
681
- const mergedIndex = parseInt(
682
- indexOfMsgTx
683
- .toString(2)
684
- .concat(indexOfMsgInSubtree.toString(2).padStart(l2toL1Subtrees[indexOfMsgTx].getDepth(), '0')),
685
- 2,
686
- );
687
-
688
- // clear the tmp stores
689
- await Promise.all(tempStores.map(store => store.delete()));
690
-
691
- return [BigInt(mergedIndex), new SiblingPath(mergedPath.length, mergedPath)];
692
1020
  }
693
1021
 
694
- /**
695
- * Returns a sibling path for a leaf in the committed blocks tree.
696
- * @param blockNumber - The block number at which to get the data.
697
- * @param leafIndex - Index of the leaf in the tree.
698
- * @returns The sibling path.
699
- */
700
1022
  public async getArchiveSiblingPath(
701
- blockNumber: L2BlockNumber,
1023
+ block: BlockParameter,
702
1024
  leafIndex: bigint,
703
1025
  ): Promise<SiblingPath<typeof ARCHIVE_HEIGHT>> {
704
- const committedDb = await this.#getWorldState(blockNumber);
1026
+ const committedDb = await this.#getWorldState(block);
705
1027
  return committedDb.getSiblingPath(MerkleTreeId.ARCHIVE, leafIndex);
706
1028
  }
707
1029
 
708
- /**
709
- * Returns a sibling path for a leaf in the committed public data tree.
710
- * @param blockNumber - The block number at which to get the data.
711
- * @param leafIndex - Index of the leaf in the tree.
712
- * @returns The sibling path.
713
- */
714
1030
  public async getPublicDataSiblingPath(
715
- blockNumber: L2BlockNumber,
1031
+ block: BlockParameter,
716
1032
  leafIndex: bigint,
717
1033
  ): Promise<SiblingPath<typeof PUBLIC_DATA_TREE_HEIGHT>> {
718
- const committedDb = await this.#getWorldState(blockNumber);
1034
+ const committedDb = await this.#getWorldState(block);
719
1035
  return committedDb.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, leafIndex);
720
1036
  }
721
1037
 
722
- /**
723
- * Returns a nullifier membership witness for a given nullifier at a given block.
724
- * @param blockNumber - The block number at which to get the index.
725
- * @param nullifier - Nullifier we try to find witness for.
726
- * @returns The nullifier membership witness (if found).
727
- */
728
1038
  public async getNullifierMembershipWitness(
729
- blockNumber: L2BlockNumber,
1039
+ block: BlockParameter,
730
1040
  nullifier: Fr,
731
1041
  ): Promise<NullifierMembershipWitness | undefined> {
732
- const db = await this.#getWorldState(blockNumber);
733
- const index = (await db.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, [nullifier.toBuffer()]))[0];
734
- if (!index) {
1042
+ const db = await this.#getWorldState(block);
1043
+ const [witness] = await db.findSiblingPaths(MerkleTreeId.NULLIFIER_TREE, [nullifier.toBuffer()]);
1044
+ if (!witness) {
735
1045
  return undefined;
736
1046
  }
737
1047
 
738
- const leafPreimagePromise = db.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index);
739
- const siblingPathPromise = db.getSiblingPath<typeof NULLIFIER_TREE_HEIGHT>(
740
- MerkleTreeId.NULLIFIER_TREE,
741
- BigInt(index),
742
- );
743
-
744
- const [leafPreimage, siblingPath] = await Promise.all([leafPreimagePromise, siblingPathPromise]);
745
-
1048
+ const { index, path } = witness;
1049
+ const leafPreimage = await db.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index);
746
1050
  if (!leafPreimage) {
747
1051
  return undefined;
748
1052
  }
749
1053
 
750
- return new NullifierMembershipWitness(BigInt(index), leafPreimage as NullifierLeafPreimage, siblingPath);
1054
+ return new NullifierMembershipWitness(index, leafPreimage as NullifierLeafPreimage, path);
751
1055
  }
752
1056
 
753
1057
  /**
754
1058
  * Returns a low nullifier membership witness for a given nullifier at a given block.
755
- * @param blockNumber - The block number at which to get the index.
1059
+ * @param block - The block parameter (block number, block hash, or 'latest') at which to get the data.
756
1060
  * @param nullifier - Nullifier we try to find the low nullifier witness for.
757
1061
  * @returns The low nullifier membership witness (if found).
758
1062
  * @remarks Low nullifier witness can be used to perform a nullifier non-inclusion proof by leveraging the "linked
@@ -765,10 +1069,10 @@ export class AztecNodeService implements AztecNode, Traceable {
765
1069
  * TODO: This is a confusing behavior and we should eventually address that.
766
1070
  */
767
1071
  public async getLowNullifierMembershipWitness(
768
- blockNumber: L2BlockNumber,
1072
+ block: BlockParameter,
769
1073
  nullifier: Fr,
770
1074
  ): Promise<NullifierMembershipWitness | undefined> {
771
- const committedDb = await this.#getWorldState(blockNumber);
1075
+ const committedDb = await this.#getWorldState(block);
772
1076
  const findResult = await committedDb.getPreviousValueIndex(MerkleTreeId.NULLIFIER_TREE, nullifier.toBigInt());
773
1077
  if (!findResult) {
774
1078
  return undefined;
@@ -779,15 +1083,12 @@ export class AztecNodeService implements AztecNode, Traceable {
779
1083
  }
780
1084
  const preimageData = (await committedDb.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index))!;
781
1085
 
782
- const siblingPath = await committedDb.getSiblingPath<typeof NULLIFIER_TREE_HEIGHT>(
783
- MerkleTreeId.NULLIFIER_TREE,
784
- BigInt(index),
785
- );
1086
+ const siblingPath = await committedDb.getSiblingPath(MerkleTreeId.NULLIFIER_TREE, BigInt(index));
786
1087
  return new NullifierMembershipWitness(BigInt(index), preimageData as NullifierLeafPreimage, siblingPath);
787
1088
  }
788
1089
 
789
- async getPublicDataTreeWitness(blockNumber: L2BlockNumber, leafSlot: Fr): Promise<PublicDataWitness | undefined> {
790
- const committedDb = await this.#getWorldState(blockNumber);
1090
+ async getPublicDataWitness(block: BlockParameter, leafSlot: Fr): Promise<PublicDataWitness | undefined> {
1091
+ const committedDb = await this.#getWorldState(block);
791
1092
  const lowLeafResult = await committedDb.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot.toBigInt());
792
1093
  if (!lowLeafResult) {
793
1094
  return undefined;
@@ -796,27 +1097,13 @@ export class AztecNodeService implements AztecNode, Traceable {
796
1097
  MerkleTreeId.PUBLIC_DATA_TREE,
797
1098
  lowLeafResult.index,
798
1099
  )) as PublicDataTreeLeafPreimage;
799
- const path = await committedDb.getSiblingPath<typeof PUBLIC_DATA_TREE_HEIGHT>(
800
- MerkleTreeId.PUBLIC_DATA_TREE,
801
- lowLeafResult.index,
802
- );
1100
+ const path = await committedDb.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, lowLeafResult.index);
803
1101
  return new PublicDataWitness(lowLeafResult.index, preimage, path);
804
1102
  }
805
1103
  }
806
1104
 
807
- /**
808
- * Gets the storage value at the given contract storage slot.
809
- *
810
- * @remarks The storage slot here refers to the slot as it is defined in Noir not the index in the merkle tree.
811
- * Aztec's version of `eth_getStorageAt`.
812
- *
813
- * @param contract - Address of the contract to query.
814
- * @param slot - Slot to query.
815
- * @param blockNumber - The block number at which to get the data or 'latest'.
816
- * @returns Storage value at the given contract slot.
817
- */
818
- public async getPublicStorageAt(blockNumber: L2BlockNumber, contract: AztecAddress, slot: Fr): Promise<Fr> {
819
- const committedDb = await this.#getWorldState(blockNumber);
1105
+ public async getPublicStorageAt(block: BlockParameter, contract: AztecAddress, slot: Fr): Promise<Fr> {
1106
+ const committedDb = await this.#getWorldState(block);
820
1107
  const leafSlot = await computePublicDataTreeLeafSlot(contract, slot);
821
1108
 
822
1109
  const lowLeafResult = await committedDb.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot.toBigInt());
@@ -827,36 +1114,68 @@ export class AztecNodeService implements AztecNode, Traceable {
827
1114
  MerkleTreeId.PUBLIC_DATA_TREE,
828
1115
  lowLeafResult.index,
829
1116
  )) as PublicDataTreeLeafPreimage;
830
- return preimage.value;
1117
+ return preimage.leaf.value;
1118
+ }
1119
+
1120
+ public async getBlockHeader(block: BlockParameter = 'latest'): Promise<BlockHeader | undefined> {
1121
+ if (L2BlockHash.isL2BlockHash(block)) {
1122
+ const initialBlockHash = await this.#getInitialHeaderHash();
1123
+ if (block.equals(initialBlockHash)) {
1124
+ // Block source doesn't handle initial header so we need to handle the case separately.
1125
+ return this.worldStateSynchronizer.getCommitted().getInitialHeader();
1126
+ }
1127
+ const blockHashFr = Fr.fromBuffer(block.toBuffer());
1128
+ return this.blockSource.getBlockHeaderByHash(blockHashFr);
1129
+ } else {
1130
+ // Block source doesn't handle initial header so we need to handle the case separately.
1131
+ const blockNumber = block === 'latest' ? await this.getBlockNumber() : (block as BlockNumber);
1132
+ if (blockNumber === BlockNumber.ZERO) {
1133
+ return this.worldStateSynchronizer.getCommitted().getInitialHeader();
1134
+ }
1135
+ return this.blockSource.getBlockHeader(block);
1136
+ }
831
1137
  }
832
1138
 
833
1139
  /**
834
- * Returns the currently committed block header, or the initial header if no blocks have been produced.
835
- * @returns The current committed block header.
1140
+ * Get a block header specified by its archive root.
1141
+ * @param archive - The archive root being requested.
1142
+ * @returns The requested block header.
836
1143
  */
837
- public async getBlockHeader(blockNumber: L2BlockNumber = 'latest'): Promise<BlockHeader | undefined> {
838
- return blockNumber === 0 || (blockNumber === 'latest' && (await this.blockSource.getBlockNumber()) === 0)
839
- ? this.worldStateSynchronizer.getCommitted().getInitialHeader()
840
- : this.blockSource.getBlockHeader(blockNumber);
1144
+ public async getBlockHeaderByArchive(archive: Fr): Promise<BlockHeader | undefined> {
1145
+ return await this.blockSource.getBlockHeaderByArchive(archive);
841
1146
  }
842
1147
 
843
1148
  /**
844
1149
  * Simulates the public part of a transaction with the current state.
845
1150
  * @param tx - The transaction to simulate.
846
1151
  **/
847
- @trackSpan('AztecNodeService.simulatePublicCalls', async (tx: Tx) => ({
848
- [Attributes.TX_HASH]: (await tx.getTxHash()).toString(),
1152
+ @trackSpan('AztecNodeService.simulatePublicCalls', (tx: Tx) => ({
1153
+ [Attributes.TX_HASH]: tx.getTxHash().toString(),
849
1154
  }))
850
1155
  public async simulatePublicCalls(tx: Tx, skipFeeEnforcement = false): Promise<PublicSimulationOutput> {
851
- const txHash = await tx.getTxHash();
852
- const blockNumber = (await this.blockSource.getBlockNumber()) + 1;
1156
+ // Check total gas limit for simulation
1157
+ const gasSettings = tx.data.constants.txContext.gasSettings;
1158
+ const txGasLimit = gasSettings.gasLimits.l2Gas;
1159
+ const teardownGasLimit = gasSettings.teardownGasLimits.l2Gas;
1160
+ if (txGasLimit + teardownGasLimit > this.config.rpcSimulatePublicMaxGasLimit) {
1161
+ throw new BadRequestError(
1162
+ `Transaction total gas limit ${
1163
+ txGasLimit + teardownGasLimit
1164
+ } (${txGasLimit} + ${teardownGasLimit}) exceeds maximum gas limit ${
1165
+ this.config.rpcSimulatePublicMaxGasLimit
1166
+ } for simulation`,
1167
+ );
1168
+ }
1169
+
1170
+ const txHash = tx.getTxHash();
1171
+ const blockNumber = BlockNumber((await this.blockSource.getBlockNumber()) + 1);
853
1172
 
854
1173
  // If sequencer is not initialized, we just set these values to zero for simulation.
855
- const coinbase = this.sequencer?.coinbase || EthAddress.ZERO;
856
- const feeRecipient = this.sequencer?.feeRecipient || AztecAddress.ZERO;
1174
+ const coinbase = EthAddress.ZERO;
1175
+ const feeRecipient = AztecAddress.ZERO;
857
1176
 
858
1177
  const newGlobalVariables = await this.globalVariableBuilder.buildGlobalVariables(
859
- new Fr(blockNumber),
1178
+ blockNumber,
860
1179
  coinbase,
861
1180
  feeRecipient,
862
1181
  );
@@ -865,7 +1184,6 @@ export class AztecNodeService implements AztecNode, Traceable {
865
1184
  new DateProvider(),
866
1185
  this.telemetry,
867
1186
  );
868
- const fork = await this.worldStateSynchronizer.fork();
869
1187
 
870
1188
  this.log.verbose(`Simulating public calls for tx ${txHash}`, {
871
1189
  globalVariables: newGlobalVariables.toInspect(),
@@ -873,11 +1191,22 @@ export class AztecNodeService implements AztecNode, Traceable {
873
1191
  blockNumber,
874
1192
  });
875
1193
 
1194
+ const merkleTreeFork = await this.worldStateSynchronizer.fork();
876
1195
  try {
877
- const processor = publicProcessorFactory.create(fork, newGlobalVariables, skipFeeEnforcement);
1196
+ const config = PublicSimulatorConfig.from({
1197
+ skipFeeEnforcement,
1198
+ collectDebugLogs: true,
1199
+ collectHints: false,
1200
+ collectCallMetadata: true,
1201
+ collectStatistics: false,
1202
+ collectionLimits: CollectionLimitsConfig.from({
1203
+ maxDebugLogMemoryReads: this.config.rpcSimulatePublicMaxDebugLogMemoryReads,
1204
+ }),
1205
+ });
1206
+ const processor = publicProcessorFactory.create(merkleTreeFork, newGlobalVariables, config);
878
1207
 
879
1208
  // REFACTOR: Consider merging ProcessReturnValues into ProcessedTx
880
- const [processedTxs, failedTxs, returns] = await processor.process([tx]);
1209
+ const [processedTxs, failedTxs, _usedTxs, returns] = await processor.process([tx]);
881
1210
  // REFACTOR: Consider returning the error rather than throwing
882
1211
  if (failedTxs.length) {
883
1212
  this.log.warn(`Simulated tx ${txHash} fails: ${failedTxs[0].error}`, { txHash });
@@ -887,13 +1216,13 @@ export class AztecNodeService implements AztecNode, Traceable {
887
1216
  const [processedTx] = processedTxs;
888
1217
  return new PublicSimulationOutput(
889
1218
  processedTx.revertReason,
890
- processedTx.constants,
1219
+ processedTx.globalVariables,
891
1220
  processedTx.txEffect,
892
1221
  returns,
893
1222
  processedTx.gasUsed,
894
1223
  );
895
1224
  } finally {
896
- await fork.close();
1225
+ await merkleTreeFork.close();
897
1226
  }
898
1227
  }
899
1228
 
@@ -901,24 +1230,42 @@ export class AztecNodeService implements AztecNode, Traceable {
901
1230
  tx: Tx,
902
1231
  { isSimulation, skipFeeEnforcement }: { isSimulation?: boolean; skipFeeEnforcement?: boolean } = {},
903
1232
  ): Promise<TxValidationResult> {
904
- const blockNumber = (await this.blockSource.getBlockNumber()) + 1;
905
1233
  const db = this.worldStateSynchronizer.getCommitted();
906
1234
  const verifier = isSimulation ? undefined : this.proofVerifier;
1235
+
1236
+ // We accept transactions if they are not expired by the next slot (checked based on the IncludeByTimestamp field)
1237
+ const { ts: nextSlotTimestamp } = this.epochCache.getEpochAndSlotInNextL1Slot();
1238
+ const blockNumber = BlockNumber((await this.blockSource.getBlockNumber()) + 1);
907
1239
  const validator = createValidatorForAcceptingTxs(db, this.contractDataSource, verifier, {
1240
+ timestamp: nextSlotTimestamp,
908
1241
  blockNumber,
909
1242
  l1ChainId: this.l1ChainId,
910
- setupAllowList: this.config.allowedInSetup ?? (await getDefaultAllowedSetupFunctions()),
911
- gasFees: await this.getCurrentBaseFees(),
1243
+ rollupVersion: this.version,
1244
+ setupAllowList: this.config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions()),
1245
+ gasFees: await this.getCurrentMinFees(),
912
1246
  skipFeeEnforcement,
1247
+ txsPermitted: !this.config.disableTransactions,
913
1248
  });
914
1249
 
915
1250
  return await validator.validateTx(tx);
916
1251
  }
917
1252
 
918
- public async setConfig(config: Partial<SequencerConfig & ProverConfig>): Promise<void> {
919
- const newConfig = { ...this.config, ...config };
920
- await this.sequencer?.updateSequencerConfig(config);
1253
+ public getConfig(): Promise<AztecNodeAdminConfig> {
1254
+ const schema = AztecNodeAdminConfigSchema;
1255
+ const keys = schema.keyof().options;
1256
+ return Promise.resolve(pick(this.config, ...keys));
1257
+ }
921
1258
 
1259
+ public async setConfig(config: Partial<AztecNodeAdminConfig>): Promise<void> {
1260
+ const newConfig = { ...this.config, ...config };
1261
+ this.sequencer?.updateConfig(config);
1262
+ this.slasherClient?.updateConfig(config);
1263
+ this.validatorsSentinel?.updateConfig(config);
1264
+ await this.p2pClient.updateP2PConfig(config);
1265
+ const archiver = this.blockSource as Archiver;
1266
+ if ('updateConfig' in archiver) {
1267
+ archiver.updateConfig(config);
1268
+ }
922
1269
  if (newConfig.realProofs !== this.config.realProofs) {
923
1270
  this.proofVerifier = config.realProofs ? await BBCircuitVerifier.new(newConfig) : new TestCircuitVerifier();
924
1271
  }
@@ -928,42 +1275,154 @@ export class AztecNodeService implements AztecNode, Traceable {
928
1275
 
929
1276
  public getProtocolContractAddresses(): Promise<ProtocolContractAddresses> {
930
1277
  return Promise.resolve({
931
- classRegisterer: ProtocolContractAddress.ContractClassRegisterer,
1278
+ classRegistry: ProtocolContractAddress.ContractClassRegistry,
932
1279
  feeJuice: ProtocolContractAddress.FeeJuice,
933
- instanceDeployer: ProtocolContractAddress.ContractInstanceDeployer,
1280
+ instanceRegistry: ProtocolContractAddress.ContractInstanceRegistry,
934
1281
  multiCallEntrypoint: ProtocolContractAddress.MultiCallEntrypoint,
935
1282
  });
936
1283
  }
937
1284
 
938
- // TODO(#10007): Remove this method
939
- public addContractClass(contractClass: ContractClassPublic): Promise<void> {
940
- this.log.info(`Adding contract class via API ${contractClass.id}`);
941
- return this.contractDataSource.addContractClass(contractClass);
1285
+ public registerContractFunctionSignatures(signatures: string[]): Promise<void> {
1286
+ return this.contractDataSource.registerContractFunctionSignatures(signatures);
1287
+ }
1288
+
1289
+ public getValidatorsStats(): Promise<ValidatorsStats> {
1290
+ return this.validatorsSentinel?.computeStats() ?? Promise.resolve({ stats: {}, slotWindow: 0 });
942
1291
  }
943
1292
 
944
- public registerContractFunctionSignatures(_address: AztecAddress, signatures: string[]): Promise<void> {
945
- return this.contractDataSource.registerContractFunctionSignatures(_address, signatures);
1293
+ public getValidatorStats(
1294
+ validatorAddress: EthAddress,
1295
+ fromSlot?: SlotNumber,
1296
+ toSlot?: SlotNumber,
1297
+ ): Promise<SingleValidatorStats | undefined> {
1298
+ return this.validatorsSentinel?.getValidatorStats(validatorAddress, fromSlot, toSlot) ?? Promise.resolve(undefined);
946
1299
  }
947
1300
 
948
- public flushTxs(): Promise<void> {
949
- if (!this.sequencer) {
950
- throw new Error(`Sequencer is not initialized`);
1301
+ public async startSnapshotUpload(location: string): Promise<void> {
1302
+ // Note that we are forcefully casting the blocksource as an archiver
1303
+ // We break support for archiver running remotely to the node
1304
+ const archiver = this.blockSource as Archiver;
1305
+ if (!('backupTo' in archiver)) {
1306
+ this.metrics.recordSnapshotError();
1307
+ throw new Error('Archiver implementation does not support backups. Cannot generate snapshot.');
1308
+ }
1309
+
1310
+ // Test that the archiver has done an initial sync.
1311
+ if (!archiver.isInitialSyncComplete()) {
1312
+ this.metrics.recordSnapshotError();
1313
+ throw new Error(`Archiver initial sync not complete. Cannot start snapshot.`);
1314
+ }
1315
+
1316
+ // And it has an L2 block hash
1317
+ const l2BlockHash = await archiver.getL2Tips().then(tips => tips.proposed.hash);
1318
+ if (!l2BlockHash) {
1319
+ this.metrics.recordSnapshotError();
1320
+ throw new Error(`Archiver has no latest L2 block hash downloaded. Cannot start snapshot.`);
1321
+ }
1322
+
1323
+ if (this.isUploadingSnapshot) {
1324
+ this.metrics.recordSnapshotError();
1325
+ throw new Error(`Snapshot upload already in progress. Cannot start another one until complete.`);
951
1326
  }
952
- this.sequencer.flush();
1327
+
1328
+ // Do not wait for the upload to be complete to return to the caller, but flag that an operation is in progress
1329
+ this.isUploadingSnapshot = true;
1330
+ const timer = new Timer();
1331
+ void uploadSnapshot(location, this.blockSource as Archiver, this.worldStateSynchronizer, this.config, this.log)
1332
+ .then(() => {
1333
+ this.isUploadingSnapshot = false;
1334
+ this.metrics.recordSnapshot(timer.ms());
1335
+ })
1336
+ .catch(err => {
1337
+ this.isUploadingSnapshot = false;
1338
+ this.metrics.recordSnapshotError();
1339
+ this.log.error(`Error uploading snapshot: ${err}`);
1340
+ });
1341
+
953
1342
  return Promise.resolve();
954
1343
  }
955
1344
 
1345
+ public async rollbackTo(targetBlock: BlockNumber, force?: boolean): Promise<void> {
1346
+ const archiver = this.blockSource as Archiver;
1347
+ if (!('rollbackTo' in archiver)) {
1348
+ throw new Error('Archiver implementation does not support rollbacks.');
1349
+ }
1350
+
1351
+ const finalizedBlock = await archiver.getL2Tips().then(tips => tips.finalized.block.number);
1352
+ if (targetBlock < finalizedBlock) {
1353
+ if (force) {
1354
+ this.log.warn(`Clearing world state database to allow rolling back behind finalized block ${finalizedBlock}`);
1355
+ await this.worldStateSynchronizer.clear();
1356
+ await this.p2pClient.clear();
1357
+ } else {
1358
+ throw new Error(`Cannot rollback to block ${targetBlock} as it is before finalized ${finalizedBlock}`);
1359
+ }
1360
+ }
1361
+
1362
+ try {
1363
+ this.log.info(`Pausing archiver and world state sync to start rollback`);
1364
+ await archiver.stop();
1365
+ await this.worldStateSynchronizer.stopSync();
1366
+ const currentBlock = await archiver.getBlockNumber();
1367
+ const blocksToUnwind = currentBlock - targetBlock;
1368
+ this.log.info(`Unwinding ${count(blocksToUnwind, 'block')} from L2 block ${currentBlock} to ${targetBlock}`);
1369
+ await archiver.rollbackTo(targetBlock);
1370
+ this.log.info(`Unwinding complete.`);
1371
+ } catch (err) {
1372
+ this.log.error(`Error during rollback`, err);
1373
+ throw err;
1374
+ } finally {
1375
+ this.log.info(`Resuming world state and archiver sync.`);
1376
+ this.worldStateSynchronizer.resumeSync();
1377
+ archiver.resume();
1378
+ }
1379
+ }
1380
+
1381
+ public async pauseSync(): Promise<void> {
1382
+ this.log.info(`Pausing archiver and world state sync`);
1383
+ await (this.blockSource as Archiver).stop();
1384
+ await this.worldStateSynchronizer.stopSync();
1385
+ }
1386
+
1387
+ public resumeSync(): Promise<void> {
1388
+ this.log.info(`Resuming world state and archiver sync.`);
1389
+ this.worldStateSynchronizer.resumeSync();
1390
+ (this.blockSource as Archiver).resume();
1391
+ return Promise.resolve();
1392
+ }
1393
+
1394
+ public getSlashPayloads(): Promise<SlashPayloadRound[]> {
1395
+ if (!this.slasherClient) {
1396
+ throw new Error(`Slasher client not enabled`);
1397
+ }
1398
+ return this.slasherClient.getSlashPayloads();
1399
+ }
1400
+
1401
+ public getSlashOffenses(round: bigint | 'all' | 'current'): Promise<Offense[]> {
1402
+ if (!this.slasherClient) {
1403
+ throw new Error(`Slasher client not enabled`);
1404
+ }
1405
+ if (round === 'all') {
1406
+ return this.slasherClient.getPendingOffenses();
1407
+ } else {
1408
+ return this.slasherClient.gatherOffensesForRound(round === 'current' ? undefined : BigInt(round));
1409
+ }
1410
+ }
1411
+
1412
+ #getInitialHeaderHash(): Promise<L2BlockHash> {
1413
+ if (!this.initialHeaderHashPromise) {
1414
+ this.initialHeaderHashPromise = this.worldStateSynchronizer.getCommitted().getInitialHeader().hash();
1415
+ }
1416
+ return this.initialHeaderHashPromise;
1417
+ }
1418
+
956
1419
  /**
957
1420
  * Returns an instance of MerkleTreeOperations having first ensured the world state is fully synched
958
- * @param blockNumber - The block number at which to get the data.
1421
+ * @param block - The block parameter (block number, block hash, or 'latest') at which to get the data.
959
1422
  * @returns An instance of a committed MerkleTreeOperations
960
1423
  */
961
- async #getWorldState(blockNumber: L2BlockNumber) {
962
- if (typeof blockNumber === 'number' && blockNumber < INITIAL_L2_BLOCK_NUM - 1) {
963
- throw new Error('Invalid block number to get world state for: ' + blockNumber);
964
- }
965
-
966
- let blockSyncedTo: number = 0;
1424
+ async #getWorldState(block: BlockParameter) {
1425
+ let blockSyncedTo: BlockNumber = BlockNumber.ZERO;
967
1426
  try {
968
1427
  // Attempt to sync the world state if necessary
969
1428
  blockSyncedTo = await this.#syncWorldState();
@@ -971,15 +1430,40 @@ export class AztecNodeService implements AztecNode, Traceable {
971
1430
  this.log.error(`Error getting world state: ${err}`);
972
1431
  }
973
1432
 
974
- // using a snapshot could be less efficient than using the committed db
975
- if (blockNumber === 'latest' /*|| blockNumber === blockSyncedTo*/) {
976
- this.log.debug(`Using committed db for block ${blockNumber}, world state synced upto ${blockSyncedTo}`);
1433
+ if (block === 'latest') {
1434
+ this.log.debug(`Using committed db for block 'latest', world state synced upto ${blockSyncedTo}`);
977
1435
  return this.worldStateSynchronizer.getCommitted();
978
- } else if (blockNumber <= blockSyncedTo) {
1436
+ }
1437
+
1438
+ if (L2BlockHash.isL2BlockHash(block)) {
1439
+ const initialBlockHash = await this.#getInitialHeaderHash();
1440
+ if (block.equals(initialBlockHash)) {
1441
+ // Block source doesn't handle initial header so we need to handle the case separately.
1442
+ return this.worldStateSynchronizer.getSnapshot(BlockNumber.ZERO);
1443
+ }
1444
+
1445
+ const blockHashFr = Fr.fromBuffer(block.toBuffer());
1446
+ const header = await this.blockSource.getBlockHeaderByHash(blockHashFr);
1447
+ if (!header) {
1448
+ throw new Error(
1449
+ `Block hash ${block.toString()} not found when querying world state. If the node API has been queried with anchor block hash possibly a reorg has occurred.`,
1450
+ );
1451
+ }
1452
+ const blockNumber = header.getBlockNumber();
1453
+ this.log.debug(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`);
1454
+ return this.worldStateSynchronizer.getSnapshot(blockNumber);
1455
+ }
1456
+
1457
+ // Block number provided
1458
+ {
1459
+ const blockNumber = block as BlockNumber;
1460
+
1461
+ if (blockNumber > blockSyncedTo) {
1462
+ throw new Error(`Queried block ${block} not yet synced by the node (node is synced upto ${blockSyncedTo}).`);
1463
+ }
1464
+
979
1465
  this.log.debug(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`);
980
1466
  return this.worldStateSynchronizer.getSnapshot(blockNumber);
981
- } else {
982
- throw new Error(`Block ${blockNumber} not yet synced`);
983
1467
  }
984
1468
  }
985
1469
 
@@ -987,8 +1471,8 @@ export class AztecNodeService implements AztecNode, Traceable {
987
1471
  * Ensure we fully sync the world state
988
1472
  * @returns A promise that fulfils once the world state is synced
989
1473
  */
990
- async #syncWorldState(): Promise<number> {
1474
+ async #syncWorldState(): Promise<BlockNumber> {
991
1475
  const blockSourceHeight = await this.blockSource.getBlockNumber();
992
- return this.worldStateSynchronizer.syncImmediate(blockSourceHeight);
1476
+ return await this.worldStateSynchronizer.syncImmediate(blockSourceHeight);
993
1477
  }
994
1478
  }