@aztec/aztec-node 0.0.0-test.1 → 0.0.1-commit.023c3e5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/dest/aztec-node/config.d.ts +18 -10
  2. package/dest/aztec-node/config.d.ts.map +1 -1
  3. package/dest/aztec-node/config.js +81 -14
  4. package/dest/aztec-node/node_metrics.d.ts +5 -1
  5. package/dest/aztec-node/node_metrics.d.ts.map +1 -1
  6. package/dest/aztec-node/node_metrics.js +20 -6
  7. package/dest/aztec-node/server.d.ts +111 -141
  8. package/dest/aztec-node/server.d.ts.map +1 -1
  9. package/dest/aztec-node/server.js +1082 -347
  10. package/dest/bin/index.d.ts +1 -1
  11. package/dest/bin/index.js +4 -2
  12. package/dest/index.d.ts +1 -2
  13. package/dest/index.d.ts.map +1 -1
  14. package/dest/index.js +0 -1
  15. package/dest/sentinel/config.d.ts +8 -0
  16. package/dest/sentinel/config.d.ts.map +1 -0
  17. package/dest/sentinel/config.js +29 -0
  18. package/dest/sentinel/factory.d.ts +9 -0
  19. package/dest/sentinel/factory.d.ts.map +1 -0
  20. package/dest/sentinel/factory.js +17 -0
  21. package/dest/sentinel/index.d.ts +3 -0
  22. package/dest/sentinel/index.d.ts.map +1 -0
  23. package/dest/sentinel/index.js +1 -0
  24. package/dest/sentinel/sentinel.d.ts +93 -0
  25. package/dest/sentinel/sentinel.d.ts.map +1 -0
  26. package/dest/sentinel/sentinel.js +403 -0
  27. package/dest/sentinel/store.d.ts +35 -0
  28. package/dest/sentinel/store.d.ts.map +1 -0
  29. package/dest/sentinel/store.js +170 -0
  30. package/dest/test/index.d.ts +31 -0
  31. package/dest/test/index.d.ts.map +1 -0
  32. package/dest/test/index.js +1 -0
  33. package/package.json +46 -35
  34. package/src/aztec-node/config.ts +132 -25
  35. package/src/aztec-node/node_metrics.ts +23 -6
  36. package/src/aztec-node/server.ts +899 -449
  37. package/src/bin/index.ts +4 -2
  38. package/src/index.ts +0 -1
  39. package/src/sentinel/config.ts +37 -0
  40. package/src/sentinel/factory.ts +31 -0
  41. package/src/sentinel/index.ts +8 -0
  42. package/src/sentinel/sentinel.ts +510 -0
  43. package/src/sentinel/store.ts +185 -0
  44. package/src/test/index.ts +32 -0
  45. package/dest/aztec-node/http_rpc_server.d.ts +0 -8
  46. package/dest/aztec-node/http_rpc_server.d.ts.map +0 -1
  47. package/dest/aztec-node/http_rpc_server.js +0 -9
  48. package/src/aztec-node/http_rpc_server.ts +0 -11
@@ -1,39 +1,42 @@
1
- import { createArchiver } from '@aztec/archiver';
2
- import { BBCircuitVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
3
- import { type BlobSinkClientInterface, createBlobSinkClient } from '@aztec/blob-sink/client';
4
- import {
5
- type ARCHIVE_HEIGHT,
6
- INITIAL_L2_BLOCK_NUM,
7
- type L1_TO_L2_MSG_TREE_HEIGHT,
8
- type NOTE_HASH_TREE_HEIGHT,
9
- type NULLIFIER_TREE_HEIGHT,
10
- type PUBLIC_DATA_TREE_HEIGHT,
11
- REGISTERER_CONTRACT_ADDRESS,
12
- } from '@aztec/constants';
13
- import { EpochCache } from '@aztec/epoch-cache';
14
- import { type L1ContractAddresses, createEthereumChain } from '@aztec/ethereum';
15
- import { compactArray } from '@aztec/foundation/collection';
1
+ import { Archiver, createArchiver } from '@aztec/archiver';
2
+ import { BBCircuitVerifier, QueuedIVCVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
3
+ import { type BlobClientInterface, createBlobClientWithFileStores } from '@aztec/blob-client/client';
4
+ import { ARCHIVE_HEIGHT, type L1_TO_L2_MSG_TREE_HEIGHT, type NOTE_HASH_TREE_HEIGHT } from '@aztec/constants';
5
+ import { EpochCache, type EpochCacheInterface } from '@aztec/epoch-cache';
6
+ import { createEthereumChain } from '@aztec/ethereum/chain';
7
+ import { getPublicClient } from '@aztec/ethereum/client';
8
+ import { RegistryContract, RollupContract } from '@aztec/ethereum/contracts';
9
+ import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
10
+ import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types';
11
+ import { compactArray, pick } from '@aztec/foundation/collection';
12
+ import { Fr } from '@aztec/foundation/curves/bn254';
16
13
  import { EthAddress } from '@aztec/foundation/eth-address';
17
- import { Fr } from '@aztec/foundation/fields';
14
+ import { BadRequestError } from '@aztec/foundation/json-rpc';
18
15
  import { type Logger, createLogger } from '@aztec/foundation/log';
16
+ import { count } from '@aztec/foundation/string';
19
17
  import { DateProvider, Timer } from '@aztec/foundation/timer';
20
- import { SiblingPath } from '@aztec/foundation/trees';
21
- import type { AztecKVStore } from '@aztec/kv-store';
22
- import { openTmpStore } from '@aztec/kv-store/lmdb';
23
- import { SHA256Trunc, StandardTree, UnbalancedTree } from '@aztec/merkle-tree';
24
- import { type P2P, createP2PClient } from '@aztec/p2p';
25
- import { ProtocolContractAddress } from '@aztec/protocol-contracts';
18
+ import { MembershipWitness, SiblingPath } from '@aztec/foundation/trees';
19
+ import { KeystoreManager, loadKeystores, mergeKeystores } from '@aztec/node-keystore';
20
+ import { trySnapshotSync, uploadSnapshot } from '@aztec/node-lib/actions';
26
21
  import {
27
- GlobalVariableBuilder,
28
- SequencerClient,
29
- type SequencerPublisher,
30
- createSlasherClient,
31
- createValidatorForAcceptingTxs,
32
- getDefaultAllowedSetupFunctions,
33
- } from '@aztec/sequencer-client';
22
+ createForwarderL1TxUtilsFromEthSigner,
23
+ createL1TxUtilsWithBlobsFromEthSigner,
24
+ } from '@aztec/node-lib/factories';
25
+ import { type P2P, type P2PClientDeps, createP2PClient, getDefaultAllowedSetupFunctions } from '@aztec/p2p';
26
+ import { ProtocolContractAddress } from '@aztec/protocol-contracts';
27
+ import { GlobalVariableBuilder, SequencerClient, type SequencerPublisher } from '@aztec/sequencer-client';
34
28
  import { PublicProcessorFactory } from '@aztec/simulator/server';
29
+ import {
30
+ AttestationsBlockWatcher,
31
+ EpochPruneWatcher,
32
+ type SlasherClientInterface,
33
+ type Watcher,
34
+ createSlasher,
35
+ } from '@aztec/slasher';
36
+ import { CollectionLimitsConfig, PublicSimulatorConfig } from '@aztec/stdlib/avm';
35
37
  import { AztecAddress } from '@aztec/stdlib/aztec-address';
36
- import type { InBlock, L2Block, L2BlockNumber, L2BlockSource, NullifierWithBlockSource } from '@aztec/stdlib/block';
38
+ import { BlockHash, type BlockParameter, type DataInBlock, L2Block, type L2BlockSource } from '@aztec/stdlib/block';
39
+ import type { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
37
40
  import type {
38
41
  ContractClassPublic,
39
42
  ContractDataSource,
@@ -41,34 +44,44 @@ import type {
41
44
  NodeInfo,
42
45
  ProtocolContractAddresses,
43
46
  } from '@aztec/stdlib/contract';
44
- import type { GasFees } from '@aztec/stdlib/gas';
45
- import { computePublicDataTreeLeafSlot, siloNullifier } from '@aztec/stdlib/hash';
46
- import type { AztecNode, GetContractClassLogsResponse, GetPublicLogsResponse } from '@aztec/stdlib/interfaces/client';
47
+ import { GasFees } from '@aztec/stdlib/gas';
48
+ import { computePublicDataTreeLeafSlot } from '@aztec/stdlib/hash';
47
49
  import {
50
+ type AztecNode,
51
+ type AztecNodeAdmin,
52
+ type AztecNodeAdminConfig,
53
+ AztecNodeAdminConfigSchema,
54
+ type GetContractClassLogsResponse,
55
+ type GetPublicLogsResponse,
56
+ } from '@aztec/stdlib/interfaces/client';
57
+ import {
58
+ type AllowedElement,
48
59
  type ClientProtocolCircuitVerifier,
49
60
  type L2LogsSource,
50
- type ProverConfig,
51
- type SequencerConfig,
52
61
  type Service,
53
62
  type WorldStateSyncStatus,
54
63
  type WorldStateSynchronizer,
55
64
  tryStop,
56
65
  } from '@aztec/stdlib/interfaces/server';
57
- import type { LogFilter, PrivateLog, TxScopedL2Log } from '@aztec/stdlib/logs';
58
- import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging';
66
+ import type { LogFilter, SiloedTag, Tag, TxScopedL2Log } from '@aztec/stdlib/logs';
67
+ import { InboxLeaf, type L1ToL2MessageSource } from '@aztec/stdlib/messaging';
59
68
  import { P2PClientType } from '@aztec/stdlib/p2p';
60
- import { MerkleTreeId, NullifierMembershipWitness, PublicDataWitness } from '@aztec/stdlib/trees';
69
+ import type { Offense, SlashPayloadRound } from '@aztec/stdlib/slashing';
61
70
  import type { NullifierLeafPreimage, PublicDataTreeLeaf, PublicDataTreeLeafPreimage } from '@aztec/stdlib/trees';
71
+ import { MerkleTreeId, NullifierMembershipWitness, PublicDataWitness } from '@aztec/stdlib/trees';
62
72
  import {
63
73
  type BlockHeader,
74
+ type GlobalVariableBuilder as GlobalVariableBuilderInterface,
75
+ type IndexedTxEffect,
64
76
  PublicSimulationOutput,
65
77
  Tx,
66
- TxEffect,
67
78
  type TxHash,
68
79
  TxReceipt,
69
80
  TxStatus,
70
81
  type TxValidationResult,
71
82
  } from '@aztec/stdlib/tx';
83
+ import { getPackageVersion } from '@aztec/stdlib/update-checker';
84
+ import type { SingleValidatorStats, ValidatorsStats } from '@aztec/stdlib/validators';
72
85
  import {
73
86
  Attributes,
74
87
  type TelemetryClient,
@@ -77,18 +90,33 @@ import {
77
90
  getTelemetryClient,
78
91
  trackSpan,
79
92
  } from '@aztec/telemetry-client';
80
- import { createValidatorClient } from '@aztec/validator-client';
93
+ import {
94
+ FullNodeCheckpointsBuilder as CheckpointsBuilder,
95
+ FullNodeCheckpointsBuilder,
96
+ NodeKeystoreAdapter,
97
+ ValidatorClient,
98
+ createBlockProposalHandler,
99
+ createValidatorClient,
100
+ createValidatorForAcceptingTxs,
101
+ } from '@aztec/validator-client';
81
102
  import { createWorldStateSynchronizer } from '@aztec/world-state';
82
103
 
83
- import { type AztecNodeConfig, getPackageVersion } from './config.js';
104
+ import { createPublicClient, fallback, http } from 'viem';
105
+
106
+ import { createSentinel } from '../sentinel/factory.js';
107
+ import { Sentinel } from '../sentinel/sentinel.js';
108
+ import { type AztecNodeConfig, createKeyStoreForValidator } from './config.js';
84
109
  import { NodeMetrics } from './node_metrics.js';
85
110
 
86
111
  /**
87
112
  * The aztec node.
88
113
  */
89
- export class AztecNodeService implements AztecNode, Traceable {
90
- private packageVersion: string;
114
+ export class AztecNodeService implements AztecNode, AztecNodeAdmin, Traceable {
91
115
  private metrics: NodeMetrics;
116
+ private initialHeaderHashPromise: Promise<BlockHash> | undefined = undefined;
117
+
118
+ // Prevent two snapshot operations to happen simultaneously
119
+ private isUploadingSnapshot = false;
92
120
 
93
121
  public readonly tracer: Tracer;
94
122
 
@@ -99,17 +127,21 @@ export class AztecNodeService implements AztecNode, Traceable {
99
127
  protected readonly logsSource: L2LogsSource,
100
128
  protected readonly contractDataSource: ContractDataSource,
101
129
  protected readonly l1ToL2MessageSource: L1ToL2MessageSource,
102
- protected readonly nullifierSource: NullifierWithBlockSource,
103
130
  protected readonly worldStateSynchronizer: WorldStateSynchronizer,
104
131
  protected readonly sequencer: SequencerClient | undefined,
132
+ protected readonly slasherClient: SlasherClientInterface | undefined,
133
+ protected readonly validatorsSentinel: Sentinel | undefined,
134
+ protected readonly epochPruneWatcher: EpochPruneWatcher | undefined,
105
135
  protected readonly l1ChainId: number,
106
136
  protected readonly version: number,
107
- protected readonly globalVariableBuilder: GlobalVariableBuilder,
137
+ protected readonly globalVariableBuilder: GlobalVariableBuilderInterface,
138
+ protected readonly epochCache: EpochCacheInterface,
139
+ protected readonly packageVersion: string,
108
140
  private proofVerifier: ClientProtocolCircuitVerifier,
109
141
  private telemetry: TelemetryClient = getTelemetryClient(),
110
142
  private log = createLogger('node'),
143
+ private blobClient?: BlobClientInterface,
111
144
  ) {
112
- this.packageVersion = getPackageVersion();
113
145
  this.metrics = new NodeMetrics(telemetry, 'AztecNodeService');
114
146
  this.tracer = telemetry.getTracer('AztecNodeService');
115
147
 
@@ -132,31 +164,103 @@ export class AztecNodeService implements AztecNode, Traceable {
132
164
  * @returns - A fully synced Aztec Node for use in development/testing.
133
165
  */
134
166
  public static async createAndSync(
135
- config: AztecNodeConfig,
167
+ inputConfig: AztecNodeConfig,
136
168
  deps: {
137
169
  telemetry?: TelemetryClient;
138
170
  logger?: Logger;
139
171
  publisher?: SequencerPublisher;
140
172
  dateProvider?: DateProvider;
141
- blobSinkClient?: BlobSinkClientInterface;
173
+ p2pClientDeps?: P2PClientDeps<P2PClientType.Full>;
142
174
  } = {},
143
175
  options: {
144
176
  prefilledPublicData?: PublicDataTreeLeaf[];
177
+ dontStartSequencer?: boolean;
145
178
  } = {},
146
179
  ): Promise<AztecNodeService> {
147
- const telemetry = deps.telemetry ?? getTelemetryClient();
180
+ const config = { ...inputConfig }; // Copy the config so we dont mutate the input object
148
181
  const log = deps.logger ?? createLogger('node');
182
+ const packageVersion = getPackageVersion() ?? '';
183
+ const telemetry = deps.telemetry ?? getTelemetryClient();
149
184
  const dateProvider = deps.dateProvider ?? new DateProvider();
150
- const blobSinkClient = deps.blobSinkClient ?? createBlobSinkClient(config);
151
185
  const ethereumChain = createEthereumChain(config.l1RpcUrls, config.l1ChainId);
152
- //validate that the actual chain id matches that specified in configuration
186
+
187
+ // Build a key store from file if given or from environment otherwise
188
+ let keyStoreManager: KeystoreManager | undefined;
189
+ const keyStoreProvided = config.keyStoreDirectory !== undefined && config.keyStoreDirectory.length > 0;
190
+ if (keyStoreProvided) {
191
+ const keyStores = loadKeystores(config.keyStoreDirectory!);
192
+ keyStoreManager = new KeystoreManager(mergeKeystores(keyStores));
193
+ } else {
194
+ const keyStore = createKeyStoreForValidator(config);
195
+ if (keyStore) {
196
+ keyStoreManager = new KeystoreManager(keyStore);
197
+ }
198
+ }
199
+
200
+ await keyStoreManager?.validateSigners();
201
+
202
+ // If we are a validator, verify our configuration before doing too much more.
203
+ if (!config.disableValidator) {
204
+ if (keyStoreManager === undefined) {
205
+ throw new Error('Failed to create key store, a requirement for running a validator');
206
+ }
207
+ if (!keyStoreProvided) {
208
+ log.warn(
209
+ 'KEY STORE CREATED FROM ENVIRONMENT, IT IS RECOMMENDED TO USE A FILE-BASED KEY STORE IN PRODUCTION ENVIRONMENTS',
210
+ );
211
+ }
212
+ ValidatorClient.validateKeyStoreConfiguration(keyStoreManager, log);
213
+ }
214
+
215
+ // validate that the actual chain id matches that specified in configuration
153
216
  if (config.l1ChainId !== ethereumChain.chainInfo.id) {
154
217
  throw new Error(
155
218
  `RPC URL configured for chain id ${ethereumChain.chainInfo.id} but expected id ${config.l1ChainId}`,
156
219
  );
157
220
  }
158
221
 
159
- const archiver = await createArchiver(config, blobSinkClient, { blockUntilSync: true }, telemetry);
222
+ const publicClient = createPublicClient({
223
+ chain: ethereumChain.chainInfo,
224
+ transport: fallback(config.l1RpcUrls.map((url: string) => http(url, { batch: false }))),
225
+ pollingInterval: config.viemPollingIntervalMS,
226
+ });
227
+
228
+ const l1ContractsAddresses = await RegistryContract.collectAddresses(
229
+ publicClient,
230
+ config.l1Contracts.registryAddress,
231
+ config.rollupVersion ?? 'canonical',
232
+ );
233
+
234
+ // Overwrite the passed in vars.
235
+ config.l1Contracts = { ...config.l1Contracts, ...l1ContractsAddresses };
236
+
237
+ const rollupContract = new RollupContract(publicClient, config.l1Contracts.rollupAddress.toString());
238
+ const [l1GenesisTime, slotDuration, rollupVersionFromRollup] = await Promise.all([
239
+ rollupContract.getL1GenesisTime(),
240
+ rollupContract.getSlotDuration(),
241
+ rollupContract.getVersion(),
242
+ ] as const);
243
+
244
+ config.rollupVersion ??= Number(rollupVersionFromRollup);
245
+
246
+ if (config.rollupVersion !== Number(rollupVersionFromRollup)) {
247
+ log.warn(
248
+ `Registry looked up and returned a rollup with version (${config.rollupVersion}), but this does not match with version detected from the rollup directly: (${rollupVersionFromRollup}).`,
249
+ );
250
+ }
251
+
252
+ const blobClient = await createBlobClientWithFileStores(config, createLogger('node:blob-client:client'));
253
+
254
+ // attempt snapshot sync if possible
255
+ await trySnapshotSync(config, log);
256
+
257
+ const epochCache = await EpochCache.create(config.l1Contracts.rollupAddress, config, { dateProvider });
258
+
259
+ const archiver = await createArchiver(
260
+ config,
261
+ { blobClient, epochCache, telemetry, dateProvider },
262
+ { blockUntilSync: !config.skipArchiverInitialSync },
263
+ );
160
264
 
161
265
  // now create the merkle trees and the world state synchronizer
162
266
  const worldStateSynchronizer = await createWorldStateSynchronizer(
@@ -165,12 +269,14 @@ export class AztecNodeService implements AztecNode, Traceable {
165
269
  options.prefilledPublicData,
166
270
  telemetry,
167
271
  );
168
- const proofVerifier = config.realProofs ? await BBCircuitVerifier.new(config) : new TestCircuitVerifier();
272
+ const circuitVerifier =
273
+ config.realProofs || config.debugForceTxProofVerification
274
+ ? await BBCircuitVerifier.new(config)
275
+ : new TestCircuitVerifier(config.proverTestVerificationDelayMs);
169
276
  if (!config.realProofs) {
170
277
  log.warn(`Aztec node is accepting fake proofs`);
171
278
  }
172
-
173
- const epochCache = await EpochCache.create(config.l1Contracts.rollupAddress, config, { dateProvider });
279
+ const proofVerifier = new QueuedIVCVerifier(config, circuitVerifier);
174
280
 
175
281
  // create the tx pool and the p2p client, which will need the l2 block source
176
282
  const p2pClient = await createP2PClient(
@@ -180,33 +286,187 @@ export class AztecNodeService implements AztecNode, Traceable {
180
286
  proofVerifier,
181
287
  worldStateSynchronizer,
182
288
  epochCache,
289
+ packageVersion,
290
+ dateProvider,
183
291
  telemetry,
292
+ deps.p2pClientDeps,
184
293
  );
185
294
 
186
- const slasherClient = createSlasherClient(config, archiver, telemetry);
295
+ // We should really not be modifying the config object
296
+ config.txPublicSetupAllowList = config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions());
187
297
 
188
- // start both and wait for them to sync from the block source
189
- await Promise.all([p2pClient.start(), worldStateSynchronizer.start(), slasherClient.start()]);
190
- log.verbose(`All Aztec Node subsystems synced`);
298
+ // Create FullNodeCheckpointsBuilder for validator and non-validator block proposal handling
299
+ const validatorCheckpointsBuilder = new FullNodeCheckpointsBuilder(
300
+ { ...config, l1GenesisTime, slotDuration: Number(slotDuration) },
301
+ worldStateSynchronizer,
302
+ archiver,
303
+ dateProvider,
304
+ telemetry,
305
+ );
191
306
 
192
- const validatorClient = createValidatorClient(config, { p2pClient, telemetry, dateProvider, epochCache });
307
+ // We'll accumulate sentinel watchers here
308
+ const watchers: Watcher[] = [];
193
309
 
194
- // now create the sequencer
195
- const sequencer = config.disableValidator
196
- ? undefined
197
- : await SequencerClient.new(config, {
198
- ...deps,
199
- validatorClient,
200
- p2pClient,
201
- worldStateSynchronizer,
202
- slasherClient,
203
- contractDataSource: archiver,
204
- l2BlockSource: archiver,
205
- l1ToL2MessageSource: archiver,
206
- telemetry,
207
- dateProvider,
208
- blobSinkClient,
209
- });
310
+ // Create validator client if required
311
+ const validatorClient = await createValidatorClient(config, {
312
+ checkpointsBuilder: validatorCheckpointsBuilder,
313
+ worldState: worldStateSynchronizer,
314
+ p2pClient,
315
+ telemetry,
316
+ dateProvider,
317
+ epochCache,
318
+ blockSource: archiver,
319
+ l1ToL2MessageSource: archiver,
320
+ keyStoreManager,
321
+ blobClient,
322
+ });
323
+
324
+ // If we have a validator client, register it as a source of offenses for the slasher,
325
+ // and have it register callbacks on the p2p client *before* we start it, otherwise messages
326
+ // like attestations or auths will fail.
327
+ if (validatorClient) {
328
+ watchers.push(validatorClient);
329
+ if (!options.dontStartSequencer) {
330
+ await validatorClient.registerHandlers();
331
+ }
332
+ }
333
+
334
+ // If there's no validator client but alwaysReexecuteBlockProposals is enabled,
335
+ // create a BlockProposalHandler to reexecute block proposals for monitoring
336
+ if (!validatorClient && config.alwaysReexecuteBlockProposals) {
337
+ log.info('Setting up block proposal reexecution for monitoring');
338
+ createBlockProposalHandler(config, {
339
+ checkpointsBuilder: validatorCheckpointsBuilder,
340
+ worldState: worldStateSynchronizer,
341
+ epochCache,
342
+ blockSource: archiver,
343
+ l1ToL2MessageSource: archiver,
344
+ p2pClient,
345
+ dateProvider,
346
+ telemetry,
347
+ }).registerForReexecution(p2pClient);
348
+ }
349
+
350
+ // Start world state and wait for it to sync to the archiver.
351
+ await worldStateSynchronizer.start();
352
+
353
+ // Start p2p. Note that it depends on world state to be running.
354
+ await p2pClient.start();
355
+
356
+ const validatorsSentinel = await createSentinel(epochCache, archiver, p2pClient, config);
357
+ if (validatorsSentinel && config.slashInactivityPenalty > 0n) {
358
+ watchers.push(validatorsSentinel);
359
+ }
360
+
361
+ let epochPruneWatcher: EpochPruneWatcher | undefined;
362
+ if (config.slashPrunePenalty > 0n || config.slashDataWithholdingPenalty > 0n) {
363
+ epochPruneWatcher = new EpochPruneWatcher(
364
+ archiver,
365
+ archiver,
366
+ epochCache,
367
+ p2pClient.getTxProvider(),
368
+ validatorCheckpointsBuilder,
369
+ config,
370
+ );
371
+ watchers.push(epochPruneWatcher);
372
+ }
373
+
374
+ // We assume we want to slash for invalid attestations unless all max penalties are set to 0
375
+ let attestationsBlockWatcher: AttestationsBlockWatcher | undefined;
376
+ if (config.slashProposeInvalidAttestationsPenalty > 0n || config.slashAttestDescendantOfInvalidPenalty > 0n) {
377
+ attestationsBlockWatcher = new AttestationsBlockWatcher(archiver, epochCache, config);
378
+ watchers.push(attestationsBlockWatcher);
379
+ }
380
+
381
+ // Start p2p-related services once the archiver has completed sync
382
+ void archiver
383
+ .waitForInitialSync()
384
+ .then(async () => {
385
+ await p2pClient.start();
386
+ await validatorsSentinel?.start();
387
+ await epochPruneWatcher?.start();
388
+ await attestationsBlockWatcher?.start();
389
+ log.info(`All p2p services started`);
390
+ })
391
+ .catch(err => log.error('Failed to start p2p services after archiver sync', err));
392
+
393
+ // Validator enabled, create/start relevant service
394
+ let sequencer: SequencerClient | undefined;
395
+ let slasherClient: SlasherClientInterface | undefined;
396
+ if (!config.disableValidator && validatorClient) {
397
+ // We create a slasher only if we have a sequencer, since all slashing actions go through the sequencer publisher
398
+ // as they are executed when the node is selected as proposer.
399
+ const validatorAddresses = keyStoreManager
400
+ ? NodeKeystoreAdapter.fromKeyStoreManager(keyStoreManager).getAddresses()
401
+ : [];
402
+
403
+ slasherClient = await createSlasher(
404
+ config,
405
+ config.l1Contracts,
406
+ getPublicClient(config),
407
+ watchers,
408
+ dateProvider,
409
+ epochCache,
410
+ validatorAddresses,
411
+ undefined, // logger
412
+ );
413
+ await slasherClient.start();
414
+
415
+ const l1TxUtils = config.publisherForwarderAddress
416
+ ? await createForwarderL1TxUtilsFromEthSigner(
417
+ publicClient,
418
+ keyStoreManager!.createAllValidatorPublisherSigners(),
419
+ config.publisherForwarderAddress,
420
+ { ...config, scope: 'sequencer' },
421
+ { telemetry, logger: log.createChild('l1-tx-utils'), dateProvider },
422
+ )
423
+ : await createL1TxUtilsWithBlobsFromEthSigner(
424
+ publicClient,
425
+ keyStoreManager!.createAllValidatorPublisherSigners(),
426
+ { ...config, scope: 'sequencer' },
427
+ { telemetry, logger: log.createChild('l1-tx-utils'), dateProvider },
428
+ );
429
+
430
+ // Create and start the sequencer client
431
+ const checkpointsBuilder = new CheckpointsBuilder(
432
+ { ...config, l1GenesisTime, slotDuration: Number(slotDuration) },
433
+ worldStateSynchronizer,
434
+ archiver,
435
+ dateProvider,
436
+ telemetry,
437
+ );
438
+
439
+ sequencer = await SequencerClient.new(config, {
440
+ ...deps,
441
+ epochCache,
442
+ l1TxUtils,
443
+ validatorClient,
444
+ p2pClient,
445
+ worldStateSynchronizer,
446
+ slasherClient,
447
+ checkpointsBuilder,
448
+ l2BlockSource: archiver,
449
+ l1ToL2MessageSource: archiver,
450
+ telemetry,
451
+ dateProvider,
452
+ blobClient,
453
+ nodeKeyStore: keyStoreManager!,
454
+ });
455
+ }
456
+
457
+ if (!options.dontStartSequencer && sequencer) {
458
+ await sequencer.start();
459
+ log.verbose(`Sequencer started`);
460
+ } else if (sequencer) {
461
+ log.warn(`Sequencer created but not started`);
462
+ }
463
+
464
+ const globalVariableBuilder = new GlobalVariableBuilder({
465
+ ...config,
466
+ rollupVersion: BigInt(config.rollupVersion),
467
+ l1GenesisTime,
468
+ slotDuration: Number(slotDuration),
469
+ });
210
470
 
211
471
  return new AztecNodeService(
212
472
  config,
@@ -215,15 +475,20 @@ export class AztecNodeService implements AztecNode, Traceable {
215
475
  archiver,
216
476
  archiver,
217
477
  archiver,
218
- archiver,
219
478
  worldStateSynchronizer,
220
479
  sequencer,
480
+ slasherClient,
481
+ validatorsSentinel,
482
+ epochPruneWatcher,
221
483
  ethereumChain.chainInfo.id,
222
- config.version,
223
- new GlobalVariableBuilder(config),
484
+ config.rollupVersion,
485
+ globalVariableBuilder,
486
+ epochCache,
487
+ packageVersion,
224
488
  proofVerifier,
225
489
  telemetry,
226
490
  log,
491
+ blobClient,
227
492
  );
228
493
  }
229
494
 
@@ -259,6 +524,10 @@ export class AztecNodeService implements AztecNode, Traceable {
259
524
  return Promise.resolve(this.p2pClient.getEnr()?.encodeTxt());
260
525
  }
261
526
 
527
+ public async getAllowedPublicSetup(): Promise<AllowedElement[]> {
528
+ return this.config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions());
529
+ }
530
+
262
531
  /**
263
532
  * Method to determine if the node is ready to accept transactions.
264
533
  * @returns - Flag indicating the readiness for tx submission.
@@ -268,20 +537,19 @@ export class AztecNodeService implements AztecNode, Traceable {
268
537
  }
269
538
 
270
539
  public async getNodeInfo(): Promise<NodeInfo> {
271
- const [nodeVersion, protocolVersion, chainId, enr, contractAddresses, protocolContractAddresses] =
272
- await Promise.all([
273
- this.getNodeVersion(),
274
- this.getVersion(),
275
- this.getChainId(),
276
- this.getEncodedEnr(),
277
- this.getL1ContractAddresses(),
278
- this.getProtocolContractAddresses(),
279
- ]);
540
+ const [nodeVersion, rollupVersion, chainId, enr, contractAddresses, protocolContractAddresses] = await Promise.all([
541
+ this.getNodeVersion(),
542
+ this.getVersion(),
543
+ this.getChainId(),
544
+ this.getEncodedEnr(),
545
+ this.getL1ContractAddresses(),
546
+ this.getProtocolContractAddresses(),
547
+ ]);
280
548
 
281
549
  const nodeInfo: NodeInfo = {
282
550
  nodeVersion,
283
551
  l1ChainId: chainId,
284
- protocolVersion,
552
+ rollupVersion,
285
553
  enr,
286
554
  l1ContractAddresses: contractAddresses,
287
555
  protocolContractAddresses: protocolContractAddresses,
@@ -291,12 +559,46 @@ export class AztecNodeService implements AztecNode, Traceable {
291
559
  }
292
560
 
293
561
  /**
294
- * Get a block specified by its number.
295
- * @param number - The block number being requested.
562
+ * Get a block specified by its block number, block hash, or 'latest'.
563
+ * @param block - The block parameter (block number, block hash, or 'latest').
564
+ * @returns The requested block.
565
+ */
566
+ public async getBlock(block: BlockParameter): Promise<L2Block | undefined> {
567
+ if (BlockHash.isBlockHash(block)) {
568
+ return this.getBlockByHash(block);
569
+ }
570
+ const blockNumber = block === 'latest' ? await this.getBlockNumber() : (block as BlockNumber);
571
+ if (blockNumber === BlockNumber.ZERO) {
572
+ return this.buildInitialBlock();
573
+ }
574
+ return await this.blockSource.getL2Block(blockNumber);
575
+ }
576
+
577
+ /**
578
+ * Get a block specified by its hash.
579
+ * @param blockHash - The block hash being requested.
580
+ * @returns The requested block.
581
+ */
582
+ public async getBlockByHash(blockHash: BlockHash): Promise<L2Block | undefined> {
583
+ const initialBlockHash = await this.#getInitialHeaderHash();
584
+ if (blockHash.equals(initialBlockHash)) {
585
+ return this.buildInitialBlock();
586
+ }
587
+ return await this.blockSource.getL2BlockByHash(blockHash);
588
+ }
589
+
590
+ private buildInitialBlock(): L2Block {
591
+ const initialHeader = this.worldStateSynchronizer.getCommitted().getInitialHeader();
592
+ return L2Block.empty(initialHeader);
593
+ }
594
+
595
+ /**
596
+ * Get a block specified by its archive root.
597
+ * @param archive - The archive root being requested.
296
598
  * @returns The requested block.
297
599
  */
298
- public async getBlock(number: number): Promise<L2Block | undefined> {
299
- return await this.blockSource.getBlock(number);
600
+ public async getBlockByArchive(archive: Fr): Promise<L2Block | undefined> {
601
+ return await this.blockSource.getL2BlockByArchive(archive);
300
602
  }
301
603
 
302
604
  /**
@@ -305,30 +607,50 @@ export class AztecNodeService implements AztecNode, Traceable {
305
607
  * @param limit - The maximum number of blocks to obtain.
306
608
  * @returns The blocks requested.
307
609
  */
308
- public async getBlocks(from: number, limit: number): Promise<L2Block[]> {
309
- return (await this.blockSource.getBlocks(from, limit)) ?? [];
610
+ public async getBlocks(from: BlockNumber, limit: number): Promise<L2Block[]> {
611
+ return (await this.blockSource.getBlocks(from, BlockNumber(limit))) ?? [];
612
+ }
613
+
614
+ public async getCheckpoints(from: CheckpointNumber, limit: number): Promise<PublishedCheckpoint[]> {
615
+ return (await this.blockSource.getCheckpoints(from, limit)) ?? [];
616
+ }
617
+
618
+ public async getCheckpointedBlocks(from: BlockNumber, limit: number) {
619
+ return (await this.blockSource.getCheckpointedBlocks(from, limit)) ?? [];
310
620
  }
311
621
 
312
622
  /**
313
- * Method to fetch the current base fees.
314
- * @returns The current base fees.
623
+ * Method to fetch the current min L2 fees.
624
+ * @returns The current min L2 fees.
315
625
  */
316
- public async getCurrentBaseFees(): Promise<GasFees> {
317
- return await this.globalVariableBuilder.getCurrentBaseFees();
626
+ public async getCurrentMinFees(): Promise<GasFees> {
627
+ return await this.globalVariableBuilder.getCurrentMinFees();
628
+ }
629
+
630
+ public async getMaxPriorityFees(): Promise<GasFees> {
631
+ for await (const tx of this.p2pClient.iteratePendingTxs()) {
632
+ return tx.getGasSettings().maxPriorityFeesPerGas;
633
+ }
634
+
635
+ return GasFees.from({ feePerDaGas: 0n, feePerL2Gas: 0n });
318
636
  }
319
637
 
320
638
  /**
321
- * Method to fetch the current block number.
639
+ * Method to fetch the latest block number synchronized by the node.
322
640
  * @returns The block number.
323
641
  */
324
- public async getBlockNumber(): Promise<number> {
642
+ public async getBlockNumber(): Promise<BlockNumber> {
325
643
  return await this.blockSource.getBlockNumber();
326
644
  }
327
645
 
328
- public async getProvenBlockNumber(): Promise<number> {
646
+ public async getProvenBlockNumber(): Promise<BlockNumber> {
329
647
  return await this.blockSource.getProvenBlockNumber();
330
648
  }
331
649
 
650
+ public async getCheckpointedBlockNumber(): Promise<BlockNumber> {
651
+ return await this.blockSource.getCheckpointedL2BlockNumber();
652
+ }
653
+
332
654
  /**
333
655
  * Method to fetch the version of the package.
334
656
  * @returns The node package version
@@ -353,49 +675,51 @@ export class AztecNodeService implements AztecNode, Traceable {
353
675
  return Promise.resolve(this.l1ChainId);
354
676
  }
355
677
 
356
- public async getContractClass(id: Fr): Promise<ContractClassPublic | undefined> {
357
- const klazz = await this.contractDataSource.getContractClass(id);
358
-
359
- // TODO(#10007): Remove this check. This is needed only because we're manually registering
360
- // some contracts in the archiver so they are available to all nodes (see `registerCommonContracts`
361
- // in `archiver/src/factory.ts`), but we still want clients to send the registration tx in order
362
- // to emit the corresponding nullifier, which is now being checked. Note that this method
363
- // is only called by the PXE to check if a contract is publicly registered.
364
- if (klazz) {
365
- const classNullifier = await siloNullifier(AztecAddress.fromNumber(REGISTERER_CONTRACT_ADDRESS), id);
366
- const worldState = await this.#getWorldState('latest');
367
- const [index] = await worldState.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, [classNullifier.toBuffer()]);
368
- this.log.debug(`Registration nullifier ${classNullifier} for contract class ${id} found at index ${index}`);
369
- if (index === undefined) {
370
- return undefined;
371
- }
372
- }
373
-
374
- return klazz;
678
+ public getContractClass(id: Fr): Promise<ContractClassPublic | undefined> {
679
+ return this.contractDataSource.getContractClass(id);
375
680
  }
376
681
 
377
682
  public getContract(address: AztecAddress): Promise<ContractInstanceWithAddress | undefined> {
378
683
  return this.contractDataSource.getContract(address);
379
684
  }
380
685
 
381
- /**
382
- * Retrieves all private logs from up to `limit` blocks, starting from the block number `from`.
383
- * @param from - The block number from which to begin retrieving logs.
384
- * @param limit - The maximum number of blocks to retrieve logs from.
385
- * @returns An array of private logs from the specified range of blocks.
386
- */
387
- public getPrivateLogs(from: number, limit: number): Promise<PrivateLog[]> {
388
- return this.logsSource.getPrivateLogs(from, limit);
686
+ public async getPrivateLogsByTags(
687
+ tags: SiloedTag[],
688
+ page?: number,
689
+ referenceBlock?: BlockHash,
690
+ ): Promise<TxScopedL2Log[][]> {
691
+ if (referenceBlock) {
692
+ const initialBlockHash = await this.#getInitialHeaderHash();
693
+ if (!referenceBlock.equals(initialBlockHash)) {
694
+ const header = await this.blockSource.getBlockHeaderByHash(referenceBlock);
695
+ if (!header) {
696
+ throw new Error(
697
+ `Block ${referenceBlock.toString()} not found in the node. This might indicate a reorg has occurred.`,
698
+ );
699
+ }
700
+ }
701
+ }
702
+ return this.logsSource.getPrivateLogsByTags(tags, page);
389
703
  }
390
704
 
391
- /**
392
- * Gets all logs that match any of the received tags (i.e. logs with their first field equal to a tag).
393
- * @param tags - The tags to filter the logs by.
394
- * @returns For each received tag, an array of matching logs is returned. An empty array implies no logs match
395
- * that tag.
396
- */
397
- public getLogsByTags(tags: Fr[]): Promise<TxScopedL2Log[][]> {
398
- return this.logsSource.getLogsByTags(tags);
705
+ public async getPublicLogsByTagsFromContract(
706
+ contractAddress: AztecAddress,
707
+ tags: Tag[],
708
+ page?: number,
709
+ referenceBlock?: BlockHash,
710
+ ): Promise<TxScopedL2Log[][]> {
711
+ if (referenceBlock) {
712
+ const initialBlockHash = await this.#getInitialHeaderHash();
713
+ if (!referenceBlock.equals(initialBlockHash)) {
714
+ const header = await this.blockSource.getBlockHeaderByHash(referenceBlock);
715
+ if (!header) {
716
+ throw new Error(
717
+ `Block ${referenceBlock.toString()} not found in the node. This might indicate a reorg has occurred.`,
718
+ );
719
+ }
720
+ }
721
+ }
722
+ return this.logsSource.getPublicLogsByTagsFromContract(contractAddress, tags, page);
399
723
  }
400
724
 
401
725
  /**
@@ -421,17 +745,19 @@ export class AztecNodeService implements AztecNode, Traceable {
421
745
  * @param tx - The transaction to be submitted.
422
746
  */
423
747
  public async sendTx(tx: Tx) {
748
+ await this.#sendTx(tx);
749
+ }
750
+
751
+ async #sendTx(tx: Tx) {
424
752
  const timer = new Timer();
425
- const txHash = (await tx.getTxHash()).toString();
753
+ const txHash = tx.getTxHash().toString();
426
754
 
427
755
  const valid = await this.isValidTx(tx);
428
756
  if (valid.result !== 'valid') {
429
757
  const reason = valid.reason.join(', ');
430
758
  this.metrics.receivedTx(timer.ms(), false);
431
- this.log.warn(`Invalid tx ${txHash}: ${reason}`, { txHash });
432
- // TODO(#10967): Throw when receiving an invalid tx instead of just returning
433
- // throw new Error(`Invalid tx: ${reason}`);
434
- return;
759
+ this.log.warn(`Received invalid tx ${txHash}: ${reason}`, { txHash });
760
+ throw new Error(`Invalid tx: ${reason}`);
435
761
  }
436
762
 
437
763
  await this.p2pClient!.sendTx(tx);
@@ -440,24 +766,29 @@ export class AztecNodeService implements AztecNode, Traceable {
440
766
  }
441
767
 
442
768
  public async getTxReceipt(txHash: TxHash): Promise<TxReceipt> {
443
- let txReceipt = new TxReceipt(txHash, TxStatus.DROPPED, 'Tx dropped by P2P node.');
444
-
445
- // We first check if the tx is in pending (instead of first checking if it is mined) because if we first check
446
- // for mined and then for pending there could be a race condition where the tx is mined between the two checks
447
- // and we would incorrectly return a TxReceipt with status DROPPED
448
- if ((await this.p2pClient.getTxStatus(txHash)) === 'pending') {
449
- txReceipt = new TxReceipt(txHash, TxStatus.PENDING, '');
450
- }
769
+ // Check the tx pool status first. If the tx is known to the pool (pending or mined), we'll use that
770
+ // as a fallback if we don't find a settled receipt in the archiver.
771
+ const txPoolStatus = await this.p2pClient.getTxStatus(txHash);
772
+ const isKnownToPool = txPoolStatus === 'pending' || txPoolStatus === 'mined';
451
773
 
774
+ // Then get the actual tx from the archiver, which tracks every tx in a mined block.
452
775
  const settledTxReceipt = await this.blockSource.getSettledTxReceipt(txHash);
776
+
453
777
  if (settledTxReceipt) {
454
- txReceipt = settledTxReceipt;
778
+ // If the archiver has the receipt then return it.
779
+ return settledTxReceipt;
780
+ } else if (isKnownToPool) {
781
+ // If the tx is in the pool but not in the archiver, it's pending.
782
+ // This handles race conditions between archiver and p2p, where the archiver
783
+ // has pruned the block in which a tx was mined, but p2p has not caught up yet.
784
+ return new TxReceipt(txHash, TxStatus.PENDING, undefined, undefined);
785
+ } else {
786
+ // Otherwise, if we don't know the tx, we consider it dropped.
787
+ return new TxReceipt(txHash, TxStatus.DROPPED, undefined, 'Tx dropped by P2P node');
455
788
  }
456
-
457
- return txReceipt;
458
789
  }
459
790
 
460
- public getTxEffect(txHash: TxHash): Promise<InBlock<TxEffect> | undefined> {
791
+ public getTxEffect(txHash: TxHash): Promise<IndexedTxEffect | undefined> {
461
792
  return this.blockSource.getTxEffect(txHash);
462
793
  }
463
794
 
@@ -465,139 +796,167 @@ export class AztecNodeService implements AztecNode, Traceable {
465
796
  * Method to stop the aztec node.
466
797
  */
467
798
  public async stop() {
468
- this.log.info(`Stopping`);
469
- await this.sequencer?.stop();
470
- await this.p2pClient.stop();
471
- await this.worldStateSynchronizer.stop();
799
+ this.log.info(`Stopping Aztec Node`);
800
+ await tryStop(this.validatorsSentinel);
801
+ await tryStop(this.epochPruneWatcher);
802
+ await tryStop(this.slasherClient);
803
+ await tryStop(this.proofVerifier);
804
+ await tryStop(this.sequencer);
805
+ await tryStop(this.p2pClient);
806
+ await tryStop(this.worldStateSynchronizer);
472
807
  await tryStop(this.blockSource);
473
- await this.telemetry.stop();
474
- this.log.info(`Stopped`);
808
+ await tryStop(this.blobClient);
809
+ await tryStop(this.telemetry);
810
+ this.log.info(`Stopped Aztec Node`);
811
+ }
812
+
813
+ /**
814
+ * Returns the blob client used by this node.
815
+ * @internal - Exposed for testing purposes only.
816
+ */
817
+ public getBlobClient(): BlobClientInterface | undefined {
818
+ return this.blobClient;
475
819
  }
476
820
 
477
821
  /**
478
822
  * Method to retrieve pending txs.
823
+ * @param limit - The number of items to returns
824
+ * @param after - The last known pending tx. Used for pagination
479
825
  * @returns - The pending txs.
480
826
  */
481
- public getPendingTxs() {
482
- return this.p2pClient!.getPendingTxs();
827
+ public getPendingTxs(limit?: number, after?: TxHash): Promise<Tx[]> {
828
+ return this.p2pClient!.getPendingTxs(limit, after);
483
829
  }
484
830
 
485
- public async getPendingTxCount() {
486
- const pendingTxs = await this.getPendingTxs();
487
- return pendingTxs.length;
831
+ public getPendingTxCount(): Promise<number> {
832
+ return this.p2pClient!.getPendingTxCount();
488
833
  }
489
834
 
490
835
  /**
491
- * Method to retrieve a single tx from the mempool or unfinalised chain.
836
+ * Method to retrieve a single tx from the mempool or unfinalized chain.
492
837
  * @param txHash - The transaction hash to return.
493
838
  * @returns - The tx if it exists.
494
839
  */
495
- public getTxByHash(txHash: TxHash) {
840
+ public getTxByHash(txHash: TxHash): Promise<Tx | undefined> {
496
841
  return Promise.resolve(this.p2pClient!.getTxByHashFromPool(txHash));
497
842
  }
498
843
 
499
844
  /**
500
- * Method to retrieve txs from the mempool or unfinalised chain.
845
+ * Method to retrieve txs from the mempool or unfinalized chain.
501
846
  * @param txHash - The transaction hash to return.
502
847
  * @returns - The txs if it exists.
503
848
  */
504
- public async getTxsByHash(txHashes: TxHash[]) {
849
+ public async getTxsByHash(txHashes: TxHash[]): Promise<Tx[]> {
505
850
  return compactArray(await Promise.all(txHashes.map(txHash => this.getTxByHash(txHash))));
506
851
  }
507
852
 
508
- /**
509
- * Find the indexes of the given leaves in the given tree.
510
- * @param blockNumber - The block number at which to get the data or 'latest' for latest data
511
- * @param treeId - The tree to search in.
512
- * @param leafValue - The values to search for
513
- * @returns The indexes of the given leaves in the given tree or undefined if not found.
514
- */
515
853
  public async findLeavesIndexes(
516
- blockNumber: L2BlockNumber,
854
+ referenceBlock: BlockParameter,
517
855
  treeId: MerkleTreeId,
518
856
  leafValues: Fr[],
519
- ): Promise<(bigint | undefined)[]> {
520
- const committedDb = await this.#getWorldState(blockNumber);
521
- return await committedDb.findLeafIndices(
857
+ ): Promise<(DataInBlock<bigint> | undefined)[]> {
858
+ const committedDb = await this.#getWorldState(referenceBlock);
859
+ const maybeIndices = await committedDb.findLeafIndices(
522
860
  treeId,
523
861
  leafValues.map(x => x.toBuffer()),
524
862
  );
525
- }
863
+ // We filter out undefined values
864
+ const indices = maybeIndices.filter(x => x !== undefined) as bigint[];
526
865
 
527
- /**
528
- * Find the block numbers of the given leaf indices in the given tree.
529
- * @param blockNumber - The block number at which to get the data or 'latest' for latest data
530
- * @param treeId - The tree to search in.
531
- * @param leafIndices - The values to search for
532
- * @returns The indexes of the given leaves in the given tree or undefined if not found.
533
- */
534
- public async findBlockNumbersForIndexes(
535
- blockNumber: L2BlockNumber,
536
- treeId: MerkleTreeId,
537
- leafIndices: bigint[],
538
- ): Promise<(bigint | undefined)[]> {
539
- const committedDb = await this.#getWorldState(blockNumber);
540
- return await committedDb.getBlockNumbersForLeafIndices(treeId, leafIndices);
541
- }
866
+ // Now we find the block numbers for the indices
867
+ const blockNumbers = await committedDb.getBlockNumbersForLeafIndices(treeId, indices);
868
+
869
+ // If any of the block numbers are undefined, we throw an error.
870
+ for (let i = 0; i < indices.length; i++) {
871
+ if (blockNumbers[i] === undefined) {
872
+ throw new Error(`Block number is undefined for leaf index ${indices[i]} in tree ${MerkleTreeId[treeId]}`);
873
+ }
874
+ }
875
+
876
+ // Get unique block numbers in order to optimize num calls to getLeafValue function.
877
+ const uniqueBlockNumbers = [...new Set(blockNumbers.filter(x => x !== undefined))];
878
+
879
+ // Now we obtain the block hashes from the archive tree by calling await `committedDb.getLeafValue(treeId, index)`
880
+ // (note that block number corresponds to the leaf index in the archive tree).
881
+ const blockHashes = await Promise.all(
882
+ uniqueBlockNumbers.map(blockNumber => {
883
+ return committedDb.getLeafValue(MerkleTreeId.ARCHIVE, BigInt(blockNumber));
884
+ }),
885
+ );
542
886
 
543
- public async findNullifiersIndexesWithBlock(
544
- blockNumber: L2BlockNumber,
545
- nullifiers: Fr[],
546
- ): Promise<(InBlock<bigint> | undefined)[]> {
547
- if (blockNumber === 'latest') {
548
- blockNumber = await this.getBlockNumber();
887
+ // If any of the block hashes are undefined, we throw an error.
888
+ for (let i = 0; i < uniqueBlockNumbers.length; i++) {
889
+ if (blockHashes[i] === undefined) {
890
+ throw new Error(`Block hash is undefined for block number ${uniqueBlockNumbers[i]}`);
891
+ }
549
892
  }
550
- return this.nullifierSource.findNullifiersIndexesWithBlock(blockNumber, nullifiers);
893
+
894
+ // Create DataInBlock objects by combining indices, blockNumbers and blockHashes and return them.
895
+ return maybeIndices.map((index, i) => {
896
+ if (index === undefined) {
897
+ return undefined;
898
+ }
899
+ const blockNumber = blockNumbers[i];
900
+ if (blockNumber === undefined) {
901
+ return undefined;
902
+ }
903
+ const blockHashIndex = uniqueBlockNumbers.indexOf(blockNumber);
904
+ const blockHash = blockHashes[blockHashIndex];
905
+ if (!blockHash) {
906
+ return undefined;
907
+ }
908
+ return {
909
+ l2BlockNumber: BlockNumber(Number(blockNumber)),
910
+ l2BlockHash: new BlockHash(blockHash),
911
+ data: index,
912
+ };
913
+ });
551
914
  }
552
915
 
553
- /**
554
- * Returns a sibling path for the given index in the nullifier tree.
555
- * @param blockNumber - The block number at which to get the data.
556
- * @param leafIndex - The index of the leaf for which the sibling path is required.
557
- * @returns The sibling path for the leaf index.
558
- */
559
- public async getNullifierSiblingPath(
560
- blockNumber: L2BlockNumber,
561
- leafIndex: bigint,
562
- ): Promise<SiblingPath<typeof NULLIFIER_TREE_HEIGHT>> {
563
- const committedDb = await this.#getWorldState(blockNumber);
564
- return committedDb.getSiblingPath(MerkleTreeId.NULLIFIER_TREE, leafIndex);
916
+ public async getBlockHashMembershipWitness(
917
+ referenceBlock: BlockParameter,
918
+ blockHash: BlockHash,
919
+ ): Promise<MembershipWitness<typeof ARCHIVE_HEIGHT> | undefined> {
920
+ const committedDb = await this.#getWorldState(referenceBlock);
921
+ const [pathAndIndex] = await committedDb.findSiblingPaths<MerkleTreeId.ARCHIVE>(MerkleTreeId.ARCHIVE, [blockHash]);
922
+ return pathAndIndex === undefined
923
+ ? undefined
924
+ : MembershipWitness.fromSiblingPath(pathAndIndex.index, pathAndIndex.path);
565
925
  }
566
926
 
567
- /**
568
- * Returns a sibling path for the given index in the data tree.
569
- * @param blockNumber - The block number at which to get the data.
570
- * @param leafIndex - The index of the leaf for which the sibling path is required.
571
- * @returns The sibling path for the leaf index.
572
- */
573
- public async getNoteHashSiblingPath(
574
- blockNumber: L2BlockNumber,
575
- leafIndex: bigint,
576
- ): Promise<SiblingPath<typeof NOTE_HASH_TREE_HEIGHT>> {
577
- const committedDb = await this.#getWorldState(blockNumber);
578
- return committedDb.getSiblingPath(MerkleTreeId.NOTE_HASH_TREE, leafIndex);
927
+ public async getNoteHashMembershipWitness(
928
+ referenceBlock: BlockParameter,
929
+ noteHash: Fr,
930
+ ): Promise<MembershipWitness<typeof NOTE_HASH_TREE_HEIGHT> | undefined> {
931
+ const committedDb = await this.#getWorldState(referenceBlock);
932
+ const [pathAndIndex] = await committedDb.findSiblingPaths<MerkleTreeId.NOTE_HASH_TREE>(
933
+ MerkleTreeId.NOTE_HASH_TREE,
934
+ [noteHash],
935
+ );
936
+ return pathAndIndex === undefined
937
+ ? undefined
938
+ : MembershipWitness.fromSiblingPath(pathAndIndex.index, pathAndIndex.path);
579
939
  }
580
940
 
581
- /**
582
- * Returns the index and a sibling path for a leaf in the committed l1 to l2 data tree.
583
- * @param blockNumber - The block number at which to get the data.
584
- * @param l1ToL2Message - The l1ToL2Message to get the index / sibling path for.
585
- * @returns A tuple of the index and the sibling path of the L1ToL2Message (undefined if not found).
586
- */
587
941
  public async getL1ToL2MessageMembershipWitness(
588
- blockNumber: L2BlockNumber,
942
+ referenceBlock: BlockParameter,
589
943
  l1ToL2Message: Fr,
590
944
  ): Promise<[bigint, SiblingPath<typeof L1_TO_L2_MSG_TREE_HEIGHT>] | undefined> {
591
- const index = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
592
- if (index === undefined) {
945
+ const db = await this.#getWorldState(referenceBlock);
946
+ const [witness] = await db.findSiblingPaths(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, [l1ToL2Message]);
947
+ if (!witness) {
593
948
  return undefined;
594
949
  }
595
- const committedDb = await this.#getWorldState(blockNumber);
596
- const siblingPath = await committedDb.getSiblingPath<typeof L1_TO_L2_MSG_TREE_HEIGHT>(
597
- MerkleTreeId.L1_TO_L2_MESSAGE_TREE,
598
- index,
599
- );
600
- return [index, siblingPath];
950
+
951
+ // REFACTOR: Return a MembershipWitness object
952
+ return [witness.index, witness.path];
953
+ }
954
+
955
+ public async getL1ToL2MessageBlock(l1ToL2Message: Fr): Promise<BlockNumber | undefined> {
956
+ const messageIndex = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
957
+ return messageIndex
958
+ ? BlockNumber.fromCheckpointNumber(InboxLeaf.checkpointNumberFromIndex(messageIndex))
959
+ : undefined;
601
960
  }
602
961
 
603
962
  /**
@@ -606,153 +965,59 @@ export class AztecNodeService implements AztecNode, Traceable {
606
965
  * @returns Whether the message is synced and ready to be included in a block.
607
966
  */
608
967
  public async isL1ToL2MessageSynced(l1ToL2Message: Fr): Promise<boolean> {
609
- return (await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message)) !== undefined;
968
+ const messageIndex = await this.l1ToL2MessageSource.getL1ToL2MessageIndex(l1ToL2Message);
969
+ return messageIndex !== undefined;
610
970
  }
611
971
 
612
972
  /**
613
- * Returns the index of a l2ToL1Message in a ephemeral l2 to l1 data tree as well as its sibling path.
614
- * @remarks This tree is considered ephemeral because it is created on-demand by: taking all the l2ToL1 messages
615
- * in a single block, and then using them to make a variable depth append-only tree with these messages as leaves.
616
- * The tree is discarded immediately after calculating what we need from it.
617
- * TODO: Handle the case where two messages in the same tx have the same hash.
618
- * @param blockNumber - The block number at which to get the data.
619
- * @param l2ToL1Message - The l2ToL1Message get the index / sibling path for.
620
- * @returns A tuple of the index and the sibling path of the L2ToL1Message.
973
+ * Returns all the L2 to L1 messages in an epoch.
974
+ * @param epoch - The epoch at which to get the data.
975
+ * @returns The L2 to L1 messages (empty array if the epoch is not found).
621
976
  */
622
- public async getL2ToL1MessageMembershipWitness(
623
- blockNumber: L2BlockNumber,
624
- l2ToL1Message: Fr,
625
- ): Promise<[bigint, SiblingPath<number>]> {
626
- const block = await this.blockSource.getBlock(blockNumber === 'latest' ? await this.getBlockNumber() : blockNumber);
627
-
628
- if (block === undefined) {
629
- throw new Error('Block is not defined');
630
- }
631
-
632
- const l2ToL1Messages = block.body.txEffects.map(txEffect => txEffect.l2ToL1Msgs);
633
-
634
- // Find index of message
635
- let indexOfMsgInSubtree = -1;
636
- const indexOfMsgTx = l2ToL1Messages.findIndex(msgs => {
637
- const idx = msgs.findIndex(msg => msg.equals(l2ToL1Message));
638
- indexOfMsgInSubtree = Math.max(indexOfMsgInSubtree, idx);
639
- return idx !== -1;
640
- });
641
-
642
- if (indexOfMsgTx === -1) {
643
- throw new Error('The L2ToL1Message you are trying to prove inclusion of does not exist');
977
+ public async getL2ToL1Messages(epoch: EpochNumber): Promise<Fr[][][][]> {
978
+ // Assumes `getCheckpointedBlocksForEpoch` returns blocks in ascending order of block number.
979
+ const checkpointedBlocks = await this.blockSource.getCheckpointedBlocksForEpoch(epoch);
980
+ const blocksInCheckpoints: L2Block[][] = [];
981
+ let previousSlotNumber = SlotNumber.ZERO;
982
+ let checkpointIndex = -1;
983
+ for (const checkpointedBlock of checkpointedBlocks) {
984
+ const block = checkpointedBlock.block;
985
+ const slotNumber = block.header.globalVariables.slotNumber;
986
+ if (slotNumber !== previousSlotNumber) {
987
+ checkpointIndex++;
988
+ blocksInCheckpoints.push([]);
989
+ previousSlotNumber = slotNumber;
990
+ }
991
+ blocksInCheckpoints[checkpointIndex].push(block);
644
992
  }
645
-
646
- const tempStores: AztecKVStore[] = [];
647
-
648
- // Construct message subtrees
649
- const l2toL1Subtrees = await Promise.all(
650
- l2ToL1Messages.map(async (msgs, i) => {
651
- const store = openTmpStore(true);
652
- tempStores.push(store);
653
- const treeHeight = msgs.length <= 1 ? 1 : Math.ceil(Math.log2(msgs.length));
654
- const tree = new StandardTree(store, new SHA256Trunc(), `temp_msgs_subtrees_${i}`, treeHeight, 0n, Fr);
655
- await tree.appendLeaves(msgs);
656
- return tree;
657
- }),
658
- );
659
-
660
- // path of the input msg from leaf -> first out hash calculated in base rolllup
661
- const subtreePathOfL2ToL1Message = await l2toL1Subtrees[indexOfMsgTx].getSiblingPath(
662
- BigInt(indexOfMsgInSubtree),
663
- true,
993
+ return blocksInCheckpoints.map(blocks =>
994
+ blocks.map(block => block.body.txEffects.map(txEffect => txEffect.l2ToL1Msgs)),
664
995
  );
665
-
666
- const numTxs = block.body.txEffects.length;
667
- if (numTxs === 1) {
668
- return [BigInt(indexOfMsgInSubtree), subtreePathOfL2ToL1Message];
669
- }
670
-
671
- const l2toL1SubtreeRoots = l2toL1Subtrees.map(t => Fr.fromBuffer(t.getRoot(true)));
672
- const maxTreeHeight = Math.ceil(Math.log2(l2toL1SubtreeRoots.length));
673
- // The root of this tree is the out_hash calculated in Noir => we truncate to match Noir's SHA
674
- const outHashTree = new UnbalancedTree(new SHA256Trunc(), 'temp_outhash_sibling_path', maxTreeHeight, Fr);
675
- await outHashTree.appendLeaves(l2toL1SubtreeRoots);
676
-
677
- const pathOfTxInOutHashTree = await outHashTree.getSiblingPath(l2toL1SubtreeRoots[indexOfMsgTx].toBigInt());
678
- // Append subtree path to out hash tree path
679
- const mergedPath = subtreePathOfL2ToL1Message.toBufferArray().concat(pathOfTxInOutHashTree.toBufferArray());
680
- // Append binary index of subtree path to binary index of out hash tree path
681
- const mergedIndex = parseInt(
682
- indexOfMsgTx
683
- .toString(2)
684
- .concat(indexOfMsgInSubtree.toString(2).padStart(l2toL1Subtrees[indexOfMsgTx].getDepth(), '0')),
685
- 2,
686
- );
687
-
688
- // clear the tmp stores
689
- await Promise.all(tempStores.map(store => store.delete()));
690
-
691
- return [BigInt(mergedIndex), new SiblingPath(mergedPath.length, mergedPath)];
692
- }
693
-
694
- /**
695
- * Returns a sibling path for a leaf in the committed blocks tree.
696
- * @param blockNumber - The block number at which to get the data.
697
- * @param leafIndex - Index of the leaf in the tree.
698
- * @returns The sibling path.
699
- */
700
- public async getArchiveSiblingPath(
701
- blockNumber: L2BlockNumber,
702
- leafIndex: bigint,
703
- ): Promise<SiblingPath<typeof ARCHIVE_HEIGHT>> {
704
- const committedDb = await this.#getWorldState(blockNumber);
705
- return committedDb.getSiblingPath(MerkleTreeId.ARCHIVE, leafIndex);
706
- }
707
-
708
- /**
709
- * Returns a sibling path for a leaf in the committed public data tree.
710
- * @param blockNumber - The block number at which to get the data.
711
- * @param leafIndex - Index of the leaf in the tree.
712
- * @returns The sibling path.
713
- */
714
- public async getPublicDataSiblingPath(
715
- blockNumber: L2BlockNumber,
716
- leafIndex: bigint,
717
- ): Promise<SiblingPath<typeof PUBLIC_DATA_TREE_HEIGHT>> {
718
- const committedDb = await this.#getWorldState(blockNumber);
719
- return committedDb.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, leafIndex);
720
996
  }
721
997
 
722
- /**
723
- * Returns a nullifier membership witness for a given nullifier at a given block.
724
- * @param blockNumber - The block number at which to get the index.
725
- * @param nullifier - Nullifier we try to find witness for.
726
- * @returns The nullifier membership witness (if found).
727
- */
728
998
  public async getNullifierMembershipWitness(
729
- blockNumber: L2BlockNumber,
999
+ referenceBlock: BlockParameter,
730
1000
  nullifier: Fr,
731
1001
  ): Promise<NullifierMembershipWitness | undefined> {
732
- const db = await this.#getWorldState(blockNumber);
733
- const index = (await db.findLeafIndices(MerkleTreeId.NULLIFIER_TREE, [nullifier.toBuffer()]))[0];
734
- if (!index) {
1002
+ const db = await this.#getWorldState(referenceBlock);
1003
+ const [witness] = await db.findSiblingPaths(MerkleTreeId.NULLIFIER_TREE, [nullifier.toBuffer()]);
1004
+ if (!witness) {
735
1005
  return undefined;
736
1006
  }
737
1007
 
738
- const leafPreimagePromise = db.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index);
739
- const siblingPathPromise = db.getSiblingPath<typeof NULLIFIER_TREE_HEIGHT>(
740
- MerkleTreeId.NULLIFIER_TREE,
741
- BigInt(index),
742
- );
743
-
744
- const [leafPreimage, siblingPath] = await Promise.all([leafPreimagePromise, siblingPathPromise]);
745
-
1008
+ const { index, path } = witness;
1009
+ const leafPreimage = await db.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index);
746
1010
  if (!leafPreimage) {
747
1011
  return undefined;
748
1012
  }
749
1013
 
750
- return new NullifierMembershipWitness(BigInt(index), leafPreimage as NullifierLeafPreimage, siblingPath);
1014
+ return new NullifierMembershipWitness(index, leafPreimage as NullifierLeafPreimage, path);
751
1015
  }
752
1016
 
753
1017
  /**
754
1018
  * Returns a low nullifier membership witness for a given nullifier at a given block.
755
- * @param blockNumber - The block number at which to get the index.
1019
+ * @param referenceBlock - The block parameter (block number, block hash, or 'latest') at which to get the data
1020
+ * (which contains the root of the nullifier tree in which we are searching for the nullifier).
756
1021
  * @param nullifier - Nullifier we try to find the low nullifier witness for.
757
1022
  * @returns The low nullifier membership witness (if found).
758
1023
  * @remarks Low nullifier witness can be used to perform a nullifier non-inclusion proof by leveraging the "linked
@@ -765,10 +1030,10 @@ export class AztecNodeService implements AztecNode, Traceable {
765
1030
  * TODO: This is a confusing behavior and we should eventually address that.
766
1031
  */
767
1032
  public async getLowNullifierMembershipWitness(
768
- blockNumber: L2BlockNumber,
1033
+ referenceBlock: BlockParameter,
769
1034
  nullifier: Fr,
770
1035
  ): Promise<NullifierMembershipWitness | undefined> {
771
- const committedDb = await this.#getWorldState(blockNumber);
1036
+ const committedDb = await this.#getWorldState(referenceBlock);
772
1037
  const findResult = await committedDb.getPreviousValueIndex(MerkleTreeId.NULLIFIER_TREE, nullifier.toBigInt());
773
1038
  if (!findResult) {
774
1039
  return undefined;
@@ -779,15 +1044,12 @@ export class AztecNodeService implements AztecNode, Traceable {
779
1044
  }
780
1045
  const preimageData = (await committedDb.getLeafPreimage(MerkleTreeId.NULLIFIER_TREE, index))!;
781
1046
 
782
- const siblingPath = await committedDb.getSiblingPath<typeof NULLIFIER_TREE_HEIGHT>(
783
- MerkleTreeId.NULLIFIER_TREE,
784
- BigInt(index),
785
- );
1047
+ const siblingPath = await committedDb.getSiblingPath(MerkleTreeId.NULLIFIER_TREE, BigInt(index));
786
1048
  return new NullifierMembershipWitness(BigInt(index), preimageData as NullifierLeafPreimage, siblingPath);
787
1049
  }
788
1050
 
789
- async getPublicDataTreeWitness(blockNumber: L2BlockNumber, leafSlot: Fr): Promise<PublicDataWitness | undefined> {
790
- const committedDb = await this.#getWorldState(blockNumber);
1051
+ async getPublicDataWitness(referenceBlock: BlockParameter, leafSlot: Fr): Promise<PublicDataWitness | undefined> {
1052
+ const committedDb = await this.#getWorldState(referenceBlock);
791
1053
  const lowLeafResult = await committedDb.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot.toBigInt());
792
1054
  if (!lowLeafResult) {
793
1055
  return undefined;
@@ -796,27 +1058,13 @@ export class AztecNodeService implements AztecNode, Traceable {
796
1058
  MerkleTreeId.PUBLIC_DATA_TREE,
797
1059
  lowLeafResult.index,
798
1060
  )) as PublicDataTreeLeafPreimage;
799
- const path = await committedDb.getSiblingPath<typeof PUBLIC_DATA_TREE_HEIGHT>(
800
- MerkleTreeId.PUBLIC_DATA_TREE,
801
- lowLeafResult.index,
802
- );
1061
+ const path = await committedDb.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, lowLeafResult.index);
803
1062
  return new PublicDataWitness(lowLeafResult.index, preimage, path);
804
1063
  }
805
1064
  }
806
1065
 
807
- /**
808
- * Gets the storage value at the given contract storage slot.
809
- *
810
- * @remarks The storage slot here refers to the slot as it is defined in Noir not the index in the merkle tree.
811
- * Aztec's version of `eth_getStorageAt`.
812
- *
813
- * @param contract - Address of the contract to query.
814
- * @param slot - Slot to query.
815
- * @param blockNumber - The block number at which to get the data or 'latest'.
816
- * @returns Storage value at the given contract slot.
817
- */
818
- public async getPublicStorageAt(blockNumber: L2BlockNumber, contract: AztecAddress, slot: Fr): Promise<Fr> {
819
- const committedDb = await this.#getWorldState(blockNumber);
1066
+ public async getPublicStorageAt(referenceBlock: BlockParameter, contract: AztecAddress, slot: Fr): Promise<Fr> {
1067
+ const committedDb = await this.#getWorldState(referenceBlock);
820
1068
  const leafSlot = await computePublicDataTreeLeafSlot(contract, slot);
821
1069
 
822
1070
  const lowLeafResult = await committedDb.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot.toBigInt());
@@ -827,36 +1075,67 @@ export class AztecNodeService implements AztecNode, Traceable {
827
1075
  MerkleTreeId.PUBLIC_DATA_TREE,
828
1076
  lowLeafResult.index,
829
1077
  )) as PublicDataTreeLeafPreimage;
830
- return preimage.value;
1078
+ return preimage.leaf.value;
1079
+ }
1080
+
1081
+ public async getBlockHeader(block: BlockParameter = 'latest'): Promise<BlockHeader | undefined> {
1082
+ if (BlockHash.isBlockHash(block)) {
1083
+ const initialBlockHash = await this.#getInitialHeaderHash();
1084
+ if (block.equals(initialBlockHash)) {
1085
+ // Block source doesn't handle initial header so we need to handle the case separately.
1086
+ return this.worldStateSynchronizer.getCommitted().getInitialHeader();
1087
+ }
1088
+ return this.blockSource.getBlockHeaderByHash(block);
1089
+ } else {
1090
+ // Block source doesn't handle initial header so we need to handle the case separately.
1091
+ const blockNumber = block === 'latest' ? await this.getBlockNumber() : (block as BlockNumber);
1092
+ if (blockNumber === BlockNumber.ZERO) {
1093
+ return this.worldStateSynchronizer.getCommitted().getInitialHeader();
1094
+ }
1095
+ return this.blockSource.getBlockHeader(block);
1096
+ }
831
1097
  }
832
1098
 
833
1099
  /**
834
- * Returns the currently committed block header, or the initial header if no blocks have been produced.
835
- * @returns The current committed block header.
1100
+ * Get a block header specified by its archive root.
1101
+ * @param archive - The archive root being requested.
1102
+ * @returns The requested block header.
836
1103
  */
837
- public async getBlockHeader(blockNumber: L2BlockNumber = 'latest'): Promise<BlockHeader | undefined> {
838
- return blockNumber === 0 || (blockNumber === 'latest' && (await this.blockSource.getBlockNumber()) === 0)
839
- ? this.worldStateSynchronizer.getCommitted().getInitialHeader()
840
- : this.blockSource.getBlockHeader(blockNumber);
1104
+ public async getBlockHeaderByArchive(archive: Fr): Promise<BlockHeader | undefined> {
1105
+ return await this.blockSource.getBlockHeaderByArchive(archive);
841
1106
  }
842
1107
 
843
1108
  /**
844
1109
  * Simulates the public part of a transaction with the current state.
845
1110
  * @param tx - The transaction to simulate.
846
1111
  **/
847
- @trackSpan('AztecNodeService.simulatePublicCalls', async (tx: Tx) => ({
848
- [Attributes.TX_HASH]: (await tx.getTxHash()).toString(),
1112
+ @trackSpan('AztecNodeService.simulatePublicCalls', (tx: Tx) => ({
1113
+ [Attributes.TX_HASH]: tx.getTxHash().toString(),
849
1114
  }))
850
1115
  public async simulatePublicCalls(tx: Tx, skipFeeEnforcement = false): Promise<PublicSimulationOutput> {
851
- const txHash = await tx.getTxHash();
852
- const blockNumber = (await this.blockSource.getBlockNumber()) + 1;
1116
+ // Check total gas limit for simulation
1117
+ const gasSettings = tx.data.constants.txContext.gasSettings;
1118
+ const txGasLimit = gasSettings.gasLimits.l2Gas;
1119
+ const teardownGasLimit = gasSettings.teardownGasLimits.l2Gas;
1120
+ if (txGasLimit + teardownGasLimit > this.config.rpcSimulatePublicMaxGasLimit) {
1121
+ throw new BadRequestError(
1122
+ `Transaction total gas limit ${
1123
+ txGasLimit + teardownGasLimit
1124
+ } (${txGasLimit} + ${teardownGasLimit}) exceeds maximum gas limit ${
1125
+ this.config.rpcSimulatePublicMaxGasLimit
1126
+ } for simulation`,
1127
+ );
1128
+ }
1129
+
1130
+ const txHash = tx.getTxHash();
1131
+ const blockNumber = BlockNumber((await this.blockSource.getBlockNumber()) + 1);
853
1132
 
854
1133
  // If sequencer is not initialized, we just set these values to zero for simulation.
855
- const coinbase = this.sequencer?.coinbase || EthAddress.ZERO;
856
- const feeRecipient = this.sequencer?.feeRecipient || AztecAddress.ZERO;
1134
+ const coinbase = EthAddress.ZERO;
1135
+ const feeRecipient = AztecAddress.ZERO;
857
1136
 
858
1137
  const newGlobalVariables = await this.globalVariableBuilder.buildGlobalVariables(
859
- new Fr(blockNumber),
1138
+ blockNumber,
860
1139
  coinbase,
861
1140
  feeRecipient,
862
1141
  );
@@ -864,8 +1143,8 @@ export class AztecNodeService implements AztecNode, Traceable {
864
1143
  this.contractDataSource,
865
1144
  new DateProvider(),
866
1145
  this.telemetry,
1146
+ this.log.getBindings(),
867
1147
  );
868
- const fork = await this.worldStateSynchronizer.fork();
869
1148
 
870
1149
  this.log.verbose(`Simulating public calls for tx ${txHash}`, {
871
1150
  globalVariables: newGlobalVariables.toInspect(),
@@ -873,11 +1152,22 @@ export class AztecNodeService implements AztecNode, Traceable {
873
1152
  blockNumber,
874
1153
  });
875
1154
 
1155
+ const merkleTreeFork = await this.worldStateSynchronizer.fork();
876
1156
  try {
877
- const processor = publicProcessorFactory.create(fork, newGlobalVariables, skipFeeEnforcement);
1157
+ const config = PublicSimulatorConfig.from({
1158
+ skipFeeEnforcement,
1159
+ collectDebugLogs: true,
1160
+ collectHints: false,
1161
+ collectCallMetadata: true,
1162
+ collectStatistics: false,
1163
+ collectionLimits: CollectionLimitsConfig.from({
1164
+ maxDebugLogMemoryReads: this.config.rpcSimulatePublicMaxDebugLogMemoryReads,
1165
+ }),
1166
+ });
1167
+ const processor = publicProcessorFactory.create(merkleTreeFork, newGlobalVariables, config);
878
1168
 
879
1169
  // REFACTOR: Consider merging ProcessReturnValues into ProcessedTx
880
- const [processedTxs, failedTxs, returns] = await processor.process([tx]);
1170
+ const [processedTxs, failedTxs, _usedTxs, returns] = await processor.process([tx]);
881
1171
  // REFACTOR: Consider returning the error rather than throwing
882
1172
  if (failedTxs.length) {
883
1173
  this.log.warn(`Simulated tx ${txHash} fails: ${failedTxs[0].error}`, { txHash });
@@ -887,13 +1177,13 @@ export class AztecNodeService implements AztecNode, Traceable {
887
1177
  const [processedTx] = processedTxs;
888
1178
  return new PublicSimulationOutput(
889
1179
  processedTx.revertReason,
890
- processedTx.constants,
1180
+ processedTx.globalVariables,
891
1181
  processedTx.txEffect,
892
1182
  returns,
893
1183
  processedTx.gasUsed,
894
1184
  );
895
1185
  } finally {
896
- await fork.close();
1186
+ await merkleTreeFork.close();
897
1187
  }
898
1188
  }
899
1189
 
@@ -901,24 +1191,48 @@ export class AztecNodeService implements AztecNode, Traceable {
901
1191
  tx: Tx,
902
1192
  { isSimulation, skipFeeEnforcement }: { isSimulation?: boolean; skipFeeEnforcement?: boolean } = {},
903
1193
  ): Promise<TxValidationResult> {
904
- const blockNumber = (await this.blockSource.getBlockNumber()) + 1;
905
1194
  const db = this.worldStateSynchronizer.getCommitted();
906
1195
  const verifier = isSimulation ? undefined : this.proofVerifier;
907
- const validator = createValidatorForAcceptingTxs(db, this.contractDataSource, verifier, {
908
- blockNumber,
909
- l1ChainId: this.l1ChainId,
910
- setupAllowList: this.config.allowedInSetup ?? (await getDefaultAllowedSetupFunctions()),
911
- gasFees: await this.getCurrentBaseFees(),
912
- skipFeeEnforcement,
913
- });
1196
+
1197
+ // We accept transactions if they are not expired by the next slot (checked based on the IncludeByTimestamp field)
1198
+ const { ts: nextSlotTimestamp } = this.epochCache.getEpochAndSlotInNextL1Slot();
1199
+ const blockNumber = BlockNumber((await this.blockSource.getBlockNumber()) + 1);
1200
+ const validator = createValidatorForAcceptingTxs(
1201
+ db,
1202
+ this.contractDataSource,
1203
+ verifier,
1204
+ {
1205
+ timestamp: nextSlotTimestamp,
1206
+ blockNumber,
1207
+ l1ChainId: this.l1ChainId,
1208
+ rollupVersion: this.version,
1209
+ setupAllowList: this.config.txPublicSetupAllowList ?? (await getDefaultAllowedSetupFunctions()),
1210
+ gasFees: await this.getCurrentMinFees(),
1211
+ skipFeeEnforcement,
1212
+ txsPermitted: !this.config.disableTransactions,
1213
+ },
1214
+ this.log.getBindings(),
1215
+ );
914
1216
 
915
1217
  return await validator.validateTx(tx);
916
1218
  }
917
1219
 
918
- public async setConfig(config: Partial<SequencerConfig & ProverConfig>): Promise<void> {
919
- const newConfig = { ...this.config, ...config };
920
- await this.sequencer?.updateSequencerConfig(config);
1220
+ public getConfig(): Promise<AztecNodeAdminConfig> {
1221
+ const schema = AztecNodeAdminConfigSchema;
1222
+ const keys = schema.keyof().options;
1223
+ return Promise.resolve(pick(this.config, ...keys));
1224
+ }
921
1225
 
1226
+ public async setConfig(config: Partial<AztecNodeAdminConfig>): Promise<void> {
1227
+ const newConfig = { ...this.config, ...config };
1228
+ this.sequencer?.updateConfig(config);
1229
+ this.slasherClient?.updateConfig(config);
1230
+ this.validatorsSentinel?.updateConfig(config);
1231
+ await this.p2pClient.updateP2PConfig(config);
1232
+ const archiver = this.blockSource as Archiver;
1233
+ if ('updateConfig' in archiver) {
1234
+ archiver.updateConfig(config);
1235
+ }
922
1236
  if (newConfig.realProofs !== this.config.realProofs) {
923
1237
  this.proofVerifier = config.realProofs ? await BBCircuitVerifier.new(newConfig) : new TestCircuitVerifier();
924
1238
  }
@@ -928,42 +1242,154 @@ export class AztecNodeService implements AztecNode, Traceable {
928
1242
 
929
1243
  public getProtocolContractAddresses(): Promise<ProtocolContractAddresses> {
930
1244
  return Promise.resolve({
931
- classRegisterer: ProtocolContractAddress.ContractClassRegisterer,
1245
+ classRegistry: ProtocolContractAddress.ContractClassRegistry,
932
1246
  feeJuice: ProtocolContractAddress.FeeJuice,
933
- instanceDeployer: ProtocolContractAddress.ContractInstanceDeployer,
1247
+ instanceRegistry: ProtocolContractAddress.ContractInstanceRegistry,
934
1248
  multiCallEntrypoint: ProtocolContractAddress.MultiCallEntrypoint,
935
1249
  });
936
1250
  }
937
1251
 
938
- // TODO(#10007): Remove this method
939
- public addContractClass(contractClass: ContractClassPublic): Promise<void> {
940
- this.log.info(`Adding contract class via API ${contractClass.id}`);
941
- return this.contractDataSource.addContractClass(contractClass);
1252
+ public registerContractFunctionSignatures(signatures: string[]): Promise<void> {
1253
+ return this.contractDataSource.registerContractFunctionSignatures(signatures);
1254
+ }
1255
+
1256
+ public getValidatorsStats(): Promise<ValidatorsStats> {
1257
+ return this.validatorsSentinel?.computeStats() ?? Promise.resolve({ stats: {}, slotWindow: 0 });
942
1258
  }
943
1259
 
944
- public registerContractFunctionSignatures(_address: AztecAddress, signatures: string[]): Promise<void> {
945
- return this.contractDataSource.registerContractFunctionSignatures(_address, signatures);
1260
+ public getValidatorStats(
1261
+ validatorAddress: EthAddress,
1262
+ fromSlot?: SlotNumber,
1263
+ toSlot?: SlotNumber,
1264
+ ): Promise<SingleValidatorStats | undefined> {
1265
+ return this.validatorsSentinel?.getValidatorStats(validatorAddress, fromSlot, toSlot) ?? Promise.resolve(undefined);
946
1266
  }
947
1267
 
948
- public flushTxs(): Promise<void> {
949
- if (!this.sequencer) {
950
- throw new Error(`Sequencer is not initialized`);
1268
+ public async startSnapshotUpload(location: string): Promise<void> {
1269
+ // Note that we are forcefully casting the blocksource as an archiver
1270
+ // We break support for archiver running remotely to the node
1271
+ const archiver = this.blockSource as Archiver;
1272
+ if (!('backupTo' in archiver)) {
1273
+ this.metrics.recordSnapshotError();
1274
+ throw new Error('Archiver implementation does not support backups. Cannot generate snapshot.');
1275
+ }
1276
+
1277
+ // Test that the archiver has done an initial sync.
1278
+ if (!archiver.isInitialSyncComplete()) {
1279
+ this.metrics.recordSnapshotError();
1280
+ throw new Error(`Archiver initial sync not complete. Cannot start snapshot.`);
1281
+ }
1282
+
1283
+ // And it has an L2 block hash
1284
+ const l2BlockHash = await archiver.getL2Tips().then(tips => tips.proposed.hash);
1285
+ if (!l2BlockHash) {
1286
+ this.metrics.recordSnapshotError();
1287
+ throw new Error(`Archiver has no latest L2 block hash downloaded. Cannot start snapshot.`);
1288
+ }
1289
+
1290
+ if (this.isUploadingSnapshot) {
1291
+ this.metrics.recordSnapshotError();
1292
+ throw new Error(`Snapshot upload already in progress. Cannot start another one until complete.`);
1293
+ }
1294
+
1295
+ // Do not wait for the upload to be complete to return to the caller, but flag that an operation is in progress
1296
+ this.isUploadingSnapshot = true;
1297
+ const timer = new Timer();
1298
+ void uploadSnapshot(location, this.blockSource as Archiver, this.worldStateSynchronizer, this.config, this.log)
1299
+ .then(() => {
1300
+ this.isUploadingSnapshot = false;
1301
+ this.metrics.recordSnapshot(timer.ms());
1302
+ })
1303
+ .catch(err => {
1304
+ this.isUploadingSnapshot = false;
1305
+ this.metrics.recordSnapshotError();
1306
+ this.log.error(`Error uploading snapshot: ${err}`);
1307
+ });
1308
+
1309
+ return Promise.resolve();
1310
+ }
1311
+
1312
+ public async rollbackTo(targetBlock: BlockNumber, force?: boolean): Promise<void> {
1313
+ const archiver = this.blockSource as Archiver;
1314
+ if (!('rollbackTo' in archiver)) {
1315
+ throw new Error('Archiver implementation does not support rollbacks.');
1316
+ }
1317
+
1318
+ const finalizedBlock = await archiver.getL2Tips().then(tips => tips.finalized.block.number);
1319
+ if (targetBlock < finalizedBlock) {
1320
+ if (force) {
1321
+ this.log.warn(`Clearing world state database to allow rolling back behind finalized block ${finalizedBlock}`);
1322
+ await this.worldStateSynchronizer.clear();
1323
+ await this.p2pClient.clear();
1324
+ } else {
1325
+ throw new Error(`Cannot rollback to block ${targetBlock} as it is before finalized ${finalizedBlock}`);
1326
+ }
1327
+ }
1328
+
1329
+ try {
1330
+ this.log.info(`Pausing archiver and world state sync to start rollback`);
1331
+ await archiver.stop();
1332
+ await this.worldStateSynchronizer.stopSync();
1333
+ const currentBlock = await archiver.getBlockNumber();
1334
+ const blocksToUnwind = currentBlock - targetBlock;
1335
+ this.log.info(`Unwinding ${count(blocksToUnwind, 'block')} from L2 block ${currentBlock} to ${targetBlock}`);
1336
+ await archiver.rollbackTo(targetBlock);
1337
+ this.log.info(`Unwinding complete.`);
1338
+ } catch (err) {
1339
+ this.log.error(`Error during rollback`, err);
1340
+ throw err;
1341
+ } finally {
1342
+ this.log.info(`Resuming world state and archiver sync.`);
1343
+ this.worldStateSynchronizer.resumeSync();
1344
+ archiver.resume();
951
1345
  }
952
- this.sequencer.flush();
1346
+ }
1347
+
1348
+ public async pauseSync(): Promise<void> {
1349
+ this.log.info(`Pausing archiver and world state sync`);
1350
+ await (this.blockSource as Archiver).stop();
1351
+ await this.worldStateSynchronizer.stopSync();
1352
+ }
1353
+
1354
+ public resumeSync(): Promise<void> {
1355
+ this.log.info(`Resuming world state and archiver sync.`);
1356
+ this.worldStateSynchronizer.resumeSync();
1357
+ (this.blockSource as Archiver).resume();
953
1358
  return Promise.resolve();
954
1359
  }
955
1360
 
1361
+ public getSlashPayloads(): Promise<SlashPayloadRound[]> {
1362
+ if (!this.slasherClient) {
1363
+ throw new Error(`Slasher client not enabled`);
1364
+ }
1365
+ return this.slasherClient.getSlashPayloads();
1366
+ }
1367
+
1368
+ public getSlashOffenses(round: bigint | 'all' | 'current'): Promise<Offense[]> {
1369
+ if (!this.slasherClient) {
1370
+ throw new Error(`Slasher client not enabled`);
1371
+ }
1372
+ if (round === 'all') {
1373
+ return this.slasherClient.getPendingOffenses();
1374
+ } else {
1375
+ return this.slasherClient.gatherOffensesForRound(round === 'current' ? undefined : BigInt(round));
1376
+ }
1377
+ }
1378
+
1379
+ #getInitialHeaderHash(): Promise<BlockHash> {
1380
+ if (!this.initialHeaderHashPromise) {
1381
+ this.initialHeaderHashPromise = this.worldStateSynchronizer.getCommitted().getInitialHeader().hash();
1382
+ }
1383
+ return this.initialHeaderHashPromise;
1384
+ }
1385
+
956
1386
  /**
957
1387
  * Returns an instance of MerkleTreeOperations having first ensured the world state is fully synched
958
- * @param blockNumber - The block number at which to get the data.
1388
+ * @param block - The block parameter (block number, block hash, or 'latest') at which to get the data.
959
1389
  * @returns An instance of a committed MerkleTreeOperations
960
1390
  */
961
- async #getWorldState(blockNumber: L2BlockNumber) {
962
- if (typeof blockNumber === 'number' && blockNumber < INITIAL_L2_BLOCK_NUM - 1) {
963
- throw new Error('Invalid block number to get world state for: ' + blockNumber);
964
- }
965
-
966
- let blockSyncedTo: number = 0;
1391
+ async #getWorldState(block: BlockParameter) {
1392
+ let blockSyncedTo: BlockNumber = BlockNumber.ZERO;
967
1393
  try {
968
1394
  // Attempt to sync the world state if necessary
969
1395
  blockSyncedTo = await this.#syncWorldState();
@@ -971,15 +1397,39 @@ export class AztecNodeService implements AztecNode, Traceable {
971
1397
  this.log.error(`Error getting world state: ${err}`);
972
1398
  }
973
1399
 
974
- // using a snapshot could be less efficient than using the committed db
975
- if (blockNumber === 'latest' /*|| blockNumber === blockSyncedTo*/) {
976
- this.log.debug(`Using committed db for block ${blockNumber}, world state synced upto ${blockSyncedTo}`);
1400
+ if (block === 'latest') {
1401
+ this.log.debug(`Using committed db for block 'latest', world state synced upto ${blockSyncedTo}`);
977
1402
  return this.worldStateSynchronizer.getCommitted();
978
- } else if (blockNumber <= blockSyncedTo) {
1403
+ }
1404
+
1405
+ if (BlockHash.isBlockHash(block)) {
1406
+ const initialBlockHash = await this.#getInitialHeaderHash();
1407
+ if (block.equals(initialBlockHash)) {
1408
+ // Block source doesn't handle initial header so we need to handle the case separately.
1409
+ return this.worldStateSynchronizer.getSnapshot(BlockNumber.ZERO);
1410
+ }
1411
+
1412
+ const header = await this.blockSource.getBlockHeaderByHash(block);
1413
+ if (!header) {
1414
+ throw new Error(
1415
+ `Block hash ${block.toString()} not found when querying world state. If the node API has been queried with anchor block hash possibly a reorg has occurred.`,
1416
+ );
1417
+ }
1418
+ const blockNumber = header.getBlockNumber();
1419
+ this.log.debug(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`);
1420
+ return this.worldStateSynchronizer.getSnapshot(blockNumber);
1421
+ }
1422
+
1423
+ // Block number provided
1424
+ {
1425
+ const blockNumber = block as BlockNumber;
1426
+
1427
+ if (blockNumber > blockSyncedTo) {
1428
+ throw new Error(`Queried block ${block} not yet synced by the node (node is synced upto ${blockSyncedTo}).`);
1429
+ }
1430
+
979
1431
  this.log.debug(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`);
980
1432
  return this.worldStateSynchronizer.getSnapshot(blockNumber);
981
- } else {
982
- throw new Error(`Block ${blockNumber} not yet synced`);
983
1433
  }
984
1434
  }
985
1435
 
@@ -987,8 +1437,8 @@ export class AztecNodeService implements AztecNode, Traceable {
987
1437
  * Ensure we fully sync the world state
988
1438
  * @returns A promise that fulfils once the world state is synced
989
1439
  */
990
- async #syncWorldState(): Promise<number> {
1440
+ async #syncWorldState(): Promise<BlockNumber> {
991
1441
  const blockSourceHeight = await this.blockSource.getBlockNumber();
992
- return this.worldStateSynchronizer.syncImmediate(blockSourceHeight);
1442
+ return await this.worldStateSynchronizer.syncImmediate(blockSourceHeight);
993
1443
  }
994
1444
  }