@aztec/archiver 0.0.1-commit.f146247c → 0.0.1-commit.f224bb98b

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/dest/archiver.d.ts +7 -4
  2. package/dest/archiver.d.ts.map +1 -1
  3. package/dest/archiver.js +62 -110
  4. package/dest/errors.d.ts +7 -9
  5. package/dest/errors.d.ts.map +1 -1
  6. package/dest/errors.js +9 -14
  7. package/dest/factory.d.ts +3 -4
  8. package/dest/factory.d.ts.map +1 -1
  9. package/dest/factory.js +15 -13
  10. package/dest/index.d.ts +2 -1
  11. package/dest/index.d.ts.map +1 -1
  12. package/dest/index.js +1 -0
  13. package/dest/l1/bin/retrieve-calldata.js +36 -33
  14. package/dest/l1/calldata_retriever.d.ts +73 -50
  15. package/dest/l1/calldata_retriever.d.ts.map +1 -1
  16. package/dest/l1/calldata_retriever.js +190 -259
  17. package/dest/l1/data_retrieval.d.ts +9 -9
  18. package/dest/l1/data_retrieval.d.ts.map +1 -1
  19. package/dest/l1/data_retrieval.js +21 -19
  20. package/dest/l1/spire_proposer.d.ts +5 -5
  21. package/dest/l1/spire_proposer.d.ts.map +1 -1
  22. package/dest/l1/spire_proposer.js +9 -17
  23. package/dest/modules/data_source_base.d.ts +10 -5
  24. package/dest/modules/data_source_base.d.ts.map +1 -1
  25. package/dest/modules/data_source_base.js +28 -72
  26. package/dest/modules/data_store_updater.d.ts +22 -7
  27. package/dest/modules/data_store_updater.d.ts.map +1 -1
  28. package/dest/modules/data_store_updater.js +69 -29
  29. package/dest/modules/instrumentation.d.ts +15 -2
  30. package/dest/modules/instrumentation.d.ts.map +1 -1
  31. package/dest/modules/instrumentation.js +19 -2
  32. package/dest/modules/l1_synchronizer.d.ts +5 -8
  33. package/dest/modules/l1_synchronizer.d.ts.map +1 -1
  34. package/dest/modules/l1_synchronizer.js +41 -10
  35. package/dest/store/block_store.d.ts +27 -25
  36. package/dest/store/block_store.d.ts.map +1 -1
  37. package/dest/store/block_store.js +123 -74
  38. package/dest/store/kv_archiver_store.d.ts +33 -11
  39. package/dest/store/kv_archiver_store.d.ts.map +1 -1
  40. package/dest/store/kv_archiver_store.js +37 -7
  41. package/dest/store/l2_tips_cache.d.ts +19 -0
  42. package/dest/store/l2_tips_cache.d.ts.map +1 -0
  43. package/dest/store/l2_tips_cache.js +89 -0
  44. package/dest/store/log_store.d.ts +1 -1
  45. package/dest/store/log_store.d.ts.map +1 -1
  46. package/dest/store/log_store.js +55 -35
  47. package/dest/store/message_store.js +1 -1
  48. package/dest/test/fake_l1_state.d.ts +13 -1
  49. package/dest/test/fake_l1_state.d.ts.map +1 -1
  50. package/dest/test/fake_l1_state.js +84 -20
  51. package/dest/test/mock_archiver.d.ts +1 -1
  52. package/dest/test/mock_archiver.d.ts.map +1 -1
  53. package/dest/test/mock_archiver.js +3 -2
  54. package/dest/test/mock_l2_block_source.d.ts +21 -5
  55. package/dest/test/mock_l2_block_source.d.ts.map +1 -1
  56. package/dest/test/mock_l2_block_source.js +130 -84
  57. package/dest/test/mock_structs.d.ts +4 -1
  58. package/dest/test/mock_structs.d.ts.map +1 -1
  59. package/dest/test/mock_structs.js +13 -1
  60. package/dest/test/noop_l1_archiver.d.ts +4 -1
  61. package/dest/test/noop_l1_archiver.d.ts.map +1 -1
  62. package/dest/test/noop_l1_archiver.js +5 -1
  63. package/package.json +13 -13
  64. package/src/archiver.ts +74 -130
  65. package/src/errors.ts +10 -24
  66. package/src/factory.ts +29 -14
  67. package/src/index.ts +1 -0
  68. package/src/l1/README.md +25 -68
  69. package/src/l1/bin/retrieve-calldata.ts +46 -39
  70. package/src/l1/calldata_retriever.ts +249 -379
  71. package/src/l1/data_retrieval.ts +23 -25
  72. package/src/l1/spire_proposer.ts +7 -15
  73. package/src/modules/data_source_base.ts +55 -94
  74. package/src/modules/data_store_updater.ts +71 -30
  75. package/src/modules/instrumentation.ts +29 -2
  76. package/src/modules/l1_synchronizer.ts +46 -14
  77. package/src/store/block_store.ts +146 -103
  78. package/src/store/kv_archiver_store.ts +57 -11
  79. package/src/store/l2_tips_cache.ts +89 -0
  80. package/src/store/log_store.ts +93 -31
  81. package/src/store/message_store.ts +1 -1
  82. package/src/test/fake_l1_state.ts +110 -21
  83. package/src/test/mock_archiver.ts +3 -2
  84. package/src/test/mock_l2_block_source.ts +166 -80
  85. package/src/test/mock_structs.ts +20 -6
  86. package/src/test/noop_l1_archiver.ts +7 -1
package/src/archiver.ts CHANGED
@@ -1,5 +1,4 @@
1
1
  import type { BlobClientInterface } from '@aztec/blob-client/client';
2
- import { GENESIS_BLOCK_HEADER_HASH, INITIAL_L2_BLOCK_NUM } from '@aztec/constants';
3
2
  import { EpochCache } from '@aztec/epoch-cache';
4
3
  import { BlockTagTooOldError, RollupContract } from '@aztec/ethereum/contracts';
5
4
  import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
@@ -15,8 +14,6 @@ import { RunningPromise, makeLoggingErrorHandler } from '@aztec/foundation/runni
15
14
  import { DateProvider } from '@aztec/foundation/timer';
16
15
  import {
17
16
  type ArchiverEmitter,
18
- type CheckpointId,
19
- GENESIS_CHECKPOINT_HEADER_HASH,
20
17
  L2Block,
21
18
  type L2BlockSink,
22
19
  type L2Tips,
@@ -25,22 +22,22 @@ import {
25
22
  import { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
26
23
  import {
27
24
  type L1RollupConstants,
28
- getEpochNumberAtTimestamp,
25
+ getEpochAtSlot,
29
26
  getSlotAtNextL1Block,
30
- getSlotAtTimestamp,
31
27
  getSlotRangeForEpoch,
32
28
  getTimestampRangeForEpoch,
33
29
  } from '@aztec/stdlib/epoch-helpers';
34
30
  import { type TelemetryClient, type Traceable, type Tracer, trackSpan } from '@aztec/telemetry-client';
35
31
 
36
32
  import { type ArchiverConfig, mapArchiverConfig } from './config.js';
37
- import { NoBlobBodiesFoundError } from './errors.js';
33
+ import { BlockAlreadyCheckpointedError, NoBlobBodiesFoundError } from './errors.js';
38
34
  import { validateAndLogTraceAvailability } from './l1/validate_trace.js';
39
35
  import { ArchiverDataSourceBase } from './modules/data_source_base.js';
40
36
  import { ArchiverDataStoreUpdater } from './modules/data_store_updater.js';
41
37
  import type { ArchiverInstrumentation } from './modules/instrumentation.js';
42
38
  import type { ArchiverL1Synchronizer } from './modules/l1_synchronizer.js';
43
39
  import type { KVArchiverDataStore } from './store/kv_archiver_store.js';
40
+ import { L2TipsCache } from './store/l2_tips_cache.js';
44
41
 
45
42
  /** Export ArchiverEmitter for use in factory and tests. */
46
43
  export type { ArchiverEmitter };
@@ -83,6 +80,9 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra
83
80
  /** Helper to handle updates to the store */
84
81
  private readonly updater: ArchiverDataStoreUpdater;
85
82
 
83
+ /** In-memory cache for L2 chain tips. */
84
+ private readonly l2TipsCache: L2TipsCache;
85
+
86
86
  public readonly tracer: Tracer;
87
87
 
88
88
  /**
@@ -119,9 +119,13 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra
119
119
  },
120
120
  private readonly blobClient: BlobClientInterface,
121
121
  instrumentation: ArchiverInstrumentation,
122
- protected override readonly l1Constants: L1RollupConstants & { l1StartBlockHash: Buffer32; genesisArchiveRoot: Fr },
122
+ protected override readonly l1Constants: L1RollupConstants & {
123
+ l1StartBlockHash: Buffer32;
124
+ genesisArchiveRoot: Fr;
125
+ },
123
126
  synchronizer: ArchiverL1Synchronizer,
124
127
  events: ArchiverEmitter,
128
+ l2TipsCache?: L2TipsCache,
125
129
  private readonly log: Logger = createLogger('archiver'),
126
130
  ) {
127
131
  super(dataStore, l1Constants);
@@ -130,7 +134,10 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra
130
134
  this.initialSyncPromise = promiseWithResolvers();
131
135
  this.synchronizer = synchronizer;
132
136
  this.events = events;
133
- this.updater = new ArchiverDataStoreUpdater(this.dataStore);
137
+ this.l2TipsCache = l2TipsCache ?? new L2TipsCache(this.dataStore.blockStore);
138
+ this.updater = new ArchiverDataStoreUpdater(this.dataStore, this.l2TipsCache, {
139
+ rollupManaLimit: l1Constants.rollupManaLimit,
140
+ });
134
141
 
135
142
  // Running promise starts with a small interval inbetween runs, so all iterations needed for the initial sync
136
143
  // are done as fast as possible. This then gets updated once the initial sync completes.
@@ -235,10 +242,15 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra
235
242
  }
236
243
 
237
244
  try {
238
- await this.updater.addProposedBlocks([block]);
245
+ await this.updater.addProposedBlock(block);
239
246
  this.log.debug(`Added block ${block.number} to store`);
240
247
  resolve();
241
248
  } catch (err: any) {
249
+ if (err instanceof BlockAlreadyCheckpointedError) {
250
+ this.log.debug(`Proposed block ${block.number} matches already checkpointed block, ignoring late proposal`);
251
+ resolve();
252
+ continue;
253
+ }
242
254
  this.log.error(`Failed to add block ${block.number} to store: ${err.message}`);
243
255
  reject(err);
244
256
  }
@@ -330,16 +342,35 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra
330
342
  return Promise.resolve(this.synchronizer.getL1Timestamp());
331
343
  }
332
344
 
333
- public getL2SlotNumber(): Promise<SlotNumber | undefined> {
345
+ public getSyncedL2SlotNumber(): Promise<SlotNumber | undefined> {
334
346
  const l1Timestamp = this.synchronizer.getL1Timestamp();
335
- return Promise.resolve(l1Timestamp === undefined ? undefined : getSlotAtTimestamp(l1Timestamp, this.l1Constants));
347
+ if (l1Timestamp === undefined) {
348
+ return Promise.resolve(undefined);
349
+ }
350
+ // The synced slot is the last L2 slot whose all L1 blocks have been processed.
351
+ // If the next L1 block (at l1Timestamp + ethereumSlotDuration) falls in slot N,
352
+ // then we've fully synced slot N-1.
353
+ const nextL1BlockSlot = getSlotAtNextL1Block(l1Timestamp, this.l1Constants);
354
+ if (Number(nextL1BlockSlot) === 0) {
355
+ return Promise.resolve(undefined);
356
+ }
357
+ return Promise.resolve(SlotNumber(nextL1BlockSlot - 1));
336
358
  }
337
359
 
338
- public getL2EpochNumber(): Promise<EpochNumber | undefined> {
339
- const l1Timestamp = this.synchronizer.getL1Timestamp();
340
- return Promise.resolve(
341
- l1Timestamp === undefined ? undefined : getEpochNumberAtTimestamp(l1Timestamp, this.l1Constants),
342
- );
360
+ public async getSyncedL2EpochNumber(): Promise<EpochNumber | undefined> {
361
+ const syncedSlot = await this.getSyncedL2SlotNumber();
362
+ if (syncedSlot === undefined) {
363
+ return undefined;
364
+ }
365
+ // An epoch is fully synced when all its slots are synced.
366
+ // We check if syncedSlot is the last slot of its epoch; if so, that epoch is fully synced.
367
+ // Otherwise, only the previous epoch is fully synced.
368
+ const epoch = getEpochAtSlot(syncedSlot, this.l1Constants);
369
+ const [, endSlot] = getSlotRangeForEpoch(epoch, this.l1Constants);
370
+ if (syncedSlot >= endSlot) {
371
+ return epoch;
372
+ }
373
+ return Number(epoch) > 0 ? EpochNumber(Number(epoch) - 1) : undefined;
343
374
  }
344
375
 
345
376
  public async isEpochComplete(epochNumber: EpochNumber): Promise<boolean> {
@@ -391,115 +422,11 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra
391
422
  return true;
392
423
  }
393
424
 
394
- public async getL2Tips(): Promise<L2Tips> {
395
- const [latestBlockNumber, provenBlockNumber, checkpointedBlockNumber, finalizedBlockNumber] = await Promise.all([
396
- this.getBlockNumber(),
397
- this.getProvenBlockNumber(),
398
- this.getCheckpointedL2BlockNumber(),
399
- this.getFinalizedL2BlockNumber(),
400
- ] as const);
401
-
402
- const beforeInitialblockNumber = BlockNumber(INITIAL_L2_BLOCK_NUM - 1);
403
-
404
- // Get the latest block header and checkpointed blocks for proven, finalised and checkpointed blocks
405
- const [latestBlockHeader, provenCheckpointedBlock, finalizedCheckpointedBlock, checkpointedBlock] =
406
- await Promise.all([
407
- latestBlockNumber > beforeInitialblockNumber ? this.getBlockHeader(latestBlockNumber) : undefined,
408
- provenBlockNumber > beforeInitialblockNumber ? this.getCheckpointedBlock(provenBlockNumber) : undefined,
409
- finalizedBlockNumber > beforeInitialblockNumber ? this.getCheckpointedBlock(finalizedBlockNumber) : undefined,
410
- checkpointedBlockNumber > beforeInitialblockNumber
411
- ? this.getCheckpointedBlock(checkpointedBlockNumber)
412
- : undefined,
413
- ] as const);
414
-
415
- if (latestBlockNumber > beforeInitialblockNumber && !latestBlockHeader) {
416
- throw new Error(`Failed to retrieve latest block header for block ${latestBlockNumber}`);
417
- }
418
-
419
- // Checkpointed blocks must exist for proven, finalized and checkpointed tips if they are beyond the initial block number.
420
- if (checkpointedBlockNumber > beforeInitialblockNumber && !checkpointedBlock?.block.header) {
421
- throw new Error(
422
- `Failed to retrieve checkpointed block header for block ${checkpointedBlockNumber} (latest block is ${latestBlockNumber})`,
423
- );
424
- }
425
-
426
- if (provenBlockNumber > beforeInitialblockNumber && !provenCheckpointedBlock?.block.header) {
427
- throw new Error(
428
- `Failed to retrieve proven checkpointed for block ${provenBlockNumber} (latest block is ${latestBlockNumber})`,
429
- );
430
- }
431
-
432
- if (finalizedBlockNumber > beforeInitialblockNumber && !finalizedCheckpointedBlock?.block.header) {
433
- throw new Error(
434
- `Failed to retrieve finalized block header for block ${finalizedBlockNumber} (latest block is ${latestBlockNumber})`,
435
- );
436
- }
437
-
438
- const latestBlockHeaderHash = (await latestBlockHeader?.hash()) ?? GENESIS_BLOCK_HEADER_HASH;
439
- const provenBlockHeaderHash = (await provenCheckpointedBlock?.block.header?.hash()) ?? GENESIS_BLOCK_HEADER_HASH;
440
- const finalizedBlockHeaderHash =
441
- (await finalizedCheckpointedBlock?.block.header?.hash()) ?? GENESIS_BLOCK_HEADER_HASH;
442
- const checkpointedBlockHeaderHash = (await checkpointedBlock?.block.header?.hash()) ?? GENESIS_BLOCK_HEADER_HASH;
443
-
444
- // Now attempt to retrieve checkpoints for proven, finalised and checkpointed blocks
445
- const [[provenBlockCheckpoint], [finalizedBlockCheckpoint], [checkpointedBlockCheckpoint]] = await Promise.all([
446
- provenCheckpointedBlock !== undefined
447
- ? await this.getCheckpoints(provenCheckpointedBlock?.checkpointNumber, 1)
448
- : [undefined],
449
- finalizedCheckpointedBlock !== undefined
450
- ? await this.getCheckpoints(finalizedCheckpointedBlock?.checkpointNumber, 1)
451
- : [undefined],
452
- checkpointedBlock !== undefined ? await this.getCheckpoints(checkpointedBlock?.checkpointNumber, 1) : [undefined],
453
- ]);
454
-
455
- const initialcheckpointId: CheckpointId = {
456
- number: CheckpointNumber.ZERO,
457
- hash: GENESIS_CHECKPOINT_HEADER_HASH.toString(),
458
- };
459
-
460
- const makeCheckpointId = (checkpoint: PublishedCheckpoint | undefined) => {
461
- if (checkpoint === undefined) {
462
- return initialcheckpointId;
463
- }
464
- return {
465
- number: checkpoint.checkpoint.number,
466
- hash: checkpoint.checkpoint.hash().toString(),
467
- };
468
- };
469
-
470
- const l2Tips: L2Tips = {
471
- proposed: {
472
- number: latestBlockNumber,
473
- hash: latestBlockHeaderHash.toString(),
474
- },
475
- proven: {
476
- block: {
477
- number: provenBlockNumber,
478
- hash: provenBlockHeaderHash.toString(),
479
- },
480
- checkpoint: makeCheckpointId(provenBlockCheckpoint),
481
- },
482
- finalized: {
483
- block: {
484
- number: finalizedBlockNumber,
485
- hash: finalizedBlockHeaderHash.toString(),
486
- },
487
- checkpoint: makeCheckpointId(finalizedBlockCheckpoint),
488
- },
489
- checkpointed: {
490
- block: {
491
- number: checkpointedBlockNumber,
492
- hash: checkpointedBlockHeaderHash.toString(),
493
- },
494
- checkpoint: makeCheckpointId(checkpointedBlockCheckpoint),
495
- },
496
- };
497
-
498
- return l2Tips;
425
+ public getL2Tips(): Promise<L2Tips> {
426
+ return this.l2TipsCache.getL2Tips();
499
427
  }
500
428
 
501
429
  public async rollbackTo(targetL2BlockNumber: BlockNumber): Promise<void> {
502
- // TODO(pw/mbps): This still assumes 1 block per checkpoint
503
430
  const currentBlocks = await this.getL2Tips();
504
431
  const currentL2Block = currentBlocks.proposed.number;
505
432
  const currentProvenBlock = currentBlocks.proven.block.number;
@@ -511,8 +438,25 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra
511
438
  if (!targetL2Block) {
512
439
  throw new Error(`Target L2 block ${targetL2BlockNumber} not found`);
513
440
  }
514
- const targetL1BlockNumber = targetL2Block.l1.blockNumber;
515
441
  const targetCheckpointNumber = targetL2Block.checkpointNumber;
442
+
443
+ // Rollback operates at checkpoint granularity: the target block must be the last block of its checkpoint.
444
+ const checkpointData = await this.store.getCheckpointData(targetCheckpointNumber);
445
+ if (checkpointData) {
446
+ const lastBlockInCheckpoint = BlockNumber(checkpointData.startBlock + checkpointData.blockCount - 1);
447
+ if (targetL2BlockNumber !== lastBlockInCheckpoint) {
448
+ const previousCheckpointBoundary =
449
+ checkpointData.startBlock > 1 ? BlockNumber(checkpointData.startBlock - 1) : BlockNumber(0);
450
+ throw new Error(
451
+ `Target L2 block ${targetL2BlockNumber} is not at a checkpoint boundary. ` +
452
+ `Checkpoint ${targetCheckpointNumber} spans blocks ${checkpointData.startBlock} to ${lastBlockInCheckpoint}. ` +
453
+ `Use block ${lastBlockInCheckpoint} to roll back to this checkpoint, ` +
454
+ `or block ${previousCheckpointBoundary} to roll back to the previous one.`,
455
+ );
456
+ }
457
+ }
458
+
459
+ const targetL1BlockNumber = targetL2Block.l1.blockNumber;
516
460
  const targetL1Block = await this.publicClient.getBlock({
517
461
  blockNumber: targetL1BlockNumber,
518
462
  includeTransactions: false,
@@ -531,13 +475,13 @@ export class Archiver extends ArchiverDataSourceBase implements L2BlockSink, Tra
531
475
  await this.store.setCheckpointSynchedL1BlockNumber(targetL1BlockNumber);
532
476
  await this.store.setMessageSynchedL1Block({ l1BlockNumber: targetL1BlockNumber, l1BlockHash: targetL1BlockHash });
533
477
  if (targetL2BlockNumber < currentProvenBlock) {
534
- this.log.info(`Clearing proven L2 block number`);
535
- await this.store.setProvenCheckpointNumber(CheckpointNumber.ZERO);
478
+ this.log.info(`Rolling back proven L2 checkpoint to ${targetCheckpointNumber}`);
479
+ await this.updater.setProvenCheckpointNumber(targetCheckpointNumber);
480
+ }
481
+ const currentFinalizedBlock = currentBlocks.finalized.block.number;
482
+ if (targetL2BlockNumber < currentFinalizedBlock) {
483
+ this.log.info(`Rolling back finalized L2 checkpoint to ${targetCheckpointNumber}`);
484
+ await this.updater.setFinalizedCheckpointNumber(targetCheckpointNumber);
536
485
  }
537
- // TODO(palla/reorg): Set the finalized block when we add support for it.
538
- // if (targetL2BlockNumber < currentFinalizedBlock) {
539
- // this.log.info(`Clearing finalized L2 block number`);
540
- // await this.store.setFinalizedL2BlockNumber(0);
541
- // }
542
486
  }
543
487
  }
package/src/errors.ts CHANGED
@@ -6,24 +6,9 @@ export class NoBlobBodiesFoundError extends Error {
6
6
  }
7
7
  }
8
8
 
9
- export class InitialBlockNumberNotSequentialError extends Error {
10
- constructor(
11
- public readonly newBlockNumber: number,
12
- public readonly previousBlockNumber: number | undefined,
13
- ) {
14
- super(
15
- `Cannot insert new block ${newBlockNumber} given previous block number in store is ${
16
- previousBlockNumber ?? 'undefined'
17
- }`,
18
- );
19
- }
20
- }
21
-
22
9
  export class BlockNumberNotSequentialError extends Error {
23
10
  constructor(newBlockNumber: number, previous: number | undefined) {
24
- super(
25
- `Cannot insert new block ${newBlockNumber} given previous block number in batch is ${previous ?? 'undefined'}`,
26
- );
11
+ super(`Cannot insert new block ${newBlockNumber} given previous block number is ${previous ?? 'undefined'}`);
27
12
  }
28
13
  }
29
14
 
@@ -48,14 +33,6 @@ export class CheckpointNumberNotSequentialError extends Error {
48
33
  }
49
34
  }
50
35
 
51
- export class CheckpointNumberNotConsistentError extends Error {
52
- constructor(newCheckpointNumber: number, previous: number | undefined) {
53
- super(
54
- `Cannot insert block for new checkpoint ${newCheckpointNumber} given previous block was checkpoint ${previous ?? 'undefined'}`,
55
- );
56
- }
57
- }
58
-
59
36
  export class BlockIndexNotSequentialError extends Error {
60
37
  constructor(newBlockIndex: number, previousBlockIndex: number | undefined) {
61
38
  super(
@@ -89,6 +66,15 @@ export class BlockNotFoundError extends Error {
89
66
  }
90
67
  }
91
68
 
69
+ /** Thrown when a proposed block matches a block that was already checkpointed. This is expected for late proposals. */
70
+ export class BlockAlreadyCheckpointedError extends Error {
71
+ constructor(public readonly blockNumber: number) {
72
+ super(`Block ${blockNumber} has already been checkpointed with the same content`);
73
+ this.name = 'BlockAlreadyCheckpointedError';
74
+ }
75
+ }
76
+
77
+ /** Thrown when a proposed block conflicts with an already checkpointed block (different content). */
92
78
  export class CannotOverwriteCheckpointedBlockError extends Error {
93
79
  constructor(
94
80
  public readonly blockNumber: number,
package/src/factory.ts CHANGED
@@ -7,14 +7,13 @@ import { Buffer32 } from '@aztec/foundation/buffer';
7
7
  import { merge } from '@aztec/foundation/collection';
8
8
  import { Fr } from '@aztec/foundation/curves/bn254';
9
9
  import { DateProvider } from '@aztec/foundation/timer';
10
- import type { DataStoreConfig } from '@aztec/kv-store/config';
11
10
  import { createStore } from '@aztec/kv-store/lmdb-v2';
12
11
  import { protocolContractNames } from '@aztec/protocol-contracts';
13
12
  import { BundledProtocolContractsProvider } from '@aztec/protocol-contracts/providers/bundle';
14
13
  import { FunctionType, decodeFunctionSignature } from '@aztec/stdlib/abi';
15
14
  import type { ArchiverEmitter } from '@aztec/stdlib/block';
16
15
  import { type ContractClassPublic, computePublicBytecodeCommitment } from '@aztec/stdlib/contract';
17
- import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers';
16
+ import type { DataStoreConfig } from '@aztec/stdlib/kv-store';
18
17
  import { getTelemetryClient } from '@aztec/telemetry-client';
19
18
 
20
19
  import { EventEmitter } from 'events';
@@ -25,20 +24,20 @@ import { type ArchiverConfig, mapArchiverConfig } from './config.js';
25
24
  import { ArchiverInstrumentation } from './modules/instrumentation.js';
26
25
  import { ArchiverL1Synchronizer } from './modules/l1_synchronizer.js';
27
26
  import { ARCHIVER_DB_VERSION, KVArchiverDataStore } from './store/kv_archiver_store.js';
27
+ import { L2TipsCache } from './store/l2_tips_cache.js';
28
28
 
29
29
  export const ARCHIVER_STORE_NAME = 'archiver';
30
30
 
31
31
  /** Creates an archiver store. */
32
32
  export async function createArchiverStore(
33
33
  userConfig: Pick<ArchiverConfig, 'archiverStoreMapSizeKb' | 'maxLogs'> & DataStoreConfig,
34
- l1Constants: Pick<L1RollupConstants, 'epochDuration'>,
35
34
  ) {
36
35
  const config = {
37
36
  ...userConfig,
38
37
  dataStoreMapSizeKb: userConfig.archiverStoreMapSizeKb ?? userConfig.dataStoreMapSizeKb,
39
38
  };
40
39
  const store = await createStore(ARCHIVER_STORE_NAME, ARCHIVER_DB_VERSION, config);
41
- return new KVArchiverDataStore(store, config.maxLogs, l1Constants);
40
+ return new KVArchiverDataStore(store, config.maxLogs);
42
41
  }
43
42
 
44
43
  /**
@@ -53,7 +52,7 @@ export async function createArchiver(
53
52
  deps: ArchiverDeps,
54
53
  opts: { blockUntilSync: boolean } = { blockUntilSync: true },
55
54
  ): Promise<Archiver> {
56
- const archiverStore = await createArchiverStore(config, { epochDuration: config.aztecEpochDuration });
55
+ const archiverStore = await createArchiverStore(config);
57
56
  await registerProtocolContracts(archiverStore);
58
57
 
59
58
  // Create Ethereum clients
@@ -77,14 +76,23 @@ export async function createArchiver(
77
76
  const inbox = new InboxContract(publicClient, config.l1Contracts.inboxAddress);
78
77
 
79
78
  // Fetch L1 constants from rollup contract
80
- const [l1StartBlock, l1GenesisTime, proofSubmissionEpochs, genesisArchiveRoot, slashingProposerAddress] =
81
- await Promise.all([
82
- rollup.getL1StartBlock(),
83
- rollup.getL1GenesisTime(),
84
- rollup.getProofSubmissionEpochs(),
85
- rollup.getGenesisArchiveTreeRoot(),
86
- rollup.getSlashingProposerAddress(),
87
- ] as const);
79
+ const [
80
+ l1StartBlock,
81
+ l1GenesisTime,
82
+ proofSubmissionEpochs,
83
+ genesisArchiveRoot,
84
+ slashingProposerAddress,
85
+ targetCommitteeSize,
86
+ rollupManaLimit,
87
+ ] = await Promise.all([
88
+ rollup.getL1StartBlock(),
89
+ rollup.getL1GenesisTime(),
90
+ rollup.getProofSubmissionEpochs(),
91
+ rollup.getGenesisArchiveTreeRoot(),
92
+ rollup.getSlashingProposerAddress(),
93
+ rollup.getTargetCommitteeSize(),
94
+ rollup.getManaLimit(),
95
+ ] as const);
88
96
 
89
97
  const l1StartBlockHash = await publicClient
90
98
  .getBlock({ blockNumber: l1StartBlock, includeTransactions: false })
@@ -100,7 +108,9 @@ export async function createArchiver(
100
108
  slotDuration,
101
109
  ethereumSlotDuration,
102
110
  proofSubmissionEpochs: Number(proofSubmissionEpochs),
111
+ targetCommitteeSize,
103
112
  genesisArchiveRoot: Fr.fromString(genesisArchiveRoot.toString()),
113
+ rollupManaLimit: Number(rollupManaLimit),
104
114
  };
105
115
 
106
116
  const archiverConfig = merge(
@@ -120,13 +130,15 @@ export async function createArchiver(
120
130
  // Create the event emitter that will be shared by archiver and synchronizer
121
131
  const events = new EventEmitter() as ArchiverEmitter;
122
132
 
133
+ // Create L2 tips cache shared by archiver and synchronizer
134
+ const l2TipsCache = new L2TipsCache(archiverStore.blockStore);
135
+
123
136
  // Create the L1 synchronizer
124
137
  const synchronizer = new ArchiverL1Synchronizer(
125
138
  publicClient,
126
139
  debugClient,
127
140
  rollup,
128
141
  inbox,
129
- { ...config.l1Contracts, slashingProposerAddress },
130
142
  archiverStore,
131
143
  archiverConfig,
132
144
  deps.blobClient,
@@ -136,6 +148,8 @@ export async function createArchiver(
136
148
  l1Constants,
137
149
  events,
138
150
  instrumentation.tracer,
151
+ l2TipsCache,
152
+ undefined, // log (use default)
139
153
  );
140
154
 
141
155
  const archiver = new Archiver(
@@ -150,6 +164,7 @@ export async function createArchiver(
150
164
  l1Constants,
151
165
  synchronizer,
152
166
  events,
167
+ l2TipsCache,
153
168
  );
154
169
 
155
170
  await archiver.start(opts.blockUntilSync);
package/src/index.ts CHANGED
@@ -8,5 +8,6 @@ export * from './config.js';
8
8
  export { type L1PublishedData } from './structs/published.js';
9
9
  export { KVArchiverDataStore, ARCHIVER_DB_VERSION } from './store/kv_archiver_store.js';
10
10
  export { ContractInstanceStore } from './store/contract_instance_store.js';
11
+ export { L2TipsCache } from './store/l2_tips_cache.js';
11
12
 
12
13
  export { retrieveCheckpointsFromRollup, retrieveL2ProofVerifiedEvents } from './l1/data_retrieval.js';
package/src/l1/README.md CHANGED
@@ -5,29 +5,27 @@ Modules and classes to handle data retrieval from L1 for the archiver.
5
5
  ## Calldata Retriever
6
6
 
7
7
  The sequencer publisher bundles multiple operations into a single multicall3 transaction for gas
8
- efficiency. A typical transaction includes:
8
+ efficiency. The archiver needs to extract the `propose` calldata from these bundled transactions
9
+ to reconstruct L2 blocks.
9
10
 
10
- 1. Attestation invalidations (if needed): `invalidateBadAttestation`, `invalidateInsufficientAttestations`
11
- 2. Block proposal: `propose` (exactly one per transaction to the rollup contract)
12
- 3. Governance and slashing (if needed): votes, payload creation/execution
11
+ The retriever uses hash matching against `attestationsHash` and `payloadDigest` from the
12
+ `CheckpointProposed` L1 event to verify it has found the correct propose calldata. These hashes
13
+ are always required.
13
14
 
14
- The archiver needs to extract the `propose` calldata from these bundled transactions to reconstruct
15
- L2 blocks. This class needs to handle scenarios where the transaction was submitted via multicall3,
16
- as well as alternative ways for submitting the `propose` call that other clients might use.
15
+ ### Multicall3 Decoding with Hash Matching
17
16
 
18
- ### Multicall3 Validation and Decoding
19
-
20
- First attempt to decode the transaction as a multicall3 `aggregate3` call with validation:
17
+ First attempt to decode the transaction as a multicall3 `aggregate3` call:
21
18
 
22
19
  - Check if transaction is to multicall3 address (`0xcA11bde05977b3631167028862bE2a173976CA11`)
23
20
  - Decode as `aggregate3(Call3[] calldata calls)`
24
- - Allow calls to known addresses and methods (rollup, governance, slashing contracts, etc.)
25
- - Find the single `propose` call to the rollup contract
26
- - Verify exactly one `propose` call exists
27
- - Extract and return the propose calldata
21
+ - Find all calls matching the rollup contract address and the `propose` function selector
22
+ - Verify each candidate by computing `attestationsHash` (keccak256 of ABI-encoded attestations)
23
+ and `payloadDigest` (keccak256 of the consensus payload signing hash) and comparing against
24
+ expected values from the `CheckpointProposed` event
25
+ - Return the verified candidate (if multiple verify, return the first with a warning)
28
26
 
29
- This step handles the common case efficiently without requiring expensive trace or debug RPC calls.
30
- Any validation failure triggers fallback to the next step.
27
+ This approach works regardless of what other calls are in the multicall3 bundle, because hash
28
+ matching identifies the correct propose call without needing an allowlist.
31
29
 
32
30
  ### Direct Propose Call
33
31
 
@@ -35,64 +33,23 @@ Second attempt to decode the transaction as a direct `propose` call to the rollu
35
33
 
36
34
  - Check if transaction is to the rollup address
37
35
  - Decode as `propose` function call
38
- - Verify the function is indeed `propose`
36
+ - Verify against expected hashes
39
37
  - Return the transaction input as the propose calldata
40
38
 
41
- This handles scenarios where clients submit transactions directly to the rollup contract without
42
- using multicall3 for bundling. Any validation failure triggers fallback to the next step.
43
-
44
39
  ### Spire Proposer Call
45
40
 
46
- Given existing attempts to route the call via the Spire proposer, we also check if the tx is `to` the
47
- proposer known address, and if so, we try decoding it as either a multicall3 or a direct call to the
48
- rollup contract.
49
-
50
- Similar as with the multicall3 check, we check that there are no other calls in the Spire proposer, so
51
- we are absolutely sure that the only call is the successful one to the rollup. Any extraneous call would
52
- imply an unexpected path to calling `propose` in the rollup contract, and since we cannot verify if the
53
- calldata arguments we extracted are the correct ones (see the section below), we cannot know for sure which
54
- one is the call that succeeded, so we don't know which calldata to process.
55
-
56
- Furthermore, since the Spire proposer is upgradeable, we check if the implementation has not changed in
57
- order to decode. As usual, any validation failure triggers fallback to the next step.
58
-
59
- ### Verifying Multicall3 Arguments
60
-
61
- **This is NOT implemented for simplicity's sake**
62
-
63
- If the checks above don't hold, such as when there are multiple calls to `propose`, then we cannot
64
- reliably extract the `propose` calldata from the multicall3 arguments alone. We can try a best-effort
65
- where we try all `propose` calls we see and validate them against on-chain data. Note that we can use these
66
- same strategies if we were to obtain the calldata from another source.
67
-
68
- #### TempBlockLog Verification
69
-
70
- Read the stored `TempBlockLog` for the L2 block number from L1 and verify it matches our decoded header hash,
71
- since the `TempBlockLog` stores the hash of the proposed block header, the payload commitment, and the attestations.
72
-
73
- However, `TempBlockLog` is only stored temporarily and deleted after proven, so this method only works for recent
74
- blocks, not for historical data syncing.
75
-
76
- #### Archive Verification
77
-
78
- Verify that the archive root in the decoded propose is correct with regard to the block header. This requires
79
- hashing the block header we have retrieved, inserting it into the archive tree, and checking the resulting root
80
- against the one we got from L1.
81
-
82
- However, this requires that the archive keeps a reference to world-state, which is not the case in the current
83
- system.
84
-
85
- #### Emit Commitments in Rollup Contract
86
-
87
- Modify rollup contract to emit commitments to the block header in the `L2BlockProposed` event, allowing us to easily
88
- verify the calldata we obtained vs the emitted event.
41
+ Given existing attempts to route the call via the Spire proposer, we also check if the tx is
42
+ `to` the proposer known address. If so, we extract all wrapped calls and try each as either
43
+ a multicall3 or direct propose call, using hash matching to find and verify the correct one.
89
44
 
90
- However, modifying the rollup contract is out of scope for this change. But we can implement this approach in `v2`.
45
+ Since the Spire proposer is upgradeable, we check that the implementation has not changed in
46
+ order to decode. Any validation failure triggers fallback to the next step.
91
47
 
92
48
  ### Debug and Trace Transaction Fallback
93
49
 
94
- Last, we use L1 node's trace/debug RPC methods to definitively identify the one successful `propose` call within the tx.
95
- We can then extract the exact calldata that hit the `propose` function in the rollup contract.
50
+ Last, we use L1 node's trace/debug RPC methods to definitively identify the one successful
51
+ `propose` call within the tx. We can then extract the exact calldata that hit the `propose`
52
+ function in the rollup contract.
96
53
 
97
- This approach requires access to a debug-enabled L1 node, which may be more resource-intensive, so we only
98
- use it as a fallback when the first step fails, which should be rare in practice.
54
+ This approach requires access to a debug-enabled L1 node, which may be more resource-intensive,
55
+ so we only use it as a fallback when earlier steps fail, which should be rare in practice.