@aztec/archiver 4.0.0-nightly.20260113 → 4.0.0-nightly.20260115
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +139 -22
- package/dest/archiver.d.ts +134 -0
- package/dest/archiver.d.ts.map +1 -0
- package/dest/archiver.js +767 -0
- package/dest/{archiver/config.d.ts → config.d.ts} +9 -1
- package/dest/config.d.ts.map +1 -0
- package/dest/{archiver/config.js → config.js} +9 -0
- package/dest/{archiver/errors.d.ts → errors.d.ts} +1 -1
- package/dest/errors.d.ts.map +1 -0
- package/dest/factory.d.ts +5 -6
- package/dest/factory.d.ts.map +1 -1
- package/dest/factory.js +82 -5
- package/dest/index.d.ts +10 -4
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +8 -3
- package/dest/interfaces.d.ts +9 -0
- package/dest/interfaces.d.ts.map +1 -0
- package/dest/interfaces.js +3 -0
- package/dest/{archiver/l1 → l1}/bin/retrieve-calldata.d.ts +1 -1
- package/dest/l1/bin/retrieve-calldata.d.ts.map +1 -0
- package/dest/{archiver/l1 → l1}/calldata_retriever.d.ts +2 -2
- package/dest/l1/calldata_retriever.d.ts.map +1 -0
- package/dest/l1/data_retrieval.d.ts +88 -0
- package/dest/l1/data_retrieval.d.ts.map +1 -0
- package/dest/{archiver/l1 → l1}/data_retrieval.js +32 -51
- package/dest/{archiver/l1 → l1}/debug_tx.d.ts +1 -1
- package/dest/l1/debug_tx.d.ts.map +1 -0
- package/dest/{archiver/l1 → l1}/spire_proposer.d.ts +1 -1
- package/dest/l1/spire_proposer.d.ts.map +1 -0
- package/dest/{archiver/l1 → l1}/trace_tx.d.ts +1 -1
- package/dest/l1/trace_tx.d.ts.map +1 -0
- package/dest/l1/types.d.ts +12 -0
- package/dest/l1/types.d.ts.map +1 -0
- package/dest/{archiver/l1 → l1}/validate_trace.d.ts +1 -1
- package/dest/l1/validate_trace.d.ts.map +1 -0
- package/dest/modules/data_source_base.d.ts +83 -0
- package/dest/modules/data_source_base.d.ts.map +1 -0
- package/dest/modules/data_source_base.js +301 -0
- package/dest/modules/data_store_updater.d.ts +46 -0
- package/dest/modules/data_store_updater.d.ts.map +1 -0
- package/dest/modules/data_store_updater.js +216 -0
- package/dest/modules/instrumentation.d.ts +37 -0
- package/dest/modules/instrumentation.d.ts.map +1 -0
- package/dest/modules/l1_synchronizer.d.ts +67 -0
- package/dest/modules/l1_synchronizer.d.ts.map +1 -0
- package/dest/modules/l1_synchronizer.js +1064 -0
- package/dest/{archiver → modules}/validation.d.ts +1 -1
- package/dest/modules/validation.d.ts.map +1 -0
- package/dest/{archiver/kv_archiver_store → store}/block_store.d.ts +2 -2
- package/dest/store/block_store.d.ts.map +1 -0
- package/dest/{archiver/kv_archiver_store → store}/block_store.js +1 -1
- package/dest/store/contract_class_store.d.ts +18 -0
- package/dest/store/contract_class_store.d.ts.map +1 -0
- package/dest/{archiver/kv_archiver_store → store}/contract_class_store.js +1 -1
- package/dest/store/contract_instance_store.d.ts +24 -0
- package/dest/store/contract_instance_store.d.ts.map +1 -0
- package/dest/{archiver/kv_archiver_store → store}/contract_instance_store.js +1 -1
- package/dest/{archiver/archiver_store.d.ts → store/kv_archiver_store.d.ts} +143 -139
- package/dest/store/kv_archiver_store.d.ts.map +1 -0
- package/dest/{archiver/kv_archiver_store → store}/kv_archiver_store.js +157 -49
- package/dest/{archiver/kv_archiver_store → store}/log_store.d.ts +1 -1
- package/dest/store/log_store.d.ts.map +1 -0
- package/dest/{archiver/kv_archiver_store → store}/message_store.d.ts +1 -1
- package/dest/store/message_store.d.ts.map +1 -0
- package/dest/{archiver/structs → structs}/data_retrieval.d.ts +1 -1
- package/dest/structs/data_retrieval.d.ts.map +1 -0
- package/dest/structs/inbox_message.d.ts +15 -0
- package/dest/structs/inbox_message.d.ts.map +1 -0
- package/dest/{archiver/structs → structs}/published.d.ts +1 -1
- package/dest/structs/published.d.ts.map +1 -0
- package/dest/test/fake_l1_state.d.ts +173 -0
- package/dest/test/fake_l1_state.d.ts.map +1 -0
- package/dest/test/fake_l1_state.js +364 -0
- package/dest/test/index.d.ts +2 -1
- package/dest/test/index.d.ts.map +1 -1
- package/dest/test/index.js +1 -0
- package/dest/test/mock_structs.d.ts +76 -2
- package/dest/test/mock_structs.d.ts.map +1 -1
- package/dest/test/mock_structs.js +133 -2
- package/package.json +15 -17
- package/src/archiver.ts +522 -0
- package/src/{archiver/config.ts → config.ts} +11 -0
- package/src/factory.ts +118 -6
- package/src/index.ts +10 -3
- package/src/interfaces.ts +9 -0
- package/src/{archiver/l1 → l1}/calldata_retriever.ts +1 -1
- package/src/{archiver/l1 → l1}/data_retrieval.ts +52 -69
- package/src/modules/data_source_base.ts +439 -0
- package/src/modules/data_store_updater.ts +318 -0
- package/src/modules/l1_synchronizer.ts +870 -0
- package/src/{archiver/kv_archiver_store → store}/block_store.ts +1 -1
- package/src/{archiver/kv_archiver_store → store}/contract_class_store.ts +1 -1
- package/src/{archiver/kv_archiver_store → store}/contract_instance_store.ts +1 -1
- package/src/{archiver/kv_archiver_store → store}/kv_archiver_store.ts +170 -8
- package/src/test/fake_l1_state.ts +561 -0
- package/src/test/index.ts +1 -0
- package/src/test/mock_structs.ts +247 -2
- package/dest/archiver/archiver.d.ts +0 -307
- package/dest/archiver/archiver.d.ts.map +0 -1
- package/dest/archiver/archiver.js +0 -2102
- package/dest/archiver/archiver_store.d.ts.map +0 -1
- package/dest/archiver/archiver_store.js +0 -4
- package/dest/archiver/archiver_store_test_suite.d.ts +0 -8
- package/dest/archiver/archiver_store_test_suite.d.ts.map +0 -1
- package/dest/archiver/archiver_store_test_suite.js +0 -2770
- package/dest/archiver/config.d.ts.map +0 -1
- package/dest/archiver/errors.d.ts.map +0 -1
- package/dest/archiver/index.d.ts +0 -7
- package/dest/archiver/index.d.ts.map +0 -1
- package/dest/archiver/index.js +0 -4
- package/dest/archiver/instrumentation.d.ts +0 -37
- package/dest/archiver/instrumentation.d.ts.map +0 -1
- package/dest/archiver/kv_archiver_store/block_store.d.ts.map +0 -1
- package/dest/archiver/kv_archiver_store/contract_class_store.d.ts +0 -18
- package/dest/archiver/kv_archiver_store/contract_class_store.d.ts.map +0 -1
- package/dest/archiver/kv_archiver_store/contract_instance_store.d.ts +0 -24
- package/dest/archiver/kv_archiver_store/contract_instance_store.d.ts.map +0 -1
- package/dest/archiver/kv_archiver_store/kv_archiver_store.d.ts +0 -159
- package/dest/archiver/kv_archiver_store/kv_archiver_store.d.ts.map +0 -1
- package/dest/archiver/kv_archiver_store/log_store.d.ts.map +0 -1
- package/dest/archiver/kv_archiver_store/message_store.d.ts.map +0 -1
- package/dest/archiver/l1/bin/retrieve-calldata.d.ts.map +0 -1
- package/dest/archiver/l1/calldata_retriever.d.ts.map +0 -1
- package/dest/archiver/l1/data_retrieval.d.ts +0 -90
- package/dest/archiver/l1/data_retrieval.d.ts.map +0 -1
- package/dest/archiver/l1/debug_tx.d.ts.map +0 -1
- package/dest/archiver/l1/spire_proposer.d.ts.map +0 -1
- package/dest/archiver/l1/trace_tx.d.ts.map +0 -1
- package/dest/archiver/l1/types.d.ts +0 -12
- package/dest/archiver/l1/types.d.ts.map +0 -1
- package/dest/archiver/l1/validate_trace.d.ts.map +0 -1
- package/dest/archiver/structs/data_retrieval.d.ts.map +0 -1
- package/dest/archiver/structs/inbox_message.d.ts +0 -15
- package/dest/archiver/structs/inbox_message.d.ts.map +0 -1
- package/dest/archiver/structs/published.d.ts.map +0 -1
- package/dest/archiver/validation.d.ts.map +0 -1
- package/dest/rpc/index.d.ts +0 -9
- package/dest/rpc/index.d.ts.map +0 -1
- package/dest/rpc/index.js +0 -15
- package/src/archiver/archiver.ts +0 -2265
- package/src/archiver/archiver_store.ts +0 -380
- package/src/archiver/archiver_store_test_suite.ts +0 -2842
- package/src/archiver/index.ts +0 -6
- package/src/rpc/index.ts +0 -16
- /package/dest/{archiver/errors.js → errors.js} +0 -0
- /package/dest/{archiver/l1 → l1}/bin/retrieve-calldata.js +0 -0
- /package/dest/{archiver/l1 → l1}/calldata_retriever.js +0 -0
- /package/dest/{archiver/l1 → l1}/debug_tx.js +0 -0
- /package/dest/{archiver/l1 → l1}/spire_proposer.js +0 -0
- /package/dest/{archiver/l1 → l1}/trace_tx.js +0 -0
- /package/dest/{archiver/l1 → l1}/types.js +0 -0
- /package/dest/{archiver/l1 → l1}/validate_trace.js +0 -0
- /package/dest/{archiver → modules}/instrumentation.js +0 -0
- /package/dest/{archiver → modules}/validation.js +0 -0
- /package/dest/{archiver/kv_archiver_store → store}/log_store.js +0 -0
- /package/dest/{archiver/kv_archiver_store → store}/message_store.js +0 -0
- /package/dest/{archiver/structs → structs}/data_retrieval.js +0 -0
- /package/dest/{archiver/structs → structs}/inbox_message.js +0 -0
- /package/dest/{archiver/structs → structs}/published.js +0 -0
- /package/src/{archiver/errors.ts → errors.ts} +0 -0
- /package/src/{archiver/l1 → l1}/README.md +0 -0
- /package/src/{archiver/l1 → l1}/bin/retrieve-calldata.ts +0 -0
- /package/src/{archiver/l1 → l1}/debug_tx.ts +0 -0
- /package/src/{archiver/l1 → l1}/spire_proposer.ts +0 -0
- /package/src/{archiver/l1 → l1}/trace_tx.ts +0 -0
- /package/src/{archiver/l1 → l1}/types.ts +0 -0
- /package/src/{archiver/l1 → l1}/validate_trace.ts +0 -0
- /package/src/{archiver → modules}/instrumentation.ts +0 -0
- /package/src/{archiver → modules}/validation.ts +0 -0
- /package/src/{archiver/kv_archiver_store → store}/log_store.ts +0 -0
- /package/src/{archiver/kv_archiver_store → store}/message_store.ts +0 -0
- /package/src/{archiver/structs → structs}/data_retrieval.ts +0 -0
- /package/src/{archiver/structs → structs}/inbox_message.ts +0 -0
- /package/src/{archiver/structs → structs}/published.ts +0 -0
@@ -0,0 +1,870 @@
+import type { BlobClientInterface } from '@aztec/blob-client/client';
+import { EpochCache } from '@aztec/epoch-cache';
+import { InboxContract, RollupContract } from '@aztec/ethereum/contracts';
+import type { L1ContractAddresses } from '@aztec/ethereum/l1-contract-addresses';
+import type { L1BlockId } from '@aztec/ethereum/l1-types';
+import type { ViemPublicClient, ViemPublicDebugClient } from '@aztec/ethereum/types';
+import { maxBigint } from '@aztec/foundation/bigint';
+import { BlockNumber, CheckpointNumber, EpochNumber } from '@aztec/foundation/branded-types';
+import { Buffer32 } from '@aztec/foundation/buffer';
+import { pick } from '@aztec/foundation/collection';
+import { Fr } from '@aztec/foundation/curves/bn254';
+import { EthAddress } from '@aztec/foundation/eth-address';
+import { type Logger, createLogger } from '@aztec/foundation/log';
+import { count } from '@aztec/foundation/string';
+import { DateProvider, Timer, elapsed } from '@aztec/foundation/timer';
+import { isDefined } from '@aztec/foundation/types';
+import { type ArchiverEmitter, L2BlockSourceEvents, type ValidateCheckpointResult } from '@aztec/stdlib/block';
+import { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
+import { type L1RollupConstants, getEpochAtSlot } from '@aztec/stdlib/epoch-helpers';
+import { computeInHashFromL1ToL2Messages } from '@aztec/stdlib/messaging';
+import { type Traceable, type Tracer, execInSpan, trackSpan } from '@aztec/telemetry-client';
+
+import { InitialCheckpointNumberNotSequentialError } from '../errors.js';
+import {
+  retrieveCheckpointsFromRollup,
+  retrieveL1ToL2Message,
+  retrieveL1ToL2Messages,
+  retrievedToPublishedCheckpoint,
+} from '../l1/data_retrieval.js';
+import type { KVArchiverDataStore } from '../store/kv_archiver_store.js';
+import type { InboxMessage } from '../structs/inbox_message.js';
+import { ArchiverDataStoreUpdater } from './data_store_updater.js';
+import type { ArchiverInstrumentation } from './instrumentation.js';
+import { validateCheckpointAttestations } from './validation.js';
+
+type RollupStatus = {
+  provenCheckpointNumber: CheckpointNumber;
+  provenArchive: string;
+  pendingCheckpointNumber: CheckpointNumber;
+  pendingArchive: string;
+  validationResult: ValidateCheckpointResult | undefined;
+  lastRetrievedCheckpoint?: PublishedCheckpoint;
+  lastL1BlockWithCheckpoint?: bigint;
+};
+
+/**
+ * Handles L1 synchronization for the archiver.
+ * Responsible for fetching checkpoints, L1→L2 messages, and handling L1 reorgs.
+ */
+export class ArchiverL1Synchronizer implements Traceable {
+  private l1BlockNumber: bigint | undefined;
+  private l1Timestamp: bigint | undefined;
+
+  private readonly updater: ArchiverDataStoreUpdater;
+  public readonly tracer: Tracer;
+
+  constructor(
+    private readonly publicClient: ViemPublicClient,
+    private readonly debugClient: ViemPublicDebugClient,
+    private readonly rollup: RollupContract,
+    private readonly inbox: InboxContract,
+    private readonly l1Addresses: Pick<
+      L1ContractAddresses,
+      'registryAddress' | 'governanceProposerAddress' | 'slashFactoryAddress'
+    > & { slashingProposerAddress: EthAddress },
+    private readonly store: KVArchiverDataStore,
+    private readonly config: {
+      batchSize: number;
+      skipValidateCheckpointAttestations?: boolean;
+      maxAllowedEthClientDriftSeconds: number;
+    },
+    private readonly blobClient: BlobClientInterface,
+    private readonly epochCache: EpochCache,
+    private readonly dateProvider: DateProvider,
+    private readonly instrumentation: ArchiverInstrumentation,
+    private readonly l1constants: L1RollupConstants & { l1StartBlockHash: Buffer32; genesisArchiveRoot: Fr },
+    private readonly events: ArchiverEmitter,
+    tracer: Tracer,
+    private readonly log: Logger = createLogger('archiver:l1-sync'),
+  ) {
+    this.updater = new ArchiverDataStoreUpdater(this.store);
+    this.tracer = tracer;
+  }
+
+  /** Returns the last L1 block number that was synced. */
+  public getL1BlockNumber(): bigint | undefined {
+    return this.l1BlockNumber;
+  }
+
+  /** Returns the last L1 timestamp that was synced. */
+  public getL1Timestamp(): bigint | undefined {
+    return this.l1Timestamp;
+  }
+
+  /** Checks that the ethereum node we are connected to has a latest timestamp no more than the allowed drift. Throw if not. */
+  public async testEthereumNodeSynced(): Promise<void> {
+    const maxAllowedDelay = this.config.maxAllowedEthClientDriftSeconds;
+    if (maxAllowedDelay === 0) {
+      return;
+    }
+    const { number, timestamp: l1Timestamp } = await this.publicClient.getBlock({ includeTransactions: false });
+    const currentTime = BigInt(this.dateProvider.nowInSeconds());
+    if (currentTime - l1Timestamp > BigInt(maxAllowedDelay)) {
+      throw new Error(
+        `Ethereum node is out of sync (last block synced ${number} at ${l1Timestamp} vs current time ${currentTime})`,
+      );
+    }
+  }
+
+  @trackSpan('Archiver.syncFromL1')
+  public async syncFromL1(initialSyncComplete: boolean): Promise<void> {
+    /**
+     * We keep track of three "pointers" to L1 blocks:
+     * 1. the last L1 block that published an L2 block
+     * 2. the last L1 block that added L1 to L2 messages
+     * 3. the last L1 block that cancelled L1 to L2 messages
+     *
+     * We do this to deal with L1 data providers that are eventually consistent (e.g. Infura).
+     * We guard against seeing block X with no data at one point, and later, the provider processes the block and it has data.
+     * The archiver will stay back, until there's data on L1 that will move the pointers forward.
+     */
+    const { l1StartBlock, l1StartBlockHash } = this.l1constants;
+    const {
+      blocksSynchedTo = l1StartBlock,
+      messagesSynchedTo = { l1BlockNumber: l1StartBlock, l1BlockHash: l1StartBlockHash },
+    } = await this.store.getSynchPoint();
+
+    const currentL1Block = await this.publicClient.getBlock({ includeTransactions: false });
+    const currentL1BlockNumber = currentL1Block.number;
+    const currentL1BlockHash = Buffer32.fromString(currentL1Block.hash);
+
+    this.log.trace(`Starting new archiver sync iteration`, {
+      blocksSynchedTo,
+      messagesSynchedTo,
+      currentL1BlockNumber,
+      currentL1BlockHash,
+    });
+
+    // ********** Ensuring Consistency of data pulled from L1 **********
+
+    /**
+     * There are a number of calls in this sync operation to L1 for retrieving
+     * events and transaction data. There are a couple of things we need to bear in mind
+     * to ensure that data is read exactly once.
+     *
+     * The first is the problem of eventually consistent ETH service providers like Infura.
+     * Each L1 read operation will query data from the last L1 block that it saw emit its kind of data.
+     * (so pending L1 to L2 messages will read from the last L1 block that emitted a message and so on)
+     * This will mean the archiver will lag behind L1 and will only advance when there's L2-relevant activity on the chain.
+     *
+     * The second is that in between the various calls to L1, the block number can move meaning some
+     * of the following calls will return data for blocks that were not present during earlier calls.
+     * To combat this for the time being we simply ensure that all data retrieval methods only retrieve
+     * data up to the currentBlockNumber captured at the top of this function. We might want to improve on this
+     * in future but for the time being it should give us the guarantees that we need
+     */
+
+    // ********** Events that are processed per L1 block **********
+    await this.handleL1ToL2Messages(messagesSynchedTo, currentL1BlockNumber, currentL1BlockHash);
+
+    // Get L1 timestamp for the current block
+    const currentL1Timestamp =
+      !this.l1Timestamp || !this.l1BlockNumber || this.l1BlockNumber !== currentL1BlockNumber
+        ? (await this.publicClient.getBlock({ blockNumber: currentL1BlockNumber })).timestamp
+        : this.l1Timestamp;
+
+    // Warn if the latest L1 block timestamp is too old
+    const maxAllowedDelay = this.config.maxAllowedEthClientDriftSeconds;
+    const now = this.dateProvider.nowInSeconds();
+    if (maxAllowedDelay > 0 && Number(currentL1Timestamp) <= now - maxAllowedDelay) {
+      this.log.warn(
+        `Latest L1 block ${currentL1BlockNumber} timestamp ${currentL1Timestamp} is too old. Make sure your Ethereum node is synced.`,
+        { currentL1BlockNumber, currentL1Timestamp, now, maxAllowedDelay },
+      );
+    }
+
+    // ********** Events that are processed per checkpoint **********
+    if (currentL1BlockNumber > blocksSynchedTo) {
+      // First we retrieve new checkpoints and L2 blocks and store them in the DB. This will also update the
+      // pending chain validation status, proven checkpoint number, and synched L1 block number.
+      const rollupStatus = await this.handleCheckpoints(blocksSynchedTo, currentL1BlockNumber, initialSyncComplete);
+      // Then we prune the current epoch if it'd reorg on next submission.
+      // Note that we don't do this before retrieving checkpoints because we may need to retrieve
+      // checkpoints from more than 2 epochs ago, so we want to make sure we have the latest view of
+      // the chain locally before we start unwinding stuff. This can be optimized by figuring out
+      // up to which point we're pruning, and then requesting checkpoints up to that point only.
+      const { rollupCanPrune } = await this.handleEpochPrune(
+        rollupStatus.provenCheckpointNumber,
+        currentL1BlockNumber,
+        currentL1Timestamp,
+      );
+
+      // If the last checkpoint we processed had an invalid attestation, we manually advance the L1 syncpoint
+      // past it, since otherwise we'll keep downloading it and reprocessing it on every iteration until
+      // we get a valid checkpoint to advance the syncpoint.
+      if (!rollupStatus.validationResult?.valid && rollupStatus.lastL1BlockWithCheckpoint !== undefined) {
+        await this.store.setCheckpointSynchedL1BlockNumber(rollupStatus.lastL1BlockWithCheckpoint);
+      }
+
+      // And lastly we check if we are missing any checkpoints behind us due to a possible L1 reorg.
+      // We only do this if rollup cant prune on the next submission. Otherwise we will end up
+      // re-syncing the checkpoints we have just unwound above. We also dont do this if the last checkpoint is invalid,
+      // since the archiver will rightfully refuse to sync up to it.
+      if (!rollupCanPrune && rollupStatus.validationResult?.valid) {
+        await this.checkForNewCheckpointsBeforeL1SyncPoint(rollupStatus, blocksSynchedTo, currentL1BlockNumber);
+      }
+
+      this.instrumentation.updateL1BlockHeight(currentL1BlockNumber);
+    }
+
+    // After syncing has completed, update the current l1 block number and timestamp,
+    // otherwise we risk announcing to the world that we've synced to a given point,
+    // but the corresponding blocks have not been processed (see #12631).
+    this.l1Timestamp = currentL1Timestamp;
+    this.l1BlockNumber = currentL1BlockNumber;
+
+    const l1BlockNumberAtEnd = await this.publicClient.getBlockNumber();
+    this.log.trace(`Archiver sync iteration complete`, {
+      l1BlockNumberAtStart: currentL1BlockNumber,
+      l1TimestampAtStart: currentL1Timestamp,
+      l1BlockNumberAtEnd,
+    });
+  }
+
+  /** Queries the rollup contract on whether a prune can be executed on the immediate next L1 block. */
+  private async canPrune(currentL1BlockNumber: bigint, currentL1Timestamp: bigint): Promise<boolean> {
+    const time = (currentL1Timestamp ?? 0n) + BigInt(this.l1constants.ethereumSlotDuration);
+    const result = await this.rollup.canPruneAtTime(time, { blockNumber: currentL1BlockNumber });
+    if (result) {
+      this.log.debug(`Rollup contract allows pruning at L1 block ${currentL1BlockNumber} time ${time}`, {
+        currentL1Timestamp,
+        pruneTime: time,
+        currentL1BlockNumber,
+      });
+    }
+    return result;
+  }
+
+  /** Checks if there'd be a reorg for the next checkpoint submission and start pruning now. */
+  @trackSpan('Archiver.handleEpochPrune')
+  private async handleEpochPrune(
+    provenCheckpointNumber: CheckpointNumber,
+    currentL1BlockNumber: bigint,
+    currentL1Timestamp: bigint,
+  ): Promise<{ rollupCanPrune: boolean }> {
+    const rollupCanPrune = await this.canPrune(currentL1BlockNumber, currentL1Timestamp);
+    const localPendingCheckpointNumber = await this.store.getSynchedCheckpointNumber();
+    const canPrune = localPendingCheckpointNumber > provenCheckpointNumber && rollupCanPrune;
+
+    if (canPrune) {
+      const timer = new Timer();
+      const pruneFrom = CheckpointNumber(provenCheckpointNumber + 1);
+
+      const header = await this.getCheckpointHeader(pruneFrom);
+      if (header === undefined) {
+        throw new Error(`Missing checkpoint header ${pruneFrom}`);
+      }
+
+      const pruneFromSlotNumber = header.slotNumber;
+      const pruneFromEpochNumber: EpochNumber = getEpochAtSlot(pruneFromSlotNumber, this.l1constants);
+
+      const checkpointsToUnwind = localPendingCheckpointNumber - provenCheckpointNumber;
+
+      const checkpointPromises = Array.from({ length: checkpointsToUnwind })
+        .fill(0)
+        .map((_, i) => this.store.getCheckpointData(CheckpointNumber(i + pruneFrom)));
+      const checkpoints = await Promise.all(checkpointPromises);
+
+      const blockPromises = await Promise.all(
+        checkpoints
+          .filter(isDefined)
+          .map(cp => this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber))),
+      );
+      const newBlocks = blockPromises.filter(isDefined).flat();
+
+      // Emit an event for listening services to react to the chain prune
+      this.events.emit(L2BlockSourceEvents.L2PruneDetected, {
+        type: L2BlockSourceEvents.L2PruneDetected,
+        epochNumber: pruneFromEpochNumber,
+        blocks: newBlocks,
+      });
+
+      this.log.debug(
+        `L2 prune from ${provenCheckpointNumber + 1} to ${localPendingCheckpointNumber} will occur on next checkpoint submission.`,
+      );
+      await this.updater.unwindCheckpointsWithContractData(localPendingCheckpointNumber, checkpointsToUnwind);
+      this.log.warn(
+        `Unwound ${count(checkpointsToUnwind, 'checkpoint')} from checkpoint ${localPendingCheckpointNumber} ` +
+          `to ${provenCheckpointNumber} due to predicted reorg at L1 block ${currentL1BlockNumber}. ` +
+          `Updated latest checkpoint is ${await this.store.getSynchedCheckpointNumber()}.`,
+      );
+      this.instrumentation.processPrune(timer.ms());
+      // TODO(palla/reorg): Do we need to set the block synched L1 block number here?
+      // Seems like the next iteration should handle this.
+      // await this.store.setCheckpointSynchedL1BlockNumber(currentL1BlockNumber);
+    }
+
+    return { rollupCanPrune };
+  }
+
+  private nextRange(end: bigint, limit: bigint): [bigint, bigint] {
+    const batchSize = (this.config.batchSize * this.l1constants.slotDuration) / this.l1constants.ethereumSlotDuration;
+    const nextStart = end + 1n;
+    const nextEnd = nextStart + BigInt(batchSize);
+    if (nextEnd > limit) {
+      return [nextStart, limit];
+    }
+    return [nextStart, nextEnd];
+  }
+
+  @trackSpan('Archiver.handleL1ToL2Messages')
+  private async handleL1ToL2Messages(
+    messagesSyncPoint: L1BlockId,
+    currentL1BlockNumber: bigint,
+    _currentL1BlockHash: Buffer32,
+  ): Promise<void> {
+    this.log.trace(`Handling L1 to L2 messages from ${messagesSyncPoint.l1BlockNumber} to ${currentL1BlockNumber}.`);
+    if (currentL1BlockNumber <= messagesSyncPoint.l1BlockNumber) {
+      return;
+    }
+
+    // Load remote and local inbox states.
+    const localMessagesInserted = await this.store.getTotalL1ToL2MessageCount();
+    const localLastMessage = await this.store.getLastL1ToL2Message();
+    const remoteMessagesState = await this.inbox.getState({ blockNumber: currentL1BlockNumber });
+
+    this.log.trace(`Retrieved remote inbox state at L1 block ${currentL1BlockNumber}.`, {
+      localMessagesInserted,
+      localLastMessage,
+      remoteMessagesState,
+    });
+
+    // Compare message count and rolling hash. If they match, no need to retrieve anything.
+    if (
+      remoteMessagesState.totalMessagesInserted === localMessagesInserted &&
+      remoteMessagesState.messagesRollingHash.equals(localLastMessage?.rollingHash ?? Buffer32.ZERO)
+    ) {
+      this.log.trace(
+        `No L1 to L2 messages to query between L1 blocks ${messagesSyncPoint.l1BlockNumber} and ${currentL1BlockNumber}.`,
+      );
+      return;
+    }
+
+    // Check if our syncpoint is still valid. If not, there was an L1 reorg and we need to re-retrieve messages.
+    // Note that we need to fetch it from logs and not from inbox state at the syncpoint l1 block number, since it
+    // could be older than 128 blocks and non-archive nodes cannot resolve it.
+    if (localLastMessage) {
+      const remoteLastMessage = await this.retrieveL1ToL2Message(localLastMessage.leaf);
+      this.log.trace(`Retrieved remote message for local last`, { remoteLastMessage, localLastMessage });
+      if (!remoteLastMessage || !remoteLastMessage.rollingHash.equals(localLastMessage.rollingHash)) {
+        this.log.warn(`Rolling back L1 to L2 messages due to hash mismatch or msg not found.`, {
+          remoteLastMessage,
+          messagesSyncPoint,
+          localLastMessage,
+        });
+
+        messagesSyncPoint = await this.rollbackL1ToL2Messages(localLastMessage, messagesSyncPoint);
+        this.log.debug(`Rolled back L1 to L2 messages to L1 block ${messagesSyncPoint.l1BlockNumber}.`, {
+          messagesSyncPoint,
+        });
+      }
+    }
+
+    // Retrieve and save messages in batches. Each batch is estimated to acommodate up to L2 'blockBatchSize' blocks,
+    let searchStartBlock: bigint = 0n;
+    let searchEndBlock: bigint = messagesSyncPoint.l1BlockNumber;
+
+    let lastMessage: InboxMessage | undefined;
+    let messageCount = 0;
+
+    do {
+      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock, currentL1BlockNumber);
+      this.log.trace(`Retrieving L1 to L2 messages between L1 blocks ${searchStartBlock} and ${searchEndBlock}.`);
+      const messages = await retrieveL1ToL2Messages(this.inbox, searchStartBlock, searchEndBlock);
+      this.log.verbose(
+        `Retrieved ${messages.length} new L1 to L2 messages between L1 blocks ${searchStartBlock} and ${searchEndBlock}.`,
+      );
+      const timer = new Timer();
+      await this.store.addL1ToL2Messages(messages);
+      const perMsg = timer.ms() / messages.length;
+      this.instrumentation.processNewMessages(messages.length, perMsg);
+      for (const msg of messages) {
+        this.log.debug(`Downloaded L1 to L2 message`, { ...msg, leaf: msg.leaf.toString() });
+        lastMessage = msg;
+        messageCount++;
+      }
+    } while (searchEndBlock < currentL1BlockNumber);
+
+    // Log stats for messages retrieved (if any).
+    if (messageCount > 0) {
+      this.log.info(
+        `Retrieved ${messageCount} new L1 to L2 messages up to message with index ${lastMessage?.index} for checkpoint ${lastMessage?.checkpointNumber}`,
+        { lastMessage, messageCount },
+      );
+    }
+
+    // Warn if the resulting rolling hash does not match the remote state we had retrieved.
+    if (lastMessage && !lastMessage.rollingHash.equals(remoteMessagesState.messagesRollingHash)) {
+      this.log.warn(`Last message retrieved rolling hash does not match remote state.`, {
+        lastMessage,
+        remoteMessagesState,
+      });
+    }
+  }
+
+  private async retrieveL1ToL2Message(leaf: Fr): Promise<InboxMessage | undefined> {
+    const currentL1BlockNumber = await this.publicClient.getBlockNumber();
+    let searchStartBlock: bigint = 0n;
+    let searchEndBlock: bigint = this.l1constants.l1StartBlock - 1n;
+
+    do {
+      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock, currentL1BlockNumber);
+
+      const message = await retrieveL1ToL2Message(this.inbox, leaf, searchStartBlock, searchEndBlock);
+
+      if (message) {
+        return message;
+      }
+    } while (searchEndBlock < currentL1BlockNumber);
+
+    return undefined;
+  }
+
+  private async rollbackL1ToL2Messages(
+    localLastMessage: InboxMessage,
+    messagesSyncPoint: L1BlockId,
+  ): Promise<L1BlockId> {
+    // Slowly go back through our messages until we find the last common message.
+    // We could query the logs in batch as an optimization, but the depth of the reorg should not be deep, and this
+    // is a very rare case, so it's fine to query one log at a time.
+    let commonMsg: undefined | InboxMessage;
+    this.log.verbose(`Searching most recent common L1 to L2 message at or before index ${localLastMessage.index}`);
+    for await (const msg of this.store.iterateL1ToL2Messages({ reverse: true, end: localLastMessage.index })) {
+      const remoteMsg = await this.retrieveL1ToL2Message(msg.leaf);
+      const logCtx = { remoteMsg, localMsg: msg };
+      if (remoteMsg && remoteMsg.rollingHash.equals(msg.rollingHash)) {
+        this.log.verbose(
+          `Found most recent common L1 to L2 message at index ${msg.index} on L1 block ${msg.l1BlockNumber}`,
+          logCtx,
+        );
+        commonMsg = remoteMsg;
+        break;
+      } else if (remoteMsg) {
+        this.log.debug(`Local L1 to L2 message with index ${msg.index} has different rolling hash`, logCtx);
+      } else {
+        this.log.debug(`Local L1 to L2 message with index ${msg.index} not found on L1`, logCtx);
+      }
+    }
+
+    // Delete everything after the common message we found.
+    const lastGoodIndex = commonMsg?.index;
+    this.log.warn(`Deleting all local L1 to L2 messages after index ${lastGoodIndex ?? 'undefined'}`);
+    await this.store.removeL1ToL2Messages(lastGoodIndex !== undefined ? lastGoodIndex + 1n : 0n);
+
+    // Update the syncpoint so the loop below reprocesses the changed messages. We go to the block before
+    // the last common one, so we force reprocessing it, in case new messages were added on that same L1 block
+    // after the last common message.
+    const syncPointL1BlockNumber = commonMsg ? commonMsg.l1BlockNumber - 1n : this.l1constants.l1StartBlock;
+    const syncPointL1BlockHash = await this.getL1BlockHash(syncPointL1BlockNumber);
+    messagesSyncPoint = { l1BlockNumber: syncPointL1BlockNumber, l1BlockHash: syncPointL1BlockHash };
+    await this.store.setMessageSynchedL1Block(messagesSyncPoint);
+    return messagesSyncPoint;
+  }
+
+  private async getL1BlockHash(l1BlockNumber: bigint): Promise<Buffer32> {
+    const block = await this.publicClient.getBlock({ blockNumber: l1BlockNumber, includeTransactions: false });
+    if (!block) {
+      throw new Error(`Missing L1 block ${l1BlockNumber}`);
+    }
+    return Buffer32.fromString(block.hash);
+  }
+
+  @trackSpan('Archiver.handleCheckpoints')
+  private async handleCheckpoints(
+    blocksSynchedTo: bigint,
+    currentL1BlockNumber: bigint,
+    initialSyncComplete: boolean,
+  ): Promise<RollupStatus> {
+    const localPendingCheckpointNumber = await this.store.getSynchedCheckpointNumber();
+    const initialValidationResult: ValidateCheckpointResult | undefined =
+      await this.store.getPendingChainValidationStatus();
+    const {
+      provenCheckpointNumber,
+      provenArchive,
+      pendingCheckpointNumber,
+      pendingArchive,
+      archiveOfMyCheckpoint: archiveForLocalPendingCheckpointNumber,
+    } = await execInSpan(this.tracer, 'Archiver.getRollupStatus', () =>
+      this.rollup.status(localPendingCheckpointNumber, { blockNumber: currentL1BlockNumber }),
+    );
+    const rollupStatus: RollupStatus = {
+      provenCheckpointNumber,
+      provenArchive: provenArchive.toString(),
+      pendingCheckpointNumber,
+      pendingArchive: pendingArchive.toString(),
+      validationResult: initialValidationResult,
+    };
+    this.log.trace(`Retrieved rollup status at current L1 block ${currentL1BlockNumber}.`, {
+      localPendingCheckpointNumber,
+      blocksSynchedTo,
+      currentL1BlockNumber,
+      archiveForLocalPendingCheckpointNumber,
+      ...rollupStatus,
+    });
+
+    const updateProvenCheckpoint = async () => {
+      // Annoying edge case: if proven checkpoint is moved back to 0 due to a reorg at the beginning of the chain,
+      // we need to set it to zero. This is an edge case because we dont have a checkpoint zero (initial checkpoint is one),
+      // so localCheckpointForDestinationProvenCheckpointNumber would not be found below.
+      if (provenCheckpointNumber === 0) {
+        const localProvenCheckpointNumber = await this.store.getProvenCheckpointNumber();
+        if (localProvenCheckpointNumber !== provenCheckpointNumber) {
+          await this.store.setProvenCheckpointNumber(provenCheckpointNumber);
+          this.log.info(`Rolled back proven chain to checkpoint ${provenCheckpointNumber}`, { provenCheckpointNumber });
+        }
+      }
+
+      const localCheckpointForDestinationProvenCheckpointNumber =
+        await this.store.getCheckpointData(provenCheckpointNumber);
+
+      // Sanity check. I've hit what seems to be a state where the proven checkpoint is set to a value greater than the latest
+      // synched checkpoint when requesting L2Tips from the archiver. This is the only place where the proven checkpoint is set.
+      const synched = await this.store.getSynchedCheckpointNumber();
+      if (
+        localCheckpointForDestinationProvenCheckpointNumber &&
+        synched < localCheckpointForDestinationProvenCheckpointNumber.checkpointNumber
+      ) {
+        this.log.error(
+          `Hit local checkpoint greater than last synched checkpoint: ${localCheckpointForDestinationProvenCheckpointNumber.checkpointNumber} > ${synched}`,
+        );
+      }
+
+      this.log.trace(
+        `Local checkpoint for remote proven checkpoint ${provenCheckpointNumber} is ${
+          localCheckpointForDestinationProvenCheckpointNumber?.archive.root.toString() ?? 'undefined'
+        }`,
+      );
+
+      if (
+        localCheckpointForDestinationProvenCheckpointNumber &&
+        provenArchive.equals(localCheckpointForDestinationProvenCheckpointNumber.archive.root)
+      ) {
+        const localProvenCheckpointNumber = await this.store.getProvenCheckpointNumber();
+        if (localProvenCheckpointNumber !== provenCheckpointNumber) {
+          await this.store.setProvenCheckpointNumber(provenCheckpointNumber);
+          this.log.info(`Updated proven chain to checkpoint ${provenCheckpointNumber}`, { provenCheckpointNumber });
+          const provenSlotNumber = localCheckpointForDestinationProvenCheckpointNumber.header.slotNumber;
+          const provenEpochNumber: EpochNumber = getEpochAtSlot(provenSlotNumber, this.l1constants);
+          const lastBlockNumberInCheckpoint =
+            localCheckpointForDestinationProvenCheckpointNumber.startBlock +
+            localCheckpointForDestinationProvenCheckpointNumber.numBlocks -
+            1;
+
+          this.events.emit(L2BlockSourceEvents.L2BlockProven, {
+            type: L2BlockSourceEvents.L2BlockProven,
+            blockNumber: BlockNumber(lastBlockNumberInCheckpoint),
+            slotNumber: provenSlotNumber,
+            epochNumber: provenEpochNumber,
+          });
+          this.instrumentation.updateLastProvenBlock(lastBlockNumberInCheckpoint);
+        } else {
+          this.log.trace(`Proven checkpoint ${provenCheckpointNumber} already stored.`);
+        }
+      }
+    };
+
+    // This is an edge case that we only hit if there are no proposed checkpoints.
+    // If we have 0 checkpoints locally and there are no checkpoints onchain there is nothing to do.
+    const noCheckpoints = localPendingCheckpointNumber === 0 && pendingCheckpointNumber === 0;
+    if (noCheckpoints) {
+      await this.store.setCheckpointSynchedL1BlockNumber(currentL1BlockNumber);
+      this.log.debug(
+        `No checkpoints to retrieve from ${blocksSynchedTo + 1n} to ${currentL1BlockNumber}, no checkpoints on chain`,
+      );
+      return rollupStatus;
+    }
+
+    await updateProvenCheckpoint();
+
+    // Related to the L2 reorgs of the pending chain. We are only interested in actually addressing a reorg if there
+    // are any state that could be impacted by it. If we have no checkpoints, there is no impact.
+    if (localPendingCheckpointNumber > 0) {
+      const localPendingCheckpoint = await this.store.getCheckpointData(localPendingCheckpointNumber);
+      if (localPendingCheckpoint === undefined) {
+        throw new Error(`Missing checkpoint ${localPendingCheckpointNumber}`);
+      }
+
+      const localPendingArchiveRoot = localPendingCheckpoint.archive.root.toString();
+      const noCheckpointSinceLast = localPendingCheckpoint && pendingArchive.toString() === localPendingArchiveRoot;
+      if (noCheckpointSinceLast) {
+        // We believe the following line causes a problem when we encounter L1 re-orgs.
+        // Basically, by setting the synched L1 block number here, we are saying that we have
+        // processed all checkpoints up to the current L1 block number and we will not attempt to retrieve logs from
+        // this block again (or any blocks before).
+        // However, in the re-org scenario, our L1 node is temporarily lying to us and we end up potentially missing checkpoints.
+        // We must only set this block number based on actually retrieved logs.
+        // TODO(#8621): Tackle this properly when we handle L1 Re-orgs.
+        // await this.store.setCheckpointSynchedL1BlockNumber(currentL1BlockNumber);
+        this.log.debug(`No checkpoints to retrieve from ${blocksSynchedTo + 1n} to ${currentL1BlockNumber}`);
+        return rollupStatus;
+      }
+
+      const localPendingCheckpointInChain = archiveForLocalPendingCheckpointNumber.equals(
+        localPendingCheckpoint.archive.root,
+      );
+      if (!localPendingCheckpointInChain) {
+        // If our local pending checkpoint tip is not in the chain on L1 a "prune" must have happened
+        // or the L1 have reorged.
+        // In any case, we have to figure out how far into the past the action will take us.
+        // For simplicity here, we will simply rewind until we end in a checkpoint that is also on the chain on L1.
+        this.log.debug(
+          `L2 prune has been detected due to local pending checkpoint ${localPendingCheckpointNumber} not in chain`,
+          { localPendingCheckpointNumber, localPendingArchiveRoot, archiveForLocalPendingCheckpointNumber },
+        );
+
+        let tipAfterUnwind = localPendingCheckpointNumber;
+        while (true) {
+          const candidateCheckpoint = await this.store.getCheckpointData(tipAfterUnwind);
+          if (candidateCheckpoint === undefined) {
+            break;
+          }
+
+          const archiveAtContract = await this.rollup.archiveAt(candidateCheckpoint.checkpointNumber);
+          this.log.trace(
+            `Checking local checkpoint ${candidateCheckpoint.checkpointNumber} with archive ${candidateCheckpoint.archive.root}`,
+            {
+              archiveAtContract,
+              archiveLocal: candidateCheckpoint.archive.root.toString(),
+            },
+          );
+          if (archiveAtContract.equals(candidateCheckpoint.archive.root)) {
+            break;
+          }
+          tipAfterUnwind--;
+        }
+
+        const checkpointsToUnwind = localPendingCheckpointNumber - tipAfterUnwind;
+        await this.updater.unwindCheckpointsWithContractData(localPendingCheckpointNumber, checkpointsToUnwind);
+
+        this.log.warn(
+          `Unwound ${count(checkpointsToUnwind, 'checkpoint')} from checkpoint ${localPendingCheckpointNumber} ` +
+            `due to mismatched checkpoint hashes at L1 block ${currentL1BlockNumber}. ` +
+            `Updated L2 latest checkpoint is ${await this.store.getSynchedCheckpointNumber()}.`,
+        );
+      }
+    }
+
+    // Retrieve checkpoints in batches. Each batch is estimated to accommodate up to 'blockBatchSize' L1 blocks,
+    // computed using the L2 block time vs the L1 block time.
+    let searchStartBlock: bigint = blocksSynchedTo;
+    let searchEndBlock: bigint = blocksSynchedTo;
+    let lastRetrievedCheckpoint: PublishedCheckpoint | undefined;
+    let lastL1BlockWithCheckpoint: bigint | undefined = undefined;
+
+    do {
+      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock, currentL1BlockNumber);
+
+      this.log.trace(`Retrieving checkpoints from L1 block ${searchStartBlock} to ${searchEndBlock}`);
+
+      // TODO(md): Retrieve from blob client then from consensus client, then from peers
+      const retrievedCheckpoints = await execInSpan(this.tracer, 'Archiver.retrieveCheckpointsFromRollup', () =>
+        retrieveCheckpointsFromRollup(
+          this.rollup,
+          this.publicClient,
+          this.debugClient,
+          this.blobClient,
+          searchStartBlock, // TODO(palla/reorg): If the L2 reorg was due to an L1 reorg, we need to start search earlier
+          searchEndBlock,
+          this.l1Addresses,
+          this.instrumentation,
+          this.log,
+          !initialSyncComplete, // isHistoricalSync
+        ),
+      );
+
+      if (retrievedCheckpoints.length === 0) {
+        // We are not calling `setBlockSynchedL1BlockNumber` because it may cause sync issues if based off infura.
+        // See further details in earlier comments.
+        this.log.trace(`Retrieved no new checkpoints from L1 block ${searchStartBlock} to ${searchEndBlock}`);
+        continue;
+      }
+
+      this.log.debug(
+        `Retrieved ${retrievedCheckpoints.length} new checkpoints between L1 blocks ${searchStartBlock} and ${searchEndBlock}`,
+        {
+          lastProcessedCheckpoint: retrievedCheckpoints[retrievedCheckpoints.length - 1].l1,
+          searchStartBlock,
+          searchEndBlock,
+        },
+      );
+
+      const publishedCheckpoints = await Promise.all(retrievedCheckpoints.map(b => retrievedToPublishedCheckpoint(b)));
+      const validCheckpoints: PublishedCheckpoint[] = [];
+
+      for (const published of publishedCheckpoints) {
+        const validationResult = this.config.skipValidateCheckpointAttestations
+          ? { valid: true as const }
+          : await validateCheckpointAttestations(published, this.epochCache, this.l1constants, this.log);
+
+        // Only update the validation result if it has changed, so we can keep track of the first invalid checkpoint
+        // in case there is a sequence of more than one invalid checkpoint, as we need to invalidate the first one.
+        // There is an exception though: if a checkpoint is invalidated and replaced with another invalid checkpoint,
+        // we need to update the validation result, since we need to be able to invalidate the new one.
+        // See test 'chain progresses if an invalid checkpoint is invalidated with an invalid one' for more info.
+        if (
+          rollupStatus.validationResult?.valid !== validationResult.valid ||
+          (!rollupStatus.validationResult.valid &&
+            !validationResult.valid &&
+            rollupStatus.validationResult.checkpoint.checkpointNumber === validationResult.checkpoint.checkpointNumber)
+        ) {
+          rollupStatus.validationResult = validationResult;
+        }
+
+        if (!validationResult.valid) {
+          this.log.warn(`Skipping checkpoint ${published.checkpoint.number} due to invalid attestations`, {
+            checkpointHash: published.checkpoint.hash(),
+            l1BlockNumber: published.l1.blockNumber,
+            ...pick(validationResult, 'reason'),
+          });
+
+          // Emit event for invalid checkpoint detection
+          this.events.emit(L2BlockSourceEvents.InvalidAttestationsCheckpointDetected, {
+            type: L2BlockSourceEvents.InvalidAttestationsCheckpointDetected,
+            validationResult,
+          });
+
+          // We keep consuming checkpoints if we find an invalid one, since we do not listen for CheckpointInvalidated events
+          // We just pretend the invalid ones are not there and keep consuming the next checkpoints
+          // Note that this breaks if the committee ever attests to a descendant of an invalid checkpoint
+          continue;
+        }
+
+        // Check the inHash of the checkpoint against the l1->l2 messages.
+        // The messages should've been synced up to the currentL1BlockNumber and must be available for the published
+        // checkpoints we just retrieved.
+        const l1ToL2Messages = await this.store.getL1ToL2Messages(published.checkpoint.number);
+        const computedInHash = computeInHashFromL1ToL2Messages(l1ToL2Messages);
+        const publishedInHash = published.checkpoint.header.inHash;
+        if (!computedInHash.equals(publishedInHash)) {
+          this.log.fatal(`Mismatch inHash for checkpoint ${published.checkpoint.number}`, {
+            checkpointHash: published.checkpoint.hash(),
+            l1BlockNumber: published.l1.blockNumber,
+            computedInHash,
+            publishedInHash,
+          });
+          // Throwing an error since this is most likely caused by a bug.
+          throw new Error(
+            `Mismatch inHash for checkpoint ${published.checkpoint.number}. Expected ${computedInHash} but got ${publishedInHash}`,
+          );
+        }
+
+        validCheckpoints.push(published);
+        this.log.debug(
+          `Ingesting new checkpoint ${published.checkpoint.number} with ${published.checkpoint.blocks.length} blocks`,
+          {
+            checkpointHash: published.checkpoint.hash(),
+            l1BlockNumber: published.l1.blockNumber,
+            ...published.checkpoint.header.toInspect(),
+            blocks: published.checkpoint.blocks.map(b => b.getStats()),
+          },
+        );
+      }
+
+      try {
+        const updatedValidationResult =
+          rollupStatus.validationResult === initialValidationResult ? undefined : rollupStatus.validationResult;
+        const [processDuration] = await elapsed(() =>
+          execInSpan(this.tracer, 'Archiver.addCheckpoints', () =>
+            this.updater.addCheckpointsWithContractData(validCheckpoints, updatedValidationResult),
+          ),
+        );
+        this.instrumentation.processNewBlocks(
+          processDuration / validCheckpoints.length,
+          validCheckpoints.flatMap(c => c.checkpoint.blocks),
+        );
+      } catch (err) {
+        if (err instanceof InitialCheckpointNumberNotSequentialError) {
+          const { previousCheckpointNumber, newCheckpointNumber } = err;
+          const previousCheckpoint = previousCheckpointNumber
+            ? await this.store.getCheckpointData(CheckpointNumber(previousCheckpointNumber))
+            : undefined;
+          const updatedL1SyncPoint = previousCheckpoint?.l1.blockNumber ?? this.l1constants.l1StartBlock;
+          await this.store.setCheckpointSynchedL1BlockNumber(updatedL1SyncPoint);
+          this.log.warn(
+            `Attempting to insert checkpoint ${newCheckpointNumber} with previous block ${previousCheckpointNumber}. Rolling back L1 sync point to ${updatedL1SyncPoint} to try and fetch the missing blocks.`,
+            {
+              previousCheckpointNumber,
+              newCheckpointNumber,
+              updatedL1SyncPoint,
+            },
+          );
+        }
+        throw err;
+      }
+
+      for (const checkpoint of validCheckpoints) {
+        this.log.info(`Downloaded checkpoint ${checkpoint.checkpoint.number}`, {
+          checkpointHash: checkpoint.checkpoint.hash(),
+          checkpointNumber: checkpoint.checkpoint.number,
+          blockCount: checkpoint.checkpoint.blocks.length,
+          txCount: checkpoint.checkpoint.blocks.reduce((acc, b) => acc + b.body.txEffects.length, 0),
+          header: checkpoint.checkpoint.header.toInspect(),
+          archiveRoot: checkpoint.checkpoint.archive.root.toString(),
+          archiveNextLeafIndex: checkpoint.checkpoint.archive.nextAvailableLeafIndex,
+        });
+      }
+      lastRetrievedCheckpoint = validCheckpoints.at(-1) ?? lastRetrievedCheckpoint;
+      lastL1BlockWithCheckpoint = retrievedCheckpoints.at(-1)?.l1.blockNumber ?? lastL1BlockWithCheckpoint;
+    } while (searchEndBlock < currentL1BlockNumber);
+
+    // Important that we update AFTER inserting the blocks.
+    await updateProvenCheckpoint();
+
+    return { ...rollupStatus, lastRetrievedCheckpoint, lastL1BlockWithCheckpoint };
+  }
+
+  private async checkForNewCheckpointsBeforeL1SyncPoint(
+    status: RollupStatus,
+    blocksSynchedTo: bigint,
+    currentL1BlockNumber: bigint,
+  ): Promise<void> {
+    const { lastRetrievedCheckpoint, pendingCheckpointNumber } = status;
+    // Compare the last checkpoint we have (either retrieved in this round or loaded from store) with what the
+    // rollup contract told us was the latest one (pinned at the currentL1BlockNumber).
+    const latestLocalCheckpointNumber =
+      lastRetrievedCheckpoint?.checkpoint.number ?? (await this.store.getSynchedCheckpointNumber());
+    if (latestLocalCheckpointNumber < pendingCheckpointNumber) {
+      // Here we have consumed all logs until the `currentL1Block` we pinned at the beginning of the archiver loop,
+      // but still haven't reached the pending checkpoint according to the call to the rollup contract.
+      // We suspect an L1 reorg that added checkpoints *behind* us. If that is the case, it must have happened between
+      // the last checkpoint we saw and the current one, so we reset the last synched L1 block number. In the edge case
+      // we don't have one, we go back 2 L1 epochs, which is the deepest possible reorg (assuming Casper is working).
+      let latestLocalCheckpointArchive: string | undefined = undefined;
+      let targetL1BlockNumber = maxBigint(currentL1BlockNumber - 64n, 0n);
+      if (lastRetrievedCheckpoint) {
+        latestLocalCheckpointArchive = lastRetrievedCheckpoint.checkpoint.archive.root.toString();
+        targetL1BlockNumber = lastRetrievedCheckpoint.l1.blockNumber;
+      } else if (latestLocalCheckpointNumber > 0) {
+        const checkpoint = await this.store.getRangeOfCheckpoints(latestLocalCheckpointNumber, 1).then(([c]) => c);
+        latestLocalCheckpointArchive = checkpoint.archive.root.toString();
+        targetL1BlockNumber = checkpoint.l1.blockNumber;
+      }
+      this.log.warn(
+        `Failed to reach checkpoint ${pendingCheckpointNumber} at ${currentL1BlockNumber} (latest is ${latestLocalCheckpointNumber}). ` +
+          `Rolling back last synched L1 block number to ${targetL1BlockNumber}.`,
+        {
+          latestLocalCheckpointNumber,
+          latestLocalCheckpointArchive,
+          blocksSynchedTo,
+          currentL1BlockNumber,
+          ...status,
+        },
+      );
+      await this.store.setCheckpointSynchedL1BlockNumber(targetL1BlockNumber);
+    } else {
+      this.log.trace(`No new checkpoints behind L1 sync point to retrieve.`, {
+        latestLocalCheckpointNumber,
+        pendingCheckpointNumber,
+      });
+    }
+  }
+
+  private async getCheckpointHeader(number: CheckpointNumber) {
+    const checkpoint = await this.store.getCheckpointData(number);
+    if (!checkpoint) {
+      return undefined;
+    }
+    return checkpoint.header;
+  }
+}
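For orientation only: a minimal sketch (not part of the published package) of how the two public entry points visible in the diff above, `testEthereumNodeSynced()` and `syncFromL1(initialSyncComplete)`, might be driven in a polling loop. The loop shape, the interval, and the `runSyncLoop` name are hypothetical assumptions for illustration, not the package's actual runner.

```ts
// Hypothetical driver, for illustration only; it is not part of @aztec/archiver.
// It exercises the two public methods shown in the diff above.
type L1SyncLike = {
  testEthereumNodeSynced(): Promise<void>;
  syncFromL1(initialSyncComplete: boolean): Promise<void>;
};

async function runSyncLoop(sync: L1SyncLike, pollIntervalMs = 12_000): Promise<void> {
  let initialSyncComplete = false;
  while (true) {
    try {
      // Throws if the connected Ethereum node's latest timestamp drifts too far behind.
      await sync.testEthereumNodeSynced();
      // One sync iteration: L1-to-L2 messages first, then checkpoints, prune checks, and reorg handling.
      await sync.syncFromL1(initialSyncComplete);
      initialSyncComplete = true;
    } catch (err) {
      console.error('Archiver sync iteration failed', err);
    }
    await new Promise(resolve => setTimeout(resolve, pollIntervalMs));
  }
}
```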