@aztec/archiver 0.0.1-commit.d1cd2107c → 0.0.1-commit.d1da697d6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -6
- package/dest/archiver.d.ts +5 -7
- package/dest/archiver.d.ts.map +1 -1
- package/dest/archiver.js +56 -20
- package/dest/config.d.ts +3 -3
- package/dest/config.d.ts.map +1 -1
- package/dest/config.js +2 -1
- package/dest/errors.d.ts +34 -10
- package/dest/errors.d.ts.map +1 -1
- package/dest/errors.js +45 -16
- package/dest/factory.d.ts +3 -4
- package/dest/factory.d.ts.map +1 -1
- package/dest/factory.js +19 -18
- package/dest/l1/calldata_retriever.d.ts +1 -1
- package/dest/l1/calldata_retriever.d.ts.map +1 -1
- package/dest/l1/calldata_retriever.js +2 -1
- package/dest/l1/data_retrieval.d.ts +2 -2
- package/dest/l1/data_retrieval.d.ts.map +1 -1
- package/dest/l1/data_retrieval.js +13 -14
- package/dest/modules/data_source_base.d.ts +8 -6
- package/dest/modules/data_source_base.d.ts.map +1 -1
- package/dest/modules/data_source_base.js +11 -5
- package/dest/modules/data_store_updater.d.ts +14 -11
- package/dest/modules/data_store_updater.d.ts.map +1 -1
- package/dest/modules/data_store_updater.js +78 -76
- package/dest/modules/l1_synchronizer.d.ts +4 -3
- package/dest/modules/l1_synchronizer.d.ts.map +1 -1
- package/dest/modules/l1_synchronizer.js +160 -127
- package/dest/modules/validation.d.ts +1 -1
- package/dest/modules/validation.d.ts.map +1 -1
- package/dest/modules/validation.js +2 -2
- package/dest/store/block_store.d.ts +50 -16
- package/dest/store/block_store.d.ts.map +1 -1
- package/dest/store/block_store.js +288 -119
- package/dest/store/contract_class_store.d.ts +2 -3
- package/dest/store/contract_class_store.d.ts.map +1 -1
- package/dest/store/contract_class_store.js +7 -67
- package/dest/store/contract_instance_store.d.ts +1 -1
- package/dest/store/contract_instance_store.d.ts.map +1 -1
- package/dest/store/contract_instance_store.js +6 -2
- package/dest/store/kv_archiver_store.d.ts +45 -22
- package/dest/store/kv_archiver_store.d.ts.map +1 -1
- package/dest/store/kv_archiver_store.js +57 -27
- package/dest/store/l2_tips_cache.d.ts +2 -1
- package/dest/store/l2_tips_cache.d.ts.map +1 -1
- package/dest/store/l2_tips_cache.js +25 -5
- package/dest/store/log_store.d.ts +6 -3
- package/dest/store/log_store.d.ts.map +1 -1
- package/dest/store/log_store.js +93 -16
- package/dest/store/message_store.d.ts +5 -1
- package/dest/store/message_store.d.ts.map +1 -1
- package/dest/store/message_store.js +20 -8
- package/dest/test/fake_l1_state.d.ts +16 -1
- package/dest/test/fake_l1_state.d.ts.map +1 -1
- package/dest/test/fake_l1_state.js +77 -8
- package/dest/test/mock_l1_to_l2_message_source.d.ts +1 -1
- package/dest/test/mock_l1_to_l2_message_source.d.ts.map +1 -1
- package/dest/test/mock_l1_to_l2_message_source.js +2 -1
- package/dest/test/mock_l2_block_source.d.ts +9 -4
- package/dest/test/mock_l2_block_source.d.ts.map +1 -1
- package/dest/test/mock_l2_block_source.js +32 -7
- package/dest/test/noop_l1_archiver.d.ts +4 -1
- package/dest/test/noop_l1_archiver.d.ts.map +1 -1
- package/dest/test/noop_l1_archiver.js +5 -1
- package/package.json +13 -13
- package/src/archiver.ts +62 -21
- package/src/config.ts +8 -1
- package/src/errors.ts +70 -26
- package/src/factory.ts +19 -14
- package/src/l1/calldata_retriever.ts +2 -1
- package/src/l1/data_retrieval.ts +7 -11
- package/src/modules/data_source_base.ts +26 -7
- package/src/modules/data_store_updater.ts +91 -107
- package/src/modules/l1_synchronizer.ts +174 -156
- package/src/modules/validation.ts +2 -2
- package/src/store/block_store.ts +370 -140
- package/src/store/contract_class_store.ts +8 -106
- package/src/store/contract_instance_store.ts +8 -5
- package/src/store/kv_archiver_store.ts +81 -39
- package/src/store/l2_tips_cache.ts +50 -11
- package/src/store/log_store.ts +126 -27
- package/src/store/message_store.ts +26 -9
- package/src/structs/inbox_message.ts +1 -1
- package/src/test/fake_l1_state.ts +103 -13
- package/src/test/mock_l1_to_l2_message_source.ts +1 -0
- package/src/test/mock_l2_block_source.ts +46 -5
- package/src/test/noop_l1_archiver.ts +7 -1
--- a/package/src/modules/l1_synchronizer.ts
+++ b/package/src/modules/l1_synchronizer.ts
@@ -1,17 +1,19 @@
 import type { BlobClientInterface } from '@aztec/blob-client/client';
 import { EpochCache } from '@aztec/epoch-cache';
-import { InboxContract, RollupContract } from '@aztec/ethereum/contracts';
+import { InboxContract, type InboxContractState, RollupContract } from '@aztec/ethereum/contracts';
 import type { L1BlockId } from '@aztec/ethereum/l1-types';
 import type { ViemPublicClient, ViemPublicDebugClient } from '@aztec/ethereum/types';
+import { asyncPool } from '@aztec/foundation/async-pool';
 import { maxBigint } from '@aztec/foundation/bigint';
 import { BlockNumber, CheckpointNumber, EpochNumber } from '@aztec/foundation/branded-types';
-import { Buffer32 } from '@aztec/foundation/buffer';
+import { Buffer16, Buffer32 } from '@aztec/foundation/buffer';
 import { pick } from '@aztec/foundation/collection';
 import { Fr } from '@aztec/foundation/curves/bn254';
 import { type Logger, createLogger } from '@aztec/foundation/log';
+import { retryTimes } from '@aztec/foundation/retry';
 import { count } from '@aztec/foundation/string';
 import { DateProvider, Timer, elapsed } from '@aztec/foundation/timer';
-import { isDefined } from '@aztec/foundation/types';
+import { isDefined, isErrorClass } from '@aztec/foundation/types';
 import { type ArchiverEmitter, L2BlockSourceEvents, type ValidateCheckpointResult } from '@aztec/stdlib/block';
 import { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
 import { type L1RollupConstants, getEpochAtSlot, getSlotAtNextL1Block } from '@aztec/stdlib/epoch-helpers';
@@ -27,6 +29,7 @@ import {
 } from '../l1/data_retrieval.js';
 import type { KVArchiverDataStore } from '../store/kv_archiver_store.js';
 import type { L2TipsCache } from '../store/l2_tips_cache.js';
+import { MessageStoreError } from '../store/message_store.js';
 import type { InboxMessage } from '../structs/inbox_message.js';
 import { ArchiverDataStoreUpdater } from './data_store_updater.js';
 import type { ArchiverInstrumentation } from './instrumentation.js';
@@ -72,7 +75,6 @@ export class ArchiverL1Synchronizer implements Traceable {
     private readonly l1Constants: L1RollupConstants & {
       l1StartBlockHash: Buffer32;
       genesisArchiveRoot: Fr;
-      rollupManaLimit?: number;
     },
     private readonly events: ArchiverEmitter,
     tracer: Tracer,
@@ -121,10 +123,15 @@
 
   @trackSpan('Archiver.syncFromL1')
   public async syncFromL1(initialSyncComplete: boolean): Promise<void> {
+    // In between the various calls to L1, the block number can move meaning some of the following
+    // calls will return data for blocks that were not present during earlier calls. To combat this
+    // we ensure that all data retrieval methods only retrieve data up to the currentBlockNumber
+    // captured at the top of this function.
     const currentL1Block = await this.publicClient.getBlock({ includeTransactions: false });
     const currentL1BlockNumber = currentL1Block.number;
     const currentL1BlockHash = Buffer32.fromString(currentL1Block.hash);
     const currentL1Timestamp = currentL1Block.timestamp;
+    const currentL1BlockData = { l1BlockNumber: currentL1BlockNumber, l1BlockHash: currentL1BlockHash };
 
     if (this.l1BlockHash && currentL1BlockHash.equals(this.l1BlockHash)) {
       this.log.trace(`No new L1 blocks since last sync at L1 block ${this.l1BlockNumber}`);
@@ -141,45 +148,15 @@
       );
     }
 
-    // Load sync point for blocks
-    const {
-      blocksSynchedTo = this.l1Constants.l1StartBlock,
-      messagesSynchedTo = {
-        l1BlockNumber: this.l1Constants.l1StartBlock,
-        l1BlockHash: this.l1Constants.l1StartBlockHash,
-      },
-    } = await this.store.getSynchPoint();
+    // Load sync point for blocks defaulting to start block
+    const { blocksSynchedTo = this.l1Constants.l1StartBlock } = await this.store.getSynchPoint();
+    this.log.debug(`Starting new archiver sync iteration`, { blocksSynchedTo, currentL1BlockData });
 
-
-
-
-
-      currentL1BlockHash,
-    });
+    // Sync L1 to L2 messages. We retry this a few times since there are error conditions that reset the sync point, requiring a new iteration.
+    // Note that we cannot just wait for the l1 synchronizer to loop again, since the synchronizer would report as synced up to the current L1
+    // block, when that wouldn't be the case, since L1 to L2 messages would need another iteration.
+    await retryTimes(() => this.handleL1ToL2Messages(currentL1BlockData), 'Handling L1 to L2 messages', 3, 0.1);
 
-    // ********** Ensuring Consistency of data pulled from L1 **********
-
-    /**
-     * There are a number of calls in this sync operation to L1 for retrieving
-     * events and transaction data. There are a couple of things we need to bear in mind
-     * to ensure that data is read exactly once.
-     *
-     * The first is the problem of eventually consistent ETH service providers like Infura.
-     * Each L1 read operation will query data from the last L1 block that it saw emit its kind of data.
-     * (so pending L1 to L2 messages will read from the last L1 block that emitted a message and so on)
-     * This will mean the archiver will lag behind L1 and will only advance when there's L2-relevant activity on the chain.
-     *
-     * The second is that in between the various calls to L1, the block number can move meaning some
-     * of the following calls will return data for blocks that were not present during earlier calls.
-     * To combat this for the time being we simply ensure that all data retrieval methods only retrieve
-     * data up to the currentBlockNumber captured at the top of this function. We might want to improve on this
-     * in future but for the time being it should give us the guarantees that we need
-     */
-
-    // ********** Events that are processed per L1 block **********
-    await this.handleL1ToL2Messages(messagesSynchedTo, currentL1BlockNumber);
-
-    // ********** Events that are processed per checkpoint **********
     if (currentL1BlockNumber > blocksSynchedTo) {
       // First we retrieve new checkpoints and L2 blocks and store them in the DB. This will also update the
       // pending chain validation status, proven checkpoint number, and synched L1 block number.
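The sync loop now wraps `handleL1ToL2Messages` in `retryTimes(..., 3, 0.1)`: a failed attempt rolls the message syncpoint back, so the call must be retried within the same iteration rather than letting the synchronizer report itself as synced. A minimal sketch of this bounded-retry pattern follows; the helper and its `(fn, name, attempts, delaySeconds)` shape are assumptions for illustration, not the actual `@aztec/foundation/retry` implementation:

```ts
// Hypothetical bounded-retry helper in the spirit of retryTimes.
// `fn` resolves to true on success; retry up to `attempts` times with a fixed delay.
async function retryBounded(
  fn: () => Promise<boolean>,
  name: string,
  attempts: number,
  delaySeconds: number,
): Promise<void> {
  for (let attempt = 1; attempt <= attempts; attempt++) {
    if (await fn()) {
      return; // Success: local messages now match the remote inbox state.
    }
    if (attempt < attempts) {
      await new Promise(resolve => setTimeout(resolve, delaySeconds * 1000));
    }
  }
  throw new Error(`${name} failed after ${attempts} attempts`);
}
```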
@@ -217,6 +194,9 @@
       this.instrumentation.updateL1BlockHeight(currentL1BlockNumber);
     }
 
+    // Update the finalized L2 checkpoint based on L1 finality.
+    await this.updateFinalizedCheckpoint();
+
     // After syncing has completed, update the current l1 block number and timestamp,
     // otherwise we risk announcing to the world that we've synced to a given point,
     // but the corresponding blocks have not been processed (see #12631).
@@ -232,6 +212,30 @@
     });
   }
 
+  /** Query L1 for its finalized block and update the finalized checkpoint accordingly. */
+  private async updateFinalizedCheckpoint(): Promise<void> {
+    try {
+      const finalizedL1Block = await this.publicClient.getBlock({ blockTag: 'finalized', includeTransactions: false });
+      const finalizedL1BlockNumber = finalizedL1Block.number;
+      const finalizedCheckpointNumber = await this.rollup.getProvenCheckpointNumber({
+        blockNumber: finalizedL1BlockNumber,
+      });
+      const localFinalizedCheckpointNumber = await this.store.getFinalizedCheckpointNumber();
+      if (localFinalizedCheckpointNumber !== finalizedCheckpointNumber) {
+        await this.updater.setFinalizedCheckpointNumber(finalizedCheckpointNumber);
+        this.log.info(`Updated finalized chain to checkpoint ${finalizedCheckpointNumber}`, {
+          finalizedCheckpointNumber,
+          finalizedL1BlockNumber,
+        });
+      }
+    } catch (err: any) {
+      // The rollup contract may not exist at the finalized L1 block right after deployment.
+      if (!err?.message?.includes('returned no data')) {
+        this.log.warn(`Failed to update finalized checkpoint: ${err}`);
+      }
+    }
+  }
+
   /** Prune all proposed local blocks that should have been checkpointed by now. */
   private async pruneUncheckpointedBlocks(currentL1Timestamp: bigint) {
     const [lastCheckpointedBlockNumber, lastProposedBlockNumber] = await Promise.all([
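The new `updateFinalizedCheckpoint` ties L2 finality to L1 finality: whatever checkpoint the rollup had proven as of L1's `finalized` block can no longer reorg. A sketch of the idea using viem's real `getBlock({ blockTag: 'finalized' })` API; the `RollupReader` and `CheckpointStore` interfaces are hypothetical stand-ins for the archiver's own abstractions:

```ts
import { createPublicClient, http } from 'viem';
import { mainnet } from 'viem/chains';

// Hypothetical stand-ins for the archiver's rollup and store interfaces.
interface RollupReader {
  getProvenCheckpointNumber(opts: { blockNumber: bigint }): Promise<bigint>;
}
interface CheckpointStore {
  getFinalizedCheckpointNumber(): Promise<bigint>;
  setFinalizedCheckpointNumber(n: bigint): Promise<void>;
}

async function syncFinalized(rollup: RollupReader, store: CheckpointStore): Promise<void> {
  const client = createPublicClient({ chain: mainnet, transport: http() });
  // 'finalized' resolves to the latest L1 block that consensus has finalized.
  const finalized = await client.getBlock({ blockTag: 'finalized' });
  // Whatever the rollup had proven at that L1 block is final for L2 as well.
  const remote = await rollup.getProvenCheckpointNumber({ blockNumber: finalized.number });
  if ((await store.getFinalizedCheckpointNumber()) !== remote) {
    await store.setFinalizedCheckpointNumber(remote);
  }
}
```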
@@ -245,29 +249,32 @@
       return;
     }
 
-    // What's the slot
+    // What's the slot at the next L1 block? All blocks for slots strictly before this one should've been checkpointed by now.
+    const slotAtNextL1Block = getSlotAtNextL1Block(currentL1Timestamp, this.l1Constants);
     const firstUncheckpointedBlockNumber = BlockNumber(lastCheckpointedBlockNumber + 1);
+
+    // What's the slot of the first uncheckpointed block?
     const [firstUncheckpointedBlockHeader] = await this.store.getBlockHeaders(firstUncheckpointedBlockNumber, 1);
     const firstUncheckpointedBlockSlot = firstUncheckpointedBlockHeader?.getSlot();
 
-
-
+    if (firstUncheckpointedBlockSlot === undefined || firstUncheckpointedBlockSlot >= slotAtNextL1Block) {
+      return;
+    }
 
-    // Prune provisional blocks from slots that have ended without being checkpointed
-
-
-
-
-
-
+    // Prune provisional blocks from slots that have ended without being checkpointed.
+    // This also clears any proposed checkpoint whose blocks are being pruned.
+    this.log.warn(
+      `Pruning blocks after block ${lastCheckpointedBlockNumber} due to slot ${firstUncheckpointedBlockSlot} not being checkpointed`,
+      { firstUncheckpointedBlockHeader: firstUncheckpointedBlockHeader.toInspect(), slotAtNextL1Block },
+    );
+    const prunedBlocks = await this.updater.removeUncheckpointedBlocksAfter(lastCheckpointedBlockNumber);
 
-
-
-
-
-
-
-    }
+    if (prunedBlocks.length > 0) {
+      this.events.emit(L2BlockSourceEvents.L2PruneUncheckpointed, {
+        type: L2BlockSourceEvents.L2PruneUncheckpointed,
+        slotNumber: firstUncheckpointedBlockSlot,
+        blocks: prunedBlocks,
+      });
     }
   }
 
@@ -310,17 +317,20 @@
 
     const checkpointsToUnwind = localPendingCheckpointNumber - provenCheckpointNumber;
 
-
-
-
-    const
-
-
-      checkpoints
-        .filter(isDefined)
-        .map(cp => this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber))),
+    // Fetch checkpoints and blocks in bounded batches to avoid unbounded concurrent
+    // promises when the gap between local pending and proven checkpoint numbers is large.
+    const BATCH_SIZE = 10;
+    const indices = Array.from({ length: checkpointsToUnwind }, (_, i) => CheckpointNumber(i + pruneFrom));
+    const checkpoints = (await asyncPool(BATCH_SIZE, indices, idx => this.store.getCheckpointData(idx))).filter(
+      isDefined,
     );
-    const newBlocks =
+    const newBlocks = (
+      await asyncPool(BATCH_SIZE, checkpoints, cp =>
+        this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber)),
+      )
+    )
+      .filter(isDefined)
+      .flat();
 
     // Emit an event for listening services to react to the chain prune
     this.events.emit(L2BlockSourceEvents.L2PruneUnproven, {
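The unwind path above replaces an unbounded fan-out with `asyncPool(BATCH_SIZE, ...)`, so at most ten checkpoint reads are in flight even when the pending/proven gap is large. A simplified stand-in for such a pool (not the `@aztec/foundation/async-pool` implementation) looks like this:

```ts
// Minimal sketch of a bounded-concurrency pool: run `fn` over `items`
// with at most `limit` promises in flight at once, preserving result order.
async function boundedPool<T, R>(limit: number, items: T[], fn: (item: T) => Promise<R>): Promise<R[]> {
  const results: R[] = new Array(items.length);
  let next = 0;
  // Each worker repeatedly claims the next unprocessed index until none remain.
  const worker = async () => {
    while (next < items.length) {
      const i = next++;
      results[i] = await fn(items[i]);
    }
  };
  await Promise.all(Array.from({ length: Math.min(limit, items.length) }, worker));
  return results;
}
```

Because JavaScript is single-threaded, the `next++` claim between awaits is race-free, so each index is processed exactly once.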
@@ -358,63 +368,87 @@
   }
 
   @trackSpan('Archiver.handleL1ToL2Messages')
-  private async handleL1ToL2Messages(
-
-
-
+  private async handleL1ToL2Messages(currentL1Block: L1BlockId): Promise<boolean> {
+    // Load the syncpoint, which may have been updated in a previous iteration
+    const {
+      messagesSynchedTo = {
+        l1BlockNumber: this.l1Constants.l1StartBlock,
+        l1BlockHash: this.l1Constants.l1StartBlockHash,
+      },
+    } = await this.store.getSynchPoint();
+
+    // Nothing to do if L1 block number has not moved forward
+    const currentL1BlockNumber = currentL1Block.l1BlockNumber;
+    if (currentL1BlockNumber <= messagesSynchedTo.l1BlockNumber) {
+      return true;
     }
 
-    //
-    const localMessagesInserted = await this.store.getTotalL1ToL2MessageCount();
-    const localLastMessage = await this.store.getLastL1ToL2Message();
+    // Compare local message store state with the remote. If they match, we just advance the match pointer.
     const remoteMessagesState = await this.inbox.getState({ blockNumber: currentL1BlockNumber });
+    const localLastMessage = await this.store.getLastL1ToL2Message();
+    if (await this.localStateMatches(localLastMessage, remoteMessagesState)) {
+      this.log.trace(`Local L1 to L2 messages are already in sync with remote at L1 block ${currentL1BlockNumber}`);
+      await this.store.setMessageSyncState(currentL1Block, remoteMessagesState.treeInProgress);
+      return true;
+    }
 
-
-
-
-
-
+    // If not, then we are out of sync. Most likely there are new messages on the inbox, so we try retrieving them.
+    // However, it could also be the case that there was an L1 reorg and our syncpoint is no longer valid.
+    // If that's the case, we'd get an exception out of the message store since the rolling hash of the first message
+    // we try to insert would not match the one in the db, in which case we rollback to the last common message with L1.
+    try {
+      await this.retrieveAndStoreMessages(messagesSynchedTo.l1BlockNumber, currentL1BlockNumber);
+    } catch (error) {
+      if (isErrorClass(error, MessageStoreError)) {
+        this.log.warn(
+          `Failed to store L1 to L2 messages retrieved from L1: ${error.message}. Rolling back syncpoint to retry.`,
+          { inboxMessage: error.inboxMessage },
+        );
+        await this.rollbackL1ToL2Messages(remoteMessagesState.treeInProgress);
+        return false;
+      }
+      throw error;
+    }
 
-    //
-
-
-
-    ) {
-      this.log.
-        `
+    // Note that, if there are no new messages to insert, but there was an L1 reorg that pruned out last messages,
+    // we'd notice by comparing our local state with the remote one again, and seeing they don't match even after
+    // our sync attempt. In this case, we also rollback our syncpoint, and trigger a retry.
+    const localLastMessageAfterSync = await this.store.getLastL1ToL2Message();
+    if (!(await this.localStateMatches(localLastMessageAfterSync, remoteMessagesState))) {
+      this.log.warn(
+        `Local L1 to L2 messages state does not match remote after sync attempt. Rolling back syncpoint to retry.`,
+        { localLastMessageAfterSync, remoteMessagesState },
       );
-
+      await this.rollbackL1ToL2Messages(remoteMessagesState.treeInProgress);
+      return false;
     }
 
-    //
-
-
-
-      const remoteLastMessage = await this.retrieveL1ToL2Message(localLastMessage.leaf);
-      this.log.trace(`Retrieved remote message for local last`, { remoteLastMessage, localLastMessage });
-      if (!remoteLastMessage || !remoteLastMessage.rollingHash.equals(localLastMessage.rollingHash)) {
-        this.log.warn(`Rolling back L1 to L2 messages due to hash mismatch or msg not found.`, {
-          remoteLastMessage,
-          messagesSyncPoint,
-          localLastMessage,
-        });
+    // Advance the syncpoint after a successful sync
+    await this.store.setMessageSyncState(currentL1Block, remoteMessagesState.treeInProgress);
+    return true;
+  }
 
-
-
-
-
-
-
+  /** Checks if the local rolling hash and message count matches the remote state */
+  private async localStateMatches(localLastMessage: InboxMessage | undefined, remoteState: InboxContractState) {
+    const localMessageCount = await this.store.getTotalL1ToL2MessageCount();
+    this.log.trace(`Comparing local and remote inbox state`, { localMessageCount, localLastMessage, remoteState });
+
+    return (
+      remoteState.totalMessagesInserted === localMessageCount &&
+      remoteState.messagesRollingHash.equals(localLastMessage?.rollingHash ?? Buffer16.ZERO)
+    );
+  }
 
-
+  /** Retrieves L1 to L2 messages from L1 in batches and stores them. */
+  private async retrieveAndStoreMessages(fromL1Block: bigint, toL1Block: bigint): Promise<void> {
     let searchStartBlock: bigint = 0n;
-    let searchEndBlock: bigint =
+    let searchEndBlock: bigint = fromL1Block;
 
     let lastMessage: InboxMessage | undefined;
     let messageCount = 0;
 
     do {
-      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock,
+      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock, toL1Block);
       this.log.trace(`Retrieving L1 to L2 messages in L1 blocks ${searchStartBlock}-${searchEndBlock}`);
       const messages = await retrieveL1ToL2Messages(this.inbox, searchStartBlock, searchEndBlock);
       const timer = new Timer();
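`localStateMatches` relies on the inbox's rolling hash: every appended message folds its leaf into a running 16-byte hash, so the pair (total message count, last rolling hash) pins down the entire sequence, and a single comparison detects both missing and reorged messages. A sketch of that invariant follows; the actual hash construction used by the Inbox contract is not shown in this diff, so SHA-256 truncated to 16 bytes is an assumption for illustration:

```ts
import { createHash } from 'node:crypto';

// Assumed rolling-hash step: fold the next leaf into the previous hash and
// truncate to 16 bytes, mirroring the Buffer16 type used in the diff.
function nextRollingHash(prev: Buffer, leaf: Buffer): Buffer {
  return createHash('sha256').update(prev).update(leaf).digest().subarray(0, 16);
}

// Two append-only logs agree iff their lengths and final rolling hashes agree.
function statesMatch(
  local: { count: number; rollingHash: Buffer },
  remote: { totalMessagesInserted: number; messagesRollingHash: Buffer },
): boolean {
  return (
    remote.totalMessagesInserted === local.count &&
    remote.messagesRollingHash.equals(local.rollingHash)
  );
}
```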
@@ -426,81 +460,65 @@ export class ArchiverL1Synchronizer implements Traceable {
         lastMessage = msg;
         messageCount++;
       }
-    } while (searchEndBlock <
+    } while (searchEndBlock < toL1Block);
 
-    // Log stats for messages retrieved (if any).
     if (messageCount > 0) {
       this.log.info(
         `Retrieved ${messageCount} new L1 to L2 messages up to message with index ${lastMessage?.index} for checkpoint ${lastMessage?.checkpointNumber}`,
         { lastMessage, messageCount },
       );
     }
-
-    // Warn if the resulting rolling hash does not match the remote state we had retrieved.
-    if (lastMessage && !lastMessage.rollingHash.equals(remoteMessagesState.messagesRollingHash)) {
-      this.log.warn(`Last message retrieved rolling hash does not match remote state.`, {
-        lastMessage,
-        remoteMessagesState,
-      });
-    }
   }
 
-
-
-
-
-
-    do {
-      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock, currentL1BlockNumber);
-
-      const message = await retrieveL1ToL2Message(this.inbox, leaf, searchStartBlock, searchEndBlock);
-
-      if (message) {
-        return message;
-      }
-    } while (searchEndBlock < currentL1BlockNumber);
-
-    return undefined;
-  }
-
-  private async rollbackL1ToL2Messages(
-    localLastMessage: InboxMessage,
-    messagesSyncPoint: L1BlockId,
-  ): Promise<L1BlockId> {
+  /**
+   * Rolls back local L1 to L2 messages to the last common message with L1, and updates the syncpoint to the L1 block of that message.
+   * If no common message is found, rolls back all messages and sets the syncpoint to the start block.
+   */
+  private async rollbackL1ToL2Messages(remoteTreeInProgress: bigint): Promise<L1BlockId> {
     // Slowly go back through our messages until we find the last common message.
     // We could query the logs in batch as an optimization, but the depth of the reorg should not be deep, and this
    // is a very rare case, so it's fine to query one log at a time.
     let commonMsg: undefined | InboxMessage;
-
-
-
-    const
-
+    let messagesToDelete = 0;
+    this.log.verbose(`Searching most recent common L1 to L2 message`);
+    for await (const localMsg of this.store.iterateL1ToL2Messages({ reverse: true })) {
+      const remoteMsg = await retrieveL1ToL2Message(this.inbox, localMsg);
+      const logCtx = { remoteMsg, localMsg: localMsg };
+      if (remoteMsg && remoteMsg.rollingHash.equals(localMsg.rollingHash)) {
         this.log.verbose(
-          `Found most recent common L1 to L2 message at index ${
+          `Found most recent common L1 to L2 message at index ${localMsg.index} on L1 block ${localMsg.l1BlockNumber}`,
           logCtx,
         );
         commonMsg = remoteMsg;
         break;
       } else if (remoteMsg) {
-        this.log.debug(`Local L1 to L2 message with index ${
+        this.log.debug(`Local L1 to L2 message with index ${localMsg.index} has different rolling hash`, logCtx);
+        messagesToDelete++;
       } else {
-        this.log.debug(`Local L1 to L2 message with index ${
+        this.log.debug(`Local L1 to L2 message with index ${localMsg.index} not found on L1`, logCtx);
+        messagesToDelete++;
       }
     }
 
-    // Delete everything after the common message we found.
-
-
-
+    // Delete everything after the common message we found, if anything needs to be deleted.
+    // Do not exit early if there are no messages to delete, we still want to update the syncpoint.
+    if (messagesToDelete > 0) {
+      const lastGoodIndex = commonMsg?.index;
+      this.log.warn(`Rolling back all local L1 to L2 messages after index ${lastGoodIndex ?? 'initial'}`);
+      await this.store.removeL1ToL2Messages(lastGoodIndex !== undefined ? lastGoodIndex + 1n : 0n);
+    }
 
     // Update the syncpoint so the loop below reprocesses the changed messages. We go to the block before
     // the last common one, so we force reprocessing it, in case new messages were added on that same L1 block
     // after the last common message.
     const syncPointL1BlockNumber = commonMsg ? commonMsg.l1BlockNumber - 1n : this.l1Constants.l1StartBlock;
     const syncPointL1BlockHash = await this.getL1BlockHash(syncPointL1BlockNumber);
-    messagesSyncPoint = { l1BlockNumber: syncPointL1BlockNumber, l1BlockHash: syncPointL1BlockHash };
-    await this.store.
+    const messagesSyncPoint = { l1BlockNumber: syncPointL1BlockNumber, l1BlockHash: syncPointL1BlockHash };
+    await this.store.setMessageSyncState(messagesSyncPoint, remoteTreeInProgress);
+    this.log.verbose(`Updated messages syncpoint to L1 block ${syncPointL1BlockNumber}`, {
+      ...messagesSyncPoint,
+      remoteTreeInProgress,
+    });
     return messagesSyncPoint;
   }
 
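The rollback walks the local message log in reverse, probing L1 for each entry until it finds one whose rolling hash still matches; everything after that point is deleted and the syncpoint rewound. The skeleton of that search, with illustrative interfaces standing in for the store and retriever:

```ts
// Illustrative shapes; the real InboxMessage carries more fields.
interface Msg {
  index: bigint;
  rollingHash: Buffer;
}

// Walk the local append-only log backwards and return the last entry both
// sides agree on, or undefined if the entire local log must be rolled back.
async function findLastCommon(
  iterateLocalReverse: () => AsyncIterable<Msg>,
  fetchRemote: (local: Msg) => Promise<Msg | undefined>,
): Promise<Msg | undefined> {
  for await (const local of iterateLocalReverse()) {
    const remote = await fetchRemote(local);
    if (remote && remote.rollingHash.equals(local.rollingHash)) {
      return local; // Last common message; delete everything after it.
    }
  }
  return undefined;
}
```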
@@ -828,7 +846,7 @@
     const prunedCheckpointNumber = result.prunedBlocks[0].checkpointNumber;
     const prunedSlotNumber = result.prunedBlocks[0].header.globalVariables.slotNumber;
 
-    this.log.
+    this.log.info(
       `Pruned ${result.prunedBlocks.length} mismatching blocks for checkpoint ${prunedCheckpointNumber}`,
       { prunedBlocks: result.prunedBlocks.map(b => b.toBlockInfo()), prunedSlotNumber, prunedCheckpointNumber },
     );
--- a/package/src/modules/validation.ts
+++ b/package/src/modules/validation.ts
@@ -9,7 +9,7 @@ import {
   getAttestationInfoFromPayload,
 } from '@aztec/stdlib/block';
 import type { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
-import { type L1RollupConstants, getEpochAtSlot } from '@aztec/stdlib/epoch-helpers';
+import { type L1RollupConstants, computeQuorum, getEpochAtSlot } from '@aztec/stdlib/epoch-helpers';
 import { ConsensusPayload } from '@aztec/stdlib/p2p';
 
 export type { ValidateCheckpointResult };
@@ -66,7 +66,7 @@ export async function validateCheckpointAttestations(
     return { valid: true };
   }
 
-  const requiredAttestationCount =
+  const requiredAttestationCount = computeQuorum(committee.length);
 
   const failedValidationResult = <TReason extends ValidateCheckpointNegativeResult['reason']>(reason: TReason) => ({
     valid: false as const,