@aztec/archiver 0.0.1-commit.ec5f612 → 0.0.1-commit.ec7ac5448
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -6
- package/dest/archiver.d.ts +8 -7
- package/dest/archiver.d.ts.map +1 -1
- package/dest/archiver.js +77 -26
- package/dest/config.d.ts +3 -3
- package/dest/config.d.ts.map +1 -1
- package/dest/config.js +2 -1
- package/dest/errors.d.ts +34 -10
- package/dest/errors.d.ts.map +1 -1
- package/dest/errors.js +45 -16
- package/dest/factory.d.ts +4 -5
- package/dest/factory.d.ts.map +1 -1
- package/dest/factory.js +24 -21
- package/dest/l1/calldata_retriever.d.ts +1 -1
- package/dest/l1/calldata_retriever.d.ts.map +1 -1
- package/dest/l1/calldata_retriever.js +2 -1
- package/dest/l1/data_retrieval.d.ts +3 -3
- package/dest/l1/data_retrieval.d.ts.map +1 -1
- package/dest/l1/data_retrieval.js +14 -15
- package/dest/modules/data_source_base.d.ts +8 -6
- package/dest/modules/data_source_base.d.ts.map +1 -1
- package/dest/modules/data_source_base.js +11 -5
- package/dest/modules/data_store_updater.d.ts +18 -12
- package/dest/modules/data_store_updater.d.ts.map +1 -1
- package/dest/modules/data_store_updater.js +87 -77
- package/dest/modules/instrumentation.d.ts +4 -2
- package/dest/modules/instrumentation.d.ts.map +1 -1
- package/dest/modules/instrumentation.js +17 -6
- package/dest/modules/l1_synchronizer.d.ts +4 -2
- package/dest/modules/l1_synchronizer.d.ts.map +1 -1
- package/dest/modules/l1_synchronizer.js +166 -129
- package/dest/modules/validation.d.ts +1 -1
- package/dest/modules/validation.d.ts.map +1 -1
- package/dest/modules/validation.js +2 -2
- package/dest/store/block_store.d.ts +50 -16
- package/dest/store/block_store.d.ts.map +1 -1
- package/dest/store/block_store.js +288 -119
- package/dest/store/contract_class_store.d.ts +2 -3
- package/dest/store/contract_class_store.d.ts.map +1 -1
- package/dest/store/contract_class_store.js +7 -67
- package/dest/store/contract_instance_store.d.ts +1 -1
- package/dest/store/contract_instance_store.d.ts.map +1 -1
- package/dest/store/contract_instance_store.js +6 -2
- package/dest/store/kv_archiver_store.d.ts +45 -22
- package/dest/store/kv_archiver_store.d.ts.map +1 -1
- package/dest/store/kv_archiver_store.js +57 -27
- package/dest/store/l2_tips_cache.d.ts +2 -1
- package/dest/store/l2_tips_cache.d.ts.map +1 -1
- package/dest/store/l2_tips_cache.js +25 -5
- package/dest/store/log_store.d.ts +6 -3
- package/dest/store/log_store.d.ts.map +1 -1
- package/dest/store/log_store.js +93 -16
- package/dest/store/message_store.d.ts +5 -1
- package/dest/store/message_store.d.ts.map +1 -1
- package/dest/store/message_store.js +21 -9
- package/dest/test/fake_l1_state.d.ts +16 -1
- package/dest/test/fake_l1_state.d.ts.map +1 -1
- package/dest/test/fake_l1_state.js +77 -8
- package/dest/test/mock_l1_to_l2_message_source.d.ts +1 -1
- package/dest/test/mock_l1_to_l2_message_source.d.ts.map +1 -1
- package/dest/test/mock_l1_to_l2_message_source.js +2 -1
- package/dest/test/mock_l2_block_source.d.ts +10 -4
- package/dest/test/mock_l2_block_source.d.ts.map +1 -1
- package/dest/test/mock_l2_block_source.js +35 -7
- package/dest/test/mock_structs.d.ts +4 -1
- package/dest/test/mock_structs.d.ts.map +1 -1
- package/dest/test/mock_structs.js +13 -1
- package/dest/test/noop_l1_archiver.d.ts +4 -1
- package/dest/test/noop_l1_archiver.d.ts.map +1 -1
- package/dest/test/noop_l1_archiver.js +5 -2
- package/package.json +13 -13
- package/src/archiver.ts +98 -31
- package/src/config.ts +8 -1
- package/src/errors.ts +70 -26
- package/src/factory.ts +23 -15
- package/src/l1/calldata_retriever.ts +2 -1
- package/src/l1/data_retrieval.ts +8 -12
- package/src/modules/data_source_base.ts +26 -7
- package/src/modules/data_store_updater.ts +96 -107
- package/src/modules/instrumentation.ts +19 -7
- package/src/modules/l1_synchronizer.ts +189 -161
- package/src/modules/validation.ts +2 -2
- package/src/store/block_store.ts +370 -140
- package/src/store/contract_class_store.ts +8 -106
- package/src/store/contract_instance_store.ts +8 -5
- package/src/store/kv_archiver_store.ts +81 -39
- package/src/store/l2_tips_cache.ts +50 -11
- package/src/store/log_store.ts +126 -27
- package/src/store/message_store.ts +27 -10
- package/src/structs/inbox_message.ts +1 -1
- package/src/test/fake_l1_state.ts +103 -13
- package/src/test/mock_l1_to_l2_message_source.ts +1 -0
- package/src/test/mock_l2_block_source.ts +52 -5
- package/src/test/mock_structs.ts +20 -6
- package/src/test/noop_l1_archiver.ts +7 -2
package/src/modules/l1_synchronizer.ts

@@ -1,17 +1,19 @@
 import type { BlobClientInterface } from '@aztec/blob-client/client';
 import { EpochCache } from '@aztec/epoch-cache';
-import { InboxContract, RollupContract } from '@aztec/ethereum/contracts';
+import { InboxContract, type InboxContractState, RollupContract } from '@aztec/ethereum/contracts';
 import type { L1BlockId } from '@aztec/ethereum/l1-types';
 import type { ViemPublicClient, ViemPublicDebugClient } from '@aztec/ethereum/types';
+import { asyncPool } from '@aztec/foundation/async-pool';
 import { maxBigint } from '@aztec/foundation/bigint';
 import { BlockNumber, CheckpointNumber, EpochNumber } from '@aztec/foundation/branded-types';
-import { Buffer32 } from '@aztec/foundation/buffer';
+import { Buffer16, Buffer32 } from '@aztec/foundation/buffer';
 import { pick } from '@aztec/foundation/collection';
 import { Fr } from '@aztec/foundation/curves/bn254';
 import { type Logger, createLogger } from '@aztec/foundation/log';
+import { retryTimes } from '@aztec/foundation/retry';
 import { count } from '@aztec/foundation/string';
 import { DateProvider, Timer, elapsed } from '@aztec/foundation/timer';
-import { isDefined } from '@aztec/foundation/types';
+import { isDefined, isErrorClass } from '@aztec/foundation/types';
 import { type ArchiverEmitter, L2BlockSourceEvents, type ValidateCheckpointResult } from '@aztec/stdlib/block';
 import { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
 import { type L1RollupConstants, getEpochAtSlot, getSlotAtNextL1Block } from '@aztec/stdlib/epoch-helpers';
@@ -27,6 +29,7 @@ import {
 } from '../l1/data_retrieval.js';
 import type { KVArchiverDataStore } from '../store/kv_archiver_store.js';
 import type { L2TipsCache } from '../store/l2_tips_cache.js';
+import { MessageStoreError } from '../store/message_store.js';
 import type { InboxMessage } from '../structs/inbox_message.js';
 import { ArchiverDataStoreUpdater } from './data_store_updater.js';
 import type { ArchiverInstrumentation } from './instrumentation.js';
@@ -69,13 +72,18 @@ export class ArchiverL1Synchronizer implements Traceable {
     private readonly epochCache: EpochCache,
     private readonly dateProvider: DateProvider,
     private readonly instrumentation: ArchiverInstrumentation,
-    private readonly l1Constants: L1RollupConstants & {
+    private readonly l1Constants: L1RollupConstants & {
+      l1StartBlockHash: Buffer32;
+      genesisArchiveRoot: Fr;
+    },
     private readonly events: ArchiverEmitter,
     tracer: Tracer,
     l2TipsCache?: L2TipsCache,
     private readonly log: Logger = createLogger('archiver:l1-sync'),
   ) {
-    this.updater = new ArchiverDataStoreUpdater(this.store, l2TipsCache
+    this.updater = new ArchiverDataStoreUpdater(this.store, l2TipsCache, {
+      rollupManaLimit: l1Constants.rollupManaLimit,
+    });
     this.tracer = tracer;
   }
 
@@ -115,10 +123,15 @@
 
   @trackSpan('Archiver.syncFromL1')
   public async syncFromL1(initialSyncComplete: boolean): Promise<void> {
+    // In between the various calls to L1, the block number can move meaning some of the following
+    // calls will return data for blocks that were not present during earlier calls. To combat this
+    // we ensure that all data retrieval methods only retrieve data up to the currentBlockNumber
+    // captured at the top of this function.
     const currentL1Block = await this.publicClient.getBlock({ includeTransactions: false });
     const currentL1BlockNumber = currentL1Block.number;
     const currentL1BlockHash = Buffer32.fromString(currentL1Block.hash);
     const currentL1Timestamp = currentL1Block.timestamp;
+    const currentL1BlockData = { l1BlockNumber: currentL1BlockNumber, l1BlockHash: currentL1BlockHash };
 
     if (this.l1BlockHash && currentL1BlockHash.equals(this.l1BlockHash)) {
       this.log.trace(`No new L1 blocks since last sync at L1 block ${this.l1BlockNumber}`);
@@ -135,45 +148,15 @@
       );
     }
 
-    // Load sync point for blocks
-    const {
-
-      messagesSynchedTo = {
-        l1BlockNumber: this.l1Constants.l1StartBlock,
-        l1BlockHash: this.l1Constants.l1StartBlockHash,
-      },
-    } = await this.store.getSynchPoint();
+    // Load sync point for blocks defaulting to start block
+    const { blocksSynchedTo = this.l1Constants.l1StartBlock } = await this.store.getSynchPoint();
+    this.log.debug(`Starting new archiver sync iteration`, { blocksSynchedTo, currentL1BlockData });
 
-
-
-
-
-      currentL1BlockHash,
-    });
+    // Sync L1 to L2 messages. We retry this a few times since there are error conditions that reset the sync point, requiring a new iteration.
+    // Note that we cannot just wait for the l1 synchronizer to loop again, since the synchronizer would report as synced up to the current L1
+    // block, when that wouldn't be the case, since L1 to L2 messages would need another iteration.
+    await retryTimes(() => this.handleL1ToL2Messages(currentL1BlockData), 'Handling L1 to L2 messages', 3, 0.1);
 
-    // ********** Ensuring Consistency of data pulled from L1 **********
-
-    /**
-     * There are a number of calls in this sync operation to L1 for retrieving
-     * events and transaction data. There are a couple of things we need to bear in mind
-     * to ensure that data is read exactly once.
-     *
-     * The first is the problem of eventually consistent ETH service providers like Infura.
-     * Each L1 read operation will query data from the last L1 block that it saw emit its kind of data.
-     * (so pending L1 to L2 messages will read from the last L1 block that emitted a message and so on)
-     * This will mean the archiver will lag behind L1 and will only advance when there's L2-relevant activity on the chain.
-     *
-     * The second is that in between the various calls to L1, the block number can move meaning some
-     * of the following calls will return data for blocks that were not present during earlier calls.
-     * To combat this for the time being we simply ensure that all data retrieval methods only retrieve
-     * data up to the currentBlockNumber captured at the top of this function. We might want to improve on this
-     * in future but for the time being it should give us the guarantees that we need
-     */
-
-    // ********** Events that are processed per L1 block **********
-    await this.handleL1ToL2Messages(messagesSynchedTo, currentL1BlockNumber);
-
-    // ********** Events that are processed per checkpoint **********
     if (currentL1BlockNumber > blocksSynchedTo) {
       // First we retrieve new checkpoints and L2 blocks and store them in the DB. This will also update the
       // pending chain validation status, proven checkpoint number, and synched L1 block number.
@@ -211,6 +194,9 @@
       this.instrumentation.updateL1BlockHeight(currentL1BlockNumber);
     }
 
+    // Update the finalized L2 checkpoint based on L1 finality.
+    await this.updateFinalizedCheckpoint();
+
     // After syncing has completed, update the current l1 block number and timestamp,
     // otherwise we risk announcing to the world that we've synced to a given point,
     // but the corresponding blocks have not been processed (see #12631).
@@ -226,6 +212,30 @@
     });
   }
 
+  /** Query L1 for its finalized block and update the finalized checkpoint accordingly. */
+  private async updateFinalizedCheckpoint(): Promise<void> {
+    try {
+      const finalizedL1Block = await this.publicClient.getBlock({ blockTag: 'finalized', includeTransactions: false });
+      const finalizedL1BlockNumber = finalizedL1Block.number;
+      const finalizedCheckpointNumber = await this.rollup.getProvenCheckpointNumber({
+        blockNumber: finalizedL1BlockNumber,
+      });
+      const localFinalizedCheckpointNumber = await this.store.getFinalizedCheckpointNumber();
+      if (localFinalizedCheckpointNumber !== finalizedCheckpointNumber) {
+        await this.updater.setFinalizedCheckpointNumber(finalizedCheckpointNumber);
+        this.log.info(`Updated finalized chain to checkpoint ${finalizedCheckpointNumber}`, {
+          finalizedCheckpointNumber,
+          finalizedL1BlockNumber,
+        });
+      }
+    } catch (err: any) {
+      // The rollup contract may not exist at the finalized L1 block right after deployment.
+      if (!err?.message?.includes('returned no data')) {
+        this.log.warn(`Failed to update finalized checkpoint: ${err}`);
+      }
+    }
+  }
+
   /** Prune all proposed local blocks that should have been checkpointed by now. */
   private async pruneUncheckpointedBlocks(currentL1Timestamp: bigint) {
     const [lastCheckpointedBlockNumber, lastProposedBlockNumber] = await Promise.all([
@@ -239,29 +249,33 @@
       return;
     }
 
-    // What's the slot
+    // What's the slot at the next L1 block? All blocks for slots strictly before this one should've been checkpointed by now.
+    const slotAtNextL1Block = getSlotAtNextL1Block(currentL1Timestamp, this.l1Constants);
     const firstUncheckpointedBlockNumber = BlockNumber(lastCheckpointedBlockNumber + 1);
+
+    // What's the slot of the first uncheckpointed block?
     const [firstUncheckpointedBlockHeader] = await this.store.getBlockHeaders(firstUncheckpointedBlockNumber, 1);
     const firstUncheckpointedBlockSlot = firstUncheckpointedBlockHeader?.getSlot();
 
-
-
+    if (firstUncheckpointedBlockSlot === undefined || firstUncheckpointedBlockSlot >= slotAtNextL1Block) {
+      return;
+    }
 
-    // Prune provisional blocks from slots that have ended without being checkpointed
-
-
-
-
-
-    const prunedBlocks = await this.updater.removeUncheckpointedBlocksAfter(lastCheckpointedBlockNumber);
+    // Prune provisional blocks from slots that have ended without being checkpointed.
+    // This also clears any proposed checkpoint whose blocks are being pruned.
+    this.log.warn(
+      `Pruning blocks after block ${lastCheckpointedBlockNumber} due to slot ${firstUncheckpointedBlockSlot} not being checkpointed`,
+      { firstUncheckpointedBlockHeader: firstUncheckpointedBlockHeader.toInspect(), slotAtNextL1Block },
+    );
 
-
-
-
-
-
-
-
+    const prunedBlocks = await this.updater.removeUncheckpointedBlocksAfter(lastCheckpointedBlockNumber);
+
+    if (prunedBlocks.length > 0) {
+      this.events.emit(L2BlockSourceEvents.L2PruneUncheckpointed, {
+        type: L2BlockSourceEvents.L2PruneUncheckpointed,
+        slotNumber: firstUncheckpointedBlockSlot,
+        blocks: prunedBlocks,
+      });
     }
   }
 
@@ -304,17 +318,20 @@
 
     const checkpointsToUnwind = localPendingCheckpointNumber - provenCheckpointNumber;
 
-
-
-
-    const
-
-
-      checkpoints
-        .filter(isDefined)
-        .map(cp => this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber))),
+    // Fetch checkpoints and blocks in bounded batches to avoid unbounded concurrent
+    // promises when the gap between local pending and proven checkpoint numbers is large.
+    const BATCH_SIZE = 10;
+    const indices = Array.from({ length: checkpointsToUnwind }, (_, i) => CheckpointNumber(i + pruneFrom));
+    const checkpoints = (await asyncPool(BATCH_SIZE, indices, idx => this.store.getCheckpointData(idx))).filter(
+      isDefined,
     );
-    const newBlocks =
+    const newBlocks = (
+      await asyncPool(BATCH_SIZE, checkpoints, cp =>
+        this.store.getBlocksForCheckpoint(CheckpointNumber(cp.checkpointNumber)),
+      )
+    )
+      .filter(isDefined)
+      .flat();
 
     // Emit an event for listening services to react to the chain prune
     this.events.emit(L2BlockSourceEvents.L2PruneUnproven, {
@@ -352,63 +369,87 @@
   }
 
   @trackSpan('Archiver.handleL1ToL2Messages')
-  private async handleL1ToL2Messages(
-
-
-
+  private async handleL1ToL2Messages(currentL1Block: L1BlockId): Promise<boolean> {
+    // Load the syncpoint, which may have been updated in a previous iteration
+    const {
+      messagesSynchedTo = {
+        l1BlockNumber: this.l1Constants.l1StartBlock,
+        l1BlockHash: this.l1Constants.l1StartBlockHash,
+      },
+    } = await this.store.getSynchPoint();
+
+    // Nothing to do if L1 block number has not moved forward
+    const currentL1BlockNumber = currentL1Block.l1BlockNumber;
+    if (currentL1BlockNumber <= messagesSynchedTo.l1BlockNumber) {
+      return true;
     }
 
-    //
-    const localMessagesInserted = await this.store.getTotalL1ToL2MessageCount();
-    const localLastMessage = await this.store.getLastL1ToL2Message();
+    // Compare local message store state with the remote. If they match, we just advance the match pointer.
     const remoteMessagesState = await this.inbox.getState({ blockNumber: currentL1BlockNumber });
+    const localLastMessage = await this.store.getLastL1ToL2Message();
+    if (await this.localStateMatches(localLastMessage, remoteMessagesState)) {
+      this.log.trace(`Local L1 to L2 messages are already in sync with remote at L1 block ${currentL1BlockNumber}`);
+      await this.store.setMessageSyncState(currentL1Block, remoteMessagesState.treeInProgress);
+      return true;
+    }
 
-
-
-
-
-
+    // If not, then we are out of sync. Most likely there are new messages on the inbox, so we try retrieving them.
+    // However, it could also be the case that there was an L1 reorg and our syncpoint is no longer valid.
+    // If that's the case, we'd get an exception out of the message store since the rolling hash of the first message
+    // we try to insert would not match the one in the db, in which case we rollback to the last common message with L1.
+    try {
+      await this.retrieveAndStoreMessages(messagesSynchedTo.l1BlockNumber, currentL1BlockNumber);
+    } catch (error) {
+      if (isErrorClass(error, MessageStoreError)) {
+        this.log.warn(
+          `Failed to store L1 to L2 messages retrieved from L1: ${error.message}. Rolling back syncpoint to retry.`,
+          { inboxMessage: error.inboxMessage },
+        );
+        await this.rollbackL1ToL2Messages(remoteMessagesState.treeInProgress);
+        return false;
+      }
+      throw error;
+    }
 
-    //
-
-
-
-    ) {
-      this.log.
-        `
+    // Note that, if there are no new messages to insert, but there was an L1 reorg that pruned out last messages,
+    // we'd notice by comparing our local state with the remote one again, and seeing they don't match even after
+    // our sync attempt. In this case, we also rollback our syncpoint, and trigger a retry.
+    const localLastMessageAfterSync = await this.store.getLastL1ToL2Message();
+    if (!(await this.localStateMatches(localLastMessageAfterSync, remoteMessagesState))) {
+      this.log.warn(
+        `Local L1 to L2 messages state does not match remote after sync attempt. Rolling back syncpoint to retry.`,
+        { localLastMessageAfterSync, remoteMessagesState },
      );
-
+      await this.rollbackL1ToL2Messages(remoteMessagesState.treeInProgress);
+      return false;
    }
 
-    //
-
-
-
-      const remoteLastMessage = await this.retrieveL1ToL2Message(localLastMessage.leaf);
-      this.log.trace(`Retrieved remote message for local last`, { remoteLastMessage, localLastMessage });
-      if (!remoteLastMessage || !remoteLastMessage.rollingHash.equals(localLastMessage.rollingHash)) {
-        this.log.warn(`Rolling back L1 to L2 messages due to hash mismatch or msg not found.`, {
-          remoteLastMessage,
-          messagesSyncPoint,
-          localLastMessage,
-        });
+    // Advance the syncpoint after a successful sync
+    await this.store.setMessageSyncState(currentL1Block, remoteMessagesState.treeInProgress);
+    return true;
+  }
 
-
-
-
-
-    }
-  }
+  /** Checks if the local rolling hash and message count matches the remote state */
+  private async localStateMatches(localLastMessage: InboxMessage | undefined, remoteState: InboxContractState) {
+    const localMessageCount = await this.store.getTotalL1ToL2MessageCount();
+    this.log.trace(`Comparing local and remote inbox state`, { localMessageCount, localLastMessage, remoteState });
 
-
+    return (
+      remoteState.totalMessagesInserted === localMessageCount &&
+      remoteState.messagesRollingHash.equals(localLastMessage?.rollingHash ?? Buffer16.ZERO)
    );
+  }
+
+  /** Retrieves L1 to L2 messages from L1 in batches and stores them. */
+  private async retrieveAndStoreMessages(fromL1Block: bigint, toL1Block: bigint): Promise<void> {
     let searchStartBlock: bigint = 0n;
-    let searchEndBlock: bigint =
+    let searchEndBlock: bigint = fromL1Block;
 
     let lastMessage: InboxMessage | undefined;
     let messageCount = 0;
 
     do {
-      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock,
+      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock, toL1Block);
      this.log.trace(`Retrieving L1 to L2 messages in L1 blocks ${searchStartBlock}-${searchEndBlock}`);
      const messages = await retrieveL1ToL2Messages(this.inbox, searchStartBlock, searchEndBlock);
      const timer = new Timer();
@@ -420,81 +461,65 @@
        lastMessage = msg;
        messageCount++;
      }
-    } while (searchEndBlock <
+    } while (searchEndBlock < toL1Block);
 
-    // Log stats for messages retrieved (if any).
     if (messageCount > 0) {
       this.log.info(
         `Retrieved ${messageCount} new L1 to L2 messages up to message with index ${lastMessage?.index} for checkpoint ${lastMessage?.checkpointNumber}`,
         { lastMessage, messageCount },
       );
     }
-
-    // Warn if the resulting rolling hash does not match the remote state we had retrieved.
-    if (lastMessage && !lastMessage.rollingHash.equals(remoteMessagesState.messagesRollingHash)) {
-      this.log.warn(`Last message retrieved rolling hash does not match remote state.`, {
-        lastMessage,
-        remoteMessagesState,
-      });
-    }
   }
 
-
-
-
-
-
-    do {
-      [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock, currentL1BlockNumber);
-
-      const message = await retrieveL1ToL2Message(this.inbox, leaf, searchStartBlock, searchEndBlock);
-
-      if (message) {
-        return message;
-      }
-    } while (searchEndBlock < currentL1BlockNumber);
-
-    return undefined;
-  }
-
-  private async rollbackL1ToL2Messages(
-    localLastMessage: InboxMessage,
-    messagesSyncPoint: L1BlockId,
-  ): Promise<L1BlockId> {
+  /**
+   * Rolls back local L1 to L2 messages to the last common message with L1, and updates the syncpoint to the L1 block of that message.
+   * If no common message is found, rolls back all messages and sets the syncpoint to the start block.
+   */
+  private async rollbackL1ToL2Messages(remoteTreeInProgress: bigint): Promise<L1BlockId> {
     // Slowly go back through our messages until we find the last common message.
     // We could query the logs in batch as an optimization, but the depth of the reorg should not be deep, and this
     // is a very rare case, so it's fine to query one log at a time.
     let commonMsg: undefined | InboxMessage;
-
-
-
-    const
-
+    let messagesToDelete = 0;
+    this.log.verbose(`Searching most recent common L1 to L2 message`);
+    for await (const localMsg of this.store.iterateL1ToL2Messages({ reverse: true })) {
+      const remoteMsg = await retrieveL1ToL2Message(this.inbox, localMsg);
+      const logCtx = { remoteMsg, localMsg: localMsg };
+      if (remoteMsg && remoteMsg.rollingHash.equals(localMsg.rollingHash)) {
        this.log.verbose(
-          `Found most recent common L1 to L2 message at index ${
+          `Found most recent common L1 to L2 message at index ${localMsg.index} on L1 block ${localMsg.l1BlockNumber}`,
          logCtx,
        );
        commonMsg = remoteMsg;
        break;
      } else if (remoteMsg) {
-        this.log.debug(`Local L1 to L2 message with index ${
+        this.log.debug(`Local L1 to L2 message with index ${localMsg.index} has different rolling hash`, logCtx);
+        messagesToDelete++;
      } else {
-        this.log.debug(`Local L1 to L2 message with index ${
+        this.log.debug(`Local L1 to L2 message with index ${localMsg.index} not found on L1`, logCtx);
+        messagesToDelete++;
      }
    }
 
-    // Delete everything after the common message we found.
-
-
-
+    // Delete everything after the common message we found, if anything needs to be deleted.
+    // Do not exit early if there are no messages to delete, we still want to update the syncpoint.
+    if (messagesToDelete > 0) {
+      const lastGoodIndex = commonMsg?.index;
+      this.log.warn(`Rolling back all local L1 to L2 messages after index ${lastGoodIndex ?? 'initial'}`);
+      await this.store.removeL1ToL2Messages(lastGoodIndex !== undefined ? lastGoodIndex + 1n : 0n);
+    }
 
    // Update the syncpoint so the loop below reprocesses the changed messages. We go to the block before
    // the last common one, so we force reprocessing it, in case new messages were added on that same L1 block
    // after the last common message.
    const syncPointL1BlockNumber = commonMsg ? commonMsg.l1BlockNumber - 1n : this.l1Constants.l1StartBlock;
    const syncPointL1BlockHash = await this.getL1BlockHash(syncPointL1BlockNumber);
-    messagesSyncPoint = { l1BlockNumber: syncPointL1BlockNumber, l1BlockHash: syncPointL1BlockHash };
-    await this.store.
+    const messagesSyncPoint = { l1BlockNumber: syncPointL1BlockNumber, l1BlockHash: syncPointL1BlockHash };
+    await this.store.setMessageSyncState(messagesSyncPoint, remoteTreeInProgress);
+    this.log.verbose(`Updated messages syncpoint to L1 block ${syncPointL1BlockNumber}`, {
+      ...messagesSyncPoint,
+      remoteTreeInProgress,
+    });
    return messagesSyncPoint;
  }
 
@@ -812,17 +837,20 @@
        this.updater.addCheckpoints(validCheckpoints, updatedValidationResult),
      ),
    );
-
-
-
-
+
+    if (validCheckpoints.length > 0) {
+      this.instrumentation.processNewCheckpointedBlocks(
+        processDuration / validCheckpoints.length,
+        validCheckpoints.flatMap(c => c.checkpoint.blocks),
+      );
+    }
 
    // If blocks were pruned due to conflict with L1 checkpoints, emit event
    if (result.prunedBlocks && result.prunedBlocks.length > 0) {
      const prunedCheckpointNumber = result.prunedBlocks[0].checkpointNumber;
      const prunedSlotNumber = result.prunedBlocks[0].header.globalVariables.slotNumber;
 
-      this.log.
+      this.log.info(
        `Pruned ${result.prunedBlocks.length} mismatching blocks for checkpoint ${prunedCheckpointNumber}`,
        { prunedBlocks: result.prunedBlocks.map(b => b.toBlockInfo()), prunedSlotNumber, prunedCheckpointNumber },
      );

package/src/modules/validation.ts

@@ -9,7 +9,7 @@ import {
   getAttestationInfoFromPayload,
 } from '@aztec/stdlib/block';
 import type { PublishedCheckpoint } from '@aztec/stdlib/checkpoint';
-import { type L1RollupConstants, getEpochAtSlot } from '@aztec/stdlib/epoch-helpers';
+import { type L1RollupConstants, computeQuorum, getEpochAtSlot } from '@aztec/stdlib/epoch-helpers';
 import { ConsensusPayload } from '@aztec/stdlib/p2p';
 
 export type { ValidateCheckpointResult };
@@ -66,7 +66,7 @@ export async function validateCheckpointAttestations(
     return { valid: true };
   }
 
-  const requiredAttestationCount =
+  const requiredAttestationCount = computeQuorum(committee.length);
 
   const failedValidationResult = <TReason extends ValidateCheckpointNegativeResult['reason']>(reason: TReason) => ({
     valid: false as const,
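
The central behavioral change in the l1_synchronizer diff is that L1-to-L2 message syncing becomes a retriable step: `handleL1ToL2Messages` now returns `false` after it has rolled back its syncpoint (for example on a rolling-hash mismatch after an L1 reorg), and `syncFromL1` wraps it in `retryTimes(..., 3, 0.1)` so the archiver does not report itself as synced while messages still need another pass. The following is a minimal, self-contained sketch of that retry pattern; `retryTimesSketch`, `SyncStep`, and `example` are hypothetical names for illustration and do not reproduce the actual `@aztec/foundation/retry` implementation.

```typescript
// Illustrative sketch only, not the package's implementation.
type SyncStep = () => Promise<boolean>;

// Hypothetical stand-in for retryTimes: run the step up to `attempts` times,
// waiting `delaySeconds` between attempts, and fail if it never succeeds.
async function retryTimesSketch(step: SyncStep, name: string, attempts: number, delaySeconds: number): Promise<void> {
  for (let i = 1; i <= attempts; i++) {
    if (await step()) {
      return; // Step reported success: local messages match the remote inbox state.
    }
    console.warn(`${name} did not complete on attempt ${i}, retrying after rollback`);
    await new Promise(resolve => setTimeout(resolve, delaySeconds * 1000));
  }
  throw new Error(`${name} failed after ${attempts} attempts`);
}

// Usage shaped like the new syncFromL1 body: the step returns false only after
// it has already rolled its syncpoint back, so the next attempt re-reads
// messages from the rolled-back L1 block instead of silently skipping them.
async function example() {
  let attempt = 0;
  const handleL1ToL2Messages: SyncStep = async () => ++attempt > 1; // succeeds on the 2nd try
  await retryTimesSketch(handleL1ToL2Messages, 'Handling L1 to L2 messages', 3, 0.1);
}

void example();
```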
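The prune-unproven path also switches from mapping every checkpoint to its own promise to fetching through `asyncPool(BATCH_SIZE, ...)` with a batch size of 10, bounding concurrent requests when the gap between the local pending and proven checkpoint numbers is large. Below is a rough sketch of what such a bounded pool does; `asyncPoolSketch` and `demo` are hypothetical stand-ins, not the actual `@aztec/foundation/async-pool` export.

```typescript
// Illustrative sketch only: run `fn` over `items` with at most `limit` calls in flight.
async function asyncPoolSketch<T, R>(limit: number, items: T[], fn: (item: T) => Promise<R>): Promise<R[]> {
  const results: R[] = new Array(items.length);
  let next = 0;
  // Spawn `limit` workers that each pull the next index until the list is exhausted.
  const workers = Array.from({ length: Math.min(limit, items.length) }, async () => {
    while (next < items.length) {
      const i = next++;
      results[i] = await fn(items[i]);
    }
  });
  await Promise.all(workers);
  return results;
}

// Usage shaped like the unwind path in the diff: map checkpoint numbers to data
// with at most 10 lookups in flight at a time.
async function demo() {
  const indices = Array.from({ length: 25 }, (_, i) => i + 1);
  const data = await asyncPoolSketch(10, indices, async n => `checkpoint-${n}`);
  console.log(data.length); // 25
}

void demo();
```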