@aztec/archiver 0.0.1-commit.b655e406 → 0.0.1-commit.fce3e4f
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/archiver/archiver.d.ts +30 -20
- package/dest/archiver/archiver.d.ts.map +1 -1
- package/dest/archiver/archiver.js +294 -208
- package/dest/archiver/archiver_store.d.ts +1 -1
- package/dest/archiver/archiver_store_test_suite.d.ts +1 -1
- package/dest/archiver/archiver_store_test_suite.d.ts.map +1 -1
- package/dest/archiver/archiver_store_test_suite.js +5 -4
- package/dest/archiver/config.d.ts +1 -1
- package/dest/archiver/config.d.ts.map +1 -1
- package/dest/archiver/config.js +5 -0
- package/dest/archiver/data_retrieval.d.ts +17 -17
- package/dest/archiver/data_retrieval.d.ts.map +1 -1
- package/dest/archiver/data_retrieval.js +110 -86
- package/dest/archiver/errors.d.ts +1 -1
- package/dest/archiver/errors.d.ts.map +1 -1
- package/dest/archiver/index.d.ts +1 -1
- package/dest/archiver/instrumentation.d.ts +3 -3
- package/dest/archiver/instrumentation.d.ts.map +1 -1
- package/dest/archiver/kv_archiver_store/block_store.d.ts +1 -1
- package/dest/archiver/kv_archiver_store/block_store.d.ts.map +1 -1
- package/dest/archiver/kv_archiver_store/contract_class_store.d.ts +1 -1
- package/dest/archiver/kv_archiver_store/contract_class_store.d.ts.map +1 -1
- package/dest/archiver/kv_archiver_store/contract_instance_store.d.ts +1 -1
- package/dest/archiver/kv_archiver_store/contract_instance_store.d.ts.map +1 -1
- package/dest/archiver/kv_archiver_store/kv_archiver_store.d.ts +2 -2
- package/dest/archiver/kv_archiver_store/kv_archiver_store.d.ts.map +1 -1
- package/dest/archiver/kv_archiver_store/log_store.d.ts +1 -1
- package/dest/archiver/kv_archiver_store/log_store.d.ts.map +1 -1
- package/dest/archiver/kv_archiver_store/message_store.d.ts +1 -1
- package/dest/archiver/kv_archiver_store/message_store.d.ts.map +1 -1
- package/dest/archiver/structs/data_retrieval.d.ts +1 -1
- package/dest/archiver/structs/inbox_message.d.ts +1 -1
- package/dest/archiver/structs/published.d.ts +3 -2
- package/dest/archiver/structs/published.d.ts.map +1 -1
- package/dest/archiver/validation.d.ts +10 -4
- package/dest/archiver/validation.d.ts.map +1 -1
- package/dest/archiver/validation.js +29 -21
- package/dest/factory.d.ts +1 -1
- package/dest/index.d.ts +2 -2
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +1 -1
- package/dest/rpc/index.d.ts +2 -2
- package/dest/test/index.d.ts +1 -1
- package/dest/test/mock_archiver.d.ts +1 -1
- package/dest/test/mock_archiver.d.ts.map +1 -1
- package/dest/test/mock_l1_to_l2_message_source.d.ts +1 -1
- package/dest/test/mock_l1_to_l2_message_source.d.ts.map +1 -1
- package/dest/test/mock_l2_block_source.d.ts +7 -6
- package/dest/test/mock_l2_block_source.d.ts.map +1 -1
- package/dest/test/mock_l2_block_source.js +1 -1
- package/dest/test/mock_structs.d.ts +1 -1
- package/package.json +17 -17
- package/src/archiver/archiver.ts +380 -244
- package/src/archiver/archiver_store_test_suite.ts +5 -4
- package/src/archiver/config.ts +5 -0
- package/src/archiver/data_retrieval.ts +156 -125
- package/src/archiver/instrumentation.ts +2 -2
- package/src/archiver/structs/published.ts +2 -1
- package/src/archiver/validation.ts +52 -27
- package/src/index.ts +1 -1
- package/src/test/mock_l2_block_source.ts +7 -6
package/dest/archiver/archiver.js
@@ -13,31 +13,31 @@ import { Fr } from '@aztec/foundation/fields';
 import { createLogger } from '@aztec/foundation/log';
 import { promiseWithResolvers } from '@aztec/foundation/promise';
 import { RunningPromise, makeLoggingErrorHandler } from '@aztec/foundation/running-promise';
-import { sleep } from '@aztec/foundation/sleep';
 import { count } from '@aztec/foundation/string';
-import { Timer, elapsed } from '@aztec/foundation/timer';
+import { DateProvider, Timer, elapsed } from '@aztec/foundation/timer';
 import { ContractClassPublishedEvent, PrivateFunctionBroadcastedEvent, UtilityFunctionBroadcastedEvent } from '@aztec/protocol-contracts/class-registry';
 import { ContractInstancePublishedEvent, ContractInstanceUpdatedEvent } from '@aztec/protocol-contracts/instance-registry';
-import { L2BlockSourceEvents } from '@aztec/stdlib/block';
+import { L2Block, L2BlockSourceEvents, PublishedL2Block } from '@aztec/stdlib/block';
 import { computePublicBytecodeCommitment, isValidPrivateFunctionMembershipProof, isValidUtilityFunctionMembershipProof } from '@aztec/stdlib/contract';
 import { getEpochAtSlot, getEpochNumberAtTimestamp, getSlotAtTimestamp, getSlotRangeForEpoch, getTimestampRangeForEpoch } from '@aztec/stdlib/epoch-helpers';
-import {
+import { getTelemetryClient, trackSpan } from '@aztec/telemetry-client';
 import { EventEmitter } from 'events';
 import groupBy from 'lodash.groupby';
 import { createPublicClient, fallback, http } from 'viem';
-import {
+import { retrieveCheckpointsFromRollup, retrieveL1ToL2Message, retrieveL1ToL2Messages, retrievedToPublishedCheckpoint } from './data_retrieval.js';
 import { InitialBlockNumberNotSequentialError, NoBlobBodiesFoundError } from './errors.js';
 import { ArchiverInstrumentation } from './instrumentation.js';
-import {
+import { validateCheckpointAttestations } from './validation.js';
 function mapArchiverConfig(config) {
     return {
         pollingIntervalMs: config.archiverPollingIntervalMS,
         batchSize: config.archiverBatchSize,
-        skipValidateBlockAttestations: config.skipValidateBlockAttestations
+        skipValidateBlockAttestations: config.skipValidateBlockAttestations,
+        maxAllowedEthClientDriftSeconds: config.maxAllowedEthClientDriftSeconds
     };
 }
 /**
- * Pulls
+ * Pulls checkpoints in a non-blocking manner and provides interface for their retrieval.
  * Responsible for handling robust L1 polling so that other components do not need to
  * concern themselves with it.
  */ export class Archiver extends EventEmitter {
@@ -47,10 +47,11 @@ function mapArchiverConfig(config) {
     config;
     blobSinkClient;
     epochCache;
+    dateProvider;
     instrumentation;
     l1constants;
     log;
-    /** A loop in which we will be continually fetching new
+    /** A loop in which we will be continually fetching new checkpoints. */ runningPromise;
     rollup;
     inbox;
     store;
@@ -68,13 +69,16 @@ function mapArchiverConfig(config) {
      * @param pollingIntervalMs - The interval for polling for L1 logs (in milliseconds).
      * @param store - An archiver data store for storage & retrieval of blocks, encrypted logs & contract data.
      * @param log - A logger.
-     */ constructor(publicClient, l1Addresses, dataStore, config, blobSinkClient, epochCache, instrumentation, l1constants, log = createLogger('archiver')){
-        super(), this.publicClient = publicClient, this.l1Addresses = l1Addresses, this.dataStore = dataStore, this.config = config, this.blobSinkClient = blobSinkClient, this.epochCache = epochCache, this.instrumentation = instrumentation, this.l1constants = l1constants, this.log = log, this.initialSyncComplete = false;
+     */ constructor(publicClient, l1Addresses, dataStore, config, blobSinkClient, epochCache, dateProvider, instrumentation, l1constants, log = createLogger('archiver')){
+        super(), this.publicClient = publicClient, this.l1Addresses = l1Addresses, this.dataStore = dataStore, this.config = config, this.blobSinkClient = blobSinkClient, this.epochCache = epochCache, this.dateProvider = dateProvider, this.instrumentation = instrumentation, this.l1constants = l1constants, this.log = log, this.initialSyncComplete = false;
         this.tracer = instrumentation.tracer;
         this.store = new ArchiverStoreHelper(dataStore);
         this.rollup = new RollupContract(publicClient, l1Addresses.rollupAddress);
         this.inbox = new InboxContract(publicClient, l1Addresses.inboxAddress);
         this.initialSyncPromise = promiseWithResolvers();
+        // Running promise starts with a small interval inbetween runs, so all iterations needed for the initial sync
+        // are done as fast as possible. This then gets updated once the initial sync completes.
+        this.runningPromise = new RunningPromise(()=>this.sync(), this.log, this.config.pollingIntervalMs / 10, makeLoggingErrorHandler(this.log, NoBlobBodiesFoundError, BlockTagTooOldError));
     }
     /**
      * Creates a new instance of the Archiver and blocks until it syncs from chain.
@@ -113,11 +117,12 @@ function mapArchiverConfig(config) {
         };
         const opts = merge({
             pollingIntervalMs: 10_000,
-            batchSize: 100
+            batchSize: 100,
+            maxAllowedEthClientDriftSeconds: 300
         }, mapArchiverConfig(config));
         const epochCache = deps.epochCache ?? await EpochCache.create(config.l1Contracts.rollupAddress, config, deps);
         const telemetry = deps.telemetry ?? getTelemetryClient();
-        const archiver = new Archiver(publicClient, config.l1Contracts, archiverStore, opts, deps.blobSinkClient, epochCache, await ArchiverInstrumentation.new(telemetry, ()=>archiverStore.estimateSize()), l1Constants);
+        const archiver = new Archiver(publicClient, config.l1Contracts, archiverStore, opts, deps.blobSinkClient, epochCache, deps.dateProvider ?? new DateProvider(), await ArchiverInstrumentation.new(telemetry, ()=>archiverStore.estimateSize()), l1Constants);
         await archiver.start(blockUntilSynced);
         return archiver;
     }
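
The hunk above gives the new drift option a 300-second default when it is not set in the archiver config. A hedged sketch of the config fragment that feeds mapArchiverConfig (the env-style key names are taken from this diff; the surrounding object shape is illustrative):

// Illustrative config fragment, using the keys that mapArchiverConfig reads.
// Setting maxAllowedEthClientDriftSeconds to 0 disables the drift check.
const archiverConfig = {
  archiverPollingIntervalMS: 10_000,
  archiverBatchSize: 100,
  skipValidateBlockAttestations: false,
  maxAllowedEthClientDriftSeconds: 300, // default applied via the opts merge above
};
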
@@ -128,48 +133,48 @@ function mapArchiverConfig(config) {
      * Starts sync process.
      * @param blockUntilSynced - If true, blocks until the archiver has fully synced.
      */ async start(blockUntilSynced) {
-        if (this.runningPromise) {
+        if (this.runningPromise.isRunning()) {
             throw new Error('Archiver is already running');
         }
         await this.blobSinkClient.testSources();
+        await this.testEthereumNodeSynced();
+        // Log initial state for the archiver
+        const { l1StartBlock } = this.l1constants;
+        const { blocksSynchedTo = l1StartBlock, messagesSynchedTo = l1StartBlock } = await this.store.getSynchPoint();
+        const currentL2Block = await this.getBlockNumber();
+        this.log.info(`Starting archiver sync to rollup contract ${this.l1Addresses.rollupAddress.toString()} from L1 block ${blocksSynchedTo} and L2 block ${currentL2Block}`, {
+            blocksSynchedTo,
+            messagesSynchedTo,
+            currentL2Block
+        });
+        // Start sync loop, and return the wait for initial sync if we are asked to block until synced
+        this.runningPromise.start();
         if (blockUntilSynced) {
-
-            this.log.info(`Retrying initial archiver sync in ${this.config.pollingIntervalMs}ms`);
-            await sleep(this.config.pollingIntervalMs);
-        }
+            return this.waitForInitialSync();
         }
-        this.runningPromise = new RunningPromise(()=>this.sync(false), this.log, this.config.pollingIntervalMs, makeLoggingErrorHandler(this.log, // Ignored errors will not log to the console
-        // We ignore NoBlobBodiesFound as the message may not have been passed to the blob sink yet
-        NoBlobBodiesFoundError));
-        this.runningPromise.start();
     }
     syncImmediate() {
-        if (!this.runningPromise) {
-            throw new Error('Archiver is not running');
-        }
         return this.runningPromise.trigger();
     }
     waitForInitialSync() {
         return this.initialSyncPromise.promise;
     }
-    async
-
-
-        return
-    }
-
-
-
-
-
-
-        }
-        return false;
+    /** Checks that the ethereum node we are connected to has a latest timestamp no more than the allowed drift. Throw if not. */ async testEthereumNodeSynced() {
+        const maxAllowedDelay = this.config.maxAllowedEthClientDriftSeconds;
+        if (maxAllowedDelay === 0) {
+            return;
+        }
+        const { number, timestamp: l1Timestamp } = await this.publicClient.getBlock({
+            includeTransactions: false
+        });
+        const currentTime = BigInt(this.dateProvider.nowInSeconds());
+        if (currentTime - l1Timestamp > BigInt(maxAllowedDelay)) {
+            throw new Error(`Ethereum node is out of sync (last block synced ${number} at ${l1Timestamp} vs current time ${currentTime})`);
         }
     }
     /**
      * Fetches logs from L1 contracts and processes them.
-     */ async sync(
+     */ async sync() {
     /**
      * We keep track of three "pointers" to L1 blocks:
      * 1. the last L1 block that published an L2 block
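
The testEthereumNodeSynced method added in the hunk above refuses to start syncing against a stale L1 node. A minimal standalone sketch of the same check using a viem public client (the assertEthNodeSynced helper and its parameters are illustrative, not part of this package):

import { createPublicClient, http } from 'viem';
import { mainnet } from 'viem/chains';

// Illustrative: throw if the node's latest block timestamp lags wall-clock
// time by more than the allowed drift; a drift of 0 disables the check.
async function assertEthNodeSynced(rpcUrl: string, maxDriftSeconds: number): Promise<void> {
  if (maxDriftSeconds === 0) {
    return;
  }
  const client = createPublicClient({ chain: mainnet, transport: http(rpcUrl) });
  const { number, timestamp } = await client.getBlock({ includeTransactions: false });
  const now = BigInt(Math.floor(Date.now() / 1000));
  if (now - timestamp > BigInt(maxDriftSeconds)) {
    throw new Error(`Ethereum node is out of sync (block ${number} at ${timestamp} vs now ${now})`);
  }
}
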
@@ -179,8 +184,6 @@ function mapArchiverConfig(config) {
      * We do this to deal with L1 data providers that are eventually consistent (e.g. Infura).
      * We guard against seeing block X with no data at one point, and later, the provider processes the block and it has data.
      * The archiver will stay back, until there's data on L1 that will move the pointers forward.
-     *
-     * This code does not handle reorgs.
      */ const { l1StartBlock, l1StartBlockHash } = this.l1constants;
         const { blocksSynchedTo = l1StartBlock, messagesSynchedTo = {
             l1BlockNumber: l1StartBlock,
@@ -191,12 +194,12 @@ function mapArchiverConfig(config) {
         });
         const currentL1BlockNumber = currentL1Block.number;
         const currentL1BlockHash = Buffer32.fromString(currentL1Block.hash);
-
-
-
-
-
-        }
+        this.log.trace(`Starting new archiver sync iteration`, {
+            blocksSynchedTo,
+            messagesSynchedTo,
+            currentL1BlockNumber,
+            currentL1BlockHash
+        });
         // ********** Ensuring Consistency of data pulled from L1 **********
         /**
          * There are a number of calls in this sync operation to L1 for retrieving
@@ -219,23 +222,40 @@ function mapArchiverConfig(config) {
         const currentL1Timestamp = !this.l1Timestamp || !this.l1BlockNumber || this.l1BlockNumber !== currentL1BlockNumber ? (await this.publicClient.getBlock({
             blockNumber: currentL1BlockNumber
         })).timestamp : this.l1Timestamp;
-        //
+        // Warn if the latest L1 block timestamp is too old
+        const maxAllowedDelay = this.config.maxAllowedEthClientDriftSeconds;
+        const now = this.dateProvider.nowInSeconds();
+        if (maxAllowedDelay > 0 && Number(currentL1Timestamp) <= now - maxAllowedDelay) {
+            this.log.warn(`Latest L1 block ${currentL1BlockNumber} timestamp ${currentL1Timestamp} is too old. Make sure your Ethereum node is synced.`, {
+                currentL1BlockNumber,
+                currentL1Timestamp,
+                now,
+                maxAllowedDelay
+            });
+        }
+        // ********** Events that are processed per checkpoint **********
         if (currentL1BlockNumber > blocksSynchedTo) {
-            // First we retrieve new L2 blocks and store them in the DB. This will also update the
-            // pending chain validation status, proven
-            const rollupStatus = await this.
+            // First we retrieve new checkpoints and L2 blocks and store them in the DB. This will also update the
+            // pending chain validation status, proven checkpoint number, and synched L1 block number.
+            const rollupStatus = await this.handleCheckpoints(blocksSynchedTo, currentL1BlockNumber);
             // Then we prune the current epoch if it'd reorg on next submission.
-            // Note that we don't do this before retrieving
-            //
+            // Note that we don't do this before retrieving checkpoints because we may need to retrieve
+            // checkpoints from more than 2 epochs ago, so we want to make sure we have the latest view of
             // the chain locally before we start unwinding stuff. This can be optimized by figuring out
-            // up to which point we're pruning, and then requesting
-            const { rollupCanPrune } = await this.handleEpochPrune(rollupStatus.
-            //
+            // up to which point we're pruning, and then requesting checkpoints up to that point only.
+            const { rollupCanPrune } = await this.handleEpochPrune(rollupStatus.provenCheckpointNumber, currentL1BlockNumber, currentL1Timestamp);
+            // If the last checkpoint we processed had an invalid attestation, we manually advance the L1 syncpoint
+            // past it, since otherwise we'll keep downloading it and reprocessing it on every iteration until
+            // we get a valid checkpoint to advance the syncpoint.
+            if (!rollupStatus.validationResult?.valid && rollupStatus.lastL1BlockWithCheckpoint !== undefined) {
+                await this.store.setBlockSynchedL1BlockNumber(rollupStatus.lastL1BlockWithCheckpoint);
+            }
+            // And lastly we check if we are missing any checkpoints behind us due to a possible L1 reorg.
             // We only do this if rollup cant prune on the next submission. Otherwise we will end up
-            // re-syncing the
+            // re-syncing the checkpoints we have just unwound above. We also dont do this if the last checkpoint is invalid,
             // since the archiver will rightfully refuse to sync up to it.
             if (!rollupCanPrune && rollupStatus.validationResult?.valid) {
-                await this.
+                await this.checkForNewCheckpointsBeforeL1SyncPoint(rollupStatus, blocksSynchedTo, currentL1BlockNumber);
             }
             this.instrumentation.updateL1BlockHeight(currentL1BlockNumber);
         }
@@ -244,14 +264,17 @@ function mapArchiverConfig(config) {
         // but the corresponding blocks have not been processed (see #12631).
         this.l1Timestamp = currentL1Timestamp;
         this.l1BlockNumber = currentL1BlockNumber;
-
-
-        if (
-            this.log.info(`Initial archiver sync to L1 block ${currentL1BlockNumber} complete
+        // We resolve the initial sync only once we've caught up with the latest L1 block number (with 1 block grace)
+        // so if the initial sync took too long, we still go for another iteration.
+        if (!this.initialSyncComplete && currentL1BlockNumber + 1n >= await this.publicClient.getBlockNumber()) {
+            this.log.info(`Initial archiver sync to L1 block ${currentL1BlockNumber} complete`, {
                 l1BlockNumber: currentL1BlockNumber,
                 syncPoint: await this.store.getSynchPoint(),
                 ...await this.getL2Tips()
             });
+            this.runningPromise.setPollingIntervalMS(this.config.pollingIntervalMs);
+            this.initialSyncComplete = true;
+            this.initialSyncPromise.resolve();
         }
     }
     /** Queries the rollup contract on whether a prune can be executed on the immediate next L1 block. */ async canPrune(currentL1BlockNumber, currentL1Timestamp) {
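
Taken together with the constructor hunk earlier, this gives the archiver a two-speed sync loop: it polls at a tenth of the configured interval until the initial sync catches up to the L1 head, then widens back out. A self-contained sketch of that pattern (the PollingLoop class below is a hypothetical stand-in for RunningPromise):

// Hypothetical stand-in for RunningPromise: repeatedly run a task with a
// delay between runs, where the delay can be widened while running.
class PollingLoop {
  private timer?: ReturnType<typeof setTimeout>;
  constructor(private task: () => Promise<void>, private intervalMs: number) {}
  start(): void {
    const tick = async () => {
      await this.task();
      this.timer = setTimeout(tick, this.intervalMs);
    };
    this.timer = setTimeout(tick, 0);
  }
  setPollingIntervalMS(ms: number): void {
    this.intervalMs = ms; // takes effect after the current run
  }
  stop(): void {
    clearTimeout(this.timer);
  }
}

// Usage mirroring the archiver: bootstrap at intervalMs / 10, then settle.
const intervalMs = 10_000;
let initialSyncComplete = false;
const loop = new PollingLoop(async () => {
  // ...sync one iteration; once caught up with the L1 head:
  if (!initialSyncComplete) {
    initialSyncComplete = true;
    loop.setPollingIntervalMS(intervalMs);
  }
}, intervalMs / 10);
loop.start();
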
@@ -268,30 +291,30 @@ function mapArchiverConfig(config) {
         }
         return result;
     }
-    /** Checks if there'd be a reorg for the next
+    /** Checks if there'd be a reorg for the next checkpoint submission and start pruning now. */ async handleEpochPrune(provenCheckpointNumber, currentL1BlockNumber, currentL1Timestamp) {
         const rollupCanPrune = await this.canPrune(currentL1BlockNumber, currentL1Timestamp);
-        const
-        const canPrune =
+        const localPendingCheckpointNumber = await this.getSynchedCheckpointNumber();
+        const canPrune = localPendingCheckpointNumber > provenCheckpointNumber && rollupCanPrune;
         if (canPrune) {
             const timer = new Timer();
-            const pruneFrom =
-            const header = await this.
+            const pruneFrom = provenCheckpointNumber + 1;
+            const header = await this.getCheckpointHeader(Number(pruneFrom));
             if (header === undefined) {
-                throw new Error(`Missing
+                throw new Error(`Missing checkpoint header ${pruneFrom}`);
             }
-            const pruneFromSlotNumber = header.
+            const pruneFromSlotNumber = header.slotNumber;
             const pruneFromEpochNumber = getEpochAtSlot(pruneFromSlotNumber, this.l1constants);
-            const
-            const
+            const checkpointsToUnwind = localPendingCheckpointNumber - provenCheckpointNumber;
+            const checkpoints = await this.getCheckpoints(Number(provenCheckpointNumber) + 1, Number(checkpointsToUnwind));
             // Emit an event for listening services to react to the chain prune
             this.emit(L2BlockSourceEvents.L2PruneDetected, {
                 type: L2BlockSourceEvents.L2PruneDetected,
                 epochNumber: pruneFromEpochNumber,
-                blocks
+                blocks: checkpoints.flatMap((c)=>L2Block.fromCheckpoint(c))
             });
-            this.log.debug(`L2 prune from ${
-            await this.
-            this.log.warn(`Unwound ${count(
+            this.log.debug(`L2 prune from ${provenCheckpointNumber + 1} to ${localPendingCheckpointNumber} will occur on next checkpoint submission.`);
+            await this.unwindCheckpoints(localPendingCheckpointNumber, checkpointsToUnwind);
+            this.log.warn(`Unwound ${count(checkpointsToUnwind, 'checkpoint')} from checkpoint ${localPendingCheckpointNumber} ` + `to ${provenCheckpointNumber} due to predicted reorg at L1 block ${currentL1BlockNumber}. ` + `Updated latest checkpoint is ${await this.getSynchedCheckpointNumber()}.`);
             this.instrumentation.processPrune(timer.ms());
             // TODO(palla/reorg): Do we need to set the block synched L1 block number here?
             // Seems like the next iteration should handle this.
@@ -334,7 +357,7 @@ function mapArchiverConfig(config) {
         });
         // Compare message count and rolling hash. If they match, no need to retrieve anything.
         if (remoteMessagesState.totalMessagesInserted === localMessagesInserted && remoteMessagesState.messagesRollingHash.equals(localLastMessage?.rollingHash ?? Buffer16.ZERO)) {
-            this.log.
+            this.log.trace(`No L1 to L2 messages to query between L1 blocks ${messagesSyncPoint.l1BlockNumber} and ${currentL1BlockNumber}.`);
             return;
         }
         // Check if our syncpoint is still valid. If not, there was an L1 reorg and we need to re-retrieve messages.
@@ -460,162 +483,173 @@ function mapArchiverConfig(config) {
         }
         return Buffer32.fromString(block.hash);
     }
-    async
-        const
+    async handleCheckpoints(blocksSynchedTo, currentL1BlockNumber) {
+        const localPendingCheckpointNumber = await this.getSynchedCheckpointNumber();
         const initialValidationResult = await this.store.getPendingChainValidationStatus();
-        const [
+        const [rollupProvenCheckpointNumber, provenArchive, rollupPendingCheckpointNumber, pendingArchive, archiveForLocalPendingCheckpointNumber] = await this.rollup.status(BigInt(localPendingCheckpointNumber), {
             blockNumber: currentL1BlockNumber
         });
+        const provenCheckpointNumber = Number(rollupProvenCheckpointNumber);
+        const pendingCheckpointNumber = Number(rollupPendingCheckpointNumber);
         const rollupStatus = {
-
+            provenCheckpointNumber,
             provenArchive,
-
+            pendingCheckpointNumber,
             pendingArchive,
             validationResult: initialValidationResult
         };
         this.log.trace(`Retrieved rollup status at current L1 block ${currentL1BlockNumber}.`, {
-
+            localPendingCheckpointNumber,
             blocksSynchedTo,
             currentL1BlockNumber,
-
+            archiveForLocalPendingCheckpointNumber,
             ...rollupStatus
         });
-        const
-            // Annoying edge case: if proven
-            // we need to set it to zero. This is an edge case because we dont have a
-            // so
-            if (
-                const
-                if (
-                    await this.
-                    this.log.info(`Rolled back proven chain to
-
+        const updateProvenCheckpoint = async ()=>{
+            // Annoying edge case: if proven checkpoint is moved back to 0 due to a reorg at the beginning of the chain,
+            // we need to set it to zero. This is an edge case because we dont have a checkpoint zero (initial checkpoint is one),
+            // so localCheckpointForDestinationProvenCheckpointNumber would not be found below.
+            if (provenCheckpointNumber === 0) {
+                const localProvenCheckpointNumber = await this.getProvenCheckpointNumber();
+                if (localProvenCheckpointNumber !== provenCheckpointNumber) {
+                    await this.setProvenCheckpointNumber(provenCheckpointNumber);
+                    this.log.info(`Rolled back proven chain to checkpoint ${provenCheckpointNumber}`, {
+                        provenCheckpointNumber
                    });
                }
            }
-            const
-            // Sanity check. I've hit what seems to be a state where the proven
-            // synched
-            const synched = await this.
-            if (
-                this.log.error(`Hit local
+            const localCheckpointForDestinationProvenCheckpointNumber = await this.getCheckpoint(provenCheckpointNumber);
+            // Sanity check. I've hit what seems to be a state where the proven checkpoint is set to a value greater than the latest
+            // synched checkpoint when requesting L2Tips from the archiver. This is the only place where the proven checkpoint is set.
+            const synched = await this.getSynchedCheckpointNumber();
+            if (localCheckpointForDestinationProvenCheckpointNumber && synched < localCheckpointForDestinationProvenCheckpointNumber.number) {
+                this.log.error(`Hit local checkpoint greater than last synched checkpoint: ${localCheckpointForDestinationProvenCheckpointNumber.number} > ${synched}`);
            }
-            this.log.trace(`Local
-
-
-
-
-            this.
-
+            this.log.trace(`Local checkpoint for remote proven checkpoint ${provenCheckpointNumber} is ${localCheckpointForDestinationProvenCheckpointNumber?.archive.root.toString() ?? 'undefined'}`);
+            const lastProvenBlockNumber = await this.getLastBlockNumberInCheckpoint(provenCheckpointNumber);
+            if (localCheckpointForDestinationProvenCheckpointNumber && provenArchive === localCheckpointForDestinationProvenCheckpointNumber.archive.root.toString()) {
+                const localProvenCheckpointNumber = await this.getProvenCheckpointNumber();
+                if (localProvenCheckpointNumber !== provenCheckpointNumber) {
+                    await this.setProvenCheckpointNumber(provenCheckpointNumber);
+                    this.log.info(`Updated proven chain to checkpoint ${provenCheckpointNumber}`, {
+                        provenCheckpointNumber
                    });
-                    const provenSlotNumber =
+                    const provenSlotNumber = localCheckpointForDestinationProvenCheckpointNumber.header.slotNumber;
                    const provenEpochNumber = getEpochAtSlot(provenSlotNumber, this.l1constants);
                    this.emit(L2BlockSourceEvents.L2BlockProven, {
                        type: L2BlockSourceEvents.L2BlockProven,
-                        blockNumber:
+                        blockNumber: BigInt(lastProvenBlockNumber),
                        slotNumber: provenSlotNumber,
                        epochNumber: provenEpochNumber
                    });
                } else {
-                    this.log.trace(`Proven
+                    this.log.trace(`Proven checkpoint ${provenCheckpointNumber} already stored.`);
                }
            }
-            this.instrumentation.updateLastProvenBlock(
+            this.instrumentation.updateLastProvenBlock(lastProvenBlockNumber);
        };
-        // This is an edge case that we only hit if there are no proposed
-        // If we have 0
-        const
-        if (
+        // This is an edge case that we only hit if there are no proposed checkpoints.
+        // If we have 0 checkpoints locally and there are no checkpoints onchain there is nothing to do.
+        const noCheckpoints = localPendingCheckpointNumber === 0 && pendingCheckpointNumber === 0;
+        if (noCheckpoints) {
            await this.store.setBlockSynchedL1BlockNumber(currentL1BlockNumber);
-            this.log.debug(`No
+            this.log.debug(`No checkpoints to retrieve from ${blocksSynchedTo + 1n} to ${currentL1BlockNumber}, no checkpoints on chain`);
            return rollupStatus;
        }
-        await
+        await updateProvenCheckpoint();
        // Related to the L2 reorgs of the pending chain. We are only interested in actually addressing a reorg if there
-        // are any state that could be impacted by it. If we have no
-        if (
-            const
-            if (
-                throw new Error(`Missing
+        // are any state that could be impacted by it. If we have no checkpoints, there is no impact.
+        if (localPendingCheckpointNumber > 0) {
+            const localPendingCheckpoint = await this.getCheckpoint(localPendingCheckpointNumber);
+            if (localPendingCheckpoint === undefined) {
+                throw new Error(`Missing checkpoint ${localPendingCheckpointNumber}`);
            }
-            const localPendingArchiveRoot =
-            const
-            if (
+            const localPendingArchiveRoot = localPendingCheckpoint.archive.root.toString();
+            const noCheckpointSinceLast = localPendingCheckpoint && pendingArchive === localPendingArchiveRoot;
+            if (noCheckpointSinceLast) {
                // We believe the following line causes a problem when we encounter L1 re-orgs.
                // Basically, by setting the synched L1 block number here, we are saying that we have
-                // processed all
+                // processed all checkpoints up to the current L1 block number and we will not attempt to retrieve logs from
                // this block again (or any blocks before).
-                // However, in the re-org scenario, our L1 node is temporarily lying to us and we end up potentially missing
+                // However, in the re-org scenario, our L1 node is temporarily lying to us and we end up potentially missing checkpoints.
                // We must only set this block number based on actually retrieved logs.
                // TODO(#8621): Tackle this properly when we handle L1 Re-orgs.
                // await this.store.setBlockSynchedL1BlockNumber(currentL1BlockNumber);
-                this.log.debug(`No
+                this.log.debug(`No checkpoints to retrieve from ${blocksSynchedTo + 1n} to ${currentL1BlockNumber}`);
                return rollupStatus;
            }
-            const
-            if (!
-                // If our local pending
+            const localPendingCheckpointInChain = archiveForLocalPendingCheckpointNumber === localPendingArchiveRoot;
+            if (!localPendingCheckpointInChain) {
+                // If our local pending checkpoint tip is not in the chain on L1 a "prune" must have happened
                // or the L1 have reorged.
                // In any case, we have to figure out how far into the past the action will take us.
-                // For simplicity here, we will simply rewind until we end in a
-                this.log.debug(`L2 prune has been detected due to local pending
-
+                // For simplicity here, we will simply rewind until we end in a checkpoint that is also on the chain on L1.
+                this.log.debug(`L2 prune has been detected due to local pending checkpoint ${localPendingCheckpointNumber} not in chain`, {
+                    localPendingCheckpointNumber,
                    localPendingArchiveRoot,
-
+                    archiveForLocalPendingCheckpointNumber
                });
-                let tipAfterUnwind =
+                let tipAfterUnwind = localPendingCheckpointNumber;
                while(true){
-                    const
-                    if (
+                    const candidateCheckpoint = await this.getCheckpoint(tipAfterUnwind);
+                    if (candidateCheckpoint === undefined) {
                        break;
                    }
-                    const archiveAtContract = await this.rollup.archiveAt(BigInt(
-
+                    const archiveAtContract = await this.rollup.archiveAt(BigInt(candidateCheckpoint.number));
+                    this.log.trace(`Checking local checkpoint ${candidateCheckpoint.number} with archive ${candidateCheckpoint.archive.root}`, {
+                        archiveAtContract,
+                        archiveLocal: candidateCheckpoint.archive.root.toString()
+                    });
+                    if (archiveAtContract === candidateCheckpoint.archive.root.toString()) {
                        break;
                    }
                    tipAfterUnwind--;
                }
-                const
-                await this.
-                this.log.warn(`Unwound ${count(
+                const checkpointsToUnwind = localPendingCheckpointNumber - tipAfterUnwind;
+                await this.unwindCheckpoints(localPendingCheckpointNumber, checkpointsToUnwind);
+                this.log.warn(`Unwound ${count(checkpointsToUnwind, 'checkpoint')} from checkpoint ${localPendingCheckpointNumber} ` + `due to mismatched checkpoint hashes at L1 block ${currentL1BlockNumber}. ` + `Updated L2 latest checkpoint is ${await this.getSynchedCheckpointNumber()}.`);
            }
        }
-        // Retrieve
+        // Retrieve checkpoints in batches. Each batch is estimated to accommodate up to 'blockBatchSize' L1 blocks,
        // computed using the L2 block time vs the L1 block time.
        let searchStartBlock = blocksSynchedTo;
        let searchEndBlock = blocksSynchedTo;
-        let
+        let lastRetrievedCheckpoint;
+        let lastL1BlockWithCheckpoint = undefined;
        do {
            [searchStartBlock, searchEndBlock] = this.nextRange(searchEndBlock, currentL1BlockNumber);
-            this.log.trace(`Retrieving
+            this.log.trace(`Retrieving checkpoints from L1 block ${searchStartBlock} to ${searchEndBlock}`);
            // TODO(md): Retrieve from blob sink then from consensus client, then from peers
-            const
-            if (
+            const retrievedCheckpoints = await retrieveCheckpointsFromRollup(this.rollup.getContract(), this.publicClient, this.blobSinkClient, searchStartBlock, searchEndBlock, this.log);
+            if (retrievedCheckpoints.length === 0) {
                // We are not calling `setBlockSynchedL1BlockNumber` because it may cause sync issues if based off infura.
                // See further details in earlier comments.
-                this.log.trace(`Retrieved no new
+                this.log.trace(`Retrieved no new checkpoints from L1 block ${searchStartBlock} to ${searchEndBlock}`);
                continue;
            }
-
-
-
-
-
+            this.log.debug(`Retrieved ${retrievedCheckpoints.length} new checkpoints between L1 blocks ${searchStartBlock} and ${searchEndBlock}`, {
+                lastProcessedCheckpoint: retrievedCheckpoints[retrievedCheckpoints.length - 1].l1,
+                searchStartBlock,
+                searchEndBlock
+            });
+            const publishedCheckpoints = await Promise.all(retrievedCheckpoints.map((b)=>retrievedToPublishedCheckpoint(b)));
+            const validCheckpoints = [];
+            for (const published of publishedCheckpoints){
                const validationResult = this.config.skipValidateBlockAttestations ? {
                    valid: true
-                } : await
-                // Only update the validation result if it has changed, so we can keep track of the first invalid
-                // in case there is a sequence of more than one invalid
-                // There is an exception though: if
+                } : await validateCheckpointAttestations(published, this.epochCache, this.l1constants, this.log);
+                // Only update the validation result if it has changed, so we can keep track of the first invalid checkpoint
+                // in case there is a sequence of more than one invalid checkpoint, as we need to invalidate the first one.
+                // There is an exception though: if a checkpoint is invalidated and replaced with another invalid checkpoint,
                // we need to update the validation result, since we need to be able to invalidate the new one.
-                // See test 'chain progresses if an invalid
+                // See test 'chain progresses if an invalid checkpoint is invalidated with an invalid one' for more info.
                if (rollupStatus.validationResult?.valid !== validationResult.valid || !rollupStatus.validationResult.valid && !validationResult.valid && rollupStatus.validationResult.block.blockNumber === validationResult.block.blockNumber) {
                    rollupStatus.validationResult = validationResult;
                }
                if (!validationResult.valid) {
-                    this.log.warn(`Skipping
-
-                        l1BlockNumber:
+                    this.log.warn(`Skipping checkpoint ${published.checkpoint.number} due to invalid attestations`, {
+                        checkpointHash: published.checkpoint.hash(),
+                        l1BlockNumber: published.l1.blockNumber,
                        ...pick(validationResult, 'reason')
                    });
                    // Emit event for invalid block detection
@@ -625,18 +659,18 @@ function mapArchiverConfig(config) {
                    });
                    continue;
                }
-
-                this.log.debug(`Ingesting new
-
-                    l1BlockNumber:
-                    ...
-
+                validCheckpoints.push(published);
+                this.log.debug(`Ingesting new checkpoint ${published.checkpoint.number} with ${published.checkpoint.blocks.length} blocks`, {
+                    checkpointHash: published.checkpoint.hash(),
+                    l1BlockNumber: published.l1.blockNumber,
+                    ...published.checkpoint.header.toInspect(),
+                    blocks: published.checkpoint.blocks.map((b)=>b.getStats())
                });
            }
            try {
                const updatedValidationResult = rollupStatus.validationResult === initialValidationResult ? undefined : rollupStatus.validationResult;
-                const [processDuration] = await elapsed(()=>this.
-                this.instrumentation.processNewBlocks(processDuration /
+                const [processDuration] = await elapsed(()=>this.addCheckpoints(validCheckpoints, updatedValidationResult));
+                this.instrumentation.processNewBlocks(processDuration / validCheckpoints.length, validCheckpoints.flatMap((c)=>c.checkpoint.blocks));
            } catch (err) {
                if (err instanceof InitialBlockNumberNotSequentialError) {
                    const { previousBlockNumber, newBlockNumber } = err;
@@ -652,58 +686,58 @@ function mapArchiverConfig(config) {
                }
                throw err;
            }
-            for (const
-                this.log.info(`Downloaded
-
-
-
-
-
-
+            for (const checkpoint of validCheckpoints){
+                this.log.info(`Downloaded checkpoint ${checkpoint.checkpoint.number}`, {
+                    checkpointHash: checkpoint.checkpoint.hash(),
+                    checkpointNumber: checkpoint.checkpoint.number,
+                    blockCount: checkpoint.checkpoint.blocks.length,
+                    txCount: checkpoint.checkpoint.blocks.reduce((acc, b)=>acc + b.body.txEffects.length, 0),
+                    header: checkpoint.checkpoint.header.toInspect(),
+                    archiveRoot: checkpoint.checkpoint.archive.root.toString(),
+                    archiveNextLeafIndex: checkpoint.checkpoint.archive.nextAvailableLeafIndex
                });
            }
-
+            lastRetrievedCheckpoint = validCheckpoints.at(-1) ?? lastRetrievedCheckpoint;
+            lastL1BlockWithCheckpoint = publishedCheckpoints.at(-1)?.l1.blockNumber ?? lastL1BlockWithCheckpoint;
        }while (searchEndBlock < currentL1BlockNumber)
        // Important that we update AFTER inserting the blocks.
-        await
+        await updateProvenCheckpoint();
        return {
            ...rollupStatus,
-
+            lastRetrievedCheckpoint,
+            lastL1BlockWithCheckpoint
        };
    }
-    async
-        const {
-        // Compare the last
+    async checkForNewCheckpointsBeforeL1SyncPoint(status, blocksSynchedTo, currentL1BlockNumber) {
+        const { lastRetrievedCheckpoint, pendingCheckpointNumber } = status;
+        // Compare the last checkpoint we have (either retrieved in this round or loaded from store) with what the
        // rollup contract told us was the latest one (pinned at the currentL1BlockNumber).
-        const
-        if (
+        const latestLocalCheckpointNumber = lastRetrievedCheckpoint?.checkpoint.number ?? await this.getSynchedCheckpointNumber();
+        if (latestLocalCheckpointNumber < pendingCheckpointNumber) {
            // Here we have consumed all logs until the `currentL1Block` we pinned at the beginning of the archiver loop,
-            // but still
-            // We suspect an L1 reorg that added
-            // last
-            // don't have one, we go back 2 L1 epochs, which is the deepest possible reorg (assuming Casper is working).
-            const
-            const targetL1BlockNumber =
-            const
-            this.log.warn(`Failed to reach
-
-
+            // but still haven't reached the pending checkpoint according to the call to the rollup contract.
+            // We suspect an L1 reorg that added checkpoints *behind* us. If that is the case, it must have happened between
+            // the last checkpoint we saw and the current one, so we reset the last synched L1 block number. In the edge case
+            // we don't have one, we go back 2 L1 epochs, which is the deepest possible reorg (assuming Casper is working).
+            const latestLocalCheckpoint = lastRetrievedCheckpoint ?? (latestLocalCheckpointNumber > 0 ? await this.getPublishedCheckpoints(latestLocalCheckpointNumber, 1).then(([c])=>c) : undefined);
+            const targetL1BlockNumber = latestLocalCheckpoint?.l1.blockNumber ?? maxBigint(currentL1BlockNumber - 64n, 0n);
+            const latestLocalCheckpointArchive = latestLocalCheckpoint?.checkpoint.archive.root.toString();
+            this.log.warn(`Failed to reach checkpoint ${pendingCheckpointNumber} at ${currentL1BlockNumber} (latest is ${latestLocalCheckpointNumber}). ` + `Rolling back last synched L1 block number to ${targetL1BlockNumber}.`, {
+                latestLocalCheckpointNumber,
+                latestLocalCheckpointArchive,
                blocksSynchedTo,
                currentL1BlockNumber,
                ...status
            });
            await this.store.setBlockSynchedL1BlockNumber(targetL1BlockNumber);
        } else {
-            this.log.trace(`No new
-
-
+            this.log.trace(`No new checkpoints behind L1 sync point to retrieve.`, {
+                latestLocalCheckpointNumber,
+                pendingCheckpointNumber
            });
        }
    }
    /** Resumes the archiver after a stop. */ resume() {
-        if (!this.runningPromise) {
-            throw new Error(`Archiver was never started`);
-        }
        if (this.runningPromise.isRunning()) {
            this.log.warn(`Archiver already running`);
        }
@@ -715,7 +749,7 @@ function mapArchiverConfig(config) {
      * @returns A promise signalling completion of the stop process.
      */ async stop() {
         this.log.debug('Stopping...');
-        await this.runningPromise
+        await this.runningPromise.stop();
         this.log.info('Stopped.');
         return Promise.resolve();
     }
@@ -754,7 +788,7 @@ function mapArchiverConfig(config) {
         // Walk the list of blocks backwards and filter by slots matching the requested epoch.
         // We'll typically ask for blocks for a very recent epoch, so we shouldn't need an index here.
         let block = await this.getBlock(await this.store.getSynchedL2BlockNumber());
-        const slot = (b)=>b.header.globalVariables.slotNumber
+        const slot = (b)=>b.header.globalVariables.slotNumber;
         while(block && slot(block) >= start){
             if (slot(block) <= end) {
                 blocks.push(block);
@@ -770,7 +804,7 @@ function mapArchiverConfig(config) {
         // We'll typically ask for blocks for a very recent epoch, so we shouldn't need an index here.
         let number = await this.store.getSynchedL2BlockNumber();
         let header = await this.getBlockHeader(number);
-        const slot = (b)=>b.globalVariables.slotNumber
+        const slot = (b)=>b.globalVariables.slotNumber;
         while(header && slot(header) >= start){
             if (slot(header) <= end) {
                 blocks.push(header);
@@ -782,7 +816,7 @@ function mapArchiverConfig(config) {
     async isEpochComplete(epochNumber) {
         // The epoch is complete if the current L2 block is the last one in the epoch (or later)
         const header = await this.getBlockHeader('latest');
-        const slot = header
+        const slot = header ? header.globalVariables.slotNumber : undefined;
         const [_startSlot, endSlot] = getSlotRangeForEpoch(epochNumber, this.l1constants);
         if (slot && slot >= endSlot) {
             return true;
@@ -806,6 +840,60 @@ function mapArchiverConfig(config) {
     /** Returns whether the archiver has completed an initial sync run successfully. */ isInitialSyncComplete() {
         return this.initialSyncComplete;
     }
+    async getPublishedCheckpoints(from, limit, proven) {
+        const blocks = await this.getPublishedBlocks(from, limit, proven);
+        return blocks.map((b)=>b.toPublishedCheckpoint());
+    }
+    async getCheckpoints(from, limit, proven) {
+        const published = await this.getPublishedCheckpoints(from, limit, proven);
+        return published.map((p)=>p.checkpoint);
+    }
+    async getCheckpoint(number) {
+        if (number < 0) {
+            number = await this.getSynchedCheckpointNumber();
+        }
+        if (number === 0) {
+            return undefined;
+        }
+        const published = await this.getPublishedCheckpoints(number, 1);
+        return published[0]?.checkpoint;
+    }
+    async getCheckpointHeader(number) {
+        if (number === 'latest') {
+            number = await this.getSynchedCheckpointNumber();
+        }
+        if (number === 0) {
+            return undefined;
+        }
+        const checkpoint = await this.getCheckpoint(number);
+        return checkpoint?.header;
+    }
+    getCheckpointNumber() {
+        return this.getSynchedCheckpointNumber();
+    }
+    getSynchedCheckpointNumber() {
+        // TODO: Checkpoint number will no longer be the same as the block number once we support multiple blocks per checkpoint.
+        return this.store.getSynchedL2BlockNumber();
+    }
+    getProvenCheckpointNumber() {
+        // TODO: Proven checkpoint number will no longer be the same as the proven block number once we support multiple blocks per checkpoint.
+        return this.store.getProvenL2BlockNumber();
+    }
+    setProvenCheckpointNumber(checkpointNumber) {
+        // TODO: Proven checkpoint number will no longer be the same as the proven block number once we support multiple blocks per checkpoint.
+        return this.store.setProvenL2BlockNumber(checkpointNumber);
+    }
+    unwindCheckpoints(from, checkpointsToUnwind) {
+        // TODO: This only works if we have one block per checkpoint.
+        return this.store.unwindBlocks(from, checkpointsToUnwind);
+    }
+    getLastBlockNumberInCheckpoint(checkpointNumber) {
+        // TODO: Checkpoint number will no longer be the same as the block number once we support multiple blocks per checkpoint.
+        return Promise.resolve(checkpointNumber);
+    }
+    addCheckpoints(checkpoints, pendingChainValidationStatus) {
+        return this.store.addBlocks(checkpoints.map((p)=>PublishedL2Block.fromPublishedCheckpoint(p)), pendingChainValidationStatus);
+    }
     /**
      * Gets up to `limit` amount of L2 blocks starting from `from`.
      * @param from - Number of the first block to return (inclusive).
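
The helper methods added above rely on a temporary invariant that their TODOs call out: exactly one L2 block per checkpoint, so checkpoint numbers coincide with block numbers and each call can delegate straight to the existing block store. A reduced sketch of that aliasing layer (the BlockStore interface below is illustrative, not the package's actual store type):

// Illustrative: while one checkpoint equals one block, checkpoint-numbered
// queries can delegate directly to the block store.
interface BlockStore {
  getSynchedL2BlockNumber(): Promise<number>;
  getProvenL2BlockNumber(): Promise<number>;
  unwindBlocks(from: number, count: number): Promise<void>;
}

class CheckpointView {
  constructor(private store: BlockStore) {}
  getSynchedCheckpointNumber(): Promise<number> {
    return this.store.getSynchedL2BlockNumber();
  }
  getProvenCheckpointNumber(): Promise<number> {
    return this.store.getProvenL2BlockNumber();
  }
  unwindCheckpoints(from: number, count: number): Promise<void> {
    return this.store.unwindBlocks(from, count);
  }
}
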
@@ -1030,9 +1118,7 @@ function mapArchiverConfig(config) {
     }
 }
 _ts_decorate([
-    trackSpan('Archiver.sync'
-        [Attributes.INITIAL_SYNC]: initialRun
-    }))
+    trackSpan('Archiver.sync')
 ], Archiver.prototype, "sync", null);
 var Operation = /*#__PURE__*/ function(Operation) {
     Operation[Operation["Store"] = 0] = "Store";