@aztec/node-lib 0.0.1-fake-c83136db25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -0
- package/dest/actions/build-snapshot-metadata.d.ts +5 -0
- package/dest/actions/build-snapshot-metadata.d.ts.map +1 -0
- package/dest/actions/build-snapshot-metadata.js +19 -0
- package/dest/actions/create-backups.d.ts +6 -0
- package/dest/actions/create-backups.d.ts.map +1 -0
- package/dest/actions/create-backups.js +32 -0
- package/dest/actions/index.d.ts +5 -0
- package/dest/actions/index.d.ts.map +1 -0
- package/dest/actions/index.js +4 -0
- package/dest/actions/snapshot-sync.d.ts +27 -0
- package/dest/actions/snapshot-sync.d.ts.map +1 -0
- package/dest/actions/snapshot-sync.js +236 -0
- package/dest/actions/upload-snapshot.d.ts +12 -0
- package/dest/actions/upload-snapshot.d.ts.map +1 -0
- package/dest/actions/upload-snapshot.js +38 -0
- package/dest/config/index.d.ts +19 -0
- package/dest/config/index.d.ts.map +1 -0
- package/dest/config/index.js +41 -0
- package/dest/factories/index.d.ts +2 -0
- package/dest/factories/index.d.ts.map +1 -0
- package/dest/factories/index.js +1 -0
- package/dest/factories/l1_tx_utils.d.ts +53 -0
- package/dest/factories/l1_tx_utils.d.ts.map +1 -0
- package/dest/factories/l1_tx_utils.js +50 -0
- package/dest/metrics/index.d.ts +2 -0
- package/dest/metrics/index.d.ts.map +1 -0
- package/dest/metrics/index.js +1 -0
- package/dest/metrics/l1_tx_metrics.d.ts +29 -0
- package/dest/metrics/l1_tx_metrics.d.ts.map +1 -0
- package/dest/metrics/l1_tx_metrics.js +138 -0
- package/dest/stores/index.d.ts +2 -0
- package/dest/stores/index.d.ts.map +1 -0
- package/dest/stores/index.js +1 -0
- package/dest/stores/l1_tx_store.d.ts +89 -0
- package/dest/stores/l1_tx_store.d.ts.map +1 -0
- package/dest/stores/l1_tx_store.js +264 -0
- package/package.json +99 -0
- package/src/actions/build-snapshot-metadata.ts +29 -0
- package/src/actions/create-backups.ts +41 -0
- package/src/actions/index.ts +4 -0
- package/src/actions/snapshot-sync.ts +277 -0
- package/src/actions/upload-snapshot.ts +50 -0
- package/src/config/index.ts +64 -0
- package/src/factories/index.ts +1 -0
- package/src/factories/l1_tx_utils.ts +122 -0
- package/src/metrics/index.ts +1 -0
- package/src/metrics/l1_tx_metrics.ts +169 -0
- package/src/stores/index.ts +1 -0
- package/src/stores/l1_tx_store.ts +387 -0
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
import type { Archiver } from '@aztec/archiver';
|
|
2
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
3
|
+
import type { WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server';
|
|
4
|
+
import type { SnapshotDataUrls } from '@aztec/stdlib/snapshots';
|
|
5
|
+
|
|
6
|
+
import { existsSync } from 'fs';
|
|
7
|
+
import { join } from 'path/posix';
|
|
8
|
+
|
|
9
|
+
export async function createBackups(
|
|
10
|
+
backupDir: string,
|
|
11
|
+
archiver: Archiver,
|
|
12
|
+
worldState: WorldStateSynchronizer,
|
|
13
|
+
log: Logger,
|
|
14
|
+
) {
|
|
15
|
+
try {
|
|
16
|
+
log.info(`Pausing archiver and world state sync to start snapshot upload`);
|
|
17
|
+
await archiver.stop();
|
|
18
|
+
await worldState.stopSync();
|
|
19
|
+
|
|
20
|
+
log.info(`Creating backups of lmdb environments to ${backupDir}`);
|
|
21
|
+
const [archiverPath, worldStatePaths] = await Promise.all([
|
|
22
|
+
archiver.backupTo(join(backupDir, 'archiver')),
|
|
23
|
+
worldState.backupTo(join(backupDir, 'world-state')),
|
|
24
|
+
]);
|
|
25
|
+
const paths: SnapshotDataUrls = { ...worldStatePaths, archiver: archiverPath };
|
|
26
|
+
|
|
27
|
+
const missing = Object.entries(paths).filter(([_key, path]) => !path || !existsSync(path));
|
|
28
|
+
if (missing.length > 0) {
|
|
29
|
+
throw new Error(`Missing backup files: ${missing.map(([key, path]) => `${path} (${key})`).join(', ')}`);
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
log.info(`Data stores backed up to ${backupDir}`, { paths });
|
|
33
|
+
return paths;
|
|
34
|
+
} catch (err) {
|
|
35
|
+
throw new Error(`Error creating backups for snapshot upload: ${err}`);
|
|
36
|
+
} finally {
|
|
37
|
+
log.info(`Resuming archiver and world state sync`);
|
|
38
|
+
worldState.resumeSync();
|
|
39
|
+
archiver.resume();
|
|
40
|
+
}
|
|
41
|
+
}
|
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
import { ARCHIVER_DB_VERSION, ARCHIVER_STORE_NAME, type ArchiverConfig, createArchiverStore } from '@aztec/archiver';
|
|
2
|
+
import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants';
|
|
3
|
+
import { type EthereumClientConfig, getPublicClient } from '@aztec/ethereum';
|
|
4
|
+
import type { EthAddress } from '@aztec/foundation/eth-address';
|
|
5
|
+
import { tryRmDir } from '@aztec/foundation/fs';
|
|
6
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
7
|
+
import type { DataStoreConfig } from '@aztec/kv-store/config';
|
|
8
|
+
import { P2P_STORE_NAME } from '@aztec/p2p';
|
|
9
|
+
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
10
|
+
import { DatabaseVersionManager } from '@aztec/stdlib/database-version';
|
|
11
|
+
import { type ReadOnlyFileStore, createReadOnlyFileStore } from '@aztec/stdlib/file-store';
|
|
12
|
+
import {
|
|
13
|
+
type SnapshotMetadata,
|
|
14
|
+
type SnapshotsIndexMetadata,
|
|
15
|
+
downloadSnapshot,
|
|
16
|
+
getLatestSnapshotMetadata,
|
|
17
|
+
makeSnapshotPaths,
|
|
18
|
+
} from '@aztec/stdlib/snapshots';
|
|
19
|
+
import { NATIVE_WORLD_STATE_DBS, WORLD_STATE_DB_VERSION, WORLD_STATE_DIR } from '@aztec/world-state';
|
|
20
|
+
|
|
21
|
+
import { mkdir, mkdtemp, rename } from 'fs/promises';
|
|
22
|
+
import { join } from 'path';
|
|
23
|
+
|
|
24
|
+
import type { SharedNodeConfig } from '../config/index.js';
|
|
25
|
+
|
|
26
|
+
// Half a day's worth of L1 blocks: 86400 seconds per day / 2, at 12 seconds per L1 block.
const MIN_L1_BLOCKS_TO_TRIGGER_REPLACE = 86400 / 2 / 12;
|
|
28
|
+
|
|
29
|
+
/**
 * Config consumed by trySnapshotSync: sync mode, chain identity, archiver store tuning,
 * data directory, and L1 client settings, plus the snapshot source URLs and the minimum
 * L1-block lag before an existing local store gets replaced by a snapshot.
 */
type SnapshotSyncConfig = Pick<SharedNodeConfig, 'syncMode'> &
  Pick<ChainConfig, 'l1ChainId' | 'rollupVersion'> &
  Pick<ArchiverConfig, 'archiverStoreMapSizeKb' | 'maxLogs'> &
  Required<DataStoreConfig> &
  EthereumClientConfig & {
    /** Base URLs to search for snapshot indices. */
    snapshotsUrls?: string[];
    /** Overrides the default MIN_L1_BLOCKS_TO_TRIGGER_REPLACE threshold. */
    minL1BlocksToTriggerReplace?: number;
  };
|
|
37
|
+
|
|
38
|
+
/**
|
|
39
|
+
* Connects to a remote snapshot index and downloads the latest snapshot if the local archiver is behind.
|
|
40
|
+
* Behaviour depends on syncing mode.
|
|
41
|
+
*/
|
|
42
|
+
export async function trySnapshotSync(config: SnapshotSyncConfig, log: Logger) {
  const { syncMode, snapshotsUrls, dataDirectory, l1ChainId, rollupVersion, l1Contracts } = config;

  // Guard: full sync was explicitly requested, so never touch snapshots.
  if (syncMode === 'full') {
    log.debug('Snapshot sync is disabled. Running full sync.', { syncMode: syncMode });
    return false;
  }

  // Guard: nothing to download from.
  if (!snapshotsUrls || snapshotsUrls.length === 0) {
    log.verbose('Snapshot sync is disabled. No snapshots URLs provided.');
    return false;
  }

  // Guard: nowhere to download to.
  if (!dataDirectory) {
    log.verbose('Snapshot sync is disabled. No local data directory defined.');
    return false;
  }

  // Create an archiver store to check the current state (do this only once)
  log.verbose(`Creating temporary archiver data store`);
  const archiverStore = await createArchiverStore(config);
  let archiverL1BlockNumber: bigint | undefined;
  let archiverL2BlockNumber: number | undefined;
  try {
    [archiverL1BlockNumber, archiverL2BlockNumber] = await Promise.all([
      archiverStore.getSynchPoint().then(s => s.blocksSynchedTo),
      archiverStore.getSynchedL2BlockNumber(),
    ] as const);
  } finally {
    // Close the store before anything else may reopen it; failures above still close it here.
    log.verbose(`Closing temporary archiver data store`, { archiverL1BlockNumber, archiverL2BlockNumber });
    await archiverStore.close();
  }

  const minL1BlocksToTriggerReplace = config.minL1BlocksToTriggerReplace ?? MIN_L1_BLOCKS_TO_TRIGGER_REPLACE;

  // In non-forced 'snapshot' mode, any existing local L2 data wins over a snapshot.
  if (syncMode === 'snapshot' && archiverL2BlockNumber !== undefined && archiverL2BlockNumber >= INITIAL_L2_BLOCK_NUM) {
    log.verbose(
      `Skipping non-forced snapshot sync as archiver is already synced to L2 block ${archiverL2BlockNumber}.`,
    );
    return false;
  }

  // If the local archiver is close enough to the L1 head, replacing it is not worth it.
  const currentL1BlockNumber = await getPublicClient(config).getBlockNumber();
  if (archiverL1BlockNumber && currentL1BlockNumber - archiverL1BlockNumber < minL1BlocksToTriggerReplace) {
    log.verbose(
      `Skipping snapshot sync as archiver is less than ${
        currentL1BlockNumber - archiverL1BlockNumber
      } L1 blocks behind.`,
      { archiverL1BlockNumber, currentL1BlockNumber, minL1BlocksToTriggerReplace },
    );
    return false;
  }

  // Identity of the snapshot index we are looking for (chain + rollup).
  const indexMetadata: SnapshotsIndexMetadata = {
    l1ChainId,
    rollupVersion,
    rollupAddress: l1Contracts.rollupAddress,
  };

  // Fetch latest snapshot from each URL
  type SnapshotCandidate = { snapshot: SnapshotMetadata; url: string; fileStore: ReadOnlyFileStore };
  const snapshotCandidates: SnapshotCandidate[] = [];

  for (const snapshotsUrl of snapshotsUrls) {
    let fileStore: ReadOnlyFileStore;
    try {
      fileStore = await createReadOnlyFileStore(snapshotsUrl, log);
    } catch (err) {
      log.error(`Invalid config for downloading snapshots from ${snapshotsUrl}`, err);
      continue;
    }

    let snapshot: SnapshotMetadata | undefined;
    try {
      snapshot = await getLatestSnapshotMetadata(indexMetadata, fileStore);
    } catch (err) {
      log.error(`Failed to get latest snapshot metadata from ${snapshotsUrl}. Skipping this URL.`, err, {
        ...indexMetadata,
        snapshotsUrl,
      });
      continue;
    }

    if (!snapshot) {
      log.verbose(`No snapshot found at ${snapshotsUrl}. Skipping this URL.`, { ...indexMetadata, snapshotsUrl });
      continue;
    }

    // Reject snapshots created against a different archiver db schema.
    if (snapshot.schemaVersions.archiver !== ARCHIVER_DB_VERSION) {
      log.warn(
        `Skipping snapshot from ${snapshotsUrl} as it has schema version ${snapshot.schemaVersions.archiver} but expected ${ARCHIVER_DB_VERSION}.`,
        snapshot,
      );
      continue;
    }

    // Reject snapshots created against a different world state db schema.
    if (snapshot.schemaVersions.worldState !== WORLD_STATE_DB_VERSION) {
      log.warn(
        `Skipping snapshot from ${snapshotsUrl} as it has world state schema version ${snapshot.schemaVersions.worldState} but we expected ${WORLD_STATE_DB_VERSION}.`,
        snapshot,
      );
      continue;
    }

    // NOTE(review): snapshot.l1BlockNumber is a number while archiverL1BlockNumber is a bigint;
    // mixed-type `<` comparison is valid here, but the subtraction below requires Number() — verify
    // the snapshot block number stays within safe-integer range.
    if (archiverL1BlockNumber && snapshot.l1BlockNumber < archiverL1BlockNumber) {
      log.verbose(
        `Skipping snapshot from ${snapshotsUrl} since local archiver is at L1 block ${archiverL1BlockNumber} which is further than snapshot at ${snapshot.l1BlockNumber}`,
        { snapshot, archiverL1BlockNumber, snapshotsUrl },
      );
      continue;
    }

    // A snapshot only marginally ahead of us is not worth replacing local data for.
    if (archiverL1BlockNumber && snapshot.l1BlockNumber - Number(archiverL1BlockNumber) < minL1BlocksToTriggerReplace) {
      log.verbose(
        `Skipping snapshot from ${snapshotsUrl} as archiver is less than ${
          snapshot.l1BlockNumber - Number(archiverL1BlockNumber)
        } L1 blocks behind this snapshot.`,
        { snapshot, archiverL1BlockNumber, snapshotsUrl },
      );
      continue;
    }

    // NOTE(review): the fileStore captured here is not used when actually syncing below —
    // snapshotSync recreates its own store from the URL.
    snapshotCandidates.push({ snapshot, url: snapshotsUrl, fileStore });
  }

  if (snapshotCandidates.length === 0) {
    log.verbose(`No valid snapshots found from any URL. Skipping snapshot sync.`, { ...indexMetadata, snapshotsUrls });
    return false;
  }

  // Sort candidates by L1 block number (highest first)
  snapshotCandidates.sort((a, b) => b.snapshot.l1BlockNumber - a.snapshot.l1BlockNumber);

  // Try each candidate in order until one succeeds
  for (const { snapshot, url } of snapshotCandidates) {
    const { l1BlockNumber, l2BlockNumber } = snapshot;
    log.info(`Attempting to sync from snapshot at L1 block ${l1BlockNumber} L2 block ${l2BlockNumber}`, {
      snapshot,
      snapshotsUrl: url,
    });

    try {
      await snapshotSync(snapshot, log, {
        dataDirectory: config.dataDirectory!,
        rollupAddress: config.l1Contracts.rollupAddress,
        snapshotsUrl: url,
      });
      log.info(`Snapshot synced to L1 block ${l1BlockNumber} L2 block ${l2BlockNumber}`, {
        snapshot,
        snapshotsUrl: url,
      });
      return true;
    } catch (err) {
      log.error(`Failed to download snapshot from ${url}. Trying next candidate.`, err, {
        snapshot,
        snapshotsUrl: url,
      });
      continue;
    }
  }

  // Every candidate failed to download/apply.
  log.error(`Failed to download snapshot from all URLs.`, { snapshotsUrls });
  return false;
}
|
|
204
|
+
|
|
205
|
+
/**
|
|
206
|
+
* Downloads the given snapshot replacing any local data stores.
|
|
207
|
+
*/
|
|
208
|
+
export async function snapshotSync(
|
|
209
|
+
snapshot: Pick<SnapshotMetadata, 'dataUrls'>,
|
|
210
|
+
log: Logger,
|
|
211
|
+
config: { dataDirectory: string; rollupAddress: EthAddress; snapshotsUrl: string },
|
|
212
|
+
) {
|
|
213
|
+
const { dataDirectory, rollupAddress } = config;
|
|
214
|
+
if (!dataDirectory) {
|
|
215
|
+
throw new Error(`No local data directory defined. Cannot sync snapshot.`);
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
const fileStore = await createReadOnlyFileStore(config.snapshotsUrl, log);
|
|
219
|
+
|
|
220
|
+
let downloadDir: string | undefined;
|
|
221
|
+
|
|
222
|
+
try {
|
|
223
|
+
// Download the snapshot to a temp location.
|
|
224
|
+
await mkdir(dataDirectory, { recursive: true });
|
|
225
|
+
downloadDir = await mkdtemp(join(dataDirectory, 'download-'));
|
|
226
|
+
const downloadPaths = makeSnapshotPaths(downloadDir);
|
|
227
|
+
log.info(`Downloading snapshot to ${downloadDir}`, { snapshot, downloadPaths });
|
|
228
|
+
await downloadSnapshot(snapshot, downloadPaths, fileStore);
|
|
229
|
+
log.info(`Snapshot downloaded at ${downloadDir}`, { snapshot, downloadPaths });
|
|
230
|
+
|
|
231
|
+
// If download was successful, clear lock and version, and move download there
|
|
232
|
+
const archiverPath = join(dataDirectory, ARCHIVER_STORE_NAME);
|
|
233
|
+
await prepareTarget(archiverPath, ARCHIVER_DB_VERSION, rollupAddress);
|
|
234
|
+
await rename(downloadPaths.archiver, join(archiverPath, 'data.mdb'));
|
|
235
|
+
log.info(`Archiver database set up from snapshot`, {
|
|
236
|
+
path: archiverPath,
|
|
237
|
+
dbVersion: ARCHIVER_DB_VERSION,
|
|
238
|
+
rollupAddress,
|
|
239
|
+
});
|
|
240
|
+
|
|
241
|
+
// Same for the world state dbs, only that we do not close them, since we assume they are not yet in use
|
|
242
|
+
const worldStateBasePath = join(dataDirectory, WORLD_STATE_DIR);
|
|
243
|
+
await prepareTarget(worldStateBasePath, WORLD_STATE_DB_VERSION, rollupAddress);
|
|
244
|
+
for (const [name, dir] of NATIVE_WORLD_STATE_DBS) {
|
|
245
|
+
const path = join(worldStateBasePath, dir);
|
|
246
|
+
await mkdir(path, { recursive: true });
|
|
247
|
+
await rename(downloadPaths[name], join(path, 'data.mdb'));
|
|
248
|
+
log.info(`World state database ${name} set up from snapshot`, {
|
|
249
|
+
path,
|
|
250
|
+
dbVersion: WORLD_STATE_DB_VERSION,
|
|
251
|
+
rollupAddress,
|
|
252
|
+
});
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
// And clear the p2p db altogether
|
|
256
|
+
const p2pPath = join(dataDirectory, P2P_STORE_NAME);
|
|
257
|
+
await tryRmDir(p2pPath, log);
|
|
258
|
+
log.info(`P2P database cleared`, { path: p2pPath });
|
|
259
|
+
} finally {
|
|
260
|
+
if (downloadDir) {
|
|
261
|
+
await tryRmDir(downloadDir, log);
|
|
262
|
+
}
|
|
263
|
+
}
|
|
264
|
+
}
|
|
265
|
+
|
|
266
|
+
/** Deletes target dir and writes the new version file. */
|
|
267
|
+
async function prepareTarget(target: string, schemaVersion: number, rollupAddress: EthAddress) {
|
|
268
|
+
const noOpen = () => Promise.resolve(undefined);
|
|
269
|
+
const versionManager = new DatabaseVersionManager<undefined>({
|
|
270
|
+
schemaVersion,
|
|
271
|
+
rollupAddress,
|
|
272
|
+
dataDirectory: target,
|
|
273
|
+
onOpen: noOpen,
|
|
274
|
+
});
|
|
275
|
+
await versionManager.resetDataDirectory();
|
|
276
|
+
await versionManager.writeVersion();
|
|
277
|
+
}
|
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import { ARCHIVER_DB_VERSION, type Archiver } from '@aztec/archiver';
|
|
2
|
+
import { tryRmDir } from '@aztec/foundation/fs';
|
|
3
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
4
|
+
import type { DataStoreConfig } from '@aztec/kv-store/config';
|
|
5
|
+
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
6
|
+
import { createFileStore } from '@aztec/stdlib/file-store';
|
|
7
|
+
import type { WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server';
|
|
8
|
+
import { uploadSnapshotToIndex } from '@aztec/stdlib/snapshots';
|
|
9
|
+
import { WORLD_STATE_DB_VERSION } from '@aztec/world-state';
|
|
10
|
+
|
|
11
|
+
import { mkdtemp } from 'fs/promises';
|
|
12
|
+
import { tmpdir } from 'os';
|
|
13
|
+
import { join } from 'path';
|
|
14
|
+
|
|
15
|
+
import { buildSnapshotMetadata } from './build-snapshot-metadata.js';
|
|
16
|
+
import { createBackups } from './create-backups.js';
|
|
17
|
+
|
|
18
|
+
/**
 * Config for uploading a snapshot: chain identifiers that label the snapshot in the index,
 * plus the local data directory used to stage temporary backups.
 */
export type UploadSnapshotConfig = Pick<ChainConfig, 'l1ChainId' | 'rollupVersion'> &
  Pick<DataStoreConfig, 'dataDirectory'>;
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* Pauses the archiver and world state sync, creates backups of the archiver and world state lmdb environments,
|
|
23
|
+
* and uploads them to the specified location. Location must be a URL supported by our file store (eg `gs://bucketname/path`).
|
|
24
|
+
*/
|
|
25
|
+
export async function uploadSnapshot(
|
|
26
|
+
location: string,
|
|
27
|
+
archiver: Archiver,
|
|
28
|
+
worldState: WorldStateSynchronizer,
|
|
29
|
+
config: UploadSnapshotConfig,
|
|
30
|
+
log: Logger,
|
|
31
|
+
) {
|
|
32
|
+
const store = await createFileStore(location);
|
|
33
|
+
if (!store) {
|
|
34
|
+
throw new Error(`Failed to create file store for snapshot upload for location ${location}.`);
|
|
35
|
+
}
|
|
36
|
+
|
|
37
|
+
const backupDir = await mkdtemp(join(config.dataDirectory ?? tmpdir(), 'snapshot-'));
|
|
38
|
+
|
|
39
|
+
try {
|
|
40
|
+
const paths = await createBackups(backupDir, archiver, worldState, log);
|
|
41
|
+
const versions = { archiver: ARCHIVER_DB_VERSION, worldState: WORLD_STATE_DB_VERSION };
|
|
42
|
+
const metadata = await buildSnapshotMetadata(archiver, config);
|
|
43
|
+
log.info(`Uploading snapshot to ${location}`, { snapshot: metadata });
|
|
44
|
+
const snapshot = await uploadSnapshotToIndex(paths, versions, metadata, store);
|
|
45
|
+
log.info(`Snapshot uploaded successfully`, { snapshot });
|
|
46
|
+
} finally {
|
|
47
|
+
log.info(`Cleaning up backup dir ${backupDir}`);
|
|
48
|
+
await tryRmDir(backupDir, log);
|
|
49
|
+
}
|
|
50
|
+
}
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import { type ConfigMappingsType, booleanConfigHelper } from '@aztec/foundation/config';
|
|
2
|
+
|
|
3
|
+
/** Configuration options shared across node components. */
export type SharedNodeConfig = {
  /** Whether to populate the genesis state with initial fee juice for the test accounts */
  testAccounts: boolean;
  /** Whether to populate the genesis state with initial fee juice for the sponsored FPC */
  sponsoredFPC: boolean;
  /** Sync mode: full to always sync via L1, snapshot to download a snapshot if there is no local data, force-snapshot to download even if there is local data. */
  syncMode: 'full' | 'snapshot' | 'force-snapshot';
  /** Base URLs for snapshots index. Index file will be searched at `SNAPSHOTS_BASE_URL/aztec-L1_CHAIN_ID-VERSION-ROLLUP_ADDRESS/index.json` */
  snapshotsUrls?: string[];

  /**
   * Auto update mode. `disabled` ignores remote update signals entirely; the remaining modes
   * (`notify`, `config`, `config-and-version`) act on them with increasing scope, potentially
   * shutting this node down.
   * NOTE(review): the previous comment described modes `enabled`/`log` that do not exist in this
   * union — confirm the exact semantics of `notify`/`config`/`config-and-version` with the updater.
   */
  autoUpdate?: 'disabled' | 'notify' | 'config' | 'config-and-version';
  /** The base URL against which to check for updates */
  autoUpdateUrl?: string;

  /** URL of the Web3Signer instance */
  web3SignerUrl?: string;
};
|
|
21
|
+
|
|
22
|
+
/** Environment-variable mappings for SharedNodeConfig, consumed by the foundation config loader. */
export const sharedNodeConfigMappings: ConfigMappingsType<SharedNodeConfig> = {
  testAccounts: {
    env: 'TEST_ACCOUNTS',
    description: 'Whether to populate the genesis state with initial fee juice for the test accounts.',
    ...booleanConfigHelper(),
  },
  sponsoredFPC: {
    env: 'SPONSORED_FPC',
    description: 'Whether to populate the genesis state with initial fee juice for the sponsored FPC.',
    // Defaults to false when the env var is unset.
    ...booleanConfigHelper(false),
  },
  syncMode: {
    env: 'SYNC_MODE',
    description:
      'Set sync mode to `full` to always sync via L1, `snapshot` to download a snapshot if there is no local data, `force-snapshot` to download even if there is local data.',
    defaultValue: 'snapshot',
  },
  snapshotsUrls: {
    env: 'SYNC_SNAPSHOTS_URLS',
    description: 'Base URLs for snapshots index, comma-separated.',
    // Comma-separated list; whitespace around entries is tolerated, empty entries dropped.
    parseEnv: (val: string) =>
      val
        .split(',')
        .map(url => url.trim())
        .filter(url => url.length > 0),
    // Legacy singular env var is still honored as a fallback.
    fallback: ['SYNC_SNAPSHOTS_URL'],
    defaultValue: [],
  },
  autoUpdate: {
    env: 'AUTO_UPDATE',
    description: 'The auto update mode for this node',
    defaultValue: 'disabled',
  },
  autoUpdateUrl: {
    env: 'AUTO_UPDATE_URL',
    description: 'Base URL to check for updates',
  },
  web3SignerUrl: {
    env: 'WEB3_SIGNER_URL',
    description: 'URL of the Web3Signer instance',
    parseEnv: (val: string) => val.trim(),
  },
};
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from './l1_tx_utils.js';
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
import {
|
|
2
|
+
createL1TxUtilsFromEthSigner as createL1TxUtilsFromEthSignerBase,
|
|
3
|
+
createL1TxUtilsFromViemWallet as createL1TxUtilsFromViemWalletBase,
|
|
4
|
+
} from '@aztec/ethereum';
|
|
5
|
+
import type { EthSigner, ExtendedViemWalletClient, L1TxUtilsConfig, ViemClient } from '@aztec/ethereum';
|
|
6
|
+
import {
|
|
7
|
+
createL1TxUtilsWithBlobsFromEthSigner as createL1TxUtilsWithBlobsFromEthSignerBase,
|
|
8
|
+
createL1TxUtilsWithBlobsFromViemWallet as createL1TxUtilsWithBlobsFromViemWalletBase,
|
|
9
|
+
} from '@aztec/ethereum/l1-tx-utils-with-blobs';
|
|
10
|
+
import { omit } from '@aztec/foundation/collection';
|
|
11
|
+
import { createLogger } from '@aztec/foundation/log';
|
|
12
|
+
import type { DateProvider } from '@aztec/foundation/timer';
|
|
13
|
+
import type { DataStoreConfig } from '@aztec/kv-store/config';
|
|
14
|
+
import { createStore } from '@aztec/kv-store/lmdb-v2';
|
|
15
|
+
import type { TelemetryClient } from '@aztec/telemetry-client';
|
|
16
|
+
|
|
17
|
+
import type { L1TxScope } from '../metrics/l1_tx_metrics.js';
|
|
18
|
+
import { L1TxMetrics } from '../metrics/l1_tx_metrics.js';
|
|
19
|
+
import { L1TxStore } from '../stores/l1_tx_store.js';
|
|
20
|
+
|
|
21
|
+
// Name of the kv store backing the shared L1 tx tracking database.
const L1_TX_STORE_NAME = 'l1-tx-utils';
|
|
22
|
+
|
|
23
|
+
/**
|
|
24
|
+
* Creates shared dependencies (logger, store, metrics) for L1TxUtils instances.
|
|
25
|
+
*/
|
|
26
|
+
async function createSharedDeps(
|
|
27
|
+
config: DataStoreConfig & { scope?: L1TxScope },
|
|
28
|
+
deps: {
|
|
29
|
+
telemetry: TelemetryClient;
|
|
30
|
+
logger?: ReturnType<typeof createLogger>;
|
|
31
|
+
dateProvider?: DateProvider;
|
|
32
|
+
},
|
|
33
|
+
) {
|
|
34
|
+
const logger = deps.logger ?? createLogger('l1-tx-utils');
|
|
35
|
+
|
|
36
|
+
// Note that we do NOT bind them to the rollup address, since we still need to
|
|
37
|
+
// monitor and cancel txs for previous rollups to free up our nonces.
|
|
38
|
+
const noRollupConfig = omit(config, 'l1Contracts');
|
|
39
|
+
const kvStore = await createStore(L1_TX_STORE_NAME, L1TxStore.SCHEMA_VERSION, noRollupConfig, logger);
|
|
40
|
+
const store = new L1TxStore(kvStore, logger);
|
|
41
|
+
|
|
42
|
+
const meter = deps.telemetry.getMeter('L1TxUtils');
|
|
43
|
+
const metrics = new L1TxMetrics(meter, config.scope ?? 'other', logger);
|
|
44
|
+
|
|
45
|
+
return { logger, store, metrics, dateProvider: deps.dateProvider };
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
/**
|
|
49
|
+
* Creates L1TxUtils with blobs from multiple Viem wallets, sharing store and metrics.
|
|
50
|
+
*/
|
|
51
|
+
export async function createL1TxUtilsWithBlobsFromViemWallet(
|
|
52
|
+
clients: ExtendedViemWalletClient[],
|
|
53
|
+
config: DataStoreConfig & Partial<L1TxUtilsConfig> & { debugMaxGasLimit?: boolean; scope?: L1TxScope },
|
|
54
|
+
deps: {
|
|
55
|
+
telemetry: TelemetryClient;
|
|
56
|
+
logger?: ReturnType<typeof createLogger>;
|
|
57
|
+
dateProvider?: DateProvider;
|
|
58
|
+
},
|
|
59
|
+
) {
|
|
60
|
+
const sharedDeps = await createSharedDeps(config, deps);
|
|
61
|
+
|
|
62
|
+
return clients.map(client =>
|
|
63
|
+
createL1TxUtilsWithBlobsFromViemWalletBase(client, sharedDeps, config, config.debugMaxGasLimit),
|
|
64
|
+
);
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
/**
|
|
68
|
+
* Creates L1TxUtils with blobs from multiple EthSigners, sharing store and metrics.
|
|
69
|
+
*/
|
|
70
|
+
export async function createL1TxUtilsWithBlobsFromEthSigner(
|
|
71
|
+
client: ViemClient,
|
|
72
|
+
signers: EthSigner[],
|
|
73
|
+
config: DataStoreConfig & Partial<L1TxUtilsConfig> & { debugMaxGasLimit?: boolean; scope?: L1TxScope },
|
|
74
|
+
deps: {
|
|
75
|
+
telemetry: TelemetryClient;
|
|
76
|
+
logger?: ReturnType<typeof createLogger>;
|
|
77
|
+
dateProvider?: DateProvider;
|
|
78
|
+
},
|
|
79
|
+
) {
|
|
80
|
+
const sharedDeps = await createSharedDeps(config, deps);
|
|
81
|
+
|
|
82
|
+
return signers.map(signer =>
|
|
83
|
+
createL1TxUtilsWithBlobsFromEthSignerBase(client, signer, sharedDeps, config, config.debugMaxGasLimit),
|
|
84
|
+
);
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
/**
|
|
88
|
+
* Creates L1TxUtils (without blobs) from multiple Viem wallets, sharing store and metrics.
|
|
89
|
+
*/
|
|
90
|
+
export async function createL1TxUtilsFromViemWalletWithStore(
|
|
91
|
+
clients: ExtendedViemWalletClient[],
|
|
92
|
+
config: DataStoreConfig & Partial<L1TxUtilsConfig> & { debugMaxGasLimit?: boolean; scope?: L1TxScope },
|
|
93
|
+
deps: {
|
|
94
|
+
telemetry: TelemetryClient;
|
|
95
|
+
logger?: ReturnType<typeof createLogger>;
|
|
96
|
+
dateProvider?: DateProvider;
|
|
97
|
+
scope?: L1TxScope;
|
|
98
|
+
},
|
|
99
|
+
) {
|
|
100
|
+
const sharedDeps = await createSharedDeps(config, deps);
|
|
101
|
+
|
|
102
|
+
return clients.map(client => createL1TxUtilsFromViemWalletBase(client, sharedDeps, config));
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
/**
|
|
106
|
+
* Creates L1TxUtils (without blobs) from multiple EthSigners, sharing store and metrics.
|
|
107
|
+
*/
|
|
108
|
+
export async function createL1TxUtilsFromEthSignerWithStore(
|
|
109
|
+
client: ViemClient,
|
|
110
|
+
signers: EthSigner[],
|
|
111
|
+
config: DataStoreConfig & Partial<L1TxUtilsConfig> & { debugMaxGasLimit?: boolean; scope?: L1TxScope },
|
|
112
|
+
deps: {
|
|
113
|
+
telemetry: TelemetryClient;
|
|
114
|
+
logger?: ReturnType<typeof createLogger>;
|
|
115
|
+
dateProvider?: DateProvider;
|
|
116
|
+
scope?: L1TxScope;
|
|
117
|
+
},
|
|
118
|
+
) {
|
|
119
|
+
const sharedDeps = await createSharedDeps(config, deps);
|
|
120
|
+
|
|
121
|
+
return signers.map(signer => createL1TxUtilsFromEthSignerBase(client, signer, sharedDeps, config));
|
|
122
|
+
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from './l1_tx_metrics.js';
|