@aztec/node-lib 0.82.3-nightly.20250330
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -0
- package/dest/actions/index.d.ts +3 -0
- package/dest/actions/index.d.ts.map +1 -0
- package/dest/actions/index.js +2 -0
- package/dest/actions/snapshot-sync.d.ts +12 -0
- package/dest/actions/snapshot-sync.d.ts.map +1 -0
- package/dest/actions/snapshot-sync.js +155 -0
- package/dest/actions/upload-snapshot.d.ts +13 -0
- package/dest/actions/upload-snapshot.d.ts.map +1 -0
- package/dest/actions/upload-snapshot.js +86 -0
- package/dest/config/index.d.ts +13 -0
- package/dest/config/index.d.ts.map +1 -0
- package/dest/config/index.js +22 -0
- package/package.json +93 -0
- package/src/actions/index.ts +2 -0
- package/src/actions/snapshot-sync.ts +198 -0
- package/src/actions/upload-snapshot.ts +102 -0
- package/src/config/index.ts +35 -0
package/README.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/actions/index.ts"],"names":[],"mappings":"AAAA,cAAc,oBAAoB,CAAC;AACnC,cAAc,sBAAsB,CAAC"}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import { type ArchiverConfig } from '@aztec/archiver';
|
|
2
|
+
import { type EthereumClientConfig } from '@aztec/ethereum';
|
|
3
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
4
|
+
import type { DataStoreConfig } from '@aztec/kv-store/config';
|
|
5
|
+
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
6
|
+
import type { SharedNodeConfig } from '../config/index.js';
|
|
7
|
+
type SnapshotSyncConfig = Pick<SharedNodeConfig, 'syncMode' | 'snapshotsUrl'> & Pick<ChainConfig, 'l1ChainId' | 'version'> & Pick<ArchiverConfig, 'archiverStoreMapSizeKb' | 'maxLogs'> & Required<DataStoreConfig> & EthereumClientConfig & {
|
|
8
|
+
minL1BlocksToTriggerReplace?: number;
|
|
9
|
+
};
|
|
10
|
+
export declare function trySnapshotSync(config: SnapshotSyncConfig, log: Logger): Promise<boolean>;
|
|
11
|
+
export {};
|
|
12
|
+
//# sourceMappingURL=snapshot-sync.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"snapshot-sync.d.ts","sourceRoot":"","sources":["../../src/actions/snapshot-sync.ts"],"names":[],"mappings":"AAAA,OAAO,EAGL,KAAK,cAAc,EAGpB,MAAM,iBAAiB,CAAC;AAEzB,OAAO,EAAE,KAAK,oBAAoB,EAAmB,MAAM,iBAAiB,CAAC;AAG7E,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,uBAAuB,CAAC;AACpD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,wBAAwB,CAAC;AAC9D,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,sBAAsB,CAAC;AAexD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,oBAAoB,CAAC;AAK3D,KAAK,kBAAkB,GAAG,IAAI,CAAC,gBAAgB,EAAE,UAAU,GAAG,cAAc,CAAC,GAC3E,IAAI,CAAC,WAAW,EAAE,WAAW,GAAG,SAAS,CAAC,GAC1C,IAAI,CAAC,cAAc,EAAE,wBAAwB,GAAG,SAAS,CAAC,GAC1D,QAAQ,CAAC,eAAe,CAAC,GACzB,oBAAoB,GAAG;IACrB,2BAA2B,CAAC,EAAE,MAAM,CAAC;CACtC,CAAC;AAEJ,wBAAsB,eAAe,CAAC,MAAM,EAAE,kBAAkB,EAAE,GAAG,EAAE,MAAM,oBAoJ5E"}
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
import { ARCHIVER_DB_VERSION, ARCHIVER_STORE_NAME, createArchiverStore } from '@aztec/archiver';
|
|
2
|
+
import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants';
|
|
3
|
+
import { getPublicClient } from '@aztec/ethereum';
|
|
4
|
+
import { tryRmDir } from '@aztec/foundation/fs';
|
|
5
|
+
import { DatabaseVersionManager } from '@aztec/stdlib/database-version';
|
|
6
|
+
import { createReadOnlyFileStore } from '@aztec/stdlib/file-store';
|
|
7
|
+
import { downloadSnapshot, getLatestSnapshotMetadata, makeSnapshotLocalPaths } from '@aztec/stdlib/snapshots';
|
|
8
|
+
import { NATIVE_WORLD_STATE_DBS, WORLD_STATE_DB_VERSION, WORLD_STATE_DIR } from '@aztec/world-state';
|
|
9
|
+
import { mkdir, mkdtemp, rename } from 'fs/promises';
|
|
10
|
+
import { join } from 'path';
|
|
11
|
+
// Half day worth of L1 blocks
|
|
12
|
+
const MIN_L1_BLOCKS_TO_TRIGGER_REPLACE = 86400 / 2 / 12;
|
|
13
|
+
export async function trySnapshotSync(config, log) {
|
|
14
|
+
let archiverStore;
|
|
15
|
+
let downloadDir;
|
|
16
|
+
try {
|
|
17
|
+
const { syncMode, snapshotsUrl, dataDirectory, l1ChainId, version: l2Version, l1Contracts } = config;
|
|
18
|
+
if (syncMode === 'full') {
|
|
19
|
+
log.debug('Snapshot sync is disabled. Running full sync.', {
|
|
20
|
+
syncMode: syncMode
|
|
21
|
+
});
|
|
22
|
+
return false;
|
|
23
|
+
}
|
|
24
|
+
if (!snapshotsUrl) {
|
|
25
|
+
log.verbose('Snapshot sync is disabled. No snapshots URL provided.');
|
|
26
|
+
return false;
|
|
27
|
+
}
|
|
28
|
+
if (!dataDirectory) {
|
|
29
|
+
log.verbose('Snapshot sync is disabled. No local data directory defined.');
|
|
30
|
+
return false;
|
|
31
|
+
}
|
|
32
|
+
let fileStore;
|
|
33
|
+
try {
|
|
34
|
+
fileStore = await createReadOnlyFileStore(snapshotsUrl, log);
|
|
35
|
+
} catch (err) {
|
|
36
|
+
log.error(`Invalid config for downloading snapshots`, err);
|
|
37
|
+
return false;
|
|
38
|
+
}
|
|
39
|
+
// Create an archiver store to check the current sync state
|
|
40
|
+
archiverStore = await createArchiverStore(config);
|
|
41
|
+
const minL1BlocksToTriggerReplace = config.minL1BlocksToTriggerReplace ?? MIN_L1_BLOCKS_TO_TRIGGER_REPLACE;
|
|
42
|
+
const archiverL2BlockNumber = await archiverStore.getSynchedL2BlockNumber();
|
|
43
|
+
if (syncMode === 'snapshot' && archiverL2BlockNumber !== undefined && archiverL2BlockNumber >= INITIAL_L2_BLOCK_NUM) {
|
|
44
|
+
log.verbose(`Skipping non-forced snapshot sync as archiver is already synced to L2 block ${archiverL2BlockNumber}.`);
|
|
45
|
+
return false;
|
|
46
|
+
}
|
|
47
|
+
const currentL1BlockNumber = await getPublicClient(config).getBlockNumber();
|
|
48
|
+
const archiverL1BlockNumber = await archiverStore.getSynchPoint().then((s)=>s.blocksSynchedTo);
|
|
49
|
+
if (archiverL1BlockNumber && currentL1BlockNumber - archiverL1BlockNumber < minL1BlocksToTriggerReplace) {
|
|
50
|
+
log.verbose(`Skipping snapshot sync as archiver is less than ${currentL1BlockNumber - archiverL1BlockNumber} L1 blocks behind.`, {
|
|
51
|
+
archiverL1BlockNumber,
|
|
52
|
+
currentL1BlockNumber,
|
|
53
|
+
minL1BlocksToTriggerReplace
|
|
54
|
+
});
|
|
55
|
+
return false;
|
|
56
|
+
}
|
|
57
|
+
const indexMetadata = {
|
|
58
|
+
l1ChainId,
|
|
59
|
+
l2Version,
|
|
60
|
+
rollupAddress: l1Contracts.rollupAddress
|
|
61
|
+
};
|
|
62
|
+
let snapshot;
|
|
63
|
+
try {
|
|
64
|
+
snapshot = await getLatestSnapshotMetadata(indexMetadata, fileStore);
|
|
65
|
+
} catch (err) {
|
|
66
|
+
log.error(`Failed to get latest snapshot metadata. Skipping snapshot sync.`, err, {
|
|
67
|
+
...indexMetadata,
|
|
68
|
+
snapshotsUrl
|
|
69
|
+
});
|
|
70
|
+
return false;
|
|
71
|
+
}
|
|
72
|
+
if (!snapshot) {
|
|
73
|
+
log.verbose(`No snapshot found. Skipping snapshot sync.`, {
|
|
74
|
+
...indexMetadata,
|
|
75
|
+
snapshotsUrl
|
|
76
|
+
});
|
|
77
|
+
return false;
|
|
78
|
+
}
|
|
79
|
+
if (snapshot.schemaVersions.archiver !== ARCHIVER_DB_VERSION) {
|
|
80
|
+
log.warn(`Skipping snapshot sync as last snapshot has schema version ${snapshot.schemaVersions.archiver} but expected ${ARCHIVER_DB_VERSION}.`, snapshot);
|
|
81
|
+
return false;
|
|
82
|
+
}
|
|
83
|
+
if (snapshot.schemaVersions.worldState !== WORLD_STATE_DB_VERSION) {
|
|
84
|
+
log.warn(`Skipping snapshot sync as last snapshot has world state schema version ${snapshot.schemaVersions.worldState} but we expected ${WORLD_STATE_DB_VERSION}.`, snapshot);
|
|
85
|
+
return false;
|
|
86
|
+
}
|
|
87
|
+
if (archiverL1BlockNumber && snapshot.l1BlockNumber < archiverL1BlockNumber) {
|
|
88
|
+
log.verbose(`Skipping snapshot sync since local archiver is at L1 block ${archiverL1BlockNumber} which is further than last snapshot at ${snapshot.l1BlockNumber}`, {
|
|
89
|
+
snapshot,
|
|
90
|
+
archiverL1BlockNumber
|
|
91
|
+
});
|
|
92
|
+
return false;
|
|
93
|
+
}
|
|
94
|
+
if (archiverL1BlockNumber && snapshot.l1BlockNumber - Number(archiverL1BlockNumber) < minL1BlocksToTriggerReplace) {
|
|
95
|
+
log.verbose(`Skipping snapshot sync as archiver is less than ${snapshot.l1BlockNumber - Number(archiverL1BlockNumber)} L1 blocks behind latest snapshot.`, {
|
|
96
|
+
snapshot,
|
|
97
|
+
archiverL1BlockNumber
|
|
98
|
+
});
|
|
99
|
+
return false;
|
|
100
|
+
}
|
|
101
|
+
// Green light. Download the snapshot to a temp location.
|
|
102
|
+
downloadDir = await mkdtemp(join(dataDirectory, 'download-'));
|
|
103
|
+
const downloadPaths = makeSnapshotLocalPaths(downloadDir);
|
|
104
|
+
log.info(`Downloading snapshot at L1 block ${snapshot.l1BlockNumber} L2 block ${snapshot.l2BlockNumber} from ${snapshotsUrl} to ${downloadDir} for snapshot sync`, {
|
|
105
|
+
snapshot,
|
|
106
|
+
downloadPaths
|
|
107
|
+
});
|
|
108
|
+
await downloadSnapshot(snapshot, downloadPaths, fileStore);
|
|
109
|
+
log.info(`Snapshot downloaded at ${downloadDir}`, {
|
|
110
|
+
snapshotsUrl,
|
|
111
|
+
snapshot,
|
|
112
|
+
downloadPaths
|
|
113
|
+
});
|
|
114
|
+
// If download was successful, close the archiver store, clear lock and version, and move download there
|
|
115
|
+
await archiverStore.close();
|
|
116
|
+
archiverStore = undefined;
|
|
117
|
+
const archiverPath = join(dataDirectory, ARCHIVER_STORE_NAME);
|
|
118
|
+
await prepareTarget(archiverPath, ARCHIVER_DB_VERSION, l1Contracts.rollupAddress);
|
|
119
|
+
await rename(downloadPaths.archiver, join(archiverPath, 'data.mdb'));
|
|
120
|
+
log.info(`Archiver database set up from snapshot`, {
|
|
121
|
+
path: archiverPath
|
|
122
|
+
});
|
|
123
|
+
// Same for the world state dbs, only that we do not close them, since we assume they are not yet in use
|
|
124
|
+
const worldStateBasePath = join(dataDirectory, WORLD_STATE_DIR);
|
|
125
|
+
await prepareTarget(worldStateBasePath, WORLD_STATE_DB_VERSION, l1Contracts.rollupAddress);
|
|
126
|
+
for (const [name, dir] of NATIVE_WORLD_STATE_DBS){
|
|
127
|
+
const path = join(worldStateBasePath, dir);
|
|
128
|
+
await mkdir(path, {
|
|
129
|
+
recursive: true
|
|
130
|
+
});
|
|
131
|
+
await rename(downloadPaths[name], join(path, 'data.mdb'));
|
|
132
|
+
log.info(`World state database ${name} set up from snapshot`, {
|
|
133
|
+
path
|
|
134
|
+
});
|
|
135
|
+
}
|
|
136
|
+
log.info(`Snapshot synced to L1 block ${snapshot.l1BlockNumber} L2 block ${snapshot.l2BlockNumber}`, {
|
|
137
|
+
snapshot
|
|
138
|
+
});
|
|
139
|
+
} finally{
|
|
140
|
+
if (archiverStore) {
|
|
141
|
+
log.verbose(`Closing temporary archiver data store`);
|
|
142
|
+
await archiverStore.close();
|
|
143
|
+
}
|
|
144
|
+
if (downloadDir) {
|
|
145
|
+
await tryRmDir(downloadDir, log);
|
|
146
|
+
}
|
|
147
|
+
}
|
|
148
|
+
return true;
|
|
149
|
+
}
|
|
150
|
+
/** Deletes target dir and writes the new version file. */ async function prepareTarget(target, schemaVersion, rollupAddress) {
|
|
151
|
+
const noOpen = ()=>Promise.resolve(undefined);
|
|
152
|
+
const versionManager = new DatabaseVersionManager(schemaVersion, rollupAddress, target, noOpen);
|
|
153
|
+
await versionManager.resetDataDirectory();
|
|
154
|
+
await versionManager.writeVersion();
|
|
155
|
+
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { type Archiver } from '@aztec/archiver';
|
|
2
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
3
|
+
import type { DataStoreConfig } from '@aztec/kv-store/config';
|
|
4
|
+
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
5
|
+
import type { WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server';
|
|
6
|
+
type UploadSnapshotConfig = Pick<ChainConfig, 'l1ChainId' | 'version'> & Pick<DataStoreConfig, 'dataDirectory'>;
|
|
7
|
+
/**
|
|
8
|
+
* Pauses the archiver and world state sync, creates backups of the archiver and world state lmdb environments,
|
|
9
|
+
* and uploads them to the specified location. Location must be a URL supported by our file store (eg `gs://bucketname/path`).
|
|
10
|
+
*/
|
|
11
|
+
export declare function uploadSnapshot(location: string, archiver: Archiver, worldState: WorldStateSynchronizer, config: UploadSnapshotConfig, log: Logger): Promise<void>;
|
|
12
|
+
export {};
|
|
13
|
+
//# sourceMappingURL=upload-snapshot.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"upload-snapshot.d.ts","sourceRoot":"","sources":["../../src/actions/upload-snapshot.ts"],"names":[],"mappings":"AAAA,OAAO,EAAuB,KAAK,QAAQ,EAAE,MAAM,iBAAiB,CAAC;AAErE,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,uBAAuB,CAAC;AACpD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,wBAAwB,CAAC;AAC9D,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,sBAAsB,CAAC;AAExD,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,iCAAiC,CAAC;AAU9E,KAAK,oBAAoB,GAAG,IAAI,CAAC,WAAW,EAAE,WAAW,GAAG,SAAS,CAAC,GAAG,IAAI,CAAC,eAAe,EAAE,eAAe,CAAC,CAAC;AAEhH;;;GAGG;AACH,wBAAsB,cAAc,CAClC,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,QAAQ,EAClB,UAAU,EAAE,sBAAsB,EAClC,MAAM,EAAE,oBAAoB,EAC5B,GAAG,EAAE,MAAM,iBAoBZ"}
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import { ARCHIVER_DB_VERSION } from '@aztec/archiver';
|
|
2
|
+
import { tryRmDir } from '@aztec/foundation/fs';
|
|
3
|
+
import { createFileStore } from '@aztec/stdlib/file-store';
|
|
4
|
+
import { uploadSnapshot as uploadSnapshotToStore } from '@aztec/stdlib/snapshots';
|
|
5
|
+
import { WORLD_STATE_DB_VERSION } from '@aztec/world-state';
|
|
6
|
+
import { existsSync } from 'fs';
|
|
7
|
+
import { mkdtemp } from 'fs/promises';
|
|
8
|
+
import { tmpdir } from 'os';
|
|
9
|
+
import { join } from 'path';
|
|
10
|
+
/**
|
|
11
|
+
* Pauses the archiver and world state sync, creates backups of the archiver and world state lmdb environments,
|
|
12
|
+
* and uploads them to the specified location. Location must be a URL supported by our file store (eg `gs://bucketname/path`).
|
|
13
|
+
*/ export async function uploadSnapshot(location, archiver, worldState, config, log) {
|
|
14
|
+
const store = await createFileStore(location);
|
|
15
|
+
if (!store) {
|
|
16
|
+
throw new Error(`Failed to create file store for snapshot upload for location ${location}.`);
|
|
17
|
+
}
|
|
18
|
+
const backupDir = await mkdtemp(join(config.dataDirectory ?? tmpdir(), 'snapshot-'));
|
|
19
|
+
try {
|
|
20
|
+
const paths = await createBackups(backupDir, archiver, worldState, log);
|
|
21
|
+
const versions = {
|
|
22
|
+
archiver: ARCHIVER_DB_VERSION,
|
|
23
|
+
worldState: WORLD_STATE_DB_VERSION
|
|
24
|
+
};
|
|
25
|
+
const metadata = await buildSnapshotMetadata(archiver, config);
|
|
26
|
+
log.info(`Uploading snapshot to ${location}`, {
|
|
27
|
+
snapshot: metadata
|
|
28
|
+
});
|
|
29
|
+
const snapshot = await uploadSnapshotToStore(paths, versions, metadata, store);
|
|
30
|
+
log.info(`Snapshot uploaded successfully`, {
|
|
31
|
+
snapshot
|
|
32
|
+
});
|
|
33
|
+
} finally{
|
|
34
|
+
log.info(`Cleaning up backup dir ${backupDir}`);
|
|
35
|
+
await tryRmDir(backupDir, log);
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
async function buildSnapshotMetadata(archiver, config) {
|
|
39
|
+
const [rollupAddress, l1BlockNumber, { latest }] = await Promise.all([
|
|
40
|
+
archiver.getRollupAddress(),
|
|
41
|
+
archiver.getL1BlockNumber(),
|
|
42
|
+
archiver.getL2Tips()
|
|
43
|
+
]);
|
|
44
|
+
const { number: l2BlockNumber, hash: l2BlockHash } = latest;
|
|
45
|
+
if (!l2BlockHash) {
|
|
46
|
+
throw new Error(`Failed to get L2 block hash from archiver.`);
|
|
47
|
+
}
|
|
48
|
+
return {
|
|
49
|
+
l1ChainId: config.l1ChainId,
|
|
50
|
+
l2Version: config.version,
|
|
51
|
+
rollupAddress,
|
|
52
|
+
l2BlockNumber,
|
|
53
|
+
l2BlockHash,
|
|
54
|
+
l1BlockNumber: Number(l1BlockNumber)
|
|
55
|
+
};
|
|
56
|
+
}
|
|
57
|
+
async function createBackups(backupDir, archiver, worldState, log) {
|
|
58
|
+
try {
|
|
59
|
+
log.info(`Pausing archiver and world state sync to start snapshot upload`);
|
|
60
|
+
await archiver.stop();
|
|
61
|
+
await worldState.stopSync();
|
|
62
|
+
log.info(`Creating backups of lmdb environments to ${backupDir}`);
|
|
63
|
+
const [archiverPath, worldStatePaths] = await Promise.all([
|
|
64
|
+
archiver.backupTo(join(backupDir, 'archiver')),
|
|
65
|
+
worldState.backupTo(join(backupDir, 'world-state'))
|
|
66
|
+
]);
|
|
67
|
+
const paths = {
|
|
68
|
+
...worldStatePaths,
|
|
69
|
+
archiver: archiverPath
|
|
70
|
+
};
|
|
71
|
+
const missing = Object.entries(paths).filter(([_key, path])=>!path || !existsSync(path));
|
|
72
|
+
if (missing.length > 0) {
|
|
73
|
+
throw new Error(`Missing backup files: ${missing.map(([key, path])=>`${path} (${key})`).join(', ')}`);
|
|
74
|
+
}
|
|
75
|
+
log.info(`Data stores backed up to ${backupDir}`, {
|
|
76
|
+
paths
|
|
77
|
+
});
|
|
78
|
+
return paths;
|
|
79
|
+
} catch (err) {
|
|
80
|
+
throw new Error(`Error creating backups for snapshot upload: ${err}`);
|
|
81
|
+
} finally{
|
|
82
|
+
log.info(`Resuming archiver and world state sync`);
|
|
83
|
+
worldState.resumeSync();
|
|
84
|
+
archiver.resume();
|
|
85
|
+
}
|
|
86
|
+
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import { type ConfigMappingsType } from '@aztec/foundation/config';
|
|
2
|
+
export type SharedNodeConfig = {
|
|
3
|
+
/** Whether to populate the genesis state with initial fee juice for the test accounts */
|
|
4
|
+
testAccounts: boolean;
|
|
5
|
+
/** Whether to populate the genesis state with initial fee juice for the sponsored FPC */
|
|
6
|
+
sponsoredFPC: boolean;
|
|
7
|
+
/** Sync mode: full to always sync via L1, snapshot to download a snapshot if there is no local data, force-snapshot to download even if there is local data. */
|
|
8
|
+
syncMode: 'full' | 'snapshot' | 'force-snapshot';
|
|
9
|
+
/** Base URL for snapshots index. Index file will be searched at `SNAPSHOTS_BASE_URL/aztec-L1_CHAIN_ID-VERSION-ROLLUP_ADDRESS/index.json` */
|
|
10
|
+
snapshotsUrl?: string;
|
|
11
|
+
};
|
|
12
|
+
export declare const sharedNodeConfigMappings: ConfigMappingsType<SharedNodeConfig>;
|
|
13
|
+
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/config/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,kBAAkB,EAAuB,MAAM,0BAA0B,CAAC;AAExF,MAAM,MAAM,gBAAgB,GAAG;IAC7B,yFAAyF;IACzF,YAAY,EAAE,OAAO,CAAC;IACtB,yFAAyF;IACzF,YAAY,EAAE,OAAO,CAAC;IACtB,gKAAgK;IAChK,QAAQ,EAAE,MAAM,GAAG,UAAU,GAAG,gBAAgB,CAAC;IACjD,4IAA4I;IAC5I,YAAY,CAAC,EAAE,MAAM,CAAC;CACvB,CAAC;AAEF,eAAO,MAAM,wBAAwB,EAAE,kBAAkB,CAAC,gBAAgB,CAqBzE,CAAC"}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { booleanConfigHelper } from '@aztec/foundation/config';
|
|
2
|
+
export const sharedNodeConfigMappings = {
|
|
3
|
+
testAccounts: {
|
|
4
|
+
env: 'TEST_ACCOUNTS',
|
|
5
|
+
description: 'Whether to populate the genesis state with initial fee juice for the test accounts.',
|
|
6
|
+
...booleanConfigHelper()
|
|
7
|
+
},
|
|
8
|
+
sponsoredFPC: {
|
|
9
|
+
env: 'SPONSORED_FPC',
|
|
10
|
+
description: 'Whether to populate the genesis state with initial fee juice for the sponsored FPC.',
|
|
11
|
+
...booleanConfigHelper(false)
|
|
12
|
+
},
|
|
13
|
+
syncMode: {
|
|
14
|
+
env: 'SYNC_MODE',
|
|
15
|
+
description: 'Set sync mode to `full` to always sync via L1, `snapshot` to download a snapshot if there is no local data, `force-snapshot` to download even if there is local data.',
|
|
16
|
+
defaultValue: 'snapshot'
|
|
17
|
+
},
|
|
18
|
+
snapshotsUrl: {
|
|
19
|
+
env: 'SYNC_SNAPSHOTS_URL',
|
|
20
|
+
description: 'Base URL for snapshots index.'
|
|
21
|
+
}
|
|
22
|
+
};
|
package/package.json
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@aztec/node-lib",
|
|
3
|
+
"version": "0.82.3-nightly.20250330",
|
|
4
|
+
"type": "module",
|
|
5
|
+
"exports": {
|
|
6
|
+
"./actions": "./dest/actions/index.js",
|
|
7
|
+
"./config": "./dest/config/index.js"
|
|
8
|
+
},
|
|
9
|
+
"inherits": [
|
|
10
|
+
"../package.common.json"
|
|
11
|
+
],
|
|
12
|
+
"scripts": {
|
|
13
|
+
"build": "yarn clean && tsc -b",
|
|
14
|
+
"build:dev": "tsc -b --watch",
|
|
15
|
+
"clean": "rm -rf ./dest .tsbuildinfo",
|
|
16
|
+
"formatting": "run -T prettier --check ./src && run -T eslint ./src",
|
|
17
|
+
"formatting:fix": "run -T eslint --fix ./src && run -T prettier -w ./src",
|
|
18
|
+
"bb": "node --no-warnings ./dest/bb/index.js",
|
|
19
|
+
"test": "NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --passWithNoTests --maxWorkers=${JEST_MAX_WORKERS:-8}"
|
|
20
|
+
},
|
|
21
|
+
"jest": {
|
|
22
|
+
"moduleNameMapper": {
|
|
23
|
+
"^(\\.{1,2}/.*)\\.[cm]?js$": "$1"
|
|
24
|
+
},
|
|
25
|
+
"testRegex": "./src/.*\\.test\\.(js|mjs|ts)$",
|
|
26
|
+
"rootDir": "./src",
|
|
27
|
+
"transform": {
|
|
28
|
+
"^.+\\.tsx?$": [
|
|
29
|
+
"@swc/jest",
|
|
30
|
+
{
|
|
31
|
+
"jsc": {
|
|
32
|
+
"parser": {
|
|
33
|
+
"syntax": "typescript",
|
|
34
|
+
"decorators": true
|
|
35
|
+
},
|
|
36
|
+
"transform": {
|
|
37
|
+
"decoratorVersion": "2022-03"
|
|
38
|
+
}
|
|
39
|
+
}
|
|
40
|
+
}
|
|
41
|
+
]
|
|
42
|
+
},
|
|
43
|
+
"extensionsToTreatAsEsm": [
|
|
44
|
+
".ts"
|
|
45
|
+
],
|
|
46
|
+
"reporters": [
|
|
47
|
+
"default"
|
|
48
|
+
],
|
|
49
|
+
"testTimeout": 120000,
|
|
50
|
+
"setupFiles": [
|
|
51
|
+
"../../foundation/src/jest/setup.mjs"
|
|
52
|
+
]
|
|
53
|
+
},
|
|
54
|
+
"dependencies": {
|
|
55
|
+
"@aztec/archiver": "0.82.3-nightly.20250330",
|
|
56
|
+
"@aztec/bb-prover": "0.82.3-nightly.20250330",
|
|
57
|
+
"@aztec/blob-sink": "0.82.3-nightly.20250330",
|
|
58
|
+
"@aztec/constants": "0.82.3-nightly.20250330",
|
|
59
|
+
"@aztec/epoch-cache": "0.82.3-nightly.20250330",
|
|
60
|
+
"@aztec/ethereum": "0.82.3-nightly.20250330",
|
|
61
|
+
"@aztec/foundation": "0.82.3-nightly.20250330",
|
|
62
|
+
"@aztec/kv-store": "0.82.3-nightly.20250330",
|
|
63
|
+
"@aztec/merkle-tree": "0.82.3-nightly.20250330",
|
|
64
|
+
"@aztec/p2p": "0.82.3-nightly.20250330",
|
|
65
|
+
"@aztec/protocol-contracts": "0.82.3-nightly.20250330",
|
|
66
|
+
"@aztec/prover-client": "0.82.3-nightly.20250330",
|
|
67
|
+
"@aztec/sequencer-client": "0.82.3-nightly.20250330",
|
|
68
|
+
"@aztec/simulator": "0.82.3-nightly.20250330",
|
|
69
|
+
"@aztec/stdlib": "0.82.3-nightly.20250330",
|
|
70
|
+
"@aztec/telemetry-client": "0.82.3-nightly.20250330",
|
|
71
|
+
"@aztec/validator-client": "0.82.3-nightly.20250330",
|
|
72
|
+
"@aztec/world-state": "0.82.3-nightly.20250330",
|
|
73
|
+
"tslib": "^2.4.0"
|
|
74
|
+
},
|
|
75
|
+
"devDependencies": {
|
|
76
|
+
"@jest/globals": "^29.5.0",
|
|
77
|
+
"@types/jest": "^29.5.0",
|
|
78
|
+
"@types/node": "^18.7.23",
|
|
79
|
+
"jest": "^29.5.0",
|
|
80
|
+
"jest-mock-extended": "^3.0.3",
|
|
81
|
+
"ts-node": "^10.9.1",
|
|
82
|
+
"typescript": "^5.0.4"
|
|
83
|
+
},
|
|
84
|
+
"files": [
|
|
85
|
+
"dest",
|
|
86
|
+
"src",
|
|
87
|
+
"!*.test.*"
|
|
88
|
+
],
|
|
89
|
+
"types": "./dest/index.d.ts",
|
|
90
|
+
"engines": {
|
|
91
|
+
"node": ">=18"
|
|
92
|
+
}
|
|
93
|
+
}
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
import {
|
|
2
|
+
ARCHIVER_DB_VERSION,
|
|
3
|
+
ARCHIVER_STORE_NAME,
|
|
4
|
+
type ArchiverConfig,
|
|
5
|
+
type ArchiverDataStore,
|
|
6
|
+
createArchiverStore,
|
|
7
|
+
} from '@aztec/archiver';
|
|
8
|
+
import { INITIAL_L2_BLOCK_NUM } from '@aztec/constants';
|
|
9
|
+
import { type EthereumClientConfig, getPublicClient } from '@aztec/ethereum';
|
|
10
|
+
import type { EthAddress } from '@aztec/foundation/eth-address';
|
|
11
|
+
import { tryRmDir } from '@aztec/foundation/fs';
|
|
12
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
13
|
+
import type { DataStoreConfig } from '@aztec/kv-store/config';
|
|
14
|
+
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
15
|
+
import { DatabaseVersionManager } from '@aztec/stdlib/database-version';
|
|
16
|
+
import { type ReadOnlyFileStore, createReadOnlyFileStore } from '@aztec/stdlib/file-store';
|
|
17
|
+
import {
|
|
18
|
+
type SnapshotMetadata,
|
|
19
|
+
type SnapshotsIndexMetadata,
|
|
20
|
+
downloadSnapshot,
|
|
21
|
+
getLatestSnapshotMetadata,
|
|
22
|
+
makeSnapshotLocalPaths,
|
|
23
|
+
} from '@aztec/stdlib/snapshots';
|
|
24
|
+
import { NATIVE_WORLD_STATE_DBS, WORLD_STATE_DB_VERSION, WORLD_STATE_DIR } from '@aztec/world-state';
|
|
25
|
+
|
|
26
|
+
import { mkdir, mkdtemp, rename } from 'fs/promises';
|
|
27
|
+
import { join } from 'path';
|
|
28
|
+
|
|
29
|
+
import type { SharedNodeConfig } from '../config/index.js';
|
|
30
|
+
|
|
31
|
+
// Half day worth of L1 blocks
|
|
32
|
+
const MIN_L1_BLOCKS_TO_TRIGGER_REPLACE = 86400 / 2 / 12;
|
|
33
|
+
|
|
34
|
+
type SnapshotSyncConfig = Pick<SharedNodeConfig, 'syncMode' | 'snapshotsUrl'> &
|
|
35
|
+
Pick<ChainConfig, 'l1ChainId' | 'version'> &
|
|
36
|
+
Pick<ArchiverConfig, 'archiverStoreMapSizeKb' | 'maxLogs'> &
|
|
37
|
+
Required<DataStoreConfig> &
|
|
38
|
+
EthereumClientConfig & {
|
|
39
|
+
minL1BlocksToTriggerReplace?: number;
|
|
40
|
+
};
|
|
41
|
+
|
|
42
|
+
export async function trySnapshotSync(config: SnapshotSyncConfig, log: Logger) {
|
|
43
|
+
let archiverStore: ArchiverDataStore | undefined;
|
|
44
|
+
let downloadDir: string | undefined;
|
|
45
|
+
|
|
46
|
+
try {
|
|
47
|
+
const { syncMode, snapshotsUrl, dataDirectory, l1ChainId, version: l2Version, l1Contracts } = config;
|
|
48
|
+
if (syncMode === 'full') {
|
|
49
|
+
log.debug('Snapshot sync is disabled. Running full sync.', { syncMode: syncMode });
|
|
50
|
+
return false;
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
if (!snapshotsUrl) {
|
|
54
|
+
log.verbose('Snapshot sync is disabled. No snapshots URL provided.');
|
|
55
|
+
return false;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
if (!dataDirectory) {
|
|
59
|
+
log.verbose('Snapshot sync is disabled. No local data directory defined.');
|
|
60
|
+
return false;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
let fileStore: ReadOnlyFileStore;
|
|
64
|
+
try {
|
|
65
|
+
fileStore = await createReadOnlyFileStore(snapshotsUrl, log);
|
|
66
|
+
} catch (err) {
|
|
67
|
+
log.error(`Invalid config for downloading snapshots`, err);
|
|
68
|
+
return false;
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
// Create an archiver store to check the current sync state
|
|
72
|
+
archiverStore = await createArchiverStore(config);
|
|
73
|
+
|
|
74
|
+
const minL1BlocksToTriggerReplace = config.minL1BlocksToTriggerReplace ?? MIN_L1_BLOCKS_TO_TRIGGER_REPLACE;
|
|
75
|
+
const archiverL2BlockNumber = await archiverStore.getSynchedL2BlockNumber();
|
|
76
|
+
if (
|
|
77
|
+
syncMode === 'snapshot' &&
|
|
78
|
+
archiverL2BlockNumber !== undefined &&
|
|
79
|
+
archiverL2BlockNumber >= INITIAL_L2_BLOCK_NUM
|
|
80
|
+
) {
|
|
81
|
+
log.verbose(
|
|
82
|
+
`Skipping non-forced snapshot sync as archiver is already synced to L2 block ${archiverL2BlockNumber}.`,
|
|
83
|
+
);
|
|
84
|
+
return false;
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
const currentL1BlockNumber = await getPublicClient(config).getBlockNumber();
|
|
88
|
+
const archiverL1BlockNumber = await archiverStore.getSynchPoint().then(s => s.blocksSynchedTo);
|
|
89
|
+
if (archiverL1BlockNumber && currentL1BlockNumber - archiverL1BlockNumber < minL1BlocksToTriggerReplace) {
|
|
90
|
+
log.verbose(
|
|
91
|
+
`Skipping snapshot sync as archiver is less than ${
|
|
92
|
+
currentL1BlockNumber - archiverL1BlockNumber
|
|
93
|
+
} L1 blocks behind.`,
|
|
94
|
+
{ archiverL1BlockNumber, currentL1BlockNumber, minL1BlocksToTriggerReplace },
|
|
95
|
+
);
|
|
96
|
+
return false;
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
const indexMetadata: SnapshotsIndexMetadata = { l1ChainId, l2Version, rollupAddress: l1Contracts.rollupAddress };
|
|
100
|
+
let snapshot: SnapshotMetadata | undefined;
|
|
101
|
+
try {
|
|
102
|
+
snapshot = await getLatestSnapshotMetadata(indexMetadata, fileStore);
|
|
103
|
+
} catch (err) {
|
|
104
|
+
log.error(`Failed to get latest snapshot metadata. Skipping snapshot sync.`, err, {
|
|
105
|
+
...indexMetadata,
|
|
106
|
+
snapshotsUrl,
|
|
107
|
+
});
|
|
108
|
+
return false;
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
if (!snapshot) {
|
|
112
|
+
log.verbose(`No snapshot found. Skipping snapshot sync.`, { ...indexMetadata, snapshotsUrl });
|
|
113
|
+
return false;
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
if (snapshot.schemaVersions.archiver !== ARCHIVER_DB_VERSION) {
|
|
117
|
+
log.warn(
|
|
118
|
+
`Skipping snapshot sync as last snapshot has schema version ${snapshot.schemaVersions.archiver} but expected ${ARCHIVER_DB_VERSION}.`,
|
|
119
|
+
snapshot,
|
|
120
|
+
);
|
|
121
|
+
return false;
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
if (snapshot.schemaVersions.worldState !== WORLD_STATE_DB_VERSION) {
|
|
125
|
+
log.warn(
|
|
126
|
+
`Skipping snapshot sync as last snapshot has world state schema version ${snapshot.schemaVersions.worldState} but we expected ${WORLD_STATE_DB_VERSION}.`,
|
|
127
|
+
snapshot,
|
|
128
|
+
);
|
|
129
|
+
return false;
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
if (archiverL1BlockNumber && snapshot.l1BlockNumber < archiverL1BlockNumber) {
|
|
133
|
+
log.verbose(
|
|
134
|
+
`Skipping snapshot sync since local archiver is at L1 block ${archiverL1BlockNumber} which is further than last snapshot at ${snapshot.l1BlockNumber}`,
|
|
135
|
+
{ snapshot, archiverL1BlockNumber },
|
|
136
|
+
);
|
|
137
|
+
return false;
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
if (archiverL1BlockNumber && snapshot.l1BlockNumber - Number(archiverL1BlockNumber) < minL1BlocksToTriggerReplace) {
|
|
141
|
+
log.verbose(
|
|
142
|
+
`Skipping snapshot sync as archiver is less than ${
|
|
143
|
+
snapshot.l1BlockNumber - Number(archiverL1BlockNumber)
|
|
144
|
+
} L1 blocks behind latest snapshot.`,
|
|
145
|
+
{ snapshot, archiverL1BlockNumber },
|
|
146
|
+
);
|
|
147
|
+
return false;
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
// Green light. Download the snapshot to a temp location.
|
|
151
|
+
downloadDir = await mkdtemp(join(dataDirectory, 'download-'));
|
|
152
|
+
const downloadPaths = makeSnapshotLocalPaths(downloadDir);
|
|
153
|
+
log.info(
|
|
154
|
+
`Downloading snapshot at L1 block ${snapshot.l1BlockNumber} L2 block ${snapshot.l2BlockNumber} from ${snapshotsUrl} to ${downloadDir} for snapshot sync`,
|
|
155
|
+
{ snapshot, downloadPaths },
|
|
156
|
+
);
|
|
157
|
+
await downloadSnapshot(snapshot, downloadPaths, fileStore);
|
|
158
|
+
log.info(`Snapshot downloaded at ${downloadDir}`, { snapshotsUrl, snapshot, downloadPaths });
|
|
159
|
+
|
|
160
|
+
// If download was successful, close the archiver store, clear lock and version, and move download there
|
|
161
|
+
await archiverStore.close();
|
|
162
|
+
archiverStore = undefined;
|
|
163
|
+
const archiverPath = join(dataDirectory, ARCHIVER_STORE_NAME);
|
|
164
|
+
await prepareTarget(archiverPath, ARCHIVER_DB_VERSION, l1Contracts.rollupAddress);
|
|
165
|
+
await rename(downloadPaths.archiver, join(archiverPath, 'data.mdb'));
|
|
166
|
+
log.info(`Archiver database set up from snapshot`, { path: archiverPath });
|
|
167
|
+
|
|
168
|
+
// Same for the world state dbs, only that we do not close them, since we assume they are not yet in use
|
|
169
|
+
const worldStateBasePath = join(dataDirectory, WORLD_STATE_DIR);
|
|
170
|
+
await prepareTarget(worldStateBasePath, WORLD_STATE_DB_VERSION, l1Contracts.rollupAddress);
|
|
171
|
+
for (const [name, dir] of NATIVE_WORLD_STATE_DBS) {
|
|
172
|
+
const path = join(worldStateBasePath, dir);
|
|
173
|
+
await mkdir(path, { recursive: true });
|
|
174
|
+
await rename(downloadPaths[name], join(path, 'data.mdb'));
|
|
175
|
+
log.info(`World state database ${name} set up from snapshot`, { path });
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
log.info(`Snapshot synced to L1 block ${snapshot.l1BlockNumber} L2 block ${snapshot.l2BlockNumber}`, { snapshot });
|
|
179
|
+
} finally {
|
|
180
|
+
if (archiverStore) {
|
|
181
|
+
log.verbose(`Closing temporary archiver data store`);
|
|
182
|
+
await archiverStore.close();
|
|
183
|
+
}
|
|
184
|
+
if (downloadDir) {
|
|
185
|
+
await tryRmDir(downloadDir, log);
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
return true;
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
/** Deletes target dir and writes the new version file. */
|
|
193
|
+
async function prepareTarget(target: string, schemaVersion: number, rollupAddress: EthAddress) {
|
|
194
|
+
const noOpen = () => Promise.resolve(undefined);
|
|
195
|
+
const versionManager = new DatabaseVersionManager<undefined>(schemaVersion, rollupAddress, target, noOpen);
|
|
196
|
+
await versionManager.resetDataDirectory();
|
|
197
|
+
await versionManager.writeVersion();
|
|
198
|
+
}
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import { ARCHIVER_DB_VERSION, type Archiver } from '@aztec/archiver';
|
|
2
|
+
import { tryRmDir } from '@aztec/foundation/fs';
|
|
3
|
+
import type { Logger } from '@aztec/foundation/log';
|
|
4
|
+
import type { DataStoreConfig } from '@aztec/kv-store/config';
|
|
5
|
+
import type { ChainConfig } from '@aztec/stdlib/config';
|
|
6
|
+
import { createFileStore } from '@aztec/stdlib/file-store';
|
|
7
|
+
import type { WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server';
|
|
8
|
+
import type { SnapshotDataUrls, UploadSnapshotMetadata } from '@aztec/stdlib/snapshots';
|
|
9
|
+
import { uploadSnapshot as uploadSnapshotToStore } from '@aztec/stdlib/snapshots';
|
|
10
|
+
import { WORLD_STATE_DB_VERSION } from '@aztec/world-state';
|
|
11
|
+
|
|
12
|
+
import { existsSync } from 'fs';
|
|
13
|
+
import { mkdtemp } from 'fs/promises';
|
|
14
|
+
import { tmpdir } from 'os';
|
|
15
|
+
import { join } from 'path';
|
|
16
|
+
|
|
17
|
+
/** Subset of node configuration needed to label a snapshot (chain id and rollup version) and stage its backup files. */
type UploadSnapshotConfig = Pick<ChainConfig, 'l1ChainId' | 'version'> & Pick<DataStoreConfig, 'dataDirectory'>;
|
|
18
|
+
|
|
19
|
+
/**
|
|
20
|
+
* Pauses the archiver and world state sync, creates backups of the archiver and world state lmdb environments,
|
|
21
|
+
* and uploads them to the specified location. Location must be a URL supported by our file store (eg `gs://bucketname/path`).
|
|
22
|
+
*/
|
|
23
|
+
export async function uploadSnapshot(
|
|
24
|
+
location: string,
|
|
25
|
+
archiver: Archiver,
|
|
26
|
+
worldState: WorldStateSynchronizer,
|
|
27
|
+
config: UploadSnapshotConfig,
|
|
28
|
+
log: Logger,
|
|
29
|
+
) {
|
|
30
|
+
const store = await createFileStore(location);
|
|
31
|
+
if (!store) {
|
|
32
|
+
throw new Error(`Failed to create file store for snapshot upload for location ${location}.`);
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
const backupDir = await mkdtemp(join(config.dataDirectory ?? tmpdir(), 'snapshot-'));
|
|
36
|
+
|
|
37
|
+
try {
|
|
38
|
+
const paths = await createBackups(backupDir, archiver, worldState, log);
|
|
39
|
+
const versions = { archiver: ARCHIVER_DB_VERSION, worldState: WORLD_STATE_DB_VERSION };
|
|
40
|
+
const metadata = await buildSnapshotMetadata(archiver, config);
|
|
41
|
+
log.info(`Uploading snapshot to ${location}`, { snapshot: metadata });
|
|
42
|
+
const snapshot = await uploadSnapshotToStore(paths, versions, metadata, store);
|
|
43
|
+
log.info(`Snapshot uploaded successfully`, { snapshot });
|
|
44
|
+
} finally {
|
|
45
|
+
log.info(`Cleaning up backup dir ${backupDir}`);
|
|
46
|
+
await tryRmDir(backupDir, log);
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
async function buildSnapshotMetadata(
|
|
51
|
+
archiver: Archiver,
|
|
52
|
+
config: UploadSnapshotConfig,
|
|
53
|
+
): Promise<UploadSnapshotMetadata> {
|
|
54
|
+
const [rollupAddress, l1BlockNumber, { latest }] = await Promise.all([
|
|
55
|
+
archiver.getRollupAddress(),
|
|
56
|
+
archiver.getL1BlockNumber(),
|
|
57
|
+
archiver.getL2Tips(),
|
|
58
|
+
] as const);
|
|
59
|
+
|
|
60
|
+
const { number: l2BlockNumber, hash: l2BlockHash } = latest;
|
|
61
|
+
if (!l2BlockHash) {
|
|
62
|
+
throw new Error(`Failed to get L2 block hash from archiver.`);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
return {
|
|
66
|
+
l1ChainId: config.l1ChainId,
|
|
67
|
+
l2Version: config.version,
|
|
68
|
+
rollupAddress,
|
|
69
|
+
l2BlockNumber,
|
|
70
|
+
l2BlockHash,
|
|
71
|
+
l1BlockNumber: Number(l1BlockNumber),
|
|
72
|
+
};
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
async function createBackups(backupDir: string, archiver: Archiver, worldState: WorldStateSynchronizer, log: Logger) {
|
|
76
|
+
try {
|
|
77
|
+
log.info(`Pausing archiver and world state sync to start snapshot upload`);
|
|
78
|
+
await archiver.stop();
|
|
79
|
+
await worldState.stopSync();
|
|
80
|
+
|
|
81
|
+
log.info(`Creating backups of lmdb environments to ${backupDir}`);
|
|
82
|
+
const [archiverPath, worldStatePaths] = await Promise.all([
|
|
83
|
+
archiver.backupTo(join(backupDir, 'archiver')),
|
|
84
|
+
worldState.backupTo(join(backupDir, 'world-state')),
|
|
85
|
+
]);
|
|
86
|
+
const paths: SnapshotDataUrls = { ...worldStatePaths, archiver: archiverPath };
|
|
87
|
+
|
|
88
|
+
const missing = Object.entries(paths).filter(([_key, path]) => !path || !existsSync(path));
|
|
89
|
+
if (missing.length > 0) {
|
|
90
|
+
throw new Error(`Missing backup files: ${missing.map(([key, path]) => `${path} (${key})`).join(', ')}`);
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
log.info(`Data stores backed up to ${backupDir}`, { paths });
|
|
94
|
+
return paths;
|
|
95
|
+
} catch (err) {
|
|
96
|
+
throw new Error(`Error creating backups for snapshot upload: ${err}`);
|
|
97
|
+
} finally {
|
|
98
|
+
log.info(`Resuming archiver and world state sync`);
|
|
99
|
+
worldState.resumeSync();
|
|
100
|
+
archiver.resume();
|
|
101
|
+
}
|
|
102
|
+
}
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import { type ConfigMappingsType, booleanConfigHelper } from '@aztec/foundation/config';
|
|
2
|
+
|
|
3
|
+
/** Configuration options shared by node implementations: genesis funding toggles and snapshot sync settings. */
export type SharedNodeConfig = {
  /** Whether to populate the genesis state with initial fee juice for the test accounts */
  testAccounts: boolean;
  /** Whether to populate the genesis state with initial fee juice for the sponsored FPC */
  sponsoredFPC: boolean;
  /** Sync mode: full to always sync via L1, snapshot to download a snapshot if there is no local data, force-snapshot to download even if there is local data. */
  syncMode: 'full' | 'snapshot' | 'force-snapshot';
  /** Base URL for snapshots index. Index file will be searched at `SNAPSHOTS_BASE_URL/aztec-L1_CHAIN_ID-VERSION-ROLLUP_ADDRESS/index.json` */
  snapshotsUrl?: string;
};
|
|
13
|
+
|
|
14
|
+
/**
 * Environment-variable mappings for {@link SharedNodeConfig}, consumed by the shared config loader.
 * Each entry maps a config key to its env var name, help text, and optional parsing/default helpers.
 */
export const sharedNodeConfigMappings: ConfigMappingsType<SharedNodeConfig> = {
  testAccounts: {
    env: 'TEST_ACCOUNTS',
    description: 'Whether to populate the genesis state with initial fee juice for the test accounts.',
    // booleanConfigHelper supplies the boolean env parsing; no default argument given here.
    ...booleanConfigHelper(),
  },
  sponsoredFPC: {
    env: 'SPONSORED_FPC',
    description: 'Whether to populate the genesis state with initial fee juice for the sponsored FPC.',
    // Defaults to false when SPONSORED_FPC is unset.
    ...booleanConfigHelper(false),
  },
  syncMode: {
    env: 'SYNC_MODE',
    description:
      'Set sync mode to `full` to always sync via L1, `snapshot` to download a snapshot if there is no local data, `force-snapshot` to download even if there is local data.',
    // NOTE(review): no parser restricts the env value to the three allowed modes; an unexpected
    // string would flow through as-is — confirm validation happens downstream.
    defaultValue: 'snapshot',
  },
  snapshotsUrl: {
    env: 'SYNC_SNAPSHOTS_URL',
    description: 'Base URL for snapshots index.',
  },
};
|