@aztec/prover-node 0.0.0-test.1 → 0.0.1-commit.b655e406

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/dest/actions/download-epoch-proving-job.d.ts +18 -0
  2. package/dest/actions/download-epoch-proving-job.d.ts.map +1 -0
  3. package/dest/actions/download-epoch-proving-job.js +37 -0
  4. package/dest/actions/index.d.ts +3 -0
  5. package/dest/actions/index.d.ts.map +1 -0
  6. package/dest/actions/index.js +2 -0
  7. package/dest/actions/rerun-epoch-proving-job.d.ts +11 -0
  8. package/dest/actions/rerun-epoch-proving-job.d.ts.map +1 -0
  9. package/dest/actions/rerun-epoch-proving-job.js +40 -0
  10. package/dest/actions/upload-epoch-proof-failure.d.ts +15 -0
  11. package/dest/actions/upload-epoch-proof-failure.d.ts.map +1 -0
  12. package/dest/actions/upload-epoch-proof-failure.js +78 -0
  13. package/dest/bin/run-failed-epoch.d.ts +2 -0
  14. package/dest/bin/run-failed-epoch.d.ts.map +1 -0
  15. package/dest/bin/run-failed-epoch.js +67 -0
  16. package/dest/config.d.ts +12 -9
  17. package/dest/config.d.ts.map +1 -1
  18. package/dest/config.js +81 -14
  19. package/dest/factory.d.ts +12 -8
  20. package/dest/factory.d.ts.map +1 -1
  21. package/dest/factory.js +95 -31
  22. package/dest/index.d.ts +1 -1
  23. package/dest/index.d.ts.map +1 -1
  24. package/dest/index.js +1 -1
  25. package/dest/job/epoch-proving-job-data.d.ts +16 -0
  26. package/dest/job/epoch-proving-job-data.d.ts.map +1 -0
  27. package/dest/job/epoch-proving-job-data.js +52 -0
  28. package/dest/job/epoch-proving-job.d.ts +30 -15
  29. package/dest/job/epoch-proving-job.d.ts.map +1 -1
  30. package/dest/job/epoch-proving-job.js +149 -50
  31. package/dest/metrics.d.ts +28 -4
  32. package/dest/metrics.d.ts.map +1 -1
  33. package/dest/metrics.js +141 -35
  34. package/dest/monitors/epoch-monitor.d.ts +3 -1
  35. package/dest/monitors/epoch-monitor.d.ts.map +1 -1
  36. package/dest/monitors/epoch-monitor.js +15 -2
  37. package/dest/prover-node-publisher.d.ts +7 -10
  38. package/dest/prover-node-publisher.d.ts.map +1 -1
  39. package/dest/prover-node-publisher.js +59 -60
  40. package/dest/prover-node.d.ts +43 -39
  41. package/dest/prover-node.d.ts.map +1 -1
  42. package/dest/prover-node.js +171 -100
  43. package/dest/prover-publisher-factory.d.ts +21 -0
  44. package/dest/prover-publisher-factory.d.ts.map +1 -0
  45. package/dest/prover-publisher-factory.js +26 -0
  46. package/dest/test/index.d.ts +4 -2
  47. package/dest/test/index.d.ts.map +1 -1
  48. package/dest/test/index.js +1 -3
  49. package/package.json +36 -31
  50. package/src/actions/download-epoch-proving-job.ts +44 -0
  51. package/src/actions/index.ts +2 -0
  52. package/src/actions/rerun-epoch-proving-job.ts +61 -0
  53. package/src/actions/upload-epoch-proof-failure.ts +88 -0
  54. package/src/bin/run-failed-epoch.ts +77 -0
  55. package/src/config.ts +108 -24
  56. package/src/factory.ts +161 -43
  57. package/src/index.ts +1 -1
  58. package/src/job/epoch-proving-job-data.ts +76 -0
  59. package/src/job/epoch-proving-job.ts +215 -50
  60. package/src/metrics.ts +135 -37
  61. package/src/monitors/epoch-monitor.ts +16 -5
  62. package/src/prover-node-publisher.ts +93 -86
  63. package/src/prover-node.ts +203 -126
  64. package/src/prover-publisher-factory.ts +37 -0
  65. package/src/test/index.ts +7 -4
  66. package/dest/http.d.ts +0 -8
  67. package/dest/http.d.ts.map +0 -1
  68. package/dest/http.js +0 -9
  69. package/dest/prover-coordination/config.d.ts +0 -7
  70. package/dest/prover-coordination/config.d.ts.map +0 -1
  71. package/dest/prover-coordination/config.js +0 -11
  72. package/dest/prover-coordination/factory.d.ts +0 -22
  73. package/dest/prover-coordination/factory.d.ts.map +0 -1
  74. package/dest/prover-coordination/factory.js +0 -42
  75. package/dest/prover-coordination/index.d.ts +0 -3
  76. package/dest/prover-coordination/index.d.ts.map +0 -1
  77. package/dest/prover-coordination/index.js +0 -2
  78. package/src/http.ts +0 -13
  79. package/src/prover-coordination/config.ts +0 -17
  80. package/src/prover-coordination/factory.ts +0 -72
  81. package/src/prover-coordination/index.ts +0 -2
@@ -0,0 +1,61 @@
1
+ import { createArchiverStore } from '@aztec/archiver';
2
+ import type { Logger } from '@aztec/foundation/log';
3
+ import type { DataStoreConfig } from '@aztec/kv-store/config';
4
+ import { type ProverClientConfig, createProverClient } from '@aztec/prover-client';
5
+ import { ProverBrokerConfig, createAndStartProvingBroker } from '@aztec/prover-client/broker';
6
+ import { PublicProcessorFactory } from '@aztec/simulator/server';
7
+ import { getTelemetryClient } from '@aztec/telemetry-client';
8
+ import { createWorldState } from '@aztec/world-state';
9
+
10
+ import { readFileSync } from 'fs';
11
+
12
+ import { deserializeEpochProvingJobData } from '../job/epoch-proving-job-data.js';
13
+ import { EpochProvingJob } from '../job/epoch-proving-job.js';
14
+ import { ProverNodeJobMetrics } from '../metrics.js';
15
+
16
+ /**
17
+ * Given a local folder where `downloadEpochProvingJob` was called, creates a new archiver and world state
18
+ * using the state snapshots, and creates a new epoch proving job to prove the downloaded proving job.
19
+ * Proving is done with a local proving broker and agents as specified by the config.
20
+ */
21
+ export async function rerunEpochProvingJob(
22
+ localPath: string,
23
+ log: Logger,
24
+ config: DataStoreConfig & ProverBrokerConfig & ProverClientConfig,
25
+ ) {
26
+ const jobData = deserializeEpochProvingJobData(readFileSync(localPath));
27
+ log.info(`Loaded proving job data for epoch ${jobData.epochNumber}`);
28
+
29
+ const telemetry = getTelemetryClient();
30
+ const metrics = new ProverNodeJobMetrics(telemetry.getMeter('prover-job'), telemetry.getTracer('prover-job'));
31
+ const worldState = await createWorldState(config);
32
+ const archiver = await createArchiverStore(config);
33
+ const publicProcessorFactory = new PublicProcessorFactory(archiver);
34
+
35
+ const publisher = { submitEpochProof: () => Promise.resolve(true) };
36
+ const l2BlockSourceForReorgDetection = undefined;
37
+ const deadline = undefined;
38
+
39
+ // This starts a local proving broker that does not get exposed as a service. This should be good enough for
40
+ // smallish epochs to be proven if we run on a large machine, but as epochs grow larger, we may want to switch
41
+ // this out for a live proving broker with multiple agents that we can connect to.
42
+ const broker = await createAndStartProvingBroker(config, telemetry);
43
+ const prover = await createProverClient(config, worldState, broker, telemetry);
44
+
45
+ const provingJob = new EpochProvingJob(
46
+ jobData,
47
+ worldState,
48
+ prover.createEpochProver(),
49
+ publicProcessorFactory,
50
+ publisher,
51
+ l2BlockSourceForReorgDetection,
52
+ metrics,
53
+ deadline,
54
+ { skipEpochCheck: true },
55
+ );
56
+
57
+ log.info(`Rerunning epoch proving job for epoch ${jobData.epochNumber}`);
58
+ await provingJob.run();
59
+ log.info(`Completed job for epoch ${jobData.epochNumber} with status ${provingJob.getState()}`);
60
+ return provingJob.getState();
61
+ }
@@ -0,0 +1,88 @@
1
+ import { ARCHIVER_DB_VERSION, type Archiver } from '@aztec/archiver';
2
+ import { tryRmDir } from '@aztec/foundation/fs';
3
+ import { jsonStringify } from '@aztec/foundation/json-rpc';
4
+ import type { Logger } from '@aztec/foundation/log';
5
+ import { isoDate } from '@aztec/foundation/string';
6
+ import type { DataStoreConfig } from '@aztec/kv-store/config';
7
+ import { buildSnapshotMetadata, createBackups } from '@aztec/node-lib/actions';
8
+ import type { ChainConfig } from '@aztec/stdlib/config';
9
+ import { type FileStore, createFileStore } from '@aztec/stdlib/file-store';
10
+ import type { WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server';
11
+ import { type UploadSnapshotMetadata, getBasePath, uploadSnapshotData } from '@aztec/stdlib/snapshots';
12
+ import { WORLD_STATE_DB_VERSION } from '@aztec/world-state';
13
+
14
+ import { mkdtemp } from 'fs/promises';
15
+ import { tmpdir } from 'os';
16
+ import { dirname, join } from 'path';
17
+
18
+ import { type EpochProvingJobData, serializeEpochProvingJobData } from '../job/epoch-proving-job-data.js';
19
+
20
+ type UploadEpochProofConfig = Pick<ChainConfig, 'l1ChainId' | 'rollupVersion'> & Pick<DataStoreConfig, 'dataDirectory'>;
21
+
22
+ /** Whether uploaded data to the file store should be of public access. */
23
+ const PUBLIC_UPLOADS = true;
24
+
25
+ /**
26
+ * Uploads a snapshot of world state and archiver (requires pausing them) along with the proving job data,
27
+ * so we can download and re-run the job later under the same conditions.
28
+ * @param location The location to upload the data to (used to create the `FileStore`).
29
+ */
30
+ export async function uploadEpochProofFailure(
31
+ location: string,
32
+ jobId: string,
33
+ jobData: EpochProvingJobData,
34
+ archiver: Archiver,
35
+ worldState: WorldStateSynchronizer,
36
+ config: UploadEpochProofConfig,
37
+ log: Logger,
38
+ ) {
39
+ const epochNumber = jobData.epochNumber;
40
+ log.warn(`Uploading epoch proof failure for ${epochNumber} to ${location}`, { epochNumber, jobId, location });
41
+
42
+ const backupDir = await mkdtemp(join(config.dataDirectory ?? tmpdir(), 'epoch-proof-data-'));
43
+ const store = await createFileStore(location);
44
+ if (!store) {
45
+ throw new Error(`Failed to create file store for epoch proof failure upload for location ${location}.`);
46
+ }
47
+
48
+ try {
49
+ const versions = { archiver: ARCHIVER_DB_VERSION, worldState: WORLD_STATE_DB_VERSION };
50
+ const uploadMetadata = await buildSnapshotMetadata(archiver, config);
51
+ const paths = await createBackups(backupDir, archiver, worldState, log);
52
+
53
+ const basePath = `${getBasePath(uploadMetadata)}/${epochNumber}-${isoDate()}-${jobId}`;
54
+ const pathFor = (key: string) => `${basePath}/${key}.db`;
55
+ const [metadata, dataUrl, metadataUrl] = await Promise.all([
56
+ uploadSnapshotData(paths, versions, uploadMetadata, store, { pathFor, private: !PUBLIC_UPLOADS }),
57
+ uploadJobData(jobData, store, basePath),
58
+ uploadSnapshotMetadata(uploadMetadata, store, basePath),
59
+ ] as const);
60
+
61
+ const baseUrl = dirname(metadataUrl);
62
+ log.warn(`Uploaded epoch ${epochNumber} proof failure data to ${baseUrl}`, {
63
+ epochNumber,
64
+ location,
65
+ basePath,
66
+ metadataUrl,
67
+ dataUrl,
68
+ metadata,
69
+ jobId,
70
+ });
71
+ return baseUrl;
72
+ } finally {
73
+ log.info(`Cleaning up backup dir ${backupDir}`);
74
+ await tryRmDir(backupDir, log);
75
+ }
76
+ }
77
+
78
+ async function uploadJobData(jobData: EpochProvingJobData, store: FileStore, basePath: string) {
79
+ const data = serializeEpochProvingJobData(jobData);
80
+ const path = `${basePath}/data.bin`;
81
+ return await store.save(path, data, { compress: true, public: PUBLIC_UPLOADS });
82
+ }
83
+
84
+ async function uploadSnapshotMetadata(metadata: UploadSnapshotMetadata, store: FileStore, basePath: string) {
85
+ const data = Buffer.from(jsonStringify(metadata), 'utf-8');
86
+ const path = `${basePath}/metadata.json`;
87
+ return await store.save(path, data, { compress: false, public: PUBLIC_UPLOADS });
88
+ }
@@ -0,0 +1,77 @@
1
+ /* eslint-disable no-console */
2
+ import type { L1ContractAddresses } from '@aztec/ethereum';
3
+ import { EthAddress } from '@aztec/foundation/eth-address';
4
+ import { jsonParseWithSchema, jsonStringify } from '@aztec/foundation/json-rpc';
5
+ import { createLogger } from '@aztec/foundation/log';
6
+ import { downloadEpochProvingJob, getProverNodeConfigFromEnv, rerunEpochProvingJob } from '@aztec/prover-node';
7
+ import { type UploadSnapshotMetadata, UploadSnapshotMetadataSchema } from '@aztec/stdlib/snapshots';
8
+
9
+ import { existsSync, mkdirSync } from 'fs';
10
+ import { readFile, writeFile } from 'fs/promises';
11
+ import { basename, join } from 'path';
12
+
13
+ const logger = createLogger('prover-node:run-failed-epoch');
14
+
15
+ function printUsage() {
16
+ console.error('Usage: run-failed-epoch <proof-uri> [out-dir=./data]');
17
+ }
18
+
19
+ async function rerunFailedEpoch(provingJobUrl: string, baseLocalDir: string) {
20
+ const localDir = join(baseLocalDir, basename(provingJobUrl));
21
+ const jobPath = join(localDir, 'data.bin');
22
+ const dataDir = join(localDir, 'state');
23
+
24
+ const env = getProverNodeConfigFromEnv();
25
+ const config = {
26
+ ...getProverNodeConfigFromEnv(),
27
+ dataDirectory: dataDir,
28
+ dataStoreMapSizeKb: env.dataStoreMapSizeKb ?? 1024 * 1024,
29
+ proverId: env.proverId ?? EthAddress.random(),
30
+ };
31
+
32
+ let metadata: UploadSnapshotMetadata;
33
+ const metadataPath = join(localDir, 'metadata.json');
34
+ if (existsSync(metadataPath)) {
35
+ logger.info(`Using downloaded data`);
36
+ metadata = jsonParseWithSchema(await readFile(metadataPath, 'utf-8'), UploadSnapshotMetadataSchema);
37
+ } else {
38
+ logger.info(`Downloading epoch proving job data and state from ${provingJobUrl} to ${localDir}`);
39
+ metadata = await downloadEpochProvingJob(provingJobUrl!, logger, {
40
+ jobDataDownloadPath: jobPath,
41
+ dataDirectory: dataDir,
42
+ });
43
+ await writeFile(metadataPath, jsonStringify(metadata, true));
44
+ logger.info(`Download to ${localDir} complete`);
45
+ }
46
+
47
+ logger.info(`Rerunning proving job from ${jobPath} with state from ${dataDir}`, metadata);
48
+ const result = await rerunEpochProvingJob(jobPath, logger, {
49
+ ...config,
50
+ l1Contracts: { rollupAddress: metadata.rollupAddress } as L1ContractAddresses,
51
+ rollupVersion: metadata.rollupVersion,
52
+ });
53
+
54
+ console.error(`Epoch proving job complete with result ${result}`);
55
+ }
56
+
57
+ async function main() {
58
+ if (process.argv[2] === '--help') {
59
+ printUsage();
60
+ return;
61
+ }
62
+
63
+ const uri = process.argv[2];
64
+ const outDir = process.argv[3] || './data';
65
+ if (!uri) {
66
+ printUsage();
67
+ throw new Error('Missing URL to epoch proving job');
68
+ }
69
+
70
+ mkdirSync(outDir, { recursive: true });
71
+ await rerunFailedEpoch(uri, outDir);
72
+ }
73
+
74
+ main().catch(err => {
75
+ console.error(err);
76
+ process.exit(1);
77
+ });
package/src/config.ts CHANGED
@@ -1,5 +1,6 @@
1
1
  import { type ArchiverConfig, archiverConfigMappings } from '@aztec/archiver/config';
2
2
  import type { ACVMConfig, BBConfig } from '@aztec/bb-prover/config';
3
+ import { type GenesisStateConfig, genesisStateConfigMappings } from '@aztec/ethereum';
3
4
  import {
4
5
  type ConfigMappingsType,
5
6
  booleanConfigHelper,
@@ -7,6 +8,8 @@ import {
7
8
  numberConfigHelper,
8
9
  } from '@aztec/foundation/config';
9
10
  import { type DataStoreConfig, dataConfigMappings } from '@aztec/kv-store/config';
11
+ import { type KeyStore, type KeyStoreConfig, ethPrivateKeySchema, keyStoreConfigMappings } from '@aztec/node-keystore';
12
+ import { type SharedNodeConfig, sharedNodeConfigMappings } from '@aztec/node-lib/config';
10
13
  import { type P2PConfig, p2pConfigMappings } from '@aztec/p2p/config';
11
14
  import {
12
15
  type ProverAgentConfig,
@@ -14,7 +17,7 @@ import {
14
17
  proverAgentConfigMappings,
15
18
  proverBrokerConfigMappings,
16
19
  } from '@aztec/prover-client/broker';
17
- import { type ProverClientConfig, bbConfigMappings, proverClientConfigMappings } from '@aztec/prover-client/config';
20
+ import { type ProverClientUserConfig, bbConfigMappings, proverClientConfigMappings } from '@aztec/prover-client/config';
18
21
  import {
19
22
  type PublisherConfig,
20
23
  type TxSenderConfig,
@@ -23,28 +26,29 @@ import {
23
26
  } from '@aztec/sequencer-client/config';
24
27
  import { type WorldStateConfig, worldStateConfigMappings } from '@aztec/world-state/config';
25
28
 
26
- import { type ProverCoordinationConfig, proverCoordinationConfigMappings } from './prover-coordination/config.js';
27
-
28
29
  export type ProverNodeConfig = ArchiverConfig &
29
- ProverClientConfig &
30
+ ProverClientUserConfig &
30
31
  P2PConfig &
31
32
  WorldStateConfig &
32
33
  PublisherConfig &
33
34
  TxSenderConfig &
34
35
  DataStoreConfig &
35
- ProverCoordinationConfig &
36
- SpecificProverNodeConfig & {
37
- /** Whether to populate the genesis state with initial fee juice for the test accounts */
38
- testAccounts: boolean;
39
- };
36
+ KeyStoreConfig &
37
+ SharedNodeConfig &
38
+ SpecificProverNodeConfig &
39
+ GenesisStateConfig;
40
40
 
41
- type SpecificProverNodeConfig = {
41
+ export type SpecificProverNodeConfig = {
42
42
  proverNodeMaxPendingJobs: number;
43
43
  proverNodePollingIntervalMs: number;
44
44
  proverNodeMaxParallelBlocksPerEpoch: number;
45
+ proverNodeFailedEpochStore: string | undefined;
46
+ proverNodeEpochProvingDelayMs: number | undefined;
47
+ proverNodeDisableProofPublish?: boolean;
45
48
  txGatheringTimeoutMs: number;
46
49
  txGatheringIntervalMs: number;
47
- txGatheringMaxParallelRequests: number;
50
+ txGatheringBatchSize: number;
51
+ txGatheringMaxParallelRequestsPerNode: number;
48
52
  };
49
53
 
50
54
  const specificProverNodeConfigMappings: ConfigMappingsType<SpecificProverNodeConfig> = {
@@ -63,38 +67,54 @@ const specificProverNodeConfigMappings: ConfigMappingsType<SpecificProverNodeCon
63
67
  description: 'The Maximum number of blocks to process in parallel while proving an epoch',
64
68
  ...numberConfigHelper(32),
65
69
  },
66
- txGatheringTimeoutMs: {
67
- env: 'PROVER_NODE_TX_GATHERING_TIMEOUT_MS',
68
- description: 'The maximum amount of time to wait for tx data to be available',
69
- ...numberConfigHelper(60_000),
70
+ proverNodeFailedEpochStore: {
71
+ env: 'PROVER_NODE_FAILED_EPOCH_STORE',
72
+ description: 'File store where to upload node state when an epoch fails to be proven',
73
+ defaultValue: undefined,
74
+ },
75
+ proverNodeEpochProvingDelayMs: {
76
+ description: 'Optional delay in milliseconds to wait before proving a new epoch',
77
+ defaultValue: undefined,
70
78
  },
71
79
  txGatheringIntervalMs: {
72
80
  env: 'PROVER_NODE_TX_GATHERING_INTERVAL_MS',
73
81
  description: 'How often to check that tx data is available',
74
82
  ...numberConfigHelper(1_000),
75
83
  },
76
- txGatheringMaxParallelRequests: {
77
- env: 'PROVER_NODE_TX_GATHERING_MAX_PARALLEL_REQUESTS',
78
- description: 'How many txs to load up a time',
84
+ txGatheringBatchSize: {
85
+ env: 'PROVER_NODE_TX_GATHERING_BATCH_SIZE',
86
+ description: 'How many transactions to gather from a node in a single request',
87
+ ...numberConfigHelper(10),
88
+ },
89
+ txGatheringMaxParallelRequestsPerNode: {
90
+ env: 'PROVER_NODE_TX_GATHERING_MAX_PARALLEL_REQUESTS_PER_NODE',
91
+ description: 'How many tx requests to make in parallel to each node',
79
92
  ...numberConfigHelper(100),
80
93
  },
94
+ txGatheringTimeoutMs: {
95
+ env: 'PROVER_NODE_TX_GATHERING_TIMEOUT_MS',
96
+ description: 'How long to wait for tx data to be available before giving up',
97
+ ...numberConfigHelper(120_000),
98
+ },
99
+ proverNodeDisableProofPublish: {
100
+ env: 'PROVER_NODE_DISABLE_PROOF_PUBLISH',
101
+ description: 'Whether the prover node skips publishing proofs to L1',
102
+ ...booleanConfigHelper(false),
103
+ },
81
104
  };
82
105
 
83
106
  export const proverNodeConfigMappings: ConfigMappingsType<ProverNodeConfig> = {
84
107
  ...dataConfigMappings,
108
+ ...keyStoreConfigMappings,
85
109
  ...archiverConfigMappings,
86
110
  ...proverClientConfigMappings,
87
111
  ...p2pConfigMappings,
88
112
  ...worldStateConfigMappings,
89
113
  ...getPublisherConfigMappings('PROVER'),
90
114
  ...getTxSenderConfigMappings('PROVER'),
91
- ...proverCoordinationConfigMappings,
92
115
  ...specificProverNodeConfigMappings,
93
- testAccounts: {
94
- env: 'TEST_ACCOUNTS',
95
- description: 'Whether to populate the genesis state with initial fee juice for the test accounts.',
96
- ...booleanConfigHelper(false),
97
- },
116
+ ...genesisStateConfigMappings,
117
+ ...sharedNodeConfigMappings,
98
118
  };
99
119
 
100
120
  export function getProverNodeConfigFromEnv(): ProverNodeConfig {
@@ -113,3 +133,67 @@ export function getProverNodeAgentConfigFromEnv(): ProverAgentConfig & BBConfig
113
133
  ...getConfigFromMappings(bbConfigMappings),
114
134
  };
115
135
  }
136
+
137
+ function createKeyStoreFromWeb3Signer(config: ProverNodeConfig): KeyStore | undefined {
138
+ // If we don't have a valid prover Id then we can't build a valid key store with remote signers
139
+ if (config.proverId === undefined) {
140
+ return undefined;
141
+ }
142
+
143
+ // Also, we need at least one publisher address.
144
+ const publishers = config.publisherAddresses ?? [];
145
+
146
+ if (publishers.length === 0) {
147
+ return undefined;
148
+ }
149
+
150
+ const keyStore: KeyStore = {
151
+ schemaVersion: 1,
152
+ slasher: undefined,
153
+ prover: {
154
+ id: config.proverId,
155
+ publisher: publishers,
156
+ },
157
+ remoteSigner: config.web3SignerUrl,
158
+ validators: undefined,
159
+ };
160
+ return keyStore;
161
+ }
162
+
163
+ function createKeyStoreFromPublisherKeys(config: ProverNodeConfig): KeyStore | undefined {
164
+ // Extract the publisher keys from the provided config.
165
+ const publisherKeys = config.publisherPrivateKeys
166
+ ? config.publisherPrivateKeys.map(k => ethPrivateKeySchema.parse(k.getValue()))
167
+ : [];
168
+
169
+ // There must be at least 1.
170
+ if (publisherKeys.length === 0) {
171
+ return undefined;
172
+ }
173
+
174
+ // If we have a valid proverId then create a prover key store of the form { id, publisher: [publisherKeys] }
175
+ // Otherwise create one of the form ("0x12345678....." as EthAccount).
176
+
177
+ const keyStore: KeyStore = {
178
+ schemaVersion: 1,
179
+ slasher: undefined,
180
+ prover:
181
+ config.proverId === undefined
182
+ ? publisherKeys[0]
183
+ : {
184
+ id: config.proverId,
185
+ publisher: publisherKeys,
186
+ },
187
+ remoteSigner: undefined,
188
+ validators: undefined,
189
+ };
190
+ return keyStore;
191
+ }
192
+
193
+ export function createKeyStoreForProver(config: ProverNodeConfig): KeyStore | undefined {
194
+ if (config.web3SignerUrl !== undefined && config.web3SignerUrl.length > 0) {
195
+ return createKeyStoreFromWeb3Signer(config);
196
+ }
197
+
198
+ return createKeyStoreFromPublisherKeys(config);
199
+ }
package/src/factory.ts CHANGED
@@ -1,43 +1,108 @@
1
1
  import { type Archiver, createArchiver } from '@aztec/archiver';
2
+ import { BBCircuitVerifier, QueuedIVCVerifier, TestCircuitVerifier } from '@aztec/bb-prover';
2
3
  import { type BlobSinkClientInterface, createBlobSinkClient } from '@aztec/blob-sink/client';
3
4
  import { EpochCache } from '@aztec/epoch-cache';
4
- import { L1TxUtils, RollupContract, createEthereumChain, createL1Clients } from '@aztec/ethereum';
5
+ import { L1TxUtils, PublisherManager, RollupContract, createEthereumChain } from '@aztec/ethereum';
6
+ import { pick } from '@aztec/foundation/collection';
5
7
  import { type Logger, createLogger } from '@aztec/foundation/log';
8
+ import { DateProvider } from '@aztec/foundation/timer';
6
9
  import type { DataStoreConfig } from '@aztec/kv-store/config';
7
- import { createProverClient } from '@aztec/prover-client';
10
+ import { type KeyStoreConfig, KeystoreManager, loadKeystores, mergeKeystores } from '@aztec/node-keystore';
11
+ import { trySnapshotSync } from '@aztec/node-lib/actions';
12
+ import { createL1TxUtilsFromEthSignerWithStore } from '@aztec/node-lib/factories';
13
+ import { NodeRpcTxSource, createP2PClient } from '@aztec/p2p';
14
+ import { type ProverClientConfig, createProverClient } from '@aztec/prover-client';
8
15
  import { createAndStartProvingBroker } from '@aztec/prover-client/broker';
9
- import type { ProverCoordination, ProvingJobBroker } from '@aztec/stdlib/interfaces/server';
16
+ import type { AztecNode, ProvingJobBroker } from '@aztec/stdlib/interfaces/server';
17
+ import { P2PClientType } from '@aztec/stdlib/p2p';
10
18
  import type { PublicDataTreeLeaf } from '@aztec/stdlib/trees';
11
- import { type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client';
19
+ import { getPackageVersion } from '@aztec/stdlib/update-checker';
20
+ import { L1Metrics, type TelemetryClient, getTelemetryClient } from '@aztec/telemetry-client';
12
21
  import { createWorldStateSynchronizer } from '@aztec/world-state';
13
22
 
14
- import type { ProverNodeConfig } from './config.js';
23
+ import { createPublicClient, fallback, http } from 'viem';
24
+
25
+ import { type ProverNodeConfig, createKeyStoreForProver } from './config.js';
15
26
  import { EpochMonitor } from './monitors/epoch-monitor.js';
16
- import { createProverCoordination } from './prover-coordination/factory.js';
17
- import { ProverNodePublisher } from './prover-node-publisher.js';
18
- import { ProverNode, type ProverNodeOptions } from './prover-node.js';
27
+ import { ProverNode } from './prover-node.js';
28
+ import { ProverPublisherFactory } from './prover-publisher-factory.js';
29
+
30
+ export type ProverNodeDeps = {
31
+ telemetry?: TelemetryClient;
32
+ log?: Logger;
33
+ aztecNodeTxProvider?: Pick<AztecNode, 'getTxsByHash'>;
34
+ archiver?: Archiver;
35
+ publisherFactory?: ProverPublisherFactory;
36
+ blobSinkClient?: BlobSinkClientInterface;
37
+ broker?: ProvingJobBroker;
38
+ l1TxUtils?: L1TxUtils;
39
+ dateProvider?: DateProvider;
40
+ };
19
41
 
20
42
  /** Creates a new prover node given a config. */
21
43
  export async function createProverNode(
22
- config: ProverNodeConfig & DataStoreConfig,
23
- deps: {
24
- telemetry?: TelemetryClient;
25
- log?: Logger;
26
- aztecNodeTxProvider?: ProverCoordination;
27
- archiver?: Archiver;
28
- publisher?: ProverNodePublisher;
29
- blobSinkClient?: BlobSinkClientInterface;
30
- broker?: ProvingJobBroker;
31
- l1TxUtils?: L1TxUtils;
32
- } = {},
44
+ userConfig: ProverNodeConfig & DataStoreConfig & KeyStoreConfig,
45
+ deps: ProverNodeDeps = {},
33
46
  options: {
34
47
  prefilledPublicData?: PublicDataTreeLeaf[];
35
48
  } = {},
36
49
  ) {
50
+ const config = { ...userConfig };
37
51
  const telemetry = deps.telemetry ?? getTelemetryClient();
38
- const blobSinkClient = deps.blobSinkClient ?? createBlobSinkClient(config);
52
+ const dateProvider = deps.dateProvider ?? new DateProvider();
53
+ const blobSinkClient =
54
+ deps.blobSinkClient ?? createBlobSinkClient(config, { logger: createLogger('prover-node:blob-sink:client') });
39
55
  const log = deps.log ?? createLogger('prover-node');
40
- const archiver = deps.archiver ?? (await createArchiver(config, blobSinkClient, { blockUntilSync: true }, telemetry));
56
+
57
+ // Build a key store from file if given or from environment otherwise
58
+ let keyStoreManager: KeystoreManager | undefined;
59
+ const keyStoreProvided = config.keyStoreDirectory !== undefined && config.keyStoreDirectory.length > 0;
60
+ if (keyStoreProvided) {
61
+ const keyStores = loadKeystores(config.keyStoreDirectory!);
62
+ keyStoreManager = new KeystoreManager(mergeKeystores(keyStores));
63
+ } else {
64
+ const keyStore = createKeyStoreForProver(config);
65
+ if (keyStore) {
66
+ keyStoreManager = new KeystoreManager(keyStore);
67
+ }
68
+ }
69
+
70
+ await keyStoreManager?.validateSigners();
71
+
72
+ // Extract the prover signers from the key store and verify that we have one.
73
+ const proverSigners = keyStoreManager?.createProverSigners();
74
+
75
+ if (proverSigners === undefined) {
76
+ throw new Error('Failed to create prover key store configuration');
77
+ } else if (proverSigners.signers.length === 0) {
78
+ throw new Error('No prover signers found in the key store');
79
+ } else if (!keyStoreProvided) {
80
+ log.warn(
81
+ 'KEY STORE CREATED FROM ENVIRONMENT, IT IS RECOMMENDED TO USE A FILE-BASED KEY STORE IN PRODUCTION ENVIRONMENTS',
82
+ );
83
+ }
84
+
85
+ log.info(`Creating prover with publishers ${proverSigners.signers.map(signer => signer.address.toString()).join()}`);
86
+
87
+ // Only consider user provided config if it is valid
88
+ const proverIdInUserConfig = config.proverId === undefined || config.proverId.isZero() ? undefined : config.proverId;
89
+
90
+ // ProverId: Take from key store if provided, otherwise from user config if valid, otherwise address of first signer
91
+ const proverId = proverSigners.id ?? proverIdInUserConfig ?? proverSigners.signers[0].address;
92
+
93
+ // Now create the prover client configuration from this.
94
+ const proverClientConfig: ProverClientConfig = {
95
+ ...config,
96
+ proverId,
97
+ };
98
+
99
+ await trySnapshotSync(config, log);
100
+
101
+ const epochCache = await EpochCache.create(config.l1Contracts.rollupAddress, config);
102
+
103
+ const archiver =
104
+ deps.archiver ??
105
+ (await createArchiver(config, { blobSinkClient, epochCache, telemetry, dateProvider }, { blockUntilSync: true }));
41
106
  log.verbose(`Created archiver and synced to block ${await archiver.getBlockNumber()}`);
42
107
 
43
108
  const worldStateConfig = { ...config, worldStateProvenBlocksOnly: false };
@@ -50,49 +115,102 @@ export async function createProverNode(
50
115
  await worldStateSynchronizer.start();
51
116
 
52
117
  const broker = deps.broker ?? (await createAndStartProvingBroker(config, telemetry));
53
- const prover = await createProverClient(config, worldStateSynchronizer, broker, telemetry);
54
118
 
55
- const { l1RpcUrls: rpcUrls, l1ChainId: chainId, publisherPrivateKey } = config;
119
+ const prover = await createProverClient(proverClientConfig, worldStateSynchronizer, broker, telemetry);
120
+
121
+ const { l1RpcUrls: rpcUrls, l1ChainId: chainId } = config;
56
122
  const chain = createEthereumChain(rpcUrls, chainId);
57
- const { publicClient, walletClient } = createL1Clients(rpcUrls, publisherPrivateKey, chain.chainInfo);
123
+
124
+ const publicClient = createPublicClient({
125
+ chain: chain.chainInfo,
126
+ transport: fallback(config.l1RpcUrls.map((url: string) => http(url))),
127
+ pollingInterval: config.viemPollingIntervalMS,
128
+ });
58
129
 
59
130
  const rollupContract = new RollupContract(publicClient, config.l1Contracts.rollupAddress.toString());
60
131
 
61
- const l1TxUtils = deps.l1TxUtils ?? new L1TxUtils(publicClient, walletClient, log, config);
62
- const publisher = deps.publisher ?? new ProverNodePublisher(config, { telemetry, rollupContract, l1TxUtils });
132
+ const l1TxUtils = deps.l1TxUtils
133
+ ? [deps.l1TxUtils]
134
+ : await createL1TxUtilsFromEthSignerWithStore(
135
+ publicClient,
136
+ proverSigners.signers,
137
+ { ...config, scope: 'prover' },
138
+ { telemetry, logger: log.createChild('l1-tx-utils'), dateProvider },
139
+ );
63
140
 
64
- const epochCache = await EpochCache.create(config.l1Contracts.rollupAddress, config);
141
+ const publisherFactory =
142
+ deps.publisherFactory ??
143
+ new ProverPublisherFactory(config, {
144
+ rollupContract,
145
+ publisherManager: new PublisherManager(l1TxUtils, config),
146
+ telemetry,
147
+ });
65
148
 
66
- // If config.p2pEnabled is true, createProverCoordination will create a p2p client where txs are requested
67
- // If config.p2pEnabled is false, createProverCoordination request information from the AztecNode
68
- const proverCoordination = await createProverCoordination(config, {
69
- aztecNodeTxProvider: deps.aztecNodeTxProvider,
70
- worldStateSynchronizer,
149
+ const proofVerifier = new QueuedIVCVerifier(
150
+ config,
151
+ config.realProofs ? await BBCircuitVerifier.new(config) : new TestCircuitVerifier(),
152
+ );
153
+
154
+ const p2pClient = await createP2PClient(
155
+ P2PClientType.Prover,
156
+ config,
71
157
  archiver,
158
+ proofVerifier,
159
+ worldStateSynchronizer,
72
160
  epochCache,
161
+ getPackageVersion() ?? '',
162
+ dateProvider,
73
163
  telemetry,
74
- });
164
+ {
165
+ txCollectionNodeSources: deps.aztecNodeTxProvider
166
+ ? [new NodeRpcTxSource(deps.aztecNodeTxProvider, 'TestNode')]
167
+ : [],
168
+ },
169
+ );
75
170
 
76
- const proverNodeConfig: ProverNodeOptions = {
77
- maxPendingJobs: config.proverNodeMaxPendingJobs,
78
- pollingIntervalMs: config.proverNodePollingIntervalMs,
79
- maxParallelBlocksPerEpoch: config.proverNodeMaxParallelBlocksPerEpoch,
80
- txGatheringMaxParallelRequests: config.txGatheringMaxParallelRequests,
81
- txGatheringIntervalMs: config.txGatheringIntervalMs,
82
- txGatheringTimeoutMs: config.txGatheringTimeoutMs,
171
+ await p2pClient.start();
172
+
173
+ const proverNodeConfig = {
174
+ ...pick(
175
+ config,
176
+ 'proverNodeMaxPendingJobs',
177
+ 'proverNodeMaxParallelBlocksPerEpoch',
178
+ 'proverNodePollingIntervalMs',
179
+ 'proverNodeEpochProvingDelayMs',
180
+ 'txGatheringMaxParallelRequests',
181
+ 'txGatheringIntervalMs',
182
+ 'txGatheringTimeoutMs',
183
+ 'proverNodeFailedEpochStore',
184
+ 'proverNodeDisableProofPublish',
185
+ 'dataDirectory',
186
+ 'l1ChainId',
187
+ 'rollupVersion',
188
+ ),
83
189
  };
84
190
 
85
- const epochMonitor = await EpochMonitor.create(archiver, proverNodeConfig, telemetry);
191
+ const epochMonitor = await EpochMonitor.create(
192
+ archiver,
193
+ { pollingIntervalMs: config.proverNodePollingIntervalMs, provingDelayMs: config.proverNodeEpochProvingDelayMs },
194
+ telemetry,
195
+ );
196
+
197
+ const l1Metrics = new L1Metrics(
198
+ telemetry.getMeter('ProverNodeL1Metrics'),
199
+ publicClient,
200
+ l1TxUtils.map(utils => utils.getSenderAddress()),
201
+ );
86
202
 
87
203
  return new ProverNode(
88
204
  prover,
89
- publisher,
205
+ publisherFactory,
90
206
  archiver,
91
207
  archiver,
92
208
  archiver,
93
209
  worldStateSynchronizer,
94
- proverCoordination,
210
+ p2pClient,
95
211
  epochMonitor,
212
+ rollupContract,
213
+ l1Metrics,
96
214
  proverNodeConfig,
97
215
  telemetry,
98
216
  );