@aztec/prover-client 0.65.2 → 0.67.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/block_builder/index.d.ts +6 -0
- package/dest/block_builder/index.d.ts.map +1 -0
- package/dest/block_builder/index.js +2 -0
- package/dest/block_builder/light.d.ts +32 -0
- package/dest/block_builder/light.d.ts.map +1 -0
- package/dest/block_builder/light.js +75 -0
- package/dest/index.d.ts +1 -2
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +2 -3
- package/dest/mocks/fixtures.d.ts +4 -5
- package/dest/mocks/fixtures.d.ts.map +1 -1
- package/dest/mocks/fixtures.js +4 -8
- package/dest/mocks/test_context.d.ts +30 -12
- package/dest/mocks/test_context.d.ts.map +1 -1
- package/dest/mocks/test_context.js +61 -24
- package/dest/orchestrator/block-building-helpers.d.ts +5 -5
- package/dest/orchestrator/block-building-helpers.d.ts.map +1 -1
- package/dest/orchestrator/block-building-helpers.js +10 -11
- package/dest/orchestrator/epoch-proving-state.d.ts +5 -6
- package/dest/orchestrator/epoch-proving-state.d.ts.map +1 -1
- package/dest/orchestrator/epoch-proving-state.js +10 -12
- package/dest/orchestrator/orchestrator.d.ts +8 -6
- package/dest/orchestrator/orchestrator.d.ts.map +1 -1
- package/dest/orchestrator/orchestrator.js +85 -74
- package/dest/orchestrator/orchestrator_metrics.d.ts.map +1 -1
- package/dest/orchestrator/orchestrator_metrics.js +2 -5
- package/dest/orchestrator/tx-proving-state.d.ts +0 -1
- package/dest/orchestrator/tx-proving-state.d.ts.map +1 -1
- package/dest/orchestrator/tx-proving-state.js +2 -34
- package/dest/prover-agent/memory-proving-queue.d.ts.map +1 -1
- package/dest/prover-agent/memory-proving-queue.js +5 -4
- package/dest/prover-agent/prover-agent.d.ts.map +1 -1
- package/dest/prover-agent/prover-agent.js +3 -3
- package/dest/prover-client/factory.d.ts +6 -0
- package/dest/prover-client/factory.d.ts.map +1 -0
- package/dest/prover-client/factory.js +6 -0
- package/dest/prover-client/index.d.ts +3 -0
- package/dest/prover-client/index.d.ts.map +1 -0
- package/dest/prover-client/index.js +3 -0
- package/dest/{tx-prover/tx-prover.d.ts → prover-client/prover-client.d.ts} +8 -11
- package/dest/prover-client/prover-client.d.ts.map +1 -0
- package/dest/prover-client/prover-client.js +107 -0
- package/dest/proving_broker/caching_broker_facade.d.ts +12 -12
- package/dest/proving_broker/caching_broker_facade.d.ts.map +1 -1
- package/dest/proving_broker/caching_broker_facade.js +32 -29
- package/dest/proving_broker/factory.d.ts +2 -1
- package/dest/proving_broker/factory.d.ts.map +1 -1
- package/dest/proving_broker/factory.js +4 -4
- package/dest/proving_broker/proving_agent.d.ts +5 -0
- package/dest/proving_broker/proving_agent.d.ts.map +1 -1
- package/dest/proving_broker/proving_agent.js +15 -4
- package/dest/proving_broker/proving_agent_instrumentation.d.ts +8 -0
- package/dest/proving_broker/proving_agent_instrumentation.d.ts.map +1 -0
- package/dest/proving_broker/proving_agent_instrumentation.js +16 -0
- package/dest/proving_broker/proving_broker.d.ts +29 -5
- package/dest/proving_broker/proving_broker.d.ts.map +1 -1
- package/dest/proving_broker/proving_broker.js +142 -41
- package/dest/proving_broker/proving_broker_database/persisted.d.ts +3 -1
- package/dest/proving_broker/proving_broker_database/persisted.d.ts.map +1 -1
- package/dest/proving_broker/proving_broker_database/persisted.js +6 -2
- package/dest/proving_broker/proving_broker_instrumentation.d.ts +25 -0
- package/dest/proving_broker/proving_broker_instrumentation.d.ts.map +1 -0
- package/dest/proving_broker/proving_broker_instrumentation.js +85 -0
- package/dest/proving_broker/rpc.d.ts.map +1 -1
- package/dest/proving_broker/rpc.js +3 -2
- package/dest/test/mock_prover.d.ts +3 -2
- package/dest/test/mock_prover.d.ts.map +1 -1
- package/dest/test/mock_prover.js +9 -5
- package/package.json +18 -13
- package/src/block_builder/index.ts +6 -0
- package/src/block_builder/light.ts +120 -0
- package/src/index.ts +1 -2
- package/src/mocks/fixtures.ts +6 -18
- package/src/mocks/test_context.ts +85 -29
- package/src/orchestrator/block-building-helpers.ts +13 -14
- package/src/orchestrator/epoch-proving-state.ts +10 -13
- package/src/orchestrator/orchestrator.ts +101 -81
- package/src/orchestrator/orchestrator_metrics.ts +1 -11
- package/src/orchestrator/tx-proving-state.ts +1 -56
- package/src/prover-agent/memory-proving-queue.ts +4 -3
- package/src/prover-agent/prover-agent.ts +2 -2
- package/src/{tx-prover → prover-client}/factory.ts +4 -3
- package/src/prover-client/index.ts +2 -0
- package/src/{tx-prover/tx-prover.ts → prover-client/prover-client.ts} +25 -15
- package/src/proving_broker/caching_broker_facade.ts +31 -15
- package/src/proving_broker/factory.ts +7 -3
- package/src/proving_broker/proving_agent.ts +18 -3
- package/src/proving_broker/proving_agent_instrumentation.ts +21 -0
- package/src/proving_broker/proving_broker.ts +182 -50
- package/src/proving_broker/proving_broker_database/persisted.ts +11 -2
- package/src/proving_broker/proving_broker_instrumentation.ts +123 -0
- package/src/proving_broker/rpc.ts +2 -1
- package/src/test/mock_prover.ts +8 -4
- package/dest/tx-prover/factory.d.ts +0 -6
- package/dest/tx-prover/factory.d.ts.map +0 -1
- package/dest/tx-prover/factory.js +0 -6
- package/dest/tx-prover/tx-prover.d.ts.map +0 -1
- package/dest/tx-prover/tx-prover.js +0 -110
@@ -50,20 +50,16 @@ export class EpochProvingState {
   private mergeRollupInputs: BlockMergeRollupInputData[] = [];
   public rootRollupPublicInputs: RootRollupPublicInputs | undefined;
   public finalProof: Proof | undefined;
-  public blocks: BlockProvingState[] = [];
+  public blocks: (BlockProvingState | undefined)[] = [];

   constructor(
     public readonly epochNumber: number,
+    public readonly firstBlockNumber: number,
     public readonly totalNumBlocks: number,
     private completionCallback: (result: ProvingResult) => void,
     private rejectionCallback: (reason: string) => void,
   ) {}

-  /** Returns the current block proving state */
-  public get currentBlock(): BlockProvingState | undefined {
-    return this.blocks.at(-1);
-  }
-
   // Returns the number of levels of merge rollups
   public get numMergeLevels() {
     const totalLeaves = Math.max(2, this.totalNumBlocks);
@@ -110,9 +106,10 @@
     archiveTreeSnapshot: AppendOnlyTreeSnapshot,
     archiveTreeRootSiblingPath: Tuple<Fr, typeof ARCHIVE_HEIGHT>,
     previousBlockHash: Fr,
-  ) {
+  ): BlockProvingState {
+    const index = globalVariables.blockNumber.toNumber() - this.firstBlockNumber;
     const block = new BlockProvingState(
-
+      index,
       numTxs,
       globalVariables,
       padArrayEnd(l1ToL2Messages, Fr.ZERO, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP),
@@ -124,11 +121,11 @@
       previousBlockHash,
       this,
     );
-    this.blocks
-    if (this.blocks.length === this.totalNumBlocks) {
+    this.blocks[index] = block;
+    if (this.blocks.filter(b => !!b).length === this.totalNumBlocks) {
       this.provingStateLifecycle = PROVING_STATE_LIFECYCLE.PROVING_STATE_FULL;
     }
-    return
+    return block;
   }

   // Returns true if this proving state is still valid, false otherwise
@@ -180,8 +177,8 @@
   }

   // Returns a specific transaction proving state
-  public
-    return this.blocks
+  public getBlockProvingStateByBlockNumber(blockNumber: number) {
+    return this.blocks.find(block => block?.blockNumber === blockNumber);
   }

   // Returns a set of merge rollup inputs
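The hunks above change EpochProvingState from appending blocks to slotting them at `blockNumber - firstBlockNumber` and looking them up by block number. The sketch below is a minimal, hypothetical model of that bookkeeping (not the real class; `BlockSlot` and `EpochSlots` are illustrative stand-ins), assuming blocks within an epoch may now be registered out of order.

```ts
// Minimal sketch of the slot-by-block-number pattern seen in the diff above.
type BlockSlot = { blockNumber: number };

class EpochSlots<T extends BlockSlot> {
  public blocks: (T | undefined)[] = [];

  constructor(public readonly firstBlockNumber: number, public readonly totalNumBlocks: number) {}

  /** Stores a block in its slot; returns true once every slot is filled. */
  add(block: T): boolean {
    const index = block.blockNumber - this.firstBlockNumber;
    this.blocks[index] = block;
    return this.blocks.filter(b => !!b).length === this.totalNumBlocks;
  }

  /** Mirrors getBlockProvingStateByBlockNumber from the hunk above. */
  getByBlockNumber(blockNumber: number): T | undefined {
    return this.blocks.find(b => b?.blockNumber === blockNumber);
  }
}

// Usage: blocks can arrive out of order within the epoch.
const slots = new EpochSlots<BlockSlot>(100, 3);
slots.add({ blockNumber: 101 });
slots.add({ blockNumber: 100 });
console.log(slots.getByBlockNumber(101)); // { blockNumber: 101 }
console.log(slots.add({ blockNumber: 102 })); // true: all slots filled
```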
@@ -7,6 +7,7 @@ import {
 } from '@aztec/circuit-types';
 import {
   type EpochProver,
+  type ForkMerkleTreeOperations,
   type MerkleTreeWriteOperations,
   type ProofAndVerificationKey,
 } from '@aztec/circuit-types/interfaces';
@@ -14,15 +15,16 @@ import { type CircuitName } from '@aztec/circuit-types/stats';
 import {
   AVM_PROOF_LENGTH_IN_FIELDS,
   AVM_VERIFICATION_KEY_LENGTH_IN_FIELDS,
+  type AppendOnlyTreeSnapshot,
   type BaseOrMergeRollupPublicInputs,
   BaseParityInputs,
   type BaseRollupHints,
+  type BlockHeader,
   type BlockRootOrBlockMergePublicInputs,
   BlockRootRollupInputs,
   EmptyBlockRootRollupInputs,
   Fr,
   type GlobalVariables,
-  type Header,
   L1_TO_L2_MSG_SUBTREE_HEIGHT,
   L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH,
   type NESTED_RECURSIVE_PROOF_LENGTH,
@@ -38,9 +40,9 @@ import {
   makeEmptyRecursiveProof,
 } from '@aztec/circuits.js';
 import { makeTuple } from '@aztec/foundation/array';
-import { padArrayEnd } from '@aztec/foundation/collection';
+import { maxBy, padArrayEnd } from '@aztec/foundation/collection';
 import { AbortError } from '@aztec/foundation/error';
-import {
+import { createLogger } from '@aztec/foundation/log';
 import { promiseWithResolvers } from '@aztec/foundation/promise';
 import { type Tuple } from '@aztec/foundation/serialize';
 import { pushTestData } from '@aztec/foundation/testing';
@@ -75,7 +77,7 @@ import {
 import { ProvingOrchestratorMetrics } from './orchestrator_metrics.js';
 import { TxProvingState } from './tx-proving-state.js';

-const logger =
+const logger = createLogger('prover-client:orchestrator');

 /**
  * Implements an event driven proving scheduler to build the recursive proof tree. The idea being:
@@ -98,9 +100,10 @@ export class ProvingOrchestrator implements EpochProver {

   private provingPromise: Promise<ProvingResult> | undefined = undefined;
   private metrics: ProvingOrchestratorMetrics;
+  private dbs: Map<number, MerkleTreeWriteOperations> = new Map();

   constructor(
-    private
+    private dbProvider: ForkMerkleTreeOperations,
     private prover: ServerCircuitProver,
     telemetryClient: TelemetryClient,
     private readonly proverId: Fr = Fr.ZERO,
@@ -123,14 +126,14 @@ export class ProvingOrchestrator implements EpochProver {
     this.paddingTxProof = undefined;
   }

-  public startNewEpoch(epochNumber: number, totalNumBlocks: number) {
+  public startNewEpoch(epochNumber: number, firstBlockNumber: number, totalNumBlocks: number) {
     const { promise: _promise, resolve, reject } = promiseWithResolvers<ProvingResult>();
     const promise = _promise.catch((reason): ProvingResult => ({ status: 'failure', reason }));
     if (totalNumBlocks <= 0 || !Number.isInteger(totalNumBlocks)) {
       throw new Error(`Invalid number of blocks for epoch (got ${totalNumBlocks})`);
     }
     logger.info(`Starting epoch ${epochNumber} with ${totalNumBlocks} blocks`);
-    this.provingState = new EpochProvingState(epochNumber, totalNumBlocks, resolve, reject);
+    this.provingState = new EpochProvingState(epochNumber, firstBlockNumber, totalNumBlocks, resolve, reject);
     this.provingPromise = promise;
   }

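Since `startNewEpoch` now takes the epoch's first block number as well as its size, a caller has to derive both from the block range it intends to prove. A hedged sketch, where `ProvingOrchestratorLike` is an assumed, simplified interface (the real orchestrator has more dependencies and methods than shown here):

```ts
// Assumed minimal shape: only the new startNewEpoch signature is taken from the diff.
interface ProvingOrchestratorLike {
  startNewEpoch(epochNumber: number, firstBlockNumber: number, totalNumBlocks: number): void;
}

function startEpochForRange(
  orchestrator: ProvingOrchestratorLike,
  epochNumber: number,
  firstBlockNumber: number,
  lastBlockNumber: number,
) {
  const totalNumBlocks = lastBlockNumber - firstBlockNumber + 1;
  // Mirrors the validation in the hunk above: non-positive counts are rejected.
  if (totalNumBlocks <= 0 || !Number.isInteger(totalNumBlocks)) {
    throw new Error(`Invalid number of blocks for epoch (got ${totalNumBlocks})`);
  }
  orchestrator.startNewEpoch(epochNumber, firstBlockNumber, totalNumBlocks);
}
```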
@@ -159,24 +162,14 @@ export class ProvingOrchestrator implements EpochProver {
       throw new Error(`Invalid number of txs for block (got ${numTxs})`);
     }

-    if (this.provingState.currentBlock && !this.provingState.currentBlock.block) {
-      throw new Error(`Must end previous block before starting a new one`);
-    }
-
-    // TODO(palla/prover): Store block number in the db itself to make this check more reliable,
-    // and turn this warning into an exception that we throw.
-    const { blockNumber } = globalVariables;
-    const dbBlockNumber = (await this.db.getTreeInfo(MerkleTreeId.ARCHIVE)).size - 1n;
-    if (dbBlockNumber !== blockNumber.toBigInt() - 1n) {
-      logger.warn(
-        `Database is at wrong block number (starting block ${blockNumber.toBigInt()} with db at ${dbBlockNumber})`,
-      );
-    }
-
     logger.info(
-      `Starting block ${globalVariables.blockNumber} for slot ${globalVariables.slotNumber} with ${numTxs} transactions`,
+      `Starting block ${globalVariables.blockNumber.toNumber()} for slot ${globalVariables.slotNumber.toNumber()} with ${numTxs} transactions`,
     );

+    // Fork world state at the end of the immediately previous block
+    const db = await this.dbProvider.fork(globalVariables.blockNumber.toNumber() - 1);
+    this.dbs.set(globalVariables.blockNumber.toNumber(), db);
+
     // we start the block by enqueueing all of the base parity circuits
     let baseParityInputs: BaseParityInputs[] = [];
     let l1ToL2MessagesPadded: Tuple<Fr, typeof NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP>;
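The hunk above replaces the single shared database with a world-state fork per block, keyed by block number. A minimal sketch of that bookkeeping under assumed interfaces (`ForkLike` and `ForkProviderLike` are stand-ins; only forking at `blockNumber - 1` and the map keyed by block number are taken from this hunk, `close` from the cleanup hunk further down):

```ts
// Hypothetical, simplified stand-ins for the world-state fork provider and a fork handle.
interface ForkLike {
  close(): Promise<void>;
}
interface ForkProviderLike {
  fork(blockNumber: number): Promise<ForkLike>;
}

class BlockForks {
  private dbs = new Map<number, ForkLike>();

  constructor(private dbProvider: ForkProviderLike) {}

  /** Fork world state at the end of the immediately previous block. */
  async open(blockNumber: number): Promise<ForkLike> {
    const db = await this.dbProvider.fork(blockNumber - 1);
    this.dbs.set(blockNumber, db);
    return db;
  }

  get(blockNumber: number): ForkLike | undefined {
    return this.dbs.get(blockNumber);
  }

  /** Close and forget the fork once the block no longer needs it. */
  async close(blockNumber: number): Promise<void> {
    await this.dbs.get(blockNumber)?.close();
    this.dbs.delete(blockNumber);
  }
}
```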
@@ -189,12 +182,12 @@ export class ProvingOrchestrator implements EpochProver {
       BaseParityInputs.fromSlice(l1ToL2MessagesPadded, i, getVKTreeRoot()),
     );

-    const messageTreeSnapshot = await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE,
+    const messageTreeSnapshot = await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db);

     const newL1ToL2MessageTreeRootSiblingPathArray = await getSubtreeSiblingPath(
       MerkleTreeId.L1_TO_L2_MESSAGE_TREE,
       L1_TO_L2_MSG_SUBTREE_HEIGHT,
-
+      db,
     );

     const newL1ToL2MessageTreeRootSiblingPath = makeTuple(
@@ -205,18 +198,18 @@ export class ProvingOrchestrator implements EpochProver {
     );

     // Update the local trees to include the new l1 to l2 messages
-    await
-    const messageTreeSnapshotAfterInsertion = await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE,
+    await db.appendLeaves(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, l1ToL2MessagesPadded);
+    const messageTreeSnapshotAfterInsertion = await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db);

     // Get archive snapshot before this block lands
-    const startArchiveSnapshot = await getTreeSnapshot(MerkleTreeId.ARCHIVE,
-    const newArchiveSiblingPath = await getRootTreeSiblingPath(MerkleTreeId.ARCHIVE,
-    const previousBlockHash = await
+    const startArchiveSnapshot = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
+    const newArchiveSiblingPath = await getRootTreeSiblingPath(MerkleTreeId.ARCHIVE, db);
+    const previousBlockHash = await db.getLeafValue(
       MerkleTreeId.ARCHIVE,
       BigInt(startArchiveSnapshot.nextAvailableLeafIndex - 1),
     );

-    this.provingState!.startNewBlock(
+    const blockProvingState = this.provingState!.startNewBlock(
       numTxs,
       globalVariables,
       l1ToL2MessagesPadded,
@@ -230,7 +223,7 @@ export class ProvingOrchestrator implements EpochProver {

     // Enqueue base parity circuits for the block
     for (let i = 0; i < baseParityInputs.length; i++) {
-      this.enqueueBaseParityCircuit(
+      this.enqueueBaseParityCircuit(blockProvingState, baseParityInputs[i], i);
     }
   }

@@ -242,33 +235,40 @@ export class ProvingOrchestrator implements EpochProver {
     [Attributes.TX_HASH]: tx.hash.toString(),
   }))
   public async addNewTx(tx: ProcessedTx): Promise<void> {
-    const
-
-
-
+    const blockNumber = tx.constants.globalVariables.blockNumber.toNumber();
+    try {
+      const provingState = this.provingState?.getBlockProvingStateByBlockNumber(blockNumber);
+      if (!provingState) {
+        throw new Error(`Block proving state for ${blockNumber} not found`);
+      }

-
-
-
+      if (!provingState.isAcceptingTransactions()) {
+        throw new Error(`Rollup not accepting further transactions`);
+      }

-
-
-
+      if (!provingState.verifyState()) {
+        throw new Error(`Invalid proving state when adding a tx`);
+      }

-
+      validateTx(tx);

-
+      logger.info(`Received transaction: ${tx.hash}`);

-
-
-
-
+      if (tx.isEmpty) {
+        logger.warn(`Ignoring empty transaction ${tx.hash} - it will not be added to this block`);
+        return;
+      }

-
-
+      const [hints, treeSnapshots] = await this.prepareTransaction(tx, provingState);
+      this.enqueueFirstProofs(hints, treeSnapshots, tx, provingState);

-
-
+      if (provingState.transactionsReceived === provingState.totalNumTxs) {
+        logger.verbose(`All transactions received for block ${provingState.globalVariables.blockNumber}.`);
+      }
+    } catch (err: any) {
+      throw new Error(`Error adding transaction ${tx.hash.toString()} to block ${blockNumber}: ${err.message}`, {
+        cause: err,
+      });
     }
   }

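The rewritten `addNewTx` wraps any validation or preparation failure in a new error that names the transaction and block, keeping the original error reachable via `cause`. A small illustrative sketch of that pattern (function and variable names here are hypothetical; `Error` options with `cause` require ES2022 / Node 16.9+):

```ts
// Sketch: rethrow with context while preserving the original error on `cause`.
async function addTxWithContext(txHash: string, blockNumber: number, doAdd: () => Promise<void>) {
  try {
    await doAdd();
  } catch (err: any) {
    throw new Error(`Error adding transaction ${txHash} to block ${blockNumber}: ${err.message}`, {
      cause: err,
    });
  }
}

// Usage: the original failure remains inspectable on `error.cause`.
addTxWithContext('0xabc', 42, () => Promise.reject(new Error('Rollup not accepting further transactions'))).catch(
  e => console.error(e.message, '| cause:', (e.cause as Error).message),
);
```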
@@ -276,21 +276,13 @@ export class ProvingOrchestrator implements EpochProver {
    * Marks the block as full and pads it if required, no more transactions will be accepted.
    * Computes the block header and updates the archive tree.
    */
-  @trackSpan('ProvingOrchestrator.setBlockCompleted',
-
-
-
-
-    return {
-      [Attributes.BLOCK_NUMBER]: block.globalVariables.blockNumber.toNumber(),
-      [Attributes.BLOCK_SIZE]: block.totalNumTxs,
-      [Attributes.BLOCK_TXS_COUNT]: block.transactionsReceived,
-    };
-  })
-  public async setBlockCompleted(expectedHeader?: Header): Promise<L2Block> {
-    const provingState = this.provingState?.currentBlock;
+  @trackSpan('ProvingOrchestrator.setBlockCompleted', (blockNumber: number) => ({
+    [Attributes.BLOCK_NUMBER]: blockNumber,
+  }))
+  public async setBlockCompleted(blockNumber: number, expectedHeader?: BlockHeader): Promise<L2Block> {
+    const provingState = this.provingState?.getBlockProvingStateByBlockNumber(blockNumber);
     if (!provingState) {
-      throw new Error(`
+      throw new Error(`Block proving state for ${blockNumber} not found`);
     }

     if (!provingState.verifyState()) {
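`setBlockCompleted` now names the block explicitly instead of operating on an implicit "current" block, matching the removal of the `currentBlock` getter earlier in the diff. A sketch of the caller-visible change, using assumed minimal stand-in types:

```ts
// Hypothetical, simplified interfaces: only the parameter shapes are taken from the diff.
interface BlockHeaderLike {}
interface L2BlockLike { number: number }

interface OrchestratorBefore {
  setBlockCompleted(expectedHeader?: BlockHeaderLike): Promise<L2BlockLike>;
}
interface OrchestratorAfter {
  setBlockCompleted(blockNumber: number, expectedHeader?: BlockHeaderLike): Promise<L2BlockLike>;
}

async function completeBlock(orchestrator: OrchestratorAfter, blockNumber: number): Promise<L2BlockLike> {
  // The block number must match one previously started, otherwise the orchestrator
  // throws `Block proving state for ${blockNumber} not found`.
  return orchestrator.setBlockCompleted(blockNumber);
}
```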
@@ -313,7 +305,7 @@ export class ProvingOrchestrator implements EpochProver {
     // base rollup inputs
     // Then enqueue the proving of all the transactions
     const unprovenPaddingTx = makeEmptyProcessedTx(
-      this.
+      this.dbs.get(blockNumber)!.getInitialHeader(),
       provingState.globalVariables.chainId,
       provingState.globalVariables.version,
       getVKTreeRoot(),
@@ -344,7 +336,7 @@ export class ProvingOrchestrator implements EpochProver {

   /** Returns the block as built for a given index. */
   public getBlock(index: number): L2Block {
-    const block = this.provingState?.blocks[index]
+    const block = this.provingState?.blocks[index]?.block;
     if (!block) {
       throw new Error(`Block at index ${index} not available`);
     }
@@ -362,7 +354,10 @@ export class ProvingOrchestrator implements EpochProver {
   })
   private padEpoch(): Promise<void> {
     const provingState = this.provingState!;
-    const lastBlock =
+    const lastBlock = maxBy(
+      provingState.blocks.filter(b => !!b),
+      b => b!.blockNumber,
+    )?.block;
     if (!lastBlock) {
       return Promise.reject(new Error(`Epoch needs at least one completed block in order to be padded`));
     }
@@ -412,17 +407,20 @@ export class ProvingOrchestrator implements EpochProver {
     return Promise.resolve();
   }

-  private async buildBlock(provingState: BlockProvingState, expectedHeader?:
+  private async buildBlock(provingState: BlockProvingState, expectedHeader?: BlockHeader) {
     // Collect all new nullifiers, commitments, and contracts from all txs in this block to build body
     const txs = provingState!.allTxs.map(a => a.processedTx);

+    // Get db for this block
+    const db = this.dbs.get(provingState.blockNumber)!;
+
     // Given we've applied every change from this block, now assemble the block header
     // and update the archive tree, so we're ready to start processing the next block
     const { header, body } = await buildHeaderAndBodyFromTxs(
       txs,
       provingState.globalVariables,
       provingState.newL1ToL2Messages,
-
+      db,
     );

     if (expectedHeader && !header.equals(expectedHeader)) {
@@ -431,10 +429,10 @@ export class ProvingOrchestrator implements EpochProver {
     }

     logger.verbose(`Updating archive tree with block ${provingState.blockNumber} header ${header.hash().toString()}`);
-    await
+    await db.updateArchive(header);

     // Assemble the L2 block
-    const newArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE,
+    const newArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
     const l2Block = new L2Block(newArchive, header, body);

     if (!l2Block.body.getTxsEffectsHash().equals(header.contentCommitment.txsEffectsHash)) {
@@ -445,10 +443,24 @@ export class ProvingOrchestrator implements EpochProver {
       );
     }

+    await this.verifyBuiltBlockAgainstSyncedState(l2Block, newArchive);
+
     logger.verbose(`Orchestrator finalised block ${l2Block.number}`);
     provingState.block = l2Block;
   }

+  // Flagged as protected to disable in certain unit tests
+  protected async verifyBuiltBlockAgainstSyncedState(l2Block: L2Block, newArchive: AppendOnlyTreeSnapshot) {
+    const syncedArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, this.dbProvider.getSnapshot(l2Block.number));
+    if (!syncedArchive.equals(newArchive)) {
+      throw new Error(
+        `Archive tree mismatch for block ${l2Block.number}: world state synced to ${inspect(
+          syncedArchive,
+        )} but built ${inspect(newArchive)}`,
+      );
+    }
+  }
+
   // Enqueues the proving of the required padding transactions
   // If the fully proven padding transaction is not available, this will first be proven
   private enqueuePaddingTxs(
@@ -602,13 +614,6 @@ export class ProvingOrchestrator implements EpochProver {
     provingState: BlockProvingState,
   ) {
     const txProvingState = new TxProvingState(tx, hints, treeSnapshots);
-
-    const rejectReason = txProvingState.verifyStateOrReject();
-    if (rejectReason) {
-      provingState.reject(rejectReason);
-      return;
-    }
-
     const txIndex = provingState.addNewTx(txProvingState);
     this.enqueueTube(provingState, txIndex);
     if (txProvingState.requireAvmProof) {
@@ -692,9 +697,11 @@ export class ProvingOrchestrator implements EpochProver {
       return;
     }

+    const db = this.dbs.get(provingState.blockNumber)!;
+
     // We build the base rollup inputs using a mock proof and verification key.
     // These will be overwritten later once we have proven the tube circuit and any public kernels
-    const [ms, hints] = await elapsed(buildBaseRollupHints(tx, provingState.globalVariables,
+    const [ms, hints] = await elapsed(buildBaseRollupHints(tx, provingState.globalVariables, db));

     if (!tx.isEmpty) {
       this.metrics.recordBaseRollupInputs(ms);
@@ -702,7 +709,7 @@ export class ProvingOrchestrator implements EpochProver {

     const promises = [MerkleTreeId.NOTE_HASH_TREE, MerkleTreeId.NULLIFIER_TREE, MerkleTreeId.PUBLIC_DATA_TREE].map(
       async (id: MerkleTreeId) => {
-        return { key: id, value: await getTreeSnapshot(id,
+        return { key: id, value: await getTreeSnapshot(id, db) };
       },
     );
     const treeSnapshots: TreeSnapshots = new Map((await Promise.all(promises)).map(obj => [obj.key, obj.value]));
@@ -1055,6 +1062,19 @@ export class ProvingOrchestrator implements EpochProver {
       logger.debug('Block root rollup already started');
       return;
     }
+    const blockNumber = provingState.blockNumber;
+
+    // TODO(palla/prover): This closes the fork only on the happy path. If this epoch orchestrator
+    // is aborted and never reaches this point, it will leak the fork. We need to add a global cleanup,
+    // but have to make sure it only runs once all operations are completed, otherwise some function here
+    // will attempt to access the fork after it was closed.
+    logger.debug(`Cleaning up world state fork for ${blockNumber}`);
+    void this.dbs
+      .get(blockNumber)
+      ?.close()
+      .then(() => this.dbs.delete(blockNumber))
+      .catch(err => logger.error(`Error closing db for block ${blockNumber}`, err));
+
     this.enqueueBlockRootRollup(provingState);
   }

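The cleanup added above closes a block's world-state fork without awaiting it, so proving continues while the fork shuts down, and failures are only logged (the TODO notes the fork can still leak if the orchestrator is aborted first). A small sketch of that fire-and-forget pattern with assumed stand-in types:

```ts
// Hypothetical stand-ins: a closable fork handle and a minimal logger.
interface ForkLike { close(): Promise<void> }
const logger = { debug: console.debug, error: console.error };

function cleanupFork(dbs: Map<number, ForkLike>, blockNumber: number): void {
  logger.debug(`Cleaning up world state fork for ${blockNumber}`);
  // `void` marks the promise as intentionally not awaited; errors are swallowed into the log.
  void dbs
    .get(blockNumber)
    ?.close()
    .then(() => dbs.delete(blockNumber))
    .catch(err => logger.error(`Error closing db for block ${blockNumber}`, err));
}
```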
@@ -1,11 +1,4 @@
-import {
-  type Histogram,
-  Metrics,
-  type TelemetryClient,
-  type Tracer,
-  ValueType,
-  millisecondBuckets,
-} from '@aztec/telemetry-client';
+import { type Histogram, Metrics, type TelemetryClient, type Tracer, ValueType } from '@aztec/telemetry-client';

 export class ProvingOrchestratorMetrics {
   public readonly tracer: Tracer;
@@ -20,9 +13,6 @@ export class ProvingOrchestratorMetrics {
       unit: 'ms',
       description: 'Duration to build base rollup inputs',
       valueType: ValueType.INT,
-      advice: {
-        explicitBucketBoundaries: millisecondBuckets(1), // 10ms -> ~327s
-      },
     });
   }

@@ -1,18 +1,10 @@
-import {
-  EncryptedNoteTxL2Logs,
-  EncryptedTxL2Logs,
-  type MerkleTreeId,
-  type ProcessedTx,
-  type ProofAndVerificationKey,
-  UnencryptedTxL2Logs,
-} from '@aztec/circuit-types';
+import { type MerkleTreeId, type ProcessedTx, type ProofAndVerificationKey } from '@aztec/circuit-types';
 import {
   type AVM_PROOF_LENGTH_IN_FIELDS,
   AVM_VK_INDEX,
   type AppendOnlyTreeSnapshot,
   AvmProofData,
   type BaseRollupHints,
-  Fr,
   PrivateBaseRollupHints,
   PrivateBaseRollupInputs,
   PrivateTubeData,
@@ -112,53 +104,6 @@ export class TxProvingState {
     this.avm = avmProofAndVk;
   }

-  public verifyStateOrReject(): string | undefined {
-    const txEffect = this.processedTx.txEffect;
-    const fromPrivate = this.processedTx.data;
-
-    const noteEncryptedLogsHashes = [
-      fromPrivate.forRollup?.end.noteEncryptedLogsHashes || [],
-      fromPrivate.forPublic?.nonRevertibleAccumulatedData.noteEncryptedLogsHashes || [],
-      fromPrivate.forPublic?.revertibleAccumulatedData.noteEncryptedLogsHashes || [],
-    ].flat();
-    const txNoteEncryptedLogsHash = EncryptedNoteTxL2Logs.hashNoteLogs(
-      noteEncryptedLogsHashes.filter(log => !log.isEmpty()).map(log => log.value.toBuffer()),
-    );
-    if (!txNoteEncryptedLogsHash.equals(txEffect.noteEncryptedLogs.hash())) {
-      return `Note encrypted logs hash mismatch: ${Fr.fromBuffer(txNoteEncryptedLogsHash)} === ${Fr.fromBuffer(
-        txEffect.noteEncryptedLogs.hash(),
-      )}`;
-    }
-
-    const encryptedLogsHashes = [
-      fromPrivate.forRollup?.end.encryptedLogsHashes || [],
-      fromPrivate.forPublic?.nonRevertibleAccumulatedData.encryptedLogsHashes || [],
-      fromPrivate.forPublic?.revertibleAccumulatedData.encryptedLogsHashes || [],
-    ].flat();
-    const txEncryptedLogsHash = EncryptedTxL2Logs.hashSiloedLogs(
-      encryptedLogsHashes.filter(log => !log.isEmpty()).map(log => log.getSiloedHash()),
-    );
-    if (!txEncryptedLogsHash.equals(txEffect.encryptedLogs.hash())) {
-      // @todo This rejection messages is never seen. Never making it out to the logs
-      return `Encrypted logs hash mismatch: ${Fr.fromBuffer(txEncryptedLogsHash)} === ${Fr.fromBuffer(
-        txEffect.encryptedLogs.hash(),
-      )}`;
-    }
-
-    const avmOutput = this.processedTx.avmProvingRequest?.inputs.output;
-    const unencryptedLogsHashes = avmOutput
-      ? avmOutput.accumulatedData.unencryptedLogsHashes
-      : fromPrivate.forRollup!.end.unencryptedLogsHashes;
-    const txUnencryptedLogsHash = UnencryptedTxL2Logs.hashSiloedLogs(
-      unencryptedLogsHashes.filter(log => !log.isEmpty()).map(log => log.getSiloedHash()),
-    );
-    if (!txUnencryptedLogsHash.equals(txEffect.unencryptedLogs.hash())) {
-      return `Unencrypted logs hash mismatch: ${Fr.fromBuffer(txUnencryptedLogsHash)} === ${Fr.fromBuffer(
-        txEffect.unencryptedLogs.hash(),
-      )}`;
-    }
-  }
-
   private getTubeVkData() {
     let vkIndex = TUBE_VK_INDEX;
     try {
@@ -32,7 +32,7 @@ import type {
 } from '@aztec/circuits.js';
 import { randomBytes } from '@aztec/foundation/crypto';
 import { AbortError, TimeoutError } from '@aztec/foundation/error';
-import {
+import { createLogger } from '@aztec/foundation/log';
 import { type PromiseWithResolvers, RunningPromise, promiseWithResolvers } from '@aztec/foundation/promise';
 import { PriorityMemoryQueue } from '@aztec/foundation/queue';
 import { type TelemetryClient } from '@aztec/telemetry-client';
@@ -57,7 +57,7 @@ const defaultTimeSource = () => Date.now();
  * The queue accumulates jobs and provides them to agents prioritized by block number.
  */
 export class MemoryProvingQueue implements ServerCircuitProver, ProvingJobSource {
-  private log =
+  private log = createLogger('prover-client:prover-pool:queue');
   private queue = new PriorityMemoryQueue<ProvingJobWithResolvers>(
     (a, b) => (a.epochNumber ?? 0) - (b.epochNumber ?? 0),
   );
@@ -120,6 +120,7 @@ export class MemoryProvingQueue implements ServerCircuitProver, ProvingJobSource
         id: job.id,
         type: job.type,
         inputsUri: job.inputsUri,
+        epochNumber: job.epochNumber,
       };
     } catch (err) {
       if (err instanceof TimeoutError) {
@@ -244,7 +245,7 @@ export class MemoryProvingQueue implements ServerCircuitProver, ProvingJobSource
       reject,
       attempts: 1,
       heartbeat: 0,
-      epochNumber,
+      epochNumber: epochNumber ?? 0,
     };

     if (signal) {
@@ -8,7 +8,7 @@ import {
   type ServerCircuitProver,
   makeProvingRequestResult,
 } from '@aztec/circuit-types';
-import {
+import { createLogger } from '@aztec/foundation/log';
 import { RunningPromise } from '@aztec/foundation/running-promise';
 import { elapsed } from '@aztec/foundation/timer';

@@ -38,7 +38,7 @@ export class ProverAgent implements ProverAgentApi {
     private maxConcurrency = 1,
     /** How long to wait between jobs */
     private pollIntervalMs = 100,
-    private log =
+    private log = createLogger('prover-client:prover-agent'),
   ) {}

   setMaxConcurrency(maxConcurrency: number): Promise<void> {
@@ -1,14 +1,15 @@
-import { type ProvingJobBroker } from '@aztec/circuit-types';
+import { type ForkMerkleTreeOperations, type ProvingJobBroker } from '@aztec/circuit-types';
 import { type TelemetryClient } from '@aztec/telemetry-client';
 import { NoopTelemetryClient } from '@aztec/telemetry-client/noop';

 import { type ProverClientConfig } from '../config.js';
-import {
+import { ProverClient } from './prover-client.js';

 export function createProverClient(
   config: ProverClientConfig,
+  worldState: ForkMerkleTreeOperations,
   broker: ProvingJobBroker,
   telemetry: TelemetryClient = new NoopTelemetryClient(),
 ) {
-  return
+  return ProverClient.new(config, worldState, broker, telemetry);
 }