@aztec/prover-client 0.42.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/dest/config.d.ts +21 -0
- package/dest/config.d.ts.map +1 -0
- package/dest/config.js +31 -0
- package/dest/index.d.ts +4 -0
- package/dest/index.d.ts.map +1 -0
- package/dest/index.js +3 -0
- package/dest/mocks/fixtures.d.ts +22 -0
- package/dest/mocks/fixtures.d.ts.map +1 -0
- package/dest/mocks/fixtures.js +95 -0
- package/dest/mocks/test_context.d.ts +32 -0
- package/dest/mocks/test_context.d.ts.map +1 -0
- package/dest/mocks/test_context.js +116 -0
- package/dest/orchestrator/block-building-helpers.d.ts +36 -0
- package/dest/orchestrator/block-building-helpers.d.ts.map +1 -0
- package/dest/orchestrator/block-building-helpers.js +236 -0
- package/dest/orchestrator/orchestrator.d.ts +113 -0
- package/dest/orchestrator/orchestrator.d.ts.map +1 -0
- package/dest/orchestrator/orchestrator.js +574 -0
- package/dest/orchestrator/proving-state.d.ts +68 -0
- package/dest/orchestrator/proving-state.d.ts.map +1 -0
- package/dest/orchestrator/proving-state.js +142 -0
- package/dest/orchestrator/tx-proving-state.d.ts +35 -0
- package/dest/orchestrator/tx-proving-state.d.ts.map +1 -0
- package/dest/orchestrator/tx-proving-state.js +92 -0
- package/dest/prover-agent/index.d.ts +4 -0
- package/dest/prover-agent/index.d.ts.map +1 -0
- package/dest/prover-agent/index.js +4 -0
- package/dest/prover-agent/memory-proving-queue.d.ts +64 -0
- package/dest/prover-agent/memory-proving-queue.d.ts.map +1 -0
- package/dest/prover-agent/memory-proving-queue.js +187 -0
- package/dest/prover-agent/prover-agent.d.ts +30 -0
- package/dest/prover-agent/prover-agent.d.ts.map +1 -0
- package/dest/prover-agent/prover-agent.js +115 -0
- package/dest/prover-agent/proving-error.d.ts +5 -0
- package/dest/prover-agent/proving-error.d.ts.map +1 -0
- package/dest/prover-agent/proving-error.js +9 -0
- package/dest/prover-agent/rpc.d.ts +5 -0
- package/dest/prover-agent/rpc.d.ts.map +1 -0
- package/dest/prover-agent/rpc.js +53 -0
- package/dest/tx-prover/tx-prover.d.ts +65 -0
- package/dest/tx-prover/tx-prover.d.ts.map +1 -0
- package/dest/tx-prover/tx-prover.js +122 -0
- package/package.json +87 -0
- package/src/config.ts +59 -0
- package/src/index.ts +4 -0
- package/src/mocks/fixtures.ts +182 -0
- package/src/mocks/test_context.ts +217 -0
- package/src/orchestrator/block-building-helpers.ts +470 -0
- package/src/orchestrator/orchestrator.ts +883 -0
- package/src/orchestrator/proving-state.ts +210 -0
- package/src/orchestrator/tx-proving-state.ts +139 -0
- package/src/prover-agent/index.ts +3 -0
- package/src/prover-agent/memory-proving-queue.ts +303 -0
- package/src/prover-agent/prover-agent.ts +144 -0
- package/src/prover-agent/proving-error.ts +9 -0
- package/src/prover-agent/rpc.ts +91 -0
- package/src/tx-prover/tx-prover.ts +171 -0
|
@@ -0,0 +1,883 @@
|
|
|
1
|
+
import {
|
|
2
|
+
Body,
|
|
3
|
+
L2Block,
|
|
4
|
+
MerkleTreeId,
|
|
5
|
+
type PaddingProcessedTx,
|
|
6
|
+
type ProcessedTx,
|
|
7
|
+
PublicKernelType,
|
|
8
|
+
Tx,
|
|
9
|
+
type TxEffect,
|
|
10
|
+
makeEmptyProcessedTx,
|
|
11
|
+
makePaddingProcessedTx,
|
|
12
|
+
toTxEffect,
|
|
13
|
+
} from '@aztec/circuit-types';
|
|
14
|
+
import {
|
|
15
|
+
BlockProofError,
|
|
16
|
+
type BlockResult,
|
|
17
|
+
PROVING_STATUS,
|
|
18
|
+
type ProvingResult,
|
|
19
|
+
type ProvingTicket,
|
|
20
|
+
type PublicInputsAndRecursiveProof,
|
|
21
|
+
type ServerCircuitProver,
|
|
22
|
+
} from '@aztec/circuit-types/interfaces';
|
|
23
|
+
import {
|
|
24
|
+
AGGREGATION_OBJECT_LENGTH,
|
|
25
|
+
AvmCircuitInputs,
|
|
26
|
+
type BaseOrMergeRollupPublicInputs,
|
|
27
|
+
BaseParityInputs,
|
|
28
|
+
type BaseRollupInputs,
|
|
29
|
+
Fr,
|
|
30
|
+
type GlobalVariables,
|
|
31
|
+
type Header,
|
|
32
|
+
type KernelCircuitPublicInputs,
|
|
33
|
+
L1_TO_L2_MSG_SUBTREE_HEIGHT,
|
|
34
|
+
L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH,
|
|
35
|
+
type NESTED_RECURSIVE_PROOF_LENGTH,
|
|
36
|
+
NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP,
|
|
37
|
+
NUM_BASE_PARITY_PER_ROOT_PARITY,
|
|
38
|
+
type Proof,
|
|
39
|
+
type PublicKernelCircuitPublicInputs,
|
|
40
|
+
type RECURSIVE_PROOF_LENGTH,
|
|
41
|
+
type RecursiveProof,
|
|
42
|
+
type RootParityInput,
|
|
43
|
+
RootParityInputs,
|
|
44
|
+
type VerificationKeyAsFields,
|
|
45
|
+
VerificationKeyData,
|
|
46
|
+
type VerificationKeys,
|
|
47
|
+
makeEmptyProof,
|
|
48
|
+
} from '@aztec/circuits.js';
|
|
49
|
+
import { makeTuple } from '@aztec/foundation/array';
|
|
50
|
+
import { padArrayEnd } from '@aztec/foundation/collection';
|
|
51
|
+
import { AbortedError } from '@aztec/foundation/error';
|
|
52
|
+
import { createDebugLogger } from '@aztec/foundation/log';
|
|
53
|
+
import { promiseWithResolvers } from '@aztec/foundation/promise';
|
|
54
|
+
import { BufferReader, type Tuple } from '@aztec/foundation/serialize';
|
|
55
|
+
import { pushTestData } from '@aztec/foundation/testing';
|
|
56
|
+
import { type MerkleTreeOperations } from '@aztec/world-state';
|
|
57
|
+
|
|
58
|
+
import { inspect } from 'util';
|
|
59
|
+
|
|
60
|
+
import {
|
|
61
|
+
buildBaseRollupInput,
|
|
62
|
+
createMergeRollupInputs,
|
|
63
|
+
getRootRollupInput,
|
|
64
|
+
getSubtreeSiblingPath,
|
|
65
|
+
getTreeSnapshot,
|
|
66
|
+
validatePartialState,
|
|
67
|
+
validateRootOutput,
|
|
68
|
+
validateTx,
|
|
69
|
+
} from './block-building-helpers.js';
|
|
70
|
+
import { type MergeRollupInputData, ProvingState, type TreeSnapshots } from './proving-state.js';
|
|
71
|
+
import { TX_PROVING_CODE, TxProvingState } from './tx-proving-state.js';
|
|
72
|
+
|
|
73
|
+
const logger = createDebugLogger('aztec:prover:proving-orchestrator');
|
|
74
|
+
|
|
75
|
+
/**
|
|
76
|
+
* Implements an event driven proving scheduler to build the recursive proof tree. The idea being:
|
|
77
|
+
* 1. Transactions are provided to the scheduler post simulation.
|
|
78
|
+
* 2. Tree insertions are performed as required to generate transaction specific proofs
|
|
79
|
+
* 3. Those transaction specific proofs are generated in the necessary order accounting for dependencies
|
|
80
|
+
* 4. Once a transaction is proven, it will be incorporated into a merge proof
|
|
81
|
+
* 5. Merge proofs are produced at each level of the tree until the root proof is produced
|
|
82
|
+
*
|
|
83
|
+
* The proving implementation is determined by the provided prover. This could be for example a local prover or a remote prover pool.
|
|
84
|
+
*/
|
|
85
|
+
|
|
86
|
+
/**
|
|
87
|
+
* The orchestrator, managing the flow of recursive proving operations required to build the rollup proof tree.
|
|
88
|
+
*/
|
|
89
|
+
export class ProvingOrchestrator {
  // State for the block currently being proven; undefined until startNewBlock is called.
  private provingState: ProvingState | undefined = undefined;
  // Abort controllers for every in-flight proving job, so cancelBlock can stop them all.
  private pendingProvingJobs: AbortController[] = [];
  // Cached fully-proven padding tx; reused across blocks until reset() clears it.
  private paddingTx: PaddingProcessedTx | undefined = undefined;

  /**
   * @param db - Merkle tree operations used to read and update world state.
   * @param prover - The circuit prover implementation (e.g. a local prover or a remote prover pool).
   * @param initialHeader - Optional initial block header; built lazily from the db if omitted.
   */
  constructor(private db: MerkleTreeOperations, private prover: ServerCircuitProver, private initialHeader?: Header) {}
|
|
95
|
+
|
|
96
|
+
/**
|
|
97
|
+
* Resets the orchestrator's cached padding tx.
|
|
98
|
+
*/
|
|
99
|
+
public reset() {
|
|
100
|
+
this.paddingTx = undefined;
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
/**
|
|
104
|
+
* Starts off a new block
|
|
105
|
+
* @param numTxs - The total number of transactions in the block. Must be a power of 2
|
|
106
|
+
* @param globalVariables - The global variables for the block
|
|
107
|
+
* @param l1ToL2Messages - The l1 to l2 messages for the block
|
|
108
|
+
* @param verificationKeys - The private kernel verification keys
|
|
109
|
+
* @returns A proving ticket, containing a promise notifying of proving completion
|
|
110
|
+
*/
|
|
111
|
+
public async startNewBlock(
|
|
112
|
+
numTxs: number,
|
|
113
|
+
globalVariables: GlobalVariables,
|
|
114
|
+
l1ToL2Messages: Fr[],
|
|
115
|
+
verificationKeys: VerificationKeys,
|
|
116
|
+
): Promise<ProvingTicket> {
|
|
117
|
+
// Create initial header if not done so yet
|
|
118
|
+
if (!this.initialHeader) {
|
|
119
|
+
this.initialHeader = await this.db.buildInitialHeader();
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
// Check that the length of the array of txs is a power of two
|
|
123
|
+
// See https://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
|
|
124
|
+
if (!Number.isInteger(numTxs) || numTxs < 2 || (numTxs & (numTxs - 1)) !== 0) {
|
|
125
|
+
throw new Error(`Length of txs for the block should be a power of two and at least two (got ${numTxs})`);
|
|
126
|
+
}
|
|
127
|
+
// Cancel any currently proving block before starting a new one
|
|
128
|
+
this.cancelBlock();
|
|
129
|
+
logger.info(`Starting new block with ${numTxs} transactions`);
|
|
130
|
+
// we start the block by enqueueing all of the base parity circuits
|
|
131
|
+
let baseParityInputs: BaseParityInputs[] = [];
|
|
132
|
+
let l1ToL2MessagesPadded: Tuple<Fr, typeof NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP>;
|
|
133
|
+
try {
|
|
134
|
+
l1ToL2MessagesPadded = padArrayEnd(l1ToL2Messages, Fr.ZERO, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP);
|
|
135
|
+
} catch (err) {
|
|
136
|
+
throw new Error('Too many L1 to L2 messages');
|
|
137
|
+
}
|
|
138
|
+
baseParityInputs = Array.from({ length: NUM_BASE_PARITY_PER_ROOT_PARITY }, (_, i) =>
|
|
139
|
+
BaseParityInputs.fromSlice(l1ToL2MessagesPadded, i),
|
|
140
|
+
);
|
|
141
|
+
|
|
142
|
+
const messageTreeSnapshot = await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, this.db);
|
|
143
|
+
|
|
144
|
+
const newL1ToL2MessageTreeRootSiblingPathArray = await getSubtreeSiblingPath(
|
|
145
|
+
MerkleTreeId.L1_TO_L2_MESSAGE_TREE,
|
|
146
|
+
L1_TO_L2_MSG_SUBTREE_HEIGHT,
|
|
147
|
+
this.db,
|
|
148
|
+
);
|
|
149
|
+
|
|
150
|
+
const newL1ToL2MessageTreeRootSiblingPath = makeTuple(
|
|
151
|
+
L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH,
|
|
152
|
+
i =>
|
|
153
|
+
i < newL1ToL2MessageTreeRootSiblingPathArray.length ? newL1ToL2MessageTreeRootSiblingPathArray[i] : Fr.ZERO,
|
|
154
|
+
0,
|
|
155
|
+
);
|
|
156
|
+
|
|
157
|
+
// Update the local trees to include the new l1 to l2 messages
|
|
158
|
+
await this.db.appendLeaves(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, l1ToL2MessagesPadded);
|
|
159
|
+
|
|
160
|
+
const { promise: _promise, resolve, reject } = promiseWithResolvers<ProvingResult>();
|
|
161
|
+
const promise = _promise.catch(
|
|
162
|
+
(reason): ProvingResult => ({
|
|
163
|
+
status: PROVING_STATUS.FAILURE,
|
|
164
|
+
reason,
|
|
165
|
+
}),
|
|
166
|
+
);
|
|
167
|
+
|
|
168
|
+
const provingState = new ProvingState(
|
|
169
|
+
numTxs,
|
|
170
|
+
resolve,
|
|
171
|
+
reject,
|
|
172
|
+
globalVariables,
|
|
173
|
+
l1ToL2MessagesPadded,
|
|
174
|
+
baseParityInputs.length,
|
|
175
|
+
messageTreeSnapshot,
|
|
176
|
+
newL1ToL2MessageTreeRootSiblingPath,
|
|
177
|
+
verificationKeys,
|
|
178
|
+
);
|
|
179
|
+
|
|
180
|
+
for (let i = 0; i < baseParityInputs.length; i++) {
|
|
181
|
+
this.enqueueBaseParityCircuit(provingState, baseParityInputs[i], i);
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
this.provingState = provingState;
|
|
185
|
+
|
|
186
|
+
const ticket: ProvingTicket = {
|
|
187
|
+
provingPromise: promise,
|
|
188
|
+
};
|
|
189
|
+
return ticket;
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
/**
|
|
193
|
+
* The interface to add a simulated transaction to the scheduler
|
|
194
|
+
* @param tx - The transaction to be proven
|
|
195
|
+
*/
|
|
196
|
+
public async addNewTx(tx: ProcessedTx): Promise<void> {
|
|
197
|
+
if (!this.provingState) {
|
|
198
|
+
throw new Error(`Invalid proving state, call startNewBlock before adding transactions`);
|
|
199
|
+
}
|
|
200
|
+
|
|
201
|
+
if (!this.provingState.isAcceptingTransactions()) {
|
|
202
|
+
throw new Error(`Rollup not accepting further transactions`);
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
validateTx(tx);
|
|
206
|
+
|
|
207
|
+
logger.info(`Received transaction: ${tx.hash}`);
|
|
208
|
+
|
|
209
|
+
const [inputs, treeSnapshots] = await this.prepareTransaction(tx, this.provingState);
|
|
210
|
+
this.enqueueFirstProof(inputs, treeSnapshots, tx, this.provingState);
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
  /**
   * Marks the block as full and pads it to the full power of 2 block size, no more transactions will be accepted.
   * Does nothing if the block already contains its full complement of transactions.
   * @throws If startNewBlock has not been called first.
   */
  public async setBlockCompleted() {
    if (!this.provingState) {
      throw new Error(`Invalid proving state, call startNewBlock before adding transactions or completing the block`);
    }

    // we may need to pad the rollup with empty transactions
    const paddingTxCount = this.provingState.totalNumTxs - this.provingState.transactionsReceived;
    if (paddingTxCount === 0) {
      return;
    }

    logger.debug(`Padding rollup with ${paddingTxCount} empty transactions`);
    // Make an empty padding transaction
    // Insert it into the tree the required number of times to get all of the
    // base rollup inputs
    // Then enqueue the proving of all the transactions
    const unprovenPaddingTx = makeEmptyProcessedTx(
      this.initialHeader ?? (await this.db.buildInitialHeader()),
      this.provingState.globalVariables.chainId,
      this.provingState.globalVariables.version,
    );
    const txInputs: Array<{ inputs: BaseRollupInputs; snapshot: TreeSnapshots }> = [];
    // Deliberately sequential: each prepareTransaction call updates the merkle trees,
    // so the padding txs must be prepared one after another, not in parallel.
    for (let i = 0; i < paddingTxCount; i++) {
      const [inputs, snapshot] = await this.prepareTransaction(unprovenPaddingTx, this.provingState);
      const txInput = {
        inputs,
        snapshot,
      };
      txInputs.push(txInput);
    }

    // Now enqueue the proving
    this.enqueuePaddingTxs(this.provingState, txInputs, unprovenPaddingTx);
  }
|
|
250
|
+
|
|
251
|
+
// Enqueues the proving of the required padding transactions
|
|
252
|
+
// If the fully proven padding transaction is not available, this will first be proven
|
|
253
|
+
private enqueuePaddingTxs(
|
|
254
|
+
provingState: ProvingState,
|
|
255
|
+
txInputs: Array<{ inputs: BaseRollupInputs; snapshot: TreeSnapshots }>,
|
|
256
|
+
unprovenPaddingTx: ProcessedTx,
|
|
257
|
+
) {
|
|
258
|
+
if (this.paddingTx) {
|
|
259
|
+
// We already have the padding transaction
|
|
260
|
+
logger.debug(`Enqueuing ${txInputs.length} padding transactions using existing padding tx`);
|
|
261
|
+
this.provePaddingTransactions(txInputs, this.paddingTx, provingState);
|
|
262
|
+
return;
|
|
263
|
+
}
|
|
264
|
+
logger.debug(`Enqueuing deferred proving for padding txs to enqueue ${txInputs.length} paddings`);
|
|
265
|
+
this.deferredProving(
|
|
266
|
+
provingState,
|
|
267
|
+
signal =>
|
|
268
|
+
this.prover.getEmptyPrivateKernelProof(
|
|
269
|
+
{
|
|
270
|
+
// Chain id and version should not change even if the proving state does, so it's safe to use them for the padding tx
|
|
271
|
+
// which gets cached across multiple runs of the orchestrator with different proving states. If they were to change,
|
|
272
|
+
// we'd have to clear out the paddingTx here and regenerate it when they do.
|
|
273
|
+
chainId: unprovenPaddingTx.data.constants.txContext.chainId,
|
|
274
|
+
version: unprovenPaddingTx.data.constants.txContext.version,
|
|
275
|
+
header: unprovenPaddingTx.data.constants.historicalHeader,
|
|
276
|
+
},
|
|
277
|
+
signal,
|
|
278
|
+
),
|
|
279
|
+
result => {
|
|
280
|
+
logger.debug(`Completed proof for padding tx, now enqueuing ${txInputs.length} padding txs`);
|
|
281
|
+
this.paddingTx = makePaddingProcessedTx(result);
|
|
282
|
+
this.provePaddingTransactions(txInputs, this.paddingTx, provingState);
|
|
283
|
+
},
|
|
284
|
+
);
|
|
285
|
+
}
|
|
286
|
+
|
|
287
|
+
/**
|
|
288
|
+
* Prepares the cached sets of base rollup inputs for padding transactions and proves them
|
|
289
|
+
* @param txInputs - The base rollup inputs, start and end hash paths etc
|
|
290
|
+
* @param paddingTx - The padding tx, contains the header, proof, vk, public inputs used in the proof
|
|
291
|
+
* @param provingState - The block proving state
|
|
292
|
+
*/
|
|
293
|
+
private provePaddingTransactions(
|
|
294
|
+
txInputs: Array<{ inputs: BaseRollupInputs; snapshot: TreeSnapshots }>,
|
|
295
|
+
paddingTx: PaddingProcessedTx,
|
|
296
|
+
provingState: ProvingState,
|
|
297
|
+
) {
|
|
298
|
+
// The padding tx contains the proof and vk, generated separately from the base inputs
|
|
299
|
+
// Copy these into the base rollup inputs
|
|
300
|
+
for (let i = 0; i < txInputs.length; i++) {
|
|
301
|
+
txInputs[i].inputs.kernelData.vk = paddingTx.verificationKey;
|
|
302
|
+
txInputs[i].inputs.kernelData.proof = paddingTx.recursiveProof;
|
|
303
|
+
this.enqueueFirstProof(txInputs[i].inputs, txInputs[i].snapshot, paddingTx, provingState);
|
|
304
|
+
}
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
/**
|
|
308
|
+
* Cancel any further proving of the block
|
|
309
|
+
*/
|
|
310
|
+
public cancelBlock() {
|
|
311
|
+
for (const controller of this.pendingProvingJobs) {
|
|
312
|
+
controller.abort();
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
this.provingState?.cancel();
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
  /**
   * Performs the final tree update for the block and returns the fully proven block.
   * Must be called once the root rollup proof is available; may only be called once per block.
   * @returns The fully proven block and proof.
   * @throws BlockProofError wrapping any failure (invalid state, already finalised, hash mismatch, tree errors).
   */
  public async finaliseBlock() {
    try {
      if (
        !this.provingState ||
        !this.provingState.rootRollupPublicInputs ||
        !this.provingState.finalProof ||
        !this.provingState.finalAggregationObject
      ) {
        throw new Error(`Invalid proving state, a block must be proven before it can be finalised`);
      }
      if (this.provingState.block) {
        throw new Error('Block already finalised');
      }

      const rootRollupOutputs = this.provingState.rootRollupPublicInputs;

      logger?.debug(`Updating and validating root trees`);
      // Archive must be updated with the new header before the root output is validated against the db.
      await this.db.updateArchive(rootRollupOutputs.header);

      await validateRootOutput(rootRollupOutputs, this.db);

      // Collect all new nullifiers, commitments, and contracts from all txs in this block
      const gasFees = this.provingState.globalVariables.gasFees;
      const nonEmptyTxEffects: TxEffect[] = this.provingState!.allTxs.map(txProvingState =>
        toTxEffect(txProvingState.processedTx, gasFees),
      ).filter(txEffect => !txEffect.isEmpty());
      const blockBody = new Body(nonEmptyTxEffects);

      const l2Block = L2Block.fromFields({
        archive: rootRollupOutputs.archive,
        header: rootRollupOutputs.header,
        body: blockBody,
      });

      // Cross-check the locally-computed body hash against the circuit's content commitment.
      if (!l2Block.body.getTxsEffectsHash().equals(rootRollupOutputs.header.contentCommitment.txsEffectsHash)) {
        logger.debug(inspect(blockBody));
        throw new Error(
          `Txs effects hash mismatch, ${l2Block.body
            .getTxsEffectsHash()
            .toString('hex')} == ${rootRollupOutputs.header.contentCommitment.txsEffectsHash.toString('hex')} `,
        );
      }

      logger.info(`Successfully proven block ${l2Block.number}!`);

      // Record the block on the proving state so a second finaliseBlock call is rejected above.
      this.provingState.block = l2Block;

      const blockResult: BlockResult = {
        proof: this.provingState.finalProof,
        aggregationObject: this.provingState.finalAggregationObject,
        block: l2Block,
      };

      pushTestData('blockResults', {
        block: l2Block.toString(),
        proof: this.provingState.finalProof.toString(),
        aggregationObject: blockResult.aggregationObject.map(x => x.toString()),
      });

      return blockResult;
    } catch (err) {
      // Wrap every failure with the hashes of the txs involved so callers can identify the block.
      throw new BlockProofError(
        err && typeof err === 'object' && 'message' in err ? String(err.message) : String(err),
        this.provingState?.allTxs.map(x => Tx.getHash(x.processedTx)) ?? [],
      );
    }
  }
|
|
389
|
+
|
|
390
|
+
/**
|
|
391
|
+
* Starts the proving process for the given transaction and adds it to our state
|
|
392
|
+
* @param tx - The transaction whose proving we wish to commence
|
|
393
|
+
* @param provingState - The proving state being worked on
|
|
394
|
+
*/
|
|
395
|
+
private async prepareTransaction(tx: ProcessedTx, provingState: ProvingState) {
|
|
396
|
+
// Pass the private kernel tail vk here as the previous one.
|
|
397
|
+
// If there are public functions then this key will be overwritten once the public tail has been proven
|
|
398
|
+
const previousKernelVerificationKey = provingState.privateKernelVerificationKeys.privateKernelCircuit;
|
|
399
|
+
|
|
400
|
+
const txInputs = await this.prepareBaseRollupInputs(provingState, tx, previousKernelVerificationKey);
|
|
401
|
+
if (!txInputs) {
|
|
402
|
+
// This should not be possible
|
|
403
|
+
throw new Error(`Unable to add padding transaction, preparing base inputs failed`);
|
|
404
|
+
}
|
|
405
|
+
return txInputs;
|
|
406
|
+
}
|
|
407
|
+
|
|
408
|
+
private enqueueFirstProof(
|
|
409
|
+
inputs: BaseRollupInputs,
|
|
410
|
+
treeSnapshots: TreeSnapshots,
|
|
411
|
+
tx: ProcessedTx,
|
|
412
|
+
provingState: ProvingState,
|
|
413
|
+
) {
|
|
414
|
+
const txProvingState = new TxProvingState(
|
|
415
|
+
tx,
|
|
416
|
+
inputs,
|
|
417
|
+
treeSnapshots,
|
|
418
|
+
provingState.privateKernelVerificationKeys.privateKernelToPublicCircuit,
|
|
419
|
+
);
|
|
420
|
+
const txIndex = provingState.addNewTx(txProvingState);
|
|
421
|
+
const numPublicKernels = txProvingState.getNumPublicKernels();
|
|
422
|
+
if (!numPublicKernels) {
|
|
423
|
+
// no public functions, go straight to the base rollup
|
|
424
|
+
logger.debug(`Enqueueing base rollup for tx ${txIndex}`);
|
|
425
|
+
this.enqueueBaseRollup(provingState, BigInt(txIndex), txProvingState);
|
|
426
|
+
return;
|
|
427
|
+
}
|
|
428
|
+
// Enqueue all of the VM/kernel proving requests
|
|
429
|
+
// Rather than handle the Kernel Tail as a special case here, we will just handle it inside enqueueVM
|
|
430
|
+
for (let i = 0; i < numPublicKernels; i++) {
|
|
431
|
+
logger.debug(`Enqueueing public VM ${i} for tx ${txIndex}`);
|
|
432
|
+
this.enqueueVM(provingState, txIndex, i);
|
|
433
|
+
}
|
|
434
|
+
}
|
|
435
|
+
|
|
436
|
+
/**
|
|
437
|
+
* Enqueue a job to be scheduled
|
|
438
|
+
* @param provingState - The proving state object being operated on
|
|
439
|
+
* @param jobType - The type of job to be queued
|
|
440
|
+
* @param job - The actual job, returns a promise notifying of the job's completion
|
|
441
|
+
*/
|
|
442
|
+
private deferredProving<T>(
|
|
443
|
+
provingState: ProvingState | undefined,
|
|
444
|
+
request: (signal: AbortSignal) => Promise<T>,
|
|
445
|
+
callback: (result: T) => void | Promise<void>,
|
|
446
|
+
) {
|
|
447
|
+
if (!provingState?.verifyState()) {
|
|
448
|
+
logger.debug(`Not enqueuing job, state no longer valid`);
|
|
449
|
+
return;
|
|
450
|
+
}
|
|
451
|
+
|
|
452
|
+
const controller = new AbortController();
|
|
453
|
+
this.pendingProvingJobs.push(controller);
|
|
454
|
+
|
|
455
|
+
// We use a 'safeJob'. We don't want promise rejections in the proving pool, we want to capture the error here
|
|
456
|
+
// and reject the proving job whilst keeping the event loop free of rejections
|
|
457
|
+
const safeJob = async () => {
|
|
458
|
+
try {
|
|
459
|
+
// there's a delay between enqueueing this job and it actually running
|
|
460
|
+
if (controller.signal.aborted) {
|
|
461
|
+
return;
|
|
462
|
+
}
|
|
463
|
+
|
|
464
|
+
const result = await request(controller.signal);
|
|
465
|
+
if (!provingState?.verifyState()) {
|
|
466
|
+
logger.debug(`State no longer valid, discarding result`);
|
|
467
|
+
return;
|
|
468
|
+
}
|
|
469
|
+
|
|
470
|
+
// we could have been cancelled whilst waiting for the result
|
|
471
|
+
// and the prover ignored the signal. Drop the result in that case
|
|
472
|
+
if (controller.signal.aborted) {
|
|
473
|
+
return;
|
|
474
|
+
}
|
|
475
|
+
|
|
476
|
+
await callback(result);
|
|
477
|
+
} catch (err) {
|
|
478
|
+
if (err instanceof AbortedError) {
|
|
479
|
+
// operation was cancelled, probably because the block was cancelled
|
|
480
|
+
// drop this result
|
|
481
|
+
return;
|
|
482
|
+
}
|
|
483
|
+
|
|
484
|
+
logger.error(`Error thrown when proving job`);
|
|
485
|
+
provingState!.reject(`${err}`);
|
|
486
|
+
} finally {
|
|
487
|
+
const index = this.pendingProvingJobs.indexOf(controller);
|
|
488
|
+
if (index > -1) {
|
|
489
|
+
this.pendingProvingJobs.splice(index, 1);
|
|
490
|
+
}
|
|
491
|
+
}
|
|
492
|
+
};
|
|
493
|
+
|
|
494
|
+
// let the callstack unwind before adding the job to the queue
|
|
495
|
+
setImmediate(safeJob);
|
|
496
|
+
}
|
|
497
|
+
|
|
498
|
+
// Updates the merkle trees for a transaction. The first enqueued job for a transaction
|
|
499
|
+
private async prepareBaseRollupInputs(
|
|
500
|
+
provingState: ProvingState | undefined,
|
|
501
|
+
tx: ProcessedTx,
|
|
502
|
+
kernelVk: VerificationKeyData,
|
|
503
|
+
): Promise<[BaseRollupInputs, TreeSnapshots] | undefined> {
|
|
504
|
+
if (!provingState?.verifyState()) {
|
|
505
|
+
logger.debug('Not preparing base rollup inputs, state invalid');
|
|
506
|
+
return;
|
|
507
|
+
}
|
|
508
|
+
const inputs = await buildBaseRollupInput(tx, provingState.globalVariables, this.db, kernelVk);
|
|
509
|
+
const promises = [MerkleTreeId.NOTE_HASH_TREE, MerkleTreeId.NULLIFIER_TREE, MerkleTreeId.PUBLIC_DATA_TREE].map(
|
|
510
|
+
async (id: MerkleTreeId) => {
|
|
511
|
+
return { key: id, value: await getTreeSnapshot(id, this.db) };
|
|
512
|
+
},
|
|
513
|
+
);
|
|
514
|
+
const treeSnapshots: TreeSnapshots = new Map((await Promise.all(promises)).map(obj => [obj.key, obj.value]));
|
|
515
|
+
|
|
516
|
+
if (!provingState?.verifyState()) {
|
|
517
|
+
logger.debug(`Discarding proving job, state no longer valid`);
|
|
518
|
+
return;
|
|
519
|
+
}
|
|
520
|
+
return [inputs, treeSnapshots];
|
|
521
|
+
}
|
|
522
|
+
|
|
523
|
+
// Stores the intermediate inputs prepared for a merge proof
|
|
524
|
+
private storeMergeInputs(
|
|
525
|
+
provingState: ProvingState,
|
|
526
|
+
currentLevel: bigint,
|
|
527
|
+
currentIndex: bigint,
|
|
528
|
+
mergeInputs: [
|
|
529
|
+
BaseOrMergeRollupPublicInputs,
|
|
530
|
+
RecursiveProof<typeof NESTED_RECURSIVE_PROOF_LENGTH>,
|
|
531
|
+
VerificationKeyAsFields,
|
|
532
|
+
],
|
|
533
|
+
) {
|
|
534
|
+
const mergeLevel = currentLevel - 1n;
|
|
535
|
+
const indexWithinMergeLevel = currentIndex >> 1n;
|
|
536
|
+
const mergeIndex = 2n ** mergeLevel - 1n + indexWithinMergeLevel;
|
|
537
|
+
const subscript = Number(mergeIndex);
|
|
538
|
+
const indexWithinMerge = Number(currentIndex & 1n);
|
|
539
|
+
const ready = provingState.storeMergeInputs(mergeInputs, indexWithinMerge, subscript);
|
|
540
|
+
return { ready, indexWithinMergeLevel, mergeLevel, mergeInputData: provingState.getMergeInputs(subscript) };
|
|
541
|
+
}
|
|
542
|
+
|
|
543
|
+
// Executes the base rollup circuit and stored the output as intermediate state for the parent merge/root circuit
|
|
544
|
+
// Executes the next level of merge if all inputs are available
|
|
545
|
+
private enqueueBaseRollup(provingState: ProvingState | undefined, index: bigint, tx: TxProvingState) {
|
|
546
|
+
if (!provingState?.verifyState()) {
|
|
547
|
+
logger.debug('Not running base rollup, state invalid');
|
|
548
|
+
return;
|
|
549
|
+
}
|
|
550
|
+
if (
|
|
551
|
+
!tx.baseRollupInputs.kernelData.publicInputs.end.noteEncryptedLogsHash
|
|
552
|
+
.toBuffer()
|
|
553
|
+
.equals(tx.processedTx.noteEncryptedLogs.hash())
|
|
554
|
+
) {
|
|
555
|
+
provingState.reject(
|
|
556
|
+
`Note encrypted logs hash mismatch: ${
|
|
557
|
+
tx.baseRollupInputs.kernelData.publicInputs.end.noteEncryptedLogsHash
|
|
558
|
+
} === ${Fr.fromBuffer(tx.processedTx.noteEncryptedLogs.hash())}`,
|
|
559
|
+
);
|
|
560
|
+
return;
|
|
561
|
+
}
|
|
562
|
+
if (
|
|
563
|
+
!tx.baseRollupInputs.kernelData.publicInputs.end.encryptedLogsHash
|
|
564
|
+
.toBuffer()
|
|
565
|
+
.equals(tx.processedTx.encryptedLogs.hash())
|
|
566
|
+
) {
|
|
567
|
+
// @todo This rejection messages is never seen. Never making it out to the logs
|
|
568
|
+
provingState.reject(
|
|
569
|
+
`Encrypted logs hash mismatch: ${
|
|
570
|
+
tx.baseRollupInputs.kernelData.publicInputs.end.encryptedLogsHash
|
|
571
|
+
} === ${Fr.fromBuffer(tx.processedTx.encryptedLogs.hash())}`,
|
|
572
|
+
);
|
|
573
|
+
return;
|
|
574
|
+
}
|
|
575
|
+
if (
|
|
576
|
+
!tx.baseRollupInputs.kernelData.publicInputs.end.unencryptedLogsHash
|
|
577
|
+
.toBuffer()
|
|
578
|
+
.equals(tx.processedTx.unencryptedLogs.hash())
|
|
579
|
+
) {
|
|
580
|
+
provingState.reject(
|
|
581
|
+
`Unencrypted logs hash mismatch: ${
|
|
582
|
+
tx.baseRollupInputs.kernelData.publicInputs.end.unencryptedLogsHash
|
|
583
|
+
} === ${Fr.fromBuffer(tx.processedTx.unencryptedLogs.hash())}`,
|
|
584
|
+
);
|
|
585
|
+
return;
|
|
586
|
+
}
|
|
587
|
+
|
|
588
|
+
logger.debug(
|
|
589
|
+
`Enqueuing deferred proving base rollup${
|
|
590
|
+
tx.processedTx.isEmpty ? ' with padding tx' : ''
|
|
591
|
+
} for ${tx.processedTx.hash.toString()}`,
|
|
592
|
+
);
|
|
593
|
+
|
|
594
|
+
this.deferredProving(
|
|
595
|
+
provingState,
|
|
596
|
+
signal => this.prover.getBaseRollupProof(tx.baseRollupInputs, signal),
|
|
597
|
+
result => {
|
|
598
|
+
logger.debug(`Completed proof for base rollup for tx ${tx.processedTx.hash.toString()}`);
|
|
599
|
+
validatePartialState(result.inputs.end, tx.treeSnapshots);
|
|
600
|
+
const currentLevel = provingState.numMergeLevels + 1n;
|
|
601
|
+
this.storeAndExecuteNextMergeLevel(provingState, currentLevel, index, [
|
|
602
|
+
result.inputs,
|
|
603
|
+
result.proof,
|
|
604
|
+
result.verificationKey.keyAsFields,
|
|
605
|
+
]);
|
|
606
|
+
},
|
|
607
|
+
);
|
|
608
|
+
}
|
|
609
|
+
|
|
610
|
+
// Executes the merge rollup circuit and stored the output as intermediate state for the parent merge/root circuit
|
|
611
|
+
// Enqueues the next level of merge if all inputs are available
|
|
612
|
+
private enqueueMergeRollup(
|
|
613
|
+
provingState: ProvingState,
|
|
614
|
+
level: bigint,
|
|
615
|
+
index: bigint,
|
|
616
|
+
mergeInputData: MergeRollupInputData,
|
|
617
|
+
) {
|
|
618
|
+
const inputs = createMergeRollupInputs(
|
|
619
|
+
[mergeInputData.inputs[0]!, mergeInputData.proofs[0]!, mergeInputData.verificationKeys[0]!],
|
|
620
|
+
[mergeInputData.inputs[1]!, mergeInputData.proofs[1]!, mergeInputData.verificationKeys[1]!],
|
|
621
|
+
);
|
|
622
|
+
|
|
623
|
+
this.deferredProving(
|
|
624
|
+
provingState,
|
|
625
|
+
signal => this.prover.getMergeRollupProof(inputs, signal),
|
|
626
|
+
result => {
|
|
627
|
+
this.storeAndExecuteNextMergeLevel(provingState, level, index, [
|
|
628
|
+
result.inputs,
|
|
629
|
+
result.proof,
|
|
630
|
+
result.verificationKey.keyAsFields,
|
|
631
|
+
]);
|
|
632
|
+
},
|
|
633
|
+
);
|
|
634
|
+
}
|
|
635
|
+
|
|
636
|
+
// Executes the root rollup circuit.
// Builds the circuit inputs from the topmost merge pair plus the final root
// parity output, proves, and resolves the overall proving promise with SUCCESS.
private async enqueueRootRollup(provingState: ProvingState | undefined) {
  if (!provingState?.verifyState()) {
    logger.debug('Not running root rollup, state no longer valid');
    return;
  }
  // The two children feeding the root are stored at merge index 0.
  const mergeInputData = provingState.getMergeInputs(0);
  // NOTE(review): assumed non-null because callers gate on isReadyForRootRollup()
  // (see checkAndEnqueueRootRollup) — confirm that guarantee covers finalRootParityInput.
  const rootParityInput = provingState.finalRootParityInput!;

  const inputs = await getRootRollupInput(
    mergeInputData.inputs[0]!,
    mergeInputData.proofs[0]!,
    mergeInputData.verificationKeys[0]!,
    mergeInputData.inputs[1]!,
    mergeInputData.proofs[1]!,
    mergeInputData.verificationKeys[1]!,
    rootParityInput,
    provingState.newL1ToL2Messages,
    provingState.messageTreeSnapshot,
    provingState.messageTreeRootSiblingPath,
    this.db,
  );

  this.deferredProving(
    provingState,
    signal => this.prover.getRootRollupProof(inputs, signal),
    result => {
      // Record the final outputs on the proving state for later retrieval.
      provingState.rootRollupPublicInputs = result.inputs;
      provingState.finalAggregationObject = extractAggregationObject(
        result.proof.binaryProof,
        result.verificationKey.numPublicInputs,
      );
      provingState.finalProof = result.proof.binaryProof;

      // Signal completion of the whole block proof to whoever awaits it.
      const provingResult: ProvingResult = {
        status: PROVING_STATUS.SUCCESS,
      };
      provingState.resolve(provingResult);
    },
  );
}
|
|
677
|
+
|
|
678
|
+
// Proves one base parity circuit and stores its output for the root parity circuit.
// Once every base parity output is in place, enqueues the root parity circuit.
private enqueueBaseParityCircuit(provingState: ProvingState, inputs: BaseParityInputs, index: number) {
  this.deferredProving(
    provingState,
    signal => this.prover.getBaseParityProof(inputs, signal),
    provenOutput => {
      provingState.setRootParityInputs(provenOutput, index);
      if (!provingState.areRootParityInputsReady()) {
        // Still waiting on sibling base parity proofs.
        return;
      }
      const children = provingState.rootParityInput as Tuple<
        RootParityInput<typeof RECURSIVE_PROOF_LENGTH>,
        typeof NUM_BASE_PARITY_PER_ROOT_PARITY
      >;
      this.enqueueRootParityCircuit(provingState, new RootParityInputs(children));
    },
  );
}
|
|
698
|
+
|
|
699
|
+
// Runs the root parity circuit ans stored the outputs
|
|
700
|
+
// Enqueues the root rollup proof if all inputs are available
|
|
701
|
+
private enqueueRootParityCircuit(provingState: ProvingState | undefined, inputs: RootParityInputs) {
|
|
702
|
+
this.deferredProving(
|
|
703
|
+
provingState,
|
|
704
|
+
signal => this.prover.getRootParityProof(inputs, signal),
|
|
705
|
+
async rootInput => {
|
|
706
|
+
provingState!.finalRootParityInput = rootInput;
|
|
707
|
+
await this.checkAndEnqueueRootRollup(provingState);
|
|
708
|
+
},
|
|
709
|
+
);
|
|
710
|
+
}
|
|
711
|
+
|
|
712
|
+
// Kicks off the root rollup if, and only if, the proving state reports that
// every input it needs is now available.
private async checkAndEnqueueRootRollup(provingState: ProvingState | undefined) {
  if (provingState?.isReadyForRootRollup()) {
    await this.enqueueRootRollup(provingState);
  } else {
    logger.debug('Not ready for root rollup');
  }
}
|
|
719
|
+
|
|
720
|
+
/**
|
|
721
|
+
* Stores the inputs to a merge/root circuit and enqueues the circuit if ready
|
|
722
|
+
* @param provingState - The proving state being operated on
|
|
723
|
+
* @param currentLevel - The level of the merge/root circuit
|
|
724
|
+
* @param currentIndex - The index of the merge/root circuit
|
|
725
|
+
* @param mergeInputData - The inputs to be stored
|
|
726
|
+
*/
|
|
727
|
+
private storeAndExecuteNextMergeLevel(
|
|
728
|
+
provingState: ProvingState,
|
|
729
|
+
currentLevel: bigint,
|
|
730
|
+
currentIndex: bigint,
|
|
731
|
+
mergeInputData: [
|
|
732
|
+
BaseOrMergeRollupPublicInputs,
|
|
733
|
+
RecursiveProof<typeof NESTED_RECURSIVE_PROOF_LENGTH>,
|
|
734
|
+
VerificationKeyAsFields,
|
|
735
|
+
],
|
|
736
|
+
) {
|
|
737
|
+
const result = this.storeMergeInputs(provingState, currentLevel, currentIndex, mergeInputData);
|
|
738
|
+
|
|
739
|
+
// Are we ready to execute the next circuit?
|
|
740
|
+
if (!result.ready) {
|
|
741
|
+
return;
|
|
742
|
+
}
|
|
743
|
+
|
|
744
|
+
if (result.mergeLevel === 0n) {
|
|
745
|
+
// TODO (alexg) remove this `void`
|
|
746
|
+
void this.checkAndEnqueueRootRollup(provingState);
|
|
747
|
+
} else {
|
|
748
|
+
// onto the next merge level
|
|
749
|
+
this.enqueueMergeRollup(provingState, result.mergeLevel, result.indexWithinMergeLevel, result.mergeInputData);
|
|
750
|
+
}
|
|
751
|
+
}
|
|
752
|
+
|
|
753
|
+
/**
 * Executes the VM circuit for a public function, will enqueue the corresponding kernel if the
 * previous kernel is ready
 * @param provingState - The proving state being operated on
 * @param txIndex - The index of the transaction being proven
 * @param functionIndex - The index of the function/kernel being proven
 */
private enqueueVM(provingState: ProvingState | undefined, txIndex: number, functionIndex: number) {
  if (!provingState?.verifyState()) {
    logger.debug(`Not running VM circuit as state is no longer valid`);
    return;
  }

  const txProvingState = provingState.getTxProvingState(txIndex);
  const publicFunction = txProvingState.getPublicFunctionState(functionIndex);

  // If there is a VM request, we need to prove it. Otherwise, continue with the kernel.
  if (publicFunction.vmRequest) {
    // This function tries to do AVM proving. If there is a failure, it fakes the proof unless AVM_PROVING_STRICT is defined.
    // Nothing downstream depends on the AVM proof yet. So having this mode lets us incrementally build the AVM circuit.
    const doAvmProving = async (signal: AbortSignal) => {
      const inputs: AvmCircuitInputs = new AvmCircuitInputs(
        publicFunction.vmRequest!.bytecode,
        publicFunction.vmRequest!.calldata,
        publicFunction.vmRequest!.kernelRequest.inputs.publicCall.callStackItem.publicInputs,
        publicFunction.vmRequest!.avmHints,
      );
      try {
        return await this.prover.getAvmProof(inputs, signal);
      } catch (err) {
        // Any truthy value of the env var enables strict mode and re-throws.
        if (process.env.AVM_PROVING_STRICT) {
          throw err;
        } else {
          // Best-effort mode: substitute an empty proof and fake VK so the
          // pipeline can continue while the AVM circuit is under construction.
          logger.warn(`Error thrown when proving AVM circuit: ${err}`);
          logger.warn(`AVM_PROVING_STRICT is off, faking AVM proof and carrying on...`);
          return { proof: makeEmptyProof(), verificationKey: VerificationKeyData.makeFake() };
        }
      }
    };
    this.deferredProving(provingState, doAvmProving, proofAndVk => {
      logger.debug(`Proven VM for function index ${functionIndex} of tx index ${txIndex}`);
      this.checkAndEnqueuePublicKernel(provingState, txIndex, functionIndex, proofAndVk.proof);
    });
  } else {
    // No VM request for this function: feed an empty VM proof to the kernel check.
    this.checkAndEnqueuePublicKernel(provingState, txIndex, functionIndex, /*vmProof=*/ makeEmptyProof());
  }
}
|
|
800
|
+
|
|
801
|
+
// Records the VM proof against the tx and, if the matching public kernel is
// now ready to be proven, enqueues it.
private checkAndEnqueuePublicKernel(
  provingState: ProvingState,
  txIndex: number,
  functionIndex: number,
  vmProof: Proof,
) {
  const kernelRequest = provingState
    .getTxProvingState(txIndex)
    .getNextPublicKernelFromVMProof(functionIndex, vmProof);

  if (kernelRequest.code !== TX_PROVING_CODE.READY) {
    return;
  }
  if (kernelRequest.function === undefined) {
    // Should not be possible
    throw new Error(`Error occurred, public function request undefined after VM proof completed`);
  }
  logger.debug(`Enqueuing kernel from VM for tx ${txIndex}, function ${functionIndex}`);
  this.enqueuePublicKernel(provingState, txIndex, functionIndex);
}
|
|
818
|
+
|
|
819
|
+
/**
 * Executes the kernel circuit for a public function, will enqueue the next kernel circuit if it's VM is already proven
 * or the base rollup circuit if there are no more kernels to be proven
 * @param provingState - The proving state being operated on
 * @param txIndex - The index of the transaction being proven
 * @param functionIndex - The index of the function/kernel being proven
 */
private enqueuePublicKernel(provingState: ProvingState | undefined, txIndex: number, functionIndex: number) {
  if (!provingState?.verifyState()) {
    logger.debug(`Not running public kernel circuit as state is no longer valid`);
    return;
  }

  const txProvingState = provingState.getTxProvingState(txIndex);
  const request = txProvingState.getPublicFunctionState(functionIndex).publicKernelRequest;

  this.deferredProving(
    provingState,
    // TAIL kernels go through a dedicated prover entry point; all other kernel
    // types use the generic public kernel prover.
    (signal): Promise<PublicInputsAndRecursiveProof<KernelCircuitPublicInputs | PublicKernelCircuitPublicInputs>> => {
      if (request.type === PublicKernelType.TAIL) {
        return this.prover.getPublicTailProof(request, signal);
      } else {
        return this.prover.getPublicKernelProof(request, signal);
      }
    },
    result => {
      const nextKernelRequest = txProvingState.getNextPublicKernelFromKernelProof(
        functionIndex,
        result.proof,
        result.verificationKey,
      );
      // What's the status of the next kernel?
      if (nextKernelRequest.code === TX_PROVING_CODE.NOT_READY) {
        // Must be waiting on a VM proof
        return;
      }

      if (nextKernelRequest.code === TX_PROVING_CODE.COMPLETED) {
        // We must have completed all public function proving, we now move to the base rollup
        logger.debug(`Public functions completed for tx ${txIndex} enqueueing base rollup`);
        // Take the final public tail proof and verification key and pass them to the base rollup
        txProvingState.baseRollupInputs.kernelData.proof = result.proof;
        txProvingState.baseRollupInputs.kernelData.vk = result.verificationKey;
        this.enqueueBaseRollup(provingState, BigInt(txIndex), txProvingState);
        return;
      }
      // There must be another kernel ready to be proven
      if (nextKernelRequest.function === undefined) {
        // Should not be possible
        throw new Error(`Error occurred, public function request undefined after kernel proof completed`);
      }

      // Recurse to the next function/kernel in this tx's sequence.
      this.enqueuePublicKernel(provingState, txIndex, functionIndex + 1);
    },
  );
}
|
|
875
|
+
}
|
|
876
|
+
|
|
877
|
+
function extractAggregationObject(proof: Proof, numPublicInputs: number): Fr[] {
|
|
878
|
+
const buffer = proof.buffer.subarray(
|
|
879
|
+
Fr.SIZE_IN_BYTES * (numPublicInputs - AGGREGATION_OBJECT_LENGTH),
|
|
880
|
+
Fr.SIZE_IN_BYTES * numPublicInputs,
|
|
881
|
+
);
|
|
882
|
+
return BufferReader.asReader(buffer).readArray(AGGREGATION_OBJECT_LENGTH, Fr);
|
|
883
|
+
}
|