@aztec/prover-client 0.0.1-fake-c83136db25 → 0.0.1-fake-ceab37513c
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in the public registry.
- package/dest/bin/get-proof-inputs.d.ts +2 -0
- package/dest/bin/get-proof-inputs.d.ts.map +1 -0
- package/dest/bin/get-proof-inputs.js +51 -0
- package/dest/block-factory/light.d.ts +3 -5
- package/dest/block-factory/light.d.ts.map +1 -1
- package/dest/block-factory/light.js +9 -16
- package/dest/config.js +1 -1
- package/dest/mocks/fixtures.d.ts +1 -4
- package/dest/mocks/fixtures.d.ts.map +1 -1
- package/dest/mocks/fixtures.js +3 -31
- package/dest/mocks/test_context.d.ts +9 -32
- package/dest/mocks/test_context.d.ts.map +1 -1
- package/dest/mocks/test_context.js +22 -78
- package/dest/orchestrator/block-building-helpers.d.ts +31 -33
- package/dest/orchestrator/block-building-helpers.d.ts.map +1 -1
- package/dest/orchestrator/block-building-helpers.js +137 -126
- package/dest/orchestrator/block-proving-state.d.ts +53 -60
- package/dest/orchestrator/block-proving-state.d.ts.map +1 -1
- package/dest/orchestrator/block-proving-state.js +187 -214
- package/dest/orchestrator/epoch-proving-state.d.ts +28 -34
- package/dest/orchestrator/epoch-proving-state.d.ts.map +1 -1
- package/dest/orchestrator/epoch-proving-state.js +84 -128
- package/dest/orchestrator/orchestrator.d.ts +30 -31
- package/dest/orchestrator/orchestrator.d.ts.map +1 -1
- package/dest/orchestrator/orchestrator.js +236 -368
- package/dest/orchestrator/tx-proving-state.d.ts +9 -11
- package/dest/orchestrator/tx-proving-state.d.ts.map +1 -1
- package/dest/orchestrator/tx-proving-state.js +23 -26
- package/dest/prover-client/server-epoch-prover.d.ts +8 -9
- package/dest/prover-client/server-epoch-prover.d.ts.map +1 -1
- package/dest/prover-client/server-epoch-prover.js +9 -9
- package/dest/proving_broker/broker_prover_facade.d.ts +15 -20
- package/dest/proving_broker/broker_prover_facade.d.ts.map +1 -1
- package/dest/proving_broker/broker_prover_facade.js +21 -36
- package/dest/proving_broker/fixtures.js +1 -1
- package/dest/proving_broker/proof_store/index.d.ts +0 -1
- package/dest/proving_broker/proof_store/index.d.ts.map +1 -1
- package/dest/proving_broker/proof_store/index.js +0 -1
- package/dest/proving_broker/proving_broker.d.ts.map +1 -1
- package/dest/proving_broker/proving_broker.js +18 -29
- package/dest/proving_broker/proving_job_controller.d.ts.map +1 -1
- package/dest/proving_broker/proving_job_controller.js +18 -38
- package/dest/test/mock_prover.d.ts +17 -22
- package/dest/test/mock_prover.d.ts.map +1 -1
- package/dest/test/mock_prover.js +20 -35
- package/package.json +17 -16
- package/src/bin/get-proof-inputs.ts +59 -0
- package/src/block-factory/light.ts +9 -35
- package/src/config.ts +1 -1
- package/src/mocks/fixtures.ts +11 -39
- package/src/mocks/test_context.ts +31 -137
- package/src/orchestrator/block-building-helpers.ts +211 -211
- package/src/orchestrator/block-proving-state.ts +245 -235
- package/src/orchestrator/epoch-proving-state.ts +127 -172
- package/src/orchestrator/orchestrator.ts +303 -545
- package/src/orchestrator/tx-proving-state.ts +43 -49
- package/src/prover-client/server-epoch-prover.ts +18 -28
- package/src/proving_broker/broker_prover_facade.ts +86 -157
- package/src/proving_broker/fixtures.ts +1 -1
- package/src/proving_broker/proof_store/index.ts +0 -1
- package/src/proving_broker/proving_broker.ts +18 -36
- package/src/proving_broker/proving_job_controller.ts +18 -38
- package/src/test/mock_prover.ts +60 -142
- package/dest/orchestrator/checkpoint-proving-state.d.ts +0 -63
- package/dest/orchestrator/checkpoint-proving-state.d.ts.map +0 -1
- package/dest/orchestrator/checkpoint-proving-state.js +0 -211
- package/src/orchestrator/checkpoint-proving-state.ts +0 -299
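The largest change in this release is the removal of the checkpoint layer: the checkpoint-proving-state files are deleted and, as the hunks below (apparently from src/orchestrator/orchestrator.ts) show, startNewCheckpoint is gone and blocks are driven directly from the epoch proving state. The sketch below illustrates the resulting call sequence for a one-block epoch. It is inferred from the diff only: the OrchestratorLike interface and the driver function are illustrative, not exported by the package, and the caller is assumed to supply the orchestrator instance, transactions, and blob challenges.

import type { FinalBlobBatchingChallenges } from '@aztec/blob-lib';
import type { Fr } from '@aztec/foundation/fields';
import type { L2Block } from '@aztec/stdlib/block';
import type { BlockHeader, GlobalVariables, ProcessedTx } from '@aztec/stdlib/tx';

// Structural view of the orchestrator methods touched by this diff (illustrative only).
interface OrchestratorLike {
  startNewEpoch(
    epochNumber: number,
    firstBlockNumber: number,
    totalNumBlocks: number,
    finalBlobBatchingChallenges: FinalBlobBatchingChallenges,
  ): void;
  startNewBlock(globalVariables: GlobalVariables, l1ToL2Messages: Fr[], previousBlockHeader: BlockHeader): Promise<void>;
  addTxs(txs: ProcessedTx[]): Promise<void>;
  setBlockCompleted(blockNumber: number, expectedHeader?: BlockHeader): Promise<L2Block>;
  finalizeEpoch(): Promise<unknown>;
}

export async function proveOneBlockEpoch(
  orchestrator: OrchestratorLike,
  epochNumber: number,
  blockNumber: number,
  globalVariables: GlobalVariables,
  l1ToL2Messages: Fr[],
  previousBlockHeader: BlockHeader,
  txs: ProcessedTx[],
  challenges: FinalBlobBatchingChallenges,
) {
  // The epoch is now sized up front with its first block number and total block count.
  orchestrator.startNewEpoch(epochNumber, blockNumber, 1, challenges);
  // Blocks are started directly; there is no startNewCheckpoint step any more.
  await orchestrator.startNewBlock(globalVariables, l1ToL2Messages, previousBlockHeader);
  // addTxs sizes the block (tx count and blob fields) from the txs themselves.
  await orchestrator.addTxs(txs);
  // setBlockCompleted now assembles and returns the L2 block.
  const block = await orchestrator.setBlockCompleted(blockNumber);
  // finalizeEpoch waits for the root rollup proof and the final batched blob.
  const result = await orchestrator.finalizeEpoch();
  return { block, result };
}

Compared with the previous flow, the caller no longer provides checkpoint constants or per-checkpoint L1-to-L2 message batches; startNewBlock instead takes the block's global variables, its L1-to-L2 messages, and the previous block header.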
|
@@ -1,12 +1,12 @@
|
|
|
1
|
-
import {
|
|
1
|
+
import { BlobAccumulatorPublicInputs, FinalBlobBatchingChallenges } from '@aztec/blob-lib';
|
|
2
2
|
import {
|
|
3
3
|
L1_TO_L2_MSG_SUBTREE_HEIGHT,
|
|
4
|
-
|
|
5
|
-
NESTED_RECURSIVE_ROLLUP_HONK_PROOF_LENGTH,
|
|
4
|
+
L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH,
|
|
6
5
|
NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP,
|
|
7
6
|
NUM_BASE_PARITY_PER_ROOT_PARITY,
|
|
7
|
+
type TUBE_PROOF_LENGTH,
|
|
8
8
|
} from '@aztec/constants';
|
|
9
|
-
import { padArrayEnd } from '@aztec/foundation/collection';
|
|
9
|
+
import { padArrayEnd, times } from '@aztec/foundation/collection';
|
|
10
10
|
import { AbortError } from '@aztec/foundation/error';
|
|
11
11
|
import { Fr } from '@aztec/foundation/fields';
|
|
12
12
|
import { createLogger } from '@aztec/foundation/log';
|
|
@@ -15,33 +15,27 @@ import { assertLength } from '@aztec/foundation/serialize';
|
|
|
15
15
|
import { pushTestData } from '@aztec/foundation/testing';
|
|
16
16
|
import { elapsed } from '@aztec/foundation/timer';
|
|
17
17
|
import type { TreeNodeLocation } from '@aztec/foundation/trees';
|
|
18
|
+
import { getVKTreeRoot } from '@aztec/noir-protocol-circuits-types/vk-tree';
|
|
18
19
|
import { readAvmMinimalPublicTxInputsFromFile } from '@aztec/simulator/public/fixtures';
|
|
19
|
-
import { EthAddress,
|
|
20
|
+
import { EthAddress, L2Block } from '@aztec/stdlib/block';
|
|
20
21
|
import type {
|
|
21
22
|
EpochProver,
|
|
22
23
|
ForkMerkleTreeOperations,
|
|
23
24
|
MerkleTreeWriteOperations,
|
|
24
|
-
|
|
25
|
+
ProofAndVerificationKey,
|
|
25
26
|
ServerCircuitProver,
|
|
26
27
|
} from '@aztec/stdlib/interfaces/server';
|
|
27
|
-
import
|
|
28
|
+
import { BaseParityInputs } from '@aztec/stdlib/parity';
|
|
28
29
|
import {
|
|
29
30
|
type BaseRollupHints,
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
CheckpointConstantData,
|
|
35
|
-
CheckpointRootSingleBlockRollupPrivateInputs,
|
|
36
|
-
PrivateTxBaseRollupPrivateInputs,
|
|
37
|
-
PublicChonkVerifierPrivateInputs,
|
|
38
|
-
PublicChonkVerifierPublicInputs,
|
|
39
|
-
RootRollupPublicInputs,
|
|
31
|
+
EmptyBlockRootRollupInputs,
|
|
32
|
+
PrivateBaseRollupInputs,
|
|
33
|
+
SingleTxBlockRootRollupInputs,
|
|
34
|
+
TubeInputs,
|
|
40
35
|
} from '@aztec/stdlib/rollup';
|
|
41
36
|
import type { CircuitName } from '@aztec/stdlib/stats';
|
|
42
37
|
import { type AppendOnlyTreeSnapshot, MerkleTreeId } from '@aztec/stdlib/trees';
|
|
43
|
-
import type
|
|
44
|
-
import type { UInt64 } from '@aztec/stdlib/types';
|
|
38
|
+
import { type BlockHeader, type GlobalVariables, type ProcessedTx, type Tx, toNumBlobFields } from '@aztec/stdlib/tx';
|
|
45
39
|
import {
|
|
46
40
|
Attributes,
|
|
47
41
|
type TelemetryClient,
|
|
@@ -54,10 +48,8 @@ import {
|
|
|
54
48
|
import { inspect } from 'util';
|
|
55
49
|
|
|
56
50
|
import {
|
|
57
|
-
|
|
58
|
-
buildHeaderFromCircuitOutputs,
|
|
51
|
+
buildHeaderAndBodyFromTxs,
|
|
59
52
|
getLastSiblingPath,
|
|
60
|
-
getPublicChonkVerifierPrivateInputsFromTx,
|
|
61
53
|
getRootTreeSiblingPath,
|
|
62
54
|
getSubtreeSiblingPath,
|
|
63
55
|
getTreeSnapshot,
|
|
@@ -66,7 +58,6 @@ import {
|
|
|
66
58
|
validateTx,
|
|
67
59
|
} from './block-building-helpers.js';
|
|
68
60
|
import type { BlockProvingState } from './block-proving-state.js';
|
|
69
|
-
import type { CheckpointProvingState } from './checkpoint-proving-state.js';
|
|
70
61
|
import { EpochProvingState, type ProvingResult, type TreeSnapshots } from './epoch-proving-state.js';
|
|
71
62
|
import { ProvingOrchestratorMetrics } from './orchestrator_metrics.js';
|
|
72
63
|
import { TxProvingState } from './tx-proving-state.js';
|
|
@@ -119,141 +110,80 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
119
110
|
|
|
120
111
|
public startNewEpoch(
|
|
121
112
|
epochNumber: number,
|
|
122
|
-
|
|
113
|
+
firstBlockNumber: number,
|
|
114
|
+
totalNumBlocks: number,
|
|
123
115
|
finalBlobBatchingChallenges: FinalBlobBatchingChallenges,
|
|
124
116
|
) {
|
|
125
|
-
if (this.provingState?.verifyState()) {
|
|
126
|
-
throw new Error(
|
|
127
|
-
`Cannot start epoch ${epochNumber} when epoch ${this.provingState.epochNumber} is still being processed.`,
|
|
128
|
-
);
|
|
129
|
-
}
|
|
130
|
-
|
|
131
117
|
const { promise: _promise, resolve, reject } = promiseWithResolvers<ProvingResult>();
|
|
132
118
|
const promise = _promise.catch((reason): ProvingResult => ({ status: 'failure', reason }));
|
|
133
|
-
|
|
119
|
+
if (totalNumBlocks <= 0 || !Number.isInteger(totalNumBlocks)) {
|
|
120
|
+
throw new Error(`Invalid number of blocks for epoch (got ${totalNumBlocks})`);
|
|
121
|
+
}
|
|
122
|
+
logger.info(`Starting epoch ${epochNumber} with ${totalNumBlocks} blocks`);
|
|
134
123
|
this.provingState = new EpochProvingState(
|
|
135
124
|
epochNumber,
|
|
136
|
-
|
|
125
|
+
firstBlockNumber,
|
|
126
|
+
totalNumBlocks,
|
|
137
127
|
finalBlobBatchingChallenges,
|
|
138
|
-
provingState => this.checkAndEnqueueCheckpointRootRollup(provingState),
|
|
139
128
|
resolve,
|
|
140
129
|
reject,
|
|
141
130
|
);
|
|
142
131
|
this.provingPromise = promise;
|
|
143
132
|
}
|
|
144
133
|
|
|
145
|
-
public async startNewCheckpoint(
|
|
146
|
-
checkpointIndex: number,
|
|
147
|
-
constants: CheckpointConstantData,
|
|
148
|
-
l1ToL2Messages: Fr[],
|
|
149
|
-
totalNumBlocks: number,
|
|
150
|
-
totalNumBlobFields: number,
|
|
151
|
-
headerOfLastBlockInPreviousCheckpoint: BlockHeader,
|
|
152
|
-
) {
|
|
153
|
-
if (!this.provingState) {
|
|
154
|
-
throw new Error('Empty epoch proving state. Call startNewEpoch before starting a checkpoint.');
|
|
155
|
-
}
|
|
156
|
-
|
|
157
|
-
if (!this.provingState.isAcceptingCheckpoints()) {
|
|
158
|
-
throw new Error(`Epoch not accepting further checkpoints.`);
|
|
159
|
-
}
|
|
160
|
-
|
|
161
|
-
// Fork world state at the end of the immediately previous block.
|
|
162
|
-
const lastBlockNumber = headerOfLastBlockInPreviousCheckpoint.globalVariables.blockNumber;
|
|
163
|
-
const db = await this.dbProvider.fork(lastBlockNumber);
|
|
164
|
-
|
|
165
|
-
const firstBlockNumber = lastBlockNumber + 1;
|
|
166
|
-
this.dbs.set(firstBlockNumber, db);
|
|
167
|
-
|
|
168
|
-
// Get archive sibling path before any block in this checkpoint lands.
|
|
169
|
-
const lastArchiveSiblingPath = await getLastSiblingPath(MerkleTreeId.ARCHIVE, db);
|
|
170
|
-
|
|
171
|
-
// Insert all the l1 to l2 messages into the db. And get the states before and after the insertion.
|
|
172
|
-
const {
|
|
173
|
-
lastL1ToL2MessageTreeSnapshot,
|
|
174
|
-
lastL1ToL2MessageSubtreeRootSiblingPath,
|
|
175
|
-
newL1ToL2MessageTreeSnapshot,
|
|
176
|
-
newL1ToL2MessageSubtreeRootSiblingPath,
|
|
177
|
-
} = await this.updateL1ToL2MessageTree(l1ToL2Messages, db);
|
|
178
|
-
|
|
179
|
-
this.provingState.startNewCheckpoint(
|
|
180
|
-
checkpointIndex,
|
|
181
|
-
constants,
|
|
182
|
-
totalNumBlocks,
|
|
183
|
-
totalNumBlobFields,
|
|
184
|
-
headerOfLastBlockInPreviousCheckpoint,
|
|
185
|
-
lastArchiveSiblingPath,
|
|
186
|
-
l1ToL2Messages,
|
|
187
|
-
lastL1ToL2MessageTreeSnapshot,
|
|
188
|
-
lastL1ToL2MessageSubtreeRootSiblingPath,
|
|
189
|
-
newL1ToL2MessageTreeSnapshot,
|
|
190
|
-
newL1ToL2MessageSubtreeRootSiblingPath,
|
|
191
|
-
);
|
|
192
|
-
}
|
|
193
|
-
|
|
194
134
|
/**
|
|
195
135
|
* Starts off a new block
|
|
196
|
-
* @param
|
|
197
|
-
* @param
|
|
198
|
-
*
|
|
199
|
-
* @param totalNumTxs - The total number of txs in the block
|
|
136
|
+
* @param globalVariables - The global variables for the block
|
|
137
|
+
* @param l1ToL2Messages - The l1 to l2 messages for the block
|
|
138
|
+
* @returns A proving ticket, containing a promise notifying of proving completion
|
|
200
139
|
*/
|
|
201
|
-
@trackSpan('ProvingOrchestrator.startNewBlock',
|
|
202
|
-
[Attributes.BLOCK_NUMBER]: blockNumber,
|
|
140
|
+
@trackSpan('ProvingOrchestrator.startNewBlock', globalVariables => ({
|
|
141
|
+
[Attributes.BLOCK_NUMBER]: globalVariables.blockNumber,
|
|
203
142
|
}))
|
|
204
|
-
public async startNewBlock(
|
|
143
|
+
public async startNewBlock(globalVariables: GlobalVariables, l1ToL2Messages: Fr[], previousBlockHeader: BlockHeader) {
|
|
205
144
|
if (!this.provingState) {
|
|
206
|
-
throw new Error(
|
|
207
|
-
}
|
|
208
|
-
|
|
209
|
-
const checkpointProvingState = this.provingState.getCheckpointProvingStateByBlockNumber(blockNumber);
|
|
210
|
-
if (!checkpointProvingState) {
|
|
211
|
-
throw new Error(`Checkpoint not started. Call startNewCheckpoint first.`);
|
|
145
|
+
throw new Error(`Invalid proving state, call startNewEpoch before starting a block`);
|
|
212
146
|
}
|
|
213
147
|
|
|
214
|
-
if (!
|
|
215
|
-
throw new Error(`
|
|
148
|
+
if (!this.provingState?.isAcceptingBlocks()) {
|
|
149
|
+
throw new Error(`Epoch not accepting further blocks`);
|
|
216
150
|
}
|
|
217
151
|
|
|
218
|
-
|
|
219
|
-
logger.info(`Starting block ${blockNumber} for slot ${constants.slotNumber.toNumber()}.`);
|
|
152
|
+
logger.info(`Starting block ${globalVariables.blockNumber} for slot ${globalVariables.slotNumber.toNumber()}`);
|
|
220
153
|
|
|
221
|
-
// Fork
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
const db = await this.dbProvider.fork(blockNumber - 1);
|
|
225
|
-
this.dbs.set(blockNumber, db);
|
|
226
|
-
}
|
|
227
|
-
const db = this.dbs.get(blockNumber)!;
|
|
154
|
+
// Fork world state at the end of the immediately previous block
|
|
155
|
+
const db = await this.dbProvider.fork(globalVariables.blockNumber - 1);
|
|
156
|
+
this.dbs.set(globalVariables.blockNumber, db);
|
|
228
157
|
|
|
229
|
-
//
|
|
230
|
-
const
|
|
231
|
-
|
|
158
|
+
// we start the block by enqueueing all of the base parity circuits
|
|
159
|
+
const {
|
|
160
|
+
l1ToL2MessageTreeSnapshot,
|
|
161
|
+
l1ToL2MessageSubtreeSiblingPath,
|
|
162
|
+
l1ToL2MessageTreeSnapshotAfterInsertion,
|
|
163
|
+
baseParityInputs,
|
|
164
|
+
} = await this.prepareBaseParityInputs(l1ToL2Messages, db);
|
|
165
|
+
|
|
166
|
+
// Get archive snapshot before this block lands
|
|
167
|
+
const lastArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
|
|
168
|
+
const lastArchiveSiblingPath = await getLastSiblingPath(MerkleTreeId.ARCHIVE, db);
|
|
169
|
+
const newArchiveSiblingPath = await getRootTreeSiblingPath(MerkleTreeId.ARCHIVE, db);
|
|
232
170
|
|
|
233
|
-
const blockProvingState =
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
171
|
+
const blockProvingState = this.provingState!.startNewBlock(
|
|
172
|
+
globalVariables,
|
|
173
|
+
l1ToL2Messages,
|
|
174
|
+
l1ToL2MessageTreeSnapshot,
|
|
175
|
+
l1ToL2MessageSubtreeSiblingPath,
|
|
176
|
+
l1ToL2MessageTreeSnapshotAfterInsertion,
|
|
177
|
+
lastArchive,
|
|
238
178
|
lastArchiveSiblingPath,
|
|
179
|
+
newArchiveSiblingPath,
|
|
180
|
+
previousBlockHeader,
|
|
181
|
+
this.proverId,
|
|
239
182
|
);
|
|
240
183
|
|
|
241
|
-
// Enqueue base parity circuits for the
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
this.enqueueBaseParityCircuit(checkpointProvingState, blockProvingState, i);
|
|
245
|
-
}
|
|
246
|
-
}
|
|
247
|
-
|
|
248
|
-
// Because `addTxs` won't be called for a block without txs, and that's where the sponge blob state is computed.
|
|
249
|
-
// We need to set its end sponge blob here, which will become the start sponge blob for the next block.
|
|
250
|
-
if (totalNumTxs === 0) {
|
|
251
|
-
const endSpongeBlob = blockProvingState.getStartSpongeBlob().clone();
|
|
252
|
-
await endSpongeBlob.absorb([createBlockEndMarker(0)]);
|
|
253
|
-
blockProvingState.setEndSpongeBlob(endSpongeBlob);
|
|
254
|
-
|
|
255
|
-
// And also try to accumulate the blobs as far as we can:
|
|
256
|
-
await this.provingState.setBlobAccumulators();
|
|
184
|
+
// Enqueue base parity circuits for the block
|
|
185
|
+
for (let i = 0; i < baseParityInputs.length; i++) {
|
|
186
|
+
this.enqueueBaseParityCircuit(blockProvingState, baseParityInputs[i], i);
|
|
257
187
|
}
|
|
258
188
|
}
|
|
259
189
|
|
|
@@ -265,40 +195,28 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
265
195
|
[Attributes.BLOCK_TXS_COUNT]: txs.length,
|
|
266
196
|
}))
|
|
267
197
|
public async addTxs(txs: ProcessedTx[]): Promise<void> {
|
|
268
|
-
if (!this.provingState) {
|
|
269
|
-
throw new Error(`Empty epoch proving state. Call startNewEpoch before adding txs.`);
|
|
270
|
-
}
|
|
271
|
-
|
|
272
198
|
if (!txs.length) {
|
|
273
199
|
// To avoid an ugly throw below. If we require an empty block, we can just call setBlockCompleted
|
|
274
200
|
// on a block with no txs. We cannot do that here because we cannot find the blockNumber without any txs.
|
|
275
201
|
logger.warn(`Provided no txs to orchestrator addTxs.`);
|
|
276
202
|
return;
|
|
277
203
|
}
|
|
278
|
-
|
|
279
204
|
const blockNumber = txs[0].globalVariables.blockNumber;
|
|
280
|
-
const provingState = this.provingState
|
|
205
|
+
const provingState = this.provingState?.getBlockProvingStateByBlockNumber(blockNumber!);
|
|
281
206
|
if (!provingState) {
|
|
282
|
-
throw new Error(`
|
|
283
|
-
}
|
|
284
|
-
|
|
285
|
-
if (provingState.totalNumTxs !== txs.length) {
|
|
286
|
-
throw new Error(
|
|
287
|
-
`Block ${blockNumber} should be filled with ${provingState.totalNumTxs} txs. Received ${txs.length} txs.`,
|
|
288
|
-
);
|
|
207
|
+
throw new Error(`Block proving state for ${blockNumber} not found`);
|
|
289
208
|
}
|
|
290
209
|
|
|
291
|
-
if (
|
|
210
|
+
if (provingState.totalNumTxs) {
|
|
292
211
|
throw new Error(`Block ${blockNumber} has been initialized with transactions.`);
|
|
293
212
|
}
|
|
294
213
|
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
const db = this.dbs.get(blockNumber)!;
|
|
298
|
-
const lastArchive = provingState.lastArchiveTreeSnapshot;
|
|
299
|
-
const newL1ToL2MessageTreeSnapshot = provingState.newL1ToL2MessageTreeSnapshot;
|
|
300
|
-
const spongeBlobState = provingState.getStartSpongeBlob().clone();
|
|
214
|
+
const numBlobFields = toNumBlobFields(txs);
|
|
215
|
+
provingState.startNewBlock(txs.length, numBlobFields);
|
|
301
216
|
|
|
217
|
+
logger.info(
|
|
218
|
+
`Adding ${txs.length} transactions with ${numBlobFields} blob fields to block ${provingState.blockNumber}`,
|
|
219
|
+
);
|
|
302
220
|
for (const tx of txs) {
|
|
303
221
|
try {
|
|
304
222
|
if (!provingState.verifyState()) {
|
|
@@ -309,30 +227,13 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
309
227
|
|
|
310
228
|
logger.info(`Received transaction: ${tx.hash}`);
|
|
311
229
|
|
|
312
|
-
const
|
|
313
|
-
const
|
|
314
|
-
tx,
|
|
315
|
-
lastArchive,
|
|
316
|
-
newL1ToL2MessageTreeSnapshot,
|
|
317
|
-
startSpongeBlob,
|
|
318
|
-
db,
|
|
319
|
-
);
|
|
320
|
-
|
|
321
|
-
if (!provingState.verifyState()) {
|
|
322
|
-
throw new Error(`Unable to add transaction, preparing base inputs failed`);
|
|
323
|
-
}
|
|
324
|
-
|
|
325
|
-
await spongeBlobState.absorb(tx.txEffect.toBlobFields());
|
|
326
|
-
|
|
327
|
-
const txProvingState = new TxProvingState(tx, hints, treeSnapshots, this.proverId.toField());
|
|
230
|
+
const [hints, treeSnapshots] = await this.prepareTransaction(tx, provingState);
|
|
231
|
+
const txProvingState = new TxProvingState(tx, hints, treeSnapshots);
|
|
328
232
|
const txIndex = provingState.addNewTx(txProvingState);
|
|
233
|
+
this.getOrEnqueueTube(provingState, txIndex);
|
|
329
234
|
if (txProvingState.requireAvmProof) {
|
|
330
|
-
this.getOrEnqueueChonkVerifier(provingState, txIndex);
|
|
331
235
|
logger.debug(`Enqueueing public VM for tx ${txIndex}`);
|
|
332
236
|
this.enqueueVM(provingState, txIndex);
|
|
333
|
-
} else {
|
|
334
|
-
logger.debug(`Enqueueing base rollup for private-only tx ${txIndex}`);
|
|
335
|
-
this.enqueueBaseRollup(provingState, txIndex);
|
|
336
237
|
}
|
|
337
238
|
} catch (err: any) {
|
|
338
239
|
throw new Error(`Error adding transaction ${tx.hash.toString()} to block ${blockNumber}: ${err.message}`, {
|
|
@@ -340,40 +241,24 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
340
241
|
});
|
|
341
242
|
}
|
|
342
243
|
}
|
|
343
|
-
|
|
344
|
-
await spongeBlobState.absorb([createBlockEndMarker(txs.length)]);
|
|
345
|
-
|
|
346
|
-
provingState.setEndSpongeBlob(spongeBlobState);
|
|
347
|
-
|
|
348
|
-
// Txs have been added to the block. Now try to accumulate the blobs as far as we can:
|
|
349
|
-
await this.provingState.setBlobAccumulators();
|
|
350
244
|
}
|
|
351
245
|
|
|
352
246
|
/**
|
|
353
|
-
* Kickstarts
|
|
354
|
-
* Note that if the
|
|
247
|
+
* Kickstarts tube circuits for the specified txs. These will be used during epoch proving.
|
|
248
|
+
* Note that if the tube circuits are not started this way, they will be started nontheless after processing.
|
|
355
249
|
*/
|
|
356
|
-
@trackSpan('ProvingOrchestrator.
|
|
357
|
-
public
|
|
250
|
+
@trackSpan('ProvingOrchestrator.startTubeCircuits')
|
|
251
|
+
public startTubeCircuits(txs: Tx[]) {
|
|
358
252
|
if (!this.provingState?.verifyState()) {
|
|
359
|
-
throw new Error(`
|
|
253
|
+
throw new Error(`Invalid proving state, call startNewEpoch before starting tube circuits`);
|
|
360
254
|
}
|
|
361
|
-
const
|
|
362
|
-
for (const tx of publicTxs) {
|
|
255
|
+
for (const tx of txs) {
|
|
363
256
|
const txHash = tx.getTxHash().toString();
|
|
364
|
-
const
|
|
365
|
-
const tubeProof =
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
typeof NESTED_RECURSIVE_ROLLUP_HONK_PROOF_LENGTH
|
|
370
|
-
>
|
|
371
|
-
>();
|
|
372
|
-
logger.debug(`Starting chonk verifier circuit for tx ${txHash}`);
|
|
373
|
-
this.doEnqueueChonkVerifier(txHash, privateInputs, proof => {
|
|
374
|
-
tubeProof.resolve(proof);
|
|
375
|
-
});
|
|
376
|
-
this.provingState.cachedChonkVerifierProofs.set(txHash, tubeProof.promise);
|
|
257
|
+
const tubeInputs = new TubeInputs(!!tx.data.forPublic, tx.clientIvcProof);
|
|
258
|
+
const tubeProof = promiseWithResolvers<ProofAndVerificationKey<typeof TUBE_PROOF_LENGTH>>();
|
|
259
|
+
logger.debug(`Starting tube circuit for tx ${txHash}`);
|
|
260
|
+
this.doEnqueueTube(txHash, tubeInputs, proof => tubeProof.resolve(proof));
|
|
261
|
+
this.provingState?.cachedTubeProofs.set(txHash, tubeProof.promise);
|
|
377
262
|
}
|
|
378
263
|
return Promise.resolve();
|
|
379
264
|
}
|
|
@@ -385,50 +270,58 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
385
270
|
@trackSpan('ProvingOrchestrator.setBlockCompleted', (blockNumber: number) => ({
|
|
386
271
|
[Attributes.BLOCK_NUMBER]: blockNumber,
|
|
387
272
|
}))
|
|
388
|
-
public async setBlockCompleted(blockNumber: number, expectedHeader?: BlockHeader): Promise<
|
|
273
|
+
public async setBlockCompleted(blockNumber: number, expectedHeader?: BlockHeader): Promise<L2Block> {
|
|
389
274
|
const provingState = this.provingState?.getBlockProvingStateByBlockNumber(blockNumber);
|
|
390
275
|
if (!provingState) {
|
|
391
276
|
throw new Error(`Block proving state for ${blockNumber} not found`);
|
|
392
277
|
}
|
|
393
278
|
|
|
394
|
-
|
|
395
|
-
|
|
396
|
-
|
|
397
|
-
|
|
279
|
+
if (!provingState.spongeBlobState) {
|
|
280
|
+
// If we are completing an empty block, initialize the provingState.
|
|
281
|
+
// We will have 0 txs and no blob fields.
|
|
282
|
+
provingState.startNewBlock(0, 0);
|
|
398
283
|
}
|
|
399
284
|
|
|
400
|
-
// Abort if the proving state is not valid due to errors occurred elsewhere.
|
|
401
285
|
if (!provingState.verifyState()) {
|
|
402
|
-
throw new Error(`
|
|
403
|
-
}
|
|
404
|
-
|
|
405
|
-
if (provingState.isAcceptingTxs()) {
|
|
406
|
-
throw new Error(
|
|
407
|
-
`Block ${blockNumber} is still accepting txs. Call setBlockCompleted after all txs have been added.`,
|
|
408
|
-
);
|
|
286
|
+
throw new Error(`Block proving failed: ${provingState.error}`);
|
|
409
287
|
}
|
|
410
288
|
|
|
411
289
|
// And build the block header
|
|
412
290
|
logger.verbose(`Block ${blockNumber} completed. Assembling header.`);
|
|
413
|
-
|
|
291
|
+
await this.buildBlock(provingState, expectedHeader);
|
|
414
292
|
|
|
415
|
-
|
|
293
|
+
logger.debug(`Accumulating blobs for ${blockNumber}`);
|
|
294
|
+
await this.provingState?.setBlobAccumulators(blockNumber);
|
|
416
295
|
|
|
417
|
-
|
|
296
|
+
// If the proofs were faster than the block building, then we need to try the block root rollup again here
|
|
297
|
+
await this.checkAndEnqueueBlockRootRollup(provingState);
|
|
298
|
+
return provingState.block!;
|
|
418
299
|
}
|
|
419
300
|
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
const
|
|
301
|
+
/** Returns the block as built for a given index. */
|
|
302
|
+
public getBlock(index: number): L2Block {
|
|
303
|
+
const block = this.provingState?.blocks[index]?.block;
|
|
304
|
+
if (!block) {
|
|
305
|
+
throw new Error(`Block at index ${index} not available`);
|
|
306
|
+
}
|
|
307
|
+
return block;
|
|
308
|
+
}
|
|
423
309
|
|
|
424
|
-
|
|
310
|
+
private async buildBlock(provingState: BlockProvingState, expectedHeader?: BlockHeader) {
|
|
311
|
+
// Collect all new nullifiers, commitments, and contracts from all txs in this block to build body
|
|
312
|
+
const txs = provingState.allTxs.map(a => a.processedTx);
|
|
425
313
|
|
|
426
314
|
// Get db for this block
|
|
427
315
|
const db = this.dbs.get(provingState.blockNumber)!;
|
|
428
316
|
|
|
429
317
|
// Given we've applied every change from this block, now assemble the block header
|
|
430
318
|
// and update the archive tree, so we're ready to start processing the next block
|
|
431
|
-
const header = await
|
|
319
|
+
const { header, body } = await buildHeaderAndBodyFromTxs(
|
|
320
|
+
txs,
|
|
321
|
+
provingState.globalVariables,
|
|
322
|
+
provingState.newL1ToL2Messages,
|
|
323
|
+
db,
|
|
324
|
+
);
|
|
432
325
|
|
|
433
326
|
if (expectedHeader && !header.equals(expectedHeader)) {
|
|
434
327
|
logger.error(`Block header mismatch: header=${header} expectedHeader=${expectedHeader}`);
|
|
@@ -440,65 +333,26 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
440
333
|
);
|
|
441
334
|
await db.updateArchive(header);
|
|
442
335
|
|
|
443
|
-
|
|
336
|
+
// Assemble the L2 block
|
|
337
|
+
const newArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
|
|
338
|
+
const l2Block = new L2Block(newArchive, header, body);
|
|
339
|
+
|
|
340
|
+
await this.verifyBuiltBlockAgainstSyncedState(l2Block, newArchive);
|
|
444
341
|
|
|
445
|
-
|
|
342
|
+
logger.verbose(`Orchestrator finalized block ${l2Block.number}`);
|
|
343
|
+
provingState.setBlock(l2Block);
|
|
446
344
|
}
|
|
447
345
|
|
|
448
346
|
// Flagged as protected to disable in certain unit tests
|
|
449
|
-
protected async verifyBuiltBlockAgainstSyncedState(
|
|
450
|
-
const
|
|
451
|
-
if (!builtBlockHeader) {
|
|
452
|
-
logger.debug('Block header not built yet, skipping header check.');
|
|
453
|
-
return;
|
|
454
|
-
}
|
|
455
|
-
|
|
456
|
-
const output = provingState.getBlockRootRollupOutput();
|
|
457
|
-
if (!output) {
|
|
458
|
-
logger.debug('Block root rollup proof not built yet, skipping header check.');
|
|
459
|
-
return;
|
|
460
|
-
}
|
|
461
|
-
const header = await buildHeaderFromCircuitOutputs(output);
|
|
462
|
-
|
|
463
|
-
if (!(await header.hash()).equals(await builtBlockHeader.hash())) {
|
|
464
|
-
logger.error(`Block header mismatch.\nCircuit: ${inspect(header)}\nComputed: ${inspect(builtBlockHeader)}`);
|
|
465
|
-
provingState.reject(`Block header hash mismatch.`);
|
|
466
|
-
return;
|
|
467
|
-
}
|
|
468
|
-
|
|
469
|
-
// Get db for this block
|
|
470
|
-
const blockNumber = provingState.blockNumber;
|
|
471
|
-
const db = this.dbs.get(blockNumber)!;
|
|
472
|
-
|
|
473
|
-
const newArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
|
|
474
|
-
const syncedArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, this.dbProvider.getSnapshot(blockNumber));
|
|
347
|
+
protected async verifyBuiltBlockAgainstSyncedState(l2Block: L2Block, newArchive: AppendOnlyTreeSnapshot) {
|
|
348
|
+
const syncedArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, this.dbProvider.getSnapshot(l2Block.number));
|
|
475
349
|
if (!syncedArchive.equals(newArchive)) {
|
|
476
|
-
|
|
477
|
-
`Archive tree mismatch for block ${
|
|
350
|
+
throw new Error(
|
|
351
|
+
`Archive tree mismatch for block ${l2Block.number}: world state synced to ${inspect(
|
|
478
352
|
syncedArchive,
|
|
479
353
|
)} but built ${inspect(newArchive)}`,
|
|
480
354
|
);
|
|
481
|
-
provingState.reject(`Archive tree mismatch.`);
|
|
482
|
-
return;
|
|
483
|
-
}
|
|
484
|
-
|
|
485
|
-
const circuitArchive = output.newArchive;
|
|
486
|
-
if (!newArchive.equals(circuitArchive)) {
|
|
487
|
-
logger.error(`New archive mismatch.\nCircuit: ${output.newArchive}\nComputed: ${newArchive}`);
|
|
488
|
-
provingState.reject(`New archive mismatch.`);
|
|
489
|
-
return;
|
|
490
355
|
}
|
|
491
|
-
|
|
492
|
-
// TODO(palla/prover): This closes the fork only on the happy path. If this epoch orchestrator
|
|
493
|
-
// is aborted and never reaches this point, it will leak the fork. We need to add a global cleanup,
|
|
494
|
-
// but have to make sure it only runs once all operations are completed, otherwise some function here
|
|
495
|
-
// will attempt to access the fork after it was closed.
|
|
496
|
-
logger.debug(`Cleaning up world state fork for ${blockNumber}`);
|
|
497
|
-
void this.dbs
|
|
498
|
-
.get(blockNumber)
|
|
499
|
-
?.close()
|
|
500
|
-
.then(() => this.dbs.delete(blockNumber))
|
|
501
|
-
.catch(err => logger.error(`Error closing db for block ${blockNumber}`, err));
|
|
502
356
|
}
|
|
503
357
|
|
|
504
358
|
/**
|
|
@@ -515,11 +369,7 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
515
369
|
/**
|
|
516
370
|
* Returns the proof for the current epoch.
|
|
517
371
|
*/
|
|
518
|
-
public async finalizeEpoch()
|
|
519
|
-
publicInputs: RootRollupPublicInputs;
|
|
520
|
-
proof: Proof;
|
|
521
|
-
batchedBlobInputs: BatchedBlob;
|
|
522
|
-
}> {
|
|
372
|
+
public async finalizeEpoch() {
|
|
523
373
|
if (!this.provingState || !this.provingPromise) {
|
|
524
374
|
throw new Error(`Invalid proving state, an epoch must be proven before it can be finalized`);
|
|
525
375
|
}
|
|
@@ -529,7 +379,14 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
529
379
|
throw new Error(`Epoch proving failed: ${result.reason}`);
|
|
530
380
|
}
|
|
531
381
|
|
|
532
|
-
|
|
382
|
+
// TODO(MW): Move this? Requires async and don't want to force root methods to be async
|
|
383
|
+
// TODO(MW): EpochProvingState uses this.blocks.filter(b => !!b).length as total blocks, use this below:
|
|
384
|
+
const finalBlock = this.provingState.blocks[this.provingState.totalNumBlocks - 1];
|
|
385
|
+
if (!finalBlock || !finalBlock.endBlobAccumulator) {
|
|
386
|
+
throw new Error(`Epoch's final block not ready for finalize`);
|
|
387
|
+
}
|
|
388
|
+
const finalBatchedBlob = await finalBlock.endBlobAccumulator.finalize();
|
|
389
|
+
this.provingState.setFinalBatchedBlob(finalBatchedBlob);
|
|
533
390
|
|
|
534
391
|
const epochProofResult = this.provingState.getEpochProofResult();
|
|
535
392
|
|
|
@@ -541,6 +398,20 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
541
398
|
return epochProofResult;
|
|
542
399
|
}
|
|
543
400
|
|
|
401
|
+
/**
|
|
402
|
+
* Starts the proving process for the given transaction and adds it to our state
|
|
403
|
+
* @param tx - The transaction whose proving we wish to commence
|
|
404
|
+
* @param provingState - The proving state being worked on
|
|
405
|
+
*/
|
|
406
|
+
private async prepareTransaction(tx: ProcessedTx, provingState: BlockProvingState) {
|
|
407
|
+
const txInputs = await this.prepareBaseRollupInputs(provingState, tx);
|
|
408
|
+
if (!txInputs) {
|
|
409
|
+
// This should not be possible
|
|
410
|
+
throw new Error(`Unable to add transaction, preparing base inputs failed`);
|
|
411
|
+
}
|
|
412
|
+
return txInputs;
|
|
413
|
+
}
|
|
414
|
+
|
|
544
415
|
/**
|
|
545
416
|
* Enqueue a job to be scheduled
|
|
546
417
|
* @param provingState - The proving state object being operated on
|
|
@@ -548,11 +419,11 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
548
419
|
* @param job - The actual job, returns a promise notifying of the job's completion
|
|
549
420
|
*/
|
|
550
421
|
private deferredProving<T>(
|
|
551
|
-
provingState: EpochProvingState |
|
|
422
|
+
provingState: EpochProvingState | BlockProvingState | undefined,
|
|
552
423
|
request: (signal: AbortSignal) => Promise<T>,
|
|
553
424
|
callback: (result: T) => void | Promise<void>,
|
|
554
425
|
) {
|
|
555
|
-
if (!provingState
|
|
426
|
+
if (!provingState?.verifyState()) {
|
|
556
427
|
logger.debug(`Not enqueuing job, state no longer valid`);
|
|
557
428
|
return;
|
|
558
429
|
}
|
|
@@ -570,7 +441,7 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
570
441
|
}
|
|
571
442
|
|
|
572
443
|
const result = await request(controller.signal);
|
|
573
|
-
if (!provingState
|
|
444
|
+
if (!provingState?.verifyState()) {
|
|
574
445
|
logger.debug(`State no longer valid, discarding result`);
|
|
575
446
|
return;
|
|
576
447
|
}
|
|
@@ -603,58 +474,60 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
603
474
|
setImmediate(() => void safeJob());
|
|
604
475
|
}
|
|
605
476
|
|
|
606
|
-
private async
|
|
607
|
-
const l1ToL2MessagesPadded = padArrayEnd
|
|
477
|
+
private async prepareBaseParityInputs(l1ToL2Messages: Fr[], db: MerkleTreeWriteOperations) {
|
|
478
|
+
const l1ToL2MessagesPadded = padArrayEnd(
|
|
608
479
|
l1ToL2Messages,
|
|
609
480
|
Fr.ZERO,
|
|
610
481
|
NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP,
|
|
611
482
|
'Too many L1 to L2 messages',
|
|
612
483
|
);
|
|
484
|
+
const baseParityInputs = times(NUM_BASE_PARITY_PER_ROOT_PARITY, i =>
|
|
485
|
+
BaseParityInputs.fromSlice(l1ToL2MessagesPadded, i, getVKTreeRoot()),
|
|
486
|
+
);
|
|
613
487
|
|
|
614
|
-
const
|
|
615
|
-
|
|
488
|
+
const l1ToL2MessageTreeSnapshot = await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db);
|
|
489
|
+
|
|
490
|
+
const l1ToL2MessageSubtreeSiblingPath = assertLength(
|
|
616
491
|
await getSubtreeSiblingPath(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, L1_TO_L2_MSG_SUBTREE_HEIGHT, db),
|
|
617
|
-
|
|
492
|
+
L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH,
|
|
618
493
|
);
|
|
619
494
|
|
|
620
495
|
// Update the local trees to include the new l1 to l2 messages
|
|
621
496
|
await db.appendLeaves(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, l1ToL2MessagesPadded);
|
|
622
|
-
|
|
623
|
-
const newL1ToL2MessageTreeSnapshot = await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db);
|
|
624
|
-
const newL1ToL2MessageSubtreeRootSiblingPath = assertLength(
|
|
625
|
-
await getSubtreeSiblingPath(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, L1_TO_L2_MSG_SUBTREE_HEIGHT, db),
|
|
626
|
-
L1_TO_L2_MSG_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
|
|
627
|
-
);
|
|
497
|
+
const l1ToL2MessageTreeSnapshotAfterInsertion = await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db);
|
|
628
498
|
|
|
629
499
|
return {
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
500
|
+
l1ToL2MessageTreeSnapshot,
|
|
501
|
+
l1ToL2MessageSubtreeSiblingPath,
|
|
502
|
+
l1ToL2MessageTreeSnapshotAfterInsertion,
|
|
503
|
+
baseParityInputs,
|
|
634
504
|
};
|
|
635
505
|
}
|
|
636
506
|
|
|
637
507
|
// Updates the merkle trees for a transaction. The first enqueued job for a transaction
|
|
638
|
-
@trackSpan('ProvingOrchestrator.prepareBaseRollupInputs', tx => ({
|
|
508
|
+
@trackSpan('ProvingOrchestrator.prepareBaseRollupInputs', (_, tx) => ({
|
|
639
509
|
[Attributes.TX_HASH]: tx.hash.toString(),
|
|
640
510
|
}))
|
|
641
511
|
private async prepareBaseRollupInputs(
|
|
512
|
+
provingState: BlockProvingState,
|
|
642
513
|
tx: ProcessedTx,
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
514
|
+
): Promise<[BaseRollupHints, TreeSnapshots] | undefined> {
|
|
515
|
+
if (!provingState.verifyState() || !provingState.spongeBlobState) {
|
|
516
|
+
logger.debug('Not preparing base rollup inputs, state invalid');
|
|
517
|
+
return;
|
|
518
|
+
}
|
|
519
|
+
|
|
520
|
+
const db = this.dbs.get(provingState.blockNumber)!;
|
|
521
|
+
|
|
648
522
|
// We build the base rollup inputs using a mock proof and verification key.
|
|
649
|
-
// These will be overwritten later once we have proven the
|
|
523
|
+
// These will be overwritten later once we have proven the tube circuit and any public kernels
|
|
650
524
|
const [ms, hints] = await elapsed(
|
|
651
525
|
insertSideEffectsAndBuildBaseRollupHints(
|
|
652
526
|
tx,
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
startSpongeBlob,
|
|
656
|
-
this.proverId.toField(),
|
|
527
|
+
provingState.globalVariables,
|
|
528
|
+
provingState.l1ToL2MessageTreeSnapshotAfterInsertion,
|
|
657
529
|
db,
|
|
530
|
+
provingState.spongeBlobState,
|
|
658
531
|
),
|
|
659
532
|
);
|
|
660
533
|
|
|
@@ -667,6 +540,10 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
667
540
|
);
|
|
668
541
|
const treeSnapshots: TreeSnapshots = new Map((await Promise.all(promises)).map(obj => [obj.key, obj.value]));
|
|
669
542
|
|
|
543
|
+
if (!provingState.verifyState()) {
|
|
544
|
+
logger.debug(`Discarding proving job, state no longer valid`);
|
|
545
|
+
return;
|
|
546
|
+
}
|
|
670
547
|
return [hints, treeSnapshots];
|
|
671
548
|
}
|
|
672
549
|
|
|
@@ -678,11 +555,6 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
678
555
|
return;
|
|
679
556
|
}
|
|
680
557
|
|
|
681
|
-
if (!provingState.tryStartProvingBase(txIndex)) {
|
|
682
|
-
logger.debug(`Base rollup for tx ${txIndex} already started.`);
|
|
683
|
-
return;
|
|
684
|
-
}
|
|
685
|
-
|
|
686
558
|
const txProvingState = provingState.getTxProvingState(txIndex);
|
|
687
559
|
const { processedTx } = txProvingState;
|
|
688
560
|
const { rollupType, inputs } = txProvingState.getBaseRollupTypeAndInputs();
|
|
@@ -694,81 +566,69 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
694
566
|
wrapCallbackInSpan(
|
|
695
567
|
this.tracer,
|
|
696
568
|
`ProvingOrchestrator.prover.${
|
|
697
|
-
inputs instanceof
|
|
698
|
-
? 'getPrivateTxBaseRollupProof'
|
|
699
|
-
: 'getPublicTxBaseRollupProof'
|
|
569
|
+
inputs instanceof PrivateBaseRollupInputs ? 'getPrivateBaseRollupProof' : 'getPublicBaseRollupProof'
|
|
700
570
|
}`,
|
|
701
571
|
{
|
|
702
572
|
[Attributes.TX_HASH]: processedTx.hash.toString(),
|
|
703
573
|
[Attributes.PROTOCOL_CIRCUIT_NAME]: rollupType,
|
|
704
574
|
},
|
|
705
575
|
signal => {
|
|
706
|
-
if (inputs instanceof
|
|
707
|
-
return this.prover.
|
|
576
|
+
if (inputs instanceof PrivateBaseRollupInputs) {
|
|
577
|
+
return this.prover.getPrivateBaseRollupProof(inputs, signal, provingState.epochNumber);
|
|
708
578
|
} else {
|
|
709
|
-
return this.prover.
|
|
579
|
+
return this.prover.getPublicBaseRollupProof(inputs, signal, provingState.epochNumber);
|
|
710
580
|
}
|
|
711
581
|
},
|
|
712
582
|
),
|
|
713
|
-
result => {
|
|
583
|
+
async result => {
|
|
714
584
|
logger.debug(`Completed proof for ${rollupType} for tx ${processedTx.hash.toString()}`);
|
|
715
|
-
validatePartialState(result.inputs.
|
|
585
|
+
validatePartialState(result.inputs.end, txProvingState.treeSnapshots);
|
|
716
586
|
const leafLocation = provingState.setBaseRollupProof(txIndex, result);
|
|
717
587
|
if (provingState.totalNumTxs === 1) {
|
|
718
|
-
this.checkAndEnqueueBlockRootRollup(provingState);
|
|
588
|
+
await this.checkAndEnqueueBlockRootRollup(provingState);
|
|
719
589
|
} else {
|
|
720
|
-
this.checkAndEnqueueNextMergeRollup(provingState, leafLocation);
|
|
590
|
+
await this.checkAndEnqueueNextMergeRollup(provingState, leafLocation);
|
|
721
591
|
}
|
|
722
592
|
},
|
|
723
593
|
);
|
|
724
594
|
}
|
|
725
595
|
|
|
726
|
-
// Enqueues the
|
|
727
|
-
// Once completed, will enqueue the
|
|
728
|
-
private
|
|
596
|
+
// Enqueues the tube circuit for a given transaction index, or reuses the one already enqueued
|
|
597
|
+
// Once completed, will enqueue the next circuit, either a public kernel or the base rollup
|
|
598
|
+
private getOrEnqueueTube(provingState: BlockProvingState, txIndex: number) {
|
|
729
599
|
if (!provingState.verifyState()) {
|
|
730
|
-
logger.debug('Not running
|
|
600
|
+
logger.debug('Not running tube circuit, state invalid');
|
|
731
601
|
return;
|
|
732
602
|
}
|
|
733
603
|
|
|
734
604
|
const txProvingState = provingState.getTxProvingState(txIndex);
|
|
735
605
|
const txHash = txProvingState.processedTx.hash.toString();
|
|
736
|
-
|
|
737
|
-
const handleResult = (
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
|
|
741
|
-
|
|
742
|
-
) => {
|
|
743
|
-
logger.debug(`Got chonk verifier proof for tx index: ${txIndex}`, { txHash });
|
|
744
|
-
txProvingState.setPublicChonkVerifierProof(result);
|
|
745
|
-
this.provingState?.cachedChonkVerifierProofs.delete(txHash);
|
|
746
|
-
this.checkAndEnqueueBaseRollup(provingState, txIndex);
|
|
606
|
+
|
|
607
|
+
const handleResult = (result: ProofAndVerificationKey<typeof TUBE_PROOF_LENGTH>) => {
|
|
608
|
+
logger.debug(`Got tube proof for tx index: ${txIndex}`, { txHash });
|
|
609
|
+
txProvingState.setTubeProof(result);
|
|
610
|
+
this.provingState?.cachedTubeProofs.delete(txHash);
|
|
611
|
+
this.checkAndEnqueueNextTxCircuit(provingState, txIndex);
|
|
747
612
|
};
|
|
748
613
|
|
|
749
|
-
if (this.provingState?.
|
|
750
|
-
logger.debug(`
|
|
751
|
-
void this.provingState!.
|
|
614
|
+
if (this.provingState?.cachedTubeProofs.has(txHash)) {
|
|
615
|
+
logger.debug(`Tube proof already enqueued for tx index: ${txIndex}`, { txHash });
|
|
616
|
+
void this.provingState!.cachedTubeProofs.get(txHash)!.then(handleResult);
|
|
752
617
|
return;
|
|
753
618
|
}
|
|
754
619
|
|
|
755
|
-
logger.debug(`Enqueuing
|
|
756
|
-
this.
|
|
620
|
+
logger.debug(`Enqueuing tube circuit for tx index: ${txIndex}`);
|
|
621
|
+
this.doEnqueueTube(txHash, txProvingState.getTubeInputs(), handleResult);
|
|
757
622
|
}
|
|
758
623
|
|
|
759
|
-
private
|
|
624
|
+
private doEnqueueTube(
|
|
760
625
|
txHash: string,
|
|
761
|
-
inputs:
|
|
762
|
-
handler: (
|
|
763
|
-
result: PublicInputsAndRecursiveProof<
|
|
764
|
-
PublicChonkVerifierPublicInputs,
|
|
765
|
-
typeof NESTED_RECURSIVE_ROLLUP_HONK_PROOF_LENGTH
|
|
766
|
-
>,
|
|
767
|
-
) => void,
|
|
626
|
+
inputs: TubeInputs,
|
|
627
|
+
handler: (result: ProofAndVerificationKey<typeof TUBE_PROOF_LENGTH>) => void,
|
|
768
628
|
provingState: EpochProvingState | BlockProvingState = this.provingState!,
|
|
769
629
|
) {
|
|
770
|
-
if (!provingState
|
|
771
|
-
logger.debug('Not running
|
|
630
|
+
if (!provingState?.verifyState()) {
|
|
631
|
+
logger.debug('Not running tube circuit, state invalid');
|
|
772
632
|
return;
|
|
773
633
|
}
|
|
774
634
|
|
|
@@ -776,12 +636,12 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
776
636
|
provingState,
|
|
777
637
|
wrapCallbackInSpan(
|
|
778
638
|
this.tracer,
|
|
779
|
-
'ProvingOrchestrator.prover.
|
|
639
|
+
'ProvingOrchestrator.prover.getTubeProof',
|
|
780
640
|
{
|
|
781
641
|
[Attributes.TX_HASH]: txHash,
|
|
782
|
-
[Attributes.PROTOCOL_CIRCUIT_NAME]: '
|
|
642
|
+
[Attributes.PROTOCOL_CIRCUIT_NAME]: 'tube-circuit' satisfies CircuitName,
|
|
783
643
|
},
|
|
784
|
-
signal => this.prover.
|
|
644
|
+
signal => this.prover.getTubeProof(inputs, signal, this.provingState!.epochNumber),
|
|
785
645
|
),
|
|
786
646
|
handler,
|
|
787
647
|
);
|
|
@@ -795,45 +655,39 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
795
655
|
return;
|
|
796
656
|
}
|
|
797
657
|
|
|
798
|
-
if (!provingState.tryStartProvingMerge(location)) {
|
|
799
|
-
logger.debug('Merge rollup already started.');
|
|
800
|
-
return;
|
|
801
|
-
}
|
|
802
|
-
|
|
803
658
|
const inputs = provingState.getMergeRollupInputs(location);
|
|
804
659
|
|
|
805
660
|
this.deferredProving(
|
|
806
661
|
provingState,
|
|
807
662
|
wrapCallbackInSpan(
|
|
808
663
|
this.tracer,
|
|
809
|
-
'ProvingOrchestrator.prover.
|
|
664
|
+
'ProvingOrchestrator.prover.getMergeRollupProof',
|
|
810
665
|
{
|
|
811
|
-
[Attributes.PROTOCOL_CIRCUIT_NAME]: 'rollup
|
|
666
|
+
[Attributes.PROTOCOL_CIRCUIT_NAME]: 'merge-rollup' satisfies CircuitName,
|
|
812
667
|
},
|
|
813
|
-
signal => this.prover.
|
|
668
|
+
signal => this.prover.getMergeRollupProof(inputs, signal, provingState.epochNumber),
|
|
814
669
|
),
|
|
815
|
-
result => {
|
|
670
|
+
async result => {
|
|
816
671
|
provingState.setMergeRollupProof(location, result);
|
|
817
|
-
this.checkAndEnqueueNextMergeRollup(provingState, location);
|
|
672
|
+
await this.checkAndEnqueueNextMergeRollup(provingState, location);
|
|
818
673
|
},
|
|
819
674
|
);
|
|
820
675
|
}
|
|
821
676
|
|
|
822
677
|
// Executes the block root rollup circuit
|
|
823
|
-
private enqueueBlockRootRollup(provingState: BlockProvingState) {
|
|
678
|
+
private async enqueueBlockRootRollup(provingState: BlockProvingState) {
|
|
824
679
|
if (!provingState.verifyState()) {
|
|
825
680
|
logger.debug('Not running block root rollup, state no longer valid');
|
|
826
681
|
return;
|
|
827
682
|
}
|
|
828
683
|
|
|
829
|
-
|
|
830
|
-
logger.debug('Block root rollup already started.');
|
|
831
|
-
return;
|
|
832
|
-
}
|
|
684
|
+
provingState.blockRootRollupStarted = true;
|
|
833
685
|
|
|
834
|
-
const { rollupType, inputs } = provingState.getBlockRootRollupTypeAndInputs();
|
|
686
|
+
const { rollupType, inputs } = await provingState.getBlockRootRollupTypeAndInputs();
|
|
835
687
|
|
|
836
|
-
logger.debug(
|
|
688
|
+
logger.debug(
|
|
689
|
+
`Enqueuing ${rollupType} for block ${provingState.blockNumber} with ${provingState.newL1ToL2Messages.length} l1 to l2 msgs.`,
|
|
690
|
+
);
|
|
837
691
|
|
|
838
692
|
this.deferredProving(
|
|
839
693
|
provingState,
|
|
@@ -844,32 +698,56 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
844
698
|
[Attributes.PROTOCOL_CIRCUIT_NAME]: rollupType,
|
|
845
699
|
},
|
|
846
700
|
signal => {
|
|
847
|
-
if (inputs instanceof
|
|
848
|
-
return this.prover.
|
|
849
|
-
} else if (inputs instanceof
|
|
850
|
-
return this.prover.
|
|
851
|
-
} else if (inputs instanceof BlockRootEmptyTxFirstRollupPrivateInputs) {
|
|
852
|
-
return this.prover.getBlockRootEmptyTxFirstRollupProof(inputs, signal, provingState.epochNumber);
|
|
853
|
-
} else if (inputs instanceof BlockRootSingleTxRollupPrivateInputs) {
|
|
854
|
-
return this.prover.getBlockRootSingleTxRollupProof(inputs, signal, provingState.epochNumber);
|
|
701
|
+
if (inputs instanceof EmptyBlockRootRollupInputs) {
|
|
702
|
+
return this.prover.getEmptyBlockRootRollupProof(inputs, signal, provingState.epochNumber);
|
|
703
|
+
} else if (inputs instanceof SingleTxBlockRootRollupInputs) {
|
|
704
|
+
return this.prover.getSingleTxBlockRootRollupProof(inputs, signal, provingState.epochNumber);
|
|
855
705
|
} else {
|
|
856
706
|
return this.prover.getBlockRootRollupProof(inputs, signal, provingState.epochNumber);
|
|
857
707
|
}
|
|
858
708
|
},
|
|
859
709
|
),
|
|
860
710
|
async result => {
|
|
861
|
-
|
|
862
|
-
await
|
|
711
|
+
provingState.setBlockRootRollupProof(result);
|
|
712
|
+
const header = await provingState.buildHeaderFromProvingOutputs();
|
|
713
|
+
if (!(await header.hash()).equals(await provingState.block!.header.hash())) {
|
|
714
|
+
logger.error(
|
|
715
|
+
`Block header mismatch.\nCircuit: ${inspect(header)}\nComputed: ${inspect(provingState.block!.header)}`,
|
|
716
|
+
);
|
|
717
|
+
provingState.reject(`Block header hash mismatch.`);
|
|
718
|
+
}
|
|
719
|
+
|
|
720
|
+
const dbArchiveRoot = provingState.block!.archive.root;
|
|
721
|
+
const circuitArchiveRoot = result.inputs.newArchive.root;
|
|
722
|
+
if (!dbArchiveRoot.equals(circuitArchiveRoot)) {
|
|
723
|
+
logger.error(
|
|
724
|
+
`New archive root mismatch.\nCircuit: ${result.inputs.newArchive.root}\nComputed: ${dbArchiveRoot}`,
|
|
725
|
+
);
|
|
726
|
+
provingState.reject(`New archive root mismatch.`);
|
|
727
|
+
}
|
|
863
728
|
|
|
864
|
-
|
|
729
|
+
const endBlobAccumulatorPublicInputs = BlobAccumulatorPublicInputs.fromBatchedBlobAccumulator(
|
|
730
|
+
provingState.endBlobAccumulator!,
|
|
731
|
+
);
|
|
732
|
+
const circuitEndBlobAccumulatorState = result.inputs.blobPublicInputs.endBlobAccumulator;
|
|
733
|
+
if (!circuitEndBlobAccumulatorState.equals(endBlobAccumulatorPublicInputs)) {
|
|
734
|
+
logger.error(
|
|
735
|
+
`Blob accumulator state mismatch.\nCircuit: ${inspect(circuitEndBlobAccumulatorState)}\nComputed: ${inspect(
|
|
736
|
+
endBlobAccumulatorPublicInputs,
|
|
737
|
+
)}`,
|
|
738
|
+
);
|
|
739
|
+
provingState.reject(`Blob accumulator state mismatch.`);
|
|
740
|
+
}
|
|
865
741
|
|
|
866
|
-
|
|
867
|
-
|
|
742
|
+
logger.debug(`Completed ${rollupType} proof for block ${provingState.block!.number}`);
|
|
743
|
+
// validatePartialState(result.inputs.end, tx.treeSnapshots); // TODO(palla/prover)
|
|
868
744
|
|
|
869
|
-
|
|
870
|
-
|
|
745
|
+
const epochProvingState = this.provingState!;
|
|
746
|
+
const leafLocation = epochProvingState.setBlockRootRollupProof(provingState.index, result);
|
|
747
|
+
if (epochProvingState.totalNumBlocks === 1) {
|
|
748
|
+
this.enqueueEpochPadding(epochProvingState);
|
|
871
749
|
} else {
|
|
872
|
-
this.checkAndEnqueueNextBlockMergeRollup(
|
|
750
|
+
this.checkAndEnqueueNextBlockMergeRollup(epochProvingState, leafLocation);
|
|
873
751
|
}
|
|
874
752
|
},
|
|
875
753
|
);
|
|
@@ -877,35 +755,24 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
877
755
|
|
|
878
756
|
// Executes the base parity circuit and stores the intermediate state for the root parity circuit
|
|
879
757
|
// Enqueues the root parity circuit if all inputs are available
|
|
880
|
-
private enqueueBaseParityCircuit(
|
|
881
|
-
checkpointProvingState: CheckpointProvingState,
|
|
882
|
-
provingState: BlockProvingState,
|
|
883
|
-
baseParityIndex: number,
|
|
884
|
-
) {
|
|
758
|
+
private enqueueBaseParityCircuit(provingState: BlockProvingState, inputs: BaseParityInputs, index: number) {
|
|
885
759
|
if (!provingState.verifyState()) {
|
|
886
760
|
logger.debug('Not running base parity. State no longer valid.');
|
|
887
761
|
return;
|
|
888
762
|
}
|
|
889
763
|
|
|
890
|
-
if (!provingState.tryStartProvingBaseParity(baseParityIndex)) {
|
|
891
|
-
logger.warn(`Base parity ${baseParityIndex} already started.`);
|
|
892
|
-
return;
|
|
893
|
-
}
|
|
894
|
-
|
|
895
|
-
const inputs = checkpointProvingState.getBaseParityInputs(baseParityIndex);
|
|
896
|
-
|
|
897
764
|
this.deferredProving(
|
|
898
765
|
provingState,
|
|
899
766
|
wrapCallbackInSpan(
|
|
900
767
|
this.tracer,
|
|
901
768
|
'ProvingOrchestrator.prover.getBaseParityProof',
|
|
902
769
|
{
|
|
903
|
-
[Attributes.PROTOCOL_CIRCUIT_NAME]: 'parity
|
|
770
|
+
[Attributes.PROTOCOL_CIRCUIT_NAME]: 'base-parity' satisfies CircuitName,
|
|
904
771
|
},
|
|
905
772
|
signal => this.prover.getBaseParityProof(inputs, signal, provingState.epochNumber),
|
|
906
773
|
),
|
|
907
774
|
provingOutput => {
|
|
908
|
-
provingState.setBaseParityProof(
|
|
775
|
+
provingState.setBaseParityProof(index, provingOutput);
|
|
909
776
|
this.checkAndEnqueueRootParityCircuit(provingState);
|
|
910
777
|
},
|
|
911
778
|
);
|
|
@@ -927,12 +794,7 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
927
794
|
return;
|
|
928
795
|
}
|
|
929
796
|
|
|
930
|
-
|
|
931
|
-
logger.debug('Root parity already started.');
|
|
932
|
-
return;
|
|
933
|
-
}
|
|
934
|
-
|
|
935
|
-
const inputs = provingState.getParityRootInputs();
|
|
797
|
+
const inputs = provingState.getRootParityInputs();
|
|
936
798
|
|
|
937
799
|
this.deferredProving(
|
|
938
800
|
provingState,
|
|
@@ -940,30 +802,25 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
940
802
|
this.tracer,
|
|
941
803
|
'ProvingOrchestrator.prover.getRootParityProof',
|
|
942
804
|
{
|
|
943
|
-
[Attributes.PROTOCOL_CIRCUIT_NAME]: 'parity
|
|
805
|
+
[Attributes.PROTOCOL_CIRCUIT_NAME]: 'root-parity' satisfies CircuitName,
|
|
944
806
|
},
|
|
945
807
|
signal => this.prover.getRootParityProof(inputs, signal, provingState.epochNumber),
|
|
946
808
|
),
|
|
947
|
-
result => {
|
|
809
|
+
async result => {
|
|
948
810
|
provingState.setRootParityProof(result);
|
|
949
|
-
this.checkAndEnqueueBlockRootRollup(provingState);
|
|
811
|
+
await this.checkAndEnqueueBlockRootRollup(provingState);
|
|
950
812
|
},
|
|
951
813
|
);
|
|
952
814
|
}
|
|
953
815
|
|
|
954
816
|
// Executes the block merge rollup circuit and stored the output as intermediate state for the parent merge/block root circuit
|
|
955
817
|
// Enqueues the next level of merge if all inputs are available
|
|
956
|
-
private enqueueBlockMergeRollup(provingState:
|
|
818
|
+
private enqueueBlockMergeRollup(provingState: EpochProvingState, location: TreeNodeLocation) {
|
|
957
819
|
if (!provingState.verifyState()) {
|
|
958
820
|
logger.debug('Not running block merge rollup. State no longer valid.');
|
|
959
821
|
return;
|
|
960
822
|
}
|
|
961
823
|
|
|
962
|
-
if (!provingState.tryStartProvingBlockMerge(location)) {
|
|
963
|
-
logger.debug('Block merge rollup already started.');
|
|
964
|
-
return;
|
|
965
|
-
}
|
|
966
|
-
|
|
967
824
|
const inputs = provingState.getBlockMergeRollupInputs(location);
|
|
968
825
|
this.deferredProving(
|
|
969
826
|
provingState,
|
|
@@ -971,7 +828,7 @@ export class ProvingOrchestrator implements EpochProver {
|
|
|
971
828
|
this.tracer,
|
|
972
829
|
'ProvingOrchestrator.prover.getBlockMergeRollupProof',
|
|
973
830
|
{
|
|
974
|
-
[Attributes.PROTOCOL_CIRCUIT_NAME]: '
|
|
831
|
+
[Attributes.PROTOCOL_CIRCUIT_NAME]: 'block-merge-rollup' satisfies CircuitName,
|
|
975
832
|
},
|
|
976
833
|
signal => this.prover.getBlockMergeRollupProof(inputs, signal, provingState.epochNumber),
|
|
977
834
|
),
|
|
@@ -982,125 +839,29 @@ export class ProvingOrchestrator implements EpochProver {
 982  839 |       );
 983  840 |     }
 984  841 |
 985      | -   private enqueueCheckpointRootRollup(provingState: CheckpointProvingState) {
 986      | -     if (!provingState.verifyState()) {
 987      | -       logger.debug('Not running checkpoint root rollup. State no longer valid.');
 988      | -       return;
 989      | -     }
 990      | -
 991      | -     if (!provingState.tryStartProvingCheckpointRoot()) {
 992      | -       logger.debug('Checkpoint root rollup already started.');
 993      | -       return;
 994      | -     }
 995      | -
 996      | -     const rollupType = provingState.getCheckpointRootRollupType();
 997      | -
 998      | -     logger.debug(`Enqueuing ${rollupType} for checkpoint ${provingState.index}.`);
 999      | -
1000      | -     const inputs = provingState.getCheckpointRootRollupInputs();
1001      | -
1002      | -     this.deferredProving(
1003      | -       provingState,
1004      | -       wrapCallbackInSpan(
1005      | -         this.tracer,
1006      | -         'ProvingOrchestrator.prover.getCheckpointRootRollupProof',
1007      | -         {
1008      | -           [Attributes.PROTOCOL_CIRCUIT_NAME]: rollupType,
1009      | -         },
1010      | -         signal => {
1011      | -           if (inputs instanceof CheckpointRootSingleBlockRollupPrivateInputs) {
1012      | -             return this.prover.getCheckpointRootSingleBlockRollupProof(inputs, signal, provingState.epochNumber);
1013      | -           } else {
1014      | -             return this.prover.getCheckpointRootRollupProof(inputs, signal, provingState.epochNumber);
1015      | -           }
1016      | -         },
1017      | -       ),
1018      | -       result => {
1019      | -         const computedEndBlobAccumulatorState = provingState.getEndBlobAccumulator()!.toBlobAccumulator();
1020      | -         const circuitEndBlobAccumulatorState = result.inputs.endBlobAccumulator;
1021      | -         if (!circuitEndBlobAccumulatorState.equals(computedEndBlobAccumulatorState)) {
1022      | -           logger.error(
1023      | -             `Blob accumulator state mismatch.\nCircuit: ${inspect(circuitEndBlobAccumulatorState)}\nComputed: ${inspect(
1024      | -               computedEndBlobAccumulatorState,
1025      | -             )}`,
1026      | -           );
1027      | -           provingState.reject(`Blob accumulator state mismatch.`);
1028      | -           return;
1029      | -         }
1030      | -
1031      | -         logger.debug(`Completed ${rollupType} proof for checkpoint ${provingState.index}.`);
1032      | -
1033      | -         const leafLocation = provingState.setCheckpointRootRollupProof(result);
1034      | -         const epochProvingState = provingState.parentEpoch;
1035      | -
1036      | -         if (epochProvingState.totalNumCheckpoints === 1) {
1037      | -           this.enqueueEpochPadding(epochProvingState);
1038      | -         } else {
1039      | -           this.checkAndEnqueueNextCheckpointMergeRollup(epochProvingState, leafLocation);
1040      | -         }
1041      | -       },
1042      | -     );
1043      | -   }
1044      | -
1045      | -   private enqueueCheckpointMergeRollup(provingState: EpochProvingState, location: TreeNodeLocation) {
1046      | -     if (!provingState.verifyState()) {
1047      | -       logger.debug('Not running checkpoint merge rollup. State no longer valid.');
1048      | -       return;
1049      | -     }
1050      | -
1051      | -     if (!provingState.tryStartProvingCheckpointMerge(location)) {
1052      | -       logger.debug('Checkpoint merge rollup already started.');
1053      | -       return;
1054      | -     }
1055      | -
1056      | -     const inputs = provingState.getCheckpointMergeRollupInputs(location);
1057      | -
1058      | -     this.deferredProving(
1059      | -       provingState,
1060      | -       wrapCallbackInSpan(
1061      | -         this.tracer,
1062      | -         'ProvingOrchestrator.prover.getCheckpointMergeRollupProof',
1063      | -         {
1064      | -           [Attributes.PROTOCOL_CIRCUIT_NAME]: 'rollup-checkpoint-merge' satisfies CircuitName,
1065      | -         },
1066      | -         signal => this.prover.getCheckpointMergeRollupProof(inputs, signal, provingState.epochNumber),
1067      | -       ),
1068      | -       result => {
1069      | -         logger.debug('Completed proof for checkpoint merge rollup.');
1070      | -         provingState.setCheckpointMergeRollupProof(location, result);
1071      | -         this.checkAndEnqueueNextCheckpointMergeRollup(provingState, location);
1072      | -       },
1073      | -     );
1074      | -   }
1075      | -
1076  842 |     private enqueueEpochPadding(provingState: EpochProvingState) {
1077  843 |       if (!provingState.verifyState()) {
1078  844 |         logger.debug('Not running epoch padding. State no longer valid.');
1079  845 |         return;
1080  846 |       }
1081  847 |
1082      | -     if (!provingState.tryStartProvingPaddingCheckpoint()) {
1083      | -       logger.debug('Padding checkpoint already started.');
1084      | -       return;
1085      | -     }
1086      | -
1087  848 |       logger.debug('Padding epoch proof with a padding block root proof.');
1088  849 |
1089      | -     const inputs = provingState.
      850 | +     const inputs = provingState.getPaddingBlockRootInputs();
1090  851 |
1091  852 |       this.deferredProving(
1092  853 |         provingState,
1093  854 |         wrapCallbackInSpan(
1094  855 |           this.tracer,
1095      | -         'ProvingOrchestrator.prover.
      856 | +         'ProvingOrchestrator.prover.getPaddingBlockRootRollupProof',
1096  857 |           {
1097      | -           [Attributes.PROTOCOL_CIRCUIT_NAME]: '
      858 | +           [Attributes.PROTOCOL_CIRCUIT_NAME]: 'padding-block-root-rollup' satisfies CircuitName,
1098  859 |           },
1099      | -         signal => this.prover.
      860 | +         signal => this.prover.getPaddingBlockRootRollupProof(inputs, signal, provingState.epochNumber),
1100  861 |         ),
1101  862 |         result => {
1102      | -         logger.debug('Completed proof for padding
1103      | -         provingState.
      863 | +         logger.debug('Completed proof for padding block root.');
      864 | +         provingState.setPaddingBlockRootProof(result);
1104  865 |           this.checkAndEnqueueRootRollup(provingState);
1105  866 |         },
1106  867 |       );
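All of the enqueue methods in this hunk share one shape: the prover call is wrapped in a tracing span tagged with the circuit name, handed to `deferredProving`, and the completion callback stores the proof and kicks off the next rollup stage. A minimal sketch of the span-wrapping step, assuming simplified `Tracer`/`Span` shapes rather than the actual telemetry API:

    // Hedged sketch only: Tracer and Span here are simplified stand-ins.
    interface Span {
      setAttributes(attrs: Record<string, string>): void;
      end(): void;
    }

    interface Tracer {
      startSpan(name: string): Span;
    }

    // Returns a callback with the same signature whose execution is timed by a span.
    function wrapCallbackInSpanSketch<TArgs extends unknown[], TResult>(
      tracer: Tracer,
      spanName: string,
      attributes: Record<string, string>,
      callback: (...args: TArgs) => Promise<TResult>,
    ): (...args: TArgs) => Promise<TResult> {
      return async (...args: TArgs) => {
        const span = tracer.startSpan(spanName);
        span.setAttributes(attributes);
        try {
          // The wrapped prover call runs inside the span so its duration is recorded.
          return await callback(...args);
        } finally {
          span.end();
        }
      };
    }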
@@ -1123,7 +884,7 @@ export class ProvingOrchestrator implements EpochProver {
1123  884 |           this.tracer,
1124  885 |           'ProvingOrchestrator.prover.getRootRollupProof',
1125  886 |           {
1126      | -           [Attributes.PROTOCOL_CIRCUIT_NAME]: 'rollup
      887 | +           [Attributes.PROTOCOL_CIRCUIT_NAME]: 'root-rollup' satisfies CircuitName,
1127  888 |           },
1128  889 |           signal => this.prover.getRootRollupProof(inputs, signal, provingState.epochNumber),
1129  890 |         ),
@@ -1135,51 +896,48 @@ export class ProvingOrchestrator implements EpochProver {
1135  896 |       );
1136  897 |     }
1137  898 |
1138      | -   private checkAndEnqueueNextMergeRollup(provingState: BlockProvingState, currentLocation: TreeNodeLocation) {
      899 | +   private async checkAndEnqueueNextMergeRollup(provingState: BlockProvingState, currentLocation: TreeNodeLocation) {
1139  900 |       if (!provingState.isReadyForMergeRollup(currentLocation)) {
1140  901 |         return;
1141  902 |       }
1142  903 |
1143  904 |       const parentLocation = provingState.getParentLocation(currentLocation);
1144  905 |       if (parentLocation.level === 0) {
1145      | -       this.checkAndEnqueueBlockRootRollup(provingState);
      906 | +       await this.checkAndEnqueueBlockRootRollup(provingState);
1146  907 |       } else {
1147  908 |         this.enqueueMergeRollup(provingState, parentLocation);
1148  909 |       }
1149  910 |     }
1150  911 |
1151      | -   private checkAndEnqueueBlockRootRollup(provingState: BlockProvingState) {
      912 | +   private async checkAndEnqueueBlockRootRollup(provingState: BlockProvingState) {
      913 | +     const blockNumber = provingState.blockNumber;
      914 | +     // Accumulate as far as we can, in case blocks came in out of order and we are behind:
      915 | +     await this.provingState?.setBlobAccumulators(blockNumber);
1152  916 |       if (!provingState.isReadyForBlockRootRollup()) {
1153  917 |         logger.debug('Not ready for block root rollup');
1154  918 |         return;
1155  919 |       }
1156      | -
1157      | -
1158      | -     }
1159      | -
1160      | -   private checkAndEnqueueNextBlockMergeRollup(provingState: CheckpointProvingState, currentLocation: TreeNodeLocation) {
1161      | -     if (!provingState.isReadyForBlockMerge(currentLocation)) {
      920 | +     if (provingState.blockRootRollupStarted) {
      921 | +       logger.debug('Block root rollup already started');
1162  922 |         return;
1163  923 |       }
1164  924 |
1165      | -
1166      | -
1167      | -
1168      | -
1169      | -
1170      | -
1171      | -
1172      | -
1173      | -
1174      | -
1175      | -       return;
1176      | -     }
      925 | +     // TODO(palla/prover): This closes the fork only on the happy path. If this epoch orchestrator
      926 | +     // is aborted and never reaches this point, it will leak the fork. We need to add a global cleanup,
      927 | +     // but have to make sure it only runs once all operations are completed, otherwise some function here
      928 | +     // will attempt to access the fork after it was closed.
      929 | +     logger.debug(`Cleaning up world state fork for ${blockNumber}`);
      930 | +     void this.dbs
      931 | +       .get(blockNumber)
      932 | +       ?.close()
      933 | +       .then(() => this.dbs.delete(blockNumber))
      934 | +       .catch(err => logger.error(`Error closing db for block ${blockNumber}`, err));
1177  935 |
1178      | -     this.
      936 | +     await this.enqueueBlockRootRollup(provingState);
1179  937 |     }
1180  938 |
1181      | -   private
1182      | -     if (!provingState.
      939 | +   private checkAndEnqueueNextBlockMergeRollup(provingState: EpochProvingState, currentLocation: TreeNodeLocation) {
      940 | +     if (!provingState.isReadyForBlockMerge(currentLocation)) {
1183  941 |         return;
1184  942 |       }
1185  943 |
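Before enqueueing the block root rollup, the new code also tears down the per-block world state fork with a fire-and-forget promise chain. A minimal sketch of that cleanup, assuming a `WorldStateFork` shape with only the `close()` seen above:

    // Hedged sketch: the close/delete chain is deliberately not awaited (`void`),
    // so a slow or failing close cannot block the block root rollup from being enqueued.
    interface WorldStateFork {
      close(): Promise<void>;
    }

    function cleanUpForkSketch(dbs: Map<number, WorldStateFork>, blockNumber: number) {
      void dbs
        .get(blockNumber)
        ?.close()
        .then(() => dbs.delete(blockNumber))
        .catch(err => console.error(`Error closing db for block ${blockNumber}`, err));
    }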
@@ -1187,7 +945,7 @@ export class ProvingOrchestrator implements EpochProver {
1187  945 |       if (parentLocation.level === 0) {
1188  946 |         this.checkAndEnqueueRootRollup(provingState);
1189  947 |       } else {
1190      | -       this.
      948 | +       this.enqueueBlockMergeRollup(provingState, parentLocation);
1191  949 |       }
1192  950 |     }
1193  951 |
@@ -1254,17 +1012,17 @@ export class ProvingOrchestrator implements EpochProver {
1254 1012 |       this.deferredProving(provingState, doAvmProving, proofAndVk => {
1255 1013 |         logger.debug(`Proven VM for tx index: ${txIndex}`);
1256 1014 |         txProvingState.setAvmProof(proofAndVk);
1257      | -       this.
     1015 | +       this.checkAndEnqueueNextTxCircuit(provingState, txIndex);
1258 1016 |       });
1259 1017 |     }
1260 1018 |
1261      | -   private
     1019 | +   private checkAndEnqueueNextTxCircuit(provingState: BlockProvingState, txIndex: number) {
1262 1020 |       const txProvingState = provingState.getTxProvingState(txIndex);
1263 1021 |       if (!txProvingState.ready()) {
1264 1022 |         return;
1265 1023 |       }
1266 1024 |
1267      | -     // We must have completed all proving (
     1025 | +     // We must have completed all proving (tube proof and (if required) vm proof are generated), we now move to the base rollup.
1268 1026 |       logger.debug(`Public functions completed for tx ${txIndex} enqueueing base rollup`);
1269 1027 |
1270 1028 |       this.enqueueBaseRollup(provingState, txIndex);