@aztec/prover-client 3.0.0-canary.a9708bd → 3.0.0-manual.20251030

This diff shows the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (74)
  1. package/dest/block-factory/light.d.ts +5 -3
  2. package/dest/block-factory/light.d.ts.map +1 -1
  3. package/dest/block-factory/light.js +16 -9
  4. package/dest/config.js +1 -1
  5. package/dest/mocks/fixtures.d.ts +4 -1
  6. package/dest/mocks/fixtures.d.ts.map +1 -1
  7. package/dest/mocks/fixtures.js +31 -3
  8. package/dest/mocks/test_context.d.ts +32 -9
  9. package/dest/mocks/test_context.d.ts.map +1 -1
  10. package/dest/mocks/test_context.js +78 -22
  11. package/dest/orchestrator/block-building-helpers.d.ts +33 -31
  12. package/dest/orchestrator/block-building-helpers.d.ts.map +1 -1
  13. package/dest/orchestrator/block-building-helpers.js +126 -137
  14. package/dest/orchestrator/block-proving-state.d.ts +60 -53
  15. package/dest/orchestrator/block-proving-state.d.ts.map +1 -1
  16. package/dest/orchestrator/block-proving-state.js +214 -187
  17. package/dest/orchestrator/checkpoint-proving-state.d.ts +63 -0
  18. package/dest/orchestrator/checkpoint-proving-state.d.ts.map +1 -0
  19. package/dest/orchestrator/checkpoint-proving-state.js +211 -0
  20. package/dest/orchestrator/epoch-proving-state.d.ts +34 -28
  21. package/dest/orchestrator/epoch-proving-state.d.ts.map +1 -1
  22. package/dest/orchestrator/epoch-proving-state.js +128 -84
  23. package/dest/orchestrator/orchestrator.d.ts +31 -30
  24. package/dest/orchestrator/orchestrator.d.ts.map +1 -1
  25. package/dest/orchestrator/orchestrator.js +368 -236
  26. package/dest/orchestrator/tx-proving-state.d.ts +11 -9
  27. package/dest/orchestrator/tx-proving-state.d.ts.map +1 -1
  28. package/dest/orchestrator/tx-proving-state.js +26 -23
  29. package/dest/prover-client/server-epoch-prover.d.ts +9 -8
  30. package/dest/prover-client/server-epoch-prover.d.ts.map +1 -1
  31. package/dest/prover-client/server-epoch-prover.js +9 -9
  32. package/dest/proving_broker/broker_prover_facade.d.ts +20 -15
  33. package/dest/proving_broker/broker_prover_facade.d.ts.map +1 -1
  34. package/dest/proving_broker/broker_prover_facade.js +36 -21
  35. package/dest/proving_broker/config.d.ts +8 -8
  36. package/dest/proving_broker/config.js +5 -5
  37. package/dest/proving_broker/factory.js +1 -1
  38. package/dest/proving_broker/fixtures.js +1 -1
  39. package/dest/proving_broker/proof_store/index.d.ts +1 -0
  40. package/dest/proving_broker/proof_store/index.d.ts.map +1 -1
  41. package/dest/proving_broker/proof_store/index.js +1 -0
  42. package/dest/proving_broker/proving_broker.d.ts.map +1 -1
  43. package/dest/proving_broker/proving_broker.js +29 -18
  44. package/dest/proving_broker/proving_broker_database/persisted.js +5 -5
  45. package/dest/proving_broker/proving_job_controller.d.ts.map +1 -1
  46. package/dest/proving_broker/proving_job_controller.js +38 -18
  47. package/dest/test/mock_prover.d.ts +22 -17
  48. package/dest/test/mock_prover.d.ts.map +1 -1
  49. package/dest/test/mock_prover.js +35 -20
  50. package/package.json +16 -17
  51. package/src/block-factory/light.ts +35 -9
  52. package/src/config.ts +1 -1
  53. package/src/mocks/fixtures.ts +39 -11
  54. package/src/mocks/test_context.ts +137 -31
  55. package/src/orchestrator/block-building-helpers.ts +211 -211
  56. package/src/orchestrator/block-proving-state.ts +235 -245
  57. package/src/orchestrator/checkpoint-proving-state.ts +299 -0
  58. package/src/orchestrator/epoch-proving-state.ts +172 -127
  59. package/src/orchestrator/orchestrator.ts +545 -303
  60. package/src/orchestrator/tx-proving-state.ts +49 -43
  61. package/src/prover-client/server-epoch-prover.ts +28 -18
  62. package/src/proving_broker/broker_prover_facade.ts +157 -86
  63. package/src/proving_broker/config.ts +7 -7
  64. package/src/proving_broker/factory.ts +1 -1
  65. package/src/proving_broker/fixtures.ts +1 -1
  66. package/src/proving_broker/proof_store/index.ts +1 -0
  67. package/src/proving_broker/proving_broker.ts +36 -18
  68. package/src/proving_broker/proving_broker_database/persisted.ts +5 -5
  69. package/src/proving_broker/proving_job_controller.ts +38 -18
  70. package/src/test/mock_prover.ts +142 -60
  71. package/dest/bin/get-proof-inputs.d.ts +0 -2
  72. package/dest/bin/get-proof-inputs.d.ts.map +0 -1
  73. package/dest/bin/get-proof-inputs.js +0 -51
  74. package/src/bin/get-proof-inputs.ts +0 -59
package/src/orchestrator/block-building-helpers.ts
@@ -1,38 +1,50 @@
-import { BatchedBlobAccumulator, Blob, type SpongeBlob } from '@aztec/blob-lib';
+import {
+  BatchedBlob,
+  BatchedBlobAccumulator,
+  SpongeBlob,
+  computeBlobsHashFromBlobs,
+  getBlobCommitmentsFromBlobs,
+  getBlobsPerL1Block,
+} from '@aztec/blob-lib';
 import {
   ARCHIVE_HEIGHT,
+  CHONK_PROOF_LENGTH,
   MAX_CONTRACT_CLASS_LOGS_PER_TX,
   MAX_NOTE_HASHES_PER_TX,
   MAX_NULLIFIERS_PER_TX,
   NOTE_HASH_SUBTREE_HEIGHT,
-  NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH,
+  NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
   NULLIFIER_SUBTREE_HEIGHT,
-  NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH,
+  NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
   NULLIFIER_TREE_HEIGHT,
   NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP,
   PUBLIC_DATA_TREE_HEIGHT,
 } from '@aztec/constants';
 import { makeTuple } from '@aztec/foundation/array';
 import { padArrayEnd } from '@aztec/foundation/collection';
-import { sha256ToField, sha256Trunc } from '@aztec/foundation/crypto';
-import { BLS12Point, Fr } from '@aztec/foundation/fields';
-import { type Tuple, assertLength, toFriendlyJSON } from '@aztec/foundation/serialize';
-import { MembershipWitness, MerkleTreeCalculator, computeUnbalancedMerkleTreeRoot } from '@aztec/foundation/trees';
-import { getVKTreeRoot } from '@aztec/noir-protocol-circuits-types/vk-tree';
-import { protocolContractTreeRoot } from '@aztec/protocol-contracts';
+import { sha256Trunc } from '@aztec/foundation/crypto';
+import { Fr } from '@aztec/foundation/fields';
+import { type Bufferable, type Tuple, assertLength, toFriendlyJSON } from '@aztec/foundation/serialize';
+import {
+  MembershipWitness,
+  MerkleTreeCalculator,
+  computeCompressedUnbalancedMerkleTreeRoot,
+} from '@aztec/foundation/trees';
+import { getVkData } from '@aztec/noir-protocol-circuits-types/server/vks';
+import { getVKIndex, getVKSiblingPath } from '@aztec/noir-protocol-circuits-types/vk-tree';
 import { computeFeePayerBalanceLeafSlot } from '@aztec/protocol-contracts/fee-juice';
-import { PublicDataHint } from '@aztec/stdlib/avm';
-import { Body } from '@aztec/stdlib/block';
-import type { MerkleTreeWriteOperations } from '@aztec/stdlib/interfaces/server';
+import { Body, L2BlockHeader, getBlockBlobFields } from '@aztec/stdlib/block';
+import { getCheckpointBlobFields } from '@aztec/stdlib/checkpoint';
+import type { MerkleTreeWriteOperations, PublicInputsAndRecursiveProof } from '@aztec/stdlib/interfaces/server';
 import { ContractClassLogFields } from '@aztec/stdlib/logs';
-import type { ParityPublicInputs } from '@aztec/stdlib/parity';
+import { Proof, ProofData, RecursiveProof } from '@aztec/stdlib/proofs';
 import {
-  type BaseOrMergeRollupPublicInputs,
   BlockConstantData,
-  type BlockRootOrBlockMergePublicInputs,
+  BlockRollupPublicInputs,
   PrivateBaseRollupHints,
-  PrivateBaseStateDiffHints,
   PublicBaseRollupHints,
+  PublicChonkVerifierPrivateInputs,
+  TreeSnapshotDiffHints,
 } from '@aztec/stdlib/rollup';
 import {
   AppendOnlyTreeSnapshot,
@@ -45,12 +57,13 @@ import {
 import {
   BlockHeader,
   ContentCommitment,
-  type GlobalVariables,
+  GlobalVariables,
   PartialStateReference,
   type ProcessedTx,
   StateReference,
-  TxEffect,
+  Tx,
 } from '@aztec/stdlib/tx';
+import { VkData } from '@aztec/stdlib/vks';
 import { Attributes, type Span, runInSpan } from '@aztec/telemetry-client';
 import type { MerkleTreeReadOperations } from '@aztec/world-state';
 
@@ -70,28 +83,24 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
   async (
     span: Span,
     tx: ProcessedTx,
-    globalVariables: GlobalVariables,
+    lastArchive: AppendOnlyTreeSnapshot,
     newL1ToL2MessageTreeSnapshot: AppendOnlyTreeSnapshot,
-    db: MerkleTreeWriteOperations,
     startSpongeBlob: SpongeBlob,
+    proverId: Fr,
+    db: MerkleTreeWriteOperations,
   ) => {
     span.setAttribute(Attributes.TX_HASH, tx.hash.toString());
     // Get trees info before any changes hit
-    const lastArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
     const start = new PartialStateReference(
       await getTreeSnapshot(MerkleTreeId.NOTE_HASH_TREE, db),
       await getTreeSnapshot(MerkleTreeId.NULLIFIER_TREE, db),
       await getTreeSnapshot(MerkleTreeId.PUBLIC_DATA_TREE, db),
     );
-    // Get the subtree sibling paths for the circuit
-    const noteHashSubtreeSiblingPathArray = await getSubtreeSiblingPath(
-      MerkleTreeId.NOTE_HASH_TREE,
-      NOTE_HASH_SUBTREE_HEIGHT,
-      db,
-    );
 
-    const noteHashSubtreeSiblingPath = makeTuple(NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH, i =>
-      i < noteHashSubtreeSiblingPathArray.length ? noteHashSubtreeSiblingPathArray[i] : Fr.ZERO,
+    // Get the note hash subtree root sibling path for insertion.
+    const noteHashSubtreeRootSiblingPath = assertLength(
+      await getSubtreeSiblingPath(MerkleTreeId.NOTE_HASH_TREE, NOTE_HASH_SUBTREE_HEIGHT, db),
+      NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
     );
 
     // Update the note hash trees with the new items being inserted to get the new roots
@@ -99,10 +108,6 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
     const noteHashes = padArrayEnd(tx.txEffect.noteHashes, Fr.ZERO, MAX_NOTE_HASHES_PER_TX);
     await db.appendLeaves(MerkleTreeId.NOTE_HASH_TREE, noteHashes);
 
-    // Create data hint for reading fee payer initial balance in Fee Juice
-    const leafSlot = await computeFeePayerBalanceLeafSlot(tx.data.feePayer);
-    const feePayerFeeJuiceBalanceReadHint = await getPublicDataHint(db, leafSlot.toBigInt());
-
     // The read witnesses for a given TX should be generated before the writes of the same TX are applied.
     // All reads that refer to writes in the same tx are transient and can be simplified out.
     const txPublicDataUpdateRequestInfo = await processPublicDataUpdateRequests(tx, db);
@@ -110,8 +115,8 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
     // Update the nullifier tree, capturing the low nullifier info for each individual operation
     const {
       lowLeavesWitnessData: nullifierWitnessLeaves,
-      newSubtreeSiblingPath: nullifiersSubtreeSiblingPath,
-      sortedNewLeaves: sortednullifiers,
+      newSubtreeSiblingPath: nullifiersSubtreeRootSiblingPath,
+      sortedNewLeaves: sortedNullifiers,
       sortedNewLeavesIndexes,
     } = await db.batchInsert(
       MerkleTreeId.NULLIFIER_TREE,
@@ -123,21 +128,10 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
       throw new Error(`Could not craft nullifier batch insertion proofs`);
     }
 
-    // Extract witness objects from returned data
-    const nullifierPredecessorMembershipWitnessesWithoutPadding: MembershipWitness<typeof NULLIFIER_TREE_HEIGHT>[] =
-      nullifierWitnessLeaves.map(l =>
-        MembershipWitness.fromBufferArray(l.index, assertLength(l.siblingPath.toBufferArray(), NULLIFIER_TREE_HEIGHT)),
-      );
-
-    const nullifierSubtreeSiblingPathArray = nullifiersSubtreeSiblingPath.toFields();
-
-    const nullifierSubtreeSiblingPath = makeTuple(NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH, i =>
-      i < nullifierSubtreeSiblingPathArray.length ? nullifierSubtreeSiblingPathArray[i] : Fr.ZERO,
-    );
-
-    // Append new data to startSpongeBlob
-    const inputSpongeBlob = startSpongeBlob.clone();
-    await startSpongeBlob.absorb(tx.txEffect.toBlobFields());
+    const blockHash = await tx.data.constants.anchorBlockHeader.hash();
+    const anchorBlockArchiveSiblingPath = (
+      await getMembershipWitnessFor(blockHash, MerkleTreeId.ARCHIVE, ARCHIVE_HEIGHT, db)
+    ).siblingPath;
 
     const contractClassLogsFields = makeTuple(
       MAX_CONTRACT_CLASS_LOGS_PER_TX,
@@ -145,18 +139,10 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
     );
 
     if (tx.avmProvingRequest) {
-      const blockHash = await tx.data.constants.historicalHeader.hash();
-      const archiveRootMembershipWitness = await getMembershipWitnessFor(
-        blockHash,
-        MerkleTreeId.ARCHIVE,
-        ARCHIVE_HEIGHT,
-        db,
-      );
-
       return PublicBaseRollupHints.from({
-        startSpongeBlob: inputSpongeBlob,
+        startSpongeBlob,
         lastArchive,
-        archiveRootMembershipWitness,
+        anchorBlockArchiveSiblingPath,
         contractClassLogsFields,
       });
     } else {
@@ -168,57 +154,62 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
         throw new Error(`More than one public data write in a private only tx`);
       }
 
-      const feeWriteLowLeafPreimage =
-        txPublicDataUpdateRequestInfo.lowPublicDataWritesPreimages[0] || PublicDataTreeLeafPreimage.empty();
-      const feeWriteLowLeafMembershipWitness =
-        txPublicDataUpdateRequestInfo.lowPublicDataWritesMembershipWitnesses[0] ||
-        MembershipWitness.empty<typeof PUBLIC_DATA_TREE_HEIGHT>(PUBLIC_DATA_TREE_HEIGHT);
-      const feeWriteSiblingPath =
-        txPublicDataUpdateRequestInfo.publicDataWritesSiblingPaths[0] ||
-        makeTuple(PUBLIC_DATA_TREE_HEIGHT, () => Fr.ZERO);
-
-      const stateDiffHints = PrivateBaseStateDiffHints.from({
-        nullifierPredecessorPreimages: makeTuple(MAX_NULLIFIERS_PER_TX, i =>
-          i < nullifierWitnessLeaves.length
-            ? (nullifierWitnessLeaves[i].leafPreimage as NullifierLeafPreimage)
-            : NullifierLeafPreimage.empty(),
+      // Get hints for reading fee payer's balance in the public data tree.
+      const feePayerBalanceMembershipWitness = txPublicDataUpdateRequestInfo.lowPublicDataWritesMembershipWitnesses[0];
+      const feePayerBalanceLeafPreimage = txPublicDataUpdateRequestInfo.lowPublicDataWritesPreimages[0];
+      const leafSlot = await computeFeePayerBalanceLeafSlot(tx.data.feePayer);
+      if (!feePayerBalanceMembershipWitness || !leafSlot.equals(feePayerBalanceLeafPreimage?.leaf.slot)) {
+        throw new Error(`Cannot find the public data tree leaf for the fee payer's balance`);
+      }
+
+      // Extract witness objects from returned data
+      const nullifierPredecessorMembershipWitnessesWithoutPadding: MembershipWitness<typeof NULLIFIER_TREE_HEIGHT>[] =
+        nullifierWitnessLeaves.map(l =>
+          MembershipWitness.fromBufferArray(
+            l.index,
+            assertLength(l.siblingPath.toBufferArray(), NULLIFIER_TREE_HEIGHT),
+          ),
+        );
+
+      const treeSnapshotDiffHints = TreeSnapshotDiffHints.from({
+        noteHashSubtreeRootSiblingPath,
+        nullifierPredecessorPreimages: padArrayEnd(
+          nullifierWitnessLeaves.map(l => l.leafPreimage as NullifierLeafPreimage),
+          NullifierLeafPreimage.empty(),
+          MAX_NULLIFIERS_PER_TX,
         ),
         nullifierPredecessorMembershipWitnesses: makeTuple(MAX_NULLIFIERS_PER_TX, i =>
           i < nullifierPredecessorMembershipWitnessesWithoutPadding.length
            ? nullifierPredecessorMembershipWitnessesWithoutPadding[i]
            : makeEmptyMembershipWitness(NULLIFIER_TREE_HEIGHT),
         ),
-        sortedNullifiers: makeTuple(MAX_NULLIFIERS_PER_TX, i => Fr.fromBuffer(sortednullifiers[i])),
-        sortedNullifierIndexes: makeTuple(MAX_NULLIFIERS_PER_TX, i => sortedNewLeavesIndexes[i]),
-        noteHashSubtreeSiblingPath,
-        nullifierSubtreeSiblingPath,
-        feeWriteLowLeafPreimage,
-        feeWriteLowLeafMembershipWitness,
-        feeWriteSiblingPath,
+        sortedNullifiers: assertLength(
+          sortedNullifiers.map(n => Fr.fromBuffer(n)),
+          MAX_NULLIFIERS_PER_TX,
+        ),
+        sortedNullifierIndexes: assertLength(sortedNewLeavesIndexes, MAX_NULLIFIERS_PER_TX),
+        nullifierSubtreeRootSiblingPath: assertLength(
+          nullifiersSubtreeRootSiblingPath.toFields(),
+          NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
+        ),
+        feePayerBalanceMembershipWitness,
       });
 
-      const blockHash = await tx.data.constants.historicalHeader.hash();
-      const archiveRootMembershipWitness = await getMembershipWitnessFor(
-        blockHash,
-        MerkleTreeId.ARCHIVE,
-        ARCHIVE_HEIGHT,
-        db,
-      );
-
       const constants = BlockConstantData.from({
         lastArchive,
-        newL1ToL2: newL1ToL2MessageTreeSnapshot,
-        vkTreeRoot: getVKTreeRoot(),
-        protocolContractTreeRoot,
-        globalVariables,
+        l1ToL2TreeSnapshot: newL1ToL2MessageTreeSnapshot,
+        vkTreeRoot: tx.data.constants.vkTreeRoot,
+        protocolContractsHash: tx.data.constants.protocolContractsHash,
+        globalVariables: tx.globalVariables,
+        proverId,
      });
 
      return PrivateBaseRollupHints.from({
        start,
-        startSpongeBlob: inputSpongeBlob,
-        stateDiffHints,
-        feePayerFeeJuiceBalanceReadHint,
-        archiveRootMembershipWitness,
+        startSpongeBlob,
+        treeSnapshotDiffHints,
+        feePayerBalanceLeafPreimage,
+        anchorBlockArchiveSiblingPath,
        contractClassLogsFields,
        constants,
      });
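Note on the reworked signature above: the caller now supplies the last archive snapshot, the sponge blob, and the prover id instead of the helper reading them from the db. A minimal sketch of a call site under that assumption; `worldStateFork`, `newL1ToL2MessageTreeSnapshot`, `totalBlobFields`, `tx`, and `proverId` are placeholders, not values taken from this package:

    // Hypothetical caller of the reworked helper; argument order per the diff above.
    const db = worldStateFork; // assumed: a MerkleTreeWriteOperations fork of world state
    const lastArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
    const startSpongeBlob = await SpongeBlob.init(totalBlobFields); // assumed checkpoint-wide field count
    const hints = await insertSideEffectsAndBuildBaseRollupHints(
      tx, // ProcessedTx; globals now come from tx.globalVariables
      lastArchive, // previously fetched inside the helper, now supplied by the caller
      newL1ToL2MessageTreeSnapshot,
      startSpongeBlob, // passed straight into the hints; no internal clone/absorb anymore
      proverId, // new: baked into BlockConstantData
      db,
    );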
@@ -226,50 +217,53 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
   },
 );
 
-export async function getPublicDataHint(db: MerkleTreeWriteOperations, leafSlot: bigint) {
-  const { index } = (await db.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot)) ?? {};
-  if (index === undefined) {
-    throw new Error(`Cannot find the previous value index for public data ${leafSlot}.`);
-  }
-
-  const siblingPath = await db.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, index);
-  const membershipWitness = new MembershipWitness(PUBLIC_DATA_TREE_HEIGHT, index, siblingPath.toTuple());
-
-  const leafPreimage = (await db.getLeafPreimage(MerkleTreeId.PUBLIC_DATA_TREE, index)) as PublicDataTreeLeafPreimage;
-  if (!leafPreimage) {
-    throw new Error(`Cannot find the leaf preimage for public data tree at index ${index}.`);
-  }
+export function getChonkProofFromTx(tx: Tx | ProcessedTx) {
+  const publicInputs = tx.data.publicInputs().toFields();
 
-  const exists = leafPreimage.leaf.slot.toBigInt() === leafSlot;
-  const value = exists ? leafPreimage.leaf.value : Fr.ZERO;
+  const binaryProof = new Proof(
+    Buffer.concat(tx.chonkProof.attachPublicInputs(publicInputs).fieldsWithPublicInputs.map(field => field.toBuffer())),
+    publicInputs.length,
+  );
+  return new RecursiveProof(tx.chonkProof.fields, binaryProof, true, CHONK_PROOF_LENGTH);
+}
 
-  return new PublicDataHint(new Fr(leafSlot), value, membershipWitness, leafPreimage);
+export function getPublicChonkVerifierPrivateInputsFromTx(tx: Tx | ProcessedTx, proverId: Fr) {
+  const proofData = new ProofData(
+    tx.data.toPrivateToPublicKernelCircuitPublicInputs(),
+    getChonkProofFromTx(tx),
+    getVkData('HidingKernelToPublic'),
+  );
+  return new PublicChonkVerifierPrivateInputs(proofData, proverId);
 }
 
-export const buildBlobHints = runInSpan(
-  'BlockBuilderHelpers',
-  'buildBlobHints',
-  async (_span: Span, txEffects: TxEffect[]) => {
-    const blobFields = txEffects.flatMap(tx => tx.toBlobFields());
-    const blobs = await Blob.getBlobsPerBlock(blobFields);
-    // TODO(#13430): The blobsHash is confusingly similar to blobCommitmentsHash, calculated from below blobCommitments:
-    // - blobsHash := sha256([blobhash_0, ..., blobhash_m]) = a hash of all blob hashes in a block with m+1 blobs inserted into the header, exists so a user can cross check blobs.
-    // - blobCommitmentsHash := sha256( ...sha256(sha256(C_0), C_1) ... C_n) = iteratively calculated hash of all blob commitments in an epoch with n+1 blobs (see calculateBlobCommitmentsHash()),
-    //   exists so we can validate injected commitments to the rollup circuits correspond to the correct real blobs.
-    // We may be able to combine these values e.g. blobCommitmentsHash := sha256( ...sha256(sha256(blobshash_0), blobshash_1) ... blobshash_l) for an epoch with l+1 blocks.
-    const blobCommitments = blobs.map(b => BLS12Point.decompress(b.commitment));
-    const blobsHash = new Fr(getBlobsHashFromBlobs(blobs));
-    return { blobFields, blobCommitments, blobs, blobsHash };
-  },
-);
+// Build "hints" as the private inputs for the checkpoint root rollup circuit.
+// The `blobCommitments` will be accumulated and checked in the root rollup against the `finalBlobChallenges`.
+// The `blobsHash` will be validated on L1 against the submitted blob data.
+export const buildBlobHints = (blobFields: Fr[]) => {
+  const blobs = getBlobsPerL1Block(blobFields);
+  const blobCommitments = getBlobCommitmentsFromBlobs(blobs);
+  const blobsHash = computeBlobsHashFromBlobs(blobs);
+  return { blobCommitments, blobs, blobsHash };
+};
+
+// Build the data required to prove the txs in an epoch. Currently only used in tests. It assumes 1 block per checkpoint.
+export const buildBlobDataFromTxs = async (txsPerCheckpoint: ProcessedTx[][]) => {
+  const blobFields = txsPerCheckpoint.map(txs => getCheckpointBlobFields([txs.map(tx => tx.txEffect)]));
+  const finalBlobChallenges = await buildFinalBlobChallenges(blobFields);
+  return { blobFieldsLengths: blobFields.map(fields => fields.length), finalBlobChallenges };
+};
+
+export const buildFinalBlobChallenges = async (blobFieldsPerCheckpoint: Fr[][]) => {
+  const blobs = blobFieldsPerCheckpoint.map(blobFields => getBlobsPerL1Block(blobFields));
+  return await BatchedBlob.precomputeBatchedBlobChallenges(blobs);
+};
 
 export const accumulateBlobs = runInSpan(
   'BlockBuilderHelpers',
   'accumulateBlobs',
-  async (_span: Span, txs: ProcessedTx[], startBlobAccumulator: BatchedBlobAccumulator) => {
-    const blobFields = txs.flatMap(tx => tx.txEffect.toBlobFields());
-    const blobs = await Blob.getBlobsPerBlock(blobFields);
-    const endBlobAccumulator = startBlobAccumulator.accumulateBlobs(blobs);
+  async (_span: Span, blobFields: Fr[], startBlobAccumulator: BatchedBlobAccumulator) => {
+    const blobs = getBlobsPerL1Block(blobFields);
+    const endBlobAccumulator = await startBlobAccumulator.accumulateBlobs(blobs);
     return endBlobAccumulator;
   },
 );
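For orientation, a hedged sketch tying the new helpers above together: building the public-chonk verifier inputs for one tx, and the blob data and hints for an epoch. Signatures are exactly those shown in this diff; `tx`, `proverId`, and `txsPerCheckpoint` are placeholders:

    // Per tx: wrap the chonk proof as private inputs for the public chonk verifier circuit.
    const verifierInputs = getPublicChonkVerifierPrivateInputsFromTx(tx, proverId);

    // Per epoch (test-only helper; one block per checkpoint): field lengths plus final challenges.
    const { blobFieldsLengths, finalBlobChallenges } = await buildBlobDataFromTxs(txsPerCheckpoint);

    // Per checkpoint: the commitments and blobsHash the rollup circuits and L1 will check.
    const checkpointFields = getCheckpointBlobFields([txsPerCheckpoint[0].map(tx => tx.txEffect)]);
    const { blobCommitments, blobs, blobsHash } = buildBlobHints(checkpointFields);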
@@ -277,36 +271,28 @@ export const accumulateBlobs = runInSpan(
 export const buildHeaderFromCircuitOutputs = runInSpan(
   'BlockBuilderHelpers',
   'buildHeaderFromCircuitOutputs',
-  (
-    _span,
-    previousRollupData: BaseOrMergeRollupPublicInputs[],
-    parityPublicInputs: ParityPublicInputs,
-    rootRollupOutputs: BlockRootOrBlockMergePublicInputs,
-    blobsHash: Fr,
-    endState: StateReference,
-  ) => {
-    if (previousRollupData.length > 2) {
-      throw new Error(`There can't be more than 2 previous rollups. Received ${previousRollupData.length}.`);
-    }
-
-    const outHash =
-      previousRollupData.length === 0
-        ? Fr.ZERO
-        : previousRollupData.length === 1
-          ? previousRollupData[0].outHash
-          : sha256ToField([previousRollupData[0].outHash, previousRollupData[1].outHash]);
-    const contentCommitment = new ContentCommitment(blobsHash, parityPublicInputs.shaRoot, outHash);
+  async (_span, blockRootRollupOutput: BlockRollupPublicInputs) => {
+    const constants = blockRootRollupOutput.constants;
+    const globalVariables = GlobalVariables.from({
+      chainId: constants.chainId,
+      version: constants.version,
+      blockNumber: blockRootRollupOutput.previousArchive.nextAvailableLeafIndex,
+      timestamp: blockRootRollupOutput.endTimestamp,
+      slotNumber: constants.slotNumber,
+      coinbase: constants.coinbase,
+      feeRecipient: constants.feeRecipient,
+      gasFees: constants.gasFees,
+    });
 
-    const accumulatedFees = previousRollupData.reduce((sum, d) => sum.add(d.accumulatedFees), Fr.ZERO);
-    const accumulatedManaUsed = previousRollupData.reduce((sum, d) => sum.add(d.accumulatedManaUsed), Fr.ZERO);
+    const spongeBlobHash = await blockRootRollupOutput.endSpongeBlob.clone().squeeze();
 
     return new BlockHeader(
-      rootRollupOutputs.previousArchive,
-      contentCommitment,
-      endState,
-      rootRollupOutputs.endGlobalVariables,
-      accumulatedFees,
-      accumulatedManaUsed,
+      blockRootRollupOutput.previousArchive,
+      blockRootRollupOutput.endState,
+      spongeBlobHash,
+      globalVariables,
+      blockRootRollupOutput.accumulatedFees,
+      blockRootRollupOutput.accumulatedManaUsed,
     );
   },
 );
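One detail worth noting in the rewritten buildHeaderFromCircuitOutputs: the block number is recovered from the archive snapshot rather than passed in, which works because the archive tree holds one leaf per block header, so the previous archive's next free leaf index is this block's number. A minimal sketch of the new single-argument call, assuming `blockRootRollupOutput` came back from a block-root rollup proof:

    // The header is now derived entirely from the block-root rollup's public inputs.
    const header = await buildHeaderFromCircuitOutputs(blockRootRollupOutput);
    // header.globalVariables.blockNumber === blockRootRollupOutput.previousArchive.nextAvailableLeafIndex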
@@ -320,6 +306,7 @@ export const buildHeaderAndBodyFromTxs = runInSpan(
     globalVariables: GlobalVariables,
     l1ToL2Messages: Fr[],
     db: MerkleTreeReadOperations,
+    startSpongeBlob?: SpongeBlob,
   ) => {
     span.setAttribute(Attributes.BLOCK_NUMBER, globalVariables.blockNumber);
     const stateReference = new StateReference(
@@ -337,25 +324,75 @@ export const buildHeaderAndBodyFromTxs = runInSpan(
     const body = new Body(txEffects);
 
     const txOutHashes = txEffects.map(tx => tx.txOutHash());
-    const outHash = txOutHashes.length === 0 ? Fr.ZERO : new Fr(computeUnbalancedMerkleTreeRoot(txOutHashes));
+    const outHash = txOutHashes.length === 0 ? Fr.ZERO : new Fr(computeCompressedUnbalancedMerkleTreeRoot(txOutHashes));
 
     const parityShaRoot = await computeInHashFromL1ToL2Messages(l1ToL2Messages);
-    const blobsHash = getBlobsHashFromBlobs(await Blob.getBlobsPerBlock(body.toBlobFields()));
+    const blockBlobFields = body.toBlobFields();
+    // TODO(#17027): This only works when there's one block per checkpoint.
+    const blobFields = [new Fr(blockBlobFields.length + 1)].concat(blockBlobFields);
+    const blobsHash = computeBlobsHashFromBlobs(getBlobsPerL1Block(blobFields));
 
     const contentCommitment = new ContentCommitment(blobsHash, parityShaRoot, outHash);
 
     const fees = txEffects.reduce((acc, tx) => acc.add(tx.transactionFee), Fr.ZERO);
     const manaUsed = txs.reduce((acc, tx) => acc.add(new Fr(tx.gasUsed.billedGas.l2Gas)), Fr.ZERO);
 
-    const header = new BlockHeader(previousArchive, contentCommitment, stateReference, globalVariables, fees, manaUsed);
+    const endSpongeBlob = startSpongeBlob?.clone() ?? (await SpongeBlob.init(blobFields.length));
+    await endSpongeBlob.absorb(blockBlobFields);
+    const spongeBlobHash = await endSpongeBlob.squeeze();
+
+    const header = new L2BlockHeader(
+      previousArchive,
+      contentCommitment,
+      stateReference,
+      globalVariables,
+      fees,
+      manaUsed,
+      spongeBlobHash,
+    );
 
     return { header, body };
   },
 );
 
+export const buildBlockHeaderFromTxs = runInSpan(
+  'BlockBuilderHelpers',
+  'buildBlockHeaderFromTxs',
+  async (
+    span,
+    txs: ProcessedTx[],
+    globalVariables: GlobalVariables,
+    startSpongeBlob: SpongeBlob,
+    db: MerkleTreeReadOperations,
+  ) => {
+    span.setAttribute(Attributes.BLOCK_NUMBER, globalVariables.blockNumber);
+    const stateReference = new StateReference(
+      await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db),
+      new PartialStateReference(
+        await getTreeSnapshot(MerkleTreeId.NOTE_HASH_TREE, db),
+        await getTreeSnapshot(MerkleTreeId.NULLIFIER_TREE, db),
+        await getTreeSnapshot(MerkleTreeId.PUBLIC_DATA_TREE, db),
+      ),
+    );
+
+    const previousArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
+
+    const blobFields = getBlockBlobFields(txs.map(tx => tx.txEffect));
+    const endSpongeBlob = startSpongeBlob.clone();
+    await endSpongeBlob.absorb(blobFields);
+    const spongeBlobHash = await endSpongeBlob.squeeze();
+
+    const txEffects = txs.map(tx => tx.txEffect);
+    const fees = txEffects.reduce((acc, tx) => acc.add(tx.transactionFee), Fr.ZERO);
+    const manaUsed = txs.reduce((acc, tx) => acc.add(new Fr(tx.gasUsed.billedGas.l2Gas)), Fr.ZERO);
+
+    return new BlockHeader(previousArchive, stateReference, spongeBlobHash, globalVariables, fees, manaUsed);
+  },
+);
+
 /** Computes the inHash for a block's ContentCommitment given its l1 to l2 messages. */
 export async function computeInHashFromL1ToL2Messages(unpaddedL1ToL2Messages: Fr[]): Promise<Fr> {
-  const l1ToL2Messages = padArrayEnd(unpaddedL1ToL2Messages, Fr.ZERO, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP);
+  const l1ToL2Messages = padArrayEnd<Fr, number>(unpaddedL1ToL2Messages, Fr.ZERO, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP);
   const hasher = (left: Buffer, right: Buffer) =>
     Promise.resolve(sha256Trunc(Buffer.concat([left, right])) as Buffer<ArrayBuffer>);
   const parityHeight = Math.ceil(Math.log2(NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP));
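The new buildBlockHeaderFromTxs above is the sponge-blob-aware counterpart of buildHeaderAndBodyFromTxs: a caller can thread one SpongeBlob through consecutive blocks so each squeezed hash commits to everything absorbed so far. A hedged usage sketch; the two-block flow and `checkpointBlobFieldsLength` are assumptions, not code from this package:

    // Hypothetical two-block flow sharing one sponge; the init size is an assumption.
    const sponge = await SpongeBlob.init(checkpointBlobFieldsLength);
    const header1 = await buildBlockHeaderFromTxs(block1Txs, globalVariables1, sponge, db);
    // buildBlockHeaderFromTxs clones the sponge internally, so advance the caller's copy explicitly.
    await sponge.absorb(getBlockBlobFields(block1Txs.map(tx => tx.txEffect)));
    const header2 = await buildBlockHeaderFromTxs(block2Txs, globalVariables2, sponge, db);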
@@ -363,52 +400,6 @@ export async function computeInHashFromL1ToL2Messages(unpaddedL1ToL2Messages: Fr
   return new Fr(await parityCalculator.computeTreeRoot(l1ToL2Messages.map(msg => msg.toBuffer())));
 }
 
-export function getBlobsHashFromBlobs(inputs: Blob[]): Fr {
-  return sha256ToField(inputs.map(b => b.getEthVersionedBlobHash()));
-}
-
-// Note: tested against the constant values in block_root/empty_block_root_rollup_inputs.nr, set by block_building_helpers.test.ts.
-// Having this separate fn hopefully makes it clear how we treat empty blocks and their blobs, and won't break if we decide to change how
-// getBlobsPerBlock() works on empty input.
-export async function getEmptyBlockBlobsHash(): Promise<Fr> {
-  const blobHash = (await Blob.getBlobsPerBlock([])).map(b => b.getEthVersionedBlobHash());
-  return sha256ToField(blobHash);
-}
-
-// Validate that the roots of all local trees match the output of the root circuit simulation
-// TODO: does this get called?
-export async function validateBlockRootOutput(
-  blockRootOutput: BlockRootOrBlockMergePublicInputs,
-  blockHeader: BlockHeader,
-  db: MerkleTreeReadOperations,
-) {
-  await Promise.all([
-    validateState(blockHeader.state, db),
-    validateSimulatedTree(await getTreeSnapshot(MerkleTreeId.ARCHIVE, db), blockRootOutput.newArchive, 'Archive'),
-  ]);
-}
-
-export const validateState = runInSpan(
-  'BlockBuilderHelpers',
-  'validateState',
-  async (_span, state: StateReference, db: MerkleTreeReadOperations) => {
-    const promises = [MerkleTreeId.NOTE_HASH_TREE, MerkleTreeId.NULLIFIER_TREE, MerkleTreeId.PUBLIC_DATA_TREE].map(
-      async (id: MerkleTreeId) => {
-        return { key: id, value: await getTreeSnapshot(id, db) };
-      },
-    );
-    const snapshots: Map<MerkleTreeId, AppendOnlyTreeSnapshot> = new Map(
-      (await Promise.all(promises)).map(obj => [obj.key, obj.value]),
-    );
-    validatePartialState(state.partial, snapshots);
-    validateSimulatedTree(
-      await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db),
-      state.l1ToL2MessageTree,
-      'L1ToL2MessageTree',
-    );
-  },
-);
-
 export async function getLastSiblingPath<TID extends MerkleTreeId>(treeId: TID, db: MerkleTreeReadOperations) {
   const { size } = await db.getTreeInfo(treeId);
   const path = await db.getSiblingPath(treeId, size - 1n);
@@ -541,7 +532,7 @@ function validateSimulatedTree(
 }
 
 export function validateTx(tx: ProcessedTx) {
-  const txHeader = tx.data.constants.historicalHeader;
+  const txHeader = tx.data.constants.anchorBlockHeader;
   if (txHeader.state.l1ToL2MessageTree.isEmpty()) {
     throw new Error(`Empty L1 to L2 messages tree in tx: ${toFriendlyJSON(tx)}`);
   }
@@ -555,3 +546,12 @@ export function validateTx(tx: ProcessedTx) {
     throw new Error(`Empty public data tree in tx: ${toFriendlyJSON(tx)}`);
   }
 }
+
+export function toProofData<T extends Bufferable, PROOF_LENGTH extends number>(
+  { inputs, proof, verificationKey }: PublicInputsAndRecursiveProof<T, PROOF_LENGTH>,
+  vkIndex?: number,
+) {
+  const leafIndex = vkIndex || getVKIndex(verificationKey.keyAsFields);
+  const vkData = new VkData(verificationKey, leafIndex, getVKSiblingPath(leafIndex));
+  return new ProofData(inputs, proof, vkData);
+}
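Finally, a hedged sketch of the new toProofData helper: it packages a circuit's recursive proof with its verification-key membership data for the next rollup level. `rollupOutput` is assumed to be the PublicInputsAndRecursiveProof returned by some prover call, and `vkIndex` a vk-tree leaf the caller already knows:

    // vk index resolved via getVKIndex from the proof's verification key.
    const proofData = toProofData(rollupOutput);
    // Or pin the vk tree leaf explicitly when the caller already knows it:
    const pinnedProofData = toProofData(rollupOutput, vkIndex);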