@aztec/prover-client 0.0.1-fake-c83136db25 → 0.0.1-fake-ceab37513c

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/dest/bin/get-proof-inputs.d.ts +2 -0
  2. package/dest/bin/get-proof-inputs.d.ts.map +1 -0
  3. package/dest/bin/get-proof-inputs.js +51 -0
  4. package/dest/block-factory/light.d.ts +3 -5
  5. package/dest/block-factory/light.d.ts.map +1 -1
  6. package/dest/block-factory/light.js +9 -16
  7. package/dest/config.js +1 -1
  8. package/dest/mocks/fixtures.d.ts +1 -4
  9. package/dest/mocks/fixtures.d.ts.map +1 -1
  10. package/dest/mocks/fixtures.js +3 -31
  11. package/dest/mocks/test_context.d.ts +9 -32
  12. package/dest/mocks/test_context.d.ts.map +1 -1
  13. package/dest/mocks/test_context.js +22 -78
  14. package/dest/orchestrator/block-building-helpers.d.ts +31 -33
  15. package/dest/orchestrator/block-building-helpers.d.ts.map +1 -1
  16. package/dest/orchestrator/block-building-helpers.js +137 -126
  17. package/dest/orchestrator/block-proving-state.d.ts +53 -60
  18. package/dest/orchestrator/block-proving-state.d.ts.map +1 -1
  19. package/dest/orchestrator/block-proving-state.js +187 -214
  20. package/dest/orchestrator/epoch-proving-state.d.ts +28 -34
  21. package/dest/orchestrator/epoch-proving-state.d.ts.map +1 -1
  22. package/dest/orchestrator/epoch-proving-state.js +84 -128
  23. package/dest/orchestrator/orchestrator.d.ts +30 -31
  24. package/dest/orchestrator/orchestrator.d.ts.map +1 -1
  25. package/dest/orchestrator/orchestrator.js +236 -368
  26. package/dest/orchestrator/tx-proving-state.d.ts +9 -11
  27. package/dest/orchestrator/tx-proving-state.d.ts.map +1 -1
  28. package/dest/orchestrator/tx-proving-state.js +23 -26
  29. package/dest/prover-client/server-epoch-prover.d.ts +8 -9
  30. package/dest/prover-client/server-epoch-prover.d.ts.map +1 -1
  31. package/dest/prover-client/server-epoch-prover.js +9 -9
  32. package/dest/proving_broker/broker_prover_facade.d.ts +15 -20
  33. package/dest/proving_broker/broker_prover_facade.d.ts.map +1 -1
  34. package/dest/proving_broker/broker_prover_facade.js +21 -36
  35. package/dest/proving_broker/fixtures.js +1 -1
  36. package/dest/proving_broker/proof_store/index.d.ts +0 -1
  37. package/dest/proving_broker/proof_store/index.d.ts.map +1 -1
  38. package/dest/proving_broker/proof_store/index.js +0 -1
  39. package/dest/proving_broker/proving_broker.d.ts.map +1 -1
  40. package/dest/proving_broker/proving_broker.js +18 -29
  41. package/dest/proving_broker/proving_job_controller.d.ts.map +1 -1
  42. package/dest/proving_broker/proving_job_controller.js +18 -38
  43. package/dest/test/mock_prover.d.ts +17 -22
  44. package/dest/test/mock_prover.d.ts.map +1 -1
  45. package/dest/test/mock_prover.js +20 -35
  46. package/package.json +17 -16
  47. package/src/bin/get-proof-inputs.ts +59 -0
  48. package/src/block-factory/light.ts +9 -35
  49. package/src/config.ts +1 -1
  50. package/src/mocks/fixtures.ts +11 -39
  51. package/src/mocks/test_context.ts +31 -137
  52. package/src/orchestrator/block-building-helpers.ts +211 -211
  53. package/src/orchestrator/block-proving-state.ts +245 -235
  54. package/src/orchestrator/epoch-proving-state.ts +127 -172
  55. package/src/orchestrator/orchestrator.ts +303 -545
  56. package/src/orchestrator/tx-proving-state.ts +43 -49
  57. package/src/prover-client/server-epoch-prover.ts +18 -28
  58. package/src/proving_broker/broker_prover_facade.ts +86 -157
  59. package/src/proving_broker/fixtures.ts +1 -1
  60. package/src/proving_broker/proof_store/index.ts +0 -1
  61. package/src/proving_broker/proving_broker.ts +18 -36
  62. package/src/proving_broker/proving_job_controller.ts +18 -38
  63. package/src/test/mock_prover.ts +60 -142
  64. package/dest/orchestrator/checkpoint-proving-state.d.ts +0 -63
  65. package/dest/orchestrator/checkpoint-proving-state.d.ts.map +0 -1
  66. package/dest/orchestrator/checkpoint-proving-state.js +0 -211
  67. package/src/orchestrator/checkpoint-proving-state.ts +0 -299
package/src/orchestrator/block-building-helpers.ts
@@ -1,50 +1,38 @@
-import {
-  BatchedBlob,
-  BatchedBlobAccumulator,
-  SpongeBlob,
-  computeBlobsHashFromBlobs,
-  getBlobCommitmentsFromBlobs,
-  getBlobsPerL1Block,
-} from '@aztec/blob-lib';
+import { BatchedBlobAccumulator, Blob, type SpongeBlob } from '@aztec/blob-lib';
 import {
   ARCHIVE_HEIGHT,
-  CHONK_PROOF_LENGTH,
   MAX_CONTRACT_CLASS_LOGS_PER_TX,
   MAX_NOTE_HASHES_PER_TX,
   MAX_NULLIFIERS_PER_TX,
   NOTE_HASH_SUBTREE_HEIGHT,
-  NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
+  NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH,
   NULLIFIER_SUBTREE_HEIGHT,
-  NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
+  NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH,
   NULLIFIER_TREE_HEIGHT,
   NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP,
   PUBLIC_DATA_TREE_HEIGHT,
 } from '@aztec/constants';
 import { makeTuple } from '@aztec/foundation/array';
 import { padArrayEnd } from '@aztec/foundation/collection';
-import { sha256Trunc } from '@aztec/foundation/crypto';
-import { Fr } from '@aztec/foundation/fields';
-import { type Bufferable, type Tuple, assertLength, toFriendlyJSON } from '@aztec/foundation/serialize';
-import {
-  MembershipWitness,
-  MerkleTreeCalculator,
-  computeCompressedUnbalancedMerkleTreeRoot,
-} from '@aztec/foundation/trees';
-import { getVkData } from '@aztec/noir-protocol-circuits-types/server/vks';
-import { getVKIndex, getVKSiblingPath } from '@aztec/noir-protocol-circuits-types/vk-tree';
+import { sha256ToField, sha256Trunc } from '@aztec/foundation/crypto';
+import { BLS12Point, Fr } from '@aztec/foundation/fields';
+import { type Tuple, assertLength, toFriendlyJSON } from '@aztec/foundation/serialize';
+import { MembershipWitness, MerkleTreeCalculator, computeUnbalancedMerkleTreeRoot } from '@aztec/foundation/trees';
+import { getVKTreeRoot } from '@aztec/noir-protocol-circuits-types/vk-tree';
+import { protocolContractTreeRoot } from '@aztec/protocol-contracts';
 import { computeFeePayerBalanceLeafSlot } from '@aztec/protocol-contracts/fee-juice';
-import { Body, L2BlockHeader, getBlockBlobFields } from '@aztec/stdlib/block';
-import { getCheckpointBlobFields } from '@aztec/stdlib/checkpoint';
-import type { MerkleTreeWriteOperations, PublicInputsAndRecursiveProof } from '@aztec/stdlib/interfaces/server';
+import { PublicDataHint } from '@aztec/stdlib/avm';
+import { Body } from '@aztec/stdlib/block';
+import type { MerkleTreeWriteOperations } from '@aztec/stdlib/interfaces/server';
 import { ContractClassLogFields } from '@aztec/stdlib/logs';
-import { Proof, ProofData, RecursiveProof } from '@aztec/stdlib/proofs';
+import type { ParityPublicInputs } from '@aztec/stdlib/parity';
 import {
+  type BaseOrMergeRollupPublicInputs,
   BlockConstantData,
-  BlockRollupPublicInputs,
+  type BlockRootOrBlockMergePublicInputs,
   PrivateBaseRollupHints,
+  PrivateBaseStateDiffHints,
   PublicBaseRollupHints,
-  PublicChonkVerifierPrivateInputs,
-  TreeSnapshotDiffHints,
 } from '@aztec/stdlib/rollup';
 import {
   AppendOnlyTreeSnapshot,
@@ -57,13 +45,12 @@ import {
 import {
   BlockHeader,
   ContentCommitment,
-  GlobalVariables,
+  type GlobalVariables,
   PartialStateReference,
   type ProcessedTx,
   StateReference,
-  Tx,
+  TxEffect,
 } from '@aztec/stdlib/tx';
-import { VkData } from '@aztec/stdlib/vks';
 import { Attributes, type Span, runInSpan } from '@aztec/telemetry-client';
 import type { MerkleTreeReadOperations } from '@aztec/world-state';
 
@@ -83,24 +70,28 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
   async (
     span: Span,
     tx: ProcessedTx,
-    lastArchive: AppendOnlyTreeSnapshot,
+    globalVariables: GlobalVariables,
     newL1ToL2MessageTreeSnapshot: AppendOnlyTreeSnapshot,
-    startSpongeBlob: SpongeBlob,
-    proverId: Fr,
     db: MerkleTreeWriteOperations,
+    startSpongeBlob: SpongeBlob,
   ) => {
     span.setAttribute(Attributes.TX_HASH, tx.hash.toString());
     // Get trees info before any changes hit
+    const lastArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
     const start = new PartialStateReference(
       await getTreeSnapshot(MerkleTreeId.NOTE_HASH_TREE, db),
       await getTreeSnapshot(MerkleTreeId.NULLIFIER_TREE, db),
       await getTreeSnapshot(MerkleTreeId.PUBLIC_DATA_TREE, db),
     );
+    // Get the subtree sibling paths for the circuit
+    const noteHashSubtreeSiblingPathArray = await getSubtreeSiblingPath(
+      MerkleTreeId.NOTE_HASH_TREE,
+      NOTE_HASH_SUBTREE_HEIGHT,
+      db,
+    );
 
-    // Get the note hash subtree root sibling path for insertion.
-    const noteHashSubtreeRootSiblingPath = assertLength(
-      await getSubtreeSiblingPath(MerkleTreeId.NOTE_HASH_TREE, NOTE_HASH_SUBTREE_HEIGHT, db),
-      NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
+    const noteHashSubtreeSiblingPath = makeTuple(NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH, i =>
+      i < noteHashSubtreeSiblingPathArray.length ? noteHashSubtreeSiblingPathArray[i] : Fr.ZERO,
     );
 
     // Update the note hash trees with the new items being inserted to get the new roots
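Note on the hunk above: the makeTuple padding converts a variable-length sibling path into the fixed-length tuple the base rollup circuit expects, zero-filling the tail; the same idiom recurs below for the nullifier subtree path and the state diff hints. A minimal standalone sketch of the idiom, using simplified stand-ins for Fr and makeTuple rather than the real @aztec/foundation implementations:

// Simplified stand-ins (assumptions, not the real types).
type Fr = bigint;
const FR_ZERO: Fr = 0n;

// Build a fixed-length array by calling fn for each index, like makeTuple.
function makeTupleSketch<T>(length: number, fn: (i: number) => T): T[] {
  return Array.from({ length }, (_, i) => fn(i));
}

// Pad a dynamic sibling path to the circuit's fixed length.
function padSiblingPath(path: Fr[], targetLength: number): Fr[] {
  return makeTupleSketch(targetLength, i => (i < path.length ? path[i] : FR_ZERO));
}

// padSiblingPath([1n, 2n], 4) yields [1n, 2n, 0n, 0n]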
@@ -108,6 +99,10 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
     const noteHashes = padArrayEnd(tx.txEffect.noteHashes, Fr.ZERO, MAX_NOTE_HASHES_PER_TX);
     await db.appendLeaves(MerkleTreeId.NOTE_HASH_TREE, noteHashes);
 
+    // Create data hint for reading fee payer initial balance in Fee Juice
+    const leafSlot = await computeFeePayerBalanceLeafSlot(tx.data.feePayer);
+    const feePayerFeeJuiceBalanceReadHint = await getPublicDataHint(db, leafSlot.toBigInt());
+
     // The read witnesses for a given TX should be generated before the writes of the same TX are applied.
     // All reads that refer to writes in the same tx are transient and can be simplified out.
     const txPublicDataUpdateRequestInfo = await processPublicDataUpdateRequests(tx, db);
@@ -115,8 +110,8 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
     // Update the nullifier tree, capturing the low nullifier info for each individual operation
     const {
       lowLeavesWitnessData: nullifierWitnessLeaves,
-      newSubtreeSiblingPath: nullifiersSubtreeRootSiblingPath,
-      sortedNewLeaves: sortedNullifiers,
+      newSubtreeSiblingPath: nullifiersSubtreeSiblingPath,
+      sortedNewLeaves: sortednullifiers,
       sortedNewLeavesIndexes,
     } = await db.batchInsert(
       MerkleTreeId.NULLIFIER_TREE,
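Note on the batchInsert call above: it returns one low-leaf ("predecessor") witness per inserted value because the nullifier tree is an indexed Merkle tree, where non-membership of a new value is proven against the closest existing leaf below it. A rough sketch of the predecessor search only; IndexedLeafSketch is a made-up leaf shape, not the real witness type, and the tree update itself is omitted:

// Hypothetical simplified model of an indexed-tree leaf.
interface IndexedLeafSketch {
  value: bigint; // the stored value
  nextValue: bigint; // value of the next-higher leaf, 0n if none
}

// Find the leaf whose value is the largest one strictly below newValue.
// Its (value, nextValue) range is what proves newValue is not yet in the tree.
function findLowLeaf(leaves: IndexedLeafSketch[], newValue: bigint): IndexedLeafSketch {
  const candidates = leaves.filter(l => l.value < newValue);
  if (candidates.length === 0) {
    throw new Error(`No low leaf found for ${newValue}`);
  }
  return candidates.reduce((best, l) => (l.value > best.value ? l : best));
}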
@@ -128,10 +123,21 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
       throw new Error(`Could not craft nullifier batch insertion proofs`);
     }
 
-    const blockHash = await tx.data.constants.anchorBlockHeader.hash();
-    const anchorBlockArchiveSiblingPath = (
-      await getMembershipWitnessFor(blockHash, MerkleTreeId.ARCHIVE, ARCHIVE_HEIGHT, db)
-    ).siblingPath;
+    // Extract witness objects from returned data
+    const nullifierPredecessorMembershipWitnessesWithoutPadding: MembershipWitness<typeof NULLIFIER_TREE_HEIGHT>[] =
+      nullifierWitnessLeaves.map(l =>
+        MembershipWitness.fromBufferArray(l.index, assertLength(l.siblingPath.toBufferArray(), NULLIFIER_TREE_HEIGHT)),
+      );
+
+    const nullifierSubtreeSiblingPathArray = nullifiersSubtreeSiblingPath.toFields();
+
+    const nullifierSubtreeSiblingPath = makeTuple(NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH, i =>
+      i < nullifierSubtreeSiblingPathArray.length ? nullifierSubtreeSiblingPathArray[i] : Fr.ZERO,
+    );
+
+    // Append new data to startSpongeBlob
+    const inputSpongeBlob = startSpongeBlob.clone();
+    await startSpongeBlob.absorb(tx.txEffect.toBlobFields());
 
     const contractClassLogsFields = makeTuple(
       MAX_CONTRACT_CLASS_LOGS_PER_TX,
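Note on the hunk above: the clone-before-absorb step is what separates the circuit's input sponge from the running sponge. inputSpongeBlob snapshots the state before this tx, while startSpongeBlob absorbs the tx's blob fields so the next tx starts from the updated state. A toy sketch of the pattern; SpongeSketch is a hypothetical accumulator, not the real SpongeBlob:

// Hypothetical accumulator standing in for SpongeBlob.
class SpongeSketch {
  constructor(private fields: bigint[] = []) {}
  clone(): SpongeSketch {
    return new SpongeSketch([...this.fields]);
  }
  absorb(newFields: bigint[]): void {
    this.fields.push(...newFields);
  }
}

const running = new SpongeSketch();
// Per tx:
const inputForCircuit = running.clone(); // state before this tx, handed to the circuit
running.absorb([1n, 2n, 3n]); // running state advances for the next tx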
@@ -139,10 +145,18 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
     );
 
     if (tx.avmProvingRequest) {
+      const blockHash = await tx.data.constants.historicalHeader.hash();
+      const archiveRootMembershipWitness = await getMembershipWitnessFor(
+        blockHash,
+        MerkleTreeId.ARCHIVE,
+        ARCHIVE_HEIGHT,
+        db,
+      );
+
       return PublicBaseRollupHints.from({
-        startSpongeBlob,
+        startSpongeBlob: inputSpongeBlob,
         lastArchive,
-        anchorBlockArchiveSiblingPath,
+        archiveRootMembershipWitness,
         contractClassLogsFields,
       });
     } else {
@@ -154,62 +168,57 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
        throw new Error(`More than one public data write in a private only tx`);
      }
 
-      // Get hints for reading fee payer's balance in the public data tree.
-      const feePayerBalanceMembershipWitness = txPublicDataUpdateRequestInfo.lowPublicDataWritesMembershipWitnesses[0];
-      const feePayerBalanceLeafPreimage = txPublicDataUpdateRequestInfo.lowPublicDataWritesPreimages[0];
-      const leafSlot = await computeFeePayerBalanceLeafSlot(tx.data.feePayer);
-      if (!feePayerBalanceMembershipWitness || !leafSlot.equals(feePayerBalanceLeafPreimage?.leaf.slot)) {
-        throw new Error(`Cannot find the public data tree leaf for the fee payer's balance`);
-      }
-
-      // Extract witness objects from returned data
-      const nullifierPredecessorMembershipWitnessesWithoutPadding: MembershipWitness<typeof NULLIFIER_TREE_HEIGHT>[] =
-        nullifierWitnessLeaves.map(l =>
-          MembershipWitness.fromBufferArray(
-            l.index,
-            assertLength(l.siblingPath.toBufferArray(), NULLIFIER_TREE_HEIGHT),
-          ),
-        );
-
-      const treeSnapshotDiffHints = TreeSnapshotDiffHints.from({
-        noteHashSubtreeRootSiblingPath,
-        nullifierPredecessorPreimages: padArrayEnd(
-          nullifierWitnessLeaves.map(l => l.leafPreimage as NullifierLeafPreimage),
-          NullifierLeafPreimage.empty(),
-          MAX_NULLIFIERS_PER_TX,
+      const feeWriteLowLeafPreimage =
+        txPublicDataUpdateRequestInfo.lowPublicDataWritesPreimages[0] || PublicDataTreeLeafPreimage.empty();
+      const feeWriteLowLeafMembershipWitness =
+        txPublicDataUpdateRequestInfo.lowPublicDataWritesMembershipWitnesses[0] ||
+        MembershipWitness.empty<typeof PUBLIC_DATA_TREE_HEIGHT>(PUBLIC_DATA_TREE_HEIGHT);
+      const feeWriteSiblingPath =
+        txPublicDataUpdateRequestInfo.publicDataWritesSiblingPaths[0] ||
+        makeTuple(PUBLIC_DATA_TREE_HEIGHT, () => Fr.ZERO);
+
+      const stateDiffHints = PrivateBaseStateDiffHints.from({
+        nullifierPredecessorPreimages: makeTuple(MAX_NULLIFIERS_PER_TX, i =>
+          i < nullifierWitnessLeaves.length
+            ? (nullifierWitnessLeaves[i].leafPreimage as NullifierLeafPreimage)
+            : NullifierLeafPreimage.empty(),
        ),
        nullifierPredecessorMembershipWitnesses: makeTuple(MAX_NULLIFIERS_PER_TX, i =>
          i < nullifierPredecessorMembershipWitnessesWithoutPadding.length
            ? nullifierPredecessorMembershipWitnessesWithoutPadding[i]
            : makeEmptyMembershipWitness(NULLIFIER_TREE_HEIGHT),
        ),
-        sortedNullifiers: assertLength(
-          sortedNullifiers.map(n => Fr.fromBuffer(n)),
-          MAX_NULLIFIERS_PER_TX,
-        ),
-        sortedNullifierIndexes: assertLength(sortedNewLeavesIndexes, MAX_NULLIFIERS_PER_TX),
-        nullifierSubtreeRootSiblingPath: assertLength(
-          nullifiersSubtreeRootSiblingPath.toFields(),
-          NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
-        ),
-        feePayerBalanceMembershipWitness,
+        sortedNullifiers: makeTuple(MAX_NULLIFIERS_PER_TX, i => Fr.fromBuffer(sortednullifiers[i])),
+        sortedNullifierIndexes: makeTuple(MAX_NULLIFIERS_PER_TX, i => sortedNewLeavesIndexes[i]),
+        noteHashSubtreeSiblingPath,
+        nullifierSubtreeSiblingPath,
+        feeWriteLowLeafPreimage,
+        feeWriteLowLeafMembershipWitness,
+        feeWriteSiblingPath,
      });
 
+      const blockHash = await tx.data.constants.historicalHeader.hash();
+      const archiveRootMembershipWitness = await getMembershipWitnessFor(
+        blockHash,
+        MerkleTreeId.ARCHIVE,
+        ARCHIVE_HEIGHT,
+        db,
+      );
+
      const constants = BlockConstantData.from({
        lastArchive,
-        l1ToL2TreeSnapshot: newL1ToL2MessageTreeSnapshot,
-        vkTreeRoot: tx.data.constants.vkTreeRoot,
-        protocolContractsHash: tx.data.constants.protocolContractsHash,
-        globalVariables: tx.globalVariables,
-        proverId,
+        newL1ToL2: newL1ToL2MessageTreeSnapshot,
+        vkTreeRoot: getVKTreeRoot(),
+        protocolContractTreeRoot,
+        globalVariables,
      });
 
      return PrivateBaseRollupHints.from({
        start,
-        startSpongeBlob,
-        treeSnapshotDiffHints,
-        feePayerBalanceLeafPreimage,
-        anchorBlockArchiveSiblingPath,
+        startSpongeBlob: inputSpongeBlob,
+        stateDiffHints,
+        feePayerFeeJuiceBalanceReadHint,
+        archiveRootMembershipWitness,
        contractClassLogsFields,
        constants,
      });
@@ -217,53 +226,50 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
   },
 );
 
-export function getChonkProofFromTx(tx: Tx | ProcessedTx) {
-  const publicInputs = tx.data.publicInputs().toFields();
+export async function getPublicDataHint(db: MerkleTreeWriteOperations, leafSlot: bigint) {
+  const { index } = (await db.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot)) ?? {};
+  if (index === undefined) {
+    throw new Error(`Cannot find the previous value index for public data ${leafSlot}.`);
+  }
 
-  const binaryProof = new Proof(
-    Buffer.concat(tx.chonkProof.attachPublicInputs(publicInputs).fieldsWithPublicInputs.map(field => field.toBuffer())),
-    publicInputs.length,
-  );
-  return new RecursiveProof(tx.chonkProof.fields, binaryProof, true, CHONK_PROOF_LENGTH);
-}
+  const siblingPath = await db.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, index);
+  const membershipWitness = new MembershipWitness(PUBLIC_DATA_TREE_HEIGHT, index, siblingPath.toTuple());
 
-export function getPublicChonkVerifierPrivateInputsFromTx(tx: Tx | ProcessedTx, proverId: Fr) {
-  const proofData = new ProofData(
-    tx.data.toPrivateToPublicKernelCircuitPublicInputs(),
-    getChonkProofFromTx(tx),
-    getVkData('HidingKernelToPublic'),
-  );
-  return new PublicChonkVerifierPrivateInputs(proofData, proverId);
+  const leafPreimage = (await db.getLeafPreimage(MerkleTreeId.PUBLIC_DATA_TREE, index)) as PublicDataTreeLeafPreimage;
+  if (!leafPreimage) {
+    throw new Error(`Cannot find the leaf preimage for public data tree at index ${index}.`);
+  }
+
+  const exists = leafPreimage.leaf.slot.toBigInt() === leafSlot;
+  const value = exists ? leafPreimage.leaf.value : Fr.ZERO;
+
+  return new PublicDataHint(new Fr(leafSlot), value, membershipWitness, leafPreimage);
 }
 
-// Build "hints" as the private inputs for the checkpoint root rollup circuit.
-// The `blobCommitments` will be accumulated and checked in the root rollup against the `finalBlobChallenges`.
-// The `blobsHash` will be validated on L1 against the submitted blob data.
-export const buildBlobHints = (blobFields: Fr[]) => {
-  const blobs = getBlobsPerL1Block(blobFields);
-  const blobCommitments = getBlobCommitmentsFromBlobs(blobs);
-  const blobsHash = computeBlobsHashFromBlobs(blobs);
-  return { blobCommitments, blobs, blobsHash };
-};
-
-// Build the data required to prove the txs in an epoch. Currently only used in tests. It assumes 1 block per checkpoint.
-export const buildBlobDataFromTxs = async (txsPerCheckpoint: ProcessedTx[][]) => {
-  const blobFields = txsPerCheckpoint.map(txs => getCheckpointBlobFields([txs.map(tx => tx.txEffect)]));
-  const finalBlobChallenges = await buildFinalBlobChallenges(blobFields);
-  return { blobFieldsLengths: blobFields.map(fields => fields.length), finalBlobChallenges };
-};
-
-export const buildFinalBlobChallenges = async (blobFieldsPerCheckpoint: Fr[][]) => {
-  const blobs = blobFieldsPerCheckpoint.map(blobFields => getBlobsPerL1Block(blobFields));
-  return await BatchedBlob.precomputeBatchedBlobChallenges(blobs);
-};
+export const buildBlobHints = runInSpan(
+  'BlockBuilderHelpers',
+  'buildBlobHints',
+  async (_span: Span, txEffects: TxEffect[]) => {
+    const blobFields = txEffects.flatMap(tx => tx.toBlobFields());
+    const blobs = await Blob.getBlobsPerBlock(blobFields);
+    // TODO(#13430): The blobsHash is confusingly similar to blobCommitmentsHash, calculated from below blobCommitments:
+    // - blobsHash := sha256([blobhash_0, ..., blobhash_m]) = a hash of all blob hashes in a block with m+1 blobs inserted into the header, exists so a user can cross check blobs.
+    // - blobCommitmentsHash := sha256( ...sha256(sha256(C_0), C_1) ... C_n) = iteratively calculated hash of all blob commitments in an epoch with n+1 blobs (see calculateBlobCommitmentsHash()),
+    //   exists so we can validate injected commitments to the rollup circuits correspond to the correct real blobs.
+    // We may be able to combine these values e.g. blobCommitmentsHash := sha256( ...sha256(sha256(blobshash_0), blobshash_1) ... blobshash_l) for an epoch with l+1 blocks.
+    const blobCommitments = blobs.map(b => BLS12Point.decompress(b.commitment));
+    const blobsHash = new Fr(getBlobsHashFromBlobs(blobs));
+    return { blobFields, blobCommitments, blobs, blobsHash };
+  },
+);
 
 export const accumulateBlobs = runInSpan(
   'BlockBuilderHelpers',
   'accumulateBlobs',
-  async (_span: Span, blobFields: Fr[], startBlobAccumulator: BatchedBlobAccumulator) => {
-    const blobs = getBlobsPerL1Block(blobFields);
-    const endBlobAccumulator = await startBlobAccumulator.accumulateBlobs(blobs);
+  async (_span: Span, txs: ProcessedTx[], startBlobAccumulator: BatchedBlobAccumulator) => {
+    const blobFields = txs.flatMap(tx => tx.txEffect.toBlobFields());
+    const blobs = await Blob.getBlobsPerBlock(blobFields);
+    const endBlobAccumulator = startBlobAccumulator.accumulateBlobs(blobs);
     return endBlobAccumulator;
   },
 );
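Note on getPublicDataHint above: getPreviousValueIndex returns the index of the slot itself when the slot has been written, or of its predecessor when it has not, so the final exists check decides between reading the stored value and proving a zero read of an untouched slot. A reduced sketch of just that decision; PublicDataLeafSketch is a simplified stand-in, not the real PublicDataTreeLeafPreimage:

// Hypothetical reduced leaf preimage.
interface PublicDataLeafSketch {
  slot: bigint;
  value: bigint;
}

// Exact slot match: read the stored value. Mismatch: the slot was never
// written, so it reads as zero against the predecessor's witness.
function hintValue(foundLeaf: PublicDataLeafSketch, leafSlot: bigint): bigint {
  const exists = foundLeaf.slot === leafSlot;
  return exists ? foundLeaf.value : 0n;
}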
@@ -271,28 +277,36 @@ export const accumulateBlobs = runInSpan(
 export const buildHeaderFromCircuitOutputs = runInSpan(
   'BlockBuilderHelpers',
   'buildHeaderFromCircuitOutputs',
-  async (_span, blockRootRollupOutput: BlockRollupPublicInputs) => {
-    const constants = blockRootRollupOutput.constants;
-    const globalVariables = GlobalVariables.from({
-      chainId: constants.chainId,
-      version: constants.version,
-      blockNumber: blockRootRollupOutput.previousArchive.nextAvailableLeafIndex,
-      timestamp: blockRootRollupOutput.endTimestamp,
-      slotNumber: constants.slotNumber,
-      coinbase: constants.coinbase,
-      feeRecipient: constants.feeRecipient,
-      gasFees: constants.gasFees,
-    });
+  (
+    _span,
+    previousRollupData: BaseOrMergeRollupPublicInputs[],
+    parityPublicInputs: ParityPublicInputs,
+    rootRollupOutputs: BlockRootOrBlockMergePublicInputs,
+    blobsHash: Fr,
+    endState: StateReference,
+  ) => {
+    if (previousRollupData.length > 2) {
+      throw new Error(`There can't be more than 2 previous rollups. Received ${previousRollupData.length}.`);
+    }
+
+    const outHash =
+      previousRollupData.length === 0
+        ? Fr.ZERO
+        : previousRollupData.length === 1
+          ? previousRollupData[0].outHash
+          : sha256ToField([previousRollupData[0].outHash, previousRollupData[1].outHash]);
+    const contentCommitment = new ContentCommitment(blobsHash, parityPublicInputs.shaRoot, outHash);
 
-    const spongeBlobHash = await blockRootRollupOutput.endSpongeBlob.clone().squeeze();
+    const accumulatedFees = previousRollupData.reduce((sum, d) => sum.add(d.accumulatedFees), Fr.ZERO);
+    const accumulatedManaUsed = previousRollupData.reduce((sum, d) => sum.add(d.accumulatedManaUsed), Fr.ZERO);
 
     return new BlockHeader(
-      blockRootRollupOutput.previousArchive,
-      blockRootRollupOutput.endState,
-      spongeBlobHash,
-      globalVariables,
-      blockRootRollupOutput.accumulatedFees,
-      blockRootRollupOutput.accumulatedManaUsed,
+      rootRollupOutputs.previousArchive,
+      contentCommitment,
+      endState,
+      rootRollupOutputs.endGlobalVariables,
+      accumulatedFees,
+      accumulatedManaUsed,
     );
   },
 );
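Note on the hunk above: the new buildHeaderFromCircuitOutputs folds at most two child rollups into the block's outHash, where zero children yield zero, one child passes through, and two are hashed as a pair. A standalone sketch of that three-way rule using Node's sha256 in place of sha256ToField, which additionally reduces the digest into a field element (that step is omitted here):

import { createHash } from 'crypto';

// Combine child rollup outHashes per the ternary above.
function combineOutHashes(children: Buffer[]): Buffer {
  if (children.length > 2) {
    throw new Error(`There can't be more than 2 previous rollups.`);
  }
  if (children.length === 0) {
    return Buffer.alloc(32); // stand-in for Fr.ZERO
  }
  if (children.length === 1) {
    return children[0];
  }
  return createHash('sha256').update(Buffer.concat([children[0], children[1]])).digest();
}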
@@ -306,7 +320,6 @@ export const buildHeaderAndBodyFromTxs = runInSpan(
     globalVariables: GlobalVariables,
     l1ToL2Messages: Fr[],
     db: MerkleTreeReadOperations,
-    startSpongeBlob?: SpongeBlob,
   ) => {
     span.setAttribute(Attributes.BLOCK_NUMBER, globalVariables.blockNumber);
     const stateReference = new StateReference(
@@ -324,75 +337,25 @@
     const body = new Body(txEffects);
 
     const txOutHashes = txEffects.map(tx => tx.txOutHash());
-    const outHash = txOutHashes.length === 0 ? Fr.ZERO : new Fr(computeCompressedUnbalancedMerkleTreeRoot(txOutHashes));
+    const outHash = txOutHashes.length === 0 ? Fr.ZERO : new Fr(computeUnbalancedMerkleTreeRoot(txOutHashes));
 
     const parityShaRoot = await computeInHashFromL1ToL2Messages(l1ToL2Messages);
-    const blockBlobFields = body.toBlobFields();
-    // TODO(#17027): This only works when there's one block per checkpoint.
-    const blobFields = [new Fr(blockBlobFields.length + 1)].concat(blockBlobFields);
-    const blobsHash = computeBlobsHashFromBlobs(getBlobsPerL1Block(blobFields));
+    const blobsHash = getBlobsHashFromBlobs(await Blob.getBlobsPerBlock(body.toBlobFields()));
 
     const contentCommitment = new ContentCommitment(blobsHash, parityShaRoot, outHash);
 
     const fees = txEffects.reduce((acc, tx) => acc.add(tx.transactionFee), Fr.ZERO);
     const manaUsed = txs.reduce((acc, tx) => acc.add(new Fr(tx.gasUsed.billedGas.l2Gas)), Fr.ZERO);
 
-    const endSpongeBlob = startSpongeBlob?.clone() ?? (await SpongeBlob.init(blobFields.length));
-    await endSpongeBlob.absorb(blockBlobFields);
-    const spongeBlobHash = await endSpongeBlob.squeeze();
-
-    const header = new L2BlockHeader(
-      previousArchive,
-      contentCommitment,
-      stateReference,
-      globalVariables,
-      fees,
-      manaUsed,
-      spongeBlobHash,
-    );
+    const header = new BlockHeader(previousArchive, contentCommitment, stateReference, globalVariables, fees, manaUsed);
 
     return { header, body };
   },
 );
 
-export const buildBlockHeaderFromTxs = runInSpan(
-  'BlockBuilderHelpers',
-  'buildBlockHeaderFromTxs',
-  async (
-    span,
-    txs: ProcessedTx[],
-    globalVariables: GlobalVariables,
-    startSpongeBlob: SpongeBlob,
-    db: MerkleTreeReadOperations,
-  ) => {
-    span.setAttribute(Attributes.BLOCK_NUMBER, globalVariables.blockNumber);
-    const stateReference = new StateReference(
-      await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db),
-      new PartialStateReference(
-        await getTreeSnapshot(MerkleTreeId.NOTE_HASH_TREE, db),
-        await getTreeSnapshot(MerkleTreeId.NULLIFIER_TREE, db),
-        await getTreeSnapshot(MerkleTreeId.PUBLIC_DATA_TREE, db),
-      ),
-    );
-
-    const previousArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
-
-    const blobFields = getBlockBlobFields(txs.map(tx => tx.txEffect));
-    const endSpongeBlob = startSpongeBlob.clone();
-    await endSpongeBlob.absorb(blobFields);
-    const spongeBlobHash = await endSpongeBlob.squeeze();
-
-    const txEffects = txs.map(tx => tx.txEffect);
-    const fees = txEffects.reduce((acc, tx) => acc.add(tx.transactionFee), Fr.ZERO);
-    const manaUsed = txs.reduce((acc, tx) => acc.add(new Fr(tx.gasUsed.billedGas.l2Gas)), Fr.ZERO);
-
-    return new BlockHeader(previousArchive, stateReference, spongeBlobHash, globalVariables, fees, manaUsed);
-  },
-);
-
 /** Computes the inHash for a block's ContentCommitment given its l1 to l2 messages. */
 export async function computeInHashFromL1ToL2Messages(unpaddedL1ToL2Messages: Fr[]): Promise<Fr> {
-  const l1ToL2Messages = padArrayEnd<Fr, number>(unpaddedL1ToL2Messages, Fr.ZERO, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP);
+  const l1ToL2Messages = padArrayEnd(unpaddedL1ToL2Messages, Fr.ZERO, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP);
   const hasher = (left: Buffer, right: Buffer) =>
     Promise.resolve(sha256Trunc(Buffer.concat([left, right])) as Buffer<ArrayBuffer>);
   const parityHeight = Math.ceil(Math.log2(NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP));
@@ -400,6 +363,52 @@ export async function computeInHashFromL1ToL2Messages(unpaddedL1ToL2Messages: Fr
   return new Fr(await parityCalculator.computeTreeRoot(l1ToL2Messages.map(msg => msg.toBuffer())));
 }
 
+export function getBlobsHashFromBlobs(inputs: Blob[]): Fr {
+  return sha256ToField(inputs.map(b => b.getEthVersionedBlobHash()));
+}
+
+// Note: tested against the constant values in block_root/empty_block_root_rollup_inputs.nr, set by block_building_helpers.test.ts.
+// Having this separate fn hopefully makes it clear how we treat empty blocks and their blobs, and won't break if we decide to change how
+// getBlobsPerBlock() works on empty input.
+export async function getEmptyBlockBlobsHash(): Promise<Fr> {
+  const blobHash = (await Blob.getBlobsPerBlock([])).map(b => b.getEthVersionedBlobHash());
+  return sha256ToField(blobHash);
+}
+
+// Validate that the roots of all local trees match the output of the root circuit simulation
+// TODO: does this get called?
+export async function validateBlockRootOutput(
+  blockRootOutput: BlockRootOrBlockMergePublicInputs,
+  blockHeader: BlockHeader,
+  db: MerkleTreeReadOperations,
+) {
+  await Promise.all([
+    validateState(blockHeader.state, db),
+    validateSimulatedTree(await getTreeSnapshot(MerkleTreeId.ARCHIVE, db), blockRootOutput.newArchive, 'Archive'),
+  ]);
+}
+
+export const validateState = runInSpan(
+  'BlockBuilderHelpers',
+  'validateState',
+  async (_span, state: StateReference, db: MerkleTreeReadOperations) => {
+    const promises = [MerkleTreeId.NOTE_HASH_TREE, MerkleTreeId.NULLIFIER_TREE, MerkleTreeId.PUBLIC_DATA_TREE].map(
+      async (id: MerkleTreeId) => {
+        return { key: id, value: await getTreeSnapshot(id, db) };
+      },
+    );
+    const snapshots: Map<MerkleTreeId, AppendOnlyTreeSnapshot> = new Map(
+      (await Promise.all(promises)).map(obj => [obj.key, obj.value]),
+    );
+    validatePartialState(state.partial, snapshots);
+    validateSimulatedTree(
+      await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db),
+      state.l1ToL2MessageTree,
+      'L1ToL2MessageTree',
+    );
+  },
+);
+
 export async function getLastSiblingPath<TID extends MerkleTreeId>(treeId: TID, db: MerkleTreeReadOperations) {
   const { size } = await db.getTreeInfo(treeId);
   const path = await db.getSiblingPath(treeId, size - 1n);
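Note on computeInHashFromL1ToL2Messages, visible across the previous two hunks: it pads the message list to NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP and folds it through a binary SHA-256 tree of height ceil(log2(N)). A compact sketch of that fold without MerkleTreeCalculator; the real code uses the truncated sha256Trunc and wraps the root in Fr, both skipped here:

import { createHash } from 'crypto';

const sha256 = (data: Buffer): Buffer => createHash('sha256').update(data).digest();

// Pad leaves to the fixed power-of-two width, then reduce pairwise to the root.
function parityRootSketch(messages: Buffer[], numMessagesPerRollup: number): Buffer {
  const height = Math.ceil(Math.log2(numMessagesPerRollup));
  let level = Array.from({ length: 2 ** height }, (_, i) => messages[i] ?? Buffer.alloc(32));
  while (level.length > 1) {
    const next: Buffer[] = [];
    for (let i = 0; i < level.length; i += 2) {
      next.push(sha256(Buffer.concat([level[i], level[i + 1]])));
    }
    level = next;
  }
  return level[0];
}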
@@ -532,7 +541,7 @@ function validateSimulatedTree(
 }
 
 export function validateTx(tx: ProcessedTx) {
-  const txHeader = tx.data.constants.anchorBlockHeader;
+  const txHeader = tx.data.constants.historicalHeader;
   if (txHeader.state.l1ToL2MessageTree.isEmpty()) {
     throw new Error(`Empty L1 to L2 messages tree in tx: ${toFriendlyJSON(tx)}`);
   }
@@ -546,12 +555,3 @@ export function validateTx(tx: ProcessedTx) {
     throw new Error(`Empty public data tree in tx: ${toFriendlyJSON(tx)}`);
   }
 }
-
-export function toProofData<T extends Bufferable, PROOF_LENGTH extends number>(
-  { inputs, proof, verificationKey }: PublicInputsAndRecursiveProof<T, PROOF_LENGTH>,
-  vkIndex?: number,
-) {
-  const leafIndex = vkIndex || getVKIndex(verificationKey.keyAsFields);
-  const vkData = new VkData(verificationKey, leafIndex, getVKSiblingPath(leafIndex));
-  return new ProofData(inputs, proof, vkData);
-}