@aztec/prover-client 4.0.0-nightly.20250907 → 4.0.0-nightly.20260107

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129):
  1. package/dest/block-factory/index.d.ts +1 -1
  2. package/dest/block-factory/light.d.ts +5 -3
  3. package/dest/block-factory/light.d.ts.map +1 -1
  4. package/dest/block-factory/light.js +32 -11
  5. package/dest/config.d.ts +2 -2
  6. package/dest/config.d.ts.map +1 -1
  7. package/dest/config.js +2 -2
  8. package/dest/index.d.ts +1 -1
  9. package/dest/light/index.d.ts +2 -0
  10. package/dest/light/index.d.ts.map +1 -0
  11. package/dest/light/index.js +1 -0
  12. package/dest/light/lightweight_checkpoint_builder.d.ts +36 -0
  13. package/dest/light/lightweight_checkpoint_builder.d.ts.map +1 -0
  14. package/dest/light/lightweight_checkpoint_builder.js +147 -0
  15. package/dest/mocks/fixtures.d.ts +5 -5
  16. package/dest/mocks/fixtures.d.ts.map +1 -1
  17. package/dest/mocks/fixtures.js +33 -15
  18. package/dest/mocks/test_context.d.ts +38 -33
  19. package/dest/mocks/test_context.d.ts.map +1 -1
  20. package/dest/mocks/test_context.js +133 -82
  21. package/dest/orchestrator/block-building-helpers.d.ts +35 -35
  22. package/dest/orchestrator/block-building-helpers.d.ts.map +1 -1
  23. package/dest/orchestrator/block-building-helpers.js +151 -187
  24. package/dest/orchestrator/block-proving-state.d.ts +68 -55
  25. package/dest/orchestrator/block-proving-state.d.ts.map +1 -1
  26. package/dest/orchestrator/block-proving-state.js +273 -185
  27. package/dest/orchestrator/checkpoint-proving-state.d.ts +63 -0
  28. package/dest/orchestrator/checkpoint-proving-state.d.ts.map +1 -0
  29. package/dest/orchestrator/checkpoint-proving-state.js +210 -0
  30. package/dest/orchestrator/epoch-proving-state.d.ts +38 -31
  31. package/dest/orchestrator/epoch-proving-state.d.ts.map +1 -1
  32. package/dest/orchestrator/epoch-proving-state.js +128 -84
  33. package/dest/orchestrator/index.d.ts +1 -1
  34. package/dest/orchestrator/orchestrator.d.ts +35 -34
  35. package/dest/orchestrator/orchestrator.d.ts.map +1 -1
  36. package/dest/orchestrator/orchestrator.js +777 -292
  37. package/dest/orchestrator/orchestrator_metrics.d.ts +1 -3
  38. package/dest/orchestrator/orchestrator_metrics.d.ts.map +1 -1
  39. package/dest/orchestrator/orchestrator_metrics.js +0 -9
  40. package/dest/orchestrator/tx-proving-state.d.ts +12 -10
  41. package/dest/orchestrator/tx-proving-state.d.ts.map +1 -1
  42. package/dest/orchestrator/tx-proving-state.js +23 -29
  43. package/dest/prover-client/factory.d.ts +3 -3
  44. package/dest/prover-client/factory.d.ts.map +1 -1
  45. package/dest/prover-client/index.d.ts +1 -1
  46. package/dest/prover-client/prover-client.d.ts +3 -3
  47. package/dest/prover-client/prover-client.d.ts.map +1 -1
  48. package/dest/prover-client/server-epoch-prover.d.ts +13 -11
  49. package/dest/prover-client/server-epoch-prover.d.ts.map +1 -1
  50. package/dest/prover-client/server-epoch-prover.js +9 -9
  51. package/dest/proving_broker/broker_prover_facade.d.ts +23 -18
  52. package/dest/proving_broker/broker_prover_facade.d.ts.map +1 -1
  53. package/dest/proving_broker/broker_prover_facade.js +42 -33
  54. package/dest/proving_broker/config.d.ts +18 -14
  55. package/dest/proving_broker/config.d.ts.map +1 -1
  56. package/dest/proving_broker/config.js +12 -6
  57. package/dest/proving_broker/factory.d.ts +1 -1
  58. package/dest/proving_broker/factory.js +1 -1
  59. package/dest/proving_broker/fixtures.d.ts +3 -2
  60. package/dest/proving_broker/fixtures.d.ts.map +1 -1
  61. package/dest/proving_broker/fixtures.js +3 -2
  62. package/dest/proving_broker/index.d.ts +1 -1
  63. package/dest/proving_broker/proof_store/factory.d.ts +2 -2
  64. package/dest/proving_broker/proof_store/gcs_proof_store.d.ts +1 -1
  65. package/dest/proving_broker/proof_store/gcs_proof_store.d.ts.map +1 -1
  66. package/dest/proving_broker/proof_store/index.d.ts +2 -1
  67. package/dest/proving_broker/proof_store/index.d.ts.map +1 -1
  68. package/dest/proving_broker/proof_store/index.js +1 -0
  69. package/dest/proving_broker/proof_store/inline_proof_store.d.ts +1 -1
  70. package/dest/proving_broker/proof_store/inline_proof_store.d.ts.map +1 -1
  71. package/dest/proving_broker/proof_store/proof_store.d.ts +1 -1
  72. package/dest/proving_broker/proving_agent.d.ts +1 -1
  73. package/dest/proving_broker/proving_agent.d.ts.map +1 -1
  74. package/dest/proving_broker/proving_agent.js +383 -8
  75. package/dest/proving_broker/proving_agent_instrumentation.d.ts +1 -1
  76. package/dest/proving_broker/proving_agent_instrumentation.d.ts.map +1 -1
  77. package/dest/proving_broker/proving_broker.d.ts +2 -2
  78. package/dest/proving_broker/proving_broker.d.ts.map +1 -1
  79. package/dest/proving_broker/proving_broker.js +418 -29
  80. package/dest/proving_broker/proving_broker_database/memory.d.ts +3 -2
  81. package/dest/proving_broker/proving_broker_database/memory.d.ts.map +1 -1
  82. package/dest/proving_broker/proving_broker_database/persisted.d.ts +3 -2
  83. package/dest/proving_broker/proving_broker_database/persisted.d.ts.map +1 -1
  84. package/dest/proving_broker/proving_broker_database/persisted.js +8 -7
  85. package/dest/proving_broker/proving_broker_database.d.ts +3 -2
  86. package/dest/proving_broker/proving_broker_database.d.ts.map +1 -1
  87. package/dest/proving_broker/proving_broker_instrumentation.d.ts +1 -1
  88. package/dest/proving_broker/proving_broker_instrumentation.d.ts.map +1 -1
  89. package/dest/proving_broker/proving_job_controller.d.ts +3 -2
  90. package/dest/proving_broker/proving_job_controller.d.ts.map +1 -1
  91. package/dest/proving_broker/proving_job_controller.js +40 -21
  92. package/dest/proving_broker/rpc.d.ts +4 -4
  93. package/dest/test/mock_proof_store.d.ts +3 -3
  94. package/dest/test/mock_proof_store.d.ts.map +1 -1
  95. package/dest/test/mock_prover.d.ts +23 -19
  96. package/dest/test/mock_prover.d.ts.map +1 -1
  97. package/dest/test/mock_prover.js +36 -21
  98. package/package.json +21 -19
  99. package/src/block-factory/light.ts +40 -17
  100. package/src/config.ts +2 -2
  101. package/src/light/index.ts +1 -0
  102. package/src/light/lightweight_checkpoint_builder.ts +198 -0
  103. package/src/mocks/fixtures.ts +41 -36
  104. package/src/mocks/test_context.ts +196 -114
  105. package/src/orchestrator/block-building-helpers.ts +233 -313
  106. package/src/orchestrator/block-proving-state.ts +315 -247
  107. package/src/orchestrator/checkpoint-proving-state.ts +303 -0
  108. package/src/orchestrator/epoch-proving-state.ts +176 -129
  109. package/src/orchestrator/orchestrator.ts +558 -348
  110. package/src/orchestrator/orchestrator_metrics.ts +1 -20
  111. package/src/orchestrator/tx-proving-state.ts +47 -55
  112. package/src/prover-client/factory.ts +6 -2
  113. package/src/prover-client/prover-client.ts +3 -2
  114. package/src/prover-client/server-epoch-prover.ts +30 -21
  115. package/src/proving_broker/broker_prover_facade.ts +175 -112
  116. package/src/proving_broker/config.ts +14 -7
  117. package/src/proving_broker/factory.ts +1 -1
  118. package/src/proving_broker/fixtures.ts +8 -3
  119. package/src/proving_broker/proof_store/index.ts +1 -0
  120. package/src/proving_broker/proving_broker.ts +41 -19
  121. package/src/proving_broker/proving_broker_database/memory.ts +2 -1
  122. package/src/proving_broker/proving_broker_database/persisted.ts +10 -9
  123. package/src/proving_broker/proving_broker_database.ts +2 -1
  124. package/src/proving_broker/proving_job_controller.ts +42 -22
  125. package/src/test/mock_prover.ts +143 -66
  126. package/dest/bin/get-proof-inputs.d.ts +0 -2
  127. package/dest/bin/get-proof-inputs.d.ts.map +0 -1
  128. package/dest/bin/get-proof-inputs.js +0 -51
  129. package/src/bin/get-proof-inputs.ts +0 -59
@@ -1,56 +1,61 @@
1
- import { BatchedBlobAccumulator, Blob, type SpongeBlob } from '@aztec/blob-lib';
1
+ import {
2
+ BatchedBlobAccumulator,
3
+ SpongeBlob,
4
+ computeBlobsHashFromBlobs,
5
+ encodeBlockBlobData,
6
+ getBlobCommitmentsFromBlobs,
7
+ getBlobsPerL1Block,
8
+ } from '@aztec/blob-lib';
2
9
  import {
3
10
  ARCHIVE_HEIGHT,
11
+ CHONK_PROOF_LENGTH,
4
12
  MAX_CONTRACT_CLASS_LOGS_PER_TX,
5
13
  MAX_NOTE_HASHES_PER_TX,
6
14
  MAX_NULLIFIERS_PER_TX,
7
15
  NOTE_HASH_SUBTREE_HEIGHT,
8
- NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH,
16
+ NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
9
17
  NULLIFIER_SUBTREE_HEIGHT,
10
- NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH,
18
+ NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
11
19
  NULLIFIER_TREE_HEIGHT,
12
- NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP,
13
20
  PUBLIC_DATA_TREE_HEIGHT,
14
21
  } from '@aztec/constants';
15
22
  import { makeTuple } from '@aztec/foundation/array';
23
+ import { BlockNumber } from '@aztec/foundation/branded-types';
16
24
  import { padArrayEnd } from '@aztec/foundation/collection';
17
- import { sha256ToField, sha256Trunc } from '@aztec/foundation/crypto';
18
- import { BLS12Point, Fr } from '@aztec/foundation/fields';
19
- import { type Tuple, assertLength, toFriendlyJSON } from '@aztec/foundation/serialize';
20
- import { MembershipWitness, MerkleTreeCalculator, computeUnbalancedMerkleTreeRoot } from '@aztec/foundation/trees';
21
- import { getVKTreeRoot } from '@aztec/noir-protocol-circuits-types/vk-tree';
22
- import { protocolContractTreeRoot } from '@aztec/protocol-contracts';
25
+ import { Fr } from '@aztec/foundation/curves/bn254';
26
+ import { type Bufferable, assertLength, toFriendlyJSON } from '@aztec/foundation/serialize';
27
+ import { MembershipWitness } from '@aztec/foundation/trees';
28
+ import { getVkData } from '@aztec/noir-protocol-circuits-types/server/vks';
29
+ import { getVKIndex, getVKSiblingPath } from '@aztec/noir-protocol-circuits-types/vk-tree';
23
30
  import { computeFeePayerBalanceLeafSlot } from '@aztec/protocol-contracts/fee-juice';
24
- import { PublicDataHint } from '@aztec/stdlib/avm';
25
31
  import { Body } from '@aztec/stdlib/block';
26
- import type { MerkleTreeWriteOperations } from '@aztec/stdlib/interfaces/server';
32
+ import type { MerkleTreeWriteOperations, PublicInputsAndRecursiveProof } from '@aztec/stdlib/interfaces/server';
27
33
  import { ContractClassLogFields } from '@aztec/stdlib/logs';
28
- import type { ParityPublicInputs } from '@aztec/stdlib/parity';
34
+ import { Proof, ProofData, RecursiveProof } from '@aztec/stdlib/proofs';
29
35
  import {
30
- type BaseOrMergeRollupPublicInputs,
31
36
  BlockConstantData,
32
- type BlockRootOrBlockMergePublicInputs,
37
+ BlockRollupPublicInputs,
33
38
  PrivateBaseRollupHints,
34
- PrivateBaseStateDiffHints,
35
39
  PublicBaseRollupHints,
40
+ PublicChonkVerifierPrivateInputs,
41
+ TreeSnapshotDiffHints,
36
42
  } from '@aztec/stdlib/rollup';
37
43
  import {
38
44
  AppendOnlyTreeSnapshot,
39
45
  MerkleTreeId,
40
46
  NullifierLeafPreimage,
41
- PublicDataTreeLeaf,
42
47
  PublicDataTreeLeafPreimage,
43
48
  getTreeHeight,
44
49
  } from '@aztec/stdlib/trees';
45
50
  import {
46
51
  BlockHeader,
47
- ContentCommitment,
48
- type GlobalVariables,
52
+ GlobalVariables,
49
53
  PartialStateReference,
50
54
  type ProcessedTx,
51
55
  StateReference,
52
- TxEffect,
56
+ Tx,
53
57
  } from '@aztec/stdlib/tx';
58
+ import { VkData } from '@aztec/stdlib/vks';
54
59
  import { Attributes, type Span, runInSpan } from '@aztec/telemetry-client';
55
60
  import type { MerkleTreeReadOperations } from '@aztec/world-state';
56
61
 
@@ -70,74 +75,32 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
70
75
  async (
71
76
  span: Span,
72
77
  tx: ProcessedTx,
73
- globalVariables: GlobalVariables,
78
+ lastArchive: AppendOnlyTreeSnapshot,
74
79
  newL1ToL2MessageTreeSnapshot: AppendOnlyTreeSnapshot,
75
- db: MerkleTreeWriteOperations,
76
80
  startSpongeBlob: SpongeBlob,
81
+ proverId: Fr,
82
+ db: MerkleTreeWriteOperations,
77
83
  ) => {
78
84
  span.setAttribute(Attributes.TX_HASH, tx.hash.toString());
79
85
  // Get trees info before any changes hit
80
- const lastArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
81
86
  const start = new PartialStateReference(
82
87
  await getTreeSnapshot(MerkleTreeId.NOTE_HASH_TREE, db),
83
88
  await getTreeSnapshot(MerkleTreeId.NULLIFIER_TREE, db),
84
89
  await getTreeSnapshot(MerkleTreeId.PUBLIC_DATA_TREE, db),
85
90
  );
86
- // Get the subtree sibling paths for the circuit
87
- const noteHashSubtreeSiblingPathArray = await getSubtreeSiblingPath(
88
- MerkleTreeId.NOTE_HASH_TREE,
89
- NOTE_HASH_SUBTREE_HEIGHT,
90
- db,
91
- );
92
91
 
93
- const noteHashSubtreeSiblingPath = makeTuple(NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH, i =>
94
- i < noteHashSubtreeSiblingPathArray.length ? noteHashSubtreeSiblingPathArray[i] : Fr.ZERO,
92
+ // Get the note hash subtree root sibling path for insertion.
93
+ const noteHashSubtreeRootSiblingPath = assertLength(
94
+ await getSubtreeSiblingPath(MerkleTreeId.NOTE_HASH_TREE, NOTE_HASH_SUBTREE_HEIGHT, db),
95
+ NOTE_HASH_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
95
96
  );
96
97
 
97
- // Update the note hash trees with the new items being inserted to get the new roots
98
- // that will be used by the next iteration of the base rollup circuit, skipping the empty ones
99
- const noteHashes = padArrayEnd(tx.txEffect.noteHashes, Fr.ZERO, MAX_NOTE_HASHES_PER_TX);
100
- await db.appendLeaves(MerkleTreeId.NOTE_HASH_TREE, noteHashes);
98
+ const { nullifierInsertionResult, publicDataInsertionResult } = await insertSideEffects(tx, db);
101
99
 
102
- // Create data hint for reading fee payer initial balance in Fee Juice
103
- const leafSlot = await computeFeePayerBalanceLeafSlot(tx.data.feePayer);
104
- const feePayerFeeJuiceBalanceReadHint = await getPublicDataHint(db, leafSlot.toBigInt());
105
-
106
- // The read witnesses for a given TX should be generated before the writes of the same TX are applied.
107
- // All reads that refer to writes in the same tx are transient and can be simplified out.
108
- const txPublicDataUpdateRequestInfo = await processPublicDataUpdateRequests(tx, db);
109
-
110
- // Update the nullifier tree, capturing the low nullifier info for each individual operation
111
- const {
112
- lowLeavesWitnessData: nullifierWitnessLeaves,
113
- newSubtreeSiblingPath: nullifiersSubtreeSiblingPath,
114
- sortedNewLeaves: sortednullifiers,
115
- sortedNewLeavesIndexes,
116
- } = await db.batchInsert(
117
- MerkleTreeId.NULLIFIER_TREE,
118
- padArrayEnd(tx.txEffect.nullifiers, Fr.ZERO, MAX_NULLIFIERS_PER_TX).map(n => n.toBuffer()),
119
- NULLIFIER_SUBTREE_HEIGHT,
120
- );
121
-
122
- if (nullifierWitnessLeaves === undefined) {
123
- throw new Error(`Could not craft nullifier batch insertion proofs`);
124
- }
125
-
126
- // Extract witness objects from returned data
127
- const nullifierPredecessorMembershipWitnessesWithoutPadding: MembershipWitness<typeof NULLIFIER_TREE_HEIGHT>[] =
128
- nullifierWitnessLeaves.map(l =>
129
- MembershipWitness.fromBufferArray(l.index, assertLength(l.siblingPath.toBufferArray(), NULLIFIER_TREE_HEIGHT)),
130
- );
131
-
132
- const nullifierSubtreeSiblingPathArray = nullifiersSubtreeSiblingPath.toFields();
133
-
134
- const nullifierSubtreeSiblingPath = makeTuple(NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH, i =>
135
- i < nullifierSubtreeSiblingPathArray.length ? nullifierSubtreeSiblingPathArray[i] : Fr.ZERO,
136
- );
137
-
138
- // Append new data to startSpongeBlob
139
- const inputSpongeBlob = startSpongeBlob.clone();
140
- await startSpongeBlob.absorb(tx.txEffect.toBlobFields());
100
+ const blockHash = await tx.data.constants.anchorBlockHeader.hash();
101
+ const anchorBlockArchiveSiblingPath = (
102
+ await getMembershipWitnessFor(blockHash, MerkleTreeId.ARCHIVE, ARCHIVE_HEIGHT, db)
103
+ ).siblingPath;
141
104
 
142
105
  const contractClassLogsFields = makeTuple(
143
106
  MAX_CONTRACT_CLASS_LOGS_PER_TX,
@@ -145,80 +108,84 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
145
108
  );
146
109
 
147
110
  if (tx.avmProvingRequest) {
148
- const blockHash = await tx.data.constants.historicalHeader.hash();
149
- const archiveRootMembershipWitness = await getMembershipWitnessFor(
150
- blockHash,
151
- MerkleTreeId.ARCHIVE,
152
- ARCHIVE_HEIGHT,
153
- db,
154
- );
155
-
156
111
  return PublicBaseRollupHints.from({
157
- startSpongeBlob: inputSpongeBlob,
112
+ startSpongeBlob,
158
113
  lastArchive,
159
- archiveRootMembershipWitness,
114
+ anchorBlockArchiveSiblingPath,
160
115
  contractClassLogsFields,
161
116
  });
162
117
  } else {
163
- if (
164
- txPublicDataUpdateRequestInfo.lowPublicDataWritesMembershipWitnesses.length > 1 ||
165
- txPublicDataUpdateRequestInfo.lowPublicDataWritesPreimages.length > 1 ||
166
- txPublicDataUpdateRequestInfo.publicDataWritesSiblingPaths.length > 1
167
- ) {
118
+ if (tx.txEffect.publicDataWrites.length > 1) {
168
119
  throw new Error(`More than one public data write in a private only tx`);
169
120
  }
170
121
 
171
- const feeWriteLowLeafPreimage =
172
- txPublicDataUpdateRequestInfo.lowPublicDataWritesPreimages[0] || PublicDataTreeLeafPreimage.empty();
173
- const feeWriteLowLeafMembershipWitness =
174
- txPublicDataUpdateRequestInfo.lowPublicDataWritesMembershipWitnesses[0] ||
175
- MembershipWitness.empty<typeof PUBLIC_DATA_TREE_HEIGHT>(PUBLIC_DATA_TREE_HEIGHT);
176
- const feeWriteSiblingPath =
177
- txPublicDataUpdateRequestInfo.publicDataWritesSiblingPaths[0] ||
178
- makeTuple(PUBLIC_DATA_TREE_HEIGHT, () => Fr.ZERO);
179
-
180
- const stateDiffHints = PrivateBaseStateDiffHints.from({
181
- nullifierPredecessorPreimages: makeTuple(MAX_NULLIFIERS_PER_TX, i =>
182
- i < nullifierWitnessLeaves.length
183
- ? (nullifierWitnessLeaves[i].leafPreimage as NullifierLeafPreimage)
184
- : NullifierLeafPreimage.empty(),
185
- ),
186
- nullifierPredecessorMembershipWitnesses: makeTuple(MAX_NULLIFIERS_PER_TX, i =>
187
- i < nullifierPredecessorMembershipWitnessesWithoutPadding.length
188
- ? nullifierPredecessorMembershipWitnessesWithoutPadding[i]
189
- : makeEmptyMembershipWitness(NULLIFIER_TREE_HEIGHT),
190
- ),
191
- sortedNullifiers: makeTuple(MAX_NULLIFIERS_PER_TX, i => Fr.fromBuffer(sortednullifiers[i])),
192
- sortedNullifierIndexes: makeTuple(MAX_NULLIFIERS_PER_TX, i => sortedNewLeavesIndexes[i]),
193
- noteHashSubtreeSiblingPath,
194
- nullifierSubtreeSiblingPath,
195
- feeWriteLowLeafPreimage,
196
- feeWriteLowLeafMembershipWitness,
197
- feeWriteSiblingPath,
198
- });
122
+ // Get hints for reading fee payer's balance in the public data tree.
123
+ const feePayerBalanceLeafWitnessData = publicDataInsertionResult.lowLeavesWitnessData[0];
124
+ const feePayerBalanceMembershipWitness = MembershipWitness.fromBufferArray<typeof PUBLIC_DATA_TREE_HEIGHT>(
125
+ feePayerBalanceLeafWitnessData.index,
126
+ assertLength(feePayerBalanceLeafWitnessData.siblingPath.toBufferArray(), PUBLIC_DATA_TREE_HEIGHT),
127
+ );
128
+ const feePayerBalanceLeafPreimage = feePayerBalanceLeafWitnessData.leafPreimage as PublicDataTreeLeafPreimage;
129
+ const leafSlot = await computeFeePayerBalanceLeafSlot(tx.data.feePayer);
130
+ if (!leafSlot.equals(feePayerBalanceLeafPreimage.leaf.slot)) {
131
+ throw new Error(`Cannot find the public data tree leaf for the fee payer's balance`);
132
+ }
199
133
 
200
- const blockHash = await tx.data.constants.historicalHeader.hash();
201
- const archiveRootMembershipWitness = await getMembershipWitnessFor(
202
- blockHash,
203
- MerkleTreeId.ARCHIVE,
204
- ARCHIVE_HEIGHT,
205
- db,
134
+ // Get hints for inserting the nullifiers.
135
+ const nullifierLowLeavesWitnessData = nullifierInsertionResult.lowLeavesWitnessData!;
136
+ const nullifierPredecessorPreimages = padArrayEnd(
137
+ nullifierLowLeavesWitnessData.map(l => l.leafPreimage as NullifierLeafPreimage),
138
+ NullifierLeafPreimage.empty(),
139
+ MAX_NULLIFIERS_PER_TX,
140
+ );
141
+ const nullifierPredecessorMembershipWitnesses = padArrayEnd(
142
+ nullifierLowLeavesWitnessData.map(l =>
143
+ MembershipWitness.fromBufferArray<typeof NULLIFIER_TREE_HEIGHT>(
144
+ l.index,
145
+ assertLength(l.siblingPath.toBufferArray(), NULLIFIER_TREE_HEIGHT),
146
+ ),
147
+ ),
148
+ makeEmptyMembershipWitness(NULLIFIER_TREE_HEIGHT),
149
+ MAX_NULLIFIERS_PER_TX,
150
+ );
151
+ const sortedNullifiers = assertLength(
152
+ nullifierInsertionResult.sortedNewLeaves.map(n => Fr.fromBuffer(n)),
153
+ MAX_NULLIFIERS_PER_TX,
206
154
  );
155
+ const sortedNullifierIndexes = assertLength(
156
+ nullifierInsertionResult.sortedNewLeavesIndexes,
157
+ MAX_NULLIFIERS_PER_TX,
158
+ );
159
+ const nullifierSubtreeRootSiblingPath = assertLength(
160
+ nullifierInsertionResult.newSubtreeSiblingPath.toFields(),
161
+ NULLIFIER_SUBTREE_ROOT_SIBLING_PATH_LENGTH,
162
+ );
163
+
164
+ const treeSnapshotDiffHints = TreeSnapshotDiffHints.from({
165
+ noteHashSubtreeRootSiblingPath,
166
+ nullifierPredecessorPreimages,
167
+ nullifierPredecessorMembershipWitnesses,
168
+ sortedNullifiers,
169
+ sortedNullifierIndexes,
170
+ nullifierSubtreeRootSiblingPath,
171
+ feePayerBalanceMembershipWitness,
172
+ });
207
173
 
208
174
  const constants = BlockConstantData.from({
209
175
  lastArchive,
210
- newL1ToL2: newL1ToL2MessageTreeSnapshot,
211
- vkTreeRoot: getVKTreeRoot(),
212
- protocolContractTreeRoot,
213
- globalVariables,
176
+ l1ToL2TreeSnapshot: newL1ToL2MessageTreeSnapshot,
177
+ vkTreeRoot: tx.data.constants.vkTreeRoot,
178
+ protocolContractsHash: tx.data.constants.protocolContractsHash,
179
+ globalVariables: tx.globalVariables,
180
+ proverId,
214
181
  });
215
182
 
216
183
  return PrivateBaseRollupHints.from({
217
184
  start,
218
- startSpongeBlob: inputSpongeBlob,
219
- stateDiffHints,
220
- feePayerFeeJuiceBalanceReadHint,
221
- archiveRootMembershipWitness,
185
+ startSpongeBlob,
186
+ treeSnapshotDiffHints,
187
+ feePayerBalanceLeafPreimage,
188
+ anchorBlockArchiveSiblingPath,
222
189
  contractClassLogsFields,
223
190
  constants,
224
191
  });
@@ -226,50 +193,82 @@ export const insertSideEffectsAndBuildBaseRollupHints = runInSpan(
226
193
  },
227
194
  );
228
195
 
229
- export async function getPublicDataHint(db: MerkleTreeWriteOperations, leafSlot: bigint) {
230
- const { index } = (await db.getPreviousValueIndex(MerkleTreeId.PUBLIC_DATA_TREE, leafSlot)) ?? {};
231
- if (index === undefined) {
232
- throw new Error(`Cannot find the previous value index for public data ${leafSlot}.`);
233
- }
234
-
235
- const siblingPath = await db.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, index);
236
- const membershipWitness = new MembershipWitness(PUBLIC_DATA_TREE_HEIGHT, index, siblingPath.toTuple());
196
+ export const insertSideEffects = runInSpan(
197
+ 'BlockBuilderHelpers',
198
+ 'buildBaseRollupHints',
199
+ async (span: Span, tx: ProcessedTx, db: MerkleTreeWriteOperations) => {
200
+ span.setAttribute(Attributes.TX_HASH, tx.hash.toString());
237
201
 
238
- const leafPreimage = (await db.getLeafPreimage(MerkleTreeId.PUBLIC_DATA_TREE, index)) as PublicDataTreeLeafPreimage;
239
- if (!leafPreimage) {
240
- throw new Error(`Cannot find the leaf preimage for public data tree at index ${index}.`);
241
- }
202
+ // Insert the note hashes. Padded with zeros to the max number of note hashes per tx.
203
+ const noteHashes = padArrayEnd(tx.txEffect.noteHashes, Fr.ZERO, MAX_NOTE_HASHES_PER_TX);
204
+ await db.appendLeaves(MerkleTreeId.NOTE_HASH_TREE, noteHashes);
242
205
 
243
- const exists = leafPreimage.leaf.slot.toBigInt() === leafSlot;
244
- const value = exists ? leafPreimage.leaf.value : Fr.ZERO;
206
+ // Insert the nullifiers. Padded with zeros to the max number of nullifiers per tx.
207
+ // Capturing the low nullifier info for each individual operation.
208
+ const nullifierInsertionResult = await db.batchInsert(
209
+ MerkleTreeId.NULLIFIER_TREE,
210
+ padArrayEnd(tx.txEffect.nullifiers, Fr.ZERO, MAX_NULLIFIERS_PER_TX).map(n => n.toBuffer()),
211
+ NULLIFIER_SUBTREE_HEIGHT,
212
+ );
213
+ if (nullifierInsertionResult.lowLeavesWitnessData === undefined) {
214
+ throw new Error(`Failed to batch insert nullifiers.`);
215
+ }
245
216
 
246
- return new PublicDataHint(new Fr(leafSlot), value, membershipWitness, leafPreimage);
247
- }
217
+ if (tx.txEffect.publicDataWrites.some(write => write.isEmpty())) {
218
+ throw new Error(`Empty public data write in tx: ${toFriendlyJSON(tx)}.`);
219
+ }
220
+ // Insert the public data writes sequentially. No need to pad them to the max array size.
221
+ // Capturing the low leaf info for each individual operation.
222
+ const publicDataInsertionResult = await db.sequentialInsert(
223
+ MerkleTreeId.PUBLIC_DATA_TREE,
224
+ tx.txEffect.publicDataWrites.map(write => write.toBuffer()),
225
+ );
248
226
 
249
- export const buildBlobHints = runInSpan(
250
- 'BlockBuilderHelpers',
251
- 'buildBlobHints',
252
- async (_span: Span, txEffects: TxEffect[]) => {
253
- const blobFields = txEffects.flatMap(tx => tx.toBlobFields());
254
- const blobs = await Blob.getBlobsPerBlock(blobFields);
255
- // TODO(#13430): The blobsHash is confusingly similar to blobCommitmentsHash, calculated from below blobCommitments:
256
- // - blobsHash := sha256([blobhash_0, ..., blobhash_m]) = a hash of all blob hashes in a block with m+1 blobs inserted into the header, exists so a user can cross check blobs.
257
- // - blobCommitmentsHash := sha256( ...sha256(sha256(C_0), C_1) ... C_n) = iteratively calculated hash of all blob commitments in an epoch with n+1 blobs (see calculateBlobCommitmentsHash()),
258
- // exists so we can validate injected commitments to the rollup circuits correspond to the correct real blobs.
259
- // We may be able to combine these values e.g. blobCommitmentsHash := sha256( ...sha256(sha256(blobshash_0), blobshash_1) ... blobshash_l) for an epoch with l+1 blocks.
260
- const blobCommitments = blobs.map(b => BLS12Point.decompress(b.commitment));
261
- const blobsHash = new Fr(getBlobsHashFromBlobs(blobs));
262
- return { blobFields, blobCommitments, blobs, blobsHash };
227
+ return {
228
+ nullifierInsertionResult,
229
+ publicDataInsertionResult,
230
+ };
263
231
  },
264
232
  );
265
233
 
234
+ export function getChonkProofFromTx(tx: Tx | ProcessedTx) {
235
+ const publicInputs = tx.data.publicInputs().toFields();
236
+
237
+ const binaryProof = new Proof(
238
+ Buffer.concat(tx.chonkProof.attachPublicInputs(publicInputs).fieldsWithPublicInputs.map(field => field.toBuffer())),
239
+ publicInputs.length,
240
+ );
241
+ return new RecursiveProof(tx.chonkProof.fields, binaryProof, true, CHONK_PROOF_LENGTH);
242
+ }
243
+
244
+ export function getPublicChonkVerifierPrivateInputsFromTx(tx: Tx | ProcessedTx, proverId: Fr) {
245
+ const proofData = new ProofData(
246
+ tx.data.toPrivateToPublicKernelCircuitPublicInputs(),
247
+ getChonkProofFromTx(tx),
248
+ getVkData('HidingKernelToPublic'),
249
+ );
250
+ return new PublicChonkVerifierPrivateInputs(proofData, proverId);
251
+ }
252
+
253
+ // Build "hints" as the private inputs for the checkpoint root rollup circuit.
254
+ // The `blobCommitments` will be accumulated and checked in the root rollup against the `finalBlobChallenges`.
255
+ // The `blobsHash` will be validated on L1 against the submitted blob data.
256
+ export const buildBlobHints = (blobFields: Fr[]) => {
257
+ const blobs = getBlobsPerL1Block(blobFields);
258
+ const blobCommitments = getBlobCommitmentsFromBlobs(blobs);
259
+ const blobsHash = computeBlobsHashFromBlobs(blobs);
260
+ return { blobCommitments, blobs, blobsHash };
261
+ };
262
+
263
+ export const buildFinalBlobChallenges = async (blobFieldsPerCheckpoint: Fr[][]) => {
264
+ return await BatchedBlobAccumulator.precomputeBatchedBlobChallenges(blobFieldsPerCheckpoint);
265
+ };
266
+
266
267
  export const accumulateBlobs = runInSpan(
267
268
  'BlockBuilderHelpers',
268
269
  'accumulateBlobs',
269
- async (_span: Span, txs: ProcessedTx[], startBlobAccumulator: BatchedBlobAccumulator) => {
270
- const blobFields = txs.flatMap(tx => tx.txEffect.toBlobFields());
271
- const blobs = await Blob.getBlobsPerBlock(blobFields);
272
- const endBlobAccumulator = startBlobAccumulator.accumulateBlobs(blobs);
270
+ async (_span: Span, blobFields: Fr[], startBlobAccumulator: BatchedBlobAccumulator) => {
271
+ const endBlobAccumulator = await startBlobAccumulator.accumulateFields(blobFields);
273
272
  return endBlobAccumulator;
274
273
  },
275
274
  );
@@ -277,36 +276,28 @@ export const accumulateBlobs = runInSpan(
277
276
  export const buildHeaderFromCircuitOutputs = runInSpan(
278
277
  'BlockBuilderHelpers',
279
278
  'buildHeaderFromCircuitOutputs',
280
- (
281
- _span,
282
- previousRollupData: BaseOrMergeRollupPublicInputs[],
283
- parityPublicInputs: ParityPublicInputs,
284
- rootRollupOutputs: BlockRootOrBlockMergePublicInputs,
285
- blobsHash: Fr,
286
- endState: StateReference,
287
- ) => {
288
- if (previousRollupData.length > 2) {
289
- throw new Error(`There can't be more than 2 previous rollups. Received ${previousRollupData.length}.`);
290
- }
291
-
292
- const outHash =
293
- previousRollupData.length === 0
294
- ? Fr.ZERO
295
- : previousRollupData.length === 1
296
- ? previousRollupData[0].outHash
297
- : sha256ToField([previousRollupData[0].outHash, previousRollupData[1].outHash]);
298
- const contentCommitment = new ContentCommitment(blobsHash, parityPublicInputs.shaRoot, outHash);
279
+ async (_span, blockRootRollupOutput: BlockRollupPublicInputs) => {
280
+ const constants = blockRootRollupOutput.constants;
281
+ const globalVariables = GlobalVariables.from({
282
+ chainId: constants.chainId,
283
+ version: constants.version,
284
+ blockNumber: BlockNumber(blockRootRollupOutput.previousArchive.nextAvailableLeafIndex),
285
+ timestamp: blockRootRollupOutput.timestamp,
286
+ slotNumber: constants.slotNumber,
287
+ coinbase: constants.coinbase,
288
+ feeRecipient: constants.feeRecipient,
289
+ gasFees: constants.gasFees,
290
+ });
299
291
 
300
- const accumulatedFees = previousRollupData.reduce((sum, d) => sum.add(d.accumulatedFees), Fr.ZERO);
301
- const accumulatedManaUsed = previousRollupData.reduce((sum, d) => sum.add(d.accumulatedManaUsed), Fr.ZERO);
292
+ const spongeBlobHash = await blockRootRollupOutput.endSpongeBlob.clone().squeeze();
302
293
 
303
294
  return new BlockHeader(
304
- rootRollupOutputs.previousArchive,
305
- contentCommitment,
306
- endState,
307
- rootRollupOutputs.endGlobalVariables,
308
- accumulatedFees,
309
- accumulatedManaUsed,
295
+ blockRootRollupOutput.previousArchive,
296
+ blockRootRollupOutput.endState,
297
+ spongeBlobHash,
298
+ globalVariables,
299
+ blockRootRollupOutput.accumulatedFees,
300
+ blockRootRollupOutput.accumulatedManaUsed,
310
301
  );
311
302
  },
312
303
  );
@@ -317,95 +308,57 @@ export const buildHeaderAndBodyFromTxs = runInSpan(
317
308
  async (
318
309
  span,
319
310
  txs: ProcessedTx[],
311
+ lastArchive: AppendOnlyTreeSnapshot,
312
+ endState: StateReference,
320
313
  globalVariables: GlobalVariables,
321
- l1ToL2Messages: Fr[],
322
- db: MerkleTreeReadOperations,
314
+ startSpongeBlob: SpongeBlob,
315
+ isFirstBlock: boolean,
323
316
  ) => {
324
317
  span.setAttribute(Attributes.BLOCK_NUMBER, globalVariables.blockNumber);
325
- const stateReference = new StateReference(
326
- await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db),
327
- new PartialStateReference(
328
- await getTreeSnapshot(MerkleTreeId.NOTE_HASH_TREE, db),
329
- await getTreeSnapshot(MerkleTreeId.NULLIFIER_TREE, db),
330
- await getTreeSnapshot(MerkleTreeId.PUBLIC_DATA_TREE, db),
331
- ),
332
- );
333
-
334
- const previousArchive = await getTreeSnapshot(MerkleTreeId.ARCHIVE, db);
335
318
 
336
319
  const txEffects = txs.map(tx => tx.txEffect);
337
320
  const body = new Body(txEffects);
338
321
 
339
- const txOutHashes = txEffects.map(tx => tx.txOutHash());
340
- const outHash = txOutHashes.length === 0 ? Fr.ZERO : new Fr(computeUnbalancedMerkleTreeRoot(txOutHashes));
322
+ const totalFees = txEffects.reduce((acc, tx) => acc.add(tx.transactionFee), Fr.ZERO);
323
+ const totalManaUsed = txs.reduce((acc, tx) => acc.add(new Fr(tx.gasUsed.billedGas.l2Gas)), Fr.ZERO);
341
324
 
342
- const parityShaRoot = await computeInHashFromL1ToL2Messages(l1ToL2Messages);
343
- const blobsHash = getBlobsHashFromBlobs(await Blob.getBlobsPerBlock(body.toBlobFields()));
325
+ const { l1ToL2MessageTree, partial } = endState;
344
326
 
345
- const contentCommitment = new ContentCommitment(blobsHash, parityShaRoot, outHash);
346
-
347
- const fees = txEffects.reduce((acc, tx) => acc.add(tx.transactionFee), Fr.ZERO);
348
- const manaUsed = txs.reduce((acc, tx) => acc.add(new Fr(tx.gasUsed.billedGas.l2Gas)), Fr.ZERO);
349
-
350
- const header = new BlockHeader(previousArchive, contentCommitment, stateReference, globalVariables, fees, manaUsed);
351
-
352
- return { header, body };
353
- },
354
- );
355
-
356
- /** Computes the inHash for a block's ContentCommitment given its l1 to l2 messages. */
357
- export async function computeInHashFromL1ToL2Messages(unpaddedL1ToL2Messages: Fr[]): Promise<Fr> {
358
- const l1ToL2Messages = padArrayEnd(unpaddedL1ToL2Messages, Fr.ZERO, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP);
359
- const hasher = (left: Buffer, right: Buffer) =>
360
- Promise.resolve(sha256Trunc(Buffer.concat([left, right])) as Buffer<ArrayBuffer>);
361
- const parityHeight = Math.ceil(Math.log2(NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP));
362
- const parityCalculator = await MerkleTreeCalculator.create(parityHeight, Fr.ZERO.toBuffer(), hasher);
363
- return new Fr(await parityCalculator.computeTreeRoot(l1ToL2Messages.map(msg => msg.toBuffer())));
364
- }
365
-
366
- export function getBlobsHashFromBlobs(inputs: Blob[]): Fr {
367
- return sha256ToField(inputs.map(b => b.getEthVersionedBlobHash()));
368
- }
369
-
370
- // Note: tested against the constant values in block_root/empty_block_root_rollup_inputs.nr, set by block_building_helpers.test.ts.
371
- // Having this separate fn hopefully makes it clear how we treat empty blocks and their blobs, and won't break if we decide to change how
372
- // getBlobsPerBlock() works on empty input.
373
- export async function getEmptyBlockBlobsHash(): Promise<Fr> {
374
- const blobHash = (await Blob.getBlobsPerBlock([])).map(b => b.getEthVersionedBlobHash());
375
- return sha256ToField(blobHash);
376
- }
327
+ const blockBlobFields = encodeBlockBlobData({
328
+ blockEndMarker: {
329
+ timestamp: globalVariables.timestamp,
330
+ blockNumber: globalVariables.blockNumber,
331
+ numTxs: txs.length,
332
+ },
333
+ blockEndStateField: {
334
+ l1ToL2MessageNextAvailableLeafIndex: l1ToL2MessageTree.nextAvailableLeafIndex,
335
+ noteHashNextAvailableLeafIndex: partial.noteHashTree.nextAvailableLeafIndex,
336
+ nullifierNextAvailableLeafIndex: partial.nullifierTree.nextAvailableLeafIndex,
337
+ publicDataNextAvailableLeafIndex: partial.publicDataTree.nextAvailableLeafIndex,
338
+ totalManaUsed: totalManaUsed.toBigInt(),
339
+ },
340
+ lastArchiveRoot: lastArchive.root,
341
+ noteHashRoot: partial.noteHashTree.root,
342
+ nullifierRoot: partial.nullifierTree.root,
343
+ publicDataRoot: partial.publicDataTree.root,
344
+ l1ToL2MessageRoot: isFirstBlock ? l1ToL2MessageTree.root : undefined,
345
+ txs: body.toTxBlobData(),
346
+ });
377
347
 
378
- // Validate that the roots of all local trees match the output of the root circuit simulation
379
- // TODO: does this get called?
380
- export async function validateBlockRootOutput(
381
- blockRootOutput: BlockRootOrBlockMergePublicInputs,
382
- blockHeader: BlockHeader,
383
- db: MerkleTreeReadOperations,
384
- ) {
385
- await Promise.all([
386
- validateState(blockHeader.state, db),
387
- validateSimulatedTree(await getTreeSnapshot(MerkleTreeId.ARCHIVE, db), blockRootOutput.newArchive, 'Archive'),
388
- ]);
389
- }
348
+ const endSpongeBlob = startSpongeBlob.clone();
349
+ await endSpongeBlob.absorb(blockBlobFields);
350
+ const spongeBlobHash = await endSpongeBlob.squeeze();
351
+
352
+ const header = BlockHeader.from({
353
+ lastArchive,
354
+ state: endState,
355
+ spongeBlobHash,
356
+ globalVariables,
357
+ totalFees,
358
+ totalManaUsed,
359
+ });
390
360
 
391
- export const validateState = runInSpan(
392
- 'BlockBuilderHelpers',
393
- 'validateState',
394
- async (_span, state: StateReference, db: MerkleTreeReadOperations) => {
395
- const promises = [MerkleTreeId.NOTE_HASH_TREE, MerkleTreeId.NULLIFIER_TREE, MerkleTreeId.PUBLIC_DATA_TREE].map(
396
- async (id: MerkleTreeId) => {
397
- return { key: id, value: await getTreeSnapshot(id, db) };
398
- },
399
- );
400
- const snapshots: Map<MerkleTreeId, AppendOnlyTreeSnapshot> = new Map(
401
- (await Promise.all(promises)).map(obj => [obj.key, obj.value]),
402
- );
403
- validatePartialState(state.partial, snapshots);
404
- validateSimulatedTree(
405
- await getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGE_TREE, db),
406
- state.l1ToL2MessageTree,
407
- 'L1ToL2MessageTree',
408
- );
361
+ return { header, body, blockBlobFields };
409
362
  },
410
363
  );
411
364
 
@@ -434,48 +387,6 @@ export function makeEmptyMembershipWitness<N extends number>(height: N) {
434
387
  );
435
388
  }
436
389
 
437
- const processPublicDataUpdateRequests = runInSpan(
438
- 'BlockBuilderHelpers',
439
- 'processPublicDataUpdateRequests',
440
- async (span, tx: ProcessedTx, db: MerkleTreeWriteOperations) => {
441
- span.setAttribute(Attributes.TX_HASH, tx.hash.toString());
442
- const allPublicDataWrites = tx.txEffect.publicDataWrites.map(
443
- ({ leafSlot, value }) => new PublicDataTreeLeaf(leafSlot, value),
444
- );
445
-
446
- const { lowLeavesWitnessData, insertionWitnessData } = await db.sequentialInsert(
447
- MerkleTreeId.PUBLIC_DATA_TREE,
448
- allPublicDataWrites.map(write => {
449
- if (write.isEmpty()) {
450
- throw new Error(`Empty public data write in tx: ${toFriendlyJSON(tx)}`);
451
- }
452
- return write.toBuffer();
453
- }),
454
- );
455
-
456
- const lowPublicDataWritesPreimages = lowLeavesWitnessData.map(
457
- lowLeafWitness => lowLeafWitness.leafPreimage as PublicDataTreeLeafPreimage,
458
- );
459
- const lowPublicDataWritesMembershipWitnesses = lowLeavesWitnessData.map(lowLeafWitness =>
460
- MembershipWitness.fromBufferArray<typeof PUBLIC_DATA_TREE_HEIGHT>(
461
- lowLeafWitness.index,
462
- assertLength(lowLeafWitness.siblingPath.toBufferArray(), PUBLIC_DATA_TREE_HEIGHT),
463
- ),
464
- );
465
- const publicDataWritesSiblingPaths = insertionWitnessData.map(w => {
466
- const insertionSiblingPath = w.siblingPath.toFields();
467
- assertLength(insertionSiblingPath, PUBLIC_DATA_TREE_HEIGHT);
468
- return insertionSiblingPath as Tuple<Fr, typeof PUBLIC_DATA_TREE_HEIGHT>;
469
- });
470
-
471
- return {
472
- lowPublicDataWritesPreimages,
473
- lowPublicDataWritesMembershipWitnesses,
474
- publicDataWritesSiblingPaths,
475
- };
476
- },
477
- );
478
-
479
390
  export async function getSubtreeSiblingPath(
480
391
  treeId: MerkleTreeId,
481
392
  subtreeHeight: number,
@@ -541,7 +452,7 @@ function validateSimulatedTree(
541
452
  }
542
453
 
543
454
  export function validateTx(tx: ProcessedTx) {
544
- const txHeader = tx.data.constants.historicalHeader;
455
+ const txHeader = tx.data.constants.anchorBlockHeader;
545
456
  if (txHeader.state.l1ToL2MessageTree.isEmpty()) {
546
457
  throw new Error(`Empty L1 to L2 messages tree in tx: ${toFriendlyJSON(tx)}`);
547
458
  }
@@ -555,3 +466,12 @@ export function validateTx(tx: ProcessedTx) {
555
466
  throw new Error(`Empty public data tree in tx: ${toFriendlyJSON(tx)}`);
556
467
  }
557
468
  }
469
+
470
+ export function toProofData<T extends Bufferable, PROOF_LENGTH extends number>(
471
+ { inputs, proof, verificationKey }: PublicInputsAndRecursiveProof<T, PROOF_LENGTH>,
472
+ vkIndex?: number,
473
+ ) {
474
+ const leafIndex = vkIndex || getVKIndex(verificationKey.keyAsFields);
475
+ const vkData = new VkData(verificationKey, leafIndex, getVKSiblingPath(leafIndex));
476
+ return new ProofData(inputs, proof, vkData);
477
+ }