@aztec/blob-lib 0.0.1-fake-c83136db25 → 0.0.1-fake-ceab37513c

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/dest/blob.d.ts +98 -52
  2. package/dest/blob.d.ts.map +1 -1
  3. package/dest/blob.js +167 -73
  4. package/dest/blob_batching.d.ts +48 -15
  5. package/dest/blob_batching.d.ts.map +1 -1
  6. package/dest/blob_batching.js +120 -81
  7. package/dest/blob_batching_public_inputs.d.ts +71 -0
  8. package/dest/blob_batching_public_inputs.d.ts.map +1 -0
  9. package/dest/blob_batching_public_inputs.js +168 -0
  10. package/dest/encoding.d.ts +62 -22
  11. package/dest/encoding.d.ts.map +1 -1
  12. package/dest/encoding.js +104 -114
  13. package/dest/index.d.ts +2 -5
  14. package/dest/index.d.ts.map +1 -1
  15. package/dest/index.js +15 -5
  16. package/dest/sponge_blob.d.ts +9 -13
  17. package/dest/sponge_blob.d.ts.map +1 -1
  18. package/dest/sponge_blob.js +17 -28
  19. package/dest/testing.d.ts +12 -7
  20. package/dest/testing.d.ts.map +1 -1
  21. package/dest/testing.js +41 -54
  22. package/dest/types.d.ts +0 -2
  23. package/dest/types.d.ts.map +1 -1
  24. package/dest/types.js +0 -2
  25. package/package.json +4 -5
  26. package/src/blob.ts +198 -76
  27. package/src/blob_batching.ts +137 -109
  28. package/src/blob_batching_public_inputs.ts +252 -0
  29. package/src/encoding.ts +120 -136
  30. package/src/index.ts +18 -5
  31. package/src/sponge_blob.ts +14 -24
  32. package/src/testing.ts +40 -55
  33. package/src/types.ts +2 -2
  34. package/dest/blob_utils.d.ts +0 -30
  35. package/dest/blob_utils.d.ts.map +0 -1
  36. package/dest/blob_utils.js +0 -60
  37. package/dest/circuit_types/blob_accumulator.d.ts +0 -21
  38. package/dest/circuit_types/blob_accumulator.d.ts.map +0 -1
  39. package/dest/circuit_types/blob_accumulator.js +0 -58
  40. package/dest/circuit_types/final_blob_accumulator.d.ts +0 -22
  41. package/dest/circuit_types/final_blob_accumulator.d.ts.map +0 -1
  42. package/dest/circuit_types/final_blob_accumulator.js +0 -63
  43. package/dest/circuit_types/final_blob_batching_challenges.d.ts +0 -15
  44. package/dest/circuit_types/final_blob_batching_challenges.d.ts.map +0 -1
  45. package/dest/circuit_types/final_blob_batching_challenges.js +0 -25
  46. package/dest/circuit_types/index.d.ts +0 -4
  47. package/dest/circuit_types/index.d.ts.map +0 -1
  48. package/dest/circuit_types/index.js +0 -4
  49. package/dest/deserialize.d.ts +0 -14
  50. package/dest/deserialize.d.ts.map +0 -1
  51. package/dest/deserialize.js +0 -33
  52. package/dest/hash.d.ts +0 -35
  53. package/dest/hash.d.ts.map +0 -1
  54. package/dest/hash.js +0 -69
  55. package/dest/kzg_context.d.ts +0 -4
  56. package/dest/kzg_context.d.ts.map +0 -1
  57. package/dest/kzg_context.js +0 -5
  58. package/src/blob_utils.ts +0 -71
  59. package/src/circuit_types/blob_accumulator.ts +0 -84
  60. package/src/circuit_types/final_blob_accumulator.ts +0 -75
  61. package/src/circuit_types/final_blob_batching_challenges.ts +0 -29
  62. package/src/circuit_types/index.ts +0 -4
  63. package/src/deserialize.ts +0 -38
  64. package/src/hash.ts +0 -77
  65. package/src/kzg_context.ts +0 -5
package/src/blob_batching.ts
@@ -1,12 +1,14 @@
  import { AZTEC_MAX_EPOCH_DURATION, BLOBS_PER_BLOCK } from '@aztec/constants';
- import { poseidon2Hash, sha256ToField } from '@aztec/foundation/crypto';
- import { BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
+ import { poseidon2Hash, sha256, sha256ToField } from '@aztec/foundation/crypto';
+ import { BLS12Field, BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
+ import { BufferReader, serializeToBuffer } from '@aztec/foundation/serialize';

- import { Blob } from './blob.js';
- import { computeBlobFieldsHashFromBlobs } from './blob_utils.js';
- import { BlobAccumulator, FinalBlobAccumulator, FinalBlobBatchingChallenges } from './circuit_types/index.js';
- import { computeEthVersionedBlobHash, hashNoirBigNumLimbs } from './hash.js';
- import { kzg } from './kzg_context.js';
+ // Importing directly from 'c-kzg' does not work:
+ import cKzg from 'c-kzg';
+
+ import { Blob, VERSIONED_HASH_VERSION_KZG } from './blob.js';
+
+ const { computeKzgProof, verifyKzgProof } = cKzg;

  /**
   * A class to create, manage, and prove batched EVM blobs.
@@ -32,19 +34,17 @@ export class BatchedBlob
   *
   * @returns A batched blob.
   */
- static async batch(blobs: Blob[][]): Promise<BatchedBlob> {
- if (blobs.length > AZTEC_MAX_EPOCH_DURATION) {
+ static async batch(blobs: Blob[]): Promise<BatchedBlob> {
+ const numBlobs = blobs.length;
+ if (numBlobs > BLOBS_PER_BLOCK * AZTEC_MAX_EPOCH_DURATION) {
  throw new Error(
- `Too many blocks sent to batch(). The maximum is ${AZTEC_MAX_EPOCH_DURATION}. Got ${blobs.length}.`,
+ `Too many blobs (${numBlobs}) sent to batch(). The maximum is ${BLOBS_PER_BLOCK * AZTEC_MAX_EPOCH_DURATION}.`,
  );
  }
-
  // Precalculate the values (z and gamma) and initialize the accumulator:
  let acc = await this.newAccumulator(blobs);
  // Now we can create a multi opening proof of all input blobs:
- for (const blockBlobs of blobs) {
- acc = await acc.accumulateBlobs(blockBlobs);
- }
+ acc = await acc.accumulateBlobs(blobs);
  return await acc.finalize();
  }

@@ -53,7 +53,7 @@ export class BatchedBlob
   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
   * beforehand from ALL blobs.
   */
- static async newAccumulator(blobs: Blob[][]): Promise<BatchedBlobAccumulator> {
+ static async newAccumulator(blobs: Blob[]): Promise<BatchedBlobAccumulator> {
  const finalBlobChallenges = await this.precomputeBatchedBlobChallenges(blobs);
  return BatchedBlobAccumulator.newWithChallenges(finalBlobChallenges);
  }
@@ -66,52 +66,54 @@ export class BatchedBlob
   * - used such that p_i(z) = y_i = Blob.evaluationY for all n blob polynomials p_i().
   * - gamma = H(H(...H(H(y_0, y_1) y_2)..y_n), z)
   * - used such that y = sum_i { gamma^i * y_i }, and C = sum_i { gamma^i * C_i }, for all blob evaluations y_i (see above) and commitments C_i.
- *
- * @param blobs - The blobs to precompute the challenges for. Each sub-array is the blobs for an L1 block.
   * @returns Challenges z and gamma.
   */
- static async precomputeBatchedBlobChallenges(blobs: Blob[][]): Promise<FinalBlobBatchingChallenges> {
- // Compute the final challenge z to evaluate the blobs.
- let z: Fr | undefined;
- for (const blockBlobs of blobs) {
- // Compute the hash of all the fields in the block.
- const blobFieldsHash = await computeBlobFieldsHashFromBlobs(blockBlobs);
- for (const blob of blockBlobs) {
- // Compute the challenge z for each blob and accumulate it.
- const challengeZ = await blob.computeChallengeZ(blobFieldsHash);
- if (!z) {
- z = challengeZ;
- } else {
- z = await poseidon2Hash([z, challengeZ]);
- }
- }
- }
- if (!z) {
- throw new Error('No blobs to precompute challenges for.');
+ static async precomputeBatchedBlobChallenges(blobs: Blob[]): Promise<FinalBlobBatchingChallenges> {
+ // We need to precompute the final challenge values to evaluate the blobs.
+ let z = blobs[0].challengeZ;
+ // We start at i = 1, because z is initialized as the first blob's challenge.
+ for (let i = 1; i < blobs.length; i++) {
+ z = await poseidon2Hash([z, blobs[i].challengeZ]);
  }
-
  // Now we have a shared challenge for all blobs, evaluate them...
- const allBlobs = blobs.flat();
- const proofObjects = allBlobs.map(b => b.evaluate(z));
- const evaluations = await Promise.all(proofObjects.map(({ y }) => hashNoirBigNumLimbs(y)));
+ const proofObjects = blobs.map(b => computeKzgProof(b.data, z.toBuffer()));
+ const evaluations = proofObjects.map(([_, evaluation]) => BLS12Fr.fromBuffer(Buffer.from(evaluation)));
  // ...and find the challenge for the linear combination of blobs.
- let gamma = evaluations[0];
+ let gamma = await hashNoirBigNumLimbs(evaluations[0]);
  // We start at i = 1, because gamma is initialized as the first blob's evaluation.
- for (let i = 1; i < allBlobs.length; i++) {
- gamma = await poseidon2Hash([gamma, evaluations[i]]);
+ for (let i = 1; i < blobs.length; i++) {
+ gamma = await poseidon2Hash([gamma, await hashNoirBigNumLimbs(evaluations[i])]);
  }
  gamma = await poseidon2Hash([gamma, z]);

  return new FinalBlobBatchingChallenges(z, BLS12Fr.fromBN254Fr(gamma));
  }

- verify() {
- return kzg.verifyKzgProof(this.commitment.compress(), this.z.toBuffer(), this.y.toBuffer(), this.q.compress());
+ static async precomputeEmptyBatchedBlobChallenges(): Promise<FinalBlobBatchingChallenges> {
+ const blobs = [await Blob.fromFields([])];
+ // We need to precompute the final challenge values to evaluate the blobs.
+ const z = blobs[0].challengeZ;
+ // Now we have a shared challenge for all blobs, evaluate them...
+ const proofObjects = blobs.map(b => computeKzgProof(b.data, z.toBuffer()));
+ const evaluations = proofObjects.map(([_, evaluation]) => BLS12Fr.fromBuffer(Buffer.from(evaluation)));
+ // ...and find the challenge for the linear combination of blobs.
+ let gamma = await hashNoirBigNumLimbs(evaluations[0]);
+ gamma = await poseidon2Hash([gamma, z]);
+
+ return new FinalBlobBatchingChallenges(z, BLS12Fr.fromBN254Fr(gamma));
  }

  // Returns ethereum's versioned blob hash, following kzg_to_versioned_hash: https://eips.ethereum.org/EIPS/eip-4844#helpers
  getEthVersionedBlobHash(): Buffer {
- return computeEthVersionedBlobHash(this.commitment.compress());
+ const hash = sha256(this.commitment.compress());
+ hash[0] = VERSIONED_HASH_VERSION_KZG;
+ return hash;
+ }
+
+ static getEthVersionedBlobHash(commitment: Buffer): Buffer {
+ const hash = sha256(commitment);
+ hash[0] = VERSIONED_HASH_VERSION_KZG;
+ return hash;
  }

  /**
@@ -135,14 +137,49 @@ export class BatchedBlob
  ]);
  return `0x${buf.toString('hex')}`;
  }
+ }
+
+ /**
+ * Final values z and gamma are injected into each block root circuit. We ensure they are correct by:
+ * - Checking equality in each block merge circuit and propagating up
+ * - Checking final z_acc == z in root circuit
+ * - Checking final gamma_acc == gamma in root circuit
+ *
+ * - z = H(...H(H(z_0, z_1) z_2)..z_n)
+ * - where z_i = H(H(fields of blob_i), C_i),
+ * - used such that p_i(z) = y_i = Blob.evaluationY for all n blob polynomials p_i().
+ * - gamma = H(H(...H(H(y_0, y_1) y_2)..y_n), z)
+ * - used such that y = sum_i { gamma^i * y_i }, and C = sum_i { gamma^i * C_i }
+ * for all blob evaluations y_i (see above) and commitments C_i.
+ *
+ * Iteratively calculated by BlobAccumulatorPublicInputs.accumulate() in nr. See also precomputeBatchedBlobChallenges() above.
+ */
+ export class FinalBlobBatchingChallenges {
+ constructor(
+ public readonly z: Fr,
+ public readonly gamma: BLS12Fr,
+ ) {}
+
+ equals(other: FinalBlobBatchingChallenges) {
+ return this.z.equals(other.z) && this.gamma.equals(other.gamma);
+ }
+
+ static empty(): FinalBlobBatchingChallenges {
+ return new FinalBlobBatchingChallenges(Fr.ZERO, BLS12Fr.ZERO);
+ }
+
+ static fromBuffer(buffer: Buffer | BufferReader): FinalBlobBatchingChallenges {
+ const reader = BufferReader.asReader(buffer);
+ return new FinalBlobBatchingChallenges(Fr.fromBuffer(reader), reader.readObject(BLS12Fr));
+ }

- toFinalBlobAccumulator() {
- return new FinalBlobAccumulator(this.blobCommitmentsHash, this.z, this.y, this.commitment);
+ toBuffer() {
+ return serializeToBuffer(this.z, this.gamma);
  }
  }

  /**
- * See noir-projects/noir-protocol-circuits/crates/blob/src/abis/blob_accumulator.nr
+ * See noir-projects/noir-protocol-circuits/crates/blob/src/blob_batching_public_inputs.nr -> BlobAccumulatorPublicInputs
  */
  export class BatchedBlobAccumulator {
  constructor(
@@ -168,6 +205,39 @@ export class BatchedBlobAccumulator {
  public readonly finalBlobChallenges: FinalBlobBatchingChallenges,
  ) {}

+ /**
+ * Init the first accumulation state of the epoch.
+ * We assume the input blob has not been evaluated at z.
+ *
+ * First state of the accumulator:
+ * - v_acc := sha256(C_0)
+ * - z_acc := z_0
+ * - y_acc := gamma^0 * y_0 = y_0
+ * - c_acc := gamma^0 * c_0 = c_0
+ * - gamma_acc := poseidon2(y_0.limbs)
+ * - gamma^(i + 1) = gamma^1 = gamma // denoted gamma_pow_acc
+ *
+ * @returns An initial blob accumulator.
+ */
+ static async initialize(
+ blob: Blob,
+ finalBlobChallenges: FinalBlobBatchingChallenges,
+ ): Promise<BatchedBlobAccumulator> {
+ const [q, evaluation] = computeKzgProof(blob.data, finalBlobChallenges.z.toBuffer());
+ const firstY = BLS12Fr.fromBuffer(Buffer.from(evaluation));
+ // Here, i = 0, so:
+ return new BatchedBlobAccumulator(
+ sha256ToField([blob.commitment]), // blobCommitmentsHashAcc = sha256(C_0)
+ blob.challengeZ, // zAcc = z_0
+ firstY, // yAcc = gamma^0 * y_0 = 1 * y_0
+ BLS12Point.decompress(blob.commitment), // cAcc = gamma^0 * C_0 = 1 * C_0
+ BLS12Point.decompress(Buffer.from(q)), // qAcc = gamma^0 * Q_0 = 1 * Q_0
+ await hashNoirBigNumLimbs(firstY), // gammaAcc = poseidon2(y_0.limbs)
+ finalBlobChallenges.gamma, // gammaPow = gamma^(i + 1) = gamma^1 = gamma
+ finalBlobChallenges,
+ );
+ }
+
  /**
   * Create the empty accumulation state of the epoch.
   * @returns An empty blob accumulator with challenges.
@@ -190,40 +260,20 @@ export class BatchedBlobAccumulator {
   * We assume the input blob has not been evaluated at z.
   * @returns An updated blob accumulator.
   */
- private async accumulate(blob: Blob, blobFieldsHash: Fr) {
- const { proof, y: thisY } = blob.evaluate(this.finalBlobChallenges.z);
- const thisC = BLS12Point.decompress(blob.commitment);
- const thisQ = BLS12Point.decompress(proof);
- const blobChallengeZ = await blob.computeChallengeZ(blobFieldsHash);
-
+ async accumulate(blob: Blob) {
  if (this.isEmptyState()) {
- /**
- * Init the first accumulation state of the epoch.
- * - v_acc := sha256(C_0)
- * - z_acc := z_0
- * - y_acc := gamma^0 * y_0 = y_0
- * - c_acc := gamma^0 * c_0 = c_0
- * - gamma_acc := poseidon2(y_0.limbs)
- * - gamma^(i + 1) = gamma^1 = gamma // denoted gamma_pow_acc
- */
- return new BatchedBlobAccumulator(
- sha256ToField([blob.commitment]), // blobCommitmentsHashAcc = sha256(C_0)
- blobChallengeZ, // zAcc = z_0
- thisY, // yAcc = gamma^0 * y_0 = 1 * y_0
- thisC, // cAcc = gamma^0 * C_0 = 1 * C_0
- thisQ, // qAcc = gamma^0 * Q_0 = 1 * Q_0
- await hashNoirBigNumLimbs(thisY), // gammaAcc = poseidon2(y_0.limbs)
- this.finalBlobChallenges.gamma, // gammaPow = gamma^(i + 1) = gamma^1 = gamma
- this.finalBlobChallenges,
- );
+ return BatchedBlobAccumulator.initialize(blob, this.finalBlobChallenges);
  } else {
+ const [q, evaluation] = computeKzgProof(blob.data, this.finalBlobChallenges.z.toBuffer());
+ const thisY = BLS12Fr.fromBuffer(Buffer.from(evaluation));
+
  // Moving from i - 1 to i, so:
  return new BatchedBlobAccumulator(
  sha256ToField([this.blobCommitmentsHashAcc, blob.commitment]), // blobCommitmentsHashAcc := sha256(blobCommitmentsHashAcc, C_i)
- await poseidon2Hash([this.zAcc, blobChallengeZ]), // zAcc := poseidon2(zAcc, z_i)
+ await poseidon2Hash([this.zAcc, blob.challengeZ]), // zAcc := poseidon2(zAcc, z_i)
  this.yAcc.add(thisY.mul(this.gammaPow)), // yAcc := yAcc + (gamma^i * y_i)
- this.cAcc.add(thisC.mul(this.gammaPow)), // cAcc := cAcc + (gamma^i * C_i)
- this.qAcc.add(thisQ.mul(this.gammaPow)), // qAcc := qAcc + (gamma^i * C_i)
+ this.cAcc.add(BLS12Point.decompress(blob.commitment).mul(this.gammaPow)), // cAcc := cAcc + (gamma^i * C_i)
+ this.qAcc.add(BLS12Point.decompress(Buffer.from(q)).mul(this.gammaPow)), // qAcc := qAcc + (gamma^i * C_i)
  await poseidon2Hash([this.gammaAcc, await hashNoirBigNumLimbs(thisY)]), // gammaAcc := poseidon2(gammaAcc, poseidon2(y_i.limbs))
  this.gammaPow.mul(this.finalBlobChallenges.gamma), // gammaPow = gamma^(i + 1) = gamma^i * final_gamma
  this.finalBlobChallenges,
@@ -234,23 +284,13 @@ export class BatchedBlobAccumulator {
  /**
   * Given blobs, accumulate all state.
   * We assume the input blobs have not been evaluated at z.
- * @param blobs - The blobs to accumulate. They should be in the same L1 block.
   * @returns An updated blob accumulator.
   */
  async accumulateBlobs(blobs: Blob[]) {
- if (blobs.length > BLOBS_PER_BLOCK) {
- throw new Error(
- `Too many blobs to accumulate. The maximum is ${BLOBS_PER_BLOCK} per block. Got ${blobs.length}.`,
- );
- }
-
- // Compute the hash of all the fields in the block.
- const blobFieldsHash = await computeBlobFieldsHashFromBlobs(blobs);
-
  // Initialize the acc to iterate over:
  let acc: BatchedBlobAccumulator = this.clone();
- for (const blob of blobs) {
- acc = await acc.accumulate(blob, blobFieldsHash);
+ for (let i = 0; i < blobs.length; i++) {
+ acc = await acc.accumulate(blobs[i]);
  }
  return acc;
  }
@@ -266,10 +306,9 @@ export class BatchedBlobAccumulator {
   * - c := c_acc (final commitment to be checked on L1)
   * - gamma := poseidon2(gamma_acc, z) (challenge for linear combination of y and C, above)
   *
- * @param verifyProof - Whether to verify the KZG proof.
   * @returns A batched blob.
   */
- async finalize(verifyProof = false): Promise<BatchedBlob> {
+ async finalize(): Promise<BatchedBlob> {
  // All values in acc are final, apart from gamma := poseidon2(gammaAcc, z):
  const calculatedGamma = await poseidon2Hash([this.gammaAcc, this.zAcc]);
  // Check final values:
@@ -283,14 +322,11 @@ export class BatchedBlobAccumulator {
  `Blob batching mismatch: accumulated gamma ${calculatedGamma} does not equal injected gamma ${this.finalBlobChallenges.gamma.toBN254Fr()}`,
  );
  }
-
- const batchedBlob = new BatchedBlob(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc, this.qAcc);
-
- if (verifyProof && !batchedBlob.verify()) {
+ if (!verifyKzgProof(this.cAcc.compress(), this.zAcc.toBuffer(), this.yAcc.toBuffer(), this.qAcc.compress())) {
  throw new Error(`KZG proof did not verify.`);
  }

- return batchedBlob;
+ return new BatchedBlob(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc, this.qAcc);
  }

  isEmptyState() {
@@ -317,19 +353,11 @@ export class BatchedBlobAccumulator {
  FinalBlobBatchingChallenges.fromBuffer(this.finalBlobChallenges.toBuffer()),
  );
  }
+ }

- toBlobAccumulator() {
- return new BlobAccumulator(
- this.blobCommitmentsHashAcc,
- this.zAcc,
- this.yAcc,
- this.cAcc,
- this.gammaAcc,
- this.gammaPow,
- );
- }
-
- toFinalBlobAccumulator() {
- return new FinalBlobAccumulator(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc);
- }
+ // To mimic the hash accumulation in the rollup circuits, here we hash
+ // each u128 limb of the noir bignum struct representing the BLS field.
+ async function hashNoirBigNumLimbs(field: BLS12Field): Promise<Fr> {
+ const num = field.toNoirBigNum();
+ return await poseidon2Hash(num.limbs.map(Fr.fromHexString));
  }
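
The central API change above is that BatchedBlob.batch() and BatchedBlob.newAccumulator() now take a flat Blob[] covering the whole epoch instead of a Blob[][] grouped per L1 block, and each blob carries its own challengeZ. A minimal caller-side sketch of the new flow (illustrative only, not from the package; it assumes Blob, BatchedBlob and BatchedBlobAccumulator are re-exported from the package root, which this diff does not show):

import { BatchedBlob, Blob } from '@aztec/blob-lib';

// Callers that previously grouped blobs per L1 block (Blob[][]) now pass one flat
// array for the whole epoch; z and gamma are derived from ALL blobs up front.
async function batchEpoch(blobsPerBlock: Blob[][]): Promise<BatchedBlob> {
  const blobs = blobsPerBlock.flat();
  // Equivalent to BatchedBlob.batch(blobs), spelled out to show the accumulator flow:
  let acc = await BatchedBlob.newAccumulator(blobs); // precomputes the final z and gamma
  acc = await acc.accumulateBlobs(blobs); // folds each blob into the running accumulator
  return await acc.finalize(); // re-derives gamma, checks it, and verifies the batched KZG proof
}

Note that the per-block BLOBS_PER_BLOCK cap on accumulateBlobs() is gone; the only size check left is the epoch-wide BLOBS_PER_BLOCK * AZTEC_MAX_EPOCH_DURATION limit in batch().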
package/src/blob_batching_public_inputs.ts
@@ -0,0 +1,252 @@
+ import { BLS12_FQ_LIMBS, BLS12_FR_LIMBS } from '@aztec/constants';
+ import { BLS12Fq, BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
+ import { BufferReader, FieldReader, serializeToBuffer } from '@aztec/foundation/serialize';
+
+ import { inspect } from 'util';
+
+ import { Blob } from './blob.js';
+ import { BatchedBlob, BatchedBlobAccumulator, FinalBlobBatchingChallenges } from './blob_batching.js';
+
+ /**
+ * See nr BlobAccumulatorPublicInputs and ts BatchedBlobAccumulator for documentation.
+ */
+ export class BlobAccumulatorPublicInputs {
+ constructor(
+ public blobCommitmentsHashAcc: Fr,
+ public zAcc: Fr,
+ public yAcc: BLS12Fr,
+ public cAcc: BLS12Point,
+ public gammaAcc: Fr,
+ public gammaPowAcc: BLS12Fr,
+ ) {}
+
+ static empty(): BlobAccumulatorPublicInputs {
+ return new BlobAccumulatorPublicInputs(Fr.ZERO, Fr.ZERO, BLS12Fr.ZERO, BLS12Point.ZERO, Fr.ZERO, BLS12Fr.ZERO);
+ }
+
+ equals(other: BlobAccumulatorPublicInputs) {
+ return (
+ this.blobCommitmentsHashAcc.equals(other.blobCommitmentsHashAcc) &&
+ this.zAcc.equals(other.zAcc) &&
+ this.yAcc.equals(other.yAcc) &&
+ this.cAcc.equals(other.cAcc) &&
+ this.gammaAcc.equals(other.gammaAcc) &&
+ this.gammaPowAcc.equals(other.gammaPowAcc)
+ );
+ }
+
+ static fromBuffer(buffer: Buffer | BufferReader): BlobAccumulatorPublicInputs {
+ const reader = BufferReader.asReader(buffer);
+ return new BlobAccumulatorPublicInputs(
+ Fr.fromBuffer(reader),
+ Fr.fromBuffer(reader),
+ BLS12Fr.fromBuffer(reader),
+ BLS12Point.fromBuffer(reader),
+ Fr.fromBuffer(reader),
+ BLS12Fr.fromBuffer(reader),
+ );
+ }
+
+ toBuffer() {
+ return serializeToBuffer(
+ this.blobCommitmentsHashAcc,
+ this.zAcc,
+ this.yAcc,
+ this.cAcc,
+ this.gammaAcc,
+ this.gammaPowAcc,
+ );
+ }
+
+ /**
+ * Given blobs, accumulate all public inputs state.
+ * We assume the input blobs have not been evaluated at z.
+ * NOTE: Does NOT accumulate non circuit values including Q. This exists to simulate/check exactly what the circuit is doing
+ * and is unsafe for other use. For that reason, a toBatchedBlobAccumulator does not exist. See evaluateBlobs() oracle for usage.
+ * @returns An updated blob accumulator.
+ */
+ async accumulateBlobs(blobs: Blob[], finalBlobChallenges: FinalBlobBatchingChallenges) {
+ let acc = new BatchedBlobAccumulator(
+ this.blobCommitmentsHashAcc,
+ this.zAcc,
+ this.yAcc,
+ this.cAcc,
+ BLS12Point.ZERO,
+ this.gammaAcc,
+ this.gammaPowAcc,
+ finalBlobChallenges,
+ );
+ acc = await acc.accumulateBlobs(blobs);
+ return new BlobAccumulatorPublicInputs(
+ acc.blobCommitmentsHashAcc,
+ acc.zAcc,
+ acc.yAcc,
+ acc.cAcc,
+ acc.gammaAcc,
+ acc.gammaPow,
+ );
+ }
+
+ toFields() {
+ return [
+ this.blobCommitmentsHashAcc,
+ this.zAcc,
+ ...this.yAcc.toNoirBigNum().limbs.map(Fr.fromString),
+ ...this.cAcc.x.toNoirBigNum().limbs.map(Fr.fromString),
+ ...this.cAcc.y.toNoirBigNum().limbs.map(Fr.fromString),
+ new Fr(this.cAcc.isInfinite),
+ this.gammaAcc,
+ ...this.gammaPowAcc.toNoirBigNum().limbs.map(Fr.fromString),
+ ];
+ }
+
+ static fromFields(fields: Fr[] | FieldReader): BlobAccumulatorPublicInputs {
+ const reader = FieldReader.asReader(fields);
+ return new BlobAccumulatorPublicInputs(
+ reader.readField(),
+ reader.readField(),
+ BLS12Fr.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FR_LIMBS).map(f => f.toString()) }),
+ new BLS12Point(
+ BLS12Fq.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FQ_LIMBS).map(f => f.toString()) }),
+ BLS12Fq.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FQ_LIMBS).map(f => f.toString()) }),
+ reader.readBoolean(),
+ ),
+ reader.readField(),
+ BLS12Fr.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FR_LIMBS).map(f => f.toString()) }),
+ );
+ }
+
+ /**
+ * Converts from an accumulator to a struct for the public inputs of our rollup circuits.
+ * @returns A BlobAccumulatorPublicInputs instance.
+ */
+ static fromBatchedBlobAccumulator(accumulator: BatchedBlobAccumulator) {
+ return new BlobAccumulatorPublicInputs(
+ accumulator.blobCommitmentsHashAcc,
+ accumulator.zAcc,
+ accumulator.yAcc,
+ accumulator.cAcc,
+ accumulator.gammaAcc,
+ accumulator.gammaPow,
+ );
+ }
+ }
+
+ /**
+ * See nr FinalBlobAccumulatorPublicInputs and ts BatchedBlobAccumulator for documentation.
+ */
+ export class FinalBlobAccumulatorPublicInputs {
+ constructor(
+ public blobCommitmentsHash: Fr,
+ public z: Fr,
+ public y: BLS12Fr,
+ public c: BLS12Point,
+ ) {}
+
+ static empty(): FinalBlobAccumulatorPublicInputs {
+ return new FinalBlobAccumulatorPublicInputs(Fr.ZERO, Fr.ZERO, BLS12Fr.ZERO, BLS12Point.ZERO);
+ }
+
+ static fromBuffer(buffer: Buffer | BufferReader): FinalBlobAccumulatorPublicInputs {
+ const reader = BufferReader.asReader(buffer);
+ return new FinalBlobAccumulatorPublicInputs(
+ Fr.fromBuffer(reader),
+ Fr.fromBuffer(reader),
+ BLS12Fr.fromBuffer(reader),
+ BLS12Point.fromBuffer(reader),
+ );
+ }
+
+ toBuffer() {
+ return serializeToBuffer(this.blobCommitmentsHash, this.z, this.y, this.c);
+ }
+
+ static fromBatchedBlob(blob: BatchedBlob) {
+ return new FinalBlobAccumulatorPublicInputs(blob.blobCommitmentsHash, blob.z, blob.y, blob.commitment);
+ }
+
+ toFields() {
+ return [
+ this.blobCommitmentsHash,
+ this.z,
+ ...this.y.toNoirBigNum().limbs.map(Fr.fromString),
+ ...this.c.toBN254Fields(),
+ ];
+ }
+
+ // The below is used to send to L1 for proof verification
+ toString() {
+ // We prepend 32 bytes for the (unused) 'blobHash' slot. This is not read or required by getEpochProofPublicInputs() on L1, but
+ // is expected since we usually pass the full precompile inputs via verifyEpochRootProof() to getEpochProofPublicInputs() to ensure
+ // we use calldata rather than a slice in memory:
+ const buf = Buffer.concat([Buffer.alloc(32), this.z.toBuffer(), this.y.toBuffer(), this.c.compress()]);
+ return buf.toString('hex');
+ }
+
+ equals(other: FinalBlobAccumulatorPublicInputs) {
+ return (
+ this.blobCommitmentsHash.equals(other.blobCommitmentsHash) &&
+ this.z.equals(other.z) &&
+ this.y.equals(other.y) &&
+ this.c.equals(other.c)
+ );
+ }
+
+ // Creates a random instance. Used for testing only - will not prove/verify.
+ static random() {
+ return new FinalBlobAccumulatorPublicInputs(Fr.random(), Fr.random(), BLS12Fr.random(), BLS12Point.random());
+ }
+
+ // Warning: MUST be final accumulator state.
+ static fromBatchedBlobAccumulator(accumulator: BatchedBlobAccumulator) {
+ return new FinalBlobAccumulatorPublicInputs(
+ accumulator.blobCommitmentsHashAcc,
+ accumulator.zAcc,
+ accumulator.yAcc,
+ accumulator.cAcc,
+ );
+ }
+
+ [inspect.custom]() {
+ return `FinalBlobAccumulatorPublicInputs {
+ blobCommitmentsHash: ${inspect(this.blobCommitmentsHash)},
+ z: ${inspect(this.z)},
+ y: ${inspect(this.y)},
+ c: ${inspect(this.c)},
+ }`;
+ }
+ }
+
+ /**
+ * startBlobAccumulator: Accumulated opening proofs for all blobs before this block range.
+ * endBlobAccumulator: Accumulated opening proofs for all blobs after adding this block range.
+ * finalBlobChallenges: Final values z and gamma, shared across the epoch.
+ */
+ export class BlockBlobPublicInputs {
+ constructor(
+ public startBlobAccumulator: BlobAccumulatorPublicInputs,
+ public endBlobAccumulator: BlobAccumulatorPublicInputs,
+ public finalBlobChallenges: FinalBlobBatchingChallenges,
+ ) {}
+
+ static empty(): BlockBlobPublicInputs {
+ return new BlockBlobPublicInputs(
+ BlobAccumulatorPublicInputs.empty(),
+ BlobAccumulatorPublicInputs.empty(),
+ FinalBlobBatchingChallenges.empty(),
+ );
+ }
+
+ static fromBuffer(buffer: Buffer | BufferReader): BlockBlobPublicInputs {
+ const reader = BufferReader.asReader(buffer);
+ return new BlockBlobPublicInputs(
+ reader.readObject(BlobAccumulatorPublicInputs),
+ reader.readObject(BlobAccumulatorPublicInputs),
+ reader.readObject(FinalBlobBatchingChallenges),
+ );
+ }
+
+ toBuffer() {
+ return serializeToBuffer(this.startBlobAccumulator, this.endBlobAccumulator, this.finalBlobChallenges);
+ }
+ }
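
The new public-inputs classes in this file mirror the Noir structs consumed by the rollup circuits. A hedged sketch of how a finalized batch might be turned into the hex payload sent to L1 for proof verification, assuming BatchedBlob, Blob and FinalBlobAccumulatorPublicInputs are exported from the package root (not confirmed by this diff):

import { BatchedBlob, Blob, FinalBlobAccumulatorPublicInputs } from '@aztec/blob-lib';

// The final accumulator values (blobCommitmentsHash, z, y, C) become the circuit's
// public inputs and the payload checked during epoch proof verification on L1.
async function epochProofBlobInputs(blobs: Blob[]): Promise<string> {
  const batched = await BatchedBlob.batch(blobs);
  const publicInputs = FinalBlobAccumulatorPublicInputs.fromBatchedBlob(batched);
  // toString() prepends an unused 32-byte 'blobHash' slot and hex-encodes z, y and
  // the compressed commitment, per the comment in toString() above.
  return publicInputs.toString();
}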