@aztec/blob-lib 4.0.0-nightly.20250907 → 4.0.0-nightly.20260107
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/batched_blob.d.ts +26 -0
- package/dest/batched_blob.d.ts.map +1 -0
- package/dest/batched_blob.js +20 -0
- package/dest/blob.d.ts +50 -99
- package/dest/blob.d.ts.map +1 -1
- package/dest/blob.js +78 -169
- package/dest/blob_batching.d.ts +41 -123
- package/dest/blob_batching.d.ts.map +1 -1
- package/dest/blob_batching.js +129 -203
- package/dest/blob_utils.d.ts +40 -0
- package/dest/blob_utils.d.ts.map +1 -0
- package/dest/blob_utils.js +69 -0
- package/dest/circuit_types/blob_accumulator.d.ts +23 -0
- package/dest/circuit_types/blob_accumulator.d.ts.map +1 -0
- package/dest/circuit_types/blob_accumulator.js +62 -0
- package/dest/circuit_types/final_blob_accumulator.d.ts +23 -0
- package/dest/circuit_types/final_blob_accumulator.d.ts.map +1 -0
- package/dest/circuit_types/final_blob_accumulator.js +66 -0
- package/dest/circuit_types/final_blob_batching_challenges.d.ts +16 -0
- package/dest/circuit_types/final_blob_batching_challenges.d.ts.map +1 -0
- package/dest/circuit_types/final_blob_batching_challenges.js +26 -0
- package/dest/circuit_types/index.d.ts +4 -0
- package/dest/circuit_types/index.d.ts.map +1 -0
- package/dest/circuit_types/index.js +4 -0
- package/dest/encoding/block_blob_data.d.ts +22 -0
- package/dest/encoding/block_blob_data.d.ts.map +1 -0
- package/dest/encoding/block_blob_data.js +65 -0
- package/dest/encoding/block_end_marker.d.ts +11 -0
- package/dest/encoding/block_end_marker.d.ts.map +1 -0
- package/dest/encoding/block_end_marker.js +41 -0
- package/dest/encoding/block_end_state_field.d.ts +12 -0
- package/dest/encoding/block_end_state_field.d.ts.map +1 -0
- package/dest/encoding/block_end_state_field.js +39 -0
- package/dest/encoding/checkpoint_blob_data.d.ts +15 -0
- package/dest/encoding/checkpoint_blob_data.d.ts.map +1 -0
- package/dest/encoding/checkpoint_blob_data.js +67 -0
- package/dest/encoding/checkpoint_end_marker.d.ts +8 -0
- package/dest/encoding/checkpoint_end_marker.d.ts.map +1 -0
- package/dest/encoding/checkpoint_end_marker.js +28 -0
- package/dest/encoding/fixtures.d.ts +41 -0
- package/dest/encoding/fixtures.d.ts.map +1 -0
- package/dest/encoding/fixtures.js +140 -0
- package/dest/encoding/index.d.ts +10 -0
- package/dest/encoding/index.d.ts.map +1 -0
- package/dest/encoding/index.js +9 -0
- package/dest/encoding/tx_blob_data.d.ts +19 -0
- package/dest/encoding/tx_blob_data.d.ts.map +1 -0
- package/dest/encoding/tx_blob_data.js +79 -0
- package/dest/encoding/tx_start_marker.d.ts +16 -0
- package/dest/encoding/tx_start_marker.d.ts.map +1 -0
- package/dest/encoding/tx_start_marker.js +77 -0
- package/dest/errors.d.ts +1 -1
- package/dest/errors.d.ts.map +1 -1
- package/dest/hash.d.ts +43 -0
- package/dest/hash.d.ts.map +1 -0
- package/dest/hash.js +80 -0
- package/dest/index.d.ts +7 -4
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +6 -16
- package/dest/interface.d.ts +1 -2
- package/dest/interface.d.ts.map +1 -1
- package/dest/kzg_context.d.ts +8 -0
- package/dest/kzg_context.d.ts.map +1 -0
- package/dest/kzg_context.js +14 -0
- package/dest/sponge_blob.d.ts +12 -14
- package/dest/sponge_blob.d.ts.map +1 -1
- package/dest/sponge_blob.js +26 -30
- package/dest/testing.d.ts +10 -23
- package/dest/testing.d.ts.map +1 -1
- package/dest/testing.js +37 -53
- package/dest/types.d.ts +17 -0
- package/dest/types.d.ts.map +1 -0
- package/dest/types.js +4 -0
- package/package.json +10 -7
- package/src/batched_blob.ts +26 -0
- package/src/blob.ts +81 -195
- package/src/blob_batching.ts +168 -231
- package/src/blob_utils.ts +82 -0
- package/src/circuit_types/blob_accumulator.ts +96 -0
- package/src/circuit_types/final_blob_accumulator.ts +76 -0
- package/src/circuit_types/final_blob_batching_challenges.ts +30 -0
- package/src/circuit_types/index.ts +4 -0
- package/src/encoding/block_blob_data.ts +102 -0
- package/src/encoding/block_end_marker.ts +55 -0
- package/src/encoding/block_end_state_field.ts +59 -0
- package/src/encoding/checkpoint_blob_data.ts +95 -0
- package/src/encoding/checkpoint_end_marker.ts +40 -0
- package/src/encoding/fixtures.ts +210 -0
- package/src/encoding/index.ts +9 -0
- package/src/encoding/tx_blob_data.ts +116 -0
- package/src/encoding/tx_start_marker.ts +97 -0
- package/src/hash.ts +89 -0
- package/src/index.ts +6 -19
- package/src/interface.ts +0 -1
- package/src/kzg_context.ts +16 -0
- package/src/sponge_blob.ts +28 -31
- package/src/testing.ts +48 -59
- package/src/types.ts +17 -0
- package/dest/blob_batching_public_inputs.d.ts +0 -71
- package/dest/blob_batching_public_inputs.d.ts.map +0 -1
- package/dest/blob_batching_public_inputs.js +0 -168
- package/dest/encoding.d.ts +0 -66
- package/dest/encoding.d.ts.map +0 -1
- package/dest/encoding.js +0 -113
- package/src/blob_batching_public_inputs.ts +0 -252
- package/src/encoding.ts +0 -138
package/src/blob_batching.ts
CHANGED
@@ -1,185 +1,19 @@
-import { AZTEC_MAX_EPOCH_DURATION,
-import { poseidon2Hash
-import {
-import {
+import { AZTEC_MAX_EPOCH_DURATION, BLOBS_PER_CHECKPOINT } from '@aztec/constants';
+import { poseidon2Hash } from '@aztec/foundation/crypto/poseidon';
+import { sha256ToField } from '@aztec/foundation/crypto/sha256';
+import { BLS12Fr, BLS12Point } from '@aztec/foundation/curves/bls12';
+import { Fr } from '@aztec/foundation/curves/bn254';
 
-
-import
-
-import {
-
-
+import { BatchedBlob } from './batched_blob.js';
+import { Blob } from './blob.js';
+import { getBlobsPerL1Block } from './blob_utils.js';
+import { BlobAccumulator, FinalBlobAccumulator, FinalBlobBatchingChallenges } from './circuit_types/index.js';
+import { computeBlobFieldsHash, hashNoirBigNumLimbs } from './hash.js';
+import { getKzg } from './kzg_context.js';
 
 /**
  * A class to create, manage, and prove batched EVM blobs.
- */
-export class BatchedBlob {
-  constructor(
-    /** Hash of Cs (to link to L1 blob hashes). */
-    public readonly blobCommitmentsHash: Fr,
-    /** Challenge point z such that p_i(z) = y_i. */
-    public readonly z: Fr,
-    /** Evaluation y, linear combination of all evaluations y_i = p_i(z) with gamma. */
-    public readonly y: BLS12Fr,
-    /** Commitment C, linear combination of all commitments C_i = [p_i] with gamma. */
-    public readonly commitment: BLS12Point,
-    /** KZG opening 'proof' Q (commitment to the quotient poly.), linear combination of all blob kzg 'proofs' Q_i with gamma. */
-    public readonly q: BLS12Point,
-  ) {}
-
-  /**
-   * Get the final batched opening proof from multiple blobs.
-   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
-   * beforehand from ALL blobs.
-   *
-   * @returns A batched blob.
-   */
-  static async batch(blobs: Blob[]): Promise<BatchedBlob> {
-    const numBlobs = blobs.length;
-    if (numBlobs > BLOBS_PER_BLOCK * AZTEC_MAX_EPOCH_DURATION) {
-      throw new Error(
-        `Too many blobs (${numBlobs}) sent to batch(). The maximum is ${BLOBS_PER_BLOCK * AZTEC_MAX_EPOCH_DURATION}.`,
-      );
-    }
-    // Precalculate the values (z and gamma) and initialize the accumulator:
-    let acc = await this.newAccumulator(blobs);
-    // Now we can create a multi opening proof of all input blobs:
-    acc = await acc.accumulateBlobs(blobs);
-    return await acc.finalize();
-  }
-
-  /**
-   * Returns an empty BatchedBlobAccumulator with precomputed challenges from all blobs in the epoch.
-   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
-   * beforehand from ALL blobs.
-   */
-  static async newAccumulator(blobs: Blob[]): Promise<BatchedBlobAccumulator> {
-    const finalBlobChallenges = await this.precomputeBatchedBlobChallenges(blobs);
-    return BatchedBlobAccumulator.newWithChallenges(finalBlobChallenges);
-  }
-
-  /**
-   * Gets the final challenges based on all blobs and their elements to perform a multi opening proof.
-   * Used in BatchedBlobAccumulator as 'finalZ' and finalGamma':
-   * - z = H(...H(H(z_0, z_1) z_2)..z_n)
-   *   - where z_i = H(H(fields of blob_i), C_i) = Blob.challengeZ,
-   *   - used such that p_i(z) = y_i = Blob.evaluationY for all n blob polynomials p_i().
-   * - gamma = H(H(...H(H(y_0, y_1) y_2)..y_n), z)
-   *   - used such that y = sum_i { gamma^i * y_i }, and C = sum_i { gamma^i * C_i }, for all blob evaluations y_i (see above) and commitments C_i.
-   * @returns Challenges z and gamma.
-   */
-  static async precomputeBatchedBlobChallenges(blobs: Blob[]): Promise<FinalBlobBatchingChallenges> {
-    // We need to precompute the final challenge values to evaluate the blobs.
-    let z = blobs[0].challengeZ;
-    // We start at i = 1, because z is initialized as the first blob's challenge.
-    for (let i = 1; i < blobs.length; i++) {
-      z = await poseidon2Hash([z, blobs[i].challengeZ]);
-    }
-    // Now we have a shared challenge for all blobs, evaluate them...
-    const proofObjects = blobs.map(b => computeKzgProof(b.data, z.toBuffer()));
-    const evaluations = proofObjects.map(([_, evaluation]) => BLS12Fr.fromBuffer(Buffer.from(evaluation)));
-    // ...and find the challenge for the linear combination of blobs.
-    let gamma = await hashNoirBigNumLimbs(evaluations[0]);
-    // We start at i = 1, because gamma is initialized as the first blob's evaluation.
-    for (let i = 1; i < blobs.length; i++) {
-      gamma = await poseidon2Hash([gamma, await hashNoirBigNumLimbs(evaluations[i])]);
-    }
-    gamma = await poseidon2Hash([gamma, z]);
-
-    return new FinalBlobBatchingChallenges(z, BLS12Fr.fromBN254Fr(gamma));
-  }
-
-  static async precomputeEmptyBatchedBlobChallenges(): Promise<FinalBlobBatchingChallenges> {
-    const blobs = [await Blob.fromFields([])];
-    // We need to precompute the final challenge values to evaluate the blobs.
-    const z = blobs[0].challengeZ;
-    // Now we have a shared challenge for all blobs, evaluate them...
-    const proofObjects = blobs.map(b => computeKzgProof(b.data, z.toBuffer()));
-    const evaluations = proofObjects.map(([_, evaluation]) => BLS12Fr.fromBuffer(Buffer.from(evaluation)));
-    // ...and find the challenge for the linear combination of blobs.
-    let gamma = await hashNoirBigNumLimbs(evaluations[0]);
-    gamma = await poseidon2Hash([gamma, z]);
-
-    return new FinalBlobBatchingChallenges(z, BLS12Fr.fromBN254Fr(gamma));
-  }
-
-  // Returns ethereum's versioned blob hash, following kzg_to_versioned_hash: https://eips.ethereum.org/EIPS/eip-4844#helpers
-  getEthVersionedBlobHash(): Buffer {
-    const hash = sha256(this.commitment.compress());
-    hash[0] = VERSIONED_HASH_VERSION_KZG;
-    return hash;
-  }
-
-  static getEthVersionedBlobHash(commitment: Buffer): Buffer {
-    const hash = sha256(commitment);
-    hash[0] = VERSIONED_HASH_VERSION_KZG;
-    return hash;
-  }
-
-  /**
-   * Returns a proof of opening of the blobs to verify on L1 using the point evaluation precompile:
-   *
-   * input[:32] - versioned_hash
-   * input[32:64] - z
-   * input[64:96] - y
-   * input[96:144] - commitment C
-   * input[144:192] - commitment Q (a 'proof' committing to the quotient polynomial q(X))
-   *
-   * See https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
-   */
-  getEthBlobEvaluationInputs(): `0x${string}` {
-    const buf = Buffer.concat([
-      this.getEthVersionedBlobHash(),
-      this.z.toBuffer(),
-      this.y.toBuffer(),
-      this.commitment.compress(),
-      this.q.compress(),
-    ]);
-    return `0x${buf.toString('hex')}`;
-  }
-}
-
-/**
- * Final values z and gamma are injected into each block root circuit. We ensure they are correct by:
- * - Checking equality in each block merge circuit and propagating up
- * - Checking final z_acc == z in root circuit
- * - Checking final gamma_acc == gamma in root circuit
- *
- * - z = H(...H(H(z_0, z_1) z_2)..z_n)
- *   - where z_i = H(H(fields of blob_i), C_i),
- *   - used such that p_i(z) = y_i = Blob.evaluationY for all n blob polynomials p_i().
- * - gamma = H(H(...H(H(y_0, y_1) y_2)..y_n), z)
- *   - used such that y = sum_i { gamma^i * y_i }, and C = sum_i { gamma^i * C_i }
- *   for all blob evaluations y_i (see above) and commitments C_i.
- *
- * Iteratively calculated by BlobAccumulatorPublicInputs.accumulate() in nr. See also precomputeBatchedBlobChallenges() above.
- */
-export class FinalBlobBatchingChallenges {
-  constructor(
-    public readonly z: Fr,
-    public readonly gamma: BLS12Fr,
-  ) {}
-
-  equals(other: FinalBlobBatchingChallenges) {
-    return this.z.equals(other.z) && this.gamma.equals(other.gamma);
-  }
-
-  static empty(): FinalBlobBatchingChallenges {
-    return new FinalBlobBatchingChallenges(Fr.ZERO, BLS12Fr.ZERO);
-  }
-
-  static fromBuffer(buffer: Buffer | BufferReader): FinalBlobBatchingChallenges {
-    const reader = BufferReader.asReader(buffer);
-    return new FinalBlobBatchingChallenges(Fr.fromBuffer(reader), reader.readObject(BLS12Fr));
-  }
-
-  toBuffer() {
-    return serializeToBuffer(this.z, this.gamma);
-  }
-}
-
-/**
- * See noir-projects/noir-protocol-circuits/crates/blob/src/blob_batching_public_inputs.nr -> BlobAccumulatorPublicInputs
+ * See noir-projects/noir-protocol-circuits/crates/blob/src/abis/blob_accumulator.nr
  */
 export class BatchedBlobAccumulator {
   constructor(
@@ -205,39 +39,6 @@ export class BatchedBlobAccumulator {
     public readonly finalBlobChallenges: FinalBlobBatchingChallenges,
   ) {}
 
-  /**
-   * Init the first accumulation state of the epoch.
-   * We assume the input blob has not been evaluated at z.
-   *
-   * First state of the accumulator:
-   * - v_acc := sha256(C_0)
-   * - z_acc := z_0
-   * - y_acc := gamma^0 * y_0 = y_0
-   * - c_acc := gamma^0 * c_0 = c_0
-   * - gamma_acc := poseidon2(y_0.limbs)
-   * - gamma^(i + 1) = gamma^1 = gamma // denoted gamma_pow_acc
-   *
-   * @returns An initial blob accumulator.
-   */
-  static async initialize(
-    blob: Blob,
-    finalBlobChallenges: FinalBlobBatchingChallenges,
-  ): Promise<BatchedBlobAccumulator> {
-    const [q, evaluation] = computeKzgProof(blob.data, finalBlobChallenges.z.toBuffer());
-    const firstY = BLS12Fr.fromBuffer(Buffer.from(evaluation));
-    // Here, i = 0, so:
-    return new BatchedBlobAccumulator(
-      sha256ToField([blob.commitment]), // blobCommitmentsHashAcc = sha256(C_0)
-      blob.challengeZ, // zAcc = z_0
-      firstY, // yAcc = gamma^0 * y_0 = 1 * y_0
-      BLS12Point.decompress(blob.commitment), // cAcc = gamma^0 * C_0 = 1 * C_0
-      BLS12Point.decompress(Buffer.from(q)), // qAcc = gamma^0 * Q_0 = 1 * Q_0
-      await hashNoirBigNumLimbs(firstY), // gammaAcc = poseidon2(y_0.limbs)
-      finalBlobChallenges.gamma, // gammaPow = gamma^(i + 1) = gamma^1 = gamma
-      finalBlobChallenges,
-    );
-  }
-
   /**
    * Create the empty accumulation state of the epoch.
    * @returns An empty blob accumulator with challenges.
@@ -255,25 +56,128 @@ export class BatchedBlobAccumulator {
     );
   }
 
+  /**
+   * Returns an empty BatchedBlobAccumulator with precomputed challenges from all blobs in the epoch.
+   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
+   * beforehand from ALL blobs.
+   */
+  static async fromBlobFields(blobFieldsPerCheckpoint: Fr[][]): Promise<BatchedBlobAccumulator> {
+    const finalBlobChallenges = await this.precomputeBatchedBlobChallenges(blobFieldsPerCheckpoint);
+    return BatchedBlobAccumulator.newWithChallenges(finalBlobChallenges);
+  }
+
+  /**
+   * Get the final batched opening proof from multiple blobs.
+   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
+   * beforehand from ALL blobs.
+   *
+   * @returns A batched blob.
+   */
+  static async batch(blobFieldsPerCheckpoint: Fr[][], verifyProof = false): Promise<BatchedBlob> {
+    const numCheckpoints = blobFieldsPerCheckpoint.length;
+    if (numCheckpoints > AZTEC_MAX_EPOCH_DURATION) {
+      throw new Error(
+        `Too many checkpoints sent to batch(). The maximum is ${AZTEC_MAX_EPOCH_DURATION}. Got ${numCheckpoints}.`,
+      );
+    }
+
+    // Precalculate the values (z and gamma) and initialize the accumulator:
+    let acc = await this.fromBlobFields(blobFieldsPerCheckpoint);
+    // Now we can create a multi opening proof of all input blobs:
+    for (const blobFields of blobFieldsPerCheckpoint) {
+      acc = await acc.accumulateFields(blobFields);
+    }
+    return await acc.finalize(verifyProof);
+  }
+
+  /**
+   * Gets the final challenges based on all blobs and their elements to perform a multi opening proof.
+   * Used in BatchedBlobAccumulator as 'finalZ' and finalGamma':
+   * - z = H(...H(H(z_0, z_1) z_2)..z_n)
+   *   - where z_i = H(H(fields of blob_i), C_i) = Blob.challengeZ,
+   *   - used such that p_i(z) = y_i = Blob.evaluationY for all n blob polynomials p_i().
+   * - gamma = H(H(...H(H(y_0, y_1) y_2)..y_n), z)
+   *   - used such that y = sum_i { gamma^i * y_i }, and C = sum_i { gamma^i * C_i }, for all blob evaluations y_i (see above) and commitments C_i.
+   *
+   * @param blobs - The blobs to precompute the challenges for. Each sub-array is the blobs for an L1 block.
+   * @returns Challenges z and gamma.
+   */
+  static async precomputeBatchedBlobChallenges(blobFieldsPerCheckpoint: Fr[][]): Promise<FinalBlobBatchingChallenges> {
+    // Compute the final challenge z to evaluate the blobs.
+    let z: Fr | undefined;
+    const allBlobs = [];
+    for (const blobFields of blobFieldsPerCheckpoint) {
+      // Compute the hash of all the fields in the block.
+      const blobFieldsHash = await computeBlobFieldsHash(blobFields);
+      const blobs = getBlobsPerL1Block(blobFields);
+      for (const blob of blobs) {
+        // Compute the challenge z for each blob and accumulate it.
+        const challengeZ = await blob.computeChallengeZ(blobFieldsHash);
+        if (!z) {
+          z = challengeZ;
+        } else {
+          z = await poseidon2Hash([z, challengeZ]);
+        }
+      }
+      allBlobs.push(...blobs);
+    }
+    if (!z) {
+      throw new Error('No blobs to precompute challenges for.');
+    }
+
+    // Now we have a shared challenge for all blobs, evaluate them...
+    const proofObjects = allBlobs.map(b => b.evaluate(z));
+    const evaluations = await Promise.all(proofObjects.map(({ y }) => hashNoirBigNumLimbs(y)));
+    // ...and find the challenge for the linear combination of blobs.
+    let gamma = evaluations[0];
+    // We start at i = 1, because gamma is initialized as the first blob's evaluation.
+    for (let i = 1; i < allBlobs.length; i++) {
+      gamma = await poseidon2Hash([gamma, evaluations[i]]);
+    }
+    gamma = await poseidon2Hash([gamma, z]);
+
+    return new FinalBlobBatchingChallenges(z, BLS12Fr.fromBN254Fr(gamma));
+  }
+
   /**
    * Given blob i, accumulate all state.
    * We assume the input blob has not been evaluated at z.
    * @returns An updated blob accumulator.
    */
-  async
+  async accumulateBlob(blob: Blob, blobFieldsHash: Fr) {
+    const { proof, y: thisY } = blob.evaluate(this.finalBlobChallenges.z);
+    const thisC = BLS12Point.decompress(blob.commitment);
+    const thisQ = BLS12Point.decompress(proof);
+    const blobChallengeZ = await blob.computeChallengeZ(blobFieldsHash);
+
     if (this.isEmptyState()) {
-
+      /**
+       * Init the first accumulation state of the epoch.
+       * - v_acc := sha256(C_0)
+       * - z_acc := z_0
+       * - y_acc := gamma^0 * y_0 = y_0
+       * - c_acc := gamma^0 * c_0 = c_0
+       * - gamma_acc := poseidon2(y_0.limbs)
+       * - gamma^(i + 1) = gamma^1 = gamma // denoted gamma_pow_acc
+       */
+      return new BatchedBlobAccumulator(
+        sha256ToField([blob.commitment]), // blobCommitmentsHashAcc = sha256(C_0)
+        blobChallengeZ, // zAcc = z_0
+        thisY, // yAcc = gamma^0 * y_0 = 1 * y_0
+        thisC, // cAcc = gamma^0 * C_0 = 1 * C_0
+        thisQ, // qAcc = gamma^0 * Q_0 = 1 * Q_0
+        await hashNoirBigNumLimbs(thisY), // gammaAcc = poseidon2(y_0.limbs)
+        this.finalBlobChallenges.gamma, // gammaPow = gamma^(i + 1) = gamma^1 = gamma
+        this.finalBlobChallenges,
+      );
     } else {
-      const [q, evaluation] = computeKzgProof(blob.data, this.finalBlobChallenges.z.toBuffer());
-      const thisY = BLS12Fr.fromBuffer(Buffer.from(evaluation));
-
       // Moving from i - 1 to i, so:
       return new BatchedBlobAccumulator(
        sha256ToField([this.blobCommitmentsHashAcc, blob.commitment]), // blobCommitmentsHashAcc := sha256(blobCommitmentsHashAcc, C_i)
-        await poseidon2Hash([this.zAcc,
+        await poseidon2Hash([this.zAcc, blobChallengeZ]), // zAcc := poseidon2(zAcc, z_i)
        this.yAcc.add(thisY.mul(this.gammaPow)), // yAcc := yAcc + (gamma^i * y_i)
-        this.cAcc.add(
-        this.qAcc.add(
+        this.cAcc.add(thisC.mul(this.gammaPow)), // cAcc := cAcc + (gamma^i * C_i)
+        this.qAcc.add(thisQ.mul(this.gammaPow)), // qAcc := qAcc + (gamma^i * C_i)
        await poseidon2Hash([this.gammaAcc, await hashNoirBigNumLimbs(thisY)]), // gammaAcc := poseidon2(gammaAcc, poseidon2(y_i.limbs))
        this.gammaPow.mul(this.finalBlobChallenges.gamma), // gammaPow = gamma^(i + 1) = gamma^i * final_gamma
        this.finalBlobChallenges,
@@ -284,13 +188,25 @@ export class BatchedBlobAccumulator {
   /**
    * Given blobs, accumulate all state.
    * We assume the input blobs have not been evaluated at z.
+   * @param blobFields - The blob fields of a checkpoint to accumulate.
    * @returns An updated blob accumulator.
    */
-  async
+  async accumulateFields(blobFields: Fr[]) {
+    const blobs = getBlobsPerL1Block(blobFields);
+
+    if (blobs.length > BLOBS_PER_CHECKPOINT) {
+      throw new Error(
+        `Too many blobs to accumulate. The maximum is ${BLOBS_PER_CHECKPOINT} per checkpoint. Got ${blobs.length}.`,
+      );
+    }
+
+    // Compute the hash of all the fields in the block.
+    const blobFieldsHash = await computeBlobFieldsHash(blobFields);
+
     // Initialize the acc to iterate over:
     let acc: BatchedBlobAccumulator = this.clone();
-    for (
-      acc = await acc.
+    for (const blob of blobs) {
+      acc = await acc.accumulateBlob(blob, blobFieldsHash);
     }
     return acc;
   }
@@ -306,9 +222,10 @@ export class BatchedBlobAccumulator {
    * - c := c_acc (final commitment to be checked on L1)
    * - gamma := poseidon2(gamma_acc, z) (challenge for linear combination of y and C, above)
    *
+   * @param verifyProof - Whether to verify the KZG proof.
    * @returns A batched blob.
    */
-  async finalize(): Promise<BatchedBlob> {
+  async finalize(verifyProof = false): Promise<BatchedBlob> {
     // All values in acc are final, apart from gamma := poseidon2(gammaAcc, z):
     const calculatedGamma = await poseidon2Hash([this.gammaAcc, this.zAcc]);
     // Check final values:
@@ -322,11 +239,23 @@ export class BatchedBlobAccumulator {
        `Blob batching mismatch: accumulated gamma ${calculatedGamma} does not equal injected gamma ${this.finalBlobChallenges.gamma.toBN254Fr()}`,
      );
    }
-
+
+    const batchedBlob = new BatchedBlob(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc, this.qAcc);
+
+    if (verifyProof && !this.verify()) {
       throw new Error(`KZG proof did not verify.`);
     }
 
-    return
+    return batchedBlob;
+  }
+
+  verify() {
+    return getKzg().verifyKzgProof(
+      this.cAcc.compress(),
+      this.zAcc.toBuffer(),
+      this.yAcc.toBuffer(),
+      this.qAcc.compress(),
+    );
   }
 
   isEmptyState() {
@@ -353,11 +282,19 @@ export class BatchedBlobAccumulator {
       FinalBlobBatchingChallenges.fromBuffer(this.finalBlobChallenges.toBuffer()),
     );
   }
-}
 
-
-
-
-
-
+  toBlobAccumulator() {
+    return new BlobAccumulator(
+      this.blobCommitmentsHashAcc,
+      this.zAcc,
+      this.yAcc,
+      this.cAcc,
+      this.gammaAcc,
+      this.gammaPow,
+    );
+  }
+
+  toFinalBlobAccumulator() {
+    return new FinalBlobAccumulator(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc);
+  }
 }
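The batching entry point has moved from the removed BatchedBlob.batch(blobs: Blob[]) to BatchedBlobAccumulator.batch(blobFieldsPerCheckpoint: Fr[][], verifyProof): callers now pass raw blob fields grouped by checkpoint instead of pre-built Blob objects. A minimal consumer sketch under two assumptions: that BatchedBlobAccumulator is re-exported from the package root, and that the placeholder field values stand in for real arrays carrying the checkpoint encoding produced by the new encoding/ module.

import { BatchedBlobAccumulator } from '@aztec/blob-lib';
import { Fr } from '@aztec/foundation/curves/bn254';

// Hypothetical payloads: each inner array holds one checkpoint's blob fields.
// Real arrays carry the checkpoint encoding; plain values are placeholders only.
const checkpointFields: Fr[][] = [
  [new Fr(1n), new Fr(2n)],
  [new Fr(3n)],
];

// Challenges z and gamma are derived up front from ALL fields, each checkpoint
// is accumulated in turn, and the result is finalized; passing true also
// verifies the batched KZG proof before returning.
const batched = await BatchedBlobAccumulator.batch(checkpointFields, true);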
package/src/blob_utils.ts
ADDED
@@ -0,0 +1,82 @@
+import { FIELDS_PER_BLOB } from '@aztec/constants';
+import { BLS12Point } from '@aztec/foundation/curves/bls12';
+import { Fr } from '@aztec/foundation/curves/bn254';
+
+import type { BatchedBlob } from './batched_blob.js';
+import { Blob } from './blob.js';
+import { type CheckpointBlobData, decodeCheckpointBlobDataFromBuffer } from './encoding/index.js';
+import { computeBlobsHash, computeEthVersionedBlobHash } from './hash.js';
+
+/**
+ * @param blobs - The blobs to emit.
+ * @returns The blobs' compressed commitments in hex prefixed by the number of blobs. 1 byte for the prefix, 48 bytes
+ * per blob commitment.
+ * @dev Used for proposing blocks to validate injected blob commitments match real broadcast blobs.
+ */
+export function getPrefixedEthBlobCommitments(blobs: Blob[]): `0x${string}` {
+  // Prefix the number of blobs.
+  const lenBuf = Buffer.alloc(1);
+  lenBuf.writeUint8(blobs.length);
+
+  const blobBuf = Buffer.concat(blobs.map(blob => blob.commitment));
+
+  const buf = Buffer.concat([lenBuf, blobBuf]);
+  return `0x${buf.toString('hex')}`;
+}
+
+/**
+ * @param fields - Fields to broadcast in the blob(s)
+ * @returns As many blobs as required to broadcast the given fields to an L1 block.
+ *
+ * @throws If the number of fields does not match what's indicated by the checkpoint prefix.
+ */
+export function getBlobsPerL1Block(fields: Fr[]): Blob[] {
+  if (!fields.length) {
+    throw new Error('Cannot create blobs from empty fields.');
+  }
+
+  const numBlobs = Math.ceil(fields.length / FIELDS_PER_BLOB);
+  return Array.from({ length: numBlobs }, (_, i) =>
+    Blob.fromFields(fields.slice(i * FIELDS_PER_BLOB, (i + 1) * FIELDS_PER_BLOB)),
+  );
+}
+
+/**
+ * Get the encoded data from all blobs in the checkpoint.
+ * @param blobs - The blobs to read data from. Should be all the blobs for the L1 block proposing the checkpoint.
+ * @returns The encoded data of the checkpoint.
+ */
+export function decodeCheckpointBlobDataFromBlobs(blobs: Blob[]): CheckpointBlobData {
+  const buf = Buffer.concat(blobs.map(b => b.data));
+  return decodeCheckpointBlobDataFromBuffer(buf);
+}
+
+export function computeBlobsHashFromBlobs(blobs: Blob[]): Fr {
+  return computeBlobsHash(blobs.map(b => b.getEthVersionedBlobHash()));
+}
+
+export function getBlobCommitmentsFromBlobs(blobs: Blob[]): BLS12Point[] {
+  return blobs.map(b => BLS12Point.decompress(b.commitment));
+}
+
+/**
+ * Returns a proof of opening of the blobs to verify on L1 using the point evaluation precompile:
+ *
+ * input[:32] - versioned_hash
+ * input[32:64] - z
+ * input[64:96] - y
+ * input[96:144] - commitment C
+ * input[144:192] - commitment Q (a 'proof' committing to the quotient polynomial q(X))
+ *
+ * See https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
+ */
+export function getEthBlobEvaluationInputs(batchedBlob: BatchedBlob): `0x${string}` {
+  const buf = Buffer.concat([
+    computeEthVersionedBlobHash(batchedBlob.commitment.compress()),
+    batchedBlob.z.toBuffer(),
+    batchedBlob.y.toBuffer(),
+    batchedBlob.commitment.compress(),
+    batchedBlob.q.compress(),
+  ]);
+  return `0x${buf.toString('hex')}`;
+}
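These helpers replace former instance methods: getEthBlobEvaluationInputs now takes the BatchedBlob as a parameter, and blob construction goes through getBlobsPerL1Block. A rough sketch of how they compose, assuming root re-exports and an already-encoded fields array (declared rather than constructed here, since the encoding format lives in the encoding/ module):

import { Fr } from '@aztec/foundation/curves/bn254';
import {
  computeBlobsHashFromBlobs,
  getBlobsPerL1Block,
  getPrefixedEthBlobCommitments,
} from '@aztec/blob-lib';

declare const fields: Fr[]; // assumed: a checkpoint's encoded blob fields

// Split the fields into as many blobs as needed (FIELDS_PER_BLOB fields each).
const blobs = getBlobsPerL1Block(fields);

// One length byte followed by each 48-byte compressed commitment, as posted
// when proposing the block on L1.
const prefixedCommitments = getPrefixedEthBlobCommitments(blobs);

// sha256 over the EIP-4844 versioned hash of every blob in the checkpoint.
const blobsHash = computeBlobsHashFromBlobs(blobs);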
package/src/circuit_types/blob_accumulator.ts
ADDED
@@ -0,0 +1,96 @@
+import { BLS12_FQ_LIMBS, BLS12_FR_LIMBS } from '@aztec/constants';
+import { BLS12Fq, BLS12Fr, BLS12Point } from '@aztec/foundation/curves/bls12';
+import { Fr } from '@aztec/foundation/curves/bn254';
+import { BufferReader, FieldReader, serializeToBuffer } from '@aztec/foundation/serialize';
+
+/**
+ * See `noir-projects/noir-protocol-circuits/crates/blob/src/abis/blob_accumulator.nr` for documentation.
+ */
+export class BlobAccumulator {
+  constructor(
+    public blobCommitmentsHashAcc: Fr,
+    public zAcc: Fr,
+    public yAcc: BLS12Fr,
+    public cAcc: BLS12Point,
+    public gammaAcc: Fr,
+    public gammaPowAcc: BLS12Fr,
+  ) {}
+
+  static empty(): BlobAccumulator {
+    return new BlobAccumulator(Fr.ZERO, Fr.ZERO, BLS12Fr.ZERO, BLS12Point.ZERO, Fr.ZERO, BLS12Fr.ZERO);
+  }
+
+  equals(other: BlobAccumulator) {
+    return (
+      this.blobCommitmentsHashAcc.equals(other.blobCommitmentsHashAcc) &&
+      this.zAcc.equals(other.zAcc) &&
+      this.yAcc.equals(other.yAcc) &&
+      this.cAcc.equals(other.cAcc) &&
+      this.gammaAcc.equals(other.gammaAcc) &&
+      this.gammaPowAcc.equals(other.gammaPowAcc)
+    );
+  }
+
+  static fromBuffer(buffer: Buffer | BufferReader): BlobAccumulator {
+    const reader = BufferReader.asReader(buffer);
+    return new BlobAccumulator(
+      Fr.fromBuffer(reader),
+      Fr.fromBuffer(reader),
+      BLS12Fr.fromBuffer(reader),
+      BLS12Point.fromBuffer(reader),
+      Fr.fromBuffer(reader),
+      BLS12Fr.fromBuffer(reader),
+    );
+  }
+
+  toBuffer() {
+    return serializeToBuffer(
+      this.blobCommitmentsHashAcc,
+      this.zAcc,
+      this.yAcc,
+      this.cAcc,
+      this.gammaAcc,
+      this.gammaPowAcc,
+    );
+  }
+
+  toFields() {
+    return [
+      this.blobCommitmentsHashAcc,
+      this.zAcc,
+      ...this.yAcc.toNoirBigNum().limbs.map(Fr.fromString),
+      ...this.cAcc.x.toNoirBigNum().limbs.map(Fr.fromString),
+      ...this.cAcc.y.toNoirBigNum().limbs.map(Fr.fromString),
+      new Fr(this.cAcc.isInfinite),
+      this.gammaAcc,
+      ...this.gammaPowAcc.toNoirBigNum().limbs.map(Fr.fromString),
+    ];
+  }
+
+  static fromFields(fields: Fr[] | FieldReader): BlobAccumulator {
+    const reader = FieldReader.asReader(fields);
+    return new BlobAccumulator(
+      reader.readField(),
+      reader.readField(),
+      BLS12Fr.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FR_LIMBS).map(f => f.toString()) }),
+      new BLS12Point(
+        BLS12Fq.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FQ_LIMBS).map(f => f.toString()) }),
+        BLS12Fq.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FQ_LIMBS).map(f => f.toString()) }),
+        reader.readBoolean(),
+      ),
+      reader.readField(),
+      BLS12Fr.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FR_LIMBS).map(f => f.toString()) }),
+    );
+  }
+
+  static random() {
+    return new BlobAccumulator(
+      Fr.random(),
+      Fr.random(),
+      BLS12Fr.random(),
+      BLS12Point.random(),
+      Fr.random(),
+      BLS12Fr.random(),
+    );
+  }
+}
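BlobAccumulator mirrors the accumulator struct consumed by the Noir circuits, so it carries both a buffer form and a flat field form (BLS12 scalars and point coordinates expanded into bignum limbs, plus the point's infinity flag as one field). A small round-trip sketch, assuming BlobAccumulator is re-exported from the package root and that fromFields/fromBuffer invert toFields/toBuffer, as the symmetric reader/writer code above suggests:

import { BlobAccumulator } from '@aztec/blob-lib';

const acc = BlobAccumulator.random();

// Buffer form: each component serialized in declaration order.
const fromBuf = BlobAccumulator.fromBuffer(acc.toBuffer());

// Field form: the limb-expanded representation handed to the circuits.
const fromFields = BlobAccumulator.fromFields(acc.toFields());

console.assert(acc.equals(fromBuf) && acc.equals(fromFields));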