@aztec/blob-lib 0.0.0-test.1 → 0.0.1-fake-ceab37513c
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/blob.d.ts +31 -26
- package/dest/blob.d.ts.map +1 -1
- package/dest/blob.js +47 -53
- package/dest/blob_batching.d.ts +188 -0
- package/dest/blob_batching.d.ts.map +1 -0
- package/dest/blob_batching.js +299 -0
- package/dest/blob_batching_public_inputs.d.ts +71 -0
- package/dest/blob_batching_public_inputs.d.ts.map +1 -0
- package/dest/blob_batching_public_inputs.js +168 -0
- package/dest/index.d.ts +2 -1
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +4 -3
- package/dest/interface.d.ts +1 -2
- package/dest/interface.d.ts.map +1 -1
- package/dest/sponge_blob.d.ts +2 -4
- package/dest/sponge_blob.d.ts.map +1 -1
- package/dest/testing.d.ts +6 -5
- package/dest/testing.d.ts.map +1 -1
- package/dest/testing.js +10 -9
- package/dest/types.d.ts +14 -0
- package/dest/types.d.ts.map +1 -0
- package/dest/types.js +1 -0
- package/package.json +15 -12
- package/src/blob.ts +49 -66
- package/src/blob_batching.ts +363 -0
- package/src/blob_batching_public_inputs.ts +252 -0
- package/src/index.ts +3 -3
- package/src/interface.ts +1 -4
- package/src/testing.ts +21 -11
- package/src/trusted_setup_bit_reversed.json +4100 -0
- package/src/types.ts +16 -0
- package/dest/blob_public_inputs.d.ts +0 -50
- package/dest/blob_public_inputs.d.ts.map +0 -1
- package/dest/blob_public_inputs.js +0 -146
- package/src/blob_public_inputs.ts +0 -157
|
@@ -0,0 +1,299 @@
|
|
|
1
|
+
import { AZTEC_MAX_EPOCH_DURATION, BLOBS_PER_BLOCK } from '@aztec/constants';
|
|
2
|
+
import { poseidon2Hash, sha256, sha256ToField } from '@aztec/foundation/crypto';
|
|
3
|
+
import { BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
|
|
4
|
+
import { BufferReader, serializeToBuffer } from '@aztec/foundation/serialize';
|
|
5
|
+
// Importing directly from 'c-kzg' does not work:
|
|
6
|
+
import cKzg from 'c-kzg';
|
|
7
|
+
import { Blob, VERSIONED_HASH_VERSION_KZG } from './blob.js';
|
|
8
|
+
const { computeKzgProof, verifyKzgProof } = cKzg;
|
|
9
|
+
/**
 * A class to create, manage, and prove batched EVM blobs.
 *
 * Batching folds up to BLOBS_PER_BLOCK * AZTEC_MAX_EPOCH_DURATION blobs into a single
 * KZG multi-opening (challenge z, combined evaluation y, combined commitment C, and
 * combined quotient commitment Q), so L1 only verifies one point evaluation per epoch.
 */ export class BatchedBlob {
  blobCommitmentsHash;
  z;
  y;
  commitment;
  q;
  constructor(/** Hash of Cs (to link to L1 blob hashes). */ blobCommitmentsHash, /** Challenge point z such that p_i(z) = y_i. */ z, /** Evaluation y, linear combination of all evaluations y_i = p_i(z) with gamma. */ y, /** Commitment C, linear combination of all commitments C_i = [p_i] with gamma. */ commitment, /** KZG opening 'proof' Q (commitment to the quotient poly.), linear combination of all blob kzg 'proofs' Q_i with gamma. */ q){
    this.blobCommitmentsHash = blobCommitmentsHash;
    this.z = z;
    this.y = y;
    this.commitment = commitment;
    this.q = q;
  }
  /**
   * Get the final batched opening proof from multiple blobs.
   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
   * beforehand from ALL blobs.
   *
   * @returns A batched blob.
   */ static async batch(blobs) {
    const numBlobs = blobs.length;
    if (numBlobs > BLOBS_PER_BLOCK * AZTEC_MAX_EPOCH_DURATION) {
      throw new Error(`Too many blobs (${numBlobs}) sent to batch(). The maximum is ${BLOBS_PER_BLOCK * AZTEC_MAX_EPOCH_DURATION}.`);
    }
    // Precalculate the values (z and gamma) and initialize the accumulator:
    let acc = await this.newAccumulator(blobs);
    // Now we can create a multi opening proof of all input blobs:
    acc = await acc.accumulateBlobs(blobs);
    return await acc.finalize();
  }
  /**
   * Returns an empty BatchedBlobAccumulator with precomputed challenges from all blobs in the epoch.
   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
   * beforehand from ALL blobs.
   */ static async newAccumulator(blobs) {
    const finalBlobChallenges = await this.precomputeBatchedBlobChallenges(blobs);
    return BatchedBlobAccumulator.newWithChallenges(finalBlobChallenges);
  }
  /**
   * Gets the final challenges based on all blobs and their elements to perform a multi opening proof.
   * Used in BatchedBlobAccumulator as 'finalZ' and finalGamma':
   * - z = H(...H(H(z_0, z_1) z_2)..z_n)
   *   - where z_i = H(H(fields of blob_i), C_i) = Blob.challengeZ,
   *   - used such that p_i(z) = y_i = Blob.evaluationY for all n blob polynomials p_i().
   * - gamma = H(H(...H(H(y_0, y_1) y_2)..y_n), z)
   *   - used such that y = sum_i { gamma^i * y_i }, and C = sum_i { gamma^i * C_i }, for all blob evaluations y_i (see above) and commitments C_i.
   * @param blobs - All blobs of the epoch (must be non-empty).
   * @returns Challenges z and gamma.
   * @throws If called with no blobs (there would be nothing to evaluate).
   */ static async precomputeBatchedBlobChallenges(blobs) {
    if (blobs.length === 0) {
      // Previously this fell over with an opaque TypeError on blobs[0]; fail loudly instead.
      throw new Error('Cannot precompute blob batching challenges: no blobs provided.');
    }
    // We need to precompute the final challenge values to evaluate the blobs.
    let z = blobs[0].challengeZ;
    // We start at i = 1, because z is initialized as the first blob's challenge.
    for(let i = 1; i < blobs.length; i++){
      z = await poseidon2Hash([
        z,
        blobs[i].challengeZ
      ]);
    }
    // Now we have a shared challenge for all blobs, evaluate them...
    const proofObjects = blobs.map((b)=>computeKzgProof(b.data, z.toBuffer()));
    const evaluations = proofObjects.map(([_, evaluation])=>BLS12Fr.fromBuffer(Buffer.from(evaluation)));
    // ...and find the challenge for the linear combination of blobs.
    let gamma = await hashNoirBigNumLimbs(evaluations[0]);
    // We start at i = 1, because gamma is initialized as the first blob's evaluation.
    for(let i = 1; i < blobs.length; i++){
      gamma = await poseidon2Hash([
        gamma,
        await hashNoirBigNumLimbs(evaluations[i])
      ]);
    }
    gamma = await poseidon2Hash([
      gamma,
      z
    ]);
    return new FinalBlobBatchingChallenges(z, BLS12Fr.fromBN254Fr(gamma));
  }
  /**
   * Challenges for a single empty blob.
   * @dev With exactly one blob, both accumulation loops in precomputeBatchedBlobChallenges()
   * are no-ops, so delegating is byte-for-byte equivalent to the previous hand-unrolled copy
   * of that method (which this replaces to avoid drift between the two code paths).
   */ static async precomputeEmptyBatchedBlobChallenges() {
    return await this.precomputeBatchedBlobChallenges([
      await Blob.fromFields([])
    ]);
  }
  // Returns ethereum's versioned blob hash, following kzg_to_versioned_hash: https://eips.ethereum.org/EIPS/eip-4844#helpers
  getEthVersionedBlobHash() {
    const hash = sha256(this.commitment.compress());
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    return hash;
  }
  // Static variant operating on a raw (compressed) commitment buffer.
  static getEthVersionedBlobHash(commitment) {
    const hash = sha256(commitment);
    hash[0] = VERSIONED_HASH_VERSION_KZG;
    return hash;
  }
  /**
   * Returns a proof of opening of the blobs to verify on L1 using the point evaluation precompile:
   *
   * input[:32]     - versioned_hash
   * input[32:64]   - z
   * input[64:96]   - y
   * input[96:144]  - commitment C
   * input[144:192] - commitment Q (a 'proof' committing to the quotient polynomial q(X))
   *
   * See https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
   */ getEthBlobEvaluationInputs() {
    const buf = Buffer.concat([
      this.getEthVersionedBlobHash(),
      this.z.toBuffer(),
      this.y.toBuffer(),
      this.commitment.compress(),
      this.q.compress()
    ]);
    return `0x${buf.toString('hex')}`;
  }
}
|
|
135
|
+
/**
 * Final values z and gamma are injected into each block root circuit. We ensure they are correct by:
 * - Checking equality in each block merge circuit and propagating up
 * - Checking final z_acc == z in root circuit
 * - Checking final gamma_acc == gamma in root circuit
 *
 * - z = H(...H(H(z_0, z_1) z_2)..z_n)
 *   - where z_i = H(H(fields of blob_i), C_i),
 *   - used such that p_i(z) = y_i = Blob.evaluationY for all n blob polynomials p_i().
 * - gamma = H(H(...H(H(y_0, y_1) y_2)..y_n), z)
 *   - used such that y = sum_i { gamma^i * y_i }, and C = sum_i { gamma^i * C_i }
 *     for all blob evaluations y_i (see above) and commitments C_i.
 *
 * Iteratively calculated by BlobAccumulatorPublicInputs.accumulate() in nr. See also precomputeBatchedBlobChallenges() above.
 */ export class FinalBlobBatchingChallenges {
  z;
  gamma;
  constructor(z, gamma){
    this.z = z;
    this.gamma = gamma;
  }
  /** All-zero challenges (placeholder / pre-computation value). */
  static empty() {
    const zeroZ = Fr.ZERO;
    const zeroGamma = BLS12Fr.ZERO;
    return new FinalBlobBatchingChallenges(zeroZ, zeroGamma);
  }
  /** Field-wise equality against another set of challenges. */
  equals(other) {
    if (!this.z.equals(other.z)) {
      return false;
    }
    return this.gamma.equals(other.gamma);
  }
  /** Deserializes (z, gamma) in the same order toBuffer() writes them. */
  static fromBuffer(buffer) {
    const reader = BufferReader.asReader(buffer);
    const z = Fr.fromBuffer(reader);
    const gamma = reader.readObject(BLS12Fr);
    return new FinalBlobBatchingChallenges(z, gamma);
  }
  /** Serializes z followed by gamma. */
  toBuffer() {
    return serializeToBuffer(this.z, this.gamma);
  }
}
|
|
170
|
+
/**
 * Running accumulator used to build a BatchedBlob one blob at a time.
 * See noir-projects/noir-protocol-circuits/crates/blob/src/blob_batching_public_inputs.nr -> BlobAccumulatorPublicInputs
 */ export class BatchedBlobAccumulator {
  // Running sha256 hash of all commitments C_i accumulated so far.
  blobCommitmentsHashAcc;
  // Running poseidon2 hash of all per-blob challenges z_i.
  zAcc;
  // Running linear combination of evaluations y_i weighted by gamma^i.
  yAcc;
  // Running linear combination of commitments C_i weighted by gamma^i.
  cAcc;
  // Running linear combination of quotient commitments Q_i weighted by gamma^i.
  qAcc;
  // Running poseidon2 hash over the y_i limb-hashes, finalized into gamma.
  gammaAcc;
  // gamma^(i + 1) after accumulating blob i.
  gammaPow;
  // Injected final challenges (z, gamma), checked against the accumulated values in finalize().
  finalBlobChallenges;
  constructor(/** Hash of Cs (to link to L1 blob hashes). */ blobCommitmentsHashAcc, /** Challenge point z_acc. Final value used such that p_i(z) = y_i. */ zAcc, /** Evaluation y_acc. Final value is is linear combination of all evaluations y_i = p_i(z) with gamma. */ yAcc, /** Commitment c_acc. Final value is linear combination of all commitments C_i = [p_i] with gamma. */ cAcc, /** KZG opening q_acc. Final value is linear combination of all blob kzg 'proofs' Q_i with gamma. */ qAcc, /**
   * Challenge point gamma_acc for multi opening. Used with y, C, and kzg 'proof' Q above.
   * TODO(#13608): We calculate this by hashing natively in the circuit (hence Fr representation), but it's actually used
   * as a BLS12Fr field elt. Is this safe? Is there a skew?
   */ gammaAcc, /** Simply gamma^(i + 1) at blob i. Used for calculating the i'th element of the above linear comb.s */ gammaPow, /** Final challenge values used in evaluation. Optimistically input and checked in the final acc. */ finalBlobChallenges){
    this.blobCommitmentsHashAcc = blobCommitmentsHashAcc;
    this.zAcc = zAcc;
    this.yAcc = yAcc;
    this.cAcc = cAcc;
    this.qAcc = qAcc;
    this.gammaAcc = gammaAcc;
    this.gammaPow = gammaPow;
    this.finalBlobChallenges = finalBlobChallenges;
  }
  /**
   * Init the first accumulation state of the epoch.
   * We assume the input blob has not been evaluated at z.
   *
   * First state of the accumulator:
   * - v_acc := sha256(C_0)
   * - z_acc := z_0
   * - y_acc := gamma^0 * y_0 = y_0
   * - c_acc := gamma^0 * c_0 = c_0
   * - gamma_acc := poseidon2(y_0.limbs)
   * - gamma^(i + 1) = gamma^1 = gamma // denoted gamma_pow_acc
   *
   * @returns An initial blob accumulator.
   */ static async initialize(blob, finalBlobChallenges) {
    // Evaluate the blob polynomial at the shared challenge z; q is the quotient commitment Q_0.
    const [q, evaluation] = computeKzgProof(blob.data, finalBlobChallenges.z.toBuffer());
    const firstY = BLS12Fr.fromBuffer(Buffer.from(evaluation));
    // Here, i = 0, so:
    return new BatchedBlobAccumulator(sha256ToField([
      blob.commitment
    ]), blob.challengeZ, firstY, BLS12Point.decompress(blob.commitment), BLS12Point.decompress(Buffer.from(q)), await hashNoirBigNumLimbs(firstY), finalBlobChallenges.gamma, finalBlobChallenges);
  }
  /**
   * Create the empty accumulation state of the epoch.
   * @returns An empty blob accumulator with challenges.
   */ static newWithChallenges(finalBlobChallenges) {
    return new BatchedBlobAccumulator(Fr.ZERO, Fr.ZERO, BLS12Fr.ZERO, BLS12Point.ZERO, BLS12Point.ZERO, Fr.ZERO, BLS12Fr.ZERO, finalBlobChallenges);
  }
  /**
   * Given blob i, accumulate all state.
   * We assume the input blob has not been evaluated at z.
   * @returns An updated blob accumulator.
   */ async accumulate(blob) {
    if (this.isEmptyState()) {
      // First blob of the epoch: initialize rather than fold into zeros.
      return BatchedBlobAccumulator.initialize(blob, this.finalBlobChallenges);
    } else {
      const [q, evaluation] = computeKzgProof(blob.data, this.finalBlobChallenges.z.toBuffer());
      const thisY = BLS12Fr.fromBuffer(Buffer.from(evaluation));
      // Moving from i - 1 to i, so:
      // - v_acc := sha256(v_acc, C_i)
      // - z_acc := poseidon2(z_acc, z_i)
      // - y_acc += gamma^i * y_i ; c_acc += gamma^i * C_i ; q_acc += gamma^i * Q_i
      // - gamma_acc := poseidon2(gamma_acc, poseidon2(y_i.limbs)) ; gamma_pow *= gamma
      return new BatchedBlobAccumulator(sha256ToField([
        this.blobCommitmentsHashAcc,
        blob.commitment
      ]), await poseidon2Hash([
        this.zAcc,
        blob.challengeZ
      ]), this.yAcc.add(thisY.mul(this.gammaPow)), this.cAcc.add(BLS12Point.decompress(blob.commitment).mul(this.gammaPow)), this.qAcc.add(BLS12Point.decompress(Buffer.from(q)).mul(this.gammaPow)), await poseidon2Hash([
        this.gammaAcc,
        await hashNoirBigNumLimbs(thisY)
      ]), this.gammaPow.mul(this.finalBlobChallenges.gamma), this.finalBlobChallenges);
    }
  }
  /**
   * Given blobs, accumulate all state.
   * We assume the input blobs have not been evaluated at z.
   * @returns An updated blob accumulator.
   */ async accumulateBlobs(blobs) {
    // Initialize the acc to iterate over:
    let acc = this.clone();
    for(let i = 0; i < blobs.length; i++){
      acc = await acc.accumulate(blobs[i]);
    }
    return acc;
  }
  /**
   * Finalize accumulation state of the epoch.
   * We assume ALL blobs in the epoch have been accumulated.
   *
   * Final accumulated values:
   * - v := v_acc (hash of all commitments (C_i s) to be checked on L1)
   * - z := z_acc (final challenge, at which all blobs are evaluated)
   * - y := y_acc (final opening to be checked on L1)
   * - c := c_acc (final commitment to be checked on L1)
   * - gamma := poseidon2(gamma_acc, z) (challenge for linear combination of y and C, above)
   *
   * @returns A batched blob.
   * @throws If the accumulated z/gamma disagree with the injected challenges, or the KZG proof fails.
   */ async finalize() {
    // All values in acc are final, apart from gamma := poseidon2(gammaAcc, z):
    const calculatedGamma = await poseidon2Hash([
      this.gammaAcc,
      this.zAcc
    ]);
    // Check final values:
    if (!this.zAcc.equals(this.finalBlobChallenges.z)) {
      throw new Error(`Blob batching mismatch: accumulated z ${this.zAcc} does not equal injected z ${this.finalBlobChallenges.z}`);
    }
    if (!calculatedGamma.equals(this.finalBlobChallenges.gamma.toBN254Fr())) {
      throw new Error(`Blob batching mismatch: accumulated gamma ${calculatedGamma} does not equal injected gamma ${this.finalBlobChallenges.gamma.toBN254Fr()}`);
    }
    if (!verifyKzgProof(this.cAcc.compress(), this.zAcc.toBuffer(), this.yAcc.toBuffer(), this.qAcc.compress())) {
      throw new Error(`KZG proof did not verify.`);
    }
    return new BatchedBlob(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc, this.qAcc);
  }
  // True iff every accumulated component is zero, i.e. no blob has been folded in yet.
  isEmptyState() {
    return this.blobCommitmentsHashAcc.isZero() && this.zAcc.isZero() && this.yAcc.isZero() && this.cAcc.isZero() && this.qAcc.isZero() && this.gammaAcc.isZero() && this.gammaPow.isZero();
  }
  // Deep copy via (de)serialization of each component, so mutations of the clone never alias this instance.
  clone() {
    return new BatchedBlobAccumulator(Fr.fromBuffer(this.blobCommitmentsHashAcc.toBuffer()), Fr.fromBuffer(this.zAcc.toBuffer()), BLS12Fr.fromBuffer(this.yAcc.toBuffer()), BLS12Point.fromBuffer(this.cAcc.toBuffer()), BLS12Point.fromBuffer(this.qAcc.toBuffer()), Fr.fromBuffer(this.gammaAcc.toBuffer()), BLS12Fr.fromBuffer(this.gammaPow.toBuffer()), FinalBlobBatchingChallenges.fromBuffer(this.finalBlobChallenges.toBuffer()));
  }
}
|
|
294
|
+
/**
 * Mimics the hash accumulation done in the rollup circuits: hashes each u128 limb
 * of the noir bignum representation of the given BLS12 field element with poseidon2.
 */
async function hashNoirBigNumLimbs(field) {
  const { limbs } = field.toNoirBigNum();
  const limbFields = limbs.map((limb)=>Fr.fromHexString(limb));
  return poseidon2Hash(limbFields);
}
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import { BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
|
|
2
|
+
import { BufferReader, FieldReader } from '@aztec/foundation/serialize';
|
|
3
|
+
import { inspect } from 'util';
|
|
4
|
+
import { Blob } from './blob.js';
|
|
5
|
+
import { BatchedBlob, BatchedBlobAccumulator, FinalBlobBatchingChallenges } from './blob_batching.js';
|
|
6
|
+
/**
 * See nr BlobAccumulatorPublicInputs and ts BatchedBlobAccumulator for documentation.
 */
export declare class BlobAccumulatorPublicInputs {
    /** Running sha256 hash of the blob commitments C_i accumulated so far. */
    blobCommitmentsHashAcc: Fr;
    /** Running hash of per-blob challenges z_i. */
    zAcc: Fr;
    /** Running gamma-weighted linear combination of evaluations y_i. */
    yAcc: BLS12Fr;
    /** Running gamma-weighted linear combination of commitments C_i. */
    cAcc: BLS12Point;
    /** Running hash used to derive the final gamma challenge. */
    gammaAcc: Fr;
    /** gamma^(i + 1) after accumulating blob i. */
    gammaPowAcc: BLS12Fr;
    constructor(blobCommitmentsHashAcc: Fr, zAcc: Fr, yAcc: BLS12Fr, cAcc: BLS12Point, gammaAcc: Fr, gammaPowAcc: BLS12Fr);
    /** All-zero accumulator state. */
    static empty(): BlobAccumulatorPublicInputs;
    /** Field-wise equality. */
    equals(other: BlobAccumulatorPublicInputs): boolean;
    /** Deserializes in the same component order as toBuffer(). */
    static fromBuffer(buffer: Buffer | BufferReader): BlobAccumulatorPublicInputs;
    toBuffer(): Buffer<ArrayBufferLike>;
    /**
     * Given blobs, accumulate all public inputs state.
     * We assume the input blobs have not been evaluated at z.
     * NOTE: Does NOT accumulate non circuit values including Q. This exists to simulate/check exactly what the circuit is doing
     * and is unsafe for other use. For that reason, a toBatchedBlobAccumulator does not exist. See evaluateBlobs() oracle for usage.
     * @returns An updated blob accumulator.
     */
    accumulateBlobs(blobs: Blob[], finalBlobChallenges: FinalBlobBatchingChallenges): Promise<BlobAccumulatorPublicInputs>;
    /** Flattens to Fr elements in the order expected by the rollup circuits. */
    toFields(): Fr[];
    /** Inverse of toFields(). */
    static fromFields(fields: Fr[] | FieldReader): BlobAccumulatorPublicInputs;
    /**
     * Converts from an accumulator to a struct for the public inputs of our rollup circuits.
     * @returns A BlobAccumulatorPublicInputs instance.
     */
    static fromBatchedBlobAccumulator(accumulator: BatchedBlobAccumulator): BlobAccumulatorPublicInputs;
}
|
|
37
|
+
/**
 * See nr FinalBlobAccumulatorPublicInputs and ts BatchedBlobAccumulator for documentation.
 */
export declare class FinalBlobAccumulatorPublicInputs {
    /** Hash of all blob commitments C_i (links to L1 blob hashes). */
    blobCommitmentsHash: Fr;
    /** Final challenge point at which all blobs are evaluated. */
    z: Fr;
    /** Final combined evaluation checked on L1. */
    y: BLS12Fr;
    /** Final combined commitment checked on L1. */
    c: BLS12Point;
    constructor(blobCommitmentsHash: Fr, z: Fr, y: BLS12Fr, c: BLS12Point);
    /** All-zero instance. */
    static empty(): FinalBlobAccumulatorPublicInputs;
    /** Deserializes in the same component order as toBuffer(). */
    static fromBuffer(buffer: Buffer | BufferReader): FinalBlobAccumulatorPublicInputs;
    toBuffer(): Buffer<ArrayBufferLike>;
    /** Extracts the final public inputs from a fully batched blob. */
    static fromBatchedBlob(blob: BatchedBlob): FinalBlobAccumulatorPublicInputs;
    /** Flattens to Fr elements in circuit order. */
    toFields(): Fr[];
    /** Hex encoding (with a 32-byte blobHash placeholder) sent to L1 for proof verification. */
    toString(): string;
    /** Field-wise equality. */
    equals(other: FinalBlobAccumulatorPublicInputs): boolean;
    /** Random instance for testing only — will not prove/verify. */
    static random(): FinalBlobAccumulatorPublicInputs;
    /** Warning: the accumulator MUST be in its final state. */
    static fromBatchedBlobAccumulator(accumulator: BatchedBlobAccumulator): FinalBlobAccumulatorPublicInputs;
    [inspect.custom](): string;
}
|
|
57
|
+
/**
 * startBlobAccumulator: Accumulated opening proofs for all blobs before this block range.
 * endBlobAccumulator: Accumulated opening proofs for all blobs after adding this block range.
 * finalBlobChallenges: Final values z and gamma, shared across the epoch.
 */
export declare class BlockBlobPublicInputs {
    /** Accumulator state before this block range. */
    startBlobAccumulator: BlobAccumulatorPublicInputs;
    /** Accumulator state after this block range. */
    endBlobAccumulator: BlobAccumulatorPublicInputs;
    /** Epoch-wide injected challenges (z, gamma). */
    finalBlobChallenges: FinalBlobBatchingChallenges;
    constructor(startBlobAccumulator: BlobAccumulatorPublicInputs, endBlobAccumulator: BlobAccumulatorPublicInputs, finalBlobChallenges: FinalBlobBatchingChallenges);
    /** All-empty instance. */
    static empty(): BlockBlobPublicInputs;
    /** Deserializes in the same component order as toBuffer(). */
    static fromBuffer(buffer: Buffer | BufferReader): BlockBlobPublicInputs;
    toBuffer(): Buffer<ArrayBufferLike>;
}
|
|
71
|
+
//# sourceMappingURL=blob_batching_public_inputs.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"blob_batching_public_inputs.d.ts","sourceRoot":"","sources":["../src/blob_batching_public_inputs.ts"],"names":[],"mappings":"AACA,OAAO,EAAW,OAAO,EAAE,UAAU,EAAE,EAAE,EAAE,MAAM,0BAA0B,CAAC;AAC5E,OAAO,EAAE,YAAY,EAAE,WAAW,EAAqB,MAAM,6BAA6B,CAAC;AAE3F,OAAO,EAAE,OAAO,EAAE,MAAM,MAAM,CAAC;AAE/B,OAAO,EAAE,IAAI,EAAE,MAAM,WAAW,CAAC;AACjC,OAAO,EAAE,WAAW,EAAE,sBAAsB,EAAE,2BAA2B,EAAE,MAAM,oBAAoB,CAAC;AAEtG;;GAEG;AACH,qBAAa,2BAA2B;IAE7B,sBAAsB,EAAE,EAAE;IAC1B,IAAI,EAAE,EAAE;IACR,IAAI,EAAE,OAAO;IACb,IAAI,EAAE,UAAU;IAChB,QAAQ,EAAE,EAAE;IACZ,WAAW,EAAE,OAAO;gBALpB,sBAAsB,EAAE,EAAE,EAC1B,IAAI,EAAE,EAAE,EACR,IAAI,EAAE,OAAO,EACb,IAAI,EAAE,UAAU,EAChB,QAAQ,EAAE,EAAE,EACZ,WAAW,EAAE,OAAO;IAG7B,MAAM,CAAC,KAAK,IAAI,2BAA2B;IAI3C,MAAM,CAAC,KAAK,EAAE,2BAA2B;IAWzC,MAAM,CAAC,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,YAAY,GAAG,2BAA2B;IAY7E,QAAQ;IAWR;;;;;;OAMG;IACG,eAAe,CAAC,KAAK,EAAE,IAAI,EAAE,EAAE,mBAAmB,EAAE,2BAA2B;IAsBrF,QAAQ;IAaR,MAAM,CAAC,UAAU,CAAC,MAAM,EAAE,EAAE,EAAE,GAAG,WAAW,GAAG,2BAA2B;IAgB1E;;;OAGG;IACH,MAAM,CAAC,0BAA0B,CAAC,WAAW,EAAE,sBAAsB;CAUtE;AAED;;GAEG;AACH,qBAAa,gCAAgC;IAElC,mBAAmB,EAAE,EAAE;IACvB,CAAC,EAAE,EAAE;IACL,CAAC,EAAE,OAAO;IACV,CAAC,EAAE,UAAU;gBAHb,mBAAmB,EAAE,EAAE,EACvB,CAAC,EAAE,EAAE,EACL,CAAC,EAAE,OAAO,EACV,CAAC,EAAE,UAAU;IAGtB,MAAM,CAAC,KAAK,IAAI,gCAAgC;IAIhD,MAAM,CAAC,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,YAAY,GAAG,gCAAgC;IAUlF,QAAQ;IAIR,MAAM,CAAC,eAAe,CAAC,IAAI,EAAE,WAAW;IAIxC,QAAQ;IAUR,QAAQ;IAQR,MAAM,CAAC,KAAK,EAAE,gCAAgC;IAU9C,MAAM,CAAC,MAAM;IAKb,MAAM,CAAC,0BAA0B,CAAC,WAAW,EAAE,sBAAsB;IASrE,CAAC,OAAO,CAAC,MAAM,CAAC;CAQjB;AAED;;;;GAIG;AACH,qBAAa,qBAAqB;IAEvB,oBAAoB,EAAE,2BAA2B;IACjD,kBAAkB,EAAE,2BAA2B;IAC/C,mBAAmB,EAAE,2BAA2B;gBAFhD,oBAAoB,EAAE,2BAA2B,EACjD,kBAAkB,EAAE,2BAA2B,EAC/C,mBAAmB,EAAE,2BAA2B;IAGzD,MAAM,CAAC,KAAK,IAAI,qBAAqB;IAQrC,MAAM,CAAC,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,YAAY,GAAG,qBAAqB;IASvE,QAAQ;CAGT"}
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
import { BLS12_FQ_LIMBS, BLS12_FR_LIMBS } from '@aztec/constants';
|
|
2
|
+
import { BLS12Fq, BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
|
|
3
|
+
import { BufferReader, FieldReader, serializeToBuffer } from '@aztec/foundation/serialize';
|
|
4
|
+
import { inspect } from 'util';
|
|
5
|
+
import { BatchedBlobAccumulator, FinalBlobBatchingChallenges } from './blob_batching.js';
|
|
6
|
+
/**
 * Circuit-facing mirror of BatchedBlobAccumulator (without the non-circuit Q accumulator).
 * See nr BlobAccumulatorPublicInputs and ts BatchedBlobAccumulator for documentation.
 */ export class BlobAccumulatorPublicInputs {
  // Running sha256 hash of blob commitments C_i.
  blobCommitmentsHashAcc;
  // Running hash of per-blob challenges z_i.
  zAcc;
  // Running gamma-weighted combination of evaluations y_i.
  yAcc;
  // Running gamma-weighted combination of commitments C_i.
  cAcc;
  // Running hash used to derive the final gamma challenge.
  gammaAcc;
  // gamma^(i + 1) after accumulating blob i.
  gammaPowAcc;
  constructor(blobCommitmentsHashAcc, zAcc, yAcc, cAcc, gammaAcc, gammaPowAcc){
    this.blobCommitmentsHashAcc = blobCommitmentsHashAcc;
    this.zAcc = zAcc;
    this.yAcc = yAcc;
    this.cAcc = cAcc;
    this.gammaAcc = gammaAcc;
    this.gammaPowAcc = gammaPowAcc;
  }
  // All-zero accumulator state.
  static empty() {
    return new BlobAccumulatorPublicInputs(Fr.ZERO, Fr.ZERO, BLS12Fr.ZERO, BLS12Point.ZERO, Fr.ZERO, BLS12Fr.ZERO);
  }
  // Field-wise equality over all six components.
  equals(other) {
    return this.blobCommitmentsHashAcc.equals(other.blobCommitmentsHashAcc) && this.zAcc.equals(other.zAcc) && this.yAcc.equals(other.yAcc) && this.cAcc.equals(other.cAcc) && this.gammaAcc.equals(other.gammaAcc) && this.gammaPowAcc.equals(other.gammaPowAcc);
  }
  // Deserializes in the same component order as toBuffer().
  static fromBuffer(buffer) {
    const reader = BufferReader.asReader(buffer);
    return new BlobAccumulatorPublicInputs(Fr.fromBuffer(reader), Fr.fromBuffer(reader), BLS12Fr.fromBuffer(reader), BLS12Point.fromBuffer(reader), Fr.fromBuffer(reader), BLS12Fr.fromBuffer(reader));
  }
  toBuffer() {
    return serializeToBuffer(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc, this.gammaAcc, this.gammaPowAcc);
  }
  /**
   * Given blobs, accumulate all public inputs state.
   * We assume the input blobs have not been evaluated at z.
   * NOTE: Does NOT accumulate non circuit values including Q. This exists to simulate/check exactly what the circuit is doing
   * and is unsafe for other use. For that reason, a toBatchedBlobAccumulator does not exist. See evaluateBlobs() oracle for usage.
   * @returns An updated blob accumulator.
   */ async accumulateBlobs(blobs, finalBlobChallenges) {
    // Q (5th arg) is deliberately seeded with the zero point: it is not part of the circuit state.
    let acc = new BatchedBlobAccumulator(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc, BLS12Point.ZERO, this.gammaAcc, this.gammaPowAcc, finalBlobChallenges);
    acc = await acc.accumulateBlobs(blobs);
    return new BlobAccumulatorPublicInputs(acc.blobCommitmentsHashAcc, acc.zAcc, acc.yAcc, acc.cAcc, acc.gammaAcc, acc.gammaPow);
  }
  // Flattens to Fr elements; the order and limb decomposition must match the nr struct exactly.
  // NOTE(review): `map(Fr.fromString)` also forwards map's (index, array) arguments to
  // Fr.fromString — presumably it ignores extras, but confirm (cf. the classic parseInt pitfall).
  toFields() {
    return [
      this.blobCommitmentsHashAcc,
      this.zAcc,
      ...this.yAcc.toNoirBigNum().limbs.map(Fr.fromString),
      ...this.cAcc.x.toNoirBigNum().limbs.map(Fr.fromString),
      ...this.cAcc.y.toNoirBigNum().limbs.map(Fr.fromString),
      new Fr(this.cAcc.isInfinite),
      this.gammaAcc,
      ...this.gammaPowAcc.toNoirBigNum().limbs.map(Fr.fromString)
    ];
  }
  // Inverse of toFields(): reads each component back limb-by-limb.
  static fromFields(fields) {
    const reader = FieldReader.asReader(fields);
    return new BlobAccumulatorPublicInputs(reader.readField(), reader.readField(), BLS12Fr.fromNoirBigNum({
      limbs: reader.readFieldArray(BLS12_FR_LIMBS).map((f)=>f.toString())
    }), new BLS12Point(BLS12Fq.fromNoirBigNum({
      limbs: reader.readFieldArray(BLS12_FQ_LIMBS).map((f)=>f.toString())
    }), BLS12Fq.fromNoirBigNum({
      limbs: reader.readFieldArray(BLS12_FQ_LIMBS).map((f)=>f.toString())
    }), reader.readBoolean()), reader.readField(), BLS12Fr.fromNoirBigNum({
      limbs: reader.readFieldArray(BLS12_FR_LIMBS).map((f)=>f.toString())
    }));
  }
  /**
   * Converts from an accumulator to a struct for the public inputs of our rollup circuits.
   * @returns A BlobAccumulatorPublicInputs instance.
   */ static fromBatchedBlobAccumulator(accumulator) {
    return new BlobAccumulatorPublicInputs(accumulator.blobCommitmentsHashAcc, accumulator.zAcc, accumulator.yAcc, accumulator.cAcc, accumulator.gammaAcc, accumulator.gammaPow);
  }
}
|
|
78
|
+
/**
|
|
79
|
+
* See nr FinalBlobAccumulatorPublicInputs and ts BatchedBlobAccumulator for documentation.
|
|
80
|
+
*/ export class FinalBlobAccumulatorPublicInputs {
|
|
81
|
+
blobCommitmentsHash;
|
|
82
|
+
z;
|
|
83
|
+
y;
|
|
84
|
+
c;
|
|
85
|
+
constructor(blobCommitmentsHash, z, y, c){
|
|
86
|
+
this.blobCommitmentsHash = blobCommitmentsHash;
|
|
87
|
+
this.z = z;
|
|
88
|
+
this.y = y;
|
|
89
|
+
this.c = c;
|
|
90
|
+
}
|
|
91
|
+
static empty() {
|
|
92
|
+
return new FinalBlobAccumulatorPublicInputs(Fr.ZERO, Fr.ZERO, BLS12Fr.ZERO, BLS12Point.ZERO);
|
|
93
|
+
}
|
|
94
|
+
static fromBuffer(buffer) {
|
|
95
|
+
const reader = BufferReader.asReader(buffer);
|
|
96
|
+
return new FinalBlobAccumulatorPublicInputs(Fr.fromBuffer(reader), Fr.fromBuffer(reader), BLS12Fr.fromBuffer(reader), BLS12Point.fromBuffer(reader));
|
|
97
|
+
}
|
|
98
|
+
toBuffer() {
|
|
99
|
+
return serializeToBuffer(this.blobCommitmentsHash, this.z, this.y, this.c);
|
|
100
|
+
}
|
|
101
|
+
static fromBatchedBlob(blob) {
|
|
102
|
+
return new FinalBlobAccumulatorPublicInputs(blob.blobCommitmentsHash, blob.z, blob.y, blob.commitment);
|
|
103
|
+
}
|
|
104
|
+
toFields() {
|
|
105
|
+
return [
|
|
106
|
+
this.blobCommitmentsHash,
|
|
107
|
+
this.z,
|
|
108
|
+
...this.y.toNoirBigNum().limbs.map(Fr.fromString),
|
|
109
|
+
...this.c.toBN254Fields()
|
|
110
|
+
];
|
|
111
|
+
}
|
|
112
|
+
// The below is used to send to L1 for proof verification
|
|
113
|
+
toString() {
|
|
114
|
+
// We prepend 32 bytes for the (unused) 'blobHash' slot. This is not read or required by getEpochProofPublicInputs() on L1, but
|
|
115
|
+
// is expected since we usually pass the full precompile inputs via verifyEpochRootProof() to getEpochProofPublicInputs() to ensure
|
|
116
|
+
// we use calldata rather than a slice in memory:
|
|
117
|
+
const buf = Buffer.concat([
|
|
118
|
+
Buffer.alloc(32),
|
|
119
|
+
this.z.toBuffer(),
|
|
120
|
+
this.y.toBuffer(),
|
|
121
|
+
this.c.compress()
|
|
122
|
+
]);
|
|
123
|
+
return buf.toString('hex');
|
|
124
|
+
}
|
|
125
|
+
equals(other) {
|
|
126
|
+
return this.blobCommitmentsHash.equals(other.blobCommitmentsHash) && this.z.equals(other.z) && this.y.equals(other.y) && this.c.equals(other.c);
|
|
127
|
+
}
|
|
128
|
+
// Creates a random instance. Used for testing only - will not prove/verify.
|
|
129
|
+
static random() {
|
|
130
|
+
return new FinalBlobAccumulatorPublicInputs(Fr.random(), Fr.random(), BLS12Fr.random(), BLS12Point.random());
|
|
131
|
+
}
|
|
132
|
+
// Warning: MUST be final accumulator state.
|
|
133
|
+
static fromBatchedBlobAccumulator(accumulator) {
|
|
134
|
+
return new FinalBlobAccumulatorPublicInputs(accumulator.blobCommitmentsHashAcc, accumulator.zAcc, accumulator.yAcc, accumulator.cAcc);
|
|
135
|
+
}
|
|
136
|
+
// Custom pretty-printer picked up by Node's util.inspect (e.g. console.log).
// NOTE(review): the template literal's internal whitespace is part of the
// emitted string; the original indentation could not be recovered from the
// diff rendering — confirm against the source before relying on exact output.
[inspect.custom]() {
    return `FinalBlobAccumulatorPublicInputs {
    blobCommitmentsHash: ${inspect(this.blobCommitmentsHash)},
    z: ${inspect(this.z)},
    y: ${inspect(this.y)},
    c: ${inspect(this.c)},
    }`;
}
|
|
144
|
+
}
|
|
145
|
+
/**
 * Public inputs tying blob accumulation to a block range within an epoch.
 *
 * startBlobAccumulator: Accumulated opening proofs for all blobs before this block range.
 * endBlobAccumulator: Accumulated opening proofs for all blobs after adding this block range.
 * finalBlobChallenges: Final values z and gamma, shared across the epoch.
 */ export class BlockBlobPublicInputs {
    startBlobAccumulator;
    endBlobAccumulator;
    finalBlobChallenges;
    constructor(startBlobAccumulator, endBlobAccumulator, finalBlobChallenges){
        this.startBlobAccumulator = startBlobAccumulator;
        this.endBlobAccumulator = endBlobAccumulator;
        this.finalBlobChallenges = finalBlobChallenges;
    }
    /** All-empty instance: empty start/end accumulators and empty challenges. */
    static empty() {
        const start = BlobAccumulatorPublicInputs.empty();
        const end = BlobAccumulatorPublicInputs.empty();
        const challenges = FinalBlobBatchingChallenges.empty();
        return new BlockBlobPublicInputs(start, end, challenges);
    }
    /** Deserializes from a buffer/reader; mirror of toBuffer(). */
    static fromBuffer(buffer) {
        const reader = BufferReader.asReader(buffer);
        const start = reader.readObject(BlobAccumulatorPublicInputs);
        const end = reader.readObject(BlobAccumulatorPublicInputs);
        const challenges = reader.readObject(FinalBlobBatchingChallenges);
        return new BlockBlobPublicInputs(start, end, challenges);
    }
    /** Serializes start accumulator, end accumulator, then challenges. */
    toBuffer() {
        const { startBlobAccumulator, endBlobAccumulator, finalBlobChallenges } = this;
        return serializeToBuffer(startBlobAccumulator, endBlobAccumulator, finalBlobChallenges);
    }
}
|
package/dest/index.d.ts
CHANGED
|
@@ -1,7 +1,8 @@
|
|
|
1
1
|
export * from './blob.js';
|
|
2
|
+
export * from './blob_batching.js';
|
|
2
3
|
export * from './encoding.js';
|
|
3
4
|
export * from './interface.js';
|
|
4
5
|
export * from './errors.js';
|
|
5
|
-
export * from './
|
|
6
|
+
export * from './blob_batching_public_inputs.js';
|
|
6
7
|
export * from './sponge_blob.js';
|
|
7
8
|
//# sourceMappingURL=index.d.ts.map
|
package/dest/index.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAIA,cAAc,WAAW,CAAC;AAC1B,cAAc,oBAAoB,CAAC;AACnC,cAAc,eAAe,CAAC;AAC9B,cAAc,gBAAgB,CAAC;AAC/B,cAAc,aAAa,CAAC;AAC5B,cAAc,kCAAkC,CAAC;AACjD,cAAc,kBAAkB,CAAC"}
|
package/dest/index.js
CHANGED
|
@@ -1,13 +1,14 @@
|
|
|
1
1
|
import cKzg from 'c-kzg';
|
|
2
|
-
|
|
2
|
+
const { loadTrustedSetup } = cKzg;
|
|
3
3
|
export * from './blob.js';
|
|
4
|
+
export * from './blob_batching.js';
|
|
4
5
|
export * from './encoding.js';
|
|
5
6
|
export * from './interface.js';
|
|
6
7
|
export * from './errors.js';
|
|
7
|
-
export * from './
|
|
8
|
+
export * from './blob_batching_public_inputs.js';
|
|
8
9
|
export * from './sponge_blob.js';
|
|
9
10
|
try {
|
|
10
|
-
loadTrustedSetup();
|
|
11
|
+
loadTrustedSetup(8); // See https://notes.ethereum.org/@jtraglia/windowed_multiplications
|
|
11
12
|
} catch (error) {
|
|
12
13
|
if (error.message.includes('trusted setup is already loaded')) {
|
|
13
14
|
// NB: The c-kzg lib has no way of checking whether the setup is loaded or not,
|
package/dest/interface.d.ts
CHANGED
package/dest/interface.d.ts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"interface.d.ts","sourceRoot":"","sources":["../src/interface.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,QAAQ;IACvB,IAAI,EAAE,MAAM,CAAC;IACb,KAAK,
|
|
1
|
+
{"version":3,"file":"interface.d.ts","sourceRoot":"","sources":["../src/interface.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,QAAQ;IACvB,IAAI,EAAE,MAAM,CAAC;IACb,KAAK,EAAE,MAAM,CAAC;IACd,cAAc,EAAE,MAAM,CAAC;CACxB"}
|
package/dest/sponge_blob.d.ts
CHANGED
|
@@ -1,5 +1,3 @@
|
|
|
1
|
-
/// <reference types="node" resolution-mode="require"/>
|
|
2
|
-
/// <reference types="node" resolution-mode="require"/>
|
|
3
1
|
import { type FieldsOf } from '@aztec/foundation/array';
|
|
4
2
|
import { Fr } from '@aztec/foundation/fields';
|
|
5
3
|
import { BufferReader, FieldReader, type Tuple } from '@aztec/foundation/serialize';
|
|
@@ -22,7 +20,7 @@ export declare class SpongeBlob {
|
|
|
22
20
|
/** Number of effects that will be absorbed. */
|
|
23
21
|
expectedFields: number);
|
|
24
22
|
static fromBuffer(buffer: Buffer | BufferReader): SpongeBlob;
|
|
25
|
-
toBuffer(): Buffer
|
|
23
|
+
toBuffer(): Buffer<ArrayBufferLike>;
|
|
26
24
|
static getFields(fields: FieldsOf<SpongeBlob>): (number | Poseidon2Sponge)[];
|
|
27
25
|
toFields(): Fr[];
|
|
28
26
|
static fromFields(fields: Fr[] | FieldReader): SpongeBlob;
|
|
@@ -39,7 +37,7 @@ export declare class Poseidon2Sponge {
|
|
|
39
37
|
squeezeMode: boolean;
|
|
40
38
|
constructor(cache: Tuple<Fr, 3>, state: Tuple<Fr, 4>, cacheSize: number, squeezeMode: boolean);
|
|
41
39
|
static fromBuffer(buffer: Buffer | BufferReader): Poseidon2Sponge;
|
|
42
|
-
toBuffer(): Buffer
|
|
40
|
+
toBuffer(): Buffer<ArrayBufferLike>;
|
|
43
41
|
static getFields(fields: FieldsOf<Poseidon2Sponge>): (number | boolean | [Fr, Fr, Fr] | [Fr, Fr, Fr, Fr])[];
|
|
44
42
|
toFields(): Fr[];
|
|
45
43
|
static fromFields(fields: Fr[] | FieldReader): Poseidon2Sponge;
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"sponge_blob.d.ts","sourceRoot":"","sources":["../src/sponge_blob.ts"],"names":[],"mappings":"
|
|
1
|
+
{"version":3,"file":"sponge_blob.d.ts","sourceRoot":"","sources":["../src/sponge_blob.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,QAAQ,EAAa,MAAM,yBAAyB,CAAC;AAEnE,OAAO,EAAE,EAAE,EAAE,MAAM,0BAA0B,CAAC;AAC9C,OAAO,EACL,YAAY,EACZ,WAAW,EACX,KAAK,KAAK,EAGX,MAAM,6BAA6B,CAAC;AAErC;;;GAGG;AACH,qBAAa,UAAU;IAEnB,gEAAgE;aAChD,MAAM,EAAE,eAAe;IACvC,yCAAyC;IAClC,MAAM,EAAE,MAAM;IACrB,+CAA+C;aAC/B,cAAc,EAAE,MAAM;;IALtC,gEAAgE;IAChD,MAAM,EAAE,eAAe;IACvC,yCAAyC;IAClC,MAAM,EAAE,MAAM;IACrB,+CAA+C;IAC/B,cAAc,EAAE,MAAM;IAGxC,MAAM,CAAC,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,YAAY,GAAG,UAAU;IAK5D,QAAQ;IAIR,MAAM,CAAC,SAAS,CAAC,MAAM,EAAE,QAAQ,CAAC,UAAU,CAAC;IAI7C,QAAQ,IAAI,EAAE,EAAE;IAIhB,MAAM,CAAC,UAAU,CAAC,MAAM,EAAE,EAAE,EAAE,GAAG,WAAW,GAAG,UAAU;IASzD,KAAK;IAIC,MAAM,CAAC,MAAM,EAAE,EAAE,EAAE;IAUnB,OAAO,IAAI,OAAO,CAAC,EAAE,CAAC;IAS5B,MAAM,CAAC,KAAK,IAAI,UAAU;IAI1B,MAAM,CAAC,IAAI,CAAC,cAAc,EAAE,MAAM,GAAG,UAAU;CAGhD;AAGD,qBAAa,eAAe;IAEjB,KAAK,EAAE,KAAK,CAAC,EAAE,EAAE,CAAC,CAAC;IACnB,KAAK,EAAE,KAAK,CAAC,EAAE,EAAE,CAAC,CAAC;IACnB,SAAS,EAAE,MAAM;IACjB,WAAW,EAAE,OAAO;gBAHpB,KAAK,EAAE,KAAK,CAAC,EAAE,EAAE,CAAC,CAAC,EACnB,KAAK,EAAE,KAAK,CAAC,EAAE,EAAE,CAAC,CAAC,EACnB,SAAS,EAAE,MAAM,EACjB,WAAW,EAAE,OAAO;IAG7B,MAAM,CAAC,UAAU,CAAC,MAAM,EAAE,MAAM,GAAG,YAAY,GAAG,eAAe;IAUjE,QAAQ;IAIR,MAAM,CAAC,SAAS,CAAC,MAAM,EAAE,QAAQ,CAAC,eAAe,CAAC;IAIlD,QAAQ,IAAI,EAAE,EAAE;IAIhB,MAAM,CAAC,UAAU,CAAC,MAAM,EAAE,EAAE,EAAE,GAAG,WAAW,GAAG,eAAe;IAU9D,MAAM,CAAC,KAAK,IAAI,eAAe;IAS/B,MAAM,CAAC,IAAI,CAAC,cAAc,EAAE,MAAM,GAAG,eAAe;IAU9C,aAAa;IAWb,MAAM,CAAC,MAAM,EAAE,EAAE,EAAE;IAenB,OAAO,IAAI,OAAO,CAAC,EAAE,CAAC;CAQ7B"}
|
package/dest/testing.d.ts
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import { Fr } from '@aztec/foundation/fields';
|
|
2
2
|
import { Blob } from './blob.js';
|
|
3
|
-
import {
|
|
3
|
+
import { BatchedBlobAccumulator } from './blob_batching.js';
|
|
4
|
+
import { BlockBlobPublicInputs } from './blob_batching_public_inputs.js';
|
|
4
5
|
import { SpongeBlob } from './sponge_blob.js';
|
|
5
6
|
/**
|
|
6
7
|
* Makes arbitrary poseidon sponge for blob inputs.
|
|
@@ -10,12 +11,12 @@ import { SpongeBlob } from './sponge_blob.js';
|
|
|
10
11
|
*/
|
|
11
12
|
export declare function makeSpongeBlob(seed?: number): SpongeBlob;
|
|
12
13
|
/**
|
|
13
|
-
* Makes arbitrary blob public
|
|
14
|
+
* Makes arbitrary blob public accumulator.
|
|
14
15
|
* Note: will not verify inside the circuit.
|
|
15
|
-
* @param seed - The seed to use for generating the blob
|
|
16
|
-
* @returns A blob
|
|
16
|
+
* @param seed - The seed to use for generating the blob accumulator.
|
|
17
|
+
* @returns A blob accumulator instance.
|
|
17
18
|
*/
|
|
18
|
-
export declare function
|
|
19
|
+
export declare function makeBatchedBlobAccumulator(seed?: number): BatchedBlobAccumulator;
|
|
19
20
|
/**
|
|
20
21
|
* Makes arbitrary block blob public inputs.
|
|
21
22
|
* Note: will not verify inside the circuit.
|