@aztec/blob-lib 0.0.1-commit.b655e406 → 0.0.1-commit.fce3e4f
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/batched_blob.d.ts +25 -0
- package/dest/batched_blob.d.ts.map +1 -0
- package/dest/batched_blob.js +20 -0
- package/dest/blob.d.ts +4 -10
- package/dest/blob.d.ts.map +1 -1
- package/dest/blob_batching.d.ts +33 -83
- package/dest/blob_batching.d.ts.map +1 -1
- package/dest/blob_batching.js +68 -105
- package/dest/blob_utils.d.ts +19 -10
- package/dest/blob_utils.d.ts.map +1 -1
- package/dest/blob_utils.js +28 -19
- package/dest/circuit_types/blob_accumulator.d.ts +2 -1
- package/dest/circuit_types/blob_accumulator.d.ts.map +1 -1
- package/dest/circuit_types/blob_accumulator.js +3 -0
- package/dest/circuit_types/final_blob_accumulator.d.ts +1 -1
- package/dest/circuit_types/final_blob_accumulator.d.ts.map +1 -1
- package/dest/circuit_types/final_blob_batching_challenges.d.ts +1 -1
- package/dest/circuit_types/final_blob_batching_challenges.d.ts.map +1 -1
- package/dest/circuit_types/index.d.ts +1 -1
- package/dest/encoding/block_blob_data.d.ts +22 -0
- package/dest/encoding/block_blob_data.d.ts.map +1 -0
- package/dest/encoding/block_blob_data.js +65 -0
- package/dest/encoding/block_end_marker.d.ts +10 -0
- package/dest/encoding/block_end_marker.d.ts.map +1 -0
- package/dest/encoding/block_end_marker.js +40 -0
- package/dest/encoding/block_end_state_field.d.ts +12 -0
- package/dest/encoding/block_end_state_field.d.ts.map +1 -0
- package/dest/encoding/block_end_state_field.js +39 -0
- package/dest/encoding/checkpoint_blob_data.d.ts +15 -0
- package/dest/encoding/checkpoint_blob_data.d.ts.map +1 -0
- package/dest/encoding/checkpoint_blob_data.js +67 -0
- package/dest/encoding/checkpoint_end_marker.d.ts +8 -0
- package/dest/encoding/checkpoint_end_marker.d.ts.map +1 -0
- package/dest/encoding/checkpoint_end_marker.js +28 -0
- package/dest/encoding/fixtures.d.ts +41 -0
- package/dest/encoding/fixtures.d.ts.map +1 -0
- package/dest/encoding/fixtures.js +139 -0
- package/dest/encoding/index.d.ts +10 -0
- package/dest/encoding/index.d.ts.map +1 -0
- package/dest/encoding/index.js +9 -0
- package/dest/encoding/tx_blob_data.d.ts +19 -0
- package/dest/encoding/tx_blob_data.d.ts.map +1 -0
- package/dest/encoding/tx_blob_data.js +79 -0
- package/dest/encoding/tx_start_marker.d.ts +16 -0
- package/dest/encoding/tx_start_marker.d.ts.map +1 -0
- package/dest/{encoding.js → encoding/tx_start_marker.js} +12 -58
- package/dest/errors.d.ts +1 -1
- package/dest/errors.d.ts.map +1 -1
- package/dest/hash.d.ts +11 -4
- package/dest/hash.d.ts.map +1 -1
- package/dest/hash.js +14 -4
- package/dest/index.d.ts +3 -4
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +2 -3
- package/dest/interface.d.ts +1 -1
- package/dest/kzg_context.d.ts +1 -1
- package/dest/sponge_blob.d.ts +8 -14
- package/dest/sponge_blob.d.ts.map +1 -1
- package/dest/sponge_blob.js +19 -34
- package/dest/testing.d.ts +8 -16
- package/dest/testing.d.ts.map +1 -1
- package/dest/testing.js +34 -64
- package/dest/types.d.ts +2 -1
- package/dest/types.d.ts.map +1 -1
- package/dest/types.js +1 -0
- package/package.json +8 -7
- package/src/batched_blob.ts +25 -0
- package/src/blob_batching.ts +81 -123
- package/src/blob_utils.ts +31 -21
- package/src/circuit_types/blob_accumulator.ts +11 -0
- package/src/encoding/block_blob_data.ts +102 -0
- package/src/encoding/block_end_marker.ts +54 -0
- package/src/encoding/block_end_state_field.ts +59 -0
- package/src/encoding/checkpoint_blob_data.ts +95 -0
- package/src/encoding/checkpoint_end_marker.ts +40 -0
- package/src/encoding/fixtures.ts +209 -0
- package/src/encoding/index.ts +9 -0
- package/src/encoding/tx_blob_data.ts +116 -0
- package/src/{encoding.ts → encoding/tx_start_marker.ts} +18 -75
- package/src/hash.ts +14 -4
- package/src/index.ts +2 -3
- package/src/sponge_blob.ts +21 -34
- package/src/testing.ts +46 -73
- package/src/types.ts +1 -0
- package/dest/deserialize.d.ts +0 -14
- package/dest/deserialize.d.ts.map +0 -1
- package/dest/deserialize.js +0 -33
- package/dest/encoding.d.ts +0 -26
- package/dest/encoding.d.ts.map +0 -1
- package/src/deserialize.ts +0 -38
package/dest/testing.js
CHANGED
@@ -1,84 +1,54 @@
-import { FIELDS_PER_BLOB } from '@aztec/constants';
 import { makeTuple } from '@aztec/foundation/array';
-import {
-import { BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
+import { BLS12Fq, BLS12Fr, BLS12Point, BLSPointNotOnCurveError, Fr } from '@aztec/foundation/fields';
 import { Blob } from './blob.js';
-import {
-import {
-import { FinalBlobBatchingChallenges } from './circuit_types/
-import { createBlockEndMarker, encodeTxStartMarker } from './encoding.js';
+import { BlobAccumulator } from './circuit_types/blob_accumulator.js';
+import { FinalBlobAccumulator } from './circuit_types/final_blob_accumulator.js';
+import { FinalBlobBatchingChallenges } from './circuit_types/final_blob_batching_challenges.js';
 import { Poseidon2Sponge, SpongeBlob } from './sponge_blob.js';
+export * from './encoding/fixtures.js';
 /**
  * Makes arbitrary poseidon sponge for blob inputs.
  * Note: will not verify inside the circuit.
  * @param seed - The seed to use for generating the sponge.
  * @returns A sponge blob instance.
  */ export function makeSpongeBlob(seed = 1) {
-    return new SpongeBlob(new Poseidon2Sponge(makeTuple(3, (i)=>new Fr(i)), makeTuple(4, (i)=>new Fr(i)), 1, false), seed
+    return new SpongeBlob(new Poseidon2Sponge(makeTuple(3, (i)=>new Fr(i)), makeTuple(4, (i)=>new Fr(i)), 1, false), seed);
+}
+/**
+ * Makes an arbitrary but valid BLS12 point. The value is deterministic for a given seed.
+ * @param seed - The seed to use for generating the point.
+ * @returns A BLS12 point instance.
+ */ function makeBLS12Point(seed = 1) {
+    let accum = 0;
+    while(true){
+        try {
+            const x = new BLS12Fq(seed + accum);
+            const y = BLS12Point.YFromX(x);
+            if (y) {
+                return new BLS12Point(x, y, false);
+            }
+            accum++;
+        } catch (e) {
+            if (!(e instanceof BLSPointNotOnCurveError)) {
+                throw e;
+            }
+        // The point is not on the curve - try again
+        }
+    }
 }
 /**
  * Makes arbitrary blob public accumulator.
  * Note: will not verify inside the circuit.
  * @param seed - The seed to use for generating the blob accumulator.
  * @returns A blob accumulator instance.
- */ export function
-    return new
+ */ export function makeBlobAccumulator(seed = 1) {
+    return new BlobAccumulator(new Fr(seed), new Fr(seed + 0x10), new BLS12Fr(seed + 0x20), makeBLS12Point(seed + 0x30), new Fr(seed + 0x50), new BLS12Fr(seed + 0x60));
 }
-export function
-
-        numBlobFields: length,
-        // The rest of the values don't matter. The test components using it do not try to deserialize everything.
-        // Only `checkBlobFieldsEncoding` is used and it only looks at `numBlobFields`. This might change in the future
-        // when we add more thorough checks to `checkBlobFieldsEncoding`.
-        revertCode: 0,
-        numNoteHashes: 0,
-        numNullifiers: 0,
-        numL2ToL1Msgs: 0,
-        numPublicDataWrites: 0,
-        numPrivateLogs: 0,
-        publicLogsLength: 0,
-        contractClassLogLength: 0
-    };
-    return [
-        encodeTxStartMarker(txStartMarker),
-        ...Array.from({
-            length: length - 1
-        }, ()=>new Fr(randomInt(Number.MAX_SAFE_INTEGER)))
-    ];
-}
-export function makeEncodedBlockBlobFields(...lengths) {
-    return [
-        ...lengths.length > 0 ? makeEncodedTxBlobFields(lengths[0] - 1) : [],
-        ...lengths.slice(1).flatMap((length)=>makeEncodedTxBlobFields(length)),
-        createBlockEndMarker(lengths.length)
-    ];
-}
-// Create blob fields for a checkpoint with a single block.
-export function makeEncodedBlobFields(length) {
-    if (length <= 2) {
-        throw new Error('Encoded blob fields length must be greater than 2');
-    }
-    const checkpointPrefix = new Fr(length);
-    return [
-        checkpointPrefix,
-        ...makeEncodedBlockBlobFields(length - 1)
-    ]; // -1 to account for the checkpoint prefix.
-}
-/**
- * Make an encoded blob with the given length
- *
- * This will deserialise correctly in the archiver
- * @param length
- * @returns
- */ export function makeEncodedBlob(length) {
-    if (length > FIELDS_PER_BLOB) {
-        throw new Error(`A single encoded blob must be less than ${FIELDS_PER_BLOB} fields`);
-    }
-    return Blob.fromFields(makeEncodedBlobFields(length));
+export function makeFinalBlobAccumulator(seed = 1) {
+    return new FinalBlobAccumulator(new Fr(seed), new Fr(seed + 0x10), new BLS12Fr(seed + 0x20), makeBLS12Point(seed + 0x30));
 }
-export function
-
-    return getBlobsPerL1Block(fields);
+export function makeFinalBlobBatchingChallenges(seed = 1) {
+    return new FinalBlobBatchingChallenges(new Fr(seed), new BLS12Fr(seed + 0x10));
 }
 /**
  * Make a blob with random fields.
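The fixtures above are deterministic in their seed, which is what the new makeBLS12Point loop buys: it walks x-coordinates up from the seed until one lands on the curve. A minimal sketch of how a test might consume them, assuming the './testing' subpath export from package.json and only the signatures shown in this diff (the seed values are arbitrary):

  import { makeBlobAccumulator, makeFinalBlobAccumulator, makeSpongeBlob } from '@aztec/blob-lib/testing';

  // Each helper is deterministic for a given seed, so assertions and snapshots stay stable across runs.
  const sponge = makeSpongeBlob(42);
  const acc = makeBlobAccumulator(42);
  const finalAcc = makeFinalBlobAccumulator(42);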
package/dest/types.d.ts
CHANGED
@@ -1,3 +1,4 @@
+export * from './batched_blob.js';
 export * from './circuit_types/index.js';
 export * from './interface.js';
 export * from './sponge_blob.js';
@@ -13,4 +14,4 @@ export interface BlobKzgInstance {
     /** Function to compute both blob data cells and their corresponding KZG proofs for EIP7594 */
     computeCellsAndKzgProofs(blob: Uint8Array): [Uint8Array[], Uint8Array[]];
 }
-//# sourceMappingURL=
+//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoidHlwZXMuZC50cyIsInNvdXJjZVJvb3QiOiIiLCJzb3VyY2VzIjpbIi4uL3NyYy90eXBlcy50cyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiQUFBQSxjQUFjLG1CQUFtQixDQUFDO0FBQ2xDLGNBQWMsMEJBQTBCLENBQUM7QUFDekMsY0FBYyxnQkFBZ0IsQ0FBQztBQUMvQixjQUFjLGtCQUFrQixDQUFDO0FBRWpDOzs7R0FHRztBQUNILE1BQU0sV0FBVyxlQUFlO0lBQzlCLHdEQUF3RDtJQUN4RCxtQkFBbUIsQ0FBQyxJQUFJLEVBQUUsVUFBVSxHQUFHLFVBQVUsQ0FBQztJQUNsRCxrREFBa0Q7SUFDbEQsbUJBQW1CLENBQUMsSUFBSSxFQUFFLFVBQVUsRUFBRSxVQUFVLEVBQUUsVUFBVSxHQUFHLFVBQVUsQ0FBQztJQUMxRSw4RkFBOEY7SUFDOUYsd0JBQXdCLENBQUMsSUFBSSxFQUFFLFVBQVUsR0FBRyxDQUFDLFVBQVUsRUFBRSxFQUFFLFVBQVUsRUFBRSxDQUFDLENBQUM7Q0FDMUUifQ==
package/dest/types.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../src/types.ts"],"names":[],"mappings":"AAAA,cAAc,0BAA0B,CAAC;AACzC,cAAc,gBAAgB,CAAC;AAC/B,cAAc,kBAAkB,CAAC;AAEjC;;;GAGG;AACH,MAAM,WAAW,eAAe;IAC9B,wDAAwD;IACxRD"} 
package/dest/types.js
CHANGED
package/package.json
CHANGED
@@ -1,10 +1,10 @@
 {
   "name": "@aztec/blob-lib",
-  "version": "0.0.1-commit.b655e406",
+  "version": "0.0.1-commit.fce3e4f",
   "type": "module",
   "exports": {
     ".": "./dest/index.js",
-    "./encoding": "./dest/encoding.js",
+    "./encoding": "./dest/encoding/index.js",
     "./types": "./dest/types.js",
     "./testing": "./dest/testing.js"
   },
@@ -16,10 +16,10 @@
     "tsconfig": "./tsconfig.json"
   },
   "scripts": {
-    "build": "yarn clean &&
-    "build:dev": "
+    "build": "yarn clean && tsgo -b",
+    "build:dev": "tsgo -b --watch",
     "clean": "rm -rf ./dest .tsbuildinfo",
-    "start:dev": "
+    "start:dev": "concurrently -k \"tsgo -b -w\" \"nodemon --watch dest --exec yarn start\"",
     "start": "node ./dest/index.js",
     "test": "NODE_NO_WARNINGS=1 node --experimental-vm-modules ../node_modules/.bin/jest --passWithNoTests --maxWorkers=${JEST_MAX_WORKERS:-8}"
   },
@@ -27,8 +27,8 @@
     "../package.common.json"
   ],
   "dependencies": {
-    "@aztec/constants": "0.0.1-commit.b655e406",
-    "@aztec/foundation": "0.0.1-commit.b655e406",
+    "@aztec/constants": "0.0.1-commit.fce3e4f",
+    "@aztec/foundation": "0.0.1-commit.fce3e4f",
     "@crate-crypto/node-eth-kzg": "^0.10.0",
     "tslib": "^2.4.0"
   },
@@ -36,6 +36,7 @@
     "@jest/globals": "^30.0.0",
     "@types/jest": "^30.0.0",
     "@types/node": "^22.15.17",
+    "@typescript/native-preview": "7.0.0-dev.20251126.1",
     "get-port": "^7.1.0",
     "jest": "^30.0.0",
     "ts-node": "^10.9.1",
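The './encoding' subpath now resolves to the new directory index rather than the old single module, so consumer import specifiers are unchanged. A sketch, assuming the index re-exports the names that blob_utils.ts imports from './encoding/index.js' in the diff further below:

  // Same specifier as before the restructure; only the file it resolves to moved.
  import { type CheckpointBlobData, decodeCheckpointBlobDataFromBuffer } from '@aztec/blob-lib/encoding';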
package/src/batched_blob.ts
ADDED
@@ -0,0 +1,25 @@
+import { BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
+
+import { FinalBlobAccumulator } from './circuit_types/index.js';
+
+/**
+ * A class to represent the result from accumulating blobs in an epoch using BatchedBlobAccumulator.
+ */
+export class BatchedBlob {
+  constructor(
+    /** Hash of Cs (to link to L1 blob hashes). */
+    public readonly blobCommitmentsHash: Fr,
+    /** Challenge point z such that p_i(z) = y_i. */
+    public readonly z: Fr,
+    /** Evaluation y, linear combination of all evaluations y_i = p_i(z) with gamma. */
+    public readonly y: BLS12Fr,
+    /** Commitment C, linear combination of all commitments C_i = [p_i] with gamma. */
+    public readonly commitment: BLS12Point,
+    /** KZG opening 'proof' Q (commitment to the quotient poly.), linear combination of all blob kzg 'proofs' Q_i with gamma. */
+    public readonly q: BLS12Point,
+  ) {}
+
+  toFinalBlobAccumulator() {
+    return new FinalBlobAccumulator(this.blobCommitmentsHash, this.z, this.y, this.commitment);
+  }
+}
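A sketch of the new class in use, based only on the constructor and method shown above and imported via the './types' subpath (whose diff adds the matching re-export); the zero values are hypothetical placeholders, since in practice the fields come out of BatchedBlobAccumulator.finalize():

  import { BatchedBlob } from '@aztec/blob-lib/types';
  import { BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';

  // Placeholder arguments, in constructor order: blobCommitmentsHash, z, y, commitment, q.
  const batched = new BatchedBlob(Fr.ZERO, Fr.ZERO, BLS12Fr.ZERO, BLS12Point.ZERO, BLS12Point.ZERO);
  // Project down to the subset of fields the final circuit accumulator carries.
  const finalAcc = batched.toFinalBlobAccumulator();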
package/src/blob_batching.ts
CHANGED
@@ -1,30 +1,69 @@
-import { AZTEC_MAX_EPOCH_DURATION,
+import { AZTEC_MAX_EPOCH_DURATION, BLOBS_PER_CHECKPOINT } from '@aztec/constants';
 import { poseidon2Hash, sha256ToField } from '@aztec/foundation/crypto';
 import { BLS12Fr, BLS12Point, Fr } from '@aztec/foundation/fields';
 
+import { BatchedBlob } from './batched_blob.js';
 import { Blob } from './blob.js';
-import {
+import { getBlobsPerL1Block } from './blob_utils.js';
 import { BlobAccumulator, FinalBlobAccumulator, FinalBlobBatchingChallenges } from './circuit_types/index.js';
-import {
+import { computeBlobFieldsHash, hashNoirBigNumLimbs } from './hash.js';
 import { kzg } from './kzg_context.js';
 
 /**
  * A class to create, manage, and prove batched EVM blobs.
+ * See noir-projects/noir-protocol-circuits/crates/blob/src/abis/blob_accumulator.nr
 */
-export class
+export class BatchedBlobAccumulator {
   constructor(
     /** Hash of Cs (to link to L1 blob hashes). */
-    public readonly
-    /** Challenge point
-    public readonly
-    /** Evaluation
-    public readonly
-    /** Commitment
-    public readonly
-    /** KZG opening
-    public readonly
+    public readonly blobCommitmentsHashAcc: Fr,
+    /** Challenge point z_acc. Final value used such that p_i(z) = y_i. */
+    public readonly zAcc: Fr,
+    /** Evaluation y_acc. Final value is is linear combination of all evaluations y_i = p_i(z) with gamma. */
+    public readonly yAcc: BLS12Fr,
+    /** Commitment c_acc. Final value is linear combination of all commitments C_i = [p_i] with gamma. */
+    public readonly cAcc: BLS12Point,
+    /** KZG opening q_acc. Final value is linear combination of all blob kzg 'proofs' Q_i with gamma. */
+    public readonly qAcc: BLS12Point,
+    /**
+     * Challenge point gamma_acc for multi opening. Used with y, C, and kzg 'proof' Q above.
+     * TODO(#13608): We calculate this by hashing natively in the circuit (hence Fr representation), but it's actually used
+     * as a BLS12Fr field elt. Is this safe? Is there a skew?
+     */
+    public readonly gammaAcc: Fr,
+    /** Simply gamma^(i + 1) at blob i. Used for calculating the i'th element of the above linear comb.s */
+    public readonly gammaPow: BLS12Fr,
+    /** Final challenge values used in evaluation. Optimistically input and checked in the final acc. */
+    public readonly finalBlobChallenges: FinalBlobBatchingChallenges,
   ) {}
 
+  /**
+   * Create the empty accumulation state of the epoch.
+   * @returns An empty blob accumulator with challenges.
+   */
+  static newWithChallenges(finalBlobChallenges: FinalBlobBatchingChallenges): BatchedBlobAccumulator {
+    return new BatchedBlobAccumulator(
+      Fr.ZERO,
+      Fr.ZERO,
+      BLS12Fr.ZERO,
+      BLS12Point.ZERO,
+      BLS12Point.ZERO,
+      Fr.ZERO,
+      BLS12Fr.ZERO,
+      finalBlobChallenges,
+    );
+  }
+
+  /**
+   * Returns an empty BatchedBlobAccumulator with precomputed challenges from all blobs in the epoch.
+   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
+   * beforehand from ALL blobs.
+   */
+  static async fromBlobFields(blobFieldsPerCheckpoint: Fr[][]): Promise<BatchedBlobAccumulator> {
+    const finalBlobChallenges = await this.precomputeBatchedBlobChallenges(blobFieldsPerCheckpoint);
+    return BatchedBlobAccumulator.newWithChallenges(finalBlobChallenges);
+  }
+
  /**
   * Get the final batched opening proof from multiple blobs.
   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
@@ -32,30 +71,21 @@ export class BatchedBlob {
   *
   * @returns A batched blob.
   */
-  static async batch(
-
+  static async batch(blobFieldsPerCheckpoint: Fr[][], verifyProof = false): Promise<BatchedBlob> {
+    const numCheckpoints = blobFieldsPerCheckpoint.length;
+    if (numCheckpoints > AZTEC_MAX_EPOCH_DURATION) {
      throw new Error(
-        `Too many
+        `Too many checkpoints sent to batch(). The maximum is ${AZTEC_MAX_EPOCH_DURATION}. Got ${numCheckpoints}.`,
      );
    }
 
    // Precalculate the values (z and gamma) and initialize the accumulator:
-    let acc = await this.
+    let acc = await this.fromBlobFields(blobFieldsPerCheckpoint);
    // Now we can create a multi opening proof of all input blobs:
-    for (const
-      acc = await acc.
+    for (const blobFields of blobFieldsPerCheckpoint) {
+      acc = await acc.accumulateFields(blobFields);
    }
-    return await acc.finalize();
-  }
-
-  /**
-   * Returns an empty BatchedBlobAccumulator with precomputed challenges from all blobs in the epoch.
-   * @dev MUST input all blobs to be broadcast. Does not work in multiple calls because z and gamma are calculated
-   * beforehand from ALL blobs.
-   */
-  static async newAccumulator(blobs: Blob[][]): Promise<BatchedBlobAccumulator> {
-    const finalBlobChallenges = await this.precomputeBatchedBlobChallenges(blobs);
-    return BatchedBlobAccumulator.newWithChallenges(finalBlobChallenges);
+    return await acc.finalize(verifyProof);
  }
 
  /**
@@ -70,13 +100,15 @@ export class BatchedBlob {
   * @param blobs - The blobs to precompute the challenges for. Each sub-array is the blobs for an L1 block.
   * @returns Challenges z and gamma.
   */
-  static async precomputeBatchedBlobChallenges(
+  static async precomputeBatchedBlobChallenges(blobFieldsPerCheckpoint: Fr[][]): Promise<FinalBlobBatchingChallenges> {
    // Compute the final challenge z to evaluate the blobs.
    let z: Fr | undefined;
-
+    const allBlobs = [];
+    for (const blobFields of blobFieldsPerCheckpoint) {
      // Compute the hash of all the fields in the block.
-      const blobFieldsHash = await
-
+      const blobFieldsHash = await computeBlobFieldsHash(blobFields);
+      const blobs = getBlobsPerL1Block(blobFields);
+      for (const blob of blobs) {
        // Compute the challenge z for each blob and accumulate it.
        const challengeZ = await blob.computeChallengeZ(blobFieldsHash);
        if (!z) {
@@ -85,13 +117,13 @@ export class BatchedBlob {
          z = await poseidon2Hash([z, challengeZ]);
        }
      }
+      allBlobs.push(...blobs);
    }
    if (!z) {
      throw new Error('No blobs to precompute challenges for.');
    }
 
    // Now we have a shared challenge for all blobs, evaluate them...
-    const allBlobs = blobs.flat();
    const proofObjects = allBlobs.map(b => b.evaluate(z));
    const evaluations = await Promise.all(proofObjects.map(({ y }) => hashNoirBigNumLimbs(y)));
    // ...and find the challenge for the linear combination of blobs.
@@ -105,92 +137,12 @@ export class BatchedBlob {
    return new FinalBlobBatchingChallenges(z, BLS12Fr.fromBN254Fr(gamma));
  }
 
-  verify() {
-    return kzg.verifyKzgProof(this.commitment.compress(), this.z.toBuffer(), this.y.toBuffer(), this.q.compress());
-  }
-
-  // Returns ethereum's versioned blob hash, following kzg_to_versioned_hash: https://eips.ethereum.org/EIPS/eip-4844#helpers
-  getEthVersionedBlobHash(): Buffer {
-    return computeEthVersionedBlobHash(this.commitment.compress());
-  }
-
-  /**
-   * Returns a proof of opening of the blobs to verify on L1 using the point evaluation precompile:
-   *
-   * input[:32] - versioned_hash
-   * input[32:64] - z
-   * input[64:96] - y
-   * input[96:144] - commitment C
-   * input[144:192] - commitment Q (a 'proof' committing to the quotient polynomial q(X))
-   *
-   * See https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
-   */
-  getEthBlobEvaluationInputs(): `0x${string}` {
-    const buf = Buffer.concat([
-      this.getEthVersionedBlobHash(),
-      this.z.toBuffer(),
-      this.y.toBuffer(),
-      this.commitment.compress(),
-      this.q.compress(),
-    ]);
-    return `0x${buf.toString('hex')}`;
-  }
-
-  toFinalBlobAccumulator() {
-    return new FinalBlobAccumulator(this.blobCommitmentsHash, this.z, this.y, this.commitment);
-  }
-}
-
-/**
- * See noir-projects/noir-protocol-circuits/crates/blob/src/abis/blob_accumulator.nr
- */
-export class BatchedBlobAccumulator {
-  constructor(
-    /** Hash of Cs (to link to L1 blob hashes). */
-    public readonly blobCommitmentsHashAcc: Fr,
-    /** Challenge point z_acc. Final value used such that p_i(z) = y_i. */
-    public readonly zAcc: Fr,
-    /** Evaluation y_acc. Final value is is linear combination of all evaluations y_i = p_i(z) with gamma. */
-    public readonly yAcc: BLS12Fr,
-    /** Commitment c_acc. Final value is linear combination of all commitments C_i = [p_i] with gamma. */
-    public readonly cAcc: BLS12Point,
-    /** KZG opening q_acc. Final value is linear combination of all blob kzg 'proofs' Q_i with gamma. */
-    public readonly qAcc: BLS12Point,
-    /**
-     * Challenge point gamma_acc for multi opening. Used with y, C, and kzg 'proof' Q above.
-     * TODO(#13608): We calculate this by hashing natively in the circuit (hence Fr representation), but it's actually used
-     * as a BLS12Fr field elt. Is this safe? Is there a skew?
-     */
-    public readonly gammaAcc: Fr,
-    /** Simply gamma^(i + 1) at blob i. Used for calculating the i'th element of the above linear comb.s */
-    public readonly gammaPow: BLS12Fr,
-    /** Final challenge values used in evaluation. Optimistically input and checked in the final acc. */
-    public readonly finalBlobChallenges: FinalBlobBatchingChallenges,
-  ) {}
-
-  /**
-   * Create the empty accumulation state of the epoch.
-   * @returns An empty blob accumulator with challenges.
-   */
-  static newWithChallenges(finalBlobChallenges: FinalBlobBatchingChallenges): BatchedBlobAccumulator {
-    return new BatchedBlobAccumulator(
-      Fr.ZERO,
-      Fr.ZERO,
-      BLS12Fr.ZERO,
-      BLS12Point.ZERO,
-      BLS12Point.ZERO,
-      Fr.ZERO,
-      BLS12Fr.ZERO,
-      finalBlobChallenges,
-    );
-  }
-
  /**
   * Given blob i, accumulate all state.
   * We assume the input blob has not been evaluated at z.
   * @returns An updated blob accumulator.
   */
-
+  async accumulateBlob(blob: Blob, blobFieldsHash: Fr) {
    const { proof, y: thisY } = blob.evaluate(this.finalBlobChallenges.z);
    const thisC = BLS12Point.decompress(blob.commitment);
    const thisQ = BLS12Point.decompress(proof);
@@ -234,23 +186,25 @@ export class BatchedBlobAccumulator {
  /**
   * Given blobs, accumulate all state.
   * We assume the input blobs have not been evaluated at z.
-   * @param
+   * @param blobFields - The blob fields of a checkpoint to accumulate.
   * @returns An updated blob accumulator.
   */
-  async
-
+  async accumulateFields(blobFields: Fr[]) {
+    const blobs = getBlobsPerL1Block(blobFields);
+
+    if (blobs.length > BLOBS_PER_CHECKPOINT) {
      throw new Error(
-        `Too many blobs to accumulate. The maximum is ${
+        `Too many blobs to accumulate. The maximum is ${BLOBS_PER_CHECKPOINT} per checkpoint. Got ${blobs.length}.`,
      );
    }
 
    // Compute the hash of all the fields in the block.
-    const blobFieldsHash = await
+    const blobFieldsHash = await computeBlobFieldsHash(blobFields);
 
    // Initialize the acc to iterate over:
    let acc: BatchedBlobAccumulator = this.clone();
    for (const blob of blobs) {
-      acc = await acc.
+      acc = await acc.accumulateBlob(blob, blobFieldsHash);
    }
    return acc;
  }
@@ -286,13 +240,17 @@ export class BatchedBlobAccumulator {
 
    const batchedBlob = new BatchedBlob(this.blobCommitmentsHashAcc, this.zAcc, this.yAcc, this.cAcc, this.qAcc);
 
-    if (verifyProof && !
+    if (verifyProof && !this.verify()) {
      throw new Error(`KZG proof did not verify.`);
    }
 
    return batchedBlob;
  }
 
+  verify() {
+    return kzg.verifyKzgProof(this.cAcc.compress(), this.zAcc.toBuffer(), this.yAcc.toBuffer(), this.qAcc.compress());
+  }
+
  isEmptyState() {
    return (
      this.blobCommitmentsHashAcc.isZero() &&
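The refactor shifts the batching entry point from pre-built Blob[][] to raw blob fields per checkpoint; the blobs are derived internally via getBlobsPerL1Block. A sketch of the epoch flow under that reading of the diff, assuming blob_batching.ts is re-exported from the package root; the declared input is a hypothetical placeholder and real inputs must be validly encoded checkpoint fields:

  import { BatchedBlobAccumulator } from '@aztec/blob-lib';
  import type { Fr } from '@aztec/foundation/fields';

  // One Fr[] per checkpoint in the epoch (placeholder; must be validly encoded).
  declare const blobFieldsPerCheckpoint: Fr[][];

  // Precomputes z and gamma over ALL checkpoints up front, accumulates each
  // checkpoint in turn, then finalizes; `true` makes finalize() verify the batched KZG proof.
  const batched = await BatchedBlobAccumulator.batch(blobFieldsPerCheckpoint, true);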
package/src/blob_utils.ts
CHANGED
@@ -1,9 +1,10 @@
 import { FIELDS_PER_BLOB } from '@aztec/constants';
 import { BLS12Point, Fr } from '@aztec/foundation/fields';
 
+import type { BatchedBlob } from './batched_blob.js';
 import { Blob } from './blob.js';
-import {
-import {
+import { type CheckpointBlobData, decodeCheckpointBlobDataFromBuffer } from './encoding/index.js';
+import { computeBlobsHash, computeEthVersionedBlobHash } from './hash.js';
 
 /**
  * @param blobs - The blobs to emit.
@@ -40,26 +41,13 @@ export function getBlobsPerL1Block(fields: Fr[]): Blob[] {
 }
 
 /**
- * Get the
- *
- *
- * @param blobs - The blobs to read fields from. Should be all the blobs in the L1 block proposing the checkpoint.
- * @param checkEncoding - Whether to check if the entire encoded blob fields are valid. If false, it will still check
- * the checkpoint prefix and throw if there's not enough fields.
- * @returns The fields added throughout the checkpoint.
+ * Get the encoded data from all blobs in the checkpoint.
+ * @param blobs - The blobs to read data from. Should be all the blobs for the L1 block proposing the checkpoint.
+ * @returns The encoded data of the checkpoint.
 */
-export function
-
-
-
-export async function computeBlobFieldsHashFromBlobs(blobs: Blob[]): Promise<Fr> {
-  const fields = blobs.map(b => b.toFields()).flat();
-  const numBlobFields = fields[0].toNumber();
-  if (numBlobFields > fields.length) {
-    throw new Error(`The prefix indicates ${numBlobFields} fields. Got ${fields.length}.`);
-  }
-
-  return await computeBlobFieldsHash(fields.slice(0, numBlobFields));
+export function decodeCheckpointBlobDataFromBlobs(blobs: Blob[]): CheckpointBlobData {
+  const buf = Buffer.concat(blobs.map(b => b.data));
+  return decodeCheckpointBlobDataFromBuffer(buf);
 }
 
 export function computeBlobsHashFromBlobs(blobs: Blob[]): Fr {
@@ -69,3 +57,25 @@ export function computeBlobsHashFromBlobs(blobs: Blob[]): Fr {
 export function getBlobCommitmentsFromBlobs(blobs: Blob[]): BLS12Point[] {
  return blobs.map(b => BLS12Point.decompress(b.commitment));
 }
+
+/**
+ * Returns a proof of opening of the blobs to verify on L1 using the point evaluation precompile:
+ *
+ * input[:32] - versioned_hash
+ * input[32:64] - z
+ * input[64:96] - y
+ * input[96:144] - commitment C
+ * input[144:192] - commitment Q (a 'proof' committing to the quotient polynomial q(X))
+ *
+ * See https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
+ */
+export function getEthBlobEvaluationInputs(batchedBlob: BatchedBlob): `0x${string}` {
+  const buf = Buffer.concat([
+    computeEthVersionedBlobHash(batchedBlob.commitment.compress()),
+    batchedBlob.z.toBuffer(),
+    batchedBlob.y.toBuffer(),
+    batchedBlob.commitment.compress(),
+    batchedBlob.q.compress(),
+  ]);
+  return `0x${buf.toString('hex')}`;
+}
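With getEthBlobEvaluationInputs now a free function over a BatchedBlob (it was a method on the old BatchedBlob class), the L1 submission path reads: batch the epoch's blobs, then serialize the 192-byte precompile input. A sketch combining the two diffs above, assuming both names are re-exported from the package root; toPrecompileInput is a hypothetical wrapper, not part of the package:

  import { BatchedBlobAccumulator, getEthBlobEvaluationInputs } from '@aztec/blob-lib';
  import type { Fr } from '@aztec/foundation/fields';

  // versioned_hash ++ z ++ y ++ C ++ Q, hex-encoded for the EIP-4844 point evaluation precompile.
  async function toPrecompileInput(blobFieldsPerCheckpoint: Fr[][]): Promise<`0x${string}`> {
    const batched = await BatchedBlobAccumulator.batch(blobFieldsPerCheckpoint, true);
    return getEthBlobEvaluationInputs(batched);
  }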
package/src/circuit_types/blob_accumulator.ts
CHANGED
@@ -81,4 +81,15 @@ export class BlobAccumulator {
      BLS12Fr.fromNoirBigNum({ limbs: reader.readFieldArray(BLS12_FR_LIMBS).map(f => f.toString()) }),
    );
  }
+
+  static random() {
+    return new BlobAccumulator(
+      Fr.random(),
+      Fr.random(),
+      BLS12Fr.random(),
+      BLS12Point.random(),
+      Fr.random(),
+      BLS12Fr.random(),
+    );
+  }
 }
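The new random() factory complements the seed-deterministic makeBlobAccumulator fixture added in testing.js: the deterministic helper suits reproducible assertions, while random() suits tests where the accumulator's contents are irrelevant. A sketch, assuming the class is re-exported from the package root via circuit_types:

  import { BlobAccumulator } from '@aztec/blob-lib';
  import { makeBlobAccumulator } from '@aztec/blob-lib/testing';

  const stable = makeBlobAccumulator(1);      // reproducible across runs
  const arbitrary = BlobAccumulator.random(); // fresh random values each call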