@aztec/foundation 0.69.0-devnet → 0.69.1-devnet

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/dest/abi/note_selector.d.ts +5 -2
  2. package/dest/abi/note_selector.d.ts.map +1 -1
  3. package/dest/abi/note_selector.js +12 -4
  4. package/dest/blob/index.d.ts +24 -6
  5. package/dest/blob/index.d.ts.map +1 -1
  6. package/dest/blob/index.js +57 -18
  7. package/dest/config/env_var.d.ts +1 -1
  8. package/dest/config/env_var.d.ts.map +1 -1
  9. package/dest/fields/point.d.ts +1 -0
  10. package/dest/fields/point.d.ts.map +1 -1
  11. package/dest/fields/point.js +2 -1
  12. package/dest/json-rpc/server/safe_json_rpc_server.d.ts +12 -2
  13. package/dest/json-rpc/server/safe_json_rpc_server.d.ts.map +1 -1
  14. package/dest/json-rpc/server/safe_json_rpc_server.js +23 -7
  15. package/dest/promise/running-promise.d.ts.map +1 -1
  16. package/dest/promise/running-promise.js +9 -1
  17. package/dest/queue/serial_queue.d.ts +1 -0
  18. package/dest/queue/serial_queue.d.ts.map +1 -1
  19. package/dest/queue/serial_queue.js +6 -1
  20. package/dest/retry/index.d.ts.map +1 -1
  21. package/dest/retry/index.js +3 -2
  22. package/dest/serialize/buffer_reader.d.ts +9 -0
  23. package/dest/serialize/buffer_reader.d.ts.map +1 -1
  24. package/dest/serialize/buffer_reader.js +14 -1
  25. package/dest/serialize/serialize.d.ts +1 -1
  26. package/dest/serialize/serialize.d.ts.map +1 -1
  27. package/dest/serialize/serialize.js +1 -1
  28. package/dest/testing/files/index.d.ts +1 -1
  29. package/dest/testing/files/index.js +1 -1
  30. package/dest/trees/index.d.ts +2 -1
  31. package/dest/trees/index.d.ts.map +1 -1
  32. package/dest/trees/index.js +3 -2
  33. package/dest/trees/{unbalanced_merkle_root.d.ts → unbalanced_merkle_tree.d.ts} +6 -2
  34. package/dest/trees/unbalanced_merkle_tree.d.ts.map +1 -0
  35. package/dest/trees/{unbalanced_merkle_root.js → unbalanced_merkle_tree.js} +40 -2
  36. package/dest/trees/unbalanced_tree_store.d.ts +19 -0
  37. package/dest/trees/unbalanced_tree_store.d.ts.map +1 -0
  38. package/dest/trees/unbalanced_tree_store.js +80 -0
  39. package/package.json +2 -2
  40. package/src/abi/note_selector.ts +11 -4
  41. package/src/blob/index.ts +78 -31
  42. package/src/config/env_var.ts +9 -1
  43. package/src/fields/point.ts +1 -0
  44. package/src/json-rpc/server/safe_json_rpc_server.ts +19 -4
  45. package/src/promise/running-promise.ts +8 -0
  46. package/src/queue/serial_queue.ts +5 -0
  47. package/src/retry/index.ts +2 -1
  48. package/src/serialize/buffer_reader.ts +14 -0
  49. package/src/serialize/serialize.ts +1 -0
  50. package/src/testing/files/index.ts +1 -1
  51. package/src/trees/index.ts +2 -1
  52. package/src/trees/unbalanced_merkle_tree.ts +103 -0
  53. package/src/trees/unbalanced_tree_store.ts +102 -0
  54. package/dest/trees/unbalanced_merkle_root.d.ts.map +0 -1
  55. package/src/trees/unbalanced_merkle_root.ts +0 -52
package/src/blob/index.ts CHANGED
@@ -3,7 +3,7 @@ import type { Blob as BlobBuffer } from 'c-kzg';
3
3
 
4
4
  import { poseidon2Hash, sha256 } from '../crypto/index.js';
5
5
  import { Fr } from '../fields/index.js';
6
- import { serializeToBuffer } from '../serialize/index.js';
6
+ import { BufferReader, serializeToBuffer } from '../serialize/index.js';
7
7
 
8
8
  // Importing directly from 'c-kzg' does not work, ignoring import/no-named-as-default-member err:
9
9
  /* eslint-disable import/no-named-as-default-member */
@@ -36,48 +36,47 @@ export const VERSIONED_HASH_VERSION_KZG = 0x01;
36
36
  * A class to create, manage, and prove EVM blobs.
37
37
  */
38
38
  export class Blob {
39
- /** The blob to be broadcast on L1 in bytes form. */
40
- public readonly data: BlobBuffer;
41
- /** The hash of all tx effects inside the blob. Used in generating the challenge z and proving that we have included all required effects. */
42
- public readonly fieldsHash: Fr;
43
- /** Challenge point z (= H(H(tx_effects), kzgCommitment). Used such that p(z) = y. */
44
- public readonly challengeZ: Fr;
45
- /** Evaluation y = p(z), where p() is the blob polynomial. BLS12 field element, rep. as BigNum in nr, bigint in ts. */
46
- public readonly evaluationY: Buffer;
47
- /** Commitment to the blob C. Used in compressed BLS12 point format (48 bytes). */
48
- public readonly commitment: Buffer;
49
- /** KZG opening proof for y = p(z). The commitment to quotient polynomial Q, used in compressed BLS12 point format (48 bytes). */
50
- public readonly proof: Buffer;
51
-
52
39
  constructor(
53
- /** All fields to be broadcast in the blob. */
54
- fields: Fr[],
55
- /** If we want to broadcast more fields than fit into a blob, we hash those and used it as the fieldsHash across all blobs.
56
- * This is much simpler and cheaper in the circuit to do, but MUST BE CHECKED before injecting here.
57
- */
58
- multiBlobFieldsHash?: Fr,
59
- ) {
40
+ /** The blob to be broadcast on L1 in bytes form. */
41
+ public readonly data: BlobBuffer,
42
+ /** The hash of all tx effects inside the blob. Used in generating the challenge z and proving that we have included all required effects. */
43
+ public readonly fieldsHash: Fr,
44
+ /** Challenge point z (= H(H(tx_effects), kzgCommitment). Used such that p(z) = y. */
45
+ public readonly challengeZ: Fr,
46
+ /** Evaluation y = p(z), where p() is the blob polynomial. BLS12 field element, rep. as BigNum in nr, bigint in ts. */
47
+ public readonly evaluationY: Buffer,
48
+ /** Commitment to the blob C. Used in compressed BLS12 point format (48 bytes). */
49
+ public readonly commitment: Buffer,
50
+ /** KZG opening proof for y = p(z). The commitment to quotient polynomial Q, used in compressed BLS12 point format (48 bytes). */
51
+ public readonly proof: Buffer,
52
+ ) {}
53
+
54
+ static fromFields(fields: Fr[], multiBlobFieldsHash?: Fr): Blob {
60
55
  if (fields.length > FIELD_ELEMENTS_PER_BLOB) {
61
56
  throw new Error(
62
57
  `Attempted to overfill blob with ${fields.length} elements. The maximum is ${FIELD_ELEMENTS_PER_BLOB}`,
63
58
  );
64
59
  }
65
- this.data = Buffer.concat([serializeToBuffer(fields)], BYTES_PER_BLOB);
60
+ const dataWithoutZeros = serializeToBuffer(fields);
61
+ const data = Buffer.concat([dataWithoutZeros], BYTES_PER_BLOB);
62
+
66
63
  // This matches the output of SpongeBlob.squeeze() in the blob circuit
67
- this.fieldsHash = multiBlobFieldsHash ? multiBlobFieldsHash : poseidon2Hash(fields);
68
- this.commitment = Buffer.from(blobToKzgCommitment(this.data));
69
- this.challengeZ = poseidon2Hash([this.fieldsHash, ...this.commitmentToFields()]);
70
- const res = computeKzgProof(this.data, this.challengeZ.toBuffer());
71
- if (!verifyKzgProof(this.commitment, this.challengeZ.toBuffer(), res[1], res[0])) {
64
+ const fieldsHash = multiBlobFieldsHash ? multiBlobFieldsHash : poseidon2Hash(fields);
65
+ const commitment = Buffer.from(blobToKzgCommitment(data));
66
+ const challengeZ = poseidon2Hash([fieldsHash, ...commitmentToFields(commitment)]);
67
+ const res = computeKzgProof(data, challengeZ.toBuffer());
68
+ if (!verifyKzgProof(commitment, challengeZ.toBuffer(), res[1], res[0])) {
72
69
  throw new Error(`KZG proof did not verify.`);
73
70
  }
74
- this.proof = Buffer.from(res[0]);
75
- this.evaluationY = Buffer.from(res[1]);
71
+ const proof = Buffer.from(res[0]);
72
+ const evaluationY = Buffer.from(res[1]);
73
+
74
+ return new Blob(dataWithoutZeros, fieldsHash, challengeZ, evaluationY, commitment, proof);
76
75
  }
77
76
 
78
77
  // 48 bytes encoded in fields as [Fr, Fr] = [0->31, 31->48]
79
78
  commitmentToFields(): [Fr, Fr] {
80
- return [new Fr(this.commitment.subarray(0, 31)), new Fr(this.commitment.subarray(31, 48))];
79
+ return commitmentToFields(this.commitment);
81
80
  }
82
81
 
83
82
  // Returns ethereum's versioned blob hash, following kzg_to_versioned_hash: https://eips.ethereum.org/EIPS/eip-4844#helpers
@@ -93,6 +92,49 @@ export class Blob {
93
92
  return hash;
94
93
  }
95
94
 
95
+ toBuffer(): Buffer {
96
+ return Buffer.from(
97
+ serializeToBuffer(
98
+ this.data.length,
99
+ this.data,
100
+ this.fieldsHash,
101
+ this.challengeZ,
102
+ this.evaluationY.length,
103
+ this.evaluationY,
104
+ this.commitment.length,
105
+ this.commitment,
106
+ this.proof.length,
107
+ this.proof,
108
+ ),
109
+ );
110
+ }
111
+
112
+ static fromBuffer(buf: Buffer | BufferReader): Blob {
113
+ const reader = BufferReader.asReader(buf);
114
+ return new Blob(
115
+ reader.readUint8Array(),
116
+ reader.readObject(Fr),
117
+ reader.readObject(Fr),
118
+ reader.readBuffer(),
119
+ reader.readBuffer(),
120
+ reader.readBuffer(),
121
+ );
122
+ }
123
+
124
+ /**
125
+ * Pad the blob data to its full size before posting
126
+ */
127
+ get dataWithZeros(): BlobBuffer {
128
+ return Buffer.concat([this.data], BYTES_PER_BLOB);
129
+ }
130
+
131
+ /**
132
+ * Get the size of the blob in bytes
133
+ */
134
+ getSize() {
135
+ return this.data.length;
136
+ }
137
+
96
138
  // Returns a proof of opening of the blob to verify on L1 using the point evaluation precompile:
97
139
  // * input[:32] - versioned_hash
98
140
  // * input[32:64] - z
@@ -145,8 +187,13 @@ export class Blob {
145
187
  const res = [];
146
188
  for (let i = 0; i < numBlobs; i++) {
147
189
  const end = fields.length < (i + 1) * FIELD_ELEMENTS_PER_BLOB ? fields.length : (i + 1) * FIELD_ELEMENTS_PER_BLOB;
148
- res.push(new Blob(fields.slice(i * FIELD_ELEMENTS_PER_BLOB, end), multiBlobFieldsHash));
190
+ res.push(Blob.fromFields(fields.slice(i * FIELD_ELEMENTS_PER_BLOB, end), multiBlobFieldsHash));
149
191
  }
150
192
  return res;
151
193
  }
152
194
  }
195
+
196
+ // 48 bytes encoded in fields as [Fr, Fr] = [0->31, 31->48]
197
+ function commitmentToFields(commitment: Buffer): [Fr, Fr] {
198
+ return [new Fr(commitment.subarray(0, 31)), new Fr(commitment.subarray(31, 48))];
199
+ }
@@ -100,6 +100,7 @@ export type EnvVar =
100
100
  | 'P2P_UDP_ANNOUNCE_ADDR'
101
101
  | 'P2P_UDP_LISTEN_ADDR'
102
102
  | 'PEER_ID_PRIVATE_KEY'
103
+ | 'PROVER_BLOB_SINK_URL'
103
104
  | 'PROOF_VERIFIER_L1_START_BLOCK'
104
105
  | 'PROOF_VERIFIER_POLL_INTERVAL_MS'
105
106
  | 'PROVER_AGENT_ENABLED'
@@ -112,7 +113,6 @@ export type EnvVar =
112
113
  | 'PROVER_BROKER_JOB_TIMEOUT_MS'
113
114
  | 'PROVER_BROKER_POLL_INTERVAL_MS'
114
115
  | 'PROVER_BROKER_JOB_MAX_RETRIES'
115
- | 'PROVER_BROKER_DATA_DIRECTORY'
116
116
  | 'PROVER_COORDINATION_NODE_URL'
117
117
  | 'PROVER_DISABLED'
118
118
  | 'PROVER_ID'
@@ -121,6 +121,9 @@ export type EnvVar =
121
121
  | 'PROVER_NODE_POLLING_INTERVAL_MS'
122
122
  | 'PROVER_NODE_MAX_PENDING_JOBS'
123
123
  | 'PROVER_NODE_MAX_PARALLEL_BLOCKS_PER_EPOCH'
124
+ | 'PROVER_NODE_TX_GATHERING_TIMEOUT_MS'
125
+ | 'PROVER_NODE_TX_GATHERING_INTERVAL_MS'
126
+ | 'PROVER_NODE_TX_GATHERING_MAX_PARALLEL_REQUESTS'
124
127
  | 'PROVER_PUBLISH_RETRY_INTERVAL_MS'
125
128
  | 'PROVER_PUBLISHER_PRIVATE_KEY'
126
129
  | 'PROVER_REAL_PROOFS'
@@ -136,9 +139,12 @@ export type EnvVar =
136
139
  | 'REGISTRY_CONTRACT_ADDRESS'
137
140
  | 'ROLLUP_CONTRACT_ADDRESS'
138
141
  | 'SEQ_ALLOWED_SETUP_FN'
142
+ | 'SEQ_BLOB_SINK_URL'
139
143
  | 'SEQ_MAX_BLOCK_SIZE_IN_BYTES'
140
144
  | 'SEQ_MAX_TX_PER_BLOCK'
141
145
  | 'SEQ_MIN_TX_PER_BLOCK'
146
+ | 'SEQ_MAX_DA_BLOCK_GAS'
147
+ | 'SEQ_MAX_L2_BLOCK_GAS'
142
148
  | 'SEQ_PUBLISH_RETRY_INTERVAL_MS'
143
149
  | 'SEQ_PUBLISHER_PRIVATE_KEY'
144
150
  | 'SEQ_REQUIRED_CONFIRMATIONS'
@@ -182,8 +188,10 @@ export type EnvVar =
182
188
  | 'L1_GAS_LIMIT_BUFFER_FIXED'
183
189
  | 'L1_GAS_PRICE_MIN'
184
190
  | 'L1_GAS_PRICE_MAX'
191
+ | 'L1_BLOB_FEE_PER_GAS_MAX'
185
192
  | 'L1_PRIORITY_FEE_BUMP_PERCENTAGE'
186
193
  | 'L1_PRIORITY_FEE_RETRY_BUMP_PERCENTAGE'
194
+ | 'L1_FIXED_PRIORITY_FEE_PER_GAS'
187
195
  | 'L1_TX_MONITOR_MAX_ATTEMPTS'
188
196
  | 'L1_TX_MONITOR_CHECK_INTERVAL_MS'
189
197
  | 'L1_TX_MONITOR_STALL_TIME_MS'
@@ -10,6 +10,7 @@ import { Fr } from './fields.js';
10
10
  * Represents a Point on an elliptic curve with x and y coordinates.
11
11
  * The Point class provides methods for creating instances from different input types,
12
12
  * converting instances to various output formats, and checking the equality of points.
13
+ * TODO(#7386): Clean up this class.
13
14
  */
14
15
  export class Point {
15
16
  static ZERO = new Point(Fr.ZERO, Fr.ZERO, false);
@@ -24,6 +24,11 @@ export class SafeJsonRpcServer {
24
24
  constructor(
25
25
  /** The proxy object to delegate requests to. */
26
26
  private readonly proxy: Proxy,
27
+ /**
28
+ * Return an HTTP 200 status code on errors, but include an error object
29
+ * as per the JSON RPC spec
30
+ */
31
+ private http200OnError = false,
27
32
  /** Health check function */
28
33
  private readonly healthCheck: StatusCheckFn = () => true,
29
34
  /** Logger */
@@ -105,9 +110,17 @@ export class SafeJsonRpcServer {
105
110
  ctx.status = 400;
106
111
  ctx.body = { jsonrpc, id, error: { code: -32601, message: `Method not found: ${method}` } };
107
112
  } else {
108
- const result = await this.proxy.call(method, params);
109
- ctx.body = { jsonrpc, id, result };
110
113
  ctx.status = 200;
114
+ try {
115
+ const result = await this.proxy.call(method, params);
116
+ ctx.body = { jsonrpc, id, result };
117
+ } catch (err: any) {
118
+ if (this.http200OnError) {
119
+ ctx.body = { jsonrpc, id, error: { code: err.code || -32600, data: err.data, message: err.message } };
120
+ } else {
121
+ throw err;
122
+ }
123
+ }
111
124
  }
112
125
  });
113
126
 
@@ -259,20 +272,22 @@ function makeAggregateHealthcheck(namedHandlers: NamespacedApiHandlers, log?: Lo
259
272
  */
260
273
  export function createNamespacedSafeJsonRpcServer(
261
274
  handlers: NamespacedApiHandlers,
275
+ http200OnError = false,
262
276
  log = createLogger('json-rpc:server'),
263
277
  ): SafeJsonRpcServer {
264
278
  const proxy = new NamespacedSafeJsonProxy(handlers);
265
279
  const healthCheck = makeAggregateHealthcheck(handlers, log);
266
- return new SafeJsonRpcServer(proxy, healthCheck, log);
280
+ return new SafeJsonRpcServer(proxy, http200OnError, healthCheck, log);
267
281
  }
268
282
 
269
283
  export function createSafeJsonRpcServer<T extends object = any>(
270
284
  handler: T,
271
285
  schema: ApiSchemaFor<T>,
286
+ http200OnError = false,
272
287
  healthCheck?: StatusCheckFn,
273
288
  ) {
274
289
  const proxy = new SafeJsonProxy(handler, schema);
275
- return new SafeJsonRpcServer(proxy, healthCheck);
290
+ return new SafeJsonRpcServer(proxy, http200OnError, healthCheck);
276
291
  }
277
292
 
278
293
  /**
@@ -23,6 +23,10 @@ export class RunningPromise {
23
23
  * Starts the running promise.
24
24
  */
25
25
  public start() {
26
+ if (this.running) {
27
+ this.logger.warn(`Attempted to start running promise that was already started`);
28
+ return;
29
+ }
26
30
  this.running = true;
27
31
 
28
32
  const poll = async () => {
@@ -54,6 +58,10 @@ export class RunningPromise {
54
58
  * and waits for the currently executing function to complete.
55
59
  */
56
60
  async stop(): Promise<void> {
61
+ if (!this.running) {
62
+ this.logger.warn(`Running promise was not started`);
63
+ return;
64
+ }
57
65
  this.running = false;
58
66
  this.interruptibleSleep.interrupt();
59
67
  await this.runningPromise;
@@ -6,6 +6,7 @@ import { FifoMemoryQueue } from './fifo_memory_queue.js';
6
6
  export class SerialQueue {
7
7
  private readonly queue = new FifoMemoryQueue<() => Promise<void>>();
8
8
  private runningPromise!: Promise<void>;
9
+ private started = false;
9
10
 
10
11
  /**
11
12
  * Initializes the execution of enqueued functions in the serial queue.
@@ -14,7 +15,11 @@ export class SerialQueue {
14
15
  * This method should be called once to start processing the queue.
15
16
  */
16
17
  public start() {
18
+ if (this.started) {
19
+ return;
20
+ }
17
21
  this.runningPromise = this.queue.process(fn => fn());
22
+ this.started = true;
18
23
  }
19
24
 
20
25
  /**
@@ -1,3 +1,4 @@
1
+ import { TimeoutError } from '../error/index.js';
1
2
  import { createLogger } from '../log/index.js';
2
3
  import { sleep } from '../sleep/index.js';
3
4
  import { Timer } from '../timer/index.js';
@@ -93,7 +94,7 @@ export async function retryUntil<T>(fn: () => Promise<T | undefined>, name = '',
93
94
  await sleep(interval * 1000);
94
95
 
95
96
  if (timeout && timer.s() > timeout) {
96
- throw new Error(name ? `Timeout awaiting ${name}` : 'Timeout');
97
+ throw new TimeoutError(name ? `Timeout awaiting ${name}` : 'Timeout');
97
98
  }
98
99
  }
99
100
  }
@@ -307,6 +307,20 @@ export class BufferReader {
307
307
  return this.readBytes(size);
308
308
  }
309
309
 
310
+ /**
311
+ * Reads a Uint8Array from the current position of the reader and advances the index.
312
+ * The method first reads the size (number) of bytes to be read, and then returns
313
+ * a Uint8Array with that size containing the bytes. Useful for reading variable-length
314
+ * binary data encoded as (size, data) format.
315
+ *
316
+ * @returns A Uint8Array containing the read bytes.
317
+ */
318
+ public readUint8Array(): Uint8Array {
319
+ const size = this.readNumber();
320
+ this.#rangeCheck(size);
321
+ return this.readBytes(size);
322
+ }
323
+
310
324
  /**
311
325
  * Reads and constructs a map object from the current buffer using the provided deserializer.
312
326
  * The method reads the number of entries in the map, followed by iterating through each key-value pair.
@@ -109,6 +109,7 @@ export function deserializeField(buf: Buffer, offset = 0) {
109
109
  export type Bufferable =
110
110
  | boolean
111
111
  | Buffer
112
+ | Uint8Array
112
113
  | number
113
114
  | bigint
114
115
  | string
@@ -43,7 +43,7 @@ export function updateInlineTestData(targetFileFromRepoRoot: string, itemName: s
43
43
  /**
44
44
  * Updates the sample Prover.toml files in noir-projects/noir-protocol-circuits/crates/.
45
45
  * @remarks Requires AZTEC_GENERATE_TEST_DATA=1 to be set
46
- * To re-gen, run 'AZTEC_GENERATE_TEST_DATA=1 FAKE_PROOFS=1 yarn test:e2e-no-docker full.test '
46
+ * To re-gen, run 'AZTEC_GENERATE_TEST_DATA=1 FAKE_PROOFS=1 yarn test:e2e full.test '
47
47
  */
48
48
  export function updateProtocolCircuitSampleInputs(circuitName: string, value: string) {
49
49
  const logger = createConsoleLogger('aztec:testing:test_data');
@@ -1,4 +1,5 @@
1
- export * from './unbalanced_merkle_root.js';
1
+ export * from './unbalanced_merkle_tree.js';
2
+ export * from './unbalanced_tree_store.js';
2
3
 
3
4
  /**
4
5
  * A leaf of an indexed merkle tree.
@@ -0,0 +1,103 @@
1
+ import { padArrayEnd } from '@aztec/foundation/collection';
2
+ import { sha256Trunc } from '@aztec/foundation/crypto';
3
+
4
+ /**
5
+ * Computes the merkle root for an unbalanced tree.
6
+ *
7
+ * @dev Adapted from unbalanced_tree.ts.
8
+ * Calculates the tree upwards layer by layer until we reach the root.
9
+ * The L1 calculation instead computes the tree from right to left (slightly cheaper gas).
10
+ * TODO: A more thorough investigation of which method is cheaper, then use that method everywhere.
11
+ */
12
+ export function computeUnbalancedMerkleRoot(leaves: Buffer[], emptyLeaf?: Buffer, hasher = sha256Trunc): Buffer {
13
+ // Pad leaves to 2
14
+ if (leaves.length < 2) {
15
+ if (emptyLeaf === undefined) {
16
+ throw new Error('Cannot compute a Merkle root with less than 2 leaves');
17
+ } else {
18
+ leaves = padArrayEnd(leaves, emptyLeaf, 2);
19
+ }
20
+ }
21
+
22
+ const depth = Math.ceil(Math.log2(leaves.length));
23
+ let [layerWidth, nodeToShift] =
24
+ leaves.length & 1 ? [leaves.length - 1, leaves[leaves.length - 1]] : [leaves.length, Buffer.alloc(0)];
25
+ // Allocate this layer's leaves and init the next layer up
26
+ let thisLayer = leaves.slice(0, layerWidth);
27
+ let nextLayer = [];
28
+ for (let i = 0; i < depth; i++) {
29
+ for (let j = 0; j < layerWidth; j += 2) {
30
+ // Store the hash of each pair one layer up
31
+ nextLayer[j / 2] = hasher(Buffer.concat([thisLayer[j], thisLayer[j + 1]]));
32
+ }
33
+ layerWidth /= 2;
34
+ if (layerWidth & 1) {
35
+ if (nodeToShift.length) {
36
+ // If the next layer has odd length, and we have a node that needs to be shifted up, add it here
37
+ nextLayer.push(nodeToShift);
38
+ layerWidth += 1;
39
+ nodeToShift = Buffer.alloc(0);
40
+ } else {
41
+ // If we don't have a node waiting to be shifted, store the next layer's final node to be shifted
42
+ layerWidth -= 1;
43
+ nodeToShift = nextLayer[layerWidth];
44
+ }
45
+ }
46
+ // reset the layers
47
+ thisLayer = nextLayer;
48
+ nextLayer = [];
49
+ }
50
+ // return the root
51
+ return thisLayer[0];
52
+ }
53
+
54
+ function getMaxBalancedTreeDepth(numLeaves: number) {
55
+ return Math.floor(Math.log2(numLeaves));
56
+ }
57
+
58
+ function getMaxUnbalancedTreeDepth(numLeaves: number) {
59
+ return Math.ceil(Math.log2(numLeaves));
60
+ }
61
+
62
+ function findPosition(
63
+ rootLevel: number,
64
+ leafLevel: number,
65
+ numLeaves: number,
66
+ indexOffset: number,
67
+ targetIndex: number,
68
+ ): { level: number; indexAtLevel: number } {
69
+ if (numLeaves <= 1) {
70
+ // Single leaf.
71
+ return { level: rootLevel, indexAtLevel: indexOffset };
72
+ }
73
+
74
+ // The largest balanced tree that can be created with the given number of leaves.
75
+ const maxBalancedTreeDepth = getMaxBalancedTreeDepth(numLeaves);
76
+ const numBalancedLeaves = 2 ** maxBalancedTreeDepth;
77
+ const numRemainingLeaves = numLeaves - numBalancedLeaves;
78
+
79
+ if (targetIndex < numBalancedLeaves) {
80
+ // Target is in the balanced tree.
81
+
82
+ // - If numRemainingLeaves is 0: this balanced tree is grown from the current root.
83
+ // - If numRemainingLeaves is not 0: the remaining leaves will form another tree, which will become the right child of the root.
84
+ // And the balanced tree will be the left child of the root.
85
+ // There will be an extra level between the root of the balanced tree and the current root.
86
+ const extraLevel = numRemainingLeaves ? 1 : 0;
87
+
88
+ return { level: rootLevel + maxBalancedTreeDepth + extraLevel, indexAtLevel: indexOffset + targetIndex };
89
+ } else {
90
+ // Target is in the right branch.
91
+ const rightBranchMaxLevel = getMaxUnbalancedTreeDepth(numRemainingLeaves);
92
+ const shiftedUp = leafLevel - rootLevel - rightBranchMaxLevel - 1;
93
+ const nextLeafLevel = leafLevel - shiftedUp;
94
+ const newIndexOffset = (indexOffset + numBalancedLeaves) >> shiftedUp;
95
+ const shiftedTargetIndex = targetIndex - numBalancedLeaves;
96
+ return findPosition(rootLevel + 1, nextLeafLevel, numRemainingLeaves, newIndexOffset, shiftedTargetIndex);
97
+ }
98
+ }
99
+
100
+ export function findLeafLevelAndIndex(numLeaves: number, leafIndex: number) {
101
+ const maxLevel = getMaxUnbalancedTreeDepth(numLeaves);
102
+ return findPosition(0, maxLevel, numLeaves, 0, leafIndex);
103
+ }
@@ -0,0 +1,102 @@
1
+ import { findLeafLevelAndIndex } from './unbalanced_merkle_tree.js';
2
+
3
+ export interface TreeNodeLocation {
4
+ level: number;
5
+ index: number;
6
+ }
7
+
8
+ interface TreeNode<T> {
9
+ value: T;
10
+ location: TreeNodeLocation;
11
+ }
12
+
13
+ export class UnbalancedTreeStore<T> {
14
+ #nodeMapping: Map<string, TreeNode<T>> = new Map();
15
+ readonly #numLeaves: number;
16
+
17
+ constructor(numLeaves: number) {
18
+ this.#numLeaves = numLeaves;
19
+ }
20
+
21
+ setLeaf(leafIndex: number, value: T): TreeNodeLocation {
22
+ if (leafIndex >= this.#numLeaves) {
23
+ throw new Error(`Expected at most ${this.#numLeaves} leaves. Received a leaf at index ${leafIndex}.`);
24
+ }
25
+
26
+ const { level, indexAtLevel } = findLeafLevelAndIndex(this.#numLeaves, leafIndex);
27
+ const location = {
28
+ level,
29
+ index: indexAtLevel,
30
+ };
31
+ this.#nodeMapping.set(this.#getKey(location), {
32
+ location,
33
+ value,
34
+ });
35
+ return location;
36
+ }
37
+
38
+ setNode({ level, index }: TreeNodeLocation, value: T) {
39
+ const location = {
40
+ level,
41
+ index,
42
+ };
43
+ this.#nodeMapping.set(this.#getKey(location), {
44
+ location,
45
+ value,
46
+ });
47
+ }
48
+
49
+ getParentLocation({ level, index }: TreeNodeLocation): TreeNodeLocation {
50
+ if (level === 0) {
51
+ throw new Error('Tree root does not have a parent.');
52
+ }
53
+
54
+ return { level: level - 1, index: Math.floor(index / 2) };
55
+ }
56
+
57
+ getSiblingLocation({ level, index }: TreeNodeLocation): TreeNodeLocation {
58
+ if (level === 0) {
59
+ throw new Error('Tree root does not have a sibling.');
60
+ }
61
+
62
+ return { level, index: index % 2 ? index - 1 : index + 1 };
63
+ }
64
+
65
+ getChildLocations({ level, index }: TreeNodeLocation): [TreeNodeLocation, TreeNodeLocation] {
66
+ const left = { level: level + 1, index: index * 2 };
67
+ const right = { level: level + 1, index: index * 2 + 1 };
68
+ return [left, right];
69
+ }
70
+
71
+ getLeaf(leafIndex: number) {
72
+ const { level, indexAtLevel } = findLeafLevelAndIndex(this.#numLeaves, leafIndex);
73
+ const location = {
74
+ level,
75
+ index: indexAtLevel,
76
+ };
77
+ return this.getNode(location);
78
+ }
79
+
80
+ getNode(location: TreeNodeLocation): T | undefined {
81
+ return this.#nodeMapping.get(this.#getKey(location))?.value;
82
+ }
83
+
84
+ getParent(location: TreeNodeLocation): T | undefined {
85
+ const parentLocation = this.getParentLocation(location);
86
+ return this.getNode(parentLocation);
87
+ }
88
+
89
+ getSibling(location: TreeNodeLocation): T | undefined {
90
+ const siblingLocation = this.getSiblingLocation(location);
91
+ return this.getNode(siblingLocation);
92
+ }
93
+
94
+ getChildren(location: TreeNodeLocation): [T | undefined, T | undefined] {
95
+ const [left, right] = this.getChildLocations(location);
96
+ return [this.getNode(left), this.getNode(right)];
97
+ }
98
+
99
+ #getKey(location: TreeNodeLocation) {
100
+ return `${location.level}-${location.index}`;
101
+ }
102
+ }
@@ -1 +0,0 @@
1
- {"version":3,"file":"unbalanced_merkle_root.d.ts","sourceRoot":"","sources":["../../src/trees/unbalanced_merkle_root.ts"],"names":[],"mappings":";;AACA,OAAO,EAAE,WAAW,EAAE,MAAM,0BAA0B,CAAC;AAEvD;;;;;;;GAOG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,MAAM,EAAE,EAAE,SAAS,CAAC,EAAE,MAAM,EAAE,MAAM,qBAAc,GAAG,MAAM,CAwC9G"}
@@ -1,52 +0,0 @@
1
- import { padArrayEnd } from '@aztec/foundation/collection';
2
- import { sha256Trunc } from '@aztec/foundation/crypto';
3
-
4
- /**
5
- * Computes the merkle root for an unbalanced tree.
6
- *
7
- * @dev Adapted from proving-state.ts -> findMergeLevel and unbalanced_tree.ts.
8
- * Calculates the tree upwards layer by layer until we reach the root.
9
- * The L1 calculation instead computes the tree from right to left (slightly cheaper gas).
10
- * TODO: A more thorough investigation of which method is cheaper, then use that method everywhere.
11
- */
12
- export function computeUnbalancedMerkleRoot(leaves: Buffer[], emptyLeaf?: Buffer, hasher = sha256Trunc): Buffer {
13
- // Pad leaves to 2
14
- if (leaves.length < 2) {
15
- if (emptyLeaf === undefined) {
16
- throw new Error('Cannot compute a Merkle root with less than 2 leaves');
17
- } else {
18
- leaves = padArrayEnd(leaves, emptyLeaf, 2);
19
- }
20
- }
21
-
22
- const depth = Math.ceil(Math.log2(leaves.length));
23
- let [layerWidth, nodeToShift] =
24
- leaves.length & 1 ? [leaves.length - 1, leaves[leaves.length - 1]] : [leaves.length, Buffer.alloc(0)];
25
- // Allocate this layer's leaves and init the next layer up
26
- let thisLayer = leaves.slice(0, layerWidth);
27
- let nextLayer = [];
28
- for (let i = 0; i < depth; i++) {
29
- for (let j = 0; j < layerWidth; j += 2) {
30
- // Store the hash of each pair one layer up
31
- nextLayer[j / 2] = hasher(Buffer.concat([thisLayer[j], thisLayer[j + 1]]));
32
- }
33
- layerWidth /= 2;
34
- if (layerWidth & 1) {
35
- if (nodeToShift.length) {
36
- // If the next layer has odd length, and we have a node that needs to be shifted up, add it here
37
- nextLayer.push(nodeToShift);
38
- layerWidth += 1;
39
- nodeToShift = Buffer.alloc(0);
40
- } else {
41
- // If we don't have a node waiting to be shifted, store the next layer's final node to be shifted
42
- layerWidth -= 1;
43
- nodeToShift = nextLayer[layerWidth];
44
- }
45
- }
46
- // reset the layers
47
- thisLayer = nextLayer;
48
- nextLayer = [];
49
- }
50
- // return the root
51
- return thisLayer[0];
52
- }