@aztec/pxe 0.0.1-commit.96dac018d → 0.0.1-commit.993d52e

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/dest/config/package_info.js +1 -1
  2. package/dest/contract_function_simulator/contract_function_simulator.js +3 -3
  3. package/dest/contract_function_simulator/execution_tagging_index_cache.d.ts +5 -5
  4. package/dest/contract_function_simulator/execution_tagging_index_cache.d.ts.map +1 -1
  5. package/dest/contract_function_simulator/execution_tagging_index_cache.js +3 -3
  6. package/dest/contract_function_simulator/noir-structs/event_validation_request.js +1 -1
  7. package/dest/contract_function_simulator/noir-structs/note_validation_request.d.ts +1 -1
  8. package/dest/contract_function_simulator/noir-structs/note_validation_request.js +1 -1
  9. package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts +1 -1
  10. package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts.map +1 -1
  11. package/dest/contract_function_simulator/oracle/private_execution_oracle.js +18 -10
  12. package/dest/logs/log_service.d.ts +1 -1
  13. package/dest/logs/log_service.d.ts.map +1 -1
  14. package/dest/logs/log_service.js +4 -4
  15. package/dest/private_kernel/hints/private_kernel_reset_private_inputs_builder.d.ts +3 -4
  16. package/dest/private_kernel/hints/private_kernel_reset_private_inputs_builder.d.ts.map +1 -1
  17. package/dest/private_kernel/hints/private_kernel_reset_private_inputs_builder.js +64 -125
  18. package/dest/private_kernel/private_kernel_execution_prover.d.ts +1 -1
  19. package/dest/private_kernel/private_kernel_execution_prover.d.ts.map +1 -1
  20. package/dest/private_kernel/private_kernel_execution_prover.js +5 -13
  21. package/dest/storage/tagging_store/recipient_tagging_store.d.ts +6 -6
  22. package/dest/storage/tagging_store/recipient_tagging_store.d.ts.map +1 -1
  23. package/dest/storage/tagging_store/sender_tagging_store.d.ts +5 -5
  24. package/dest/storage/tagging_store/sender_tagging_store.d.ts.map +1 -1
  25. package/dest/storage/tagging_store/sender_tagging_store.js +4 -4
  26. package/dest/tagging/index.d.ts +2 -2
  27. package/dest/tagging/index.d.ts.map +1 -1
  28. package/dest/tagging/index.js +1 -1
  29. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts +4 -5
  30. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts.map +1 -1
  31. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.js +3 -3
  32. package/dest/tagging/recipient_sync/utils/load_logs_for_range.d.ts +6 -7
  33. package/dest/tagging/recipient_sync/utils/load_logs_for_range.d.ts.map +1 -1
  34. package/dest/tagging/recipient_sync/utils/load_logs_for_range.js +12 -11
  35. package/dest/tagging/sender_sync/sync_sender_tagging_indexes.d.ts +4 -8
  36. package/dest/tagging/sender_sync/sync_sender_tagging_indexes.d.ts.map +1 -1
  37. package/dest/tagging/sender_sync/sync_sender_tagging_indexes.js +3 -6
  38. package/dest/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.d.ts +4 -7
  39. package/dest/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.d.ts.map +1 -1
  40. package/dest/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.js +14 -15
  41. package/package.json +16 -16
  42. package/src/config/package_info.ts +1 -1
  43. package/src/contract_function_simulator/contract_function_simulator.ts +10 -3
  44. package/src/contract_function_simulator/execution_tagging_index_cache.ts +5 -5
  45. package/src/contract_function_simulator/noir-structs/event_validation_request.ts +1 -1
  46. package/src/contract_function_simulator/noir-structs/note_validation_request.ts +1 -1
  47. package/src/contract_function_simulator/oracle/private_execution_oracle.ts +23 -11
  48. package/src/logs/log_service.ts +10 -5
  49. package/src/private_kernel/hints/private_kernel_reset_private_inputs_builder.ts +110 -157
  50. package/src/private_kernel/private_kernel_execution_prover.ts +6 -13
  51. package/src/storage/tagging_store/recipient_tagging_store.ts +9 -5
  52. package/src/storage/tagging_store/sender_tagging_store.ts +8 -8
  53. package/src/tagging/index.ts +1 -1
  54. package/src/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.ts +3 -6
  55. package/src/tagging/recipient_sync/utils/load_logs_for_range.ts +10 -15
  56. package/src/tagging/sender_sync/sync_sender_tagging_indexes.ts +4 -9
  57. package/src/tagging/sender_sync/utils/load_and_store_new_tagging_indexes.ts +11 -20
  58. package/dest/private_kernel/hints/test_utils.d.ts +0 -122
  59. package/dest/private_kernel/hints/test_utils.d.ts.map +0 -1
  60. package/dest/private_kernel/hints/test_utils.js +0 -203
  61. package/src/private_kernel/hints/test_utils.ts +0 -325
@@ -10,7 +10,7 @@ import {
10
10
  import { makeTuple } from '@aztec/foundation/array';
11
11
  import { padArrayEnd } from '@aztec/foundation/collection';
12
12
  import type { Fr } from '@aztec/foundation/curves/bn254';
13
- import { assertLength } from '@aztec/foundation/serialize';
13
+ import { type Tuple, assertLength } from '@aztec/foundation/serialize';
14
14
  import { MembershipWitness } from '@aztec/foundation/trees';
15
15
  import { privateKernelResetDimensionsConfig } from '@aztec/noir-protocol-circuits-types/client';
16
16
  import {
@@ -27,11 +27,13 @@ import {
27
27
  ReadRequestActionEnum,
28
28
  ReadRequestResetActions,
29
29
  type ScopedKeyValidationRequestAndSeparator,
30
+ ScopedNoteHash,
31
+ ScopedNullifier,
32
+ ScopedReadRequest,
30
33
  TransientDataSquashingHint,
31
34
  buildNoteHashReadRequestHintsFromResetActions,
32
35
  buildNullifierReadRequestHintsFromResetActions,
33
36
  buildTransientDataHints,
34
- countSquashedLogs,
35
37
  findPrivateKernelResetDimensions,
36
38
  getNoteHashReadRequestResetActions,
37
39
  getNullifierReadRequestResetActions,
@@ -42,6 +44,15 @@ import { VkData } from '@aztec/stdlib/vks';
42
44
 
43
45
  import type { PrivateKernelOracle } from '../private_kernel_oracle.js';
44
46
 
47
+ function collectNestedReadRequests<N extends number>(
48
+ executionStack: PrivateCallExecutionResult[],
49
+ extractReadRequests: (execution: PrivateCallExecutionResult) => ClaimedLengthArray<ScopedReadRequest, N>,
50
+ ): ScopedReadRequest[] {
51
+ return collectNested(executionStack, executionResult => {
52
+ return extractReadRequests(executionResult).getActiveItems();
53
+ });
54
+ }
55
+
45
56
  function getNullifierMembershipWitnessResolver(oracle: PrivateKernelOracle) {
46
57
  return async (nullifier: Fr) => {
47
58
  const res = await oracle.getNullifierMembershipWitness(nullifier);
@@ -80,14 +91,11 @@ export class PrivateKernelResetPrivateInputsBuilder {
80
91
  // If there's no next iteration, it's the final reset.
81
92
  private nextIteration?: PrivateCircuitPublicInputs;
82
93
 
83
- private noteHashResetActions = ReadRequestResetActions.empty(MAX_NOTE_HASH_READ_REQUESTS_PER_TX);
84
- private nullifierResetActions = ReadRequestResetActions.empty(MAX_NULLIFIER_READ_REQUESTS_PER_TX);
94
+ private noteHashResetActions: ReadRequestResetActions<typeof MAX_NOTE_HASH_READ_REQUESTS_PER_TX>;
95
+ private nullifierResetActions: ReadRequestResetActions<typeof MAX_NULLIFIER_READ_REQUESTS_PER_TX>;
85
96
  private numTransientData?: number;
86
- private transientDataSquashingHints = makeTuple(
87
- MAX_NULLIFIERS_PER_TX,
88
- () => new TransientDataSquashingHint(MAX_NULLIFIERS_PER_TX, MAX_NOTE_HASHES_PER_TX),
89
- );
90
- private requestedDimensions = PrivateKernelResetDimensions.empty();
97
+ private transientDataSquashingHints: Tuple<TransientDataSquashingHint, typeof MAX_NULLIFIERS_PER_TX>;
98
+ private requestedDimensions: PrivateKernelResetDimensions;
91
99
 
92
100
  constructor(
93
101
  private previousKernelOutput: PrivateKernelSimulateOutput<PrivateKernelCircuitPublicInputs>,
@@ -96,18 +104,21 @@ export class PrivateKernelResetPrivateInputsBuilder {
96
104
  private splitCounter: number,
97
105
  ) {
98
106
  this.previousKernel = previousKernelOutput.publicInputs;
107
+ this.requestedDimensions = PrivateKernelResetDimensions.empty();
108
+ this.noteHashResetActions = ReadRequestResetActions.empty(MAX_NOTE_HASH_READ_REQUESTS_PER_TX);
109
+ this.nullifierResetActions = ReadRequestResetActions.empty(MAX_NULLIFIER_READ_REQUESTS_PER_TX);
110
+ this.transientDataSquashingHints = makeTuple(
111
+ MAX_NULLIFIERS_PER_TX,
112
+ () => new TransientDataSquashingHint(MAX_NULLIFIERS_PER_TX, MAX_NOTE_HASHES_PER_TX),
113
+ );
99
114
  this.nextIteration = executionStack[this.executionStack.length - 1]?.publicInputs;
100
115
  }
101
116
 
102
- getRequestedDimensions(): PrivateKernelResetDimensions {
103
- return this.requestedDimensions;
104
- }
105
-
106
117
  needsReset(): boolean {
107
118
  const fns: (() => boolean)[] = [
108
119
  () => this.needsResetNoteHashReadRequests(),
109
120
  () => this.needsResetNullifierReadRequests(),
110
- () => this.needsResetKeyValidationRequests(),
121
+ () => this.needsResetNullifierKeys(),
111
122
  () => this.needsResetTransientData(),
112
123
  ];
113
124
 
@@ -134,7 +145,8 @@ export class PrivateKernelResetPrivateInputsBuilder {
134
145
 
135
146
  const isInner = !!this.nextIteration;
136
147
 
137
- // "final" reset must be done exactly once, because siloing can't be run repeatedly.
148
+ // "final" reset must be done at most once.
149
+ // Because the code that siloes note hashes can't be run repeatedly.
138
150
  // The dimensions found must be big enough to reset all values, i.e. empty remainder.
139
151
  const allowRemainder = isInner;
140
152
 
@@ -234,17 +246,24 @@ export class PrivateKernelResetPrivateInputsBuilder {
234
246
  resetActions.pendingReadHints = resetActions.pendingReadHints.slice(0, maxPending);
235
247
  }
236
248
 
237
- private needsResetNoteHashReadRequests(forceReset = false) {
249
+ private needsResetNoteHashReadRequests(forceResetAll = false) {
238
250
  const numCurr = this.previousKernel.validationRequests.noteHashReadRequests.claimedLength;
239
251
  const numNext = this.nextIteration ? this.nextIteration.noteHashReadRequests.claimedLength : 0;
240
- const maxAmountToKeep = !this.nextIteration || forceReset ? 0 : MAX_NOTE_HASH_READ_REQUESTS_PER_TX;
252
+ const maxAmountToKeep = !this.nextIteration || forceResetAll ? 0 : MAX_NOTE_HASH_READ_REQUESTS_PER_TX;
241
253
  if (numCurr + numNext <= maxAmountToKeep) {
242
254
  return false;
243
255
  }
244
256
 
257
+ const futureNoteHashes = collectNested(this.executionStack, executionResult => {
258
+ return executionResult.publicInputs.noteHashes
259
+ .getActiveItems()
260
+ .map(noteHash => new ScopedNoteHash(noteHash, executionResult.publicInputs.callContext.contractAddress));
261
+ });
262
+
245
263
  const resetActions = getNoteHashReadRequestResetActions(
246
264
  this.previousKernel.validationRequests.noteHashReadRequests,
247
265
  this.previousKernel.end.noteHashes,
266
+ futureNoteHashes,
248
267
  );
249
268
 
250
269
  const numPendingReads = resetActions.pendingReadHints.length;
@@ -253,72 +272,53 @@ export class PrivateKernelResetPrivateInputsBuilder {
253
272
  0,
254
273
  );
255
274
 
256
- const totalReadsToReset = numPendingReads + numSettledReads;
257
- const minResetNeeded = numCurr + numNext - maxAmountToKeep;
258
- if (totalReadsToReset < minResetNeeded) {
259
- if (!this.nextIteration) {
260
- // In the final reset, all note hashes have been emitted. So if we can't reset all requests, at least one
261
- // pending read request doesn't match any of them.
262
- throw new Error('No matching note hash found for note hash read request.');
263
- } else if (!forceReset) {
264
- // A pending read request can only be reset if its note hash has already been included (e.g. a parent call might
265
- // be reading a note hash emitted by a child call. The read request of the parent call is included before the note
266
- // hash of the child call).
267
- // If we can't clear enough read requests to make room for the next iteration's reads, we're stuck.
268
- throw new Error('Number of note hash read requests exceeds the limit.');
269
- } else if (totalReadsToReset == 0) {
270
- // It's transient data squashing asking for the read requests to be reset first (forceReset == true), and
271
- // there's nothing to reset, returns false and let needsResetTransientData throw a more descriptive error.
272
- return false;
273
- }
274
- // Otherwise, forceReset is true, we should proceed to reset as many as we can.
275
- }
276
-
277
275
  if (!this.nextIteration) {
278
- // If there's no next iteration, we need to reset all the read requests.
279
276
  this.noteHashResetActions = resetActions;
280
277
  this.requestedDimensions.NOTE_HASH_PENDING_READ = numPendingReads;
281
278
  this.requestedDimensions.NOTE_HASH_SETTLED_READ = numSettledReads;
282
279
  } else {
283
- // If there's a next iteration, only one dimension can be reset at a time.
284
- // So we pick the dimension that has more read requests to reset.
280
+ // Pick only one dimension to reset if next iteration is not empty.
285
281
  if (numPendingReads > numSettledReads) {
286
- // Reset the pending read requests.
287
- const pendingOnlyActions = assertLength(
282
+ this.requestedDimensions.NOTE_HASH_PENDING_READ = numPendingReads;
283
+ this.noteHashResetActions.actions = assertLength(
288
284
  resetActions.actions.map(action =>
289
285
  action === ReadRequestActionEnum.READ_AS_PENDING ? action : ReadRequestActionEnum.SKIP,
290
286
  ),
291
287
  MAX_NOTE_HASH_READ_REQUESTS_PER_TX,
292
288
  );
293
- this.noteHashResetActions = new ReadRequestResetActions(pendingOnlyActions, resetActions.pendingReadHints);
294
- this.requestedDimensions.NOTE_HASH_PENDING_READ = numPendingReads;
289
+ this.noteHashResetActions.pendingReadHints = resetActions.pendingReadHints;
295
290
  } else {
296
- // Reset the settled read requests.
297
- const settledOnlyActions = assertLength(
291
+ this.requestedDimensions.NOTE_HASH_SETTLED_READ = numSettledReads;
292
+ this.noteHashResetActions.actions = assertLength(
298
293
  resetActions.actions.map(action =>
299
294
  action === ReadRequestActionEnum.READ_AS_SETTLED ? action : ReadRequestActionEnum.SKIP,
300
295
  ),
301
296
  MAX_NOTE_HASH_READ_REQUESTS_PER_TX,
302
297
  );
303
- this.noteHashResetActions = new ReadRequestResetActions(settledOnlyActions, []);
304
- this.requestedDimensions.NOTE_HASH_SETTLED_READ = numSettledReads;
305
298
  }
306
299
  }
307
300
 
308
301
  return true;
309
302
  }
310
303
 
311
- private needsResetNullifierReadRequests(forceReset = false) {
304
+ private needsResetNullifierReadRequests(forceResetAll = false) {
312
305
  const numCurr = this.previousKernel.validationRequests.nullifierReadRequests.claimedLength;
313
306
  const numNext = this.nextIteration ? this.nextIteration.nullifierReadRequests.claimedLength : 0;
314
- const maxAmountToKeep = !this.nextIteration || forceReset ? 0 : MAX_NULLIFIER_READ_REQUESTS_PER_TX;
307
+ const maxAmountToKeep = !this.nextIteration || forceResetAll ? 0 : MAX_NULLIFIER_READ_REQUESTS_PER_TX;
315
308
  if (numCurr + numNext <= maxAmountToKeep) {
316
309
  return false;
317
310
  }
318
311
 
312
+ const futureNullifiers = collectNested(this.executionStack, executionResult => {
313
+ return executionResult.publicInputs.nullifiers
314
+ .getActiveItems()
315
+ .map(nullifier => new ScopedNullifier(nullifier, executionResult.publicInputs.callContext.contractAddress));
316
+ });
317
+
319
318
  const resetActions = getNullifierReadRequestResetActions(
320
319
  this.previousKernel.validationRequests.nullifierReadRequests,
321
320
  this.previousKernel.end.nullifiers,
321
+ futureNullifiers,
322
322
  );
323
323
 
324
324
  const numPendingReads = resetActions.pendingReadHints.length;
@@ -327,61 +327,36 @@ export class PrivateKernelResetPrivateInputsBuilder {
327
327
  0,
328
328
  );
329
329
 
330
- const totalReadsToReset = numPendingReads + numSettledReads;
331
- const minResetNeeded = numCurr + numNext - maxAmountToKeep;
332
- if (totalReadsToReset < minResetNeeded) {
333
- if (!this.nextIteration) {
334
- // In the final reset, all nullifiers have been emitted. So if we can't reset all requests, at least one pending
335
- // read request doesn't match any of them.
336
- throw new Error('No matching nullifier found for nullifier read request.');
337
- } else if (!forceReset) {
338
- // A pending read request can only be reset if its nullifier has already been included (e.g. a parent call might
339
- // be reading a nullifier emitted by a child call. The read request of the parent call is included before the
340
- // nullifier of the child call).
341
- // If we can't clear enough read requests to make room for the next iteration's reads, we're stuck.
342
- throw new Error('Number of nullifier read requests exceeds the limit.');
343
- } else if (totalReadsToReset == 0) {
344
- // It's transient data squashing asking for the read requests to be reset first (forceReset == true), and
345
- // there's nothing to reset, returns false and let needsResetTransientData throw a more descriptive error.
346
- return false;
347
- }
348
- // Otherwise, forceReset is true, we should proceed to reset as many as we can.
349
- }
350
-
351
330
  if (!this.nextIteration) {
352
- // If there's no next iteration, we need to reset all the read requests.
353
331
  this.nullifierResetActions = resetActions;
354
332
  this.requestedDimensions.NULLIFIER_PENDING_READ = numPendingReads;
355
333
  this.requestedDimensions.NULLIFIER_SETTLED_READ = numSettledReads;
356
334
  } else {
357
- // If there's a next iteration, we can only reset one dimension at a time.
335
+ // Pick only one dimension to reset if next iteration is not empty.
358
336
  if (numPendingReads > numSettledReads) {
359
- // Reset the pending read requests.
360
- const pendingOnlyActions = assertLength(
337
+ this.requestedDimensions.NULLIFIER_PENDING_READ = numPendingReads;
338
+ this.nullifierResetActions.actions = assertLength(
361
339
  resetActions.actions.map(action =>
362
340
  action === ReadRequestActionEnum.READ_AS_PENDING ? action : ReadRequestActionEnum.SKIP,
363
341
  ),
364
342
  MAX_NULLIFIER_READ_REQUESTS_PER_TX,
365
343
  );
366
- this.nullifierResetActions = new ReadRequestResetActions(pendingOnlyActions, resetActions.pendingReadHints);
367
- this.requestedDimensions.NULLIFIER_PENDING_READ = numPendingReads;
344
+ this.nullifierResetActions.pendingReadHints = resetActions.pendingReadHints;
368
345
  } else {
369
- // Reset the settled read requests.
370
- const settledOnlyActions = assertLength(
346
+ this.requestedDimensions.NULLIFIER_SETTLED_READ = numSettledReads;
347
+ this.nullifierResetActions.actions = assertLength(
371
348
  resetActions.actions.map(action =>
372
349
  action === ReadRequestActionEnum.READ_AS_SETTLED ? action : ReadRequestActionEnum.SKIP,
373
350
  ),
374
351
  MAX_NULLIFIER_READ_REQUESTS_PER_TX,
375
352
  );
376
- this.nullifierResetActions = new ReadRequestResetActions(settledOnlyActions, []);
377
- this.requestedDimensions.NULLIFIER_SETTLED_READ = numSettledReads;
378
353
  }
379
354
  }
380
355
 
381
356
  return true;
382
357
  }
383
358
 
384
- private needsResetKeyValidationRequests() {
359
+ private needsResetNullifierKeys() {
385
360
  const numCurr = this.previousKernel.validationRequests.scopedKeyValidationRequestsAndSeparators.claimedLength;
386
361
  const numNext = this.nextIteration ? this.nextIteration.keyValidationRequestsAndSeparators.claimedLength : 0;
387
362
  const maxAmountToKeep = !this.nextIteration ? 0 : MAX_KEY_VALIDATION_REQUESTS_PER_TX;
@@ -395,6 +370,9 @@ export class PrivateKernelResetPrivateInputsBuilder {
395
370
  }
396
371
 
397
372
  private needsResetTransientData() {
373
+ // Initialize this to 0 so that needsSilo can be run.
374
+ this.numTransientData = 0;
375
+
398
376
  const nextAccumNoteHashes =
399
377
  this.previousKernel.end.noteHashes.claimedLength + (this.nextIteration?.noteHashes.claimedLength ?? 0);
400
378
  const noteHashWillOverflow = nextAccumNoteHashes > MAX_NOTE_HASHES_PER_TX;
@@ -409,19 +387,19 @@ export class PrivateKernelResetPrivateInputsBuilder {
409
387
  return false;
410
388
  }
411
389
 
412
- const futureNoteHashReads = collectNested(this.executionStack, executionResult =>
413
- executionResult.publicInputs.noteHashReadRequests.getActiveItems(),
414
- );
415
- const futureNullifierReads = collectNested(this.executionStack, executionResult =>
416
- executionResult.publicInputs.nullifierReadRequests.getActiveItems(),
390
+ const futureNoteHashReads = collectNestedReadRequests(
391
+ this.executionStack,
392
+ executionResult => executionResult.publicInputs.noteHashReadRequests,
417
393
  );
418
- const futureLogs = collectNested(this.executionStack, executionResult =>
419
- executionResult.publicInputs.privateLogs.getActiveItems(),
394
+ const futureNullifierReads = collectNestedReadRequests(
395
+ this.executionStack,
396
+ executionResult => executionResult.publicInputs.nullifierReadRequests,
420
397
  );
398
+ // TODO(#15902): Collect future logs and only allow squashing a note hash when all its logs have been emitted
399
+ // (i.e. none of the future logs are linked to the to-be-squashed note hashes).
421
400
  if (this.nextIteration) {
422
- // If it's not the final reset, only one dimension will be reset at a time. Since we are resetting the transient
423
- // data, the note hash and nullifier read requests in the previous kernel won't be squashed and need to be
424
- // included in the future read requests.
401
+ // If it's not the final reset, only one dimension will be reset at a time.
402
+ // The note hashes and nullifiers for the remaining read requests can't be squashed.
425
403
  futureNoteHashReads.push(...this.previousKernel.validationRequests.noteHashReadRequests.getActiveItems());
426
404
  futureNullifierReads.push(...this.previousKernel.validationRequests.nullifierReadRequests.getActiveItems());
427
405
  }
@@ -431,50 +409,27 @@ export class PrivateKernelResetPrivateInputsBuilder {
431
409
  this.previousKernel.end.nullifiers,
432
410
  futureNoteHashReads,
433
411
  futureNullifierReads,
434
- futureLogs,
435
412
  this.noteHashNullifierCounterMap,
436
413
  this.splitCounter,
437
414
  );
438
415
 
439
- if (this.nextIteration) {
440
- const noteHashOverflowBy = noteHashWillOverflow
441
- ? nextAccumNoteHashes - MAX_NOTE_HASHES_PER_TX - numTransientData
442
- : 0;
443
- const nullifierOverflowBy = nullifierWillOverflow
444
- ? nextAccumNullifiers - MAX_NULLIFIERS_PER_TX - numTransientData
445
- : 0;
446
- const numSquashedLogs = logsWillOverflow
447
- ? countSquashedLogs(
448
- this.previousKernel.end.noteHashes,
449
- this.previousKernel.end.privateLogs,
450
- transientDataSquashingHints.slice(0, numTransientData),
451
- )
452
- : 0;
453
- const logsOverflowBy = logsWillOverflow ? nextAccumLogs - MAX_PRIVATE_LOGS_PER_TX - numSquashedLogs : 0;
454
-
455
- if (noteHashOverflowBy > 0 || nullifierOverflowBy > 0 || logsOverflowBy > 0) {
456
- // There's not enough transient data to squash to clear space for the overflow. It may be because some data is
457
- // still required for read requests. Force a reset of the read requests first, and return to transient data
458
- // squashing in the next round of reset.
459
- // Note that clearing the read requests might not be enough to clear more space for the overflow. In this case,
460
- // running the next reset will fail at the following check.
461
- // Only one dimension can be reset at a time for an inner reset, so we try the note hash read requests first
462
- // (which also helps with log overflow), then fall back to nullifier read requests.
463
- const forceReset = true;
464
- if ((noteHashOverflowBy > 0 || logsOverflowBy > 0) && this.needsResetNoteHashReadRequests(forceReset)) {
465
- return true;
466
- }
467
- if (nullifierOverflowBy > 0 && this.needsResetNullifierReadRequests(forceReset)) {
468
- return true;
469
- }
470
- if (noteHashWillOverflow) {
471
- throw new Error('Number of note hashes exceeds the limit.');
472
- }
473
- if (nullifierWillOverflow) {
474
- throw new Error('Number of nullifiers exceeds the limit.');
475
- }
476
- throw new Error('Number of private logs exceeds the limit.');
416
+ if (this.nextIteration && !numTransientData) {
417
+ const forceResetAll = true;
418
+ const canClearReadRequests =
419
+ (noteHashWillOverflow && this.needsResetNoteHashReadRequests(forceResetAll)) ||
420
+ (nullifierWillOverflow && this.needsResetNullifierReadRequests(forceResetAll)) ||
421
+ (logsWillOverflow && this.needsResetNoteHashReadRequests(forceResetAll));
422
+ if (!canClearReadRequests) {
423
+ const overflownData = noteHashWillOverflow
424
+ ? 'note hashes'
425
+ : nullifierWillOverflow
426
+ ? 'nullifiers'
427
+ : 'private logs';
428
+ throw new Error(`Number of ${overflownData} exceeds the limit.`);
477
429
  }
430
+ // Clearing the read requests might not be enough to squash the overflown data.
431
+ // In this case, the next iteration will fail at the above check.
432
+ return true;
478
433
  }
479
434
 
480
435
  this.numTransientData = numTransientData;
@@ -489,13 +444,10 @@ export class PrivateKernelResetPrivateInputsBuilder {
489
444
  throw new Error('`needsResetTransientData` must be run before `needsSiloNoteHashes`.');
490
445
  }
491
446
 
492
- const noteHashes = this.previousKernel.end.noteHashes;
493
- if (noteHashes.claimedLength > 0 && noteHashes.array[0].contractAddress.isZero()) {
494
- // Already siloed.
495
- return false;
496
- }
497
-
498
- const numToSilo = noteHashes.claimedLength - this.numTransientData;
447
+ const numNoteHashes = this.previousKernel.end.noteHashes
448
+ .getActiveItems()
449
+ .filter(n => !n.contractAddress.isZero()).length;
450
+ const numToSilo = Math.max(0, numNoteHashes - this.numTransientData);
499
451
  this.requestedDimensions.NOTE_HASH_SILOING = numToSilo;
500
452
 
501
453
  return numToSilo > 0;
@@ -506,14 +458,15 @@ export class PrivateKernelResetPrivateInputsBuilder {
506
458
  throw new Error('`needsResetTransientData` must be run before `needsSiloNullifiers`.');
507
459
  }
508
460
 
509
- const nullifiers = this.previousKernel.end.nullifiers;
510
- if (nullifiers.claimedLength > 0 && nullifiers.array[0].contractAddress.isZero()) {
511
- // Already siloed.
512
- return false;
513
- }
514
-
515
- const numToSilo = nullifiers.claimedLength - this.numTransientData;
516
- this.requestedDimensions.NULLIFIER_SILOING = numToSilo;
461
+ const numNullifiers = this.previousKernel.end.nullifiers
462
+ .getActiveItems()
463
+ .filter(n => !n.contractAddress.isZero()).length;
464
+ const numToSilo = Math.max(0, numNullifiers - this.numTransientData);
465
+ // Include the first nullifier if there's something to silo.
466
+ // The reset circuit checks that capped_size must be greater than or equal to the number of non-empty nullifiers.
467
+ // Which includes the first nullifier, even though its contract address is always zero and doesn't need siloing.
468
+ const cappedSize = numToSilo ? numToSilo + 1 : 0;
469
+ this.requestedDimensions.NULLIFIER_SILOING = cappedSize;
517
470
 
518
471
  return numToSilo > 0;
519
472
  }
@@ -524,17 +477,17 @@ export class PrivateKernelResetPrivateInputsBuilder {
524
477
  }
525
478
 
526
479
  const privateLogs = this.previousKernel.end.privateLogs;
527
- if (privateLogs.claimedLength > 0 && privateLogs.array[0].contractAddress.isZero()) {
528
- // Already siloed.
529
- return false;
530
- }
480
+ const numLogs = privateLogs.getActiveItems().filter(l => !l.contractAddress.isZero()).length;
531
481
 
532
- const numSquashedLogs = countSquashedLogs(
533
- this.previousKernel.end.noteHashes,
534
- privateLogs,
535
- this.transientDataSquashingHints.slice(0, this.numTransientData),
536
- );
537
- const numToSilo = privateLogs.claimedLength - numSquashedLogs;
482
+ const noteHashes = this.previousKernel.end.noteHashes;
483
+ const squashedNoteHashCounters = this.transientDataSquashingHints
484
+ .filter(h => h.noteHashIndex < noteHashes.claimedLength)
485
+ .map(h => noteHashes.array[h.noteHashIndex].counter);
486
+ const numSquashedLogs = privateLogs
487
+ .getActiveItems()
488
+ .filter(l => squashedNoteHashCounters.includes(l.inner.noteHashCounter)).length;
489
+
490
+ const numToSilo = numLogs - numSquashedLogs;
538
491
  this.requestedDimensions.PRIVATE_LOG_SILOING = numToSilo;
539
492
 
540
493
  return numToSilo > 0;
@@ -116,7 +116,6 @@ export class PrivateKernelExecutionProver {
116
116
  splitCounter,
117
117
  );
118
118
  while (resetBuilder.needsReset()) {
119
- // Inner reset: without siloing.
120
119
  const witgenTimer = new Timer();
121
120
  const privateInputs = await resetBuilder.build(this.oracle);
122
121
  output = generateWitnesses
@@ -217,24 +216,16 @@ export class PrivateKernelExecutionProver {
217
216
  firstIteration = false;
218
217
  }
219
218
 
220
- // Final reset: include siloing of note hashes, nullifiers and private logs.
221
- const finalResetBuilder = new PrivateKernelResetPrivateInputsBuilder(
219
+ // Reset.
220
+ let resetBuilder = new PrivateKernelResetPrivateInputsBuilder(
222
221
  output,
223
222
  [],
224
223
  noteHashNullifierCounterMap,
225
224
  splitCounter,
226
225
  );
227
- if (!finalResetBuilder.needsReset()) {
228
- // The final reset must be performed exactly once, because each tx has at least one nullifier that requires
229
- // siloing, and siloing cannot be done multiple times.
230
- // While, in theory, it might be possible to silo note hashes first and then run another reset to silo nullifiers
231
- // and/or private logs, we currently don't have standalone dimensions for the arrays that require siloing. As a
232
- // result, all necessary siloing must be done together in a single reset.
233
- // Refer to the possible combinations of dimensions in private_kernel_reset_config.json.
234
- throw new Error('Nothing to reset for the final reset.');
235
- } else {
226
+ while (resetBuilder.needsReset()) {
236
227
  const witgenTimer = new Timer();
237
- const privateInputs = await finalResetBuilder.build(this.oracle);
228
+ const privateInputs = await resetBuilder.build(this.oracle);
238
229
  output = generateWitnesses
239
230
  ? await this.proofCreator.generateResetOutput(privateInputs)
240
231
  : await this.proofCreator.simulateReset(privateInputs);
@@ -248,6 +239,8 @@ export class PrivateKernelExecutionProver {
248
239
  witgen: witgenTimer.ms(),
249
240
  },
250
241
  });
242
+
243
+ resetBuilder = new PrivateKernelResetPrivateInputsBuilder(output, [], noteHashNullifierCounterMap, splitCounter);
251
244
  }
252
245
 
253
246
  if (output.publicInputs.feePayer.isZero() && skipFeeEnforcement) {
@@ -1,5 +1,5 @@
1
1
  import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store';
2
- import type { DirectionalAppTaggingSecret } from '@aztec/stdlib/logs';
2
+ import type { ExtendedDirectionalAppTaggingSecret } from '@aztec/stdlib/logs';
3
3
 
4
4
  import type { StagedStore } from '../../job_coordinator/job_coordinator.js';
5
5
 
@@ -106,11 +106,11 @@ export class RecipientTaggingStore implements StagedStore {
106
106
  return Promise.resolve();
107
107
  }
108
108
 
109
- getHighestAgedIndex(secret: DirectionalAppTaggingSecret, jobId: string): Promise<number | undefined> {
109
+ getHighestAgedIndex(secret: ExtendedDirectionalAppTaggingSecret, jobId: string): Promise<number | undefined> {
110
110
  return this.#store.transactionAsync(() => this.#readHighestAgedIndex(jobId, secret.toString()));
111
111
  }
112
112
 
113
- updateHighestAgedIndex(secret: DirectionalAppTaggingSecret, index: number, jobId: string): Promise<void> {
113
+ updateHighestAgedIndex(secret: ExtendedDirectionalAppTaggingSecret, index: number, jobId: string): Promise<void> {
114
114
  return this.#store.transactionAsync(async () => {
115
115
  const currentIndex = await this.#readHighestAgedIndex(jobId, secret.toString());
116
116
  if (currentIndex !== undefined && index <= currentIndex) {
@@ -121,11 +121,15 @@ export class RecipientTaggingStore implements StagedStore {
121
121
  });
122
122
  }
123
123
 
124
- getHighestFinalizedIndex(secret: DirectionalAppTaggingSecret, jobId: string): Promise<number | undefined> {
124
+ getHighestFinalizedIndex(secret: ExtendedDirectionalAppTaggingSecret, jobId: string): Promise<number | undefined> {
125
125
  return this.#store.transactionAsync(() => this.#readHighestFinalizedIndex(jobId, secret.toString()));
126
126
  }
127
127
 
128
- updateHighestFinalizedIndex(secret: DirectionalAppTaggingSecret, index: number, jobId: string): Promise<void> {
128
+ updateHighestFinalizedIndex(
129
+ secret: ExtendedDirectionalAppTaggingSecret,
130
+ index: number,
131
+ jobId: string,
132
+ ): Promise<void> {
129
133
  return this.#store.transactionAsync(async () => {
130
134
  const currentIndex = await this.#readHighestFinalizedIndex(jobId, secret.toString());
131
135
  if (currentIndex !== undefined && index < currentIndex) {
@@ -1,5 +1,5 @@
1
1
  import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store';
2
- import type { DirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs';
2
+ import type { ExtendedDirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs';
3
3
  import { TxHash } from '@aztec/stdlib/tx';
4
4
 
5
5
  import type { StagedStore } from '../../job_coordinator/job_coordinator.js';
@@ -154,7 +154,7 @@ export class SenderTaggingStore implements StagedStore {
154
154
 
155
155
  // The secrets in pre-tags should be unique because we always store just the highest index per given secret-txHash
156
156
  // pair. Below we check that this is the case.
157
- const secretsSet = new Set(preTags.map(preTag => preTag.secret.toString()));
157
+ const secretsSet = new Set(preTags.map(preTag => preTag.extendedSecret.toString()));
158
158
  if (secretsSet.size !== preTags.length) {
159
159
  return Promise.reject(new Error(`Duplicate secrets found when storing pending indexes`));
160
160
  }
@@ -163,10 +163,10 @@ export class SenderTaggingStore implements StagedStore {
163
163
 
164
164
  return this.#store.transactionAsync(async () => {
165
165
  // Prefetch all data, start reads during iteration to keep IndexedDB transaction alive
166
- const preTagReadPromises = preTags.map(({ secret, index }) => {
167
- const secretStr = secret.toString();
166
+ const preTagReadPromises = preTags.map(({ extendedSecret, index }) => {
167
+ const secretStr = extendedSecret.toString();
168
168
  return {
169
- secret,
169
+ extendedSecret,
170
170
  secretStr,
171
171
  index,
172
172
  pending: this.#readPendingIndexes(jobId, secretStr),
@@ -233,7 +233,7 @@ export class SenderTaggingStore implements StagedStore {
233
233
  * [startIndex, endIndex). Returns an empty array if no pending indexes exist in the range.
234
234
  */
235
235
  getTxHashesOfPendingIndexes(
236
- secret: DirectionalAppTaggingSecret,
236
+ secret: ExtendedDirectionalAppTaggingSecret,
237
237
  startIndex: number,
238
238
  endIndex: number,
239
239
  jobId: string,
@@ -252,7 +252,7 @@ export class SenderTaggingStore implements StagedStore {
252
252
  * @param secret - The secret to get the last finalized index for.
253
253
  * @returns The last (highest) finalized index for the given secret.
254
254
  */
255
- getLastFinalizedIndex(secret: DirectionalAppTaggingSecret, jobId: string): Promise<number | undefined> {
255
+ getLastFinalizedIndex(secret: ExtendedDirectionalAppTaggingSecret, jobId: string): Promise<number | undefined> {
256
256
  return this.#store.transactionAsync(() => this.#readLastFinalizedIndex(jobId, secret.toString()));
257
257
  }
258
258
 
@@ -262,7 +262,7 @@ export class SenderTaggingStore implements StagedStore {
262
262
  * @param secret - The directional app tagging secret to query the last used index for.
263
263
  * @returns The last used index.
264
264
  */
265
- getLastUsedIndex(secret: DirectionalAppTaggingSecret, jobId: string): Promise<number | undefined> {
265
+ getLastUsedIndex(secret: ExtendedDirectionalAppTaggingSecret, jobId: string): Promise<number | undefined> {
266
266
  const secretStr = secret.toString();
267
267
 
268
268
  return this.#store.transactionAsync(async () => {
@@ -15,5 +15,5 @@ export { UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN } from './constants.js';
15
15
  export { getAllPrivateLogsByTags, getAllPublicLogsByTagsFromContract } from './get_all_logs_by_tags.js';
16
16
 
17
17
  // Re-export tagging-related types from stdlib
18
- export { DirectionalAppTaggingSecret, Tag, SiloedTag } from '@aztec/stdlib/logs';
18
+ export { ExtendedDirectionalAppTaggingSecret, Tag, SiloedTag } from '@aztec/stdlib/logs';
19
19
  export { type PreTag } from '@aztec/stdlib/logs';
@@ -1,8 +1,7 @@
1
1
  import type { BlockNumber } from '@aztec/foundation/branded-types';
2
- import type { AztecAddress } from '@aztec/stdlib/aztec-address';
3
2
  import type { BlockHash } from '@aztec/stdlib/block';
4
3
  import type { AztecNode } from '@aztec/stdlib/interfaces/client';
5
- import type { DirectionalAppTaggingSecret, TxScopedL2Log } from '@aztec/stdlib/logs';
4
+ import type { ExtendedDirectionalAppTaggingSecret, TxScopedL2Log } from '@aztec/stdlib/logs';
6
5
 
7
6
  import type { RecipientTaggingStore } from '../../storage/tagging_store/recipient_tagging_store.js';
8
7
  import { UNFINALIZED_TAGGING_INDEXES_WINDOW_LEN } from '../constants.js';
@@ -10,15 +9,14 @@ import { findHighestIndexes } from './utils/find_highest_indexes.js';
10
9
  import { loadLogsForRange } from './utils/load_logs_for_range.js';
11
10
 
12
11
  /**
13
- * Loads private logs for `app` and sender-recipient pair defined by `secret` and updates the highest aged and
12
+ * Loads private logs for the app-sender-recipient triplet defined by `secret` and updates the highest aged and
14
13
  * finalized indexes in the db. At most load logs from blocks up to and including `anchorBlockNumber`.
15
14
  *
16
15
  * @dev This function can be safely executed "in parallel" for other sender-recipient pairs because the data
17
16
  * in the tagging data provider is indexed by the secret and hence completely disjoint.
18
17
  */
19
18
  export async function loadPrivateLogsForSenderRecipientPair(
20
- secret: DirectionalAppTaggingSecret,
21
- app: AztecAddress,
19
+ secret: ExtendedDirectionalAppTaggingSecret,
22
20
  aztecNode: AztecNode,
23
21
  taggingStore: RecipientTaggingStore,
24
22
  anchorBlockNumber: BlockNumber,
@@ -96,7 +94,6 @@ export async function loadPrivateLogsForSenderRecipientPair(
96
94
  // Get private logs with their block timestamps and corresponding tagging indexes
97
95
  const privateLogsWithIndexes = await loadLogsForRange(
98
96
  secret,
99
- app,
100
97
  aztecNode,
101
98
  start,
102
99
  end,