@aztec/pxe 3.0.0-nightly.20251222 → 3.0.0-nightly.20251224

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. package/dest/contract_function_simulator/contract_function_simulator.d.ts +31 -6
  2. package/dest/contract_function_simulator/contract_function_simulator.d.ts.map +1 -1
  3. package/dest/contract_function_simulator/contract_function_simulator.js +35 -11
  4. package/dest/contract_function_simulator/noir-structs/log_retrieval_request.d.ts +4 -3
  5. package/dest/contract_function_simulator/noir-structs/log_retrieval_request.d.ts.map +1 -1
  6. package/dest/contract_function_simulator/noir-structs/log_retrieval_request.js +7 -6
  7. package/dest/contract_function_simulator/oracle/interfaces.d.ts +2 -3
  8. package/dest/contract_function_simulator/oracle/interfaces.d.ts.map +1 -1
  9. package/dest/contract_function_simulator/oracle/private_execution.d.ts +6 -8
  10. package/dest/contract_function_simulator/oracle/private_execution.d.ts.map +1 -1
  11. package/dest/contract_function_simulator/oracle/private_execution.js +10 -9
  12. package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts +14 -5
  13. package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts.map +1 -1
  14. package/dest/contract_function_simulator/oracle/private_execution_oracle.js +19 -14
  15. package/dest/contract_function_simulator/oracle/utility_execution_oracle.d.ts +44 -6
  16. package/dest/contract_function_simulator/oracle/utility_execution_oracle.d.ts.map +1 -1
  17. package/dest/contract_function_simulator/oracle/utility_execution_oracle.js +135 -30
  18. package/dest/contract_function_simulator/proxied_contract_data_source.d.ts +2 -2
  19. package/dest/contract_function_simulator/proxied_contract_data_source.d.ts.map +1 -1
  20. package/dest/contract_function_simulator/proxied_contract_data_source.js +18 -0
  21. package/dest/debug/pxe_debug_utils.d.ts +3 -2
  22. package/dest/debug/pxe_debug_utils.d.ts.map +1 -1
  23. package/dest/entrypoints/client/bundle/index.d.ts +1 -2
  24. package/dest/entrypoints/client/bundle/index.d.ts.map +1 -1
  25. package/dest/entrypoints/client/bundle/index.js +0 -1
  26. package/dest/entrypoints/client/lazy/index.d.ts +1 -2
  27. package/dest/entrypoints/client/lazy/index.d.ts.map +1 -1
  28. package/dest/entrypoints/client/lazy/index.js +0 -1
  29. package/dest/entrypoints/server/index.d.ts +2 -2
  30. package/dest/entrypoints/server/index.d.ts.map +1 -1
  31. package/dest/entrypoints/server/index.js +1 -1
  32. package/dest/events/event_service.d.ts +15 -0
  33. package/dest/events/event_service.d.ts.map +1 -0
  34. package/dest/events/event_service.js +47 -0
  35. package/dest/events/private_event_filter_validator.d.ts +3 -2
  36. package/dest/events/private_event_filter_validator.d.ts.map +1 -1
  37. package/dest/logs/log_service.d.ts +43 -0
  38. package/dest/logs/log_service.d.ts.map +1 -0
  39. package/dest/logs/log_service.js +239 -0
  40. package/dest/notes/index.d.ts +2 -0
  41. package/dest/notes/index.d.ts.map +1 -0
  42. package/dest/notes/index.js +1 -0
  43. package/dest/notes/note_service.d.ts +48 -0
  44. package/dest/notes/note_service.d.ts.map +1 -0
  45. package/dest/notes/note_service.js +152 -0
  46. package/dest/private_kernel/private_kernel_oracle_impl.d.ts +2 -2
  47. package/dest/private_kernel/private_kernel_oracle_impl.d.ts.map +1 -1
  48. package/dest/public_storage/public_storage_service.d.ts +24 -0
  49. package/dest/public_storage/public_storage_service.d.ts.map +1 -0
  50. package/dest/public_storage/public_storage_service.js +26 -0
  51. package/dest/pxe.d.ts +1 -1
  52. package/dest/pxe.d.ts.map +1 -1
  53. package/dest/pxe.js +3 -5
  54. package/dest/storage/capsule_data_provider/capsule_data_provider.d.ts +33 -1
  55. package/dest/storage/capsule_data_provider/capsule_data_provider.d.ts.map +1 -1
  56. package/dest/storage/capsule_data_provider/capsule_data_provider.js +32 -4
  57. package/dest/storage/contract_data_provider/contract_data_provider.d.ts +2 -1
  58. package/dest/storage/contract_data_provider/contract_data_provider.d.ts.map +1 -1
  59. package/dest/storage/contract_data_provider/contract_data_provider.js +11 -0
  60. package/dest/storage/tagging_data_provider/sender_tagging_data_provider.js +3 -3
  61. package/dest/tagging/index.d.ts +2 -4
  62. package/dest/tagging/index.d.ts.map +1 -1
  63. package/dest/tagging/index.js +1 -3
  64. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts +14 -0
  65. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts.map +1 -0
  66. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.js +99 -0
  67. package/dest/tagging/recipient_sync/new_recipient_tagging_data_provider.d.ts +21 -0
  68. package/dest/tagging/recipient_sync/new_recipient_tagging_data_provider.d.ts.map +1 -0
  69. package/dest/tagging/recipient_sync/new_recipient_tagging_data_provider.js +42 -0
  70. package/dest/tagging/recipient_sync/utils/find_highest_indexes.d.ts +12 -0
  71. package/dest/tagging/recipient_sync/utils/find_highest_indexes.d.ts.map +1 -0
  72. package/dest/tagging/recipient_sync/utils/find_highest_indexes.js +20 -0
  73. package/dest/tagging/recipient_sync/utils/load_logs_for_range.d.ts +14 -0
  74. package/dest/tagging/recipient_sync/utils/load_logs_for_range.d.ts.map +1 -0
  75. package/dest/tagging/recipient_sync/utils/load_logs_for_range.js +29 -0
  76. package/dest/tagging/sync/sync_sender_tagging_indexes.d.ts +2 -2
  77. package/dest/tagging/sync/sync_sender_tagging_indexes.d.ts.map +1 -1
  78. package/dest/tagging/sync/sync_sender_tagging_indexes.js +3 -3
  79. package/dest/tagging/sync/utils/load_and_store_new_tagging_indexes.d.ts +1 -1
  80. package/dest/tagging/sync/utils/load_and_store_new_tagging_indexes.d.ts.map +1 -1
  81. package/dest/tagging/sync/utils/load_and_store_new_tagging_indexes.js +3 -5
  82. package/dest/tree_membership/tree_membership_service.d.ts +52 -0
  83. package/dest/tree_membership/tree_membership_service.d.ts.map +1 -0
  84. package/dest/tree_membership/tree_membership_service.js +84 -0
  85. package/package.json +16 -16
  86. package/src/contract_function_simulator/contract_function_simulator.ts +59 -10
  87. package/src/contract_function_simulator/noir-structs/log_retrieval_request.ts +5 -4
  88. package/src/contract_function_simulator/oracle/interfaces.ts +1 -2
  89. package/src/contract_function_simulator/oracle/private_execution.ts +13 -10
  90. package/src/contract_function_simulator/oracle/private_execution_oracle.ts +81 -21
  91. package/src/contract_function_simulator/oracle/utility_execution_oracle.ts +199 -38
  92. package/src/contract_function_simulator/proxied_contract_data_source.ts +18 -1
  93. package/src/debug/pxe_debug_utils.ts +2 -1
  94. package/src/entrypoints/client/bundle/index.ts +0 -1
  95. package/src/entrypoints/client/lazy/index.ts +0 -1
  96. package/src/entrypoints/server/index.ts +1 -1
  97. package/src/events/event_service.ts +77 -0
  98. package/src/events/private_event_filter_validator.ts +2 -1
  99. package/src/logs/log_service.ts +364 -0
  100. package/src/notes/index.ts +1 -0
  101. package/src/notes/note_service.ts +200 -0
  102. package/src/private_kernel/private_kernel_oracle_impl.ts +1 -1
  103. package/src/public_storage/public_storage_service.ts +33 -0
  104. package/src/pxe.ts +13 -11
  105. package/src/storage/capsule_data_provider/capsule_data_provider.ts +32 -0
  106. package/src/storage/contract_data_provider/contract_data_provider.ts +15 -0
  107. package/src/storage/tagging_data_provider/sender_tagging_data_provider.ts +3 -3
  108. package/src/tagging/index.ts +1 -3
  109. package/src/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.ts +129 -0
  110. package/src/tagging/recipient_sync/new_recipient_tagging_data_provider.ts +53 -0
  111. package/src/tagging/recipient_sync/utils/find_highest_indexes.ts +34 -0
  112. package/src/tagging/recipient_sync/utils/load_logs_for_range.ts +43 -0
  113. package/src/tagging/sync/sync_sender_tagging_indexes.ts +3 -3
  114. package/src/tagging/sync/utils/load_and_store_new_tagging_indexes.ts +3 -5
  115. package/src/tree_membership/tree_membership_service.ts +112 -0
  116. package/dest/contract_function_simulator/execution_data_provider.d.ts +0 -248
  117. package/dest/contract_function_simulator/execution_data_provider.d.ts.map +0 -1
  118. package/dest/contract_function_simulator/execution_data_provider.js +0 -14
  119. package/dest/contract_function_simulator/pxe_oracle_interface.d.ts +0 -113
  120. package/dest/contract_function_simulator/pxe_oracle_interface.d.ts.map +0 -1
  121. package/dest/contract_function_simulator/pxe_oracle_interface.js +0 -648
  122. package/dest/tagging/siloed_tag.d.ts +0 -14
  123. package/dest/tagging/siloed_tag.d.ts.map +0 -1
  124. package/dest/tagging/siloed_tag.js +0 -20
  125. package/dest/tagging/tag.d.ts +0 -12
  126. package/dest/tagging/tag.d.ts.map +0 -1
  127. package/dest/tagging/tag.js +0 -17
  128. package/src/contract_function_simulator/execution_data_provider.ts +0 -322
  129. package/src/contract_function_simulator/pxe_oracle_interface.ts +0 -967
  130. package/src/tagging/siloed_tag.ts +0 -22
  131. package/src/tagging/tag.ts +0 -16
package/src/contract_function_simulator/pxe_oracle_interface.ts
@@ -1,967 +0,0 @@
1
- import type { L1_TO_L2_MSG_TREE_HEIGHT } from '@aztec/constants';
2
- import { Fr } from '@aztec/foundation/curves/bn254';
3
- import { Point } from '@aztec/foundation/curves/grumpkin';
4
- import { createLogger } from '@aztec/foundation/log';
5
- import type { KeyStore } from '@aztec/key-store';
6
- import { EventSelector, type FunctionArtifactWithContractName, FunctionSelector } from '@aztec/stdlib/abi';
7
- import { AztecAddress } from '@aztec/stdlib/aztec-address';
8
- import type { BlockParameter, DataInBlock, L2Block } from '@aztec/stdlib/block';
9
- import type { CompleteAddress, ContractInstance } from '@aztec/stdlib/contract';
10
- import { computeUniqueNoteHash, siloNoteHash, siloNullifier, siloPrivateLog } from '@aztec/stdlib/hash';
11
- import { type AztecNode, MAX_RPC_LEN } from '@aztec/stdlib/interfaces/client';
12
- import type { KeyValidationRequest } from '@aztec/stdlib/kernel';
13
- import { computeAddressSecret } from '@aztec/stdlib/keys';
14
- import {
15
- PendingTaggedLog,
16
- PrivateLogWithTxData,
17
- PublicLog,
18
- PublicLogWithTxData,
19
- TxScopedL2Log,
20
- deriveEcdhSharedSecret,
21
- } from '@aztec/stdlib/logs';
22
- import { getNonNullifiedL1ToL2MessageWitness } from '@aztec/stdlib/messaging';
23
- import { Note, type NoteStatus } from '@aztec/stdlib/note';
24
- import { NoteDao } from '@aztec/stdlib/note';
25
- import { MerkleTreeId, type NullifierMembershipWitness, PublicDataWitness } from '@aztec/stdlib/trees';
26
- import { TxHash } from '@aztec/stdlib/tx';
27
-
28
- import type { ExecutionDataProvider, ExecutionStats } from '../contract_function_simulator/execution_data_provider.js';
29
- import { MessageLoadOracleInputs } from '../contract_function_simulator/oracle/message_load_oracle_inputs.js';
30
- import { ORACLE_VERSION } from '../oracle_version.js';
31
- import type { AddressDataProvider } from '../storage/address_data_provider/address_data_provider.js';
32
- import type { AnchorBlockDataProvider } from '../storage/anchor_block_data_provider/anchor_block_data_provider.js';
33
- import type { CapsuleDataProvider } from '../storage/capsule_data_provider/capsule_data_provider.js';
34
- import type { ContractDataProvider } from '../storage/contract_data_provider/contract_data_provider.js';
35
- import type { NoteDataProvider } from '../storage/note_data_provider/note_data_provider.js';
36
- import type { PrivateEventDataProvider } from '../storage/private_event_data_provider/private_event_data_provider.js';
37
- import type { RecipientTaggingDataProvider } from '../storage/tagging_data_provider/recipient_tagging_data_provider.js';
38
- import type { SenderTaggingDataProvider } from '../storage/tagging_data_provider/sender_tagging_data_provider.js';
39
- import {
40
- DirectionalAppTaggingSecret,
41
- SiloedTag,
42
- Tag,
43
- WINDOW_HALF_SIZE,
44
- getInitialIndexesMap,
45
- getPreTagsForTheWindow,
46
- } from '../tagging/index.js';
47
- import { EventValidationRequest } from './noir-structs/event_validation_request.js';
48
- import { LogRetrievalRequest } from './noir-structs/log_retrieval_request.js';
49
- import { LogRetrievalResponse } from './noir-structs/log_retrieval_response.js';
50
- import { NoteValidationRequest } from './noir-structs/note_validation_request.js';
51
- import type { ProxiedNode } from './proxied_node.js';
52
-
53
- /**
54
- * A data layer that provides and stores information needed for simulating/proving a transaction.
55
- */
56
- export class PXEOracleInterface implements ExecutionDataProvider {
57
- // Note: The Aztec node and senderTaggingDataProvider are exposed publicly since PXEOracleInterface will be deprecated soon
58
- // (issue #17776). When refactoring tagging, it made sense to align with this future change by moving the sender
59
- // tagging index sync functionality elsewhere. This required exposing these two properties since there is currently
60
- // no alternative way to access them in the PrivateExecutionOracle.
61
- constructor(
62
- public readonly aztecNode: AztecNode | ProxiedNode,
63
- private keyStore: KeyStore,
64
- private contractDataProvider: ContractDataProvider,
65
- private noteDataProvider: NoteDataProvider,
66
- private capsuleDataProvider: CapsuleDataProvider,
67
- private anchorBlockDataProvider: AnchorBlockDataProvider,
68
- public readonly senderTaggingDataProvider: SenderTaggingDataProvider,
69
- private recipientTaggingDataProvider: RecipientTaggingDataProvider,
70
- private addressDataProvider: AddressDataProvider,
71
- private privateEventDataProvider: PrivateEventDataProvider,
72
- private log = createLogger('pxe:pxe_oracle_interface'),
73
- ) {}
74
-
75
- getKeyValidationRequest(pkMHash: Fr, contractAddress: AztecAddress): Promise<KeyValidationRequest> {
76
- return this.keyStore.getKeyValidationRequest(pkMHash, contractAddress);
77
- }
78
-
79
- async getCompleteAddress(account: AztecAddress): Promise<CompleteAddress> {
80
- const completeAddress = await this.addressDataProvider.getCompleteAddress(account);
81
- if (!completeAddress) {
82
- throw new Error(
83
- `No public key registered for address ${account}.
84
- Register it by calling pxe.addAccount(...).\nSee docs for context: https://docs.aztec.network/developers/resources/debugging/aztecnr-errors#simulation-error-no-public-key-registered-for-address-0x0-register-it-by-calling-pxeregisterrecipient-or-pxeregisteraccount`,
85
- );
86
- }
87
- return completeAddress;
88
- }
89
-
90
- async getContractInstance(address: AztecAddress): Promise<ContractInstance> {
91
- const instance = await this.contractDataProvider.getContractInstance(address);
92
- if (!instance) {
93
- throw new Error(`No contract instance found for address ${address.toString()}`);
94
- }
95
- return instance;
96
- }
97
-
98
- async getNotes(
99
- contractAddress: AztecAddress,
100
- owner: AztecAddress | undefined,
101
- storageSlot: Fr,
102
- status: NoteStatus,
103
- scopes?: AztecAddress[],
104
- ) {
105
- const noteDaos = await this.noteDataProvider.getNotes({
106
- contractAddress,
107
- owner,
108
- storageSlot,
109
- status,
110
- scopes,
111
- });
112
- return noteDaos.map(
113
- ({ contractAddress, owner, storageSlot, randomness, noteNonce, note, noteHash, siloedNullifier, index }) => ({
114
- contractAddress,
115
- owner,
116
- storageSlot,
117
- randomness,
118
- noteNonce,
119
- note,
120
- noteHash,
121
- siloedNullifier,
122
- // PXE can use this index to get full MembershipWitness
123
- index,
124
- }),
125
- );
126
- }
127
-
128
- async getFunctionArtifact(
129
- contractAddress: AztecAddress,
130
- selector: FunctionSelector,
131
- ): Promise<FunctionArtifactWithContractName> {
132
- const artifact = await this.contractDataProvider.getFunctionArtifact(contractAddress, selector);
133
- if (!artifact) {
134
- throw new Error(`Function artifact not found for contract ${contractAddress} and selector ${selector}.`);
135
- }
136
- const debug = await this.contractDataProvider.getFunctionDebugMetadata(contractAddress, selector);
137
- return {
138
- ...artifact,
139
- debug,
140
- };
141
- }
142
-
143
- /**
144
- * Fetches a message from the db, given its key.
145
- * @param contractAddress - Address of a contract by which the message was emitted.
146
- * @param messageHash - Hash of the message.
147
- * @param secret - Secret used to compute a nullifier.
148
- * @dev Contract address and secret are only used to compute the nullifier to get non-nullified messages
149
- * @returns The l1 to l2 membership witness (index of message in the tree and sibling path).
150
- */
151
- async getL1ToL2MembershipWitness(
152
- contractAddress: AztecAddress,
153
- messageHash: Fr,
154
- secret: Fr,
155
- ): Promise<MessageLoadOracleInputs<typeof L1_TO_L2_MSG_TREE_HEIGHT>> {
156
- const [messageIndex, siblingPath] = await getNonNullifiedL1ToL2MessageWitness(
157
- this.aztecNode,
158
- contractAddress,
159
- messageHash,
160
- secret,
161
- );
162
-
163
- // messageIndex is the leaf index of the message in the L1-to-L2 message tree, as expected by MessageLoadOracleInputs.
164
- return new MessageLoadOracleInputs(messageIndex, siblingPath);
165
- }
166
-
167
- async getNullifierIndex(nullifier: Fr) {
168
- return await this.#findLeafIndex('latest', MerkleTreeId.NULLIFIER_TREE, nullifier);
169
- }
170
-
171
- async #findLeafIndex(blockNumber: BlockParameter, treeId: MerkleTreeId, leafValue: Fr): Promise<bigint | undefined> {
172
- const [leafIndex] = await this.aztecNode.findLeavesIndexes(blockNumber, treeId, [leafValue]);
173
- return leafIndex?.data;
174
- }
175
-
176
- public async getMembershipWitness(blockNumber: BlockParameter, treeId: MerkleTreeId, leafValue: Fr): Promise<Fr[]> {
177
- const witness = await this.#tryGetMembershipWitness(blockNumber, treeId, leafValue);
178
- if (!witness) {
179
- throw new Error(`Leaf value ${leafValue} not found in tree ${MerkleTreeId[treeId]} at block ${blockNumber}`);
180
- }
181
- return witness;
182
- }
183
-
184
- async #tryGetMembershipWitness(
185
- blockNumber: BlockParameter,
186
- treeId: MerkleTreeId,
187
- value: Fr,
188
- ): Promise<Fr[] | undefined> {
189
- switch (treeId) {
190
- case MerkleTreeId.NULLIFIER_TREE:
191
- return (await this.aztecNode.getNullifierMembershipWitness(blockNumber, value))?.withoutPreimage().toFields();
192
- case MerkleTreeId.NOTE_HASH_TREE:
193
- return (await this.aztecNode.getNoteHashMembershipWitness(blockNumber, value))?.toFields();
194
- case MerkleTreeId.PUBLIC_DATA_TREE:
195
- return (await this.aztecNode.getPublicDataWitness(blockNumber, value))?.withoutPreimage().toFields();
196
- case MerkleTreeId.ARCHIVE:
197
- return (await this.aztecNode.getArchiveMembershipWitness(blockNumber, value))?.toFields();
198
- default:
199
- throw new Error('Not implemented');
200
- }
201
- }
202
-
203
- public async getNullifierMembershipWitnessAtLatestBlock(nullifier: Fr) {
204
- const blockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
205
- return this.getNullifierMembershipWitness(blockNumber, nullifier);
206
- }
207
-
208
- public getNullifierMembershipWitness(
209
- blockNumber: BlockParameter,
210
- nullifier: Fr,
211
- ): Promise<NullifierMembershipWitness | undefined> {
212
- return this.aztecNode.getNullifierMembershipWitness(blockNumber, nullifier);
213
- }
214
-
215
- public async getLowNullifierMembershipWitness(
216
- blockNumber: BlockParameter,
217
- nullifier: Fr,
218
- ): Promise<NullifierMembershipWitness | undefined> {
219
- const anchorBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
220
- if (blockNumber !== 'latest' && blockNumber > anchorBlockNumber) {
221
- throw new Error(`Block number ${blockNumber} is higher than current block ${anchorBlockNumber}`);
222
- }
223
- return this.aztecNode.getLowNullifierMembershipWitness(blockNumber, nullifier);
224
- }
225
-
226
- public async getBlock(blockNumber: BlockParameter): Promise<L2Block | undefined> {
227
- const anchorBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
228
- if (blockNumber !== 'latest' && blockNumber > anchorBlockNumber) {
229
- throw new Error(`Block number ${blockNumber} is higher than current block ${anchorBlockNumber}`);
230
- }
231
- return await this.aztecNode.getBlock(blockNumber);
232
- }
233
-
234
- public async getPublicDataWitness(blockNumber: BlockParameter, leafSlot: Fr): Promise<PublicDataWitness | undefined> {
235
- const anchorBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
236
- if (blockNumber !== 'latest' && blockNumber > anchorBlockNumber) {
237
- throw new Error(`Block number ${blockNumber} is higher than current block ${anchorBlockNumber}`);
238
- }
239
- return await this.aztecNode.getPublicDataWitness(blockNumber, leafSlot);
240
- }
241
-
242
- public async getPublicStorageAt(blockNumber: BlockParameter, contract: AztecAddress, slot: Fr): Promise<Fr> {
243
- const anchorBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
244
- if (blockNumber !== 'latest' && blockNumber > anchorBlockNumber) {
245
- throw new Error(`Block number ${blockNumber} is higher than current block ${anchorBlockNumber}`);
246
- }
247
- return await this.aztecNode.getPublicStorageAt(blockNumber, contract, slot);
248
- }
249
-
250
- public assertCompatibleOracleVersion(version: number): void {
251
- if (version !== ORACLE_VERSION) {
252
- throw new Error(`Incompatible oracle version. Expected version ${ORACLE_VERSION}, got ${version}.`);
253
- }
254
- }
255
-
256
- public getDebugFunctionName(contractAddress: AztecAddress, selector: FunctionSelector): Promise<string> {
257
- return this.contractDataProvider.getDebugFunctionName(contractAddress, selector);
258
- }
259
-
260
- /**
261
- * Returns the full contents of your address book.
262
- * This is used when calculating tags for incoming notes by deriving the shared secret, the contract-siloed tagging secret, and
263
- * finally the index-specified tag. We then query the node with this tag for each address in the address book.
264
- * @returns The full list of the user's contact addresses.
265
- */
266
- public getSenders(): Promise<AztecAddress[]> {
267
- return this.recipientTaggingDataProvider.getSenderAddresses();
268
- }
269
-
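The derivation chain described in the comment above (shared secret, then contract-siloed tagging secret, then index-specified tag) can be illustrated with a minimal sketch. The stand-in type and the computeTag/siloTag parameters below are hypothetical; in the removed file the real steps are DirectionalAppTaggingSecret.compute, Tag.compute and SiloedTag.compute.

// Minimal illustration only; not part of the package. All names below are stand-ins.
type Field = bigint;

async function deriveSiloedTag(
  appTaggingSecret: Field, // directional, contract-siloed secret shared by sender and recipient
  index: number,           // per-secret counter, bumped for every new log sent under this secret
  contractAddress: Field,
  computeTag: (secret: Field, index: number) => Promise<Field>,        // stand-in for Tag.compute
  siloTag: (tag: Field, contractAddress: Field) => Promise<Field>,     // stand-in for SiloedTag.compute
): Promise<Field> {
  // secret + index -> tag, then silo by contract so tags from different apps cannot collide.
  const tag = await computeTag(appTaggingSecret, index);
  return siloTag(tag, contractAddress);
}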
270
- public async calculateDirectionalAppTaggingSecret(
271
- contractAddress: AztecAddress,
272
- sender: AztecAddress,
273
- recipient: AztecAddress,
274
- ) {
275
- const senderCompleteAddress = await this.getCompleteAddress(sender);
276
- const senderIvsk = await this.keyStore.getMasterIncomingViewingSecretKey(sender);
277
- return DirectionalAppTaggingSecret.compute(
278
- senderCompleteAddress,
279
- senderIvsk,
280
- recipient,
281
- contractAddress,
282
- recipient,
283
- );
284
- }
285
-
286
- /**
287
- * Returns the last used tagging indexes along with the directional app tagging secrets for a given recipient and all
288
- * the senders in the address book.
289
- * This method should be exposed as an oracle call to allow aztec.nr to perform the orchestration
290
- * of the syncTaggedLogs and processTaggedLogs methods. However, it is not possible to do so at the moment,
291
- * so we're keeping it private for now.
292
- * @param contractAddress - The contract address to silo the secret for
293
- * @param recipient - The address receiving the notes
294
- * @returns A list of directional app tagging secrets along with the last used tagging indexes. If the corresponding
295
- * secret was never used, the index is undefined.
296
- * TODO(#17775): The naming here is broken as the function name does not reflect the return type. Make sure this gets
297
- * fixed when implementing the linked issue.
298
- */
299
- async #getLastUsedTaggingIndexesForSenders(
300
- contractAddress: AztecAddress,
301
- recipient: AztecAddress,
302
- ): Promise<{ secret: DirectionalAppTaggingSecret; index: number | undefined }[]> {
303
- const recipientCompleteAddress = await this.getCompleteAddress(recipient);
304
- const recipientIvsk = await this.keyStore.getMasterIncomingViewingSecretKey(recipient);
305
-
306
- // We implicitly add all PXE accounts as senders; this helps us decrypt tags on notes that we send to ourselves
307
- // (recipient = us, sender = us)
308
- const senders = [
309
- ...(await this.recipientTaggingDataProvider.getSenderAddresses()),
310
- ...(await this.keyStore.getAccounts()),
311
- ].filter((address, index, self) => index === self.findIndex(otherAddress => otherAddress.equals(address)));
312
- const secrets = await Promise.all(
313
- senders.map(contact => {
314
- return DirectionalAppTaggingSecret.compute(
315
- recipientCompleteAddress,
316
- recipientIvsk,
317
- contact,
318
- contractAddress,
319
- recipient,
320
- );
321
- }),
322
- );
323
- const indexes = await this.recipientTaggingDataProvider.getLastUsedIndexes(secrets);
324
- if (indexes.length !== secrets.length) {
325
- throw new Error('Indexes and directional app tagging secrets have different lengths');
326
- }
327
-
328
- return secrets.map((secret, i) => ({
329
- secret,
330
- index: indexes[i],
331
- }));
332
- }
333
-
334
- // TODO(#17775): Replace this implementation of this function with one implementing an approach similar
335
- // to syncSenderTaggingIndexes. Not done yet due to re-prioritization to devex and this doesn't directly affect
336
- // devex.
337
- public async syncTaggedLogs(
338
- contractAddress: AztecAddress,
339
- pendingTaggedLogArrayBaseSlot: Fr,
340
- scopes?: AztecAddress[],
341
- ) {
342
- this.log.verbose('Searching for tagged logs', { contract: contractAddress });
343
-
344
- const maxBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
345
-
346
- // Ideally this algorithm would be implemented in noir, exposing its building blocks as oracles.
347
- // However it is impossible at the moment due to the language not supporting nested slices.
348
- // This nesting is necessary because for a given set of tags we don't
349
- // know how many logs we will get back. Furthermore, these logs are of undetermined
350
- // length, since we don't know which notes they correspond to until we decrypt them.
351
-
352
- const recipients = scopes ? scopes : await this.keyStore.getAccounts();
353
- const contractName = await this.contractDataProvider.getDebugContractName(contractAddress);
354
- for (const recipient of recipients) {
355
- // Get all the secrets for the recipient and sender pairs (#9365)
356
- const indexedSecrets = await this.#getLastUsedTaggingIndexesForSenders(contractAddress, recipient);
357
-
358
- // We fetch logs for a window of indexes in a range:
359
- // <latest_log_index - WINDOW_HALF_SIZE, latest_log_index + WINDOW_HALF_SIZE>.
360
- //
361
- // We use this window approach because a sender might have messed up and inadvertently
362
- // incremented their index without us getting any logs (for example, in case of a revert). If we stopped looking
363
- // for logs the first time a tag yields nothing, we might never receive anything from that sender again.
364
- // There is also a possibility that we have advanced our index while the sender has reused it, so we might have missed
365
- // some logs. For these reasons, we have to look both behind and ahead of the stored index.
366
- let secretsAndWindows = indexedSecrets.map(indexedSecret => {
367
- if (indexedSecret.index === undefined) {
368
- return {
369
- secret: indexedSecret.secret,
370
- leftMostIndex: 0,
371
- rightMostIndex: WINDOW_HALF_SIZE,
372
- };
373
- } else {
374
- return {
375
- secret: indexedSecret.secret,
376
- leftMostIndex: Math.max(0, indexedSecret.index - WINDOW_HALF_SIZE),
377
- rightMostIndex: indexedSecret.index + WINDOW_HALF_SIZE,
378
- };
379
- }
380
- });
381
-
382
- // As we iterate, we track the largest index seen for a given secret so that we can later store it in the db.
383
- const newLargestIndexMapToStore: { [k: string]: number } = {};
384
-
385
- // The initial/unmodified indexes of the secrets stored in a key-value map where the key is the directional app
386
- // tagging secret.
387
- const initialIndexesMap = getInitialIndexesMap(indexedSecrets);
388
-
389
- while (secretsAndWindows.length > 0) {
390
- const preTagsForTheWholeWindow = getPreTagsForTheWindow(secretsAndWindows);
391
- const tagsForTheWholeWindow = await Promise.all(
392
- preTagsForTheWholeWindow.map(async preTag => {
393
- return SiloedTag.compute(await Tag.compute(preTag), contractAddress);
394
- }),
395
- );
396
-
397
- // We store the new largest indexes found in this iteration in the following map, to later construct
398
- // a new set of secrets and windows to fetch logs for.
399
- const newLargestIndexMapForIteration: { [k: string]: number } = {};
400
-
401
- // Fetch the private logs for the tags and iterate over them
402
- // TODO: The following conversion is unfortunate and we should most likely just type the #getPrivateLogsByTags
403
- // to accept SiloedTag[] instead of Fr[]. That would result in a large change so I didn't do it yet.
404
- const tagsForTheWholeWindowAsFr = tagsForTheWholeWindow.map(tag => tag.value);
405
- const logsByTags = await this.#getPrivateLogsByTags(tagsForTheWholeWindowAsFr);
406
- this.log.debug(`Found ${logsByTags.filter(logs => logs.length > 0).length} logs as recipient ${recipient}`, {
407
- recipient,
408
- contractName,
409
- contractAddress,
410
- });
411
-
412
- for (let logIndex = 0; logIndex < logsByTags.length; logIndex++) {
413
- const logsByTag = logsByTags[logIndex];
414
- if (logsByTag.length > 0) {
415
- // We filter out the logs that are newer than the anchor block number of the tx currently being constructed
416
- const filteredLogsByBlockNumber = logsByTag.filter(l => l.blockNumber <= maxBlockNumber);
417
-
418
- // We store the logs in capsules (to later be obtained in Noir)
419
- await this.#storePendingTaggedLogs(
420
- contractAddress,
421
- pendingTaggedLogArrayBaseSlot,
422
- recipient,
423
- filteredLogsByBlockNumber,
424
- );
425
-
426
- // We retrieve the pre-tag corresponding to the log, as we need it to evaluate whether
427
- // a new largest index has been found.
428
- const preTagCorrespondingToLog = preTagsForTheWholeWindow[logIndex];
429
- const initialIndex = initialIndexesMap[preTagCorrespondingToLog.secret.toString()];
430
-
431
- if (
432
- preTagCorrespondingToLog.index >= initialIndex &&
433
- (newLargestIndexMapForIteration[preTagCorrespondingToLog.secret.toString()] === undefined ||
434
- preTagCorrespondingToLog.index >=
435
- newLargestIndexMapForIteration[preTagCorrespondingToLog.secret.toString()])
436
- ) {
437
- // We have found a new largest index so we store it for later processing (storing it in the db + fetching
438
- // the difference of the window sets of current and the next iteration)
439
- newLargestIndexMapForIteration[preTagCorrespondingToLog.secret.toString()] =
440
- preTagCorrespondingToLog.index + 1;
441
-
442
- this.log.debug(
443
- `Incrementing index to ${
444
- preTagCorrespondingToLog.index + 1
445
- } at contract ${contractName}(${contractAddress})`,
446
- );
447
- }
448
- }
449
- }
450
-
451
- // Now, based on the new largest indexes we found, we construct a new set of secrets and windows to fetch logs
452
- // for. Note that it's very unlikely that a new log from the current window would appear between the iterations
453
- // so we fetch the logs only for the difference of the window sets.
454
- const newSecretsAndWindows = [];
455
- for (const [directionalAppTaggingSecret, newIndex] of Object.entries(newLargestIndexMapForIteration)) {
456
- const maybeIndexedSecret = indexedSecrets.find(
457
- indexedSecret => indexedSecret.secret.toString() === directionalAppTaggingSecret,
458
- );
459
- if (maybeIndexedSecret) {
460
- newSecretsAndWindows.push({
461
- secret: maybeIndexedSecret.secret,
462
- // We set the left most index to the new index to avoid fetching the same logs again
463
- leftMostIndex: newIndex,
464
- rightMostIndex: newIndex + WINDOW_HALF_SIZE,
465
- });
466
-
467
- // We store the new largest index in the map to later store it in the db.
468
- newLargestIndexMapToStore[directionalAppTaggingSecret] = newIndex;
469
- } else {
470
- throw new Error(
471
- `Secret not found for directionalAppTaggingSecret ${directionalAppTaggingSecret}. This is a bug as it should never happen!`,
472
- );
473
- }
474
- }
475
-
476
- // Now we set the new secrets and windows and proceed to the next iteration.
477
- secretsAndWindows = newSecretsAndWindows;
478
- }
479
-
480
- // At this point we have processed all the logs for the recipient so we store the last used indexes in the db.
481
- // newLargestIndexMapToStore contains "next" indexes to look for (one past the last found), so subtract 1 to get
482
- // last used.
483
- await this.recipientTaggingDataProvider.setLastUsedIndexes(
484
- Object.entries(newLargestIndexMapToStore).map(([directionalAppTaggingSecret, index]) => ({
485
- secret: DirectionalAppTaggingSecret.fromString(directionalAppTaggingSecret),
486
- index: index - 1,
487
- })),
488
- );
489
- }
490
- }
491
-
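The sliding-window search that syncTaggedLogs performs over tagging indexes can be summarized in isolation. This is a minimal sketch assuming a numeric WINDOW_HALF_SIZE and a string-keyed secret; the real code uses DirectionalAppTaggingSecret and the WINDOW_HALF_SIZE constant from '../tagging/index.js'.

// Minimal illustration only; not part of the package.
const WINDOW_HALF_SIZE = 10; // hypothetical value, for illustration

interface SecretWindow {
  secret: string; // stand-in for a DirectionalAppTaggingSecret
  leftMostIndex: number;
  rightMostIndex: number;
}

// Initial window per secret: never-used secrets start at [0, WINDOW_HALF_SIZE]; otherwise we
// look both behind and ahead of the last used index, for the reasons given in the comments above.
function initialWindow(secret: string, lastUsedIndex: number | undefined): SecretWindow {
  return lastUsedIndex === undefined
    ? { secret, leftMostIndex: 0, rightMostIndex: WINDOW_HALF_SIZE }
    : {
        secret,
        leftMostIndex: Math.max(0, lastUsedIndex - WINDOW_HALF_SIZE),
        rightMostIndex: lastUsedIndex + WINDOW_HALF_SIZE,
      };
}

// When a log is found at `foundIndex`, the next iteration only fetches the part of the window
// that has not been covered yet, starting one past the newly found index.
function nextWindow(secret: string, foundIndex: number): SecretWindow {
  const next = foundIndex + 1;
  return { secret, leftMostIndex: next, rightMostIndex: next + WINDOW_HALF_SIZE };
}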
492
- async #storePendingTaggedLogs(
493
- contractAddress: AztecAddress,
494
- capsuleArrayBaseSlot: Fr,
495
- recipient: AztecAddress,
496
- privateLogs: TxScopedL2Log[],
497
- ) {
498
- // Build all pending tagged logs upfront with their tx effects
499
- const pendingTaggedLogs = await Promise.all(
500
- privateLogs.map(async scopedLog => {
501
- // TODO(#9789): get these effects along with the log
502
- const txEffect = await this.aztecNode.getTxEffect(scopedLog.txHash);
503
- if (!txEffect) {
504
- throw new Error(`Could not find tx effect for tx hash ${scopedLog.txHash}`);
505
- }
506
-
507
- const pendingTaggedLog = new PendingTaggedLog(
508
- scopedLog.log.fields,
509
- scopedLog.txHash,
510
- txEffect.data.noteHashes,
511
- txEffect.data.nullifiers[0],
512
- recipient,
513
- );
514
-
515
- return pendingTaggedLog.toFields();
516
- }),
517
- );
518
-
519
- return this.capsuleDataProvider.appendToCapsuleArray(contractAddress, capsuleArrayBaseSlot, pendingTaggedLogs);
520
- }
521
-
522
- public async validateEnqueuedNotesAndEvents(
523
- contractAddress: AztecAddress,
524
- noteValidationRequestsArrayBaseSlot: Fr,
525
- eventValidationRequestsArrayBaseSlot: Fr,
526
- ): Promise<void> {
527
- // We read all note and event validation requests and process them all concurrently. This makes the process much
528
- // faster as we don't need to wait for the network round-trip.
529
- const noteValidationRequests = (
530
- await this.capsuleDataProvider.readCapsuleArray(contractAddress, noteValidationRequestsArrayBaseSlot)
531
- ).map(NoteValidationRequest.fromFields);
532
-
533
- const eventValidationRequests = (
534
- await this.capsuleDataProvider.readCapsuleArray(contractAddress, eventValidationRequestsArrayBaseSlot)
535
- ).map(EventValidationRequest.fromFields);
536
-
537
- const noteDeliveries = noteValidationRequests.map(request =>
538
- this.deliverNote(
539
- request.contractAddress,
540
- request.owner,
541
- request.storageSlot,
542
- request.randomness,
543
- request.noteNonce,
544
- request.content,
545
- request.noteHash,
546
- request.nullifier,
547
- request.txHash,
548
- request.recipient,
549
- ),
550
- );
551
-
552
- const eventDeliveries = eventValidationRequests.map(request =>
553
- this.deliverEvent(
554
- request.contractAddress,
555
- request.eventTypeId,
556
- request.serializedEvent,
557
- request.eventCommitment,
558
- request.txHash,
559
- request.recipient,
560
- ),
561
- );
562
-
563
- await Promise.all([...noteDeliveries, ...eventDeliveries]);
564
-
565
- // Requests are cleared once we're done.
566
- await this.capsuleDataProvider.setCapsuleArray(contractAddress, noteValidationRequestsArrayBaseSlot, []);
567
- await this.capsuleDataProvider.setCapsuleArray(contractAddress, eventValidationRequestsArrayBaseSlot, []);
568
- }
569
-
570
- async deliverNote(
571
- contractAddress: AztecAddress,
572
- owner: AztecAddress,
573
- storageSlot: Fr,
574
- randomness: Fr,
575
- noteNonce: Fr,
576
- content: Fr[],
577
- noteHash: Fr,
578
- nullifier: Fr,
579
- txHash: TxHash,
580
- recipient: AztecAddress,
581
- ): Promise<void> {
582
- // We are going to store the new note in the NoteDataProvider, which will let us later return it via `getNotes`.
583
- // There's two things we need to check before we do this however:
584
- // - we must make sure the note does actually exist in the note hash tree
585
- // - we need to check if the note has already been nullified
586
- //
587
- // Failing to do either of the above would result in circuits getting either non-existent notes and failing to
588
- // produce inclusion proofs for them, or getting nullified notes and producing duplicate nullifiers, both of which
589
- // are catastrophic failure modes.
590
- //
591
- // Note that adding a note and removing it is *not* equivalent to never adding it in the first place. A nullifier
592
- // emitted in a block that comes after note creation might result in the note being de-nullified by a chain reorg,
593
- // so we must store both the note hash and nullifier block information.
594
-
595
- // We avoid making node queries at 'latest' since we don't want to process notes or nullifiers that only exist ahead
596
- // of the locally synced state.
597
- // Note that while this technically results in historical queries, we perform it at the latest locally synced block
598
- // number which *should* be recent enough to be available, even for non-archive nodes.
599
- // Also note that the note should never be ahead of the synced block here since `fetchTaggedLogs` only processes
600
- // logs up to the synced block making this only an additional safety check.
601
- const syncedBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
602
-
603
- // By computing siloed and unique note hashes ourselves we prevent contracts from interfering with the note storage
604
- // of other contracts, which would constitute a security breach.
605
- const uniqueNoteHash = await computeUniqueNoteHash(noteNonce, await siloNoteHash(contractAddress, noteHash));
606
- const siloedNullifier = await siloNullifier(contractAddress, nullifier);
607
-
608
- const txEffect = await this.aztecNode.getTxEffect(txHash);
609
- if (!txEffect) {
610
- throw new Error(`Could not find tx effect for tx hash ${txHash}`);
611
- }
612
-
613
- if (txEffect.l2BlockNumber > syncedBlockNumber) {
614
- throw new Error(`Could not find tx effect for tx hash ${txHash} as of block number ${syncedBlockNumber}`);
615
- }
616
-
617
- const noteInTx = txEffect.data.noteHashes.some(nh => nh.equals(uniqueNoteHash));
618
- if (!noteInTx) {
619
- throw new Error(`Note hash ${noteHash} (uniqued as ${uniqueNoteHash}) is not present in tx ${txHash}`);
620
- }
621
-
622
- // We store notes by their index in the global note hash tree, which has the convenient side effect of validating
623
- // note existence in said tree. We concurrently also check if the note's nullifier exists, performing all node
624
- // queries in a single round-trip.
625
- const [[uniqueNoteHashTreeIndexInBlock], [nullifierIndex]] = await Promise.all([
626
- this.aztecNode.findLeavesIndexes(syncedBlockNumber, MerkleTreeId.NOTE_HASH_TREE, [uniqueNoteHash]),
627
- this.aztecNode.findLeavesIndexes(syncedBlockNumber, MerkleTreeId.NULLIFIER_TREE, [siloedNullifier]),
628
- ]);
629
-
630
- if (uniqueNoteHashTreeIndexInBlock === undefined) {
631
- throw new Error(
632
- `Note hash ${noteHash} (uniqued as ${uniqueNoteHash}) is not present on the tree at block ${syncedBlockNumber} (from tx ${txHash})`,
633
- );
634
- }
635
-
636
- const noteDao = new NoteDao(
637
- new Note(content),
638
- contractAddress,
639
- owner,
640
- storageSlot,
641
- randomness,
642
- noteNonce,
643
- noteHash,
644
- siloedNullifier,
645
- txHash,
646
- uniqueNoteHashTreeIndexInBlock.l2BlockNumber,
647
- uniqueNoteHashTreeIndexInBlock.l2BlockHash.toString(),
648
- uniqueNoteHashTreeIndexInBlock.data,
649
- );
650
-
651
- // The note was found by `recipient`, so we use that as the scope when storing the note.
652
- await this.noteDataProvider.addNotes([noteDao], recipient);
653
- this.log.verbose('Added note', {
654
- index: noteDao.index,
655
- contract: noteDao.contractAddress.toString(),
656
- slot: noteDao.storageSlot.toString(),
657
- noteHash: noteDao.noteHash.toString(),
658
- nullifier: noteDao.siloedNullifier.toString(),
659
- });
660
-
661
- if (nullifierIndex !== undefined) {
662
- const { data: _, ...blockHashAndNum } = nullifierIndex;
663
- await this.noteDataProvider.applyNullifiers([{ data: siloedNullifier, ...blockHashAndNum }]);
664
-
665
- this.log.verbose(`Removed just-added note`, {
666
- contract: contractAddress,
667
- slot: storageSlot,
668
- noteHash: noteHash,
669
- nullifier: siloedNullifier.toString(),
670
- });
671
- }
672
- }
673
-
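Before the removed deliverNote stores a note, it performs the two checks described in its opening comment: the siloed, uniqued note hash must exist in the note hash tree at the synced block, and its nullifier may or may not already be present. A minimal sketch of that decision, over a hypothetical tree-lookup interface, could look as follows.

// Minimal illustration only; TreeLookup is a stand-in for the node's findLeavesIndexes queries
// against the note hash and nullifier trees.
interface TreeLookup {
  findLeafIndex(blockNumber: number, value: bigint): Promise<bigint | undefined>;
}

async function checkNoteBeforeStoring(
  noteHashTree: TreeLookup,
  nullifierTree: TreeLookup,
  syncedBlockNumber: number,
  uniqueNoteHash: bigint,
  siloedNullifier: bigint,
): Promise<{ noteIndex: bigint; alreadyNullified: boolean }> {
  // Both lookups are issued concurrently, mirroring the single round-trip in the removed code.
  const [noteIndex, nullifierIndex] = await Promise.all([
    noteHashTree.findLeafIndex(syncedBlockNumber, uniqueNoteHash),
    nullifierTree.findLeafIndex(syncedBlockNumber, siloedNullifier),
  ]);
  if (noteIndex === undefined) {
    // A missing note hash would later break inclusion proofs, so this is treated as fatal.
    throw new Error(`Note hash ${uniqueNoteHash} not found in the note hash tree at block ${syncedBlockNumber}`);
  }
  // A present nullifier does not prevent storing the note: the note is stored and then marked
  // nullified, so a reorg that de-nullifies it can still be handled.
  return { noteIndex, alreadyNullified: nullifierIndex !== undefined };
}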
674
- public async bulkRetrieveLogs(
675
- contractAddress: AztecAddress,
676
- logRetrievalRequestsArrayBaseSlot: Fr,
677
- logRetrievalResponsesArrayBaseSlot: Fr,
678
- ) {
679
- // We read all log retrieval requests and process them all concurrently. This makes the process much faster as we
680
- // don't need to wait for the network round-trip.
681
- const logRetrievalRequests = (
682
- await this.capsuleDataProvider.readCapsuleArray(contractAddress, logRetrievalRequestsArrayBaseSlot)
683
- ).map(LogRetrievalRequest.fromFields);
684
-
685
- const maybeLogRetrievalResponses = await Promise.all(
686
- logRetrievalRequests.map(async request => {
687
- // TODO(#14555): remove these internal functions and have node endpoints that do this instead
688
- const [publicLog, privateLog] = await Promise.all([
689
- this.getPublicLogByTag(request.unsiloedTag, request.contractAddress),
690
- this.getPrivateLogByTag(await siloPrivateLog(request.contractAddress, request.unsiloedTag)),
691
- ]);
692
-
693
- if (publicLog !== null) {
694
- if (privateLog !== null) {
695
- throw new Error(
696
- `Found both a public and private log when searching for tag ${request.unsiloedTag} from contract ${request.contractAddress}`,
697
- );
698
- }
699
-
700
- return new LogRetrievalResponse(
701
- publicLog.logPayload,
702
- publicLog.txHash,
703
- publicLog.uniqueNoteHashesInTx,
704
- publicLog.firstNullifierInTx,
705
- );
706
- } else if (privateLog !== null) {
707
- return new LogRetrievalResponse(
708
- privateLog.logPayload,
709
- privateLog.txHash,
710
- privateLog.uniqueNoteHashesInTx,
711
- privateLog.firstNullifierInTx,
712
- );
713
- } else {
714
- return null;
715
- }
716
- }),
717
- );
718
-
719
- // Requests are cleared once we're done.
720
- await this.capsuleDataProvider.setCapsuleArray(contractAddress, logRetrievalRequestsArrayBaseSlot, []);
721
-
722
- // The responses are stored as Option<LogRetrievalResponse> in a second CapsuleArray.
723
- await this.capsuleDataProvider.setCapsuleArray(
724
- contractAddress,
725
- logRetrievalResponsesArrayBaseSlot,
726
- maybeLogRetrievalResponses.map(LogRetrievalResponse.toSerializedOption),
727
- );
728
- }
729
-
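bulkRetrieveLogs expects each tag to resolve to at most one log, either public or private. That mutually exclusive resolution can be expressed as a small helper; the generic type below is a placeholder for LogRetrievalResponse and is not part of the package.

// Minimal illustration only; not part of the package.
function resolveExclusive<T>(publicLog: T | null, privateLog: T | null, tag: string): T | null {
  if (publicLog !== null && privateLog !== null) {
    // A tag is expected to identify a single log; both kinds matching indicates an upstream bug.
    throw new Error(`Found both a public and a private log for tag ${tag}`);
  }
  return publicLog ?? privateLog;
}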
730
- async deliverEvent(
731
- contractAddress: AztecAddress,
732
- selector: EventSelector,
733
- content: Fr[],
734
- eventCommitment: Fr,
735
- txHash: TxHash,
736
- scope: AztecAddress,
737
- ): Promise<void> {
738
- // While using 'latest' block number would be fine for private events since they cannot be accessed from Aztec.nr
739
- // (and thus we're less concerned about being ahead of the synced block), we use the synced block number to
740
- // maintain consistent behavior in the PXE. Additionally, events should never be ahead of the synced block here
741
- // since `fetchTaggedLogs` only processes logs up to the synced block.
742
- const [syncedBlockHeader, siloedEventCommitment, txEffect] = await Promise.all([
743
- this.anchorBlockDataProvider.getBlockHeader(),
744
- siloNullifier(contractAddress, eventCommitment),
745
- this.aztecNode.getTxEffect(txHash),
746
- ]);
747
-
748
- const syncedBlockNumber = syncedBlockHeader.getBlockNumber();
749
-
750
- if (!txEffect) {
751
- throw new Error(`Could not find tx effect for tx hash ${txHash}`);
752
- }
753
-
754
- if (txEffect.l2BlockNumber > syncedBlockNumber) {
755
- throw new Error(`Could not find tx effect for tx hash ${txHash} as of block number ${syncedBlockNumber}`);
756
- }
757
-
758
- const eventInTx = txEffect.data.nullifiers.some(n => n.equals(siloedEventCommitment));
759
- if (!eventInTx) {
760
- throw new Error(
761
- `Event commitment ${eventCommitment} (siloed as ${siloedEventCommitment}) is not present in tx ${txHash}`,
762
- );
763
- }
764
-
765
- const [nullifierIndex] = await this.aztecNode.findLeavesIndexes(syncedBlockNumber, MerkleTreeId.NULLIFIER_TREE, [
766
- siloedEventCommitment,
767
- ]);
768
-
769
- if (nullifierIndex === undefined) {
770
- throw new Error(
771
- `Event commitment ${eventCommitment} (siloed as ${siloedEventCommitment}) is not present on the nullifier tree at block ${syncedBlockNumber} (from tx ${txHash})`,
772
- );
773
- }
774
-
775
- return this.privateEventDataProvider.storePrivateEventLog(
776
- selector,
777
- content,
778
- Number(nullifierIndex.data), // Index of the event commitment in the nullifier tree
779
- {
780
- contractAddress,
781
- scope,
782
- txHash,
783
- l2BlockNumber: nullifierIndex.l2BlockNumber, // Block number in which the event was emitted
784
- l2BlockHash: nullifierIndex.l2BlockHash, // Block hash in which the event was emitted
785
- },
786
- );
787
- }
788
-
789
- // TODO(#14555): delete this function and implement this behavior in the node instead
790
- async getPublicLogByTag(tag: Fr, contractAddress: AztecAddress): Promise<PublicLogWithTxData | null> {
791
- const logs = await this.#getPublicLogsByTagsFromContract([tag], contractAddress);
792
- const logsForTag = logs[0];
793
-
794
- this.log.debug(`Got ${logsForTag.length} public logs for tag ${tag}`);
795
-
796
- if (logsForTag.length == 0) {
797
- return null;
798
- } else if (logsForTag.length > 1) {
799
- // TODO(#11627): handle this case
800
- throw new Error(
801
- `Got ${logsForTag.length} logs for tag ${tag} and contract ${contractAddress.toString()}. getPublicLogByTag currently only supports a single log per tag`,
802
- );
803
- }
804
-
805
- const scopedLog = logsForTag[0];
806
-
807
- // getLogsByTag doesn't have all of the information that we need (notably note hashes and the first nullifier), so
808
- // we need to make a second call to the node for `getTxEffect`.
809
- // TODO(#9789): bundle this information in the `getLogsByTag` call.
810
- const txEffect = await this.aztecNode.getTxEffect(scopedLog.txHash);
811
- if (txEffect == undefined) {
812
- throw new Error(`Unexpected: failed to retrieve tx effects for tx ${scopedLog.txHash} which is known to exist`);
813
- }
814
-
815
- return new PublicLogWithTxData(
816
- scopedLog.log.getEmittedFieldsWithoutTag(),
817
- scopedLog.txHash,
818
- txEffect.data.noteHashes,
819
- txEffect.data.nullifiers[0],
820
- );
821
- }
822
-
823
- // TODO(#14555): delete this function and implement this behavior in the node instead
824
- async getPrivateLogByTag(siloedTag: Fr): Promise<PrivateLogWithTxData | null> {
825
- const logs = await this.#getPrivateLogsByTags([siloedTag]);
826
- const logsForTag = logs[0];
827
-
828
- this.log.debug(`Got ${logsForTag.length} private logs for tag ${siloedTag}`);
829
-
830
- if (logsForTag.length == 0) {
831
- return null;
832
- } else if (logsForTag.length > 1) {
833
- // TODO(#11627): handle this case
834
- throw new Error(
835
- `Got ${logsForTag.length} logs for tag ${siloedTag}. getPrivateLogByTag currently only supports a single log per tag`,
836
- );
837
- }
838
-
839
- const scopedLog = logsForTag[0];
840
-
841
- // getLogsByTag doesn't have all of the information that we need (notably note hashes and the first nullifier), so
842
- // we need to make a second call to the node for `getTxEffect`.
843
- // TODO(#9789): bundle this information in the `getLogsByTag` call.
844
- const txEffect = await this.aztecNode.getTxEffect(scopedLog.txHash);
845
- if (txEffect == undefined) {
846
- throw new Error(`Unexpected: failed to retrieve tx effects for tx ${scopedLog.txHash} which is known to exist`);
847
- }
848
-
849
- return new PrivateLogWithTxData(
850
- scopedLog.log.getEmittedFieldsWithoutTag(),
851
- scopedLog.txHash,
852
- txEffect.data.noteHashes,
853
- txEffect.data.nullifiers[0],
854
- );
855
- }
856
-
857
- /**
858
- * Looks for nullifiers of active contract notes and marks them as nullified if a nullifier is found.
859
- *
860
- * Fetches notes from the NoteDataProvider and checks which nullifiers are present in the
861
- * onchain nullifier Merkle tree - up to the latest locally synced block. We use the
862
- * locally synced block instead of querying the chain's 'latest' block to ensure correctness:
863
- * notes are only marked nullified once their corresponding nullifier has been included in a
864
- * block up to which the PXE has synced.
865
- * This allows recent nullifications to be processed even if the node is not an archive node.
866
- *
867
- * @param contractAddress - The contract whose notes should be checked and nullified.
868
- */
869
- public async syncNoteNullifiers(contractAddress: AztecAddress) {
870
- this.log.verbose('Searching for nullifiers of known notes', { contract: contractAddress });
871
-
872
- const syncedBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
873
-
874
- const contractNotes = await this.noteDataProvider.getNotes({ contractAddress });
875
-
876
- if (contractNotes.length === 0) {
877
- return;
878
- }
879
-
880
- const nullifiersToCheck = contractNotes.map(note => note.siloedNullifier);
881
- const nullifierBatches = nullifiersToCheck.reduce(
882
- (acc, nullifier) => {
883
- if (acc[acc.length - 1].length < MAX_RPC_LEN) {
884
- acc[acc.length - 1].push(nullifier);
885
- } else {
886
- acc.push([nullifier]);
887
- }
888
- return acc;
889
- },
890
- [[]] as Fr[][],
891
- );
892
- const nullifierIndexes = (
893
- await Promise.all(
894
- nullifierBatches.map(batch =>
895
- this.aztecNode.findLeavesIndexes(syncedBlockNumber, MerkleTreeId.NULLIFIER_TREE, batch),
896
- ),
897
- )
898
- ).flat();
899
-
900
- const foundNullifiers = nullifiersToCheck
901
- .map((nullifier, i) => {
902
- if (nullifierIndexes[i] !== undefined) {
903
- return { ...nullifierIndexes[i], data: nullifier } as DataInBlock<Fr>;
904
- }
905
- })
906
- .filter(nullifier => nullifier !== undefined) as DataInBlock<Fr>[];
907
-
908
- const nullifiedNotes = await this.noteDataProvider.applyNullifiers(foundNullifiers);
909
- nullifiedNotes.forEach(noteDao => {
910
- this.log.verbose(`Removed note for contract ${noteDao.contractAddress} at slot ${noteDao.storageSlot}`, {
911
- contract: noteDao.contractAddress,
912
- slot: noteDao.storageSlot,
913
- nullifier: noteDao.siloedNullifier.toString(),
914
- });
915
- });
916
- }
917
-
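syncNoteNullifiers splits its nullifier queries into batches so that a single findLeavesIndexes call never carries more than MAX_RPC_LEN leaves. The reduce above can also be written as a plain chunking helper; the MAX_RPC_LEN value shown here is hypothetical, the real constant comes from '@aztec/stdlib/interfaces/client'.

// Minimal illustration only; not part of the package.
const MAX_RPC_LEN = 1000; // hypothetical value, for illustration

function toBatches<T>(items: T[], maxBatchSize: number = MAX_RPC_LEN): T[][] {
  const batches: T[][] = [];
  for (let i = 0; i < items.length; i += maxBatchSize) {
    batches.push(items.slice(i, i + maxBatchSize));
  }
  return batches;
}

// Each batch can then be queried concurrently and the per-leaf results flattened back in order,
// which is what the removed code does with findLeavesIndexes and Promise.all.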
918
- storeCapsule(contractAddress: AztecAddress, slot: Fr, capsule: Fr[]): Promise<void> {
919
- return this.capsuleDataProvider.storeCapsule(contractAddress, slot, capsule);
920
- }
921
-
922
- loadCapsule(contractAddress: AztecAddress, slot: Fr): Promise<Fr[] | null> {
923
- return this.capsuleDataProvider.loadCapsule(contractAddress, slot);
924
- }
925
-
926
- deleteCapsule(contractAddress: AztecAddress, slot: Fr): Promise<void> {
927
- return this.capsuleDataProvider.deleteCapsule(contractAddress, slot);
928
- }
929
-
930
- copyCapsule(contractAddress: AztecAddress, srcSlot: Fr, dstSlot: Fr, numEntries: number): Promise<void> {
931
- return this.capsuleDataProvider.copyCapsule(contractAddress, srcSlot, dstSlot, numEntries);
932
- }
933
-
934
- async getSharedSecret(address: AztecAddress, ephPk: Point): Promise<Point> {
935
- // TODO(#12656): return an app-siloed secret
936
- const recipientCompleteAddress = await this.getCompleteAddress(address);
937
- const ivskM = await this.keyStore.getMasterSecretKey(
938
- recipientCompleteAddress.publicKeys.masterIncomingViewingPublicKey,
939
- );
940
- const addressSecret = await computeAddressSecret(await recipientCompleteAddress.getPreaddress(), ivskM);
941
- return deriveEcdhSharedSecret(addressSecret, ephPk);
942
- }
943
-
944
- // TODO(#12656): Make this a public function on the AztecNode interface and remove the original getLogsByTags. This
945
- // was not done yet as we were unsure about the API and we didn't want to introduce a breaking change.
946
- async #getPrivateLogsByTags(tags: Fr[]): Promise<TxScopedL2Log[][]> {
947
- const allLogs = await this.aztecNode.getLogsByTags(tags);
948
- return allLogs.map(logs => logs.filter(log => !log.isFromPublic));
949
- }
950
-
951
- // TODO(#12656): Make this a public function on the AztecNode interface and remove the original getLogsByTags. This
952
- // was not done yet as we were unsure about the API and we didn't want to introduce a breaking change.
953
- async #getPublicLogsByTagsFromContract(tags: Fr[], contractAddress: AztecAddress): Promise<TxScopedL2Log[][]> {
954
- const allLogs = await this.aztecNode.getLogsByTags(tags);
955
- const allPublicLogs = allLogs.map(logs => logs.filter(log => log.isFromPublic));
956
- return allPublicLogs.map(logs =>
957
- logs.filter(log => (log.log as PublicLog).contractAddress.equals(contractAddress)),
958
- );
959
- }
960
-
961
- getStats(): ExecutionStats {
962
- const nodeRPCCalls =
963
- typeof (this.aztecNode as ProxiedNode).getStats === 'function' ? (this.aztecNode as ProxiedNode).getStats() : {};
964
-
965
- return { nodeRPCCalls };
966
- }
967
- }