@aztec/pxe 3.0.0-nightly.20251222 → 3.0.0-nightly.20251223

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108) hide show
  1. package/dest/contract_function_simulator/contract_function_simulator.d.ts +31 -6
  2. package/dest/contract_function_simulator/contract_function_simulator.d.ts.map +1 -1
  3. package/dest/contract_function_simulator/contract_function_simulator.js +35 -11
  4. package/dest/contract_function_simulator/oracle/private_execution.d.ts +6 -8
  5. package/dest/contract_function_simulator/oracle/private_execution.d.ts.map +1 -1
  6. package/dest/contract_function_simulator/oracle/private_execution.js +10 -9
  7. package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts +13 -4
  8. package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts.map +1 -1
  9. package/dest/contract_function_simulator/oracle/private_execution_oracle.js +18 -13
  10. package/dest/contract_function_simulator/oracle/utility_execution_oracle.d.ts +44 -6
  11. package/dest/contract_function_simulator/oracle/utility_execution_oracle.d.ts.map +1 -1
  12. package/dest/contract_function_simulator/oracle/utility_execution_oracle.js +135 -30
  13. package/dest/contract_function_simulator/proxied_contract_data_source.d.ts +2 -2
  14. package/dest/contract_function_simulator/proxied_contract_data_source.d.ts.map +1 -1
  15. package/dest/contract_function_simulator/proxied_contract_data_source.js +18 -0
  16. package/dest/debug/pxe_debug_utils.d.ts +3 -2
  17. package/dest/debug/pxe_debug_utils.d.ts.map +1 -1
  18. package/dest/entrypoints/client/bundle/index.d.ts +1 -2
  19. package/dest/entrypoints/client/bundle/index.d.ts.map +1 -1
  20. package/dest/entrypoints/client/bundle/index.js +0 -1
  21. package/dest/entrypoints/client/lazy/index.d.ts +1 -2
  22. package/dest/entrypoints/client/lazy/index.d.ts.map +1 -1
  23. package/dest/entrypoints/client/lazy/index.js +0 -1
  24. package/dest/entrypoints/server/index.d.ts +2 -2
  25. package/dest/entrypoints/server/index.d.ts.map +1 -1
  26. package/dest/entrypoints/server/index.js +1 -1
  27. package/dest/events/event_service.d.ts +15 -0
  28. package/dest/events/event_service.d.ts.map +1 -0
  29. package/dest/events/event_service.js +47 -0
  30. package/dest/events/private_event_filter_validator.d.ts +3 -2
  31. package/dest/events/private_event_filter_validator.d.ts.map +1 -1
  32. package/dest/logs/log_service.d.ts +43 -0
  33. package/dest/logs/log_service.d.ts.map +1 -0
  34. package/dest/logs/log_service.js +258 -0
  35. package/dest/notes/index.d.ts +2 -0
  36. package/dest/notes/index.d.ts.map +1 -0
  37. package/dest/notes/index.js +1 -0
  38. package/dest/notes/note_service.d.ts +48 -0
  39. package/dest/notes/note_service.d.ts.map +1 -0
  40. package/dest/notes/note_service.js +152 -0
  41. package/dest/private_kernel/private_kernel_oracle_impl.d.ts +2 -2
  42. package/dest/private_kernel/private_kernel_oracle_impl.d.ts.map +1 -1
  43. package/dest/public_storage/public_storage_service.d.ts +24 -0
  44. package/dest/public_storage/public_storage_service.d.ts.map +1 -0
  45. package/dest/public_storage/public_storage_service.js +26 -0
  46. package/dest/pxe.d.ts +1 -1
  47. package/dest/pxe.d.ts.map +1 -1
  48. package/dest/pxe.js +3 -5
  49. package/dest/storage/capsule_data_provider/capsule_data_provider.d.ts +33 -1
  50. package/dest/storage/capsule_data_provider/capsule_data_provider.d.ts.map +1 -1
  51. package/dest/storage/capsule_data_provider/capsule_data_provider.js +32 -4
  52. package/dest/storage/contract_data_provider/contract_data_provider.d.ts +2 -1
  53. package/dest/storage/contract_data_provider/contract_data_provider.d.ts.map +1 -1
  54. package/dest/storage/contract_data_provider/contract_data_provider.js +11 -0
  55. package/dest/storage/tagging_data_provider/sender_tagging_data_provider.js +3 -3
  56. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts +14 -0
  57. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts.map +1 -0
  58. package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.js +99 -0
  59. package/dest/tagging/recipient_sync/new_recipient_tagging_data_provider.d.ts +21 -0
  60. package/dest/tagging/recipient_sync/new_recipient_tagging_data_provider.d.ts.map +1 -0
  61. package/dest/tagging/recipient_sync/new_recipient_tagging_data_provider.js +42 -0
  62. package/dest/tagging/recipient_sync/utils/find_highest_indexes.d.ts +12 -0
  63. package/dest/tagging/recipient_sync/utils/find_highest_indexes.d.ts.map +1 -0
  64. package/dest/tagging/recipient_sync/utils/find_highest_indexes.js +20 -0
  65. package/dest/tagging/recipient_sync/utils/load_logs_for_range.d.ts +14 -0
  66. package/dest/tagging/recipient_sync/utils/load_logs_for_range.d.ts.map +1 -0
  67. package/dest/tagging/recipient_sync/utils/load_logs_for_range.js +32 -0
  68. package/dest/tagging/sync/sync_sender_tagging_indexes.d.ts +2 -2
  69. package/dest/tagging/sync/sync_sender_tagging_indexes.d.ts.map +1 -1
  70. package/dest/tagging/sync/sync_sender_tagging_indexes.js +3 -3
  71. package/dest/tree_membership/tree_membership_service.d.ts +52 -0
  72. package/dest/tree_membership/tree_membership_service.d.ts.map +1 -0
  73. package/dest/tree_membership/tree_membership_service.js +84 -0
  74. package/package.json +16 -16
  75. package/src/contract_function_simulator/contract_function_simulator.ts +59 -10
  76. package/src/contract_function_simulator/oracle/private_execution.ts +13 -10
  77. package/src/contract_function_simulator/oracle/private_execution_oracle.ts +80 -20
  78. package/src/contract_function_simulator/oracle/utility_execution_oracle.ts +199 -38
  79. package/src/contract_function_simulator/proxied_contract_data_source.ts +18 -1
  80. package/src/debug/pxe_debug_utils.ts +2 -1
  81. package/src/entrypoints/client/bundle/index.ts +0 -1
  82. package/src/entrypoints/client/lazy/index.ts +0 -1
  83. package/src/entrypoints/server/index.ts +1 -1
  84. package/src/events/event_service.ts +77 -0
  85. package/src/events/private_event_filter_validator.ts +2 -1
  86. package/src/logs/log_service.ts +386 -0
  87. package/src/notes/index.ts +1 -0
  88. package/src/notes/note_service.ts +200 -0
  89. package/src/private_kernel/private_kernel_oracle_impl.ts +1 -1
  90. package/src/public_storage/public_storage_service.ts +33 -0
  91. package/src/pxe.ts +13 -11
  92. package/src/storage/capsule_data_provider/capsule_data_provider.ts +32 -0
  93. package/src/storage/contract_data_provider/contract_data_provider.ts +15 -0
  94. package/src/storage/tagging_data_provider/sender_tagging_data_provider.ts +3 -3
  95. package/src/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.ts +129 -0
  96. package/src/tagging/recipient_sync/new_recipient_tagging_data_provider.ts +53 -0
  97. package/src/tagging/recipient_sync/utils/find_highest_indexes.ts +34 -0
  98. package/src/tagging/recipient_sync/utils/load_logs_for_range.ts +47 -0
  99. package/src/tagging/sync/sync_sender_tagging_indexes.ts +3 -3
  100. package/src/tree_membership/tree_membership_service.ts +112 -0
  101. package/dest/contract_function_simulator/execution_data_provider.d.ts +0 -248
  102. package/dest/contract_function_simulator/execution_data_provider.d.ts.map +0 -1
  103. package/dest/contract_function_simulator/execution_data_provider.js +0 -14
  104. package/dest/contract_function_simulator/pxe_oracle_interface.d.ts +0 -113
  105. package/dest/contract_function_simulator/pxe_oracle_interface.d.ts.map +0 -1
  106. package/dest/contract_function_simulator/pxe_oracle_interface.js +0 -648
  107. package/src/contract_function_simulator/execution_data_provider.ts +0 -322
  108. package/src/contract_function_simulator/pxe_oracle_interface.ts +0 -967
@@ -1,648 +0,0 @@
1
- import { createLogger } from '@aztec/foundation/log';
2
- import { computeUniqueNoteHash, siloNoteHash, siloNullifier, siloPrivateLog } from '@aztec/stdlib/hash';
3
- import { MAX_RPC_LEN } from '@aztec/stdlib/interfaces/client';
4
- import { computeAddressSecret } from '@aztec/stdlib/keys';
5
- import { PendingTaggedLog, PrivateLogWithTxData, PublicLogWithTxData, deriveEcdhSharedSecret } from '@aztec/stdlib/logs';
6
- import { getNonNullifiedL1ToL2MessageWitness } from '@aztec/stdlib/messaging';
7
- import { Note } from '@aztec/stdlib/note';
8
- import { NoteDao } from '@aztec/stdlib/note';
9
- import { MerkleTreeId } from '@aztec/stdlib/trees';
10
- import { MessageLoadOracleInputs } from '../contract_function_simulator/oracle/message_load_oracle_inputs.js';
11
- import { ORACLE_VERSION } from '../oracle_version.js';
12
- import { DirectionalAppTaggingSecret, SiloedTag, Tag, WINDOW_HALF_SIZE, getInitialIndexesMap, getPreTagsForTheWindow } from '../tagging/index.js';
13
- import { EventValidationRequest } from './noir-structs/event_validation_request.js';
14
- import { LogRetrievalRequest } from './noir-structs/log_retrieval_request.js';
15
- import { LogRetrievalResponse } from './noir-structs/log_retrieval_response.js';
16
- import { NoteValidationRequest } from './noir-structs/note_validation_request.js';
17
- /**
18
- * A data layer that provides and stores information needed for simulating/proving a transaction.
19
- */ export class PXEOracleInterface {
20
- aztecNode;
21
- keyStore;
22
- contractDataProvider;
23
- noteDataProvider;
24
- capsuleDataProvider;
25
- anchorBlockDataProvider;
26
- senderTaggingDataProvider;
27
- recipientTaggingDataProvider;
28
- addressDataProvider;
29
- privateEventDataProvider;
30
- log;
31
- // Note: The Aztec node and senderDataProvider are exposed publicly since PXEOracleInterface will be deprecated soon
32
- // (issue #17776). When refactoring tagging, it made sense to align with this future change by moving the sender
33
- // tagging index sync functionality elsewhere. This required exposing these two properties since there is currently
34
- // no alternative way to access them in the PrivateExecutionOracle.
35
- constructor(aztecNode, keyStore, contractDataProvider, noteDataProvider, capsuleDataProvider, anchorBlockDataProvider, senderTaggingDataProvider, recipientTaggingDataProvider, addressDataProvider, privateEventDataProvider, log = createLogger('pxe:pxe_oracle_interface')){
36
- this.aztecNode = aztecNode;
37
- this.keyStore = keyStore;
38
- this.contractDataProvider = contractDataProvider;
39
- this.noteDataProvider = noteDataProvider;
40
- this.capsuleDataProvider = capsuleDataProvider;
41
- this.anchorBlockDataProvider = anchorBlockDataProvider;
42
- this.senderTaggingDataProvider = senderTaggingDataProvider;
43
- this.recipientTaggingDataProvider = recipientTaggingDataProvider;
44
- this.addressDataProvider = addressDataProvider;
45
- this.privateEventDataProvider = privateEventDataProvider;
46
- this.log = log;
47
- }
48
- getKeyValidationRequest(pkMHash, contractAddress) {
49
- return this.keyStore.getKeyValidationRequest(pkMHash, contractAddress);
50
- }
51
- async getCompleteAddress(account) {
52
- const completeAddress = await this.addressDataProvider.getCompleteAddress(account);
53
- if (!completeAddress) {
54
- throw new Error(`No public key registered for address ${account}.
55
- Register it by calling pxe.addAccount(...).\nSee docs for context: https://docs.aztec.network/developers/resources/debugging/aztecnr-errors#simulation-error-no-public-key-registered-for-address-0x0-register-it-by-calling-pxeregisterrecipient-or-pxeregisteraccount`);
56
- }
57
- return completeAddress;
58
- }
59
- async getContractInstance(address) {
60
- const instance = await this.contractDataProvider.getContractInstance(address);
61
- if (!instance) {
62
- throw new Error(`No contract instance found for address ${address.toString()}`);
63
- }
64
- return instance;
65
- }
66
- async getNotes(contractAddress, owner, storageSlot, status, scopes) {
67
- const noteDaos = await this.noteDataProvider.getNotes({
68
- contractAddress,
69
- owner,
70
- storageSlot,
71
- status,
72
- scopes
73
- });
74
- return noteDaos.map(({ contractAddress, owner, storageSlot, randomness, noteNonce, note, noteHash, siloedNullifier, index })=>({
75
- contractAddress,
76
- owner,
77
- storageSlot,
78
- randomness,
79
- noteNonce,
80
- note,
81
- noteHash,
82
- siloedNullifier,
83
- // PXE can use this index to get full MembershipWitness
84
- index
85
- }));
86
- }
87
- async getFunctionArtifact(contractAddress, selector) {
88
- const artifact = await this.contractDataProvider.getFunctionArtifact(contractAddress, selector);
89
- if (!artifact) {
90
- throw new Error(`Function artifact not found for contract ${contractAddress} and selector ${selector}.`);
91
- }
92
- const debug = await this.contractDataProvider.getFunctionDebugMetadata(contractAddress, selector);
93
- return {
94
- ...artifact,
95
- debug
96
- };
97
- }
98
- /**
99
- * Fetches a message from the db, given its key.
100
- * @param contractAddress - Address of a contract by which the message was emitted.
101
- * @param messageHash - Hash of the message.
102
- * @param secret - Secret used to compute a nullifier.
103
- * @dev Contract address and secret are only used to compute the nullifier to get non-nullified messages
104
- * @returns The l1 to l2 membership witness (index of message in the tree and sibling path).
105
- */ async getL1ToL2MembershipWitness(contractAddress, messageHash, secret) {
106
- const [messageIndex, siblingPath] = await getNonNullifiedL1ToL2MessageWitness(this.aztecNode, contractAddress, messageHash, secret);
107
- // Assuming messageIndex is what you intended to use for the index in MessageLoadOracleInputs
108
- return new MessageLoadOracleInputs(messageIndex, siblingPath);
109
- }
110
- async getNullifierIndex(nullifier) {
111
- return await this.#findLeafIndex('latest', MerkleTreeId.NULLIFIER_TREE, nullifier);
112
- }
113
- async #findLeafIndex(blockNumber, treeId, leafValue) {
114
- const [leafIndex] = await this.aztecNode.findLeavesIndexes(blockNumber, treeId, [
115
- leafValue
116
- ]);
117
- return leafIndex?.data;
118
- }
119
- async getMembershipWitness(blockNumber, treeId, leafValue) {
120
- const witness = await this.#tryGetMembershipWitness(blockNumber, treeId, leafValue);
121
- if (!witness) {
122
- throw new Error(`Leaf value ${leafValue} not found in tree ${MerkleTreeId[treeId]} at block ${blockNumber}`);
123
- }
124
- return witness;
125
- }
126
- async #tryGetMembershipWitness(blockNumber, treeId, value) {
127
- switch(treeId){
128
- case MerkleTreeId.NULLIFIER_TREE:
129
- return (await this.aztecNode.getNullifierMembershipWitness(blockNumber, value))?.withoutPreimage().toFields();
130
- case MerkleTreeId.NOTE_HASH_TREE:
131
- return (await this.aztecNode.getNoteHashMembershipWitness(blockNumber, value))?.toFields();
132
- case MerkleTreeId.PUBLIC_DATA_TREE:
133
- return (await this.aztecNode.getPublicDataWitness(blockNumber, value))?.withoutPreimage().toFields();
134
- case MerkleTreeId.ARCHIVE:
135
- return (await this.aztecNode.getArchiveMembershipWitness(blockNumber, value))?.toFields();
136
- default:
137
- throw new Error('Not implemented');
138
- }
139
- }
140
- async getNullifierMembershipWitnessAtLatestBlock(nullifier) {
141
- const blockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
142
- return this.getNullifierMembershipWitness(blockNumber, nullifier);
143
- }
144
- getNullifierMembershipWitness(blockNumber, nullifier) {
145
- return this.aztecNode.getNullifierMembershipWitness(blockNumber, nullifier);
146
- }
147
- async getLowNullifierMembershipWitness(blockNumber, nullifier) {
148
- const anchorBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
149
- if (blockNumber !== 'latest' && blockNumber > anchorBlockNumber) {
150
- throw new Error(`Block number ${blockNumber} is higher than current block ${anchorBlockNumber}`);
151
- }
152
- return this.aztecNode.getLowNullifierMembershipWitness(blockNumber, nullifier);
153
- }
154
- async getBlock(blockNumber) {
155
- const anchorBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
156
- if (blockNumber !== 'latest' && blockNumber > anchorBlockNumber) {
157
- throw new Error(`Block number ${blockNumber} is higher than current block ${anchorBlockNumber}`);
158
- }
159
- return await this.aztecNode.getBlock(blockNumber);
160
- }
161
- async getPublicDataWitness(blockNumber, leafSlot) {
162
- const anchorBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
163
- if (blockNumber !== 'latest' && blockNumber > anchorBlockNumber) {
164
- throw new Error(`Block number ${blockNumber} is higher than current block ${anchorBlockNumber}`);
165
- }
166
- return await this.aztecNode.getPublicDataWitness(blockNumber, leafSlot);
167
- }
168
- async getPublicStorageAt(blockNumber, contract, slot) {
169
- const anchorBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
170
- if (blockNumber !== 'latest' && blockNumber > anchorBlockNumber) {
171
- throw new Error(`Block number ${blockNumber} is higher than current block ${anchorBlockNumber}`);
172
- }
173
- return await this.aztecNode.getPublicStorageAt(blockNumber, contract, slot);
174
- }
175
- assertCompatibleOracleVersion(version) {
176
- if (version !== ORACLE_VERSION) {
177
- throw new Error(`Incompatible oracle version. Expected version ${ORACLE_VERSION}, got ${version}.`);
178
- }
179
- }
180
- getDebugFunctionName(contractAddress, selector) {
181
- return this.contractDataProvider.getDebugFunctionName(contractAddress, selector);
182
- }
183
- /**
184
- * Returns the full contents of your address book.
185
- * This is used when calculating tags for incoming notes by deriving the shared secret, the contract-siloed tagging secret, and
186
- * finally the index specified tag. We will then query the node with this tag for each address in the address book.
187
- * @returns The full list of the users contact addresses.
188
- */ getSenders() {
189
- return this.recipientTaggingDataProvider.getSenderAddresses();
190
- }
191
- async calculateDirectionalAppTaggingSecret(contractAddress, sender, recipient) {
192
- const senderCompleteAddress = await this.getCompleteAddress(sender);
193
- const senderIvsk = await this.keyStore.getMasterIncomingViewingSecretKey(sender);
194
- return DirectionalAppTaggingSecret.compute(senderCompleteAddress, senderIvsk, recipient, contractAddress, recipient);
195
- }
196
- /**
197
- * Returns the last used tagging indexes along with the directional app tagging secrets for a given recipient and all
198
- * the senders in the address book.
199
- * This method should be exposed as an oracle call to allow aztec.nr to perform the orchestration
200
- * of the syncTaggedLogs and processTaggedLogs methods. However, it is not possible to do so at the moment,
201
- * so we're keeping it private for now.
202
- * @param contractAddress - The contract address to silo the secret for
203
- * @param recipient - The address receiving the notes
204
- * @returns A list of directional app tagging secrets along with the last used tagging indexes. If the corresponding
205
- * secret was never used, the index is undefined.
206
- * TODO(#17775): The naming here is broken as the function name does not reflect the return type. Make sure this gets
207
- * fixed when implementing the linked issue.
208
- */ async #getLastUsedTaggingIndexesForSenders(contractAddress, recipient) {
209
- const recipientCompleteAddress = await this.getCompleteAddress(recipient);
210
- const recipientIvsk = await this.keyStore.getMasterIncomingViewingSecretKey(recipient);
211
- // We implicitly add all PXE accounts as senders, this helps us decrypt tags on notes that we send to ourselves
212
- // (recipient = us, sender = us)
213
- const senders = [
214
- ...await this.recipientTaggingDataProvider.getSenderAddresses(),
215
- ...await this.keyStore.getAccounts()
216
- ].filter((address, index, self)=>index === self.findIndex((otherAddress)=>otherAddress.equals(address)));
217
- const secrets = await Promise.all(senders.map((contact)=>{
218
- return DirectionalAppTaggingSecret.compute(recipientCompleteAddress, recipientIvsk, contact, contractAddress, recipient);
219
- }));
220
- const indexes = await this.recipientTaggingDataProvider.getLastUsedIndexes(secrets);
221
- if (indexes.length !== secrets.length) {
222
- throw new Error('Indexes and directional app tagging secrets have different lengths');
223
- }
224
- return secrets.map((secret, i)=>({
225
- secret,
226
- index: indexes[i]
227
- }));
228
- }
229
- // TODO(#17775): Replace this implementation of this function with one implementing an approach similar
230
- // to syncSenderTaggingIndexes. Not done yet due to re-prioritization to devex and this doesn't directly affect
231
- // devex.
232
- async syncTaggedLogs(contractAddress, pendingTaggedLogArrayBaseSlot, scopes) {
233
- this.log.verbose('Searching for tagged logs', {
234
- contract: contractAddress
235
- });
236
- const maxBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
237
- // Ideally this algorithm would be implemented in noir, exposing its building blocks as oracles.
238
- // However it is impossible at the moment due to the language not supporting nested slices.
239
- // This nesting is necessary because for a given set of tags we don't
240
- // know how many logs we will get back. Furthermore, these logs are of undetermined
241
- // length, since we don't really know the note they correspond to until we decrypt them.
242
- const recipients = scopes ? scopes : await this.keyStore.getAccounts();
243
- const contractName = await this.contractDataProvider.getDebugContractName(contractAddress);
244
- for (const recipient of recipients){
245
- // Get all the secrets for the recipient and sender pairs (#9365)
246
- const indexedSecrets = await this.#getLastUsedTaggingIndexesForSenders(contractAddress, recipient);
247
- // We fetch logs for a window of indexes in a range:
248
- // <latest_log_index - WINDOW_HALF_SIZE, latest_log_index + WINDOW_HALF_SIZE>.
249
- //
250
- // We use this window approach because it could happen that a sender might have messed up and inadvertently
251
- // incremented their index without us getting any logs (for example, in case of a revert). If we stopped looking
252
- // for logs the first time we don't receive any logs for a tag, we might never receive anything from that sender again.
253
- // Also there's a possibility that we have advanced our index, but the sender has reused it, so we might have missed
254
- // some logs. For these reasons, we have to look both back and ahead of the stored index.
255
- let secretsAndWindows = indexedSecrets.map((indexedSecret)=>{
256
- if (indexedSecret.index === undefined) {
257
- return {
258
- secret: indexedSecret.secret,
259
- leftMostIndex: 0,
260
- rightMostIndex: WINDOW_HALF_SIZE
261
- };
262
- } else {
263
- return {
264
- secret: indexedSecret.secret,
265
- leftMostIndex: Math.max(0, indexedSecret.index - WINDOW_HALF_SIZE),
266
- rightMostIndex: indexedSecret.index + WINDOW_HALF_SIZE
267
- };
268
- }
269
- });
270
- // As we iterate we store the largest index we have seen for a given secret to later on store it in the db.
271
- const newLargestIndexMapToStore = {};
272
- // The initial/unmodified indexes of the secrets stored in a key-value map where key is the directional app
273
- // tagging secret.
274
- const initialIndexesMap = getInitialIndexesMap(indexedSecrets);
275
- while(secretsAndWindows.length > 0){
276
- const preTagsForTheWholeWindow = getPreTagsForTheWindow(secretsAndWindows);
277
- const tagsForTheWholeWindow = await Promise.all(preTagsForTheWholeWindow.map(async (preTag)=>{
278
- return SiloedTag.compute(await Tag.compute(preTag), contractAddress);
279
- }));
280
- // We store the new largest indexes we find in the iteration in the following map to later on construct
281
- // a new set of secrets and windows to fetch logs for.
282
- const newLargestIndexMapForIteration = {};
283
- // Fetch the private logs for the tags and iterate over them
284
- // TODO: The following conversion is unfortunate and we should most likely just type the #getPrivateLogsByTags
285
- // to accept SiloedTag[] instead of Fr[]. That would result in a large change so I didn't do it yet.
286
- const tagsForTheWholeWindowAsFr = tagsForTheWholeWindow.map((tag)=>tag.value);
287
- const logsByTags = await this.#getPrivateLogsByTags(tagsForTheWholeWindowAsFr);
288
- this.log.debug(`Found ${logsByTags.filter((logs)=>logs.length > 0).length} logs as recipient ${recipient}`, {
289
- recipient,
290
- contractName,
291
- contractAddress
292
- });
293
- for(let logIndex = 0; logIndex < logsByTags.length; logIndex++){
294
- const logsByTag = logsByTags[logIndex];
295
- if (logsByTag.length > 0) {
296
- // We filter out the logs that are newer than the anchor block number of the tx currently being constructed
297
- const filteredLogsByBlockNumber = logsByTag.filter((l)=>l.blockNumber <= maxBlockNumber);
298
- // We store the logs in capsules (to later be obtained in Noir)
299
- await this.#storePendingTaggedLogs(contractAddress, pendingTaggedLogArrayBaseSlot, recipient, filteredLogsByBlockNumber);
300
- // We retrieve the pre-tag corresponding to the log as I need that to evaluate whether
301
- // a new largest index have been found.
302
- const preTagCorrespondingToLog = preTagsForTheWholeWindow[logIndex];
303
- const initialIndex = initialIndexesMap[preTagCorrespondingToLog.secret.toString()];
304
- if (preTagCorrespondingToLog.index >= initialIndex && (newLargestIndexMapForIteration[preTagCorrespondingToLog.secret.toString()] === undefined || preTagCorrespondingToLog.index >= newLargestIndexMapForIteration[preTagCorrespondingToLog.secret.toString()])) {
305
- // We have found a new largest index so we store it for later processing (storing it in the db + fetching
306
- // the difference of the window sets of current and the next iteration)
307
- newLargestIndexMapForIteration[preTagCorrespondingToLog.secret.toString()] = preTagCorrespondingToLog.index + 1;
308
- this.log.debug(`Incrementing index to ${preTagCorrespondingToLog.index + 1} at contract ${contractName}(${contractAddress})`);
309
- }
310
- }
311
- }
312
- // Now based on the new largest indexes we found, we will construct a new secrets and windows set to fetch logs
313
- // for. Note that it's very unlikely that a new log from the current window would appear between the iterations
314
- // so we fetch the logs only for the difference of the window sets.
315
- const newSecretsAndWindows = [];
316
- for (const [directionalAppTaggingSecret, newIndex] of Object.entries(newLargestIndexMapForIteration)){
317
- const maybeIndexedSecret = indexedSecrets.find((indexedSecret)=>indexedSecret.secret.toString() === directionalAppTaggingSecret);
318
- if (maybeIndexedSecret) {
319
- newSecretsAndWindows.push({
320
- secret: maybeIndexedSecret.secret,
321
- // We set the left most index to the new index to avoid fetching the same logs again
322
- leftMostIndex: newIndex,
323
- rightMostIndex: newIndex + WINDOW_HALF_SIZE
324
- });
325
- // We store the new largest index in the map to later store it in the db.
326
- newLargestIndexMapToStore[directionalAppTaggingSecret] = newIndex;
327
- } else {
328
- throw new Error(`Secret not found for directionalAppTaggingSecret ${directionalAppTaggingSecret}. This is a bug as it should never happen!`);
329
- }
330
- }
331
- // Now we set the new secrets and windows and proceed to the next iteration.
332
- secretsAndWindows = newSecretsAndWindows;
333
- }
334
- // At this point we have processed all the logs for the recipient so we store the last used indexes in the db.
335
- // newLargestIndexMapToStore contains "next" indexes to look for (one past the last found), so subtract 1 to get
336
- // last used.
337
- await this.recipientTaggingDataProvider.setLastUsedIndexes(Object.entries(newLargestIndexMapToStore).map(([directionalAppTaggingSecret, index])=>({
338
- secret: DirectionalAppTaggingSecret.fromString(directionalAppTaggingSecret),
339
- index: index - 1
340
- })));
341
- }
342
- }
343
- async #storePendingTaggedLogs(contractAddress, capsuleArrayBaseSlot, recipient, privateLogs) {
344
- // Build all pending tagged logs upfront with their tx effects
345
- const pendingTaggedLogs = await Promise.all(privateLogs.map(async (scopedLog)=>{
346
- // TODO(#9789): get these effects along with the log
347
- const txEffect = await this.aztecNode.getTxEffect(scopedLog.txHash);
348
- if (!txEffect) {
349
- throw new Error(`Could not find tx effect for tx hash ${scopedLog.txHash}`);
350
- }
351
- const pendingTaggedLog = new PendingTaggedLog(scopedLog.log.fields, scopedLog.txHash, txEffect.data.noteHashes, txEffect.data.nullifiers[0], recipient);
352
- return pendingTaggedLog.toFields();
353
- }));
354
- return this.capsuleDataProvider.appendToCapsuleArray(contractAddress, capsuleArrayBaseSlot, pendingTaggedLogs);
355
- }
356
- async validateEnqueuedNotesAndEvents(contractAddress, noteValidationRequestsArrayBaseSlot, eventValidationRequestsArrayBaseSlot) {
357
- // We read all note and event validation requests and process them all concurrently. This makes the process much
358
- // faster as we don't need to wait for the network round-trip.
359
- const noteValidationRequests = (await this.capsuleDataProvider.readCapsuleArray(contractAddress, noteValidationRequestsArrayBaseSlot)).map(NoteValidationRequest.fromFields);
360
- const eventValidationRequests = (await this.capsuleDataProvider.readCapsuleArray(contractAddress, eventValidationRequestsArrayBaseSlot)).map(EventValidationRequest.fromFields);
361
- const noteDeliveries = noteValidationRequests.map((request)=>this.deliverNote(request.contractAddress, request.owner, request.storageSlot, request.randomness, request.noteNonce, request.content, request.noteHash, request.nullifier, request.txHash, request.recipient));
362
- const eventDeliveries = eventValidationRequests.map((request)=>this.deliverEvent(request.contractAddress, request.eventTypeId, request.serializedEvent, request.eventCommitment, request.txHash, request.recipient));
363
- await Promise.all([
364
- ...noteDeliveries,
365
- ...eventDeliveries
366
- ]);
367
- // Requests are cleared once we're done.
368
- await this.capsuleDataProvider.setCapsuleArray(contractAddress, noteValidationRequestsArrayBaseSlot, []);
369
- await this.capsuleDataProvider.setCapsuleArray(contractAddress, eventValidationRequestsArrayBaseSlot, []);
370
- }
371
/**
 * Validates a note discovered during log processing and stores it in the NoteDataProvider so it can
 * later be returned via `getNotes`.
 *
 * @param contractAddress - Contract the note belongs to; used to silo the note hash and nullifier.
 * @param owner - Note owner, stored on the resulting NoteDao.
 * @param storageSlot - Contract storage slot associated with the note.
 * @param randomness - Note randomness, stored on the resulting NoteDao.
 * @param noteNonce - Nonce used to compute the unique note hash.
 * @param content - Note content fields (wrapped in a `Note`).
 * @param noteHash - Unsiloed note hash as emitted by the contract.
 * @param nullifier - Unsiloed nullifier for the note.
 * @param txHash - Hash of the tx in which the note was created.
 * @param recipient - Account that discovered the note; used as the scope when storing it.
 * @throws If the tx effect cannot be found (or is ahead of the locally synced block), or if the
 *   unique note hash is missing from the tx or from the note hash tree at the synced block.
 */
async deliverNote(contractAddress, owner, storageSlot, randomness, noteNonce, content, noteHash, nullifier, txHash, recipient) {
    // We are going to store the new note in the NoteDataProvider, which will let us later return it via `getNotes`.
    // There's two things we need to check before we do this however:
    //  - we must make sure the note does actually exist in the note hash tree
    //  - we need to check if the note has already been nullified
    //
    // Failing to do either of the above would result in circuits getting either non-existent notes and failing to
    // produce inclusion proofs for them, or getting nullified notes and producing duplicate nullifiers, both of which
    // are catastrophic failure modes.
    //
    // Note that adding a note and removing it is *not* equivalent to never adding it in the first place. A nullifier
    // emitted in a block that comes after note creation might result in the note being de-nullified by a chain reorg,
    // so we must store both the note hash and nullifier block information.
    // We avoid making node queries at 'latest' since we don't want to process notes or nullifiers that only exist ahead
    // in time of the locally synced state.
    // Note that while this technically results in historical queries, we perform it at the latest locally synced block
    // number which *should* be recent enough to be available, even for non-archive nodes.
    // Also note that the note should never be ahead of the synced block here since `fetchTaggedLogs` only processes
    // logs up to the synced block making this only an additional safety check.
    const syncedBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
    // By computing siloed and unique note hashes ourselves we prevent contracts from interfering with the note storage
    // of other contracts, which would constitute a security breach.
    const uniqueNoteHash = await computeUniqueNoteHash(noteNonce, await siloNoteHash(contractAddress, noteHash));
    const siloedNullifier = await siloNullifier(contractAddress, nullifier);
    const txEffect = await this.aztecNode.getTxEffect(txHash);
    if (!txEffect) {
        throw new Error(`Could not find tx effect for tx hash ${txHash}`);
    }
    // Reject tx effects from blocks we have not yet synced to, matching the 'no queries at latest' policy above.
    if (txEffect.l2BlockNumber > syncedBlockNumber) {
        throw new Error(`Could not find tx effect for tx hash ${txHash} as of block number ${syncedBlockNumber}`);
    }
    // Sanity check: the claimed note must actually appear among the tx's note hashes.
    const noteInTx = txEffect.data.noteHashes.some((nh)=>nh.equals(uniqueNoteHash));
    if (!noteInTx) {
        throw new Error(`Note hash ${noteHash} (uniqued as ${uniqueNoteHash}) is not present in tx ${txHash}`);
    }
    // We store notes by their index in the global note hash tree, which has the convenient side effect of validating
    // note existence in said tree. We concurrently also check if the note's nullifier exists, performing all node
    // queries in a single round-trip.
    const [[uniqueNoteHashTreeIndexInBlock], [nullifierIndex]] = await Promise.all([
        this.aztecNode.findLeavesIndexes(syncedBlockNumber, MerkleTreeId.NOTE_HASH_TREE, [
            uniqueNoteHash
        ]),
        this.aztecNode.findLeavesIndexes(syncedBlockNumber, MerkleTreeId.NULLIFIER_TREE, [
            siloedNullifier
        ])
    ]);
    if (uniqueNoteHashTreeIndexInBlock === undefined) {
        throw new Error(`Note hash ${noteHash} (uniqued as ${uniqueNoteHash}) is not present on the tree at block ${syncedBlockNumber} (from tx ${txHash})`);
    }
    const noteDao = new NoteDao(new Note(content), contractAddress, owner, storageSlot, randomness, noteNonce, noteHash, siloedNullifier, txHash, uniqueNoteHashTreeIndexInBlock.l2BlockNumber, uniqueNoteHashTreeIndexInBlock.l2BlockHash.toString(), uniqueNoteHashTreeIndexInBlock.data);
    // The note was found by `recipient`, so we use that as the scope when storing the note.
    await this.noteDataProvider.addNotes([
        noteDao
    ], recipient);
    this.log.verbose('Added note', {
        index: noteDao.index,
        contract: noteDao.contractAddress.toString(),
        slot: noteDao.storageSlot.toString(),
        noteHash: noteDao.noteHash.toString(),
        nullifier: noteDao.siloedNullifier.toString()
    });
    // If the nullifier is already on-chain we still add the note first and then mark it nullified,
    // keeping both block references so a reorg can de-nullify the note (see the comment block above).
    if (nullifierIndex !== undefined) {
        const { data: _, ...blockHashAndNum } = nullifierIndex;
        await this.noteDataProvider.applyNullifiers([
            {
                data: siloedNullifier,
                ...blockHashAndNum
            }
        ]);
        this.log.verbose(`Removed just-added note`, {
            contract: contractAddress,
            slot: storageSlot,
            noteHash: noteHash,
            nullifier: siloedNullifier.toString()
        });
    }
}
448
- async bulkRetrieveLogs(contractAddress, logRetrievalRequestsArrayBaseSlot, logRetrievalResponsesArrayBaseSlot) {
449
- // We read all log retrieval requests and process them all concurrently. This makes the process much faster as we
450
- // don't need to wait for the network round-trip.
451
- const logRetrievalRequests = (await this.capsuleDataProvider.readCapsuleArray(contractAddress, logRetrievalRequestsArrayBaseSlot)).map(LogRetrievalRequest.fromFields);
452
- const maybeLogRetrievalResponses = await Promise.all(logRetrievalRequests.map(async (request)=>{
453
- // TODO(#14555): remove these internal functions and have node endpoints that do this instead
454
- const [publicLog, privateLog] = await Promise.all([
455
- this.getPublicLogByTag(request.unsiloedTag, request.contractAddress),
456
- this.getPrivateLogByTag(await siloPrivateLog(request.contractAddress, request.unsiloedTag))
457
- ]);
458
- if (publicLog !== null) {
459
- if (privateLog !== null) {
460
- throw new Error(`Found both a public and private log when searching for tag ${request.unsiloedTag} from contract ${request.contractAddress}`);
461
- }
462
- return new LogRetrievalResponse(publicLog.logPayload, publicLog.txHash, publicLog.uniqueNoteHashesInTx, publicLog.firstNullifierInTx);
463
- } else if (privateLog !== null) {
464
- return new LogRetrievalResponse(privateLog.logPayload, privateLog.txHash, privateLog.uniqueNoteHashesInTx, privateLog.firstNullifierInTx);
465
- } else {
466
- return null;
467
- }
468
- }));
469
- // Requests are cleared once we're done.
470
- await this.capsuleDataProvider.setCapsuleArray(contractAddress, logRetrievalRequestsArrayBaseSlot, []);
471
- // The responses are stored as Option<LogRetrievalResponse> in a second CapsuleArray.
472
- await this.capsuleDataProvider.setCapsuleArray(contractAddress, logRetrievalResponsesArrayBaseSlot, maybeLogRetrievalResponses.map(LogRetrievalResponse.toSerializedOption));
473
- }
474
/**
 * Validates a private event discovered during log processing and stores it via the
 * PrivateEventDataProvider.
 *
 * @param contractAddress - Contract that emitted the event; used to silo the event commitment.
 * @param selector - Event type selector under which the event log is stored.
 * @param content - Serialized event payload.
 * @param eventCommitment - Unsiloed event commitment (checked against the tx's nullifiers once siloed).
 * @param txHash - Hash of the tx in which the event was emitted.
 * @param scope - Account scope under which the event is stored.
 * @throws If the tx effect cannot be found (or is ahead of the locally synced block), or if the
 *   siloed event commitment is missing from the tx or from the nullifier tree at the synced block.
 */
async deliverEvent(contractAddress, selector, content, eventCommitment, txHash, scope) {
    // While using 'latest' block number would be fine for private events since they cannot be accessed from Aztec.nr
    // (and thus we're less concerned about being ahead of the synced block), we use the synced block number to
    // maintain consistent behavior in the PXE. Additionally, events should never be ahead of the synced block here
    // since `fetchTaggedLogs` only processes logs up to the synced block.
    const [syncedBlockHeader, siloedEventCommitment, txEffect] = await Promise.all([
        this.anchorBlockDataProvider.getBlockHeader(),
        siloNullifier(contractAddress, eventCommitment),
        this.aztecNode.getTxEffect(txHash)
    ]);
    const syncedBlockNumber = syncedBlockHeader.getBlockNumber();
    if (!txEffect) {
        throw new Error(`Could not find tx effect for tx hash ${txHash}`);
    }
    // Reject tx effects from blocks we have not yet synced to.
    if (txEffect.l2BlockNumber > syncedBlockNumber) {
        throw new Error(`Could not find tx effect for tx hash ${txHash} as of block number ${syncedBlockNumber}`);
    }
    // Sanity check: the siloed event commitment must appear among the tx's nullifiers.
    const eventInTx = txEffect.data.nullifiers.some((n)=>n.equals(siloedEventCommitment));
    if (!eventInTx) {
        throw new Error(`Event commitment ${eventCommitment} (siloed as ${siloedEventCommitment}) is not present in tx ${txHash}`);
    }
    // Locating the commitment in the nullifier tree both validates its existence on-chain and yields
    // the leaf index plus block information under which the event is stored.
    const [nullifierIndex] = await this.aztecNode.findLeavesIndexes(syncedBlockNumber, MerkleTreeId.NULLIFIER_TREE, [
        siloedEventCommitment
    ]);
    if (nullifierIndex === undefined) {
        throw new Error(`Event commitment ${eventCommitment} (siloed as ${siloedEventCommitment}) is not present on the nullifier tree at block ${syncedBlockNumber} (from tx ${txHash})`);
    }
    return this.privateEventDataProvider.storePrivateEventLog(selector, content, Number(nullifierIndex.data), {
        contractAddress,
        scope,
        txHash,
        l2BlockNumber: nullifierIndex.l2BlockNumber,
        l2BlockHash: nullifierIndex.l2BlockHash
    });
}
509
- // TODO(#14555): delete this function and implement this behavior in the node instead
510
- async getPublicLogByTag(tag, contractAddress) {
511
- const logs = await this.#getPublicLogsByTagsFromContract([
512
- tag
513
- ], contractAddress);
514
- const logsForTag = logs[0];
515
- this.log.debug(`Got ${logsForTag.length} public logs for tag ${tag}`);
516
- if (logsForTag.length == 0) {
517
- return null;
518
- } else if (logsForTag.length > 1) {
519
- // TODO(#11627): handle this case
520
- throw new Error(`Got ${logsForTag.length} logs for tag ${tag} and contract ${contractAddress.toString()}. getPublicLogByTag currently only supports a single log per tag`);
521
- }
522
- const scopedLog = logsForTag[0];
523
- // getLogsByTag doesn't have all of the information that we need (notably note hashes and the first nullifier), so
524
- // we need to make a second call to the node for `getTxEffect`.
525
- // TODO(#9789): bundle this information in the `getLogsByTag` call.
526
- const txEffect = await this.aztecNode.getTxEffect(scopedLog.txHash);
527
- if (txEffect == undefined) {
528
- throw new Error(`Unexpected: failed to retrieve tx effects for tx ${scopedLog.txHash} which is known to exist`);
529
- }
530
- return new PublicLogWithTxData(scopedLog.log.getEmittedFieldsWithoutTag(), scopedLog.txHash, txEffect.data.noteHashes, txEffect.data.nullifiers[0]);
531
- }
532
- // TODO(#14555): delete this function and implement this behavior in the node instead
533
- async getPrivateLogByTag(siloedTag) {
534
- const logs = await this.#getPrivateLogsByTags([
535
- siloedTag
536
- ]);
537
- const logsForTag = logs[0];
538
- this.log.debug(`Got ${logsForTag.length} private logs for tag ${siloedTag}`);
539
- if (logsForTag.length == 0) {
540
- return null;
541
- } else if (logsForTag.length > 1) {
542
- // TODO(#11627): handle this case
543
- throw new Error(`Got ${logsForTag.length} logs for tag ${siloedTag}. getPrivateLogByTag currently only supports a single log per tag`);
544
- }
545
- const scopedLog = logsForTag[0];
546
- // getLogsByTag doesn't have all of the information that we need (notably note hashes and the first nullifier), so
547
- // we need to make a second call to the node for `getTxEffect`.
548
- // TODO(#9789): bundle this information in the `getLogsByTag` call.
549
- const txEffect = await this.aztecNode.getTxEffect(scopedLog.txHash);
550
- if (txEffect == undefined) {
551
- throw new Error(`Unexpected: failed to retrieve tx effects for tx ${scopedLog.txHash} which is known to exist`);
552
- }
553
- return new PrivateLogWithTxData(scopedLog.log.getEmittedFieldsWithoutTag(), scopedLog.txHash, txEffect.data.noteHashes, txEffect.data.nullifiers[0]);
554
- }
555
- /**
556
- * Looks for nullifiers of active contract notes and marks them as nullified if a nullifier is found.
557
- *
558
- * Fetches notes from the NoteDataProvider and checks which nullifiers are present in the
559
- * onchain nullifier Merkle tree - up to the latest locally synced block. We use the
560
- * locally synced block instead of querying the chain's 'latest' block to ensure correctness:
561
- * notes are only marked nullified once their corresponding nullifier has been included in a
562
- * block up to which the PXE has synced.
563
- * This allows recent nullifications to be processed even if the node is not an archive node.
564
- *
565
- * @param contractAddress - The contract whose notes should be checked and nullified.
566
- */ async syncNoteNullifiers(contractAddress) {
567
- this.log.verbose('Searching for nullifiers of known notes', {
568
- contract: contractAddress
569
- });
570
- const syncedBlockNumber = (await this.anchorBlockDataProvider.getBlockHeader()).getBlockNumber();
571
- const contractNotes = await this.noteDataProvider.getNotes({
572
- contractAddress
573
- });
574
- if (contractNotes.length === 0) {
575
- return;
576
- }
577
- const nullifiersToCheck = contractNotes.map((note)=>note.siloedNullifier);
578
- const nullifierBatches = nullifiersToCheck.reduce((acc, nullifier)=>{
579
- if (acc[acc.length - 1].length < MAX_RPC_LEN) {
580
- acc[acc.length - 1].push(nullifier);
581
- } else {
582
- acc.push([
583
- nullifier
584
- ]);
585
- }
586
- return acc;
587
- }, [
588
- []
589
- ]);
590
- const nullifierIndexes = (await Promise.all(nullifierBatches.map((batch)=>this.aztecNode.findLeavesIndexes(syncedBlockNumber, MerkleTreeId.NULLIFIER_TREE, batch)))).flat();
591
- const foundNullifiers = nullifiersToCheck.map((nullifier, i)=>{
592
- if (nullifierIndexes[i] !== undefined) {
593
- return {
594
- ...nullifierIndexes[i],
595
- ...{
596
- data: nullifier
597
- }
598
- };
599
- }
600
- }).filter((nullifier)=>nullifier !== undefined);
601
- const nullifiedNotes = await this.noteDataProvider.applyNullifiers(foundNullifiers);
602
- nullifiedNotes.forEach((noteDao)=>{
603
- this.log.verbose(`Removed note for contract ${noteDao.contractAddress} at slot ${noteDao.storageSlot}`, {
604
- contract: noteDao.contractAddress,
605
- slot: noteDao.storageSlot,
606
- nullifier: noteDao.siloedNullifier.toString()
607
- });
608
- });
609
- }
610
// Thin delegation to the CapsuleDataProvider: stores `capsule` under `slot` for `contractAddress`.
storeCapsule(contractAddress, slot, capsule) {
    return this.capsuleDataProvider.storeCapsule(contractAddress, slot, capsule);
}
613
// Thin delegation to the CapsuleDataProvider: loads the capsule stored under `slot` for `contractAddress`.
loadCapsule(contractAddress, slot) {
    return this.capsuleDataProvider.loadCapsule(contractAddress, slot);
}
616
// Thin delegation to the CapsuleDataProvider: deletes the capsule stored under `slot` for `contractAddress`.
deleteCapsule(contractAddress, slot) {
    return this.capsuleDataProvider.deleteCapsule(contractAddress, slot);
}
619
// Thin delegation to the CapsuleDataProvider: copies `numEntries` capsule entries from `srcSlot` to
// `dstSlot` within `contractAddress`'s capsule storage.
copyCapsule(contractAddress, srcSlot, dstSlot, numEntries) {
    return this.capsuleDataProvider.copyCapsule(contractAddress, srcSlot, dstSlot, numEntries);
}
622
- async getSharedSecret(address, ephPk) {
623
- // TODO(#12656): return an app-siloed secret
624
- const recipientCompleteAddress = await this.getCompleteAddress(address);
625
- const ivskM = await this.keyStore.getMasterSecretKey(recipientCompleteAddress.publicKeys.masterIncomingViewingPublicKey);
626
- const addressSecret = await computeAddressSecret(await recipientCompleteAddress.getPreaddress(), ivskM);
627
- return deriveEcdhSharedSecret(addressSecret, ephPk);
628
- }
629
- // TODO(#12656): Make this a public function on the AztecNode interface and remove the original getLogsByTags. This
630
- // was not done yet as we were unsure about the API and we didn't want to introduce a breaking change.
631
- async #getPrivateLogsByTags(tags) {
632
- const allLogs = await this.aztecNode.getLogsByTags(tags);
633
- return allLogs.map((logs)=>logs.filter((log)=>!log.isFromPublic));
634
- }
635
- // TODO(#12656): Make this a public function on the AztecNode interface and remove the original getLogsByTags. This
636
- // was not done yet as we were unsure about the API and we didn't want to introduce a breaking change.
637
- async #getPublicLogsByTagsFromContract(tags, contractAddress) {
638
- const allLogs = await this.aztecNode.getLogsByTags(tags);
639
- const allPublicLogs = allLogs.map((logs)=>logs.filter((log)=>log.isFromPublic));
640
- return allPublicLogs.map((logs)=>logs.filter((log)=>log.log.contractAddress.equals(contractAddress)));
641
- }
642
- getStats() {
643
- const nodeRPCCalls = typeof this.aztecNode.getStats === 'function' ? this.aztecNode.getStats() : {};
644
- return {
645
- nodeRPCCalls
646
- };
647
- }
648
- }