@aztec/pxe 4.0.0-nightly.20260112 → 4.0.0-nightly.20260114
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/block_synchronizer/block_synchronizer.d.ts +4 -2
- package/dest/block_synchronizer/block_synchronizer.d.ts.map +1 -1
- package/dest/block_synchronizer/block_synchronizer.js +19 -13
- package/dest/contract_function_simulator/oracle/interfaces.d.ts +3 -3
- package/dest/contract_function_simulator/oracle/interfaces.d.ts.map +1 -1
- package/dest/contract_function_simulator/oracle/note_packing_utils.d.ts +4 -4
- package/dest/contract_function_simulator/oracle/note_packing_utils.d.ts.map +1 -1
- package/dest/contract_function_simulator/oracle/note_packing_utils.js +5 -5
- package/dest/contract_function_simulator/oracle/oracle.js +1 -1
- package/dest/contract_function_simulator/oracle/private_execution.d.ts +1 -1
- package/dest/contract_function_simulator/oracle/private_execution.d.ts.map +1 -1
- package/dest/contract_function_simulator/oracle/private_execution.js +1 -2
- package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts +1 -16
- package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts.map +1 -1
- package/dest/contract_function_simulator/oracle/private_execution_oracle.js +4 -31
- package/dest/contract_function_simulator/oracle/utility_execution_oracle.d.ts +1 -1
- package/dest/contract_function_simulator/oracle/utility_execution_oracle.d.ts.map +1 -1
- package/dest/contract_function_simulator/oracle/utility_execution_oracle.js +15 -13
- package/dest/events/event_service.d.ts +1 -1
- package/dest/events/event_service.d.ts.map +1 -1
- package/dest/events/event_service.js +8 -12
- package/dest/logs/log_service.d.ts +3 -2
- package/dest/logs/log_service.d.ts.map +1 -1
- package/dest/logs/log_service.js +4 -2
- package/dest/notes/note_service.d.ts +2 -2
- package/dest/notes/note_service.d.ts.map +1 -1
- package/dest/notes/note_service.js +14 -22
- package/dest/private_kernel/hints/build_private_kernel_reset_private_inputs.d.ts +2 -2
- package/dest/private_kernel/hints/build_private_kernel_reset_private_inputs.d.ts.map +1 -1
- package/dest/private_kernel/hints/build_private_kernel_reset_private_inputs.js +2 -2
- package/dest/private_kernel/private_kernel_execution_prover.d.ts +1 -1
- package/dest/private_kernel/private_kernel_execution_prover.d.ts.map +1 -1
- package/dest/private_kernel/private_kernel_execution_prover.js +3 -4
- package/dest/private_kernel/private_kernel_oracle.d.ts +24 -28
- package/dest/private_kernel/private_kernel_oracle.d.ts.map +1 -1
- package/dest/private_kernel/private_kernel_oracle.js +92 -2
- package/dest/pxe.d.ts +7 -36
- package/dest/pxe.d.ts.map +1 -1
- package/dest/pxe.js +12 -59
- package/dest/storage/capsule_store/capsule_store.d.ts +24 -9
- package/dest/storage/capsule_store/capsule_store.d.ts.map +1 -1
- package/dest/storage/capsule_store/capsule_store.js +132 -23
- package/dest/storage/note_store/note_store.d.ts +6 -5
- package/dest/storage/note_store/note_store.d.ts.map +1 -1
- package/dest/storage/note_store/note_store.js +89 -94
- package/dest/storage/private_event_store/private_event_store.d.ts +13 -6
- package/dest/storage/private_event_store/private_event_store.d.ts.map +1 -1
- package/dest/storage/private_event_store/private_event_store.js +70 -56
- package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts +1 -1
- package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts.map +1 -1
- package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.js +1 -1
- package/dest/tagging/sender_sync/utils/get_status_change_of_pending.d.ts +1 -1
- package/dest/tagging/sender_sync/utils/get_status_change_of_pending.d.ts.map +1 -1
- package/dest/tagging/sender_sync/utils/get_status_change_of_pending.js +2 -2
- package/package.json +16 -16
- package/src/block_synchronizer/block_synchronizer.ts +23 -12
- package/src/contract_function_simulator/oracle/interfaces.ts +2 -2
- package/src/contract_function_simulator/oracle/note_packing_utils.ts +6 -6
- package/src/contract_function_simulator/oracle/oracle.ts +1 -1
- package/src/contract_function_simulator/oracle/private_execution.ts +0 -2
- package/src/contract_function_simulator/oracle/private_execution_oracle.ts +2 -36
- package/src/contract_function_simulator/oracle/utility_execution_oracle.ts +15 -10
- package/src/events/event_service.ts +12 -26
- package/src/logs/log_service.ts +2 -1
- package/src/notes/note_service.ts +14 -23
- package/src/private_kernel/hints/build_private_kernel_reset_private_inputs.ts +1 -2
- package/src/private_kernel/private_kernel_execution_prover.ts +2 -4
- package/src/private_kernel/private_kernel_oracle.ts +119 -36
- package/src/pxe.ts +10 -81
- package/src/storage/capsule_store/capsule_store.ts +159 -23
- package/src/storage/note_store/note_store.ts +98 -95
- package/src/storage/private_event_store/private_event_store.ts +92 -65
- package/src/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.ts +4 -1
- package/src/tagging/sender_sync/utils/get_status_change_of_pending.ts +6 -2
- package/dest/private_kernel/private_kernel_oracle_impl.d.ts +0 -46
- package/dest/private_kernel/private_kernel_oracle_impl.d.ts.map +0 -1
- package/dest/private_kernel/private_kernel_oracle_impl.js +0 -86
- package/src/private_kernel/private_kernel_oracle_impl.ts +0 -133
package/src/storage/capsule_store/capsule_store.ts

@@ -3,12 +3,21 @@ import { type Logger, createLogger } from '@aztec/foundation/log';
 import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store';
 import type { AztecAddress } from '@aztec/stdlib/aztec-address';
 
-export class CapsuleStore {
+import type { StagedStore } from '../../job_coordinator/job_coordinator.js';
+
+export class CapsuleStore implements StagedStore {
+  readonly storeName = 'capsule';
+
   #store: AztecAsyncKVStore;
 
   // Arbitrary data stored by contracts. Key is computed as `${contractAddress}:${key}`
   #capsules: AztecAsyncMap<string, Buffer>;
 
+  // jobId => `${contractAddress}:${key}` => capsule data
+  // when `#stagedCapsules.get('some-job-id').get('${some-contract-address:some-key') === null`,
+  // it signals that the capsule was deleted during the job, so it needs to be deleted on commit
+  #stagedCapsules: Map<string, Map<string, Buffer | null>>;
+
   logger: Logger;
 
   constructor(store: AztecAsyncKVStore) {
@@ -16,21 +25,120 @@ export class CapsuleStore {
 
     this.#capsules = this.#store.openMap('capsules');
 
+    this.#stagedCapsules = new Map();
+
     this.logger = createLogger('pxe:capsule-data-provider');
   }
 
+  /**
+   * Given a job denoted by `jobId`, it returns the
+   * capsules that said job has interacted with.
+   *
+   * Capsules that haven't been committed to persistence KV storage
+   * are kept in-memory in `#stagedCapsules`, this method provides a convenient
+   * way to access that in-memory collection of data.
+   *
+   * @param jobId
+   * @returns
+   */
+  #getJobStagedCapsules(jobId: string): Map<string, Buffer | null> {
+    let jobStagedCapsules = this.#stagedCapsules.get(jobId);
+    if (!jobStagedCapsules) {
+      jobStagedCapsules = new Map();
+      this.#stagedCapsules.set(jobId, jobStagedCapsules);
+    }
+    return jobStagedCapsules;
+  }
+
+  /**
+   * Reads a capsule's slot from the staged version of the data associated to the given jobId.
+   *
+   * If it is not there, it reads it from the KV store.
+   */
+  async #getFromStage(jobId: string, dbSlotKey: string): Promise<Buffer | null | undefined> {
+    const jobStagedCapsules = this.#getJobStagedCapsules(jobId);
+    let staged: Buffer | null | undefined = jobStagedCapsules.get(dbSlotKey);
+    // Note that if staged === null, we marked it for deletion, so we don't want to
+    // re-read it from DB
+    if (staged === undefined) {
+      // If we don't have a staged version of this dbSlotKey, first we check if there's one in DB
+      staged = await this.#loadCapsuleFromDb(dbSlotKey);
+    }
+    return staged;
+  }
+
+  /**
+   * Writes a capsule to the stage of a job.
+   */
+  #setOnStage(jobId: string, dbSlotKey: string, capsuleData: Buffer) {
+    this.#getJobStagedCapsules(jobId).set(dbSlotKey, capsuleData);
+  }
+
+  /**
+   * Deletes a capsule on the stage of a job. Note the capsule will still
+   * exist in storage until the job is committed.
+   */
+  #deleteOnStage(jobId: string, dbSlotKey: string) {
+    this.#getJobStagedCapsules(jobId).set(dbSlotKey, null);
+  }
+
+  async #loadCapsuleFromDb(dbSlotKey: string): Promise<Buffer | null> {
+    const dataBuffer = await this.#capsules.getAsync(dbSlotKey);
+    if (!dataBuffer) {
+      return null;
+    }
+
+    return dataBuffer;
+  }
+
+  /**
+   * Commits staged data to main storage.
+   * Called by JobCoordinator when a job completes successfully.
+   * Note: JobCoordinator wraps all commits in a single transaction, so we don't
+   * need our own transactionAsync here (and using one would deadlock on IndexedDB).
+   * @param jobId - The jobId identifying which staged data to commit
+   */
+  async commit(jobId: string): Promise<void> {
+    const jobStagedCapsules = this.#getJobStagedCapsules(jobId);
+
+    for (const [key, value] of jobStagedCapsules) {
+      // In the write stage, we represent deleted capsules with null
+      // (as opposed to undefined, which denotes there was never a capsule there to begin with).
+      // So we delete from actual KV store here.
+      if (value === null) {
+        await this.#capsules.delete(key);
+      } else {
+        await this.#capsules.set(key, value);
+      }
+    }
+
+    this.#stagedCapsules.delete(jobId);
+  }
+
+  /**
+   * Discards staged data without committing.
+   */
+  discardStaged(jobId: string): Promise<void> {
+    this.#stagedCapsules.delete(jobId);
+    return Promise.resolve();
+  }
+
   /**
    * Stores arbitrary information in a per-contract non-volatile database, which can later be retrieved with `loadCapsule`.
    * * If data was already stored at this slot, it is overwritten.
    * @param contractAddress - The contract address to scope the data under.
    * @param slot - The slot in the database in which to store the value. Slots need not be contiguous.
    * @param capsule - An array of field elements representing the capsule.
+   * @param jobId - The context in which this store will be visible until PXE decides to persist it to underlying KV store
    * @remarks A capsule is a "blob" of data that is passed to the contract through an oracle. It works similarly
    * to public contract storage in that it's indexed by the contract address and storage slot but instead of the global
    * network state it's backed by local PXE db.
    */
-
-
+  storeCapsule(contractAddress: AztecAddress, slot: Fr, capsule: Fr[], jobId: string) {
+    const dbSlotKey = dbSlotToKey(contractAddress, slot);
+
+    // A store overrides any pre-existing data on the slot
+    this.#setOnStage(jobId, dbSlotKey, Buffer.concat(capsule.map(value => value.toBuffer())));
   }
 
   /**
@@ -39,8 +147,8 @@ export class CapsuleStore {
    * @param slot - The slot in the database to read.
    * @returns The stored data or `null` if no data is stored under the slot.
    */
-  async loadCapsule(contractAddress: AztecAddress, slot: Fr): Promise<Fr[] | null> {
-    const dataBuffer = await this.#
+  async loadCapsule(contractAddress: AztecAddress, slot: Fr, jobId: string): Promise<Fr[] | null> {
+    const dataBuffer = await this.#getFromStage(jobId, dbSlotToKey(contractAddress, slot));
     if (!dataBuffer) {
       this.logger.trace(`Data not found for contract ${contractAddress.toString()} and slot ${slot.toString()}`);
       return null;
@@ -57,8 +165,9 @@ export class CapsuleStore {
    * @param contractAddress - The contract address under which the data is scoped.
    * @param slot - The slot in the database to delete.
    */
-
-
+  deleteCapsule(contractAddress: AztecAddress, slot: Fr, jobId: string) {
+    // When we commit this, we will interpret null as a deletion, so we'll propagate the delete to the KV store
+    this.#deleteOnStage(jobId, dbSlotToKey(contractAddress, slot));
   }
 
   /**
@@ -72,13 +181,22 @@ export class CapsuleStore {
    * @param dstSlot - The first slot to copy to.
    * @param numEntries - The number of entries to copy.
    */
-  copyCapsule(
+  copyCapsule(
+    contractAddress: AztecAddress,
+    srcSlot: Fr,
+    dstSlot: Fr,
+    numEntries: number,
+    jobId: string,
+  ): Promise<void> {
+    // This transactional context gives us "copy atomicity":
+    // there shouldn't be concurrent writes to what's being copied here.
+    // Equally important: this in practice is expected to perform thousands of DB operations
+    // and not using a transaction here would heavily impact performance.
     return this.#store.transactionAsync(async () => {
      // In order to support overlapping source and destination regions, we need to check the relative positions of source
      // and destination. If destination is ahead of source, then by the time we overwrite source elements using forward
      // indexes we'll have already read those. On the contrary, if source is ahead of destination we need to use backward
      // indexes to avoid reading elements that've been overwritten.
-
      const indexes = Array.from(Array(numEntries).keys());
      if (srcSlot.lt(dstSlot)) {
        indexes.reverse();
@@ -88,12 +206,12 @@ export class CapsuleStore {
        const currentSrcSlot = dbSlotToKey(contractAddress, srcSlot.add(new Fr(i)));
        const currentDstSlot = dbSlotToKey(contractAddress, dstSlot.add(new Fr(i)));
 
-        const toCopy = await this.#
+        const toCopy = await this.#getFromStage(jobId, currentSrcSlot);
        if (!toCopy) {
          throw new Error(`Attempted to copy empty slot ${currentSrcSlot} for contract ${contractAddress.toString()}`);
        }
 
-
+        this.#setOnStage(jobId, currentDstSlot, toCopy);
      }
    });
  }
@@ -106,35 +224,45 @@ export class CapsuleStore {
   * @param baseSlot - The slot where the array length is stored
   * @param content - Array of capsule data to append
   */
-  appendToCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, content: Fr[][]): Promise<void> {
+  appendToCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, content: Fr[][], jobId: string): Promise<void> {
+    // We wrap this in a transaction to serialize concurrent calls from Promise.all.
+    // Without this, concurrent appends to the same array could race: both read length=0,
+    // both write at the same slots, one overwrites the other.
+    // Equally important: this in practice is expected to perform thousands of DB operations
+    // and not using a transaction here would heavily impact performance.
    return this.#store.transactionAsync(async () => {
      // Load current length, defaulting to 0 if not found
-      const lengthData = await this.loadCapsule(contractAddress, baseSlot);
+      const lengthData = await this.loadCapsule(contractAddress, baseSlot, jobId);
      const currentLength = lengthData ? lengthData[0].toNumber() : 0;
 
      // Store each capsule at consecutive slots after baseSlot + 1 + currentLength
      for (let i = 0; i < content.length; i++) {
        const nextSlot = arraySlot(baseSlot, currentLength + i);
-
+        this.storeCapsule(contractAddress, nextSlot, content[i], jobId);
      }
 
      // Update length to include all new capsules
      const newLength = currentLength + content.length;
-
+      this.storeCapsule(contractAddress, baseSlot, [new Fr(newLength)], jobId);
    });
  }
 
-  readCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr): Promise<Fr[][]> {
+  readCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, jobId: string): Promise<Fr[][]> {
+    // I'm leaving this transactional context here though because I'm assuming this
+    // gives us "read array atomicity": there shouldn't be concurrent writes to what's being copied
+    // here.
+    // This is one point we should revisit in the future if we want to relax the concurrency
+    // of jobs: different calls running concurrently on the same contract may cause trouble.
    return this.#store.transactionAsync(async () => {
      // Load length, defaulting to 0 if not found
-      const maybeLength = await this.loadCapsule(contractAddress, baseSlot);
+      const maybeLength = await this.loadCapsule(contractAddress, baseSlot, jobId);
      const length = maybeLength ? maybeLength[0].toBigInt() : 0n;
 
      const values: Fr[][] = [];
 
      // Read each capsule at consecutive slots after baseSlot
      for (let i = 0; i < length; i++) {
-        const currentValue = await this.loadCapsule(contractAddress, arraySlot(baseSlot, i));
+        const currentValue = await this.loadCapsule(contractAddress, arraySlot(baseSlot, i), jobId);
        if (currentValue == undefined) {
          throw new Error(
            `Expected non-empty value at capsule array in base slot ${baseSlot} at index ${i} for contract ${contractAddress}`,
@@ -148,23 +276,31 @@ export class CapsuleStore {
    });
  }
 
-  setCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, content: Fr[][]) {
+  setCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, content: Fr[][], jobId: string) {
+    // This transactional context in theory isn't so critical now because we aren't
+    // writing to DB so if there's exceptions midway and it blows up, no visible impact
+    // to persistent storage will happen.
+    // I'm leaving this transactional context here though because I'm assuming this
+    // gives us "write array atomicity": there shouldn't be concurrent writes to what's being copied
+    // here.
+    // This is one point we should revisit in the future if we want to relax the concurrency
+    // of jobs: different calls running concurrently on the same contract may cause trouble.
    return this.#store.transactionAsync(async () => {
      // Load current length, defaulting to 0 if not found
-      const maybeLength = await this.loadCapsule(contractAddress, baseSlot);
+      const maybeLength = await this.loadCapsule(contractAddress, baseSlot, jobId);
      const originalLength = maybeLength ? maybeLength[0].toNumber() : 0;
 
      // Set the new length
-
+      this.storeCapsule(contractAddress, baseSlot, [new Fr(content.length)], jobId);
 
      // Store the new content, possibly overwriting existing values
      for (let i = 0; i < content.length; i++) {
-
+        this.storeCapsule(contractAddress, arraySlot(baseSlot, i), content[i], jobId);
      }
 
      // Clear any stragglers
      for (let i = content.length; i < originalLength; i++) {
-
+        this.deleteCapsule(contractAddress, arraySlot(baseSlot, i), jobId);
      }
    });
  }
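The staged-capsule flow above can be summarized with a short usage sketch. This is not taken from the package: the import paths, the way the KV store and contract address are obtained, and the helper name capsuleJobSketch are assumptions — only the CapsuleStore method signatures mirror the diff.

// Illustrative only: a job stages capsule writes in memory and either commits or discards them.
import { Fr } from '@aztec/foundation/curves/bn254';
import type { AztecAsyncKVStore } from '@aztec/kv-store';
import type { AztecAddress } from '@aztec/stdlib/aztec-address';

import { CapsuleStore } from './capsule_store.js'; // assumed relative import within the package

async function capsuleJobSketch(kv: AztecAsyncKVStore, contract: AztecAddress): Promise<void> {
  const capsules = new CapsuleStore(kv);
  const jobId = 'job-1'; // any string identifying the in-flight job

  // Writes and deletes only touch the in-memory stage for this jobId.
  capsules.storeCapsule(contract, new Fr(1), [new Fr(42)], jobId);
  capsules.deleteCapsule(contract, new Fr(2), jobId);

  // Reads consult the stage first and fall back to the persistent KV store.
  const staged = await capsules.loadCapsule(contract, new Fr(1), jobId);

  if (staged !== null) {
    await capsules.commit(jobId); // flush staged sets/deletes into the 'capsules' map
  } else {
    await capsules.discardStaged(jobId); // drop the stage without touching the KV store
  }
}

Per the comments in the diff, commit is the hook the JobCoordinator calls (inside its own transaction) when a job succeeds, while discardStaged is the failure path that leaves persistent storage untouched.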
package/src/storage/note_store/note_store.ts

@@ -1,4 +1,3 @@
-import { toBufferBE } from '@aztec/foundation/bigint-buffer';
 import type { Fr } from '@aztec/foundation/curves/bn254';
 import { toArray } from '@aztec/foundation/iterable';
 import type { AztecAsyncKVStore, AztecAsyncMap, AztecAsyncMultiMap } from '@aztec/kv-store';
@@ -15,32 +14,41 @@ import { NoteDao } from '@aztec/stdlib/note';
  **/
 export class NoteStore {
   #store: AztecAsyncKVStore;
+
+  // Note that we use the siloedNullifier as the note id in the store as it's guaranteed to be unique.
+
+  /** noteId (siloedNullifier) -> NoteDao (serialized) */
   #notes: AztecAsyncMap<string, Buffer>;
+  /** noteId (siloedNullifier) -> NoteDao (serialized) */
   #nullifiedNotes: AztecAsyncMap<string, Buffer>;
-
+  /** blockNumber -> siloedNullifier */
   #nullifiersByBlockNumber: AztecAsyncMultiMap<number, string>;
 
+  /** noteId (siloedNullifier) -> scope */
   #nullifiedNotesToScope: AztecAsyncMultiMap<string, string>;
+  /** contractAddress -> noteId (siloedNullifier) */
   #nullifiedNotesByContract: AztecAsyncMultiMap<string, string>;
+  /** storageSlot -> noteId (siloedNullifier) */
   #nullifiedNotesByStorageSlot: AztecAsyncMultiMap<string, string>;
-  #nullifiedNotesByNullifier: AztecAsyncMap<string, string>;
 
+  /** scope (AztecAddress) -> true */
   #scopes: AztecAsyncMap<string, true>;
+  /** noteId (siloedNullifier) -> scope */
   #notesToScope: AztecAsyncMultiMap<string, string>;
+  /** scope -> MultiMap(contractAddress -> noteId) */
   #notesByContractAndScope: Map<string, AztecAsyncMultiMap<string, string>>;
+  /** scope -> MultiMap(storageSlot -> noteId) */
   #notesByStorageSlotAndScope: Map<string, AztecAsyncMultiMap<string, string>>;
 
   private constructor(store: AztecAsyncKVStore) {
     this.#store = store;
     this.#notes = store.openMap('notes');
     this.#nullifiedNotes = store.openMap('nullified_notes');
-    this.#nullifierToNoteId = store.openMap('nullifier_to_note');
     this.#nullifiersByBlockNumber = store.openMultiMap('nullifier_to_block_number');
 
     this.#nullifiedNotesToScope = store.openMultiMap('nullified_notes_to_scope');
     this.#nullifiedNotesByContract = store.openMultiMap('nullified_notes_by_contract');
     this.#nullifiedNotesByStorageSlot = store.openMultiMap('nullified_notes_by_storage_slot');
-    this.#nullifiedNotesByNullifier = store.openMap('nullified_notes_by_nullifier');
 
     this.#scopes = store.openMap('scopes');
     this.#notesToScope = store.openMultiMap('notes_to_scope');
@@ -92,9 +100,8 @@ export class NoteStore {
   /**
    * Adds multiple notes to the data provider under the specified scope.
    *
-   * Notes are stored using their
-   *
-   * for efficient retrieval.
+   * Notes are stored using their siloedNullifier as the key, which provides uniqueness. Each note is indexed
+   * by multiple criteria for efficient retrieval.
    *
    * @param notes - Notes to store
    * @param scope - The scope (user/account) under which to store the notes
@@ -106,13 +113,12 @@ export class NoteStore {
      }
 
      for (const dao of notes) {
-        const
-        await this.#notes.set(
-        await this.#notesToScope.set(
-        await this.#nullifierToNoteId.set(dao.siloedNullifier.toString(), noteIndex);
+        const noteId = dao.siloedNullifier.toString();
+        await this.#notes.set(noteId, dao.toBuffer());
+        await this.#notesToScope.set(noteId, scope.toString());
 
-        await this.#notesByContractAndScope.get(scope.toString())!.set(dao.contractAddress.toString(),
-        await this.#notesByStorageSlotAndScope.get(scope.toString())!.set(dao.storageSlot.toString(),
+        await this.#notesByContractAndScope.get(scope.toString())!.set(dao.contractAddress.toString(), noteId);
+        await this.#notesByStorageSlotAndScope.get(scope.toString())!.set(dao.storageSlot.toString(), noteId);
      }
    });
  }
@@ -124,10 +130,12 @@ export class NoteStore {
   * specified block number. It restores any notes that were nullified after the given block
   * and deletes any active notes created after that block.
   *
+   * IMPORTANT: This method must be called within a transaction to ensure atomicity.
+   *
   * @param blockNumber - The new chain tip after a reorg
   * @param synchedBlockNumber - The block number up to which PXE managed to sync before the reorg happened.
   */
-  public async
+  public async rollback(blockNumber: number, synchedBlockNumber: number): Promise<void> {
    await this.#rewindNullifiersAfterBlock(blockNumber, synchedBlockNumber);
    await this.#deleteActiveNotesAfterBlock(blockNumber);
  }
@@ -140,24 +148,21 @@ export class NoteStore {
   *
   * @param blockNumber - Notes created after this block number will be deleted
   */
-  #deleteActiveNotesAfterBlock(blockNumber: number): Promise<void> {
-
-
-
-
-
-
-
-
-
-
-
-    await this.#notesByContractAndScope.get(scope)!.deleteValue(noteDao.contractAddress.toString(), noteIndex);
-    await this.#notesByStorageSlotAndScope.get(scope)!.deleteValue(noteDao.storageSlot.toString(), noteIndex);
-  }
+  async #deleteActiveNotesAfterBlock(blockNumber: number): Promise<void> {
+    const notes = await toArray(this.#notes.valuesAsync());
+    for (const note of notes) {
+      const noteDao = NoteDao.fromBuffer(note);
+      if (noteDao.l2BlockNumber > blockNumber) {
+        const noteId = noteDao.siloedNullifier.toString();
+        await this.#notes.delete(noteId);
+        await this.#notesToScope.delete(noteId);
+        const scopes = await toArray(this.#scopes.keysAsync());
+        for (const scope of scopes) {
+          await this.#notesByContractAndScope.get(scope)!.deleteValue(noteDao.contractAddress.toString(), noteId);
+          await this.#notesByStorageSlotAndScope.get(scope)!.deleteValue(noteDao.storageSlot.toString(), noteId);
        }
      }
-    }
+    }
  }
 
  /**
@@ -171,50 +176,47 @@ export class NoteStore {
   * @param synchedBlockNumber - Upper bound for the block range to process
   */
  async #rewindNullifiersAfterBlock(blockNumber: number, synchedBlockNumber: number): Promise<void> {
-
-
-
-
-
-
-
-
-
-
-
-
-    );
-    const noteDaos = nullifiedNoteBuffers
-      .filter(buffer => buffer != undefined)
-      .map(buffer => NoteDao.fromBuffer(buffer!));
-
-    for (const dao of noteDaos) {
-      const noteIndex = toBufferBE(dao.index, 32).toString('hex');
-      await this.#notes.set(noteIndex, dao.toBuffer());
-      await this.#nullifierToNoteId.set(dao.siloedNullifier.toString(), noteIndex);
+    const noteIdsToReinsert: string[] = [];
+    const currentBlockNumber = blockNumber + 1;
+    for (let i = currentBlockNumber; i <= synchedBlockNumber; i++) {
+      // noteId === siloedNullifier.toString(), so we can use nullifiers directly as noteIds
+      noteIdsToReinsert.push(...(await toArray(this.#nullifiersByBlockNumber.getValuesAsync(i))));
+    }
+    const nullifiedNoteBuffers = await Promise.all(
+      noteIdsToReinsert.map(noteId => this.#nullifiedNotes.getAsync(noteId)),
+    );
+    const noteDaos = nullifiedNoteBuffers
+      .filter(buffer => buffer != undefined)
+      .map(buffer => NoteDao.fromBuffer(buffer!));
 
-
+    for (const dao of noteDaos) {
+      const noteId = dao.siloedNullifier.toString();
 
-
-      // We should never run into this error because notes always have a scope assigned to them - either on initial
-      // insertion via `addNotes` or when removing their nullifiers.
-      throw new Error(`No scopes found for nullified note with index ${noteIndex}`);
-    }
+      const scopes = await toArray(this.#nullifiedNotesToScope.getValuesAsync(noteId));
 
-
-
-
-
-
+      if (scopes.length === 0) {
+        // We should never run into this error because notes always have a scope assigned to them - either on initial
+        // insertion via `addNotes` or when removing their nullifiers.
+        throw new Error(`No scopes found for nullified note with nullifier ${noteId}`);
+      }
 
-
-      await
-
-
-
-
+      for (const scope of scopes) {
+        await Promise.all([
+          this.#notesByContractAndScope.get(scope.toString())!.set(dao.contractAddress.toString(), noteId),
+          this.#notesByStorageSlotAndScope.get(scope.toString())!.set(dao.storageSlot.toString(), noteId),
+          this.#notesToScope.set(noteId, scope),
+        ]);
      }
-
+
+      await Promise.all([
+        this.#notes.set(noteId, dao.toBuffer()),
+        this.#nullifiedNotes.delete(noteId),
+        this.#nullifiedNotesToScope.delete(noteId),
+        this.#nullifiersByBlockNumber.deleteValue(dao.l2BlockNumber, dao.siloedNullifier.toString()),
+        this.#nullifiedNotesByContract.deleteValue(dao.contractAddress.toString(), noteId),
+        this.#nullifiedNotesByStorageSlot.deleteValue(dao.storageSlot.toString(), noteId),
+      ]);
+    }
  }
 
  /**
@@ -332,6 +334,17 @@ export class NoteStore {
      }
    }
 
+    // Sort by block number, then by tx index within block, then by note index within tx
+    deduplicated.sort((a, b) => {
+      if (a.l2BlockNumber !== b.l2BlockNumber) {
+        return a.l2BlockNumber - b.l2BlockNumber;
+      }
+      if (a.txIndexInBlock !== b.txIndexInBlock) {
+        return a.txIndexInBlock - b.txIndexInBlock;
+      }
+      return a.noteIndexInTx - b.noteIndexInTx;
+    });
+
    return deduplicated;
  }
 
@@ -356,25 +369,18 @@ export class NoteStore {
 
      for (const blockScopedNullifier of nullifiers) {
        const { data: nullifier, l2BlockNumber: blockNumber } = blockScopedNullifier;
-        const
+        const noteId = nullifier.toString();
 
-        const
-        if (!
-          // Check if already nullified
-
-          if (alreadyNullified) {
+        const noteBuffer = await this.#notes.getAsync(noteId);
+        if (!noteBuffer) {
+          // Check if already nullified (noteId === siloedNullifier, so we can check #nullifiedNotes directly)
+          if (await this.#nullifiedNotes.hasAsync(noteId)) {
            throw new Error(`Nullifier already applied in applyNullifiers`);
          }
          throw new Error('Nullifier not found in applyNullifiers');
        }
 
-        const
-
-        if (!noteBuffer) {
-          throw new Error('Note not found in applyNullifiers');
-        }
-
-        const noteScopes = await toArray(this.#notesToScope.getValuesAsync(noteIndex));
+        const noteScopes = await toArray(this.#notesToScope.getValuesAsync(noteId));
        if (noteScopes.length === 0) {
          // We should never run into this error because notes always have a scope assigned to them - either on initial
          // insertion via `addNotes` or when removing their nullifiers.
@@ -385,26 +391,23 @@ export class NoteStore {
 
        nullifiedNotes.push(note);
 
-        await this.#notes.delete(
-        await this.#notesToScope.delete(
+        await this.#notes.delete(noteId);
+        await this.#notesToScope.delete(noteId);
 
        const scopes = await toArray(this.#scopes.keysAsync());
 
        for (const scope of scopes) {
-          await this.#notesByContractAndScope.get(scope)!.deleteValue(note.contractAddress.toString(),
-          await this.#notesByStorageSlotAndScope.get(scope)!.deleteValue(note.storageSlot.toString(),
+          await this.#notesByContractAndScope.get(scope)!.deleteValue(note.contractAddress.toString(), noteId);
+          await this.#notesByStorageSlotAndScope.get(scope)!.deleteValue(note.storageSlot.toString(), noteId);
        }
 
        for (const scope of noteScopes) {
-          await this.#nullifiedNotesToScope.set(
+          await this.#nullifiedNotesToScope.set(noteId, scope);
        }
-        await this.#nullifiedNotes.set(
-        await this.#nullifiersByBlockNumber.set(blockNumber,
-        await this.#nullifiedNotesByContract.set(note.contractAddress.toString(),
-        await this.#nullifiedNotesByStorageSlot.set(note.storageSlot.toString(),
-        await this.#nullifiedNotesByNullifier.set(nullifier.toString(), noteIndex);
-
-        await this.#nullifierToNoteId.delete(nullifier.toString());
+        await this.#nullifiedNotes.set(noteId, note.toBuffer());
+        await this.#nullifiersByBlockNumber.set(blockNumber, noteId);
+        await this.#nullifiedNotesByContract.set(note.contractAddress.toString(), noteId);
+        await this.#nullifiedNotesByStorageSlot.set(note.storageSlot.toString(), noteId);
      }
      return nullifiedNotes;
    });
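The sort added to the deduplication path gives callers a deterministic, chain-ordered result. A standalone sketch of that ordering follows; NoteLike is a hypothetical stand-in for the relevant NoteDao fields, not a type exported by the package.

// Order notes by block, then tx position within the block, then note position within the tx.
interface NoteLike {
  l2BlockNumber: number;
  txIndexInBlock: number;
  noteIndexInTx: number;
}

function compareNotes(a: NoteLike, b: NoteLike): number {
  return (
    a.l2BlockNumber - b.l2BlockNumber ||
    a.txIndexInBlock - b.txIndexInBlock ||
    a.noteIndexInTx - b.noteIndexInTx
  );
}

// e.g. notes.sort(compareNotes) yields chain order regardless of the order notes were retrieved in.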