@aztec/pxe 4.0.0-nightly.20260111 → 4.0.0-nightly.20260113
- package/dest/block_synchronizer/block_synchronizer.d.ts +4 -2
- package/dest/block_synchronizer/block_synchronizer.d.ts.map +1 -1
- package/dest/block_synchronizer/block_synchronizer.js +19 -13
- package/dest/contract_function_simulator/oracle/private_execution.d.ts +1 -1
- package/dest/contract_function_simulator/oracle/private_execution.d.ts.map +1 -1
- package/dest/contract_function_simulator/oracle/private_execution.js +1 -2
- package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts +1 -16
- package/dest/contract_function_simulator/oracle/private_execution_oracle.d.ts.map +1 -1
- package/dest/contract_function_simulator/oracle/private_execution_oracle.js +2 -30
- package/dest/contract_function_simulator/oracle/utility_execution_oracle.d.ts +1 -1
- package/dest/contract_function_simulator/oracle/utility_execution_oracle.d.ts.map +1 -1
- package/dest/contract_function_simulator/oracle/utility_execution_oracle.js +15 -13
- package/dest/logs/log_service.d.ts +3 -2
- package/dest/logs/log_service.d.ts.map +1 -1
- package/dest/logs/log_service.js +4 -2
- package/dest/private_kernel/hints/build_private_kernel_reset_private_inputs.d.ts +2 -2
- package/dest/private_kernel/hints/build_private_kernel_reset_private_inputs.d.ts.map +1 -1
- package/dest/private_kernel/hints/build_private_kernel_reset_private_inputs.js +2 -2
- package/dest/private_kernel/private_kernel_execution_prover.d.ts +1 -1
- package/dest/private_kernel/private_kernel_execution_prover.d.ts.map +1 -1
- package/dest/private_kernel/private_kernel_execution_prover.js +3 -4
- package/dest/private_kernel/private_kernel_oracle.d.ts +3 -2
- package/dest/private_kernel/private_kernel_oracle.d.ts.map +1 -1
- package/dest/private_kernel/private_kernel_oracle_impl.d.ts +2 -2
- package/dest/private_kernel/private_kernel_oracle_impl.d.ts.map +1 -1
- package/dest/private_kernel/private_kernel_oracle_impl.js +2 -3
- package/dest/pxe.d.ts +1 -1
- package/dest/pxe.d.ts.map +1 -1
- package/dest/pxe.js +4 -1
- package/dest/storage/capsule_store/capsule_store.d.ts +24 -9
- package/dest/storage/capsule_store/capsule_store.d.ts.map +1 -1
- package/dest/storage/capsule_store/capsule_store.js +132 -23
- package/dest/storage/note_store/note_store.d.ts +4 -2
- package/dest/storage/note_store/note_store.d.ts.map +1 -1
- package/dest/storage/note_store/note_store.js +50 -48
- package/dest/storage/private_event_store/private_event_store.d.ts +4 -2
- package/dest/storage/private_event_store/private_event_store.d.ts.map +1 -1
- package/dest/storage/private_event_store/private_event_store.js +28 -28
- package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts +1 -1
- package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.d.ts.map +1 -1
- package/dest/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.js +1 -1
- package/dest/tagging/sender_sync/utils/get_status_change_of_pending.d.ts +1 -1
- package/dest/tagging/sender_sync/utils/get_status_change_of_pending.d.ts.map +1 -1
- package/dest/tagging/sender_sync/utils/get_status_change_of_pending.js +2 -2
- package/package.json +16 -16
- package/src/block_synchronizer/block_synchronizer.ts +23 -12
- package/src/contract_function_simulator/oracle/private_execution.ts +0 -2
- package/src/contract_function_simulator/oracle/private_execution_oracle.ts +1 -36
- package/src/contract_function_simulator/oracle/utility_execution_oracle.ts +15 -10
- package/src/logs/log_service.ts +2 -1
- package/src/private_kernel/hints/build_private_kernel_reset_private_inputs.ts +1 -2
- package/src/private_kernel/private_kernel_execution_prover.ts +2 -4
- package/src/private_kernel/private_kernel_oracle.ts +2 -1
- package/src/private_kernel/private_kernel_oracle_impl.ts +2 -8
- package/src/pxe.ts +2 -0
- package/src/storage/capsule_store/capsule_store.ts +159 -23
- package/src/storage/note_store/note_store.ts +58 -56
- package/src/storage/private_event_store/private_event_store.ts +33 -33
- package/src/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.ts +4 -1
- package/src/tagging/sender_sync/utils/get_status_change_of_pending.ts +6 -2
package/src/private_kernel/private_kernel_execution_prover.ts
CHANGED

@@ -28,7 +28,6 @@ import {
   type PrivateCallExecutionResult,
   type PrivateExecutionResult,
   TxRequest,
-  collectNoteHashLeafIndexMap,
   collectNoteHashNullifierCounterMap,
   getFinalMinRevertibleSideEffectCounter,
 } from '@aztec/stdlib/tx';

@@ -101,7 +100,6 @@ export class PrivateKernelExecutionProver {

     const executionSteps: PrivateExecutionStep[] = [];

-    const noteHashLeafIndexMap = collectNoteHashLeafIndexMap(executionResult);
     const noteHashNullifierCounterMap = collectNoteHashNullifierCounterMap(executionResult);
     const minRevertibleSideEffectCounter = getFinalMinRevertibleSideEffectCounter(executionResult);
     const splitCounter = isPrivateOnlyTx ? 0 : minRevertibleSideEffectCounter;

@@ -116,7 +114,7 @@ export class PrivateKernelExecutionProver {
     );
     while (resetBuilder.needsReset()) {
       const witgenTimer = new Timer();
-      const privateInputs = await resetBuilder.build(this.oracle
+      const privateInputs = await resetBuilder.build(this.oracle);
       output = generateWitnesses
         ? await this.proofCreator.generateResetOutput(privateInputs)
         : await this.proofCreator.simulateReset(privateInputs);

@@ -224,7 +222,7 @@ export class PrivateKernelExecutionProver {
     );
     while (resetBuilder.needsReset()) {
       const witgenTimer = new Timer();
-      const privateInputs = await resetBuilder.build(this.oracle
+      const privateInputs = await resetBuilder.build(this.oracle);
       output = generateWitnesses
         ? await this.proofCreator.generateResetOutput(privateInputs)
         : await this.proofCreator.simulateReset(privateInputs);
package/src/private_kernel/private_kernel_oracle.ts
CHANGED

@@ -43,7 +43,8 @@ export interface PrivateKernelOracle {

   /**
    * Returns a membership witness with the sibling path and leaf index in our private function indexed merkle tree.
-   */
+   */
+  getNoteHashMembershipWitness(noteHash: Fr): Promise<MembershipWitness<typeof NOTE_HASH_TREE_HEIGHT> | undefined>;

   /**
    * Returns a membership witness with the sibling path and leaf index in our nullifier indexed merkle tree.
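For context, a consumer of this interface might use the new method as in the following hedged sketch; only the method signature above comes from the diff, the surrounding reset-hint code and the relative import path are assumptions:

  // Hypothetical consumer of the new oracle method; only the signature of
  // getNoteHashMembershipWitness is taken from the diff, everything else is illustrative.
  import type { Fr } from '@aztec/foundation/curves/bn254';
  import type { PrivateKernelOracle } from './private_kernel_oracle.js';

  async function buildNoteHashReadHint(oracle: PrivateKernelOracle, noteHash: Fr) {
    const witness = await oracle.getNoteHashMembershipWitness(noteHash);
    if (witness === undefined) {
      // `undefined` signals the note hash is not present in the tree at the oracle's block.
      throw new Error(`No membership witness for note hash ${noteHash.toString()}`);
    }
    return witness; // carries the leaf index and sibling path needed by the reset circuit
  }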
package/src/private_kernel/private_kernel_oracle_impl.ts
CHANGED

@@ -2,7 +2,6 @@ import { NOTE_HASH_TREE_HEIGHT, PUBLIC_DATA_TREE_HEIGHT, VK_TREE_HEIGHT } from '
 import type { Fr } from '@aztec/foundation/curves/bn254';
 import type { GrumpkinScalar, Point } from '@aztec/foundation/curves/grumpkin';
 import { createLogger } from '@aztec/foundation/log';
-import type { Tuple } from '@aztec/foundation/serialize';
 import { MembershipWitness } from '@aztec/foundation/trees';
 import type { KeyStore } from '@aztec/key-store';
 import { getVKIndex, getVKSiblingPath } from '@aztec/noir-protocol-circuits-types/vk-tree';

@@ -70,13 +69,8 @@ export class PrivateKernelOracleImpl implements PrivateKernelOracle {
     return Promise.resolve(new MembershipWitness(VK_TREE_HEIGHT, BigInt(leafIndex), getVKSiblingPath(leafIndex)));
   }

-
-
-    return new MembershipWitness<typeof NOTE_HASH_TREE_HEIGHT>(
-      path.pathSize,
-      leafIndex,
-      path.toFields() as Tuple<Fr, typeof NOTE_HASH_TREE_HEIGHT>,
-    );
+  getNoteHashMembershipWitness(noteHash: Fr): Promise<MembershipWitness<typeof NOTE_HASH_TREE_HEIGHT> | undefined> {
+    return this.node.getNoteHashMembershipWitness(this.blockNumber, noteHash);
   }

   getNullifierMembershipWitness(nullifier: Fr): Promise<NullifierMembershipWitness | undefined> {
|
package/src/pxe.ts
CHANGED
|
@@ -148,6 +148,7 @@ export class PXE {
|
|
|
148
148
|
const tipsStore = new L2TipsKVStore(store, 'pxe');
|
|
149
149
|
const synchronizer = new BlockSynchronizer(
|
|
150
150
|
node,
|
|
151
|
+
store,
|
|
151
152
|
anchorBlockStore,
|
|
152
153
|
noteStore,
|
|
153
154
|
privateEventStore,
|
|
@@ -157,6 +158,7 @@ export class PXE {
|
|
|
157
158
|
);
|
|
158
159
|
|
|
159
160
|
const jobCoordinator = new JobCoordinator(store);
|
|
161
|
+
jobCoordinator.registerStores([capsuleStore]);
|
|
160
162
|
|
|
161
163
|
const debugUtils = new PXEDebugUtils(contractStore, noteStore);
|
|
162
164
|
|
|
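The `registerStores` call wires the capsule store into the job coordinator's staged-commit lifecycle. Below is a minimal sketch of how such a coordinator might drive `StagedStore` implementations. The interface shape is inferred from `CapsuleStore` further down in this diff (`storeName`, `commit`, `discardStaged`); the coordinator internals are assumptions, not the package's actual implementation:

  // Hypothetical sketch of the StagedStore contract inferred from this diff.
  interface StagedStore {
    readonly storeName: string;
    commit(jobId: string): Promise<void>;        // persist staged writes
    discardStaged(jobId: string): Promise<void>; // drop staged writes
  }

  // Assumed coordinator behavior: commit all stores on success, discard on failure.
  class JobCoordinatorSketch {
    #stores: StagedStore[] = [];

    registerStores(stores: StagedStore[]) {
      this.#stores.push(...stores);
    }

    async runJob(jobId: string, job: () => Promise<void>): Promise<void> {
      try {
        await job();
        // The real JobCoordinator reportedly wraps these commits in one KV transaction.
        for (const store of this.#stores) {
          await store.commit(jobId);
        }
      } catch (err) {
        for (const store of this.#stores) {
          await store.discardStaged(jobId);
        }
        throw err;
      }
    }
  }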
package/src/storage/capsule_store/capsule_store.ts
CHANGED

@@ -3,12 +3,21 @@ import { type Logger, createLogger } from '@aztec/foundation/log';
 import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store';
 import type { AztecAddress } from '@aztec/stdlib/aztec-address';

-
+import type { StagedStore } from '../../job_coordinator/job_coordinator.js';
+
+export class CapsuleStore implements StagedStore {
+  readonly storeName = 'capsule';
+
   #store: AztecAsyncKVStore;

   // Arbitrary data stored by contracts. Key is computed as `${contractAddress}:${key}`
   #capsules: AztecAsyncMap<string, Buffer>;

+  // jobId => `${contractAddress}:${key}` => capsule data
+  // when `#stagedCapsules.get('some-job-id').get('${some-contract-address:some-key') === null`,
+  // it signals that the capsule was deleted during the job, so it needs to be deleted on commit
+  #stagedCapsules: Map<string, Map<string, Buffer | null>>;
+
   logger: Logger;

   constructor(store: AztecAsyncKVStore) {
@@ -16,21 +25,120 @@ export class CapsuleStore {

     this.#capsules = this.#store.openMap('capsules');

+    this.#stagedCapsules = new Map();
+
     this.logger = createLogger('pxe:capsule-data-provider');
   }

+  /**
+   * Given a job denoted by `jobId`, it returns the
+   * capsules that said job has interacted with.
+   *
+   * Capsules that haven't been committed to persistence KV storage
+   * are kept in-memory in `#stagedCapsules`, this method provides a convenient
+   * way to access that in-memory collection of data.
+   *
+   * @param jobId
+   * @returns
+   */
+  #getJobStagedCapsules(jobId: string): Map<string, Buffer | null> {
+    let jobStagedCapsules = this.#stagedCapsules.get(jobId);
+    if (!jobStagedCapsules) {
+      jobStagedCapsules = new Map();
+      this.#stagedCapsules.set(jobId, jobStagedCapsules);
+    }
+    return jobStagedCapsules;
+  }
+
+  /**
+   * Reads a capsule's slot from the staged version of the data associated to the given jobId.
+   *
+   * If it is not there, it reads it from the KV store.
+   */
+  async #getFromStage(jobId: string, dbSlotKey: string): Promise<Buffer | null | undefined> {
+    const jobStagedCapsules = this.#getJobStagedCapsules(jobId);
+    let staged: Buffer | null | undefined = jobStagedCapsules.get(dbSlotKey);
+    // Note that if staged === null, we marked it for deletion, so we don't want to
+    // re-read it from DB
+    if (staged === undefined) {
+      // If we don't have a staged version of this dbSlotKey, first we check if there's one in DB
+      staged = await this.#loadCapsuleFromDb(dbSlotKey);
+    }
+    return staged;
+  }
+
+  /**
+   * Writes a capsule to the stage of a job.
+   */
+  #setOnStage(jobId: string, dbSlotKey: string, capsuleData: Buffer) {
+    this.#getJobStagedCapsules(jobId).set(dbSlotKey, capsuleData);
+  }
+
+  /**
+   * Deletes a capsule on the stage of a job. Note the capsule will still
+   * exist in storage until the job is committed.
+   */
+  #deleteOnStage(jobId: string, dbSlotKey: string) {
+    this.#getJobStagedCapsules(jobId).set(dbSlotKey, null);
+  }
+
+  async #loadCapsuleFromDb(dbSlotKey: string): Promise<Buffer | null> {
+    const dataBuffer = await this.#capsules.getAsync(dbSlotKey);
+    if (!dataBuffer) {
+      return null;
+    }
+
+    return dataBuffer;
+  }
+
+  /**
+   * Commits staged data to main storage.
+   * Called by JobCoordinator when a job completes successfully.
+   * Note: JobCoordinator wraps all commits in a single transaction, so we don't
+   * need our own transactionAsync here (and using one would deadlock on IndexedDB).
+   * @param jobId - The jobId identifying which staged data to commit
+   */
+  async commit(jobId: string): Promise<void> {
+    const jobStagedCapsules = this.#getJobStagedCapsules(jobId);
+
+    for (const [key, value] of jobStagedCapsules) {
+      // In the write stage, we represent deleted capsules with null
+      // (as opposed to undefined, which denotes there was never a capsule there to begin with).
+      // So we delete from actual KV store here.
+      if (value === null) {
+        await this.#capsules.delete(key);
+      } else {
+        await this.#capsules.set(key, value);
+      }
+    }
+
+    this.#stagedCapsules.delete(jobId);
+  }
+
+  /**
+   * Discards staged data without committing.
+   */
+  discardStaged(jobId: string): Promise<void> {
+    this.#stagedCapsules.delete(jobId);
+    return Promise.resolve();
+  }
+
   /**
    * Stores arbitrary information in a per-contract non-volatile database, which can later be retrieved with `loadCapsule`.
    * * If data was already stored at this slot, it is overwritten.
    * @param contractAddress - The contract address to scope the data under.
    * @param slot - The slot in the database in which to store the value. Slots need not be contiguous.
    * @param capsule - An array of field elements representing the capsule.
+   * @param jobId - The context in which this store will be visible until PXE decides to persist it to underlying KV store
    * @remarks A capsule is a "blob" of data that is passed to the contract through an oracle. It works similarly
    * to public contract storage in that it's indexed by the contract address and storage slot but instead of the global
    * network state it's backed by local PXE db.
    */
-
-
+  storeCapsule(contractAddress: AztecAddress, slot: Fr, capsule: Fr[], jobId: string) {
+    const dbSlotKey = dbSlotToKey(contractAddress, slot);
+
+    // A store overrides any pre-existing data on the slot
+    this.#setOnStage(jobId, dbSlotKey, Buffer.concat(capsule.map(value => value.toBuffer())));
   }

   /**
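The staged map uses a three-way convention: a `Buffer` is a staged write, `null` is a staged delete, and `undefined` (key absent) means fall through to the persistent store. A standalone model of that convention, using a plain `Map` in place of the KV store (names are illustrative, not the package's API):

  // Illustrative model of the staged-read convention used by CapsuleStore.
  const persisted = new Map<string, Buffer>([['0xabc:1', Buffer.from('old')]]);
  const staged = new Map<string, Buffer | null>();

  function read(key: string): Buffer | null {
    const s = staged.get(key);
    if (s === null) return null;       // staged delete: do NOT fall through to the DB
    if (s !== undefined) return s;     // staged write shadows the persisted value
    return persisted.get(key) ?? null; // nothing staged: read the persistent store
  }

  staged.set('0xabc:1', Buffer.from('new'));
  console.log(read('0xabc:1')?.toString()); // 'new' (staged write shadows 'old')
  staged.set('0xabc:1', null);
  console.log(read('0xabc:1'));             // null (staged delete hides 'old')
  staged.delete('0xabc:1');
  console.log(read('0xabc:1')?.toString()); // 'old' (no staging: persisted value)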
@@ -39,8 +147,8 @@ export class CapsuleStore {
    * @param slot - The slot in the database to read.
    * @returns The stored data or `null` if no data is stored under the slot.
    */
-  async loadCapsule(contractAddress: AztecAddress, slot: Fr): Promise<Fr[] | null> {
-    const dataBuffer = await this.#
+  async loadCapsule(contractAddress: AztecAddress, slot: Fr, jobId: string): Promise<Fr[] | null> {
+    const dataBuffer = await this.#getFromStage(jobId, dbSlotToKey(contractAddress, slot));
     if (!dataBuffer) {
       this.logger.trace(`Data not found for contract ${contractAddress.toString()} and slot ${slot.toString()}`);
       return null;
@@ -57,8 +165,9 @@ export class CapsuleStore {
    * @param contractAddress - The contract address under which the data is scoped.
    * @param slot - The slot in the database to delete.
    */
-
-
+  deleteCapsule(contractAddress: AztecAddress, slot: Fr, jobId: string) {
+    // When we commit this, we will interpret null as a deletion, so we'll propagate the delete to the KV store
+    this.#deleteOnStage(jobId, dbSlotToKey(contractAddress, slot));
   }

   /**
@@ -72,13 +181,22 @@ export class CapsuleStore {
    * @param dstSlot - The first slot to copy to.
    * @param numEntries - The number of entries to copy.
    */
-  copyCapsule(
+  copyCapsule(
+    contractAddress: AztecAddress,
+    srcSlot: Fr,
+    dstSlot: Fr,
+    numEntries: number,
+    jobId: string,
+  ): Promise<void> {
+    // This transactional context gives us "copy atomicity":
+    // there shouldn't be concurrent writes to what's being copied here.
+    // Equally important: this in practice is expected to perform thousands of DB operations
+    // and not using a transaction here would heavily impact performance.
     return this.#store.transactionAsync(async () => {
       // In order to support overlapping source and destination regions, we need to check the relative positions of source
       // and destination. If destination is ahead of source, then by the time we overwrite source elements using forward
       // indexes we'll have already read those. On the contrary, if source is ahead of destination we need to use backward
       // indexes to avoid reading elements that've been overwritten.
-
       const indexes = Array.from(Array(numEntries).keys());
       if (srcSlot.lt(dstSlot)) {
         indexes.reverse();
@@ -88,12 +206,12 @@ export class CapsuleStore {
         const currentSrcSlot = dbSlotToKey(contractAddress, srcSlot.add(new Fr(i)));
         const currentDstSlot = dbSlotToKey(contractAddress, dstSlot.add(new Fr(i)));

-        const toCopy = await this.#
+        const toCopy = await this.#getFromStage(jobId, currentSrcSlot);
         if (!toCopy) {
           throw new Error(`Attempted to copy empty slot ${currentSrcSlot} for contract ${contractAddress.toString()}`);
         }

-
+        this.#setOnStage(jobId, currentDstSlot, toCopy);
       }
     });
   }
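The direction rule in `copyCapsule` is the same one `memmove` uses: when the destination overlaps ahead of the source, copy backwards so every element is read before it is overwritten. A self-contained demonstration on a plain array (illustrative only):

  // memmove-style overlapping copy: choose iteration order by relative position.
  function copyRange(data: string[], src: number, dst: number, n: number): void {
    const indexes = Array.from(Array(n).keys());
    if (src < dst) {
      // Destination is ahead: forward iteration would overwrite unread source
      // elements, so walk backwards instead.
      indexes.reverse();
    }
    for (const i of indexes) {
      data[dst + i] = data[src + i];
    }
  }

  const data = ['a', 'b', 'c', 'd', '-', '-'];
  copyRange(data, 0, 2, 4); // overlapping regions: src [0..3], dst [2..5]
  console.log(data);        // ['a', 'b', 'a', 'b', 'c', 'd'], the correct copy
  // A naive forward loop would have produced ['a', 'b', 'a', 'b', 'a', 'b'].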
@@ -106,35 +224,45 @@ export class CapsuleStore {
    * @param baseSlot - The slot where the array length is stored
    * @param content - Array of capsule data to append
    */
-  appendToCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, content: Fr[][]): Promise<void> {
+  appendToCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, content: Fr[][], jobId: string): Promise<void> {
+    // We wrap this in a transaction to serialize concurrent calls from Promise.all.
+    // Without this, concurrent appends to the same array could race: both read length=0,
+    // both write at the same slots, one overwrites the other.
+    // Equally important: this in practice is expected to perform thousands of DB operations
+    // and not using a transaction here would heavily impact performance.
     return this.#store.transactionAsync(async () => {
       // Load current length, defaulting to 0 if not found
-      const lengthData = await this.loadCapsule(contractAddress, baseSlot);
+      const lengthData = await this.loadCapsule(contractAddress, baseSlot, jobId);
       const currentLength = lengthData ? lengthData[0].toNumber() : 0;

       // Store each capsule at consecutive slots after baseSlot + 1 + currentLength
       for (let i = 0; i < content.length; i++) {
         const nextSlot = arraySlot(baseSlot, currentLength + i);
-
+        this.storeCapsule(contractAddress, nextSlot, content[i], jobId);
       }

       // Update length to include all new capsules
       const newLength = currentLength + content.length;
-
+      this.storeCapsule(contractAddress, baseSlot, [new Fr(newLength)], jobId);
     });
   }

-  readCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr): Promise<Fr[][]> {
+  readCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, jobId: string): Promise<Fr[][]> {
+    // I'm leaving this transactional context here though because I'm assuming this
+    // gives us "read array atomicity": there shouldn't be concurrent writes to what's being copied
+    // here.
+    // This is one point we should revisit in the future if we want to relax the concurrency
+    // of jobs: different calls running concurrently on the same contract may cause trouble.
     return this.#store.transactionAsync(async () => {
       // Load length, defaulting to 0 if not found
-      const maybeLength = await this.loadCapsule(contractAddress, baseSlot);
+      const maybeLength = await this.loadCapsule(contractAddress, baseSlot, jobId);
       const length = maybeLength ? maybeLength[0].toBigInt() : 0n;

       const values: Fr[][] = [];

       // Read each capsule at consecutive slots after baseSlot
       for (let i = 0; i < length; i++) {
-        const currentValue = await this.loadCapsule(contractAddress, arraySlot(baseSlot, i));
+        const currentValue = await this.loadCapsule(contractAddress, arraySlot(baseSlot, i), jobId);
         if (currentValue == undefined) {
           throw new Error(
             `Expected non-empty value at capsule array in base slot ${baseSlot} at index ${i} for contract ${contractAddress}`,
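The capsule-array helpers use a simple layout: the length lives at `baseSlot` and element `i` lives at `arraySlot(baseSlot, i)`. The comments above place entries at consecutive slots after `baseSlot + 1 + currentLength`, so a plausible reading of `arraySlot` is sketched below; the actual helper body is not shown in this diff:

  // Assumed slot layout for capsule arrays: length at the base slot, entries right after.
  function arraySlot(baseSlot: bigint, index: number): bigint {
    return baseSlot + 1n + BigInt(index); // inferred from "slots after baseSlot + 1 + currentLength"
  }

  // Appending one capsule and then two more at baseSlot 100 touches these slots:
  //   slot 100 -> length (1, then 3)
  //   slot 101 -> first capsule
  //   slot 102 -> second capsule
  //   slot 103 -> third capsule
  console.log(arraySlot(100n, 0)); // 101n
  console.log(arraySlot(100n, 2)); // 103n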
@@ -148,23 +276,31 @@ export class CapsuleStore {
     });
   }

-  setCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, content: Fr[][]) {
+  setCapsuleArray(contractAddress: AztecAddress, baseSlot: Fr, content: Fr[][], jobId: string) {
+    // This transactional context in theory isn't so critical now because we aren't
+    // writing to DB so if there's exceptions midway and it blows up, no visible impact
+    // to persistent storage will happen.
+    // I'm leaving this transactional context here though because I'm assuming this
+    // gives us "write array atomicity": there shouldn't be concurrent writes to what's being copied
+    // here.
+    // This is one point we should revisit in the future if we want to relax the concurrency
+    // of jobs: different calls running concurrently on the same contract may cause trouble.
     return this.#store.transactionAsync(async () => {
       // Load current length, defaulting to 0 if not found
-      const maybeLength = await this.loadCapsule(contractAddress, baseSlot);
+      const maybeLength = await this.loadCapsule(contractAddress, baseSlot, jobId);
       const originalLength = maybeLength ? maybeLength[0].toNumber() : 0;

       // Set the new length
-
+      this.storeCapsule(contractAddress, baseSlot, [new Fr(content.length)], jobId);

       // Store the new content, possibly overwriting existing values
       for (let i = 0; i < content.length; i++) {
-
+        this.storeCapsule(contractAddress, arraySlot(baseSlot, i), content[i], jobId);
       }

       // Clear any stragglers
       for (let i = content.length; i < originalLength; i++) {
-
+        this.deleteCapsule(contractAddress, arraySlot(baseSlot, i), jobId);
       }
     });
   }
package/src/storage/note_store/note_store.ts
CHANGED

@@ -124,10 +124,12 @@ export class NoteStore {
    * specified block number. It restores any notes that were nullified after the given block
    * and deletes any active notes created after that block.
    *
+   * IMPORTANT: This method must be called within a transaction to ensure atomicity.
+   *
    * @param blockNumber - The new chain tip after a reorg
    * @param synchedBlockNumber - The block number up to which PXE managed to sync before the reorg happened.
    */
-  public async
+  public async rollback(blockNumber: number, synchedBlockNumber: number): Promise<void> {
     await this.#rewindNullifiersAfterBlock(blockNumber, synchedBlockNumber);
     await this.#deleteActiveNotesAfterBlock(blockNumber);
   }
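Note the ordering inside `rollback`: nullifiers are rewound first, then notes created past the new tip are deleted. A note that was both created and nullified after the reorg point is therefore first restored and then removed, leaving no orphaned entries. A schematic restatement of that ordering (function parameters stand in for the private methods):

  // Schematic ordering of NoteStore.rollback, per the diff: un-nullify first,
  // then delete notes that should not exist at the new tip at all.
  async function rollbackSketch(
    rewindNullifiersAfterBlock: (newTip: number, synchedTip: number) => Promise<void>,
    deleteActiveNotesAfterBlock: (newTip: number) => Promise<void>,
    newTip: number,
    synchedTip: number,
  ): Promise<void> {
    await rewindNullifiersAfterBlock(newTip, synchedTip); // restore notes nullified after newTip
    await deleteActiveNotesAfterBlock(newTip);            // drop notes created after newTip
  }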
@@ -140,24 +142,22 @@ export class NoteStore {
    *
    * @param blockNumber - Notes created after this block number will be deleted
    */
-  #deleteActiveNotesAfterBlock(blockNumber: number): Promise<void> {
-
-
-
-
-
-
-
-
-
-
-
-
-      await this.#notesByStorageSlotAndScope.get(scope)!.deleteValue(noteDao.storageSlot.toString(), noteIndex);
-    }
+  async #deleteActiveNotesAfterBlock(blockNumber: number): Promise<void> {
+    const notes = await toArray(this.#notes.valuesAsync());
+    for (const note of notes) {
+      const noteDao = NoteDao.fromBuffer(note);
+      if (noteDao.l2BlockNumber > blockNumber) {
+        const noteIndex = toBufferBE(noteDao.index, 32).toString('hex');
+        await this.#notes.delete(noteIndex);
+        await this.#notesToScope.delete(noteIndex);
+        await this.#nullifierToNoteId.delete(noteDao.siloedNullifier.toString());
+        const scopes = await toArray(this.#scopes.keysAsync());
+        for (const scope of scopes) {
+          await this.#notesByContractAndScope.get(scope)!.deleteValue(noteDao.contractAddress.toString(), noteIndex);
+          await this.#notesByStorageSlotAndScope.get(scope)!.deleteValue(noteDao.storageSlot.toString(), noteIndex);
         }
       }
-    }
+    }
   }

   /**
@@ -171,50 +171,52 @@ export class NoteStore {
    * @param synchedBlockNumber - Upper bound for the block range to process
    */
   async #rewindNullifiersAfterBlock(blockNumber: number, synchedBlockNumber: number): Promise<void> {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      .map(buffer => NoteDao.fromBuffer(buffer!));
-
-    for (const dao of noteDaos) {
-      const noteIndex = toBufferBE(dao.index, 32).toString('hex');
-      await this.#notes.set(noteIndex, dao.toBuffer());
-      await this.#nullifierToNoteId.set(dao.siloedNullifier.toString(), noteIndex);
+    const nullifiersToUndo: string[] = [];
+    const currentBlockNumber = blockNumber + 1;
+    for (let i = currentBlockNumber; i <= synchedBlockNumber; i++) {
+      nullifiersToUndo.push(...(await toArray(this.#nullifiersByBlockNumber.getValuesAsync(i))));
+    }
+    const notesIndexesToReinsert = await Promise.all(
+      nullifiersToUndo.map(nullifier => this.#nullifiedNotesByNullifier.getAsync(nullifier)),
+    );
+    const notNullNoteIndexes = notesIndexesToReinsert.filter(noteIndex => noteIndex != undefined);
+    const nullifiedNoteBuffers = await Promise.all(
+      notNullNoteIndexes.map(noteIndex => this.#nullifiedNotes.getAsync(noteIndex!)),
+    );
+    const noteDaos = nullifiedNoteBuffers
+      .filter(buffer => buffer != undefined)
+      .map(buffer => NoteDao.fromBuffer(buffer!));

-
+    for (const dao of noteDaos) {
+      const noteIndex = toBufferBE(dao.index, 32).toString('hex');

-
-      // We should never run into this error because notes always have a scope assigned to them - either on initial
-      // insertion via `addNotes` or when removing their nullifiers.
-      throw new Error(`No scopes found for nullified note with index ${noteIndex}`);
-    }
+      const scopes = await toArray(this.#nullifiedNotesToScope.getValuesAsync(noteIndex));

-
-
-
-
-
+      if (scopes.length === 0) {
+        // We should never run into this error because notes always have a scope assigned to them - either on initial
+        // insertion via `addNotes` or when removing their nullifiers.
+        throw new Error(`No scopes found for nullified note with index ${noteIndex}`);
+      }

-
-      await
-
-
-
-
+      for (const scope of scopes) {
+        await Promise.all([
+          this.#notesByContractAndScope.get(scope.toString())!.set(dao.contractAddress.toString(), noteIndex),
+          this.#notesByStorageSlotAndScope.get(scope.toString())!.set(dao.storageSlot.toString(), noteIndex),
+          this.#notesToScope.set(noteIndex, scope),
+        ]);
       }
-
+
+      await Promise.all([
+        this.#notes.set(noteIndex, dao.toBuffer()),
+        this.#nullifierToNoteId.set(dao.siloedNullifier.toString(), noteIndex),
+        this.#nullifiedNotes.delete(noteIndex),
+        this.#nullifiedNotesToScope.delete(noteIndex),
+        this.#nullifiersByBlockNumber.deleteValue(dao.l2BlockNumber, dao.siloedNullifier.toString()),
+        this.#nullifiedNotesByContract.deleteValue(dao.contractAddress.toString(), noteIndex),
+        this.#nullifiedNotesByStorageSlot.deleteValue(dao.storageSlot.toString(), noteIndex),
+        this.#nullifiedNotesByNullifier.delete(dao.siloedNullifier.toString()),
+      ]);
+    }
   }

   /**
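In outline, the rewind collects every nullifier emitted in blocks `(blockNumber, synchedBlockNumber]`, resolves each back to the note it nullified, and moves those notes from the nullified-side maps back into the active maps. A simplified, self-contained model of that flow, with plain `Map`s standing in for the KV-store maps (names are illustrative):

  // Simplified model of #rewindNullifiersAfterBlock: nullifiers are indexed by
  // block, nullified notes are indexed by nullifier, and a rewind replays that
  // mapping in reverse for every block past the new tip.
  type Note = { index: string; nullifier: string };

  const nullifiersByBlock = new Map<number, string[]>([[11, ['n1']], [12, ['n2']]]);
  const nullifiedNotes = new Map<string, Note>([
    ['n1', { index: 'note-1', nullifier: 'n1' }],
    ['n2', { index: 'note-2', nullifier: 'n2' }],
  ]);
  const activeNotes = new Map<string, Note>();

  function rewindNullifiersAfterBlock(blockNumber: number, synchedBlockNumber: number) {
    for (let block = blockNumber + 1; block <= synchedBlockNumber; block++) {
      for (const nullifier of nullifiersByBlock.get(block) ?? []) {
        const note = nullifiedNotes.get(nullifier);
        if (note) {
          activeNotes.set(note.index, note); // re-insert into the active set
          nullifiedNotes.delete(nullifier);  // and drop the nullified-side entry
        }
      }
      nullifiersByBlock.delete(block);
    }
  }

  rewindNullifiersAfterBlock(10, 12);
  console.log([...activeNotes.keys()]); // ['note-1', 'note-2']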
package/src/storage/private_event_store/private_event_store.ts
CHANGED

@@ -174,43 +174,43 @@ export class PrivateEventStore {
   /**
    * Rolls back private events that were stored after a given `blockNumber` and up to `synchedBlockNumber` (the block
    * number up to which PXE managed to sync before the reorg happened).
+   *
+   * IMPORTANT: This method must be called within a transaction to ensure atomicity.
    */
-  public async
-
-
-
-
-
-
-
-
-
-
-
-      throw new Error(`Event log not found for eventCommitmentIndex ${eventCommitmentIndex}`);
-    }
-
-    await this.#eventLogs.delete(eventCommitmentIndex);
-    await this.#seenLogs.delete(eventCommitmentIndex);
-
-    // Update #eventsByContractScopeSelector using the stored lookupKey
-    const existingIndices = await this.#eventsByContractScopeSelector.getAsync(entry.lookupKey);
-    if (!existingIndices || existingIndices.length === 0) {
-      throw new Error(`No indices found in #eventsByContractScopeSelector for key ${entry.lookupKey}`);
-    }
-    const filteredIndices = existingIndices.filter(idx => idx !== eventCommitmentIndex);
-    if (filteredIndices.length === 0) {
-      await this.#eventsByContractScopeSelector.delete(entry.lookupKey);
-    } else {
-      await this.#eventsByContractScopeSelector.set(entry.lookupKey, filteredIndices);
-    }
-
-    removedCount++;
+  public async rollback(blockNumber: number, synchedBlockNumber: number): Promise<void> {
+    let removedCount = 0;
+
+    for (let block = blockNumber + 1; block <= synchedBlockNumber; block++) {
+      const indices = await this.#eventsByBlockNumber.getAsync(block);
+      if (indices) {
+        await this.#eventsByBlockNumber.delete(block);
+
+        for (const eventCommitmentIndex of indices) {
+          const entry = await this.#eventLogs.getAsync(eventCommitmentIndex);
+          if (!entry) {
+            throw new Error(`Event log not found for eventCommitmentIndex ${eventCommitmentIndex}`);
           }
+
+          await this.#eventLogs.delete(eventCommitmentIndex);
+          await this.#seenLogs.delete(eventCommitmentIndex);
+
+          // Update #eventsByContractScopeSelector using the stored lookupKey
+          const existingIndices = await this.#eventsByContractScopeSelector.getAsync(entry.lookupKey);
+          if (!existingIndices || existingIndices.length === 0) {
+            throw new Error(`No indices found in #eventsByContractScopeSelector for key ${entry.lookupKey}`);
+          }
+          const filteredIndices = existingIndices.filter(idx => idx !== eventCommitmentIndex);
+          if (filteredIndices.length === 0) {
+            await this.#eventsByContractScopeSelector.delete(entry.lookupKey);
+          } else {
+            await this.#eventsByContractScopeSelector.set(entry.lookupKey, filteredIndices);
+          }
+
+          removedCount++;
         }
       }
+    }

-
-    });
+    this.logger.verbose(`Rolled back ${removedCount} private events after block ${blockNumber}`);
   }
 }
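The `#eventsByContractScopeSelector` cleanup follows a common one-to-many index pattern: filter the id out of the bucket and delete the key entirely once the bucket empties, so no key is left pointing at an empty list. A generic sketch of that pattern:

  // Remove `id` from a one-to-many index bucket, dropping empty buckets.
  function removeFromIndex(index: Map<string, number[]>, key: string, id: number): void {
    const bucket = index.get(key);
    if (!bucket) return;
    const filtered = bucket.filter(existing => existing !== id);
    if (filtered.length === 0) {
      index.delete(key);        // no ids left: remove the key itself
    } else {
      index.set(key, filtered); // otherwise store the pruned bucket
    }
  }

  const index = new Map<string, number[]>([['contract:selector', [7, 9]]]);
  removeFromIndex(index, 'contract:selector', 7);
  console.log(index.get('contract:selector')); // [9]
  removeFromIndex(index, 'contract:selector', 9);
  console.log(index.has('contract:selector')); // false, empty bucket deleted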
package/src/tagging/recipient_sync/load_private_logs_for_sender_recipient_pair.ts
CHANGED

@@ -68,7 +68,10 @@ export async function loadPrivateLogsForSenderRecipientPair(
       throw new Error('Node failed to return latest block header when syncing logs');
     }

-    [finalizedBlockNumber, currentTimestamp] = [
+    [finalizedBlockNumber, currentTimestamp] = [
+      l2Tips.finalized.block.number,
+      latestBlockHeader.globalVariables.timestamp,
+    ];
   }

   let start: number, end: number;
package/src/tagging/sender_sync/utils/get_status_change_of_pending.ts
CHANGED

@@ -10,7 +10,7 @@ export async function getStatusChangeOfPending(
   aztecNode: AztecNode,
 ): Promise<{ txHashesToFinalize: TxHash[]; txHashesToDrop: TxHash[] }> {
   // Get receipts for all pending tx hashes and the finalized block number.
-  const [receipts,
+  const [receipts, tips] = await Promise.all([
     Promise.all(pending.map(pendingTxHash => aztecNode.getTxReceipt(pendingTxHash))),
     aztecNode.getL2Tips(),
   ]);
@@ -22,7 +22,11 @@ export async function getStatusChangeOfPending(
     const receipt = receipts[i];
     const txHash = pending[i];

-    if (
+    if (
+      receipt.status === TxStatus.SUCCESS &&
+      receipt.blockNumber &&
+      receipt.blockNumber <= tips.finalized.block.number
+    ) {
       // Tx has been included in a block and the corresponding block is finalized --> we mark the indexes as
       // finalized.
       txHashesToFinalize.push(txHash);
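The finalization test above gates on three things: the receipt reports success, a block number is present, and that block is at or below the finalized tip. A self-contained sketch of the same partitioning logic follows (types simplified; it mirrors the condition, not the package's full function):

  // Simplified partition of pending txs into "finalize" vs "still pending".
  type Receipt = { status: 'success' | 'dropped' | 'pending'; blockNumber?: number };

  function partitionPending(
    receipts: Map<string, Receipt>,
    finalizedBlockNumber: number,
  ): { toFinalize: string[]; stillPending: string[] } {
    const toFinalize: string[] = [];
    const stillPending: string[] = [];
    for (const [txHash, receipt] of receipts) {
      // Mirrors the diff's check: success + included + block already finalized.
      if (
        receipt.status === 'success' &&
        receipt.blockNumber !== undefined &&
        receipt.blockNumber <= finalizedBlockNumber
      ) {
        toFinalize.push(txHash);
      } else {
        stillPending.push(txHash);
      }
    }
    return { toFinalize, stillPending };
  }

  const receipts = new Map<string, Receipt>([
    ['0xaa', { status: 'success', blockNumber: 90 }],
    ['0xbb', { status: 'success', blockNumber: 105 }], // included but not yet finalized
    ['0xcc', { status: 'pending' }],
  ]);
  console.log(partitionPending(receipts, 100)); // finalize ['0xaa']; still pending ['0xbb', '0xcc']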