@aztec/p2p 3.0.0-rc.5 → 4.0.0-nightly.20260107
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/client/factory.d.ts +2 -2
- package/dest/client/factory.d.ts.map +1 -1
- package/dest/client/factory.js +2 -3
- package/dest/client/p2p_client.d.ts +2 -2
- package/dest/client/p2p_client.d.ts.map +1 -1
- package/dest/client/p2p_client.js +395 -21
- package/dest/config.d.ts +4 -7
- package/dest/config.d.ts.map +1 -1
- package/dest/config.js +6 -9
- package/dest/mem_pools/instrumentation.d.ts +7 -1
- package/dest/mem_pools/instrumentation.d.ts.map +1 -1
- package/dest/mem_pools/instrumentation.js +29 -2
- package/dest/mem_pools/interface.d.ts +3 -4
- package/dest/mem_pools/interface.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts +28 -24
- package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.js +261 -323
- package/dest/mem_pools/tx_pool/eviction/eviction_manager.d.ts +18 -0
- package/dest/mem_pools/tx_pool/eviction/eviction_manager.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool/eviction/eviction_manager.js +56 -0
- package/dest/mem_pools/tx_pool/eviction/eviction_strategy.d.ts +83 -0
- package/dest/mem_pools/tx_pool/eviction/eviction_strategy.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool/eviction/eviction_strategy.js +5 -0
- package/dest/mem_pools/tx_pool/eviction/insufficient_fee_payer_balance_rule.d.ts +15 -0
- package/dest/mem_pools/tx_pool/eviction/insufficient_fee_payer_balance_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool/eviction/insufficient_fee_payer_balance_rule.js +88 -0
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.d.ts +17 -0
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.js +84 -0
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.d.ts +19 -0
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.js +76 -0
- package/dest/mem_pools/tx_pool/eviction/low_priority_eviction_rule.d.ts +26 -0
- package/dest/mem_pools/tx_pool/eviction/low_priority_eviction_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool/eviction/low_priority_eviction_rule.js +84 -0
- package/dest/mem_pools/tx_pool/index.d.ts +1 -2
- package/dest/mem_pools/tx_pool/index.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/index.js +0 -1
- package/dest/mem_pools/tx_pool/priority.d.ts +5 -1
- package/dest/mem_pools/tx_pool/priority.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/priority.js +6 -1
- package/dest/mem_pools/tx_pool/tx_pool.d.ts +8 -4
- package/dest/mem_pools/tx_pool/tx_pool.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/tx_pool_test_suite.d.ts +1 -1
- package/dest/mem_pools/tx_pool/tx_pool_test_suite.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/tx_pool_test_suite.js +25 -20
- package/dest/services/libp2p/libp2p_service.d.ts +4 -4
- package/dest/services/libp2p/libp2p_service.d.ts.map +1 -1
- package/dest/services/libp2p/libp2p_service.js +447 -64
- package/dest/services/peer-manager/metrics.d.ts +6 -1
- package/dest/services/peer-manager/metrics.d.ts.map +1 -1
- package/dest/services/peer-manager/metrics.js +17 -0
- package/dest/services/peer-manager/peer_manager.d.ts +1 -1
- package/dest/services/peer-manager/peer_manager.d.ts.map +1 -1
- package/dest/services/peer-manager/peer_manager.js +385 -9
- package/dest/services/reqresp/protocols/tx.d.ts +2 -3
- package/dest/services/reqresp/protocols/tx.d.ts.map +1 -1
- package/dest/services/reqresp/reqresp.js +402 -24
- package/dest/services/tx_provider.d.ts +2 -1
- package/dest/services/tx_provider.d.ts.map +1 -1
- package/dest/services/tx_provider.js +11 -2
- package/dest/services/tx_provider_instrumentation.d.ts +5 -2
- package/dest/services/tx_provider_instrumentation.d.ts.map +1 -1
- package/dest/services/tx_provider_instrumentation.js +14 -1
- package/dest/test-helpers/reqresp-nodes.d.ts +2 -2
- package/dest/test-helpers/reqresp-nodes.d.ts.map +1 -1
- package/dest/testbench/p2p_client_testbench_worker.js +1 -0
- package/package.json +14 -14
- package/src/client/factory.ts +5 -10
- package/src/client/p2p_client.ts +12 -17
- package/src/config.ts +8 -14
- package/src/mem_pools/instrumentation.ts +33 -0
- package/src/mem_pools/interface.ts +2 -4
- package/src/mem_pools/tx_pool/README.md +255 -0
- package/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts +308 -368
- package/src/mem_pools/tx_pool/eviction/eviction_manager.ts +71 -0
- package/src/mem_pools/tx_pool/eviction/eviction_strategy.ts +93 -0
- package/src/mem_pools/tx_pool/eviction/insufficient_fee_payer_balance_rule.ts +108 -0
- package/src/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.ts +104 -0
- package/src/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.ts +91 -0
- package/src/mem_pools/tx_pool/eviction/low_priority_eviction_rule.ts +106 -0
- package/src/mem_pools/tx_pool/index.ts +0 -1
- package/src/mem_pools/tx_pool/priority.ts +8 -1
- package/src/mem_pools/tx_pool/tx_pool.ts +8 -3
- package/src/mem_pools/tx_pool/tx_pool_test_suite.ts +18 -13
- package/src/services/libp2p/libp2p_service.ts +12 -17
- package/src/services/peer-manager/metrics.ts +22 -0
- package/src/services/peer-manager/peer_manager.ts +2 -0
- package/src/services/reqresp/protocols/tx.ts +1 -2
- package/src/services/tx_provider.ts +17 -2
- package/src/services/tx_provider_instrumentation.ts +19 -2
- package/src/test-helpers/mock-pubsub.ts +1 -1
- package/src/test-helpers/reqresp-nodes.ts +1 -1
- package/src/testbench/p2p_client_testbench_worker.ts +2 -1
- package/dest/mem_pools/tx_pool/memory_tx_pool.d.ts +0 -81
- package/dest/mem_pools/tx_pool/memory_tx_pool.d.ts.map +0 -1
- package/dest/mem_pools/tx_pool/memory_tx_pool.js +0 -239
- package/src/mem_pools/tx_pool/memory_tx_pool.ts +0 -285
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@aztec/p2p",
|
|
3
|
-
"version": "
|
|
3
|
+
"version": "4.0.0-nightly.20260107",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"exports": {
|
|
6
6
|
".": "./dest/index.js",
|
|
@@ -67,17 +67,17 @@
|
|
|
67
67
|
]
|
|
68
68
|
},
|
|
69
69
|
"dependencies": {
|
|
70
|
-
"@aztec/constants": "
|
|
71
|
-
"@aztec/epoch-cache": "
|
|
72
|
-
"@aztec/ethereum": "
|
|
73
|
-
"@aztec/foundation": "
|
|
74
|
-
"@aztec/kv-store": "
|
|
75
|
-
"@aztec/noir-contracts.js": "
|
|
76
|
-
"@aztec/noir-protocol-circuits-types": "
|
|
77
|
-
"@aztec/protocol-contracts": "
|
|
78
|
-
"@aztec/simulator": "
|
|
79
|
-
"@aztec/stdlib": "
|
|
80
|
-
"@aztec/telemetry-client": "
|
|
70
|
+
"@aztec/constants": "4.0.0-nightly.20260107",
|
|
71
|
+
"@aztec/epoch-cache": "4.0.0-nightly.20260107",
|
|
72
|
+
"@aztec/ethereum": "4.0.0-nightly.20260107",
|
|
73
|
+
"@aztec/foundation": "4.0.0-nightly.20260107",
|
|
74
|
+
"@aztec/kv-store": "4.0.0-nightly.20260107",
|
|
75
|
+
"@aztec/noir-contracts.js": "4.0.0-nightly.20260107",
|
|
76
|
+
"@aztec/noir-protocol-circuits-types": "4.0.0-nightly.20260107",
|
|
77
|
+
"@aztec/protocol-contracts": "4.0.0-nightly.20260107",
|
|
78
|
+
"@aztec/simulator": "4.0.0-nightly.20260107",
|
|
79
|
+
"@aztec/stdlib": "4.0.0-nightly.20260107",
|
|
80
|
+
"@aztec/telemetry-client": "4.0.0-nightly.20260107",
|
|
81
81
|
"@chainsafe/libp2p-gossipsub": "13.0.0",
|
|
82
82
|
"@chainsafe/libp2p-noise": "^15.0.0",
|
|
83
83
|
"@chainsafe/libp2p-yamux": "^6.0.2",
|
|
@@ -104,8 +104,8 @@
|
|
|
104
104
|
"xxhash-wasm": "^1.1.0"
|
|
105
105
|
},
|
|
106
106
|
"devDependencies": {
|
|
107
|
-
"@aztec/archiver": "
|
|
108
|
-
"@aztec/world-state": "
|
|
107
|
+
"@aztec/archiver": "4.0.0-nightly.20260107",
|
|
108
|
+
"@aztec/world-state": "4.0.0-nightly.20260107",
|
|
109
109
|
"@jest/globals": "^30.0.0",
|
|
110
110
|
"@types/jest": "^30.0.0",
|
|
111
111
|
"@types/node": "^22.15.17",
|
package/src/client/factory.ts
CHANGED
|
@@ -26,7 +26,7 @@ import { configureP2PClientAddresses, createLibP2PPeerIdFromPrivateKey, getPeerI
|
|
|
26
26
|
export type P2PClientDeps<T extends P2PClientType> = {
|
|
27
27
|
txPool?: TxPool;
|
|
28
28
|
store?: AztecAsyncKVStore;
|
|
29
|
-
attestationPool?:
|
|
29
|
+
attestationPool?: AttestationPool;
|
|
30
30
|
logger?: Logger;
|
|
31
31
|
txCollectionNodeSources?: TxSource[];
|
|
32
32
|
p2pServiceFactory?: (...args: Parameters<(typeof LibP2PService)['new']>) => Promise<LibP2PService<T>>;
|
|
@@ -73,19 +73,14 @@ export async function createP2PClient<T extends P2PClientType>(
|
|
|
73
73
|
);
|
|
74
74
|
const l1Constants = await archiver.getL1Constants();
|
|
75
75
|
|
|
76
|
-
const mempools: MemPools
|
|
76
|
+
const mempools: MemPools = {
|
|
77
77
|
txPool:
|
|
78
78
|
deps.txPool ??
|
|
79
79
|
new AztecKVTxPool(store, archive, worldStateSynchronizer, telemetry, {
|
|
80
|
-
|
|
80
|
+
maxPendingTxCount: config.maxPendingTxCount,
|
|
81
81
|
archivedTxLimit: config.archivedTxLimit,
|
|
82
82
|
}),
|
|
83
|
-
attestationPool:
|
|
84
|
-
clientType === P2PClientType.Full
|
|
85
|
-
? ((deps.attestationPool ?? new KvAttestationPool(attestationStore, telemetry)) as T extends P2PClientType.Full
|
|
86
|
-
? AttestationPool
|
|
87
|
-
: undefined)
|
|
88
|
-
: undefined,
|
|
83
|
+
attestationPool: deps.attestationPool ?? new KvAttestationPool(attestationStore, telemetry),
|
|
89
84
|
};
|
|
90
85
|
|
|
91
86
|
const p2pService = await createP2PService<T>(
|
|
@@ -147,7 +142,7 @@ async function createP2PService<T extends P2PClientType>(
|
|
|
147
142
|
epochCache: EpochCacheInterface,
|
|
148
143
|
store: AztecAsyncKVStore,
|
|
149
144
|
peerStore: AztecLMDBStoreV2,
|
|
150
|
-
mempools: MemPools
|
|
145
|
+
mempools: MemPools,
|
|
151
146
|
p2pServiceFactory: P2PClientDeps<T>['p2pServiceFactory'],
|
|
152
147
|
packageVersion: string,
|
|
153
148
|
logger: Logger,
|
package/src/client/p2p_client.ts
CHANGED
|
@@ -69,7 +69,7 @@ export class P2PClient<T extends P2PClientType = P2PClientType.Full>
|
|
|
69
69
|
private synchedLatestSlot: AztecAsyncSingleton<bigint>;
|
|
70
70
|
|
|
71
71
|
private txPool: TxPool;
|
|
72
|
-
private attestationPool:
|
|
72
|
+
private attestationPool: AttestationPool;
|
|
73
73
|
|
|
74
74
|
private config: P2PConfig;
|
|
75
75
|
|
|
@@ -91,7 +91,7 @@ export class P2PClient<T extends P2PClientType = P2PClientType.Full>
|
|
|
91
91
|
_clientType: T,
|
|
92
92
|
private store: AztecAsyncKVStore,
|
|
93
93
|
private l2BlockSource: L2BlockSource & ContractDataSource,
|
|
94
|
-
mempools: MemPools
|
|
94
|
+
mempools: MemPools,
|
|
95
95
|
private p2pService: P2PService,
|
|
96
96
|
private txCollection: TxCollection,
|
|
97
97
|
config: Partial<P2PConfig> = {},
|
|
@@ -103,7 +103,7 @@ export class P2PClient<T extends P2PClientType = P2PClientType.Full>
|
|
|
103
103
|
|
|
104
104
|
this.config = { ...getP2PDefaultConfig(), ...config };
|
|
105
105
|
this.txPool = mempools.txPool;
|
|
106
|
-
this.attestationPool = mempools.attestationPool
|
|
106
|
+
this.attestationPool = mempools.attestationPool;
|
|
107
107
|
|
|
108
108
|
this.txProvider = new TxProvider(
|
|
109
109
|
this.txCollection,
|
|
@@ -282,10 +282,7 @@ export class P2PClient<T extends P2PClientType = P2PClientType.Full>
|
|
|
282
282
|
const syncedProvenBlock = (await this.getSyncedProvenBlockNum()) + 1;
|
|
283
283
|
const syncedFinalizedBlock = (await this.getSyncedFinalizedBlockNum()) + 1;
|
|
284
284
|
|
|
285
|
-
if (
|
|
286
|
-
(await this.txPool.isEmpty()) &&
|
|
287
|
-
(this.attestationPool === undefined || (await this.attestationPool?.isEmpty()))
|
|
288
|
-
) {
|
|
285
|
+
if ((await this.txPool.isEmpty()) && (await this.attestationPool.isEmpty())) {
|
|
289
286
|
// if mempools are empty, we don't care about syncing prior blocks
|
|
290
287
|
this.initBlockStream(BlockNumber(this.latestBlockNumberAtStart));
|
|
291
288
|
this.setCurrentState(P2PClientState.RUNNING);
|
|
@@ -389,19 +386,17 @@ export class P2PClient<T extends P2PClientType = P2PClientType.Full>
|
|
|
389
386
|
}
|
|
390
387
|
|
|
391
388
|
public async getAttestationsForSlot(slot: SlotNumber, proposalId?: string): Promise<BlockAttestation[]> {
|
|
392
|
-
return (
|
|
393
|
-
(
|
|
394
|
-
|
|
395
|
-
: this.attestationPool?.getAttestationsForSlot(slot))) ?? []
|
|
396
|
-
);
|
|
389
|
+
return await (proposalId
|
|
390
|
+
? this.attestationPool.getAttestationsForSlotAndProposal(slot, proposalId)
|
|
391
|
+
: this.attestationPool.getAttestationsForSlot(slot));
|
|
397
392
|
}
|
|
398
393
|
|
|
399
394
|
public addAttestations(attestations: BlockAttestation[]): Promise<void> {
|
|
400
|
-
return this.attestationPool
|
|
395
|
+
return this.attestationPool.addAttestations(attestations);
|
|
401
396
|
}
|
|
402
397
|
|
|
403
398
|
public deleteAttestation(attestation: BlockAttestation): Promise<void> {
|
|
404
|
-
return this.attestationPool
|
|
399
|
+
return this.attestationPool.deleteAttestations([attestation]);
|
|
405
400
|
}
|
|
406
401
|
|
|
407
402
|
// REVIEW: https://github.com/AztecProtocol/aztec-packages/issues/7963
|
|
@@ -715,6 +710,7 @@ export class P2PClient<T extends P2PClientType = P2PClientType.Full>
|
|
|
715
710
|
}
|
|
716
711
|
|
|
717
712
|
await this.markTxsAsMinedFromBlocks(blocks);
|
|
713
|
+
await this.txPool.clearNonEvictableTxs();
|
|
718
714
|
await this.startCollectingMissingTxs(blocks);
|
|
719
715
|
|
|
720
716
|
const lastBlock = blocks.at(-1)!;
|
|
@@ -782,7 +778,7 @@ export class P2PClient<T extends P2PClientType = P2PClientType.Full>
|
|
|
782
778
|
await this.txPool.deleteTxs(txHashes, { permanently: true });
|
|
783
779
|
await this.txPool.cleanupDeletedMinedTxs(lastBlockNum);
|
|
784
780
|
|
|
785
|
-
await this.attestationPool
|
|
781
|
+
await this.attestationPool.deleteAttestationsOlderThan(lastBlockSlot);
|
|
786
782
|
|
|
787
783
|
await this.synchedFinalizedBlockNumber.set(lastBlockNum);
|
|
788
784
|
this.log.debug(`Synched to finalized block ${lastBlockNum} at slot ${lastBlockSlot}`);
|
|
@@ -833,8 +829,7 @@ export class P2PClient<T extends P2PClientType = P2PClientType.Full>
|
|
|
833
829
|
this.log.info(`Deleting ${minedTxsFromReorg.length} mined txs from reorg`);
|
|
834
830
|
await this.txPool.deleteTxs(minedTxsFromReorg);
|
|
835
831
|
} else {
|
|
836
|
-
this.
|
|
837
|
-
await this.txPool.markMinedAsPending(minedTxsFromReorg);
|
|
832
|
+
await this.txPool.markMinedAsPending(minedTxsFromReorg, latestBlock);
|
|
838
833
|
}
|
|
839
834
|
|
|
840
835
|
await this.synchedLatestBlockNumber.set(latestBlock);
|
package/src/config.ts
CHANGED
|
@@ -133,11 +133,8 @@ export interface P2PConfig extends P2PReqRespConfig, ChainConfig, TxCollectionCo
|
|
|
133
133
|
/** Which calls are allowed in the public setup phase of a tx. */
|
|
134
134
|
txPublicSetupAllowList: AllowedElement[];
|
|
135
135
|
|
|
136
|
-
/** The maximum
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
/** If the pool is full, it will still accept a few more txs until it reached maxTxPoolOverspillFactor * maxTxPoolSize. Then it will evict */
|
|
140
|
-
txPoolOverflowFactor: number;
|
|
136
|
+
/** The maximum number of pending txs before evicting lower priority txs. */
|
|
137
|
+
maxPendingTxCount: number;
|
|
141
138
|
|
|
142
139
|
/** The node's seen message ID cache size */
|
|
143
140
|
seenMessageCacheSize: number;
|
|
@@ -375,15 +372,12 @@ export const p2pConfigMappings: ConfigMappingsType<P2PConfig> = {
|
|
|
375
372
|
printDefault: () =>
|
|
376
373
|
'AuthRegistry, FeeJuice.increase_public_balance, Token.increase_public_balance, FPC.prepare_fee',
|
|
377
374
|
},
|
|
378
|
-
|
|
379
|
-
env: '
|
|
380
|
-
description: 'The maximum
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
env: 'P2P_TX_POOL_OVERFLOW_FACTOR',
|
|
385
|
-
description: 'How much the tx pool can overflow before it starts evicting txs. Must be greater than 1',
|
|
386
|
-
...floatConfigHelper(1.1), // 10% overflow
|
|
375
|
+
maxPendingTxCount: {
|
|
376
|
+
env: 'P2P_MAX_PENDING_TX_COUNT',
|
|
377
|
+
description: 'The maximum number of pending txs before evicting lower priority txs.',
|
|
378
|
+
// Worst case scenario: Uncompressed public/private tx is ~ 156kb
|
|
379
|
+
// This implies we are using ~156MB of memory for pending pool
|
|
380
|
+
...numberConfigHelper(1_000),
|
|
387
381
|
},
|
|
388
382
|
seenMessageCacheSize: {
|
|
389
383
|
env: 'P2P_SEEN_MSG_CACHE_SIZE',
|
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import type { Gossipable } from '@aztec/stdlib/p2p';
|
|
2
|
+
import type { Tx } from '@aztec/stdlib/tx';
|
|
2
3
|
import {
|
|
3
4
|
Attributes,
|
|
4
5
|
type BatchObservableResult,
|
|
@@ -22,6 +23,7 @@ type MetricsLabels = {
|
|
|
22
23
|
objectInMempool: MetricsType;
|
|
23
24
|
objectSize: MetricsType;
|
|
24
25
|
itemsAdded: MetricsType;
|
|
26
|
+
itemMinedDelay: MetricsType;
|
|
25
27
|
};
|
|
26
28
|
|
|
27
29
|
/**
|
|
@@ -35,12 +37,14 @@ function getMetricsLabels(name: PoolName): MetricsLabels {
|
|
|
35
37
|
objectInMempool: Metrics.MEMPOOL_TX_COUNT,
|
|
36
38
|
objectSize: Metrics.MEMPOOL_TX_SIZE,
|
|
37
39
|
itemsAdded: Metrics.MEMPOOL_TX_ADDED_COUNT,
|
|
40
|
+
itemMinedDelay: Metrics.MEMPOOL_TX_MINED_DELAY,
|
|
38
41
|
};
|
|
39
42
|
} else if (name === PoolName.ATTESTATION_POOL) {
|
|
40
43
|
return {
|
|
41
44
|
objectInMempool: Metrics.MEMPOOL_ATTESTATIONS_COUNT,
|
|
42
45
|
objectSize: Metrics.MEMPOOL_ATTESTATIONS_SIZE,
|
|
43
46
|
itemsAdded: Metrics.MEMPOOL_ATTESTATIONS_ADDED_COUNT,
|
|
47
|
+
itemMinedDelay: Metrics.MEMPOOL_ATTESTATIONS_MINED_DELAY,
|
|
44
48
|
};
|
|
45
49
|
}
|
|
46
50
|
|
|
@@ -60,12 +64,16 @@ export class PoolInstrumentation<PoolObject extends Gossipable> {
|
|
|
60
64
|
private addObjectCounter: UpDownCounter;
|
|
61
65
|
/** Tracks tx size */
|
|
62
66
|
private objectSize: Histogram;
|
|
67
|
+
/** Tracks the delay between when a transaction is added and when it is removed */
|
|
68
|
+
private minedDelay: Histogram;
|
|
63
69
|
|
|
64
70
|
private dbMetrics: LmdbMetrics;
|
|
65
71
|
|
|
66
72
|
private defaultAttributes;
|
|
67
73
|
private meter: Meter;
|
|
68
74
|
|
|
75
|
+
private txAddedTimestamp: Map<bigint, number> = new Map<bigint, number>();
|
|
76
|
+
|
|
69
77
|
constructor(
|
|
70
78
|
telemetry: TelemetryClient,
|
|
71
79
|
name: PoolName,
|
|
@@ -98,6 +106,10 @@ export class PoolInstrumentation<PoolObject extends Gossipable> {
|
|
|
98
106
|
description: 'The number of transactions added to the mempool',
|
|
99
107
|
});
|
|
100
108
|
|
|
109
|
+
this.minedDelay = this.meter.createHistogram(metricsLabels.itemMinedDelay, {
|
|
110
|
+
description: 'Delay between transaction added and evicted from the mempool',
|
|
111
|
+
});
|
|
112
|
+
|
|
101
113
|
this.meter.addBatchObservableCallback(this.observeStats, [this.objectsInMempool]);
|
|
102
114
|
}
|
|
103
115
|
|
|
@@ -109,6 +121,27 @@ export class PoolInstrumentation<PoolObject extends Gossipable> {
|
|
|
109
121
|
this.addObjectCounter.add(count);
|
|
110
122
|
}
|
|
111
123
|
|
|
124
|
+
public transactionsAdded(transactions: Tx[]) {
|
|
125
|
+
const timestamp = Date.now();
|
|
126
|
+
for (const transaction of transactions) {
|
|
127
|
+
this.txAddedTimestamp.set(transaction.txHash.toBigInt(), timestamp);
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
public transactionsRemoved(hashes: Iterable<bigint> | Iterable<string>) {
|
|
132
|
+
const timestamp = Date.now();
|
|
133
|
+
for (const hash of hashes) {
|
|
134
|
+
const key = BigInt(hash);
|
|
135
|
+
const addedAt = this.txAddedTimestamp.get(key);
|
|
136
|
+
if (addedAt !== undefined) {
|
|
137
|
+
this.txAddedTimestamp.delete(key);
|
|
138
|
+
if (addedAt < timestamp) {
|
|
139
|
+
this.minedDelay.record(timestamp - addedAt);
|
|
140
|
+
}
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
}
|
|
144
|
+
|
|
112
145
|
private observeStats = async (observer: BatchObservableResult) => {
|
|
113
146
|
const { itemCount } = await this.poolStats();
|
|
114
147
|
if (typeof itemCount === 'number') {
|
|
@@ -1,12 +1,10 @@
|
|
|
1
|
-
import type { P2PClientType } from '@aztec/stdlib/p2p';
|
|
2
|
-
|
|
3
1
|
import type { AttestationPool } from './attestation_pool/attestation_pool.js';
|
|
4
2
|
import type { TxPool } from './tx_pool/tx_pool.js';
|
|
5
3
|
|
|
6
4
|
/**
|
|
7
5
|
 * An interface that combines all mempools
|
|
8
6
|
*/
|
|
9
|
-
export type MemPools
|
|
7
|
+
export type MemPools = {
|
|
10
8
|
txPool: TxPool;
|
|
11
|
-
attestationPool
|
|
9
|
+
attestationPool: AttestationPool;
|
|
12
10
|
};
|
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
# Transaction Pool (Mempool)
|
|
2
|
+
|
|
3
|
+
This module implements the transaction pool (mempool) for the Aztec P2P network. The mempool holds unconfirmed transactions awaiting inclusion in a block.
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
The transaction pool serves as a staging area for transactions before they are included in blocks. It manages the lifecycle of transactions from initial submission through mining, handling duplicates, priority ordering, and eviction of invalid or low-priority transactions.
|
|
8
|
+
|
|
9
|
+
## Interface: `TxPool`
|
|
10
|
+
|
|
11
|
+
The [`TxPool`](tx_pool.ts) interface defines the contract that all transaction pool implementations must fulfill:
|
|
12
|
+
|
|
13
|
+
### Transaction Lifecycle
|
|
14
|
+
|
|
15
|
+
The lifecycle of transactions in the pool is summarised in the following table:
|
|
16
|
+
|
|
17
|
+
| State | Meaning | Possible Future States |
|
|
18
|
+
| --- | --- | --- |
|
|
19
|
+
| Pending | Available to be added to a block, can be evicted | Protected, Mined, Soft Deleted |
|
|
20
|
+
| Protected | Added to a proposal, must not be evicted | Mined, Pending |
|
|
21
|
+
| Mined | Confirmed as added to a block | Soft Deleted, Pending |
|
|
22
|
+
| Soft Deleted | Awaiting full deletion once state has been finalised on L1 | Pending, Deleted |
|
|
23
|
+
| Deleted | Removed from the pool | N/A |
|
|
24
|
+
|
|
25
|
+
**Note on why Soft Delete:**
|
|
26
|
+
Mined transactions are soft-deleted rather than permanently removed to support:
|
|
27
|
+
1. Reorg handling — If a chain reorganization occurs, soft-deleted transactions are still available in the mempool
|
|
28
|
+
2. Slash condition detection — The epoch prune watcher needs access to transactions from pruned epochs to correctly identify data withholding slash conditions. Without soft-delete, transactions invalidated by reorgs (e.g., built on removed blocks) would be lost, causing false positives for data withholding violations.
|
|
29
|
+
|
|
30
|
+
Mined transactions are permanently deleted via `cleanupDeletedMinedTxs()` once their original block is finalized on L1, ensuring they remain available during the uncertainty window.
|
|
31
|
+
Alternatively, mined transactions can be permanently deleted immediately by passing the `permanent: true` option to `deleteTxs()`.
|
|
32
|
+
|
|
33
|
+
#### Transaction Lifecycle Methods
|
|
34
|
+
|
|
35
|
+
| Method | Description |
|
|
36
|
+
|--------|-------------|
|
|
37
|
+
| `addTxs(txs, opts?)` | Adds transactions to the pool. Duplicates are ignored. Returns count of newly added txs. |
|
|
38
|
+
| `deleteTxs(txHashes, opts?)` | Removes transactions from the pool. Supports soft-delete for mined txs. |
|
|
39
|
+
| `markAsMined(txHashes, blockHeader)` | Marks transactions as included in a block. |
|
|
40
|
+
| `markMinedAsPending(txHashes, blockNumber)` | Reverts mined transactions to pending (used during reorgs). |
|
|
41
|
+
| `getArchivedTxByHash(txHash)` | Retrieves archived (historical) transactions. |
|
|
42
|
+
| `getTxStatus(txHash)` | Returns status: `'pending'`, `'mined'`, `'deleted'`, or `undefined`. |
|
|
43
|
+
|
|
44
|
+
### Transaction Fetching
|
|
45
|
+
|
|
46
|
+
| Method | Description |
|
|
47
|
+
|--------|-------------|
|
|
48
|
+
| `hasTx(txHash)` / `hasTxs(txHashes)` | Checks if transaction(s) exist in the pool. |
|
|
49
|
+
| `getTxByHash(txHash)` | Retrieves a transaction by its hash. |
|
|
50
|
+
| `getTxsByHash(txHashes)` | Batch retrieval of transactions by hash. |
|
|
51
|
+
| `getAllTxs()` / `getAllTxHashes()` | Returns all transactions or their hashes. |
|
|
52
|
+
| `getPendingTxHashes()` | Returns pending tx hashes **sorted by priority** (highest first). |
|
|
53
|
+
| `getPendingTxCount()` | Returns count of pending transactions. |
|
|
54
|
+
| `getMinedTxHashes()` | Returns mined tx hashes with their block numbers. |
|
|
55
|
+
|
|
56
|
+
### Pool Management
|
|
57
|
+
|
|
58
|
+
| Method | Description |
|
|
59
|
+
|--------|-------------|
|
|
60
|
+
| `updateConfig(config)` | Updates pool configuration (max size, archive limit). |
|
|
61
|
+
| `markTxsAsNonEvictable(txHashes)` | Protects transactions from eviction. |
|
|
62
|
+
| `clearNonEvictableTxs()` | Clears non-evictable flag from all transactions. |
|
|
63
|
+
| `cleanupDeletedMinedTxs(blockNumber)` | Permanently removes soft-deleted txs from blocks ≤ blockNumber. |
|
|
64
|
+
| `isEmpty()` | Checks if the pool has no transactions. |
|
|
65
|
+
|
|
66
|
+
### Events
|
|
67
|
+
|
|
68
|
+
The pool emits a `txs-added` event when new transactions are successfully added, allowing subscribers to react to pool changes.
|
|
69
|
+
|
|
70
|
+
## `AztecKVTxPool`
|
|
71
|
+
|
|
72
|
+
The [`AztecKVTxPool`](aztec_kv_tx_pool.ts) is the production-grade implementation backed by a persistent key-value store. It provides:
|
|
73
|
+
|
|
74
|
+
- **Persistent storage** via `AztecAsyncKVStore`
|
|
75
|
+
- **Multiple indexes** for efficient queries
|
|
76
|
+
- **Automatic eviction** of invalid and low-priority transactions
|
|
77
|
+
- **Transaction archival** for historical lookups
|
|
78
|
+
- **Soft-delete semantics** for mined transactions
|
|
79
|
+
|
|
80
|
+
#### Storage Structure
|
|
81
|
+
|
|
82
|
+
The pool maintains several KV maps and indexes:
|
|
83
|
+
|
|
84
|
+
| Store | Purpose |
|
|
85
|
+
|-------|---------|
|
|
86
|
+
| `#txs` | Primary storage: tx hash → serialized tx buffer |
|
|
87
|
+
| `#minedTxHashToBlock` | Index of mined txs: tx hash → block number |
|
|
88
|
+
| `#pendingTxPriorityToHash` | Priority-ordered index of pending txs |
|
|
89
|
+
| `#deletedMinedTxHashes` | Soft-deleted mined txs: tx hash → original block number |
|
|
90
|
+
| `#blockToDeletedMinedTxHash` | Reverse index for cleanup: block → deleted tx hashes |
|
|
91
|
+
| `#txHashToHistoricalBlockHeaderHash` | Anchor block reference for each tx |
|
|
92
|
+
| `#historicalHeaderToTxHash` | Index from historical block → tx hashes |
|
|
93
|
+
| `#feePayerToTxHash` | Index from fee payer address → tx hashes |
|
|
94
|
+
| `#archivedTxs` | Archived transactions for historical lookup |
|
|
95
|
+
|
|
96
|
+
#### In-Memory Caches
|
|
97
|
+
|
|
98
|
+
| Cache | Purpose |
|
|
99
|
+
|-------|---------|
|
|
100
|
+
| `#pendingTxs` | Hydrated pending transactions for fast access |
|
|
101
|
+
| `#nonEvictableTxs` | Set of tx hashes protected from eviction |
|
|
102
|
+
|
|
103
|
+
## Transaction Priority
|
|
104
|
+
|
|
105
|
+
Transactions are prioritized based on their **total priority fees** (see [`priority.ts`](priority.ts)):
|
|
106
|
+
|
|
107
|
+
```typescript
|
|
108
|
+
priorityFee = maxPriorityFeesPerGas.feePerDaGas + maxPriorityFeesPerGas.feePerL2Gas
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
The priority is stored as a hex string derived from a 32-byte buffer representation of the fee amount, enabling lexicographic ordering in the KV store. Pending transactions are returned in **descending priority order** (highest fees first).
|
|
112
|
+
|
|
113
|
+
## Transaction Lifecycle in AztecKVTxPool
|
|
114
|
+
|
|
115
|
+
### 1. Adding Transactions
|
|
116
|
+
|
|
117
|
+
When `addTxs()` is called:
|
|
118
|
+
|
|
119
|
+
1. Check for duplicates (skip if tx already exists)
|
|
120
|
+
2. Store the serialized tx in `#txs`
|
|
121
|
+
3. Index the tx by its anchor block hash
|
|
122
|
+
4. If not already mined, add to pending indexes:
|
|
123
|
+
- Priority-to-hash index (for ordering)
|
|
124
|
+
- Historical header index (for reorg handling)
|
|
125
|
+
- Fee payer index (for balance validation)
|
|
126
|
+
5. Record metrics
|
|
127
|
+
6. Trigger eviction rules for `TXS_ADDED` event
|
|
128
|
+
7. Emit `txs-added` event
|
|
129
|
+
|
|
130
|
+
### 2. Marking as Mined
|
|
131
|
+
|
|
132
|
+
When a block is mined, `markAsMined()`:
|
|
133
|
+
|
|
134
|
+
1. Move tx from pending to mined status
|
|
135
|
+
2. If previously soft-deleted, restore to mined status
|
|
136
|
+
3. Trigger eviction rules for `BLOCK_MINED` event
|
|
137
|
+
|
|
138
|
+
### 3. Handling Reorgs
|
|
139
|
+
|
|
140
|
+
When blocks are pruned, `markMinedAsPending()`:
|
|
141
|
+
|
|
142
|
+
1. Remove tx from mined index
|
|
143
|
+
2. Rehydrate pending indexes
|
|
144
|
+
3. Trigger eviction rules for `CHAIN_PRUNED` event
|
|
145
|
+
|
|
146
|
+
### 4. Deleting Transactions
|
|
147
|
+
|
|
148
|
+
The `deleteTxs()` method handles two cases:
|
|
149
|
+
|
|
150
|
+
- **Pending transactions**: Permanently deleted (the transaction and all indexes referencing it)
|
|
151
|
+
- **Mined transactions**: Soft-deleted by default (moved to `#deletedMinedTxHashes`), with option for permanent deletion
|
|
152
|
+
|
|
153
|
+
Soft-deleted mined transactions are retained for potential future reference and can be permanently cleaned up later via `cleanupDeletedMinedTxs()`.
|
|
154
|
+
|
|
155
|
+
## Eviction System
|
|
156
|
+
|
|
157
|
+
The eviction system automatically removes invalid or low-priority transactions based on configurable rules. See the [`eviction/`](eviction/) subdirectory for implementation details.
|
|
158
|
+
|
|
159
|
+
### Architecture
|
|
160
|
+
|
|
161
|
+
```
|
|
162
|
+
┌─────────────────────────────────────────────────────────────────┐
|
|
163
|
+
│ EvictionManager │
|
|
164
|
+
│ Orchestrates eviction rules based on pool events │
|
|
165
|
+
├─────────────────────────────────────────────────────────────────┤
|
|
166
|
+
│ │
|
|
167
|
+
│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │
|
|
168
|
+
│ │EvictionRule #1 │ │EvictionRule #2 │ │EvictionRule #N │ │
|
|
169
|
+
│ └─────────────────┘ └─────────────────┘ └─────────────────┘ │
|
|
170
|
+
│ │
|
|
171
|
+
└─────────────────────────────────────────────────────────────────┘
|
|
172
|
+
│
|
|
173
|
+
▼
|
|
174
|
+
┌─────────────────┐
|
|
175
|
+
│ TxPoolOperations│
|
|
176
|
+
│ (interface) │
|
|
177
|
+
└─────────────────┘
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
The [`EvictionManager`](eviction/eviction_manager.ts) coordinates eviction by:
|
|
181
|
+
|
|
182
|
+
1. Registering multiple `EvictionRule` implementations
|
|
183
|
+
2. Calling each rule when tx pool events occur
|
|
184
|
+
3. Propagating configuration updates to all rules
|
|
185
|
+
|
|
186
|
+
### Eviction Events
|
|
187
|
+
|
|
188
|
+
| Event | Trigger | Purpose |
|
|
189
|
+
|-------|---------|---------|
|
|
190
|
+
| `TXS_ADDED` | New transactions added | Enforce pool size limits |
|
|
191
|
+
| `BLOCK_MINED` | Block finalized | Remove invalidated transactions |
|
|
192
|
+
| `CHAIN_PRUNED` | Chain reorganization | Remove txs referencing pruned blocks |
|
|
193
|
+
|
|
194
|
+
### Eviction Rules
|
|
195
|
+
|
|
196
|
+
#### 1. `InvalidTxsAfterMiningRule`
|
|
197
|
+
|
|
198
|
+
**Triggers on:** `BLOCK_MINED`
|
|
199
|
+
|
|
200
|
+
Evicts transactions that become invalid after a block is mined:
|
|
201
|
+
|
|
202
|
+
- Duplicate nullifiers: Txs with nullifiers already included in the mined block
|
|
203
|
+
- Expired transactions: Txs with `includeByTimestamp` ≤ mined block timestamp
|
|
204
|
+
|
|
205
|
+
#### 2. `InvalidTxsAfterReorgRule`
|
|
206
|
+
|
|
207
|
+
**Triggers on:** `CHAIN_PRUNED`
|
|
208
|
+
|
|
209
|
+
Evicts transactions that reference blocks no longer in the canonical chain:
|
|
210
|
+
|
|
211
|
+
- Checks each pending tx's anchor block hash against the archive tree
|
|
212
|
+
- Removes txs whose anchor blocks are not found (pruned)
|
|
213
|
+
|
|
214
|
+
#### 3. `InsufficientFeePayerBalanceRule`
|
|
215
|
+
|
|
216
|
+
**Triggers on:** `BLOCK_MINED`, `CHAIN_PRUNED`
|
|
217
|
+
|
|
218
|
+
Evicts transactions whose fee payer no longer has sufficient balance:
|
|
219
|
+
|
|
220
|
+
- Uses `GasTxValidator` to check fee payer balances against current world state
|
|
221
|
+
|
|
222
|
+
#### 4. `LowPriorityEvictionRule`
|
|
223
|
+
|
|
224
|
+
**Triggers on:** `TXS_ADDED`
|
|
225
|
+
|
|
226
|
+
Enforces maximum pool size by evicting lowest-priority (by fee) transactions:
|
|
227
|
+
|
|
228
|
+
- Configured via `maxPendingTxCount` option (0 = disabled)
|
|
229
|
+
- Uses `getLowestPriorityEvictable()` to find txs to evict
|
|
230
|
+
|
|
231
|
+
### Non-Evictable Transactions
|
|
232
|
+
|
|
233
|
+
Transactions can be marked as non-evictable via `markTxsAsNonEvictable()`. This protects them from all eviction rules, typically used during block building to ensure transactions being processed aren't evicted mid-operation. The flag is cleared after block processing via `clearNonEvictableTxs()`.
|
|
234
|
+
`clearNonEvictableTxs()` is called upon receiving a new L2 block.
|
|
235
|
+
|
|
236
|
+
## Configuration
|
|
237
|
+
|
|
238
|
+
The pool accepts configuration via `TxPoolOptions`:
|
|
239
|
+
|
|
240
|
+
```typescript
|
|
241
|
+
type TxPoolOptions = {
|
|
242
|
+
maxPendingTxCount?: number; // Max pending txs (0 = unlimited)
|
|
243
|
+
archivedTxLimit?: number; // Number of archived txs to retain
|
|
244
|
+
};
|
|
245
|
+
```
|
|
246
|
+
|
|
247
|
+
Configuration can be updated at runtime via `updateConfig()`.
|
|
248
|
+
|
|
249
|
+
## Telemetry
|
|
250
|
+
|
|
251
|
+
The pool integrates with the telemetry system to report:
|
|
252
|
+
|
|
253
|
+
- Transaction counts (pending vs mined)
|
|
254
|
+
- Transaction sizes
|
|
255
|
+
- Store size estimates
|