@aztec/p2p 0.0.1-commit.f295ac2 → 0.0.1-commit.fc805bf
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/bootstrap/bootstrap.d.ts +4 -3
- package/dest/bootstrap/bootstrap.d.ts.map +1 -1
- package/dest/bootstrap/bootstrap.js +4 -4
- package/dest/client/factory.d.ts +1 -1
- package/dest/client/factory.d.ts.map +1 -1
- package/dest/client/factory.js +6 -5
- package/dest/client/p2p_client.d.ts +1 -1
- package/dest/client/p2p_client.d.ts.map +1 -1
- package/dest/client/p2p_client.js +9 -2
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.d.ts +2 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.d.ts.map +1 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.js +305 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.d.ts +73 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.d.ts.map +1 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.js +8 -0
- package/dest/config.d.ts +8 -2
- package/dest/config.d.ts.map +1 -1
- package/dest/config.js +2 -0
- package/dest/mem_pools/instrumentation.d.ts +1 -1
- package/dest/mem_pools/instrumentation.d.ts.map +1 -1
- package/dest/mem_pools/instrumentation.js +2 -2
- package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts +3 -3
- package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/eviction_manager.d.ts +3 -2
- package/dest/mem_pools/tx_pool/eviction/eviction_manager.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/eviction_strategy.d.ts +3 -2
- package/dest/mem_pools/tx_pool/eviction/eviction_strategy.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/fee_payer_balance_eviction_rule.d.ts +3 -3
- package/dest/mem_pools/tx_pool/eviction/fee_payer_balance_eviction_rule.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/fee_payer_balance_eviction_rule.js +8 -1
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.d.ts +3 -3
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.js +2 -0
- package/dest/msg_validators/attestation_validator/attestation_validator.d.ts +3 -3
- package/dest/msg_validators/attestation_validator/attestation_validator.d.ts.map +1 -1
- package/dest/msg_validators/attestation_validator/attestation_validator.js +41 -10
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.d.ts +3 -3
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.d.ts.map +1 -1
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.js +18 -6
- package/dest/msg_validators/clock_tolerance.d.ts +21 -0
- package/dest/msg_validators/clock_tolerance.d.ts.map +1 -0
- package/dest/msg_validators/clock_tolerance.js +37 -0
- package/dest/msg_validators/proposal_validator/proposal_validator.d.ts +3 -3
- package/dest/msg_validators/proposal_validator/proposal_validator.d.ts.map +1 -1
- package/dest/msg_validators/proposal_validator/proposal_validator.js +55 -31
- package/dest/msg_validators/proposal_validator/proposal_validator_test_suite.d.ts +3 -3
- package/dest/msg_validators/proposal_validator/proposal_validator_test_suite.d.ts.map +1 -1
- package/dest/msg_validators/proposal_validator/proposal_validator_test_suite.js +93 -64
- package/dest/msg_validators/tx_validator/archive_cache.d.ts +3 -3
- package/dest/msg_validators/tx_validator/archive_cache.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/archive_cache.js +1 -1
- package/dest/msg_validators/tx_validator/block_header_validator.d.ts +5 -4
- package/dest/msg_validators/tx_validator/block_header_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/block_header_validator.js +3 -2
- package/dest/msg_validators/tx_validator/data_validator.d.ts +3 -1
- package/dest/msg_validators/tx_validator/data_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/data_validator.js +4 -1
- package/dest/msg_validators/tx_validator/double_spend_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/double_spend_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/double_spend_validator.js +3 -2
- package/dest/msg_validators/tx_validator/factory.d.ts +8 -3
- package/dest/msg_validators/tx_validator/factory.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/factory.js +21 -11
- package/dest/msg_validators/tx_validator/gas_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/gas_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/gas_validator.js +3 -2
- package/dest/msg_validators/tx_validator/index.d.ts +2 -1
- package/dest/msg_validators/tx_validator/index.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/index.js +1 -0
- package/dest/msg_validators/tx_validator/metadata_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/metadata_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/metadata_validator.js +2 -2
- package/dest/msg_validators/tx_validator/phases_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/phases_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/phases_validator.js +3 -3
- package/dest/msg_validators/tx_validator/size_validator.d.ts +8 -0
- package/dest/msg_validators/tx_validator/size_validator.d.ts.map +1 -0
- package/dest/msg_validators/tx_validator/size_validator.js +23 -0
- package/dest/msg_validators/tx_validator/timestamp_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/timestamp_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/timestamp_validator.js +2 -2
- package/dest/msg_validators/tx_validator/tx_permitted_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/tx_permitted_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/tx_permitted_validator.js +2 -2
- package/dest/msg_validators/tx_validator/tx_proof_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/tx_proof_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/tx_proof_validator.js +2 -2
- package/dest/services/data_store.d.ts +1 -1
- package/dest/services/data_store.d.ts.map +1 -1
- package/dest/services/data_store.js +10 -6
- package/dest/services/discv5/discV5_service.js +1 -1
- package/dest/services/dummy_service.d.ts +13 -1
- package/dest/services/dummy_service.d.ts.map +1 -1
- package/dest/services/dummy_service.js +39 -0
- package/dest/services/encoding.d.ts +1 -1
- package/dest/services/encoding.d.ts.map +1 -1
- package/dest/services/encoding.js +2 -3
- package/dest/services/libp2p/instrumentation.d.ts +1 -1
- package/dest/services/libp2p/instrumentation.d.ts.map +1 -1
- package/dest/services/libp2p/instrumentation.js +14 -3
- package/dest/services/libp2p/libp2p_service.d.ts +13 -7
- package/dest/services/libp2p/libp2p_service.d.ts.map +1 -1
- package/dest/services/libp2p/libp2p_service.js +60 -51
- package/dest/services/peer-manager/metrics.d.ts +2 -2
- package/dest/services/peer-manager/metrics.d.ts.map +1 -1
- package/dest/services/peer-manager/metrics.js +20 -5
- package/dest/services/peer-manager/peer_scoring.d.ts +1 -1
- package/dest/services/peer-manager/peer_scoring.d.ts.map +1 -1
- package/dest/services/peer-manager/peer_scoring.js +8 -2
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.d.ts +47 -0
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.js +566 -0
- package/dest/services/reqresp/batch-tx-requester/config.d.ts +17 -0
- package/dest/services/reqresp/batch-tx-requester/config.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/config.js +27 -0
- package/dest/services/reqresp/batch-tx-requester/interface.d.ts +50 -0
- package/dest/services/reqresp/batch-tx-requester/interface.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/interface.js +1 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.d.ts +37 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.js +151 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.d.ts +54 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.js +139 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.d.ts +20 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.js +21 -0
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts +22 -3
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts.map +1 -1
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.js +63 -4
- package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts +2 -1
- package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts.map +1 -1
- package/dest/services/reqresp/connection-sampler/connection_sampler.js +12 -0
- package/dest/services/reqresp/interface.d.ts +5 -3
- package/dest/services/reqresp/interface.d.ts.map +1 -1
- package/dest/services/reqresp/interface.js +2 -2
- package/dest/services/reqresp/metrics.d.ts +6 -5
- package/dest/services/reqresp/metrics.d.ts.map +1 -1
- package/dest/services/reqresp/metrics.js +17 -5
- package/dest/services/reqresp/protocols/block_txs/bitvector.d.ts +5 -1
- package/dest/services/reqresp/protocols/block_txs/bitvector.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/bitvector.js +5 -0
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.d.ts +1 -1
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.js +16 -3
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.d.ts +18 -6
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.js +43 -13
- package/dest/services/reqresp/reqresp.d.ts +6 -1
- package/dest/services/reqresp/reqresp.d.ts.map +1 -1
- package/dest/services/reqresp/reqresp.js +58 -22
- package/dest/services/service.d.ts +4 -1
- package/dest/services/service.d.ts.map +1 -1
- package/dest/services/tx_collection/config.d.ts +4 -1
- package/dest/services/tx_collection/config.d.ts.map +1 -1
- package/dest/services/tx_collection/config.js +9 -1
- package/dest/services/tx_collection/fast_tx_collection.d.ts +6 -4
- package/dest/services/tx_collection/fast_tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/fast_tx_collection.js +16 -5
- package/dest/services/tx_collection/index.d.ts +2 -1
- package/dest/services/tx_collection/index.d.ts.map +1 -1
- package/dest/services/tx_collection/index.js +1 -0
- package/dest/services/tx_collection/instrumentation.d.ts +1 -1
- package/dest/services/tx_collection/instrumentation.d.ts.map +1 -1
- package/dest/services/tx_collection/instrumentation.js +9 -2
- package/dest/services/tx_collection/proposal_tx_collector.d.ts +48 -0
- package/dest/services/tx_collection/proposal_tx_collector.d.ts.map +1 -0
- package/dest/services/tx_collection/proposal_tx_collector.js +50 -0
- package/dest/services/tx_collection/slow_tx_collection.d.ts +3 -3
- package/dest/services/tx_collection/slow_tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/tx_collection.d.ts +8 -8
- package/dest/services/tx_collection/tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/tx_collection.js +5 -5
- package/dest/services/tx_provider.d.ts +3 -3
- package/dest/services/tx_provider.d.ts.map +1 -1
- package/dest/services/tx_provider_instrumentation.d.ts +1 -1
- package/dest/services/tx_provider_instrumentation.d.ts.map +1 -1
- package/dest/services/tx_provider_instrumentation.js +5 -5
- package/dest/test-helpers/index.d.ts +3 -1
- package/dest/test-helpers/index.d.ts.map +1 -1
- package/dest/test-helpers/index.js +2 -0
- package/dest/test-helpers/test_tx_provider.d.ts +40 -0
- package/dest/test-helpers/test_tx_provider.d.ts.map +1 -0
- package/dest/test-helpers/test_tx_provider.js +41 -0
- package/dest/test-helpers/testbench-utils.d.ts +158 -0
- package/dest/test-helpers/testbench-utils.d.ts.map +1 -0
- package/dest/test-helpers/testbench-utils.js +297 -0
- package/dest/testbench/p2p_client_testbench_worker.d.ts +28 -2
- package/dest/testbench/p2p_client_testbench_worker.d.ts.map +1 -1
- package/dest/testbench/p2p_client_testbench_worker.js +212 -133
- package/dest/testbench/worker_client_manager.d.ts +51 -6
- package/dest/testbench/worker_client_manager.d.ts.map +1 -1
- package/dest/testbench/worker_client_manager.js +226 -44
- package/package.json +14 -14
- package/src/bootstrap/bootstrap.ts +7 -4
- package/src/client/factory.ts +6 -10
- package/src/client/p2p_client.ts +14 -7
- package/src/client/test/tx_proposal_collector/README.md +227 -0
- package/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts +336 -0
- package/src/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.ts +43 -0
- package/src/config.ts +6 -1
- package/src/mem_pools/instrumentation.ts +2 -1
- package/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts +2 -2
- package/src/mem_pools/tx_pool/eviction/eviction_manager.ts +2 -1
- package/src/mem_pools/tx_pool/eviction/eviction_strategy.ts +2 -1
- package/src/mem_pools/tx_pool/eviction/fee_payer_balance_eviction_rule.ts +10 -7
- package/src/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.ts +4 -2
- package/src/msg_validators/attestation_validator/attestation_validator.ts +26 -14
- package/src/msg_validators/attestation_validator/fisherman_attestation_validator.ts +14 -8
- package/src/msg_validators/clock_tolerance.ts +51 -0
- package/src/msg_validators/proposal_validator/proposal_validator.ts +31 -31
- package/src/msg_validators/proposal_validator/proposal_validator_test_suite.ts +91 -67
- package/src/msg_validators/tx_validator/archive_cache.ts +3 -3
- package/src/msg_validators/tx_validator/block_header_validator.ts +6 -5
- package/src/msg_validators/tx_validator/data_validator.ts +6 -2
- package/src/msg_validators/tx_validator/double_spend_validator.ts +4 -3
- package/src/msg_validators/tx_validator/factory.ts +64 -23
- package/src/msg_validators/tx_validator/gas_validator.ts +9 -3
- package/src/msg_validators/tx_validator/index.ts +1 -0
- package/src/msg_validators/tx_validator/metadata_validator.ts +6 -3
- package/src/msg_validators/tx_validator/phases_validator.ts +5 -3
- package/src/msg_validators/tx_validator/size_validator.ts +22 -0
- package/src/msg_validators/tx_validator/timestamp_validator.ts +6 -3
- package/src/msg_validators/tx_validator/tx_permitted_validator.ts +8 -3
- package/src/msg_validators/tx_validator/tx_proof_validator.ts +8 -3
- package/src/services/data_store.ts +10 -7
- package/src/services/discv5/discV5_service.ts +1 -1
- package/src/services/dummy_service.ts +45 -0
- package/src/services/encoding.ts +2 -3
- package/src/services/libp2p/instrumentation.ts +15 -2
- package/src/services/libp2p/libp2p_service.ts +99 -73
- package/src/services/peer-manager/metrics.ts +21 -4
- package/src/services/peer-manager/peer_scoring.ts +4 -1
- package/src/services/reqresp/batch-tx-requester/README.md +305 -0
- package/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts +706 -0
- package/src/services/reqresp/batch-tx-requester/config.ts +40 -0
- package/src/services/reqresp/batch-tx-requester/interface.ts +57 -0
- package/src/services/reqresp/batch-tx-requester/missing_txs.ts +209 -0
- package/src/services/reqresp/batch-tx-requester/peer_collection.ts +205 -0
- package/src/services/reqresp/batch-tx-requester/tx_validator.ts +37 -0
- package/src/services/reqresp/connection-sampler/batch_connection_sampler.ts +65 -4
- package/src/services/reqresp/connection-sampler/connection_sampler.ts +16 -0
- package/src/services/reqresp/interface.ts +5 -2
- package/src/services/reqresp/metrics.ts +34 -9
- package/src/services/reqresp/protocols/block_txs/bitvector.ts +7 -0
- package/src/services/reqresp/protocols/block_txs/block_txs_handler.ts +18 -4
- package/src/services/reqresp/protocols/block_txs/block_txs_reqresp.ts +51 -9
- package/src/services/reqresp/reqresp.ts +66 -19
- package/src/services/service.ts +4 -0
- package/src/services/tx_collection/config.ts +15 -1
- package/src/services/tx_collection/fast_tx_collection.ts +36 -13
- package/src/services/tx_collection/index.ts +5 -0
- package/src/services/tx_collection/instrumentation.ts +11 -2
- package/src/services/tx_collection/proposal_tx_collector.ts +114 -0
- package/src/services/tx_collection/slow_tx_collection.ts +2 -2
- package/src/services/tx_collection/tx_collection.ts +8 -8
- package/src/services/tx_provider.ts +2 -2
- package/src/services/tx_provider_instrumentation.ts +11 -5
- package/src/test-helpers/index.ts +2 -0
- package/src/test-helpers/test_tx_provider.ts +64 -0
- package/src/test-helpers/testbench-utils.ts +374 -0
- package/src/testbench/p2p_client_testbench_worker.ts +321 -126
- package/src/testbench/worker_client_manager.ts +304 -47
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import { type ConfigMappingsType, numberConfigHelper } from '@aztec/foundation/config';
|
|
2
|
+
|
|
3
|
+
export const DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT = 10;
|
|
4
|
+
export const DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT = 10;
|
|
5
|
+
export const DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE = 8;
|
|
6
|
+
export const DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD = 2;
|
|
7
|
+
|
|
8
|
+
export interface BatchTxRequesterConfig {
|
|
9
|
+
/** Max concurrent requests to smart peers. */
|
|
10
|
+
batchTxRequesterSmartParallelWorkerCount: number;
|
|
11
|
+
/** Max concurrent requests to dumb peers. */
|
|
12
|
+
batchTxRequesterDumbParallelWorkerCount: number;
|
|
13
|
+
/** Max transactions per request / chunk size. */
|
|
14
|
+
batchTxRequesterTxBatchSize: number;
|
|
15
|
+
/** Failures before a peer is considered bad (see > threshold logic). */
|
|
16
|
+
batchTxRequesterBadPeerThreshold: number;
|
|
17
|
+
}
|
|
18
|
+
|
|
19
|
+
export const batchTxRequesterConfigMappings: ConfigMappingsType<BatchTxRequesterConfig> = {
|
|
20
|
+
batchTxRequesterSmartParallelWorkerCount: {
|
|
21
|
+
env: 'P2P_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT',
|
|
22
|
+
description: 'Max concurrent requests to smart peers for batch tx requester.',
|
|
23
|
+
...numberConfigHelper(DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT),
|
|
24
|
+
},
|
|
25
|
+
batchTxRequesterDumbParallelWorkerCount: {
|
|
26
|
+
env: 'P2P_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT',
|
|
27
|
+
description: 'Max concurrent requests to dumb peers for batch tx requester.',
|
|
28
|
+
...numberConfigHelper(DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT),
|
|
29
|
+
},
|
|
30
|
+
batchTxRequesterTxBatchSize: {
|
|
31
|
+
env: 'P2P_BATCH_TX_REQUESTER_TX_BATCH_SIZE',
|
|
32
|
+
description: 'Max transactions per request / chunk size for batch tx requester.',
|
|
33
|
+
...numberConfigHelper(DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE),
|
|
34
|
+
},
|
|
35
|
+
batchTxRequesterBadPeerThreshold: {
|
|
36
|
+
env: 'P2P_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD',
|
|
37
|
+
description: 'Failures before a peer is considered bad (see > threshold logic).',
|
|
38
|
+
...numberConfigHelper(DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD),
|
|
39
|
+
},
|
|
40
|
+
};
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import type { ISemaphore } from '@aztec/foundation/queue';
|
|
2
|
+
import type { PeerErrorSeverity } from '@aztec/stdlib/p2p';
|
|
3
|
+
import type { Tx, TxHash } from '@aztec/stdlib/tx';
|
|
4
|
+
|
|
5
|
+
import type { PeerId } from '@libp2p/interface';
|
|
6
|
+
|
|
7
|
+
import type { ConnectionSampler } from '../connection-sampler/connection_sampler.js';
|
|
8
|
+
import type { ReqRespInterface } from '../interface.js';
|
|
9
|
+
import type { MissingTxMetadata } from './missing_txs.js';
|
|
10
|
+
import type { IPeerCollection } from './peer_collection.js';
|
|
11
|
+
import type { BatchRequestTxValidatorConfig, IBatchRequestTxValidator } from './tx_validator.js';
|
|
12
|
+
|
|
13
|
+
export interface IPeerPenalizer {
|
|
14
|
+
penalizePeer(peerId: PeerId, penalty: PeerErrorSeverity): void;
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
export interface ITxMetadataCollection {
|
|
18
|
+
size: number;
|
|
19
|
+
values(): IterableIterator<MissingTxMetadata>;
|
|
20
|
+
getMissingTxHashes(): Set<string>;
|
|
21
|
+
getTxsToRequestFromThePeer(peer: PeerId): TxHash[];
|
|
22
|
+
markRequested(txHash: TxHash): void;
|
|
23
|
+
markInFlightBySmartPeer(txHash: TxHash): void;
|
|
24
|
+
markNotInFlightBySmartPeer(txHash: TxHash): void;
|
|
25
|
+
alreadyFetched(txHash: TxHash): boolean;
|
|
26
|
+
// Returns true if tx was marked as fetched, false if it was already marked as fetched
|
|
27
|
+
markFetched(peerId: PeerId, tx: Tx): boolean;
|
|
28
|
+
markPeerHas(peerId: PeerId, txHashes: TxHash[]): void;
|
|
29
|
+
getFetchedTxs(): Tx[];
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
/**
|
|
33
|
+
* Interface for BatchTxRequester dependencies that can be injected from upstream
|
|
34
|
+
*/
|
|
35
|
+
export interface BatchTxRequesterLibP2PService {
|
|
36
|
+
/** ReqResp interface for sending requests to peers */
|
|
37
|
+
reqResp: Pick<ReqRespInterface, 'sendBatchRequest' | 'sendRequestToPeer'>;
|
|
38
|
+
/** Connection sampler for getting peer lists */
|
|
39
|
+
connectionSampler: Pick<ConnectionSampler, 'getPeerListSortedByConnectionCountAsc'>;
|
|
40
|
+
/** Configuration needed for transaction validation */
|
|
41
|
+
txValidatorConfig: BatchRequestTxValidatorConfig;
|
|
42
|
+
/** Peer scoring for penalizing peers */
|
|
43
|
+
peerScoring: IPeerPenalizer;
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
export interface BatchTxRequesterOptions {
|
|
47
|
+
smartParallelWorkerCount?: number;
|
|
48
|
+
dumbParallelWorkerCount?: number;
|
|
49
|
+
txBatchSize?: number;
|
|
50
|
+
badPeerThreshold?: number;
|
|
51
|
+
//Injectable for testing purposes
|
|
52
|
+
semaphore?: ISemaphore;
|
|
53
|
+
peerCollection?: IPeerCollection;
|
|
54
|
+
abortSignal?: AbortSignal;
|
|
55
|
+
/** Optional tx validator for testing - if not provided, one is created from p2pService.txValidatorConfig */
|
|
56
|
+
txValidator?: IBatchRequestTxValidator;
|
|
57
|
+
}
|
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
import { type Tx, TxHash } from '@aztec/stdlib/tx';
|
|
2
|
+
|
|
3
|
+
import type { PeerId } from '@libp2p/interface';
|
|
4
|
+
|
|
5
|
+
import { DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE } from './config.js';
|
|
6
|
+
import type { ITxMetadataCollection } from './interface.js';
|
|
7
|
+
|
|
8
|
+
export class MissingTxMetadata {
|
|
9
|
+
constructor(
|
|
10
|
+
public readonly txHash: TxHash,
|
|
11
|
+
public fetched = false,
|
|
12
|
+
public requestedCount = 0,
|
|
13
|
+
public inFlightCount = 0,
|
|
14
|
+
public tx: Tx | undefined = undefined,
|
|
15
|
+
public readonly peers = new Set<string>(),
|
|
16
|
+
) {}
|
|
17
|
+
|
|
18
|
+
public markAsRequested() {
|
|
19
|
+
this.requestedCount++;
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
public markInFlight() {
|
|
23
|
+
this.inFlightCount++;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
public markNotInFlight() {
|
|
27
|
+
this.inFlightCount = Math.max(--this.inFlightCount, 0);
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
public isInFlight(): boolean {
|
|
31
|
+
return this.inFlightCount > 0;
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
//Returns true if this is the first time we mark it as fetched
|
|
35
|
+
public markAsFetched(peerId: PeerId, tx: Tx): boolean {
|
|
36
|
+
if (this.fetched) {
|
|
37
|
+
return false;
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
this.fetched = true;
|
|
41
|
+
this.tx = tx;
|
|
42
|
+
|
|
43
|
+
this.peers.add(peerId.toString());
|
|
44
|
+
|
|
45
|
+
return true;
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
public toString() {
|
|
49
|
+
return this.txHash.toString();
|
|
50
|
+
}
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
/*
|
|
54
|
+
* Single source or truth for transactions we are fetching
|
|
55
|
+
* This could be better optimized but given expected count of missing txs (N < 100)
|
|
56
|
+
* At the moment there is no need for it. And benefit is that we have everything in single store
|
|
57
|
+
* */
|
|
58
|
+
export class MissingTxMetadataCollection extends Map<string, MissingTxMetadata> implements ITxMetadataCollection {
|
|
59
|
+
constructor(
|
|
60
|
+
entries?: readonly (readonly [string, MissingTxMetadata])[] | null,
|
|
61
|
+
private readonly txBatchSize: number = DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE,
|
|
62
|
+
) {
|
|
63
|
+
super(entries);
|
|
64
|
+
}
|
|
65
|
+
public getSortedByRequestedCountAsc(txs: string[]): MissingTxMetadata[] {
|
|
66
|
+
return Array.from(this.values().filter(txMeta => txs.includes(txMeta.txHash.toString()))).sort(
|
|
67
|
+
(a, b) => a.requestedCount - b.requestedCount,
|
|
68
|
+
);
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
public getPrioritizingNotInFlightAndLowerRequestCount(txs: string[]): MissingTxMetadata[] {
|
|
72
|
+
const filtered = Array.from(this.values()).filter(txMeta => txs.includes(txMeta.txHash.toString()));
|
|
73
|
+
|
|
74
|
+
const [notInFlight, inFlight] = filtered.reduce<[MissingTxMetadata[], MissingTxMetadata[]]>(
|
|
75
|
+
(buckets, tx) => {
|
|
76
|
+
tx.isInFlight() ? buckets[1].push(tx) : buckets[0].push(tx);
|
|
77
|
+
return buckets;
|
|
78
|
+
},
|
|
79
|
+
[[], []],
|
|
80
|
+
);
|
|
81
|
+
|
|
82
|
+
notInFlight.sort((a, b) => a.requestedCount - b.requestedCount);
|
|
83
|
+
inFlight.sort((a, b) => a.inFlightCount - b.inFlightCount);
|
|
84
|
+
|
|
85
|
+
return [...notInFlight, ...inFlight];
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
public getFetchedTxHashes(): Set<string> {
|
|
89
|
+
return new Set(
|
|
90
|
+
this.values()
|
|
91
|
+
.filter(t => t.fetched)
|
|
92
|
+
.map(t => t.txHash.toString()),
|
|
93
|
+
);
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
public getMissingTxHashes(): Set<string> {
|
|
97
|
+
return new Set(
|
|
98
|
+
this.values()
|
|
99
|
+
.filter(t => !t.fetched)
|
|
100
|
+
.map(t => t.txHash.toString()),
|
|
101
|
+
);
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
public getInFlightTxHashes(): Set<string> {
|
|
105
|
+
return new Set(
|
|
106
|
+
this.values()
|
|
107
|
+
.filter(t => t.isInFlight())
|
|
108
|
+
.map(t => t.txHash.toString()),
|
|
109
|
+
);
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
public getFetchedTxs(): Tx[] {
|
|
113
|
+
return Array.from(
|
|
114
|
+
this.values()
|
|
115
|
+
.map(t => t.tx)
|
|
116
|
+
.filter(t => !!t),
|
|
117
|
+
);
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
public getTxsPeerHas(peer: PeerId): Set<string> {
|
|
121
|
+
const peerIdStr = peer.toString();
|
|
122
|
+
const txsPeerHas = new Set<string>();
|
|
123
|
+
|
|
124
|
+
this.values().forEach(txMeta => {
|
|
125
|
+
if (txMeta.peers.has(peerIdStr)) {
|
|
126
|
+
txsPeerHas.add(txMeta.txHash.toString());
|
|
127
|
+
}
|
|
128
|
+
});
|
|
129
|
+
|
|
130
|
+
return txsPeerHas;
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
public getTxsToRequestFromThePeer(peer: PeerId): TxHash[] {
|
|
134
|
+
const txsPeerHas = this.getTxsPeerHas(peer);
|
|
135
|
+
const fetchedTxs = this.getFetchedTxHashes();
|
|
136
|
+
|
|
137
|
+
const txsToRequest = txsPeerHas.difference(fetchedTxs);
|
|
138
|
+
|
|
139
|
+
if (txsToRequest.size >= this.txBatchSize) {
|
|
140
|
+
return this.getPrioritizingNotInFlightAndLowerRequestCount(Array.from(txsToRequest))
|
|
141
|
+
.map(t => t.txHash)
|
|
142
|
+
.slice(0, this.txBatchSize);
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
// Otherwise fill the txs to request till txBatchSize with random txs we are missing
|
|
146
|
+
// Who knows, maybe we get lucky and peer received these txs in the meantime
|
|
147
|
+
|
|
148
|
+
const countToFill = this.txBatchSize - txsToRequest.size;
|
|
149
|
+
const txsToFill = this.getPrioritizingNotInFlightAndLowerRequestCount(
|
|
150
|
+
Array.from(this.getMissingTxHashes().difference(txsToRequest)),
|
|
151
|
+
)
|
|
152
|
+
.slice(0, countToFill)
|
|
153
|
+
.map(t => t.txHash);
|
|
154
|
+
|
|
155
|
+
return [...Array.from(txsToRequest).map(t => TxHash.fromString(t)), ...txsToFill];
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
public markRequested(txHash: TxHash) {
|
|
159
|
+
this.get(txHash.toString())?.markAsRequested();
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
/*
|
|
163
|
+
* This should be called only when requesting tx from smart peer
|
|
164
|
+
* Because the smart peer should return this tx, whereas
|
|
165
|
+
* "dumb" peer might return it, or might not - we don't know
|
|
166
|
+
* */
|
|
167
|
+
public markInFlightBySmartPeer(txHash: TxHash) {
|
|
168
|
+
this.get(txHash.toString())?.markInFlight();
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
/*
|
|
172
|
+
* This should be called only when requesting tx from smart peer
|
|
173
|
+
* Because the smart peer should return this tx, whereas
|
|
174
|
+
* "dumb" peer might return it, or might not - we don't know*/
|
|
175
|
+
public markNotInFlightBySmartPeer(txHash: TxHash) {
|
|
176
|
+
this.get(txHash.toString())?.markNotInFlight();
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
public alreadyFetched(txHash: TxHash): boolean {
|
|
180
|
+
return this.get(txHash.toString())?.fetched ?? false;
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
public markFetched(peerId: PeerId, tx: Tx): boolean {
|
|
184
|
+
const txHashStr = tx.txHash.toString();
|
|
185
|
+
const txMeta = this.get(txHashStr);
|
|
186
|
+
if (!txMeta) {
|
|
187
|
+
//TODO: what to do about peer which sent txs we didn't request?
|
|
188
|
+
// 1. don't request from it in the scope of this batch request
|
|
189
|
+
// 2. ban it immediately?
|
|
190
|
+
// 3. track it and ban it?
|
|
191
|
+
//
|
|
192
|
+
return false;
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
return txMeta.markAsFetched(peerId, tx);
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
public markPeerHas(peerId: PeerId, txHash: TxHash[]) {
|
|
199
|
+
const peerIdStr = peerId.toString();
|
|
200
|
+
txHash
|
|
201
|
+
.map(t => t.toString())
|
|
202
|
+
.forEach(txh => {
|
|
203
|
+
const txMeta = this.get(txh);
|
|
204
|
+
if (txMeta) {
|
|
205
|
+
txMeta.peers.add(peerIdStr);
|
|
206
|
+
}
|
|
207
|
+
});
|
|
208
|
+
}
|
|
209
|
+
}
|
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
import type { DateProvider } from '@aztec/foundation/timer';
|
|
2
|
+
import type { PeerErrorSeverity } from '@aztec/stdlib/p2p';
|
|
3
|
+
|
|
4
|
+
import type { PeerId } from '@libp2p/interface';
|
|
5
|
+
|
|
6
|
+
import { DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD } from './config.js';
|
|
7
|
+
import type { IPeerPenalizer } from './interface.js';
|
|
8
|
+
|
|
9
|
+
export const RATE_LIMIT_EXCEEDED_PEER_CACHE_TTL = 1000; // 1s
|
|
10
|
+
|
|
11
|
+
export interface IPeerCollection {
|
|
12
|
+
getAllPeers(): Set<string>;
|
|
13
|
+
getSmartPeers(): Set<string>;
|
|
14
|
+
markPeerSmart(peerId: PeerId): void;
|
|
15
|
+
getSmartPeersToQuery(): Array<string>;
|
|
16
|
+
getDumbPeersToQuery(): Array<string>;
|
|
17
|
+
thereAreSomeDumbRatelimitExceededPeers(): boolean;
|
|
18
|
+
penalisePeer(peerId: PeerId, severity: PeerErrorSeverity): void;
|
|
19
|
+
unMarkPeerAsBad(peerId: PeerId): void;
|
|
20
|
+
getBadPeers(): Set<string>;
|
|
21
|
+
markPeerInFlight(peerId: PeerId): void;
|
|
22
|
+
unMarkPeerInFlight(peerId: PeerId): void;
|
|
23
|
+
markPeerRateLimitExceeded(peerId: PeerId): void;
|
|
24
|
+
getRateLimitExceededPeers(): Set<string>;
|
|
25
|
+
getPeerRateLimitDelayMs(peerId: PeerId): number | undefined;
|
|
26
|
+
getNextDumbPeerAvailabilityDelayMs(): number | undefined;
|
|
27
|
+
getNextSmartPeerAvailabilityDelayMs(): number | undefined;
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
export class PeerCollection implements IPeerCollection {
|
|
31
|
+
private readonly peers;
|
|
32
|
+
|
|
33
|
+
private readonly smartPeers = new Set<string>();
|
|
34
|
+
private readonly inFlightPeers = new Set<string>();
|
|
35
|
+
private readonly rateLimitExceededPeers = new Map<string, number>();
|
|
36
|
+
private readonly peerPenaltyCounters = new Map<string, number>();
|
|
37
|
+
private readonly badPeers = new Set<string>();
|
|
38
|
+
|
|
39
|
+
constructor(
|
|
40
|
+
initialPeers: PeerId[],
|
|
41
|
+
private readonly pinnedPeerId: PeerId | undefined,
|
|
42
|
+
private readonly dateProvider: DateProvider,
|
|
43
|
+
private readonly badPeerThreshold: number = DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD,
|
|
44
|
+
private readonly peerPenalizer?: IPeerPenalizer,
|
|
45
|
+
) {
|
|
46
|
+
this.peers = new Set(initialPeers.map(peer => peer.toString()));
|
|
47
|
+
|
|
48
|
+
// Pinned peer is treaded specially, always mark it as in-flight
|
|
49
|
+
// and never return it as part of smart/dumb peers
|
|
50
|
+
if (this.pinnedPeerId) {
|
|
51
|
+
const peerIdStr = this.pinnedPeerId.toString();
|
|
52
|
+
this.inFlightPeers.add(peerIdStr);
|
|
53
|
+
this.peers.delete(peerIdStr);
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
public getAllPeers(): Set<string> {
|
|
58
|
+
return this.peers;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
public getSmartPeers(): Set<string> {
|
|
62
|
+
return this.smartPeers;
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
public markPeerSmart(peerId: PeerId): void {
|
|
66
|
+
this.smartPeers.add(peerId.toString());
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
public getSmartPeersToQuery(): Array<string> {
|
|
70
|
+
return Array.from(
|
|
71
|
+
this.smartPeers.difference(this.getBadPeers().union(this.inFlightPeers).union(this.getRateLimitExceededPeers())),
|
|
72
|
+
);
|
|
73
|
+
}
|
|
74
|
+
|
|
75
|
+
public getDumbPeersToQuery(): Array<string> {
|
|
76
|
+
return Array.from(
|
|
77
|
+
this.peers.difference(
|
|
78
|
+
this.smartPeers.union(this.getBadPeers()).union(this.inFlightPeers).union(this.getRateLimitExceededPeers()),
|
|
79
|
+
),
|
|
80
|
+
);
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
public thereAreSomeDumbRatelimitExceededPeers(): boolean {
|
|
84
|
+
return (
|
|
85
|
+
this.getRateLimitExceededPeers().difference(this.smartPeers.union(this.badPeers).union(this.inFlightPeers)).size >
|
|
86
|
+
0
|
|
87
|
+
);
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
public markPeerInFlight(peerId: PeerId) {
|
|
91
|
+
this.inFlightPeers.add(peerId.toString());
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
public unMarkPeerInFlight(peerId: PeerId) {
|
|
95
|
+
// Never unmark the pinned peer as in-flight
|
|
96
|
+
if (this.pinnedPeerId && this.pinnedPeerId.toString() === peerId.toString()) {
|
|
97
|
+
return;
|
|
98
|
+
}
|
|
99
|
+
this.inFlightPeers.delete(peerId.toString());
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
public markPeerRateLimitExceeded(peerId: PeerId) {
|
|
103
|
+
const ttl = this.dateProvider.now() + RATE_LIMIT_EXCEEDED_PEER_CACHE_TTL;
|
|
104
|
+
this.rateLimitExceededPeers.set(peerId.toString(), ttl);
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
public getRateLimitExceededPeers(): Set<string> {
|
|
108
|
+
const now = this.dateProvider.now();
|
|
109
|
+
const rateLimitedPeers = new Set<string>();
|
|
110
|
+
|
|
111
|
+
for (const [peerId, expirationTime] of this.rateLimitExceededPeers) {
|
|
112
|
+
if (expirationTime <= now) {
|
|
113
|
+
this.rateLimitExceededPeers.delete(peerId);
|
|
114
|
+
} else {
|
|
115
|
+
rateLimitedPeers.add(peerId);
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
return rateLimitedPeers;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
public penalisePeer(peerId: PeerId, severity: PeerErrorSeverity): void {
|
|
123
|
+
const key = peerId.toString();
|
|
124
|
+
const newPenaltyCount = (this.peerPenaltyCounters.get(key) ?? 0) + 1;
|
|
125
|
+
this.peerPenaltyCounters.set(key, newPenaltyCount);
|
|
126
|
+
this.peerPenalizer?.penalizePeer(peerId, severity);
|
|
127
|
+
if (newPenaltyCount > this.badPeerThreshold) {
|
|
128
|
+
this.badPeers.add(key);
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
public unMarkPeerAsBad(peerId: PeerId) {
|
|
133
|
+
const key = peerId.toString();
|
|
134
|
+
this.badPeers.delete(key);
|
|
135
|
+
this.peerPenaltyCounters.delete(key);
|
|
136
|
+
}
|
|
137
|
+
|
|
138
|
+
public getBadPeers(): Set<string> {
|
|
139
|
+
return new Set(this.badPeers);
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
public getPeerRateLimitDelayMs(peerId: PeerId): number | undefined {
|
|
143
|
+
const key = peerId.toString();
|
|
144
|
+
const expiry = this.rateLimitExceededPeers.get(key);
|
|
145
|
+
const peerIsNotRateLimited = expiry === undefined;
|
|
146
|
+
if (peerIsNotRateLimited) {
|
|
147
|
+
return undefined;
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
const now = this.dateProvider.now();
|
|
151
|
+
const rateLimitHasExpired = expiry <= now;
|
|
152
|
+
if (rateLimitHasExpired) {
|
|
153
|
+
this.rateLimitExceededPeers.delete(key);
|
|
154
|
+
return undefined;
|
|
155
|
+
}
|
|
156
|
+
return expiry - now;
|
|
157
|
+
}
|
|
158
|
+
|
|
159
|
+
public getNextDumbPeerAvailabilityDelayMs(): number | undefined {
|
|
160
|
+
// Note: this _is_ suboptimal
|
|
161
|
+
// (we could've tracked rate limits ) per dumb/smart peers - different collections
|
|
162
|
+
// but everything is in memory and small scale so this, wile suboptimal is not slow
|
|
163
|
+
return this.getNextRateLimitDelayMs(
|
|
164
|
+
peerIdStr =>
|
|
165
|
+
!this.smartPeers.has(peerIdStr) &&
|
|
166
|
+
!this.getBadPeers().has(peerIdStr) &&
|
|
167
|
+
!this.inFlightPeers.has(peerIdStr) &&
|
|
168
|
+
this.peers.has(peerIdStr),
|
|
169
|
+
);
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
public getNextSmartPeerAvailabilityDelayMs(): number | undefined {
|
|
173
|
+
return this.getNextRateLimitDelayMs(
|
|
174
|
+
peerIdStr =>
|
|
175
|
+
this.smartPeers.has(peerIdStr) && !this.getBadPeers().has(peerIdStr) && !this.inFlightPeers.has(peerIdStr),
|
|
176
|
+
);
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
private getNextRateLimitDelayMs(filter: (peerIdStr: string) => boolean): number | undefined {
|
|
180
|
+
const now = this.dateProvider.now();
|
|
181
|
+
let minExpiry: number | undefined;
|
|
182
|
+
|
|
183
|
+
for (const [peerIdStr, expiry] of this.rateLimitExceededPeers) {
|
|
184
|
+
const rateLimitHasExpired = expiry <= now;
|
|
185
|
+
if (rateLimitHasExpired) {
|
|
186
|
+
this.rateLimitExceededPeers.delete(peerIdStr);
|
|
187
|
+
continue;
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
const peerDoesNotMatchFilter = !filter(peerIdStr);
|
|
191
|
+
if (peerDoesNotMatchFilter) {
|
|
192
|
+
continue;
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
minExpiry = minExpiry === undefined ? expiry : Math.min(minExpiry, expiry);
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
const noRateLimitedPeersMatchFilter = minExpiry === undefined;
|
|
199
|
+
if (noRateLimitedPeersMatchFilter) {
|
|
200
|
+
return undefined;
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
return minExpiry! - now;
|
|
204
|
+
}
|
|
205
|
+
}
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
import type { ClientProtocolCircuitVerifier } from '@aztec/stdlib/interfaces/server';
|
|
2
|
+
import { Tx, type TxValidationResult, type TxValidator } from '@aztec/stdlib/tx';
|
|
3
|
+
|
|
4
|
+
import { createTxReqRespValidator } from '../../../msg_validators/tx_validator/factory.js';
|
|
5
|
+
|
|
6
|
+
/** Chain + verifier configuration used to build the requested-tx validator. */
export interface BatchRequestTxValidatorConfig {
  /** L1 chain id handed to the tx validator. */
  l1ChainId: number;
  /** Rollup version handed to the tx validator. */
  rollupVersion: number;
  /** Verifier used to check client protocol circuit proofs. */
  proofVerifier: ClientProtocolCircuitVerifier;
}

/** Validates txs received in response to batch tx requests. */
export interface IBatchRequestTxValidator {
  /** Validates a single requested tx. */
  validateRequestedTx(tx: Tx): Promise<TxValidationResult>;
  /** Validates several requested txs; results align with the input order. */
  validateRequestedTxs(txs: Tx[]): Promise<TxValidationResult[]>;
}
|
|
16
|
+
|
|
17
|
+
export class BatchRequestTxValidator implements IBatchRequestTxValidator {
|
|
18
|
+
readonly txValidator: TxValidator;
|
|
19
|
+
constructor(private readonly config: BatchRequestTxValidatorConfig) {
|
|
20
|
+
this.txValidator = BatchRequestTxValidator.createRequestedTxValidator(this.config);
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
public async validateRequestedTx(tx: Tx): Promise<TxValidationResult> {
|
|
24
|
+
return await this.txValidator.validateTx(tx);
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
public async validateRequestedTxs(txs: Tx[]): Promise<TxValidationResult[]> {
|
|
28
|
+
return await Promise.all(txs.map(tx => this.validateRequestedTx(tx)));
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
static createRequestedTxValidator(config: BatchRequestTxValidatorConfig): TxValidator {
|
|
32
|
+
return createTxReqRespValidator(config.proofVerifier, {
|
|
33
|
+
l1ChainId: config.l1ChainId,
|
|
34
|
+
rollupVersion: config.rollupVersion,
|
|
35
|
+
});
|
|
36
|
+
}
|
|
37
|
+
}
|
|
@@ -20,6 +20,8 @@ import type { ConnectionSampler } from './connection_sampler.js';
|
|
|
20
20
|
export class BatchConnectionSampler {
|
|
21
21
|
private readonly batch: PeerId[] = [];
|
|
22
22
|
private readonly requestsPerPeer: number;
|
|
23
|
+
/** Tracks peer-index combinations that returned empty/invalid responses */
|
|
24
|
+
private readonly failedPeerIndices: Map<string, Set<number>> = new Map();
|
|
23
25
|
|
|
24
26
|
constructor(
|
|
25
27
|
private readonly connectionSampler: ConnectionSampler,
|
|
@@ -44,10 +46,12 @@ export class BatchConnectionSampler {
|
|
|
44
46
|
}
|
|
45
47
|
|
|
46
48
|
/**
|
|
47
|
-
* Gets the peer responsible for handling a specific request index
|
|
49
|
+
* Gets the peer responsible for handling a specific request index.
|
|
50
|
+
* If the primary peer has previously failed for this index, tries other peers.
|
|
51
|
+
* If all batch peers have failed, attempts to sample a new peer.
|
|
48
52
|
*
|
|
49
53
|
* @param index - The request index
|
|
50
|
-
* @returns The peer assigned to handle this request
|
|
54
|
+
* @returns The peer assigned to handle this request, or undefined if no peer available
|
|
51
55
|
*/
|
|
52
56
|
getPeerForRequest(index: number): PeerId | undefined {
|
|
53
57
|
if (this.batch.length === 0) {
|
|
@@ -55,8 +59,65 @@ export class BatchConnectionSampler {
|
|
|
55
59
|
}
|
|
56
60
|
|
|
57
61
|
// Calculate which peer bucket this index belongs to
|
|
58
|
-
const
|
|
59
|
-
|
|
62
|
+
const primaryPeerIndex = Math.floor(index / this.requestsPerPeer) % this.batch.length;
|
|
63
|
+
|
|
64
|
+
// Try peers starting from primary, wrapping around
|
|
65
|
+
for (let offset = 0; offset < this.batch.length; offset++) {
|
|
66
|
+
const peerIndex = (primaryPeerIndex + offset) % this.batch.length;
|
|
67
|
+
const peer = this.batch[peerIndex];
|
|
68
|
+
const peerKey = peer.toString();
|
|
69
|
+
|
|
70
|
+
const failedIndices = this.failedPeerIndices.get(peerKey);
|
|
71
|
+
if (!failedIndices || !failedIndices.has(index)) {
|
|
72
|
+
return peer;
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
// All batch peers have failed for this index - try to sample a new peer
|
|
77
|
+
const newPeer = this.sampleNewPeer();
|
|
78
|
+
if (newPeer) {
|
|
79
|
+
return newPeer;
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
return undefined;
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
/**
|
|
86
|
+
* Attempts to sample a new peer that isn't already in the batch.
|
|
87
|
+
* If successful, adds the peer to the batch.
|
|
88
|
+
*
|
|
89
|
+
* @returns The new peer if one was sampled, undefined otherwise
|
|
90
|
+
*/
|
|
91
|
+
private sampleNewPeer(): PeerId | undefined {
|
|
92
|
+
// Exclude all current batch peers
|
|
93
|
+
const excluding = new Map(this.batch.map(p => [p.toString(), true] as const));
|
|
94
|
+
const newPeer = this.connectionSampler.getPeer(excluding);
|
|
95
|
+
|
|
96
|
+
if (newPeer) {
|
|
97
|
+
this.batch.push(newPeer);
|
|
98
|
+
this.logger.trace('Sampled new peer for exhausted index', { newPeer: newPeer.toString() });
|
|
99
|
+
return newPeer;
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
return undefined;
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
/**
|
|
106
|
+
* Marks that a peer returned an empty/invalid response for a specific request index.
|
|
107
|
+
* The peer will not be assigned this index again.
|
|
108
|
+
*
|
|
109
|
+
* @param peerId - The peer that failed
|
|
110
|
+
* @param index - The request index that failed
|
|
111
|
+
*/
|
|
112
|
+
markPeerFailedForIndex(peerId: PeerId, index: number): void {
|
|
113
|
+
const peerKey = peerId.toString();
|
|
114
|
+
let failedIndices = this.failedPeerIndices.get(peerKey);
|
|
115
|
+
if (!failedIndices) {
|
|
116
|
+
failedIndices = new Set();
|
|
117
|
+
this.failedPeerIndices.set(peerKey, failedIndices);
|
|
118
|
+
}
|
|
119
|
+
failedIndices.add(index);
|
|
120
|
+
this.logger.trace('Marked peer failed for index', { peerId: peerKey, index });
|
|
60
121
|
}
|
|
61
122
|
|
|
62
123
|
/**
|
|
@@ -125,6 +125,22 @@ export class ConnectionSampler {
|
|
|
125
125
|
return { peer: lastPeer, sampledPeers };
|
|
126
126
|
}
|
|
127
127
|
|
|
128
|
+
/**
 * Returns all peers sorted by connection count ascending,
 * meaning that the peers with the least number of active connections are earlier in the array.
 * Peers with no tracked active connections count as 0.
 *
 * @param excluding - peers to exclude (by stringified peer id)
 * @returns list of peer ids
 */
public getPeerListSortedByConnectionCountAsc(excluding?: Set<string>): PeerId[] {
  return this.libp2p
    .getPeers()
    .filter(id => !excluding?.has(id.toString()))
    .map(id => ({ id, count: this.activeConnectionsCount.get(id.toString()) ?? 0 }))
    .sort((a, b) => a.count - b.count)
    .map(p => p.id);
}
|
|
143
|
+
|
|
128
144
|
/**
|
|
129
145
|
* Samples a batch of unique peers from the libp2p node, prioritizing peers without active connections
|
|
130
146
|
*
|
|
@@ -1,10 +1,11 @@
|
|
|
1
1
|
import { Fr } from '@aztec/foundation/curves/bn254';
|
|
2
|
-
import {
|
|
2
|
+
import { L2Block } from '@aztec/stdlib/block';
|
|
3
3
|
import { TxArray, TxHashArray } from '@aztec/stdlib/tx';
|
|
4
4
|
|
|
5
5
|
import type { PeerId } from '@libp2p/interface';
|
|
6
6
|
|
|
7
7
|
import type { P2PReqRespConfig } from './config.js';
|
|
8
|
+
import type { ConnectionSampler } from './connection-sampler/connection_sampler.js';
|
|
8
9
|
import { AuthRequest, AuthResponse } from './protocols/auth.js';
|
|
9
10
|
import { BlockTxsRequest, BlockTxsResponse } from './protocols/block_txs/block_txs_reqresp.js';
|
|
10
11
|
import { StatusMessage } from './protocols/status.js';
|
|
@@ -198,7 +199,7 @@ export const subProtocolMap = {
|
|
|
198
199
|
},
|
|
199
200
|
[ReqRespSubProtocol.BLOCK]: {
|
|
200
201
|
request: Fr, // block number
|
|
201
|
-
response:
|
|
202
|
+
response: L2Block,
|
|
202
203
|
},
|
|
203
204
|
[ReqRespSubProtocol.AUTH]: {
|
|
204
205
|
request: AuthRequest,
|
|
@@ -237,4 +238,6 @@ export interface ReqRespInterface {
|
|
|
237
238
|
): Promise<ReqRespResponse>;
|
|
238
239
|
|
|
239
240
|
updateConfig(config: Partial<P2PReqRespConfig>): void;
|
|
241
|
+
|
|
242
|
+
getConnectionSampler(): Pick<ConnectionSampler, 'getPeerListSortedByConnectionCountAsc'>;
|
|
240
243
|
}
|