@aztec/p2p 0.0.1-commit.fcb71a6 → 0.0.1-commit.ff7989d6c
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/bootstrap/bootstrap.d.ts +4 -3
- package/dest/bootstrap/bootstrap.d.ts.map +1 -1
- package/dest/bootstrap/bootstrap.js +4 -4
- package/dest/client/factory.d.ts +7 -6
- package/dest/client/factory.d.ts.map +1 -1
- package/dest/client/factory.js +53 -14
- package/dest/client/interface.d.ts +58 -25
- package/dest/client/interface.d.ts.map +1 -1
- package/dest/client/p2p_client.d.ts +46 -51
- package/dest/client/p2p_client.d.ts.map +1 -1
- package/dest/client/p2p_client.js +601 -259
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.d.ts +2 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.d.ts.map +1 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.js +305 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.d.ts +73 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.d.ts.map +1 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.js +8 -0
- package/dest/config.d.ts +35 -7
- package/dest/config.d.ts.map +1 -1
- package/dest/config.js +23 -9
- package/dest/errors/tx-pool.error.d.ts +8 -0
- package/dest/errors/tx-pool.error.d.ts.map +1 -0
- package/dest/errors/tx-pool.error.js +9 -0
- package/dest/index.d.ts +2 -1
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +1 -0
- package/dest/mem_pools/attestation_pool/attestation_pool.d.ts +111 -76
- package/dest/mem_pools/attestation_pool/attestation_pool.d.ts.map +1 -1
- package/dest/mem_pools/attestation_pool/attestation_pool.js +441 -3
- package/dest/mem_pools/attestation_pool/attestation_pool_test_suite.d.ts +2 -2
- package/dest/mem_pools/attestation_pool/attestation_pool_test_suite.d.ts.map +1 -1
- package/dest/mem_pools/attestation_pool/attestation_pool_test_suite.js +527 -287
- package/dest/mem_pools/attestation_pool/index.d.ts +2 -3
- package/dest/mem_pools/attestation_pool/index.d.ts.map +1 -1
- package/dest/mem_pools/attestation_pool/index.js +1 -2
- package/dest/mem_pools/attestation_pool/mocks.d.ts +9 -6
- package/dest/mem_pools/attestation_pool/mocks.d.ts.map +1 -1
- package/dest/mem_pools/attestation_pool/mocks.js +16 -12
- package/dest/mem_pools/index.d.ts +3 -2
- package/dest/mem_pools/index.d.ts.map +1 -1
- package/dest/mem_pools/index.js +1 -1
- package/dest/mem_pools/instrumentation.d.ts +1 -1
- package/dest/mem_pools/instrumentation.d.ts.map +1 -1
- package/dest/mem_pools/instrumentation.js +5 -14
- package/dest/mem_pools/interface.d.ts +5 -5
- package/dest/mem_pools/interface.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts +15 -10
- package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/aztec_kv_tx_pool.js +91 -50
- package/dest/mem_pools/tx_pool/eviction/eviction_manager.d.ts +19 -5
- package/dest/mem_pools/tx_pool/eviction/eviction_manager.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/eviction_manager.js +59 -3
- package/dest/mem_pools/tx_pool/eviction/eviction_strategy.d.ts +79 -5
- package/dest/mem_pools/tx_pool/eviction/eviction_strategy.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/eviction_strategy.js +47 -0
- package/dest/mem_pools/tx_pool/eviction/fee_payer_balance_eviction_rule.d.ts +16 -0
- package/dest/mem_pools/tx_pool/eviction/fee_payer_balance_eviction_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool/eviction/fee_payer_balance_eviction_rule.js +122 -0
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.d.ts +2 -2
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.js +3 -3
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.d.ts +4 -4
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.js +2 -0
- package/dest/mem_pools/tx_pool/eviction/low_priority_eviction_rule.d.ts +2 -2
- package/dest/mem_pools/tx_pool/eviction/low_priority_eviction_rule.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/nullifier_conflict_pre_add_rule.d.ts +25 -0
- package/dest/mem_pools/tx_pool/eviction/nullifier_conflict_pre_add_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool/eviction/nullifier_conflict_pre_add_rule.js +57 -0
- package/dest/mem_pools/tx_pool_v2/archive/index.d.ts +2 -0
- package/dest/mem_pools/tx_pool_v2/archive/index.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/archive/index.js +1 -0
- package/dest/mem_pools/tx_pool_v2/archive/tx_archive.d.ts +43 -0
- package/dest/mem_pools/tx_pool_v2/archive/tx_archive.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/archive/tx_archive.js +103 -0
- package/dest/mem_pools/tx_pool_v2/deleted_pool.d.ts +104 -0
- package/dest/mem_pools/tx_pool_v2/deleted_pool.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/deleted_pool.js +251 -0
- package/dest/mem_pools/tx_pool_v2/eviction/eviction_manager.d.ts +47 -0
- package/dest/mem_pools/tx_pool_v2/eviction/eviction_manager.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/eviction_manager.js +128 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_eviction_rule.d.ts +17 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_eviction_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_eviction_rule.js +93 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_pre_add_rule.d.ts +19 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_pre_add_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_pre_add_rule.js +95 -0
- package/dest/mem_pools/tx_pool_v2/eviction/index.d.ts +10 -0
- package/dest/mem_pools/tx_pool_v2/eviction/index.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/index.js +11 -0
- package/dest/mem_pools/tx_pool_v2/eviction/interfaces.d.ts +174 -0
- package/dest/mem_pools/tx_pool_v2/eviction/interfaces.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/interfaces.js +25 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_mining_rule.d.ts +15 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_mining_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_mining_rule.js +65 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_reorg_rule.d.ts +17 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_reorg_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_reorg_rule.js +93 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_eviction_rule.d.ts +16 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_eviction_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_eviction_rule.js +78 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.d.ts +20 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.js +73 -0
- package/dest/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.d.ts +15 -0
- package/dest/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.js +19 -0
- package/dest/mem_pools/tx_pool_v2/index.d.ts +6 -0
- package/dest/mem_pools/tx_pool_v2/index.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/index.js +5 -0
- package/dest/mem_pools/tx_pool_v2/instrumentation.d.ts +15 -0
- package/dest/mem_pools/tx_pool_v2/instrumentation.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/instrumentation.js +43 -0
- package/dest/mem_pools/tx_pool_v2/interfaces.d.ts +211 -0
- package/dest/mem_pools/tx_pool_v2/interfaces.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/interfaces.js +9 -0
- package/dest/mem_pools/tx_pool_v2/tx_metadata.d.ts +97 -0
- package/dest/mem_pools/tx_pool_v2/tx_metadata.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_metadata.js +152 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_bench_metrics.d.ts +26 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_bench_metrics.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_bench_metrics.js +70 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_indices.d.ts +108 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_indices.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_indices.js +355 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2.d.ts +60 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2.js +161 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2_impl.d.ts +77 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2_impl.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2_impl.js +896 -0
- package/dest/msg_validators/attestation_validator/attestation_validator.d.ts +4 -4
- package/dest/msg_validators/attestation_validator/attestation_validator.d.ts.map +1 -1
- package/dest/msg_validators/attestation_validator/attestation_validator.js +51 -18
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.d.ts +7 -7
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.d.ts.map +1 -1
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.js +22 -13
- package/dest/msg_validators/clock_tolerance.d.ts +21 -0
- package/dest/msg_validators/clock_tolerance.d.ts.map +1 -0
- package/dest/msg_validators/clock_tolerance.js +37 -0
- package/dest/msg_validators/index.d.ts +2 -2
- package/dest/msg_validators/index.d.ts.map +1 -1
- package/dest/msg_validators/index.js +1 -1
- package/dest/msg_validators/proposal_validator/block_proposal_validator.d.ts +9 -0
- package/dest/msg_validators/proposal_validator/block_proposal_validator.d.ts.map +1 -0
- package/dest/msg_validators/proposal_validator/block_proposal_validator.js +6 -0
- package/dest/msg_validators/proposal_validator/checkpoint_proposal_validator.d.ts +9 -0
- package/dest/msg_validators/proposal_validator/checkpoint_proposal_validator.d.ts.map +1 -0
- package/dest/msg_validators/proposal_validator/checkpoint_proposal_validator.js +6 -0
- package/dest/msg_validators/proposal_validator/index.d.ts +4 -0
- package/dest/msg_validators/proposal_validator/index.d.ts.map +1 -0
- package/dest/msg_validators/proposal_validator/index.js +3 -0
- package/dest/msg_validators/proposal_validator/proposal_validator.d.ts +13 -0
- package/dest/msg_validators/proposal_validator/proposal_validator.d.ts.map +1 -0
- package/dest/msg_validators/proposal_validator/proposal_validator.js +104 -0
- package/dest/msg_validators/proposal_validator/proposal_validator_test_suite.d.ts +23 -0
- package/dest/msg_validators/proposal_validator/proposal_validator_test_suite.d.ts.map +1 -0
- package/dest/msg_validators/proposal_validator/proposal_validator_test_suite.js +212 -0
- package/dest/msg_validators/tx_validator/aggregate_tx_validator.d.ts +3 -3
- package/dest/msg_validators/tx_validator/aggregate_tx_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/archive_cache.d.ts +3 -3
- package/dest/msg_validators/tx_validator/archive_cache.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/archive_cache.js +1 -1
- package/dest/msg_validators/tx_validator/block_header_validator.d.ts +20 -6
- package/dest/msg_validators/tx_validator/block_header_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/block_header_validator.js +4 -3
- package/dest/msg_validators/tx_validator/data_validator.d.ts +3 -1
- package/dest/msg_validators/tx_validator/data_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/data_validator.js +4 -1
- package/dest/msg_validators/tx_validator/double_spend_validator.d.ts +15 -4
- package/dest/msg_validators/tx_validator/double_spend_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/double_spend_validator.js +7 -6
- package/dest/msg_validators/tx_validator/factory.d.ts +8 -3
- package/dest/msg_validators/tx_validator/factory.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/factory.js +21 -11
- package/dest/msg_validators/tx_validator/fee_payer_balance.d.ts +10 -0
- package/dest/msg_validators/tx_validator/fee_payer_balance.d.ts.map +1 -0
- package/dest/msg_validators/tx_validator/fee_payer_balance.js +20 -0
- package/dest/msg_validators/tx_validator/gas_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/gas_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/gas_validator.js +11 -16
- package/dest/msg_validators/tx_validator/index.d.ts +2 -1
- package/dest/msg_validators/tx_validator/index.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/index.js +1 -0
- package/dest/msg_validators/tx_validator/metadata_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/metadata_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/metadata_validator.js +2 -2
- package/dest/msg_validators/tx_validator/phases_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/phases_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/phases_validator.js +3 -3
- package/dest/msg_validators/tx_validator/size_validator.d.ts +8 -0
- package/dest/msg_validators/tx_validator/size_validator.d.ts.map +1 -0
- package/dest/msg_validators/tx_validator/size_validator.js +23 -0
- package/dest/msg_validators/tx_validator/timestamp_validator.d.ts +22 -5
- package/dest/msg_validators/tx_validator/timestamp_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/timestamp_validator.js +8 -8
- package/dest/msg_validators/tx_validator/tx_permitted_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/tx_permitted_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/tx_permitted_validator.js +2 -2
- package/dest/msg_validators/tx_validator/tx_proof_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/tx_proof_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/tx_proof_validator.js +2 -2
- package/dest/services/data_store.d.ts +1 -1
- package/dest/services/data_store.d.ts.map +1 -1
- package/dest/services/data_store.js +10 -6
- package/dest/services/discv5/discV5_service.js +1 -1
- package/dest/services/dummy_service.d.ts +28 -3
- package/dest/services/dummy_service.d.ts.map +1 -1
- package/dest/services/dummy_service.js +51 -0
- package/dest/services/encoding.d.ts +2 -2
- package/dest/services/encoding.d.ts.map +1 -1
- package/dest/services/encoding.js +9 -7
- package/dest/services/gossipsub/index.d.ts +3 -0
- package/dest/services/gossipsub/index.d.ts.map +1 -0
- package/dest/services/gossipsub/index.js +2 -0
- package/dest/services/gossipsub/scoring.d.ts +21 -3
- package/dest/services/gossipsub/scoring.d.ts.map +1 -1
- package/dest/services/gossipsub/scoring.js +24 -7
- package/dest/services/gossipsub/topic_score_params.d.ts +173 -0
- package/dest/services/gossipsub/topic_score_params.d.ts.map +1 -0
- package/dest/services/gossipsub/topic_score_params.js +346 -0
- package/dest/services/index.d.ts +2 -1
- package/dest/services/index.d.ts.map +1 -1
- package/dest/services/index.js +1 -0
- package/dest/services/libp2p/instrumentation.d.ts +1 -1
- package/dest/services/libp2p/instrumentation.d.ts.map +1 -1
- package/dest/services/libp2p/instrumentation.js +30 -72
- package/dest/services/libp2p/libp2p_service.d.ts +106 -33
- package/dest/services/libp2p/libp2p_service.d.ts.map +1 -1
- package/dest/services/libp2p/libp2p_service.js +984 -317
- package/dest/services/peer-manager/metrics.d.ts +2 -2
- package/dest/services/peer-manager/metrics.d.ts.map +1 -1
- package/dest/services/peer-manager/metrics.js +21 -26
- package/dest/services/peer-manager/peer_manager.d.ts +2 -2
- package/dest/services/peer-manager/peer_manager.d.ts.map +1 -1
- package/dest/services/peer-manager/peer_manager.js +0 -10
- package/dest/services/peer-manager/peer_scoring.d.ts +1 -1
- package/dest/services/peer-manager/peer_scoring.d.ts.map +1 -1
- package/dest/services/peer-manager/peer_scoring.js +32 -6
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.d.ts +48 -0
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.js +562 -0
- package/dest/services/reqresp/batch-tx-requester/config.d.ts +17 -0
- package/dest/services/reqresp/batch-tx-requester/config.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/config.js +27 -0
- package/dest/services/reqresp/batch-tx-requester/interface.d.ts +46 -0
- package/dest/services/reqresp/batch-tx-requester/interface.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/interface.js +1 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.d.ts +34 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.js +130 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.d.ts +54 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.js +139 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.d.ts +20 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.js +21 -0
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts +22 -3
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts.map +1 -1
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.js +63 -4
- package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts +2 -1
- package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts.map +1 -1
- package/dest/services/reqresp/connection-sampler/connection_sampler.js +12 -0
- package/dest/services/reqresp/constants.d.ts +12 -0
- package/dest/services/reqresp/constants.d.ts.map +1 -0
- package/dest/services/reqresp/constants.js +7 -0
- package/dest/services/reqresp/interface.d.ts +12 -1
- package/dest/services/reqresp/interface.d.ts.map +1 -1
- package/dest/services/reqresp/interface.js +15 -1
- package/dest/services/reqresp/metrics.d.ts +6 -5
- package/dest/services/reqresp/metrics.d.ts.map +1 -1
- package/dest/services/reqresp/metrics.js +17 -21
- package/dest/services/reqresp/protocols/block_txs/bitvector.d.ts +5 -1
- package/dest/services/reqresp/protocols/block_txs/bitvector.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/bitvector.js +12 -0
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.d.ts +7 -5
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.js +27 -9
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.d.ts +29 -6
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.js +59 -13
- package/dest/services/reqresp/protocols/status.d.ts +1 -1
- package/dest/services/reqresp/protocols/status.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/status.js +4 -1
- package/dest/services/reqresp/protocols/tx.d.ts +7 -1
- package/dest/services/reqresp/protocols/tx.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/tx.js +20 -0
- package/dest/services/reqresp/reqresp.d.ts +6 -1
- package/dest/services/reqresp/reqresp.d.ts.map +1 -1
- package/dest/services/reqresp/reqresp.js +471 -50
- package/dest/services/service.d.ts +55 -3
- package/dest/services/service.d.ts.map +1 -1
- package/dest/services/tx_collection/config.d.ts +22 -1
- package/dest/services/tx_collection/config.d.ts.map +1 -1
- package/dest/services/tx_collection/config.js +55 -1
- package/dest/services/tx_collection/fast_tx_collection.d.ts +7 -4
- package/dest/services/tx_collection/fast_tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/fast_tx_collection.js +71 -44
- package/dest/services/tx_collection/file_store_tx_collection.d.ts +53 -0
- package/dest/services/tx_collection/file_store_tx_collection.d.ts.map +1 -0
- package/dest/services/tx_collection/file_store_tx_collection.js +167 -0
- package/dest/services/tx_collection/file_store_tx_source.d.ts +37 -0
- package/dest/services/tx_collection/file_store_tx_source.d.ts.map +1 -0
- package/dest/services/tx_collection/file_store_tx_source.js +90 -0
- package/dest/services/tx_collection/index.d.ts +3 -1
- package/dest/services/tx_collection/index.d.ts.map +1 -1
- package/dest/services/tx_collection/index.js +2 -0
- package/dest/services/tx_collection/instrumentation.d.ts +1 -1
- package/dest/services/tx_collection/instrumentation.d.ts.map +1 -1
- package/dest/services/tx_collection/instrumentation.js +11 -13
- package/dest/services/tx_collection/missing_txs_tracker.d.ts +32 -0
- package/dest/services/tx_collection/missing_txs_tracker.d.ts.map +1 -0
- package/dest/services/tx_collection/missing_txs_tracker.js +27 -0
- package/dest/services/tx_collection/proposal_tx_collector.d.ts +49 -0
- package/dest/services/tx_collection/proposal_tx_collector.d.ts.map +1 -0
- package/dest/services/tx_collection/proposal_tx_collector.js +50 -0
- package/dest/services/tx_collection/slow_tx_collection.d.ts +9 -5
- package/dest/services/tx_collection/slow_tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/slow_tx_collection.js +60 -26
- package/dest/services/tx_collection/tx_collection.d.ts +29 -16
- package/dest/services/tx_collection/tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/tx_collection.js +79 -7
- package/dest/services/tx_collection/tx_collection_sink.d.ts +18 -8
- package/dest/services/tx_collection/tx_collection_sink.d.ts.map +1 -1
- package/dest/services/tx_collection/tx_collection_sink.js +26 -29
- package/dest/services/tx_collection/tx_source.d.ts +8 -3
- package/dest/services/tx_collection/tx_source.d.ts.map +1 -1
- package/dest/services/tx_collection/tx_source.js +19 -2
- package/dest/services/tx_file_store/config.d.ts +16 -0
- package/dest/services/tx_file_store/config.d.ts.map +1 -0
- package/dest/services/tx_file_store/config.js +22 -0
- package/dest/services/tx_file_store/index.d.ts +4 -0
- package/dest/services/tx_file_store/index.d.ts.map +1 -0
- package/dest/services/tx_file_store/index.js +3 -0
- package/dest/services/tx_file_store/instrumentation.d.ts +15 -0
- package/dest/services/tx_file_store/instrumentation.d.ts.map +1 -0
- package/dest/services/tx_file_store/instrumentation.js +29 -0
- package/dest/services/tx_file_store/tx_file_store.d.ts +48 -0
- package/dest/services/tx_file_store/tx_file_store.d.ts.map +1 -0
- package/dest/services/tx_file_store/tx_file_store.js +152 -0
- package/dest/services/tx_provider.d.ts +5 -5
- package/dest/services/tx_provider.d.ts.map +1 -1
- package/dest/services/tx_provider.js +5 -4
- package/dest/services/tx_provider_instrumentation.d.ts +1 -1
- package/dest/services/tx_provider_instrumentation.d.ts.map +1 -1
- package/dest/services/tx_provider_instrumentation.js +7 -20
- package/dest/test-helpers/index.d.ts +3 -1
- package/dest/test-helpers/index.d.ts.map +1 -1
- package/dest/test-helpers/index.js +2 -0
- package/dest/test-helpers/make-test-p2p-clients.d.ts +3 -3
- package/dest/test-helpers/make-test-p2p-clients.d.ts.map +1 -1
- package/dest/test-helpers/mock-pubsub.d.ts +29 -2
- package/dest/test-helpers/mock-pubsub.d.ts.map +1 -1
- package/dest/test-helpers/mock-pubsub.js +103 -2
- package/dest/test-helpers/reqresp-nodes.d.ts +1 -1
- package/dest/test-helpers/reqresp-nodes.d.ts.map +1 -1
- package/dest/test-helpers/reqresp-nodes.js +2 -1
- package/dest/test-helpers/test_tx_provider.d.ts +40 -0
- package/dest/test-helpers/test_tx_provider.d.ts.map +1 -0
- package/dest/test-helpers/test_tx_provider.js +41 -0
- package/dest/test-helpers/testbench-utils.d.ts +163 -0
- package/dest/test-helpers/testbench-utils.d.ts.map +1 -0
- package/dest/test-helpers/testbench-utils.js +366 -0
- package/dest/testbench/p2p_client_testbench_worker.d.ts +28 -2
- package/dest/testbench/p2p_client_testbench_worker.d.ts.map +1 -1
- package/dest/testbench/p2p_client_testbench_worker.js +221 -127
- package/dest/testbench/worker_client_manager.d.ts +51 -6
- package/dest/testbench/worker_client_manager.d.ts.map +1 -1
- package/dest/testbench/worker_client_manager.js +226 -39
- package/dest/util.d.ts +2 -2
- package/dest/util.d.ts.map +1 -1
- package/package.json +16 -16
- package/src/bootstrap/bootstrap.ts +7 -4
- package/src/client/factory.ts +96 -24
- package/src/client/interface.ts +76 -25
- package/src/client/p2p_client.ts +269 -290
- package/src/client/test/tx_proposal_collector/README.md +227 -0
- package/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts +346 -0
- package/src/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.ts +43 -0
- package/src/config.ts +49 -13
- package/src/errors/tx-pool.error.ts +12 -0
- package/src/index.ts +1 -0
- package/src/mem_pools/attestation_pool/attestation_pool.ts +510 -78
- package/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts +612 -320
- package/src/mem_pools/attestation_pool/index.ts +9 -2
- package/src/mem_pools/attestation_pool/mocks.ts +20 -13
- package/src/mem_pools/index.ts +4 -1
- package/src/mem_pools/instrumentation.ts +10 -18
- package/src/mem_pools/interface.ts +4 -4
- package/src/mem_pools/tx_pool/README.md +29 -14
- package/src/mem_pools/tx_pool/aztec_kv_tx_pool.ts +130 -75
- package/src/mem_pools/tx_pool/eviction/eviction_manager.ts +66 -5
- package/src/mem_pools/tx_pool/eviction/eviction_strategy.ts +119 -4
- package/src/mem_pools/tx_pool/eviction/fee_payer_balance_eviction_rule.ts +162 -0
- package/src/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.ts +3 -3
- package/src/mem_pools/tx_pool/eviction/invalid_txs_after_reorg_rule.ts +4 -2
- package/src/mem_pools/tx_pool/eviction/nullifier_conflict_pre_add_rule.ts +75 -0
- package/src/mem_pools/tx_pool_v2/README.md +275 -0
- package/src/mem_pools/tx_pool_v2/archive/index.ts +1 -0
- package/src/mem_pools/tx_pool_v2/archive/tx_archive.ts +120 -0
- package/src/mem_pools/tx_pool_v2/deleted_pool.ts +321 -0
- package/src/mem_pools/tx_pool_v2/eviction/eviction_manager.ts +160 -0
- package/src/mem_pools/tx_pool_v2/eviction/fee_payer_balance_eviction_rule.ts +121 -0
- package/src/mem_pools/tx_pool_v2/eviction/fee_payer_balance_pre_add_rule.ts +122 -0
- package/src/mem_pools/tx_pool_v2/eviction/index.ts +27 -0
- package/src/mem_pools/tx_pool_v2/eviction/interfaces.ts +209 -0
- package/src/mem_pools/tx_pool_v2/eviction/invalid_txs_after_mining_rule.ts +74 -0
- package/src/mem_pools/tx_pool_v2/eviction/invalid_txs_after_reorg_rule.ts +101 -0
- package/src/mem_pools/tx_pool_v2/eviction/low_priority_eviction_rule.ts +91 -0
- package/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.ts +90 -0
- package/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.ts +31 -0
- package/src/mem_pools/tx_pool_v2/index.ts +12 -0
- package/src/mem_pools/tx_pool_v2/instrumentation.ts +69 -0
- package/src/mem_pools/tx_pool_v2/interfaces.ts +242 -0
- package/src/mem_pools/tx_pool_v2/tx_metadata.ts +242 -0
- package/src/mem_pools/tx_pool_v2/tx_pool_bench_metrics.ts +77 -0
- package/src/mem_pools/tx_pool_v2/tx_pool_indices.ts +444 -0
- package/src/mem_pools/tx_pool_v2/tx_pool_v2.ts +223 -0
- package/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts +1069 -0
- package/src/msg_validators/attestation_validator/attestation_validator.ts +36 -21
- package/src/msg_validators/attestation_validator/fisherman_attestation_validator.ts +21 -18
- package/src/msg_validators/clock_tolerance.ts +51 -0
- package/src/msg_validators/index.ts +1 -1
- package/src/msg_validators/proposal_validator/block_proposal_validator.ts +10 -0
- package/src/msg_validators/proposal_validator/checkpoint_proposal_validator.ts +13 -0
- package/src/msg_validators/proposal_validator/index.ts +3 -0
- package/src/msg_validators/proposal_validator/proposal_validator.ts +92 -0
- package/src/msg_validators/proposal_validator/proposal_validator_test_suite.ts +230 -0
- package/src/msg_validators/tx_validator/aggregate_tx_validator.ts +2 -2
- package/src/msg_validators/tx_validator/archive_cache.ts +3 -3
- package/src/msg_validators/tx_validator/block_header_validator.ts +21 -8
- package/src/msg_validators/tx_validator/data_validator.ts +18 -6
- package/src/msg_validators/tx_validator/double_spend_validator.ts +15 -9
- package/src/msg_validators/tx_validator/factory.ts +64 -23
- package/src/msg_validators/tx_validator/fee_payer_balance.ts +40 -0
- package/src/msg_validators/tx_validator/gas_validator.ts +17 -28
- package/src/msg_validators/tx_validator/index.ts +1 -0
- package/src/msg_validators/tx_validator/metadata_validator.ts +18 -7
- package/src/msg_validators/tx_validator/phases_validator.ts +5 -3
- package/src/msg_validators/tx_validator/size_validator.ts +22 -0
- package/src/msg_validators/tx_validator/timestamp_validator.ts +29 -19
- package/src/msg_validators/tx_validator/tx_permitted_validator.ts +8 -3
- package/src/msg_validators/tx_validator/tx_proof_validator.ts +8 -3
- package/src/services/data_store.ts +10 -7
- package/src/services/discv5/discV5_service.ts +1 -1
- package/src/services/dummy_service.ts +68 -1
- package/src/services/encoding.ts +8 -6
- package/src/services/gossipsub/README.md +641 -0
- package/src/services/gossipsub/index.ts +2 -0
- package/src/services/gossipsub/scoring.ts +29 -5
- package/src/services/gossipsub/topic_score_params.ts +487 -0
- package/src/services/index.ts +1 -0
- package/src/services/libp2p/instrumentation.ts +32 -73
- package/src/services/libp2p/libp2p_service.ts +651 -301
- package/src/services/peer-manager/metrics.ts +22 -26
- package/src/services/peer-manager/peer_manager.ts +1 -2
- package/src/services/peer-manager/peer_scoring.ts +28 -4
- package/src/services/reqresp/batch-tx-requester/README.md +305 -0
- package/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts +706 -0
- package/src/services/reqresp/batch-tx-requester/config.ts +40 -0
- package/src/services/reqresp/batch-tx-requester/interface.ts +53 -0
- package/src/services/reqresp/batch-tx-requester/missing_txs.ts +161 -0
- package/src/services/reqresp/batch-tx-requester/peer_collection.ts +205 -0
- package/src/services/reqresp/batch-tx-requester/tx_validator.ts +37 -0
- package/src/services/reqresp/connection-sampler/batch_connection_sampler.ts +65 -4
- package/src/services/reqresp/connection-sampler/connection_sampler.ts +19 -1
- package/src/services/reqresp/constants.ts +14 -0
- package/src/services/reqresp/interface.ts +29 -1
- package/src/services/reqresp/metrics.ts +36 -27
- package/src/services/reqresp/protocols/block_txs/bitvector.ts +16 -0
- package/src/services/reqresp/protocols/block_txs/block_txs_handler.ts +35 -12
- package/src/services/reqresp/protocols/block_txs/block_txs_reqresp.ts +74 -9
- package/src/services/reqresp/protocols/status.ts +7 -4
- package/src/services/reqresp/protocols/tx.ts +22 -0
- package/src/services/reqresp/reqresp.ts +79 -22
- package/src/services/service.ts +72 -4
- package/src/services/tx_collection/config.ts +83 -1
- package/src/services/tx_collection/fast_tx_collection.ts +93 -47
- package/src/services/tx_collection/file_store_tx_collection.ts +202 -0
- package/src/services/tx_collection/file_store_tx_source.ts +117 -0
- package/src/services/tx_collection/index.ts +6 -0
- package/src/services/tx_collection/instrumentation.ts +11 -13
- package/src/services/tx_collection/missing_txs_tracker.ts +52 -0
- package/src/services/tx_collection/proposal_tx_collector.ts +113 -0
- package/src/services/tx_collection/slow_tx_collection.ts +68 -35
- package/src/services/tx_collection/tx_collection.ts +121 -24
- package/src/services/tx_collection/tx_collection_sink.ts +30 -34
- package/src/services/tx_collection/tx_source.ts +22 -3
- package/src/services/tx_file_store/config.ts +37 -0
- package/src/services/tx_file_store/index.ts +3 -0
- package/src/services/tx_file_store/instrumentation.ts +36 -0
- package/src/services/tx_file_store/tx_file_store.ts +175 -0
- package/src/services/tx_provider.ts +10 -9
- package/src/services/tx_provider_instrumentation.ts +13 -20
- package/src/test-helpers/index.ts +2 -0
- package/src/test-helpers/make-test-p2p-clients.ts +3 -3
- package/src/test-helpers/mock-pubsub.ts +143 -3
- package/src/test-helpers/reqresp-nodes.ts +2 -1
- package/src/test-helpers/test_tx_provider.ts +64 -0
- package/src/test-helpers/testbench-utils.ts +430 -0
- package/src/testbench/p2p_client_testbench_worker.ts +348 -123
- package/src/testbench/worker_client_manager.ts +304 -42
- package/src/util.ts +7 -1
- package/dest/mem_pools/attestation_pool/kv_attestation_pool.d.ts +0 -37
- package/dest/mem_pools/attestation_pool/kv_attestation_pool.d.ts.map +0 -1
- package/dest/mem_pools/attestation_pool/kv_attestation_pool.js +0 -213
- package/dest/mem_pools/attestation_pool/memory_attestation_pool.d.ts +0 -30
- package/dest/mem_pools/attestation_pool/memory_attestation_pool.d.ts.map +0 -1
- package/dest/mem_pools/attestation_pool/memory_attestation_pool.js +0 -219
- package/dest/mem_pools/tx_pool/eviction/insufficient_fee_payer_balance_rule.d.ts +0 -15
- package/dest/mem_pools/tx_pool/eviction/insufficient_fee_payer_balance_rule.d.ts.map +0 -1
- package/dest/mem_pools/tx_pool/eviction/insufficient_fee_payer_balance_rule.js +0 -88
- package/dest/msg_validators/block_proposal_validator/block_proposal_validator.d.ts +0 -12
- package/dest/msg_validators/block_proposal_validator/block_proposal_validator.d.ts.map +0 -1
- package/dest/msg_validators/block_proposal_validator/block_proposal_validator.js +0 -82
- package/dest/msg_validators/block_proposal_validator/index.d.ts +0 -2
- package/dest/msg_validators/block_proposal_validator/index.d.ts.map +0 -1
- package/dest/msg_validators/block_proposal_validator/index.js +0 -1
- package/src/mem_pools/attestation_pool/kv_attestation_pool.ts +0 -298
- package/src/mem_pools/attestation_pool/memory_attestation_pool.ts +0 -287
- package/src/mem_pools/tx_pool/eviction/insufficient_fee_payer_balance_rule.ts +0 -108
- package/src/msg_validators/block_proposal_validator/block_proposal_validator.ts +0 -97
- package/src/msg_validators/block_proposal_validator/index.ts +0 -1
|
@@ -0,0 +1,706 @@
|
|
|
1
|
+
import { chunkWrapAround } from '@aztec/foundation/collection';
|
|
2
|
+
import { TimeoutError } from '@aztec/foundation/error';
|
|
3
|
+
import { type Logger, createLogger } from '@aztec/foundation/log';
|
|
4
|
+
import { FifoMemoryQueue, type ISemaphore, Semaphore } from '@aztec/foundation/queue';
|
|
5
|
+
import { sleep } from '@aztec/foundation/sleep';
|
|
6
|
+
import { DateProvider, executeTimeout } from '@aztec/foundation/timer';
|
|
7
|
+
import { PeerErrorSeverity } from '@aztec/stdlib/p2p';
|
|
8
|
+
import { Tx, TxArray, TxHash } from '@aztec/stdlib/tx';
|
|
9
|
+
|
|
10
|
+
import type { PeerId } from '@libp2p/interface';
|
|
11
|
+
import { peerIdFromString } from '@libp2p/peer-id';
|
|
12
|
+
|
|
13
|
+
import type { IMissingTxsTracker } from '../../tx_collection/missing_txs_tracker.js';
|
|
14
|
+
import { ReqRespSubProtocol } from '.././interface.js';
|
|
15
|
+
import { BlockTxsRequest, BlockTxsResponse, type BlockTxsSource } from '.././protocols/index.js';
|
|
16
|
+
import { ReqRespStatus } from '.././status.js';
|
|
17
|
+
import {
|
|
18
|
+
DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD,
|
|
19
|
+
DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT,
|
|
20
|
+
DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT,
|
|
21
|
+
DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE,
|
|
22
|
+
} from './config.js';
|
|
23
|
+
import type { BatchTxRequesterLibP2PService, BatchTxRequesterOptions, ITxMetadataCollection } from './interface.js';
|
|
24
|
+
import { MissingTxMetadataCollection } from './missing_txs.js';
|
|
25
|
+
import { type IPeerCollection, PeerCollection } from './peer_collection.js';
|
|
26
|
+
import { BatchRequestTxValidator, type IBatchRequestTxValidator } from './tx_validator.js';
|
|
27
|
+
|
|
28
|
+
/*
 * Tries to fetch all missing transactions until the deadline is hit.
 * Transactions are yielded by calling the run*() method.
 *
 * We have a couple of peer types:
 * - Pinned peer is the one who sent us the block proposal
 * - Dumb peer:
 *   - We query this peer blindly because we don't know which txs it has
 *   - We hope it might have some of the transactions we asked for
 *   - When this peer sends a response it might become a Smart peer
 * - Smart peer:
 *   - Initially there are no smart peers, all are considered "dumb"
 *   - A peer becomes smart when in a response it tells us exactly which transactions it has
 *     AND we are missing some of those transactions
 * - Bad peer:
 *   - Is the peer which was unable to send us a successful response N times in a row
 * */
export class BatchTxRequester {
  /** Block (archive root + tx hashes) whose transactions are being fetched. */
  private readonly blockTxsSource: BlockTxsSource;
  /** Peer that sent us the block proposal; served by its own dedicated requester. */
  private readonly pinnedPeer: PeerId | undefined;
  /** Total time budget in milliseconds; `deadline` is derived from it at construction. */
  private readonly timeoutMs: number;
  /** Provides reqresp, the connection sampler, peer scoring and tx validator config. */
  private readonly p2pService: BatchTxRequesterLibP2PService;
  private readonly logger: Logger;
  /** Clock abstraction; injectable so time can be controlled externally. */
  private readonly dateProvider: DateProvider;
  /** Optional overrides (worker counts, batch size, peer collection, semaphore, abort signal, ...). */
  private readonly opts: BatchTxRequesterOptions;
  /** Tracks dumb/smart/bad/rate-limited/in-flight state for all candidate peers. */
  private readonly peers: IPeerCollection;
  /** Tracks which txs are missing, requested, in flight, or already fetched. */
  private readonly txsMetadata: ITxMetadataCollection;
  /** Absolute timestamp (ms) after which fetching stops; set to now + timeoutMs. */
  private readonly deadline: number;
  /** Gates smart worker loops until at least one peer is promoted to smart. */
  private readonly smartRequesterSemaphore: ISemaphore;
  /** Queue bridging worker responses to the consumer of run(). */
  private readonly txQueue: FifoMemoryQueue<Tx>;
  /** Validates txs received from peers before they are yielded. */
  private readonly txValidator: IBatchRequestTxValidator;
  /** Number of parallel smart worker loops; 0 disables smart-peer promotion. */
  private readonly smartParallelWorkerCount: number;
  /** Number of parallel dumb worker loops. */
  private readonly dumbParallelWorkerCount: number;
  /** Maximum number of tx hashes per request batch. */
  private readonly txBatchSize: number;
|
|
62
|
+
|
|
63
|
+
/**
 * @param missingTxsTracker - Tracker of which tx hashes are still missing for the block.
 * @param blockTxsSource - The block (archive root + tx hashes) whose txs we are fetching.
 * @param pinnedPeer - The peer that sent us the block proposal, if known.
 * @param timeoutMs - Total time budget; the absolute deadline is derived from it here.
 * @param p2pService - Access to reqresp, connection sampler, peer scoring and tx validator config.
 * @param logger - Optional logger (defaults to 'p2p:reqresp_batch').
 * @param dateProvider - Optional clock abstraction (defaults to wall clock).
 * @param opts - Optional overrides for worker counts, batch size, peer collection, semaphore, etc.
 */
constructor(
  missingTxsTracker: IMissingTxsTracker,
  blockTxsSource: BlockTxsSource,
  pinnedPeer: PeerId | undefined,
  timeoutMs: number,
  p2pService: BatchTxRequesterLibP2PService,
  logger?: Logger,
  dateProvider?: DateProvider,
  opts?: BatchTxRequesterOptions,
) {
  this.blockTxsSource = blockTxsSource;
  this.pinnedPeer = pinnedPeer;
  this.timeoutMs = timeoutMs;
  this.p2pService = p2pService;
  this.logger = logger ?? createLogger('p2p:reqresp_batch');
  this.dateProvider = dateProvider ?? new DateProvider();
  this.opts = opts ?? {};

  // Worker counts and batch size fall back to the package defaults when not overridden.
  this.smartParallelWorkerCount =
    this.opts.smartParallelWorkerCount ?? DEFAULT_BATCH_TX_REQUESTER_SMART_PARALLEL_WORKER_COUNT;
  this.dumbParallelWorkerCount =
    this.opts.dumbParallelWorkerCount ?? DEFAULT_BATCH_TX_REQUESTER_DUMB_PARALLEL_WORKER_COUNT;
  this.txBatchSize = this.opts.txBatchSize ?? DEFAULT_BATCH_TX_REQUESTER_TX_BATCH_SIZE;
  // Absolute cut-off; all sleeps and worker loops are clamped against this.
  this.deadline = this.dateProvider.now() + this.timeoutMs;
  this.txQueue = new FifoMemoryQueue(this.logger);
  this.txValidator = this.opts.txValidator ?? new BatchRequestTxValidator(this.p2pService.txValidatorConfig);

  if (this.opts.peerCollection) {
    this.peers = this.opts.peerCollection;
  } else {
    // Default collection seeds from peers sorted by ascending connection count.
    const initialPeers = this.p2pService.connectionSampler.getPeerListSortedByConnectionCountAsc();
    const badPeerThreshold = this.opts.badPeerThreshold ?? DEFAULT_BATCH_TX_REQUESTER_BAD_PEER_THRESHOLD;
    this.peers = new PeerCollection(
      initialPeers,
      this.pinnedPeer,
      this.dateProvider,
      badPeerThreshold,
      this.p2pService.peerScoring,
    );
  }
  this.txsMetadata = new MissingTxMetadataCollection(missingTxsTracker, this.txBatchSize);
  // Starts with 0 permits: smart workers block until a peer is promoted to smart.
  this.smartRequesterSemaphore = this.opts.semaphore ?? new Semaphore(0);
}
|
|
106
|
+
|
|
107
|
+
/*
 * Fetches all missing transactions and yields them one by one.
 *
 * Starts the smart/dumb/pinned requesters in the background and consumes
 * their results through `txQueue` until the queue ends, everything is
 * fetched, the deadline is hit, or the operation is aborted.
 * */
public async *run(): AsyncGenerator<Tx, Tx | undefined, unknown> {
  // Our timeout is represented in milliseconds but the queue expects seconds.
  // We also want to make sure we wait at least 1 second in case of very low timeouts.
  const timeoutQueueAfter = Math.max(Math.ceil(this.timeoutMs / 1_000), 1);
  try {
    // Nothing to do if there were no missing txs to begin with.
    if (this.txsMetadata.getMissingTxHashes().size === 0) {
      return undefined;
    }

    // Start workers in background. Whatever happens, end the queue when they
    // settle (or the overall timeout fires) so the consumer loop below terminates.
    const workersPromise = executeTimeout(
      () => Promise.allSettled([this.smartRequester(), this.dumbRequester(), this.pinnedPeerRequester()]),
      this.timeoutMs,
    ).finally(() => {
      this.txQueue.end();
    });

    while (true) {
      const tx = await this.txQueue.get(timeoutQueueAfter);

      // null indicates that the queue has ended
      if (tx === null) {
        break;
      }

      yield tx;

      if (this.shouldStop()) {
        // Drain whatever is already queued before ending
        let remaining;
        while ((remaining = this.txQueue.getImmediate()) !== undefined) {
          yield remaining;
        }
        break;
      }
    }

    // Wake any smart workers blocked on the semaphore so they can observe shouldStop().
    this.unlockSmartRequesterSemaphores();
    await workersPromise;
  } catch (e: any) {
    this.logger.error(`Batch tx requester failed with error: ${e.message}`, { error: e });
  } finally {
    // Ensure the queue is ended and no worker stays blocked, even on error.
    this.txQueue.end();
    this.unlockSmartRequesterSemaphores();
  }
}
|
|
156
|
+
|
|
157
|
+
/*
|
|
158
|
+
* Fetches all missing transactions
|
|
159
|
+
* @returns Collection of fetched transactions */
|
|
160
|
+
public static async collectAllTxs(generator: AsyncGenerator<Tx, Tx | undefined, unknown>): Promise<Tx[]> {
|
|
161
|
+
const txs: Tx[] = [];
|
|
162
|
+
for await (const tx of generator) {
|
|
163
|
+
if (tx === undefined) {
|
|
164
|
+
break;
|
|
165
|
+
}
|
|
166
|
+
txs.push(tx);
|
|
167
|
+
}
|
|
168
|
+
return txs;
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
/*
 * Handles the so-called pinned peer.
 * The pinned peer is the one who sent us the block proposal.
 * We expect the pinned peer to have all transactions from the proposal at some point.
 * This holds because they themselves have to attest to the proposal and thus fetch all missing transactions.
 *
 * Given the reasoning above - we query the pinned peer separately from dumb/smart peers.
 * */
private async pinnedPeerRequester() {
  if (!this.pinnedPeer) {
    this.logger.debug('No pinned peer to request from');
    return;
  }

  while (!this.shouldStop()) {
    // We've hit rate limits on the pinned peer - wait until the rate limit expires (clamped to deadline)
    const rateLimitDelay = this.peers.getPeerRateLimitDelayMs(this.pinnedPeer);
    const pinnedPeerIsRateLimited = rateLimitDelay !== undefined;
    if (pinnedPeerIsRateLimited) {
      await this.sleepClampedToDeadline(rateLimitDelay);
      continue;
    }

    // A pinned peer that turned bad is never queried again - give up on this requester.
    const pinnedPeerWentBad = this.peers.getBadPeers().has(this.pinnedPeer.toString());
    if (pinnedPeerWentBad) {
      return;
    }

    // From the pinned peer we always request transactions so that we first request the least requested and not in flight.
    // This makes sense since the pinned peer should have ALL transactions,
    // thus it is best to ask pinned first for the transactions we have trouble getting from other peers.
    const txs = this.txsMetadata.getTxsToRequestFromThePeer(this.pinnedPeer);
    if (txs.length === 0) {
      this.logger.debug(`Pinned peer ${this.pinnedPeer.toString()} has no txs to request`);
      return;
    }

    const request = BlockTxsRequest.fromTxsSourceAndMissingTxs(this.blockTxsSource, txs);
    if (!request) {
      return;
    }

    // Mark in flight while the request is outstanding, like the smart requester does.
    txs.forEach(tx => {
      this.txsMetadata.markRequested(tx);
      this.txsMetadata.markInFlightBySmartPeer(tx);
    });

    await this.requestTxBatch(this.pinnedPeer, request);

    txs.forEach(tx => {
      this.txsMetadata.markNotInFlightBySmartPeer(tx);
    });
  }
}
|
|
225
|
+
|
|
226
|
+
/*
 * Starts the dumb worker loops.
 * Builds the shared closures (peer picker, batch builder) that the loops round-robin over.
 * */
private async dumbRequester() {
  const nextPeerIndex = this.makeRoundRobinIndexer();
  const nextBatchIndex = this.makeRoundRobinIndexer();

  // Chunk missing tx hashes into batches of txBatchSize, wrapping around to ensure no peer gets less than txBatchSize
  const txChunks = () => {
    const missingHashes = Array.from(this.txsMetadata.getMissingTxHashes());
    return chunkWrapAround(missingHashes, this.txBatchSize);
  };

  // Builds the next batch request; peer identity is irrelevant for dumb peers (we query blindly).
  const makeRequest = (_pid: PeerId) => {
    const chunks = txChunks();
    const idx = nextBatchIndex(() => chunks.length);
    const noMoreTxsToRequest = idx === undefined;
    if (noMoreTxsToRequest) {
      return undefined;
    }

    const txs = chunks[idx].map(t => TxHash.fromString(t));

    // If the peer is a dumb peer, we don't know yet if they received the full blockProposal.
    // There is a solid chance the peer didn't receive the proposal yet, thus we must send full hashes.
    const includeFullHashesInRequestNotJustIndices = true;
    const blockRequest = BlockTxsRequest.fromTxsSourceAndMissingTxs(
      this.blockTxsSource,
      txs,
      includeFullHashesInRequestNotJustIndices,
    );
    const blockRequestHasNoMissingTxsFromTheProposal = !blockRequest;

    if (blockRequestHasNoMissingTxsFromTheProposal) {
      return undefined;
    }

    return { blockRequest, txs };
  };

  // Round-robins over the currently available dumb peers.
  const nextPeer = () => {
    const peers = this.peers.getDumbPeersToQuery();
    const idx = nextPeerIndex(() => peers.length);
    return idx === undefined ? undefined : peerIdFromString(peers[idx]);
  };

  // Never spin up more loops than we have peers.
  const workerCount = Math.min(this.dumbParallelWorkerCount, this.peers.getAllPeers().size);
  const workers = Array.from({ length: workerCount }, (_, index) =>
    this.dumbWorkerLoop(nextPeer, makeRequest, index + 1),
  );

  await Promise.allSettled(workers);
}
|
|
279
|
+
|
|
280
|
+
/*
 * Dumb worker loop.
 * It fetches the next available dumb peer and builds a request for that peer.
 * Loops until the shouldStop condition is met or there are no more dumb peers to query.
 * This can happen if e.g. all "dumb" peers transition to "smart" or e.g. become "bad".
 *
 * @param pickNextPeer - Round-robin supplier of the next dumb peer (undefined when exhausted).
 * @param request - Builds the next batch request (undefined when nothing left to request).
 * @param workerIndex - 1-based index used only for log messages.
 * */
private async dumbWorkerLoop(
  pickNextPeer: () => PeerId | undefined,
  request: (pid: PeerId) => { blockRequest: BlockTxsRequest; txs: TxHash[] } | undefined,
  workerIndex: number,
) {
  try {
    this.logger.debug(`Dumb worker ${workerIndex} started`);
    while (!this.shouldStop()) {
      const peerId = pickNextPeer();
      const weRanOutOfPeersToQuery = peerId === undefined;
      if (weRanOutOfPeersToQuery) {
        const nextDumbPeerDelay = this.peers.getNextDumbPeerAvailabilityDelayMs();
        const thereAreSomeRateLimitedDumbPeers = nextDumbPeerDelay !== undefined;
        if (thereAreSomeRateLimitedDumbPeers) {
          // There are still some dumb peers to query but they have been rate limited.
          // Sleep until the earliest one gets unblocked (clamped to deadline).
          await this.sleepClampedToDeadline(nextDumbPeerDelay);
          continue;
        }

        this.logger.debug(`Worker loop dumb: No more peers to query`);
        break;
      }

      const nextBatchTxRequest = request(peerId);
      if (!nextBatchTxRequest) {
        this.logger.debug(`Worker loop dumb: no txs to request, exiting`);
        break;
      }

      const { blockRequest, txs } = nextBatchTxRequest;

      this.logger.debug(
        `Worker type dumb: Requesting txs from peer ${peerId.toString()}: ${txs.map(tx => tx.toString()).join(', ')}`,
      );

      await this.requestTxBatch(peerId, blockRequest);
    }
  } catch (err: any) {
    this.logger.error(`Dumb worker ${workerIndex} encountered an error: ${err}`);
  } finally {
    this.logger.debug(`Dumb worker ${workerIndex} finished`);
  }
}
|
|
330
|
+
|
|
331
|
+
/*
|
|
332
|
+
* Starts smart worker loops
|
|
333
|
+
* */
|
|
334
|
+
private async smartRequester() {
|
|
335
|
+
const nextPeerIndex = this.makeRoundRobinIndexer();
|
|
336
|
+
|
|
337
|
+
const nextPeer = () => {
|
|
338
|
+
const peers = this.peers.getSmartPeersToQuery();
|
|
339
|
+
const idx = nextPeerIndex(() => peers.length);
|
|
340
|
+
return idx === undefined ? undefined : peerIdFromString(peers[idx]);
|
|
341
|
+
};
|
|
342
|
+
|
|
343
|
+
const makeRequest = (pid: PeerId) => {
|
|
344
|
+
const txs = this.txsMetadata.getTxsToRequestFromThePeer(pid);
|
|
345
|
+
const blockRequest = BlockTxsRequest.fromTxsSourceAndMissingTxs(this.blockTxsSource, txs);
|
|
346
|
+
if (!blockRequest) {
|
|
347
|
+
return undefined;
|
|
348
|
+
}
|
|
349
|
+
|
|
350
|
+
return { blockRequest, txs };
|
|
351
|
+
};
|
|
352
|
+
|
|
353
|
+
const workers = Array.from(
|
|
354
|
+
{ length: Math.min(this.smartParallelWorkerCount, this.peers.getAllPeers().size) },
|
|
355
|
+
(_, index) => this.smartWorkerLoop(nextPeer, makeRequest, index + 1),
|
|
356
|
+
);
|
|
357
|
+
|
|
358
|
+
await Promise.allSettled(workers);
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
/*
 * Smart worker loop.
 * It fetches the next available smart peer and builds a request for that peer.
 * Loops until the shouldStop condition is met.
 *
 * Notes:
 * - We don't start the worker loop immediately, but block on the semaphore
 *   until some dumb peer transitions to a smart peer
 * - We might run out of smart peers, because:
 *   - they "went bad"
 *   - there are fewer smart peers than worker loops
 *   In such a scenario we either wait for the next dumb peer to become smart or kill the worker loop
 * */
private async smartWorkerLoop(
  pickNextPeer: () => PeerId | undefined,
  request: (pid: PeerId) => { blockRequest: BlockTxsRequest; txs: TxHash[] } | undefined,
  workerIndex: number,
) {
  try {
    this.logger.trace(`Smart worker ${workerIndex} started`);
    // Block until some peer is promoted to smart, bounded by the overall time budget.
    await executeTimeout((_: AbortSignal) => this.smartRequesterSemaphore.acquire(), this.timeoutMs);
    this.logger.trace(`Smart worker ${workerIndex} acquired semaphore`);

    while (!this.shouldStop()) {
      const peerId = pickNextPeer();
      const weRanOutOfPeersToQuery = peerId === undefined;
      if (weRanOutOfPeersToQuery) {
        this.logger.debug(`Worker loop smart: No more peers to query`);

        // If there are no more dumb peers to query then none of our peers can become smart,
        // thus we can simply exit this worker
        const noMoreDumbPeersToQuery = this.peers.getDumbPeersToQuery().length === 0;
        if (noMoreDumbPeersToQuery) {
          // These might be smart peers that will get unblocked after _some time_
          const nextSmartPeerDelay = this.peers.getNextSmartPeerAvailabilityDelayMs();
          const thereAreSomeRateLimitedSmartPeers = nextSmartPeerDelay !== undefined;
          if (thereAreSomeRateLimitedSmartPeers) {
            await this.sleepClampedToDeadline(nextSmartPeerDelay);
            continue;
          }

          this.logger.debug(`Worker loop smart: No more smart peers to query killing ${workerIndex}`);
          break;
        }

        // Otherwise there are still some dumb peers that could become smart.
        // We end up here when all known smart peers became temporarily unavailable via a combination of
        // (bad, in-flight, or rate-limited) or in some weird scenario all current smart peers turn bad,
        // which is permanent, but dumb peers still exist that could become smart.
        //
        // When a dumb peer responds with valid txIndices, it gets
        // promoted to smart and releases the semaphore, waking this worker.
        await executeTimeout((_: AbortSignal) => this.smartRequesterSemaphore.acquire(), this.timeoutMs);
        this.logger.debug(`Worker loop smart: acquired next smart peer`);
        continue;
      }

      const nextBatchTxRequest = request(peerId);
      if (!nextBatchTxRequest) {
        this.logger.debug(`Worker loop smart: no txs to request, exiting`);
        break;
      }

      const { blockRequest, txs } = nextBatchTxRequest;

      // We only mark transactions as in flight if queried by a Smart peer,
      // because asking a dumb peer is a shot in the dark (there is a good chance they won't have it),
      // so we don't gain anything if we mark txs in-flight for dumb peers.
      txs.forEach(tx => {
        this.txsMetadata.markRequested(tx);
        this.txsMetadata.markInFlightBySmartPeer(tx);
      });

      await this.requestTxBatch(peerId, blockRequest);
      txs.forEach(tx => {
        this.txsMetadata.markNotInFlightBySmartPeer(tx);
      });
    }
  } catch (err: any) {
    if (err instanceof TimeoutError) {
      // Semaphore wait timing out is an expected shutdown path, not an error.
      this.logger.debug(`Smart worker ${workerIndex} timed out waiting for semaphore`);
    } else {
      this.logger.error(`Smart worker ${workerIndex} encountered an error: ${err}`);
    }
  } finally {
    this.logger.debug(`Smart worker ${workerIndex} finished`);
  }
}
|
|
449
|
+
|
|
450
|
+
/*
 * Sends the actual request to the peer and handles the response.
 * The peer is marked in-flight for the duration of the request and
 * always un-marked in the finally block, even on failure.
 *
 * @param peerId - the peer to send the request to
 * @param request - the actual request
 */
private async requestTxBatch(peerId: PeerId, request: BlockTxsRequest): Promise<void> {
  try {
    this.peers.markPeerInFlight(peerId);
    const response = await this.p2pService.reqResp.sendRequestToPeer(
      peerId,
      ReqRespSubProtocol.BLOCK_TXS,
      request.toBuffer(),
    );
    if (response.status !== ReqRespStatus.SUCCESS) {
      this.logger.debug(`Peer ${peerId.toString()} failed to respond with status: ${response.status}`);
      this.handleFailResponseFromPeer(peerId, response.status);
      return;
    }

    const blockResponse = BlockTxsResponse.fromBuffer(response.data);
    await this.handleSuccessResponseFromPeer(peerId, blockResponse);
  } catch (err: any) {
    this.logger.error(`Failed to get valid response from peer ${peerId.toString()}: ${err.message}`, {
      peerId,
      error: err,
    });

    // Thrown errors (network failure, malformed buffer) are treated like an UNKNOWN status.
    this.handleFailResponseFromPeer(peerId, ReqRespStatus.UNKNOWN);
  } finally {
    this.peers.unMarkPeerInFlight(peerId);
  }
}
|
|
483
|
+
|
|
484
|
+
/*
|
|
485
|
+
* Handles failed response form the peer
|
|
486
|
+
* There are 3 scenarios
|
|
487
|
+
* - RATE_LIMIT_EXCEEDED: We mark this and don't query this peer again for some_time
|
|
488
|
+
* - FAILURE and UNKNOWN: We penalise this, if peer has been penalised this way N times they are not queried again
|
|
489
|
+
* this implies we will query these peers couple of more times and give them a chance to "redeem" themselves before completely ignoring them
|
|
490
|
+
*/
|
|
491
|
+
private handleFailResponseFromPeer(peerId: PeerId, responseStatus: ReqRespStatus) {
|
|
492
|
+
//TODO: Should we ban these peers?
|
|
493
|
+
if (responseStatus === ReqRespStatus.FAILURE || responseStatus === ReqRespStatus.UNKNOWN) {
|
|
494
|
+
this.peers.penalisePeer(peerId, PeerErrorSeverity.HighToleranceError);
|
|
495
|
+
return;
|
|
496
|
+
}
|
|
497
|
+
|
|
498
|
+
if (responseStatus === ReqRespStatus.RATE_LIMIT_EXCEEDED) {
|
|
499
|
+
this.peers.markPeerRateLimitExceeded(peerId);
|
|
500
|
+
}
|
|
501
|
+
}
|
|
502
|
+
|
|
503
|
+
/*
|
|
504
|
+
* Handles successful response form the peer, this includes
|
|
505
|
+
* - Handling received transactions
|
|
506
|
+
* - Deciding if the peer is "smart" or not
|
|
507
|
+
* */
|
|
508
|
+
private async handleSuccessResponseFromPeer(peerId: PeerId, response: BlockTxsResponse) {
|
|
509
|
+
this.logger.debug(`Received txs: ${response.txs.length} from peer ${peerId.toString()} `);
|
|
510
|
+
await this.handleReceivedTxs(peerId, response.txs);
|
|
511
|
+
|
|
512
|
+
this.decideIfPeerIsSmart(peerId, response);
|
|
513
|
+
}
|
|
514
|
+
|
|
515
|
+
/*
 * Handles received txs.
 * Transactions are validated and then put on the async queue
 * to be yielded by the main running loop.
 *
 * Side effects: penalises the peer if any tx fails validation, otherwise clears
 * its "bad" mark; wakes blocked smart workers once nothing is missing anymore.
 * */
private async handleReceivedTxs(peerId: PeerId, txs: TxArray) {
  // Ignore txs another peer already delivered.
  const newTxs = txs.filter(tx => !this.txsMetadata.alreadyFetched(tx.txHash));

  if (newTxs.length === 0) {
    return;
  }

  //TODO: this validation can be slow, maybe spawn a worker just for validation.
  // We could use the async queue for communication.
  const validationResults = await Promise.allSettled(
    newTxs.map(async tx => ({
      tx,
      isValid: (await this.txValidator.validateRequestedTx(tx)).result === 'valid',
    })),
  );

  let hasInvalidTx = false;
  validationResults.forEach(result => {
    if (result.status === 'fulfilled' && result.value.isValid) {
      // markFetched returning true means we are the first to record this tx - only then enqueue it.
      if (this.txsMetadata.markFetched(peerId, result.value.tx)) {
        this.txQueue.put(result.value.tx);
      }
    } else {
      // A rejected validation promise is treated the same as an invalid tx.
      hasInvalidTx = true;
    }
  });

  if (hasInvalidTx) {
    this.peers.penalisePeer(peerId, PeerErrorSeverity.LowToleranceError);
  } else {
    // If we have received a successful response from the peer, they have "redeemed"
    // themselves and are not considered bad anymore.
    this.peers.unMarkPeerAsBad(peerId);
  }

  const missingTxHashes = this.txsMetadata.getMissingTxHashes();
  if (missingTxHashes.size === 0) {
    // Wake sleepers so they can see shouldStop() and exit before waiting on timeout.
    this.unlockSmartRequesterSemaphores();
  } else {
    this.logger.trace(
      `Missing txs: ${Array.from(this.txsMetadata.getMissingTxHashes())
        .map(tx => tx.toString())
        .join(', ')}`,
    );
  }
}
|
|
566
|
+
|
|
567
|
+
/*
|
|
568
|
+
* Peer is smart if:
|
|
569
|
+
* - They are not pinned peer
|
|
570
|
+
* - They have sent successful response indicating which txs from Block proposal they have
|
|
571
|
+
* - They have transactions we are missing
|
|
572
|
+
*/
|
|
573
|
+
private decideIfPeerIsSmart(peerId: PeerId, response: BlockTxsResponse) {
|
|
574
|
+
const pinnedPeerShouldNeverBeMarkedAsSmart = this.pinnedPeer && peerId.toString() === this.pinnedPeer.toString();
|
|
575
|
+
if (pinnedPeerShouldNeverBeMarkedAsSmart) {
|
|
576
|
+
return;
|
|
577
|
+
}
|
|
578
|
+
|
|
579
|
+
const smartPeersAreDisabled = this.smartParallelWorkerCount === 0;
|
|
580
|
+
if (smartPeersAreDisabled) {
|
|
581
|
+
return;
|
|
582
|
+
}
|
|
583
|
+
|
|
584
|
+
// If block response is invalid we still want to query this peer in the future
|
|
585
|
+
// Because they sent successful response, so they might become smart peer in the future
|
|
586
|
+
// Or send us needed txs
|
|
587
|
+
if (!this.isBlockResponseValid(response)) {
|
|
588
|
+
return;
|
|
589
|
+
}
|
|
590
|
+
|
|
591
|
+
// We mark peer as "smart" only if they have some txs we are missing
|
|
592
|
+
// Otherwise we keep them as "dumb" in hope they'll receive some new txs we are missing in the future
|
|
593
|
+
if (!this.peerHasSomeTxsWeAreMissing(peerId, response)) {
|
|
594
|
+
this.logger.debug(`${peerId.toString()} has no txs we are missing, skipping`);
|
|
595
|
+
return;
|
|
596
|
+
}
|
|
597
|
+
|
|
598
|
+
this.peers.markPeerSmart(peerId);
|
|
599
|
+
this.markTxsPeerHas(peerId, response);
|
|
600
|
+
|
|
601
|
+
// Unblock smart workers
|
|
602
|
+
if (this.peers.getSmartPeersToQuery().length <= this.smartParallelWorkerCount) {
|
|
603
|
+
this.smartRequesterSemaphore.release();
|
|
604
|
+
}
|
|
605
|
+
}
|
|
606
|
+
|
|
607
|
+
/*
 * A block response is valid when it refers to the same block (matching archive
 * roots) and the peer claims at least one tx from the proposal.
 */
private isBlockResponseValid(response: BlockTxsResponse): boolean {
  if (this.blockTxsSource.archive.toString() !== response.archiveRoot.toString()) {
    return false;
  }
  return !response.txIndices.isEmpty();
}
|
|
612
|
+
|
|
613
|
+
/*
 * @returns true when at least one hash the peer claims to hold is still missing on our side.
 */
private peerHasSomeTxsWeAreMissing(_peerId: PeerId, response: BlockTxsResponse): boolean {
  const missing = this.txsMetadata.getMissingTxHashes();
  return this.extractHashesPeerHasFromResponse(response).some(hash => missing.has(hash.toString()));
}
|
|
617
|
+
|
|
618
|
+
/*
 * Records in the tx metadata which tx hashes the responding peer holds.
 */
private markTxsPeerHas(peerId: PeerId, response: BlockTxsResponse) {
  const ownedHashes = this.extractHashesPeerHasFromResponse(response);
  this.logger.debug(`${peerId.toString()} has txs: ${ownedHashes.map(tx => tx.toString()).join(', ')}`);
  this.txsMetadata.markPeerHas(peerId, ownedHashes);
}
|
|
623
|
+
|
|
624
|
+
private extractHashesPeerHasFromResponse(response: BlockTxsResponse): Array<TxHash> {
|
|
625
|
+
const hashes: TxHash[] = [];
|
|
626
|
+
const indicesOfHashesPeerHas = new Set(response.txIndices.getTrueIndices());
|
|
627
|
+
this.blockTxsSource.txHashes.forEach((hash, idx) => {
|
|
628
|
+
if (indicesOfHashesPeerHas.has(idx)) {
|
|
629
|
+
hashes.push(hash);
|
|
630
|
+
}
|
|
631
|
+
});
|
|
632
|
+
|
|
633
|
+
return hashes;
|
|
634
|
+
}
|
|
635
|
+
|
|
636
|
+
/*
|
|
637
|
+
* Helper function to crate round robin indexer -
|
|
638
|
+
* i.e. the "thing" which returns next index/number in round robin fashion
|
|
639
|
+
**/
|
|
640
|
+
private makeRoundRobinIndexer(start = 0) {
|
|
641
|
+
let i = start;
|
|
642
|
+
/*
|
|
643
|
+
* Function to calculate next round-robin number
|
|
644
|
+
* Idea is that we pass in an array size and based on it and previous state we call next
|
|
645
|
+
* Array size can change between calls thus it is passed as function
|
|
646
|
+
*
|
|
647
|
+
* @returns next index or undefined if size is 0
|
|
648
|
+
*/
|
|
649
|
+
return (size: () => number) => {
|
|
650
|
+
const length = size();
|
|
651
|
+
if (length === 0) {
|
|
652
|
+
return undefined;
|
|
653
|
+
}
|
|
654
|
+
|
|
655
|
+
const current = i % length;
|
|
656
|
+
i = (current + 1) % length;
|
|
657
|
+
return current;
|
|
658
|
+
};
|
|
659
|
+
}
|
|
660
|
+
|
|
661
|
+
/*
|
|
662
|
+
* @returns true if all missing txs have been fetched */
|
|
663
|
+
private fetchedAllTxs() {
|
|
664
|
+
return this.txsMetadata.getMissingTxHashes().size == 0;
|
|
665
|
+
}
|
|
666
|
+
|
|
667
|
+
/*
 * Checks if the BatchTxRequester should stop fetching missing txs.
 * Conditions for stopping are:
 * - There were no missing transactions to start with
 * - All transactions have been fetched
 * - The deadline has been hit (no more time to fetch)
 * - This process has been cancelled via abortSignal
 *
 * NOTE(review): this predicate has a side effect - on abort it releases the smart
 * requester semaphores so blocked workers can wake up and observe the stop condition.
 *
 * @returns true if BatchTxRequester should stop, otherwise false */
private shouldStop() {
  const aborted = this.opts.abortSignal?.aborted ?? false;
  if (aborted) {
    this.unlockSmartRequesterSemaphores();
  }

  return aborted || this.fetchedAllTxs() || this.dateProvider.now() > this.deadline;
}
|
|
684
|
+
|
|
685
|
+
/*
|
|
686
|
+
* Helper function which unlocks all smart requester semaphores
|
|
687
|
+
* @note This is needed otherwise they will block forever
|
|
688
|
+
* */
|
|
689
|
+
private unlockSmartRequesterSemaphores() {
|
|
690
|
+
for (let i = 0; i < this.smartParallelWorkerCount; i++) {
|
|
691
|
+
this.smartRequesterSemaphore.release();
|
|
692
|
+
}
|
|
693
|
+
}
|
|
694
|
+
|
|
695
|
+
/*
|
|
696
|
+
* Sleeps for the given duration, but clamped to the deadline.
|
|
697
|
+
* This ensures we don't sleep past the deadline.
|
|
698
|
+
* */
|
|
699
|
+
private async sleepClampedToDeadline(durationMs: number) {
|
|
700
|
+
const remaining = this.deadline - this.dateProvider.now();
|
|
701
|
+
const thereIsTimeRemaining = remaining > 0;
|
|
702
|
+
if (thereIsTimeRemaining) {
|
|
703
|
+
await sleep(Math.min(durationMs, remaining));
|
|
704
|
+
}
|
|
705
|
+
}
|
|
706
|
+
}
|