@aztec/p2p 0.0.1-commit.e61ad554 → 0.0.1-commit.ec5f612
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dest/bootstrap/bootstrap.d.ts +4 -3
- package/dest/bootstrap/bootstrap.d.ts.map +1 -1
- package/dest/bootstrap/bootstrap.js +4 -4
- package/dest/client/factory.d.ts +10 -10
- package/dest/client/factory.d.ts.map +1 -1
- package/dest/client/factory.js +45 -18
- package/dest/client/interface.d.ts +46 -33
- package/dest/client/interface.d.ts.map +1 -1
- package/dest/client/p2p_client.d.ts +41 -51
- package/dest/client/p2p_client.d.ts.map +1 -1
- package/dest/client/p2p_client.js +156 -200
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.d.ts +2 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.d.ts.map +1 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker.js +304 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.d.ts +73 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.d.ts.map +1 -0
- package/dest/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.js +8 -0
- package/dest/config.d.ts +35 -7
- package/dest/config.d.ts.map +1 -1
- package/dest/config.js +21 -7
- package/dest/errors/tx-pool.error.d.ts +8 -0
- package/dest/errors/tx-pool.error.d.ts.map +1 -0
- package/dest/errors/tx-pool.error.js +9 -0
- package/dest/index.d.ts +2 -1
- package/dest/index.d.ts.map +1 -1
- package/dest/index.js +1 -0
- package/dest/mem_pools/attestation_pool/attestation_pool.d.ts +104 -88
- package/dest/mem_pools/attestation_pool/attestation_pool.d.ts.map +1 -1
- package/dest/mem_pools/attestation_pool/attestation_pool.js +441 -3
- package/dest/mem_pools/attestation_pool/attestation_pool_test_suite.d.ts +2 -2
- package/dest/mem_pools/attestation_pool/attestation_pool_test_suite.d.ts.map +1 -1
- package/dest/mem_pools/attestation_pool/attestation_pool_test_suite.js +353 -87
- package/dest/mem_pools/attestation_pool/index.d.ts +2 -3
- package/dest/mem_pools/attestation_pool/index.d.ts.map +1 -1
- package/dest/mem_pools/attestation_pool/index.js +1 -2
- package/dest/mem_pools/attestation_pool/mocks.d.ts +2 -2
- package/dest/mem_pools/attestation_pool/mocks.d.ts.map +1 -1
- package/dest/mem_pools/attestation_pool/mocks.js +2 -2
- package/dest/mem_pools/index.d.ts +3 -2
- package/dest/mem_pools/index.d.ts.map +1 -1
- package/dest/mem_pools/index.js +1 -1
- package/dest/mem_pools/instrumentation.d.ts +1 -1
- package/dest/mem_pools/instrumentation.d.ts.map +1 -1
- package/dest/mem_pools/instrumentation.js +2 -2
- package/dest/mem_pools/interface.d.ts +5 -5
- package/dest/mem_pools/interface.d.ts.map +1 -1
- package/dest/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.js +3 -3
- package/dest/mem_pools/tx_pool_v2/archive/index.d.ts +2 -0
- package/dest/mem_pools/tx_pool_v2/archive/index.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/archive/index.js +1 -0
- package/dest/mem_pools/tx_pool_v2/archive/tx_archive.d.ts +43 -0
- package/dest/mem_pools/tx_pool_v2/archive/tx_archive.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/archive/tx_archive.js +103 -0
- package/dest/mem_pools/tx_pool_v2/deleted_pool.d.ts +104 -0
- package/dest/mem_pools/tx_pool_v2/deleted_pool.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/deleted_pool.js +251 -0
- package/dest/mem_pools/tx_pool_v2/eviction/eviction_manager.d.ts +47 -0
- package/dest/mem_pools/tx_pool_v2/eviction/eviction_manager.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/eviction_manager.js +128 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_eviction_rule.d.ts +17 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_eviction_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_eviction_rule.js +93 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_pre_add_rule.d.ts +19 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_pre_add_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/fee_payer_balance_pre_add_rule.js +97 -0
- package/dest/mem_pools/tx_pool_v2/eviction/index.d.ts +10 -0
- package/dest/mem_pools/tx_pool_v2/eviction/index.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/index.js +11 -0
- package/dest/mem_pools/tx_pool_v2/eviction/interfaces.d.ts +174 -0
- package/dest/mem_pools/tx_pool_v2/eviction/interfaces.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/interfaces.js +25 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_mining_rule.d.ts +15 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_mining_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_mining_rule.js +65 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_reorg_rule.d.ts +17 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_reorg_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/invalid_txs_after_reorg_rule.js +93 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_eviction_rule.d.ts +16 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_eviction_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_eviction_rule.js +78 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.d.ts +20 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.js +73 -0
- package/dest/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.d.ts +15 -0
- package/dest/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.js +19 -0
- package/dest/mem_pools/tx_pool_v2/index.d.ts +6 -0
- package/dest/mem_pools/tx_pool_v2/index.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/index.js +5 -0
- package/dest/mem_pools/tx_pool_v2/instrumentation.d.ts +15 -0
- package/dest/mem_pools/tx_pool_v2/instrumentation.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/instrumentation.js +43 -0
- package/dest/mem_pools/tx_pool_v2/interfaces.d.ts +211 -0
- package/dest/mem_pools/tx_pool_v2/interfaces.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/interfaces.js +9 -0
- package/dest/mem_pools/tx_pool_v2/tx_metadata.d.ts +119 -0
- package/dest/mem_pools/tx_pool_v2/tx_metadata.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_metadata.js +193 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_bench_metrics.d.ts +26 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_bench_metrics.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_bench_metrics.js +70 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_indices.d.ts +108 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_indices.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_indices.js +354 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2.d.ts +60 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2.js +161 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2_impl.d.ts +77 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2_impl.d.ts.map +1 -0
- package/dest/mem_pools/tx_pool_v2/tx_pool_v2_impl.js +905 -0
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.d.ts +3 -3
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.d.ts.map +1 -1
- package/dest/msg_validators/attestation_validator/fisherman_attestation_validator.js +7 -2
- package/dest/msg_validators/proposal_validator/proposal_validator.js +5 -5
- package/dest/msg_validators/tx_validator/aggregate_tx_validator.d.ts +4 -4
- package/dest/msg_validators/tx_validator/aggregate_tx_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/aggregate_tx_validator.js +3 -3
- package/dest/msg_validators/tx_validator/archive_cache.d.ts +3 -3
- package/dest/msg_validators/tx_validator/archive_cache.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/archive_cache.js +1 -1
- package/dest/msg_validators/tx_validator/block_header_validator.d.ts +20 -6
- package/dest/msg_validators/tx_validator/block_header_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/block_header_validator.js +5 -4
- package/dest/msg_validators/tx_validator/data_validator.d.ts +3 -1
- package/dest/msg_validators/tx_validator/data_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/data_validator.js +4 -1
- package/dest/msg_validators/tx_validator/double_spend_validator.d.ts +15 -4
- package/dest/msg_validators/tx_validator/double_spend_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/double_spend_validator.js +7 -6
- package/dest/msg_validators/tx_validator/factory.d.ts +118 -5
- package/dest/msg_validators/tx_validator/factory.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/factory.js +228 -57
- package/dest/msg_validators/tx_validator/gas_validator.d.ts +59 -3
- package/dest/msg_validators/tx_validator/gas_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/gas_validator.js +76 -38
- package/dest/msg_validators/tx_validator/index.d.ts +2 -1
- package/dest/msg_validators/tx_validator/index.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/index.js +1 -0
- package/dest/msg_validators/tx_validator/metadata_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/metadata_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/metadata_validator.js +2 -2
- package/dest/msg_validators/tx_validator/nullifier_cache.d.ts +14 -0
- package/dest/msg_validators/tx_validator/nullifier_cache.d.ts.map +1 -0
- package/dest/msg_validators/tx_validator/nullifier_cache.js +24 -0
- package/dest/msg_validators/tx_validator/phases_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/phases_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/phases_validator.js +3 -3
- package/dest/msg_validators/tx_validator/size_validator.d.ts +3 -1
- package/dest/msg_validators/tx_validator/size_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/size_validator.js +4 -1
- package/dest/msg_validators/tx_validator/timestamp_validator.d.ts +22 -5
- package/dest/msg_validators/tx_validator/timestamp_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/timestamp_validator.js +8 -8
- package/dest/msg_validators/tx_validator/tx_permitted_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/tx_permitted_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/tx_permitted_validator.js +2 -2
- package/dest/msg_validators/tx_validator/tx_proof_validator.d.ts +3 -2
- package/dest/msg_validators/tx_validator/tx_proof_validator.d.ts.map +1 -1
- package/dest/msg_validators/tx_validator/tx_proof_validator.js +2 -2
- package/dest/services/data_store.d.ts +1 -1
- package/dest/services/data_store.d.ts.map +1 -1
- package/dest/services/data_store.js +10 -6
- package/dest/services/discv5/discV5_service.js +1 -1
- package/dest/services/dummy_service.d.ts +24 -4
- package/dest/services/dummy_service.d.ts.map +1 -1
- package/dest/services/dummy_service.js +46 -1
- package/dest/services/encoding.d.ts +3 -3
- package/dest/services/encoding.d.ts.map +1 -1
- package/dest/services/encoding.js +11 -10
- package/dest/services/gossipsub/index.d.ts +3 -0
- package/dest/services/gossipsub/index.d.ts.map +1 -0
- package/dest/services/gossipsub/index.js +2 -0
- package/dest/services/gossipsub/scoring.d.ts +21 -3
- package/dest/services/gossipsub/scoring.d.ts.map +1 -1
- package/dest/services/gossipsub/scoring.js +24 -7
- package/dest/services/gossipsub/topic_score_params.d.ts +173 -0
- package/dest/services/gossipsub/topic_score_params.d.ts.map +1 -0
- package/dest/services/gossipsub/topic_score_params.js +346 -0
- package/dest/services/index.d.ts +2 -1
- package/dest/services/index.d.ts.map +1 -1
- package/dest/services/index.js +1 -0
- package/dest/services/libp2p/instrumentation.d.ts +1 -1
- package/dest/services/libp2p/instrumentation.d.ts.map +1 -1
- package/dest/services/libp2p/instrumentation.js +14 -3
- package/dest/services/libp2p/libp2p_service.d.ts +100 -42
- package/dest/services/libp2p/libp2p_service.d.ts.map +1 -1
- package/dest/services/libp2p/libp2p_service.js +451 -359
- package/dest/services/peer-manager/metrics.d.ts +2 -2
- package/dest/services/peer-manager/metrics.d.ts.map +1 -1
- package/dest/services/peer-manager/metrics.js +20 -5
- package/dest/services/peer-manager/peer_scoring.d.ts +1 -1
- package/dest/services/peer-manager/peer_scoring.d.ts.map +1 -1
- package/dest/services/peer-manager/peer_scoring.js +33 -4
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.d.ts +48 -0
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/batch_tx_requester.js +539 -0
- package/dest/services/reqresp/batch-tx-requester/config.d.ts +17 -0
- package/dest/services/reqresp/batch-tx-requester/config.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/config.js +27 -0
- package/dest/services/reqresp/batch-tx-requester/interface.d.ts +46 -0
- package/dest/services/reqresp/batch-tx-requester/interface.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/interface.js +1 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.d.ts +34 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/missing_txs.js +130 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.d.ts +60 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/peer_collection.js +173 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.d.ts +20 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.d.ts.map +1 -0
- package/dest/services/reqresp/batch-tx-requester/tx_validator.js +21 -0
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts +22 -3
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.d.ts.map +1 -1
- package/dest/services/reqresp/connection-sampler/batch_connection_sampler.js +63 -4
- package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts +2 -1
- package/dest/services/reqresp/connection-sampler/connection_sampler.d.ts.map +1 -1
- package/dest/services/reqresp/connection-sampler/connection_sampler.js +12 -0
- package/dest/services/reqresp/interface.d.ts +12 -1
- package/dest/services/reqresp/interface.d.ts.map +1 -1
- package/dest/services/reqresp/interface.js +15 -1
- package/dest/services/reqresp/metrics.d.ts +6 -5
- package/dest/services/reqresp/metrics.d.ts.map +1 -1
- package/dest/services/reqresp/metrics.js +17 -5
- package/dest/services/reqresp/protocols/block_txs/bitvector.d.ts +5 -1
- package/dest/services/reqresp/protocols/block_txs/bitvector.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/bitvector.js +5 -0
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.d.ts +7 -5
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/block_txs_handler.js +27 -9
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.d.ts +29 -6
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/block_txs/block_txs_reqresp.js +59 -13
- package/dest/services/reqresp/protocols/tx.d.ts +7 -1
- package/dest/services/reqresp/protocols/tx.d.ts.map +1 -1
- package/dest/services/reqresp/protocols/tx.js +20 -0
- package/dest/services/reqresp/reqresp.d.ts +6 -1
- package/dest/services/reqresp/reqresp.d.ts.map +1 -1
- package/dest/services/reqresp/reqresp.js +71 -27
- package/dest/services/service.d.ts +42 -3
- package/dest/services/service.d.ts.map +1 -1
- package/dest/services/tx_collection/config.d.ts +22 -1
- package/dest/services/tx_collection/config.d.ts.map +1 -1
- package/dest/services/tx_collection/config.js +55 -1
- package/dest/services/tx_collection/fast_tx_collection.d.ts +7 -4
- package/dest/services/tx_collection/fast_tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/fast_tx_collection.js +71 -44
- package/dest/services/tx_collection/file_store_tx_collection.d.ts +53 -0
- package/dest/services/tx_collection/file_store_tx_collection.d.ts.map +1 -0
- package/dest/services/tx_collection/file_store_tx_collection.js +167 -0
- package/dest/services/tx_collection/file_store_tx_source.d.ts +37 -0
- package/dest/services/tx_collection/file_store_tx_source.d.ts.map +1 -0
- package/dest/services/tx_collection/file_store_tx_source.js +90 -0
- package/dest/services/tx_collection/index.d.ts +3 -1
- package/dest/services/tx_collection/index.d.ts.map +1 -1
- package/dest/services/tx_collection/index.js +2 -0
- package/dest/services/tx_collection/instrumentation.d.ts +1 -1
- package/dest/services/tx_collection/instrumentation.d.ts.map +1 -1
- package/dest/services/tx_collection/instrumentation.js +10 -2
- package/dest/services/tx_collection/missing_txs_tracker.d.ts +32 -0
- package/dest/services/tx_collection/missing_txs_tracker.d.ts.map +1 -0
- package/dest/services/tx_collection/missing_txs_tracker.js +27 -0
- package/dest/services/tx_collection/proposal_tx_collector.d.ts +49 -0
- package/dest/services/tx_collection/proposal_tx_collector.d.ts.map +1 -0
- package/dest/services/tx_collection/proposal_tx_collector.js +50 -0
- package/dest/services/tx_collection/slow_tx_collection.d.ts +7 -3
- package/dest/services/tx_collection/slow_tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/slow_tx_collection.js +60 -26
- package/dest/services/tx_collection/tx_collection.d.ts +25 -12
- package/dest/services/tx_collection/tx_collection.d.ts.map +1 -1
- package/dest/services/tx_collection/tx_collection.js +79 -7
- package/dest/services/tx_collection/tx_collection_sink.d.ts +18 -8
- package/dest/services/tx_collection/tx_collection_sink.d.ts.map +1 -1
- package/dest/services/tx_collection/tx_collection_sink.js +26 -29
- package/dest/services/tx_collection/tx_source.d.ts +8 -3
- package/dest/services/tx_collection/tx_source.d.ts.map +1 -1
- package/dest/services/tx_collection/tx_source.js +19 -2
- package/dest/services/tx_file_store/config.d.ts +16 -0
- package/dest/services/tx_file_store/config.d.ts.map +1 -0
- package/dest/services/tx_file_store/config.js +22 -0
- package/dest/services/tx_file_store/index.d.ts +4 -0
- package/dest/services/tx_file_store/index.d.ts.map +1 -0
- package/dest/services/tx_file_store/index.js +3 -0
- package/dest/services/tx_file_store/instrumentation.d.ts +15 -0
- package/dest/services/tx_file_store/instrumentation.d.ts.map +1 -0
- package/dest/services/tx_file_store/instrumentation.js +29 -0
- package/dest/services/tx_file_store/tx_file_store.d.ts +48 -0
- package/dest/services/tx_file_store/tx_file_store.d.ts.map +1 -0
- package/dest/services/tx_file_store/tx_file_store.js +152 -0
- package/dest/services/tx_provider.d.ts +4 -4
- package/dest/services/tx_provider.d.ts.map +1 -1
- package/dest/services/tx_provider.js +9 -8
- package/dest/services/tx_provider_instrumentation.d.ts +1 -1
- package/dest/services/tx_provider_instrumentation.d.ts.map +1 -1
- package/dest/services/tx_provider_instrumentation.js +5 -5
- package/dest/test-helpers/index.d.ts +3 -1
- package/dest/test-helpers/index.d.ts.map +1 -1
- package/dest/test-helpers/index.js +2 -0
- package/dest/test-helpers/make-test-p2p-clients.d.ts +7 -8
- package/dest/test-helpers/make-test-p2p-clients.d.ts.map +1 -1
- package/dest/test-helpers/make-test-p2p-clients.js +1 -2
- package/dest/test-helpers/mock-pubsub.d.ts +30 -4
- package/dest/test-helpers/mock-pubsub.d.ts.map +1 -1
- package/dest/test-helpers/mock-pubsub.js +105 -4
- package/dest/test-helpers/reqresp-nodes.d.ts +2 -3
- package/dest/test-helpers/reqresp-nodes.d.ts.map +1 -1
- package/dest/test-helpers/reqresp-nodes.js +4 -3
- package/dest/test-helpers/test_tx_provider.d.ts +40 -0
- package/dest/test-helpers/test_tx_provider.d.ts.map +1 -0
- package/dest/test-helpers/test_tx_provider.js +41 -0
- package/dest/test-helpers/testbench-utils.d.ts +163 -0
- package/dest/test-helpers/testbench-utils.d.ts.map +1 -0
- package/dest/test-helpers/testbench-utils.js +366 -0
- package/dest/testbench/p2p_client_testbench_worker.d.ts +28 -2
- package/dest/testbench/p2p_client_testbench_worker.d.ts.map +1 -1
- package/dest/testbench/p2p_client_testbench_worker.js +219 -138
- package/dest/testbench/worker_client_manager.d.ts +51 -6
- package/dest/testbench/worker_client_manager.d.ts.map +1 -1
- package/dest/testbench/worker_client_manager.js +226 -44
- package/dest/util.d.ts +2 -2
- package/dest/util.d.ts.map +1 -1
- package/package.json +14 -14
- package/src/bootstrap/bootstrap.ts +7 -4
- package/src/client/factory.ts +83 -36
- package/src/client/interface.ts +56 -34
- package/src/client/p2p_client.ts +192 -247
- package/src/client/test/tx_proposal_collector/README.md +227 -0
- package/src/client/test/tx_proposal_collector/proposal_tx_collector_worker.ts +345 -0
- package/src/client/test/tx_proposal_collector/proposal_tx_collector_worker_protocol.ts +43 -0
- package/src/config.ts +47 -11
- package/src/errors/tx-pool.error.ts +12 -0
- package/src/index.ts +1 -0
- package/src/mem_pools/attestation_pool/attestation_pool.ts +496 -91
- package/src/mem_pools/attestation_pool/attestation_pool_test_suite.ts +442 -102
- package/src/mem_pools/attestation_pool/index.ts +9 -2
- package/src/mem_pools/attestation_pool/mocks.ts +2 -1
- package/src/mem_pools/index.ts +4 -1
- package/src/mem_pools/instrumentation.ts +2 -1
- package/src/mem_pools/interface.ts +4 -4
- package/src/mem_pools/tx_pool/README.md +1 -1
- package/src/mem_pools/tx_pool/eviction/invalid_txs_after_mining_rule.ts +3 -3
- package/src/mem_pools/tx_pool_v2/README.md +275 -0
- package/src/mem_pools/tx_pool_v2/archive/index.ts +1 -0
- package/src/mem_pools/tx_pool_v2/archive/tx_archive.ts +120 -0
- package/src/mem_pools/tx_pool_v2/deleted_pool.ts +321 -0
- package/src/mem_pools/tx_pool_v2/eviction/eviction_manager.ts +160 -0
- package/src/mem_pools/tx_pool_v2/eviction/fee_payer_balance_eviction_rule.ts +121 -0
- package/src/mem_pools/tx_pool_v2/eviction/fee_payer_balance_pre_add_rule.ts +125 -0
- package/src/mem_pools/tx_pool_v2/eviction/index.ts +27 -0
- package/src/mem_pools/tx_pool_v2/eviction/interfaces.ts +209 -0
- package/src/mem_pools/tx_pool_v2/eviction/invalid_txs_after_mining_rule.ts +74 -0
- package/src/mem_pools/tx_pool_v2/eviction/invalid_txs_after_reorg_rule.ts +101 -0
- package/src/mem_pools/tx_pool_v2/eviction/low_priority_eviction_rule.ts +91 -0
- package/src/mem_pools/tx_pool_v2/eviction/low_priority_pre_add_rule.ts +90 -0
- package/src/mem_pools/tx_pool_v2/eviction/nullifier_conflict_rule.ts +31 -0
- package/src/mem_pools/tx_pool_v2/index.ts +12 -0
- package/src/mem_pools/tx_pool_v2/instrumentation.ts +69 -0
- package/src/mem_pools/tx_pool_v2/interfaces.ts +242 -0
- package/src/mem_pools/tx_pool_v2/tx_metadata.ts +297 -0
- package/src/mem_pools/tx_pool_v2/tx_pool_bench_metrics.ts +77 -0
- package/src/mem_pools/tx_pool_v2/tx_pool_indices.ts +444 -0
- package/src/mem_pools/tx_pool_v2/tx_pool_v2.ts +223 -0
- package/src/mem_pools/tx_pool_v2/tx_pool_v2_impl.ts +1083 -0
- package/src/msg_validators/attestation_validator/fisherman_attestation_validator.ts +10 -4
- package/src/msg_validators/proposal_validator/proposal_validator.ts +5 -5
- package/src/msg_validators/tx_validator/README.md +115 -0
- package/src/msg_validators/tx_validator/aggregate_tx_validator.ts +5 -5
- package/src/msg_validators/tx_validator/archive_cache.ts +3 -3
- package/src/msg_validators/tx_validator/block_header_validator.ts +22 -11
- package/src/msg_validators/tx_validator/data_validator.ts +6 -2
- package/src/msg_validators/tx_validator/double_spend_validator.ts +15 -9
- package/src/msg_validators/tx_validator/factory.ts +372 -55
- package/src/msg_validators/tx_validator/gas_validator.ts +98 -29
- package/src/msg_validators/tx_validator/index.ts +1 -0
- package/src/msg_validators/tx_validator/metadata_validator.ts +6 -3
- package/src/msg_validators/tx_validator/nullifier_cache.ts +30 -0
- package/src/msg_validators/tx_validator/phases_validator.ts +5 -3
- package/src/msg_validators/tx_validator/size_validator.ts +6 -2
- package/src/msg_validators/tx_validator/timestamp_validator.ts +29 -21
- package/src/msg_validators/tx_validator/tx_permitted_validator.ts +8 -3
- package/src/msg_validators/tx_validator/tx_proof_validator.ts +8 -3
- package/src/services/data_store.ts +10 -7
- package/src/services/discv5/discV5_service.ts +1 -1
- package/src/services/dummy_service.ts +59 -2
- package/src/services/encoding.ts +9 -9
- package/src/services/gossipsub/README.md +641 -0
- package/src/services/gossipsub/index.ts +2 -0
- package/src/services/gossipsub/scoring.ts +29 -5
- package/src/services/gossipsub/topic_score_params.ts +487 -0
- package/src/services/index.ts +1 -0
- package/src/services/libp2p/instrumentation.ts +15 -2
- package/src/services/libp2p/libp2p_service.ts +496 -397
- package/src/services/peer-manager/metrics.ts +21 -4
- package/src/services/peer-manager/peer_scoring.ts +29 -1
- package/src/services/reqresp/batch-tx-requester/README.md +305 -0
- package/src/services/reqresp/batch-tx-requester/batch_tx_requester.ts +678 -0
- package/src/services/reqresp/batch-tx-requester/config.ts +40 -0
- package/src/services/reqresp/batch-tx-requester/interface.ts +53 -0
- package/src/services/reqresp/batch-tx-requester/missing_txs.ts +161 -0
- package/src/services/reqresp/batch-tx-requester/peer_collection.ts +244 -0
- package/src/services/reqresp/batch-tx-requester/tx_validator.ts +37 -0
- package/src/services/reqresp/connection-sampler/batch_connection_sampler.ts +65 -4
- package/src/services/reqresp/connection-sampler/connection_sampler.ts +16 -0
- package/src/services/reqresp/interface.ts +29 -1
- package/src/services/reqresp/metrics.ts +34 -9
- package/src/services/reqresp/protocols/block_txs/bitvector.ts +7 -0
- package/src/services/reqresp/protocols/block_txs/block_txs_handler.ts +35 -12
- package/src/services/reqresp/protocols/block_txs/block_txs_reqresp.ts +74 -9
- package/src/services/reqresp/protocols/tx.ts +22 -0
- package/src/services/reqresp/reqresp.ts +82 -23
- package/src/services/service.ts +55 -2
- package/src/services/tx_collection/config.ts +83 -1
- package/src/services/tx_collection/fast_tx_collection.ts +93 -47
- package/src/services/tx_collection/file_store_tx_collection.ts +202 -0
- package/src/services/tx_collection/file_store_tx_source.ts +117 -0
- package/src/services/tx_collection/index.ts +6 -0
- package/src/services/tx_collection/instrumentation.ts +17 -2
- package/src/services/tx_collection/missing_txs_tracker.ts +52 -0
- package/src/services/tx_collection/proposal_tx_collector.ts +113 -0
- package/src/services/tx_collection/slow_tx_collection.ts +66 -33
- package/src/services/tx_collection/tx_collection.ts +117 -20
- package/src/services/tx_collection/tx_collection_sink.ts +30 -34
- package/src/services/tx_collection/tx_source.ts +22 -3
- package/src/services/tx_file_store/config.ts +37 -0
- package/src/services/tx_file_store/index.ts +3 -0
- package/src/services/tx_file_store/instrumentation.ts +36 -0
- package/src/services/tx_file_store/tx_file_store.ts +175 -0
- package/src/services/tx_provider.ts +10 -9
- package/src/services/tx_provider_instrumentation.ts +11 -5
- package/src/test-helpers/index.ts +2 -0
- package/src/test-helpers/make-test-p2p-clients.ts +3 -5
- package/src/test-helpers/mock-pubsub.ts +146 -9
- package/src/test-helpers/reqresp-nodes.ts +4 -6
- package/src/test-helpers/test_tx_provider.ts +64 -0
- package/src/test-helpers/testbench-utils.ts +430 -0
- package/src/testbench/p2p_client_testbench_worker.ts +333 -131
- package/src/testbench/worker_client_manager.ts +304 -47
- package/src/util.ts +7 -1
- package/dest/mem_pools/attestation_pool/kv_attestation_pool.d.ts +0 -40
- package/dest/mem_pools/attestation_pool/kv_attestation_pool.d.ts.map +0 -1
- package/dest/mem_pools/attestation_pool/kv_attestation_pool.js +0 -218
- package/dest/mem_pools/attestation_pool/memory_attestation_pool.d.ts +0 -31
- package/dest/mem_pools/attestation_pool/memory_attestation_pool.d.ts.map +0 -1
- package/dest/mem_pools/attestation_pool/memory_attestation_pool.js +0 -180
- package/src/mem_pools/attestation_pool/kv_attestation_pool.ts +0 -320
- package/src/mem_pools/attestation_pool/memory_attestation_pool.ts +0 -264
|
@@ -0,0 +1,641 @@
|
|
|
1
|
+
# Gossipsub Peer Scoring
|
|
2
|
+
|
|
3
|
+
This module configures gossipsub peer scoring parameters for the Aztec P2P network. Peer scoring helps maintain network health by rewarding well-behaving peers and penalizing misbehaving ones.
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
Gossipsub v1.1 introduces peer scoring to defend against various attacks and improve message propagation. Each peer accumulates a score based on their behavior, and peers with low scores may be pruned from the mesh or even disconnected.
|
|
8
|
+
|
|
9
|
+
For the full specification, see: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring
|
|
10
|
+
|
|
11
|
+
## Scoring Parameters
|
|
12
|
+
|
|
13
|
+
The peer score is computed as a weighted sum of topic-specific and application-specific scores:
|
|
14
|
+
|
|
15
|
+
```
|
|
16
|
+
Score = TopicScore + AppSpecificScore + IPColocationPenalty + BehaviorPenalty
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
### Topic-Specific Parameters (P1-P4)
|
|
20
|
+
|
|
21
|
+
Each topic has its own scoring parameters:
|
|
22
|
+
|
|
23
|
+
| Parameter | Type | Description |
|
|
24
|
+
|-----------|------|-------------|
|
|
25
|
+
| **P1: timeInMesh** | Positive | Rewards peers for time spent in the mesh |
|
|
26
|
+
| **P2: firstMessageDeliveries** | Positive | Rewards peers who deliver messages first |
|
|
27
|
+
| **P3: meshMessageDeliveries** | Negative | Penalizes peers who under-deliver messages |
|
|
28
|
+
| **P3b: meshFailurePenalty** | Negative | Sticky penalty applied when pruned from mesh |
|
|
29
|
+
| **P4: invalidMessageDeliveries** | Negative | Penalizes peers who deliver invalid messages |
|
|
30
|
+
|
|
31
|
+
### Our Configuration
|
|
32
|
+
|
|
33
|
+
We configure all parameters (P1-P4) with values calculated dynamically from network configuration:
|
|
34
|
+
|
|
35
|
+
| Parameter | Max Score | Configuration |
|
|
36
|
+
|-----------|-----------|---------------|
|
|
37
|
+
| P1: timeInMesh | +8 per topic | Slot-based, caps at 1 hour |
|
|
38
|
+
| P2: firstMessageDeliveries | +25 per topic | Convergence-based, fast decay |
|
|
39
|
+
| P3: meshMessageDeliveries | -34 per topic | Must exceed P1+P2 for pruning |
|
|
40
|
+
| P3b: meshFailurePenalty | -34 per topic | Sticky penalty after pruning |
|
|
41
|
+
| P4: invalidMessageDeliveries | -20 per message | Attack detection |
|
|
42
|
+
|
|
43
|
+
**Important:** P1 and P2 are only enabled on topics with P3 enabled. By default, P3 is enabled for checkpoint_proposal and checkpoint_attestation (2 topics). Block proposal scoring is controlled by `expectedBlockProposalsPerSlot` (current default: `0`, including when env var is unset, so disabled) - see [Block Proposals](#block-proposals-block_proposal) for details. The tx topic has all scoring disabled except P4, to prevent free positive score accumulation that would offset penalties from other topics.
|
|
44
|
+
|
|
45
|
+
## Exponential Decay
|
|
46
|
+
|
|
47
|
+
All counters in gossipsub use exponential decay. Each heartbeat (default: 700ms), counters are multiplied by a decay factor:
|
|
48
|
+
|
|
49
|
+
```
|
|
50
|
+
counter = counter * decay
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
### Multi-Slot Decay Windows
|
|
54
|
+
|
|
55
|
+
For low-frequency topics (like 1 message per 72-second slot), naive decay would cause counters to drop to near-zero before the next message arrives. Instead, we use **multi-slot decay windows**:
|
|
56
|
+
|
|
57
|
+
| Frequency | Decay Window |
|
|
58
|
+
|-----------|--------------|
|
|
59
|
+
| <= 1 msg/slot | 5 slots |
|
|
60
|
+
| 2-10 msg/slot | 3 slots |
|
|
61
|
+
| > 10 msg/slot | 2 slots |
|
|
62
|
+
|
|
63
|
+
### Decay Factor Calculation
|
|
64
|
+
|
|
65
|
+
To decay to 1% of the original value over the decay window:
|
|
66
|
+
|
|
67
|
+
```typescript
|
|
68
|
+
heartbeatsPerSlot = slotDurationMs / heartbeatIntervalMs
|
|
69
|
+
heartbeatsInWindow = heartbeatsPerSlot * decayWindowSlots
|
|
70
|
+
decay = 0.01 ^ (1 / heartbeatsInWindow)
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
**Example** (72s slot, 700ms heartbeat, 5-slot decay window):
|
|
74
|
+
```
|
|
75
|
+
heartbeatsPerSlot = 72000 / 700 ≈ 103
|
|
76
|
+
heartbeatsInWindow = 103 * 5 = 515
|
|
77
|
+
decay = 0.01^(1/515) ≈ 0.991
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
## Convergence and Thresholds
|
|
81
|
+
|
|
82
|
+
### Convergence (Steady-State Value)
|
|
83
|
+
|
|
84
|
+
If messages arrive at a constant rate, the decaying counter converges to:
|
|
85
|
+
|
|
86
|
+
```typescript
|
|
87
|
+
messagesPerHeartbeat = expectedPerSlot * (heartbeatMs / slotDurationMs)
|
|
88
|
+
convergence = messagesPerHeartbeat / (1 - decay)
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
### Threshold Calculation
|
|
92
|
+
|
|
93
|
+
The P3 threshold determines when penalties apply. We use a conservative threshold at 30% of convergence to avoid penalizing honest peers experiencing normal variance:
|
|
94
|
+
|
|
95
|
+
```typescript
|
|
96
|
+
threshold = convergence * 0.3
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
## meshMessageDeliveriesWindow
|
|
100
|
+
|
|
101
|
+
This parameter determines how long after validating a message other peers can still receive credit for delivering it.
|
|
102
|
+
|
|
103
|
+
**How it works:**
|
|
104
|
+
1. Peer A delivers a message first
|
|
105
|
+
2. We validate the message
|
|
106
|
+
3. Timer starts for `meshMessageDeliveriesWindow` duration (5 seconds)
|
|
107
|
+
4. Any mesh peer delivering within this window gets credit
|
|
108
|
+
|
|
109
|
+
**Why 5 seconds?**
|
|
110
|
+
|
|
111
|
+
The [gossipsub v1.1 spec](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) recommends this window be "small (in the order of milliseconds)" to prevent peers from gaming P3 scores by simply replaying messages back. A peer can echo a message within ~100ms, so a large window allows score inflation.
|
|
112
|
+
|
|
113
|
+
However, real-world implementations use significantly larger values due to practical constraints:
|
|
114
|
+
- **Prysm** (Go): 2 seconds - the go-libp2p default for low-latency Go runtime
|
|
115
|
+
- **Lodestar** (TypeScript): 12 seconds - accounts for JavaScript I/O lag
|
|
116
|
+
- **Ethereum spec proposal**: 400ms was proposed but rejected as "too tight"
|
|
117
|
+
|
|
118
|
+
We use **5 seconds** as a balanced middle ground because:
|
|
119
|
+
1. **Runtime considerations**: Our implementation is TypeScript (like Lodestar), not Go (like Prysm). JavaScript has higher I/O latency due to single-threaded event loop and garbage collection pauses.
|
|
120
|
+
2. **Network variance**: Even on healthy networks, message propagation can vary due to:
|
|
121
|
+
- Concurrent validation of multiple messages
|
|
122
|
+
- CPU-intensive proof verification
|
|
123
|
+
- Network congestion during high transaction volume
|
|
124
|
+
- Geographic distribution of validators
|
|
125
|
+
3. **Conservative but not excessive**: 5s is 2.5× the Go default (allowing for JS overhead) but still well below Lodestar's 12s, maintaining reasonable protection against replay attacks.
|
|
126
|
+
4. **Attack mitigation**: A 5s window still prevents score gaming - peers would need to consistently echo messages within 5s to maintain positive P3 scores, which requires them to stay connected and somewhat functional.
|
|
127
|
+
|
|
128
|
+
## meshMessageDeliveriesActivation
|
|
129
|
+
|
|
130
|
+
This is the grace period before P3 penalties can be applied to a peer. During this time, the message delivery counter accumulates without any penalty.
|
|
131
|
+
|
|
132
|
+
**Why activation is 5× the decay window:**
|
|
133
|
+
|
|
134
|
+
We set activation time to **5× the decay window** (10-25 slots depending on topic frequency) because:
|
|
135
|
+
|
|
136
|
+
1. **Timer starts at mesh join, not first message**: The activation countdown begins when a peer joins the mesh, not when they receive their first message. During network bootstrap, peers may join before any messages are flowing.
|
|
137
|
+
|
|
138
|
+
2. **Bootstrap grace period**: When the network is starting up, message flow may be delayed. Peers need time for the network to stabilize and messages to start propagating.
|
|
139
|
+
|
|
140
|
+
3. **Counter convergence**: The threshold is set at 30% of the *converged* counter value. If activation is too short, the counter hasn't approached convergence yet, and honest peers could be penalized unfairly.
|
|
141
|
+
|
|
142
|
+
4. **Join timing variance**: Peers may join at any point during a slot. With longer activation time, even peers joining at an unlucky time will have accumulated enough messages before penalties start.
|
|
143
|
+
|
|
144
|
+
5. **Ethereum precedent**: Ethereum's Lodestar implementation uses very long activation times (1-2 epochs ≈ 16-32 slots) for similar reasons.
|
|
145
|
+
|
|
146
|
+
| Topic | Decay Window | Activation Time (5×) |
|
|
147
|
+
|-------|--------------|----------------------|
|
|
148
|
+
| checkpoint_proposal | 5 slots (360s) | 25 slots (1800s / 30min) |
|
|
149
|
+
| block_proposal | 3 slots (216s) | 15 slots (1080s / 18min) |
|
|
150
|
+
| checkpoint_attestation | 2 slots (144s) | 10 slots (720s / 12min) |
|
|
151
|
+
|
|
152
|
+
## P1: Time in Mesh (Positive Score)
|
|
153
|
+
|
|
154
|
+
P1 rewards peers for time spent in the mesh. We use Lodestar-style slot-based normalization:
|
|
155
|
+
|
|
156
|
+
```typescript
|
|
157
|
+
timeInMeshQuantum = slotDurationMs // Score increases by ~1 per slot
|
|
158
|
+
timeInMeshCap = 3600 / slotDurationSeconds // Cap at 1 hour (50 slots for 72s slots)
|
|
159
|
+
timeInMeshWeight = MAX_P1_SCORE / cap // Normalized so max P1 = 8
|
|
160
|
+
```
|
|
161
|
+
|
|
162
|
+
**Key properties:**
|
|
163
|
+
- Score increases gradually: ~1 per slot of mesh membership
|
|
164
|
+
- Caps at 1 hour: prevents runaway positive scores
|
|
165
|
+
- Resets on mesh leave: no credit carried after pruning
|
|
166
|
+
|
|
167
|
+
**Example (72s slots):**
|
|
168
|
+
- After 10 minutes in mesh: P1 ≈ 1.3
|
|
169
|
+
- After 30 minutes in mesh: P1 ≈ 4
|
|
170
|
+
- After 1 hour in mesh: P1 = 8 (max)
|
|
171
|
+
|
|
172
|
+
## P2: First Message Deliveries (Positive Score)
|
|
173
|
+
|
|
174
|
+
P2 rewards peers who deliver messages first to us. We use convergence-based normalization:
|
|
175
|
+
|
|
176
|
+
```typescript
|
|
177
|
+
firstMessageDeliveriesDecay = computeDecay(2 slots) // Fast decay
|
|
178
|
+
firstMessageDeliveriesCap = convergence(1 msg/heartbeat)
|
|
179
|
+
firstMessageDeliveriesWeight = MAX_P2_SCORE / cap // Normalized so max P2 = 25
|
|
180
|
+
```
|
|
181
|
+
|
|
182
|
+
**Key properties:**
|
|
183
|
+
- Fast decay (2 slots): rewards recent behavior, not historical
|
|
184
|
+
- Caps at convergence: prevents score inflation from bursts
|
|
185
|
+
- Resets quickly after mesh leave: decays to near-zero over ~2 slots (e.g., ~144s with 72s slots)
|
|
186
|
+
|
|
187
|
+
## P3 Weight Formula
|
|
188
|
+
|
|
189
|
+
The P3 weight is calculated to ensure the max penalty equals `MAX_P3_PENALTY_PER_TOPIC` (-34):
|
|
190
|
+
|
|
191
|
+
```typescript
|
|
192
|
+
// Weight formula: max_penalty / threshold²
|
|
193
|
+
meshMessageDeliveriesWeight = MAX_P3_PENALTY_PER_TOPIC / (threshold * threshold)
|
|
194
|
+
|
|
195
|
+
// When peer delivers nothing (deficit = threshold):
|
|
196
|
+
// penalty = deficit² × weight = threshold² × (-34 / threshold²) = -34
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
This ensures P3 max penalty (-34) exceeds P1 + P2 max (+33), causing mesh pruning.
|
|
200
|
+
|
|
201
|
+
## Per-Topic Configuration
|
|
202
|
+
|
|
203
|
+
### Topic Types and Expected Rates
|
|
204
|
+
|
|
205
|
+
| Topic | Expected/Slot | Decay Window | Notes |
|
|
206
|
+
|-------|--------------|--------------|-------|
|
|
207
|
+
| `tx` | Unpredictable | N/A | P3/P3b disabled |
|
|
208
|
+
| `block_proposal` | N-1 | 3 slots | N = blocks per slot (MBPS mode) |
|
|
209
|
+
| `checkpoint_proposal` | 1 | 5 slots | One per slot |
|
|
210
|
+
| `checkpoint_attestation` | C (~48) | 2 slots | C = committee size |
|
|
211
|
+
|
|
212
|
+
### Transactions (tx)
|
|
213
|
+
|
|
214
|
+
Transactions are submitted unpredictably by users, so we cannot set meaningful delivery thresholds. **All scoring (P1, P2, P3, P3b) is disabled** for this topic except P4 (invalid message detection).
|
|
215
|
+
|
|
216
|
+
**Rationale:** If P1/P2 were enabled without P3, the tx topic would contribute free positive scores that could offset penalties from other topics, preventing proper mesh pruning of non-contributing peers.
|
|
217
|
+
|
|
218
|
+
### Block Proposals (block_proposal)
|
|
219
|
+
|
|
220
|
+
Block proposal scoring is controlled by the `expectedBlockProposalsPerSlot` config (`SEQ_EXPECTED_BLOCK_PROPOSALS_PER_SLOT` env var):
|
|
221
|
+
|
|
222
|
+
| Config Value | Behavior |
|
|
223
|
+
|-------------|----------|
|
|
224
|
+
| `0` (current default) | Block proposal P3 scoring is **disabled** |
|
|
225
|
+
| Positive number | Uses the provided value as expected proposals per slot |
|
|
226
|
+
| `undefined` | Falls back to `blocksPerSlot - 1` (MBPS mode: N-1, single block: 0) |
|
|
227
|
+
|
|
228
|
+
**Current behavior note:** In the current implementation, if `SEQ_EXPECTED_BLOCK_PROPOSALS_PER_SLOT` is not set, config mapping applies `0` by default (scoring disabled). The `undefined` fallback above is currently reachable only if the value is explicitly provided as `undefined` in code.
|
|
229
|
+
|
|
230
|
+
**Future intent:** Once throughput is stable, we may change env parsing/defaults so an unset env var resolves to `undefined` again (re-enabling automatic fallback to `blocksPerSlot - 1`).
|
|
231
|
+
|
|
232
|
+
**Why disabled by default?** In MBPS mode, gossipsub expects N-1 block proposals per slot. When transaction throughput is low (as expected at launch), fewer blocks are actually built, causing peers to be incorrectly penalized for under-delivering block proposals. The default of 0 disables this scoring. Set to a positive value when throughput increases and block production is consistent.
|
|
233
|
+
|
|
234
|
+
In MBPS mode (when enabled), N-1 block proposals are gossiped per slot (the last block is bundled with the checkpoint). In single-block mode, this is 0.
|
|
235
|
+
|
|
236
|
+
### Checkpoint Proposals (checkpoint_proposal)
|
|
237
|
+
|
|
238
|
+
Exactly one checkpoint proposal per slot, containing the final block and proof commitments.
|
|
239
|
+
|
|
240
|
+
### Checkpoint Attestations (checkpoint_attestation)
|
|
241
|
+
|
|
242
|
+
Each committee member sends one attestation per slot. With a target committee size of 48, we expect ~48 attestations per slot.
|
|
243
|
+
|
|
244
|
+
### Topic Weights
|
|
245
|
+
|
|
246
|
+
All topics use equal weight (1). Block proposals contain transaction hashes, so transactions must propagate for block proposals to validate - making all message types equally important for network health.
|
|
247
|
+
|
|
248
|
+
## Configuration Dependencies
|
|
249
|
+
|
|
250
|
+
The scoring parameters depend on:
|
|
251
|
+
|
|
252
|
+
| Parameter | Source | Default |
|
|
253
|
+
|-----------|--------|---------|
|
|
254
|
+
| `slotDuration` | L1RollupConstants | 72s |
|
|
255
|
+
| `targetCommitteeSize` | L1RollupConstants | 48 |
|
|
256
|
+
| `heartbeatInterval` | P2PConfig.gossipsubInterval | 700ms |
|
|
257
|
+
| `blockDurationMs` | P2PConfig.blockDurationMs | undefined (single block) |
|
|
258
|
+
| `expectedBlockProposalsPerSlot` | P2PConfig.expectedBlockProposalsPerSlot | 0 (disabled; current unset-env behavior) |
|
|
259
|
+
|
|
260
|
+
## Invalid Message Handling (P4)
|
|
261
|
+
|
|
262
|
+
P4 penalizes peers who deliver invalid messages. All topics have this enabled with:
|
|
263
|
+
- Weight: -20
|
|
264
|
+
- Decay: Over 4 slots
|
|
265
|
+
|
|
266
|
+
Invalid messages include malformed data, invalid signatures, or messages failing validation.
|
|
267
|
+
|
|
268
|
+
## Tuning Guidelines
|
|
269
|
+
|
|
270
|
+
### Signs of Too-Strict Scoring
|
|
271
|
+
|
|
272
|
+
- Honest peers frequently pruned from mesh
|
|
273
|
+
- High peer churn
|
|
274
|
+
- Slow message propagation despite good network
|
|
275
|
+
|
|
276
|
+
**Solution:** Lower the P3 delivery thresholds (e.g. reduce the 30%-of-convergence factor) and use longer decay windows, so honest peers with normal variance stay above the penalty floor
|
|
277
|
+
|
|
278
|
+
### Signs of Too-Lenient Scoring
|
|
279
|
+
|
|
280
|
+
- Slow or stalled message propagation
|
|
281
|
+
- Bad peers remaining in mesh too long
|
|
282
|
+
- Network vulnerable to eclipse attacks
|
|
283
|
+
|
|
284
|
+
**Solution:** Raise the P3 delivery thresholds and use shorter decay windows, so under-delivering peers fall below the penalty floor sooner
|
|
285
|
+
|
|
286
|
+
### Monitoring
|
|
287
|
+
|
|
288
|
+
Key metrics to monitor:
|
|
289
|
+
- Peer scores distribution
|
|
290
|
+
- P3 penalty frequency per topic
|
|
291
|
+
- Invalid message rate per peer
|
|
292
|
+
- Mesh membership stability
|
|
293
|
+
|
|
294
|
+
## Code Structure
|
|
295
|
+
|
|
296
|
+
- `scoring.ts` - Global peer score thresholds
|
|
297
|
+
- `topic_score_params.ts` - Per-topic parameter calculation
|
|
298
|
+
- `index.ts` - Module exports
|
|
299
|
+
|
|
300
|
+
## Global Score Thresholds
|
|
301
|
+
|
|
302
|
+
Gossipsub uses global thresholds to determine peer behavior based on total score:
|
|
303
|
+
|
|
304
|
+
| Threshold | Value | Effect |
|
|
305
|
+
|-----------|-------|--------|
|
|
306
|
+
| gossipThreshold | -500 | Below this, peer doesn't receive gossip |
|
|
307
|
+
| publishThreshold | -1000 | Below this, peer's messages aren't relayed |
|
|
308
|
+
| graylistThreshold | -2000 | Below this, all RPCs from peer are ignored |
|
|
309
|
+
|
|
310
|
+
### Alignment with Application-Level Scoring
|
|
311
|
+
|
|
312
|
+
The thresholds are designed to align with Aztec's application-level peer scoring:
|
|
313
|
+
|
|
314
|
+
```
|
|
315
|
+
Total Gossipsub Score = TopicScore + (AppScore × AppSpecificWeight)
|
|
316
|
+
```
|
|
317
|
+
|
|
318
|
+
With `appSpecificWeight = 10` (topic score assumed ~0):
|
|
319
|
+
|
|
320
|
+
| App Score State | App Score | Gossipsub Contribution | Threshold Triggered |
|
|
321
|
+
|-----------------|-----------|------------------------|---------------------|
|
|
322
|
+
| Healthy | 0 to -49 | 0 to -490 | None |
|
|
323
|
+
| Disconnect | -50 | -500 | gossipThreshold |
|
|
324
|
+
| Ban | -100 | -1000 | publishThreshold |
|
|
325
|
+
|
|
326
|
+
This means (best-effort alignment):
|
|
327
|
+
- When a peer reaches **Disconnect** state, they generally stop receiving gossip
|
|
328
|
+
- When a peer reaches **Ban** state, their messages are generally not relayed
|
|
329
|
+
- **Graylist** requires ban-level score PLUS significant topic penalties (attacks)
|
|
330
|
+
|
|
331
|
+
**Important:** Positive topic scores (P1/P2) can temporarily offset app penalties, so alignment is not strict.
|
|
332
|
+
Conversely, if topic scores are low, a peer slightly above the disconnect threshold may still dip below `gossipThreshold`. This is acceptable and tends to recover quickly as topic scores accumulate.
|
|
333
|
+
|
|
334
|
+
### Topic Score Contribution
|
|
335
|
+
|
|
336
|
+
Topic scores provide **burst response** to attacks, while app score provides **stable baseline**:
|
|
337
|
+
|
|
338
|
+
- P1 (time in mesh): Max +8 per topic (+16 default, +24 with block proposal scoring enabled)
|
|
339
|
+
- P2 (first deliveries): Max +25 per topic (+50 default, +75 with block proposal scoring, but decays fast)
|
|
340
|
+
- P3 (under-delivery): Max -34 per topic (-68 default with 2 topics, -102 with block proposal scoring enabled)
|
|
341
|
+
- P4 (invalid messages): -20 per invalid message, can spike to -2000+ during attacks
|
|
342
|
+
|
|
343
|
+
Example attack scenario:
|
|
344
|
+
- App score: -100 (banned) → -1000 gossipsub
|
|
345
|
+
- P4 burst (10 invalid messages): -2000 per topic
|
|
346
|
+
- **Total: -3000+** → Triggers graylistThreshold
|
|
347
|
+
|
|
348
|
+
The P4 penalty decays to 1% over 4 slots (~5 minutes), allowing recovery if the attack stops.
|
|
349
|
+
|
|
350
|
+
## Non-Contributing Peers
|
|
351
|
+
|
|
352
|
+
### How P3 Handles Under-Delivery
|
|
353
|
+
|
|
354
|
+
The P3 (meshMessageDeliveries) penalty applies when a peer's message delivery counter falls below the threshold. The penalty formula is:
|
|
355
|
+
|
|
356
|
+
```
|
|
357
|
+
deficit = max(0, threshold - counter)
|
|
358
|
+
penalty = deficit² × weight
|
|
359
|
+
```
|
|
360
|
+
|
|
361
|
+
Where `weight = MAX_P3_PENALTY_PER_TOPIC / (threshold × threshold)`. This design ensures:
|
|
362
|
+
|
|
363
|
+
```
|
|
364
|
+
If counter = 0 (delivers nothing):
|
|
365
|
+
deficit = threshold
|
|
366
|
+
penalty = threshold² × (-34/threshold²) = -34 per topic
|
|
367
|
+
```
|
|
368
|
+
|
|
369
|
+
### Score Balance for Mesh Pruning
|
|
370
|
+
|
|
371
|
+
For a peer to be pruned from the mesh, their **topic score** must be negative. We balance P1/P2/P3 so that non-contributors get pruned:
|
|
372
|
+
|
|
373
|
+
| Scenario | P1 | P2 | P3 | Topic Score | Result |
|
|
374
|
+
|----------|----|----|-----|-------------|--------|
|
|
375
|
+
| Healthy peer (delivering) | +8 | +25 | 0 | +33 | In mesh |
|
|
376
|
+
| New peer (just joined) | +1 | +5 | 0 | +6 | In mesh |
|
|
377
|
+
| Non-contributor (1 hour in mesh) | +8 | 0 | -34 | **-26** | **Pruned** |
|
|
378
|
+
| Non-contributor (new) | +1 | 0 | -34 | **-33** | **Pruned** |
|
|
379
|
+
|
|
380
|
+
The key insight: **P3 max (-34) exceeds P1 + P2 max (+33)**, so even a peer that has been in the mesh for 1 hour will still be pruned if they stop delivering messages.
|
|
381
|
+
|
|
382
|
+
### What Happens After Pruning
|
|
383
|
+
|
|
384
|
+
When a peer is pruned from the mesh:
|
|
385
|
+
|
|
386
|
+
1. **P1 resets to 0**: The timeInMesh counter is cleared
|
|
387
|
+
2. **P2 decays to 0**: Fast decay (2-slot window) makes it negligible over minutes
|
|
388
|
+
3. **P3b captures the penalty**: The P3 deficit at prune time becomes P3b, which decays slowly
|
|
389
|
+
|
|
390
|
+
After pruning, the peer's score consists mainly of P3b:
|
|
391
|
+
- **Total P3b: -68** (default, 2 topics) or **-102** (with block proposal scoring enabled, 3 topics)
|
|
392
|
+
- **Recovery time**: P3b decays to ~1% over one decay window (2-5 slots = 2-6 minutes)
|
|
393
|
+
- **Grafting eligibility**: Peer can be grafted when score ≥ 0, but asymptotic decay means recovery is slow
|
|
394
|
+
|
|
395
|
+
### Why Non-Contributors Aren't Disconnected
|
|
396
|
+
|
|
397
|
+
With P3b capped at -68 (default, 2 topics) or -102 (with block proposal scoring, 3 topics) after pruning:
|
|
398
|
+
|
|
399
|
+
| Threshold | Value | P3b Score | Triggered? |
|
|
400
|
+
|-----------|-------|-----------|------------|
|
|
401
|
+
| gossipThreshold | -500 | -68 (default) / -102 (block scoring on) | No |
|
|
402
|
+
| publishThreshold | -1000 | -68 (default) / -102 (block scoring on) | No |
|
|
403
|
+
| graylistThreshold | -2000 | -68 (default) / -102 (block scoring on) | No |
|
|
404
|
+
|
|
405
|
+
**A score of -68 or -102 is well above -500**, so non-contributing peers:
|
|
406
|
+
- Are pruned from mesh (good - stops them slowing propagation)
|
|
407
|
+
- Still receive gossip (can recover by reconnecting/restarting)
|
|
408
|
+
- Are NOT disconnected unless they also have application-level penalties
|
|
409
|
+
|
|
410
|
+
### Design Philosophy
|
|
411
|
+
|
|
412
|
+
The system distinguishes between:
|
|
413
|
+
|
|
414
|
+
| Peer Type | Score Range | Effect |
|
|
415
|
+
|-----------|-------------|--------|
|
|
416
|
+
| **Productive** | ≥ 0 | Full mesh participation |
|
|
417
|
+
| **Unproductive** | -1 to -499 | Pruned from mesh, still receives gossip |
|
|
418
|
+
| **Misbehaving** | -500 to -999 | Stops receiving gossip (app: Disconnect) |
|
|
419
|
+
| **Malicious** | -1000 to -1999 | Cannot publish (app: Banned) |
|
|
420
|
+
| **Attacking** | ≤ -2000 | Graylisted, all RPCs ignored |
|
|
421
|
+
|
|
422
|
+
Note: These ranges are approximate; positive topic scores can shift a peer upward temporarily.
|
|
423
|
+
|
|
424
|
+
This is similar to Ethereum's approach: non-contributing peers are removed from the mesh (preventing them from slowing propagation) but not disconnected, as they might be starting up or experiencing temporary connectivity issues.
|
|
425
|
+
|
|
426
|
+
### When Non-Contributors ARE Penalized
|
|
427
|
+
|
|
428
|
+
Non-contributors will trigger thresholds if they also:
|
|
429
|
+
1. **Send invalid messages**: P4 penalty of -20 per invalid message accumulates quickly
|
|
430
|
+
2. **Fail protocol validation**: Application penalties for deserialization errors, manipulation attempts
|
|
431
|
+
3. **Violate rate limits**: Repeated per-peer limit hits accumulate application penalties
|
|
432
|
+
|
|
433
|
+
## Application-Level Penalties
|
|
434
|
+
|
|
435
|
+
Beyond gossipsub's topic scoring, Aztec has application-level penalties for protocol violations:
|
|
436
|
+
|
|
437
|
+
### Penalty Severities
|
|
438
|
+
|
|
439
|
+
| Severity | Points | Errors to Disconnect | Errors to Ban |
|
|
440
|
+
|----------|--------|----------------------|---------------|
|
|
441
|
+
| **HighToleranceError** | 2 | 25 | 50 |
|
|
442
|
+
| **MidToleranceError** | 10 | 5 | 10 |
|
|
443
|
+
| **LowToleranceError** | 50 | 1 | 2 |
|
|
444
|
+
|
|
445
|
+
### What Triggers Each Severity
|
|
446
|
+
|
|
447
|
+
**HighToleranceError (2 points)** - Transient issues:
|
|
448
|
+
- Rate limit exceeded
|
|
449
|
+
- Failed responses (FAILURE/UNKNOWN status)
|
|
450
|
+
- Recent double spend attempts (within penalty window)
|
|
451
|
+
|
|
452
|
+
**MidToleranceError (10 points)** - Protocol violations:
|
|
453
|
+
- Block/checkpoint exceeds per-slot cap
|
|
454
|
+
- Response hash mismatches
|
|
455
|
+
- Duplicate transactions in response
|
|
456
|
+
- Unrequested transactions in response
|
|
457
|
+
|
|
458
|
+
**LowToleranceError (50 points)** - Serious violations:
|
|
459
|
+
- Message deserialization errors
|
|
460
|
+
- Invalid message manipulation attempts
|
|
461
|
+
- Block number/order mismatches
|
|
462
|
+
- Invalid transactions
|
|
463
|
+
- Badly formed requests
|
|
464
|
+
- Confirmed double spends
|
|
465
|
+
|
|
466
|
+
### Score Decay
|
|
467
|
+
|
|
468
|
+
Application scores decay by 10% per minute (`decayFactor = 0.9`):
|
|
469
|
+
- Score -100 → -90 after 1 minute
|
|
470
|
+
- Score -100 → -35 after 10 minutes
|
|
471
|
+
- Score -100 → -12 after 20 minutes
|
|
472
|
+
|
|
473
|
+
This allows honest peers to recover from temporary issues.
|
|
474
|
+
|
|
475
|
+
## Score Calculation Examples
|
|
476
|
+
|
|
477
|
+
### Example 1: Honest Peer
|
|
478
|
+
|
|
479
|
+
```
|
|
480
|
+
App score: 0
|
|
481
|
+
Topic P3: 0 (delivering messages)
|
|
482
|
+
Topic P4: 0 (no invalid messages)
|
|
483
|
+
─────────────────────────────────
|
|
484
|
+
Total: 0 → Full participation ✓
|
|
485
|
+
```
|
|
486
|
+
|
|
487
|
+
### Example 2: Peer with Rate Limit Issues
|
|
488
|
+
|
|
489
|
+
```
|
|
490
|
+
App score: -20 (10 HighToleranceErrors)
|
|
491
|
+
→ Gossipsub contribution: -200
|
|
492
|
+
Topic P3: -1 (slightly under-delivering)
|
|
493
|
+
Topic P4: 0
|
|
494
|
+
─────────────────────────────────
|
|
495
|
+
Total: -201 → Still receives gossip ✓
|
|
496
|
+
```
|
|
497
|
+
|
|
498
|
+
### Example 3: Validation Failure
|
|
499
|
+
|
|
500
|
+
```
|
|
501
|
+
App score: -50 (1 LowToleranceError for invalid message)
|
|
502
|
+
→ Gossipsub contribution: -500
|
|
503
|
+
Topic P3: 0
|
|
504
|
+
Topic P4: -20 (the invalid message)
|
|
505
|
+
─────────────────────────────────
|
|
506
|
+
Total: -520 → Stops receiving gossip (gossipThreshold = -500)
|
|
507
|
+
→ Application disconnects peer
|
|
508
|
+
```
|
|
509
|
+
|
|
510
|
+
### Example 4: Banned Peer
|
|
511
|
+
|
|
512
|
+
```
|
|
513
|
+
App score: -100 (2 LowToleranceErrors)
|
|
514
|
+
→ Gossipsub contribution: -1000
|
|
515
|
+
Topic P3: -2
|
|
516
|
+
Topic P4: -40 (2 invalid messages)
|
|
517
|
+
─────────────────────────────────
|
|
518
|
+
Total: -1042 → Cannot publish (publishThreshold = -1000)
|
|
519
|
+
→ Application bans peer
|
|
520
|
+
```
|
|
521
|
+
|
|
522
|
+
### Example 5: Active Attack (Burst of Invalid Messages)
|
|
523
|
+
|
|
524
|
+
```
|
|
525
|
+
App score: -100 (banned)
|
|
526
|
+
→ Gossipsub contribution: -1000
|
|
527
|
+
Topic P3: -3
|
|
528
|
+
Topic P4: -200 (10 invalid messages: 10 × -20)
|
|
529
|
+
─────────────────────────────────
|
|
530
|
+
Total: -1203 → Cannot publish (publishThreshold = -1000)
|
|
531
|
+
|
|
532
|
+
If the attacker sends 100 invalid messages quickly:
|
|
533
|
+
|
|
534
|
+
Topic P4: -2000 (100 invalid messages: 100 × -20)
|
|
535
|
+
─────────────────────────────────
|
|
536
|
+
Total: -3003 → Graylisted (graylistThreshold = -2000)
|
|
537
|
+
→ All RPCs ignored
|
|
538
|
+
```
|
|
539
|
+
|
|
540
|
+
### Example 6: Recovery After Attack
|
|
541
|
+
|
|
542
|
+
```
|
|
543
|
+
Initial state: Total score -3003
|
|
544
|
+
|
|
545
|
+
After 4 slots (~5 min):
|
|
546
|
+
P4 decays to 1%: -2000 → -20
|
|
547
|
+
App score unchanged: -1000
|
|
548
|
+
Total: -1023 → Still banned, but no longer graylisted
|
|
549
|
+
|
|
550
|
+
After 10 min:
|
|
551
|
+
App score decays: -100 → -35 → -350 contribution
|
|
552
|
+
P4 further decayed: ~-5
|
|
553
|
+
Total: -358 → Above gossipThreshold, starting to recover
|
|
554
|
+
```
|
|
555
|
+
|
|
556
|
+
## Network Outage Analysis
|
|
557
|
+
|
|
558
|
+
What happens when a peer experiences a network outage and stops delivering messages?
|
|
559
|
+
|
|
560
|
+
### During the Outage
|
|
561
|
+
|
|
562
|
+
While the peer is disconnected:
|
|
563
|
+
|
|
564
|
+
1. **P3 penalty accumulates**: The message delivery counter decays toward 0, causing increasing P3 penalty
|
|
565
|
+
2. **Max P3 penalty reached**: Once counter drops below threshold, penalty hits -34 per topic (-68 default, -102 with block proposal scoring)
|
|
566
|
+
3. **Mesh pruning**: Topic score goes negative → peer is pruned from mesh
|
|
567
|
+
4. **P3b captures penalty**: The P3 deficit at prune time becomes P3b (sticky penalty)
|
|
568
|
+
|
|
569
|
+
### Outage Timeline
|
|
570
|
+
|
|
571
|
+
| Time | Event | Score Impact |
|
|
572
|
+
|------|-------|--------------|
|
|
573
|
+
| 0s | Outage begins | P3 = 0 |
|
|
574
|
+
| ~1 decay window (2-5 slots) | Counter decays below threshold | P3 starts decreasing |
|
|
575
|
+
| ~1-2 decay windows | Counter approaches 0 | P3 ≈ -34 per topic |
|
|
576
|
+
| ~1-2 decay windows | Peer pruned from mesh | P3b ≈ -34 per topic |
|
|
577
|
+
| Thereafter | P3b decays slowly | Recovery begins |
|
|
578
|
+
|
|
579
|
+
Note: If the peer just joined the mesh, P3 penalties only start after
|
|
580
|
+
`meshMessageDeliveriesActivation` (10-25 slots depending on topic frequency).
|
|
581
|
+
|
|
582
|
+
### Key Insight: No Application Penalties
|
|
583
|
+
|
|
584
|
+
During a network outage, the peer:
|
|
585
|
+
- **Does NOT send invalid messages** → No P4 penalty
|
|
586
|
+
- **Does NOT violate protocols** → No application-level penalty
|
|
587
|
+
- **Only accumulates topic-level penalties** → Max -68 (default) or -102 (with block proposal scoring)
|
|
588
|
+
|
|
589
|
+
This is the crucial difference from malicious behavior:
|
|
590
|
+
|
|
591
|
+
| Scenario | App Score | Topic Score | Total | Threshold Hit |
|
|
592
|
+
|----------|-----------|-------------|-------|---------------|
|
|
593
|
+
| Network outage | 0 | -68 (default) / -102 (block scoring on) | -68 / -102 | None |
|
|
594
|
+
| Validation failure | -50 | -20 | -520 | gossipThreshold |
|
|
595
|
+
| Malicious peer | -100 | -2000+ | -2100+ | graylistThreshold |
|
|
596
|
+
|
|
597
|
+
### Recovery After Outage
|
|
598
|
+
|
|
599
|
+
When the peer reconnects:
|
|
600
|
+
|
|
601
|
+
1. **Peer re-joins mesh**: Can request graft (topic score must be ≥ 0 for acceptance)
|
|
602
|
+
2. **P3b decays**: To ~1% over decay window (2-5 slots depending on topic)
|
|
603
|
+
3. **P1 restarts from 0**: timeInMesh counter begins accumulating
|
|
604
|
+
4. **P2 restarts from 0**: firstMessageDeliveries counter begins accumulating
|
|
605
|
+
|
|
606
|
+
**Recovery timeline:**
|
|
607
|
+
- Immediate: Peer can attempt to re-graft
|
|
608
|
+
- ~3-5 minutes: P3b decays to near-zero
|
|
609
|
+
- ~10+ minutes: P1 builds up again (if staying in mesh)
|
|
610
|
+
|
|
611
|
+
### Why This Design Works
|
|
612
|
+
|
|
613
|
+
The system correctly distinguishes between:
|
|
614
|
+
|
|
615
|
+
| Behavior | Treatment |
|
|
616
|
+
|----------|-----------|
|
|
617
|
+
| **Network issues** | Pruned from mesh (stops slowing propagation), can recover quickly |
|
|
618
|
+
| **Protocol violations** | Disconnected (gossipThreshold), must wait for app score decay |
|
|
619
|
+
| **Malicious activity** | Banned/graylisted, requires both app and topic score decay |
|
|
620
|
+
|
|
621
|
+
A peer experiencing network problems will:
|
|
622
|
+
- Be temporarily removed from mesh propagation (good for network health)
|
|
623
|
+
- NOT be disconnected or banned (they haven't misbehaved)
|
|
624
|
+
- Recover automatically when connectivity returns
|
|
625
|
+
- Retain their connections for recovery
|
|
626
|
+
|
|
627
|
+
This matches Ethereum's approach: **honest peers with temporary issues are inconvenienced but not punished**.
|
|
628
|
+
|
|
629
|
+
### Rate Limiting During Outages
|
|
630
|
+
|
|
631
|
+
Note: Simply not sending messages does NOT trigger rate limit penalties. Rate limits apply to:
|
|
632
|
+
- **Per-peer rate limit exceeded** → HighToleranceError (2 points)
|
|
633
|
+
- **Other protocol violations** → MidToleranceError or LowToleranceError depending on severity
|
|
634
|
+
|
|
635
|
+
A peer that sends nothing receives no rate limit penalties. The only penalty for not delivering messages is P3, which is explicitly designed to be recoverable.
|
|
636
|
+
|
|
637
|
+
## References
|
|
638
|
+
|
|
639
|
+
- [Gossipsub v1.1 Specification](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md)
|
|
640
|
+
- [Lighthouse Scoring Implementation](https://github.com/sigp/lighthouse/blob/stable/beacon_node/lighthouse_network/src/peer_manager/score.rs)
|
|
641
|
+
- [Lodestar Scoring Implementation](https://github.com/ChainSafe/lodestar/tree/unstable/packages/beacon-node/src/network/gossip)
|
|
@@ -1,13 +1,37 @@
|
|
|
1
1
|
import type { PeerScoreThresholds } from '@chainsafe/libp2p-gossipsub/score';
|
|
2
2
|
|
|
3
3
|
/**
|
|
4
|
-
*
|
|
5
|
-
*
|
|
4
|
+
* Weight applied to application-level peer scores before contributing to gossipsub score.
|
|
5
|
+
*
|
|
6
|
+
* Note: positive topic scores can partially offset app penalties, so alignment with
|
|
7
|
+
* app-level thresholds is best-effort rather than strict.
|
|
8
|
+
*/
|
|
9
|
+
export const APP_SPECIFIC_WEIGHT = 10;
|
|
10
|
+
|
|
11
|
+
/**
|
|
12
|
+
* Gossipsub peer score thresholds aligned with application-level scoring.
|
|
13
|
+
*
|
|
14
|
+
* These thresholds work with appSpecificWeight=10 to align gossipsub behavior
|
|
15
|
+
* with application-level peer states (Healthy → Disconnect → Banned).
|
|
16
|
+
*
|
|
17
|
+
* Alignment:
|
|
18
|
+
* - gossipThreshold (-500): Matches Disconnect state (app score -50 × weight 10)
|
|
19
|
+
* - publishThreshold (-1000): Matches Ban state (app score -100 × weight 10)
|
|
20
|
+
* - graylistThreshold (-2000): For severe attacks (ban + topic penalties)
|
|
21
|
+
*
|
|
22
|
+
* The 1:2:4 ratio follows Lodestar's approach and gossipsub spec recommendations.
|
|
23
|
+
*
|
|
24
|
+
* @see https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#peer-scoring
|
|
6
25
|
*/
|
|
7
26
|
export const gossipScoreThresholds: PeerScoreThresholds = {
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
27
|
+
/** Below this, peer is not gossiped to (matches Disconnect state) */
|
|
28
|
+
gossipThreshold: -500,
|
|
29
|
+
/** Below this, self-published messages are not propagated to peer (matches Ban state) */
|
|
30
|
+
publishThreshold: -1000,
|
|
31
|
+
/** Below this, all RPCs from peer are ignored (severe attack scenario) */
|
|
32
|
+
graylistThreshold: -2000,
|
|
33
|
+
/** Above this, peer can offer peer exchange (PX) */
|
|
11
34
|
acceptPXThreshold: 100,
|
|
35
|
+
/** Above this, peer can be grafted to mesh opportunistically */
|
|
12
36
|
opportunisticGraftThreshold: 5,
|
|
13
37
|
};
|