chia-blockchain 2.5.1rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chia/__init__.py +10 -0
- chia/__main__.py +5 -0
- chia/_tests/README.md +53 -0
- chia/_tests/__init__.py +0 -0
- chia/_tests/blockchain/__init__.py +0 -0
- chia/_tests/blockchain/blockchain_test_utils.py +195 -0
- chia/_tests/blockchain/config.py +4 -0
- chia/_tests/blockchain/test_augmented_chain.py +145 -0
- chia/_tests/blockchain/test_blockchain.py +4202 -0
- chia/_tests/blockchain/test_blockchain_transactions.py +1031 -0
- chia/_tests/blockchain/test_build_chains.py +59 -0
- chia/_tests/blockchain/test_get_block_generator.py +72 -0
- chia/_tests/blockchain/test_lookup_fork_chain.py +194 -0
- chia/_tests/build-init-files.py +92 -0
- chia/_tests/build-job-matrix.py +204 -0
- chia/_tests/check_pytest_monitor_output.py +34 -0
- chia/_tests/check_sql_statements.py +72 -0
- chia/_tests/chia-start-sim +42 -0
- chia/_tests/clvm/__init__.py +0 -0
- chia/_tests/clvm/benchmark_costs.py +23 -0
- chia/_tests/clvm/coin_store.py +149 -0
- chia/_tests/clvm/test_chialisp_deserialization.py +101 -0
- chia/_tests/clvm/test_clvm_step.py +37 -0
- chia/_tests/clvm/test_condition_codes.py +13 -0
- chia/_tests/clvm/test_curry_and_treehash.py +55 -0
- chia/_tests/clvm/test_message_conditions.py +184 -0
- chia/_tests/clvm/test_program.py +150 -0
- chia/_tests/clvm/test_puzzle_compression.py +143 -0
- chia/_tests/clvm/test_puzzle_drivers.py +45 -0
- chia/_tests/clvm/test_puzzles.py +242 -0
- chia/_tests/clvm/test_singletons.py +540 -0
- chia/_tests/clvm/test_spend_sim.py +181 -0
- chia/_tests/cmds/__init__.py +0 -0
- chia/_tests/cmds/cmd_test_utils.py +469 -0
- chia/_tests/cmds/config.py +3 -0
- chia/_tests/cmds/conftest.py +23 -0
- chia/_tests/cmds/test_click_types.py +200 -0
- chia/_tests/cmds/test_cmd_framework.py +620 -0
- chia/_tests/cmds/test_cmds_util.py +97 -0
- chia/_tests/cmds/test_daemon.py +92 -0
- chia/_tests/cmds/test_dev_gh.py +131 -0
- chia/_tests/cmds/test_farm_cmd.py +66 -0
- chia/_tests/cmds/test_show.py +116 -0
- chia/_tests/cmds/test_sim.py +207 -0
- chia/_tests/cmds/test_timelock_args.py +75 -0
- chia/_tests/cmds/test_tx_config_args.py +154 -0
- chia/_tests/cmds/testing_classes.py +59 -0
- chia/_tests/cmds/wallet/__init__.py +0 -0
- chia/_tests/cmds/wallet/test_consts.py +47 -0
- chia/_tests/cmds/wallet/test_dao.py +565 -0
- chia/_tests/cmds/wallet/test_did.py +403 -0
- chia/_tests/cmds/wallet/test_nft.py +471 -0
- chia/_tests/cmds/wallet/test_notifications.py +124 -0
- chia/_tests/cmds/wallet/test_offer.toffer +1 -0
- chia/_tests/cmds/wallet/test_tx_decorators.py +27 -0
- chia/_tests/cmds/wallet/test_vcs.py +400 -0
- chia/_tests/cmds/wallet/test_wallet.py +1125 -0
- chia/_tests/cmds/wallet/test_wallet_check.py +109 -0
- chia/_tests/conftest.py +1419 -0
- chia/_tests/connection_utils.py +125 -0
- chia/_tests/core/__init__.py +0 -0
- chia/_tests/core/cmds/__init__.py +0 -0
- chia/_tests/core/cmds/test_beta.py +382 -0
- chia/_tests/core/cmds/test_keys.py +1734 -0
- chia/_tests/core/cmds/test_wallet.py +126 -0
- chia/_tests/core/config.py +3 -0
- chia/_tests/core/consensus/__init__.py +0 -0
- chia/_tests/core/consensus/test_block_creation.py +54 -0
- chia/_tests/core/consensus/test_pot_iterations.py +117 -0
- chia/_tests/core/custom_types/__init__.py +0 -0
- chia/_tests/core/custom_types/test_coin.py +107 -0
- chia/_tests/core/custom_types/test_proof_of_space.py +144 -0
- chia/_tests/core/custom_types/test_spend_bundle.py +70 -0
- chia/_tests/core/daemon/__init__.py +0 -0
- chia/_tests/core/daemon/config.py +4 -0
- chia/_tests/core/daemon/test_daemon.py +2128 -0
- chia/_tests/core/daemon/test_daemon_register.py +109 -0
- chia/_tests/core/daemon/test_keychain_proxy.py +101 -0
- chia/_tests/core/data_layer/__init__.py +0 -0
- chia/_tests/core/data_layer/config.py +5 -0
- chia/_tests/core/data_layer/conftest.py +106 -0
- chia/_tests/core/data_layer/test_data_cli.py +56 -0
- chia/_tests/core/data_layer/test_data_layer.py +83 -0
- chia/_tests/core/data_layer/test_data_layer_util.py +218 -0
- chia/_tests/core/data_layer/test_data_rpc.py +3847 -0
- chia/_tests/core/data_layer/test_data_store.py +2424 -0
- chia/_tests/core/data_layer/test_data_store_schema.py +381 -0
- chia/_tests/core/data_layer/test_plugin.py +91 -0
- chia/_tests/core/data_layer/util.py +233 -0
- chia/_tests/core/farmer/__init__.py +0 -0
- chia/_tests/core/farmer/config.py +3 -0
- chia/_tests/core/farmer/test_farmer_api.py +103 -0
- chia/_tests/core/full_node/__init__.py +0 -0
- chia/_tests/core/full_node/config.py +4 -0
- chia/_tests/core/full_node/dos/__init__.py +0 -0
- chia/_tests/core/full_node/dos/config.py +3 -0
- chia/_tests/core/full_node/full_sync/__init__.py +0 -0
- chia/_tests/core/full_node/full_sync/config.py +4 -0
- chia/_tests/core/full_node/full_sync/test_full_sync.py +443 -0
- chia/_tests/core/full_node/ram_db.py +27 -0
- chia/_tests/core/full_node/stores/__init__.py +0 -0
- chia/_tests/core/full_node/stores/config.py +4 -0
- chia/_tests/core/full_node/stores/test_block_store.py +590 -0
- chia/_tests/core/full_node/stores/test_coin_store.py +897 -0
- chia/_tests/core/full_node/stores/test_full_node_store.py +1219 -0
- chia/_tests/core/full_node/stores/test_hint_store.py +229 -0
- chia/_tests/core/full_node/stores/test_sync_store.py +135 -0
- chia/_tests/core/full_node/test_address_manager.py +588 -0
- chia/_tests/core/full_node/test_block_height_map.py +556 -0
- chia/_tests/core/full_node/test_conditions.py +556 -0
- chia/_tests/core/full_node/test_full_node.py +2700 -0
- chia/_tests/core/full_node/test_generator_tools.py +82 -0
- chia/_tests/core/full_node/test_hint_management.py +104 -0
- chia/_tests/core/full_node/test_node_load.py +34 -0
- chia/_tests/core/full_node/test_performance.py +179 -0
- chia/_tests/core/full_node/test_subscriptions.py +492 -0
- chia/_tests/core/full_node/test_transactions.py +203 -0
- chia/_tests/core/full_node/test_tx_processing_queue.py +155 -0
- chia/_tests/core/large_block.py +2388 -0
- chia/_tests/core/make_block_generator.py +70 -0
- chia/_tests/core/mempool/__init__.py +0 -0
- chia/_tests/core/mempool/config.py +4 -0
- chia/_tests/core/mempool/test_mempool.py +3255 -0
- chia/_tests/core/mempool/test_mempool_fee_estimator.py +104 -0
- chia/_tests/core/mempool/test_mempool_fee_protocol.py +55 -0
- chia/_tests/core/mempool/test_mempool_item_queries.py +190 -0
- chia/_tests/core/mempool/test_mempool_manager.py +2084 -0
- chia/_tests/core/mempool/test_mempool_performance.py +64 -0
- chia/_tests/core/mempool/test_singleton_fast_forward.py +567 -0
- chia/_tests/core/node_height.py +28 -0
- chia/_tests/core/server/__init__.py +0 -0
- chia/_tests/core/server/config.py +3 -0
- chia/_tests/core/server/flood.py +84 -0
- chia/_tests/core/server/serve.py +135 -0
- chia/_tests/core/server/test_api_protocol.py +21 -0
- chia/_tests/core/server/test_capabilities.py +66 -0
- chia/_tests/core/server/test_dos.py +319 -0
- chia/_tests/core/server/test_event_loop.py +109 -0
- chia/_tests/core/server/test_loop.py +294 -0
- chia/_tests/core/server/test_node_discovery.py +73 -0
- chia/_tests/core/server/test_rate_limits.py +482 -0
- chia/_tests/core/server/test_server.py +226 -0
- chia/_tests/core/server/test_upnp.py +8 -0
- chia/_tests/core/services/__init__.py +0 -0
- chia/_tests/core/services/config.py +3 -0
- chia/_tests/core/services/test_services.py +188 -0
- chia/_tests/core/ssl/__init__.py +0 -0
- chia/_tests/core/ssl/config.py +3 -0
- chia/_tests/core/ssl/test_ssl.py +202 -0
- chia/_tests/core/test_coins.py +33 -0
- chia/_tests/core/test_cost_calculation.py +313 -0
- chia/_tests/core/test_crawler.py +175 -0
- chia/_tests/core/test_crawler_rpc.py +53 -0
- chia/_tests/core/test_daemon_rpc.py +24 -0
- chia/_tests/core/test_db_conversion.py +130 -0
- chia/_tests/core/test_db_validation.py +162 -0
- chia/_tests/core/test_farmer_harvester_rpc.py +505 -0
- chia/_tests/core/test_filter.py +35 -0
- chia/_tests/core/test_full_node_rpc.py +768 -0
- chia/_tests/core/test_merkle_set.py +343 -0
- chia/_tests/core/test_program.py +47 -0
- chia/_tests/core/test_rpc_util.py +86 -0
- chia/_tests/core/test_seeder.py +420 -0
- chia/_tests/core/test_setproctitle.py +13 -0
- chia/_tests/core/util/__init__.py +0 -0
- chia/_tests/core/util/config.py +4 -0
- chia/_tests/core/util/test_block_cache.py +44 -0
- chia/_tests/core/util/test_cached_bls.py +57 -0
- chia/_tests/core/util/test_config.py +337 -0
- chia/_tests/core/util/test_file_keyring_synchronization.py +105 -0
- chia/_tests/core/util/test_files.py +391 -0
- chia/_tests/core/util/test_jsonify.py +146 -0
- chia/_tests/core/util/test_keychain.py +522 -0
- chia/_tests/core/util/test_keyring_wrapper.py +491 -0
- chia/_tests/core/util/test_lockfile.py +380 -0
- chia/_tests/core/util/test_log_exceptions.py +187 -0
- chia/_tests/core/util/test_lru_cache.py +56 -0
- chia/_tests/core/util/test_significant_bits.py +40 -0
- chia/_tests/core/util/test_streamable.py +883 -0
- chia/_tests/db/__init__.py +0 -0
- chia/_tests/db/test_db_wrapper.py +566 -0
- chia/_tests/environments/__init__.py +0 -0
- chia/_tests/environments/common.py +35 -0
- chia/_tests/environments/full_node.py +47 -0
- chia/_tests/environments/wallet.py +429 -0
- chia/_tests/ether.py +19 -0
- chia/_tests/farmer_harvester/__init__.py +0 -0
- chia/_tests/farmer_harvester/config.py +3 -0
- chia/_tests/farmer_harvester/test_farmer.py +1264 -0
- chia/_tests/farmer_harvester/test_farmer_harvester.py +292 -0
- chia/_tests/farmer_harvester/test_filter_prefix_bits.py +131 -0
- chia/_tests/farmer_harvester/test_third_party_harvesters.py +528 -0
- chia/_tests/farmer_harvester/test_third_party_harvesters_data.json +29 -0
- chia/_tests/fee_estimation/__init__.py +0 -0
- chia/_tests/fee_estimation/config.py +3 -0
- chia/_tests/fee_estimation/test_fee_estimation_integration.py +262 -0
- chia/_tests/fee_estimation/test_fee_estimation_rpc.py +287 -0
- chia/_tests/fee_estimation/test_fee_estimation_unit_tests.py +144 -0
- chia/_tests/fee_estimation/test_mempoolitem_height_added.py +146 -0
- chia/_tests/generator/__init__.py +0 -0
- chia/_tests/generator/puzzles/__init__.py +0 -0
- chia/_tests/generator/puzzles/test_generator_deserialize.clsp +3 -0
- chia/_tests/generator/puzzles/test_generator_deserialize.clsp.hex +1 -0
- chia/_tests/generator/puzzles/test_multiple_generator_input_arguments.clsp +19 -0
- chia/_tests/generator/puzzles/test_multiple_generator_input_arguments.clsp.hex +1 -0
- chia/_tests/generator/test_compression.py +201 -0
- chia/_tests/generator/test_generator_types.py +44 -0
- chia/_tests/generator/test_rom.py +180 -0
- chia/_tests/plot_sync/__init__.py +0 -0
- chia/_tests/plot_sync/config.py +3 -0
- chia/_tests/plot_sync/test_delta.py +101 -0
- chia/_tests/plot_sync/test_plot_sync.py +618 -0
- chia/_tests/plot_sync/test_receiver.py +451 -0
- chia/_tests/plot_sync/test_sender.py +116 -0
- chia/_tests/plot_sync/test_sync_simulated.py +451 -0
- chia/_tests/plot_sync/util.py +68 -0
- chia/_tests/plotting/__init__.py +0 -0
- chia/_tests/plotting/config.py +3 -0
- chia/_tests/plotting/test_plot_manager.py +781 -0
- chia/_tests/plotting/util.py +12 -0
- chia/_tests/pools/__init__.py +0 -0
- chia/_tests/pools/config.py +5 -0
- chia/_tests/pools/test_pool_cli_parsing.py +128 -0
- chia/_tests/pools/test_pool_cmdline.py +1001 -0
- chia/_tests/pools/test_pool_config.py +42 -0
- chia/_tests/pools/test_pool_puzzles_lifecycle.py +397 -0
- chia/_tests/pools/test_pool_rpc.py +1123 -0
- chia/_tests/pools/test_pool_wallet.py +205 -0
- chia/_tests/pools/test_wallet_pool_store.py +161 -0
- chia/_tests/process_junit.py +348 -0
- chia/_tests/rpc/__init__.py +0 -0
- chia/_tests/rpc/test_rpc_client.py +138 -0
- chia/_tests/rpc/test_rpc_server.py +183 -0
- chia/_tests/simulation/__init__.py +0 -0
- chia/_tests/simulation/config.py +6 -0
- chia/_tests/simulation/test_simulation.py +501 -0
- chia/_tests/simulation/test_simulator.py +232 -0
- chia/_tests/simulation/test_start_simulator.py +107 -0
- chia/_tests/testconfig.py +13 -0
- chia/_tests/timelord/__init__.py +0 -0
- chia/_tests/timelord/config.py +3 -0
- chia/_tests/timelord/test_new_peak.py +437 -0
- chia/_tests/timelord/test_timelord.py +11 -0
- chia/_tests/tools/1315537.json +170 -0
- chia/_tests/tools/1315544.json +160 -0
- chia/_tests/tools/1315630.json +150 -0
- chia/_tests/tools/300000.json +105 -0
- chia/_tests/tools/442734.json +140 -0
- chia/_tests/tools/466212.json +130 -0
- chia/_tests/tools/__init__.py +0 -0
- chia/_tests/tools/config.py +5 -0
- chia/_tests/tools/test-blockchain-db.sqlite +0 -0
- chia/_tests/tools/test_full_sync.py +30 -0
- chia/_tests/tools/test_legacy_keyring.py +82 -0
- chia/_tests/tools/test_run_block.py +128 -0
- chia/_tests/tools/test_virtual_project.py +591 -0
- chia/_tests/util/__init__.py +0 -0
- chia/_tests/util/benchmark_cost.py +170 -0
- chia/_tests/util/benchmarks.py +153 -0
- chia/_tests/util/bip39_test_vectors.json +148 -0
- chia/_tests/util/blockchain.py +134 -0
- chia/_tests/util/blockchain_mock.py +132 -0
- chia/_tests/util/build_network_protocol_files.py +302 -0
- chia/_tests/util/clvm_generator.bin +0 -0
- chia/_tests/util/config.py +3 -0
- chia/_tests/util/constants.py +20 -0
- chia/_tests/util/db_connection.py +37 -0
- chia/_tests/util/full_sync.py +253 -0
- chia/_tests/util/gen_ssl_certs.py +114 -0
- chia/_tests/util/generator_tools_testing.py +45 -0
- chia/_tests/util/get_name_puzzle_conditions.py +52 -0
- chia/_tests/util/key_tool.py +36 -0
- chia/_tests/util/misc.py +675 -0
- chia/_tests/util/network_protocol_data.py +1072 -0
- chia/_tests/util/protocol_messages_bytes-v1.0 +0 -0
- chia/_tests/util/protocol_messages_json.py +2701 -0
- chia/_tests/util/rpc.py +26 -0
- chia/_tests/util/run_block.py +163 -0
- chia/_tests/util/setup_nodes.py +481 -0
- chia/_tests/util/spend_sim.py +492 -0
- chia/_tests/util/split_managers.py +102 -0
- chia/_tests/util/temp_file.py +14 -0
- chia/_tests/util/test_action_scope.py +144 -0
- chia/_tests/util/test_async_pool.py +366 -0
- chia/_tests/util/test_build_job_matrix.py +42 -0
- chia/_tests/util/test_build_network_protocol_files.py +7 -0
- chia/_tests/util/test_chia_version.py +50 -0
- chia/_tests/util/test_collection.py +11 -0
- chia/_tests/util/test_condition_tools.py +229 -0
- chia/_tests/util/test_config.py +426 -0
- chia/_tests/util/test_dump_keyring.py +60 -0
- chia/_tests/util/test_errors.py +10 -0
- chia/_tests/util/test_full_block_utils.py +279 -0
- chia/_tests/util/test_installed.py +20 -0
- chia/_tests/util/test_limited_semaphore.py +53 -0
- chia/_tests/util/test_logging_filter.py +42 -0
- chia/_tests/util/test_misc.py +445 -0
- chia/_tests/util/test_network.py +73 -0
- chia/_tests/util/test_network_protocol_files.py +578 -0
- chia/_tests/util/test_network_protocol_json.py +267 -0
- chia/_tests/util/test_network_protocol_test.py +256 -0
- chia/_tests/util/test_paginator.py +71 -0
- chia/_tests/util/test_pprint.py +17 -0
- chia/_tests/util/test_priority_mutex.py +488 -0
- chia/_tests/util/test_recursive_replace.py +116 -0
- chia/_tests/util/test_replace_str_to_bytes.py +137 -0
- chia/_tests/util/test_service_groups.py +15 -0
- chia/_tests/util/test_ssl_check.py +31 -0
- chia/_tests/util/test_testnet_overrides.py +19 -0
- chia/_tests/util/test_tests_misc.py +38 -0
- chia/_tests/util/test_timing.py +37 -0
- chia/_tests/util/test_trusted_peer.py +51 -0
- chia/_tests/util/time_out_assert.py +191 -0
- chia/_tests/wallet/__init__.py +0 -0
- chia/_tests/wallet/cat_wallet/__init__.py +0 -0
- chia/_tests/wallet/cat_wallet/config.py +4 -0
- chia/_tests/wallet/cat_wallet/test_cat_lifecycle.py +468 -0
- chia/_tests/wallet/cat_wallet/test_cat_outer_puzzle.py +69 -0
- chia/_tests/wallet/cat_wallet/test_cat_wallet.py +1826 -0
- chia/_tests/wallet/cat_wallet/test_offer_lifecycle.py +291 -0
- chia/_tests/wallet/cat_wallet/test_trades.py +2600 -0
- chia/_tests/wallet/clawback/__init__.py +0 -0
- chia/_tests/wallet/clawback/config.py +3 -0
- chia/_tests/wallet/clawback/test_clawback_decorator.py +78 -0
- chia/_tests/wallet/clawback/test_clawback_lifecycle.py +292 -0
- chia/_tests/wallet/clawback/test_clawback_metadata.py +50 -0
- chia/_tests/wallet/config.py +4 -0
- chia/_tests/wallet/conftest.py +278 -0
- chia/_tests/wallet/dao_wallet/__init__.py +0 -0
- chia/_tests/wallet/dao_wallet/config.py +3 -0
- chia/_tests/wallet/dao_wallet/test_dao_clvm.py +1330 -0
- chia/_tests/wallet/dao_wallet/test_dao_wallets.py +3488 -0
- chia/_tests/wallet/db_wallet/__init__.py +0 -0
- chia/_tests/wallet/db_wallet/config.py +3 -0
- chia/_tests/wallet/db_wallet/test_db_graftroot.py +141 -0
- chia/_tests/wallet/db_wallet/test_dl_offers.py +491 -0
- chia/_tests/wallet/db_wallet/test_dl_wallet.py +823 -0
- chia/_tests/wallet/did_wallet/__init__.py +0 -0
- chia/_tests/wallet/did_wallet/config.py +4 -0
- chia/_tests/wallet/did_wallet/test_did.py +2284 -0
- chia/_tests/wallet/nft_wallet/__init__.py +0 -0
- chia/_tests/wallet/nft_wallet/config.py +4 -0
- chia/_tests/wallet/nft_wallet/test_nft_1_offers.py +1493 -0
- chia/_tests/wallet/nft_wallet/test_nft_bulk_mint.py +1024 -0
- chia/_tests/wallet/nft_wallet/test_nft_lifecycle.py +375 -0
- chia/_tests/wallet/nft_wallet/test_nft_offers.py +1209 -0
- chia/_tests/wallet/nft_wallet/test_nft_puzzles.py +172 -0
- chia/_tests/wallet/nft_wallet/test_nft_wallet.py +2584 -0
- chia/_tests/wallet/nft_wallet/test_ownership_outer_puzzle.py +70 -0
- chia/_tests/wallet/rpc/__init__.py +0 -0
- chia/_tests/wallet/rpc/config.py +4 -0
- chia/_tests/wallet/rpc/test_dl_wallet_rpc.py +285 -0
- chia/_tests/wallet/rpc/test_wallet_rpc.py +3153 -0
- chia/_tests/wallet/simple_sync/__init__.py +0 -0
- chia/_tests/wallet/simple_sync/config.py +3 -0
- chia/_tests/wallet/simple_sync/test_simple_sync_protocol.py +718 -0
- chia/_tests/wallet/sync/__init__.py +0 -0
- chia/_tests/wallet/sync/config.py +4 -0
- chia/_tests/wallet/sync/test_wallet_sync.py +1692 -0
- chia/_tests/wallet/test_address_type.py +189 -0
- chia/_tests/wallet/test_bech32m.py +45 -0
- chia/_tests/wallet/test_clvm_streamable.py +244 -0
- chia/_tests/wallet/test_coin_management.py +354 -0
- chia/_tests/wallet/test_coin_selection.py +588 -0
- chia/_tests/wallet/test_conditions.py +400 -0
- chia/_tests/wallet/test_debug_spend_bundle.py +218 -0
- chia/_tests/wallet/test_new_wallet_protocol.py +1174 -0
- chia/_tests/wallet/test_nft_store.py +192 -0
- chia/_tests/wallet/test_notifications.py +196 -0
- chia/_tests/wallet/test_offer_parsing_performance.py +48 -0
- chia/_tests/wallet/test_puzzle_store.py +132 -0
- chia/_tests/wallet/test_sign_coin_spends.py +159 -0
- chia/_tests/wallet/test_signer_protocol.py +947 -0
- chia/_tests/wallet/test_singleton.py +122 -0
- chia/_tests/wallet/test_singleton_lifecycle_fast.py +772 -0
- chia/_tests/wallet/test_singleton_store.py +152 -0
- chia/_tests/wallet/test_taproot.py +19 -0
- chia/_tests/wallet/test_transaction_store.py +945 -0
- chia/_tests/wallet/test_util.py +185 -0
- chia/_tests/wallet/test_wallet.py +2139 -0
- chia/_tests/wallet/test_wallet_action_scope.py +85 -0
- chia/_tests/wallet/test_wallet_blockchain.py +111 -0
- chia/_tests/wallet/test_wallet_coin_store.py +1002 -0
- chia/_tests/wallet/test_wallet_interested_store.py +43 -0
- chia/_tests/wallet/test_wallet_key_val_store.py +40 -0
- chia/_tests/wallet/test_wallet_node.py +780 -0
- chia/_tests/wallet/test_wallet_retry.py +95 -0
- chia/_tests/wallet/test_wallet_state_manager.py +259 -0
- chia/_tests/wallet/test_wallet_test_framework.py +275 -0
- chia/_tests/wallet/test_wallet_trade_store.py +218 -0
- chia/_tests/wallet/test_wallet_user_store.py +34 -0
- chia/_tests/wallet/test_wallet_utils.py +156 -0
- chia/_tests/wallet/vc_wallet/__init__.py +0 -0
- chia/_tests/wallet/vc_wallet/config.py +3 -0
- chia/_tests/wallet/vc_wallet/test_cr_outer_puzzle.py +70 -0
- chia/_tests/wallet/vc_wallet/test_vc_lifecycle.py +883 -0
- chia/_tests/wallet/vc_wallet/test_vc_wallet.py +830 -0
- chia/_tests/wallet/wallet_block_tools.py +327 -0
- chia/_tests/weight_proof/__init__.py +0 -0
- chia/_tests/weight_proof/config.py +3 -0
- chia/_tests/weight_proof/test_weight_proof.py +528 -0
- chia/apis.py +19 -0
- chia/clvm/__init__.py +0 -0
- chia/cmds/__init__.py +0 -0
- chia/cmds/beta.py +184 -0
- chia/cmds/beta_funcs.py +137 -0
- chia/cmds/check_wallet_db.py +420 -0
- chia/cmds/chia.py +151 -0
- chia/cmds/cmd_classes.py +323 -0
- chia/cmds/cmd_helpers.py +242 -0
- chia/cmds/cmds_util.py +488 -0
- chia/cmds/coin_funcs.py +275 -0
- chia/cmds/coins.py +182 -0
- chia/cmds/completion.py +49 -0
- chia/cmds/configure.py +332 -0
- chia/cmds/dao.py +1064 -0
- chia/cmds/dao_funcs.py +598 -0
- chia/cmds/data.py +708 -0
- chia/cmds/data_funcs.py +385 -0
- chia/cmds/db.py +87 -0
- chia/cmds/db_backup_func.py +77 -0
- chia/cmds/db_upgrade_func.py +452 -0
- chia/cmds/db_validate_func.py +184 -0
- chia/cmds/dev.py +18 -0
- chia/cmds/farm.py +100 -0
- chia/cmds/farm_funcs.py +200 -0
- chia/cmds/gh.py +275 -0
- chia/cmds/init.py +63 -0
- chia/cmds/init_funcs.py +367 -0
- chia/cmds/installers.py +131 -0
- chia/cmds/keys.py +527 -0
- chia/cmds/keys_funcs.py +863 -0
- chia/cmds/netspace.py +50 -0
- chia/cmds/netspace_funcs.py +54 -0
- chia/cmds/options.py +32 -0
- chia/cmds/param_types.py +238 -0
- chia/cmds/passphrase.py +131 -0
- chia/cmds/passphrase_funcs.py +292 -0
- chia/cmds/peer.py +51 -0
- chia/cmds/peer_funcs.py +129 -0
- chia/cmds/plotnft.py +260 -0
- chia/cmds/plotnft_funcs.py +405 -0
- chia/cmds/plots.py +230 -0
- chia/cmds/plotters.py +18 -0
- chia/cmds/rpc.py +208 -0
- chia/cmds/show.py +72 -0
- chia/cmds/show_funcs.py +215 -0
- chia/cmds/signer.py +296 -0
- chia/cmds/sim.py +225 -0
- chia/cmds/sim_funcs.py +509 -0
- chia/cmds/start.py +24 -0
- chia/cmds/start_funcs.py +109 -0
- chia/cmds/stop.py +62 -0
- chia/cmds/units.py +9 -0
- chia/cmds/wallet.py +1901 -0
- chia/cmds/wallet_funcs.py +1874 -0
- chia/consensus/__init__.py +0 -0
- chia/consensus/block_body_validation.py +562 -0
- chia/consensus/block_creation.py +546 -0
- chia/consensus/block_header_validation.py +1059 -0
- chia/consensus/block_record.py +31 -0
- chia/consensus/block_rewards.py +53 -0
- chia/consensus/blockchain.py +1087 -0
- chia/consensus/blockchain_interface.py +56 -0
- chia/consensus/coinbase.py +30 -0
- chia/consensus/condition_costs.py +9 -0
- chia/consensus/constants.py +49 -0
- chia/consensus/cost_calculator.py +15 -0
- chia/consensus/default_constants.py +89 -0
- chia/consensus/deficit.py +55 -0
- chia/consensus/difficulty_adjustment.py +412 -0
- chia/consensus/find_fork_point.py +111 -0
- chia/consensus/full_block_to_block_record.py +167 -0
- chia/consensus/get_block_challenge.py +106 -0
- chia/consensus/get_block_generator.py +27 -0
- chia/consensus/make_sub_epoch_summary.py +210 -0
- chia/consensus/multiprocess_validation.py +268 -0
- chia/consensus/pos_quality.py +19 -0
- chia/consensus/pot_iterations.py +67 -0
- chia/consensus/puzzles/__init__.py +0 -0
- chia/consensus/puzzles/chialisp_deserialisation.clsp +69 -0
- chia/consensus/puzzles/chialisp_deserialisation.clsp.hex +1 -0
- chia/consensus/puzzles/rom_bootstrap_generator.clsp +37 -0
- chia/consensus/puzzles/rom_bootstrap_generator.clsp.hex +1 -0
- chia/consensus/vdf_info_computation.py +156 -0
- chia/daemon/__init__.py +0 -0
- chia/daemon/client.py +252 -0
- chia/daemon/keychain_proxy.py +502 -0
- chia/daemon/keychain_server.py +365 -0
- chia/daemon/server.py +1606 -0
- chia/daemon/windows_signal.py +56 -0
- chia/data_layer/__init__.py +0 -0
- chia/data_layer/data_layer.py +1291 -0
- chia/data_layer/data_layer_api.py +33 -0
- chia/data_layer/data_layer_errors.py +50 -0
- chia/data_layer/data_layer_server.py +170 -0
- chia/data_layer/data_layer_util.py +985 -0
- chia/data_layer/data_layer_wallet.py +1311 -0
- chia/data_layer/data_store.py +2267 -0
- chia/data_layer/dl_wallet_store.py +407 -0
- chia/data_layer/download_data.py +389 -0
- chia/data_layer/puzzles/__init__.py +0 -0
- chia/data_layer/puzzles/graftroot_dl_offers.clsp +100 -0
- chia/data_layer/puzzles/graftroot_dl_offers.clsp.hex +1 -0
- chia/data_layer/s3_plugin_config.yml +33 -0
- chia/data_layer/s3_plugin_service.py +468 -0
- chia/data_layer/util/__init__.py +0 -0
- chia/data_layer/util/benchmark.py +107 -0
- chia/data_layer/util/plugin.py +40 -0
- chia/farmer/__init__.py +0 -0
- chia/farmer/farmer.py +923 -0
- chia/farmer/farmer_api.py +820 -0
- chia/full_node/__init__.py +0 -0
- chia/full_node/bitcoin_fee_estimator.py +85 -0
- chia/full_node/block_height_map.py +271 -0
- chia/full_node/block_store.py +576 -0
- chia/full_node/bundle_tools.py +19 -0
- chia/full_node/coin_store.py +647 -0
- chia/full_node/fee_estimate.py +54 -0
- chia/full_node/fee_estimate_store.py +24 -0
- chia/full_node/fee_estimation.py +92 -0
- chia/full_node/fee_estimator.py +90 -0
- chia/full_node/fee_estimator_constants.py +38 -0
- chia/full_node/fee_estimator_interface.py +42 -0
- chia/full_node/fee_history.py +25 -0
- chia/full_node/fee_tracker.py +564 -0
- chia/full_node/full_node.py +3327 -0
- chia/full_node/full_node_api.py +2025 -0
- chia/full_node/full_node_store.py +1033 -0
- chia/full_node/hint_management.py +56 -0
- chia/full_node/hint_store.py +93 -0
- chia/full_node/mempool.py +589 -0
- chia/full_node/mempool_check_conditions.py +146 -0
- chia/full_node/mempool_manager.py +853 -0
- chia/full_node/pending_tx_cache.py +112 -0
- chia/full_node/puzzles/__init__.py +0 -0
- chia/full_node/puzzles/block_program_zero.clsp +14 -0
- chia/full_node/puzzles/block_program_zero.clsp.hex +1 -0
- chia/full_node/puzzles/decompress_coin_spend_entry.clsp +5 -0
- chia/full_node/puzzles/decompress_coin_spend_entry.clsp.hex +1 -0
- chia/full_node/puzzles/decompress_coin_spend_entry_with_prefix.clsp +7 -0
- chia/full_node/puzzles/decompress_coin_spend_entry_with_prefix.clsp.hex +1 -0
- chia/full_node/puzzles/decompress_puzzle.clsp +6 -0
- chia/full_node/puzzles/decompress_puzzle.clsp.hex +1 -0
- chia/full_node/signage_point.py +16 -0
- chia/full_node/subscriptions.py +247 -0
- chia/full_node/sync_store.py +146 -0
- chia/full_node/tx_processing_queue.py +78 -0
- chia/full_node/util/__init__.py +0 -0
- chia/full_node/weight_proof.py +1720 -0
- chia/harvester/__init__.py +0 -0
- chia/harvester/harvester.py +272 -0
- chia/harvester/harvester_api.py +380 -0
- chia/introducer/__init__.py +0 -0
- chia/introducer/introducer.py +122 -0
- chia/introducer/introducer_api.py +70 -0
- chia/legacy/__init__.py +0 -0
- chia/legacy/keyring.py +155 -0
- chia/plot_sync/__init__.py +0 -0
- chia/plot_sync/delta.py +61 -0
- chia/plot_sync/exceptions.py +56 -0
- chia/plot_sync/receiver.py +386 -0
- chia/plot_sync/sender.py +340 -0
- chia/plot_sync/util.py +43 -0
- chia/plotters/__init__.py +0 -0
- chia/plotters/bladebit.py +388 -0
- chia/plotters/chiapos.py +63 -0
- chia/plotters/madmax.py +224 -0
- chia/plotters/plotters.py +577 -0
- chia/plotters/plotters_util.py +133 -0
- chia/plotting/__init__.py +0 -0
- chia/plotting/cache.py +213 -0
- chia/plotting/check_plots.py +283 -0
- chia/plotting/create_plots.py +278 -0
- chia/plotting/manager.py +436 -0
- chia/plotting/util.py +336 -0
- chia/pools/__init__.py +0 -0
- chia/pools/pool_config.py +110 -0
- chia/pools/pool_puzzles.py +459 -0
- chia/pools/pool_wallet.py +933 -0
- chia/pools/pool_wallet_info.py +118 -0
- chia/pools/puzzles/__init__.py +0 -0
- chia/pools/puzzles/pool_member_innerpuz.clsp +70 -0
- chia/pools/puzzles/pool_member_innerpuz.clsp.hex +1 -0
- chia/pools/puzzles/pool_waitingroom_innerpuz.clsp +69 -0
- chia/pools/puzzles/pool_waitingroom_innerpuz.clsp.hex +1 -0
- chia/protocols/__init__.py +0 -0
- chia/protocols/farmer_protocol.py +102 -0
- chia/protocols/full_node_protocol.py +219 -0
- chia/protocols/harvester_protocol.py +216 -0
- chia/protocols/introducer_protocol.py +25 -0
- chia/protocols/pool_protocol.py +177 -0
- chia/protocols/protocol_message_types.py +139 -0
- chia/protocols/protocol_state_machine.py +87 -0
- chia/protocols/protocol_timing.py +8 -0
- chia/protocols/shared_protocol.py +86 -0
- chia/protocols/timelord_protocol.py +93 -0
- chia/protocols/wallet_protocol.py +401 -0
- chia/py.typed +0 -0
- chia/rpc/__init__.py +0 -0
- chia/rpc/crawler_rpc_api.py +80 -0
- chia/rpc/data_layer_rpc_api.py +644 -0
- chia/rpc/data_layer_rpc_client.py +188 -0
- chia/rpc/data_layer_rpc_util.py +58 -0
- chia/rpc/farmer_rpc_api.py +365 -0
- chia/rpc/farmer_rpc_client.py +86 -0
- chia/rpc/full_node_rpc_api.py +959 -0
- chia/rpc/full_node_rpc_client.py +292 -0
- chia/rpc/harvester_rpc_api.py +141 -0
- chia/rpc/harvester_rpc_client.py +54 -0
- chia/rpc/rpc_client.py +164 -0
- chia/rpc/rpc_server.py +521 -0
- chia/rpc/timelord_rpc_api.py +32 -0
- chia/rpc/util.py +93 -0
- chia/rpc/wallet_request_types.py +904 -0
- chia/rpc/wallet_rpc_api.py +4943 -0
- chia/rpc/wallet_rpc_client.py +1814 -0
- chia/seeder/__init__.py +0 -0
- chia/seeder/crawl_store.py +425 -0
- chia/seeder/crawler.py +410 -0
- chia/seeder/crawler_api.py +135 -0
- chia/seeder/dns_server.py +593 -0
- chia/seeder/peer_record.py +146 -0
- chia/seeder/start_crawler.py +92 -0
- chia/server/__init__.py +0 -0
- chia/server/address_manager.py +658 -0
- chia/server/address_manager_store.py +237 -0
- chia/server/api_protocol.py +116 -0
- chia/server/capabilities.py +24 -0
- chia/server/chia_policy.py +346 -0
- chia/server/introducer_peers.py +76 -0
- chia/server/node_discovery.py +714 -0
- chia/server/outbound_message.py +33 -0
- chia/server/rate_limit_numbers.py +214 -0
- chia/server/rate_limits.py +153 -0
- chia/server/server.py +741 -0
- chia/server/signal_handlers.py +120 -0
- chia/server/ssl_context.py +32 -0
- chia/server/start_data_layer.py +151 -0
- chia/server/start_farmer.py +98 -0
- chia/server/start_full_node.py +112 -0
- chia/server/start_harvester.py +93 -0
- chia/server/start_introducer.py +81 -0
- chia/server/start_service.py +316 -0
- chia/server/start_timelord.py +89 -0
- chia/server/start_wallet.py +113 -0
- chia/server/upnp.py +118 -0
- chia/server/ws_connection.py +766 -0
- chia/simulator/__init__.py +0 -0
- chia/simulator/add_blocks_in_batches.py +54 -0
- chia/simulator/block_tools.py +2054 -0
- chia/simulator/full_node_simulator.py +794 -0
- chia/simulator/keyring.py +128 -0
- chia/simulator/setup_services.py +506 -0
- chia/simulator/simulator_constants.py +13 -0
- chia/simulator/simulator_full_node_rpc_api.py +99 -0
- chia/simulator/simulator_full_node_rpc_client.py +60 -0
- chia/simulator/simulator_protocol.py +29 -0
- chia/simulator/simulator_test_tools.py +164 -0
- chia/simulator/socket.py +24 -0
- chia/simulator/ssl_certs.py +114 -0
- chia/simulator/ssl_certs_1.py +697 -0
- chia/simulator/ssl_certs_10.py +697 -0
- chia/simulator/ssl_certs_2.py +697 -0
- chia/simulator/ssl_certs_3.py +697 -0
- chia/simulator/ssl_certs_4.py +697 -0
- chia/simulator/ssl_certs_5.py +697 -0
- chia/simulator/ssl_certs_6.py +697 -0
- chia/simulator/ssl_certs_7.py +697 -0
- chia/simulator/ssl_certs_8.py +697 -0
- chia/simulator/ssl_certs_9.py +697 -0
- chia/simulator/start_simulator.py +143 -0
- chia/simulator/wallet_tools.py +246 -0
- chia/ssl/__init__.py +0 -0
- chia/ssl/chia_ca.crt +19 -0
- chia/ssl/chia_ca.key +28 -0
- chia/ssl/create_ssl.py +249 -0
- chia/ssl/dst_root_ca.pem +20 -0
- chia/timelord/__init__.py +0 -0
- chia/timelord/iters_from_block.py +50 -0
- chia/timelord/timelord.py +1226 -0
- chia/timelord/timelord_api.py +138 -0
- chia/timelord/timelord_launcher.py +190 -0
- chia/timelord/timelord_state.py +244 -0
- chia/timelord/types.py +22 -0
- chia/types/__init__.py +0 -0
- chia/types/aliases.py +35 -0
- chia/types/block_protocol.py +20 -0
- chia/types/blockchain_format/__init__.py +0 -0
- chia/types/blockchain_format/classgroup.py +5 -0
- chia/types/blockchain_format/coin.py +28 -0
- chia/types/blockchain_format/foliage.py +8 -0
- chia/types/blockchain_format/pool_target.py +5 -0
- chia/types/blockchain_format/program.py +269 -0
- chia/types/blockchain_format/proof_of_space.py +135 -0
- chia/types/blockchain_format/reward_chain_block.py +6 -0
- chia/types/blockchain_format/serialized_program.py +5 -0
- chia/types/blockchain_format/sized_bytes.py +11 -0
- chia/types/blockchain_format/slots.py +9 -0
- chia/types/blockchain_format/sub_epoch_summary.py +5 -0
- chia/types/blockchain_format/tree_hash.py +72 -0
- chia/types/blockchain_format/vdf.py +86 -0
- chia/types/clvm_cost.py +13 -0
- chia/types/coin_record.py +43 -0
- chia/types/coin_spend.py +115 -0
- chia/types/condition_opcodes.py +73 -0
- chia/types/condition_with_args.py +16 -0
- chia/types/eligible_coin_spends.py +365 -0
- chia/types/end_of_slot_bundle.py +5 -0
- chia/types/fee_rate.py +38 -0
- chia/types/full_block.py +5 -0
- chia/types/generator_types.py +13 -0
- chia/types/header_block.py +5 -0
- chia/types/internal_mempool_item.py +18 -0
- chia/types/mempool_inclusion_status.py +9 -0
- chia/types/mempool_item.py +85 -0
- chia/types/mempool_submission_status.py +30 -0
- chia/types/mojos.py +7 -0
- chia/types/peer_info.py +64 -0
- chia/types/signing_mode.py +29 -0
- chia/types/spend_bundle.py +30 -0
- chia/types/spend_bundle_conditions.py +7 -0
- chia/types/transaction_queue_entry.py +55 -0
- chia/types/unfinished_block.py +5 -0
- chia/types/unfinished_header_block.py +37 -0
- chia/types/validation_state.py +14 -0
- chia/types/weight_proof.py +49 -0
- chia/util/__init__.py +0 -0
- chia/util/action_scope.py +168 -0
- chia/util/async_pool.py +226 -0
- chia/util/augmented_chain.py +134 -0
- chia/util/batches.py +42 -0
- chia/util/bech32m.py +126 -0
- chia/util/beta_metrics.py +119 -0
- chia/util/block_cache.py +56 -0
- chia/util/byte_types.py +12 -0
- chia/util/check_fork_next_block.py +33 -0
- chia/util/chia_logging.py +144 -0
- chia/util/chia_version.py +33 -0
- chia/util/collection.py +17 -0
- chia/util/condition_tools.py +201 -0
- chia/util/config.py +367 -0
- chia/util/cpu.py +22 -0
- chia/util/db_synchronous.py +23 -0
- chia/util/db_version.py +32 -0
- chia/util/db_wrapper.py +430 -0
- chia/util/default_root.py +27 -0
- chia/util/dump_keyring.py +93 -0
- chia/util/english.txt +2048 -0
- chia/util/errors.py +353 -0
- chia/util/file_keyring.py +469 -0
- chia/util/files.py +97 -0
- chia/util/full_block_utils.py +345 -0
- chia/util/generator_tools.py +72 -0
- chia/util/hash.py +31 -0
- chia/util/initial-config.yaml +694 -0
- chia/util/inline_executor.py +26 -0
- chia/util/ints.py +19 -0
- chia/util/ip_address.py +39 -0
- chia/util/json_util.py +37 -0
- chia/util/keychain.py +676 -0
- chia/util/keyring_wrapper.py +327 -0
- chia/util/limited_semaphore.py +41 -0
- chia/util/lock.py +49 -0
- chia/util/log_exceptions.py +32 -0
- chia/util/logging.py +36 -0
- chia/util/lru_cache.py +31 -0
- chia/util/math.py +20 -0
- chia/util/network.py +182 -0
- chia/util/paginator.py +48 -0
- chia/util/path.py +31 -0
- chia/util/permissions.py +20 -0
- chia/util/prev_transaction_block.py +21 -0
- chia/util/priority_mutex.py +95 -0
- chia/util/profiler.py +197 -0
- chia/util/recursive_replace.py +24 -0
- chia/util/safe_cancel_task.py +16 -0
- chia/util/service_groups.py +47 -0
- chia/util/setproctitle.py +22 -0
- chia/util/significant_bits.py +32 -0
- chia/util/ssl_check.py +213 -0
- chia/util/streamable.py +642 -0
- chia/util/task_referencer.py +59 -0
- chia/util/task_timing.py +382 -0
- chia/util/timing.py +67 -0
- chia/util/vdf_prover.py +30 -0
- chia/util/virtual_project_analysis.py +540 -0
- chia/util/ws_message.py +66 -0
- chia/wallet/__init__.py +0 -0
- chia/wallet/cat_wallet/__init__.py +0 -0
- chia/wallet/cat_wallet/cat_constants.py +75 -0
- chia/wallet/cat_wallet/cat_info.py +47 -0
- chia/wallet/cat_wallet/cat_outer_puzzle.py +120 -0
- chia/wallet/cat_wallet/cat_utils.py +164 -0
- chia/wallet/cat_wallet/cat_wallet.py +855 -0
- chia/wallet/cat_wallet/dao_cat_info.py +28 -0
- chia/wallet/cat_wallet/dao_cat_wallet.py +669 -0
- chia/wallet/cat_wallet/lineage_store.py +74 -0
- chia/wallet/cat_wallet/puzzles/__init__.py +0 -0
- chia/wallet/cat_wallet/puzzles/cat_truths.clib +31 -0
- chia/wallet/cat_wallet/puzzles/cat_v2.clsp +397 -0
- chia/wallet/cat_wallet/puzzles/cat_v2.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/delegated_tail.clsp +25 -0
- chia/wallet/cat_wallet/puzzles/delegated_tail.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/everything_with_signature.clsp +15 -0
- chia/wallet/cat_wallet/puzzles/everything_with_signature.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_coin_id.clsp +26 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_coin_id.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_coin_id_or_singleton.clsp +42 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_coin_id_or_singleton.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_puzzle_hash.clsp +24 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_puzzle_hash.clsp.hex +1 -0
- chia/wallet/coin_selection.py +188 -0
- chia/wallet/conditions.py +1512 -0
- chia/wallet/dao_wallet/__init__.py +0 -0
- chia/wallet/dao_wallet/dao_info.py +61 -0
- chia/wallet/dao_wallet/dao_utils.py +811 -0
- chia/wallet/dao_wallet/dao_wallet.py +2119 -0
- chia/wallet/db_wallet/__init__.py +0 -0
- chia/wallet/db_wallet/db_wallet_puzzles.py +111 -0
- chia/wallet/derivation_record.py +30 -0
- chia/wallet/derive_keys.py +146 -0
- chia/wallet/did_wallet/__init__.py +0 -0
- chia/wallet/did_wallet/did_info.py +39 -0
- chia/wallet/did_wallet/did_wallet.py +1494 -0
- chia/wallet/did_wallet/did_wallet_puzzles.py +221 -0
- chia/wallet/did_wallet/puzzles/__init__.py +0 -0
- chia/wallet/did_wallet/puzzles/did_innerpuz.clsp +135 -0
- chia/wallet/did_wallet/puzzles/did_innerpuz.clsp.hex +1 -0
- chia/wallet/driver_protocol.py +26 -0
- chia/wallet/key_val_store.py +55 -0
- chia/wallet/lineage_proof.py +58 -0
- chia/wallet/nft_wallet/__init__.py +0 -0
- chia/wallet/nft_wallet/metadata_outer_puzzle.py +92 -0
- chia/wallet/nft_wallet/nft_info.py +120 -0
- chia/wallet/nft_wallet/nft_puzzles.py +305 -0
- chia/wallet/nft_wallet/nft_wallet.py +1687 -0
- chia/wallet/nft_wallet/ownership_outer_puzzle.py +101 -0
- chia/wallet/nft_wallet/puzzles/__init__.py +0 -0
- chia/wallet/nft_wallet/puzzles/create_nft_launcher_from_did.clsp +6 -0
- chia/wallet/nft_wallet/puzzles/create_nft_launcher_from_did.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_intermediate_launcher.clsp +6 -0
- chia/wallet/nft_wallet/puzzles/nft_intermediate_launcher.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_metadata_updater_default.clsp +30 -0
- chia/wallet/nft_wallet/puzzles/nft_metadata_updater_default.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_metadata_updater_updateable.clsp +28 -0
- chia/wallet/nft_wallet/puzzles/nft_metadata_updater_updateable.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_ownership_layer.clsp +100 -0
- chia/wallet/nft_wallet/puzzles/nft_ownership_layer.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_ownership_transfer_program_one_way_claim_with_royalties.clsp +78 -0
- chia/wallet/nft_wallet/puzzles/nft_ownership_transfer_program_one_way_claim_with_royalties.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_state_layer.clsp +74 -0
- chia/wallet/nft_wallet/puzzles/nft_state_layer.clsp.hex +1 -0
- chia/wallet/nft_wallet/singleton_outer_puzzle.py +101 -0
- chia/wallet/nft_wallet/transfer_program_puzzle.py +82 -0
- chia/wallet/nft_wallet/uncurry_nft.py +217 -0
- chia/wallet/notification_manager.py +117 -0
- chia/wallet/notification_store.py +178 -0
- chia/wallet/outer_puzzles.py +84 -0
- chia/wallet/payment.py +33 -0
- chia/wallet/puzzle_drivers.py +118 -0
- chia/wallet/puzzles/__init__.py +0 -0
- chia/wallet/puzzles/augmented_condition.clsp +13 -0
- chia/wallet/puzzles/augmented_condition.clsp.hex +1 -0
- chia/wallet/puzzles/clawback/__init__.py +0 -0
- chia/wallet/puzzles/clawback/drivers.py +188 -0
- chia/wallet/puzzles/clawback/metadata.py +38 -0
- chia/wallet/puzzles/clawback/puzzle_decorator.py +67 -0
- chia/wallet/puzzles/condition_codes.clib +77 -0
- chia/wallet/puzzles/curry-and-treehash.clib +102 -0
- chia/wallet/puzzles/curry.clib +135 -0
- chia/wallet/puzzles/curry_by_index.clib +16 -0
- chia/wallet/puzzles/dao_cat_eve.clsp +17 -0
- chia/wallet/puzzles/dao_cat_eve.clsp.hex +1 -0
- chia/wallet/puzzles/dao_cat_launcher.clsp +36 -0
- chia/wallet/puzzles/dao_cat_launcher.clsp.hex +1 -0
- chia/wallet/puzzles/dao_finished_state.clsp +35 -0
- chia/wallet/puzzles/dao_finished_state.clsp.hex +1 -0
- chia/wallet/puzzles/dao_finished_state.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_lockup.clsp +288 -0
- chia/wallet/puzzles/dao_lockup.clsp.hex +1 -0
- chia/wallet/puzzles/dao_lockup.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_proposal.clsp +377 -0
- chia/wallet/puzzles/dao_proposal.clsp.hex +1 -0
- chia/wallet/puzzles/dao_proposal.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_proposal_timer.clsp +78 -0
- chia/wallet/puzzles/dao_proposal_timer.clsp.hex +1 -0
- chia/wallet/puzzles/dao_proposal_timer.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_proposal_validator.clsp +87 -0
- chia/wallet/puzzles/dao_proposal_validator.clsp.hex +1 -0
- chia/wallet/puzzles/dao_proposal_validator.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_spend_p2_singleton_v2.clsp +240 -0
- chia/wallet/puzzles/dao_spend_p2_singleton_v2.clsp.hex +1 -0
- chia/wallet/puzzles/dao_spend_p2_singleton_v2.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_treasury.clsp +115 -0
- chia/wallet/puzzles/dao_treasury.clsp.hex +1 -0
- chia/wallet/puzzles/dao_update_proposal.clsp +44 -0
- chia/wallet/puzzles/dao_update_proposal.clsp.hex +1 -0
- chia/wallet/puzzles/deployed_puzzle_hashes.json +67 -0
- chia/wallet/puzzles/json.clib +25 -0
- chia/wallet/puzzles/load_clvm.py +161 -0
- chia/wallet/puzzles/merkle_utils.clib +18 -0
- chia/wallet/puzzles/notification.clsp +7 -0
- chia/wallet/puzzles/notification.clsp.hex +1 -0
- chia/wallet/puzzles/p2_1_of_n.clsp +22 -0
- chia/wallet/puzzles/p2_1_of_n.clsp.hex +1 -0
- chia/wallet/puzzles/p2_conditions.clsp +3 -0
- chia/wallet/puzzles/p2_conditions.clsp.hex +1 -0
- chia/wallet/puzzles/p2_conditions.py +26 -0
- chia/wallet/puzzles/p2_delegated_conditions.clsp +18 -0
- chia/wallet/puzzles/p2_delegated_conditions.clsp.hex +1 -0
- chia/wallet/puzzles/p2_delegated_conditions.py +21 -0
- chia/wallet/puzzles/p2_delegated_puzzle.clsp +19 -0
- chia/wallet/puzzles/p2_delegated_puzzle.clsp.hex +1 -0
- chia/wallet/puzzles/p2_delegated_puzzle.py +34 -0
- chia/wallet/puzzles/p2_delegated_puzzle_or_hidden_puzzle.clsp +91 -0
- chia/wallet/puzzles/p2_delegated_puzzle_or_hidden_puzzle.clsp.hex +1 -0
- chia/wallet/puzzles/p2_delegated_puzzle_or_hidden_puzzle.py +160 -0
- chia/wallet/puzzles/p2_m_of_n_delegate_direct.clsp +108 -0
- chia/wallet/puzzles/p2_m_of_n_delegate_direct.clsp.hex +1 -0
- chia/wallet/puzzles/p2_m_of_n_delegate_direct.py +21 -0
- chia/wallet/puzzles/p2_parent.clsp +19 -0
- chia/wallet/puzzles/p2_parent.clsp.hex +1 -0
- chia/wallet/puzzles/p2_puzzle_hash.clsp +18 -0
- chia/wallet/puzzles/p2_puzzle_hash.clsp.hex +1 -0
- chia/wallet/puzzles/p2_puzzle_hash.py +27 -0
- chia/wallet/puzzles/p2_singleton.clsp +30 -0
- chia/wallet/puzzles/p2_singleton.clsp.hex +1 -0
- chia/wallet/puzzles/p2_singleton_aggregator.clsp +81 -0
- chia/wallet/puzzles/p2_singleton_aggregator.clsp.hex +1 -0
- chia/wallet/puzzles/p2_singleton_or_delayed_puzhash.clsp +50 -0
- chia/wallet/puzzles/p2_singleton_or_delayed_puzhash.clsp.hex +1 -0
- chia/wallet/puzzles/p2_singleton_via_delegated_puzzle.clsp +47 -0
- chia/wallet/puzzles/p2_singleton_via_delegated_puzzle.clsp.hex +1 -0
- chia/wallet/puzzles/puzzle_utils.py +34 -0
- chia/wallet/puzzles/settlement_payments.clsp +49 -0
- chia/wallet/puzzles/settlement_payments.clsp.hex +1 -0
- chia/wallet/puzzles/sha256tree.clib +11 -0
- chia/wallet/puzzles/singleton_launcher.clsp +16 -0
- chia/wallet/puzzles/singleton_launcher.clsp.hex +1 -0
- chia/wallet/puzzles/singleton_top_layer.clsp +177 -0
- chia/wallet/puzzles/singleton_top_layer.clsp.hex +1 -0
- chia/wallet/puzzles/singleton_top_layer.py +296 -0
- chia/wallet/puzzles/singleton_top_layer_v1_1.clsp +107 -0
- chia/wallet/puzzles/singleton_top_layer_v1_1.clsp.hex +1 -0
- chia/wallet/puzzles/singleton_top_layer_v1_1.py +345 -0
- chia/wallet/puzzles/singleton_truths.clib +21 -0
- chia/wallet/puzzles/tails.py +348 -0
- chia/wallet/puzzles/utility_macros.clib +48 -0
- chia/wallet/signer_protocol.py +125 -0
- chia/wallet/singleton.py +106 -0
- chia/wallet/singleton_record.py +30 -0
- chia/wallet/trade_manager.py +1102 -0
- chia/wallet/trade_record.py +67 -0
- chia/wallet/trading/__init__.py +0 -0
- chia/wallet/trading/offer.py +702 -0
- chia/wallet/trading/trade_status.py +13 -0
- chia/wallet/trading/trade_store.py +526 -0
- chia/wallet/transaction_record.py +158 -0
- chia/wallet/transaction_sorting.py +14 -0
- chia/wallet/uncurried_puzzle.py +17 -0
- chia/wallet/util/__init__.py +0 -0
- chia/wallet/util/address_type.py +55 -0
- chia/wallet/util/blind_signer_tl.py +164 -0
- chia/wallet/util/clvm_streamable.py +203 -0
- chia/wallet/util/compute_hints.py +66 -0
- chia/wallet/util/compute_memos.py +43 -0
- chia/wallet/util/curry_and_treehash.py +91 -0
- chia/wallet/util/debug_spend_bundle.py +232 -0
- chia/wallet/util/merkle_tree.py +100 -0
- chia/wallet/util/merkle_utils.py +102 -0
- chia/wallet/util/new_peak_queue.py +82 -0
- chia/wallet/util/notifications.py +12 -0
- chia/wallet/util/peer_request_cache.py +174 -0
- chia/wallet/util/pprint.py +39 -0
- chia/wallet/util/puzzle_compression.py +95 -0
- chia/wallet/util/puzzle_decorator.py +100 -0
- chia/wallet/util/puzzle_decorator_type.py +7 -0
- chia/wallet/util/query_filter.py +59 -0
- chia/wallet/util/transaction_type.py +23 -0
- chia/wallet/util/tx_config.py +158 -0
- chia/wallet/util/wallet_sync_utils.py +351 -0
- chia/wallet/util/wallet_types.py +72 -0
- chia/wallet/vc_wallet/__init__.py +0 -0
- chia/wallet/vc_wallet/cr_cat_drivers.py +664 -0
- chia/wallet/vc_wallet/cr_cat_wallet.py +877 -0
- chia/wallet/vc_wallet/cr_outer_puzzle.py +102 -0
- chia/wallet/vc_wallet/cr_puzzles/__init__.py +0 -0
- chia/wallet/vc_wallet/cr_puzzles/conditions_w_fee_announce.clsp +3 -0
- chia/wallet/vc_wallet/cr_puzzles/conditions_w_fee_announce.clsp.hex +1 -0
- chia/wallet/vc_wallet/cr_puzzles/credential_restriction.clsp +304 -0
- chia/wallet/vc_wallet/cr_puzzles/credential_restriction.clsp.hex +1 -0
- chia/wallet/vc_wallet/cr_puzzles/flag_proofs_checker.clsp +45 -0
- chia/wallet/vc_wallet/cr_puzzles/flag_proofs_checker.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_drivers.py +838 -0
- chia/wallet/vc_wallet/vc_puzzles/__init__.py +0 -0
- chia/wallet/vc_wallet/vc_puzzles/covenant_layer.clsp +30 -0
- chia/wallet/vc_wallet/vc_puzzles/covenant_layer.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_covenant_morpher.clsp +75 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_covenant_morpher.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_transfer_program_covenant_adapter.clsp +32 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_transfer_program_covenant_adapter.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_update_metadata_with_DID.clsp +80 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_update_metadata_with_DID.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/exigent_metadata_layer.clsp +163 -0
- chia/wallet/vc_wallet/vc_puzzles/exigent_metadata_layer.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/p2_announced_delegated_puzzle.clsp +16 -0
- chia/wallet/vc_wallet/vc_puzzles/p2_announced_delegated_puzzle.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/standard_vc_backdoor_puzzle.clsp +74 -0
- chia/wallet/vc_wallet/vc_puzzles/standard_vc_backdoor_puzzle.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/std_parent_morpher.clsp +23 -0
- chia/wallet/vc_wallet/vc_puzzles/std_parent_morpher.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/viral_backdoor.clsp +64 -0
- chia/wallet/vc_wallet/vc_puzzles/viral_backdoor.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_store.py +263 -0
- chia/wallet/vc_wallet/vc_wallet.py +638 -0
- chia/wallet/wallet.py +698 -0
- chia/wallet/wallet_action_scope.py +96 -0
- chia/wallet/wallet_blockchain.py +244 -0
- chia/wallet/wallet_coin_record.py +72 -0
- chia/wallet/wallet_coin_store.py +351 -0
- chia/wallet/wallet_info.py +35 -0
- chia/wallet/wallet_interested_store.py +188 -0
- chia/wallet/wallet_nft_store.py +279 -0
- chia/wallet/wallet_node.py +1765 -0
- chia/wallet/wallet_node_api.py +207 -0
- chia/wallet/wallet_pool_store.py +119 -0
- chia/wallet/wallet_protocol.py +90 -0
- chia/wallet/wallet_puzzle_store.py +396 -0
- chia/wallet/wallet_retry_store.py +70 -0
- chia/wallet/wallet_singleton_store.py +259 -0
- chia/wallet/wallet_spend_bundle.py +25 -0
- chia/wallet/wallet_state_manager.py +2819 -0
- chia/wallet/wallet_transaction_store.py +496 -0
- chia/wallet/wallet_user_store.py +110 -0
- chia/wallet/wallet_weight_proof_handler.py +126 -0
- chia_blockchain-2.5.1rc1.dist-info/LICENSE +201 -0
- chia_blockchain-2.5.1rc1.dist-info/METADATA +156 -0
- chia_blockchain-2.5.1rc1.dist-info/RECORD +1042 -0
- chia_blockchain-2.5.1rc1.dist-info/WHEEL +4 -0
- chia_blockchain-2.5.1rc1.dist-info/entry_points.txt +17 -0
- mozilla-ca/cacert.pem +3611 -0
|
@@ -0,0 +1,3327 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import contextlib
|
|
5
|
+
import copy
|
|
6
|
+
import dataclasses
|
|
7
|
+
import logging
|
|
8
|
+
import multiprocessing
|
|
9
|
+
import random
|
|
10
|
+
import sqlite3
|
|
11
|
+
import time
|
|
12
|
+
import traceback
|
|
13
|
+
from collections.abc import AsyncIterator, Awaitable, Sequence
|
|
14
|
+
from multiprocessing.context import BaseContext
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Optional, TextIO, Union, cast, final
|
|
17
|
+
|
|
18
|
+
from chia_rs import (
|
|
19
|
+
AugSchemeMPL,
|
|
20
|
+
BLSCache,
|
|
21
|
+
get_flags_for_height_and_constants,
|
|
22
|
+
run_block_generator,
|
|
23
|
+
run_block_generator2,
|
|
24
|
+
)
|
|
25
|
+
from packaging.version import Version
|
|
26
|
+
|
|
27
|
+
from chia.consensus.block_body_validation import ForkInfo
|
|
28
|
+
from chia.consensus.block_creation import unfinished_block_to_full_block
|
|
29
|
+
from chia.consensus.block_record import BlockRecord
|
|
30
|
+
from chia.consensus.blockchain import AddBlockResult, Blockchain, BlockchainMutexPriority, StateChangeSummary
|
|
31
|
+
from chia.consensus.blockchain_interface import BlockchainInterface
|
|
32
|
+
from chia.consensus.constants import ConsensusConstants
|
|
33
|
+
from chia.consensus.cost_calculator import NPCResult
|
|
34
|
+
from chia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
|
|
35
|
+
from chia.consensus.make_sub_epoch_summary import next_sub_epoch_summary
|
|
36
|
+
from chia.consensus.multiprocess_validation import PreValidationResult, pre_validate_block
|
|
37
|
+
from chia.consensus.pot_iterations import calculate_sp_iters
|
|
38
|
+
from chia.full_node.block_store import BlockStore
|
|
39
|
+
from chia.full_node.coin_store import CoinStore
|
|
40
|
+
from chia.full_node.full_node_api import FullNodeAPI
|
|
41
|
+
from chia.full_node.full_node_store import FullNodeStore, FullNodeStorePeakResult, UnfinishedBlockEntry
|
|
42
|
+
from chia.full_node.hint_management import get_hints_and_subscription_coin_ids
|
|
43
|
+
from chia.full_node.hint_store import HintStore
|
|
44
|
+
from chia.full_node.mempool import MempoolRemoveInfo
|
|
45
|
+
from chia.full_node.mempool_manager import MempoolManager, NewPeakItem
|
|
46
|
+
from chia.full_node.signage_point import SignagePoint
|
|
47
|
+
from chia.full_node.subscriptions import PeerSubscriptions, peers_for_spend_bundle
|
|
48
|
+
from chia.full_node.sync_store import Peak, SyncStore
|
|
49
|
+
from chia.full_node.tx_processing_queue import TransactionQueue
|
|
50
|
+
from chia.full_node.weight_proof import WeightProofHandler
|
|
51
|
+
from chia.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol
|
|
52
|
+
from chia.protocols.farmer_protocol import SignagePointSourceData, SPSubSlotSourceData, SPVDFSourceData
|
|
53
|
+
from chia.protocols.full_node_protocol import RequestBlocks, RespondBlock, RespondBlocks, RespondSignagePoint
|
|
54
|
+
from chia.protocols.protocol_message_types import ProtocolMessageTypes
|
|
55
|
+
from chia.protocols.shared_protocol import Capability
|
|
56
|
+
from chia.protocols.wallet_protocol import CoinState, CoinStateUpdate, RemovedMempoolItem
|
|
57
|
+
from chia.rpc.rpc_server import StateChangedProtocol
|
|
58
|
+
from chia.server.node_discovery import FullNodePeers
|
|
59
|
+
from chia.server.outbound_message import Message, NodeType, make_msg
|
|
60
|
+
from chia.server.server import ChiaServer
|
|
61
|
+
from chia.server.ws_connection import WSChiaConnection
|
|
62
|
+
from chia.types.blockchain_format.classgroup import ClassgroupElement
|
|
63
|
+
from chia.types.blockchain_format.pool_target import PoolTarget
|
|
64
|
+
from chia.types.blockchain_format.sized_bytes import bytes32
|
|
65
|
+
from chia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
|
|
66
|
+
from chia.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof, validate_vdf
|
|
67
|
+
from chia.types.coin_record import CoinRecord
|
|
68
|
+
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
|
|
69
|
+
from chia.types.full_block import FullBlock
|
|
70
|
+
from chia.types.header_block import HeaderBlock
|
|
71
|
+
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
|
|
72
|
+
from chia.types.mempool_item import MempoolItem
|
|
73
|
+
from chia.types.peer_info import PeerInfo
|
|
74
|
+
from chia.types.spend_bundle import SpendBundle
|
|
75
|
+
from chia.types.transaction_queue_entry import TransactionQueueEntry
|
|
76
|
+
from chia.types.unfinished_block import UnfinishedBlock
|
|
77
|
+
from chia.types.validation_state import ValidationState
|
|
78
|
+
from chia.types.weight_proof import WeightProof
|
|
79
|
+
from chia.util.augmented_chain import AugmentedBlockchain
|
|
80
|
+
from chia.util.bech32m import encode_puzzle_hash
|
|
81
|
+
from chia.util.check_fork_next_block import check_fork_next_block
|
|
82
|
+
from chia.util.condition_tools import pkm_pairs
|
|
83
|
+
from chia.util.config import process_config_start_method
|
|
84
|
+
from chia.util.db_synchronous import db_synchronous_on
|
|
85
|
+
from chia.util.db_version import lookup_db_version, set_db_version_async
|
|
86
|
+
from chia.util.db_wrapper import DBWrapper2, manage_connection
|
|
87
|
+
from chia.util.errors import ConsensusError, Err, TimestampError, ValidationError
|
|
88
|
+
from chia.util.ints import uint8, uint32, uint64, uint128
|
|
89
|
+
from chia.util.limited_semaphore import LimitedSemaphore
|
|
90
|
+
from chia.util.network import is_localhost
|
|
91
|
+
from chia.util.path import path_from_root
|
|
92
|
+
from chia.util.profiler import enable_profiler, mem_profile_task, profile_task
|
|
93
|
+
from chia.util.safe_cancel_task import cancel_task_safe
|
|
94
|
+
from chia.util.task_referencer import create_referenced_task
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
# This is the result of calling peak_post_processing, which is then fed into peak_post_processing_2
@dataclasses.dataclass
class PeakPostProcessingResult:
    """Intermediate state produced by ``peak_post_processing`` for a new peak.

    Carries the mempool/full-node-store results and DB hint information so
    that ``peak_post_processing_2`` can finish notifying peers and wallets.
    """

    mempool_peak_result: list[NewPeakItem]  # The new items from calling MempoolManager.new_peak
    mempool_removals: list[MempoolRemoveInfo]  # The removed mempool items from calling MempoolManager.new_peak
    fns_peak_result: FullNodeStorePeakResult  # The result of calling FullNodeStore.new_peak
    hints: list[tuple[bytes32, bytes]]  # The hints added to the DB
    lookup_coin_ids: list[bytes32]  # The coin IDs that we need to look up to notify wallets of changes
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@dataclasses.dataclass(frozen=True)
class WalletUpdate:
    """Immutable record of a blockchain state change, queued on
    ``FullNode.wallet_sync_queue`` for the wallet-sync task to deliver.
    """

    # Height of the fork point for this update (presumably the height below
    # which wallet state is unaffected — confirm against the consumer).
    fork_height: uint32
    # The new peak after the update was applied.
    peak: Peak
    # Coin records changed by the transition to the new peak.
    coin_records: list[CoinRecord]
    # Hint mapping; presumably coin id -> hinted puzzle hash — confirm against producer.
    hints: dict[bytes32, bytes32]
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
@final
@dataclasses.dataclass
class FullNode:
    """The Chia full node service.

    Holds the blockchain, coin/block/hint stores, mempool, sync state and
    peer subscriptions. Most ``Optional`` fields start as ``None`` and are
    populated inside ``manage()`` once an event loop and the database are
    available; the non-optional, default-less fields are supplied by
    ``FullNode.create``.
    """

    if TYPE_CHECKING:
        from chia.rpc.rpc_server import RpcServiceProtocol

        # Static assertion that FullNode structurally satisfies RpcServiceProtocol.
        _protocol_check: ClassVar[RpcServiceProtocol] = cast("FullNode", None)

    root_path: Path
    config: dict[str, Any]
    constants: ConsensusConstants
    # One timestamp per signage point index in a sub-slot; initialized to the
    # current time in create().
    signage_point_times: list[float]
    full_node_store: FullNodeStore
    log: logging.Logger
    db_path: Path
    # Consumed by the wallet-sync task; fed by peak post-processing.
    wallet_sync_queue: asyncio.Queue[WalletUpdate]
    _segment_task_list: list[asyncio.Task[None]] = dataclasses.field(default_factory=list)
    initialized: bool = False
    # Assigned externally after construction; access via the `server` property,
    # which raises until it is set.
    _server: Optional[ChiaServer] = None
    _shut_down: bool = False
    pow_creation: dict[bytes32, asyncio.Event] = dataclasses.field(default_factory=dict)
    state_changed_callback: Optional[StateChangedProtocol] = None
    full_node_peers: Optional[FullNodePeers] = None
    sync_store: SyncStore = dataclasses.field(default_factory=SyncStore)
    uncompact_task: Optional[asyncio.Task[None]] = None
    compact_vdf_requests: set[bytes32] = dataclasses.field(default_factory=set)
    # TODO: Logging isn't setup yet so the log entries related to parsing the
    # config would end up on stdout if handled here.
    multiprocessing_context: Optional[BaseContext] = None
    _ui_tasks: set[asyncio.Task[None]] = dataclasses.field(default_factory=set)
    subscriptions: PeerSubscriptions = dataclasses.field(default_factory=PeerSubscriptions)
    _transaction_queue_task: Optional[asyncio.Task[None]] = None
    simulator_transaction_callback: Optional[Callable[[bytes32], Awaitable[None]]] = None
    _sync_task_list: list[asyncio.Task[None]] = dataclasses.field(default_factory=list)
    _transaction_queue: Optional[TransactionQueue] = None
    _tx_task_list: list[asyncio.Task[None]] = dataclasses.field(default_factory=list)
    _compact_vdf_sem: Optional[LimitedSemaphore] = None
    _new_peak_sem: Optional[LimitedSemaphore] = None
    _add_transaction_semaphore: Optional[asyncio.Semaphore] = None
    _db_wrapper: Optional[DBWrapper2] = None
    _hint_store: Optional[HintStore] = None
    _block_store: Optional[BlockStore] = None
    _coin_store: Optional[CoinStore] = None
    _mempool_manager: Optional[MempoolManager] = None
    _init_weight_proof: Optional[asyncio.Task[None]] = None
    _blockchain: Optional[Blockchain] = None
    _timelord_lock: Optional[asyncio.Lock] = None
    weight_proof_handler: Optional[WeightProofHandler] = None
    # Hashes of peaks that failed long sync on CHIP-13 validation.
    bad_peak_cache: dict[bytes32, uint32] = dataclasses.field(default_factory=dict)
    wallet_sync_task: Optional[asyncio.Task[None]] = None
    _bls_cache: BLSCache = dataclasses.field(default_factory=lambda: BLSCache(50000))
|
|
167
|
+
|
|
168
|
+
@property
def server(self) -> ChiaServer:
    """Return the ChiaServer assigned to this node.

    Raises:
        RuntimeError: if no server has been assigned yet.

    Stop-gap until the class usage is refactored so that the values of
    integral attributes are known at creation of the instance.
    """
    assigned = self._server
    if assigned is not None:
        return assigned
    raise RuntimeError("server not assigned")
|
|
176
|
+
|
|
177
|
+
@classmethod
async def create(
    cls,
    config: dict[str, Any],
    root_path: Path,
    consensus_constants: ConsensusConstants,
    name: str = __name__,
) -> FullNode:
    """Construct a FullNode from the given config and constants.

    NOTE: declared async to force the wallet-sync queue to be created
    while an event loop is available.
    """
    # Resolve the per-network database file and make sure its directory exists.
    selected_network = config["selected_network"]
    resolved_db_path = path_from_root(
        root_path, config["database_path"].replace("CHALLENGE", selected_network)
    )
    resolved_db_path.parent.mkdir(parents=True, exist_ok=True)

    sp_count = consensus_constants.NUM_SPS_SUB_SLOT
    return cls(
        root_path=root_path,
        config=config,
        constants=consensus_constants,
        signage_point_times=[time.time() for _ in range(sp_count)],
        full_node_store=FullNodeStore(consensus_constants),
        log=logging.getLogger(name),
        db_path=resolved_db_path,
        wallet_sync_queue=asyncio.Queue(),
    )
|
|
200
|
+
|
|
201
|
+
@contextlib.asynccontextmanager
|
|
202
|
+
async def manage(self) -> AsyncIterator[None]:
|
|
203
|
+
self._timelord_lock = asyncio.Lock()
|
|
204
|
+
self._compact_vdf_sem = LimitedSemaphore.create(active_limit=4, waiting_limit=20)
|
|
205
|
+
|
|
206
|
+
# We don't want to run too many concurrent new_peak instances, because it would fetch the same block from
|
|
207
|
+
# multiple peers and re-validate.
|
|
208
|
+
self._new_peak_sem = LimitedSemaphore.create(active_limit=2, waiting_limit=20)
|
|
209
|
+
|
|
210
|
+
# These many respond_transaction tasks can be active at any point in time
|
|
211
|
+
self._add_transaction_semaphore = asyncio.Semaphore(200)
|
|
212
|
+
|
|
213
|
+
sql_log_path: Optional[Path] = None
|
|
214
|
+
with contextlib.ExitStack() as exit_stack:
|
|
215
|
+
sql_log_file: Optional[TextIO] = None
|
|
216
|
+
if self.config.get("log_sqlite_cmds", False):
|
|
217
|
+
sql_log_path = path_from_root(self.root_path, "log/sql.log")
|
|
218
|
+
self.log.info(f"logging SQL commands to {sql_log_path}")
|
|
219
|
+
sql_log_file = exit_stack.enter_context(sql_log_path.open("a", encoding="utf-8"))
|
|
220
|
+
|
|
221
|
+
# create the store (db) and full node instance
|
|
222
|
+
# TODO: is this standardized and thus able to be handled by DBWrapper2?
|
|
223
|
+
async with manage_connection(self.db_path, log_file=sql_log_file, name="version_check") as db_connection:
|
|
224
|
+
db_version = await lookup_db_version(db_connection)
|
|
225
|
+
|
|
226
|
+
self.log.info(f"using blockchain database {self.db_path}, which is version {db_version}")
|
|
227
|
+
|
|
228
|
+
db_sync = db_synchronous_on(self.config.get("db_sync", "auto"))
|
|
229
|
+
self.log.info(f"opening blockchain DB: synchronous={db_sync}")
|
|
230
|
+
|
|
231
|
+
async with DBWrapper2.managed(
|
|
232
|
+
self.db_path,
|
|
233
|
+
db_version=db_version,
|
|
234
|
+
reader_count=self.config.get("db_readers", 4),
|
|
235
|
+
log_path=sql_log_path,
|
|
236
|
+
synchronous=db_sync,
|
|
237
|
+
) as self._db_wrapper:
|
|
238
|
+
if self.db_wrapper.db_version != 2:
|
|
239
|
+
async with self.db_wrapper.reader_no_transaction() as conn:
|
|
240
|
+
async with conn.execute(
|
|
241
|
+
"SELECT name FROM sqlite_master WHERE type='table' AND name='full_blocks'"
|
|
242
|
+
) as cur:
|
|
243
|
+
if len(list(await cur.fetchall())) == 0:
|
|
244
|
+
try:
|
|
245
|
+
# this is a new DB file. Make it v2
|
|
246
|
+
async with self.db_wrapper.writer_maybe_transaction() as w_conn:
|
|
247
|
+
await set_db_version_async(w_conn, 2)
|
|
248
|
+
self.db_wrapper.db_version = 2
|
|
249
|
+
self.log.info("blockchain database is empty, configuring as v2")
|
|
250
|
+
except sqlite3.OperationalError:
|
|
251
|
+
# it could be a database created with "chia init", which is
|
|
252
|
+
# empty except it has the database_version table
|
|
253
|
+
pass
|
|
254
|
+
|
|
255
|
+
self._block_store = await BlockStore.create(self.db_wrapper)
|
|
256
|
+
self._hint_store = await HintStore.create(self.db_wrapper)
|
|
257
|
+
self._coin_store = await CoinStore.create(self.db_wrapper)
|
|
258
|
+
self.log.info("Initializing blockchain from disk")
|
|
259
|
+
start_time = time.monotonic()
|
|
260
|
+
reserved_cores = self.config.get("reserved_cores", 0)
|
|
261
|
+
single_threaded = self.config.get("single_threaded", False)
|
|
262
|
+
log_coins = self.config.get("log_coins", False)
|
|
263
|
+
multiprocessing_start_method = process_config_start_method(config=self.config, log=self.log)
|
|
264
|
+
self.multiprocessing_context = multiprocessing.get_context(method=multiprocessing_start_method)
|
|
265
|
+
self._blockchain = await Blockchain.create(
|
|
266
|
+
coin_store=self.coin_store,
|
|
267
|
+
block_store=self.block_store,
|
|
268
|
+
consensus_constants=self.constants,
|
|
269
|
+
blockchain_dir=self.db_path.parent,
|
|
270
|
+
reserved_cores=reserved_cores,
|
|
271
|
+
single_threaded=single_threaded,
|
|
272
|
+
log_coins=log_coins,
|
|
273
|
+
)
|
|
274
|
+
|
|
275
|
+
self._mempool_manager = MempoolManager(
|
|
276
|
+
get_coin_records=self.coin_store.get_coin_records,
|
|
277
|
+
consensus_constants=self.constants,
|
|
278
|
+
single_threaded=single_threaded,
|
|
279
|
+
)
|
|
280
|
+
|
|
281
|
+
# Transactions go into this queue from the server, and get sent to respond_transaction
|
|
282
|
+
self._transaction_queue = TransactionQueue(1000, self.log)
|
|
283
|
+
self._transaction_queue_task: asyncio.Task[None] = create_referenced_task(self._handle_transactions())
|
|
284
|
+
|
|
285
|
+
self._init_weight_proof = create_referenced_task(self.initialize_weight_proof())
|
|
286
|
+
|
|
287
|
+
if self.config.get("enable_profiler", False):
|
|
288
|
+
create_referenced_task(profile_task(self.root_path, "node", self.log), known_unreferenced=True)
|
|
289
|
+
|
|
290
|
+
self.profile_block_validation = self.config.get("profile_block_validation", False)
|
|
291
|
+
if self.profile_block_validation: # pragma: no cover
|
|
292
|
+
# this is not covered by any unit tests as it's essentially test code
|
|
293
|
+
# itself. It's exercised manually when investigating performance issues
|
|
294
|
+
profile_dir = path_from_root(self.root_path, "block-validation-profile")
|
|
295
|
+
profile_dir.mkdir(parents=True, exist_ok=True)
|
|
296
|
+
|
|
297
|
+
if self.config.get("enable_memory_profiler", False):
|
|
298
|
+
create_referenced_task(mem_profile_task(self.root_path, "node", self.log), known_unreferenced=True)
|
|
299
|
+
|
|
300
|
+
time_taken = time.monotonic() - start_time
|
|
301
|
+
peak: Optional[BlockRecord] = self.blockchain.get_peak()
|
|
302
|
+
if peak is None:
|
|
303
|
+
self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
|
|
304
|
+
num_unspent = await self.coin_store.num_unspent()
|
|
305
|
+
if num_unspent > 0:
|
|
306
|
+
self.log.error(
|
|
307
|
+
f"Inconsistent blockchain DB file! Could not find peak block but found {num_unspent} coins! "
|
|
308
|
+
"This is a fatal error. The blockchain database may be corrupt"
|
|
309
|
+
)
|
|
310
|
+
raise RuntimeError("corrupt blockchain DB")
|
|
311
|
+
else:
|
|
312
|
+
self.log.info(
|
|
313
|
+
f"Blockchain initialized to peak {peak.header_hash} height"
|
|
314
|
+
f" {peak.height}, "
|
|
315
|
+
f"time taken: {int(time_taken)}s"
|
|
316
|
+
)
|
|
317
|
+
async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
|
|
318
|
+
pending_tx = await self.mempool_manager.new_peak(self.blockchain.get_tx_peak(), None)
|
|
319
|
+
assert len(pending_tx.items) == 0 # no pending transactions when starting up
|
|
320
|
+
|
|
321
|
+
full_peak: Optional[FullBlock] = await self.blockchain.get_full_peak()
|
|
322
|
+
assert full_peak is not None
|
|
323
|
+
state_change_summary = StateChangeSummary(peak, uint32(max(peak.height - 1, 0)), [], [], [], [])
|
|
324
|
+
# Must be called under priority_mutex
|
|
325
|
+
ppp_result: PeakPostProcessingResult = await self.peak_post_processing(
|
|
326
|
+
full_peak, state_change_summary, None
|
|
327
|
+
)
|
|
328
|
+
# Can be called outside of priority_mutex
|
|
329
|
+
await self.peak_post_processing_2(full_peak, None, state_change_summary, ppp_result)
|
|
330
|
+
if self.config["send_uncompact_interval"] != 0:
|
|
331
|
+
sanitize_weight_proof_only = False
|
|
332
|
+
if "sanitize_weight_proof_only" in self.config:
|
|
333
|
+
sanitize_weight_proof_only = self.config["sanitize_weight_proof_only"]
|
|
334
|
+
assert self.config["target_uncompact_proofs"] != 0
|
|
335
|
+
self.uncompact_task = create_referenced_task(
|
|
336
|
+
self.broadcast_uncompact_blocks(
|
|
337
|
+
self.config["send_uncompact_interval"],
|
|
338
|
+
self.config["target_uncompact_proofs"],
|
|
339
|
+
sanitize_weight_proof_only,
|
|
340
|
+
)
|
|
341
|
+
)
|
|
342
|
+
if self.wallet_sync_task is None or self.wallet_sync_task.done():
|
|
343
|
+
self.wallet_sync_task = create_referenced_task(self._wallets_sync_task_handler())
|
|
344
|
+
|
|
345
|
+
self.initialized = True
|
|
346
|
+
if self.full_node_peers is not None:
|
|
347
|
+
create_referenced_task(self.full_node_peers.start(), known_unreferenced=True)
|
|
348
|
+
try:
|
|
349
|
+
yield
|
|
350
|
+
finally:
|
|
351
|
+
self._shut_down = True
|
|
352
|
+
if self._init_weight_proof is not None:
|
|
353
|
+
self._init_weight_proof.cancel()
|
|
354
|
+
|
|
355
|
+
# blockchain is created in _start and in certain cases it may not exist here during _close
|
|
356
|
+
if self._blockchain is not None:
|
|
357
|
+
self.blockchain.shut_down()
|
|
358
|
+
# same for mempool_manager
|
|
359
|
+
if self._mempool_manager is not None:
|
|
360
|
+
self.mempool_manager.shut_down()
|
|
361
|
+
|
|
362
|
+
if self.full_node_peers is not None:
|
|
363
|
+
create_referenced_task(self.full_node_peers.close(), known_unreferenced=True)
|
|
364
|
+
if self.uncompact_task is not None:
|
|
365
|
+
self.uncompact_task.cancel()
|
|
366
|
+
if self._transaction_queue_task is not None:
|
|
367
|
+
self._transaction_queue_task.cancel()
|
|
368
|
+
cancel_task_safe(task=self.wallet_sync_task, log=self.log)
|
|
369
|
+
for one_tx_task in self._tx_task_list:
|
|
370
|
+
if not one_tx_task.done():
|
|
371
|
+
cancel_task_safe(task=one_tx_task, log=self.log)
|
|
372
|
+
for one_sync_task in self._sync_task_list:
|
|
373
|
+
if not one_sync_task.done():
|
|
374
|
+
cancel_task_safe(task=one_sync_task, log=self.log)
|
|
375
|
+
for segment_task in self._segment_task_list:
|
|
376
|
+
cancel_task_safe(segment_task, self.log)
|
|
377
|
+
for task_id, task in list(self.full_node_store.tx_fetch_tasks.items()):
|
|
378
|
+
cancel_task_safe(task, self.log)
|
|
379
|
+
if self._init_weight_proof is not None:
|
|
380
|
+
await asyncio.wait([self._init_weight_proof])
|
|
381
|
+
for one_tx_task in self._tx_task_list:
|
|
382
|
+
if one_tx_task.done():
|
|
383
|
+
self.log.info(f"TX task {one_tx_task.get_name()} done")
|
|
384
|
+
else:
|
|
385
|
+
with contextlib.suppress(asyncio.CancelledError):
|
|
386
|
+
self.log.info(f"Awaiting TX task {one_tx_task.get_name()}")
|
|
387
|
+
await one_tx_task
|
|
388
|
+
for one_sync_task in self._sync_task_list:
|
|
389
|
+
if one_sync_task.done():
|
|
390
|
+
self.log.info(f"Long sync task {one_sync_task.get_name()} done")
|
|
391
|
+
else:
|
|
392
|
+
with contextlib.suppress(asyncio.CancelledError):
|
|
393
|
+
self.log.info(f"Awaiting long sync task {one_sync_task.get_name()}")
|
|
394
|
+
await one_sync_task
|
|
395
|
+
await asyncio.gather(*self._segment_task_list, return_exceptions=True)
|
|
396
|
+
|
|
@property
def block_store(self) -> BlockStore:
    """The node's block store; asserts it has already been created."""
    store = self._block_store
    assert store is not None
    return store
@property
def timelord_lock(self) -> asyncio.Lock:
    """Lock serializing timelord-related work; asserts it exists."""
    lock = self._timelord_lock
    assert lock is not None
    return lock
@property
def mempool_manager(self) -> MempoolManager:
    """The mempool manager; asserts it has been initialized (non-None)."""
    manager = self._mempool_manager
    assert manager is not None
    return manager
@property
def blockchain(self) -> Blockchain:
    """The Blockchain instance; asserts it has been initialized."""
    chain = self._blockchain
    assert chain is not None
    return chain
@property
def coin_store(self) -> CoinStore:
    """The coin store; asserts it has been initialized."""
    store = self._coin_store
    assert store is not None
    return store
@property
def add_transaction_semaphore(self) -> asyncio.Semaphore:
    """Semaphore bounding concurrent add_transaction work; asserts it exists."""
    sem = self._add_transaction_semaphore
    assert sem is not None
    return sem
@property
def transaction_queue(self) -> TransactionQueue:
    """Queue of incoming transactions; asserts it has been created."""
    queue = self._transaction_queue
    assert queue is not None
    return queue
@property
def db_wrapper(self) -> DBWrapper2:
    """Database wrapper; asserts it has been opened."""
    wrapper = self._db_wrapper
    assert wrapper is not None
    return wrapper
@property
def hint_store(self) -> HintStore:
    """The hint store; asserts it has been initialized."""
    store = self._hint_store
    assert store is not None
    return store
@property
def new_peak_sem(self) -> LimitedSemaphore:
    """Semaphore limiting concurrent new_peak processing; asserts it exists."""
    sem = self._new_peak_sem
    assert sem is not None
    return sem
@property
def compact_vdf_sem(self) -> LimitedSemaphore:
    """Semaphore limiting concurrent compact-VDF processing; asserts it exists."""
    sem = self._compact_vdf_sem
    assert sem is not None
    return sem
def get_connections(self, request_node_type: Optional[NodeType]) -> list[dict[str, Any]]:
    """
    Describe every current connection (optionally filtered by node type) as a
    plain dict, including the peer's last reported peak when we know it.
    """
    # peer_to_peak is only available once the sync store exists.
    peak_store = self.sync_store.peer_to_peak if self.sync_store is not None else None
    con_info: list[dict[str, Any]] = []
    for con in self.server.get_connections(request_node_type):
        peak_height = peak_hash = peak_weight = None
        if peak_store is not None and con.peer_node_id in peak_store:
            known_peak = peak_store[con.peer_node_id]
            peak_height = known_peak.height
            peak_hash = known_peak.header_hash
            peak_weight = known_peak.weight
        con_info.append(
            {
                "type": con.connection_type,
                "local_port": con.local_port,
                "peer_host": con.peer_info.host,
                "peer_port": con.peer_info.port,
                "peer_server_port": con.peer_server_port,
                "node_id": con.peer_node_id,
                "creation_time": con.creation_time,
                "bytes_read": con.bytes_read,
                "bytes_written": con.bytes_written,
                "last_message_time": con.last_message_time,
                "peak_height": peak_height,
                "peak_weight": peak_weight,
                "peak_hash": peak_hash,
            }
        )
    return con_info
def _set_state_changed_callback(self, callback: StateChangedProtocol) -> None:
    """Install the callback through which _state_changed publishes events."""
    self.state_changed_callback = callback
async def _handle_one_transaction(self, entry: TransactionQueueEntry) -> None:
    """
    Validate and add one queued transaction to the mempool, reporting the
    result through entry.done. Always releases add_transaction_semaphore,
    which _handle_transactions acquired before spawning this task.
    """
    peer = entry.peer
    try:
        inc_status, err = await self.add_transaction(entry.transaction, entry.spend_name, peer, entry.test)
        entry.done.set((inc_status, err))
    except asyncio.CancelledError:
        # Shutdown/cancellation path: log at debug, do not penalize the peer.
        error_stack = traceback.format_exc()
        self.log.debug(f"Cancelling _handle_one_transaction, closing: {error_stack}")
    except Exception:
        # Unexpected failure: log it and drop the peer that sent the transaction.
        error_stack = traceback.format_exc()
        self.log.error(f"Error in _handle_one_transaction, closing: {error_stack}")
        if peer is not None:
            await peer.close()
    finally:
        # Must always release, or the producer loop in _handle_transactions stalls.
        self.add_transaction_semaphore.release()
async def _handle_transactions(self) -> None:
    """
    Main loop: pop entries from transaction_queue and spawn one
    _handle_one_transaction task per entry, bounded by
    add_transaction_semaphore (released by the spawned task).
    """
    while not self._shut_down:
        # We use a semaphore to make sure we don't send more than 200 concurrent calls of respond_transaction.
        # However, doing them one at a time would be slow, because they get sent to other processes.
        await self.add_transaction_semaphore.acquire()

        # Clean up task reference list (used to prevent gc from killing running tasks)
        for oldtask in self._tx_task_list[:]:
            if oldtask.done():
                self._tx_task_list.remove(oldtask)

        item: TransactionQueueEntry = await self.transaction_queue.pop()
        # Keep a strong reference so the task is not garbage collected mid-flight.
        self._tx_task_list.append(create_referenced_task(self._handle_one_transaction(item)))
async def initialize_weight_proof(self) -> None:
    """Create the weight-proof handler; pre-build sub-epoch segments when a peak exists."""
    handler = WeightProofHandler(
        constants=self.constants,
        blockchain=self.blockchain,
        multiprocessing_context=self.multiprocessing_context,
    )
    self.weight_proof_handler = handler
    if self.blockchain.get_peak() is not None:
        await self.weight_proof_handler.create_sub_epoch_segments()
def set_server(self, server: ChiaServer) -> None:
    """
    Attach the ChiaServer and construct the FullNodePeers discovery service
    from config (DNS introducers, peers file, connect interval, default port).
    """
    self._server = server
    dns_servers: list[str] = []
    network_name = self.config["selected_network"]
    try:
        # Non-mainnet networks may override the default full node port.
        default_port = self.config["network_overrides"]["config"][network_name]["default_full_node_port"]
    except Exception:
        self.log.info("Default port field not found in config.")
        default_port = None
    if "dns_servers" in self.config:
        dns_servers = self.config["dns_servers"]
    elif network_name == "mainnet":
        # If `dns_servers` is missing from the `config`, hardcode it if we're running mainnet.
        dns_servers.append("dns-introducer.chia.net")
    try:
        self.full_node_peers = FullNodePeers(
            self.server,
            self.config["target_outbound_peer_count"],
            self.root_path / Path(self.config.get("peers_file_path", "db/peers.dat")),
            self.config["introducer_peer"],
            dns_servers,
            self.config["peer_connect_interval"],
            self.config["selected_network"],
            default_port,
            self.log,
        )
    except Exception as e:
        # Peer discovery is best-effort: log the failure and keep the node running.
        error_stack = traceback.format_exc()
        self.log.error(f"Exception: {e}")
        self.log.error(f"Exception in peer discovery: {e}")
        self.log.error(f"Exception Stack: {error_stack}")
def _state_changed(self, change: str, change_data: Optional[dict[str, Any]] = None) -> None:
    """Forward a state-change event to the registered callback, if any."""
    callback = self.state_changed_callback
    if callback is not None:
        callback(change, change_data)
async def short_sync_batch(self, peer: WSChiaConnection, start_height: uint32, target_height: uint32) -> bool:
    """
    Tries to sync to a chain which is not too far in the future, by downloading batches of blocks. If the first
    block that we download is not connected to our chain, we return False and do an expensive long sync instead.
    Long sync is not preferred because it requires downloading and validating a weight proof.

    Args:
        peer: peer to sync from
        start_height: height that we should start downloading at. (Our peak is higher)
        target_height: target to sync to

    Returns:
        False if the fork point was not found, and we need to do a long sync. True otherwise.

    """
    # Don't trigger multiple batch syncs to the same peer

    if self.sync_store.is_backtrack_syncing(node_id=peer.peer_node_id):
        return True  # Don't batch sync, we are already in progress of a backtrack sync
    if peer.peer_node_id in self.sync_store.batch_syncing:
        return True  # Don't trigger a long sync
    self.sync_store.batch_syncing.add(peer.peer_node_id)

    self.log.info(f"Starting batch short sync from {start_height} to height {target_height}")
    if start_height > 0:
        # Probe the first block to check it connects to our chain before committing to the batch download.
        first = await peer.call_api(
            FullNodeAPI.request_block, full_node_protocol.RequestBlock(uint32(start_height), False)
        )
        if first is None or not isinstance(first, full_node_protocol.RespondBlock):
            self.sync_store.batch_syncing.remove(peer.peer_node_id)
            self.log.error(f"Error short batch syncing, could not fetch block at height {start_height}")
            return False
        hash = self.blockchain.height_to_hash(first.block.height - 1)
        assert hash is not None
        if hash != first.block.prev_header_hash:
            self.log.info("Batch syncing stopped, this is a deep chain")
            self.sync_store.batch_syncing.remove(peer.peer_node_id)
            # First sb not connected to our blockchain, do a long sync instead
            return False

    batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
    # Cancel/prune any outstanding sub-epoch segment tasks before validating new blocks.
    for task in self._segment_task_list[:]:
        if task.done():
            self._segment_task_list.remove(task)
        else:
            cancel_task_safe(task=task, log=self.log)

    try:
        peer_info = peer.get_peer_logging()
        if start_height > 0:
            fork_hash = self.blockchain.height_to_hash(uint32(start_height - 1))
        else:
            fork_hash = self.constants.GENESIS_CHALLENGE
        assert fork_hash
        fork_info = ForkInfo(start_height - 1, start_height - 1, fork_hash)
        # Download and validate [height, end_height) one batch at a time.
        for height in range(start_height, target_height, batch_size):
            end_height = min(target_height, height + batch_size)
            request = RequestBlocks(uint32(height), uint32(end_height), True)
            response = await peer.call_api(FullNodeAPI.request_blocks, request)
            if not response:
                raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}")
            async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
                state_change_summary: Optional[StateChangeSummary]
                prev_b = None
                if response.blocks[0].height > 0:
                    prev_b = await self.blockchain.get_block_record_from_db(response.blocks[0].prev_header_hash)
                    assert prev_b is not None
                new_slot = len(response.blocks[0].finished_sub_slots) > 0
                ssi, diff = get_next_sub_slot_iters_and_difficulty(
                    self.constants, new_slot, prev_b, self.blockchain
                )
                vs = ValidationState(ssi, diff, None)
                success, state_change_summary = await self.add_block_batch(
                    response.blocks, peer_info, fork_info, vs
                )
                if not success:
                    raise ValueError(f"Error short batch syncing, failed to validate blocks {height}-{end_height}")
                if state_change_summary is not None:
                    try:
                        # peak_post_processing must run under priority_mutex (see _start).
                        peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()
                        assert peak_fb is not None
                        ppp_result: PeakPostProcessingResult = await self.peak_post_processing(
                            peak_fb,
                            state_change_summary,
                            peer,
                        )
                    except Exception:
                        # Still do post processing after cancel (or exception)
                        peak_fb = await self.blockchain.get_full_peak()
                        assert peak_fb is not None
                        await self.peak_post_processing(peak_fb, state_change_summary, peer)
                        raise
                    finally:
                        self.log.info(f"Added blocks {height}-{end_height}")
            if state_change_summary is not None and peak_fb is not None:
                # Call outside of priority_mutex to encourage concurrency
                await self.peak_post_processing_2(peak_fb, peer, state_change_summary, ppp_result)
    finally:
        self.sync_store.batch_syncing.remove(peer.peer_node_id)
    return True
async def short_sync_backtrack(
    self, peer: WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32
) -> bool:
    """
    Performs a backtrack sync, where blocks are downloaded one at a time from newest to oldest. If we do not
    find the fork point 5 deeper than our peak, we return False and do a long sync instead.

    Args:
        peer: peer to sync from
        peak_height: height of our peak
        target_height: target height
        target_unf_hash: partial hash of the unfinished block of the target

    Returns:
        True iff we found the fork point, and we do not need to long sync.
    """
    try:
        self.sync_store.increment_backtrack_syncing(node_id=peer.peer_node_id)

        unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)
        curr_height: int = target_height
        found_fork_point = False
        blocks = []
        # Walk backwards from the target until we connect to our chain, at most 5 below our peak.
        while curr_height > peak_height - 5:
            # If we already have the unfinished block, don't fetch the transactions. In the normal case, we will
            # already have the unfinished block, from when it was broadcast, so we just need to download the header,
            # but not the transactions
            fetch_tx: bool = unfinished_block is None or curr_height != target_height
            curr = await peer.call_api(
                FullNodeAPI.request_block, full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx)
            )
            if curr is None:
                raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, timed out")
            if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):
                raise ValueError(
                    f"Failed to fetch block {curr_height} from {peer.get_peer_logging()}, wrong type {type(curr)}"
                )
            blocks.append(curr.block)
            if curr_height == 0:
                found_fork_point = True
                break
            hash_at_height = self.blockchain.height_to_hash(curr.block.height - 1)
            if hash_at_height is not None and hash_at_height == curr.block.prev_header_hash:
                # The fetched block's parent is on our chain: fork point found.
                found_fork_point = True
                break
            curr_height -= 1
        if found_fork_point:
            first_block = blocks[-1]  # blocks are reversed; this is the lowest block to add
            # we create the fork_info and pass it here so it would be updated on each call to add_block
            fork_info = ForkInfo(first_block.height - 1, first_block.height - 1, first_block.prev_header_hash)
            for block in reversed(blocks):
                # when syncing, we won't share any signatures with the
                # mempool, so there's no need to pass in the BLS cache.
                await self.add_block(block, peer, fork_info=fork_info)
    except (asyncio.CancelledError, Exception):
        self.sync_store.decrement_backtrack_syncing(node_id=peer.peer_node_id)
        raise
    self.sync_store.decrement_backtrack_syncing(node_id=peer.peer_node_id)
    return found_fork_point
async def _refresh_ui_connections(self, sleep_before: float = 0) -> None:
    """Optionally wait, then tell subscribers that peer peaks may have changed."""
    if sleep_before > 0:
        await asyncio.sleep(sleep_before)
    self._state_changed("peer_changed_peak")
async def new_peak(self, request: full_node_protocol.NewPeak, peer: WSChiaConnection) -> None:
    """
    We have received a notification of a new peak from a peer. This happens either when we have just connected,
    or when the peer has updated their peak.

    Args:
        request: information about the new peak
        peer: peer that sent the message

    """

    try:
        seen_header_hash = self.sync_store.seen_header_hash(request.header_hash)
        # Updates heights in the UI. Sleeps 1.5s before, so other peers have time to update their peaks as well.
        # Limit to 3 refreshes.
        if not seen_header_hash and len(self._ui_tasks) < 3:
            self._ui_tasks.add(create_referenced_task(self._refresh_ui_connections(1.5)))
        # Prune completed connect tasks
        self._ui_tasks = set(filter(lambda t: not t.done(), self._ui_tasks))
    except Exception as e:
        self.log.warning(f"Exception UI refresh task: {e}")

    # Store this peak/peer combination in case we want to sync to it, and to keep track of peers
    self.sync_store.peer_has_block(request.header_hash, peer.peer_node_id, request.weight, request.height, True)

    if self.blockchain.contains_block(request.header_hash):
        return None

    # Not interested in less heavy peaks
    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    curr_peak_height = uint32(0) if peak is None else peak.height
    if peak is not None and peak.weight > request.weight:
        return None

    if self.sync_store.get_sync_mode():
        # If peer connects while we are syncing, check if they have the block we are syncing towards
        target_peak = self.sync_store.target_peak
        if target_peak is not None and request.header_hash != target_peak.header_hash:
            peak_peers: set[bytes32] = self.sync_store.get_peers_that_have_peak([target_peak.header_hash])
            # Don't ask if we already know this peer has the peak
            if peer.peer_node_id not in peak_peers:
                target_peak_response: Optional[RespondBlock] = await peer.call_api(
                    FullNodeAPI.request_block,
                    full_node_protocol.RequestBlock(target_peak.height, False),
                    timeout=10,
                )
                if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                    self.sync_store.peer_has_block(
                        target_peak.header_hash,
                        peer.peer_node_id,
                        target_peak_response.block.weight,
                        target_peak.height,
                        False,
                    )
    else:
        # Not currently syncing: try progressively more expensive strategies.
        if (
            curr_peak_height <= request.height
            and request.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]
        ):
            # This is the normal case of receiving the next block
            if await self.short_sync_backtrack(
                peer, curr_peak_height, request.height, request.unfinished_reward_block_hash
            ):
                return None

        if request.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
            # This is the case of syncing up more than a few blocks, at the start of the chain
            self.log.debug("Doing batch sync, no backup")
            await self.short_sync_batch(peer, uint32(0), request.height)
            return None

        if (
            curr_peak_height <= request.height
            and request.height < curr_peak_height + self.config["sync_blocks_behind_threshold"]
        ):
            # This case of being behind but not by so much
            if await self.short_sync_batch(peer, uint32(max(curr_peak_height - 6, 0)), request.height):
                return None

        # Clean up task reference list (used to prevent gc from killing running tasks)
        for oldtask in self._sync_task_list[:]:
            if oldtask.done():
                self._sync_task_list.remove(oldtask)

        # This is the either the case where we were not able to sync successfully (for example, due to the fork
        # point being in the past), or we are very far behind. Performs a long sync.
        # Multiple tasks may be created here. If we don't save all handles, a task could enter a sync object
        # and be cleaned up by the GC, corrupting the sync object and possibly not allowing anything else in.
        self._sync_task_list.append(create_referenced_task(self._sync()))
async def send_peak_to_timelords(
    self, peak_block: Optional[FullBlock] = None, peer: Optional[WSChiaConnection] = None
) -> None:
    """
    Sends current peak to timelords. If ``peer`` is given, only that timelord
    is notified; otherwise the message is broadcast to all connected timelords.
    """
    if peak_block is None:
        peak_block = await self.blockchain.get_full_peak()
    if peak_block is not None:
        peak = self.blockchain.block_record(peak_block.header_hash)
        difficulty = self.blockchain.get_next_difficulty(peak.header_hash, False)
        ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
            self.constants,
            self.blockchain,
            peak.required_iters,
            peak_block,
            True,
        )
        recent_rc = self.blockchain.get_recent_reward_challenges()

        # Walk back to the most recent challenge block or sub-slot boundary to
        # compute the total iters of the last challenge point.
        curr = peak
        while not curr.is_challenge_block(self.constants) and not curr.first_in_sub_slot:
            curr = self.blockchain.block_record(curr.prev_hash)

        if curr.is_challenge_block(self.constants):
            last_csb_or_eos = curr.total_iters
        else:
            last_csb_or_eos = curr.ip_sub_slot_total_iters(self.constants)

        # Determine whether we crossed a sub-epoch boundary without yet
        # including its sub-epoch summary in any block.
        curr = peak
        passed_ses_height_but_not_yet_included = True
        while (curr.height % self.constants.SUB_EPOCH_BLOCKS) != 0:
            if curr.sub_epoch_summary_included:
                passed_ses_height_but_not_yet_included = False
            curr = self.blockchain.block_record(curr.prev_hash)
        if curr.sub_epoch_summary_included or curr.height == 0:
            passed_ses_height_but_not_yet_included = False

        timelord_new_peak: timelord_protocol.NewPeakTimelord = timelord_protocol.NewPeakTimelord(
            peak_block.reward_chain_block,
            difficulty,
            peak.deficit,
            peak.sub_slot_iters,
            ses,
            recent_rc,
            last_csb_or_eos,
            passed_ses_height_but_not_yet_included,
        )

        msg = make_msg(ProtocolMessageTypes.new_peak_timelord, timelord_new_peak)
        if peer is None:
            await self.server.send_to_all([msg], NodeType.TIMELORD)
        else:
            await self.server.send_to_specific([msg], peer.peer_node_id)
879
|
+
async def synced(self, block_is_current_at: Optional[uint64] = None) -> bool:
    """Return True when this node appears caught up with the network.

    The node counts as synced when its most recent transaction block has a
    timestamp at or after ``block_is_current_at`` (default: seven minutes
    ago) and it is not currently in sync mode. Simulator networks are
    always treated as synced, since they have no peers.
    """
    if block_is_current_at is None:
        # Default cutoff: a transaction block within the last 7 minutes.
        block_is_current_at = uint64(int(time.time() - 60 * 7))
    if "simulator" in str(self.config.get("selected_network")):
        return True  # sim is always synced because it has no peers

    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    if peak is None:
        return False

    # Walk back from the peak to the most recent transaction block, since
    # only transaction blocks carry timestamps.
    tx_block: Optional[BlockRecord] = peak
    while tx_block is not None and not tx_block.is_transaction_block:
        tx_block = self.blockchain.try_block_record(tx_block.prev_hash)

    if tx_block is None or tx_block.timestamp is None:
        return False
    if tx_block.timestamp < block_is_current_at:
        return False
    return not self.sync_store.get_sync_mode()
|
|
901
|
+
async def on_connect(self, connection: WSChiaConnection) -> None:
    """
    Whenever we connect to another node / wallet, send them our current heads. Also send heads to farmers
    and challenges to timelords.
    """

    # Notify observers (RPC/state watchers) about the new connection.
    self._state_changed("add_connection")
    self._state_changed("sync_mode")
    if self.full_node_peers is not None:
        # Let the peer-discovery component react to the connection in the background.
        create_referenced_task(self.full_node_peers.on_connect(connection))

    if self.initialized is False:
        # Node is still starting up; don't advertise any state yet.
        return None

    if connection.connection_type is NodeType.FULL_NODE:
        # Send filter to node and request mempool items that are not in it (Only if we are currently synced)
        synced = await self.synced()
        peak_height = self.blockchain.get_peak_height()
        if synced and peak_height is not None:
            my_filter = self.mempool_manager.get_filter()
            mempool_request = full_node_protocol.RequestMempoolTransactions(my_filter)

            msg = make_msg(ProtocolMessageTypes.request_mempool_transactions, mempool_request)
            await connection.send_message(msg)

    peak_full: Optional[FullBlock] = await self.blockchain.get_full_peak()

    if peak_full is not None:
        peak: BlockRecord = self.blockchain.block_record(peak_full.header_hash)
        if connection.connection_type is NodeType.FULL_NODE:
            # NOTE(review): peak.height is passed twice — the second occurrence is
            # presumably the "fork point" field meaning "no fork"; confirm against
            # the full_node_protocol.NewPeak definition.
            request_node = full_node_protocol.NewPeak(
                peak.header_hash,
                peak.height,
                peak.weight,
                peak.height,
                peak_full.reward_chain_block.get_unfinished().get_hash(),
            )
            await connection.send_message(make_msg(ProtocolMessageTypes.new_peak, request_node))

        elif connection.connection_type is NodeType.WALLET:
            # If connected to a wallet, send the Peak
            request_wallet = wallet_protocol.NewPeakWallet(
                peak.header_hash,
                peak.height,
                peak.weight,
                peak.height,
            )
            await connection.send_message(make_msg(ProtocolMessageTypes.new_peak_wallet, request_wallet))
        elif connection.connection_type is NodeType.TIMELORD:
            # Timelords get the richer NewPeakTimelord message built elsewhere.
            await self.send_peak_to_timelords()
|
|
952
|
+
async def on_disconnect(self, connection: WSChiaConnection) -> None:
    """Clean up all per-peer state after a connection closes."""
    self.log.info(f"peer disconnected {connection.get_peer_logging()}")
    self._state_changed("close_connection")
    self._state_changed("sync_mode")
    peer_id = connection.peer_node_id
    if self.sync_store is not None:
        self.sync_store.peer_disconnected(peer_id)
    # Drop every puzzle-hash / coin-id subscription held by this peer.
    self.subscriptions.remove_peer(peer_id)
|
|
961
|
+
async def _sync(self) -> None:
    """
    Performs a full sync of the blockchain up to the peak.
    - Wait a few seconds for peers to send us their peaks
    - Select the heaviest peak, and request a weight proof from a peer with that peak
    - Validate the weight proof, and disconnect from the peer if invalid
    - Find the fork point to see where to start downloading blocks
    - Download blocks in batch (and in parallel) and verify them one at a time
    - Disconnect peers that provide invalid blocks or don't have the blocks
    """
    # Ensure we are only syncing once and not double calling this method
    fork_point: Optional[uint32] = None
    if self.sync_store.get_sync_mode():
        return None

    if self.sync_store.get_long_sync():
        self.log.debug("already in long sync")
        return None

    # The long-sync flag guards against concurrent invocations of this method;
    # it is cleared in _finish_sync (called from the finally block below).
    self.sync_store.set_long_sync(True)
    self.log.debug("long sync started")
    try:
        self.log.info("Starting to perform sync.")

        # Wait until we have 3 peaks or up to a max of 30 seconds
        max_iterations = int(self.config.get("max_sync_wait", 30)) * 10

        self.log.info(f"Waiting to receive peaks from peers. (timeout: {max_iterations / 10}s)")
        peaks = []
        for i in range(max_iterations):
            # Poll every 100ms until at least 3 peers report some peak.
            peaks = [peak.header_hash for peak in self.sync_store.get_peak_of_each_peer().values()]
            if len(self.sync_store.get_peers_that_have_peak(peaks)) < 3:
                if self._shut_down:
                    return None
                await asyncio.sleep(0.1)
                continue
            break

        self.log.info(f"Collected a total of {len(peaks)} peaks.")

        # Based on responses from peers about the current peaks, see which peak is the heaviest
        # (similar to longest chain rule).
        target_peak = self.sync_store.get_heaviest_peak()

        if target_peak is None:
            raise RuntimeError("Not performing sync, no peaks collected")

        self.sync_store.target_peak = target_peak

        self.log.info(f"Selected peak {target_peak}")
        # Check which peers are updated to this height

        peers = self.server.get_connections(NodeType.FULL_NODE)
        coroutines = []
        for peer in peers:
            coroutines.append(
                peer.call_api(
                    FullNodeAPI.request_block,
                    full_node_protocol.RequestBlock(target_peak.height, True),
                    timeout=10,
                )
            )
        # Record which peers actually hold the target peak, so block download
        # below only asks peers that can serve it.
        for i, target_peak_response in enumerate(await asyncio.gather(*coroutines)):
            if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                self.sync_store.peer_has_block(
                    target_peak.header_hash, peers[i].peer_node_id, target_peak.weight, target_peak.height, False
                )
        # TODO: disconnect from peer which gave us the heaviest_peak, if nobody has the peak
        fork_point, summaries = await self.request_validate_wp(
            target_peak.header_hash, target_peak.height, target_peak.weight
        )
        # Ensures that the fork point does not change
        async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
            await self.blockchain.warmup(fork_point)
            # Refine the fork point by probing peers for the next block.
            fork_point = await check_fork_next_block(
                self.blockchain,
                fork_point,
                self.get_peers_with_peak(target_peak.header_hash),
                node_next_block_check,
            )
            await self.sync_from_fork_point(fork_point, target_peak.height, target_peak.header_hash, summaries)
    except asyncio.CancelledError:
        self.log.warning("Syncing failed, CancelledError")
    except Exception as e:
        tb = traceback.format_exc()
        self.log.error(f"Error with syncing: {type(e)}{tb}")
    finally:
        if self._shut_down:
            return None
        # _finish_sync clears sync/long-sync flags and broadcasts the new peak.
        await self._finish_sync(fork_point)
|
|
1052
|
+
async def request_validate_wp(
    self, peak_header_hash: bytes32, peak_height: uint32, peak_weight: uint128
) -> tuple[uint32, list[SubEpochSummary]]:
    """Request a weight proof for the given peak from a random peer and validate it.

    Returns the fork point height and the sub-epoch summaries extracted from the
    proof. Misbehaving peers (timeouts, wrong height/weight, invalid proofs) are
    disconnected with a 600 second ban. Raises RuntimeError/ValueError when the
    sync should be aborted. On success, sync mode is enabled as a side effect.
    """
    if self.weight_proof_handler is None:
        raise RuntimeError("Weight proof handler is None")
    peers_with_peak = self.get_peers_with_peak(peak_header_hash)
    # Request weight proof from a random peer
    peers_with_peak_len = len(peers_with_peak)
    self.log.info(f"Total of {peers_with_peak_len} peers with peak {peak_height}")
    # We can't choose from an empty sequence
    if peers_with_peak_len == 0:
        raise RuntimeError(f"Not performing sync, no peers with peak {peak_height}")
    weight_proof_peer: WSChiaConnection = random.choice(peers_with_peak)
    self.log.info(
        f"Requesting weight proof from peer {weight_proof_peer.peer_info.host} up to height {peak_height}"
    )
    cur_peak: Optional[BlockRecord] = self.blockchain.get_peak()
    if cur_peak is not None and peak_weight <= cur_peak.weight:
        raise ValueError("Not performing sync, already caught up.")
    wp_timeout = 360
    if "weight_proof_timeout" in self.config:
        wp_timeout = self.config["weight_proof_timeout"]
    self.log.debug(f"weight proof timeout is {wp_timeout} sec")
    request = full_node_protocol.RequestProofOfWeight(peak_height, peak_header_hash)
    response = await weight_proof_peer.call_api(FullNodeAPI.request_proof_of_weight, request, timeout=wp_timeout)
    # Disconnect from this peer, because they have not behaved properly
    if response is None or not isinstance(response, full_node_protocol.RespondProofOfWeight):
        await weight_proof_peer.close(600)
        raise RuntimeError(f"Weight proof did not arrive in time from peer: {weight_proof_peer.peer_info.host}")
    # The last entry of recent_chain_data must be the peak we asked about.
    if response.wp.recent_chain_data[-1].reward_chain_block.height != peak_height:
        await weight_proof_peer.close(600)
        raise RuntimeError(f"Weight proof had the wrong height: {weight_proof_peer.peer_info.host}")
    if response.wp.recent_chain_data[-1].reward_chain_block.weight != peak_weight:
        await weight_proof_peer.close(600)
        raise RuntimeError(f"Weight proof had the wrong weight: {weight_proof_peer.peer_info.host}")
    if self.in_bad_peak_cache(response.wp):
        raise ValueError("Weight proof failed bad peak cache validation")
    # dont sync to wp if local peak is heavier,
    # dont ban peer, we asked for this peak
    current_peak = self.blockchain.get_peak()
    if current_peak is not None:
        if response.wp.recent_chain_data[-1].reward_chain_block.weight <= current_peak.weight:
            raise RuntimeError(
                f"current peak is heavier than Weight proof peek: {weight_proof_peer.peer_info.host}"
            )
    try:
        validated, fork_point, summaries = await self.weight_proof_handler.validate_weight_proof(response.wp)
    except Exception as e:
        await weight_proof_peer.close(600)
        raise ValueError(f"Weight proof validation threw an error {e}")
    if not validated:
        await weight_proof_peer.close(600)
        raise ValueError("Weight proof validation failed")
    self.log.info(f"Re-checked peers: total of {len(peers_with_peak)} peers with peak {peak_height}")
    # From here on the node is committed to the long sync.
    self.sync_store.set_sync_mode(True)
    self._state_changed("sync_mode")
    return fork_point, summaries
|
|
1110
|
+
async def sync_from_fork_point(
    self,
    fork_point_height: uint32,
    target_peak_sb_height: uint32,
    peak_hash: bytes32,
    summaries: list[SubEpochSummary],
) -> None:
    """Download, validate and ingest blocks from the fork point up to the target peak.

    Runs a three-stage pipeline connected by bounded queues:
    fetch_blocks -> validate_blocks -> ingest_blocks. Each stage signals
    completion downstream by putting None on its output queue.
    """
    self.log.info(f"Start syncing from fork point at {fork_point_height} up to {target_peak_sb_height}")
    batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
    counter = 0
    if fork_point_height != 0:
        # warmup the cache
        curr = self.blockchain.height_to_block_record(fork_point_height)
        while (
            curr.sub_epoch_summary_included is None
            or counter < 3 * self.constants.MAX_SUB_SLOT_BLOCKS + self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK + 3
        ):
            res = await self.blockchain.get_block_record_from_db(curr.prev_hash)
            if res is None:
                break
            curr = res
            self.blockchain.add_block_record(curr)
            counter += 1

    # normally "fork_point" or "fork_height" refers to the first common
    # block between the main chain and the fork. Here "fork_point_height"
    # seems to refer to the first diverging block
    # in case we're validating a reorg fork (i.e. not extending the
    # main chain), we need to record the coin set from that fork in
    # fork_info. Otherwise validation is very expensive, especially
    # for deep reorgs
    if fork_point_height > 0:
        fork_hash = self.blockchain.height_to_hash(uint32(fork_point_height - 1))
        assert fork_hash is not None
    else:
        fork_hash = self.constants.GENESIS_CHALLENGE
    fork_info = ForkInfo(fork_point_height - 1, fork_point_height - 1, fork_hash)

    # Seed the initial validation state (sub-slot iters, difficulty,
    # previous sub-epoch-summary block) for the first batch.
    if fork_point_height == 0:
        ssi = self.constants.SUB_SLOT_ITERS_STARTING
        diff = self.constants.DIFFICULTY_STARTING
        prev_ses_block = None
    else:
        prev_b_hash = self.blockchain.height_to_hash(fork_point_height)
        assert prev_b_hash is not None
        prev_b = await self.blockchain.get_full_block(prev_b_hash)
        assert prev_b is not None
        ssi, diff, prev_ses_block = await self.get_sub_slot_iters_difficulty_ses_block(prev_b, None, None)

    # we need an augmented blockchain to validate blocks in batches. The
    # batch must be treated as if it's part of the chain to validate the
    # blocks in it. We also need them to keep appearing as if they're part
    # of the chain when pipelining the validation of blocks. We start
    # validating the next batch while still adding the first batch to the
    # chain.
    blockchain = AugmentedBlockchain(self.blockchain)
    peers_with_peak: list[WSChiaConnection] = self.get_peers_with_peak(peak_hash)

    async def fetch_blocks(output_queue: asyncio.Queue[Optional[tuple[WSChiaConnection, list[FullBlock]]]]) -> None:
        # Stage 1: request batches of blocks from peers, rate-limited per
        # peer, and feed them to the validation stage.
        # the rate limit for respond_blocks is 100 messages / 60 seconds.
        # But the limit is scaled to 30% for outbound messages, so that's 30
        # messages per 60 seconds.
        # That's 2 seconds per request.
        seconds_per_request = 2
        start_height, end_height = 0, 0

        # the timestamp of when the next request_block message is allowed to
        # be sent. It's initialized to the current time, and bumped by the
        # seconds_per_request every time we send a request. This ensures we
        # won't exceed the 100 requests / 60 seconds rate limit.
        # Whichever peer has the lowest timestamp is the one we request
        # from. peers that take more than 5 seconds to respond are pushed to
        # the end of the queue, to be less likely to request from.

        # This should be cleaned up to not be a hard coded value, and maybe
        # allow higher request rates (and align the request_blocks and
        # respond_blocks rate limits).
        now = time.monotonic()
        new_peers_with_peak: list[tuple[WSChiaConnection, float]] = [(c, now) for c in peers_with_peak[:]]
        self.log.info(f"peers with peak: {len(new_peers_with_peak)}")
        random.shuffle(new_peers_with_peak)
        try:
            # block request ranges are *inclusive*, this requires some
            # gymnastics of this range (+1 to make it exclusive, like normal
            # ranges) and then -1 when forming the request message
            for start_height in range(fork_point_height, target_peak_sb_height + 1, batch_size):
                end_height = min(target_peak_sb_height, start_height + batch_size - 1)
                request = RequestBlocks(uint32(start_height), uint32(end_height), True)
                # Prefer the peer whose next-allowed-request time is soonest.
                new_peers_with_peak.sort(key=lambda pair: pair[1])
                fetched = False
                for idx, (peer, timestamp) in enumerate(new_peers_with_peak):
                    if peer.closed:
                        continue

                    start = time.monotonic()
                    if start < timestamp:
                        # rate limit ourselves, since we sent a message to
                        # this peer too recently
                        await asyncio.sleep(timestamp - start)
                        start = time.monotonic()

                    # update the timestamp, now that we're sending a request
                    # it's OK for the timestamp to fall behind wall-clock
                    # time. It just means we're allowed to send more
                    # requests to catch up
                    if is_localhost(peer.peer_info.host):
                        # we don't apply rate limits to localhost, and our
                        # tests depend on it
                        bump = 0.1
                    else:
                        bump = seconds_per_request

                    new_peers_with_peak[idx] = (
                        new_peers_with_peak[idx][0],
                        new_peers_with_peak[idx][1] + bump,
                    )
                    # the fewer peers we have, the more willing we should be
                    # to wait for them.
                    timeout = int(30 + 30 / len(new_peers_with_peak))
                    response = await peer.call_api(FullNodeAPI.request_blocks, request, timeout=timeout)
                    end = time.monotonic()
                    if response is None:
                        self.log.info(f"peer timed out after {end - start:.1f} s")
                        await peer.close()
                    elif isinstance(response, RespondBlocks):
                        if end - start > 5:
                            self.log.info(f"peer took {end - start:.1f} s to respond to request_blocks")
                            # this isn't a great peer, reduce its priority
                            # to prefer any peers that had to wait for it.
                            # By setting the next allowed timestamp to now,
                            # means that any other peer that has waited for
                            # this will have its next allowed timestamp in
                            # the past, and be preferred multiple times
                            # over this peer.
                            new_peers_with_peak[idx] = (
                                new_peers_with_peak[idx][0],
                                end,
                            )
                        start = time.monotonic()
                        await output_queue.put((peer, response.blocks))
                        end = time.monotonic()
                        if end - start > 1:
                            self.log.info(
                                f"sync pipeline back-pressure. stalled {end - start:0.2f} "
                                "seconds on prevalidate block"
                            )
                        fetched = True
                        break
                if fetched is False:
                    self.log.error(f"failed fetching {start_height} to {end_height} from peers")
                    return
                if self.sync_store.peers_changed.is_set():
                    # Refresh the peer list, keeping the rate-limit timestamps
                    # of peers we already knew about.
                    existing_peers = {id(c): timestamp for c, timestamp in new_peers_with_peak}
                    peers = self.get_peers_with_peak(peak_hash)
                    new_peers_with_peak = [(c, existing_peers.get(id(c), end)) for c in peers]
                    random.shuffle(new_peers_with_peak)
                    self.sync_store.peers_changed.clear()
                    self.log.info(f"peers with peak: {len(new_peers_with_peak)}")
        except Exception as e:
            self.log.error(f"Exception fetching {start_height} to {end_height} from peer {e}")
        finally:
            # finished signal with None
            await output_queue.put(None)

    async def validate_blocks(
        input_queue: asyncio.Queue[Optional[tuple[WSChiaConnection, list[FullBlock]]]],
        output_queue: asyncio.Queue[
            Optional[
                tuple[WSChiaConnection, ValidationState, list[Awaitable[PreValidationResult]], list[FullBlock]]
            ]
        ],
    ) -> None:
        # Stage 2: kick off pre-validation of each fetched batch and pass the
        # pending futures (plus the validation state snapshot) downstream.
        nonlocal blockchain
        nonlocal fork_info
        first_batch = True

        vs = ValidationState(ssi, diff, prev_ses_block)

        try:
            while True:
                res: Optional[tuple[WSChiaConnection, list[FullBlock]]] = await input_queue.get()
                if res is None:
                    self.log.debug("done fetching blocks")
                    return None
                peer, blocks = res

                # skip_blocks is only relevant at the start of the sync,
                # to skip blocks we already have in the database (and have
                # been validated). Once we start validating blocks, we
                # shouldn't be skipping any.
                blocks_to_validate = await self.skip_blocks(blockchain, blocks, fork_info, vs)
                assert first_batch or len(blocks_to_validate) == len(blocks)
                next_validation_state = copy.copy(vs)

                if len(blocks_to_validate) == 0:
                    continue

                first_batch = False

                futures: list[Awaitable[PreValidationResult]] = []
                for block in blocks_to_validate:
                    futures.extend(
                        await self.prevalidate_blocks(
                            blockchain,
                            [block],
                            vs,
                            summaries,
                        )
                    )
                start = time.monotonic()
                await output_queue.put((peer, next_validation_state, list(futures), blocks_to_validate))
                end = time.monotonic()
                if end - start > 1:
                    self.log.info(f"sync pipeline back-pressure. stalled {end - start:0.2f} seconds on add_block()")
        except Exception:
            self.log.exception("Exception validating")
        finally:
            # finished signal with None
            await output_queue.put(None)

    async def ingest_blocks(
        input_queue: asyncio.Queue[
            Optional[
                tuple[WSChiaConnection, ValidationState, list[Awaitable[PreValidationResult]], list[FullBlock]]
            ]
        ],
    ) -> None:
        # Stage 3: await the pre-validation futures and commit each batch to
        # the blockchain, logging throughput along the way.
        nonlocal fork_info
        block_rate = 0
        block_rate_time = time.monotonic()
        block_rate_height = -1
        while True:
            res = await input_queue.get()
            if res is None:
                self.log.debug("done validating blocks")
                return None
            peer, vs, futures, blocks = res
            start_height = blocks[0].height
            end_height = blocks[-1].height

            if block_rate_height == -1:
                block_rate_height = start_height

            pre_validation_results = list(await asyncio.gather(*futures))
            # The ValidationState object (vs) is an in-out parameter. the add_block_batch()
            # call will update it
            state_change_summary, err = await self.add_prevalidated_blocks(
                blockchain,
                blocks,
                pre_validation_results,
                fork_info,
                peer.peer_info,
                vs,
            )
            if err is not None:
                await peer.close(600)
                raise ValueError(f"Failed to validate block batch {start_height} to {end_height}: {err}")
            if end_height - block_rate_height > 100:
                # Recompute the blocks/s figure roughly every 100 blocks.
                now = time.monotonic()
                block_rate = int((end_height - block_rate_height) // (now - block_rate_time))
                block_rate_time = now
                block_rate_height = end_height

            self.log.info(
                f"Added blocks {start_height} to {end_height} ({block_rate} blocks/s) (from: {peer.peer_info.ip})"
            )
            peak: Optional[BlockRecord] = self.blockchain.get_peak()
            if state_change_summary is not None:
                assert peak is not None
                # Hints must be added to the DB. The other post-processing tasks are not required when syncing
                hints_to_add, _ = get_hints_and_subscription_coin_ids(
                    state_change_summary,
                    self.subscriptions.has_coin_subscription,
                    self.subscriptions.has_puzzle_subscription,
                )
                await self.hint_store.add_hints(hints_to_add)
            # Note that end_height is not necessarily the peak at this
            # point. In case of a re-org, it may even be significantly
            # higher than _peak_height, and still not be the peak.
            # clean_block_record() will not necessarily honor this cut-off
            # height, in that case.
            self.blockchain.clean_block_record(end_height - self.constants.BLOCKS_CACHE_SIZE)

    # Bounded queues provide back-pressure between the pipeline stages.
    block_queue: asyncio.Queue[Optional[tuple[WSChiaConnection, list[FullBlock]]]] = asyncio.Queue(maxsize=10)
    validation_queue: asyncio.Queue[
        Optional[tuple[WSChiaConnection, ValidationState, list[Awaitable[PreValidationResult]], list[FullBlock]]]
    ] = asyncio.Queue(maxsize=10)

    fetch_task = create_referenced_task(fetch_blocks(block_queue))
    validate_task = create_referenced_task(validate_blocks(block_queue, validation_queue))
    ingest_task = create_referenced_task(ingest_blocks(validation_queue))
    try:
        await asyncio.gather(fetch_task, validate_task, ingest_task)
    except Exception:
        self.log.exception("sync from fork point failed")
    finally:
        cancel_task_safe(validate_task, self.log)
        cancel_task_safe(fetch_task)
        cancel_task_safe(ingest_task)

        # we still need to await all the pending futures of the
        # prevalidation steps posted to the thread pool
        while not validation_queue.empty():
            result = validation_queue.get_nowait()
            if result is None:
                continue

            _, _, futures, _ = result
            await asyncio.gather(*futures)
|
|
1420
|
+
def get_peers_with_peak(self, peak_hash: bytes32) -> list[WSChiaConnection]:
    """Return the currently connected peers known to have ``peak_hash`` as their peak."""
    matching_ids: set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_hash])
    if not matching_ids:
        self.log.warning(f"Not syncing, no peers with header_hash {peak_hash} ")
        return []
    connections = self.server.all_connections.values()
    return [conn for conn in connections if conn.peer_node_id in matching_ids]
|
|
1427
|
+
async def _wallets_sync_task_handler(self) -> None:
    """Consume queued wallet updates until the node shuts down.

    Failures are logged and swallowed so one bad update cannot stop the loop.
    """
    while not self._shut_down:
        try:
            pending_update = await self.wallet_sync_queue.get()
            await self.update_wallets(pending_update)
        except Exception:
            self.log.exception("Wallet sync task failure")
|
|
1436
|
+
async def update_wallets(self, wallet_update: WalletUpdate) -> None:
    """Push coin-state changes to subscribed peers and announce the new peak to wallets."""
    self.log.debug(
        f"update_wallets - fork_height: {wallet_update.fork_height}, peak_height: {wallet_update.peak.height}"
    )
    peak = wallet_update.peak
    # Group the changed coin states by the peer that subscribed to them,
    # either directly (coin id / puzzle hash) or via a hint puzzle hash.
    per_peer_changes: dict[bytes32, set[CoinState]] = {}
    for record in wallet_update.coin_records:
        coin_id = record.name
        interested = self.subscriptions.peers_for_coin_id(coin_id)
        interested.update(self.subscriptions.peers_for_puzzle_hash(record.coin.puzzle_hash))
        hint = wallet_update.hints.get(coin_id)
        if hint is not None:
            interested.update(self.subscriptions.peers_for_puzzle_hash(hint))
        for peer_id in interested:
            per_peer_changes.setdefault(peer_id, set()).add(record.coin_state)

    for peer_id, coin_states in per_peer_changes.items():
        connection = self.server.all_connections.get(peer_id)
        if connection is None:
            continue
        update_msg = CoinStateUpdate(
            peak.height,
            wallet_update.fork_height,
            peak.header_hash,
            list(coin_states),
        )
        await connection.send_message(make_msg(ProtocolMessageTypes.coin_state_update, update_msg))

    # Tell wallets about the new peak
    new_peak_message = make_msg(
        ProtocolMessageTypes.new_peak_wallet,
        wallet_protocol.NewPeakWallet(
            peak.header_hash,
            peak.height,
            peak.weight,
            wallet_update.fork_height,
        ),
    )
    await self.server.send_to_all([new_peak_message], NodeType.WALLET)
|
|
1474
|
+
async def add_block_batch(
    self,
    all_blocks: list[FullBlock],
    peer_info: PeerInfo,
    fork_info: ForkInfo,
    vs: ValidationState,  # in-out parameter
    wp_summaries: Optional[list[SubEpochSummary]] = None,
) -> tuple[bool, Optional[StateChangeSummary]]:
    """Validate and add a contiguous batch of blocks to the blockchain.

    ``vs`` is updated in place by the add step; ``fork_info`` accumulates the
    fork's coin set across calls.
    """
    # Precondition: All blocks must be contiguous blocks, index i+1 must be the parent of index i
    # Returns a bool for success, as well as a StateChangeSummary if the peak was advanced

    pre_validate_start = time.monotonic()
    blockchain = AugmentedBlockchain(self.blockchain)
    # Drop blocks that are already validated and in the database.
    blocks_to_validate = await self.skip_blocks(blockchain, all_blocks, fork_info, vs)

    if len(blocks_to_validate) == 0:
        return True, None

    # Pre-validation gets a copy of vs so the add step below can consume
    # and update the original state in order.
    futures = await self.prevalidate_blocks(
        blockchain,
        blocks_to_validate,
        copy.copy(vs),
        wp_summaries,
    )
    pre_validation_results = list(await asyncio.gather(*futures))

    agg_state_change_summary, err = await self.add_prevalidated_blocks(
        blockchain,
        blocks_to_validate,
        pre_validation_results,
        fork_info,
        peer_info,
        vs,
    )

    if agg_state_change_summary is not None:
        # The peak advanced; notify observers and log timing.
        self._state_changed("new_peak")
        self.log.debug(
            f"Total time for {len(blocks_to_validate)} blocks: {time.monotonic() - pre_validate_start}, "
            f"advanced: True"
        )
    return err is None, agg_state_change_summary
|
|
1517
|
+
async def skip_blocks(
    self,
    blockchain: AugmentedBlockchain,
    all_blocks: list[FullBlock],
    fork_info: ForkInfo,
    vs: ValidationState,  # in-out parameter
) -> list[FullBlock]:
    """
    Return the suffix of all_blocks that still needs validation.

    Walks the batch from the front; any block whose record is already in the
    DB is skipped (its record is registered with the augmented blockchain and
    vs/fork_info are advanced to account for it). Stops at the first unknown
    block and returns it plus everything after it.

    Side effects: mutates vs (ssi, difficulty, prev_ses_block) and fork_info
    to reflect the skipped prefix.
    """
    blocks_to_validate: list[FullBlock] = []
    for i, block in enumerate(all_blocks):
        header_hash = block.header_hash
        block_rec = await blockchain.get_block_record_from_db(header_hash)
        if block_rec is None:
            # first block we don't have; everything from here on must be validated
            blocks_to_validate = all_blocks[i:]
            break
        else:
            blockchain.add_block_record(block_rec)
            if block_rec.sub_epoch_summary_included:
                # already validated block, update sub slot iters, difficulty and prev sub epoch summary
                vs.prev_ses_block = block_rec
                if block_rec.sub_epoch_summary_included.new_sub_slot_iters is not None:
                    vs.ssi = block_rec.sub_epoch_summary_included.new_sub_slot_iters
                if block_rec.sub_epoch_summary_included.new_difficulty is not None:
                    vs.difficulty = block_rec.sub_epoch_summary_included.new_difficulty

        # the below section updates the fork_info object, if
        # there is one.
        if block.height <= fork_info.peak_height:
            continue
        # we have already validated this block once, no need to do it again.
        # however, if this block is not part of the main chain, we need to
        # update the fork context with its additions and removals
        if blockchain.height_to_hash(block.height) == header_hash:
            # we're on the main chain, just fast-forward the fork height
            fork_info.reset(block.height, header_hash)
        else:
            # We have already validated the block, but if it's not part of the
            # main chain, we still need to re-run it to update the additions and
            # removals in fork_info.
            await self.blockchain.advance_fork_info(block, fork_info)
            await self.blockchain.run_single_block(block, fork_info)
    return blocks_to_validate
|
|
1558
|
+
|
|
1559
|
+
async def prevalidate_blocks(
    self,
    blockchain: AugmentedBlockchain,
    blocks_to_validate: list[FullBlock],
    vs: ValidationState,
    wp_summaries: Optional[list[SubEpochSummary]] = None,
) -> Sequence[Awaitable[PreValidationResult]]:
    """
    Thin wrapper over pre_validate_block(): start pre-validation for each
    block in the batch and return the resulting futures, in batch order.

    Args:
        blockchain: augmented view used to look up batch-local block records.
        blocks_to_validate: the blocks to pre-validate.
        vs: validation state for the first block of the batch. This is an
            in-out parameter — pre_validate_block() advances it, so after the
            call it describes the state for the next batch. The caller passes
            a copy when it needs to keep the original.
        wp_summaries: optional weight-proof sub-epoch summaries.
    """
    # Signature validation runs in the worker pool since it is expensive and
    # we have no cached transactions for these blocks (unlike normal
    # operation, where blocks arrive one at a time). No BLS cache is passed
    # (the None argument) for the same reason: no cache hits are possible.
    return [
        await pre_validate_block(
            self.constants,
            blockchain,
            candidate,
            self.blockchain.pool,
            None,
            vs,
            wp_summaries=wp_summaries,
        )
        for candidate in blocks_to_validate
    ]
|
|
1596
|
+
|
|
1597
|
+
async def add_prevalidated_blocks(
    self,
    blockchain: AugmentedBlockchain,
    blocks_to_validate: list[FullBlock],
    pre_validation_results: list[PreValidationResult],
    fork_info: ForkInfo,
    peer_info: PeerInfo,
    vs: ValidationState,  # in-out parameter
) -> tuple[Optional[StateChangeSummary], Optional[Err]]:
    """
    Commit pre-validated blocks to the blockchain, one at a time.

    Aggregates the per-block StateChangeSummary objects into one (keeping the
    first fork height) and stops early on an invalid or disconnected block.

    Returns:
        (aggregated summary or None if the peak never advanced,
         error from the first failed block or None on success).
    """
    agg_state_change_summary: Optional[StateChangeSummary] = None
    # record of the parent of the first block; used for computing expected
    # sub-slot iters / difficulty at epoch boundaries below
    block_record = await self.blockchain.get_block_record_from_db(blocks_to_validate[0].prev_header_hash)
    for i, block in enumerate(blocks_to_validate):
        header_hash = block.header_hash
        assert vs.prev_ses_block is None or vs.prev_ses_block.height < block.height
        assert pre_validation_results[i].required_iters is not None
        state_change_summary: Optional[StateChangeSummary]
        # when adding blocks in batches, we won't have any overlapping
        # signatures with the mempool. There won't be any cache hits, so
        # there's no need to pass the BLS cache in

        if len(block.finished_sub_slots) > 0:
            cc_sub_slot = block.finished_sub_slots[0].challenge_chain
            if cc_sub_slot.new_sub_slot_iters is not None or cc_sub_slot.new_difficulty is not None:
                # epoch transition: adopt the new parameters carried by the
                # sub slot and sanity-check them against what we compute
                expected_sub_slot_iters, expected_difficulty = get_next_sub_slot_iters_and_difficulty(
                    self.constants, True, block_record, blockchain
                )
                assert cc_sub_slot.new_sub_slot_iters is not None
                vs.ssi = cc_sub_slot.new_sub_slot_iters
                assert cc_sub_slot.new_difficulty is not None
                vs.difficulty = cc_sub_slot.new_difficulty
                assert expected_sub_slot_iters == vs.ssi
                assert expected_difficulty == vs.difficulty
        block_rec = blockchain.block_record(block.header_hash)
        result, error, state_change_summary = await self.blockchain.add_block(
            block,
            pre_validation_results[i],
            vs.ssi,
            fork_info,
            prev_ses_block=vs.prev_ses_block,
            block_record=block_rec,
        )
        if error is None:
            # the record now lives in the real blockchain; drop the
            # batch-local copy from the augmented view
            blockchain.remove_extra_block(header_hash)

        if result == AddBlockResult.NEW_PEAK:
            # since this block just added a new peak, we don't need any
            # fork history from fork_info anymore
            fork_info.reset(block.height, header_hash)
            assert state_change_summary is not None
            # Since all blocks are contiguous, we can simply append the rollback changes and npc results
            if agg_state_change_summary is None:
                agg_state_change_summary = state_change_summary
            else:
                # Keeps the old, original fork_height, since the next blocks will have fork height h-1
                # Groups up all state changes into one
                agg_state_change_summary = StateChangeSummary(
                    state_change_summary.peak,
                    agg_state_change_summary.fork_height,
                    agg_state_change_summary.rolled_back_records + state_change_summary.rolled_back_records,
                    agg_state_change_summary.removals + state_change_summary.removals,
                    agg_state_change_summary.additions + state_change_summary.additions,
                    agg_state_change_summary.new_rewards + state_change_summary.new_rewards,
                )
        elif result in {AddBlockResult.INVALID_BLOCK, AddBlockResult.DISCONNECTED_BLOCK}:
            if error is not None:
                self.log.error(f"Error: {error}, Invalid block from peer: {peer_info} ")
            # abort the batch; return what was aggregated so far
            return agg_state_change_summary, error
        block_record = blockchain.block_record(header_hash)
        assert block_record is not None
        if block_record.sub_epoch_summary_included is not None:
            vs.prev_ses_block = block_record
            if self.weight_proof_handler is not None:
                await self.weight_proof_handler.create_prev_sub_epoch_segments()
    if agg_state_change_summary is not None:
        self._state_changed("new_peak")
    return agg_state_change_summary, None
|
|
1673
|
+
|
|
1674
|
+
async def get_sub_slot_iters_difficulty_ses_block(
    self, block: FullBlock, ssi: Optional[uint64], diff: Optional[uint64]
) -> tuple[uint64, uint64, Optional[BlockRecord]]:
    """
    Resolve sub-slot iters, difficulty, and the previous sub-epoch-summary
    block for `block`, filling in whichever of ssi/diff the caller did not
    already know.

    Resolution order: genesis constants (height 0), then values carried by
    the block's first finished sub slot, then a walk backwards through the
    chain until both parameters and the previous SES block are found.
    """
    prev_ses_block = None
    if ssi is None or diff is None:
        if block.height == 0:
            # genesis block: chain-start constants apply
            ssi = self.constants.SUB_SLOT_ITERS_STARTING
            diff = self.constants.DIFFICULTY_STARTING
    if ssi is None or diff is None:
        if len(block.finished_sub_slots) > 0:
            # an epoch transition in this block's first sub slot carries the
            # new parameters directly
            if block.finished_sub_slots[0].challenge_chain.new_difficulty is not None:
                diff = block.finished_sub_slots[0].challenge_chain.new_difficulty
            if block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters is not None:
                ssi = block.finished_sub_slots[0].challenge_chain.new_sub_slot_iters

    if block.height > 0:
        # walk backwards from the parent until we have all three values
        prev_b = await self.blockchain.get_block_record_from_db(block.prev_header_hash)
        curr = prev_b
        while prev_ses_block is None or ssi is None or diff is None:
            assert curr is not None
            if curr.height == 0:
                # reached genesis: fall back to chain-start constants and use
                # the genesis record as the previous SES block
                if ssi is None or diff is None:
                    ssi = self.constants.SUB_SLOT_ITERS_STARTING
                    diff = self.constants.DIFFICULTY_STARTING
                if prev_ses_block is None:
                    prev_ses_block = curr
            if curr.sub_epoch_summary_included is not None:
                if prev_ses_block is None:
                    prev_ses_block = curr
                if ssi is None or diff is None:
                    if curr.sub_epoch_summary_included.new_difficulty is not None:
                        diff = curr.sub_epoch_summary_included.new_difficulty
                    if curr.sub_epoch_summary_included.new_sub_slot_iters is not None:
                        ssi = curr.sub_epoch_summary_included.new_sub_slot_iters
            curr = await self.blockchain.get_block_record_from_db(curr.prev_hash)
    assert ssi is not None
    assert diff is not None
    return ssi, diff, prev_ses_block
|
|
1712
|
+
|
|
1713
|
+
async def _finish_sync(self, fork_point: Optional[uint32]) -> None:
    """
    Finalize sync by setting sync mode to False, clearing all sync information, and adding any final
    blocks that we have finalized recently.

    Args:
        fork_point: height the long sync forked at; when None it defaults to
            one below the current peak.
    """
    self.log.info("long sync done")
    self.sync_store.set_long_sync(False)
    self.sync_store.set_sync_mode(False)
    self._state_changed("sync_mode")
    if self._server is None:
        # server was never started (or already shut down); nothing to broadcast
        return None

    # peak post-processing (stage 1) must run under the blockchain mutex
    async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()
        if peak_fb is not None:
            if fork_point is None:
                fork_point = uint32(max(peak_fb.height - 1, 0))
            assert peak is not None
            state_change_summary = StateChangeSummary(peak, fork_point, [], [], [], [])
            ppp_result: PeakPostProcessingResult = await self.peak_post_processing(
                peak_fb, state_change_summary, None
            )

    if peak_fb is not None:
        # Call outside of priority_mutex to encourage concurrency
        await self.peak_post_processing_2(peak_fb, None, state_change_summary, ppp_result)

    if peak is not None and self.weight_proof_handler is not None:
        # warm up the weight proof for the new peak before announcing the block
        await self.weight_proof_handler.get_proof_of_weight(peak.header_hash)
        self._state_changed("block")
|
|
1744
|
+
|
|
1745
|
+
def has_valid_pool_sig(self, block: Union[UnfinishedBlock, FullBlock]) -> bool:
    """
    Check the block's pool signature, where one is required.

    A signature check is only required when the pool target is the pre-farm
    genesis pool target, the block is not directly after genesis, and the
    proof of space carries a pool public key. All other blocks pass
    trivially.
    """
    foliage_data = block.foliage.foliage_block_data
    pool_pk = block.reward_chain_block.proof_of_space.pool_public_key
    pre_farm_target = PoolTarget(self.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0))

    needs_check = (
        foliage_data.pool_target == pre_farm_target
        and block.foliage.prev_block_hash != self.constants.GENESIS_CHALLENGE
        and pool_pk is not None
    )
    if not needs_check:
        return True

    assert foliage_data.pool_signature is not None
    # the signature must cover the serialized pool target
    return AugSchemeMPL.verify(
        pool_pk,
        bytes(foliage_data.pool_target),
        foliage_data.pool_signature,
    )
|
|
1760
|
+
|
|
1761
|
+
async def signage_point_post_processing(
    self,
    request: full_node_protocol.RespondSignagePoint,
    peer: Optional[WSChiaConnection],
    ip_sub_slot: Optional[EndOfSubSlotBundle],
) -> None:
    """
    Broadcast a freshly completed signage point to full node peers and to
    farmers, and record its arrival time.

    Args:
        request: the signage point VDFs/proofs that were just finished.
        peer: the peer the signage point came from (excluded from the
            full-node broadcast).
        ip_sub_slot: the current infused-point sub slot; required (asserted)
            once the chain is past MAX_SUB_SLOT_BLOCKS, to decide whether the
            signage point falls into a new sub slot.
    """
    self.log.info(
        f"⏲️ Finished signage point {request.index_from_challenge}/"
        f"{self.constants.NUM_SPS_SUB_SLOT}: "
        f"CC: {request.challenge_chain_vdf.output.get_hash().hex()} "
        f"RC: {request.reward_chain_vdf.output.get_hash().hex()} "
    )
    self.signage_point_times[request.index_from_challenge] = time.time()
    sub_slot_tuple = self.full_node_store.get_sub_slot(request.challenge_chain_vdf.challenge)
    prev_challenge: Optional[bytes32]
    if sub_slot_tuple is not None:
        prev_challenge = sub_slot_tuple[0].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
    else:
        prev_challenge = None

    # Notify nodes of the new signage point
    broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
        prev_challenge,
        request.challenge_chain_vdf.challenge,
        request.index_from_challenge,
        request.reward_chain_vdf.challenge,
    )
    msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
    await self.server.send_to_all([msg], NodeType.FULL_NODE, peer.peer_node_id)

    peak = self.blockchain.get_peak()
    if peak is not None and peak.height > self.constants.MAX_SUB_SLOT_BLOCKS:
        sub_slot_iters = peak.sub_slot_iters
        # difficulty is the weight delta between the peak and its parent
        difficulty = uint64(peak.weight - self.blockchain.block_record(peak.prev_hash).weight)
        # Makes sure to potentially update the difficulty if we are past the peak (into a new sub-slot)
        assert ip_sub_slot is not None
        if request.challenge_chain_vdf.challenge != ip_sub_slot.challenge_chain.get_hash():
            next_difficulty = self.blockchain.get_next_difficulty(peak.header_hash, True)
            next_sub_slot_iters = self.blockchain.get_next_slot_iters(peak.header_hash, True)
            difficulty = next_difficulty
            sub_slot_iters = next_sub_slot_iters
    else:
        # early chain (or no peak yet): use chain-start constants
        difficulty = self.constants.DIFFICULTY_STARTING
        sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING

    # Notify farmers of the new signage point
    broadcast_farmer = farmer_protocol.NewSignagePoint(
        request.challenge_chain_vdf.challenge,
        request.challenge_chain_vdf.output.get_hash(),
        request.reward_chain_vdf.output.get_hash(),
        difficulty,
        sub_slot_iters,
        request.index_from_challenge,
        uint32(0) if peak is None else peak.height,
        sp_source_data=SignagePointSourceData(
            vdf_data=SPVDFSourceData(request.challenge_chain_vdf.output, request.reward_chain_vdf.output)
        ),
    )
    msg = make_msg(ProtocolMessageTypes.new_signage_point, broadcast_farmer)
    await self.server.send_to_all([msg], NodeType.FARMER)

    self._state_changed("signage_point", {"broadcast_farmer": broadcast_farmer})
|
|
1823
|
+
|
|
1824
|
+
async def peak_post_processing(
    self,
    block: FullBlock,
    state_change_summary: StateChangeSummary,
    peer: Optional[WSChiaConnection],
) -> PeakPostProcessingResult:
    """
    Must be called under self.blockchain.priority_mutex. This updates the internal state of the full node with the
    latest peak information. It also notifies peers about the new peak.

    Args:
        block: the full block at the new peak.
        state_change_summary: summary of the blockchain changes that produced
            this peak (fork height, rolled back records, additions, removals).
        peer: source peer, if any; forwarded for signage point re-broadcast.

    Returns:
        A PeakPostProcessingResult with the mempool update results, full node
        store peak result, added hints, and coin ids wallets should look up.
    """

    record = state_change_summary.peak
    difficulty = self.blockchain.get_next_difficulty(record.header_hash, False)
    sub_slot_iters = self.blockchain.get_next_slot_iters(record.header_hash, False)

    self.log.info(
        f"🌱 Updated peak to height {record.height}, weight {record.weight}, "
        f"hh {record.header_hash.hex()}, "
        f"ph {record.prev_hash.hex()}, "
        f"forked at {state_change_summary.fork_height}, rh: {record.reward_infusion_new_challenge.hex()}, "
        f"total iters: {record.total_iters}, "
        f"overflow: {record.overflow}, "
        f"deficit: {record.deficit}, "
        f"difficulty: {difficulty}, "
        f"sub slot iters: {sub_slot_iters}, "
        f"Generator size: "
        f"{len(bytes(block.transactions_generator)) if block.transactions_generator else 'No tx'}, "
        f"Generator ref list size: "
        f"{len(block.transactions_generator_ref_list) if block.transactions_generator else 'No tx'}"
    )

    # collect hints for coins that wallets have subscriptions on
    hints_to_add, lookup_coin_ids = get_hints_and_subscription_coin_ids(
        state_change_summary,
        self.subscriptions.has_coin_subscription,
        self.subscriptions.has_puzzle_subscription,
    )
    await self.hint_store.add_hints(hints_to_add)

    sub_slots = await self.blockchain.get_sp_and_ip_sub_slots(record.header_hash)
    assert sub_slots is not None

    if not self.sync_store.get_sync_mode():
        # keep the in-memory block record cache bounded during normal operation
        self.blockchain.clean_block_records()

    fork_block: Optional[BlockRecord] = None
    if state_change_summary.fork_height != block.height - 1 and block.height != 0:
        # This is a reorg
        fork_hash: Optional[bytes32] = self.blockchain.height_to_hash(state_change_summary.fork_height)
        assert fork_hash is not None
        fork_block = await self.blockchain.get_block_record_from_db(fork_hash)

    fns_peak_result: FullNodeStorePeakResult = self.full_node_store.new_peak(
        record,
        block,
        sub_slots[0],
        sub_slots[1],
        fork_block,
        self.blockchain,
        sub_slot_iters,
        difficulty,
    )

    # re-broadcast any signage points the store revealed for the new peak
    if fns_peak_result.new_signage_points is not None and peer is not None:
        for index, sp in fns_peak_result.new_signage_points:
            assert (
                sp.cc_vdf is not None
                and sp.cc_proof is not None
                and sp.rc_vdf is not None
                and sp.rc_proof is not None
            )
            await self.signage_point_post_processing(
                RespondSignagePoint(index, sp.cc_vdf, sp.cc_proof, sp.rc_vdf, sp.rc_proof), peer, sub_slots[1]
            )

    if sub_slots[1] is None:
        assert record.ip_sub_slot_total_iters(self.constants) == 0
        # Ensure the signage point is also in the store, for consistency
        self.full_node_store.new_signage_point(
            record.signage_point_index,
            self.blockchain,
            record,
            record.sub_slot_iters,
            SignagePoint(
                block.reward_chain_block.challenge_chain_sp_vdf,
                block.challenge_chain_sp_proof,
                block.reward_chain_block.reward_chain_sp_vdf,
                block.reward_chain_sp_proof,
            ),
            skip_vdf_validation=True,
        )

    # Update the mempool (returns successful pending transactions added to the mempool)
    spent_coins: list[bytes32] = [coin_id for coin_id, _ in state_change_summary.removals]
    mempool_new_peak_result = await self.mempool_manager.new_peak(self.blockchain.get_tx_peak(), spent_coins)

    return PeakPostProcessingResult(
        mempool_new_peak_result.items,
        mempool_new_peak_result.removals,
        fns_peak_result,
        hints_to_add,
        lookup_coin_ids,
    )
|
|
1926
|
+
|
|
1927
|
+
async def peak_post_processing_2(
    self,
    block: FullBlock,
    peer: Optional[WSChiaConnection],
    state_change_summary: StateChangeSummary,
    ppp_result: PeakPostProcessingResult,
) -> None:
    """
    Does NOT need to be called under the blockchain lock. Handle other parts of post processing like communicating
    with peers

    Args:
        block: the full block at the new peak.
        peer: source peer, excluded from the new-peak broadcast when set.
        state_change_summary: the summary produced while adding the peak.
        ppp_result: the result of the first (locked) post-processing stage.
    """
    record = state_change_summary.peak
    # announce transactions that made it into the mempool at the new peak
    for new_peak_item in ppp_result.mempool_peak_result:
        self.log.debug(f"Added transaction to mempool: {new_peak_item.transaction_id}")
        mempool_item = self.mempool_manager.get_mempool_item(new_peak_item.transaction_id)
        assert mempool_item is not None
        await self.broadcast_added_tx(mempool_item)

    # If there were pending end of slots that happen after this peak, broadcast them if they are added
    if ppp_result.fns_peak_result.added_eos is not None:
        broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
            ppp_result.fns_peak_result.added_eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
            ppp_result.fns_peak_result.added_eos.challenge_chain.get_hash(),
            uint8(0),
            ppp_result.fns_peak_result.added_eos.reward_chain.end_of_slot_vdf.challenge,
        )
        msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
        await self.server.send_to_all([msg], NodeType.FULL_NODE)

    # TODO: maybe add and broadcast new IPs as well

    if record.height % 1000 == 0:
        # Occasionally clear data in full node store to keep memory usage small
        self.full_node_store.clear_old_cache_entries()

    if self.sync_store.get_sync_mode() is False:
        await self.send_peak_to_timelords(block)
        await self.broadcast_removed_tx(ppp_result.mempool_removals)

        # Tell full nodes about the new peak
        msg = make_msg(
            ProtocolMessageTypes.new_peak,
            full_node_protocol.NewPeak(
                record.header_hash,
                record.height,
                record.weight,
                state_change_summary.fork_height,
                block.reward_chain_block.get_unfinished().get_hash(),
            ),
        )
        if peer is not None:
            # don't echo the peak back to the peer it came from
            await self.server.send_to_all([msg], NodeType.FULL_NODE, peer.peer_node_id)
        else:
            await self.server.send_to_all([msg], NodeType.FULL_NODE)

    # only 32-byte hints are valid coin-id/puzzle-hash hints for wallets
    coin_hints: dict[bytes32, bytes32] = {
        coin_id: bytes32(hint) for coin_id, hint in ppp_result.hints if len(hint) == 32
    }

    peak = Peak(
        state_change_summary.peak.header_hash, state_change_summary.peak.height, state_change_summary.peak.weight
    )

    # Looks up coin records in DB for the coins that wallets are interested in
    new_states = await self.coin_store.get_coin_records(ppp_result.lookup_coin_ids)

    await self.wallet_sync_queue.put(
        WalletUpdate(
            state_change_summary.fork_height,
            peak,
            state_change_summary.rolled_back_records + new_states,
            coin_hints,
        )
    )

    self._state_changed("new_peak")
|
|
2003
|
+
|
|
2004
|
+
async def add_block(
|
|
2005
|
+
self,
|
|
2006
|
+
block: FullBlock,
|
|
2007
|
+
peer: Optional[WSChiaConnection] = None,
|
|
2008
|
+
bls_cache: Optional[BLSCache] = None,
|
|
2009
|
+
raise_on_disconnected: bool = False,
|
|
2010
|
+
fork_info: Optional[ForkInfo] = None,
|
|
2011
|
+
) -> Optional[Message]:
|
|
2012
|
+
"""
|
|
2013
|
+
Add a full block from a peer full node (or ourselves).
|
|
2014
|
+
"""
|
|
2015
|
+
if self.sync_store.get_sync_mode():
|
|
2016
|
+
return None
|
|
2017
|
+
|
|
2018
|
+
# Adds the block to seen, and check if it's seen before (which means header is in memory)
|
|
2019
|
+
header_hash = block.header_hash
|
|
2020
|
+
if self.blockchain.contains_block(header_hash):
|
|
2021
|
+
if fork_info is not None:
|
|
2022
|
+
await self.blockchain.run_single_block(block, fork_info)
|
|
2023
|
+
return None
|
|
2024
|
+
|
|
2025
|
+
pre_validation_result: Optional[PreValidationResult] = None
|
|
2026
|
+
if (
|
|
2027
|
+
block.is_transaction_block()
|
|
2028
|
+
and block.transactions_info is not None
|
|
2029
|
+
and block.transactions_info.generator_root != bytes([0] * 32)
|
|
2030
|
+
and block.transactions_generator is None
|
|
2031
|
+
):
|
|
2032
|
+
# This is the case where we already had the unfinished block, and asked for this block without
|
|
2033
|
+
# the transactions (since we already had them). Therefore, here we add the transactions.
|
|
2034
|
+
unfinished_rh: bytes32 = block.reward_chain_block.get_unfinished().get_hash()
|
|
2035
|
+
foliage_hash: Optional[bytes32] = block.foliage.foliage_transaction_block_hash
|
|
2036
|
+
assert foliage_hash is not None
|
|
2037
|
+
unf_entry: Optional[UnfinishedBlockEntry] = self.full_node_store.get_unfinished_block_result(
|
|
2038
|
+
unfinished_rh, foliage_hash
|
|
2039
|
+
)
|
|
2040
|
+
assert unf_entry is None or unf_entry.result is None or unf_entry.result.validated_signature is True
|
|
2041
|
+
if (
|
|
2042
|
+
unf_entry is not None
|
|
2043
|
+
and unf_entry.unfinished_block is not None
|
|
2044
|
+
and unf_entry.unfinished_block.transactions_generator is not None
|
|
2045
|
+
and unf_entry.unfinished_block.foliage_transaction_block == block.foliage_transaction_block
|
|
2046
|
+
):
|
|
2047
|
+
# We checked that the transaction block is the same, therefore all transactions and the signature
|
|
2048
|
+
# must be identical in the unfinished and finished blocks. We can therefore use the cache.
|
|
2049
|
+
|
|
2050
|
+
# this is a transaction block, the foliage hash should be set
|
|
2051
|
+
assert foliage_hash is not None
|
|
2052
|
+
pre_validation_result = unf_entry.result
|
|
2053
|
+
assert pre_validation_result is not None
|
|
2054
|
+
block = block.replace(
|
|
2055
|
+
transactions_generator=unf_entry.unfinished_block.transactions_generator,
|
|
2056
|
+
transactions_generator_ref_list=unf_entry.unfinished_block.transactions_generator_ref_list,
|
|
2057
|
+
)
|
|
2058
|
+
else:
|
|
2059
|
+
# We still do not have the correct information for this block, perhaps there is a duplicate block
|
|
2060
|
+
# with the same unfinished block hash in the cache, so we need to fetch the correct one
|
|
2061
|
+
if peer is None:
|
|
2062
|
+
return None
|
|
2063
|
+
|
|
2064
|
+
block_response: Optional[Any] = await peer.call_api(
|
|
2065
|
+
FullNodeAPI.request_block, full_node_protocol.RequestBlock(block.height, True)
|
|
2066
|
+
)
|
|
2067
|
+
if block_response is None or not isinstance(block_response, full_node_protocol.RespondBlock):
|
|
2068
|
+
self.log.warning(
|
|
2069
|
+
f"Was not able to fetch the correct block for height {block.height} {block_response}"
|
|
2070
|
+
)
|
|
2071
|
+
return None
|
|
2072
|
+
new_block: FullBlock = block_response.block
|
|
2073
|
+
if new_block.foliage_transaction_block != block.foliage_transaction_block:
|
|
2074
|
+
self.log.warning(
|
|
2075
|
+
f"Received the wrong block for height {block.height} {new_block.header_hash.hex()}"
|
|
2076
|
+
)
|
|
2077
|
+
return None
|
|
2078
|
+
assert new_block.transactions_generator is not None
|
|
2079
|
+
|
|
2080
|
+
self.log.debug(
|
|
2081
|
+
f"Wrong info in the cache for bh {new_block.header_hash.hex()}, "
|
|
2082
|
+
f"there might be multiple blocks from the "
|
|
2083
|
+
f"same farmer with the same pospace."
|
|
2084
|
+
)
|
|
2085
|
+
# This recursion ends here, we cannot recurse again because transactions_generator is not None
|
|
2086
|
+
return await self.add_block(new_block, peer, bls_cache)
|
|
2087
|
+
state_change_summary: Optional[StateChangeSummary] = None
|
|
2088
|
+
ppp_result: Optional[PeakPostProcessingResult] = None
|
|
2089
|
+
async with (
|
|
2090
|
+
self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high),
|
|
2091
|
+
enable_profiler(self.profile_block_validation) as pr,
|
|
2092
|
+
):
|
|
2093
|
+
# After acquiring the lock, check again, because another asyncio thread might have added it
|
|
2094
|
+
if self.blockchain.contains_block(header_hash):
|
|
2095
|
+
if fork_info is not None:
|
|
2096
|
+
await self.blockchain.run_single_block(block, fork_info)
|
|
2097
|
+
return None
|
|
2098
|
+
validation_start = time.monotonic()
|
|
2099
|
+
# Tries to add the block to the blockchain, if we already validated transactions, don't do it again
|
|
2100
|
+
conds = None
|
|
2101
|
+
if pre_validation_result is not None and pre_validation_result.conds is not None:
|
|
2102
|
+
conds = pre_validation_result.conds
|
|
2103
|
+
|
|
2104
|
+
# Don't validate signatures because we want to validate them in the main thread later, since we have a
|
|
2105
|
+
# cache available
|
|
2106
|
+
prev_b = None
|
|
2107
|
+
prev_ses_block = None
|
|
2108
|
+
if block.height > 0:
|
|
2109
|
+
prev_b = await self.blockchain.get_block_record_from_db(block.prev_header_hash)
|
|
2110
|
+
assert prev_b is not None
|
|
2111
|
+
curr = prev_b
|
|
2112
|
+
while curr.height > 0 and curr.sub_epoch_summary_included is None:
|
|
2113
|
+
curr = self.blockchain.block_record(curr.prev_hash)
|
|
2114
|
+
prev_ses_block = curr
|
|
2115
|
+
new_slot = len(block.finished_sub_slots) > 0
|
|
2116
|
+
ssi, diff = get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, prev_b, self.blockchain)
|
|
2117
|
+
future = await pre_validate_block(
|
|
2118
|
+
self.blockchain.constants,
|
|
2119
|
+
AugmentedBlockchain(self.blockchain),
|
|
2120
|
+
block,
|
|
2121
|
+
self.blockchain.pool,
|
|
2122
|
+
conds,
|
|
2123
|
+
ValidationState(ssi, diff, prev_ses_block),
|
|
2124
|
+
)
|
|
2125
|
+
pre_validation_result = await future
|
|
2126
|
+
added: Optional[AddBlockResult] = None
|
|
2127
|
+
pre_validation_time = time.monotonic() - validation_start
|
|
2128
|
+
try:
|
|
2129
|
+
if pre_validation_result.error is not None:
|
|
2130
|
+
if Err(pre_validation_result.error) == Err.INVALID_PREV_BLOCK_HASH:
|
|
2131
|
+
added = AddBlockResult.DISCONNECTED_BLOCK
|
|
2132
|
+
error_code: Optional[Err] = Err.INVALID_PREV_BLOCK_HASH
|
|
2133
|
+
elif Err(pre_validation_result.error) == Err.TIMESTAMP_TOO_FAR_IN_FUTURE:
|
|
2134
|
+
raise TimestampError()
|
|
2135
|
+
else:
|
|
2136
|
+
raise ValueError(
|
|
2137
|
+
f"Failed to validate block {header_hash} height "
|
|
2138
|
+
f"{block.height}: {Err(pre_validation_result.error).name}"
|
|
2139
|
+
)
|
|
2140
|
+
else:
|
|
2141
|
+
if fork_info is None:
|
|
2142
|
+
fork_info = ForkInfo(block.height - 1, block.height - 1, block.prev_header_hash)
|
|
2143
|
+
(added, error_code, state_change_summary) = await self.blockchain.add_block(
|
|
2144
|
+
block, pre_validation_result, ssi, fork_info
|
|
2145
|
+
)
|
|
2146
|
+
if added == AddBlockResult.ALREADY_HAVE_BLOCK:
|
|
2147
|
+
return None
|
|
2148
|
+
elif added == AddBlockResult.INVALID_BLOCK:
|
|
2149
|
+
assert error_code is not None
|
|
2150
|
+
self.log.error(f"Block {header_hash} at height {block.height} is invalid with code {error_code}.")
|
|
2151
|
+
raise ConsensusError(error_code, [header_hash])
|
|
2152
|
+
elif added == AddBlockResult.DISCONNECTED_BLOCK:
|
|
2153
|
+
self.log.info(f"Disconnected block {header_hash} at height {block.height}")
|
|
2154
|
+
if raise_on_disconnected:
|
|
2155
|
+
raise RuntimeError("Expected block to be added, received disconnected block.")
|
|
2156
|
+
return None
|
|
2157
|
+
elif added == AddBlockResult.NEW_PEAK:
|
|
2158
|
+
# Evict any related BLS cache entries as we no longer need them
|
|
2159
|
+
if bls_cache is not None and pre_validation_result.conds is not None:
|
|
2160
|
+
pairs_pks, pairs_msgs = pkm_pairs(
|
|
2161
|
+
pre_validation_result.conds, self.constants.AGG_SIG_ME_ADDITIONAL_DATA
|
|
2162
|
+
)
|
|
2163
|
+
bls_cache.evict(pairs_pks, pairs_msgs)
|
|
2164
|
+
# Only propagate blocks which extend the blockchain (becomes one of the heads)
|
|
2165
|
+
assert state_change_summary is not None
|
|
2166
|
+
post_process_time = time.monotonic()
|
|
2167
|
+
ppp_result = await self.peak_post_processing(block, state_change_summary, peer)
|
|
2168
|
+
post_process_time = time.monotonic() - post_process_time
|
|
2169
|
+
|
|
2170
|
+
elif added == AddBlockResult.ADDED_AS_ORPHAN:
|
|
2171
|
+
self.log.info(
|
|
2172
|
+
f"Received orphan block of height {block.height} rh {block.reward_chain_block.get_hash()}"
|
|
2173
|
+
)
|
|
2174
|
+
post_process_time = 0
|
|
2175
|
+
else:
|
|
2176
|
+
# Should never reach here, all the cases are covered
|
|
2177
|
+
raise RuntimeError(f"Invalid result from add_block {added}")
|
|
2178
|
+
except asyncio.CancelledError:
|
|
2179
|
+
# We need to make sure to always call this method even when we get a cancel exception, to make sure
|
|
2180
|
+
# the node stays in sync
|
|
2181
|
+
if added == AddBlockResult.NEW_PEAK:
|
|
2182
|
+
assert state_change_summary is not None
|
|
2183
|
+
await self.peak_post_processing(block, state_change_summary, peer)
|
|
2184
|
+
raise
|
|
2185
|
+
|
|
2186
|
+
validation_time = time.monotonic() - validation_start
|
|
2187
|
+
|
|
2188
|
+
if ppp_result is not None:
|
|
2189
|
+
assert state_change_summary is not None
|
|
2190
|
+
await self.peak_post_processing_2(block, peer, state_change_summary, ppp_result)
|
|
2191
|
+
|
|
2192
|
+
percent_full_str = (
|
|
2193
|
+
(
|
|
2194
|
+
", percent full: "
|
|
2195
|
+
+ str(round(100.0 * float(block.transactions_info.cost) / self.constants.MAX_BLOCK_COST_CLVM, 3))
|
|
2196
|
+
+ "%"
|
|
2197
|
+
)
|
|
2198
|
+
if block.transactions_info is not None
|
|
2199
|
+
else ""
|
|
2200
|
+
)
|
|
2201
|
+
self.log.log(
|
|
2202
|
+
logging.WARNING if validation_time > 2 else logging.DEBUG,
|
|
2203
|
+
f"Block validation: {validation_time:0.2f}s, "
|
|
2204
|
+
f"pre_validation: {pre_validation_time:0.2f}s, "
|
|
2205
|
+
f"CLVM: {pre_validation_result.timing / 1000.0:0.2f}s, "
|
|
2206
|
+
f"post-process: {post_process_time:0.2f}s, "
|
|
2207
|
+
f"cost: {block.transactions_info.cost if block.transactions_info is not None else 'None'}"
|
|
2208
|
+
f"{percent_full_str} header_hash: {header_hash.hex()} height: {block.height}",
|
|
2209
|
+
)
|
|
2210
|
+
|
|
2211
|
+
# this is not covered by any unit tests as it's essentially test code
|
|
2212
|
+
# itself. It's exercised manually when investigating performance issues
|
|
2213
|
+
if validation_time > 2 and pr is not None: # pragma: no cover
|
|
2214
|
+
pr.create_stats()
|
|
2215
|
+
profile_dir = path_from_root(self.root_path, "block-validation-profile")
|
|
2216
|
+
pr.dump_stats(profile_dir / f"{block.height}-{validation_time:0.1f}.profile")
|
|
2217
|
+
|
|
2218
|
+
# This code path is reached if added == ADDED_AS_ORPHAN or NEW_TIP
|
|
2219
|
+
peak = self.blockchain.get_peak()
|
|
2220
|
+
assert peak is not None
|
|
2221
|
+
|
|
2222
|
+
# Removes all temporary data for old blocks
|
|
2223
|
+
clear_height = uint32(max(0, peak.height - 50))
|
|
2224
|
+
self.full_node_store.clear_candidate_blocks_below(clear_height)
|
|
2225
|
+
self.full_node_store.clear_unfinished_blocks_below(clear_height)
|
|
2226
|
+
|
|
2227
|
+
state_changed_data: dict[str, Any] = {
|
|
2228
|
+
"transaction_block": False,
|
|
2229
|
+
"k_size": block.reward_chain_block.proof_of_space.size,
|
|
2230
|
+
"header_hash": block.header_hash,
|
|
2231
|
+
"fork_height": None,
|
|
2232
|
+
"rolled_back_records": None,
|
|
2233
|
+
"height": block.height,
|
|
2234
|
+
"validation_time": validation_time,
|
|
2235
|
+
"pre_validation_time": pre_validation_time,
|
|
2236
|
+
}
|
|
2237
|
+
|
|
2238
|
+
if state_change_summary is not None:
|
|
2239
|
+
state_changed_data["fork_height"] = state_change_summary.fork_height
|
|
2240
|
+
state_changed_data["rolled_back_records"] = len(state_change_summary.rolled_back_records)
|
|
2241
|
+
|
|
2242
|
+
if block.transactions_info is not None:
|
|
2243
|
+
state_changed_data["transaction_block"] = True
|
|
2244
|
+
state_changed_data["block_cost"] = block.transactions_info.cost
|
|
2245
|
+
state_changed_data["block_fees"] = block.transactions_info.fees
|
|
2246
|
+
|
|
2247
|
+
if block.foliage_transaction_block is not None:
|
|
2248
|
+
state_changed_data["timestamp"] = block.foliage_transaction_block.timestamp
|
|
2249
|
+
|
|
2250
|
+
if block.transactions_generator is not None:
|
|
2251
|
+
state_changed_data["transaction_generator_size_bytes"] = len(bytes(block.transactions_generator))
|
|
2252
|
+
|
|
2253
|
+
state_changed_data["transaction_generator_ref_list"] = block.transactions_generator_ref_list
|
|
2254
|
+
if added is not None:
|
|
2255
|
+
state_changed_data["receive_block_result"] = added.value
|
|
2256
|
+
|
|
2257
|
+
self._state_changed("block", state_changed_data)
|
|
2258
|
+
|
|
2259
|
+
record = self.blockchain.block_record(block.header_hash)
|
|
2260
|
+
if self.weight_proof_handler is not None and record.sub_epoch_summary_included is not None:
|
|
2261
|
+
self._segment_task_list.append(
|
|
2262
|
+
create_referenced_task(self.weight_proof_handler.create_prev_sub_epoch_segments())
|
|
2263
|
+
)
|
|
2264
|
+
for task in self._segment_task_list[:]:
|
|
2265
|
+
if task.done():
|
|
2266
|
+
self._segment_task_list.remove(task)
|
|
2267
|
+
return None
|
|
2268
|
+
|
|
2269
|
+
    async def add_unfinished_block(
        self,
        block: UnfinishedBlock,
        peer: Optional[WSChiaConnection],
        farmed_block: bool = False,
    ) -> None:
        """
        We have received an unfinished block, either created by us, or from another peer.
        We can validate and add it and if it's a good block, propagate it to other peers and
        timelords.

        Args:
            block: the unfinished block to validate and store.
            peer: the connection the block arrived on; None when farmed locally.
                Used only to avoid echoing the block back to its sender.
            farmed_block: True when we farmed this block ourselves (changes log output only).
        """
        # Timestamp of arrival; used to compute farmer response time relative to
        # the recorded signage point time further below.
        receive_time = time.time()

        if block.prev_header_hash != self.constants.GENESIS_CHALLENGE and not self.blockchain.contains_block(
            block.prev_header_hash
        ):
            # No need to request the parent, since the peer will send it to us anyway, via NewPeak
            self.log.debug("Received a disconnected unfinished block")
            return None

        # Adds the unfinished block to seen, and check if it's seen before, to prevent
        # processing it twice. This searches for the exact version of the unfinished block (there can be many different
        # foliages for the same trunk). This is intentional, to prevent DOS attacks.
        # Note that it does not require that this block was successfully processed
        if self.full_node_store.seen_unfinished_block(block.get_hash()):
            return None

        block_hash = bytes32(block.reward_chain_block.get_hash())
        foliage_tx_hash = block.foliage.foliage_transaction_block_hash

        # If we have already added the block with this reward block hash and
        # foliage hash, return
        if self.full_node_store.get_unfinished_block2(block_hash, foliage_tx_hash)[0] is not None:
            return None

        peak: Optional[BlockRecord] = self.blockchain.get_peak()
        if peak is not None:
            if block.total_iters < peak.sp_total_iters(self.constants):
                # This means this unfinished block is pretty far behind, it will not add weight to our chain
                return None

        if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
            prev_b = None
        else:
            prev_b = self.blockchain.block_record(block.prev_header_hash)

        # Count the blocks in sub slot, and check if it's a new epoch
        if len(block.finished_sub_slots) > 0:
            num_blocks_in_ss = 1  # Curr
        else:
            # Walk back to the first block of the current sub slot, counting as we go.
            curr = self.blockchain.try_block_record(block.prev_header_hash)
            num_blocks_in_ss = 2  # Curr and prev
            while (curr is not None) and not curr.first_in_sub_slot:
                curr = self.blockchain.try_block_record(curr.prev_hash)
                num_blocks_in_ss += 1

        if num_blocks_in_ss > self.constants.MAX_SUB_SLOT_BLOCKS:
            # TODO: potentially allow overflow blocks here, which count for the next slot
            self.log.warning("Too many blocks added, not adding block")
            return None

        # The clvm generator and aggregate signature are validated outside of the lock, to allow other blocks and
        # transactions to get validated
        npc_result: Optional[NPCResult] = None
        # None when the block carries no transactions generator (no CLVM to run).
        pre_validation_time = None

        # First lock acquisition: header-only validation. Raises on failure rather
        # than returning, so bad headers never reach the store.
        async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
            start_header_time = time.monotonic()
            _, header_error = await self.blockchain.validate_unfinished_block_header(block)
            if header_error is not None:
                if header_error == Err.TIMESTAMP_TOO_FAR_IN_FUTURE:
                    raise TimestampError()
                else:
                    raise ConsensusError(header_error)
            validate_time = time.monotonic() - start_header_time
            self.log.log(
                logging.WARNING if validate_time > 2 else logging.DEBUG,
                f"Time for header validate: {validate_time:0.3f}s",
            )

        if block.transactions_generator is not None:
            pre_validation_start = time.monotonic()
            assert block.transactions_info is not None
            if len(block.transactions_generator_ref_list) > 0:
                # Resolve back-referenced generators from the chain ending at our parent.
                generator_refs = set(block.transactions_generator_ref_list)
                generators: dict[uint32, bytes] = await self.blockchain.lookup_block_generators(
                    block.prev_header_hash, generator_refs
                )
                generator_args = [generators[height] for height in block.transactions_generator_ref_list]
            else:
                generator_args = []

            height = uint32(0) if prev_b is None else uint32(prev_b.height + 1)
            flags = get_flags_for_height_and_constants(height, self.constants)

            # on mainnet we won't receive unfinished blocks for heights
            # below the hard fork activation, but we have tests where we do
            if height >= self.constants.HARD_FORK_HEIGHT:
                run_block = run_block_generator2
            else:
                run_block = run_block_generator

            # run_block() also validates the signature
            err, conditions = await asyncio.get_running_loop().run_in_executor(
                self.blockchain.pool,
                run_block,
                bytes(block.transactions_generator),
                generator_args,
                min(self.constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
                flags,
                block.transactions_info.aggregated_signature,
                self._bls_cache,
                self.constants,
            )

            if err is not None:
                raise ConsensusError(Err(err))
            assert conditions is not None
            assert conditions.validated_signature
            npc_result = NPCResult(None, conditions)
            pre_validation_time = time.monotonic() - pre_validation_start

        # Second lock acquisition: full unfinished-block validation and store update.
        async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.high):
            # TODO: pre-validate VDFs outside of lock
            validation_start = time.monotonic()
            validate_result = await self.blockchain.validate_unfinished_block(block, npc_result)
            if validate_result.error is not None:
                raise ConsensusError(Err(validate_result.error))
            validation_time = time.monotonic() - validation_start

            assert validate_result.required_iters is not None

            # Perform another check, in case we have already concurrently added the same unfinished block
            if self.full_node_store.get_unfinished_block2(block_hash, foliage_tx_hash)[0] is not None:
                return None

            if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
                height = uint32(0)
            else:
                height = uint32(self.blockchain.block_record(block.prev_header_hash).height + 1)

            ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
                self.constants,
                self.blockchain,
                validate_result.required_iters,
                block,
                True,
            )

            self.full_node_store.add_unfinished_block(height, block, validate_result)
            pre_validation_log = (
                f"pre_validation time {pre_validation_time:0.4f}, " if pre_validation_time is not None else ""
            )
            # How long the farmer took to respond after the signage point was seen.
            block_duration_in_seconds = (
                receive_time - self.signage_point_times[block.reward_chain_block.signage_point_index]
            )
            if farmed_block is True:
                self.log.info(
                    f"🍀 ️Farmed unfinished_block {block_hash}, SP: {block.reward_chain_block.signage_point_index}, "
                    f"validation time: {validation_time:0.4f} seconds, {pre_validation_log}"
                    f"cost: {block.transactions_info.cost if block.transactions_info else 'None'} "
                )
            else:
                percent_full_str = (
                    (
                        ", percent full: "
                        + str(round(100.0 * float(block.transactions_info.cost) / self.constants.MAX_BLOCK_COST_CLVM, 3))
                        + "%"
                    )
                    if block.transactions_info is not None
                    else ""
                )
                self.log.info(
                    f"Added unfinished_block {block_hash}, not farmed by us,"
                    f" SP: {block.reward_chain_block.signage_point_index} farmer response time: "
                    f"{block_duration_in_seconds:0.4f}, "
                    f"Pool pk {encode_puzzle_hash(block.foliage.foliage_block_data.pool_target.puzzle_hash, 'xch')}, "
                    f"validation time: {validation_time:0.4f} seconds, {pre_validation_log}"
                    f"cost: {block.transactions_info.cost if block.transactions_info else 'None'}"
                    f"{percent_full_str}"
                )

            sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
                self.constants,
                len(block.finished_sub_slots) > 0,
                prev_b,
                self.blockchain,
            )

            # Determine the reward-chain hash the timelord should infuse on top of.
            if block.reward_chain_block.signage_point_index == 0:
                res = self.full_node_store.get_sub_slot(block.reward_chain_block.pos_ss_cc_challenge_hash)
                if res is None:
                    if block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
                        rc_prev = self.constants.GENESIS_CHALLENGE
                    else:
                        self.log.warning(f"Do not have sub slot {block.reward_chain_block.pos_ss_cc_challenge_hash}")
                        return None
                else:
                    rc_prev = res[0].reward_chain.get_hash()
            else:
                assert block.reward_chain_block.reward_chain_sp_vdf is not None
                rc_prev = block.reward_chain_block.reward_chain_sp_vdf.challenge

        timelord_request = timelord_protocol.NewUnfinishedBlockTimelord(
            block.reward_chain_block,
            difficulty,
            sub_slot_iters,
            block.foliage,
            ses,
            rc_prev,
        )

        timelord_msg = make_msg(ProtocolMessageTypes.new_unfinished_block_timelord, timelord_request)
        await self.server.send_to_all([timelord_msg], NodeType.TIMELORD)

        # create two versions of the NewUnfinishedBlock message, one to be sent
        # to newer clients and one for older clients
        full_node_request = full_node_protocol.NewUnfinishedBlock(block.reward_chain_block.get_hash())
        msg = make_msg(ProtocolMessageTypes.new_unfinished_block, full_node_request)

        full_node_request2 = full_node_protocol.NewUnfinishedBlock2(
            block.reward_chain_block.get_hash(), block.foliage.foliage_transaction_block_hash
        )
        msg2 = make_msg(ProtocolMessageTypes.new_unfinished_block2, full_node_request2)

        def old_clients(conn: WSChiaConnection) -> bool:
            # don't send this to peers with new clients
            return conn.protocol_version <= Version("0.0.35")

        def new_clients(conn: WSChiaConnection) -> bool:
            # don't send this to peers with old clients
            return conn.protocol_version > Version("0.0.35")

        # Exclude the sending peer from the broadcast to avoid echoing the block back.
        peer_id: Optional[bytes32] = None if peer is None else peer.peer_node_id
        await self.server.send_to_all_if([msg], NodeType.FULL_NODE, old_clients, peer_id)
        await self.server.send_to_all_if([msg2], NodeType.FULL_NODE, new_clients, peer_id)

        self._state_changed(
            "unfinished_block",
            {
                "block_duration_in_seconds": block_duration_in_seconds,
                "validation_time_in_seconds": validation_time,
                "pre_validation_time_in_seconds": pre_validation_time,
                "unfinished_block": block.to_json_dict(),
            },
        )
|
|
2515
|
+
|
|
2516
|
+
    async def new_infusion_point_vdf(
        self, request: timelord_protocol.NewInfusionPointVDF, timelord_peer: Optional[WSChiaConnection] = None
    ) -> Optional[Message]:
        """
        Handle an infusion-point VDF from a timelord: look up the matching unfinished
        block, assemble it into a FullBlock with the supplied VDFs/proofs, and try to
        add it to the blockchain.

        Args:
            request: the timelord's infusion point, carrying the challenge-chain,
                reward-chain and (optionally) infused-challenge-chain VDFs and proofs.
            timelord_peer: the timelord connection that sent this VDF, if any. On a
                validation failure we send only this peer the current peak to re-sync it.

        Returns:
            Always None; all outcomes are communicated via side effects (block added,
            request cached for later, or warnings logged).
        """
        # Lookup unfinished blocks
        unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(
            request.unfinished_reward_hash
        )

        if unfinished_block is None:
            self.log.warning(
                f"Do not have unfinished reward chain block {request.unfinished_reward_hash}, cannot finish."
            )
            return None

        prev_b: Optional[BlockRecord] = None

        target_rc_hash = request.reward_chain_ip_vdf.challenge
        last_slot_cc_hash = request.challenge_chain_ip_vdf.challenge

        # Backtracks through end of slot objects, should work for multiple empty sub slots
        for eos, _, _ in reversed(self.full_node_store.finished_sub_slots):
            if eos is not None and eos.reward_chain.get_hash() == target_rc_hash:
                target_rc_hash = eos.reward_chain.end_of_slot_vdf.challenge
        if target_rc_hash == self.constants.GENESIS_CHALLENGE:
            prev_b = None
        else:
            # Find the prev block, starts looking backwards from the peak. target_rc_hash must be the hash of a block
            # and not an end of slot (since we just looked through the slots and backtracked)
            curr: Optional[BlockRecord] = self.blockchain.get_peak()

            # Only scan a bounded window back from the peak; an older ancestor is
            # treated as not found and handled via the future-IP cache below.
            for _ in range(10):
                if curr is None:
                    break
                if curr.reward_infusion_new_challenge == target_rc_hash:
                    # Found our prev block
                    prev_b = curr
                    break
                curr = self.blockchain.try_block_record(curr.prev_hash)

            # If not found, cache keyed on prev block
            if prev_b is None:
                self.full_node_store.add_to_future_ip(request)
                self.log.warning(
                    f"Previous block is None, infusion point {request.reward_chain_ip_vdf.challenge.hex()}"
                )
                return None

        finished_sub_slots: Optional[list[EndOfSubSlotBundle]] = self.full_node_store.get_finished_sub_slots(
            self.blockchain,
            prev_b,
            last_slot_cc_hash,
        )
        if finished_sub_slots is None:
            return None

        sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
            self.constants,
            len(finished_sub_slots) > 0,
            prev_b,
            self.blockchain,
        )

        # Compute total iterations up to the signage point: iterations at the start
        # of the block's sub slot plus the SP offset within the slot.
        if unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
            sub_slot_start_iters = uint128(0)
        else:
            ss_res = self.full_node_store.get_sub_slot(unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash)
            if ss_res is None:
                self.log.warning(f"Do not have sub slot {unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash}")
                return None
            _, _, sub_slot_start_iters = ss_res
        sp_total_iters = uint128(
            sub_slot_start_iters
            + calculate_sp_iters(
                self.constants,
                sub_slot_iters,
                unfinished_block.reward_chain_block.signage_point_index,
            )
        )

        # Combine the stored unfinished block with the timelord's VDFs/proofs.
        block: FullBlock = unfinished_block_to_full_block(
            unfinished_block,
            request.challenge_chain_ip_vdf,
            request.challenge_chain_ip_proof,
            request.reward_chain_ip_vdf,
            request.reward_chain_ip_proof,
            request.infused_challenge_chain_ip_vdf,
            request.infused_challenge_chain_ip_proof,
            finished_sub_slots,
            prev_b,
            self.blockchain,
            sp_total_iters,
            difficulty,
        )
        if not self.has_valid_pool_sig(block):
            self.log.warning("Trying to make a pre-farm block but height is not 0")
            return None
        try:
            await self.add_block(block, None, self._bls_cache, raise_on_disconnected=True)
        except Exception as e:
            # Best-effort: log and, if we know which timelord sent the VDF,
            # reset it to our current peak instead of propagating the error.
            self.log.warning(f"Consensus error validating block: {e}")
            if timelord_peer is not None:
                # Only sends to the timelord who sent us this VDF, to reset them to the correct peak
                await self.send_peak_to_timelords(peer=timelord_peer)
        return None
|
|
2620
|
+
|
|
2621
|
+
    async def add_end_of_sub_slot(
        self, end_of_slot_bundle: EndOfSubSlotBundle, peer: WSChiaConnection
    ) -> tuple[Optional[Message], bool]:
        """
        Process an end-of-sub-slot bundle received from a peer, adding it to the
        full node store and broadcasting it to full nodes, timelords and farmers.

        Args:
            end_of_slot_bundle: the end-of-sub-slot bundle to add.
            peer: the connection that sent the bundle; excluded from the re-broadcast.

        Returns:
            A (message, added) pair. The message, when not None, is a request for the
            missing previous sub slot to send back to the peer; ``added`` is True when
            the sub slot was added (or we already had it), False otherwise.
        """
        fetched_ss = self.full_node_store.get_sub_slot(end_of_slot_bundle.challenge_chain.get_hash())

        # We are not interested in sub-slots which have the same challenge chain but different reward chain. If there
        # is a reorg, we will find out through the broadcast of blocks instead.
        if fetched_ss is not None:
            # Already have the sub-slot
            return None, True

        async with self.timelord_lock:
            # Re-check under the lock: look up the *previous* sub slot by this
            # bundle's starting challenge.
            fetched_ss = self.full_node_store.get_sub_slot(
                end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
            )
            if (
                (fetched_ss is None)
                and end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
                != self.constants.GENESIS_CHALLENGE
            ):
                # If we don't have the prev, request the prev instead
                full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                    end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                    uint8(0),
                    bytes32.zeros,
                )
                return (
                    make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request),
                    False,
                )

            peak = self.blockchain.get_peak()
            if peak is not None and peak.height > 2:
                next_sub_slot_iters = self.blockchain.get_next_slot_iters(peak.header_hash, True)
                next_difficulty = self.blockchain.get_next_difficulty(peak.header_hash, True)
            else:
                # Early in the chain (or no peak yet): fall back to starting constants.
                next_sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
                next_difficulty = self.constants.DIFFICULTY_STARTING

            # Adds the sub slot and potentially get new infusions
            new_infusions = self.full_node_store.new_finished_sub_slot(
                end_of_slot_bundle,
                self.blockchain,
                peak,
                next_sub_slot_iters,
                next_difficulty,
                await self.blockchain.get_full_peak(),
            )
            # It may be an empty list, even if it's not None. Not None means added successfully
            if new_infusions is not None:
                self.log.info(
                    f"⏲️ Finished sub slot, SP {self.constants.NUM_SPS_SUB_SLOT}/{self.constants.NUM_SPS_SUB_SLOT}, "
                    f"{end_of_slot_bundle.challenge_chain.get_hash().hex()}, "
                    f"number of sub-slots: {len(self.full_node_store.finished_sub_slots)}, "
                    f"RC hash: {end_of_slot_bundle.reward_chain.get_hash().hex()}, "
                    f"Deficit {end_of_slot_bundle.reward_chain.deficit}"
                )
                # Reset farmer response timer for sub slot (SP 0)
                self.signage_point_times[0] = time.time()
                # Notify full nodes of the new sub-slot
                broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
                    end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                    end_of_slot_bundle.challenge_chain.get_hash(),
                    uint8(0),
                    end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge,
                )
                msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
                # Exclude the originating peer to avoid echoing the bundle back.
                await self.server.send_to_all([msg], NodeType.FULL_NODE, peer.peer_node_id)

                # Process any infusion points that became available with this sub slot.
                for infusion in new_infusions:
                    await self.new_infusion_point_vdf(infusion)

                # Notify farmers of the new sub-slot
                broadcast_farmer = farmer_protocol.NewSignagePoint(
                    end_of_slot_bundle.challenge_chain.get_hash(),
                    end_of_slot_bundle.challenge_chain.get_hash(),
                    end_of_slot_bundle.reward_chain.get_hash(),
                    next_difficulty,
                    next_sub_slot_iters,
                    uint8(0),
                    uint32(0) if peak is None else peak.height,
                    sp_source_data=SignagePointSourceData(
                        sub_slot_data=SPSubSlotSourceData(
                            end_of_slot_bundle.challenge_chain, end_of_slot_bundle.reward_chain
                        )
                    ),
                )
                msg = make_msg(ProtocolMessageTypes.new_signage_point, broadcast_farmer)
                await self.server.send_to_all([msg], NodeType.FARMER)
                return None, True
            else:
                self.log.info(
                    f"End of slot not added CC challenge "
                    f"{end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge.hex()}"
                )
                return None, False
|
|
2717
|
+
|
|
2718
|
+
    async def add_transaction(
        self, transaction: SpendBundle, spend_name: bytes32, peer: Optional[WSChiaConnection] = None, test: bool = False
    ) -> tuple[MempoolInclusionStatus, Optional[Err]]:
        """
        Validate a spend bundle and try to add it to the mempool, broadcasting the
        result to connected peers on success.

        Args:
            transaction: the spend bundle to add.
            spend_name: the bundle's id (hash), used as the mempool key.
            peer: the peer the bundle came from; excluded from re-broadcast.
            test: when True, skip the "are we synced" check (used by tests/simulator).

        Returns:
            (status, error) where error is None on success.
        """
        # Reject outright while syncing; transactions can't be validated without a peak.
        if self.sync_store.get_sync_mode():
            return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
        if not test and not (await self.synced()):
            return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING

        # Already in the mempool: treat as success (idempotent add).
        if self.mempool_manager.get_spendbundle(spend_name) is not None:
            self.mempool_manager.remove_seen(spend_name)
            return MempoolInclusionStatus.SUCCESS, None
        # Currently being processed elsewhere: don't start a duplicate validation.
        if self.mempool_manager.seen(spend_name):
            return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
        self.mempool_manager.add_and_maybe_pop_seen(spend_name)
        self.log.debug(f"Processing transaction: {spend_name}")
        # Ignore if syncing or if we have not yet received a block
        # the mempool must have a peak to validate transactions
        if self.sync_store.get_sync_mode() or self.mempool_manager.peak is None:
            status = MempoolInclusionStatus.FAILED
            error: Optional[Err] = Err.NO_TRANSACTIONS_WHILE_SYNCING
            self.mempool_manager.remove_seen(spend_name)
        else:
            try:
                # Expensive CLVM/signature validation happens outside the blockchain lock.
                cost_result = await self.mempool_manager.pre_validate_spendbundle(
                    transaction, spend_name, self._bls_cache
                )
            except ValidationError as e:
                self.mempool_manager.remove_seen(spend_name)
                return MempoolInclusionStatus.FAILED, e.code
            except Exception:
                # Unexpected failure: drop the "seen" marker so the tx can be retried,
                # then let the error propagate.
                self.mempool_manager.remove_seen(spend_name)
                raise

            # Optional debugging aid: dump every incoming bundle to disk, keyed by peak height.
            if self.config.get("log_mempool", False):  # pragma: no cover
                try:
                    mempool_dir = path_from_root(self.root_path, "mempool-log") / f"{self.blockchain.get_peak_height()}"
                    mempool_dir.mkdir(parents=True, exist_ok=True)
                    with open(mempool_dir / f"{spend_name}.bundle", "wb+") as f:
                        f.write(bytes(transaction))
                except Exception:
                    # Logging is best-effort; never let it break transaction processing.
                    self.log.exception(f"Failed to log mempool item: {spend_name}")

            # Low-priority lock: block additions take precedence over mempool inserts.
            async with self.blockchain.priority_mutex.acquire(priority=BlockchainMutexPriority.low):
                # Re-check under the lock: another task may have added it meanwhile.
                if self.mempool_manager.get_spendbundle(spend_name) is not None:
                    self.mempool_manager.remove_seen(spend_name)
                    return MempoolInclusionStatus.SUCCESS, None
                if self.mempool_manager.peak is None:
                    return MempoolInclusionStatus.FAILED, Err.MEMPOOL_NOT_INITIALIZED
                info = await self.mempool_manager.add_spend_bundle(
                    transaction, cost_result, spend_name, self.mempool_manager.peak.height
                )
                status = info.status
                error = info.error
            if status == MempoolInclusionStatus.SUCCESS:
                self.log.debug(
                    f"Added transaction to mempool: {spend_name} mempool size: "
                    f"{self.mempool_manager.mempool.total_mempool_cost()} normalized "
                    f"{self.mempool_manager.mempool.total_mempool_cost() / 5000000}"
                )

                # Only broadcast successful transactions, not pending ones. Otherwise it's a DOS
                # vector.
                mempool_item = self.mempool_manager.get_mempool_item(spend_name)
                assert mempool_item is not None
                await self.broadcast_removed_tx(info.removals)
                await self.broadcast_added_tx(mempool_item, current_peer=peer)

                if self.simulator_transaction_callback is not None:  # callback
                    await self.simulator_transaction_callback(spend_name)

            else:
                self.mempool_manager.remove_seen(spend_name)
                self.log.debug(f"Wasn't able to add transaction with id {spend_name}, status {status} error: {error}")
        return status, error
    async def broadcast_added_tx(
        self, mempool_item: MempoolItem, current_peer: Optional[WSChiaConnection] = None
    ) -> None:
        """
        Announce a newly-added mempool item: a NewTransaction message to all full
        nodes (except ``current_peer``, the sender), and a MempoolItemsAdded message
        to interested wallet peers that advertise the MEMPOOL_UPDATES capability.
        """
        assert mempool_item.fee >= 0
        assert mempool_item.cost is not None

        new_tx = full_node_protocol.NewTransaction(
            mempool_item.name,
            mempool_item.cost,
            mempool_item.fee,
        )
        msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
        if current_peer is None:
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
        else:
            # Don't echo the transaction back to the peer that sent it to us.
            await self.server.send_to_all([msg], NodeType.FULL_NODE, current_peer.peer_node_id)

        conds = mempool_item.conds

        # Only peers that opted into mempool updates get per-item notifications.
        all_peers = {
            peer_id
            for peer_id, peer in self.server.all_connections.items()
            if peer.has_capability(Capability.MEMPOOL_UPDATES)
        }

        if len(all_peers) == 0:
            return

        start_time = time.monotonic()

        # Match the spend's coins/hints against peer subscriptions to find who cares.
        hints_for_removals = await self.hint_store.get_hints([bytes32(spend.coin_id) for spend in conds.spends])
        peer_ids = all_peers.intersection(peers_for_spend_bundle(self.subscriptions, conds, set(hints_for_removals)))

        for peer_id in peer_ids:
            peer = self.server.all_connections.get(peer_id)

            # Peer may have disconnected since we computed the set.
            if peer is None:
                continue

            msg = make_msg(
                ProtocolMessageTypes.mempool_items_added, wallet_protocol.MempoolItemsAdded([mempool_item.name])
            )
            await peer.send_message(msg)

        total_time = time.monotonic() - start_time

        # Warn if fanning out the notifications took unexpectedly long.
        self.log.log(
            logging.DEBUG if total_time < 0.5 else logging.WARNING,
            f"Broadcasting added transaction {mempool_item.name} to {len(peer_ids)} peers took {total_time:.4f}s",
        )
    async def broadcast_removed_tx(self, mempool_removals: list[MempoolRemoveInfo]) -> None:
        """
        Notify subscribed wallet peers (MEMPOOL_UPDATES capability) about items that
        were removed from the mempool, batching all removals per peer into a single
        MempoolItemsRemoved message.
        """
        total_removals = sum(len(r.items) for r in mempool_removals)
        if total_removals == 0:
            return

        start_time = time.monotonic()

        self.log.debug(f"Broadcasting {total_removals} removed transactions to peers")

        all_peers = {
            peer_id
            for peer_id, peer in self.server.all_connections.items()
            if peer.has_capability(Capability.MEMPOOL_UPDATES)
        }

        if len(all_peers) == 0:
            return

        # Accumulate removals per peer so each peer receives one batched message.
        removals_to_send: dict[bytes32, list[RemovedMempoolItem]] = dict()

        for removal_info in mempool_removals:
            for internal_mempool_item in removal_info.items:
                conds = internal_mempool_item.conds
                assert conds is not None

                # Find which subscribed peers care about the coins/hints this item spends.
                hints_for_removals = await self.hint_store.get_hints([bytes32(spend.coin_id) for spend in conds.spends])
                peer_ids = all_peers.intersection(
                    peers_for_spend_bundle(self.subscriptions, conds, set(hints_for_removals))
                )

                if len(peer_ids) == 0:
                    continue

                transaction_id = internal_mempool_item.spend_bundle.name()

                self.log.debug(f"Broadcasting removed transaction {transaction_id} to wallet peers {peer_ids}")

                for peer_id in peer_ids:
                    peer = self.server.all_connections.get(peer_id)

                    # Peer may have disconnected since the set was computed.
                    if peer is None:
                        continue

                    removal = wallet_protocol.RemovedMempoolItem(transaction_id, uint8(removal_info.reason.value))
                    removals_to_send.setdefault(peer.peer_node_id, []).append(removal)

        for peer_id, removals in removals_to_send.items():
            peer = self.server.all_connections.get(peer_id)

            if peer is None:
                continue

            msg = make_msg(
                ProtocolMessageTypes.mempool_items_removed,
                wallet_protocol.MempoolItemsRemoved(removals),
            )
            await peer.send_message(msg)

        total_time = time.monotonic() - start_time

        # Warn if fanning out the notifications took unexpectedly long.
        self.log.log(
            logging.DEBUG if total_time < 0.5 else logging.WARNING,
            f"Broadcasting {total_removals} removed transactions "
            f"to {len(removals_to_send)} peers took {total_time:.4f}s",
        )
async def _needs_compact_proof(
|
|
2911
|
+
self, vdf_info: VDFInfo, header_block: HeaderBlock, field_vdf: CompressibleVDFField
|
|
2912
|
+
) -> bool:
|
|
2913
|
+
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
|
|
2914
|
+
for sub_slot in header_block.finished_sub_slots:
|
|
2915
|
+
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
|
|
2916
|
+
if (
|
|
2917
|
+
sub_slot.proofs.challenge_chain_slot_proof.witness_type == 0
|
|
2918
|
+
and sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
|
|
2919
|
+
):
|
|
2920
|
+
return False
|
|
2921
|
+
return True
|
|
2922
|
+
if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
|
|
2923
|
+
for sub_slot in header_block.finished_sub_slots:
|
|
2924
|
+
if (
|
|
2925
|
+
sub_slot.infused_challenge_chain is not None
|
|
2926
|
+
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
|
|
2927
|
+
):
|
|
2928
|
+
assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
|
|
2929
|
+
if (
|
|
2930
|
+
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type == 0
|
|
2931
|
+
and sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
|
|
2932
|
+
):
|
|
2933
|
+
return False
|
|
2934
|
+
return True
|
|
2935
|
+
if field_vdf == CompressibleVDFField.CC_SP_VDF:
|
|
2936
|
+
if header_block.reward_chain_block.challenge_chain_sp_vdf is None:
|
|
2937
|
+
return False
|
|
2938
|
+
if vdf_info == header_block.reward_chain_block.challenge_chain_sp_vdf:
|
|
2939
|
+
assert header_block.challenge_chain_sp_proof is not None
|
|
2940
|
+
if (
|
|
2941
|
+
header_block.challenge_chain_sp_proof.witness_type == 0
|
|
2942
|
+
and header_block.challenge_chain_sp_proof.normalized_to_identity
|
|
2943
|
+
):
|
|
2944
|
+
return False
|
|
2945
|
+
return True
|
|
2946
|
+
if field_vdf == CompressibleVDFField.CC_IP_VDF:
|
|
2947
|
+
if vdf_info == header_block.reward_chain_block.challenge_chain_ip_vdf:
|
|
2948
|
+
if (
|
|
2949
|
+
header_block.challenge_chain_ip_proof.witness_type == 0
|
|
2950
|
+
and header_block.challenge_chain_ip_proof.normalized_to_identity
|
|
2951
|
+
):
|
|
2952
|
+
return False
|
|
2953
|
+
return True
|
|
2954
|
+
return False
|
|
2955
|
+
|
|
2956
|
+
    async def _can_accept_compact_proof(
        self,
        vdf_info: VDFInfo,
        vdf_proof: VDFProof,
        height: uint32,
        header_hash: bytes32,
        field_vdf: CompressibleVDFField,
    ) -> bool:
        """
        - Checks if the provided proof is indeed compact.
        - Checks if proof verifies given the vdf_info from the start of sub-slot.
        - Checks if the provided vdf_info is correct, assuming it refers to the start of sub-slot.
        - Checks if the existing proof was non-compact. Ignore this proof if we already have a compact proof.
        """
        # Nothing to do if the whole block is already compactified (or unknown to us).
        is_fully_compactified = await self.block_store.is_fully_compactified(header_hash)
        if is_fully_compactified is None or is_fully_compactified:
            self.log.info(f"Already compactified block: {header_hash}. Ignoring.")
            return False
        # Only compactify blocks buried at least 5 below the peak, to avoid reorg churn.
        peak = self.blockchain.get_peak()
        if peak is None or peak.height - height < 5:
            self.log.debug("Will not compactify recent block")
            return False
        # The incoming proof itself must be compact (witness type 0, normalized).
        if vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
            self.log.error(f"Received vdf proof is not compact: {vdf_proof}.")
            return False
        # Verify the proof cryptographically against the claimed VDF info.
        if not validate_vdf(vdf_proof, self.constants, ClassgroupElement.get_default_element(), vdf_info):
            self.log.error(f"Received compact vdf proof is not valid: {vdf_proof}.")
            return False
        header_block = await self.blockchain.get_header_block_by_height(height, header_hash, tx_filter=False)
        if header_block is None:
            self.log.error(f"Can't find block for given compact vdf. Height: {height} Header hash: {header_hash}")
            return False
        # Finally, only accept if the block's current proof in this position is non-compact.
        is_new_proof = await self._needs_compact_proof(vdf_info, header_block, field_vdf)
        if not is_new_proof:
            self.log.info(f"Duplicate compact proof. Height: {height}. Header hash: {header_hash}.")
        return is_new_proof
# returns True if we ended up replacing the proof, and False otherwise
|
|
2994
|
+
async def _replace_proof(
|
|
2995
|
+
self,
|
|
2996
|
+
vdf_info: VDFInfo,
|
|
2997
|
+
vdf_proof: VDFProof,
|
|
2998
|
+
header_hash: bytes32,
|
|
2999
|
+
field_vdf: CompressibleVDFField,
|
|
3000
|
+
) -> bool:
|
|
3001
|
+
block = await self.block_store.get_full_block(header_hash)
|
|
3002
|
+
if block is None:
|
|
3003
|
+
return False
|
|
3004
|
+
|
|
3005
|
+
new_block = None
|
|
3006
|
+
|
|
3007
|
+
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
|
|
3008
|
+
for index, sub_slot in enumerate(block.finished_sub_slots):
|
|
3009
|
+
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
|
|
3010
|
+
new_proofs = sub_slot.proofs.replace(challenge_chain_slot_proof=vdf_proof)
|
|
3011
|
+
new_subslot = sub_slot.replace(proofs=new_proofs)
|
|
3012
|
+
new_finished_subslots = block.finished_sub_slots
|
|
3013
|
+
new_finished_subslots[index] = new_subslot
|
|
3014
|
+
new_block = block.replace(finished_sub_slots=new_finished_subslots)
|
|
3015
|
+
break
|
|
3016
|
+
if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
|
|
3017
|
+
for index, sub_slot in enumerate(block.finished_sub_slots):
|
|
3018
|
+
if (
|
|
3019
|
+
sub_slot.infused_challenge_chain is not None
|
|
3020
|
+
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
|
|
3021
|
+
):
|
|
3022
|
+
new_proofs = sub_slot.proofs.replace(infused_challenge_chain_slot_proof=vdf_proof)
|
|
3023
|
+
new_subslot = sub_slot.replace(proofs=new_proofs)
|
|
3024
|
+
new_finished_subslots = block.finished_sub_slots
|
|
3025
|
+
new_finished_subslots[index] = new_subslot
|
|
3026
|
+
new_block = block.replace(finished_sub_slots=new_finished_subslots)
|
|
3027
|
+
break
|
|
3028
|
+
if field_vdf == CompressibleVDFField.CC_SP_VDF:
|
|
3029
|
+
if block.reward_chain_block.challenge_chain_sp_vdf == vdf_info:
|
|
3030
|
+
assert block.challenge_chain_sp_proof is not None
|
|
3031
|
+
new_block = block.replace(challenge_chain_sp_proof=vdf_proof)
|
|
3032
|
+
if field_vdf == CompressibleVDFField.CC_IP_VDF:
|
|
3033
|
+
if block.reward_chain_block.challenge_chain_ip_vdf == vdf_info:
|
|
3034
|
+
new_block = block.replace(challenge_chain_ip_proof=vdf_proof)
|
|
3035
|
+
if new_block is None:
|
|
3036
|
+
return False
|
|
3037
|
+
async with self.db_wrapper.writer():
|
|
3038
|
+
try:
|
|
3039
|
+
await self.block_store.replace_proof(header_hash, new_block)
|
|
3040
|
+
return True
|
|
3041
|
+
except BaseException as e:
|
|
3042
|
+
self.log.error(
|
|
3043
|
+
f"_replace_proof error while adding block {block.header_hash} height {block.height},"
|
|
3044
|
+
f" rolling back: {e} {traceback.format_exc()}"
|
|
3045
|
+
)
|
|
3046
|
+
raise
|
|
3047
|
+
|
|
3048
|
+
    async def add_compact_proof_of_time(self, request: timelord_protocol.RespondCompactProofOfTime) -> None:
        """
        Handle a compact proof of time produced by a (bluebox) timelord: validate it,
        replace the stored proof, and announce the new compact VDF to other full nodes.
        """
        # Don't touch blocks within 5 of the peak; they may still reorg.
        peak = self.blockchain.get_peak()
        if peak is None or peak.height - request.height < 5:
            self.log.info(f"Ignoring add_compact_proof_of_time, height {request.height} too recent.")
            return None

        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if not await self._can_accept_compact_proof(
            request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
        ):
            return None
        # Serialize proof replacements so concurrent handlers don't race on the same block.
        async with self.blockchain.compact_proof_lock:
            replaced = await self._replace_proof(request.vdf_info, request.vdf_proof, request.header_hash, field_vdf)
        if not replaced:
            self.log.error(f"Could not replace compact proof: {request.height}")
            return None
        self.log.info(f"Replaced compact proof at height {request.height}")
        # Let other full nodes know a compact VDF is available for this block.
        msg = make_msg(
            ProtocolMessageTypes.new_compact_vdf,
            full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
        )
        if self._server is not None:
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
    async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: WSChiaConnection) -> None:
        """
        Handle a peer's announcement that it holds a compact VDF for one of our
        blocks: if we still have a non-compact proof in that position, request the
        compact proof from the peer and add it.
        """
        # Don't touch blocks within 5 of the peak; they may still reorg.
        peak = self.blockchain.get_peak()
        if peak is None or peak.height - request.height < 5:
            self.log.info(f"Ignoring new_compact_vdf, height {request.height} too recent.")
            return None
        # Skip if the block already has only compact proofs (or is unknown).
        is_fully_compactified = await self.block_store.is_fully_compactified(request.header_hash)
        if is_fully_compactified is None or is_fully_compactified:
            return None
        header_block = await self.blockchain.get_header_block_by_height(
            request.height, request.header_hash, tx_filter=False
        )
        if header_block is None:
            return None
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if await self._needs_compact_proof(request.vdf_info, header_block, field_vdf):
            # We still have a non-compact proof there; fetch the compact one.
            peer_request = full_node_protocol.RequestCompactVDF(
                request.height, request.header_hash, request.field_vdf, request.vdf_info
            )
            response = await peer.call_api(FullNodeAPI.request_compact_vdf, peer_request, timeout=10)
            if response is not None and isinstance(response, full_node_protocol.RespondCompactVDF):
                await self.add_compact_vdf(response, peer)
    async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: WSChiaConnection) -> None:
        """
        Serve a peer's request for one of our compact VDF proofs: locate the proof
        in the requested block/field, verify it is compact, and send it back.
        """
        header_block = await self.blockchain.get_header_block_by_height(
            request.height, request.header_hash, tx_filter=False
        )
        if header_block is None:
            return None
        vdf_proof: Optional[VDFProof] = None
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        # Locate the proof that corresponds to the requested field and vdf_info.
        if field_vdf == CompressibleVDFField.CC_EOS_VDF:
            for sub_slot in header_block.finished_sub_slots:
                if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == request.vdf_info:
                    vdf_proof = sub_slot.proofs.challenge_chain_slot_proof
                    break
        if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
            for sub_slot in header_block.finished_sub_slots:
                if (
                    sub_slot.infused_challenge_chain is not None
                    and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == request.vdf_info
                ):
                    vdf_proof = sub_slot.proofs.infused_challenge_chain_slot_proof
                    break
        if (
            field_vdf == CompressibleVDFField.CC_SP_VDF
            and header_block.reward_chain_block.challenge_chain_sp_vdf == request.vdf_info
        ):
            vdf_proof = header_block.challenge_chain_sp_proof
        if (
            field_vdf == CompressibleVDFField.CC_IP_VDF
            and header_block.reward_chain_block.challenge_chain_ip_vdf == request.vdf_info
        ):
            vdf_proof = header_block.challenge_chain_ip_proof
        # Only serve proofs that are actually compact; otherwise the peer asked for
        # something we don't have.
        if vdf_proof is None or vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
            self.log.error(f"{peer} requested compact vdf we don't have, height: {request.height}.")
            return None
        compact_vdf = full_node_protocol.RespondCompactVDF(
            request.height,
            request.header_hash,
            request.field_vdf,
            request.vdf_info,
            vdf_proof,
        )
        msg = make_msg(ProtocolMessageTypes.respond_compact_vdf, compact_vdf)
        await peer.send_message(msg)
    async def add_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: WSChiaConnection) -> None:
        """
        Handle a compact VDF proof received from a full-node peer: validate it,
        replace the stored proof, and re-announce to other peers (excluding the
        sender).
        """
        field_vdf = CompressibleVDFField(int(request.field_vdf))
        if not await self._can_accept_compact_proof(
            request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
        ):
            return None
        # Serialize proof replacements and de-duplicate proofs we've already processed.
        async with self.blockchain.compact_proof_lock:
            if self.blockchain.seen_compact_proofs(request.vdf_info, request.height):
                return None
            replaced = await self._replace_proof(request.vdf_info, request.vdf_proof, request.header_hash, field_vdf)
        if not replaced:
            self.log.error(f"Could not replace compact proof: {request.height}")
            return None
        # Propagate the announcement, but not back to the peer that sent it.
        msg = make_msg(
            ProtocolMessageTypes.new_compact_vdf,
            full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
        )
        if self._server is not None:
            await self.server.send_to_all([msg], NodeType.FULL_NODE, peer.peer_node_id)
def in_bad_peak_cache(self, wp: WeightProof) -> bool:
|
|
3159
|
+
for block in wp.recent_chain_data:
|
|
3160
|
+
if block.header_hash in self.bad_peak_cache.keys():
|
|
3161
|
+
return True
|
|
3162
|
+
return False
|
|
3163
|
+
|
|
3164
|
+
def add_to_bad_peak_cache(self, peak_header_hash: bytes32, peak_height: uint32) -> None:
|
|
3165
|
+
curr_height = self.blockchain.get_peak_height()
|
|
3166
|
+
|
|
3167
|
+
if curr_height is None:
|
|
3168
|
+
self.log.debug(f"add bad peak {peak_header_hash} to cache")
|
|
3169
|
+
self.bad_peak_cache[peak_header_hash] = peak_height
|
|
3170
|
+
return
|
|
3171
|
+
minimum_cache_height = curr_height - (2 * self.constants.SUB_EPOCH_BLOCKS)
|
|
3172
|
+
if peak_height < minimum_cache_height:
|
|
3173
|
+
return
|
|
3174
|
+
|
|
3175
|
+
new_cache = {}
|
|
3176
|
+
self.log.info(f"add bad peak {peak_header_hash} to cache")
|
|
3177
|
+
new_cache[peak_header_hash] = peak_height
|
|
3178
|
+
min_height = peak_height
|
|
3179
|
+
min_block = peak_header_hash
|
|
3180
|
+
for header_hash, height in self.bad_peak_cache.items():
|
|
3181
|
+
if height < minimum_cache_height:
|
|
3182
|
+
self.log.debug(f"remove bad peak {peak_header_hash} from cache")
|
|
3183
|
+
continue
|
|
3184
|
+
if height < min_height:
|
|
3185
|
+
min_block = header_hash
|
|
3186
|
+
new_cache[header_hash] = height
|
|
3187
|
+
|
|
3188
|
+
if len(new_cache) > self.config.get("bad_peak_cache_size", 100):
|
|
3189
|
+
del new_cache[min_block]
|
|
3190
|
+
|
|
3191
|
+
self.bad_peak_cache = new_cache
|
|
3192
|
+
|
|
3193
|
+
    async def broadcast_uncompact_blocks(
        self, uncompact_interval_scan: int, target_uncompact_proofs: int, sanitize_weight_proof_only: bool
    ) -> None:
        """
        Background loop: periodically pick random non-compactified blocks and ask
        connected (bluebox) timelords to produce compact proofs for them.

        Args:
            uncompact_interval_scan: seconds to sleep between scan iterations.
            target_uncompact_proofs: number of proof requests per timelord per scan.
            sanitize_weight_proof_only: when True, only request SP/IP proofs for
                challenge blocks (the ones needed to sanitize weight proofs).
        """
        try:
            while not self._shut_down:
                # Wait out any ongoing sync before scanning.
                while self.sync_store.get_sync_mode() or self.sync_store.get_long_sync():
                    if self._shut_down:
                        return None
                    await asyncio.sleep(30)

                broadcast_list: list[timelord_protocol.RequestCompactProofOfTime] = []

                self.log.info("Getting random heights for bluebox to compact")

                if self._server is None:
                    self.log.info("Not broadcasting uncompact blocks, no server found")
                    await asyncio.sleep(uncompact_interval_scan)
                    continue
                connected_timelords = self.server.get_connections(NodeType.TIMELORD)

                # Scale the work to the number of timelords available to share it.
                total_target_uncompact_proofs = target_uncompact_proofs * max(1, len(connected_timelords))
                heights = await self.block_store.get_random_not_compactified(total_target_uncompact_proofs)
                self.log.info("Heights found for bluebox to compact: [%s]", ", ".join(map(str, heights)))

                for h in heights:
                    headers = await self.blockchain.get_header_blocks_in_range(h, h, tx_filter=False)
                    records: dict[bytes32, BlockRecord] = {}
                    if sanitize_weight_proof_only:
                        # Records are needed below to tell challenge blocks apart.
                        records = await self.blockchain.get_block_records_in_range(h, h)
                    for header in headers.values():
                        # Skip blocks that are no longer on the main chain.
                        expected_header_hash = self.blockchain.height_to_hash(header.height)
                        if header.header_hash != expected_header_hash:
                            continue
                        if sanitize_weight_proof_only:
                            assert header.header_hash in records
                            record = records[header.header_hash]
                        # Queue any non-compact end-of-slot proofs (CC, and ICC if present).
                        for sub_slot in header.finished_sub_slots:
                            if (
                                sub_slot.proofs.challenge_chain_slot_proof.witness_type > 0
                                or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
                            ):
                                broadcast_list.append(
                                    timelord_protocol.RequestCompactProofOfTime(
                                        sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
                                        header.header_hash,
                                        header.height,
                                        uint8(CompressibleVDFField.CC_EOS_VDF),
                                    )
                                )
                            if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
                                sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type > 0
                                or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
                            ):
                                assert sub_slot.infused_challenge_chain is not None
                                broadcast_list.append(
                                    timelord_protocol.RequestCompactProofOfTime(
                                        sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
                                        header.header_hash,
                                        header.height,
                                        uint8(CompressibleVDFField.ICC_EOS_VDF),
                                    )
                                )
                        # Running in 'sanitize_weight_proof_only' ignores CC_SP_VDF and CC_IP_VDF
                        # unless this is a challenge block.
                        if sanitize_weight_proof_only:
                            if not record.is_challenge_block(self.constants):
                                continue
                        # Queue a non-compact signage-point proof, if the block has one.
                        if header.challenge_chain_sp_proof is not None and (
                            header.challenge_chain_sp_proof.witness_type > 0
                            or not header.challenge_chain_sp_proof.normalized_to_identity
                        ):
                            assert header.reward_chain_block.challenge_chain_sp_vdf is not None
                            broadcast_list.append(
                                timelord_protocol.RequestCompactProofOfTime(
                                    header.reward_chain_block.challenge_chain_sp_vdf,
                                    header.header_hash,
                                    header.height,
                                    uint8(CompressibleVDFField.CC_SP_VDF),
                                )
                            )

                        # Queue a non-compact infusion-point proof.
                        if (
                            header.challenge_chain_ip_proof.witness_type > 0
                            or not header.challenge_chain_ip_proof.normalized_to_identity
                        ):
                            broadcast_list.append(
                                timelord_protocol.RequestCompactProofOfTime(
                                    header.reward_chain_block.challenge_chain_ip_vdf,
                                    header.header_hash,
                                    header.height,
                                    uint8(CompressibleVDFField.CC_IP_VDF),
                                )
                            )

                # Split the work into chunks of target_uncompact_proofs per timelord.
                broadcast_list_chunks: list[list[timelord_protocol.RequestCompactProofOfTime]] = []
                for index in range(0, len(broadcast_list), target_uncompact_proofs):
                    broadcast_list_chunks.append(broadcast_list[index : index + target_uncompact_proofs])
                if len(broadcast_list_chunks) == 0:
                    self.log.info("Did not find any uncompact blocks.")
                    await asyncio.sleep(uncompact_interval_scan)
                    continue
                # A sync may have started during the scan; skip this round if so.
                if self.sync_store.get_sync_mode() or self.sync_store.get_long_sync():
                    await asyncio.sleep(uncompact_interval_scan)
                    continue
                if self._server is not None:
                    self.log.info(f"Broadcasting {len(broadcast_list)} items to the bluebox")
                    connected_timelords = self.server.get_connections(NodeType.TIMELORD)
                    chunk_index = 0
                    # Round-robin the chunks across the connected timelords.
                    for connection in connected_timelords:
                        peer_node_id = connection.peer_node_id
                        msgs = []
                        broadcast_list = broadcast_list_chunks[chunk_index]
                        chunk_index = (chunk_index + 1) % len(broadcast_list_chunks)
                        for new_pot in broadcast_list:
                            msg = make_msg(ProtocolMessageTypes.request_compact_proof_of_time, new_pot)
                            msgs.append(msg)
                        await self.server.send_to_specific(msgs, peer_node_id)
                await asyncio.sleep(uncompact_interval_scan)
        except Exception as e:
            # Catch-all boundary for the background task: log and exit the loop.
            error_stack = traceback.format_exc()
            self.log.error(f"Exception in broadcast_uncompact_blocks: {e}")
            self.log.error(f"Exception Stack: {error_stack}")
async def node_next_block_check(
    peer: WSChiaConnection, potential_peek: uint32, blockchain: BlockchainInterface
) -> bool:
    """
    Ask ``peer`` for the block at height ``potential_peek`` and report whether that
    block would extend our current peak (its prev_header_hash matches our peak).
    """
    request = full_node_protocol.RequestBlock(potential_peek, True)
    block_response: Optional[Any] = await peer.call_api(FullNodeAPI.request_block, request)
    # Covers both a missing response (None) and an unexpected message type.
    if not isinstance(block_response, full_node_protocol.RespondBlock):
        return False
    current_peak = blockchain.get_peak()
    if current_peak is None:
        return False
    return block_response.block.prev_header_hash == current_peak.header_hash