chia_blockchain-2.5.1rc1-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- chia/__init__.py +10 -0
- chia/__main__.py +5 -0
- chia/_tests/README.md +53 -0
- chia/_tests/__init__.py +0 -0
- chia/_tests/blockchain/__init__.py +0 -0
- chia/_tests/blockchain/blockchain_test_utils.py +195 -0
- chia/_tests/blockchain/config.py +4 -0
- chia/_tests/blockchain/test_augmented_chain.py +145 -0
- chia/_tests/blockchain/test_blockchain.py +4202 -0
- chia/_tests/blockchain/test_blockchain_transactions.py +1031 -0
- chia/_tests/blockchain/test_build_chains.py +59 -0
- chia/_tests/blockchain/test_get_block_generator.py +72 -0
- chia/_tests/blockchain/test_lookup_fork_chain.py +194 -0
- chia/_tests/build-init-files.py +92 -0
- chia/_tests/build-job-matrix.py +204 -0
- chia/_tests/check_pytest_monitor_output.py +34 -0
- chia/_tests/check_sql_statements.py +72 -0
- chia/_tests/chia-start-sim +42 -0
- chia/_tests/clvm/__init__.py +0 -0
- chia/_tests/clvm/benchmark_costs.py +23 -0
- chia/_tests/clvm/coin_store.py +149 -0
- chia/_tests/clvm/test_chialisp_deserialization.py +101 -0
- chia/_tests/clvm/test_clvm_step.py +37 -0
- chia/_tests/clvm/test_condition_codes.py +13 -0
- chia/_tests/clvm/test_curry_and_treehash.py +55 -0
- chia/_tests/clvm/test_message_conditions.py +184 -0
- chia/_tests/clvm/test_program.py +150 -0
- chia/_tests/clvm/test_puzzle_compression.py +143 -0
- chia/_tests/clvm/test_puzzle_drivers.py +45 -0
- chia/_tests/clvm/test_puzzles.py +242 -0
- chia/_tests/clvm/test_singletons.py +540 -0
- chia/_tests/clvm/test_spend_sim.py +181 -0
- chia/_tests/cmds/__init__.py +0 -0
- chia/_tests/cmds/cmd_test_utils.py +469 -0
- chia/_tests/cmds/config.py +3 -0
- chia/_tests/cmds/conftest.py +23 -0
- chia/_tests/cmds/test_click_types.py +200 -0
- chia/_tests/cmds/test_cmd_framework.py +620 -0
- chia/_tests/cmds/test_cmds_util.py +97 -0
- chia/_tests/cmds/test_daemon.py +92 -0
- chia/_tests/cmds/test_dev_gh.py +131 -0
- chia/_tests/cmds/test_farm_cmd.py +66 -0
- chia/_tests/cmds/test_show.py +116 -0
- chia/_tests/cmds/test_sim.py +207 -0
- chia/_tests/cmds/test_timelock_args.py +75 -0
- chia/_tests/cmds/test_tx_config_args.py +154 -0
- chia/_tests/cmds/testing_classes.py +59 -0
- chia/_tests/cmds/wallet/__init__.py +0 -0
- chia/_tests/cmds/wallet/test_consts.py +47 -0
- chia/_tests/cmds/wallet/test_dao.py +565 -0
- chia/_tests/cmds/wallet/test_did.py +403 -0
- chia/_tests/cmds/wallet/test_nft.py +471 -0
- chia/_tests/cmds/wallet/test_notifications.py +124 -0
- chia/_tests/cmds/wallet/test_offer.toffer +1 -0
- chia/_tests/cmds/wallet/test_tx_decorators.py +27 -0
- chia/_tests/cmds/wallet/test_vcs.py +400 -0
- chia/_tests/cmds/wallet/test_wallet.py +1125 -0
- chia/_tests/cmds/wallet/test_wallet_check.py +109 -0
- chia/_tests/conftest.py +1419 -0
- chia/_tests/connection_utils.py +125 -0
- chia/_tests/core/__init__.py +0 -0
- chia/_tests/core/cmds/__init__.py +0 -0
- chia/_tests/core/cmds/test_beta.py +382 -0
- chia/_tests/core/cmds/test_keys.py +1734 -0
- chia/_tests/core/cmds/test_wallet.py +126 -0
- chia/_tests/core/config.py +3 -0
- chia/_tests/core/consensus/__init__.py +0 -0
- chia/_tests/core/consensus/test_block_creation.py +54 -0
- chia/_tests/core/consensus/test_pot_iterations.py +117 -0
- chia/_tests/core/custom_types/__init__.py +0 -0
- chia/_tests/core/custom_types/test_coin.py +107 -0
- chia/_tests/core/custom_types/test_proof_of_space.py +144 -0
- chia/_tests/core/custom_types/test_spend_bundle.py +70 -0
- chia/_tests/core/daemon/__init__.py +0 -0
- chia/_tests/core/daemon/config.py +4 -0
- chia/_tests/core/daemon/test_daemon.py +2128 -0
- chia/_tests/core/daemon/test_daemon_register.py +109 -0
- chia/_tests/core/daemon/test_keychain_proxy.py +101 -0
- chia/_tests/core/data_layer/__init__.py +0 -0
- chia/_tests/core/data_layer/config.py +5 -0
- chia/_tests/core/data_layer/conftest.py +106 -0
- chia/_tests/core/data_layer/test_data_cli.py +56 -0
- chia/_tests/core/data_layer/test_data_layer.py +83 -0
- chia/_tests/core/data_layer/test_data_layer_util.py +218 -0
- chia/_tests/core/data_layer/test_data_rpc.py +3847 -0
- chia/_tests/core/data_layer/test_data_store.py +2424 -0
- chia/_tests/core/data_layer/test_data_store_schema.py +381 -0
- chia/_tests/core/data_layer/test_plugin.py +91 -0
- chia/_tests/core/data_layer/util.py +233 -0
- chia/_tests/core/farmer/__init__.py +0 -0
- chia/_tests/core/farmer/config.py +3 -0
- chia/_tests/core/farmer/test_farmer_api.py +103 -0
- chia/_tests/core/full_node/__init__.py +0 -0
- chia/_tests/core/full_node/config.py +4 -0
- chia/_tests/core/full_node/dos/__init__.py +0 -0
- chia/_tests/core/full_node/dos/config.py +3 -0
- chia/_tests/core/full_node/full_sync/__init__.py +0 -0
- chia/_tests/core/full_node/full_sync/config.py +4 -0
- chia/_tests/core/full_node/full_sync/test_full_sync.py +443 -0
- chia/_tests/core/full_node/ram_db.py +27 -0
- chia/_tests/core/full_node/stores/__init__.py +0 -0
- chia/_tests/core/full_node/stores/config.py +4 -0
- chia/_tests/core/full_node/stores/test_block_store.py +590 -0
- chia/_tests/core/full_node/stores/test_coin_store.py +897 -0
- chia/_tests/core/full_node/stores/test_full_node_store.py +1219 -0
- chia/_tests/core/full_node/stores/test_hint_store.py +229 -0
- chia/_tests/core/full_node/stores/test_sync_store.py +135 -0
- chia/_tests/core/full_node/test_address_manager.py +588 -0
- chia/_tests/core/full_node/test_block_height_map.py +556 -0
- chia/_tests/core/full_node/test_conditions.py +556 -0
- chia/_tests/core/full_node/test_full_node.py +2700 -0
- chia/_tests/core/full_node/test_generator_tools.py +82 -0
- chia/_tests/core/full_node/test_hint_management.py +104 -0
- chia/_tests/core/full_node/test_node_load.py +34 -0
- chia/_tests/core/full_node/test_performance.py +179 -0
- chia/_tests/core/full_node/test_subscriptions.py +492 -0
- chia/_tests/core/full_node/test_transactions.py +203 -0
- chia/_tests/core/full_node/test_tx_processing_queue.py +155 -0
- chia/_tests/core/large_block.py +2388 -0
- chia/_tests/core/make_block_generator.py +70 -0
- chia/_tests/core/mempool/__init__.py +0 -0
- chia/_tests/core/mempool/config.py +4 -0
- chia/_tests/core/mempool/test_mempool.py +3255 -0
- chia/_tests/core/mempool/test_mempool_fee_estimator.py +104 -0
- chia/_tests/core/mempool/test_mempool_fee_protocol.py +55 -0
- chia/_tests/core/mempool/test_mempool_item_queries.py +190 -0
- chia/_tests/core/mempool/test_mempool_manager.py +2084 -0
- chia/_tests/core/mempool/test_mempool_performance.py +64 -0
- chia/_tests/core/mempool/test_singleton_fast_forward.py +567 -0
- chia/_tests/core/node_height.py +28 -0
- chia/_tests/core/server/__init__.py +0 -0
- chia/_tests/core/server/config.py +3 -0
- chia/_tests/core/server/flood.py +84 -0
- chia/_tests/core/server/serve.py +135 -0
- chia/_tests/core/server/test_api_protocol.py +21 -0
- chia/_tests/core/server/test_capabilities.py +66 -0
- chia/_tests/core/server/test_dos.py +319 -0
- chia/_tests/core/server/test_event_loop.py +109 -0
- chia/_tests/core/server/test_loop.py +294 -0
- chia/_tests/core/server/test_node_discovery.py +73 -0
- chia/_tests/core/server/test_rate_limits.py +482 -0
- chia/_tests/core/server/test_server.py +226 -0
- chia/_tests/core/server/test_upnp.py +8 -0
- chia/_tests/core/services/__init__.py +0 -0
- chia/_tests/core/services/config.py +3 -0
- chia/_tests/core/services/test_services.py +188 -0
- chia/_tests/core/ssl/__init__.py +0 -0
- chia/_tests/core/ssl/config.py +3 -0
- chia/_tests/core/ssl/test_ssl.py +202 -0
- chia/_tests/core/test_coins.py +33 -0
- chia/_tests/core/test_cost_calculation.py +313 -0
- chia/_tests/core/test_crawler.py +175 -0
- chia/_tests/core/test_crawler_rpc.py +53 -0
- chia/_tests/core/test_daemon_rpc.py +24 -0
- chia/_tests/core/test_db_conversion.py +130 -0
- chia/_tests/core/test_db_validation.py +162 -0
- chia/_tests/core/test_farmer_harvester_rpc.py +505 -0
- chia/_tests/core/test_filter.py +35 -0
- chia/_tests/core/test_full_node_rpc.py +768 -0
- chia/_tests/core/test_merkle_set.py +343 -0
- chia/_tests/core/test_program.py +47 -0
- chia/_tests/core/test_rpc_util.py +86 -0
- chia/_tests/core/test_seeder.py +420 -0
- chia/_tests/core/test_setproctitle.py +13 -0
- chia/_tests/core/util/__init__.py +0 -0
- chia/_tests/core/util/config.py +4 -0
- chia/_tests/core/util/test_block_cache.py +44 -0
- chia/_tests/core/util/test_cached_bls.py +57 -0
- chia/_tests/core/util/test_config.py +337 -0
- chia/_tests/core/util/test_file_keyring_synchronization.py +105 -0
- chia/_tests/core/util/test_files.py +391 -0
- chia/_tests/core/util/test_jsonify.py +146 -0
- chia/_tests/core/util/test_keychain.py +522 -0
- chia/_tests/core/util/test_keyring_wrapper.py +491 -0
- chia/_tests/core/util/test_lockfile.py +380 -0
- chia/_tests/core/util/test_log_exceptions.py +187 -0
- chia/_tests/core/util/test_lru_cache.py +56 -0
- chia/_tests/core/util/test_significant_bits.py +40 -0
- chia/_tests/core/util/test_streamable.py +883 -0
- chia/_tests/db/__init__.py +0 -0
- chia/_tests/db/test_db_wrapper.py +566 -0
- chia/_tests/environments/__init__.py +0 -0
- chia/_tests/environments/common.py +35 -0
- chia/_tests/environments/full_node.py +47 -0
- chia/_tests/environments/wallet.py +429 -0
- chia/_tests/ether.py +19 -0
- chia/_tests/farmer_harvester/__init__.py +0 -0
- chia/_tests/farmer_harvester/config.py +3 -0
- chia/_tests/farmer_harvester/test_farmer.py +1264 -0
- chia/_tests/farmer_harvester/test_farmer_harvester.py +292 -0
- chia/_tests/farmer_harvester/test_filter_prefix_bits.py +131 -0
- chia/_tests/farmer_harvester/test_third_party_harvesters.py +528 -0
- chia/_tests/farmer_harvester/test_third_party_harvesters_data.json +29 -0
- chia/_tests/fee_estimation/__init__.py +0 -0
- chia/_tests/fee_estimation/config.py +3 -0
- chia/_tests/fee_estimation/test_fee_estimation_integration.py +262 -0
- chia/_tests/fee_estimation/test_fee_estimation_rpc.py +287 -0
- chia/_tests/fee_estimation/test_fee_estimation_unit_tests.py +144 -0
- chia/_tests/fee_estimation/test_mempoolitem_height_added.py +146 -0
- chia/_tests/generator/__init__.py +0 -0
- chia/_tests/generator/puzzles/__init__.py +0 -0
- chia/_tests/generator/puzzles/test_generator_deserialize.clsp +3 -0
- chia/_tests/generator/puzzles/test_generator_deserialize.clsp.hex +1 -0
- chia/_tests/generator/puzzles/test_multiple_generator_input_arguments.clsp +19 -0
- chia/_tests/generator/puzzles/test_multiple_generator_input_arguments.clsp.hex +1 -0
- chia/_tests/generator/test_compression.py +201 -0
- chia/_tests/generator/test_generator_types.py +44 -0
- chia/_tests/generator/test_rom.py +180 -0
- chia/_tests/plot_sync/__init__.py +0 -0
- chia/_tests/plot_sync/config.py +3 -0
- chia/_tests/plot_sync/test_delta.py +101 -0
- chia/_tests/plot_sync/test_plot_sync.py +618 -0
- chia/_tests/plot_sync/test_receiver.py +451 -0
- chia/_tests/plot_sync/test_sender.py +116 -0
- chia/_tests/plot_sync/test_sync_simulated.py +451 -0
- chia/_tests/plot_sync/util.py +68 -0
- chia/_tests/plotting/__init__.py +0 -0
- chia/_tests/plotting/config.py +3 -0
- chia/_tests/plotting/test_plot_manager.py +781 -0
- chia/_tests/plotting/util.py +12 -0
- chia/_tests/pools/__init__.py +0 -0
- chia/_tests/pools/config.py +5 -0
- chia/_tests/pools/test_pool_cli_parsing.py +128 -0
- chia/_tests/pools/test_pool_cmdline.py +1001 -0
- chia/_tests/pools/test_pool_config.py +42 -0
- chia/_tests/pools/test_pool_puzzles_lifecycle.py +397 -0
- chia/_tests/pools/test_pool_rpc.py +1123 -0
- chia/_tests/pools/test_pool_wallet.py +205 -0
- chia/_tests/pools/test_wallet_pool_store.py +161 -0
- chia/_tests/process_junit.py +348 -0
- chia/_tests/rpc/__init__.py +0 -0
- chia/_tests/rpc/test_rpc_client.py +138 -0
- chia/_tests/rpc/test_rpc_server.py +183 -0
- chia/_tests/simulation/__init__.py +0 -0
- chia/_tests/simulation/config.py +6 -0
- chia/_tests/simulation/test_simulation.py +501 -0
- chia/_tests/simulation/test_simulator.py +232 -0
- chia/_tests/simulation/test_start_simulator.py +107 -0
- chia/_tests/testconfig.py +13 -0
- chia/_tests/timelord/__init__.py +0 -0
- chia/_tests/timelord/config.py +3 -0
- chia/_tests/timelord/test_new_peak.py +437 -0
- chia/_tests/timelord/test_timelord.py +11 -0
- chia/_tests/tools/1315537.json +170 -0
- chia/_tests/tools/1315544.json +160 -0
- chia/_tests/tools/1315630.json +150 -0
- chia/_tests/tools/300000.json +105 -0
- chia/_tests/tools/442734.json +140 -0
- chia/_tests/tools/466212.json +130 -0
- chia/_tests/tools/__init__.py +0 -0
- chia/_tests/tools/config.py +5 -0
- chia/_tests/tools/test-blockchain-db.sqlite +0 -0
- chia/_tests/tools/test_full_sync.py +30 -0
- chia/_tests/tools/test_legacy_keyring.py +82 -0
- chia/_tests/tools/test_run_block.py +128 -0
- chia/_tests/tools/test_virtual_project.py +591 -0
- chia/_tests/util/__init__.py +0 -0
- chia/_tests/util/benchmark_cost.py +170 -0
- chia/_tests/util/benchmarks.py +153 -0
- chia/_tests/util/bip39_test_vectors.json +148 -0
- chia/_tests/util/blockchain.py +134 -0
- chia/_tests/util/blockchain_mock.py +132 -0
- chia/_tests/util/build_network_protocol_files.py +302 -0
- chia/_tests/util/clvm_generator.bin +0 -0
- chia/_tests/util/config.py +3 -0
- chia/_tests/util/constants.py +20 -0
- chia/_tests/util/db_connection.py +37 -0
- chia/_tests/util/full_sync.py +253 -0
- chia/_tests/util/gen_ssl_certs.py +114 -0
- chia/_tests/util/generator_tools_testing.py +45 -0
- chia/_tests/util/get_name_puzzle_conditions.py +52 -0
- chia/_tests/util/key_tool.py +36 -0
- chia/_tests/util/misc.py +675 -0
- chia/_tests/util/network_protocol_data.py +1072 -0
- chia/_tests/util/protocol_messages_bytes-v1.0 +0 -0
- chia/_tests/util/protocol_messages_json.py +2701 -0
- chia/_tests/util/rpc.py +26 -0
- chia/_tests/util/run_block.py +163 -0
- chia/_tests/util/setup_nodes.py +481 -0
- chia/_tests/util/spend_sim.py +492 -0
- chia/_tests/util/split_managers.py +102 -0
- chia/_tests/util/temp_file.py +14 -0
- chia/_tests/util/test_action_scope.py +144 -0
- chia/_tests/util/test_async_pool.py +366 -0
- chia/_tests/util/test_build_job_matrix.py +42 -0
- chia/_tests/util/test_build_network_protocol_files.py +7 -0
- chia/_tests/util/test_chia_version.py +50 -0
- chia/_tests/util/test_collection.py +11 -0
- chia/_tests/util/test_condition_tools.py +229 -0
- chia/_tests/util/test_config.py +426 -0
- chia/_tests/util/test_dump_keyring.py +60 -0
- chia/_tests/util/test_errors.py +10 -0
- chia/_tests/util/test_full_block_utils.py +279 -0
- chia/_tests/util/test_installed.py +20 -0
- chia/_tests/util/test_limited_semaphore.py +53 -0
- chia/_tests/util/test_logging_filter.py +42 -0
- chia/_tests/util/test_misc.py +445 -0
- chia/_tests/util/test_network.py +73 -0
- chia/_tests/util/test_network_protocol_files.py +578 -0
- chia/_tests/util/test_network_protocol_json.py +267 -0
- chia/_tests/util/test_network_protocol_test.py +256 -0
- chia/_tests/util/test_paginator.py +71 -0
- chia/_tests/util/test_pprint.py +17 -0
- chia/_tests/util/test_priority_mutex.py +488 -0
- chia/_tests/util/test_recursive_replace.py +116 -0
- chia/_tests/util/test_replace_str_to_bytes.py +137 -0
- chia/_tests/util/test_service_groups.py +15 -0
- chia/_tests/util/test_ssl_check.py +31 -0
- chia/_tests/util/test_testnet_overrides.py +19 -0
- chia/_tests/util/test_tests_misc.py +38 -0
- chia/_tests/util/test_timing.py +37 -0
- chia/_tests/util/test_trusted_peer.py +51 -0
- chia/_tests/util/time_out_assert.py +191 -0
- chia/_tests/wallet/__init__.py +0 -0
- chia/_tests/wallet/cat_wallet/__init__.py +0 -0
- chia/_tests/wallet/cat_wallet/config.py +4 -0
- chia/_tests/wallet/cat_wallet/test_cat_lifecycle.py +468 -0
- chia/_tests/wallet/cat_wallet/test_cat_outer_puzzle.py +69 -0
- chia/_tests/wallet/cat_wallet/test_cat_wallet.py +1826 -0
- chia/_tests/wallet/cat_wallet/test_offer_lifecycle.py +291 -0
- chia/_tests/wallet/cat_wallet/test_trades.py +2600 -0
- chia/_tests/wallet/clawback/__init__.py +0 -0
- chia/_tests/wallet/clawback/config.py +3 -0
- chia/_tests/wallet/clawback/test_clawback_decorator.py +78 -0
- chia/_tests/wallet/clawback/test_clawback_lifecycle.py +292 -0
- chia/_tests/wallet/clawback/test_clawback_metadata.py +50 -0
- chia/_tests/wallet/config.py +4 -0
- chia/_tests/wallet/conftest.py +278 -0
- chia/_tests/wallet/dao_wallet/__init__.py +0 -0
- chia/_tests/wallet/dao_wallet/config.py +3 -0
- chia/_tests/wallet/dao_wallet/test_dao_clvm.py +1330 -0
- chia/_tests/wallet/dao_wallet/test_dao_wallets.py +3488 -0
- chia/_tests/wallet/db_wallet/__init__.py +0 -0
- chia/_tests/wallet/db_wallet/config.py +3 -0
- chia/_tests/wallet/db_wallet/test_db_graftroot.py +141 -0
- chia/_tests/wallet/db_wallet/test_dl_offers.py +491 -0
- chia/_tests/wallet/db_wallet/test_dl_wallet.py +823 -0
- chia/_tests/wallet/did_wallet/__init__.py +0 -0
- chia/_tests/wallet/did_wallet/config.py +4 -0
- chia/_tests/wallet/did_wallet/test_did.py +2284 -0
- chia/_tests/wallet/nft_wallet/__init__.py +0 -0
- chia/_tests/wallet/nft_wallet/config.py +4 -0
- chia/_tests/wallet/nft_wallet/test_nft_1_offers.py +1493 -0
- chia/_tests/wallet/nft_wallet/test_nft_bulk_mint.py +1024 -0
- chia/_tests/wallet/nft_wallet/test_nft_lifecycle.py +375 -0
- chia/_tests/wallet/nft_wallet/test_nft_offers.py +1209 -0
- chia/_tests/wallet/nft_wallet/test_nft_puzzles.py +172 -0
- chia/_tests/wallet/nft_wallet/test_nft_wallet.py +2584 -0
- chia/_tests/wallet/nft_wallet/test_ownership_outer_puzzle.py +70 -0
- chia/_tests/wallet/rpc/__init__.py +0 -0
- chia/_tests/wallet/rpc/config.py +4 -0
- chia/_tests/wallet/rpc/test_dl_wallet_rpc.py +285 -0
- chia/_tests/wallet/rpc/test_wallet_rpc.py +3153 -0
- chia/_tests/wallet/simple_sync/__init__.py +0 -0
- chia/_tests/wallet/simple_sync/config.py +3 -0
- chia/_tests/wallet/simple_sync/test_simple_sync_protocol.py +718 -0
- chia/_tests/wallet/sync/__init__.py +0 -0
- chia/_tests/wallet/sync/config.py +4 -0
- chia/_tests/wallet/sync/test_wallet_sync.py +1692 -0
- chia/_tests/wallet/test_address_type.py +189 -0
- chia/_tests/wallet/test_bech32m.py +45 -0
- chia/_tests/wallet/test_clvm_streamable.py +244 -0
- chia/_tests/wallet/test_coin_management.py +354 -0
- chia/_tests/wallet/test_coin_selection.py +588 -0
- chia/_tests/wallet/test_conditions.py +400 -0
- chia/_tests/wallet/test_debug_spend_bundle.py +218 -0
- chia/_tests/wallet/test_new_wallet_protocol.py +1174 -0
- chia/_tests/wallet/test_nft_store.py +192 -0
- chia/_tests/wallet/test_notifications.py +196 -0
- chia/_tests/wallet/test_offer_parsing_performance.py +48 -0
- chia/_tests/wallet/test_puzzle_store.py +132 -0
- chia/_tests/wallet/test_sign_coin_spends.py +159 -0
- chia/_tests/wallet/test_signer_protocol.py +947 -0
- chia/_tests/wallet/test_singleton.py +122 -0
- chia/_tests/wallet/test_singleton_lifecycle_fast.py +772 -0
- chia/_tests/wallet/test_singleton_store.py +152 -0
- chia/_tests/wallet/test_taproot.py +19 -0
- chia/_tests/wallet/test_transaction_store.py +945 -0
- chia/_tests/wallet/test_util.py +185 -0
- chia/_tests/wallet/test_wallet.py +2139 -0
- chia/_tests/wallet/test_wallet_action_scope.py +85 -0
- chia/_tests/wallet/test_wallet_blockchain.py +111 -0
- chia/_tests/wallet/test_wallet_coin_store.py +1002 -0
- chia/_tests/wallet/test_wallet_interested_store.py +43 -0
- chia/_tests/wallet/test_wallet_key_val_store.py +40 -0
- chia/_tests/wallet/test_wallet_node.py +780 -0
- chia/_tests/wallet/test_wallet_retry.py +95 -0
- chia/_tests/wallet/test_wallet_state_manager.py +259 -0
- chia/_tests/wallet/test_wallet_test_framework.py +275 -0
- chia/_tests/wallet/test_wallet_trade_store.py +218 -0
- chia/_tests/wallet/test_wallet_user_store.py +34 -0
- chia/_tests/wallet/test_wallet_utils.py +156 -0
- chia/_tests/wallet/vc_wallet/__init__.py +0 -0
- chia/_tests/wallet/vc_wallet/config.py +3 -0
- chia/_tests/wallet/vc_wallet/test_cr_outer_puzzle.py +70 -0
- chia/_tests/wallet/vc_wallet/test_vc_lifecycle.py +883 -0
- chia/_tests/wallet/vc_wallet/test_vc_wallet.py +830 -0
- chia/_tests/wallet/wallet_block_tools.py +327 -0
- chia/_tests/weight_proof/__init__.py +0 -0
- chia/_tests/weight_proof/config.py +3 -0
- chia/_tests/weight_proof/test_weight_proof.py +528 -0
- chia/apis.py +19 -0
- chia/clvm/__init__.py +0 -0
- chia/cmds/__init__.py +0 -0
- chia/cmds/beta.py +184 -0
- chia/cmds/beta_funcs.py +137 -0
- chia/cmds/check_wallet_db.py +420 -0
- chia/cmds/chia.py +151 -0
- chia/cmds/cmd_classes.py +323 -0
- chia/cmds/cmd_helpers.py +242 -0
- chia/cmds/cmds_util.py +488 -0
- chia/cmds/coin_funcs.py +275 -0
- chia/cmds/coins.py +182 -0
- chia/cmds/completion.py +49 -0
- chia/cmds/configure.py +332 -0
- chia/cmds/dao.py +1064 -0
- chia/cmds/dao_funcs.py +598 -0
- chia/cmds/data.py +708 -0
- chia/cmds/data_funcs.py +385 -0
- chia/cmds/db.py +87 -0
- chia/cmds/db_backup_func.py +77 -0
- chia/cmds/db_upgrade_func.py +452 -0
- chia/cmds/db_validate_func.py +184 -0
- chia/cmds/dev.py +18 -0
- chia/cmds/farm.py +100 -0
- chia/cmds/farm_funcs.py +200 -0
- chia/cmds/gh.py +275 -0
- chia/cmds/init.py +63 -0
- chia/cmds/init_funcs.py +367 -0
- chia/cmds/installers.py +131 -0
- chia/cmds/keys.py +527 -0
- chia/cmds/keys_funcs.py +863 -0
- chia/cmds/netspace.py +50 -0
- chia/cmds/netspace_funcs.py +54 -0
- chia/cmds/options.py +32 -0
- chia/cmds/param_types.py +238 -0
- chia/cmds/passphrase.py +131 -0
- chia/cmds/passphrase_funcs.py +292 -0
- chia/cmds/peer.py +51 -0
- chia/cmds/peer_funcs.py +129 -0
- chia/cmds/plotnft.py +260 -0
- chia/cmds/plotnft_funcs.py +405 -0
- chia/cmds/plots.py +230 -0
- chia/cmds/plotters.py +18 -0
- chia/cmds/rpc.py +208 -0
- chia/cmds/show.py +72 -0
- chia/cmds/show_funcs.py +215 -0
- chia/cmds/signer.py +296 -0
- chia/cmds/sim.py +225 -0
- chia/cmds/sim_funcs.py +509 -0
- chia/cmds/start.py +24 -0
- chia/cmds/start_funcs.py +109 -0
- chia/cmds/stop.py +62 -0
- chia/cmds/units.py +9 -0
- chia/cmds/wallet.py +1901 -0
- chia/cmds/wallet_funcs.py +1874 -0
- chia/consensus/__init__.py +0 -0
- chia/consensus/block_body_validation.py +562 -0
- chia/consensus/block_creation.py +546 -0
- chia/consensus/block_header_validation.py +1059 -0
- chia/consensus/block_record.py +31 -0
- chia/consensus/block_rewards.py +53 -0
- chia/consensus/blockchain.py +1087 -0
- chia/consensus/blockchain_interface.py +56 -0
- chia/consensus/coinbase.py +30 -0
- chia/consensus/condition_costs.py +9 -0
- chia/consensus/constants.py +49 -0
- chia/consensus/cost_calculator.py +15 -0
- chia/consensus/default_constants.py +89 -0
- chia/consensus/deficit.py +55 -0
- chia/consensus/difficulty_adjustment.py +412 -0
- chia/consensus/find_fork_point.py +111 -0
- chia/consensus/full_block_to_block_record.py +167 -0
- chia/consensus/get_block_challenge.py +106 -0
- chia/consensus/get_block_generator.py +27 -0
- chia/consensus/make_sub_epoch_summary.py +210 -0
- chia/consensus/multiprocess_validation.py +268 -0
- chia/consensus/pos_quality.py +19 -0
- chia/consensus/pot_iterations.py +67 -0
- chia/consensus/puzzles/__init__.py +0 -0
- chia/consensus/puzzles/chialisp_deserialisation.clsp +69 -0
- chia/consensus/puzzles/chialisp_deserialisation.clsp.hex +1 -0
- chia/consensus/puzzles/rom_bootstrap_generator.clsp +37 -0
- chia/consensus/puzzles/rom_bootstrap_generator.clsp.hex +1 -0
- chia/consensus/vdf_info_computation.py +156 -0
- chia/daemon/__init__.py +0 -0
- chia/daemon/client.py +252 -0
- chia/daemon/keychain_proxy.py +502 -0
- chia/daemon/keychain_server.py +365 -0
- chia/daemon/server.py +1606 -0
- chia/daemon/windows_signal.py +56 -0
- chia/data_layer/__init__.py +0 -0
- chia/data_layer/data_layer.py +1291 -0
- chia/data_layer/data_layer_api.py +33 -0
- chia/data_layer/data_layer_errors.py +50 -0
- chia/data_layer/data_layer_server.py +170 -0
- chia/data_layer/data_layer_util.py +985 -0
- chia/data_layer/data_layer_wallet.py +1311 -0
- chia/data_layer/data_store.py +2267 -0
- chia/data_layer/dl_wallet_store.py +407 -0
- chia/data_layer/download_data.py +389 -0
- chia/data_layer/puzzles/__init__.py +0 -0
- chia/data_layer/puzzles/graftroot_dl_offers.clsp +100 -0
- chia/data_layer/puzzles/graftroot_dl_offers.clsp.hex +1 -0
- chia/data_layer/s3_plugin_config.yml +33 -0
- chia/data_layer/s3_plugin_service.py +468 -0
- chia/data_layer/util/__init__.py +0 -0
- chia/data_layer/util/benchmark.py +107 -0
- chia/data_layer/util/plugin.py +40 -0
- chia/farmer/__init__.py +0 -0
- chia/farmer/farmer.py +923 -0
- chia/farmer/farmer_api.py +820 -0
- chia/full_node/__init__.py +0 -0
- chia/full_node/bitcoin_fee_estimator.py +85 -0
- chia/full_node/block_height_map.py +271 -0
- chia/full_node/block_store.py +576 -0
- chia/full_node/bundle_tools.py +19 -0
- chia/full_node/coin_store.py +647 -0
- chia/full_node/fee_estimate.py +54 -0
- chia/full_node/fee_estimate_store.py +24 -0
- chia/full_node/fee_estimation.py +92 -0
- chia/full_node/fee_estimator.py +90 -0
- chia/full_node/fee_estimator_constants.py +38 -0
- chia/full_node/fee_estimator_interface.py +42 -0
- chia/full_node/fee_history.py +25 -0
- chia/full_node/fee_tracker.py +564 -0
- chia/full_node/full_node.py +3327 -0
- chia/full_node/full_node_api.py +2025 -0
- chia/full_node/full_node_store.py +1033 -0
- chia/full_node/hint_management.py +56 -0
- chia/full_node/hint_store.py +93 -0
- chia/full_node/mempool.py +589 -0
- chia/full_node/mempool_check_conditions.py +146 -0
- chia/full_node/mempool_manager.py +853 -0
- chia/full_node/pending_tx_cache.py +112 -0
- chia/full_node/puzzles/__init__.py +0 -0
- chia/full_node/puzzles/block_program_zero.clsp +14 -0
- chia/full_node/puzzles/block_program_zero.clsp.hex +1 -0
- chia/full_node/puzzles/decompress_coin_spend_entry.clsp +5 -0
- chia/full_node/puzzles/decompress_coin_spend_entry.clsp.hex +1 -0
- chia/full_node/puzzles/decompress_coin_spend_entry_with_prefix.clsp +7 -0
- chia/full_node/puzzles/decompress_coin_spend_entry_with_prefix.clsp.hex +1 -0
- chia/full_node/puzzles/decompress_puzzle.clsp +6 -0
- chia/full_node/puzzles/decompress_puzzle.clsp.hex +1 -0
- chia/full_node/signage_point.py +16 -0
- chia/full_node/subscriptions.py +247 -0
- chia/full_node/sync_store.py +146 -0
- chia/full_node/tx_processing_queue.py +78 -0
- chia/full_node/util/__init__.py +0 -0
- chia/full_node/weight_proof.py +1720 -0
- chia/harvester/__init__.py +0 -0
- chia/harvester/harvester.py +272 -0
- chia/harvester/harvester_api.py +380 -0
- chia/introducer/__init__.py +0 -0
- chia/introducer/introducer.py +122 -0
- chia/introducer/introducer_api.py +70 -0
- chia/legacy/__init__.py +0 -0
- chia/legacy/keyring.py +155 -0
- chia/plot_sync/__init__.py +0 -0
- chia/plot_sync/delta.py +61 -0
- chia/plot_sync/exceptions.py +56 -0
- chia/plot_sync/receiver.py +386 -0
- chia/plot_sync/sender.py +340 -0
- chia/plot_sync/util.py +43 -0
- chia/plotters/__init__.py +0 -0
- chia/plotters/bladebit.py +388 -0
- chia/plotters/chiapos.py +63 -0
- chia/plotters/madmax.py +224 -0
- chia/plotters/plotters.py +577 -0
- chia/plotters/plotters_util.py +133 -0
- chia/plotting/__init__.py +0 -0
- chia/plotting/cache.py +213 -0
- chia/plotting/check_plots.py +283 -0
- chia/plotting/create_plots.py +278 -0
- chia/plotting/manager.py +436 -0
- chia/plotting/util.py +336 -0
- chia/pools/__init__.py +0 -0
- chia/pools/pool_config.py +110 -0
- chia/pools/pool_puzzles.py +459 -0
- chia/pools/pool_wallet.py +933 -0
- chia/pools/pool_wallet_info.py +118 -0
- chia/pools/puzzles/__init__.py +0 -0
- chia/pools/puzzles/pool_member_innerpuz.clsp +70 -0
- chia/pools/puzzles/pool_member_innerpuz.clsp.hex +1 -0
- chia/pools/puzzles/pool_waitingroom_innerpuz.clsp +69 -0
- chia/pools/puzzles/pool_waitingroom_innerpuz.clsp.hex +1 -0
- chia/protocols/__init__.py +0 -0
- chia/protocols/farmer_protocol.py +102 -0
- chia/protocols/full_node_protocol.py +219 -0
- chia/protocols/harvester_protocol.py +216 -0
- chia/protocols/introducer_protocol.py +25 -0
- chia/protocols/pool_protocol.py +177 -0
- chia/protocols/protocol_message_types.py +139 -0
- chia/protocols/protocol_state_machine.py +87 -0
- chia/protocols/protocol_timing.py +8 -0
- chia/protocols/shared_protocol.py +86 -0
- chia/protocols/timelord_protocol.py +93 -0
- chia/protocols/wallet_protocol.py +401 -0
- chia/py.typed +0 -0
- chia/rpc/__init__.py +0 -0
- chia/rpc/crawler_rpc_api.py +80 -0
- chia/rpc/data_layer_rpc_api.py +644 -0
- chia/rpc/data_layer_rpc_client.py +188 -0
- chia/rpc/data_layer_rpc_util.py +58 -0
- chia/rpc/farmer_rpc_api.py +365 -0
- chia/rpc/farmer_rpc_client.py +86 -0
- chia/rpc/full_node_rpc_api.py +959 -0
- chia/rpc/full_node_rpc_client.py +292 -0
- chia/rpc/harvester_rpc_api.py +141 -0
- chia/rpc/harvester_rpc_client.py +54 -0
- chia/rpc/rpc_client.py +164 -0
- chia/rpc/rpc_server.py +521 -0
- chia/rpc/timelord_rpc_api.py +32 -0
- chia/rpc/util.py +93 -0
- chia/rpc/wallet_request_types.py +904 -0
- chia/rpc/wallet_rpc_api.py +4943 -0
- chia/rpc/wallet_rpc_client.py +1814 -0
- chia/seeder/__init__.py +0 -0
- chia/seeder/crawl_store.py +425 -0
- chia/seeder/crawler.py +410 -0
- chia/seeder/crawler_api.py +135 -0
- chia/seeder/dns_server.py +593 -0
- chia/seeder/peer_record.py +146 -0
- chia/seeder/start_crawler.py +92 -0
- chia/server/__init__.py +0 -0
- chia/server/address_manager.py +658 -0
- chia/server/address_manager_store.py +237 -0
- chia/server/api_protocol.py +116 -0
- chia/server/capabilities.py +24 -0
- chia/server/chia_policy.py +346 -0
- chia/server/introducer_peers.py +76 -0
- chia/server/node_discovery.py +714 -0
- chia/server/outbound_message.py +33 -0
- chia/server/rate_limit_numbers.py +214 -0
- chia/server/rate_limits.py +153 -0
- chia/server/server.py +741 -0
- chia/server/signal_handlers.py +120 -0
- chia/server/ssl_context.py +32 -0
- chia/server/start_data_layer.py +151 -0
- chia/server/start_farmer.py +98 -0
- chia/server/start_full_node.py +112 -0
- chia/server/start_harvester.py +93 -0
- chia/server/start_introducer.py +81 -0
- chia/server/start_service.py +316 -0
- chia/server/start_timelord.py +89 -0
- chia/server/start_wallet.py +113 -0
- chia/server/upnp.py +118 -0
- chia/server/ws_connection.py +766 -0
- chia/simulator/__init__.py +0 -0
- chia/simulator/add_blocks_in_batches.py +54 -0
- chia/simulator/block_tools.py +2054 -0
- chia/simulator/full_node_simulator.py +794 -0
- chia/simulator/keyring.py +128 -0
- chia/simulator/setup_services.py +506 -0
- chia/simulator/simulator_constants.py +13 -0
- chia/simulator/simulator_full_node_rpc_api.py +99 -0
- chia/simulator/simulator_full_node_rpc_client.py +60 -0
- chia/simulator/simulator_protocol.py +29 -0
- chia/simulator/simulator_test_tools.py +164 -0
- chia/simulator/socket.py +24 -0
- chia/simulator/ssl_certs.py +114 -0
- chia/simulator/ssl_certs_1.py +697 -0
- chia/simulator/ssl_certs_10.py +697 -0
- chia/simulator/ssl_certs_2.py +697 -0
- chia/simulator/ssl_certs_3.py +697 -0
- chia/simulator/ssl_certs_4.py +697 -0
- chia/simulator/ssl_certs_5.py +697 -0
- chia/simulator/ssl_certs_6.py +697 -0
- chia/simulator/ssl_certs_7.py +697 -0
- chia/simulator/ssl_certs_8.py +697 -0
- chia/simulator/ssl_certs_9.py +697 -0
- chia/simulator/start_simulator.py +143 -0
- chia/simulator/wallet_tools.py +246 -0
- chia/ssl/__init__.py +0 -0
- chia/ssl/chia_ca.crt +19 -0
- chia/ssl/chia_ca.key +28 -0
- chia/ssl/create_ssl.py +249 -0
- chia/ssl/dst_root_ca.pem +20 -0
- chia/timelord/__init__.py +0 -0
- chia/timelord/iters_from_block.py +50 -0
- chia/timelord/timelord.py +1226 -0
- chia/timelord/timelord_api.py +138 -0
- chia/timelord/timelord_launcher.py +190 -0
- chia/timelord/timelord_state.py +244 -0
- chia/timelord/types.py +22 -0
- chia/types/__init__.py +0 -0
- chia/types/aliases.py +35 -0
- chia/types/block_protocol.py +20 -0
- chia/types/blockchain_format/__init__.py +0 -0
- chia/types/blockchain_format/classgroup.py +5 -0
- chia/types/blockchain_format/coin.py +28 -0
- chia/types/blockchain_format/foliage.py +8 -0
- chia/types/blockchain_format/pool_target.py +5 -0
- chia/types/blockchain_format/program.py +269 -0
- chia/types/blockchain_format/proof_of_space.py +135 -0
- chia/types/blockchain_format/reward_chain_block.py +6 -0
- chia/types/blockchain_format/serialized_program.py +5 -0
- chia/types/blockchain_format/sized_bytes.py +11 -0
- chia/types/blockchain_format/slots.py +9 -0
- chia/types/blockchain_format/sub_epoch_summary.py +5 -0
- chia/types/blockchain_format/tree_hash.py +72 -0
- chia/types/blockchain_format/vdf.py +86 -0
- chia/types/clvm_cost.py +13 -0
- chia/types/coin_record.py +43 -0
- chia/types/coin_spend.py +115 -0
- chia/types/condition_opcodes.py +73 -0
- chia/types/condition_with_args.py +16 -0
- chia/types/eligible_coin_spends.py +365 -0
- chia/types/end_of_slot_bundle.py +5 -0
- chia/types/fee_rate.py +38 -0
- chia/types/full_block.py +5 -0
- chia/types/generator_types.py +13 -0
- chia/types/header_block.py +5 -0
- chia/types/internal_mempool_item.py +18 -0
- chia/types/mempool_inclusion_status.py +9 -0
- chia/types/mempool_item.py +85 -0
- chia/types/mempool_submission_status.py +30 -0
- chia/types/mojos.py +7 -0
- chia/types/peer_info.py +64 -0
- chia/types/signing_mode.py +29 -0
- chia/types/spend_bundle.py +30 -0
- chia/types/spend_bundle_conditions.py +7 -0
- chia/types/transaction_queue_entry.py +55 -0
- chia/types/unfinished_block.py +5 -0
- chia/types/unfinished_header_block.py +37 -0
- chia/types/validation_state.py +14 -0
- chia/types/weight_proof.py +49 -0
- chia/util/__init__.py +0 -0
- chia/util/action_scope.py +168 -0
- chia/util/async_pool.py +226 -0
- chia/util/augmented_chain.py +134 -0
- chia/util/batches.py +42 -0
- chia/util/bech32m.py +126 -0
- chia/util/beta_metrics.py +119 -0
- chia/util/block_cache.py +56 -0
- chia/util/byte_types.py +12 -0
- chia/util/check_fork_next_block.py +33 -0
- chia/util/chia_logging.py +144 -0
- chia/util/chia_version.py +33 -0
- chia/util/collection.py +17 -0
- chia/util/condition_tools.py +201 -0
- chia/util/config.py +367 -0
- chia/util/cpu.py +22 -0
- chia/util/db_synchronous.py +23 -0
- chia/util/db_version.py +32 -0
- chia/util/db_wrapper.py +430 -0
- chia/util/default_root.py +27 -0
- chia/util/dump_keyring.py +93 -0
- chia/util/english.txt +2048 -0
- chia/util/errors.py +353 -0
- chia/util/file_keyring.py +469 -0
- chia/util/files.py +97 -0
- chia/util/full_block_utils.py +345 -0
- chia/util/generator_tools.py +72 -0
- chia/util/hash.py +31 -0
- chia/util/initial-config.yaml +694 -0
- chia/util/inline_executor.py +26 -0
- chia/util/ints.py +19 -0
- chia/util/ip_address.py +39 -0
- chia/util/json_util.py +37 -0
- chia/util/keychain.py +676 -0
- chia/util/keyring_wrapper.py +327 -0
- chia/util/limited_semaphore.py +41 -0
- chia/util/lock.py +49 -0
- chia/util/log_exceptions.py +32 -0
- chia/util/logging.py +36 -0
- chia/util/lru_cache.py +31 -0
- chia/util/math.py +20 -0
- chia/util/network.py +182 -0
- chia/util/paginator.py +48 -0
- chia/util/path.py +31 -0
- chia/util/permissions.py +20 -0
- chia/util/prev_transaction_block.py +21 -0
- chia/util/priority_mutex.py +95 -0
- chia/util/profiler.py +197 -0
- chia/util/recursive_replace.py +24 -0
- chia/util/safe_cancel_task.py +16 -0
- chia/util/service_groups.py +47 -0
- chia/util/setproctitle.py +22 -0
- chia/util/significant_bits.py +32 -0
- chia/util/ssl_check.py +213 -0
- chia/util/streamable.py +642 -0
- chia/util/task_referencer.py +59 -0
- chia/util/task_timing.py +382 -0
- chia/util/timing.py +67 -0
- chia/util/vdf_prover.py +30 -0
- chia/util/virtual_project_analysis.py +540 -0
- chia/util/ws_message.py +66 -0
- chia/wallet/__init__.py +0 -0
- chia/wallet/cat_wallet/__init__.py +0 -0
- chia/wallet/cat_wallet/cat_constants.py +75 -0
- chia/wallet/cat_wallet/cat_info.py +47 -0
- chia/wallet/cat_wallet/cat_outer_puzzle.py +120 -0
- chia/wallet/cat_wallet/cat_utils.py +164 -0
- chia/wallet/cat_wallet/cat_wallet.py +855 -0
- chia/wallet/cat_wallet/dao_cat_info.py +28 -0
- chia/wallet/cat_wallet/dao_cat_wallet.py +669 -0
- chia/wallet/cat_wallet/lineage_store.py +74 -0
- chia/wallet/cat_wallet/puzzles/__init__.py +0 -0
- chia/wallet/cat_wallet/puzzles/cat_truths.clib +31 -0
- chia/wallet/cat_wallet/puzzles/cat_v2.clsp +397 -0
- chia/wallet/cat_wallet/puzzles/cat_v2.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/delegated_tail.clsp +25 -0
- chia/wallet/cat_wallet/puzzles/delegated_tail.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/everything_with_signature.clsp +15 -0
- chia/wallet/cat_wallet/puzzles/everything_with_signature.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_coin_id.clsp +26 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_coin_id.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_coin_id_or_singleton.clsp +42 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_coin_id_or_singleton.clsp.hex +1 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_puzzle_hash.clsp +24 -0
- chia/wallet/cat_wallet/puzzles/genesis_by_puzzle_hash.clsp.hex +1 -0
- chia/wallet/coin_selection.py +188 -0
- chia/wallet/conditions.py +1512 -0
- chia/wallet/dao_wallet/__init__.py +0 -0
- chia/wallet/dao_wallet/dao_info.py +61 -0
- chia/wallet/dao_wallet/dao_utils.py +811 -0
- chia/wallet/dao_wallet/dao_wallet.py +2119 -0
- chia/wallet/db_wallet/__init__.py +0 -0
- chia/wallet/db_wallet/db_wallet_puzzles.py +111 -0
- chia/wallet/derivation_record.py +30 -0
- chia/wallet/derive_keys.py +146 -0
- chia/wallet/did_wallet/__init__.py +0 -0
- chia/wallet/did_wallet/did_info.py +39 -0
- chia/wallet/did_wallet/did_wallet.py +1494 -0
- chia/wallet/did_wallet/did_wallet_puzzles.py +221 -0
- chia/wallet/did_wallet/puzzles/__init__.py +0 -0
- chia/wallet/did_wallet/puzzles/did_innerpuz.clsp +135 -0
- chia/wallet/did_wallet/puzzles/did_innerpuz.clsp.hex +1 -0
- chia/wallet/driver_protocol.py +26 -0
- chia/wallet/key_val_store.py +55 -0
- chia/wallet/lineage_proof.py +58 -0
- chia/wallet/nft_wallet/__init__.py +0 -0
- chia/wallet/nft_wallet/metadata_outer_puzzle.py +92 -0
- chia/wallet/nft_wallet/nft_info.py +120 -0
- chia/wallet/nft_wallet/nft_puzzles.py +305 -0
- chia/wallet/nft_wallet/nft_wallet.py +1687 -0
- chia/wallet/nft_wallet/ownership_outer_puzzle.py +101 -0
- chia/wallet/nft_wallet/puzzles/__init__.py +0 -0
- chia/wallet/nft_wallet/puzzles/create_nft_launcher_from_did.clsp +6 -0
- chia/wallet/nft_wallet/puzzles/create_nft_launcher_from_did.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_intermediate_launcher.clsp +6 -0
- chia/wallet/nft_wallet/puzzles/nft_intermediate_launcher.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_metadata_updater_default.clsp +30 -0
- chia/wallet/nft_wallet/puzzles/nft_metadata_updater_default.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_metadata_updater_updateable.clsp +28 -0
- chia/wallet/nft_wallet/puzzles/nft_metadata_updater_updateable.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_ownership_layer.clsp +100 -0
- chia/wallet/nft_wallet/puzzles/nft_ownership_layer.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_ownership_transfer_program_one_way_claim_with_royalties.clsp +78 -0
- chia/wallet/nft_wallet/puzzles/nft_ownership_transfer_program_one_way_claim_with_royalties.clsp.hex +1 -0
- chia/wallet/nft_wallet/puzzles/nft_state_layer.clsp +74 -0
- chia/wallet/nft_wallet/puzzles/nft_state_layer.clsp.hex +1 -0
- chia/wallet/nft_wallet/singleton_outer_puzzle.py +101 -0
- chia/wallet/nft_wallet/transfer_program_puzzle.py +82 -0
- chia/wallet/nft_wallet/uncurry_nft.py +217 -0
- chia/wallet/notification_manager.py +117 -0
- chia/wallet/notification_store.py +178 -0
- chia/wallet/outer_puzzles.py +84 -0
- chia/wallet/payment.py +33 -0
- chia/wallet/puzzle_drivers.py +118 -0
- chia/wallet/puzzles/__init__.py +0 -0
- chia/wallet/puzzles/augmented_condition.clsp +13 -0
- chia/wallet/puzzles/augmented_condition.clsp.hex +1 -0
- chia/wallet/puzzles/clawback/__init__.py +0 -0
- chia/wallet/puzzles/clawback/drivers.py +188 -0
- chia/wallet/puzzles/clawback/metadata.py +38 -0
- chia/wallet/puzzles/clawback/puzzle_decorator.py +67 -0
- chia/wallet/puzzles/condition_codes.clib +77 -0
- chia/wallet/puzzles/curry-and-treehash.clib +102 -0
- chia/wallet/puzzles/curry.clib +135 -0
- chia/wallet/puzzles/curry_by_index.clib +16 -0
- chia/wallet/puzzles/dao_cat_eve.clsp +17 -0
- chia/wallet/puzzles/dao_cat_eve.clsp.hex +1 -0
- chia/wallet/puzzles/dao_cat_launcher.clsp +36 -0
- chia/wallet/puzzles/dao_cat_launcher.clsp.hex +1 -0
- chia/wallet/puzzles/dao_finished_state.clsp +35 -0
- chia/wallet/puzzles/dao_finished_state.clsp.hex +1 -0
- chia/wallet/puzzles/dao_finished_state.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_lockup.clsp +288 -0
- chia/wallet/puzzles/dao_lockup.clsp.hex +1 -0
- chia/wallet/puzzles/dao_lockup.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_proposal.clsp +377 -0
- chia/wallet/puzzles/dao_proposal.clsp.hex +1 -0
- chia/wallet/puzzles/dao_proposal.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_proposal_timer.clsp +78 -0
- chia/wallet/puzzles/dao_proposal_timer.clsp.hex +1 -0
- chia/wallet/puzzles/dao_proposal_timer.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_proposal_validator.clsp +87 -0
- chia/wallet/puzzles/dao_proposal_validator.clsp.hex +1 -0
- chia/wallet/puzzles/dao_proposal_validator.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_spend_p2_singleton_v2.clsp +240 -0
- chia/wallet/puzzles/dao_spend_p2_singleton_v2.clsp.hex +1 -0
- chia/wallet/puzzles/dao_spend_p2_singleton_v2.clsp.hex.sha256tree +1 -0
- chia/wallet/puzzles/dao_treasury.clsp +115 -0
- chia/wallet/puzzles/dao_treasury.clsp.hex +1 -0
- chia/wallet/puzzles/dao_update_proposal.clsp +44 -0
- chia/wallet/puzzles/dao_update_proposal.clsp.hex +1 -0
- chia/wallet/puzzles/deployed_puzzle_hashes.json +67 -0
- chia/wallet/puzzles/json.clib +25 -0
- chia/wallet/puzzles/load_clvm.py +161 -0
- chia/wallet/puzzles/merkle_utils.clib +18 -0
- chia/wallet/puzzles/notification.clsp +7 -0
- chia/wallet/puzzles/notification.clsp.hex +1 -0
- chia/wallet/puzzles/p2_1_of_n.clsp +22 -0
- chia/wallet/puzzles/p2_1_of_n.clsp.hex +1 -0
- chia/wallet/puzzles/p2_conditions.clsp +3 -0
- chia/wallet/puzzles/p2_conditions.clsp.hex +1 -0
- chia/wallet/puzzles/p2_conditions.py +26 -0
- chia/wallet/puzzles/p2_delegated_conditions.clsp +18 -0
- chia/wallet/puzzles/p2_delegated_conditions.clsp.hex +1 -0
- chia/wallet/puzzles/p2_delegated_conditions.py +21 -0
- chia/wallet/puzzles/p2_delegated_puzzle.clsp +19 -0
- chia/wallet/puzzles/p2_delegated_puzzle.clsp.hex +1 -0
- chia/wallet/puzzles/p2_delegated_puzzle.py +34 -0
- chia/wallet/puzzles/p2_delegated_puzzle_or_hidden_puzzle.clsp +91 -0
- chia/wallet/puzzles/p2_delegated_puzzle_or_hidden_puzzle.clsp.hex +1 -0
- chia/wallet/puzzles/p2_delegated_puzzle_or_hidden_puzzle.py +160 -0
- chia/wallet/puzzles/p2_m_of_n_delegate_direct.clsp +108 -0
- chia/wallet/puzzles/p2_m_of_n_delegate_direct.clsp.hex +1 -0
- chia/wallet/puzzles/p2_m_of_n_delegate_direct.py +21 -0
- chia/wallet/puzzles/p2_parent.clsp +19 -0
- chia/wallet/puzzles/p2_parent.clsp.hex +1 -0
- chia/wallet/puzzles/p2_puzzle_hash.clsp +18 -0
- chia/wallet/puzzles/p2_puzzle_hash.clsp.hex +1 -0
- chia/wallet/puzzles/p2_puzzle_hash.py +27 -0
- chia/wallet/puzzles/p2_singleton.clsp +30 -0
- chia/wallet/puzzles/p2_singleton.clsp.hex +1 -0
- chia/wallet/puzzles/p2_singleton_aggregator.clsp +81 -0
- chia/wallet/puzzles/p2_singleton_aggregator.clsp.hex +1 -0
- chia/wallet/puzzles/p2_singleton_or_delayed_puzhash.clsp +50 -0
- chia/wallet/puzzles/p2_singleton_or_delayed_puzhash.clsp.hex +1 -0
- chia/wallet/puzzles/p2_singleton_via_delegated_puzzle.clsp +47 -0
- chia/wallet/puzzles/p2_singleton_via_delegated_puzzle.clsp.hex +1 -0
- chia/wallet/puzzles/puzzle_utils.py +34 -0
- chia/wallet/puzzles/settlement_payments.clsp +49 -0
- chia/wallet/puzzles/settlement_payments.clsp.hex +1 -0
- chia/wallet/puzzles/sha256tree.clib +11 -0
- chia/wallet/puzzles/singleton_launcher.clsp +16 -0
- chia/wallet/puzzles/singleton_launcher.clsp.hex +1 -0
- chia/wallet/puzzles/singleton_top_layer.clsp +177 -0
- chia/wallet/puzzles/singleton_top_layer.clsp.hex +1 -0
- chia/wallet/puzzles/singleton_top_layer.py +296 -0
- chia/wallet/puzzles/singleton_top_layer_v1_1.clsp +107 -0
- chia/wallet/puzzles/singleton_top_layer_v1_1.clsp.hex +1 -0
- chia/wallet/puzzles/singleton_top_layer_v1_1.py +345 -0
- chia/wallet/puzzles/singleton_truths.clib +21 -0
- chia/wallet/puzzles/tails.py +348 -0
- chia/wallet/puzzles/utility_macros.clib +48 -0
- chia/wallet/signer_protocol.py +125 -0
- chia/wallet/singleton.py +106 -0
- chia/wallet/singleton_record.py +30 -0
- chia/wallet/trade_manager.py +1102 -0
- chia/wallet/trade_record.py +67 -0
- chia/wallet/trading/__init__.py +0 -0
- chia/wallet/trading/offer.py +702 -0
- chia/wallet/trading/trade_status.py +13 -0
- chia/wallet/trading/trade_store.py +526 -0
- chia/wallet/transaction_record.py +158 -0
- chia/wallet/transaction_sorting.py +14 -0
- chia/wallet/uncurried_puzzle.py +17 -0
- chia/wallet/util/__init__.py +0 -0
- chia/wallet/util/address_type.py +55 -0
- chia/wallet/util/blind_signer_tl.py +164 -0
- chia/wallet/util/clvm_streamable.py +203 -0
- chia/wallet/util/compute_hints.py +66 -0
- chia/wallet/util/compute_memos.py +43 -0
- chia/wallet/util/curry_and_treehash.py +91 -0
- chia/wallet/util/debug_spend_bundle.py +232 -0
- chia/wallet/util/merkle_tree.py +100 -0
- chia/wallet/util/merkle_utils.py +102 -0
- chia/wallet/util/new_peak_queue.py +82 -0
- chia/wallet/util/notifications.py +12 -0
- chia/wallet/util/peer_request_cache.py +174 -0
- chia/wallet/util/pprint.py +39 -0
- chia/wallet/util/puzzle_compression.py +95 -0
- chia/wallet/util/puzzle_decorator.py +100 -0
- chia/wallet/util/puzzle_decorator_type.py +7 -0
- chia/wallet/util/query_filter.py +59 -0
- chia/wallet/util/transaction_type.py +23 -0
- chia/wallet/util/tx_config.py +158 -0
- chia/wallet/util/wallet_sync_utils.py +351 -0
- chia/wallet/util/wallet_types.py +72 -0
- chia/wallet/vc_wallet/__init__.py +0 -0
- chia/wallet/vc_wallet/cr_cat_drivers.py +664 -0
- chia/wallet/vc_wallet/cr_cat_wallet.py +877 -0
- chia/wallet/vc_wallet/cr_outer_puzzle.py +102 -0
- chia/wallet/vc_wallet/cr_puzzles/__init__.py +0 -0
- chia/wallet/vc_wallet/cr_puzzles/conditions_w_fee_announce.clsp +3 -0
- chia/wallet/vc_wallet/cr_puzzles/conditions_w_fee_announce.clsp.hex +1 -0
- chia/wallet/vc_wallet/cr_puzzles/credential_restriction.clsp +304 -0
- chia/wallet/vc_wallet/cr_puzzles/credential_restriction.clsp.hex +1 -0
- chia/wallet/vc_wallet/cr_puzzles/flag_proofs_checker.clsp +45 -0
- chia/wallet/vc_wallet/cr_puzzles/flag_proofs_checker.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_drivers.py +838 -0
- chia/wallet/vc_wallet/vc_puzzles/__init__.py +0 -0
- chia/wallet/vc_wallet/vc_puzzles/covenant_layer.clsp +30 -0
- chia/wallet/vc_wallet/vc_puzzles/covenant_layer.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_covenant_morpher.clsp +75 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_covenant_morpher.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_transfer_program_covenant_adapter.clsp +32 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_transfer_program_covenant_adapter.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_update_metadata_with_DID.clsp +80 -0
- chia/wallet/vc_wallet/vc_puzzles/eml_update_metadata_with_DID.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/exigent_metadata_layer.clsp +163 -0
- chia/wallet/vc_wallet/vc_puzzles/exigent_metadata_layer.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/p2_announced_delegated_puzzle.clsp +16 -0
- chia/wallet/vc_wallet/vc_puzzles/p2_announced_delegated_puzzle.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/standard_vc_backdoor_puzzle.clsp +74 -0
- chia/wallet/vc_wallet/vc_puzzles/standard_vc_backdoor_puzzle.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/std_parent_morpher.clsp +23 -0
- chia/wallet/vc_wallet/vc_puzzles/std_parent_morpher.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_puzzles/viral_backdoor.clsp +64 -0
- chia/wallet/vc_wallet/vc_puzzles/viral_backdoor.clsp.hex +1 -0
- chia/wallet/vc_wallet/vc_store.py +263 -0
- chia/wallet/vc_wallet/vc_wallet.py +638 -0
- chia/wallet/wallet.py +698 -0
- chia/wallet/wallet_action_scope.py +96 -0
- chia/wallet/wallet_blockchain.py +244 -0
- chia/wallet/wallet_coin_record.py +72 -0
- chia/wallet/wallet_coin_store.py +351 -0
- chia/wallet/wallet_info.py +35 -0
- chia/wallet/wallet_interested_store.py +188 -0
- chia/wallet/wallet_nft_store.py +279 -0
- chia/wallet/wallet_node.py +1765 -0
- chia/wallet/wallet_node_api.py +207 -0
- chia/wallet/wallet_pool_store.py +119 -0
- chia/wallet/wallet_protocol.py +90 -0
- chia/wallet/wallet_puzzle_store.py +396 -0
- chia/wallet/wallet_retry_store.py +70 -0
- chia/wallet/wallet_singleton_store.py +259 -0
- chia/wallet/wallet_spend_bundle.py +25 -0
- chia/wallet/wallet_state_manager.py +2819 -0
- chia/wallet/wallet_transaction_store.py +496 -0
- chia/wallet/wallet_user_store.py +110 -0
- chia/wallet/wallet_weight_proof_handler.py +126 -0
- chia_blockchain-2.5.1rc1.dist-info/LICENSE +201 -0
- chia_blockchain-2.5.1rc1.dist-info/METADATA +156 -0
- chia_blockchain-2.5.1rc1.dist-info/RECORD +1042 -0
- chia_blockchain-2.5.1rc1.dist-info/WHEEL +4 -0
- chia_blockchain-2.5.1rc1.dist-info/entry_points.txt +17 -0
- mozilla-ca/cacert.pem +3611 -0
chia/data_layer/data_store.py
@@ -0,0 +1,2267 @@
from __future__ import annotations

import contextlib
import logging
from collections import defaultdict
from collections.abc import AsyncIterator, Awaitable
from contextlib import asynccontextmanager
from dataclasses import dataclass, replace
from pathlib import Path
from typing import Any, BinaryIO, Callable, Optional, Union

import aiosqlite

from chia.data_layer.data_layer_errors import KeyNotFoundError, NodeHashError, TreeGenerationIncrementingError
from chia.data_layer.data_layer_util import (
    DiffData,
    InsertResult,
    InternalNode,
    KeysPaginationData,
    KeysValuesCompressed,
    KeysValuesPaginationData,
    KVDiffPaginationData,
    Node,
    NodeType,
    OperationType,
    ProofOfInclusion,
    ProofOfInclusionLayer,
    Root,
    SerializedNode,
    ServerInfo,
    Side,
    Status,
    Subscription,
    TerminalNode,
    Unspecified,
    get_hashes_for_page,
    internal_hash,
    key_hash,
    leaf_hash,
    row_to_node,
    unspecified,
)
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.db_wrapper import SQLITE_MAX_VARIABLE_NUMBER, DBWrapper2

log = logging.getLogger(__name__)


# TODO: review exceptions for values that shouldn't be displayed
# TODO: pick exception types other than Exception


@dataclass
class DataStore:
    """A key/value store with the pairs being terminal nodes in a CLVM object tree."""

    db_wrapper: DBWrapper2

    @classmethod
    @contextlib.asynccontextmanager
    async def managed(
        cls, database: Union[str, Path], uri: bool = False, sql_log_path: Optional[Path] = None
    ) -> AsyncIterator[DataStore]:
        async with DBWrapper2.managed(
            database=database,
            uri=uri,
            journal_mode="WAL",
            # Setting to FULL despite other locations being configurable. If there are
            # performance issues we can consider the implications of other options.
            synchronous="FULL",
            # If foreign key checking gets turned off, please add corresponding check
            # methods and enable foreign key checking in the tests.
            foreign_keys=True,
            row_factory=aiosqlite.Row,
            log_path=sql_log_path,
        ) as db_wrapper:
            self = cls(db_wrapper=db_wrapper)

            async with db_wrapper.writer() as writer:
                await writer.execute(
                    f"""
                    CREATE TABLE IF NOT EXISTS node(
                        hash BLOB PRIMARY KEY NOT NULL CHECK(length(hash) == 32),
                        node_type INTEGER NOT NULL CHECK(
                            (
                                node_type == {int(NodeType.INTERNAL)}
                                AND left IS NOT NULL
                                AND right IS NOT NULL
                                AND key IS NULL
                                AND value IS NULL
                            )
                            OR
                            (
                                node_type == {int(NodeType.TERMINAL)}
                                AND left IS NULL
                                AND right IS NULL
                                AND key IS NOT NULL
                                AND value IS NOT NULL
                            )
                        ),
                        left BLOB REFERENCES node,
                        right BLOB REFERENCES node,
                        key BLOB,
                        value BLOB
                    )
                    """
                )
                await writer.execute(
                    """
                    CREATE TRIGGER IF NOT EXISTS no_node_updates
                    BEFORE UPDATE ON node
                    BEGIN
                        SELECT RAISE(FAIL, 'updates not allowed to the node table');
                    END
                    """
                )
                await writer.execute(
                    f"""
                    CREATE TABLE IF NOT EXISTS root(
                        tree_id BLOB NOT NULL CHECK(length(tree_id) == 32),
                        generation INTEGER NOT NULL CHECK(generation >= 0),
                        node_hash BLOB,
                        status INTEGER NOT NULL CHECK(
                            {" OR ".join(f"status == {status}" for status in Status)}
                        ),
                        PRIMARY KEY(tree_id, generation),
                        FOREIGN KEY(node_hash) REFERENCES node(hash)
                    )
                    """
                )
                # TODO: Add ancestor -> hash relationship, this might involve temporarily
                #       deferring the foreign key enforcement due to the insertion order
                #       and the node table also enforcing a similar relationship in the
                #       other direction.
                #       FOREIGN KEY(ancestor) REFERENCES ancestors(ancestor)
                await writer.execute(
                    """
                    CREATE TABLE IF NOT EXISTS ancestors(
                        hash BLOB NOT NULL REFERENCES node,
                        ancestor BLOB CHECK(length(ancestor) == 32),
                        tree_id BLOB NOT NULL CHECK(length(tree_id) == 32),
                        generation INTEGER NOT NULL,
                        PRIMARY KEY(hash, tree_id, generation),
                        FOREIGN KEY(ancestor) REFERENCES node(hash)
                    )
                    """
                )
                await writer.execute(
                    """
                    CREATE TABLE IF NOT EXISTS subscriptions(
                        tree_id BLOB NOT NULL CHECK(length(tree_id) == 32),
                        url TEXT,
                        ignore_till INTEGER,
                        num_consecutive_failures INTEGER,
                        from_wallet tinyint CHECK(from_wallet == 0 OR from_wallet == 1),
                        PRIMARY KEY(tree_id, url)
                    )
                    """
                )
                await writer.execute(
                    """
                    CREATE TABLE IF NOT EXISTS schema(
                        version_id TEXT PRIMARY KEY,
                        applied_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
                    )
                    """
                )
                await writer.execute(
                    """
                    CREATE INDEX IF NOT EXISTS node_hash ON root(node_hash)
                    """
                )
                await writer.execute(
                    """
                    CREATE INDEX IF NOT EXISTS node_key_index ON node(key)
                    """
                )

            yield self

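The `managed` classmethod above is the intended entry point for the class: it owns the `DBWrapper2` lifecycle and creates all of the tables, the trigger, and the indexes before yielding the store. A minimal usage sketch, not part of the package (the database path and the surrounding script are illustrative assumptions; `transaction` is the helper defined just below):

import asyncio
from pathlib import Path

from chia.data_layer.data_store import DataStore


async def main() -> None:
    # Opens (or creates) the SQLite file, runs the CREATE TABLE/TRIGGER/INDEX
    # statements if needed, and closes the wrapper on exit.
    async with DataStore.managed(database=Path("data_layer_example.sqlite")) as store:
        # Batch several writes into one atomic SQLite transaction.
        async with store.transaction():
            pass  # perform reads/writes against `store` here


asyncio.run(main())
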
182
|
+
@asynccontextmanager
|
|
183
|
+
async def transaction(self) -> AsyncIterator[None]:
|
|
184
|
+
async with self.db_wrapper.writer():
|
|
185
|
+
yield
|
|
186
|
+
|
|
187
|
+
async def migrate_db(self) -> None:
|
|
188
|
+
async with self.db_wrapper.reader() as reader:
|
|
189
|
+
cursor = await reader.execute("SELECT * FROM schema")
|
|
190
|
+
row = await cursor.fetchone()
|
|
191
|
+
if row is not None:
|
|
192
|
+
version = row["version_id"]
|
|
193
|
+
if version != "v1.0":
|
|
194
|
+
raise Exception("Unknown version")
|
|
195
|
+
log.info(f"Found DB schema version {version}. No migration needed.")
|
|
196
|
+
return
|
|
197
|
+
|
|
198
|
+
version = "v1.0"
|
|
199
|
+
log.info(f"Initiating migration to version {version}")
|
|
200
|
+
async with self.db_wrapper.writer(foreign_key_enforcement_enabled=False) as writer:
|
|
201
|
+
await writer.execute(
|
|
202
|
+
f"""
|
|
203
|
+
CREATE TABLE IF NOT EXISTS new_root(
|
|
204
|
+
tree_id BLOB NOT NULL CHECK(length(tree_id) == 32),
|
|
205
|
+
generation INTEGER NOT NULL CHECK(generation >= 0),
|
|
206
|
+
node_hash BLOB,
|
|
207
|
+
status INTEGER NOT NULL CHECK(
|
|
208
|
+
{" OR ".join(f"status == {status}" for status in Status)}
|
|
209
|
+
),
|
|
210
|
+
PRIMARY KEY(tree_id, generation),
|
|
211
|
+
FOREIGN KEY(node_hash) REFERENCES node(hash)
|
|
212
|
+
)
|
|
213
|
+
"""
|
|
214
|
+
)
|
|
215
|
+
await writer.execute("INSERT INTO new_root SELECT * FROM root")
|
|
216
|
+
await writer.execute("DROP TABLE root")
|
|
217
|
+
await writer.execute("ALTER TABLE new_root RENAME TO root")
|
|
218
|
+
await writer.execute("INSERT INTO schema (version_id) VALUES (?)", (version,))
|
|
219
|
+
log.info(f"Finished migrating DB to version {version}")

    async def _insert_root(
        self,
        store_id: bytes32,
        node_hash: Optional[bytes32],
        status: Status,
        generation: Optional[int] = None,
    ) -> Root:
        # This should be replaced by an SQLite schema level check.
        # https://github.com/Chia-Network/chia-blockchain/pull/9284
        store_id = bytes32(store_id)

        async with self.db_wrapper.writer() as writer:
            if generation is None:
                try:
                    existing_generation = await self.get_tree_generation(store_id=store_id)
                except Exception as e:
                    if not str(e).startswith("No generations found for store ID:"):
                        raise
                    generation = 0
                else:
                    generation = existing_generation + 1

            new_root = Root(
                store_id=store_id,
                node_hash=node_hash,
                generation=generation,
                status=status,
            )

            await writer.execute(
                """
                INSERT INTO root(tree_id, generation, node_hash, status)
                VALUES(:tree_id, :generation, :node_hash, :status)
                """,
                new_root.to_row(),
            )

            # `node_hash` is now a root, so it has no ancestor.
            # Don't change the ancestor table unless the root is committed.
            if node_hash is not None and status == Status.COMMITTED:
                values = {
                    "hash": node_hash,
                    "tree_id": store_id,
                    "generation": generation,
                }
                await writer.execute(
                    """
                    INSERT INTO ancestors(hash, ancestor, tree_id, generation)
                    VALUES (:hash, NULL, :tree_id, :generation)
                    """,
                    values,
                )

        return new_root
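
    # NOTE (editorial, illustrative only -- not part of the package source): when
    # `generation` is omitted, `_insert_root` appends to the chain of roots: with an
    # existing latest committed generation of 4 the new root lands at generation 5,
    # and a store with no roots at all starts at generation 0. A hypothetical call:
    #
    #     new_root = await store._insert_root(store_id=store_id, node_hash=some_hash, status=Status.PENDING)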

    async def _insert_node(
        self,
        node_hash: bytes32,
        node_type: NodeType,
        left_hash: Optional[bytes32],
        right_hash: Optional[bytes32],
        key: Optional[bytes],
        value: Optional[bytes],
    ) -> None:
        # TODO: can we get sqlite to do this check?
        values = {
            "hash": node_hash,
            "node_type": node_type,
            "left": left_hash,
            "right": right_hash,
            "key": key,
            "value": value,
        }

        async with self.db_wrapper.writer() as writer:
            try:
                await writer.execute(
                    """
                    INSERT INTO node(hash, node_type, left, right, key, value)
                    VALUES(:hash, :node_type, :left, :right, :key, :value)
                    """,
                    values,
                )
            except aiosqlite.IntegrityError as e:
                if not e.args[0].startswith("UNIQUE constraint"):
                    # UNIQUE constraint failed: node.hash
                    raise

                async with writer.execute(
                    "SELECT * FROM node WHERE hash == :hash LIMIT 1",
                    {"hash": node_hash},
                ) as cursor:
                    result = await cursor.fetchone()

                if result is None:
                    # some ideas for causes:
                    #   an sqlite bug
                    #   bad queries in this function
                    #   unexpected db constraints
                    raise Exception("Unable to find conflicting row") from e  # pragma: no cover

                result_dict = dict(result)
                if result_dict != values:
                    raise Exception(
                        f"Requested insertion of node with matching hash but other values differ: {node_hash}"
                    ) from None

    async def insert_node(self, node_type: NodeType, value1: bytes, value2: bytes) -> None:
        if node_type == NodeType.INTERNAL:
            left_hash = bytes32(value1)
            right_hash = bytes32(value2)
            node_hash = internal_hash(left_hash, right_hash)
            await self._insert_node(node_hash, node_type, bytes32(value1), bytes32(value2), None, None)
        else:
            node_hash = leaf_hash(key=value1, value=value2)
            await self._insert_node(node_hash, node_type, None, None, value1, value2)

    async def _insert_internal_node(self, left_hash: bytes32, right_hash: bytes32) -> bytes32:
        node_hash: bytes32 = internal_hash(left_hash=left_hash, right_hash=right_hash)

        await self._insert_node(
            node_hash=node_hash,
            node_type=NodeType.INTERNAL,
            left_hash=left_hash,
            right_hash=right_hash,
            key=None,
            value=None,
        )

        return node_hash

    async def _insert_ancestor_table(
        self,
        left_hash: bytes32,
        right_hash: bytes32,
        store_id: bytes32,
        generation: int,
    ) -> None:
        node_hash = internal_hash(left_hash=left_hash, right_hash=right_hash)

        async with self.db_wrapper.writer() as writer:
            for hash in (left_hash, right_hash):
                values = {
                    "hash": hash,
                    "ancestor": node_hash,
                    "tree_id": store_id,
                    "generation": generation,
                }
                try:
                    await writer.execute(
                        """
                        INSERT INTO ancestors(hash, ancestor, tree_id, generation)
                        VALUES (:hash, :ancestor, :tree_id, :generation)
                        """,
                        values,
                    )
                except aiosqlite.IntegrityError as e:
                    if not e.args[0].startswith("UNIQUE constraint"):
                        # UNIQUE constraint failed: ancestors.hash, ancestors.tree_id, ancestors.generation
                        raise

                    async with writer.execute(
                        """
                        SELECT *
                        FROM ancestors
                        WHERE hash == :hash AND generation == :generation AND tree_id == :tree_id
                        LIMIT 1
                        """,
                        {"hash": hash, "generation": generation, "tree_id": store_id},
                    ) as cursor:
                        result = await cursor.fetchone()

                    if result is None:
                        # some ideas for causes:
                        #   an sqlite bug
                        #   bad queries in this function
                        #   unexpected db constraints
                        raise Exception("Unable to find conflicting row") from e  # pragma: no cover

                    result_dict = dict(result)
                    if result_dict != values:
                        raise Exception(
                            "Requested insertion of ancestor, where ancestor differ, but other values are identical: "
                            f"{hash} {generation} {store_id}"
                        ) from None

    async def _insert_terminal_node(self, key: bytes, value: bytes) -> bytes32:
        # forcing type hint here for:
        # https://github.com/Chia-Network/clvm/pull/102
        # https://github.com/Chia-Network/clvm/pull/106
        node_hash: bytes32 = Program.to((key, value)).get_tree_hash()

        await self._insert_node(
            node_hash=node_hash,
            node_type=NodeType.TERMINAL,
            left_hash=None,
            right_hash=None,
            key=key,
            value=value,
        )

        return node_hash
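
    # NOTE (editorial, illustrative only -- not part of the package source): a
    # terminal node's hash is the CLVM tree hash of the (key, value) pair, so the
    # `Program.to((key, value)).get_tree_hash()` above should agree with the
    # `leaf_hash` helper used elsewhere in this class. Sketch, assuming key=b"k"
    # and value=b"v":
    #
    #     assert leaf_hash(key=b"k", value=b"v") == Program.to((b"k", b"v")).get_tree_hash()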

    async def get_pending_root(self, store_id: bytes32) -> Optional[Root]:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute(
                """
                SELECT * FROM root WHERE tree_id == :tree_id
                AND status IN (:pending_status, :pending_batch_status) LIMIT 2
                """,
                {
                    "tree_id": store_id,
                    "pending_status": Status.PENDING.value,
                    "pending_batch_status": Status.PENDING_BATCH.value,
                },
            )

            row = await cursor.fetchone()

            if row is None:
                return None

            maybe_extra_result = await cursor.fetchone()
            if maybe_extra_result is not None:
                raise Exception(f"multiple pending roots found for id: {store_id.hex()}")

        return Root.from_row(row=row)

    async def clear_pending_roots(self, store_id: bytes32) -> Optional[Root]:
        async with self.db_wrapper.writer() as writer:
            pending_root = await self.get_pending_root(store_id=store_id)

            if pending_root is not None:
                await writer.execute(
                    "DELETE FROM root WHERE tree_id == :tree_id AND status IN (:pending_status, :pending_batch_status)",
                    {
                        "tree_id": store_id,
                        "pending_status": Status.PENDING.value,
                        "pending_batch_status": Status.PENDING_BATCH.value,
                    },
                )

        return pending_root

    async def shift_root_generations(self, store_id: bytes32, shift_size: int) -> None:
        async with self.db_wrapper.writer():
            root = await self.get_tree_root(store_id=store_id)
            for _ in range(shift_size):
                await self._insert_root(store_id=store_id, node_hash=root.node_hash, status=Status.COMMITTED)

    async def change_root_status(self, root: Root, status: Status = Status.PENDING) -> None:
        async with self.db_wrapper.writer() as writer:
            await writer.execute(
                "UPDATE root SET status = ? WHERE tree_id=? and generation = ?",
                (
                    status.value,
                    root.store_id,
                    root.generation,
                ),
            )
            # `node_hash` is now a root, so it has no ancestor.
            # Don't change the ancestor table unless the root is committed.
            if root.node_hash is not None and status == Status.COMMITTED:
                values = {
                    "hash": root.node_hash,
                    "tree_id": root.store_id,
                    "generation": root.generation,
                }
                await writer.execute(
                    """
                    INSERT INTO ancestors(hash, ancestor, tree_id, generation)
                    VALUES (:hash, NULL, :tree_id, :generation)
                    """,
                    values,
                )

    async def check(self) -> None:
        for check in self._checks:
            await check(self)

    async def _check_roots_are_incrementing(self) -> None:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute("SELECT * FROM root ORDER BY tree_id, generation")
            roots = [Root.from_row(row=row) async for row in cursor]

            roots_by_tree: dict[bytes32, list[Root]] = defaultdict(list)
            for root in roots:
                roots_by_tree[root.store_id].append(root)

            bad_trees = []
            for store_id, roots in roots_by_tree.items():
                current_generation = roots[-1].generation
                expected_generations = list(range(current_generation + 1))
                actual_generations = [root.generation for root in roots]
                if actual_generations != expected_generations:
                    bad_trees.append(store_id)

            if len(bad_trees) > 0:
                raise TreeGenerationIncrementingError(store_ids=bad_trees)

    async def _check_hashes(self) -> None:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute("SELECT * FROM node")

            bad_node_hashes: list[bytes32] = []
            async for row in cursor:
                node = row_to_node(row=row)
                if isinstance(node, InternalNode):
                    expected_hash = internal_hash(left_hash=node.left_hash, right_hash=node.right_hash)
                elif isinstance(node, TerminalNode):
                    expected_hash = Program.to((node.key, node.value)).get_tree_hash()
                else:
                    raise Exception(f"Internal error, unknown node type: {node!r}")

                if node.hash != expected_hash:
                    bad_node_hashes.append(node.hash)

        if len(bad_node_hashes) > 0:
            raise NodeHashError(node_hashes=bad_node_hashes)

    _checks: tuple[Callable[[DataStore], Awaitable[None]], ...] = (
        _check_roots_are_incrementing,
        _check_hashes,
    )
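
    # NOTE (editorial, illustrative only -- not part of the package source):
    # `check()` runs every entry of `_checks` against this instance, so a full
    # integrity sweep is simply:
    #
    #     await data_store.check()  # raises TreeGenerationIncrementingError or NodeHashError on corruption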

    async def create_tree(self, store_id: bytes32, status: Status = Status.PENDING) -> bool:
        await self._insert_root(store_id=store_id, node_hash=None, status=status)

        return True

    async def table_is_empty(self, store_id: bytes32) -> bool:
        tree_root = await self.get_tree_root(store_id=store_id)

        return tree_root.node_hash is None

    async def get_store_ids(self) -> set[bytes32]:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute("SELECT DISTINCT tree_id FROM root")

            store_ids = {bytes32(row["tree_id"]) async for row in cursor}

        return store_ids

    async def get_tree_generation(self, store_id: bytes32) -> int:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute(
                "SELECT MAX(generation) FROM root WHERE tree_id == :tree_id AND status == :status",
                {"tree_id": store_id, "status": Status.COMMITTED.value},
            )
            row = await cursor.fetchone()

        if row is not None:
            generation: Optional[int] = row["MAX(generation)"]

            if generation is not None:
                return generation

        raise Exception(f"No generations found for store ID: {store_id.hex()}")

    async def get_tree_root(self, store_id: bytes32, generation: Optional[int] = None) -> Root:
        async with self.db_wrapper.reader() as reader:
            if generation is None:
                generation = await self.get_tree_generation(store_id=store_id)
            cursor = await reader.execute(
                """
                SELECT *
                FROM root
                WHERE tree_id == :tree_id AND generation == :generation AND status == :status
                LIMIT 1
                """,
                {"tree_id": store_id, "generation": generation, "status": Status.COMMITTED.value},
            )
            row = await cursor.fetchone()

            if row is None:
                raise Exception(f"unable to find root for id, generation: {store_id.hex()}, {generation}")

        return Root.from_row(row=row)

    async def get_all_pending_batches_roots(self) -> list[Root]:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute(
                """
                SELECT * FROM root WHERE status == :status
                """,
                {"status": Status.PENDING_BATCH.value},
            )
            roots = [Root.from_row(row=row) async for row in cursor]
            store_ids = [root.store_id for root in roots]
            if len(set(store_ids)) != len(store_ids):
                raise Exception("Internal error: multiple pending batches for a store")
            return roots

    async def store_id_exists(self, store_id: bytes32) -> bool:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute(
                "SELECT 1 FROM root WHERE tree_id == :tree_id AND status == :status LIMIT 1",
                {"tree_id": store_id, "status": Status.COMMITTED.value},
            )
            row = await cursor.fetchone()

        if row is None:
            return False
        return True

    async def get_roots_between(self, store_id: bytes32, generation_begin: int, generation_end: int) -> list[Root]:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute(
                "SELECT * FROM root WHERE tree_id == :tree_id "
                "AND generation >= :generation_begin AND generation < :generation_end ORDER BY generation ASC",
                {"tree_id": store_id, "generation_begin": generation_begin, "generation_end": generation_end},
            )
            roots = [Root.from_row(row=row) async for row in cursor]

        return roots

    async def get_last_tree_root_by_hash(
        self, store_id: bytes32, hash: Optional[bytes32], max_generation: Optional[int] = None
    ) -> Optional[Root]:
        async with self.db_wrapper.reader() as reader:
            max_generation_str = "AND generation < :max_generation " if max_generation is not None else ""
            node_hash_str = "AND node_hash == :node_hash " if hash is not None else "AND node_hash is NULL "
            cursor = await reader.execute(
                "SELECT * FROM root WHERE tree_id == :tree_id "
                f"{max_generation_str}"
                f"{node_hash_str}"
                "ORDER BY generation DESC LIMIT 1",
                {"tree_id": store_id, "node_hash": hash, "max_generation": max_generation},
            )
            row = await cursor.fetchone()

        if row is None:
            return None
        return Root.from_row(row=row)

    async def get_ancestors(
        self,
        node_hash: bytes32,
        store_id: bytes32,
        root_hash: Optional[bytes32] = None,
    ) -> list[InternalNode]:
        async with self.db_wrapper.reader() as reader:
            if root_hash is None:
                root = await self.get_tree_root(store_id=store_id)
                root_hash = root.node_hash
            if root_hash is None:
                raise Exception(f"Root hash is unspecified for store ID: {store_id.hex()}")
            cursor = await reader.execute(
                """
                WITH RECURSIVE
                    tree_from_root_hash(hash, node_type, left, right, key, value, depth) AS (
                        SELECT node.*, 0 AS depth FROM node WHERE node.hash == :root_hash
                        UNION ALL
                        SELECT node.*, tree_from_root_hash.depth + 1 AS depth FROM node, tree_from_root_hash
                        WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
                    ),
                    ancestors(hash, node_type, left, right, key, value, depth) AS (
                        SELECT node.*, NULL AS depth FROM node
                        WHERE node.left == :reference_hash OR node.right == :reference_hash
                        UNION ALL
                        SELECT node.*, NULL AS depth FROM node, ancestors
                        WHERE node.left == ancestors.hash OR node.right == ancestors.hash
                    )
                SELECT * FROM tree_from_root_hash INNER JOIN ancestors
                WHERE tree_from_root_hash.hash == ancestors.hash
                ORDER BY tree_from_root_hash.depth DESC
                """,
                {"reference_hash": node_hash, "root_hash": root_hash},
            )

            # The resulting rows must represent internal nodes.  InternalNode.from_row()
            # does some amount of validation in the sense that it will fail if left
            # or right can't turn into a bytes32 as expected.  There is room for more
            # validation here if desired.
            ancestors = [InternalNode.from_row(row=row) async for row in cursor]

        return ancestors
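
    # NOTE (editorial, illustrative only -- not part of the package source): the
    # query above combines two recursive CTEs. `tree_from_root_hash` walks DOWN
    # from the requested root, collecting every reachable node with its depth,
    # while `ancestors` walks UP from the reference node through every row whose
    # left or right child matches. Joining them keeps only parents that actually
    # lie on the path inside this root's tree, ordered deepest-first, so index 0
    # is the immediate parent and the last entry is the root itself.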

    async def get_ancestors_optimized(
        self,
        node_hash: bytes32,
        store_id: bytes32,
        generation: Optional[int] = None,
        root_hash: Optional[bytes32] = None,
    ) -> list[InternalNode]:
        async with self.db_wrapper.reader():
            nodes = []
            if root_hash is None:
                root = await self.get_tree_root(store_id=store_id, generation=generation)
                root_hash = root.node_hash

            if root_hash is None:
                return []

            while True:
                internal_node = await self._get_one_ancestor(node_hash, store_id, generation)
                if internal_node is None:
                    break
                nodes.append(internal_node)
                node_hash = internal_node.hash

            if len(nodes) > 0:
                if root_hash != nodes[-1].hash:
                    raise RuntimeError("Ancestors list didn't produce the root as top result.")

            return nodes

    async def get_internal_nodes(self, store_id: bytes32, root_hash: Optional[bytes32] = None) -> list[InternalNode]:
        async with self.db_wrapper.reader() as reader:
            if root_hash is None:
                root = await self.get_tree_root(store_id=store_id)
                root_hash = root.node_hash
            cursor = await reader.execute(
                """
                WITH RECURSIVE
                    tree_from_root_hash(hash, node_type, left, right, key, value) AS (
                        SELECT node.* FROM node WHERE node.hash == :root_hash
                        UNION ALL
                        SELECT node.* FROM node, tree_from_root_hash WHERE node.hash == tree_from_root_hash.left
                        OR node.hash == tree_from_root_hash.right
                    )
                SELECT * FROM tree_from_root_hash
                WHERE node_type == :node_type
                """,
                {"root_hash": root_hash, "node_type": NodeType.INTERNAL},
            )

            internal_nodes: list[InternalNode] = []
            async for row in cursor:
                node = row_to_node(row=row)
                if not isinstance(node, InternalNode):
                    raise Exception(f"Unexpected internal node found: {node.hash.hex()}")
                internal_nodes.append(node)

        return internal_nodes

    async def get_keys_values_cursor(
        self,
        reader: aiosqlite.Connection,
        root_hash: Optional[bytes32],
        only_keys: bool = False,
    ) -> aiosqlite.Cursor:
        select_clause = "SELECT hash, key" if only_keys else "SELECT *"
        maybe_value = "" if only_keys else "value, "
        select_node_clause = "node.hash, node.node_type, node.left, node.right, node.key" if only_keys else "node.*"
        return await reader.execute(
            f"""
            WITH RECURSIVE
                tree_from_root_hash(hash, node_type, left, right, key, {maybe_value}depth, rights) AS (
                    SELECT {select_node_clause}, 0 AS depth, 0 AS rights FROM node WHERE node.hash == :root_hash
                    UNION ALL
                    SELECT
                        {select_node_clause},
                        tree_from_root_hash.depth + 1 AS depth,
                        CASE
                            WHEN node.hash == tree_from_root_hash.right
                            THEN tree_from_root_hash.rights + (1 << (62 - tree_from_root_hash.depth))
                            ELSE tree_from_root_hash.rights
                        END AS rights
                    FROM node, tree_from_root_hash
                    WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
                )
            {select_clause} FROM tree_from_root_hash
            WHERE node_type == :node_type
            ORDER BY depth ASC, rights ASC
            """,
            {"root_hash": root_hash, "node_type": NodeType.TERMINAL},
        )
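
    # NOTE (editorial, illustrative only -- not part of the package source): the
    # `rights` accumulator encodes the left/right turns taken from the root as a
    # fixed-point binary value so that `ORDER BY depth ASC, rights ASC` reports
    # terminal nodes in left-to-right tree order. Taking the right branch at depth
    # d adds 1 << (62 - d); for example a leaf reached via (right at depth 0) gets
    # rights == 1 << 62 and sorts after any same-depth leaf reached via (left, ...)
    # with rights == 0. The 62 limit exists because SQLite integers are signed
    # 64-bit, which is also why callers reject rows with depth > 62.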

    async def get_keys_values(
        self,
        store_id: bytes32,
        root_hash: Union[bytes32, Unspecified] = unspecified,
    ) -> list[TerminalNode]:
        async with self.db_wrapper.reader() as reader:
            resolved_root_hash: Optional[bytes32]
            if root_hash is unspecified:
                root = await self.get_tree_root(store_id=store_id)
                resolved_root_hash = root.node_hash
            else:
                resolved_root_hash = root_hash

            cursor = await self.get_keys_values_cursor(reader, resolved_root_hash)
            terminal_nodes: list[TerminalNode] = []
            async for row in cursor:
                if row["depth"] > 62:
                    # TODO: Review the value and implementation of left-to-right order
                    #       reporting.  Initial use is for balanced insertion with the
                    #       work done in the query.

                    # This is limited based on the choice of 63 for the maximum left
                    # shift in the query.  This is in turn based on the SQLite integers
                    # ranging in size up to signed 8 bytes, 64 bits.  If we exceed this then
                    # we no longer guarantee the left-to-right ordering of the node
                    # list.  While 63 allows for a lot of nodes in a balanced tree, in
                    # the worst case it allows only 62 terminal nodes.
                    raise Exception("Tree depth exceeded 62, unable to guarantee left-to-right node order.")
                node = row_to_node(row=row)
                if not isinstance(node, TerminalNode):
                    raise Exception(f"Unexpected internal node found: {node.hash.hex()}")
                terminal_nodes.append(node)

        return terminal_nodes

    async def get_keys_values_compressed(
        self,
        store_id: bytes32,
        root_hash: Union[bytes32, Unspecified] = unspecified,
    ) -> KeysValuesCompressed:
        async with self.db_wrapper.reader() as reader:
            resolved_root_hash: Optional[bytes32]
            if root_hash is unspecified:
                root = await self.get_tree_root(store_id=store_id)
                resolved_root_hash = root.node_hash
            else:
                resolved_root_hash = root_hash

            cursor = await self.get_keys_values_cursor(reader, resolved_root_hash)
            keys_values_hashed: dict[bytes32, bytes32] = {}
            key_hash_to_length: dict[bytes32, int] = {}
            leaf_hash_to_length: dict[bytes32, int] = {}
            async for row in cursor:
                if row["depth"] > 62:
                    raise Exception("Tree depth exceeded 62, unable to guarantee left-to-right node order.")
                node = row_to_node(row=row)
                if not isinstance(node, TerminalNode):
                    raise Exception(f"Unexpected internal node found: {node.hash.hex()}")
                keys_values_hashed[key_hash(node.key)] = leaf_hash(node.key, node.value)
                key_hash_to_length[key_hash(node.key)] = len(node.key)
                leaf_hash_to_length[leaf_hash(node.key, node.value)] = len(node.key) + len(node.value)

        return KeysValuesCompressed(keys_values_hashed, key_hash_to_length, leaf_hash_to_length, resolved_root_hash)

    async def get_leaf_hashes_by_hashed_key(
        self, store_id: bytes32, root_hash: Optional[bytes32] = None
    ) -> dict[bytes32, bytes32]:
        result: dict[bytes32, bytes32] = {}
        async with self.db_wrapper.reader() as reader:
            if root_hash is None:
                root = await self.get_tree_root(store_id=store_id)
                root_hash = root.node_hash

            cursor = await self.get_keys_values_cursor(reader, root_hash, True)
            async for row in cursor:
                result[key_hash(row["key"])] = bytes32(row["hash"])

        return result

    async def get_keys_paginated(
        self,
        store_id: bytes32,
        page: int,
        max_page_size: int,
        root_hash: Union[bytes32, Unspecified] = unspecified,
    ) -> KeysPaginationData:
        keys_values_compressed = await self.get_keys_values_compressed(store_id, root_hash)
        pagination_data = get_hashes_for_page(page, keys_values_compressed.key_hash_to_length, max_page_size)

        keys: list[bytes] = []
        for hash in pagination_data.hashes:
            leaf_hash = keys_values_compressed.keys_values_hashed[hash]
            node = await self.get_node(leaf_hash)
            assert isinstance(node, TerminalNode)
            keys.append(node.key)

        return KeysPaginationData(
            pagination_data.total_pages,
            pagination_data.total_bytes,
            keys,
            keys_values_compressed.root_hash,
        )

    async def get_keys_values_paginated(
        self,
        store_id: bytes32,
        page: int,
        max_page_size: int,
        root_hash: Union[bytes32, Unspecified] = unspecified,
    ) -> KeysValuesPaginationData:
        keys_values_compressed = await self.get_keys_values_compressed(store_id, root_hash)
        pagination_data = get_hashes_for_page(page, keys_values_compressed.leaf_hash_to_length, max_page_size)

        keys_values: list[TerminalNode] = []
        for hash in pagination_data.hashes:
            node = await self.get_node(hash)
            assert isinstance(node, TerminalNode)
            keys_values.append(node)

        return KeysValuesPaginationData(
            pagination_data.total_pages,
            pagination_data.total_bytes,
            keys_values,
            keys_values_compressed.root_hash,
        )

    async def get_kv_diff_paginated(
        self,
        store_id: bytes32,
        page: int,
        max_page_size: int,
        # NOTE: empty is expressed as zeros
        hash1: bytes32,
        hash2: bytes32,
    ) -> KVDiffPaginationData:
        old_pairs = await self.get_keys_values_compressed(store_id, hash1)
        if len(old_pairs.keys_values_hashed) == 0 and hash1 != bytes32.zeros:
            raise Exception(f"Unable to diff: Can't find keys and values for {hash1}")

        new_pairs = await self.get_keys_values_compressed(store_id, hash2)
        if len(new_pairs.keys_values_hashed) == 0 and hash2 != bytes32.zeros:
            raise Exception(f"Unable to diff: Can't find keys and values for {hash2}")

        old_pairs_leaf_hashes = {v for v in old_pairs.keys_values_hashed.values()}
        new_pairs_leaf_hashes = {v for v in new_pairs.keys_values_hashed.values()}
        insertions = {k for k in new_pairs_leaf_hashes if k not in old_pairs_leaf_hashes}
        deletions = {k for k in old_pairs_leaf_hashes if k not in new_pairs_leaf_hashes}
        lengths = {}
        for hash in insertions:
            lengths[hash] = new_pairs.leaf_hash_to_length[hash]
        for hash in deletions:
            lengths[hash] = old_pairs.leaf_hash_to_length[hash]

        pagination_data = get_hashes_for_page(page, lengths, max_page_size)
        kv_diff: list[DiffData] = []

        for hash in pagination_data.hashes:
            node = await self.get_node(hash)
            assert isinstance(node, TerminalNode)
            if hash in insertions:
                kv_diff.append(DiffData(OperationType.INSERT, node.key, node.value))
            else:
                kv_diff.append(DiffData(OperationType.DELETE, node.key, node.value))

        return KVDiffPaginationData(
            pagination_data.total_pages,
            pagination_data.total_bytes,
            kv_diff,
        )
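
    # NOTE (editorial, illustrative only -- not part of the package source): the
    # diff above is a plain set difference on leaf hashes. Because
    # leaf_hash(key, value) changes whenever the value does, a changed value shows
    # up as one DELETE (the old leaf hash) plus one INSERT (the new one). Sketch:
    #
    #     insertions = new_leaf_hashes - old_leaf_hashes
    #     deletions = old_leaf_hashes - new_leaf_hashes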

    async def get_node_type(self, node_hash: bytes32) -> NodeType:
        async with self.db_wrapper.reader() as reader:
            cursor = await reader.execute(
                "SELECT node_type FROM node WHERE hash == :hash LIMIT 1",
                {"hash": node_hash},
            )
            raw_node_type = await cursor.fetchone()

        if raw_node_type is None:
            raise Exception(f"No node found for specified hash: {node_hash.hex()}")

        return NodeType(raw_node_type["node_type"])

    async def get_terminal_node_for_seed(
        self, store_id: bytes32, seed: bytes32, root_hash: Optional[bytes32] = None
    ) -> Optional[bytes32]:
        path = "".join(reversed("".join(f"{b:08b}" for b in seed)))
        async with self.db_wrapper.reader() as reader:
            if root_hash is None:
                root = await self.get_tree_root(store_id)
                root_hash = root.node_hash
            if root_hash is None:
                return None

            async with reader.execute(
                """
                WITH RECURSIVE
                    random_leaf(hash, node_type, left, right, depth, side) AS (
                        SELECT
                            node.hash AS hash,
                            node.node_type AS node_type,
                            node.left AS left,
                            node.right AS right,
                            1 AS depth,
                            SUBSTR(:path, 1, 1) as side
                        FROM node
                        WHERE node.hash == :root_hash
                        UNION ALL
                        SELECT
                            node.hash AS hash,
                            node.node_type AS node_type,
                            node.left AS left,
                            node.right AS right,
                            random_leaf.depth + 1 AS depth,
                            SUBSTR(:path, random_leaf.depth + 1, 1) as side
                        FROM node, random_leaf
                        WHERE (
                            (random_leaf.side == "0" AND node.hash == random_leaf.left)
                            OR (random_leaf.side != "0" AND node.hash == random_leaf.right)
                        )
                    )
                SELECT hash AS hash FROM random_leaf
                WHERE node_type == :node_type
                LIMIT 1
                """,
                {"root_hash": root_hash, "node_type": NodeType.TERMINAL, "path": path},
            ) as cursor:
                row = await cursor.fetchone()
                if row is None:
                    # No cover since this is an error state that should be unreachable given the code
                    # above has already verified that there is a non-empty tree.
                    raise Exception("No terminal node found for seed")  # pragma: no cover
                return bytes32(row["hash"])

    def get_side_for_seed(self, seed: bytes32) -> Side:
        side_seed = bytes(seed)[0]
        return Side.LEFT if side_seed < 128 else Side.RIGHT
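
    # NOTE (editorial, illustrative only -- not part of the package source): both
    # seed helpers above derive deterministic but pseudo-random placement from a
    # hash. `get_terminal_node_for_seed` turns the seed bytes into a bit string
    # and follows it down the tree ("0" = left, otherwise right), while
    # `get_side_for_seed` looks only at the first byte: 0x00-0x7f attaches on the
    # LEFT, 0x80-0xff on the RIGHT. So a seed starting with byte 0x3a would yield
    # Side.LEFT.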

    async def autoinsert(
        self,
        key: bytes,
        value: bytes,
        store_id: bytes32,
        use_optimized: bool = True,
        status: Status = Status.PENDING,
        root: Optional[Root] = None,
    ) -> InsertResult:
        async with self.db_wrapper.writer():
            if root is None:
                root = await self.get_tree_root(store_id=store_id)

            was_empty = root.node_hash is None

            if was_empty:
                reference_node_hash = None
                side = None
            else:
                seed = leaf_hash(key=key, value=value)
                reference_node_hash = await self.get_terminal_node_for_seed(store_id, seed, root_hash=root.node_hash)
                side = self.get_side_for_seed(seed)

            return await self.insert(
                key=key,
                value=value,
                store_id=store_id,
                reference_node_hash=reference_node_hash,
                side=side,
                use_optimized=use_optimized,
                status=status,
                root=root,
            )

    async def get_keys_values_dict(
        self,
        store_id: bytes32,
        root_hash: Union[bytes32, Unspecified] = unspecified,
    ) -> dict[bytes, bytes]:
        pairs = await self.get_keys_values(store_id=store_id, root_hash=root_hash)
        return {node.key: node.value for node in pairs}

    async def get_keys(
        self,
        store_id: bytes32,
        root_hash: Union[bytes32, Unspecified] = unspecified,
    ) -> list[bytes]:
        async with self.db_wrapper.reader() as reader:
            if root_hash is unspecified:
                root = await self.get_tree_root(store_id=store_id)
                resolved_root_hash = root.node_hash
            else:
                resolved_root_hash = root_hash
            cursor = await reader.execute(
                """
                WITH RECURSIVE
                    tree_from_root_hash(hash, node_type, left, right, key) AS (
                        SELECT node.hash, node.node_type, node.left, node.right, node.key
                        FROM node WHERE node.hash == :root_hash
                        UNION ALL
                        SELECT
                            node.hash, node.node_type, node.left, node.right, node.key FROM node, tree_from_root_hash
                        WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
                    )
                SELECT key FROM tree_from_root_hash WHERE node_type == :node_type
                """,
                {"root_hash": resolved_root_hash, "node_type": NodeType.TERMINAL},
            )

            keys: list[bytes] = [row["key"] async for row in cursor]

        return keys

    async def get_ancestors_common(
        self,
        node_hash: bytes32,
        store_id: bytes32,
        root_hash: Optional[bytes32],
        generation: Optional[int] = None,
        use_optimized: bool = True,
    ) -> list[InternalNode]:
        if use_optimized:
            ancestors: list[InternalNode] = await self.get_ancestors_optimized(
                node_hash=node_hash,
                store_id=store_id,
                generation=generation,
                root_hash=root_hash,
            )
        else:
            ancestors = await self.get_ancestors_optimized(
                node_hash=node_hash,
                store_id=store_id,
                generation=generation,
                root_hash=root_hash,
            )
            ancestors_2: list[InternalNode] = await self.get_ancestors(
                node_hash=node_hash, store_id=store_id, root_hash=root_hash
            )
            if ancestors != ancestors_2:
                raise RuntimeError("Ancestors optimized didn't produce the expected result.")

        if len(ancestors) >= 62:
            raise RuntimeError("Tree exceeds max height of 62.")
        return ancestors

    async def update_ancestor_hashes_on_insert(
        self,
        store_id: bytes32,
        left: bytes32,
        right: bytes32,
        traversal_node_hash: bytes32,
        ancestors: list[InternalNode],
        status: Status,
        root: Root,
    ) -> Root:
        # update ancestors after inserting root, to keep table constraints.
        insert_ancestors_cache: list[tuple[bytes32, bytes32, bytes32]] = []
        new_generation = root.generation + 1
        # create first new internal node
        new_hash = await self._insert_internal_node(left_hash=left, right_hash=right)
        insert_ancestors_cache.append((left, right, store_id))

        # create updated replacements for the rest of the internal nodes
        for ancestor in ancestors:
            if not isinstance(ancestor, InternalNode):
                raise Exception(f"Expected an internal node but got: {type(ancestor).__name__}")

            if ancestor.left_hash == traversal_node_hash:
                left = new_hash
                right = ancestor.right_hash
            elif ancestor.right_hash == traversal_node_hash:
                left = ancestor.left_hash
                right = new_hash

            traversal_node_hash = ancestor.hash

            new_hash = await self._insert_internal_node(left_hash=left, right_hash=right)
            insert_ancestors_cache.append((left, right, store_id))

        new_root = await self._insert_root(
            store_id=store_id,
            node_hash=new_hash,
            status=status,
            generation=new_generation,
        )

        if status == Status.COMMITTED:
            for left_hash, right_hash, store_id in insert_ancestors_cache:
                await self._insert_ancestor_table(left_hash, right_hash, store_id, new_generation)

        return new_root
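
    # NOTE (editorial, illustrative only -- not part of the package source): this
    # is path copying for a persistent Merkle tree. Only the internal nodes
    # between the touched leaf and the root are rewritten, one new node per level,
    # while everything off that path is shared with the previous generation. For a
    # tree of height h an insert therefore creates at most h + 1 new `node` rows
    # rather than a full copy of the tree.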

    async def insert(
        self,
        key: bytes,
        value: bytes,
        store_id: bytes32,
        reference_node_hash: Optional[bytes32],
        side: Optional[Side],
        use_optimized: bool = True,
        status: Status = Status.PENDING,
        root: Optional[Root] = None,
    ) -> InsertResult:
        async with self.db_wrapper.writer():
            if root is None:
                root = await self.get_tree_root(store_id=store_id)

            try:
                await self.get_node_by_key(key=key, store_id=store_id)
                raise Exception(f"Key already present: {key.hex()}")
            except KeyNotFoundError:
                pass

            was_empty = root.node_hash is None
            if reference_node_hash is None:
                if not was_empty:
                    raise Exception(f"Reference node hash must be specified for non-empty tree: {store_id.hex()}")
            else:
                reference_node_type = await self.get_node_type(node_hash=reference_node_hash)
                if reference_node_type == NodeType.INTERNAL:
                    raise Exception("can not insert a new key/value on an internal node")

            # create new terminal node
            new_terminal_node_hash = await self._insert_terminal_node(key=key, value=value)

            if was_empty:
                if side is not None:
                    raise Exception(f"Tree was empty so side must be unspecified, got: {side!r}")

                new_root = await self._insert_root(
                    store_id=store_id,
                    node_hash=new_terminal_node_hash,
                    status=status,
                )
            else:
                if side is None:
                    raise Exception("Tree was not empty, side must be specified.")
                if reference_node_hash is None:
                    raise Exception("Tree was not empty, reference node hash must be specified.")
                if root.node_hash is None:
                    raise Exception("Internal error.")

                if side == Side.LEFT:
                    left = new_terminal_node_hash
                    right = reference_node_hash
                elif side == Side.RIGHT:
                    left = reference_node_hash
                    right = new_terminal_node_hash
                else:
                    raise Exception(f"Internal error, unknown side: {side!r}")

                ancestors = await self.get_ancestors_common(
                    node_hash=reference_node_hash,
                    store_id=store_id,
                    root_hash=root.node_hash,
                    generation=root.generation,
                    use_optimized=use_optimized,
                )
                new_root = await self.update_ancestor_hashes_on_insert(
                    store_id=store_id,
                    left=left,
                    right=right,
                    traversal_node_hash=reference_node_hash,
                    ancestors=ancestors,
                    status=status,
                    root=root,
                )

            return InsertResult(node_hash=new_terminal_node_hash, root=new_root)
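
    # NOTE (editorial, illustrative only -- not part of the package source): a
    # hypothetical direct insertion next to a known leaf, placing the new key on
    # the left of the reference node (`existing_leaf_hash` is assumed here):
    #
    #     result = await store.insert(
    #         key=b"new key",
    #         value=b"new value",
    #         store_id=store_id,
    #         reference_node_hash=existing_leaf_hash,
    #         side=Side.LEFT,
    #     )
    #
    # `autoinsert` is the convenience wrapper that picks the reference node and
    # side from a seed instead of requiring the caller to choose them.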

    async def delete(
        self,
        key: bytes,
        store_id: bytes32,
        use_optimized: bool = True,
        status: Status = Status.PENDING,
        root: Optional[Root] = None,
    ) -> Optional[Root]:
        root_hash = None if root is None else root.node_hash
        async with self.db_wrapper.writer():
            try:
                node = await self.get_node_by_key(key=key, store_id=store_id)
                node_hash = node.hash
                assert isinstance(node, TerminalNode)
            except KeyNotFoundError:
                log.debug(f"Request to delete an unknown key ignored: {key.hex()}")
                return root

            ancestors: list[InternalNode] = await self.get_ancestors_common(
                node_hash=node_hash,
                store_id=store_id,
                root_hash=root_hash,
                use_optimized=use_optimized,
            )

            if len(ancestors) == 0:
                # the only node is being deleted
                return await self._insert_root(
                    store_id=store_id,
                    node_hash=None,
                    status=status,
                )

            parent = ancestors[0]
            other_hash = parent.other_child_hash(hash=node_hash)

            if len(ancestors) == 1:
                # the parent is the root so the other side will become the new root
                return await self._insert_root(
                    store_id=store_id,
                    node_hash=other_hash,
                    status=status,
                )

            old_child_hash = parent.hash
            new_child_hash = other_hash
            if root is None:
                new_generation = await self.get_tree_generation(store_id) + 1
            else:
                new_generation = root.generation + 1
            # update ancestors after inserting root, to keep table constraints.
            insert_ancestors_cache: list[tuple[bytes32, bytes32, bytes32]] = []
            # more parents to handle so let's traverse them
            for ancestor in ancestors[1:]:
                if ancestor.left_hash == old_child_hash:
                    left_hash = new_child_hash
                    right_hash = ancestor.right_hash
                elif ancestor.right_hash == old_child_hash:
                    left_hash = ancestor.left_hash
                    right_hash = new_child_hash
                else:
                    raise Exception("Internal error.")

                new_child_hash = await self._insert_internal_node(left_hash=left_hash, right_hash=right_hash)
                insert_ancestors_cache.append((left_hash, right_hash, store_id))
                old_child_hash = ancestor.hash

            new_root = await self._insert_root(
                store_id=store_id,
                node_hash=new_child_hash,
                status=status,
                generation=new_generation,
            )
            if status == Status.COMMITTED:
                for left_hash, right_hash, store_id in insert_ancestors_cache:
                    await self._insert_ancestor_table(left_hash, right_hash, store_id, new_generation)

            return new_root
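
    # NOTE (editorial, illustrative only -- not part of the package source):
    # deletion works by sibling promotion. Removing leaf K under parent P replaces
    # P with K's sibling S in P's own parent, then rewrites the remaining
    # ancestors upward:
    #
    #         P
    #        / \     ->    S
    #       K   S
    #
    # which is why an empty ancestor list above means the store becomes empty and
    # a single ancestor means the sibling becomes the new root.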

    async def upsert(
        self,
        key: bytes,
        new_value: bytes,
        store_id: bytes32,
        use_optimized: bool = True,
        status: Status = Status.PENDING,
        root: Optional[Root] = None,
    ) -> InsertResult:
        async with self.db_wrapper.writer():
            if root is None:
                root = await self.get_tree_root(store_id=store_id)

            try:
                old_node = await self.get_node_by_key(key=key, store_id=store_id)
            except KeyNotFoundError:
                log.debug(f"Key not found: {key.hex()}. Doing an autoinsert instead")
                return await self.autoinsert(
                    key=key,
                    value=new_value,
                    store_id=store_id,
                    use_optimized=use_optimized,
                    status=status,
                    root=root,
                )
            if old_node.value == new_value:
                log.debug(f"New value matches old value in upsert operation: {key.hex()}. Ignoring upsert")
                return InsertResult(leaf_hash(key, new_value), root)

            # create new terminal node
            new_terminal_node_hash = await self._insert_terminal_node(key=key, value=new_value)

            ancestors = await self.get_ancestors_common(
                node_hash=old_node.hash,
                store_id=store_id,
                root_hash=root.node_hash,
                generation=root.generation,
                use_optimized=use_optimized,
            )

            # Store contains only the old root, replace it with a new root having the terminal node.
            if len(ancestors) == 0:
                new_root = await self._insert_root(
                    store_id=store_id,
                    node_hash=new_terminal_node_hash,
                    status=status,
                )
            else:
                parent = ancestors[0]
                if parent.left_hash == old_node.hash:
                    left = new_terminal_node_hash
                    right = parent.right_hash
                elif parent.right_hash == old_node.hash:
                    left = parent.left_hash
                    right = new_terminal_node_hash
                else:
                    raise Exception("Internal error.")

                new_root = await self.update_ancestor_hashes_on_insert(
                    store_id=store_id,
                    left=left,
                    right=right,
                    traversal_node_hash=parent.hash,
                    ancestors=ancestors[1:],
                    status=status,
                    root=root,
                )

            return InsertResult(node_hash=new_terminal_node_hash, root=new_root)

    async def clean_node_table(self, writer: Optional[aiosqlite.Connection] = None) -> None:
        query = """
            WITH RECURSIVE pending_nodes AS (
                SELECT node_hash AS hash FROM root
                WHERE status IN (:pending_status, :pending_batch_status)
                UNION ALL
                SELECT n.left FROM node n
                INNER JOIN pending_nodes pn ON n.hash = pn.hash
                WHERE n.left IS NOT NULL
                UNION ALL
                SELECT n.right FROM node n
                INNER JOIN pending_nodes pn ON n.hash = pn.hash
                WHERE n.right IS NOT NULL
            )
            DELETE FROM node
            WHERE hash IN (
                SELECT n.hash FROM node n
                LEFT JOIN ancestors a ON n.hash = a.hash
                LEFT JOIN pending_nodes pn ON n.hash = pn.hash
                WHERE a.hash IS NULL AND pn.hash IS NULL
            )
        """
        params = {"pending_status": Status.PENDING.value, "pending_batch_status": Status.PENDING_BATCH.value}
        if writer is None:
            async with self.db_wrapper.writer(foreign_key_enforcement_enabled=False) as writer:
                await writer.execute(query, params)
        else:
            await writer.execute(query, params)

    async def get_nodes(self, node_hashes: list[bytes32]) -> list[Node]:
        query_parameter_place_holders = ",".join("?" for _ in node_hashes)
        async with self.db_wrapper.reader() as reader:
            # TODO: handle SQLITE_MAX_VARIABLE_NUMBER
            cursor = await reader.execute(
                f"SELECT * FROM node WHERE hash IN ({query_parameter_place_holders})",
                [*node_hashes],
            )
            rows = await cursor.fetchall()

        hash_to_node = {row["hash"]: row_to_node(row=row) for row in rows}

        missing_hashes = [node_hash.hex() for node_hash in node_hashes if node_hash not in hash_to_node]
        if missing_hashes:
            raise Exception(f"Nodes not found for hashes: {', '.join(missing_hashes)}")

        return [hash_to_node[node_hash] for node_hash in node_hashes]

    async def get_leaf_at_minimum_height(
        self, root_hash: bytes32, hash_to_parent: dict[bytes32, InternalNode]
    ) -> TerminalNode:
        queue: list[bytes32] = [root_hash]
        batch_size = min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)

        while True:
            assert len(queue) > 0
            nodes = await self.get_nodes(queue[:batch_size])
            queue = queue[batch_size:]

            for node in nodes:
                if isinstance(node, TerminalNode):
                    return node
                hash_to_parent[node.left_hash] = node
                hash_to_parent[node.right_hash] = node
                queue.append(node.left_hash)
                queue.append(node.right_hash)

    async def batch_upsert(
        self,
        hash: bytes32,
        to_update_hashes: set[bytes32],
        pending_upsert_new_hashes: dict[bytes32, bytes32],
    ) -> bytes32:
        if hash not in to_update_hashes:
            return hash
        node = await self.get_node(hash)
        if isinstance(node, TerminalNode):
            return pending_upsert_new_hashes[hash]
        new_left_hash = await self.batch_upsert(node.left_hash, to_update_hashes, pending_upsert_new_hashes)
        new_right_hash = await self.batch_upsert(node.right_hash, to_update_hashes, pending_upsert_new_hashes)
        return await self._insert_internal_node(new_left_hash, new_right_hash)
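
    # NOTE (editorial, illustrative only -- not part of the package source):
    # `batch_upsert` rebuilds the tree recursively. Subtrees whose hash is not in
    # `to_update_hashes` are returned unchanged (structure sharing), updated leaves
    # are swapped for their replacement hash from `pending_upsert_new_hashes`, and
    # every ancestor on the way back up is re-hashed exactly once, no matter how
    # many leaves changed beneath it.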
|
|
1487
|
+
|
|
1488
|
+
async def insert_batch(
|
|
1489
|
+
self,
|
|
1490
|
+
store_id: bytes32,
|
|
1491
|
+
changelist: list[dict[str, Any]],
|
|
1492
|
+
status: Status = Status.PENDING,
|
|
1493
|
+
enable_batch_autoinsert: bool = True,
|
|
1494
|
+
) -> Optional[bytes32]:
|
|
1495
|
+
async with self.transaction():
|
|
1496
|
+
old_root = await self.get_tree_root(store_id)
|
|
1497
|
+
pending_root = await self.get_pending_root(store_id=store_id)
|
|
1498
|
+
if pending_root is None:
|
|
1499
|
+
latest_local_root: Optional[Root] = old_root
|
|
1500
|
+
else:
|
|
1501
|
+
if pending_root.status == Status.PENDING_BATCH:
|
|
1502
|
+
# We have an unfinished batch, continue the current batch on top of it.
|
|
1503
|
+
if pending_root.generation != old_root.generation + 1:
|
|
1504
|
+
raise Exception("Internal error")
|
|
1505
|
+
await self.change_root_status(pending_root, Status.COMMITTED)
|
|
1506
|
+
await self.build_ancestor_table_for_latest_root(store_id=store_id)
|
|
1507
|
+
latest_local_root = pending_root
|
|
1508
|
+
else:
|
|
1509
|
+
raise Exception("Internal error")
|
|
1510
|
+
|
|
1511
|
+
assert latest_local_root is not None
|
|
1512
|
+
|
|
1513
|
+
key_hash_frequency: dict[bytes32, int] = {}
|
|
1514
|
+
first_action: dict[bytes32, str] = {}
|
|
1515
|
+
last_action: dict[bytes32, str] = {}
|
|
1516
|
+
|
|
1517
|
+
for change in changelist:
|
|
1518
|
+
key = change["key"]
|
|
1519
|
+
hash = key_hash(key)
|
|
1520
|
+
key_hash_frequency[hash] = key_hash_frequency.get(hash, 0) + 1
|
|
1521
|
+
if hash not in first_action:
|
|
1522
|
+
first_action[hash] = change["action"]
|
|
1523
|
+
last_action[hash] = change["action"]
|
|
1524
|
+
|
|
1525
|
+
+            pending_autoinsert_hashes: list[bytes32] = []
+            pending_upsert_new_hashes: dict[bytes32, bytes32] = {}
+            leaf_hashes = await self.get_leaf_hashes_by_hashed_key(store_id)
+
+            for change in changelist:
+                if change["action"] == "insert":
+                    key = change["key"]
+                    value = change["value"]
+                    reference_node_hash = change.get("reference_node_hash", None)
+                    side = change.get("side", None)
+                    if reference_node_hash is None and side is None:
+                        hash = key_hash(key)
+                        # The key is not referenced in any other operation but this autoinsert, hence the order
+                        # of performing these should not matter. We perform all these autoinserts as a batch
+                        # at the end, to speed up the tree processing operations.
+                        # Additionally, if the first action is a delete, we can still perform the autoinsert at the
+                        # end, since the order will be preserved.
+                        if enable_batch_autoinsert:
+                            if key_hash_frequency[hash] == 1 or (
+                                key_hash_frequency[hash] == 2 and first_action[hash] == "delete"
+                            ):
+                                old_node = await self.maybe_get_node_from_key_hash(leaf_hashes, hash)
+                                terminal_node_hash = await self._insert_terminal_node(key, value)
+
+                                if old_node is None:
+                                    pending_autoinsert_hashes.append(terminal_node_hash)
+                                else:
+                                    if key_hash_frequency[hash] == 1:
+                                        raise Exception(f"Key already present: {key.hex()}")
+                                    else:
+                                        pending_upsert_new_hashes[old_node.hash] = terminal_node_hash
+                                continue
+                        insert_result = await self.autoinsert(
+                            key, value, store_id, True, Status.COMMITTED, root=latest_local_root
+                        )
+                        latest_local_root = insert_result.root
+                    else:
+                        if reference_node_hash is None or side is None:
+                            raise Exception("Provide both reference_node_hash and side or neither.")
+                        insert_result = await self.insert(
+                            key,
+                            value,
+                            store_id,
+                            reference_node_hash,
+                            side,
+                            True,
+                            Status.COMMITTED,
+                            root=latest_local_root,
+                        )
+                        latest_local_root = insert_result.root
+                elif change["action"] == "delete":
+                    key = change["key"]
+                    hash = key_hash(key)
+                    if key_hash_frequency[hash] == 2 and last_action[hash] == "insert" and enable_batch_autoinsert:
+                        continue
+                    latest_local_root = await self.delete(key, store_id, True, Status.COMMITTED, root=latest_local_root)
+                elif change["action"] == "upsert":
+                    key = change["key"]
+                    new_value = change["value"]
+                    hash = key_hash(key)
+                    if key_hash_frequency[hash] == 1 and enable_batch_autoinsert:
+                        terminal_node_hash = await self._insert_terminal_node(key, new_value)
+                        old_node = await self.maybe_get_node_from_key_hash(leaf_hashes, hash)
+                        if old_node is not None:
+                            pending_upsert_new_hashes[old_node.hash] = terminal_node_hash
+                        else:
+                            pending_autoinsert_hashes.append(terminal_node_hash)
+                        continue
+                    insert_result = await self.upsert(
+                        key, new_value, store_id, True, Status.COMMITTED, root=latest_local_root
+                    )
+                    latest_local_root = insert_result.root
+                else:
+                    raise Exception(f"Operation in batch is not insert or delete: {change}")
+
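Each change thus lands on one of three paths: deferred bulk autoinsert (`pending_autoinsert_hashes`), deferred in-place replacement (`pending_upsert_new_hashes`), or an immediate per-operation tree edit. A compact, illustrative restatement of the dispatch rules for the reference-free case (this helper is not part of the class):

```python
def classify(action: str, freq: int, first: str, last: str, batching: bool = True) -> str:
    """Which path a reference-free change takes in insert_batch (illustrative)."""
    if not batching:
        return "immediate"
    if action == "insert" and (freq == 1 or (freq == 2 and first == "delete")):
        return "deferred"   # bulk autoinsert, or recorded as a pending upsert
    if action == "delete" and freq == 2 and last == "insert":
        return "skipped"    # its paired insert is treated as an upsert instead
    if action == "upsert" and freq == 1:
        return "deferred"
    return "immediate"
```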
+            if len(pending_upsert_new_hashes) > 0:
+                to_update_hashes: set[bytes32] = set(pending_upsert_new_hashes.keys())
+                to_update_queue: list[bytes32] = list(pending_upsert_new_hashes.keys())
+                batch_size = min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)
+
+                while len(to_update_queue) > 0:
+                    nodes = await self._get_one_ancestor_multiple_hashes(to_update_queue[:batch_size], store_id)
+                    to_update_queue = to_update_queue[batch_size:]
+                    for node in nodes:
+                        if node.hash not in to_update_hashes:
+                            to_update_hashes.add(node.hash)
+                            to_update_queue.append(node.hash)
+
+                assert latest_local_root is not None
+                assert latest_local_root.node_hash is not None
+                new_root_hash = await self.batch_upsert(
+                    latest_local_root.node_hash,
+                    to_update_hashes,
+                    pending_upsert_new_hashes,
+                )
+                latest_local_root = await self._insert_root(store_id, new_root_hash, Status.COMMITTED)
+
+            # Start with the leaf nodes and pair them to form new nodes at the next level up, repeating this process
+            # in a bottom-up fashion until a single root node remains. This constructs a balanced tree from the leaves.
+            while len(pending_autoinsert_hashes) > 1:
+                new_hashes: list[bytes32] = []
+                for i in range(0, len(pending_autoinsert_hashes) - 1, 2):
+                    internal_node_hash = await self._insert_internal_node(
+                        pending_autoinsert_hashes[i], pending_autoinsert_hashes[i + 1]
+                    )
+                    new_hashes.append(internal_node_hash)
+                if len(pending_autoinsert_hashes) % 2 != 0:
+                    new_hashes.append(pending_autoinsert_hashes[-1])
+
+                pending_autoinsert_hashes = new_hashes
+
+            if len(pending_autoinsert_hashes):
+                subtree_hash = pending_autoinsert_hashes[0]
+                if latest_local_root is None or latest_local_root.node_hash is None:
+                    await self._insert_root(store_id=store_id, node_hash=subtree_hash, status=Status.COMMITTED)
+                else:
+                    hash_to_parent: dict[bytes32, InternalNode] = {}
+                    min_height_leaf = await self.get_leaf_at_minimum_height(latest_local_root.node_hash, hash_to_parent)
+                    ancestors: list[InternalNode] = []
+                    hash = min_height_leaf.hash
+                    while hash in hash_to_parent:
+                        node = hash_to_parent[hash]
+                        ancestors.append(node)
+                        hash = node.hash
+
+                    await self.update_ancestor_hashes_on_insert(
+                        store_id=store_id,
+                        left=min_height_leaf.hash,
+                        right=subtree_hash,
+                        traversal_node_hash=min_height_leaf.hash,
+                        ancestors=ancestors,
+                        status=Status.COMMITTED,
+                        root=latest_local_root,
+                    )
+
+            root = await self.get_tree_root(store_id=store_id)
+            if root.node_hash == old_root.node_hash:
+                if len(changelist) != 0:
+                    await self.rollback_to_generation(store_id, old_root.generation)
+                    raise ValueError("Changelist resulted in no change to tree data")
+            # We delete all "temporary" records stored in root and ancestor tables and store only the final result.
+            await self.rollback_to_generation(store_id, old_root.generation)
+            await self.insert_root_with_ancestor_table(store_id=store_id, node_hash=root.node_hash, status=status)
+            if status in {Status.PENDING, Status.PENDING_BATCH}:
+                new_root = await self.get_pending_root(store_id=store_id)
+                assert new_root is not None
+            elif status == Status.COMMITTED:
+                new_root = await self.get_tree_root(store_id=store_id)
+            else:
+                raise Exception(f"No known status: {status}")
+            if new_root.node_hash != root.node_hash:
+                raise RuntimeError(
+                    f"Tree root mismatches after batch update: Expected: {root.node_hash}. Got: {new_root.node_hash}"
+                )
+            if new_root.generation != old_root.generation + 1:
+                raise RuntimeError(
+                    "Didn't get the expected generation after batch update: "
+                    f"Expected: {old_root.generation + 1}. Got: {new_root.generation}"
+                )
+            return root.node_hash
+
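The pairing loop in `insert_batch` halves the list of pending hashes each round, so N deferred inserts cost O(N) node writes over about log2(N) rounds and yield a balanced subtree. The same reduction on plain byte strings, as a runnable sketch (sha256 stands in for the store's internal-node hashing):

```python
from hashlib import sha256

def build_balanced_subtree(leaf_hashes: list[bytes]) -> bytes:
    """Pair adjacent hashes level by level until one subtree root remains."""
    while len(leaf_hashes) > 1:
        next_level = [
            sha256(leaf_hashes[i] + leaf_hashes[i + 1]).digest()
            for i in range(0, len(leaf_hashes) - 1, 2)
        ]
        if len(leaf_hashes) % 2 != 0:
            next_level.append(leaf_hashes[-1])  # odd leftover is carried up a level
        leaf_hashes = next_level
    return leaf_hashes[0]

# Five leaves -> three nodes -> two nodes -> one subtree root:
root = build_balanced_subtree([bytes([i]) * 32 for i in range(5)])
```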
+    async def _get_one_ancestor(
+        self,
+        node_hash: bytes32,
+        store_id: bytes32,
+        generation: Optional[int] = None,
+    ) -> Optional[InternalNode]:
+        async with self.db_wrapper.reader() as reader:
+            if generation is None:
+                generation = await self.get_tree_generation(store_id=store_id)
+            cursor = await reader.execute(
+                """
+                SELECT * from node INNER JOIN (
+                    SELECT ancestors.ancestor AS hash, MAX(ancestors.generation) AS generation
+                    FROM ancestors
+                    WHERE ancestors.hash == :hash
+                    AND ancestors.tree_id == :tree_id
+                    AND ancestors.generation <= :generation
+                    GROUP BY hash
+                ) asc on asc.hash == node.hash
+                """,
+                {"hash": node_hash, "tree_id": store_id, "generation": generation},
+            )
+            row = await cursor.fetchone()
+            if row is None:
+                return None
+            return InternalNode.from_row(row=row)
+
+    async def _get_one_ancestor_multiple_hashes(
+        self,
+        node_hashes: list[bytes32],
+        store_id: bytes32,
+        generation: Optional[int] = None,
+    ) -> list[InternalNode]:
+        async with self.db_wrapper.reader() as reader:
+            node_hashes_place_holders = ",".join("?" for _ in node_hashes)
+            if generation is None:
+                generation = await self.get_tree_generation(store_id=store_id)
+            cursor = await reader.execute(
+                f"""
+                SELECT * from node INNER JOIN (
+                    SELECT ancestors.ancestor AS hash, MAX(ancestors.generation) AS generation
+                    FROM ancestors
+                    WHERE ancestors.hash IN ({node_hashes_place_holders})
+                    AND ancestors.tree_id == ?
+                    AND ancestors.generation <= ?
+                    GROUP BY hash
+                ) asc on asc.hash == node.hash
+                """,
+                [*node_hashes, store_id, generation],
+            )
+            rows = await cursor.fetchall()
+            return [InternalNode.from_row(row=row) for row in rows]
+
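Because SQLite caps the number of bound parameters per statement, the caller in `insert_batch` slices its queue into chunks of `min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)` before hitting this `IN (...)` query. A minimal sketch of the same chunked-placeholders pattern against a plain `sqlite3` connection (table and column names here are illustrative):

```python
import sqlite3

SQLITE_MAX_VARIABLE_NUMBER = 999  # conservative default for older SQLite builds

def fetch_in_chunks(conn: sqlite3.Connection, hashes: list[bytes]) -> list[tuple]:
    """Run an IN (...) query in slices that respect the bound-parameter cap."""
    batch_size = min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)
    rows: list[tuple] = []
    for start in range(0, len(hashes), batch_size):
        chunk = hashes[start : start + batch_size]
        placeholders = ",".join("?" for _ in chunk)
        cursor = conn.execute(f"SELECT * FROM node WHERE hash IN ({placeholders})", chunk)
        rows.extend(cursor.fetchall())
    return rows
```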
+    async def build_ancestor_table_for_latest_root(self, store_id: bytes32) -> None:
+        async with self.db_wrapper.writer():
+            root = await self.get_tree_root(store_id=store_id)
+            if root.node_hash is None:
+                return
+            previous_root = await self.get_tree_root(
+                store_id=store_id,
+                generation=max(root.generation - 1, 0),
+            )
+
+            if previous_root.node_hash is not None:
+                previous_internal_nodes: list[InternalNode] = await self.get_internal_nodes(
+                    store_id=store_id,
+                    root_hash=previous_root.node_hash,
+                )
+                known_hashes: set[bytes32] = {node.hash for node in previous_internal_nodes}
+            else:
+                known_hashes = set()
+            internal_nodes: list[InternalNode] = await self.get_internal_nodes(
+                store_id=store_id,
+                root_hash=root.node_hash,
+            )
+            for node in internal_nodes:
+                # We already have the same values in ancestor tables, if we have the same internal node.
+                # Don't reinsert it so we can save DB space.
+                if node.hash not in known_hashes:
+                    await self._insert_ancestor_table(node.left_hash, node.right_hash, store_id, root.generation)
+
+    async def insert_root_with_ancestor_table(
+        self, store_id: bytes32, node_hash: Optional[bytes32], status: Status = Status.PENDING
+    ) -> None:
+        async with self.db_wrapper.writer():
+            await self._insert_root(store_id=store_id, node_hash=node_hash, status=status)
+            # Don't update the ancestor table for non-committed status.
+            if status == Status.COMMITTED:
+                await self.build_ancestor_table_for_latest_root(store_id=store_id)
+
+    async def get_node_by_key_latest_generation(self, key: bytes, store_id: bytes32) -> TerminalNode:
+        async with self.db_wrapper.reader() as reader:
+            root = await self.get_tree_root(store_id=store_id)
+            if root.node_hash is None:
+                raise KeyNotFoundError(key=key)
+
+            cursor = await reader.execute(
+                """
+                SELECT a.hash FROM ancestors a
+                JOIN node n ON a.hash = n.hash
+                WHERE n.key = :key
+                AND a.tree_id = :tree_id
+                ORDER BY a.generation DESC
+                LIMIT 1
+                """,
+                {"key": key, "tree_id": store_id},
+            )
+
+            row = await cursor.fetchone()
+            if row is None:
+                raise KeyNotFoundError(key=key)
+
+            node = await self.get_node(row["hash"])
+            node_hash = node.hash
+            while True:
+                internal_node = await self._get_one_ancestor(node_hash, store_id)
+                if internal_node is None:
+                    break
+                node_hash = internal_node.hash
+
+            if node_hash != root.node_hash:
+                raise KeyNotFoundError(key=key)
+            assert isinstance(node, TerminalNode)
+            return node
+
+    async def maybe_get_node_from_key_hash(
+        self, leaf_hashes: dict[bytes32, bytes32], hash: bytes32
+    ) -> Optional[TerminalNode]:
+        if hash in leaf_hashes:
+            leaf_hash = leaf_hashes[hash]
+            node = await self.get_node(leaf_hash)
+            assert isinstance(node, TerminalNode)
+            return node
+
+        return None
+
+    async def maybe_get_node_by_key(self, key: bytes, store_id: bytes32) -> Optional[TerminalNode]:
+        try:
+            node = await self.get_node_by_key_latest_generation(key, store_id)
+            return node
+        except KeyNotFoundError:
+            return None
+
+    async def get_node_by_key(
+        self,
+        key: bytes,
+        store_id: bytes32,
+        root_hash: Union[bytes32, Unspecified] = unspecified,
+    ) -> TerminalNode:
+        if root_hash is unspecified:
+            return await self.get_node_by_key_latest_generation(key, store_id)
+
+        nodes = await self.get_keys_values(store_id=store_id, root_hash=root_hash)
+
+        for node in nodes:
+            if node.key == key:
+                return node
+
+        raise KeyNotFoundError(key=key)
+
+    async def get_node(self, node_hash: bytes32) -> Node:
+        async with self.db_wrapper.reader() as reader:
+            cursor = await reader.execute("SELECT * FROM node WHERE hash == :hash LIMIT 1", {"hash": node_hash})
+            row = await cursor.fetchone()
+
+            if row is None:
+                raise Exception(f"Node not found for requested hash: {node_hash.hex()}")
+
+            node = row_to_node(row=row)
+            return node
+
+    async def get_tree_as_nodes(self, store_id: bytes32) -> Node:
+        async with self.db_wrapper.reader() as reader:
+            root = await self.get_tree_root(store_id=store_id)
+            # TODO: consider actual proper behavior
+            assert root.node_hash is not None
+            root_node = await self.get_node(node_hash=root.node_hash)
+
+            cursor = await reader.execute(
+                """
+                WITH RECURSIVE
+                    tree_from_root_hash(hash, node_type, left, right, key, value) AS (
+                        SELECT node.* FROM node WHERE node.hash == :root_hash
+                        UNION ALL
+                        SELECT node.* FROM node, tree_from_root_hash
+                        WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
+                    )
+                SELECT * FROM tree_from_root_hash
+                """,
+                {"root_hash": root_node.hash},
+            )
+            nodes = [row_to_node(row=row) async for row in cursor]
+            hash_to_node: dict[bytes32, Node] = {}
+            for node in reversed(nodes):
+                if isinstance(node, InternalNode):
+                    node = replace(node, left=hash_to_node[node.left_hash], right=hash_to_node[node.right_hash])
+                hash_to_node[node.hash] = node
+
+            root_node = hash_to_node[root_node.hash]
+
+            return root_node
+
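The recursive CTE emits parents before children, so iterating the rows in reverse guarantees both children are already in `hash_to_node` by the time their parent is linked. The same trick on a toy node list, with simple tuples standing in for the store's dataclasses:

```python
# Rows arrive parent-first: (hash, left, right); leaves have left == right == None.
rows = [
    ("R", "A", "B"),
    ("A", None, None),
    ("B", None, None),
]
hash_to_node: dict = {}
for node_hash, left, right in reversed(rows):
    if left is None:
        hash_to_node[node_hash] = node_hash  # a leaf stands for itself here
    else:
        # Both children were already materialized by the reversed traversal.
        hash_to_node[node_hash] = (hash_to_node[left], hash_to_node[right])
# hash_to_node["R"] == ("A", "B")
```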
+    async def get_proof_of_inclusion_by_hash(
+        self,
+        node_hash: bytes32,
+        store_id: bytes32,
+        root_hash: Optional[bytes32] = None,
+        use_optimized: bool = False,
+    ) -> ProofOfInclusion:
+        """Collect the information for a proof of inclusion of a hash in the Merkle
+        tree.
+        """
+
+        # Ideally this would use get_ancestors_common, but this _common function has this interesting property
+        # when used with use_optimized=False - it will compare both methods in this case and raise an exception.
+        # this is undesirable in the DL Offers flow where PENDING roots can cause the optimized code to fail.
+        if use_optimized:
+            ancestors = await self.get_ancestors_optimized(node_hash=node_hash, store_id=store_id, root_hash=root_hash)
+        else:
+            ancestors = await self.get_ancestors(node_hash=node_hash, store_id=store_id, root_hash=root_hash)
+
+        layers: list[ProofOfInclusionLayer] = []
+        child_hash = node_hash
+        for parent in ancestors:
+            layer = ProofOfInclusionLayer.from_internal_node(internal_node=parent, traversal_child_hash=child_hash)
+            layers.append(layer)
+            child_hash = parent.hash
+
+        proof_of_inclusion = ProofOfInclusion(node_hash=node_hash, layers=layers)
+
+        if len(ancestors) > 0:
+            expected_root = ancestors[-1].hash
+        else:
+            expected_root = node_hash
+
+        if expected_root != proof_of_inclusion.root_hash:
+            raise Exception(
+                f"Incorrect root, expected: {expected_root.hex()}"
+                f"\n has: {proof_of_inclusion.root_hash.hex()}"
+            )
+
+        return proof_of_inclusion
+
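A proof of inclusion is the list of sibling hashes along the path from the leaf up to the root, each tagged with which side the sibling sits on; verification just re-folds the layers. A self-contained sketch of that check, using sha256 as a stand-in for the layer hashing (so the combiner here is illustrative, not DataLayer's exact node-hash derivation):

```python
from hashlib import sha256

def verify_inclusion(leaf_hash: bytes, layers: list[tuple[str, bytes]], expected_root: bytes) -> bool:
    """Fold (side, sibling_hash) layers upward and compare against the root.

    side is "left" when the sibling is the left child at that level.
    """
    current = leaf_hash
    for side, sibling in layers:
        if side == "left":
            current = sha256(sibling + current).digest()
        else:
            current = sha256(current + sibling).digest()
    return current == expected_root
```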
+    async def get_proof_of_inclusion_by_key(
+        self,
+        key: bytes,
+        store_id: bytes32,
+    ) -> ProofOfInclusion:
+        """Collect the information for a proof of inclusion of a key and its value in
+        the Merkle tree.
+        """
+        async with self.db_wrapper.reader():
+            node = await self.get_node_by_key(key=key, store_id=store_id)
+            return await self.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
+
+    async def get_first_generation(self, node_hash: bytes32, store_id: bytes32) -> int:
+        async with self.db_wrapper.reader() as reader:
+            cursor = await reader.execute(
+                "SELECT MIN(generation) AS generation FROM ancestors WHERE hash == :hash AND tree_id == :tree_id",
+                {"hash": node_hash, "tree_id": store_id},
+            )
+            row = await cursor.fetchone()
+            if row is None:
+                raise RuntimeError("Hash not found in ancestor table.")
+
+            generation = row["generation"]
+            return int(generation)
+
+    async def write_tree_to_file(
+        self,
+        root: Root,
+        node_hash: bytes32,
+        store_id: bytes32,
+        deltas_only: bool,
+        writer: BinaryIO,
+    ) -> None:
+        if node_hash == bytes32.zeros:
+            return
+
+        if deltas_only:
+            generation = await self.get_first_generation(node_hash, store_id)
+            # Root's generation is not the first time we see this hash, so it's not a new delta.
+            if root.generation != generation:
+                return
+        node = await self.get_node(node_hash)
+        to_write = b""
+        if isinstance(node, InternalNode):
+            await self.write_tree_to_file(root, node.left_hash, store_id, deltas_only, writer)
+            await self.write_tree_to_file(root, node.right_hash, store_id, deltas_only, writer)
+            to_write = bytes(SerializedNode(False, bytes(node.left_hash), bytes(node.right_hash)))
+        elif isinstance(node, TerminalNode):
+            to_write = bytes(SerializedNode(True, node.key, node.value))
+        else:
+            raise Exception(f"Node is neither InternalNode nor TerminalNode: {node}")
+
+        writer.write(len(to_write).to_bytes(4, byteorder="big"))
+        writer.write(to_write)
+
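Each record in the resulting file is a 4-byte big-endian length followed by a `SerializedNode` payload, emitted in post-order so children always precede their parent. A minimal sketch of reading the stream back; the payload decoding is left abstract, since `SerializedNode`'s wire format lives elsewhere in the package:

```python
import io

def iter_length_prefixed(stream: io.BufferedIOBase):
    """Yield raw payloads from a 4-byte big-endian length-prefixed stream."""
    while True:
        header = stream.read(4)
        if len(header) == 0:
            return  # clean end of file
        if len(header) < 4:
            raise ValueError("truncated length header")
        size = int.from_bytes(header, byteorder="big")
        payload = stream.read(size)
        if len(payload) < size:
            raise ValueError("truncated record")
        yield payload
```

With a file produced by `write_tree_to_file`, iterating `iter_length_prefixed(open(path, "rb"))` yields each serialized node with children before parents, which is what lets an importer rebuild the tree in one pass.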
+    async def update_subscriptions_from_wallet(self, store_id: bytes32, new_urls: list[str]) -> None:
+        async with self.db_wrapper.writer() as writer:
+            cursor = await writer.execute(
+                "SELECT * FROM subscriptions WHERE from_wallet == 1 AND tree_id == :tree_id",
+                {
+                    "tree_id": store_id,
+                },
+            )
+            old_urls = [row["url"] async for row in cursor]
+            cursor = await writer.execute(
+                "SELECT * FROM subscriptions WHERE from_wallet == 0 AND tree_id == :tree_id",
+                {
+                    "tree_id": store_id,
+                },
+            )
+            from_subscriptions_urls = {row["url"] async for row in cursor}
+            additions = {url for url in new_urls if url not in old_urls}
+            removals = [url for url in old_urls if url not in new_urls]
+            for url in removals:
+                await writer.execute(
+                    "DELETE FROM subscriptions WHERE url == :url AND tree_id == :tree_id",
+                    {
+                        "url": url,
+                        "tree_id": store_id,
+                    },
+                )
+            for url in additions:
+                if url not in from_subscriptions_urls:
+                    await writer.execute(
+                        "INSERT INTO subscriptions(tree_id, url, ignore_till, num_consecutive_failures, from_wallet) "
+                        "VALUES (:tree_id, :url, 0, 0, 1)",
+                        {
+                            "tree_id": store_id,
+                            "url": url,
+                        },
+                    )
+
+    async def subscribe(self, subscription: Subscription) -> None:
+        async with self.db_wrapper.writer() as writer:
+            # Add a fake subscription, so we always have the store_id, even with no URLs.
+            await writer.execute(
+                "INSERT INTO subscriptions(tree_id, url, ignore_till, num_consecutive_failures, from_wallet) "
+                "VALUES (:tree_id, NULL, NULL, NULL, 0)",
+                {
+                    "tree_id": subscription.store_id,
+                },
+            )
+            all_subscriptions = await self.get_subscriptions()
+            old_subscription = next(
+                (
+                    old_subscription
+                    for old_subscription in all_subscriptions
+                    if old_subscription.store_id == subscription.store_id
+                ),
+                None,
+            )
+            old_urls = set()
+            if old_subscription is not None:
+                old_urls = {server_info.url for server_info in old_subscription.servers_info}
+            new_servers = [server_info for server_info in subscription.servers_info if server_info.url not in old_urls]
+            for server_info in new_servers:
+                await writer.execute(
+                    "INSERT INTO subscriptions(tree_id, url, ignore_till, num_consecutive_failures, from_wallet) "
+                    "VALUES (:tree_id, :url, :ignore_till, :num_consecutive_failures, 0)",
+                    {
+                        "tree_id": subscription.store_id,
+                        "url": server_info.url,
+                        "ignore_till": server_info.ignore_till,
+                        "num_consecutive_failures": server_info.num_consecutive_failures,
+                    },
+                )
+
+    async def remove_subscriptions(self, store_id: bytes32, urls: list[str]) -> None:
+        async with self.db_wrapper.writer() as writer:
+            for url in urls:
+                await writer.execute(
+                    "DELETE FROM subscriptions WHERE tree_id == :tree_id AND url == :url",
+                    {
+                        "tree_id": store_id,
+                        "url": url,
+                    },
+                )
+
+    async def delete_store_data(self, store_id: bytes32) -> None:
+        async with self.db_wrapper.writer(foreign_key_enforcement_enabled=False) as writer:
+            await self.clean_node_table(writer)
+            cursor = await writer.execute(
+                """
+                WITH RECURSIVE all_nodes AS (
+                    SELECT a.hash, n.left, n.right
+                    FROM ancestors AS a
+                    JOIN node AS n ON a.hash = n.hash
+                    WHERE a.tree_id = :tree_id
+                ),
+                pending_nodes AS (
+                    SELECT node_hash AS hash FROM root
+                    WHERE status IN (:pending_status, :pending_batch_status)
+                    UNION ALL
+                    SELECT n.left FROM node n
+                    INNER JOIN pending_nodes pn ON n.hash = pn.hash
+                    WHERE n.left IS NOT NULL
+                    UNION ALL
+                    SELECT n.right FROM node n
+                    INNER JOIN pending_nodes pn ON n.hash = pn.hash
+                    WHERE n.right IS NOT NULL
+                )
+
+                SELECT hash, left, right
+                FROM all_nodes
+                WHERE hash NOT IN (SELECT hash FROM ancestors WHERE tree_id != :tree_id)
+                AND hash NOT IN (SELECT hash from pending_nodes)
+                """,
+                {
+                    "tree_id": store_id,
+                    "pending_status": Status.PENDING.value,
+                    "pending_batch_status": Status.PENDING_BATCH.value,
+                },
+            )
+            to_delete: dict[bytes, tuple[bytes, bytes]] = {}
+            ref_counts: dict[bytes, int] = {}
+            async for row in cursor:
+                hash = row["hash"]
+                left = row["left"]
+                right = row["right"]
+                if hash in to_delete:
+                    prev_left, prev_right = to_delete[hash]
+                    assert prev_left == left
+                    assert prev_right == right
+                    continue
+                to_delete[hash] = (left, right)
+                if left is not None:
+                    ref_counts[left] = ref_counts.get(left, 0) + 1
+                if right is not None:
+                    ref_counts[right] = ref_counts.get(right, 0) + 1
+
+            await writer.execute("DELETE FROM ancestors WHERE tree_id == ?", (store_id,))
+            await writer.execute("DELETE FROM root WHERE tree_id == ?", (store_id,))
+            queue = [hash for hash in to_delete if ref_counts.get(hash, 0) == 0]
+            while queue:
+                hash = queue.pop(0)
+                if hash not in to_delete:
+                    continue
+                await writer.execute("DELETE FROM node WHERE hash == ?", (hash,))
+
+                left, right = to_delete[hash]
+                if left is not None:
+                    ref_counts[left] -= 1
+                    if ref_counts[left] == 0:
+                        queue.append(left)
+
+                if right is not None:
+                    ref_counts[right] -= 1
+                    if ref_counts[right] == 0:
+                        queue.append(right)
+
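Node rows can be shared between stores and between generations, so deletion is a reference-counted cascade: only nodes with no remaining parents are removed, and removing one may free its children for the next round. The same scheme on an in-memory DAG, as a runnable sketch:

```python
from collections import deque
from typing import Optional

def cascade_delete(nodes: dict[str, tuple[Optional[str], Optional[str]]]) -> list[str]:
    """Delete nodes top-down once nothing references them; return deletion order."""
    ref_counts: dict[str, int] = {}
    for left, right in nodes.values():
        for child in (left, right):
            if child is not None:
                ref_counts[child] = ref_counts.get(child, 0) + 1
    queue = deque(h for h in nodes if ref_counts.get(h, 0) == 0)  # roots first
    deleted: list[str] = []
    while queue:
        h = queue.popleft()
        if h not in nodes:
            continue  # a child that lives outside the set being deleted
        deleted.append(h)
        for child in nodes[h]:
            if child is not None:
                ref_counts[child] -= 1
                if ref_counts[child] == 0:
                    queue.append(child)
    return deleted

# Deleting root "R" unblocks its children: ['R', 'A', 'B']
print(cascade_delete({"R": ("A", "B"), "A": (None, None), "B": (None, None)}))
```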
+    async def unsubscribe(self, store_id: bytes32) -> None:
+        async with self.db_wrapper.writer() as writer:
+            await writer.execute(
+                "DELETE FROM subscriptions WHERE tree_id == :tree_id",
+                {"tree_id": store_id},
+            )
+
+    async def rollback_to_generation(self, store_id: bytes32, target_generation: int) -> None:
+        async with self.db_wrapper.writer() as writer:
+            await writer.execute(
+                "DELETE FROM ancestors WHERE tree_id == :tree_id AND generation > :target_generation",
+                {"tree_id": store_id, "target_generation": target_generation},
+            )
+            await writer.execute(
+                "DELETE FROM root WHERE tree_id == :tree_id AND generation > :target_generation",
+                {"tree_id": store_id, "target_generation": target_generation},
+            )
+
+    async def update_server_info(self, store_id: bytes32, server_info: ServerInfo) -> None:
+        async with self.db_wrapper.writer() as writer:
+            await writer.execute(
+                "UPDATE subscriptions SET ignore_till = :ignore_till, "
+                "num_consecutive_failures = :num_consecutive_failures WHERE tree_id = :tree_id AND url = :url",
+                {
+                    "ignore_till": server_info.ignore_till,
+                    "num_consecutive_failures": server_info.num_consecutive_failures,
+                    "tree_id": store_id,
+                    "url": server_info.url,
+                },
+            )
+
+    async def received_incorrect_file(self, store_id: bytes32, server_info: ServerInfo, timestamp: int) -> None:
+        SEVEN_DAYS_BAN = 7 * 24 * 60 * 60
+        new_server_info = replace(
+            server_info,
+            num_consecutive_failures=server_info.num_consecutive_failures + 1,
+            ignore_till=max(server_info.ignore_till, timestamp + SEVEN_DAYS_BAN),
+        )
+        await self.update_server_info(store_id, new_server_info)
+
+    async def received_correct_file(self, store_id: bytes32, server_info: ServerInfo) -> None:
+        new_server_info = replace(
+            server_info,
+            num_consecutive_failures=0,
+        )
+        await self.update_server_info(store_id, new_server_info)
+
+    async def server_misses_file(self, store_id: bytes32, server_info: ServerInfo, timestamp: int) -> ServerInfo:
+        # Max banned time is 1 hour.
+        BAN_TIME_BY_MISSING_COUNT = [5 * 60] * 3 + [15 * 60] * 3 + [30 * 60] * 2 + [60 * 60]
+        index = min(server_info.num_consecutive_failures, len(BAN_TIME_BY_MISSING_COUNT) - 1)
+        new_server_info = replace(
+            server_info,
+            num_consecutive_failures=server_info.num_consecutive_failures + 1,
+            ignore_till=max(server_info.ignore_till, timestamp + BAN_TIME_BY_MISSING_COUNT[index]),
+        )
+        await self.update_server_info(store_id, new_server_info)
+        return new_server_info
+
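The escalation schedule works out to 5 minutes for each of the first three misses, 15 minutes for the next three, 30 minutes twice, then a flat 1 hour for every further miss. A quick, runnable check of that lookup:

```python
BAN_TIME_BY_MISSING_COUNT = [5 * 60] * 3 + [15 * 60] * 3 + [30 * 60] * 2 + [60 * 60]

def ban_seconds(num_consecutive_failures: int) -> int:
    """Escalating ban: the failure count indexes the schedule, capped at the last entry."""
    index = min(num_consecutive_failures, len(BAN_TIME_BY_MISSING_COUNT) - 1)
    return BAN_TIME_BY_MISSING_COUNT[index]

assert ban_seconds(0) == 5 * 60
assert ban_seconds(3) == 15 * 60
assert ban_seconds(7) == 30 * 60
assert ban_seconds(100) == 60 * 60  # never exceeds one hour
```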
+    async def get_available_servers_for_store(self, store_id: bytes32, timestamp: int) -> list[ServerInfo]:
+        subscriptions = await self.get_subscriptions()
+        subscription = next((subscription for subscription in subscriptions if subscription.store_id == store_id), None)
+        if subscription is None:
+            return []
+        servers_info = []
+        for server_info in subscription.servers_info:
+            if timestamp > server_info.ignore_till:
+                servers_info.append(server_info)
+        return servers_info
+
+    async def get_subscriptions(self) -> list[Subscription]:
+        subscriptions: list[Subscription] = []
+
+        async with self.db_wrapper.reader() as reader:
+            cursor = await reader.execute(
+                "SELECT * from subscriptions",
+            )
+            async for row in cursor:
+                store_id = bytes32(row["tree_id"])
+                url = row["url"]
+                ignore_till = row["ignore_till"]
+                num_consecutive_failures = row["num_consecutive_failures"]
+                subscription = next(
+                    (subscription for subscription in subscriptions if subscription.store_id == store_id), None
+                )
+                if subscription is None:
+                    if url is not None and num_consecutive_failures is not None and ignore_till is not None:
+                        subscriptions.append(
+                            Subscription(store_id, [ServerInfo(url, num_consecutive_failures, ignore_till)])
+                        )
+                    else:
+                        subscriptions.append(Subscription(store_id, []))
+                else:
+                    if url is not None and num_consecutive_failures is not None and ignore_till is not None:
+                        new_servers_info = subscription.servers_info
+                        new_servers_info.append(ServerInfo(url, num_consecutive_failures, ignore_till))
+                        new_subscription = replace(subscription, servers_info=new_servers_info)
+                        subscriptions.remove(subscription)
+                        subscriptions.append(new_subscription)
+
+        return subscriptions
+
+    async def get_kv_diff(
+        self,
+        store_id: bytes32,
+        # NOTE: empty is expressed as zeros
+        hash_1: bytes32,
+        hash_2: bytes32,
+    ) -> set[DiffData]:
+        async with self.db_wrapper.reader():
+            old_pairs = set(await self.get_keys_values(store_id, hash_1))
+            if len(old_pairs) == 0 and hash_1 != bytes32.zeros:
+                raise Exception(f"Unable to diff: Can't find keys and values for {hash_1}")
+
+            new_pairs = set(await self.get_keys_values(store_id, hash_2))
+            if len(new_pairs) == 0 and hash_2 != bytes32.zeros:
+                raise Exception(f"Unable to diff: Can't find keys and values for {hash_2}")
+
+            insertions = {
+                DiffData(type=OperationType.INSERT, key=node.key, value=node.value)
+                for node in new_pairs
+                if node not in old_pairs
+            }
+            deletions = {
+                DiffData(type=OperationType.DELETE, key=node.key, value=node.value)
+                for node in old_pairs
+                if node not in new_pairs
+            }
+            return set.union(insertions, deletions)
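The diff is a set difference in both directions: pairs present only under `hash_2` become INSERTs, pairs present only under `hash_1` become DELETEs, so a changed value shows up as a DELETE of the old pair plus an INSERT of the new one. The same semantics on plain tuples, as a sketch with illustrative data:

```python
# Key/value pairs under two root hashes:
old_pairs = {(b"a", b"1"), (b"b", b"2")}
new_pairs = {(b"a", b"1"), (b"b", b"3"), (b"c", b"4")}

insertions = {("insert", k, v) for k, v in new_pairs - old_pairs}
deletions = {("delete", k, v) for k, v in old_pairs - new_pairs}
diff = insertions | deletions
# {("delete", b"b", b"2"), ("insert", b"b", b"3"), ("insert", b"c", b"4")}
```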