chia-blockchain 2.5.6rc2__py3-none-any.whl → 2.5.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chia/_tests/blockchain/blockchain_test_utils.py +6 -7
- chia/_tests/blockchain/test_augmented_chain.py +4 -3
- chia/_tests/blockchain/test_blockchain.py +10 -5
- chia/_tests/clvm/coin_store.py +1 -1
- chia/_tests/cmds/cmd_test_utils.py +84 -97
- chia/_tests/cmds/test_dev_gh.py +1 -1
- chia/_tests/cmds/test_farm_cmd.py +56 -2
- chia/_tests/cmds/wallet/test_consts.py +3 -1
- chia/_tests/cmds/wallet/test_did.py +3 -8
- chia/_tests/cmds/wallet/test_nft.py +6 -6
- chia/_tests/cmds/wallet/test_notifications.py +39 -21
- chia/_tests/cmds/wallet/test_vcs.py +2 -1
- chia/_tests/cmds/wallet/test_wallet.py +160 -136
- chia/_tests/conftest.py +51 -26
- chia/_tests/core/cmds/test_wallet.py +4 -3
- chia/_tests/core/consensus/test_pot_iterations.py +71 -24
- chia/_tests/core/custom_types/test_proof_of_space.py +60 -30
- chia/_tests/core/custom_types/test_spend_bundle.py +1 -4
- chia/_tests/core/data_layer/conftest.py +7 -2
- chia/_tests/core/data_layer/old_format/__init__.py +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-005876c1cdc4d5f1726551b207b9f63efc9cd2f72df80a3a26a1ba73d40d6745-delta-23-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-005876c1cdc4d5f1726551b207b9f63efc9cd2f72df80a3a26a1ba73d40d6745-full-23-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-01b36e72a975cdc00d6514eea81668d19e8ea3150217ae98cb3361688a016fab-delta-9-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-01b36e72a975cdc00d6514eea81668d19e8ea3150217ae98cb3361688a016fab-full-9-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-06147c3b12d73e9b83b686a8c10b4a36a513c8a93c0ff99ae197f06326278be9-delta-5-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-06147c3b12d73e9b83b686a8c10b4a36a513c8a93c0ff99ae197f06326278be9-full-5-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-073c051a5934ad3b8db39eee2189e4300e55f48aaa17ff4ae30eeae088ff544a-delta-22-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-073c051a5934ad3b8db39eee2189e4300e55f48aaa17ff4ae30eeae088ff544a-full-22-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-0cc077559b9c7b4aefe8f8f591c195e0779bebdf89f2ad8285a00ea5f859d965-delta-1-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-0cc077559b9c7b4aefe8f8f591c195e0779bebdf89f2ad8285a00ea5f859d965-full-1-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-16377275567b723b20936d3f1ec0a2fd83f6ac379b922351a5e4c54949069f3b-delta-2-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-16377275567b723b20936d3f1ec0a2fd83f6ac379b922351a5e4c54949069f3b-full-2-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-1cb824a7a5f02cd30ac6c38e8f6216780d9bfa2d24811d282a368dcd541438a7-delta-29-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-1cb824a7a5f02cd30ac6c38e8f6216780d9bfa2d24811d282a368dcd541438a7-full-29-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-27b89dc4809ebc5a3b87757d35e95e2761d978cf121e44fa2773a5c06e4cc7b5-delta-28-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-27b89dc4809ebc5a3b87757d35e95e2761d978cf121e44fa2773a5c06e4cc7b5-full-28-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-28a6b7c134abfaeb0ab58a018313f6c87a61a40a4d9ec9bedf53aa1d12f3ee37-delta-7-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-28a6b7c134abfaeb0ab58a018313f6c87a61a40a4d9ec9bedf53aa1d12f3ee37-full-7-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-30a6bfe7cecbeda259a295dc6de3a436357f52388c3b03d86901e7da68565aeb-delta-19-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-30a6bfe7cecbeda259a295dc6de3a436357f52388c3b03d86901e7da68565aeb-full-19-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-343a2bf9add798e3ac2e6a571823cf9fa7e8a1bed532143354ead2648bd036ef-delta-10-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-343a2bf9add798e3ac2e6a571823cf9fa7e8a1bed532143354ead2648bd036ef-full-10-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4d90efbc1fb3df324193831ea4a57dd5e10e67d9653343eb18d178272adb0447-delta-17-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4d90efbc1fb3df324193831ea4a57dd5e10e67d9653343eb18d178272adb0447-full-17-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4dd2ea099e91635c441f40b36d3f84078a2d818d2dc601c7278e72cbdfe3eca8-delta-20-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4dd2ea099e91635c441f40b36d3f84078a2d818d2dc601c7278e72cbdfe3eca8-full-20-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-509effbdca78639023b933ce6c08a0465fb247e1cd5329e9e9c553940e4b6e46-delta-31-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-509effbdca78639023b933ce6c08a0465fb247e1cd5329e9e9c553940e4b6e46-full-31-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5379a4d9ff29c29d1ef0906d22e82c52472753d31806189ab813c43365341b78-delta-40-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5379a4d9ff29c29d1ef0906d22e82c52472753d31806189ab813c43365341b78-full-40-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-55908eda5686a8f89e4c50672cbe893ec1734fb23449dc03325efe7c414f9aa4-delta-49-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-55908eda5686a8f89e4c50672cbe893ec1734fb23449dc03325efe7c414f9aa4-full-49-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-57cc2691fb1fb986c99a58bcb0e029d0cd0cff41553d703147c54196d7d9ca63-delta-14-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-57cc2691fb1fb986c99a58bcb0e029d0cd0cff41553d703147c54196d7d9ca63-full-14-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5943bf8ae4f5e59969d8570e4f40a8223299febdcfbcf188b3b3e2ab11044e18-delta-34-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5943bf8ae4f5e59969d8570e4f40a8223299febdcfbcf188b3b3e2ab11044e18-full-34-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6518527b7c939bee60ce6b024cbe90d3b9d8913c56b8ce11a4df5da7ff7db1c8-delta-8-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6518527b7c939bee60ce6b024cbe90d3b9d8913c56b8ce11a4df5da7ff7db1c8-full-8-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-66ff26a26620379e14a7c91252d27ee4dbe06ad69a3a390a88642fe757f2b288-delta-45-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-66ff26a26620379e14a7c91252d27ee4dbe06ad69a3a390a88642fe757f2b288-full-45-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6bd0a508ee2c4afbe9d4daa811139fd6e54e7f4e16850cbce999fa30f8bdccd2-delta-6-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6bd0a508ee2c4afbe9d4daa811139fd6e54e7f4e16850cbce999fa30f8bdccd2-full-6-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6ce850d0d77ca743fcc2fc792747472e5d2c1c0813aa43abbb370554428fc897-delta-48-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6ce850d0d77ca743fcc2fc792747472e5d2c1c0813aa43abbb370554428fc897-full-48-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6eb4ca2e1552b156c5969396b49070eb08ad6c96b347359387519be59f7ccaed-delta-26-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6eb4ca2e1552b156c5969396b49070eb08ad6c96b347359387519be59f7ccaed-full-26-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-71c797fb7592d3f0a5a20c79ab8497ddaa0fd9ec17712e109d25c91b3f3c76e5-delta-3-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-71c797fb7592d3f0a5a20c79ab8497ddaa0fd9ec17712e109d25c91b3f3c76e5-full-3-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-73357026053d5a4969e7a6b9aeeef91c14cc6d5f32fc700fe6d21d2a1b22496c-delta-25-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-73357026053d5a4969e7a6b9aeeef91c14cc6d5f32fc700fe6d21d2a1b22496c-full-25-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-7c897e5c46e834ced65bde7de87716acfaa5dffbdb30b5cd9377d8c319df2034-delta-35-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-7c897e5c46e834ced65bde7de87716acfaa5dffbdb30b5cd9377d8c319df2034-full-35-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-87b8394d80d08117a5a1cd04ed8a682564eab7197a2c090159863591b5108874-delta-4-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-87b8394d80d08117a5a1cd04ed8a682564eab7197a2c090159863591b5108874-full-4-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-89eb40b9cc0921c5f5c3feb20927c13a9ada5760f82d219dcee153b7d400165c-delta-41-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-89eb40b9cc0921c5f5c3feb20927c13a9ada5760f82d219dcee153b7d400165c-full-41-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8b649433156b8c924436cdec9c6de26106fd6f73a0528570f48748f7b40d7f8a-delta-21-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8b649433156b8c924436cdec9c6de26106fd6f73a0528570f48748f7b40d7f8a-full-21-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8d364023a0834c8c3077e236a465493acbf488e4f9d1f4c6cc230343c10a8f7d-delta-42-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8d364023a0834c8c3077e236a465493acbf488e4f9d1f4c6cc230343c10a8f7d-full-42-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-925689e24a3d98d98676d816cdd8b73e7b2df057d9d4503da9b27bf91d79666c-delta-38-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-925689e24a3d98d98676d816cdd8b73e7b2df057d9d4503da9b27bf91d79666c-full-38-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-937be3d428b19f521be4f98faecc3307ae11ee731c76992f417fa4268d13859e-delta-11-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-937be3d428b19f521be4f98faecc3307ae11ee731c76992f417fa4268d13859e-full-11-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-97f34af499b79e2111fc296a598fc9654c2467ea038dfea41fd58241fb3642de-delta-32-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-97f34af499b79e2111fc296a598fc9654c2467ea038dfea41fd58241fb3642de-full-32-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-9d1b737243b8a1d0022f2b36ac53333c6280354a74d77f2a3642dcab35204e59-delta-33-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-9d1b737243b8a1d0022f2b36ac53333c6280354a74d77f2a3642dcab35204e59-full-33-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-a6663f98ef6ddf6db55f01163e34bb2e87aa82f0347e79ce31e8dbfa390c480c-delta-47-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-a6663f98ef6ddf6db55f01163e34bb2e87aa82f0347e79ce31e8dbfa390c480c-full-47-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-aa77376d1ccd3664e5c6366e010c52a978fedbf40f5ce262fee71b2e7fe0c6a9-delta-50-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-aa77376d1ccd3664e5c6366e010c52a978fedbf40f5ce262fee71b2e7fe0c6a9-full-50-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b0f28514741ed1a71f5c6544bf92f9e0e493c5f3cf28328909771d8404eff626-delta-24-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b0f28514741ed1a71f5c6544bf92f9e0e493c5f3cf28328909771d8404eff626-full-24-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b3efee5358e6eb89ab3b60db2d128d57eef39e8538fb63c5632412d4f8e7d09e-delta-44-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b3efee5358e6eb89ab3b60db2d128d57eef39e8538fb63c5632412d4f8e7d09e-full-44-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bb0b56b6eb7acbb4e80893b04c72412fe833418232e1ed7b06d97d7a7f08b4e1-delta-16-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bb0b56b6eb7acbb4e80893b04c72412fe833418232e1ed7b06d97d7a7f08b4e1-full-16-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bc45262b757ff494b53bd2a8fba0f5511cc1f9c2a2c5360e04ea8cebbf6409df-delta-13-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bc45262b757ff494b53bd2a8fba0f5511cc1f9c2a2c5360e04ea8cebbf6409df-full-13-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bd0494ba430aff13458b557113b073d226eaf11257dfe26ff3323fa1cfe1335b-delta-39-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bd0494ba430aff13458b557113b073d226eaf11257dfe26ff3323fa1cfe1335b-full-39-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cd04f5fbba1553fa728b4dd8131d4723aaac288e0c7dc080447fbf0872c0a6eb-delta-36-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cd04f5fbba1553fa728b4dd8131d4723aaac288e0c7dc080447fbf0872c0a6eb-full-36-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cdd2399557fb3163a848f08831fdc833703354edb19a0d32a965fdb140f160c2-delta-18-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cdd2399557fb3163a848f08831fdc833703354edb19a0d32a965fdb140f160c2-full-18-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cf7a08fca7b1332095242e4d9800f4b94a3f4eaae88fe8407da42736d54b9e18-delta-37-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cf7a08fca7b1332095242e4d9800f4b94a3f4eaae88fe8407da42736d54b9e18-full-37-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-d1f97465a9f52187e2ef3a0d811a1258f52380a65340c55f3e8e65b92753bc13-delta-15-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-d1f97465a9f52187e2ef3a0d811a1258f52380a65340c55f3e8e65b92753bc13-full-15-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e475eccd4ee597e5ff67b1a249e37d65d6e3f754c3f0379fdb43692513588fef-delta-46-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e475eccd4ee597e5ff67b1a249e37d65d6e3f754c3f0379fdb43692513588fef-full-46-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e82e63517d78fd65b23a05c3b9a98cf905ddad7026995a238bfe634006b84cd0-delta-27-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e82e63517d78fd65b23a05c3b9a98cf905ddad7026995a238bfe634006b84cd0-full-27-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-ed2cf0fd6c0f6237c87c161e1fca303b3fbe6c04e01f652b88720b4572143349-delta-12-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-ed2cf0fd6c0f6237c87c161e1fca303b3fbe6c04e01f652b88720b4572143349-full-12-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f6e454eaf24a83c46a7bed4c19260a0a3ce0ed5c51739cb6d748d4913dc2ef58-delta-30-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f6e454eaf24a83c46a7bed4c19260a0a3ce0ed5c51739cb6d748d4913dc2ef58-full-30-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f7ad2bdf86d9609b4d6381086ec1e296bf558e2ff467ead29dd7fa6e31bacc56-delta-43-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f7ad2bdf86d9609b4d6381086ec1e296bf558e2ff467ead29dd7fa6e31bacc56-full-43-v1.0.dat +0 -0
- chia/_tests/core/data_layer/old_format/files/__init__.py +0 -0
- chia/_tests/core/data_layer/old_format/old_db.sqlite +0 -0
- chia/_tests/core/data_layer/test_data_layer_util.py +18 -21
- chia/_tests/core/data_layer/test_data_rpc.py +77 -28
- chia/_tests/core/data_layer/test_data_store.py +637 -700
- chia/_tests/core/data_layer/test_data_store_schema.py +2 -209
- chia/_tests/core/full_node/ram_db.py +1 -1
- chia/_tests/core/full_node/stores/test_block_store.py +4 -10
- chia/_tests/core/full_node/stores/test_coin_store.py +1 -1
- chia/_tests/core/full_node/test_address_manager.py +3 -3
- chia/_tests/core/full_node/test_block_height_map.py +1 -1
- chia/_tests/core/full_node/test_full_node.py +91 -30
- chia/_tests/core/full_node/test_generator_tools.py +17 -10
- chia/_tests/core/mempool/test_mempool.py +190 -90
- chia/_tests/core/mempool/test_mempool_fee_estimator.py +2 -4
- chia/_tests/core/mempool/test_mempool_item_queries.py +1 -1
- chia/_tests/core/mempool/test_mempool_manager.py +252 -77
- chia/_tests/core/mempool/test_singleton_fast_forward.py +9 -27
- chia/_tests/core/server/serve.py +0 -2
- chia/_tests/core/server/test_rate_limits.py +400 -347
- chia/_tests/core/server/test_server.py +2 -2
- chia/_tests/core/services/test_services.py +7 -7
- chia/_tests/core/test_cost_calculation.py +31 -10
- chia/_tests/core/test_crawler.py +4 -4
- chia/_tests/core/test_db_conversion.py +7 -14
- chia/_tests/core/test_db_validation.py +2 -6
- chia/_tests/core/test_farmer_harvester_rpc.py +34 -1
- chia/_tests/core/test_full_node_rpc.py +28 -24
- chia/_tests/core/test_merkle_set.py +1 -4
- chia/_tests/core/test_seeder.py +1 -1
- chia/_tests/core/util/test_keychain.py +2 -2
- chia/_tests/core/util/test_lru_cache.py +16 -0
- chia/_tests/core/util/test_streamable.py +85 -4
- chia/_tests/environments/wallet.py +4 -1
- chia/_tests/farmer_harvester/test_farmer.py +8 -6
- chia/_tests/farmer_harvester/test_farmer_harvester.py +306 -8
- chia/_tests/farmer_harvester/test_filter_prefix_bits.py +3 -3
- chia/_tests/farmer_harvester/test_third_party_harvesters.py +11 -11
- chia/_tests/fee_estimation/test_fee_estimation_integration.py +2 -2
- chia/_tests/fee_estimation/test_fee_estimation_rpc.py +1 -1
- chia/_tests/fee_estimation/test_fee_estimation_unit_tests.py +1 -2
- chia/_tests/generator/test_rom.py +2 -1
- chia/_tests/harvester/__init__.py +0 -0
- chia/_tests/harvester/config.py +4 -0
- chia/_tests/harvester/test_harvester_api.py +157 -0
- chia/_tests/plot_sync/test_plot_sync.py +6 -3
- chia/_tests/plot_sync/test_receiver.py +16 -4
- chia/_tests/plot_sync/test_sender.py +8 -7
- chia/_tests/plot_sync/test_sync_simulated.py +15 -13
- chia/_tests/plot_sync/util.py +3 -2
- chia/_tests/plotting/test_plot_manager.py +21 -5
- chia/_tests/plotting/test_prover.py +106 -0
- chia/_tests/pools/test_pool_cmdline.py +7 -6
- chia/_tests/pools/test_pool_puzzles_lifecycle.py +10 -3
- chia/_tests/pools/test_pool_rpc.py +92 -64
- chia/_tests/solver/__init__.py +0 -0
- chia/_tests/solver/config.py +4 -0
- chia/_tests/solver/test_solver_service.py +29 -0
- chia/_tests/timelord/test_new_peak.py +1 -1
- chia/_tests/timelord/test_timelord.py +1 -1
- chia/_tests/util/benchmarks.py +5 -12
- chia/_tests/util/blockchain.py +1 -1
- chia/_tests/util/build_network_protocol_files.py +7 -0
- chia/_tests/util/network_protocol_data.py +26 -0
- chia/_tests/util/protocol_messages_bytes-v1.0 +0 -0
- chia/_tests/util/protocol_messages_json.py +19 -0
- chia/_tests/util/setup_nodes.py +21 -2
- chia/_tests/util/spend_sim.py +9 -3
- chia/_tests/util/test_condition_tools.py +3 -2
- chia/_tests/util/test_full_block_utils.py +10 -9
- chia/_tests/util/test_misc.py +10 -10
- chia/_tests/util/test_network.py +32 -1
- chia/_tests/util/test_network_protocol_files.py +333 -318
- chia/_tests/util/test_network_protocol_json.py +6 -0
- chia/_tests/util/test_network_protocol_test.py +27 -0
- chia/_tests/util/test_priority_mutex.py +1 -1
- chia/_tests/util/test_replace_str_to_bytes.py +6 -6
- chia/_tests/wallet/cat_wallet/test_cat_wallet.py +17 -13
- chia/_tests/wallet/cat_wallet/test_trades.py +55 -55
- chia/_tests/wallet/did_wallet/test_did.py +118 -1229
- chia/_tests/wallet/nft_wallet/config.py +1 -1
- chia/_tests/wallet/nft_wallet/test_nft_1_offers.py +73 -96
- chia/_tests/wallet/nft_wallet/test_nft_bulk_mint.py +15 -12
- chia/_tests/wallet/nft_wallet/test_nft_offers.py +67 -134
- chia/_tests/wallet/nft_wallet/test_nft_wallet.py +31 -26
- chia/_tests/wallet/rpc/test_wallet_rpc.py +765 -371
- chia/_tests/wallet/sync/test_wallet_sync.py +6 -0
- chia/_tests/wallet/test_new_wallet_protocol.py +1 -1
- chia/_tests/wallet/test_signer_protocol.py +2 -2
- chia/_tests/wallet/test_singleton_lifecycle_fast.py +3 -4
- chia/_tests/wallet/test_transaction_store.py +42 -33
- chia/_tests/wallet/test_wallet.py +22 -31
- chia/_tests/wallet/test_wallet_state_manager.py +14 -7
- chia/_tests/wallet/vc_wallet/test_vc_wallet.py +53 -32
- chia/apis.py +2 -0
- chia/cmds/beta.py +7 -3
- chia/cmds/chia.py +2 -0
- chia/cmds/cmd_classes.py +11 -27
- chia/cmds/cmds_util.py +3 -0
- chia/cmds/coin_funcs.py +27 -22
- chia/cmds/configure.py +42 -18
- chia/cmds/dev/data.py +22 -3
- chia/cmds/farm.py +32 -0
- chia/cmds/farm_funcs.py +54 -5
- chia/cmds/init_funcs.py +4 -0
- chia/cmds/keys_funcs.py +8 -10
- chia/cmds/peer_funcs.py +8 -10
- chia/cmds/plotnft_funcs.py +24 -16
- chia/cmds/rpc.py +11 -1
- chia/cmds/show_funcs.py +5 -5
- chia/cmds/solver.py +33 -0
- chia/cmds/solver_funcs.py +21 -0
- chia/cmds/wallet.py +1 -1
- chia/cmds/wallet_funcs.py +149 -96
- chia/consensus/block_body_validation.py +8 -9
- chia/consensus/block_creation.py +9 -10
- chia/consensus/block_header_validation.py +61 -69
- chia/{full_node → consensus}/block_height_map.py +2 -1
- chia/consensus/block_height_map_protocol.py +21 -0
- chia/consensus/block_rewards.py +12 -12
- chia/consensus/blockchain.py +8 -18
- chia/consensus/default_constants.py +6 -6
- chia/consensus/generator_tools.py +1 -1
- chia/consensus/get_block_challenge.py +24 -25
- chia/consensus/pos_quality.py +28 -2
- chia/consensus/pot_iterations.py +15 -17
- chia/daemon/keychain_proxy.py +5 -0
- chia/daemon/server.py +2 -3
- chia/data_layer/data_layer.py +32 -24
- chia/data_layer/data_layer_errors.py +5 -0
- chia/data_layer/data_layer_rpc_api.py +1 -1
- chia/data_layer/data_layer_service.py +8 -0
- chia/data_layer/data_layer_util.py +49 -89
- chia/data_layer/data_layer_wallet.py +20 -17
- chia/data_layer/data_store.py +1051 -1462
- chia/data_layer/download_data.py +44 -115
- chia/{server → data_layer}/start_data_layer.py +2 -1
- chia/data_layer/util/benchmark.py +38 -53
- chia/farmer/farmer.py +3 -0
- chia/farmer/farmer_api.py +104 -5
- chia/farmer/farmer_rpc_api.py +20 -0
- chia/farmer/farmer_rpc_client.py +6 -2
- chia/farmer/farmer_service.py +8 -0
- chia/{server → farmer}/start_farmer.py +9 -3
- chia/full_node/block_store.py +20 -10
- chia/full_node/coin_store.py +12 -4
- chia/full_node/eligible_coin_spends.py +17 -72
- chia/full_node/full_node.py +68 -71
- chia/full_node/full_node_api.py +26 -32
- chia/full_node/full_node_rpc_api.py +44 -33
- chia/full_node/full_node_rpc_client.py +67 -79
- chia/full_node/full_node_service.py +8 -0
- chia/full_node/full_node_store.py +5 -3
- chia/full_node/mempool.py +15 -16
- chia/full_node/mempool_manager.py +73 -89
- chia/{server → full_node}/start_full_node.py +1 -1
- chia/full_node/subscriptions.py +2 -2
- chia/full_node/weight_proof.py +14 -15
- chia/harvester/harvester.py +8 -1
- chia/harvester/harvester_api.py +178 -44
- chia/harvester/harvester_service.py +8 -0
- chia/{server → harvester}/start_harvester.py +1 -1
- chia/introducer/introducer_service.py +8 -0
- chia/{server → introducer}/start_introducer.py +1 -1
- chia/plot_sync/receiver.py +6 -1
- chia/plot_sync/sender.py +7 -4
- chia/plotting/cache.py +37 -28
- chia/plotting/check_plots.py +83 -48
- chia/plotting/create_plots.py +3 -4
- chia/plotting/manager.py +18 -13
- chia/plotting/prover.py +153 -0
- chia/plotting/util.py +14 -6
- chia/pools/pool_wallet.py +6 -4
- chia/protocols/harvester_protocol.py +14 -0
- chia/protocols/outbound_message.py +1 -0
- chia/protocols/pool_protocol.py +1 -1
- chia/protocols/protocol_message_types.py +7 -0
- chia/protocols/shared_protocol.py +2 -0
- chia/protocols/solver_protocol.py +18 -0
- chia/rpc/rpc_server.py +1 -1
- chia/seeder/crawl_store.py +4 -8
- chia/seeder/crawler.py +2 -2
- chia/seeder/crawler_service.py +8 -0
- chia/seeder/start_crawler.py +1 -1
- chia/server/address_manager.py +12 -15
- chia/server/introducer_peers.py +1 -1
- chia/server/node_discovery.py +9 -10
- chia/server/rate_limit_numbers.py +157 -168
- chia/server/rate_limits.py +44 -41
- chia/server/resolve_peer_info.py +5 -0
- chia/server/server.py +17 -7
- chia/server/start_service.py +0 -1
- chia/simulator/block_tools.py +92 -58
- chia/simulator/full_node_simulator.py +1 -1
- chia/simulator/setup_services.py +52 -15
- chia/solver/__init__.py +0 -0
- chia/solver/solver.py +100 -0
- chia/solver/solver_api.py +59 -0
- chia/solver/solver_rpc_api.py +31 -0
- chia/solver/solver_rpc_client.py +16 -0
- chia/solver/solver_service.py +8 -0
- chia/solver/start_solver.py +102 -0
- {mozilla-ca → chia/ssl}/cacert.pem +0 -27
- chia/ssl/create_ssl.py +3 -2
- chia/{server → timelord}/start_timelord.py +1 -1
- chia/timelord/timelord.py +12 -13
- chia/timelord/timelord_service.py +8 -0
- chia/types/blockchain_format/proof_of_space.py +61 -17
- chia/types/coin_spend.py +0 -8
- chia/types/internal_mempool_item.py +3 -3
- chia/types/mempool_item.py +15 -8
- chia/types/mempool_submission_status.py +1 -1
- chia/util/config.py +1 -3
- chia/util/db_wrapper.py +7 -8
- chia/util/initial-config.yaml +46 -0
- chia/util/lru_cache.py +8 -4
- chia/util/network.py +9 -0
- chia/util/streamable.py +38 -8
- chia/util/virtual_project_analysis.py +1 -1
- chia/wallet/cat_wallet/cat_outer_puzzle.py +7 -4
- chia/wallet/cat_wallet/cat_wallet.py +13 -7
- chia/wallet/cat_wallet/r_cat_wallet.py +4 -1
- chia/wallet/conditions.py +1 -3
- chia/wallet/did_wallet/did_wallet.py +27 -332
- chia/wallet/nft_wallet/nft_puzzle_utils.py +1 -1
- chia/wallet/nft_wallet/nft_wallet.py +9 -7
- chia/wallet/puzzle_drivers.py +7 -8
- chia/{server → wallet}/start_wallet.py +1 -1
- chia/wallet/trade_manager.py +12 -9
- chia/wallet/transaction_record.py +14 -51
- chia/wallet/util/clvm_streamable.py +28 -41
- chia/wallet/util/merkle_utils.py +2 -2
- chia/wallet/util/tx_config.py +3 -6
- chia/wallet/vc_wallet/cr_cat_wallet.py +12 -6
- chia/wallet/vc_wallet/vc_wallet.py +13 -15
- chia/wallet/wallet.py +5 -3
- chia/wallet/wallet_node.py +25 -30
- chia/wallet/wallet_request_types.py +538 -101
- chia/wallet/wallet_rpc_api.py +398 -570
- chia/wallet/wallet_rpc_client.py +144 -332
- chia/wallet/wallet_service.py +8 -0
- chia/wallet/wallet_state_manager.py +53 -42
- chia/wallet/wallet_transaction_store.py +13 -5
- {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info}/METADATA +31 -31
- {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info}/RECORD +368 -240
- {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info}/WHEEL +1 -1
- {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info}/entry_points.txt +8 -7
- chia/full_node/mempool_check_conditions.py +0 -102
- chia/server/aliases.py +0 -35
- {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info/licenses}/LICENSE +0 -0
@@ -1,54 +1,54 @@
 from __future__ import annotations

+import contextlib
+import importlib.resources as importlib_resources
 import itertools
 import logging
 import os
 import random
 import re
+import shutil
 import statistics
 import time
 from collections.abc import Awaitable
 from dataclasses import dataclass
+from hashlib import sha256
 from pathlib import Path
 from random import Random
-from typing import Any, Callable, Optional
+from typing import Any, BinaryIO, Callable, Optional

 import aiohttp
-import
+import chia_rs.datalayer
 import pytest
+from chia_rs.datalayer import KeyAlreadyPresentError, MerkleBlob, TreeIndex
 from chia_rs.sized_bytes import bytes32

 from chia._tests.core.data_layer.util import Example, add_0123_example, add_01234567_example
 from chia._tests.util.misc import BenchmarkRunner, Marks, boolean_datacases, datacases
-from chia.data_layer.data_layer_errors import KeyNotFoundError,
+from chia.data_layer.data_layer_errors import KeyNotFoundError, TreeGenerationIncrementingError
 from chia.data_layer.data_layer_util import (
     DiffData,
     InternalNode,
-    Node,
-    NodeType,
     OperationType,
-    ProofOfInclusion,
-    ProofOfInclusionLayer,
     Root,
+    SerializedNode,
     ServerInfo,
     Side,
     Status,
     Subscription,
     TerminalNode,
     _debug_dump,
-    leaf_hash,
-)
-from chia.data_layer.data_store import DataStore
-from chia.data_layer.download_data import (
     get_delta_filename_path,
     get_full_tree_filename_path,
-
-    insert_into_data_store_from_file,
-    write_files_for_root,
+    leaf_hash,
 )
+from chia.data_layer.data_store import DataStore
+from chia.data_layer.download_data import insert_from_delta_file, write_files_for_root
+from chia.data_layer.util.benchmark import generate_datastore
 from chia.types.blockchain_format.program import Program
 from chia.util.byte_types import hexstr_to_bytes
 from chia.util.db_wrapper import DBWrapper2, generate_in_memory_db_uri
+from chia.util.lru_cache import LRUCache

 log = logging.getLogger(__name__)

@@ -57,31 +57,48 @@ pytestmark = pytest.mark.data_layer


 table_columns: dict[str, list[str]] = {
-    "node": ["hash", "node_type", "left", "right", "key", "value"],
     "root": ["tree_id", "generation", "node_hash", "status"],
+    "subscriptions": ["tree_id", "url", "ignore_till", "num_consecutive_failures", "from_wallet"],
+    "schema": ["version_id", "applied_at"],
+    "ids": ["kv_id", "hash", "blob", "store_id"],
+    "nodes": ["store_id", "hash", "root_hash", "generation", "idx"],
 }


-# TODO: Someday add tests for malformed DB data to make sure we handle it gracefully
-# and with good error messages.
-
-
 @pytest.mark.anyio
-async def
-
-
-
-
-
-
-
-
+async def test_migrate_from_old_format(store_id: bytes32, tmp_path: Path) -> None:
+    old_format_resources = importlib_resources.files(__name__.rpartition(".")[0]).joinpath("old_format")
+    db_uri = tmp_path / "old_db.sqlite"
+    db_uri.write_bytes(old_format_resources.joinpath("old_db.sqlite").read_bytes())
+    files_resources = old_format_resources.joinpath("files")
+
+    with importlib_resources.as_file(files_resources) as files_path:
+        async with DataStore.managed(
+            database=db_uri,
+            uri=True,
+            merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
+            key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
+        ) as data_store:
+            await data_store.migrate_db(files_path)
+            root = await data_store.get_tree_root(store_id=store_id)
+            expected = Root(
+                store_id=bytes32.fromhex("2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964"),
+                node_hash=bytes32.fromhex("aa77376d1ccd3664e5c6366e010c52a978fedbf40f5ce262fee71b2e7fe0c6a9"),
+                generation=50,
+                status=Status.COMMITTED,
+            )
+            assert root == expected


+# TODO: Someday add tests for malformed DB data to make sure we handle it gracefully
+# and with good error messages.
 @pytest.mark.parametrize(argnames=["table_name", "expected_columns"], argvalues=table_columns.items())
 @pytest.mark.anyio
 async def test_create_creates_tables_and_columns(
-    database_uri: str,
+    database_uri: str,
+    table_name: str,
+    expected_columns: list[str],
+    tmp_path: Path,
 ) -> None:
     # Never string-interpolate sql queries... Except maybe in tests when it does not
     # allow you to parametrize the query.
@@ -93,7 +110,12 @@ async def test_create_creates_tables_and_columns(
             columns = await cursor.fetchall()
             assert columns == []

-    async with DataStore.managed(
+    async with DataStore.managed(
+        database=database_uri,
+        uri=True,
+        merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
+        key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
+    ):
         async with db_wrapper.reader() as reader:
             cursor = await reader.execute(query)
             columns = await cursor.fetchall()
@@ -211,46 +233,6 @@ async def test_get_tree_generation_returns_none_when_none_available(
         await raw_data_store.get_tree_generation(store_id=store_id)


-@pytest.mark.anyio
-async def test_insert_internal_node_does_nothing_if_matching(data_store: DataStore, store_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, store_id=store_id)
-
-    kv_node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
-    ancestors = await data_store.get_ancestors(node_hash=kv_node.hash, store_id=store_id)
-    parent = ancestors[0]
-
-    async with data_store.db_wrapper.reader() as reader:
-        cursor = await reader.execute("SELECT * FROM node")
-        before = await cursor.fetchall()
-
-    await data_store._insert_internal_node(left_hash=parent.left_hash, right_hash=parent.right_hash)
-
-    async with data_store.db_wrapper.reader() as reader:
-        cursor = await reader.execute("SELECT * FROM node")
-        after = await cursor.fetchall()
-
-    assert after == before
-
-
-@pytest.mark.anyio
-async def test_insert_terminal_node_does_nothing_if_matching(data_store: DataStore, store_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, store_id=store_id)
-
-    kv_node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
-
-    async with data_store.db_wrapper.reader() as reader:
-        cursor = await reader.execute("SELECT * FROM node")
-        before = await cursor.fetchall()
-
-    await data_store._insert_terminal_node(key=kv_node.key, value=kv_node.value)
-
-    async with data_store.db_wrapper.reader() as reader:
-        cursor = await reader.execute("SELECT * FROM node")
-        after = await cursor.fetchall()
-
-    assert after == before
-
-
 @pytest.mark.anyio
 async def test_build_a_tree(
     data_store: DataStore,
@@ -293,7 +275,7 @@ async def test_get_ancestors(data_store: DataStore, store_id: bytes32) -> None:
         "c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2",
     ]

-    ancestors_2 = await data_store.
+    ancestors_2 = await data_store.get_ancestors(node_hash=reference_node_hash, store_id=store_id)
     assert ancestors == ancestors_2


@@ -306,6 +288,10 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
     first_insertions = [True, False, True, False, True, True, False, True, False, True, True, False, False, True, False]
     deleted_all = False
     node_count = 0
+    node_hashes: list[bytes32] = []
+    hash_to_key: dict[bytes32, bytes] = {}
+    node_hash: Optional[bytes32]
+
     for i in range(1000):
         is_insert = False
         if i <= 14:
@@ -318,12 +304,10 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
             if not deleted_all:
                 while node_count > 0:
                     node_count -= 1
-
-                    node_hash = await data_store.get_terminal_node_for_seed(store_id, seed)
+                    node_hash = random.choice(node_hashes)
                     assert node_hash is not None
-
-
-                    await data_store.delete(key=node.key, store_id=store_id, status=Status.COMMITTED)
+                    await data_store.delete(key=hash_to_key[node_hash], store_id=store_id, status=Status.COMMITTED)
+                    node_hashes.remove(node_hash)
                 deleted_all = True
             is_insert = True
         else:
@@ -335,10 +319,10 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
         key = (i % 200).to_bytes(4, byteorder="big")
         value = (i % 200).to_bytes(4, byteorder="big")
         seed = Program.to((key, value)).get_tree_hash()
-        node_hash =
+        node_hash = None if len(node_hashes) == 0 else random.choice(node_hashes)
         if is_insert:
             node_count += 1
-            side = None if node_hash is None else
+            side = None if node_hash is None else (Side.LEFT if seed[0] < 128 else Side.RIGHT)

             insert_result = await data_store.insert(
                 key=key,
@@ -346,10 +330,11 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
                 store_id=store_id,
                 reference_node_hash=node_hash,
                 side=side,
-                use_optimized=False,
                 status=Status.COMMITTED,
             )
             node_hash = insert_result.node_hash
+            hash_to_key[node_hash] = key
+            node_hashes.append(node_hash)
             if node_hash is not None:
                 generation = await data_store.get_tree_generation(store_id=store_id)
                 current_ancestors = await data_store.get_ancestors(node_hash=node_hash, store_id=store_id)
@@ -357,39 +342,38 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
         else:
             node_count -= 1
             assert node_hash is not None
-
-
-            await data_store.delete(key=node.key, store_id=store_id, use_optimized=False, status=Status.COMMITTED)
+            node_hashes.remove(node_hash)
+            await data_store.delete(key=hash_to_key[node_hash], store_id=store_id, status=Status.COMMITTED)

     for generation, node_hash, expected_ancestors in ancestors:
-        current_ancestors = await data_store.
+        current_ancestors = await data_store.get_ancestors(
             node_hash=node_hash, store_id=store_id, generation=generation
         )
         assert current_ancestors == expected_ancestors


 @pytest.mark.anyio
-@pytest.mark.parametrize(
-    "use_optimized",
-    [True, False],
-)
 @pytest.mark.parametrize(
     "num_batches",
     [1, 5, 10, 25],
 )
-async def
+async def test_batch_update_against_single_operations(
     data_store: DataStore,
     store_id: bytes32,
-    use_optimized: bool,
     tmp_path: Path,
     num_batches: int,
 ) -> None:
-    total_operations = 1000
+    total_operations = 1000
     num_ops_per_batch = total_operations // num_batches
     saved_batches: list[list[dict[str, Any]]] = []
     saved_kv: list[list[TerminalNode]] = []
     db_uri = generate_in_memory_db_uri()
-    async with DataStore.managed(
+    async with DataStore.managed(
+        database=db_uri,
+        uri=True,
+        merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
+        key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
+    ) as single_op_data_store:
         await single_op_data_store.create_tree(store_id, status=Status.COMMITTED)
         random = Random()
         random.seed(100, version=2)
@@ -412,7 +396,6 @@ async def test_batch_update(
                         key=key,
                         value=value,
                         store_id=store_id,
-                        use_optimized=use_optimized,
                         status=Status.COMMITTED,
                     )
                 else:
@@ -420,7 +403,6 @@ async def test_batch_update(
                         key=key,
                         new_value=value,
                         store_id=store_id,
-                        use_optimized=use_optimized,
                         status=Status.COMMITTED,
                     )
                 action = "insert" if op_type == "insert" else "upsert"
@@ -432,7 +414,6 @@ async def test_batch_update(
                    await single_op_data_store.delete(
                        key=key,
                        store_id=store_id,
-                        use_optimized=use_optimized,
                        status=Status.COMMITTED,
                    )
                    batch.append({"action": "delete", "key": key})
@@ -446,7 +427,6 @@ async def test_batch_update(
                        key=key,
                        new_value=new_value,
                        store_id=store_id,
-                        use_optimized=use_optimized,
                        status=Status.COMMITTED,
                    )
                    keys_values[key] = new_value
@@ -469,38 +449,13 @@ async def test_batch_update(
         assert {node.key: node.value for node in current_kv} == {
             node.key: node.value for node in saved_kv[batch_number]
         }
-        queue: list[bytes32] = [root.node_hash]
-        ancestors: dict[bytes32, bytes32] = {}
-        while len(queue) > 0:
-            node_hash = queue.pop(0)
-            expected_ancestors = []
-            ancestor = node_hash
-            while ancestor in ancestors:
-                ancestor = ancestors[ancestor]
-                expected_ancestors.append(ancestor)
-            result_ancestors = await data_store.get_ancestors_optimized(node_hash, store_id)
-            assert [node.hash for node in result_ancestors] == expected_ancestors
-            node = await data_store.get_node(node_hash)
-            if isinstance(node, InternalNode):
-                queue.append(node.left_hash)
-                queue.append(node.right_hash)
-                ancestors[node.left_hash] = node_hash
-                ancestors[node.right_hash] = node_hash

     all_kv = await data_store.get_keys_values(store_id)
     assert {node.key: node.value for node in all_kv} == keys_values


 @pytest.mark.anyio
-
-    "use_optimized",
-    [True, False],
-)
-async def test_upsert_ignores_existing_arguments(
-    data_store: DataStore,
-    store_id: bytes32,
-    use_optimized: bool,
-) -> None:
+async def test_upsert_ignores_existing_arguments(data_store: DataStore, store_id: bytes32) -> None:
     key = b"key"
     value = b"value1"

@@ -508,7 +463,6 @@ async def test_upsert_ignores_existing_arguments(
         key=key,
         value=value,
         store_id=store_id,
-        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
     node = await data_store.get_node_by_key(key, store_id)
@@ -519,7 +473,6 @@ async def test_upsert_ignores_existing_arguments(
         key=key,
         new_value=new_value,
         store_id=store_id,
-        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
     node = await data_store.get_node_by_key(key, store_id)
@@ -529,7 +482,6 @@ async def test_upsert_ignores_existing_arguments(
         key=key,
         new_value=new_value,
         store_id=store_id,
-        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
     node = await data_store.get_node_by_key(key, store_id)
@@ -540,7 +492,6 @@ async def test_upsert_ignores_existing_arguments(
         key=key2,
         new_value=value,
         store_id=store_id,
-        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
     node = await data_store.get_node_by_key(key2, store_id)
@@ -575,30 +526,24 @@ async def test_insert_batch_reference_and_side(
     )
     assert new_root_hash is not None, "batch insert failed or failed to update root"

-
-
+    merkle_blob = await data_store.get_merkle_blob(store_id=store_id, root_hash=new_root_hash)
+    nodes_with_indexes = merkle_blob.get_nodes_with_indexes()
+    nodes = [pair[1] for pair in nodes_with_indexes]
+    assert len(nodes) == 3
+    assert isinstance(nodes[1], chia_rs.datalayer.LeafNode)
+    assert isinstance(nodes[2], chia_rs.datalayer.LeafNode)
+    left_terminal_node = await data_store.get_terminal_node(nodes[1].key, nodes[1].value, store_id)
+    right_terminal_node = await data_store.get_terminal_node(nodes[2].key, nodes[2].value, store_id)
     if side == Side.LEFT:
-
-        assert
+        assert left_terminal_node.key == b"key2"
+        assert right_terminal_node.key == b"key1"
     elif side == Side.RIGHT:
-
-        assert
+        assert left_terminal_node.key == b"key1"
+        assert right_terminal_node.key == b"key2"
     else:  # pragma: no cover
         raise Exception("invalid side for test")


-@pytest.mark.anyio
-async def test_ancestor_table_unique_inserts(data_store: DataStore, store_id: bytes32) -> None:
-    await add_0123_example(data_store=data_store, store_id=store_id)
-    hash_1 = bytes32.from_hexstr("0763561814685fbf92f6ca71fbb1cb11821951450d996375c239979bd63e9535")
-    hash_2 = bytes32.from_hexstr("924be8ff27e84cba17f5bc918097f8410fab9824713a4668a21c8e060a8cab40")
-    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
-    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
-    with pytest.raises(Exception, match="^Requested insertion of ancestor"):
-        await data_store._insert_ancestor_table(hash_1, hash_1, store_id, 2)
-    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
-
-
 @pytest.mark.anyio
 async def test_get_pairs(
     data_store: DataStore,
@@ -609,7 +554,7 @@ async def test_get_pairs(

     pairs = await data_store.get_keys_values(store_id=store_id)

-    assert
+    assert {node.hash for node in pairs} == set(example.terminal_nodes)


 @pytest.mark.anyio
@@ -662,37 +607,6 @@ async def test_inserting_duplicate_key_fails(
         )


-@pytest.mark.anyio()
-async def test_inserting_invalid_length_hash_raises_original_exception(
-    data_store: DataStore,
-) -> None:
-    with pytest.raises(aiosqlite.IntegrityError):
-        # casting since we are testing an invalid case
-        await data_store._insert_node(
-            node_hash=cast(bytes32, b"\x05"),
-            node_type=NodeType.TERMINAL,
-            left_hash=None,
-            right_hash=None,
-            key=b"\x06",
-            value=b"\x07",
-        )
-
-
-@pytest.mark.anyio()
-async def test_inserting_invalid_length_ancestor_hash_raises_original_exception(
-    data_store: DataStore,
-    store_id: bytes32,
-) -> None:
-    with pytest.raises(aiosqlite.IntegrityError):
-        # casting since we are testing an invalid case
-        await data_store._insert_ancestor_table(
-            left_hash=bytes32(b"\x01" * 32),
-            right_hash=bytes32(b"\x02" * 32),
-            store_id=store_id,
-            generation=0,
-        )
-
-
 @pytest.mark.anyio()
 async def test_autoinsert_balances_from_scratch(data_store: DataStore, store_id: bytes32) -> None:
     random = Random()
@@ -705,7 +619,7 @@ async def test_autoinsert_balances_from_scratch(data_store: DataStore, store_id:
         insert_result = await data_store.autoinsert(key, value, store_id, status=Status.COMMITTED)
         hashes.append(insert_result.node_hash)

-    heights = {node_hash: len(await data_store.
+    heights = {node_hash: len(await data_store.get_ancestors(node_hash, store_id)) for node_hash in hashes}
     too_tall = {hash: height for hash, height in heights.items() if height > 14}
     assert too_tall == {}
     assert 11 <= statistics.mean(heights.values()) <= 12
@@ -715,7 +629,7 @@ async def test_autoinsert_balances_from_scratch(data_store: DataStore, store_id:
 async def test_autoinsert_balances_gaps(data_store: DataStore, store_id: bytes32) -> None:
     random = Random()
     random.seed(101, version=2)
-    hashes = []
+    hashes: list[bytes32] = []

     for i in range(2000):
         key = (i + 100).to_bytes(4, byteorder="big")
@@ -723,7 +637,7 @@ async def test_autoinsert_balances_gaps(data_store: DataStore, store_id: bytes32
         if i == 0 or i > 10:
             insert_result = await data_store.autoinsert(key, value, store_id, status=Status.COMMITTED)
         else:
-            reference_node_hash =
+            reference_node_hash = hashes[-1]
             insert_result = await data_store.insert(
                 key=key,
                 value=value,
@@ -732,11 +646,11 @@ async def test_autoinsert_balances_gaps(data_store: DataStore, store_id: bytes32
                 side=Side.LEFT,
                 status=Status.COMMITTED,
             )
-            ancestors = await data_store.
+            ancestors = await data_store.get_ancestors(insert_result.node_hash, store_id)
             assert len(ancestors) == i
         hashes.append(insert_result.node_hash)

-    heights = {node_hash: len(await data_store.
+    heights = {node_hash: len(await data_store.get_ancestors(node_hash, store_id)) for node_hash in hashes}
     too_tall = {hash: height for hash, height in heights.items() if height > 14}
     assert too_tall == {}
     assert 11 <= statistics.mean(heights.values()) <= 12
@@ -874,24 +788,24 @@ async def test_proof_of_inclusion_by_hash(data_store: DataStore, store_id: bytes
     await _debug_dump(db=data_store.db_wrapper)

     expected_layers = [
-        ProofOfInclusionLayer(
+        chia_rs.datalayer.ProofOfInclusionLayer(
             other_hash_side=Side.RIGHT,
             other_hash=bytes32.fromhex("fb66fe539b3eb2020dfbfadfd601fa318521292b41f04c2057c16fca6b947ca1"),
             combined_hash=bytes32.fromhex("36cb1fc56017944213055da8cb0178fb0938c32df3ec4472f5edf0dff85ba4a3"),
         ),
-        ProofOfInclusionLayer(
+        chia_rs.datalayer.ProofOfInclusionLayer(
             other_hash_side=Side.RIGHT,
             other_hash=bytes32.fromhex("6d3af8d93db948e8b6aa4386958e137c6be8bab726db86789594b3588b35adcd"),
             combined_hash=bytes32.fromhex("5f67a0ab1976e090b834bf70e5ce2a0f0a9cd474e19a905348c44ae12274d30b"),
         ),
-        ProofOfInclusionLayer(
+        chia_rs.datalayer.ProofOfInclusionLayer(
             other_hash_side=Side.LEFT,
             other_hash=bytes32.fromhex("c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2"),
             combined_hash=bytes32.fromhex("7a5193a4e31a0a72f6623dfeb2876022ab74a48abb5966088a1c6f5451cc5d81"),
         ),
     ]

-    assert proof == ProofOfInclusion(node_hash=node.hash, layers=expected_layers)
+    assert proof == chia_rs.datalayer.ProofOfInclusion(node_hash=node.hash, layers=expected_layers)


 @pytest.mark.anyio
@@ -904,26 +818,7 @@ async def test_proof_of_inclusion_by_hash_no_ancestors(data_store: DataStore, st

     proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)

-    assert proof == ProofOfInclusion(node_hash=node.hash, layers=[])
-
-
-@pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash_program(data_store: DataStore, store_id: bytes32) -> None:
-    """The proof of inclusion program has the expected Python equivalence."""
-
-    await add_01234567_example(data_store=data_store, store_id=store_id)
-    node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
-
-    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
-
-    assert proof.as_program() == [
-        b"\x04",
-        [
-            bytes32.fromhex("fb66fe539b3eb2020dfbfadfd601fa318521292b41f04c2057c16fca6b947ca1"),
-            bytes32.fromhex("6d3af8d93db948e8b6aa4386958e137c6be8bab726db86789594b3588b35adcd"),
-            bytes32.fromhex("c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2"),
-        ],
-    ]
+    assert proof == chia_rs.datalayer.ProofOfInclusion(node_hash=node.hash, layers=[])


 @pytest.mark.anyio
@@ -939,27 +834,6 @@ async def test_proof_of_inclusion_by_hash_equals_by_key(data_store: DataStore, s
     assert proof_by_hash == proof_by_key


-@pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash_bytes(data_store: DataStore, store_id: bytes
|
|
945
|
-
program and subsequently to bytes.
|
|
946
|
-
"""
|
|
947
|
-
await add_01234567_example(data_store=data_store, store_id=store_id)
|
|
948
|
-
node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
|
|
949
|
-
|
|
950
|
-
proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
|
|
951
|
-
|
|
952
|
-
expected = (
|
|
953
|
-
b"\xff\x04\xff\xff\xa0\xfbf\xfeS\x9b>\xb2\x02\r\xfb\xfa\xdf\xd6\x01\xfa1\x85!)"
|
|
954
|
-
b"+A\xf0L W\xc1o\xcak\x94|\xa1\xff\xa0m:\xf8\xd9=\xb9H\xe8\xb6\xaaC\x86\x95"
|
|
955
|
-
b"\x8e\x13|k\xe8\xba\xb7&\xdb\x86x\x95\x94\xb3X\x8b5\xad\xcd\xff\xa0\xc8R\xec"
|
|
956
|
-
b"\xd8\xfbaT\x9a\nB\xf9\xeb\x9d\xdee\xe6\xc9J\x01\x93M\xbd\x9c\x1d5\xab\x94"
|
|
957
|
-
b"\xe2\xa0\xaeX\xe2\x80\x80"
|
|
958
|
-
)
|
|
959
|
-
|
|
960
|
-
assert bytes(proof.as_program()) == expected
|
|
961
|
-
|
|
962
|
-
|
|
963
837
|
# @pytest.mark.anyio
|
|
964
838
|
# async def test_create_first_pair(data_store: DataStore, store_id: bytes) -> None:
|
|
965
839
|
# key = SExp.to([1, 2])
|
|
@@ -1036,46 +910,6 @@ async def test_check_roots_are_incrementing_gap(raw_data_store: DataStore) -> No
|
|
|
1036
910
|
await raw_data_store._check_roots_are_incrementing()
|
|
1037
911
|
|
|
1038
912
|
|
|
1039
|
-
@pytest.mark.anyio
|
|
1040
|
-
async def test_check_hashes_internal(raw_data_store: DataStore) -> None:
|
|
1041
|
-
async with raw_data_store.db_wrapper.writer() as writer:
|
|
1042
|
-
await writer.execute(
|
|
1043
|
-
"INSERT INTO node(hash, node_type, left, right) VALUES(:hash, :node_type, :left, :right)",
|
|
1044
|
-
{
|
|
1045
|
-
"hash": a_bytes_32,
|
|
1046
|
-
"node_type": NodeType.INTERNAL,
|
|
1047
|
-
"left": a_bytes_32,
|
|
1048
|
-
"right": a_bytes_32,
|
|
1049
|
-
},
|
|
1050
|
-
)
|
|
1051
|
-
|
|
1052
|
-
with pytest.raises(
|
|
1053
|
-
NodeHashError,
|
|
1054
|
-
match=r"\n +000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f$",
|
|
1055
|
-
):
|
|
1056
|
-
await raw_data_store._check_hashes()
|
|
1057
|
-
|
|
1058
|
-
|
|
1059
|
-
@pytest.mark.anyio
|
|
1060
|
-
async def test_check_hashes_terminal(raw_data_store: DataStore) -> None:
|
|
1061
|
-
async with raw_data_store.db_wrapper.writer() as writer:
|
|
1062
|
-
await writer.execute(
|
|
1063
|
-
"INSERT INTO node(hash, node_type, key, value) VALUES(:hash, :node_type, :key, :value)",
|
|
1064
|
-
{
|
|
1065
|
-
"hash": a_bytes_32,
|
|
1066
|
-
"node_type": NodeType.TERMINAL,
|
|
1067
|
-
"key": Program.to((1, 2)).as_bin(),
|
|
1068
|
-
"value": Program.to((1, 2)).as_bin(),
|
|
1069
|
-
},
|
|
1070
|
-
)
|
|
1071
|
-
|
|
1072
|
-
with pytest.raises(
|
|
1073
|
-
NodeHashError,
|
|
1074
|
-
match=r"\n +000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f$",
|
|
1075
|
-
):
|
|
1076
|
-
await raw_data_store._check_hashes()
|
|
1077
|
-
|
|
1078
|
-
|
|
1079
913
|
@pytest.mark.anyio
|
|
1080
914
|
async def test_root_state(data_store: DataStore, store_id: bytes32) -> None:
|
|
1081
915
|
key = b"\x01\x02"
|
|
@@ -1127,28 +961,29 @@ async def test_kv_diff(data_store: DataStore, store_id: bytes32) -> None:
|
|
|
1127
961
|
insertions = 0
|
|
1128
962
|
expected_diff: set[DiffData] = set()
|
|
1129
963
|
root_start = None
|
|
964
|
+
|
|
1130
965
|
for i in range(500):
|
|
1131
966
|
key = (i + 100).to_bytes(4, byteorder="big")
|
|
1132
967
|
value = (i + 200).to_bytes(4, byteorder="big")
|
|
1133
968
|
seed = leaf_hash(key=key, value=value)
|
|
1134
|
-
|
|
969
|
+
node = await data_store.get_terminal_node_for_seed(seed, store_id)
|
|
970
|
+
side_seed = bytes(seed)[0]
|
|
971
|
+
side = None if node is None else (Side.LEFT if side_seed < 128 else Side.RIGHT)
|
|
972
|
+
|
|
1135
973
|
if random.randint(0, 4) > 0 or insertions < 10:
|
|
1136
974
|
insertions += 1
|
|
1137
|
-
|
|
1138
|
-
|
|
975
|
+
reference_node_hash = node.hash if node is not None else None
|
|
1139
976
|
await data_store.insert(
|
|
1140
977
|
key=key,
|
|
1141
978
|
value=value,
|
|
1142
979
|
store_id=store_id,
|
|
1143
|
-
reference_node_hash=node_hash,
|
|
1144
|
-
side=side,
|
|
1145
980
|
status=Status.COMMITTED,
|
|
981
|
+
reference_node_hash=reference_node_hash,
|
|
982
|
+
side=side,
|
|
1146
983
|
)
|
|
1147
984
|
if i > 200:
|
|
1148
985
|
expected_diff.add(DiffData(OperationType.INSERT, key, value))
|
|
1149
986
|
else:
|
|
1150
|
-
assert node_hash is not None
|
|
1151
|
-
node = await data_store.get_node(node_hash)
|
|
1152
987
|
assert isinstance(node, TerminalNode)
|
|
1153
988
|
await data_store.delete(key=node.key, store_id=store_id, status=Status.COMMITTED)
|
|
1154
989
|
if i > 200:
|
|
@@ -1275,6 +1110,39 @@ async def test_subscribe_unsubscribe(data_store: DataStore, store_id: bytes32) -
|
|
|
1275
1110
|
]
|
|
1276
1111
|
|
|
1277
1112
|
|
|
1113
|
+
@pytest.mark.anyio
|
|
1114
|
+
async def test_unsubscribe_clears_databases(data_store: DataStore, store_id: bytes32) -> None:
|
|
1115
|
+
num_inserts = 100
|
|
1116
|
+
await data_store.subscribe(Subscription(store_id, []))
|
|
1117
|
+
for value in range(num_inserts):
|
|
1118
|
+
await data_store.insert(
|
|
1119
|
+
key=value.to_bytes(4, byteorder="big"),
|
|
1120
|
+
value=value.to_bytes(4, byteorder="big"),
|
|
1121
|
+
store_id=store_id,
|
|
1122
|
+
reference_node_hash=None,
|
|
1123
|
+
side=None,
|
|
1124
|
+
status=Status.COMMITTED,
|
|
1125
|
+
)
|
|
1126
|
+
await data_store.add_node_hashes(store_id)
|
|
1127
|
+
|
|
1128
|
+
tables = ["ids", "nodes"]
|
|
1129
|
+
for table in tables:
|
|
1130
|
+
async with data_store.db_wrapper.reader() as reader:
|
|
1131
|
+
async with reader.execute(f"SELECT COUNT(*) FROM {table}") as cursor:
|
|
1132
|
+
row_count = await cursor.fetchone()
|
|
1133
|
+
assert row_count is not None
|
|
1134
|
+
assert row_count[0] > 0
|
|
1135
|
+
|
|
1136
|
+
await data_store.unsubscribe(store_id)
|
|
1137
|
+
|
|
1138
|
+
for table in tables:
|
|
1139
|
+
async with data_store.db_wrapper.reader() as reader:
|
|
1140
|
+
async with reader.execute(f"SELECT COUNT(*) FROM {table}") as cursor:
|
|
1141
|
+
row_count = await cursor.fetchone()
|
|
1142
|
+
assert row_count is not None
|
|
1143
|
+
assert row_count[0] == 0
|
|
1144
|
+
|
|
1145
|
+
|
|
1278
1146
|
@pytest.mark.anyio
|
|
1279
1147
|
async def test_server_selection(data_store: DataStore, store_id: bytes32) -> None:
|
|
1280
1148
|
start_timestamp = 1000
|
|
@@ -1410,16 +1278,71 @@ async def test_server_http_ban(
|
|
|
1410
1278
|
assert sinfo.ignore_till == start_timestamp # we don't increase on second failure
|
|
1411
1279
|
|
|
1412
1280
|
|
|
1413
|
-
|
|
1414
|
-
|
|
1415
|
-
|
|
1416
|
-
|
|
1281
|
+
async def get_first_generation(data_store: DataStore, node_hash: bytes32, store_id: bytes32) -> Optional[int]:
|
|
1282
|
+
async with data_store.db_wrapper.reader() as reader:
|
|
1283
|
+
cursor = await reader.execute(
|
|
1284
|
+
"SELECT generation FROM nodes WHERE hash = ? AND store_id = ?",
|
|
1285
|
+
(
|
|
1286
|
+
node_hash,
|
|
1287
|
+
store_id,
|
|
1288
|
+
),
|
|
1289
|
+
)
|
|
1290
|
+
|
|
1291
|
+
row = await cursor.fetchone()
|
|
1292
|
+
if row is None:
|
|
1293
|
+
return None
|
|
1294
|
+
|
|
1295
|
+
return int(row[0])
|
|
1296
|
+
|
|
1297
|
+
|
|
1298
|
+
async def write_tree_to_file_old_format(
|
|
1299
|
+
data_store: DataStore,
|
|
1300
|
+
root: Root,
|
|
1301
|
+
node_hash: bytes32,
|
|
1302
|
+
store_id: bytes32,
|
|
1303
|
+
writer: BinaryIO,
|
|
1304
|
+
merkle_blob: Optional[MerkleBlob] = None,
|
|
1305
|
+
hash_to_index: Optional[dict[bytes32, TreeIndex]] = None,
|
|
1306
|
+
) -> None:
|
|
1307
|
+
if node_hash == bytes32.zeros:
|
|
1308
|
+
return
|
|
1309
|
+
|
|
1310
|
+
if merkle_blob is None:
|
|
1311
|
+
merkle_blob = await data_store.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
|
|
1312
|
+
if hash_to_index is None:
|
|
1313
|
+
hash_to_index = merkle_blob.get_hashes_indexes()
|
|
1314
|
+
|
|
1315
|
+
generation = await get_first_generation(data_store, node_hash, store_id)
|
|
1316
|
+
# Root's generation is not the first time we see this hash, so it's not a new delta.
|
|
1317
|
+
if root.generation != generation:
|
|
1318
|
+
return
|
|
1319
|
+
|
|
1320
|
+
raw_index = hash_to_index[node_hash]
|
|
1321
|
+
raw_node = merkle_blob.get_raw_node(raw_index)
|
|
1322
|
+
|
|
1323
|
+
if isinstance(raw_node, chia_rs.datalayer.InternalNode):
|
|
1324
|
+
left_hash = merkle_blob.get_hash_at_index(raw_node.left)
|
|
1325
|
+
right_hash = merkle_blob.get_hash_at_index(raw_node.right)
|
|
1326
|
+
await write_tree_to_file_old_format(data_store, root, left_hash, store_id, writer, merkle_blob, hash_to_index)
|
|
1327
|
+
await write_tree_to_file_old_format(data_store, root, right_hash, store_id, writer, merkle_blob, hash_to_index)
|
|
1328
|
+
to_write = bytes(SerializedNode(False, bytes(left_hash), bytes(right_hash)))
|
|
1329
|
+
elif isinstance(raw_node, chia_rs.datalayer.LeafNode):
|
|
1330
|
+
node = await data_store.get_terminal_node(raw_node.key, raw_node.value, store_id)
|
|
1331
|
+
to_write = bytes(SerializedNode(True, node.key, node.value))
|
|
1332
|
+
else:
|
|
1333
|
+
raise Exception(f"Node is neither InternalNode nor TerminalNode: {raw_node}")
|
|
1334
|
+
|
|
1335
|
+
writer.write(len(to_write).to_bytes(4, byteorder="big"))
|
|
1336
|
+
writer.write(to_write)
|
|
1337
|
+
|
|
1338
|
+
|
|
1339
|
+
@pytest.mark.parametrize(argnames="test_delta", argvalues=["full", "delta", "old"])
|
|
1417
1340
|
@boolean_datacases(name="group_files_by_store", false="group by singleton", true="don't group by singleton")
|
|
1418
1341
|
@pytest.mark.anyio
|
|
1419
1342
|
async def test_data_server_files(
|
|
1420
1343
|
data_store: DataStore,
|
|
1421
1344
|
store_id: bytes32,
|
|
1422
|
-
test_delta:
|
|
1345
|
+
test_delta: str,
|
|
1423
1346
|
group_files_by_store: bool,
|
|
1424
1347
|
tmp_path: Path,
|
|
1425
1348
|
) -> None:
|
|
@@ -1428,45 +1351,70 @@ async def test_data_server_files(
|
|
|
1428
1351
|
num_ops_per_batch = 100
|
|
1429
1352
|
|
|
1430
1353
|
db_uri = generate_in_memory_db_uri()
|
|
1431
|
-
async with DataStore.managed(
|
|
1354
|
+
async with DataStore.managed(
|
|
1355
|
+
database=db_uri,
|
|
1356
|
+
uri=True,
|
|
1357
|
+
merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
|
|
1358
|
+
key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
|
|
1359
|
+
) as data_store_server:
|
|
1432
1360
|
await data_store_server.create_tree(store_id, status=Status.COMMITTED)
|
|
1433
1361
|
random = Random()
|
|
1434
1362
|
random.seed(100, version=2)
|
|
1435
1363
|
|
|
1436
1364
|
keys: list[bytes] = []
|
|
1437
1365
|
counter = 0
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
keys
|
|
1446
|
-
|
|
1366
|
+
num_repeats = 2
|
|
1367
|
+
|
|
1368
|
+
# Repeat twice to guarantee there will be hashes from the old file format
|
|
1369
|
+
for _ in range(num_repeats):
|
|
1370
|
+
for batch in range(num_batches):
|
|
1371
|
+
changelist: list[dict[str, Any]] = []
|
|
1372
|
+
if batch == num_batches - 1:
|
|
1373
|
+
for key in keys:
|
|
1374
|
+
changelist.append({"action": "delete", "key": key})
|
|
1375
|
+
keys = []
|
|
1376
|
+
counter = 0
|
|
1447
1377
|
else:
|
|
1448
|
-
|
|
1449
|
-
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1378
|
+
for operation in range(num_ops_per_batch):
|
|
1379
|
+
if random.randint(0, 4) > 0 or len(keys) == 0:
|
|
1380
|
+
key = counter.to_bytes(4, byteorder="big")
|
|
1381
|
+
value = (2 * counter).to_bytes(4, byteorder="big")
|
|
1382
|
+
keys.append(key)
|
|
1383
|
+
changelist.append({"action": "insert", "key": key, "value": value})
|
|
1384
|
+
else:
|
|
1385
|
+
key = random.choice(keys)
|
|
1386
|
+
keys.remove(key)
|
|
1387
|
+
changelist.append({"action": "delete", "key": key})
|
|
1388
|
+
counter += 1
|
|
1389
|
+
|
|
1390
|
+
await data_store_server.insert_batch(store_id, changelist, status=Status.COMMITTED)
|
|
1391
|
+
root = await data_store_server.get_tree_root(store_id)
|
|
1392
|
+
await data_store_server.add_node_hashes(store_id)
|
|
1393
|
+
if test_delta == "old":
|
|
1394
|
+
node_hash = root.node_hash if root.node_hash is not None else bytes32.zeros
|
|
1395
|
+
filename = get_delta_filename_path(
|
|
1396
|
+
tmp_path, store_id, node_hash, root.generation, group_files_by_store
|
|
1397
|
+
)
|
|
1398
|
+
filename.parent.mkdir(parents=True, exist_ok=True)
|
|
1399
|
+
with open(filename, "xb") as writer:
|
|
1400
|
+
await write_tree_to_file_old_format(data_store_server, root, node_hash, store_id, writer)
|
|
1401
|
+
else:
|
|
1402
|
+
await write_files_for_root(
|
|
1403
|
+
data_store_server, store_id, root, tmp_path, 0, group_by_store=group_files_by_store
|
|
1404
|
+
)
|
|
1405
|
+
roots.append(root)
|
|
1458
1406
|
|
|
1459
1407
|
generation = 1
|
|
1460
|
-
assert len(roots) == num_batches
|
|
1408
|
+
assert len(roots) == num_batches * num_repeats
|
|
1461
1409
|
for root in roots:
|
|
1462
|
-
|
|
1463
|
-
if
|
|
1464
|
-
filename = get_full_tree_filename_path(tmp_path, store_id,
|
|
1410
|
+
node_hash = root.node_hash if root.node_hash is not None else bytes32.zeros
|
|
1411
|
+
if test_delta == "full":
|
|
1412
|
+
filename = get_full_tree_filename_path(tmp_path, store_id, node_hash, generation, group_files_by_store)
|
|
1465
1413
|
assert filename.exists()
|
|
1466
1414
|
else:
|
|
1467
|
-
filename = get_delta_filename_path(tmp_path, store_id,
|
|
1415
|
+
filename = get_delta_filename_path(tmp_path, store_id, node_hash, generation, group_files_by_store)
|
|
1468
1416
|
assert filename.exists()
|
|
1469
|
-
await insert_into_data_store_from_file(
|
|
1417
|
+
await data_store.insert_into_data_store_from_file(store_id, root.node_hash, tmp_path.joinpath(filename))
|
|
1470
1418
|
current_root = await data_store.get_tree_root(store_id=store_id)
|
|
1471
1419
|
assert current_root.node_hash == root.node_hash
|
|
1472
1420
|
generation += 1
|
|
@@ -1551,6 +1499,20 @@ class BatchesInsertBenchmarkCase:
|
|
|
1551
1499
|
return f"count={self.count},batch_count={self.batch_count}"
|
|
1552
1500
|
|
|
1553
1501
|
|
|
1502
|
+
@dataclass
|
|
1503
|
+
class BatchUpdateBenchmarkCase:
|
|
1504
|
+
pre: int
|
|
1505
|
+
num_inserts: int
|
|
1506
|
+
num_deletes: int
|
|
1507
|
+
num_upserts: int
|
|
1508
|
+
limit: float
|
|
1509
|
+
marks: Marks = ()
|
|
1510
|
+
|
|
1511
|
+
@property
|
|
1512
|
+
def id(self) -> str:
|
|
1513
|
+
return f"pre={self.pre},inserts={self.num_inserts},deletes={self.num_deletes},upserts={self.num_upserts}"
|
|
1514
|
+
|
|
1515
|
+
|
|
1554
1516
|
@datacases(
|
|
1555
1517
|
BatchInsertBenchmarkCase(
|
|
1556
1518
|
pre=0,
|
|
@@ -1589,16 +1551,55 @@ async def test_benchmark_batch_insert_speed(
|
|
|
1589
1551
|
r.seed("shadowlands", version=2)
|
|
1590
1552
|
|
|
1591
1553
|
changelist = [
|
|
1554
|
+
{"action": "insert", "key": x.to_bytes(32, byteorder="big", signed=False), "value": r.randbytes(1200)}
|
|
1555
|
+
for x in range(case.pre + case.count)
|
|
1556
|
+
]
|
|
1557
|
+
|
|
1558
|
+
pre = changelist[: case.pre]
|
|
1559
|
+
batch = changelist[case.pre : case.pre + case.count]
|
|
1560
|
+
|
|
1561
|
+
if case.pre > 0:
|
|
1562
|
+
await data_store.insert_batch(
|
|
1563
|
+
store_id=store_id,
|
|
1564
|
+
changelist=pre,
|
|
1565
|
+
status=Status.COMMITTED,
|
|
1566
|
+
)
|
|
1567
|
+
|
|
1568
|
+
with benchmark_runner.assert_runtime(seconds=case.limit):
|
|
1569
|
+
await data_store.insert_batch(
|
|
1570
|
+
store_id=store_id,
|
|
1571
|
+
changelist=batch,
|
|
1572
|
+
)
|
|
1573
|
+
|
|
1574
|
+
|
|
1575
|
+
@datacases(
|
|
1576
|
+
BatchUpdateBenchmarkCase(
|
|
1577
|
+
pre=1_000,
|
|
1578
|
+
num_inserts=1_000,
|
|
1579
|
+
num_deletes=500,
|
|
1580
|
+
num_upserts=500,
|
|
1581
|
+
limit=36,
|
|
1582
|
+
),
|
|
1583
|
+
)
|
|
1584
|
+
@pytest.mark.anyio
|
|
1585
|
+
async def test_benchmark_batch_update_speed(
|
|
1586
|
+
data_store: DataStore,
|
|
1587
|
+
store_id: bytes32,
|
|
1588
|
+
benchmark_runner: BenchmarkRunner,
|
|
1589
|
+
case: BatchUpdateBenchmarkCase,
|
|
1590
|
+
) -> None:
|
|
1591
|
+
r = random.Random()
|
|
1592
|
+
r.seed("shadowlands", version=2)
|
|
1593
|
+
|
|
1594
|
+
pre = [
|
|
1592
1595
|
{
|
|
1593
1596
|
"action": "insert",
|
|
1594
1597
|
"key": x.to_bytes(32, byteorder="big", signed=False),
|
|
1595
1598
|
"value": bytes(r.getrandbits(8) for _ in range(1200)),
|
|
1596
1599
|
}
|
|
1597
|
-
for x in range(case.pre
|
|
1600
|
+
for x in range(case.pre)
|
|
1598
1601
|
]
|
|
1599
|
-
|
|
1600
|
-
pre = changelist[: case.pre]
|
|
1601
|
-
batch = changelist[case.pre : case.pre + case.count]
|
|
1602
|
+
batch = []
|
|
1602
1603
|
|
|
1603
1604
|
if case.pre > 0:
|
|
1604
1605
|
await data_store.insert_batch(
|
|
@@ -1607,6 +1608,44 @@ async def test_benchmark_batch_insert_speed(
|
|
|
1607
1608
|
status=Status.COMMITTED,
|
|
1608
1609
|
)
|
|
1609
1610
|
|
|
1611
|
+
keys = [x.to_bytes(32, byteorder="big", signed=False) for x in range(case.pre)]
|
|
1612
|
+
for operation in range(case.num_inserts):
|
|
1613
|
+
key = (operation + case.pre).to_bytes(32, byteorder="big", signed=False)
|
|
1614
|
+
batch.append(
|
|
1615
|
+
{
|
|
1616
|
+
"action": "insert",
|
|
1617
|
+
"key": key,
|
|
1618
|
+
"value": bytes(r.getrandbits(8) for _ in range(1200)),
|
|
1619
|
+
}
|
|
1620
|
+
)
|
|
1621
|
+
keys.append(key)
|
|
1622
|
+
|
|
1623
|
+
if case.num_deletes > 0:
|
|
1624
|
+
r.shuffle(keys)
|
|
1625
|
+
assert len(keys) >= case.num_deletes
|
|
1626
|
+
batch.extend(
|
|
1627
|
+
{
|
|
1628
|
+
"action": "delete",
|
|
1629
|
+
"key": key,
|
|
1630
|
+
}
|
|
1631
|
+
for key in keys[: case.num_deletes]
|
|
1632
|
+
)
|
|
1633
|
+
keys = keys[case.num_deletes :]
|
|
1634
|
+
|
|
1635
|
+
if case.num_upserts > 0:
|
|
1636
|
+
assert len(keys) > 0
|
|
1637
|
+
r.shuffle(keys)
|
|
1638
|
+
batch.extend(
|
|
1639
|
+
[
|
|
1640
|
+
{
|
|
1641
|
+
"action": "upsert",
|
|
1642
|
+
"key": keys[operation % len(keys)],
|
|
1643
|
+
"value": bytes(r.getrandbits(8) for _ in range(1200)),
|
|
1644
|
+
}
|
|
1645
|
+
for operation in range(case.num_upserts)
|
|
1646
|
+
]
|
|
1647
|
+
)
|
|
1648
|
+
|
|
1610
1649
|
with benchmark_runner.assert_runtime(seconds=case.limit):
|
|
1611
1650
|
await data_store.insert_batch(
|
|
1612
1651
|
store_id=store_id,
|
|
@@ -1614,6 +1653,22 @@ async def test_benchmark_batch_insert_speed(
|
|
|
1614
1653
|
)
|
|
1615
1654
|
|
|
1616
1655
|
|
|
1656
|
+
@datacases(
|
|
1657
|
+
BatchInsertBenchmarkCase(
|
|
1658
|
+
pre=0,
|
|
1659
|
+
count=50,
|
|
1660
|
+
limit=2,
|
|
1661
|
+
),
|
|
1662
|
+
)
|
|
1663
|
+
@pytest.mark.anyio
|
|
1664
|
+
async def test_benchmark_tool(
|
|
1665
|
+
benchmark_runner: BenchmarkRunner,
|
|
1666
|
+
case: BatchInsertBenchmarkCase,
|
|
1667
|
+
) -> None:
|
|
1668
|
+
with benchmark_runner.assert_runtime(seconds=case.limit):
|
|
1669
|
+
await generate_datastore(case.count)
|
|
1670
|
+
|
|
1671
|
+
|
|
1617
1672
|
@datacases(
|
|
1618
1673
|
BatchesInsertBenchmarkCase(
|
|
1619
1674
|
count=50,
|
|
@@ -1637,7 +1692,7 @@ async def test_benchmark_batch_insert_speed_multiple_batches(
|
|
|
1637
1692
|
{
|
|
1638
1693
|
"action": "insert",
|
|
1639
1694
|
"key": x.to_bytes(32, byteorder="big", signed=False),
|
|
1640
|
-
"value":
|
|
1695
|
+
"value": r.randbytes(10000),
|
|
1641
1696
|
}
|
|
1642
1697
|
for x in range(batch * case.count, (batch + 1) * case.count)
|
|
1643
1698
|
]
|
|
@@ -1648,189 +1703,6 @@ async def test_benchmark_batch_insert_speed_multiple_batches(
|
|
|
1648
1703
|
)
|
|
1649
1704
|
|
|
1650
1705
|
|
|
1651
|
-
@pytest.mark.anyio
|
|
1652
|
-
async def test_delete_store_data(raw_data_store: DataStore) -> None:
|
|
1653
|
-
store_id = bytes32.zeros
|
|
1654
|
-
store_id_2 = bytes32(b"\0" * 31 + b"\1")
|
|
1655
|
-
await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
|
|
1656
|
-
await raw_data_store.create_tree(store_id=store_id_2, status=Status.COMMITTED)
|
|
1657
|
-
total_keys = 4
|
|
1658
|
-
keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)]
|
|
1659
|
-
batch1 = [
|
|
1660
|
-
{"action": "insert", "key": keys[0], "value": keys[0]},
|
|
1661
|
-
{"action": "insert", "key": keys[1], "value": keys[1]},
|
|
1662
|
-
]
|
|
1663
|
-
batch2 = batch1.copy()
|
|
1664
|
-
batch1.append({"action": "insert", "key": keys[2], "value": keys[2]})
|
|
1665
|
-
batch2.append({"action": "insert", "key": keys[3], "value": keys[3]})
|
|
1666
|
-
assert batch1 != batch2
|
|
1667
|
-
await raw_data_store.insert_batch(store_id, batch1, status=Status.COMMITTED)
|
|
1668
|
-
await raw_data_store.insert_batch(store_id_2, batch2, status=Status.COMMITTED)
|
|
1669
|
-
keys_values_before = await raw_data_store.get_keys_values(store_id_2)
|
|
1670
|
-
async with raw_data_store.db_wrapper.reader() as reader:
|
|
1671
|
-
result = await reader.execute("SELECT * FROM node")
|
|
1672
|
-
nodes = await result.fetchall()
|
|
1673
|
-
kv_nodes_before = {}
|
|
1674
|
-
for node in nodes:
|
|
1675
|
-
if node["key"] is not None:
|
|
1676
|
-
kv_nodes_before[node["key"]] = node["value"]
|
|
1677
|
-
assert [kv_nodes_before[key] for key in keys] == keys
|
|
1678
|
-
await raw_data_store.delete_store_data(store_id)
|
|
1679
|
-
# Deleting from `node` table doesn't alter other stores.
|
|
1680
|
-
keys_values_after = await raw_data_store.get_keys_values(store_id_2)
|
|
1681
|
-
assert keys_values_before == keys_values_after
|
|
1682
|
-
async with raw_data_store.db_wrapper.reader() as reader:
|
|
1683
|
-
result = await reader.execute("SELECT * FROM node")
|
|
1684
|
-
nodes = await result.fetchall()
|
|
1685
|
-
kv_nodes_after = {}
|
|
1686
|
-
for node in nodes:
|
|
1687
|
-
if node["key"] is not None:
|
|
1688
|
-
kv_nodes_after[node["key"]] = node["value"]
|
|
1689
|
-
for i in range(total_keys):
|
|
1690
|
-
if i != 2:
|
|
1691
|
-
assert kv_nodes_after[keys[i]] == keys[i]
|
|
1692
|
-
else:
|
|
1693
|
-
# `keys[2]` was only present in the first store.
|
|
1694
|
-
assert keys[i] not in kv_nodes_after
|
|
1695
|
-
assert not await raw_data_store.store_id_exists(store_id)
|
|
1696
|
-
await raw_data_store.delete_store_data(store_id_2)
|
|
1697
|
-
async with raw_data_store.db_wrapper.reader() as reader:
|
|
1698
|
-
async with reader.execute("SELECT COUNT(*) FROM node") as cursor:
|
|
1699
|
-
row_count = await cursor.fetchone()
|
|
1700
|
-
assert row_count is not None
|
|
1701
|
-
assert row_count[0] == 0
|
|
1702
|
-
assert not await raw_data_store.store_id_exists(store_id_2)
|
|
1703
|
-
|
|
1704
|
-
|
|
1705
|
-
@pytest.mark.anyio
|
|
1706
|
-
async def test_delete_store_data_multiple_stores(raw_data_store: DataStore) -> None:
|
|
1707
|
-
# Make sure inserting and deleting the same data works
|
|
1708
|
-
for repetition in range(2):
|
|
1709
|
-
num_stores = 50
|
|
1710
|
-
total_keys = 150
|
|
1711
|
-
keys_deleted_per_store = 3
|
|
1712
|
-
store_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)]
|
|
1713
|
-
for store_id in store_ids:
|
|
1714
|
-
await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
|
|
1715
|
-
original_keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)]
|
|
1716
|
-
batches = []
|
|
1717
|
-
for i in range(num_stores):
|
|
1718
|
-
batch = [
|
|
1719
|
-
{"action": "insert", "key": key, "value": key} for key in original_keys[i * keys_deleted_per_store :]
|
|
1720
|
-
]
|
|
1721
|
-
batches.append(batch)
|
|
1722
|
-
|
|
1723
|
-
for store_id, batch in zip(store_ids, batches):
|
|
1724
|
-
await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED)
|
|
1725
|
-
|
|
1726
|
-
for tree_index in range(num_stores):
|
|
1727
|
-
async with raw_data_store.db_wrapper.reader() as reader:
|
|
1728
|
-
result = await reader.execute("SELECT * FROM node")
|
|
1729
|
-
nodes = await result.fetchall()
|
|
1730
|
-
|
|
1731
|
-
keys = {node["key"] for node in nodes if node["key"] is not None}
|
|
1732
|
-
assert len(keys) == total_keys - tree_index * keys_deleted_per_store
|
|
1733
|
-
keys_after_index = set(original_keys[tree_index * keys_deleted_per_store :])
|
|
1734
|
-
keys_before_index = set(original_keys[: tree_index * keys_deleted_per_store])
|
|
1735
|
-
assert keys_after_index.issubset(keys)
|
|
1736
|
-
assert keys.isdisjoint(keys_before_index)
|
|
1737
|
-
await raw_data_store.delete_store_data(store_ids[tree_index])
|
|
1738
|
-
|
|
1739
|
-
async with raw_data_store.db_wrapper.reader() as reader:
|
|
1740
|
-
async with reader.execute("SELECT COUNT(*) FROM node") as cursor:
|
|
1741
|
-
row_count = await cursor.fetchone()
|
|
1742
|
-
assert row_count is not None
|
|
1743
|
-
assert row_count[0] == 0
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
@pytest.mark.parametrize("common_keys_count", [1, 250, 499])
|
|
1747
|
-
@pytest.mark.anyio
|
|
1748
|
-
async def test_delete_store_data_with_common_values(raw_data_store: DataStore, common_keys_count: int) -> None:
|
|
1749
|
-
store_id_1 = bytes32(b"\x00" * 31 + b"\x01")
|
|
1750
|
-
store_id_2 = bytes32(b"\x00" * 31 + b"\x02")
|
|
1751
|
-
|
|
1752
|
-
await raw_data_store.create_tree(store_id=store_id_1, status=Status.COMMITTED)
|
|
1753
|
-
await raw_data_store.create_tree(store_id=store_id_2, status=Status.COMMITTED)
|
|
1754
|
-
|
|
1755
|
-
key_offset = 1000
|
|
1756
|
-
total_keys_per_store = 500
|
|
1757
|
-
assert common_keys_count < key_offset
|
|
1758
|
-
common_keys = {key.to_bytes(4, byteorder="big") for key in range(common_keys_count)}
|
|
1759
|
-
unique_keys_1 = {
|
|
1760
|
-
(key + key_offset).to_bytes(4, byteorder="big") for key in range(total_keys_per_store - common_keys_count)
|
|
1761
|
-
}
|
|
1762
|
-
unique_keys_2 = {
|
|
1763
|
-
(key + (2 * key_offset)).to_bytes(4, byteorder="big") for key in range(total_keys_per_store - common_keys_count)
|
|
1764
|
-
}
|
|
1765
|
-
|
|
1766
|
-
batch1 = [{"action": "insert", "key": key, "value": key} for key in common_keys.union(unique_keys_1)]
|
|
1767
|
-
batch2 = [{"action": "insert", "key": key, "value": key} for key in common_keys.union(unique_keys_2)]
|
|
1768
|
-
|
|
1769
|
-
await raw_data_store.insert_batch(store_id_1, batch1, status=Status.COMMITTED)
|
|
1770
|
-
await raw_data_store.insert_batch(store_id_2, batch2, status=Status.COMMITTED)
|
|
1771
|
-
|
|
1772
|
-
await raw_data_store.delete_store_data(store_id_1)
|
|
1773
|
-
async with raw_data_store.db_wrapper.reader() as reader:
|
|
1774
|
-
result = await reader.execute("SELECT * FROM node")
|
|
1775
|
-
nodes = await result.fetchall()
|
|
1776
|
-
|
|
1777
|
-
keys = {node["key"] for node in nodes if node["key"] is not None}
|
|
1778
|
-
# Since one store got all its keys deleted, we're left only with the keys of the other store.
|
|
1779
|
-
assert len(keys) == total_keys_per_store
|
|
1780
|
-
assert keys.intersection(unique_keys_1) == set()
|
|
1781
|
-
assert keys.symmetric_difference(common_keys.union(unique_keys_2)) == set()
|
|
1782
|
-
|
|
1783
|
-
|
|
1784
|
-
@pytest.mark.anyio
|
|
1785
|
-
@pytest.mark.parametrize("pending_status", [Status.PENDING, Status.PENDING_BATCH])
|
|
1786
|
-
async def test_delete_store_data_protects_pending_roots(raw_data_store: DataStore, pending_status: Status) -> None:
|
|
1787
|
-
num_stores = 5
|
|
1788
|
-
total_keys = 15
|
|
1789
|
-
store_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)]
|
|
1790
|
-
for store_id in store_ids:
|
|
1791
|
-
await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
|
|
1792
|
-
original_keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)]
|
|
1793
|
-
batches = []
|
|
1794
|
-
keys_per_pending_root = 2
|
|
1795
|
-
|
|
1796
|
-
for i in range(num_stores - 1):
|
|
1797
|
-
start_index = i * keys_per_pending_root
|
|
1798
|
-
end_index = (i + 1) * keys_per_pending_root
|
|
1799
|
-
batch = [{"action": "insert", "key": key, "value": key} for key in original_keys[start_index:end_index]]
|
|
1800
|
-
batches.append(batch)
|
|
1801
|
-
for store_id, batch in zip(store_ids, batches):
|
|
1802
|
-
await raw_data_store.insert_batch(store_id, batch, status=pending_status)
|
|
1803
|
-
|
|
1804
|
-
store_id = store_ids[-1]
|
|
1805
|
-
batch = [{"action": "insert", "key": key, "value": key} for key in original_keys]
|
|
1806
|
-
await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED)
|
|
1807
|
-
|
|
1808
|
-
async with raw_data_store.db_wrapper.reader() as reader:
|
|
1809
|
-
result = await reader.execute("SELECT * FROM node")
|
|
1810
|
-
nodes = await result.fetchall()
|
|
1811
|
-
|
|
1812
|
-
keys = {node["key"] for node in nodes if node["key"] is not None}
|
|
1813
|
-
assert keys == set(original_keys)
|
|
1814
|
-
|
|
1815
|
-
await raw_data_store.delete_store_data(store_id)
|
|
1816
|
-
async with raw_data_store.db_wrapper.reader() as reader:
|
|
1817
|
-
result = await reader.execute("SELECT * FROM node")
|
|
1818
|
-
nodes = await result.fetchall()
|
|
1819
|
-
|
|
1820
|
-
keys = {node["key"] for node in nodes if node["key"] is not None}
|
|
1821
|
-
assert keys == set(original_keys[: (num_stores - 1) * keys_per_pending_root])
|
|
1822
|
-
|
|
1823
|
-
for index in range(num_stores - 1):
|
|
1824
|
-
store_id = store_ids[index]
|
|
1825
|
-
root = await raw_data_store.get_pending_root(store_id)
|
|
1826
|
-
assert root is not None
|
|
1827
|
-
await raw_data_store.change_root_status(root, Status.COMMITTED)
|
|
1828
|
-
kv = await raw_data_store.get_keys_values(store_id=store_id)
|
|
1829
|
-
start_index = index * keys_per_pending_root
|
|
1830
|
-
end_index = (index + 1) * keys_per_pending_root
|
|
1831
|
-
assert {pair.key for pair in kv} == set(original_keys[start_index:end_index])
|
|
1832
|
-
|
|
1833
|
-
|
|
1834
1706
|
@pytest.mark.anyio
|
|
1835
1707
|
@boolean_datacases(name="group_files_by_store", true="group by singleton", false="don't group by singleton")
|
|
1836
1708
|
@pytest.mark.parametrize("max_full_files", [1, 2, 5])
|
|
@@ -1843,7 +1715,6 @@ async def test_insert_from_delta_file(
|
|
|
1843
1715
|
group_files_by_store: bool,
|
|
1844
1716
|
max_full_files: int,
|
|
1845
1717
|
) -> None:
|
|
1846
|
-
await data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
|
|
1847
1718
|
num_files = 5
|
|
1848
1719
|
for generation in range(num_files):
|
|
1849
1720
|
key = generation.to_bytes(4, byteorder="big")
|
|
@@ -1854,30 +1725,34 @@ async def test_insert_from_delta_file(
|
|
|
1854
1725
|
store_id=store_id,
|
|
1855
1726
|
status=Status.COMMITTED,
|
|
1856
1727
|
)
|
|
1728
|
+
await data_store.add_node_hashes(store_id)
|
|
1857
1729
|
|
|
1858
1730
|
root = await data_store.get_tree_root(store_id=store_id)
|
|
1859
|
-
assert root.generation == num_files
|
|
1731
|
+
assert root.generation == num_files
|
|
1860
1732
|
root_hashes = []
|
|
1861
1733
|
|
|
1862
1734
|
tmp_path_1 = tmp_path.joinpath("1")
|
|
1863
1735
|
tmp_path_2 = tmp_path.joinpath("2")
|
|
1864
1736
|
|
|
1865
|
-
for generation in range(1, num_files +
|
|
1737
|
+
for generation in range(1, num_files + 1):
|
|
1866
1738
|
root = await data_store.get_tree_root(store_id=store_id, generation=generation)
|
|
1867
1739
|
await write_files_for_root(data_store, store_id, root, tmp_path_1, 0, False, group_files_by_store)
|
|
1868
1740
|
root_hashes.append(bytes32.zeros if root.node_hash is None else root.node_hash)
|
|
1869
1741
|
store_path = tmp_path_1.joinpath(f"{store_id}") if group_files_by_store else tmp_path_1
|
|
1870
1742
|
with os.scandir(store_path) as entries:
|
|
1871
1743
|
filenames = {entry.name for entry in entries}
|
|
1872
|
-
assert len(filenames) == 2 *
|
|
1744
|
+
assert len(filenames) == 2 * num_files
|
|
1873
1745
|
for filename in filenames:
|
|
1874
1746
|
if "full" in filename:
|
|
1875
1747
|
store_path.joinpath(filename).unlink()
|
|
1876
1748
|
with os.scandir(store_path) as entries:
|
|
1877
1749
|
filenames = {entry.name for entry in entries}
|
|
1878
|
-
assert len(filenames) == num_files
|
|
1750
|
+
assert len(filenames) == num_files
|
|
1879
1751
|
kv_before = await data_store.get_keys_values(store_id=store_id)
|
|
1880
1752
|
await data_store.rollback_to_generation(store_id, 0)
|
|
1753
|
+
with contextlib.suppress(FileNotFoundError):
|
|
1754
|
+
shutil.rmtree(data_store.merkle_blobs_path)
|
|
1755
|
+
|
|
1881
1756
|
root = await data_store.get_tree_root(store_id=store_id)
|
|
1882
1757
|
assert root.generation == 0
|
|
1883
1758
|
os.rename(store_path, tmp_path_2)
|
|
@@ -1950,12 +1825,13 @@ async def test_insert_from_delta_file(
|
|
|
1950
1825
|
assert success
|
|
1951
1826
|
|
|
1952
1827
|
root = await data_store.get_tree_root(store_id=store_id)
|
|
1953
|
-
assert root.generation == num_files
|
|
1828
|
+
assert root.generation == num_files
|
|
1954
1829
|
with os.scandir(store_path) as entries:
|
|
1955
1830
|
filenames = {entry.name for entry in entries}
|
|
1956
|
-
assert len(filenames) == num_files +
|
|
1831
|
+
assert len(filenames) == num_files + max_full_files - 1
|
|
1957
1832
|
kv = await data_store.get_keys_values(store_id=store_id)
|
|
1958
|
-
|
|
1833
|
+
# order agnostic comparison of the list
|
|
1834
|
+
assert set(kv) == set(kv_before)
|
|
1959
1835
|
|
|
1960
1836
|
|
|
1961
1837
|
@pytest.mark.anyio
|
|
@@ -1993,7 +1869,7 @@ async def test_get_node_by_key_with_overlapping_keys(raw_data_store: DataStore)
|
|
|
1993
1869
|
if random.randint(0, 4) == 0:
|
|
1994
1870
|
batch = [{"action": "delete", "key": key}]
|
|
1995
1871
|
await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED)
|
|
1996
|
-
with pytest.raises(
|
|
1872
|
+
with pytest.raises(chia_rs.datalayer.UnknownKeyError):
|
|
1997
1873
|
await raw_data_store.get_node_by_key(store_id=store_id, key=key)
|
|
1998
1874
|
|
|
1999
1875
|
|
|
@@ -2013,6 +1889,7 @@ async def test_insert_from_delta_file_correct_file_exists(
|
|
|
2013
1889
|
store_id=store_id,
|
|
2014
1890
|
status=Status.COMMITTED,
|
|
2015
1891
|
)
|
|
1892
|
+
await data_store.add_node_hashes(store_id)
|
|
2016
1893
|
|
|
2017
1894
|
root = await data_store.get_tree_root(store_id=store_id)
|
|
2018
1895
|
assert root.generation == num_files + 1
|
|
@@ -2023,18 +1900,20 @@ async def test_insert_from_delta_file_correct_file_exists(
|
|
|
2023
1900
|
root_hashes.append(bytes32.zeros if root.node_hash is None else root.node_hash)
|
|
2024
1901
|
store_path = tmp_path.joinpath(f"{store_id}") if group_files_by_store else tmp_path
|
|
2025
1902
|
with os.scandir(store_path) as entries:
|
|
2026
|
-
filenames = {entry.name for entry in entries}
|
|
1903
|
+
filenames = {entry.name for entry in entries if entry.name.endswith(".dat")}
|
|
2027
1904
|
assert len(filenames) == 2 * (num_files + 1)
|
|
2028
1905
|
for filename in filenames:
|
|
2029
1906
|
if "full" in filename:
|
|
2030
1907
|
store_path.joinpath(filename).unlink()
|
|
2031
1908
|
with os.scandir(store_path) as entries:
|
|
2032
|
-
filenames = {entry.name for entry in entries}
|
|
1909
|
+
filenames = {entry.name for entry in entries if entry.name.endswith(".dat")}
|
|
2033
1910
|
assert len(filenames) == num_files + 1
|
|
2034
1911
|
kv_before = await data_store.get_keys_values(store_id=store_id)
|
|
2035
1912
|
await data_store.rollback_to_generation(store_id, 0)
|
|
2036
1913
|
root = await data_store.get_tree_root(store_id=store_id)
|
|
2037
1914
|
assert root.generation == 0
|
|
1915
|
+
with contextlib.suppress(FileNotFoundError):
|
|
1916
|
+
shutil.rmtree(data_store.merkle_blobs_path)
|
|
2038
1917
|
|
|
2039
1918
|
sinfo = ServerInfo("http://127.0.0.1/8003", 0, 0)
|
|
2040
1919
|
success = await insert_from_delta_file(
|
|
@@ -2056,10 +1935,11 @@ async def test_insert_from_delta_file_correct_file_exists(
|
|
|
2056
1935
|
root = await data_store.get_tree_root(store_id=store_id)
|
|
2057
1936
|
assert root.generation == num_files + 1
|
|
2058
1937
|
with os.scandir(store_path) as entries:
|
|
2059
|
-
filenames = {entry.name for entry in entries}
|
|
1938
|
+
filenames = {entry.name for entry in entries if entry.name.endswith(".dat")}
|
|
2060
1939
|
assert len(filenames) == num_files + 2 # 1 full and 6 deltas
|
|
2061
1940
|
kv = await data_store.get_keys_values(store_id=store_id)
|
|
2062
|
-
|
|
1941
|
+
# order agnostic comparison of the list
|
|
1942
|
+
assert set(kv) == set(kv_before)
|
|
2063
1943
|
|
|
2064
1944
|
|
|
2065
1945
|
@pytest.mark.anyio
|
|
@@ -2079,6 +1959,7 @@ async def test_insert_from_delta_file_incorrect_file_exists(
|
|
|
2079
1959
|
store_id=store_id,
|
|
2080
1960
|
status=Status.COMMITTED,
|
|
2081
1961
|
)
|
|
1962
|
+
await data_store.add_node_hashes(store_id)
|
|
2082
1963
|
|
|
2083
1964
|
root = await data_store.get_tree_root(store_id=store_id)
|
|
2084
1965
|
assert root.generation == 2
|
|
@@ -2087,7 +1968,7 @@ async def test_insert_from_delta_file_incorrect_file_exists(
|
|
|
2087
1968
|
incorrect_root_hash = bytes32([0] * 31 + [1])
|
|
2088
1969
|
store_path = tmp_path.joinpath(f"{store_id}") if group_files_by_store else tmp_path
|
|
2089
1970
|
with os.scandir(store_path) as entries:
|
|
2090
|
-
filenames = [entry.name for entry in entries]
|
|
1971
|
+
filenames = [entry.name for entry in entries if entry.name.endswith(".dat")]
|
|
2091
1972
|
assert len(filenames) == 2
|
|
2092
1973
|
os.rename(
|
|
2093
1974
|
store_path.joinpath(filenames[0]),
|
|
@@ -2119,7 +2000,7 @@ async def test_insert_from_delta_file_incorrect_file_exists(
|
|
|
2119
2000
|
root = await data_store.get_tree_root(store_id=store_id)
|
|
2120
2001
|
assert root.generation == 1
|
|
2121
2002
|
with os.scandir(store_path) as entries:
|
|
2122
|
-
filenames = [entry.name for entry in entries]
|
|
2003
|
+
filenames = [entry.name for entry in entries if entry.name.endswith(".dat")]
|
|
2123
2004
|
assert len(filenames) == 0
|
|
2124
2005
|
|
|
2125
2006
|
|
|
@@ -2130,7 +2011,7 @@ async def test_insert_key_already_present(data_store: DataStore, store_id: bytes
|
|
|
2130
2011
|
await data_store.insert(
|
|
2131
2012
|
key=key, value=value, store_id=store_id, reference_node_hash=None, side=None, status=Status.COMMITTED
|
|
2132
2013
|
)
|
|
2133
|
-
with pytest.raises(
|
|
2014
|
+
with pytest.raises(KeyAlreadyPresentError):
|
|
2134
2015
|
await data_store.insert(key=key, value=value, store_id=store_id, reference_node_hash=None, side=None)
|
|
2135
2016
|
|
|
2136
2017
|
|
|
@@ -2145,7 +2026,7 @@ async def test_batch_insert_key_already_present(
|
|
|
2145
2026
|
value = b"bar"
|
|
2146
2027
|
changelist = [{"action": "insert", "key": key, "value": value}]
|
|
2147
2028
|
await data_store.insert_batch(store_id, changelist, Status.COMMITTED, use_batch_autoinsert)
|
|
2148
|
-
with pytest.raises(
|
|
2029
|
+
with pytest.raises(KeyAlreadyPresentError):
|
|
2149
2030
|
await data_store.insert_batch(store_id, changelist, Status.COMMITTED, use_batch_autoinsert)
|
|
2150
2031
|
|
|
2151
2032
|
|
|
@@ -2188,7 +2069,7 @@ async def test_update_keys(data_store: DataStore, store_id: bytes32, use_upsert:
|
|
|
2188
2069
|
|
|
2189
2070
|
|
|
2190
2071
|
@pytest.mark.anyio
|
|
2191
|
-
async def test_migration_unknown_version(data_store: DataStore) -> None:
|
|
2072
|
+
async def test_migration_unknown_version(data_store: DataStore, tmp_path: Path) -> None:
|
|
2192
2073
|
async with data_store.db_wrapper.writer() as writer:
|
|
2193
2074
|
await writer.execute(
|
|
2194
2075
|
"INSERT INTO schema(version_id) VALUES(:version_id)",
|
|
@@ -2197,228 +2078,284 @@ async def test_migration_unknown_version(data_store: DataStore) -> None:
|
|
|
2197
2078
|
},
|
|
2198
2079
|
)
|
|
2199
2080
|
with pytest.raises(Exception, match="Unknown version"):
|
|
2200
|
-
await data_store.migrate_db()
|
|
2201
|
-
|
|
2202
|
-
|
|
2203
|
-
async def _check_ancestors(
|
|
2204
|
-
data_store: DataStore, store_id: bytes32, root_hash: bytes32
|
|
2205
|
-
) -> dict[bytes32, Optional[bytes32]]:
|
|
2206
|
-
ancestors: dict[bytes32, Optional[bytes32]] = {}
|
|
2207
|
-
root_node: Node = await data_store.get_node(root_hash)
|
|
2208
|
-
queue: list[Node] = [root_node]
|
|
2209
|
-
|
|
2210
|
-
while queue:
|
|
2211
|
-
node = queue.pop(0)
|
|
2212
|
-
if isinstance(node, InternalNode):
|
|
2213
|
-
left_node = await data_store.get_node(node.left_hash)
|
|
2214
|
-
right_node = await data_store.get_node(node.right_hash)
|
|
2215
|
-
ancestors[left_node.hash] = node.hash
|
|
2216
|
-
ancestors[right_node.hash] = node.hash
|
|
2217
|
-
queue.append(left_node)
|
|
2218
|
-
queue.append(right_node)
|
|
2219
|
-
|
|
2220
|
-
ancestors[root_hash] = None
|
|
2221
|
-
for node_hash, ancestor_hash in ancestors.items():
|
|
2222
|
-
ancestor_node = await data_store._get_one_ancestor(node_hash, store_id)
|
|
2223
|
-
if ancestor_hash is None:
|
|
2224
|
-
assert ancestor_node is None
|
|
2225
|
-
else:
|
|
2226
|
-
assert ancestor_node is not None
|
|
2227
|
-
assert ancestor_node.hash == ancestor_hash
|
|
2081
|
+
await data_store.migrate_db(tmp_path)
|
|
2228
2082
|
|
|
2229
|
-
|
|
2083
|
+
|
|
2084
|
+
@boolean_datacases(name="group_files_by_store", false="group by singleton", true="don't group by singleton")
|
|
2085
|
+
@pytest.mark.anyio
|
|
2086
|
+
async def test_migration(
|
|
2087
|
+
data_store: DataStore,
|
|
2088
|
+
store_id: bytes32,
|
|
2089
|
+
group_files_by_store: bool,
|
|
2090
|
+
tmp_path: Path,
|
|
2091
|
+
) -> None:
|
|
2092
|
+
num_batches = 10
|
|
2093
|
+
num_ops_per_batch = 100
|
|
2094
|
+
keys: list[bytes] = []
|
|
2095
|
+
counter = 0
|
|
2096
|
+
random = Random()
|
|
2097
|
+
random.seed(100, version=2)
|
|
2098
|
+
|
|
2099
|
+
for batch in range(num_batches):
|
|
2100
|
+
changelist: list[dict[str, Any]] = []
|
|
2101
|
+
for operation in range(num_ops_per_batch):
|
|
2102
|
+
if random.randint(0, 4) > 0 or len(keys) == 0:
|
|
2103
|
+
key = counter.to_bytes(4, byteorder="big")
|
|
2104
|
+
value = (2 * counter).to_bytes(4, byteorder="big")
|
|
2105
|
+
keys.append(key)
|
|
2106
|
+
changelist.append({"action": "insert", "key": key, "value": value})
|
|
2107
|
+
else:
|
|
2108
|
+
key = random.choice(keys)
|
|
2109
|
+
keys.remove(key)
|
|
2110
|
+
changelist.append({"action": "delete", "key": key})
|
|
2111
|
+
counter += 1
|
|
2112
|
+
await data_store.insert_batch(store_id, changelist, status=Status.COMMITTED)
|
|
2113
|
+
root = await data_store.get_tree_root(store_id)
|
|
2114
|
+
await data_store.add_node_hashes(store_id)
|
|
2115
|
+
await write_files_for_root(data_store, store_id, root, tmp_path, 0, group_by_store=group_files_by_store)
|
|
2116
|
+
|
|
2117
|
+
kv_before = await data_store.get_keys_values(store_id=store_id)
|
|
2118
|
+
async with data_store.db_wrapper.writer(foreign_key_enforcement_enabled=False) as writer:
|
|
2119
|
+
tables = [table for table in table_columns.keys() if table != "root"]
|
|
2120
|
+
for table in tables:
|
|
2121
|
+
await writer.execute(f"DELETE FROM {table}")
|
|
2122
|
+
|
|
2123
|
+
with contextlib.suppress(FileNotFoundError):
|
|
2124
|
+
shutil.rmtree(data_store.merkle_blobs_path)
|
|
2125
|
+
with contextlib.suppress(FileNotFoundError):
|
|
2126
|
+
shutil.rmtree(data_store.key_value_blobs_path)
|
|
2127
|
+
|
|
2128
|
+
data_store.recent_merkle_blobs = LRUCache(capacity=128)
|
|
2129
|
+
assert await data_store.get_keys_values(store_id=store_id) == []
|
|
2130
|
+
await data_store.migrate_db(tmp_path)
|
|
2131
|
+
# order agnostic comparison of the list
|
|
2132
|
+
assert set(await data_store.get_keys_values(store_id=store_id)) == set(kv_before)
|
|
2230
2133
|
|
|
2231
2134
|
|
|
2232
2135
|
@pytest.mark.anyio
|
|
2233
|
-
|
|
2234
|
-
|
|
2136
|
+
@pytest.mark.parametrize("num_keys", [10, 1000])
|
|
2137
|
+
async def test_get_existing_hashes(
|
|
2138
|
+
data_store: DataStore,
|
|
2139
|
+
store_id: bytes32,
|
|
2140
|
+
num_keys: int,
|
|
2141
|
+
) -> None:
|
|
2235
2142
|
changelist: list[dict[str, Any]] = []
|
|
2236
|
-
for
|
|
2237
|
-
|
|
2238
|
-
|
|
2239
|
-
|
|
2240
|
-
|
|
2241
|
-
|
|
2242
|
-
status=Status.PENDING,
|
|
2243
|
-
)
|
|
2143
|
+
for i in range(num_keys):
|
|
2144
|
+
key = i.to_bytes(4, byteorder="big")
|
|
2145
|
+
value = (2 * i).to_bytes(4, byteorder="big")
|
|
2146
|
+
changelist.append({"action": "insert", "key": key, "value": value})
|
|
2147
|
+
await data_store.insert_batch(store_id, changelist, status=Status.COMMITTED)
|
|
2148
|
+
await data_store.add_node_hashes(store_id)
|
|
2244
2149
|
|
|
2245
|
-
|
|
2246
|
-
|
|
2247
|
-
|
|
2248
|
-
|
|
2249
|
-
|
|
2250
|
-
|
|
2251
|
-
assert
|
|
2252
|
-
await _check_ancestors(data_store, store_id, pending_root.node_hash)
|
|
2150
|
+
root = await data_store.get_tree_root(store_id=store_id)
|
|
2151
|
+
merkle_blob = await data_store.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
|
|
2152
|
+
hash_to_index = merkle_blob.get_hashes_indexes()
|
|
2153
|
+
existing_hashes = list(hash_to_index.keys())
|
|
2154
|
+
not_existing_hashes = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_keys)]
|
|
2155
|
+
result = await data_store.get_existing_hashes(existing_hashes + not_existing_hashes, store_id)
|
|
2156
|
+
assert result == set(existing_hashes)
|
|
2253
2157
|
|
|
2254
2158
|
|
|
2255
2159
|
@pytest.mark.anyio
|
|
2256
|
-
|
|
2257
|
-
|
|
2258
|
-
|
|
2259
|
-
|
|
2260
|
-
|
|
2261
|
-
|
|
2262
|
-
|
|
2263
|
-
|
|
2264
|
-
status=Status.COMMITTED,
|
|
2265
|
-
)
|
|
2266
|
-
root = await data_store.get_tree_root(store_id=store_id)
|
|
2267
|
-
assert root.node_hash is not None
|
|
2268
|
-
ancestors = await _check_ancestors(data_store, store_id, root.node_hash)
|
|
2160
|
+
@pytest.mark.parametrize(argnames="size_offset", argvalues=[-1, 0, 1])
|
|
2161
|
+
async def test_basic_key_value_db_vs_disk_cutoff(
|
|
2162
|
+
data_store: DataStore,
|
|
2163
|
+
store_id: bytes32,
|
|
2164
|
+
seeded_random: random.Random,
|
|
2165
|
+
size_offset: int,
|
|
2166
|
+
) -> None:
|
|
2167
|
+
size = data_store.prefer_db_kv_blob_length + size_offset
|
|
2269
2168
|
|
|
2270
|
-
|
|
2271
|
-
|
|
2272
|
-
|
|
2273
|
-
|
|
2274
|
-
|
|
2275
|
-
|
|
2276
|
-
|
|
2277
|
-
|
|
2278
|
-
|
|
2279
|
-
|
|
2280
|
-
|
|
2281
|
-
|
|
2282
|
-
cursor = await reader.execute(
|
|
2283
|
-
"SELECT MAX(generation) AS generation FROM ancestors WHERE hash == :hash AND ancestor IS NULL",
|
|
2284
|
-
{"hash": node_hash},
|
|
2285
|
-
)
|
|
2169
|
+
blob = bytes(seeded_random.getrandbits(8) for _ in range(size))
|
|
2170
|
+
blob_hash = bytes32(sha256(blob).digest())
|
|
2171
|
+
async with data_store.db_wrapper.writer() as writer:
|
|
2172
|
+
with data_store.manage_kv_files(store_id):
|
|
2173
|
+
await data_store.add_kvid(blob=blob, store_id=store_id, writer=writer)
|
|
2174
|
+
|
|
2175
|
+
file_exists = data_store.get_key_value_path(store_id=store_id, blob_hash=blob_hash).exists()
|
|
2176
|
+
async with data_store.db_wrapper.writer() as writer:
|
|
2177
|
+
async with writer.execute(
|
|
2178
|
+
"SELECT blob FROM ids WHERE hash = :blob_hash",
|
|
2179
|
+
{"blob_hash": blob_hash},
|
|
2180
|
+
) as cursor:
|
|
2286
2181
|
row = await cursor.fetchone()
|
|
2287
2182
|
assert row is not None
|
|
2288
|
-
|
|
2289
|
-
assert generation <= root_generation
|
|
2290
|
-
if generation == root_generation:
|
|
2291
|
-
current_generation_count += 1
|
|
2292
|
-
else:
|
|
2293
|
-
previous_generation_count += 1
|
|
2183
|
+
db_blob: Optional[bytes] = row["blob"]
|
|
2294
2184
|
|
|
2295
|
-
|
|
2296
|
-
|
|
2185
|
+
if size_offset <= 0:
|
|
2186
|
+
assert not file_exists
|
|
2187
|
+
assert db_blob == blob
|
|
2188
|
+
else:
|
|
2189
|
+
assert file_exists
|
|
2190
|
+
assert db_blob is None
|
|
2297
2191
|
|
|
2298
2192
|
|
|
2299
|
-
|
|
2300
|
-
|
|
2301
|
-
|
|
2302
|
-
|
|
2303
|
-
|
|
2304
|
-
|
|
2193
|
+
@pytest.mark.anyio
|
|
2194
|
+
@pytest.mark.parametrize(argnames="size_offset", argvalues=[-1, 0, 1])
|
|
2195
|
+
@pytest.mark.parametrize(argnames="limit_change", argvalues=[-2, -1, 1, 2])
|
|
2196
|
+
async def test_changing_key_value_db_vs_disk_cutoff(
|
|
2197
|
+
data_store: DataStore,
|
|
2198
|
+
store_id: bytes32,
|
|
2199
|
+
seeded_random: random.Random,
|
|
2200
|
+
size_offset: int,
|
|
2201
|
+
limit_change: int,
|
|
2202
|
+
) -> None:
|
|
2203
|
+
size = data_store.prefer_db_kv_blob_length + size_offset
|
|
2305
2204
|
|
|
2306
|
-
|
|
2307
|
-
|
|
2308
|
-
|
|
2309
|
-
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
|
|
2313
|
-
queue.append(right_node)
|
|
2205
|
+
blob = bytes(seeded_random.getrandbits(8) for _ in range(size))
|
|
2206
|
+
async with data_store.db_wrapper.writer() as writer:
|
|
2207
|
+
with data_store.manage_kv_files(store_id):
|
|
2208
|
+
kv_id = await data_store.add_kvid(blob=blob, store_id=store_id, writer=writer)
|
|
2209
|
+
|
|
2210
|
+
data_store.prefer_db_kv_blob_length += limit_change
|
|
2211
|
+
retrieved_blob = await data_store.get_blob_from_kvid(kv_id=kv_id, store_id=store_id)
|
|
2314
2212
|
|
|
2315
|
-
|
|
2213
|
+
assert blob == retrieved_blob
|
|
2316
2214
|
|
|
2317
2215
|
|
|
2318
2216
|
@pytest.mark.anyio
|
|
2319
|
-
async def
|
|
2320
|
-
|
|
2321
|
-
|
|
2217
|
+
async def test_get_keys_both_disk_and_db(
|
|
2218
|
+
data_store: DataStore,
|
|
2219
|
+
store_id: bytes32,
|
|
2220
|
+
seeded_random: random.Random,
|
|
2221
|
+
) -> None:
|
|
2222
|
+
inserted_keys: set[bytes] = set()
|
|
2322
2223
|
|
|
2323
|
-
for
|
|
2324
|
-
|
|
2325
|
-
|
|
2326
|
-
|
|
2327
|
-
store_id=store_id,
|
|
2328
|
-
|
|
2329
|
-
status=Status.COMMITTED,
|
|
2330
|
-
)
|
|
2224
|
+
for size_offset in [-1, 0, 1]:
|
|
2225
|
+
size = data_store.prefer_db_kv_blob_length + size_offset
|
|
2226
|
+
|
|
2227
|
+
blob = bytes(seeded_random.getrandbits(8) for _ in range(size))
|
|
2228
|
+
await data_store.insert(key=blob, value=b"", store_id=store_id, status=Status.COMMITTED)
|
|
2229
|
+
inserted_keys.add(blob)
|
|
2331
2230
|
|
|
2332
|
-
|
|
2333
|
-
nodes = await data_store.get_nodes([node.hash for node in expected_nodes])
|
|
2334
|
-
assert nodes == expected_nodes
|
|
2231
|
+
retrieved_keys = set(await data_store.get_keys(store_id=store_id))
|
|
2335
2232
|
|
|
2336
|
-
|
|
2337
|
-
node_hash_2 = bytes32([0] * 31 + [1])
|
|
2338
|
-
with pytest.raises(Exception, match=f"^Nodes not found for hashes: {node_hash.hex()}, {node_hash_2.hex()}"):
|
|
2339
|
-
await data_store.get_nodes([node_hash, node_hash_2] + [node.hash for node in expected_nodes])
|
|
2233
|
+
assert retrieved_keys == inserted_keys
|
|
2340
2234
|
|
|
2341
2235
|
|
|
2342
2236
|
@pytest.mark.anyio
|
|
2343
|
-
|
|
2344
|
-
@pytest.mark.parametrize("batch_size", [25, 100, 500])
|
|
2345
|
-
async def test_get_leaf_at_minimum_height(
|
|
2237
|
+
async def test_get_keys_values_both_disk_and_db(
|
|
2346
2238
|
data_store: DataStore,
|
|
2347
2239
|
store_id: bytes32,
|
|
2348
|
-
|
|
2349
|
-
batch_size: int,
|
|
2240
|
+
seeded_random: random.Random,
|
|
2350
2241
|
) -> None:
|
|
2351
|
-
|
|
2352
|
-
|
|
2353
|
-
|
|
2242
|
+
inserted_keys_values: dict[bytes, bytes] = {}
|
|
2243
|
+
|
|
2244
|
+
for size_offset in [-1, 0, 1]:
|
|
2245
|
+
size = data_store.prefer_db_kv_blob_length + size_offset
|
|
2246
|
+
|
|
2247
|
+
key = bytes(seeded_random.getrandbits(8) for _ in range(size))
|
|
2248
|
+
value = bytes(seeded_random.getrandbits(8) for _ in range(size))
|
|
2249
|
+
await data_store.insert(key=key, value=value, store_id=store_id, status=Status.COMMITTED)
|
|
2250
|
+
inserted_keys_values[key] = value
|
|
2251
|
+
|
|
2252
|
+
terminal_nodes = await data_store.get_keys_values(store_id=store_id)
|
|
2253
|
+
retrieved_keys_values = {node.key: node.value for node in terminal_nodes}
|
|
2254
|
+
|
|
2255
|
+
assert retrieved_keys_values == inserted_keys_values
|
|
2256
|
+
|
|
2354
2257
|
|
|
2355
|
-
|
|
2356
|
-
|
@@ old lines 2357-2424, new lines 2258-2334 @@
+@pytest.mark.anyio
+@boolean_datacases(name="success", false="invalid file", true="valid file")
+async def test_db_data_insert_from_file(
+    data_store: DataStore,
+    store_id: bytes32,
+    tmp_path: Path,
+    seeded_random: random.Random,
+    success: bool,
+) -> None:
+    num_keys = 1000
+    db_uri = generate_in_memory_db_uri()
+
+    async with DataStore.managed(
+        database=db_uri,
+        uri=True,
+        merkle_blobs_path=tmp_path.joinpath("merkle-blobs-tmp"),
+        key_value_blobs_path=tmp_path.joinpath("key-value-blobs-tmp"),
+    ) as tmp_data_store:
+        await tmp_data_store.create_tree(store_id, status=Status.COMMITTED)
         changelist: list[dict[str, Any]] = []
+        for _ in range(num_keys):
+            use_file = seeded_random.choice([True, False])
+            assert tmp_data_store.prefer_db_kv_blob_length > 7
+            size = tmp_data_store.prefer_db_kv_blob_length + 1 if use_file else 8
+            key = seeded_random.randbytes(size)
+            value = seeded_random.randbytes(size)
+            changelist.append({"action": "insert", "key": key, "value": value})
+
+        await tmp_data_store.insert_batch(store_id, changelist, status=Status.COMMITTED)
+        root = await tmp_data_store.get_tree_root(store_id)
+        files_path = tmp_path.joinpath("files")
+        await write_files_for_root(tmp_data_store, store_id, root, files_path, 1000)
+        assert root.node_hash is not None
+        filename = get_delta_filename_path(files_path, store_id, root.node_hash, 1)
+        assert filename.exists()

-
-
-        changelist.append({"action": "upsert", "key": value_bytes, "value": value_bytes})
-    await data_store.insert_batch(
-        store_id=store_id,
-        changelist=changelist,
-        status=Status.COMMITTED,
-    )
+    root_hash = bytes32([0] * 31 + [1]) if not success else root.node_hash
+    sinfo = ServerInfo("http://127.0.0.1/8003", 0, 0)

-
-
-
-
-        key=value_bytes,
-        value=value_bytes,
-        store_id=store_id,
-        status=Status.COMMITTED,
-    )
+    if not success:
+        target_filename_path = get_delta_filename_path(files_path, store_id, root_hash, 1)
+        shutil.copyfile(filename, target_filename_path)
+        assert target_filename_path.exists()

- [old lines 2378-2424 removed; content not shown]
+    keys_value_path = data_store.key_value_blobs_path.joinpath(store_id.hex())
+    assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) == 0
+
+    is_success = await insert_from_delta_file(
+        data_store=data_store,
+        store_id=store_id,
+        existing_generation=0,
+        target_generation=1,
+        root_hashes=[root_hash],
+        server_info=sinfo,
+        client_foldername=files_path,
+        timeout=aiohttp.ClientTimeout(total=15, sock_connect=5),
+        log=log,
+        proxy_url="",
+        downloader=None,
+    )
+    assert is_success == success
+
+    async with data_store.db_wrapper.reader() as reader:
+        async with reader.execute("SELECT COUNT(*) FROM ids") as cursor:
+            row_count = await cursor.fetchone()
+            assert row_count is not None
+            if success:
+                assert row_count[0] > 0
+            else:
+                assert row_count[0] == 0
+
+    if success:
+        assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) > 0
+    else:
+        assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) == 0
+
+
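`test_db_data_insert_from_file` builds a store in a temporary `DataStore`, writes its delta file, then replays it into the main store via `insert_from_delta_file`, once under the real root hash and once under a bogus one; in the failure case it expects no `ids` rows and no key/value blob files to remain. The `@boolean_datacases(...)` decorator is a chia test helper; the same two cases could presumably be expressed with plain pytest parametrization, roughly as in this sketch (the test body here is a placeholder, not the real test):

```python
# Rough equivalent of the two boolean_datacases cases using plain pytest
# parametrization; test_example is a placeholder body, not the real test.
import pytest


@pytest.mark.parametrize(
    "success",
    [pytest.param(False, id="invalid file"), pytest.param(True, id="valid file")],
)
def test_example(success: bool) -> None:
    # The real test feeds insert_from_delta_file a bogus root hash when
    # success is False and then asserts that no rows or blob files remain.
    assert isinstance(success, bool)
```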
@@ new lines 2335-2361 @@
+@pytest.mark.anyio
+async def test_manage_kv_files(
+    data_store: DataStore,
+    store_id: bytes32,
+    seeded_random: random.Random,
+) -> None:
+    num_keys = 1000
+    num_files = 0
+    keys_value_path = data_store.key_value_blobs_path.joinpath(store_id.hex())
+
+    with pytest.raises(Exception, match="Test exception"):
+        async with data_store.db_wrapper.writer() as writer:
+            with data_store.manage_kv_files(store_id):
+                for _ in range(num_keys):
+                    use_file = seeded_random.choice([True, False])
+                    assert data_store.prefer_db_kv_blob_length > 7
+                    size = data_store.prefer_db_kv_blob_length + 1 if use_file else 8
+                    key = seeded_random.randbytes(size)
+                    value = seeded_random.randbytes(size)
+                    await data_store.add_key_value(key, value, store_id, writer)
+                    num_files += 2 * use_file
+
+                assert num_files > 0
+                assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) == num_files
+                raise Exception("Test exception")
+
+    assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) == 0