chia-blockchain 2.5.6rc2__py3-none-any.whl → 2.5.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (370)
  1. chia/_tests/blockchain/blockchain_test_utils.py +6 -7
  2. chia/_tests/blockchain/test_augmented_chain.py +4 -3
  3. chia/_tests/blockchain/test_blockchain.py +10 -5
  4. chia/_tests/clvm/coin_store.py +1 -1
  5. chia/_tests/cmds/cmd_test_utils.py +84 -97
  6. chia/_tests/cmds/test_dev_gh.py +1 -1
  7. chia/_tests/cmds/test_farm_cmd.py +56 -2
  8. chia/_tests/cmds/wallet/test_consts.py +3 -1
  9. chia/_tests/cmds/wallet/test_did.py +3 -8
  10. chia/_tests/cmds/wallet/test_nft.py +6 -6
  11. chia/_tests/cmds/wallet/test_notifications.py +39 -21
  12. chia/_tests/cmds/wallet/test_vcs.py +2 -1
  13. chia/_tests/cmds/wallet/test_wallet.py +160 -136
  14. chia/_tests/conftest.py +51 -26
  15. chia/_tests/core/cmds/test_wallet.py +4 -3
  16. chia/_tests/core/consensus/test_pot_iterations.py +71 -24
  17. chia/_tests/core/custom_types/test_proof_of_space.py +60 -30
  18. chia/_tests/core/custom_types/test_spend_bundle.py +1 -4
  19. chia/_tests/core/data_layer/conftest.py +7 -2
  20. chia/_tests/core/data_layer/old_format/__init__.py +0 -0
  21. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-005876c1cdc4d5f1726551b207b9f63efc9cd2f72df80a3a26a1ba73d40d6745-delta-23-v1.0.dat +0 -0
  22. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-005876c1cdc4d5f1726551b207b9f63efc9cd2f72df80a3a26a1ba73d40d6745-full-23-v1.0.dat +0 -0
  23. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-01b36e72a975cdc00d6514eea81668d19e8ea3150217ae98cb3361688a016fab-delta-9-v1.0.dat +0 -0
  24. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-01b36e72a975cdc00d6514eea81668d19e8ea3150217ae98cb3361688a016fab-full-9-v1.0.dat +0 -0
  25. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-06147c3b12d73e9b83b686a8c10b4a36a513c8a93c0ff99ae197f06326278be9-delta-5-v1.0.dat +0 -0
  26. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-06147c3b12d73e9b83b686a8c10b4a36a513c8a93c0ff99ae197f06326278be9-full-5-v1.0.dat +0 -0
  27. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-073c051a5934ad3b8db39eee2189e4300e55f48aaa17ff4ae30eeae088ff544a-delta-22-v1.0.dat +0 -0
  28. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-073c051a5934ad3b8db39eee2189e4300e55f48aaa17ff4ae30eeae088ff544a-full-22-v1.0.dat +0 -0
  29. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-0cc077559b9c7b4aefe8f8f591c195e0779bebdf89f2ad8285a00ea5f859d965-delta-1-v1.0.dat +0 -0
  30. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-0cc077559b9c7b4aefe8f8f591c195e0779bebdf89f2ad8285a00ea5f859d965-full-1-v1.0.dat +0 -0
  31. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-16377275567b723b20936d3f1ec0a2fd83f6ac379b922351a5e4c54949069f3b-delta-2-v1.0.dat +0 -0
  32. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-16377275567b723b20936d3f1ec0a2fd83f6ac379b922351a5e4c54949069f3b-full-2-v1.0.dat +0 -0
  33. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-1cb824a7a5f02cd30ac6c38e8f6216780d9bfa2d24811d282a368dcd541438a7-delta-29-v1.0.dat +0 -0
  34. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-1cb824a7a5f02cd30ac6c38e8f6216780d9bfa2d24811d282a368dcd541438a7-full-29-v1.0.dat +0 -0
  35. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-27b89dc4809ebc5a3b87757d35e95e2761d978cf121e44fa2773a5c06e4cc7b5-delta-28-v1.0.dat +0 -0
  36. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-27b89dc4809ebc5a3b87757d35e95e2761d978cf121e44fa2773a5c06e4cc7b5-full-28-v1.0.dat +0 -0
  37. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-28a6b7c134abfaeb0ab58a018313f6c87a61a40a4d9ec9bedf53aa1d12f3ee37-delta-7-v1.0.dat +0 -0
  38. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-28a6b7c134abfaeb0ab58a018313f6c87a61a40a4d9ec9bedf53aa1d12f3ee37-full-7-v1.0.dat +0 -0
  39. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-30a6bfe7cecbeda259a295dc6de3a436357f52388c3b03d86901e7da68565aeb-delta-19-v1.0.dat +0 -0
  40. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-30a6bfe7cecbeda259a295dc6de3a436357f52388c3b03d86901e7da68565aeb-full-19-v1.0.dat +0 -0
  41. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-343a2bf9add798e3ac2e6a571823cf9fa7e8a1bed532143354ead2648bd036ef-delta-10-v1.0.dat +0 -0
  42. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-343a2bf9add798e3ac2e6a571823cf9fa7e8a1bed532143354ead2648bd036ef-full-10-v1.0.dat +0 -0
  43. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4d90efbc1fb3df324193831ea4a57dd5e10e67d9653343eb18d178272adb0447-delta-17-v1.0.dat +0 -0
  44. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4d90efbc1fb3df324193831ea4a57dd5e10e67d9653343eb18d178272adb0447-full-17-v1.0.dat +0 -0
  45. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4dd2ea099e91635c441f40b36d3f84078a2d818d2dc601c7278e72cbdfe3eca8-delta-20-v1.0.dat +0 -0
  46. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4dd2ea099e91635c441f40b36d3f84078a2d818d2dc601c7278e72cbdfe3eca8-full-20-v1.0.dat +0 -0
  47. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-509effbdca78639023b933ce6c08a0465fb247e1cd5329e9e9c553940e4b6e46-delta-31-v1.0.dat +0 -0
  48. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-509effbdca78639023b933ce6c08a0465fb247e1cd5329e9e9c553940e4b6e46-full-31-v1.0.dat +0 -0
  49. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5379a4d9ff29c29d1ef0906d22e82c52472753d31806189ab813c43365341b78-delta-40-v1.0.dat +0 -0
  50. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5379a4d9ff29c29d1ef0906d22e82c52472753d31806189ab813c43365341b78-full-40-v1.0.dat +0 -0
  51. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-55908eda5686a8f89e4c50672cbe893ec1734fb23449dc03325efe7c414f9aa4-delta-49-v1.0.dat +0 -0
  52. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-55908eda5686a8f89e4c50672cbe893ec1734fb23449dc03325efe7c414f9aa4-full-49-v1.0.dat +0 -0
  53. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-57cc2691fb1fb986c99a58bcb0e029d0cd0cff41553d703147c54196d7d9ca63-delta-14-v1.0.dat +0 -0
  54. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-57cc2691fb1fb986c99a58bcb0e029d0cd0cff41553d703147c54196d7d9ca63-full-14-v1.0.dat +0 -0
  55. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5943bf8ae4f5e59969d8570e4f40a8223299febdcfbcf188b3b3e2ab11044e18-delta-34-v1.0.dat +0 -0
  56. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5943bf8ae4f5e59969d8570e4f40a8223299febdcfbcf188b3b3e2ab11044e18-full-34-v1.0.dat +0 -0
  57. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6518527b7c939bee60ce6b024cbe90d3b9d8913c56b8ce11a4df5da7ff7db1c8-delta-8-v1.0.dat +0 -0
  58. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6518527b7c939bee60ce6b024cbe90d3b9d8913c56b8ce11a4df5da7ff7db1c8-full-8-v1.0.dat +0 -0
  59. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-66ff26a26620379e14a7c91252d27ee4dbe06ad69a3a390a88642fe757f2b288-delta-45-v1.0.dat +0 -0
  60. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-66ff26a26620379e14a7c91252d27ee4dbe06ad69a3a390a88642fe757f2b288-full-45-v1.0.dat +0 -0
  61. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6bd0a508ee2c4afbe9d4daa811139fd6e54e7f4e16850cbce999fa30f8bdccd2-delta-6-v1.0.dat +0 -0
  62. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6bd0a508ee2c4afbe9d4daa811139fd6e54e7f4e16850cbce999fa30f8bdccd2-full-6-v1.0.dat +0 -0
  63. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6ce850d0d77ca743fcc2fc792747472e5d2c1c0813aa43abbb370554428fc897-delta-48-v1.0.dat +0 -0
  64. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6ce850d0d77ca743fcc2fc792747472e5d2c1c0813aa43abbb370554428fc897-full-48-v1.0.dat +0 -0
  65. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6eb4ca2e1552b156c5969396b49070eb08ad6c96b347359387519be59f7ccaed-delta-26-v1.0.dat +0 -0
  66. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6eb4ca2e1552b156c5969396b49070eb08ad6c96b347359387519be59f7ccaed-full-26-v1.0.dat +0 -0
  67. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-71c797fb7592d3f0a5a20c79ab8497ddaa0fd9ec17712e109d25c91b3f3c76e5-delta-3-v1.0.dat +0 -0
  68. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-71c797fb7592d3f0a5a20c79ab8497ddaa0fd9ec17712e109d25c91b3f3c76e5-full-3-v1.0.dat +0 -0
  69. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-73357026053d5a4969e7a6b9aeeef91c14cc6d5f32fc700fe6d21d2a1b22496c-delta-25-v1.0.dat +0 -0
  70. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-73357026053d5a4969e7a6b9aeeef91c14cc6d5f32fc700fe6d21d2a1b22496c-full-25-v1.0.dat +0 -0
  71. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-7c897e5c46e834ced65bde7de87716acfaa5dffbdb30b5cd9377d8c319df2034-delta-35-v1.0.dat +0 -0
  72. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-7c897e5c46e834ced65bde7de87716acfaa5dffbdb30b5cd9377d8c319df2034-full-35-v1.0.dat +0 -0
  73. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-87b8394d80d08117a5a1cd04ed8a682564eab7197a2c090159863591b5108874-delta-4-v1.0.dat +0 -0
  74. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-87b8394d80d08117a5a1cd04ed8a682564eab7197a2c090159863591b5108874-full-4-v1.0.dat +0 -0
  75. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-89eb40b9cc0921c5f5c3feb20927c13a9ada5760f82d219dcee153b7d400165c-delta-41-v1.0.dat +0 -0
  76. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-89eb40b9cc0921c5f5c3feb20927c13a9ada5760f82d219dcee153b7d400165c-full-41-v1.0.dat +0 -0
  77. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8b649433156b8c924436cdec9c6de26106fd6f73a0528570f48748f7b40d7f8a-delta-21-v1.0.dat +0 -0
  78. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8b649433156b8c924436cdec9c6de26106fd6f73a0528570f48748f7b40d7f8a-full-21-v1.0.dat +0 -0
  79. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8d364023a0834c8c3077e236a465493acbf488e4f9d1f4c6cc230343c10a8f7d-delta-42-v1.0.dat +0 -0
  80. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8d364023a0834c8c3077e236a465493acbf488e4f9d1f4c6cc230343c10a8f7d-full-42-v1.0.dat +0 -0
  81. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-925689e24a3d98d98676d816cdd8b73e7b2df057d9d4503da9b27bf91d79666c-delta-38-v1.0.dat +0 -0
  82. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-925689e24a3d98d98676d816cdd8b73e7b2df057d9d4503da9b27bf91d79666c-full-38-v1.0.dat +0 -0
  83. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-937be3d428b19f521be4f98faecc3307ae11ee731c76992f417fa4268d13859e-delta-11-v1.0.dat +0 -0
  84. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-937be3d428b19f521be4f98faecc3307ae11ee731c76992f417fa4268d13859e-full-11-v1.0.dat +0 -0
  85. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-97f34af499b79e2111fc296a598fc9654c2467ea038dfea41fd58241fb3642de-delta-32-v1.0.dat +0 -0
  86. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-97f34af499b79e2111fc296a598fc9654c2467ea038dfea41fd58241fb3642de-full-32-v1.0.dat +0 -0
  87. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-9d1b737243b8a1d0022f2b36ac53333c6280354a74d77f2a3642dcab35204e59-delta-33-v1.0.dat +0 -0
  88. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-9d1b737243b8a1d0022f2b36ac53333c6280354a74d77f2a3642dcab35204e59-full-33-v1.0.dat +0 -0
  89. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-a6663f98ef6ddf6db55f01163e34bb2e87aa82f0347e79ce31e8dbfa390c480c-delta-47-v1.0.dat +0 -0
  90. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-a6663f98ef6ddf6db55f01163e34bb2e87aa82f0347e79ce31e8dbfa390c480c-full-47-v1.0.dat +0 -0
  91. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-aa77376d1ccd3664e5c6366e010c52a978fedbf40f5ce262fee71b2e7fe0c6a9-delta-50-v1.0.dat +0 -0
  92. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-aa77376d1ccd3664e5c6366e010c52a978fedbf40f5ce262fee71b2e7fe0c6a9-full-50-v1.0.dat +0 -0
  93. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b0f28514741ed1a71f5c6544bf92f9e0e493c5f3cf28328909771d8404eff626-delta-24-v1.0.dat +0 -0
  94. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b0f28514741ed1a71f5c6544bf92f9e0e493c5f3cf28328909771d8404eff626-full-24-v1.0.dat +0 -0
  95. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b3efee5358e6eb89ab3b60db2d128d57eef39e8538fb63c5632412d4f8e7d09e-delta-44-v1.0.dat +0 -0
  96. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b3efee5358e6eb89ab3b60db2d128d57eef39e8538fb63c5632412d4f8e7d09e-full-44-v1.0.dat +0 -0
  97. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bb0b56b6eb7acbb4e80893b04c72412fe833418232e1ed7b06d97d7a7f08b4e1-delta-16-v1.0.dat +0 -0
  98. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bb0b56b6eb7acbb4e80893b04c72412fe833418232e1ed7b06d97d7a7f08b4e1-full-16-v1.0.dat +0 -0
  99. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bc45262b757ff494b53bd2a8fba0f5511cc1f9c2a2c5360e04ea8cebbf6409df-delta-13-v1.0.dat +0 -0
  100. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bc45262b757ff494b53bd2a8fba0f5511cc1f9c2a2c5360e04ea8cebbf6409df-full-13-v1.0.dat +0 -0
  101. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bd0494ba430aff13458b557113b073d226eaf11257dfe26ff3323fa1cfe1335b-delta-39-v1.0.dat +0 -0
  102. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bd0494ba430aff13458b557113b073d226eaf11257dfe26ff3323fa1cfe1335b-full-39-v1.0.dat +0 -0
  103. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cd04f5fbba1553fa728b4dd8131d4723aaac288e0c7dc080447fbf0872c0a6eb-delta-36-v1.0.dat +0 -0
  104. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cd04f5fbba1553fa728b4dd8131d4723aaac288e0c7dc080447fbf0872c0a6eb-full-36-v1.0.dat +0 -0
  105. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cdd2399557fb3163a848f08831fdc833703354edb19a0d32a965fdb140f160c2-delta-18-v1.0.dat +0 -0
  106. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cdd2399557fb3163a848f08831fdc833703354edb19a0d32a965fdb140f160c2-full-18-v1.0.dat +0 -0
  107. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cf7a08fca7b1332095242e4d9800f4b94a3f4eaae88fe8407da42736d54b9e18-delta-37-v1.0.dat +0 -0
  108. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cf7a08fca7b1332095242e4d9800f4b94a3f4eaae88fe8407da42736d54b9e18-full-37-v1.0.dat +0 -0
  109. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-d1f97465a9f52187e2ef3a0d811a1258f52380a65340c55f3e8e65b92753bc13-delta-15-v1.0.dat +0 -0
  110. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-d1f97465a9f52187e2ef3a0d811a1258f52380a65340c55f3e8e65b92753bc13-full-15-v1.0.dat +0 -0
  111. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e475eccd4ee597e5ff67b1a249e37d65d6e3f754c3f0379fdb43692513588fef-delta-46-v1.0.dat +0 -0
  112. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e475eccd4ee597e5ff67b1a249e37d65d6e3f754c3f0379fdb43692513588fef-full-46-v1.0.dat +0 -0
  113. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e82e63517d78fd65b23a05c3b9a98cf905ddad7026995a238bfe634006b84cd0-delta-27-v1.0.dat +0 -0
  114. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e82e63517d78fd65b23a05c3b9a98cf905ddad7026995a238bfe634006b84cd0-full-27-v1.0.dat +0 -0
  115. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-ed2cf0fd6c0f6237c87c161e1fca303b3fbe6c04e01f652b88720b4572143349-delta-12-v1.0.dat +0 -0
  116. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-ed2cf0fd6c0f6237c87c161e1fca303b3fbe6c04e01f652b88720b4572143349-full-12-v1.0.dat +0 -0
  117. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f6e454eaf24a83c46a7bed4c19260a0a3ce0ed5c51739cb6d748d4913dc2ef58-delta-30-v1.0.dat +0 -0
  118. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f6e454eaf24a83c46a7bed4c19260a0a3ce0ed5c51739cb6d748d4913dc2ef58-full-30-v1.0.dat +0 -0
  119. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f7ad2bdf86d9609b4d6381086ec1e296bf558e2ff467ead29dd7fa6e31bacc56-delta-43-v1.0.dat +0 -0
  120. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f7ad2bdf86d9609b4d6381086ec1e296bf558e2ff467ead29dd7fa6e31bacc56-full-43-v1.0.dat +0 -0
  121. chia/_tests/core/data_layer/old_format/files/__init__.py +0 -0
  122. chia/_tests/core/data_layer/old_format/old_db.sqlite +0 -0
  123. chia/_tests/core/data_layer/test_data_layer_util.py +18 -21
  124. chia/_tests/core/data_layer/test_data_rpc.py +77 -28
  125. chia/_tests/core/data_layer/test_data_store.py +637 -700
  126. chia/_tests/core/data_layer/test_data_store_schema.py +2 -209
  127. chia/_tests/core/full_node/ram_db.py +1 -1
  128. chia/_tests/core/full_node/stores/test_block_store.py +4 -10
  129. chia/_tests/core/full_node/stores/test_coin_store.py +1 -1
  130. chia/_tests/core/full_node/test_address_manager.py +3 -3
  131. chia/_tests/core/full_node/test_block_height_map.py +1 -1
  132. chia/_tests/core/full_node/test_full_node.py +91 -30
  133. chia/_tests/core/full_node/test_generator_tools.py +17 -10
  134. chia/_tests/core/mempool/test_mempool.py +190 -90
  135. chia/_tests/core/mempool/test_mempool_fee_estimator.py +2 -4
  136. chia/_tests/core/mempool/test_mempool_item_queries.py +1 -1
  137. chia/_tests/core/mempool/test_mempool_manager.py +252 -77
  138. chia/_tests/core/mempool/test_singleton_fast_forward.py +9 -27
  139. chia/_tests/core/server/serve.py +0 -2
  140. chia/_tests/core/server/test_rate_limits.py +400 -347
  141. chia/_tests/core/server/test_server.py +2 -2
  142. chia/_tests/core/services/test_services.py +7 -7
  143. chia/_tests/core/test_cost_calculation.py +31 -10
  144. chia/_tests/core/test_crawler.py +4 -4
  145. chia/_tests/core/test_db_conversion.py +7 -14
  146. chia/_tests/core/test_db_validation.py +2 -6
  147. chia/_tests/core/test_farmer_harvester_rpc.py +34 -1
  148. chia/_tests/core/test_full_node_rpc.py +28 -24
  149. chia/_tests/core/test_merkle_set.py +1 -4
  150. chia/_tests/core/test_seeder.py +1 -1
  151. chia/_tests/core/util/test_keychain.py +2 -2
  152. chia/_tests/core/util/test_lru_cache.py +16 -0
  153. chia/_tests/core/util/test_streamable.py +85 -4
  154. chia/_tests/environments/wallet.py +4 -1
  155. chia/_tests/farmer_harvester/test_farmer.py +8 -6
  156. chia/_tests/farmer_harvester/test_farmer_harvester.py +306 -8
  157. chia/_tests/farmer_harvester/test_filter_prefix_bits.py +3 -3
  158. chia/_tests/farmer_harvester/test_third_party_harvesters.py +11 -11
  159. chia/_tests/fee_estimation/test_fee_estimation_integration.py +2 -2
  160. chia/_tests/fee_estimation/test_fee_estimation_rpc.py +1 -1
  161. chia/_tests/fee_estimation/test_fee_estimation_unit_tests.py +1 -2
  162. chia/_tests/generator/test_rom.py +2 -1
  163. chia/_tests/harvester/__init__.py +0 -0
  164. chia/_tests/harvester/config.py +4 -0
  165. chia/_tests/harvester/test_harvester_api.py +157 -0
  166. chia/_tests/plot_sync/test_plot_sync.py +6 -3
  167. chia/_tests/plot_sync/test_receiver.py +16 -4
  168. chia/_tests/plot_sync/test_sender.py +8 -7
  169. chia/_tests/plot_sync/test_sync_simulated.py +15 -13
  170. chia/_tests/plot_sync/util.py +3 -2
  171. chia/_tests/plotting/test_plot_manager.py +21 -5
  172. chia/_tests/plotting/test_prover.py +106 -0
  173. chia/_tests/pools/test_pool_cmdline.py +7 -6
  174. chia/_tests/pools/test_pool_puzzles_lifecycle.py +10 -3
  175. chia/_tests/pools/test_pool_rpc.py +92 -64
  176. chia/_tests/solver/__init__.py +0 -0
  177. chia/_tests/solver/config.py +4 -0
  178. chia/_tests/solver/test_solver_service.py +29 -0
  179. chia/_tests/timelord/test_new_peak.py +1 -1
  180. chia/_tests/timelord/test_timelord.py +1 -1
  181. chia/_tests/util/benchmarks.py +5 -12
  182. chia/_tests/util/blockchain.py +1 -1
  183. chia/_tests/util/build_network_protocol_files.py +7 -0
  184. chia/_tests/util/network_protocol_data.py +26 -0
  185. chia/_tests/util/protocol_messages_bytes-v1.0 +0 -0
  186. chia/_tests/util/protocol_messages_json.py +19 -0
  187. chia/_tests/util/setup_nodes.py +21 -2
  188. chia/_tests/util/spend_sim.py +9 -3
  189. chia/_tests/util/test_condition_tools.py +3 -2
  190. chia/_tests/util/test_full_block_utils.py +10 -9
  191. chia/_tests/util/test_misc.py +10 -10
  192. chia/_tests/util/test_network.py +32 -1
  193. chia/_tests/util/test_network_protocol_files.py +333 -318
  194. chia/_tests/util/test_network_protocol_json.py +6 -0
  195. chia/_tests/util/test_network_protocol_test.py +27 -0
  196. chia/_tests/util/test_priority_mutex.py +1 -1
  197. chia/_tests/util/test_replace_str_to_bytes.py +6 -6
  198. chia/_tests/wallet/cat_wallet/test_cat_wallet.py +17 -13
  199. chia/_tests/wallet/cat_wallet/test_trades.py +55 -55
  200. chia/_tests/wallet/did_wallet/test_did.py +118 -1229
  201. chia/_tests/wallet/nft_wallet/config.py +1 -1
  202. chia/_tests/wallet/nft_wallet/test_nft_1_offers.py +73 -96
  203. chia/_tests/wallet/nft_wallet/test_nft_bulk_mint.py +15 -12
  204. chia/_tests/wallet/nft_wallet/test_nft_offers.py +67 -134
  205. chia/_tests/wallet/nft_wallet/test_nft_wallet.py +31 -26
  206. chia/_tests/wallet/rpc/test_wallet_rpc.py +765 -371
  207. chia/_tests/wallet/sync/test_wallet_sync.py +6 -0
  208. chia/_tests/wallet/test_new_wallet_protocol.py +1 -1
  209. chia/_tests/wallet/test_signer_protocol.py +2 -2
  210. chia/_tests/wallet/test_singleton_lifecycle_fast.py +3 -4
  211. chia/_tests/wallet/test_transaction_store.py +42 -33
  212. chia/_tests/wallet/test_wallet.py +22 -31
  213. chia/_tests/wallet/test_wallet_state_manager.py +14 -7
  214. chia/_tests/wallet/vc_wallet/test_vc_wallet.py +53 -32
  215. chia/apis.py +2 -0
  216. chia/cmds/beta.py +7 -3
  217. chia/cmds/chia.py +2 -0
  218. chia/cmds/cmd_classes.py +11 -27
  219. chia/cmds/cmds_util.py +3 -0
  220. chia/cmds/coin_funcs.py +27 -22
  221. chia/cmds/configure.py +42 -18
  222. chia/cmds/dev/data.py +22 -3
  223. chia/cmds/farm.py +32 -0
  224. chia/cmds/farm_funcs.py +54 -5
  225. chia/cmds/init_funcs.py +4 -0
  226. chia/cmds/keys_funcs.py +8 -10
  227. chia/cmds/peer_funcs.py +8 -10
  228. chia/cmds/plotnft_funcs.py +24 -16
  229. chia/cmds/rpc.py +11 -1
  230. chia/cmds/show_funcs.py +5 -5
  231. chia/cmds/solver.py +33 -0
  232. chia/cmds/solver_funcs.py +21 -0
  233. chia/cmds/wallet.py +1 -1
  234. chia/cmds/wallet_funcs.py +149 -96
  235. chia/consensus/block_body_validation.py +8 -9
  236. chia/consensus/block_creation.py +9 -10
  237. chia/consensus/block_header_validation.py +61 -69
  238. chia/{full_node → consensus}/block_height_map.py +2 -1
  239. chia/consensus/block_height_map_protocol.py +21 -0
  240. chia/consensus/block_rewards.py +12 -12
  241. chia/consensus/blockchain.py +8 -18
  242. chia/consensus/default_constants.py +6 -6
  243. chia/consensus/generator_tools.py +1 -1
  244. chia/consensus/get_block_challenge.py +24 -25
  245. chia/consensus/pos_quality.py +28 -2
  246. chia/consensus/pot_iterations.py +15 -17
  247. chia/daemon/keychain_proxy.py +5 -0
  248. chia/daemon/server.py +2 -3
  249. chia/data_layer/data_layer.py +32 -24
  250. chia/data_layer/data_layer_errors.py +5 -0
  251. chia/data_layer/data_layer_rpc_api.py +1 -1
  252. chia/data_layer/data_layer_service.py +8 -0
  253. chia/data_layer/data_layer_util.py +49 -89
  254. chia/data_layer/data_layer_wallet.py +20 -17
  255. chia/data_layer/data_store.py +1051 -1462
  256. chia/data_layer/download_data.py +44 -115
  257. chia/{server → data_layer}/start_data_layer.py +2 -1
  258. chia/data_layer/util/benchmark.py +38 -53
  259. chia/farmer/farmer.py +3 -0
  260. chia/farmer/farmer_api.py +104 -5
  261. chia/farmer/farmer_rpc_api.py +20 -0
  262. chia/farmer/farmer_rpc_client.py +6 -2
  263. chia/farmer/farmer_service.py +8 -0
  264. chia/{server → farmer}/start_farmer.py +9 -3
  265. chia/full_node/block_store.py +20 -10
  266. chia/full_node/coin_store.py +12 -4
  267. chia/full_node/eligible_coin_spends.py +17 -72
  268. chia/full_node/full_node.py +68 -71
  269. chia/full_node/full_node_api.py +26 -32
  270. chia/full_node/full_node_rpc_api.py +44 -33
  271. chia/full_node/full_node_rpc_client.py +67 -79
  272. chia/full_node/full_node_service.py +8 -0
  273. chia/full_node/full_node_store.py +5 -3
  274. chia/full_node/mempool.py +15 -16
  275. chia/full_node/mempool_manager.py +73 -89
  276. chia/{server → full_node}/start_full_node.py +1 -1
  277. chia/full_node/subscriptions.py +2 -2
  278. chia/full_node/weight_proof.py +14 -15
  279. chia/harvester/harvester.py +8 -1
  280. chia/harvester/harvester_api.py +178 -44
  281. chia/harvester/harvester_service.py +8 -0
  282. chia/{server → harvester}/start_harvester.py +1 -1
  283. chia/introducer/introducer_service.py +8 -0
  284. chia/{server → introducer}/start_introducer.py +1 -1
  285. chia/plot_sync/receiver.py +6 -1
  286. chia/plot_sync/sender.py +7 -4
  287. chia/plotting/cache.py +37 -28
  288. chia/plotting/check_plots.py +83 -48
  289. chia/plotting/create_plots.py +3 -4
  290. chia/plotting/manager.py +18 -13
  291. chia/plotting/prover.py +153 -0
  292. chia/plotting/util.py +14 -6
  293. chia/pools/pool_wallet.py +6 -4
  294. chia/protocols/harvester_protocol.py +14 -0
  295. chia/protocols/outbound_message.py +1 -0
  296. chia/protocols/pool_protocol.py +1 -1
  297. chia/protocols/protocol_message_types.py +7 -0
  298. chia/protocols/shared_protocol.py +2 -0
  299. chia/protocols/solver_protocol.py +18 -0
  300. chia/rpc/rpc_server.py +1 -1
  301. chia/seeder/crawl_store.py +4 -8
  302. chia/seeder/crawler.py +2 -2
  303. chia/seeder/crawler_service.py +8 -0
  304. chia/seeder/start_crawler.py +1 -1
  305. chia/server/address_manager.py +12 -15
  306. chia/server/introducer_peers.py +1 -1
  307. chia/server/node_discovery.py +9 -10
  308. chia/server/rate_limit_numbers.py +157 -168
  309. chia/server/rate_limits.py +44 -41
  310. chia/server/resolve_peer_info.py +5 -0
  311. chia/server/server.py +17 -7
  312. chia/server/start_service.py +0 -1
  313. chia/simulator/block_tools.py +92 -58
  314. chia/simulator/full_node_simulator.py +1 -1
  315. chia/simulator/setup_services.py +52 -15
  316. chia/solver/__init__.py +0 -0
  317. chia/solver/solver.py +100 -0
  318. chia/solver/solver_api.py +59 -0
  319. chia/solver/solver_rpc_api.py +31 -0
  320. chia/solver/solver_rpc_client.py +16 -0
  321. chia/solver/solver_service.py +8 -0
  322. chia/solver/start_solver.py +102 -0
  323. {mozilla-ca → chia/ssl}/cacert.pem +0 -27
  324. chia/ssl/create_ssl.py +3 -2
  325. chia/{server → timelord}/start_timelord.py +1 -1
  326. chia/timelord/timelord.py +12 -13
  327. chia/timelord/timelord_service.py +8 -0
  328. chia/types/blockchain_format/proof_of_space.py +61 -17
  329. chia/types/coin_spend.py +0 -8
  330. chia/types/internal_mempool_item.py +3 -3
  331. chia/types/mempool_item.py +15 -8
  332. chia/types/mempool_submission_status.py +1 -1
  333. chia/util/config.py +1 -3
  334. chia/util/db_wrapper.py +7 -8
  335. chia/util/initial-config.yaml +46 -0
  336. chia/util/lru_cache.py +8 -4
  337. chia/util/network.py +9 -0
  338. chia/util/streamable.py +38 -8
  339. chia/util/virtual_project_analysis.py +1 -1
  340. chia/wallet/cat_wallet/cat_outer_puzzle.py +7 -4
  341. chia/wallet/cat_wallet/cat_wallet.py +13 -7
  342. chia/wallet/cat_wallet/r_cat_wallet.py +4 -1
  343. chia/wallet/conditions.py +1 -3
  344. chia/wallet/did_wallet/did_wallet.py +27 -332
  345. chia/wallet/nft_wallet/nft_puzzle_utils.py +1 -1
  346. chia/wallet/nft_wallet/nft_wallet.py +9 -7
  347. chia/wallet/puzzle_drivers.py +7 -8
  348. chia/{server → wallet}/start_wallet.py +1 -1
  349. chia/wallet/trade_manager.py +12 -9
  350. chia/wallet/transaction_record.py +14 -51
  351. chia/wallet/util/clvm_streamable.py +28 -41
  352. chia/wallet/util/merkle_utils.py +2 -2
  353. chia/wallet/util/tx_config.py +3 -6
  354. chia/wallet/vc_wallet/cr_cat_wallet.py +12 -6
  355. chia/wallet/vc_wallet/vc_wallet.py +13 -15
  356. chia/wallet/wallet.py +5 -3
  357. chia/wallet/wallet_node.py +25 -30
  358. chia/wallet/wallet_request_types.py +538 -101
  359. chia/wallet/wallet_rpc_api.py +398 -570
  360. chia/wallet/wallet_rpc_client.py +144 -332
  361. chia/wallet/wallet_service.py +8 -0
  362. chia/wallet/wallet_state_manager.py +53 -42
  363. chia/wallet/wallet_transaction_store.py +13 -5
  364. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info}/METADATA +31 -31
  365. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info}/RECORD +368 -240
  366. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info}/WHEEL +1 -1
  367. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info}/entry_points.txt +8 -7
  368. chia/full_node/mempool_check_conditions.py +0 -102
  369. chia/server/aliases.py +0 -35
  370. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7.dist-info/licenses}/LICENSE +0 -0
The remainder of this section reproduces the diff for chia/_tests/core/data_layer/test_data_store.py (entry 125 in the list above).

@@ -1,54 +1,54 @@
 from __future__ import annotations
 
+import contextlib
+import importlib.resources as importlib_resources
 import itertools
 import logging
 import os
 import random
 import re
+import shutil
 import statistics
 import time
 from collections.abc import Awaitable
 from dataclasses import dataclass
+from hashlib import sha256
 from pathlib import Path
 from random import Random
-from typing import Any, Callable, Optional, cast
+from typing import Any, BinaryIO, Callable, Optional
 
 import aiohttp
-import aiosqlite
+import chia_rs.datalayer
 import pytest
+from chia_rs.datalayer import KeyAlreadyPresentError, MerkleBlob, TreeIndex
 from chia_rs.sized_bytes import bytes32
 
 from chia._tests.core.data_layer.util import Example, add_0123_example, add_01234567_example
 from chia._tests.util.misc import BenchmarkRunner, Marks, boolean_datacases, datacases
-from chia.data_layer.data_layer_errors import KeyNotFoundError, NodeHashError, TreeGenerationIncrementingError
+from chia.data_layer.data_layer_errors import KeyNotFoundError, TreeGenerationIncrementingError
 from chia.data_layer.data_layer_util import (
     DiffData,
     InternalNode,
-    Node,
-    NodeType,
     OperationType,
-    ProofOfInclusion,
-    ProofOfInclusionLayer,
     Root,
+    SerializedNode,
     ServerInfo,
     Side,
     Status,
     Subscription,
     TerminalNode,
     _debug_dump,
-    leaf_hash,
-)
-from chia.data_layer.data_store import DataStore
-from chia.data_layer.download_data import (
     get_delta_filename_path,
     get_full_tree_filename_path,
-    insert_from_delta_file,
-    insert_into_data_store_from_file,
-    write_files_for_root,
+    leaf_hash,
 )
+from chia.data_layer.data_store import DataStore
+from chia.data_layer.download_data import insert_from_delta_file, write_files_for_root
+from chia.data_layer.util.benchmark import generate_datastore
 from chia.types.blockchain_format.program import Program
 from chia.util.byte_types import hexstr_to_bytes
 from chia.util.db_wrapper import DBWrapper2, generate_in_memory_db_uri
+from chia.util.lru_cache import LRUCache
 
 log = logging.getLogger(__name__)
 
@@ -57,31 +57,48 @@ pytestmark = pytest.mark.data_layer
 
 
 table_columns: dict[str, list[str]] = {
-    "node": ["hash", "node_type", "left", "right", "key", "value"],
     "root": ["tree_id", "generation", "node_hash", "status"],
+    "subscriptions": ["tree_id", "url", "ignore_till", "num_consecutive_failures", "from_wallet"],
+    "schema": ["version_id", "applied_at"],
+    "ids": ["kv_id", "hash", "blob", "store_id"],
+    "nodes": ["store_id", "hash", "root_hash", "generation", "idx"],
 }
 
 
-# TODO: Someday add tests for malformed DB data to make sure we handle it gracefully
-# and with good error messages.
-
-
 @pytest.mark.anyio
-async def test_valid_node_values_fixture_are_valid(data_store: DataStore, valid_node_values: dict[str, Any]) -> None:
-    async with data_store.db_wrapper.writer() as writer:
-        await writer.execute(
-            """
-            INSERT INTO node(hash, node_type, left, right, key, value)
-            VALUES(:hash, :node_type, :left, :right, :key, :value)
-            """,
-            valid_node_values,
-        )
+async def test_migrate_from_old_format(store_id: bytes32, tmp_path: Path) -> None:
+    old_format_resources = importlib_resources.files(__name__.rpartition(".")[0]).joinpath("old_format")
+    db_uri = tmp_path / "old_db.sqlite"
+    db_uri.write_bytes(old_format_resources.joinpath("old_db.sqlite").read_bytes())
+    files_resources = old_format_resources.joinpath("files")
+
+    with importlib_resources.as_file(files_resources) as files_path:
+        async with DataStore.managed(
+            database=db_uri,
+            uri=True,
+            merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
+            key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
+        ) as data_store:
+            await data_store.migrate_db(files_path)
+            root = await data_store.get_tree_root(store_id=store_id)
+            expected = Root(
+                store_id=bytes32.fromhex("2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964"),
+                node_hash=bytes32.fromhex("aa77376d1ccd3664e5c6366e010c52a978fedbf40f5ce262fee71b2e7fe0c6a9"),
+                generation=50,
+                status=Status.COMMITTED,
+            )
+            assert root == expected
 
 
+# TODO: Someday add tests for malformed DB data to make sure we handle it gracefully
+# and with good error messages.
 @pytest.mark.parametrize(argnames=["table_name", "expected_columns"], argvalues=table_columns.items())
 @pytest.mark.anyio
 async def test_create_creates_tables_and_columns(
-    database_uri: str, table_name: str, expected_columns: list[str]
+    database_uri: str,
+    table_name: str,
+    expected_columns: list[str],
+    tmp_path: Path,
 ) -> None:
     # Never string-interpolate sql queries... Except maybe in tests when it does not
     # allow you to parametrize the query.
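
Note on the hunk above: the new test_migrate_from_old_format loads its fixture database and delta files as package data through importlib.resources rather than hard-coded filesystem paths. A minimal sketch of that stdlib pattern, with illustrative package and file names (the test itself derives the package from __name__):

    import importlib.resources as importlib_resources

    # Locate a data directory bundled inside an importable package.
    resources = importlib_resources.files("chia._tests.core.data_layer").joinpath("old_format")

    # Read a bundled file directly as bytes.
    blob = resources.joinpath("old_db.sqlite").read_bytes()

    # Materialize resources as a real filesystem path for APIs that require one.
    with importlib_resources.as_file(resources.joinpath("files")) as files_path:
        names = sorted(path.name for path in files_path.iterdir())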
@@ -93,7 +110,12 @@ async def test_create_creates_tables_and_columns(
         columns = await cursor.fetchall()
         assert columns == []
 
-    async with DataStore.managed(database=database_uri, uri=True):
+    async with DataStore.managed(
+        database=database_uri,
+        uri=True,
+        merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
+        key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
+    ):
         async with db_wrapper.reader() as reader:
             cursor = await reader.execute(query)
             columns = await cursor.fetchall()
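
As the hunks above show, DataStore.managed now requires merkle_blobs_path and key_value_blobs_path alongside the database location, so every call site gains the two directory arguments. A sketch of the new call shape, assuming only what the diff shows (the directory names are simply the ones these tests use):

    from pathlib import Path

    from chia.data_layer.data_store import DataStore
    from chia.util.db_wrapper import generate_in_memory_db_uri

    async def open_store(tmp_path: Path) -> None:
        async with DataStore.managed(
            database=generate_in_memory_db_uri(),
            uri=True,
            merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
            key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
        ) as data_store:
            ...  # use data_store here; its resources are released on exit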
@@ -211,46 +233,6 @@ async def test_get_tree_generation_returns_none_when_none_available(
         await raw_data_store.get_tree_generation(store_id=store_id)
 
 
-@pytest.mark.anyio
-async def test_insert_internal_node_does_nothing_if_matching(data_store: DataStore, store_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, store_id=store_id)
-
-    kv_node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
-    ancestors = await data_store.get_ancestors(node_hash=kv_node.hash, store_id=store_id)
-    parent = ancestors[0]
-
-    async with data_store.db_wrapper.reader() as reader:
-        cursor = await reader.execute("SELECT * FROM node")
-        before = await cursor.fetchall()
-
-    await data_store._insert_internal_node(left_hash=parent.left_hash, right_hash=parent.right_hash)
-
-    async with data_store.db_wrapper.reader() as reader:
-        cursor = await reader.execute("SELECT * FROM node")
-        after = await cursor.fetchall()
-
-    assert after == before
-
-
-@pytest.mark.anyio
-async def test_insert_terminal_node_does_nothing_if_matching(data_store: DataStore, store_id: bytes32) -> None:
-    await add_01234567_example(data_store=data_store, store_id=store_id)
-
-    kv_node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
-
-    async with data_store.db_wrapper.reader() as reader:
-        cursor = await reader.execute("SELECT * FROM node")
-        before = await cursor.fetchall()
-
-    await data_store._insert_terminal_node(key=kv_node.key, value=kv_node.value)
-
-    async with data_store.db_wrapper.reader() as reader:
-        cursor = await reader.execute("SELECT * FROM node")
-        after = await cursor.fetchall()
-
-    assert after == before
-
-
 @pytest.mark.anyio
 async def test_build_a_tree(
     data_store: DataStore,
@@ -293,7 +275,7 @@ async def test_get_ancestors(data_store: DataStore, store_id: bytes32) -> None:
         "c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2",
     ]
 
-    ancestors_2 = await data_store.get_ancestors_optimized(node_hash=reference_node_hash, store_id=store_id)
+    ancestors_2 = await data_store.get_ancestors(node_hash=reference_node_hash, store_id=store_id)
     assert ancestors == ancestors_2
 
 
@@ -306,6 +288,10 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
     first_insertions = [True, False, True, False, True, True, False, True, False, True, True, False, False, True, False]
     deleted_all = False
     node_count = 0
+    node_hashes: list[bytes32] = []
+    hash_to_key: dict[bytes32, bytes] = {}
+    node_hash: Optional[bytes32]
+
     for i in range(1000):
         is_insert = False
         if i <= 14:
@@ -318,12 +304,10 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
             if not deleted_all:
                 while node_count > 0:
                     node_count -= 1
-                    seed = bytes32(b"0" * 32)
-                    node_hash = await data_store.get_terminal_node_for_seed(store_id, seed)
+                    node_hash = random.choice(node_hashes)
                     assert node_hash is not None
-                    node = await data_store.get_node(node_hash)
-                    assert isinstance(node, TerminalNode)
-                    await data_store.delete(key=node.key, store_id=store_id, status=Status.COMMITTED)
+                    await data_store.delete(key=hash_to_key[node_hash], store_id=store_id, status=Status.COMMITTED)
+                    node_hashes.remove(node_hash)
                 deleted_all = True
                 is_insert = True
             else:
@@ -335,10 +319,10 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
         key = (i % 200).to_bytes(4, byteorder="big")
         value = (i % 200).to_bytes(4, byteorder="big")
         seed = Program.to((key, value)).get_tree_hash()
-        node_hash = await data_store.get_terminal_node_for_seed(store_id, seed)
+        node_hash = None if len(node_hashes) == 0 else random.choice(node_hashes)
         if is_insert:
             node_count += 1
-            side = None if node_hash is None else data_store.get_side_for_seed(seed)
+            side = None if node_hash is None else (Side.LEFT if seed[0] < 128 else Side.RIGHT)
 
             insert_result = await data_store.insert(
                 key=key,
@@ -346,10 +330,11 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
                 store_id=store_id,
                 reference_node_hash=node_hash,
                 side=side,
-                use_optimized=False,
                 status=Status.COMMITTED,
             )
             node_hash = insert_result.node_hash
+            hash_to_key[node_hash] = key
+            node_hashes.append(node_hash)
             if node_hash is not None:
                 generation = await data_store.get_tree_generation(store_id=store_id)
                 current_ancestors = await data_store.get_ancestors(node_hash=node_hash, store_id=store_id)
@@ -357,39 +342,38 @@ async def test_get_ancestors_optimized(data_store: DataStore, store_id: bytes32)
         else:
             node_count -= 1
             assert node_hash is not None
-            node = await data_store.get_node(node_hash)
-            assert isinstance(node, TerminalNode)
-            await data_store.delete(key=node.key, store_id=store_id, use_optimized=False, status=Status.COMMITTED)
+            node_hashes.remove(node_hash)
+            await data_store.delete(key=hash_to_key[node_hash], store_id=store_id, status=Status.COMMITTED)
 
     for generation, node_hash, expected_ancestors in ancestors:
-        current_ancestors = await data_store.get_ancestors_optimized(
+        current_ancestors = await data_store.get_ancestors(
             node_hash=node_hash, store_id=store_id, generation=generation
         )
         assert current_ancestors == expected_ancestors
 
 
 @pytest.mark.anyio
-@pytest.mark.parametrize(
-    "use_optimized",
-    [True, False],
-)
 @pytest.mark.parametrize(
     "num_batches",
     [1, 5, 10, 25],
 )
-async def test_batch_update(
+async def test_batch_update_against_single_operations(
     data_store: DataStore,
     store_id: bytes32,
-    use_optimized: bool,
     tmp_path: Path,
     num_batches: int,
 ) -> None:
-    total_operations = 1000 if use_optimized else 100
+    total_operations = 1000
     num_ops_per_batch = total_operations // num_batches
     saved_batches: list[list[dict[str, Any]]] = []
     saved_kv: list[list[TerminalNode]] = []
     db_uri = generate_in_memory_db_uri()
-    async with DataStore.managed(database=db_uri, uri=True) as single_op_data_store:
+    async with DataStore.managed(
+        database=db_uri,
+        uri=True,
+        merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
+        key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
+    ) as single_op_data_store:
         await single_op_data_store.create_tree(store_id, status=Status.COMMITTED)
         random = Random()
         random.seed(100, version=2)
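
The hunks above in test_get_ancestors_optimized drop the store-provided helpers get_terminal_node_for_seed and get_side_for_seed in favor of bookkeeping kept by the test itself: each insert records the returned node hash and the key that produced it, and deletions pick a tracked hash at random. The essential pattern, restated as a sketch (argument names follow the diff; the insert_tracked helper itself is hypothetical):

    import random
    from typing import Optional

    from chia_rs.sized_bytes import bytes32

    from chia.data_layer.data_layer_util import Side, Status
    from chia.data_layer.data_store import DataStore

    async def insert_tracked(
        data_store: DataStore,
        store_id: bytes32,
        key: bytes,
        value: bytes,
        seed: bytes,
        node_hashes: list[bytes32],
        hash_to_key: dict[bytes32, bytes],
    ) -> None:
        # Choose the reference node and side locally instead of asking the store.
        reference: Optional[bytes32] = None if len(node_hashes) == 0 else random.choice(node_hashes)
        side = None if reference is None else (Side.LEFT if seed[0] < 128 else Side.RIGHT)
        insert_result = await data_store.insert(
            key=key,
            value=value,
            store_id=store_id,
            reference_node_hash=reference,
            side=side,
            status=Status.COMMITTED,
        )
        # Remember the new node so a later deletion can recover its key.
        hash_to_key[insert_result.node_hash] = key
        node_hashes.append(insert_result.node_hash)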
@@ -412,7 +396,6 @@ async def test_batch_update(
                     key=key,
                     value=value,
                     store_id=store_id,
-                    use_optimized=use_optimized,
                     status=Status.COMMITTED,
                 )
             else:
@@ -420,7 +403,6 @@ async def test_batch_update(
                     key=key,
                     new_value=value,
                     store_id=store_id,
-                    use_optimized=use_optimized,
                     status=Status.COMMITTED,
                 )
             action = "insert" if op_type == "insert" else "upsert"
@@ -432,7 +414,6 @@ async def test_batch_update(
                 await single_op_data_store.delete(
                     key=key,
                     store_id=store_id,
-                    use_optimized=use_optimized,
                     status=Status.COMMITTED,
                 )
                 batch.append({"action": "delete", "key": key})
@@ -446,7 +427,6 @@ async def test_batch_update(
                     key=key,
                     new_value=new_value,
                     store_id=store_id,
-                    use_optimized=use_optimized,
                     status=Status.COMMITTED,
                 )
                 keys_values[key] = new_value
@@ -469,38 +449,13 @@ async def test_batch_update(
         assert {node.key: node.value for node in current_kv} == {
             node.key: node.value for node in saved_kv[batch_number]
         }
-        queue: list[bytes32] = [root.node_hash]
-        ancestors: dict[bytes32, bytes32] = {}
-        while len(queue) > 0:
-            node_hash = queue.pop(0)
-            expected_ancestors = []
-            ancestor = node_hash
-            while ancestor in ancestors:
-                ancestor = ancestors[ancestor]
-                expected_ancestors.append(ancestor)
-            result_ancestors = await data_store.get_ancestors_optimized(node_hash, store_id)
-            assert [node.hash for node in result_ancestors] == expected_ancestors
-            node = await data_store.get_node(node_hash)
-            if isinstance(node, InternalNode):
-                queue.append(node.left_hash)
-                queue.append(node.right_hash)
-                ancestors[node.left_hash] = node_hash
-                ancestors[node.right_hash] = node_hash
 
     all_kv = await data_store.get_keys_values(store_id)
     assert {node.key: node.value for node in all_kv} == keys_values
 
 
 @pytest.mark.anyio
-@pytest.mark.parametrize(
-    "use_optimized",
-    [True, False],
-)
-async def test_upsert_ignores_existing_arguments(
-    data_store: DataStore,
-    store_id: bytes32,
-    use_optimized: bool,
-) -> None:
+async def test_upsert_ignores_existing_arguments(data_store: DataStore, store_id: bytes32) -> None:
     key = b"key"
     value = b"value1"
 
@@ -508,7 +463,6 @@ async def test_upsert_ignores_existing_arguments(
         key=key,
         value=value,
         store_id=store_id,
-        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
     node = await data_store.get_node_by_key(key, store_id)
@@ -519,7 +473,6 @@ async def test_upsert_ignores_existing_arguments(
         key=key,
         new_value=new_value,
         store_id=store_id,
-        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
     node = await data_store.get_node_by_key(key, store_id)
@@ -529,7 +482,6 @@ async def test_upsert_ignores_existing_arguments(
         key=key,
         new_value=new_value,
         store_id=store_id,
-        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
     node = await data_store.get_node_by_key(key, store_id)
@@ -540,7 +492,6 @@ async def test_upsert_ignores_existing_arguments(
         key=key2,
         new_value=value,
         store_id=store_id,
-        use_optimized=use_optimized,
         status=Status.COMMITTED,
     )
     node = await data_store.get_node_by_key(key2, store_id)
@@ -575,30 +526,24 @@ async def test_insert_batch_reference_and_side(
     )
     assert new_root_hash is not None, "batch insert failed or failed to update root"
 
-    parent = await data_store.get_node(new_root_hash)
-    assert isinstance(parent, InternalNode)
+    merkle_blob = await data_store.get_merkle_blob(store_id=store_id, root_hash=new_root_hash)
+    nodes_with_indexes = merkle_blob.get_nodes_with_indexes()
+    nodes = [pair[1] for pair in nodes_with_indexes]
+    assert len(nodes) == 3
+    assert isinstance(nodes[1], chia_rs.datalayer.LeafNode)
+    assert isinstance(nodes[2], chia_rs.datalayer.LeafNode)
+    left_terminal_node = await data_store.get_terminal_node(nodes[1].key, nodes[1].value, store_id)
+    right_terminal_node = await data_store.get_terminal_node(nodes[2].key, nodes[2].value, store_id)
     if side == Side.LEFT:
-        child = await data_store.get_node(parent.left_hash)
-        assert parent.left_hash == child.hash
+        assert left_terminal_node.key == b"key2"
+        assert right_terminal_node.key == b"key1"
     elif side == Side.RIGHT:
-        child = await data_store.get_node(parent.right_hash)
-        assert parent.right_hash == child.hash
+        assert left_terminal_node.key == b"key1"
+        assert right_terminal_node.key == b"key2"
     else:  # pragma: no cover
         raise Exception("invalid side for test")
 
 
-@pytest.mark.anyio
-async def test_ancestor_table_unique_inserts(data_store: DataStore, store_id: bytes32) -> None:
-    await add_0123_example(data_store=data_store, store_id=store_id)
-    hash_1 = bytes32.from_hexstr("0763561814685fbf92f6ca71fbb1cb11821951450d996375c239979bd63e9535")
-    hash_2 = bytes32.from_hexstr("924be8ff27e84cba17f5bc918097f8410fab9824713a4668a21c8e060a8cab40")
-    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
-    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
-    with pytest.raises(Exception, match="^Requested insertion of ancestor"):
-        await data_store._insert_ancestor_table(hash_1, hash_1, store_id, 2)
-    await data_store._insert_ancestor_table(hash_1, hash_2, store_id, 2)
-
-
 @pytest.mark.anyio
 async def test_get_pairs(
     data_store: DataStore,
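
Structural assertions that used to walk the node table via get_node and InternalNode are now phrased against the in-memory merkle blob. A sketch of the inspection pattern the hunk above relies on, assuming the three-node layout that test creates, a root plus two leaves (the test treats nodes[1] and nodes[2] as the left and right leaf):

    import chia_rs.datalayer
    from chia_rs.sized_bytes import bytes32

    from chia.data_layer.data_store import DataStore

    async def leaf_keys(data_store: DataStore, store_id: bytes32, root_hash: bytes32) -> list[bytes]:
        merkle_blob = await data_store.get_merkle_blob(store_id=store_id, root_hash=root_hash)
        # get_nodes_with_indexes() yields (index, node) pairs.
        nodes = [node for _index, node in merkle_blob.get_nodes_with_indexes()]
        leaves = [node for node in nodes if isinstance(node, chia_rs.datalayer.LeafNode)]
        # Leaf nodes carry key/value ids; resolve them to the stored terminal nodes.
        terminals = [await data_store.get_terminal_node(leaf.key, leaf.value, store_id) for leaf in leaves]
        return [terminal.key for terminal in terminals]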
@@ -609,7 +554,7 @@ async def test_get_pairs(
 
     pairs = await data_store.get_keys_values(store_id=store_id)
 
-    assert [node.hash for node in pairs] == example.terminal_nodes
+    assert {node.hash for node in pairs} == set(example.terminal_nodes)
 
 
 @pytest.mark.anyio
@@ -662,37 +607,6 @@ async def test_inserting_duplicate_key_fails(
     )
 
 
-@pytest.mark.anyio()
-async def test_inserting_invalid_length_hash_raises_original_exception(
-    data_store: DataStore,
-) -> None:
-    with pytest.raises(aiosqlite.IntegrityError):
-        # casting since we are testing an invalid case
-        await data_store._insert_node(
-            node_hash=cast(bytes32, b"\x05"),
-            node_type=NodeType.TERMINAL,
-            left_hash=None,
-            right_hash=None,
-            key=b"\x06",
-            value=b"\x07",
-        )
-
-
-@pytest.mark.anyio()
-async def test_inserting_invalid_length_ancestor_hash_raises_original_exception(
-    data_store: DataStore,
-    store_id: bytes32,
-) -> None:
-    with pytest.raises(aiosqlite.IntegrityError):
-        # casting since we are testing an invalid case
-        await data_store._insert_ancestor_table(
-            left_hash=bytes32(b"\x01" * 32),
-            right_hash=bytes32(b"\x02" * 32),
-            store_id=store_id,
-            generation=0,
-        )
-
-
 @pytest.mark.anyio()
 async def test_autoinsert_balances_from_scratch(data_store: DataStore, store_id: bytes32) -> None:
     random = Random()
@@ -705,7 +619,7 @@ async def test_autoinsert_balances_from_scratch(data_store: DataStore, store_id:
         insert_result = await data_store.autoinsert(key, value, store_id, status=Status.COMMITTED)
         hashes.append(insert_result.node_hash)
 
-    heights = {node_hash: len(await data_store.get_ancestors_optimized(node_hash, store_id)) for node_hash in hashes}
+    heights = {node_hash: len(await data_store.get_ancestors(node_hash, store_id)) for node_hash in hashes}
     too_tall = {hash: height for hash, height in heights.items() if height > 14}
     assert too_tall == {}
     assert 11 <= statistics.mean(heights.values()) <= 12
@@ -715,7 +629,7 @@ async def test_autoinsert_balances_gaps(data_store: DataStore, store_id: bytes32
 async def test_autoinsert_balances_gaps(data_store: DataStore, store_id: bytes32) -> None:
     random = Random()
     random.seed(101, version=2)
-    hashes = []
+    hashes: list[bytes32] = []
 
     for i in range(2000):
         key = (i + 100).to_bytes(4, byteorder="big")
@@ -723,7 +637,7 @@ async def test_autoinsert_balances_gaps(data_store: DataStore, store_id: bytes32
         if i == 0 or i > 10:
             insert_result = await data_store.autoinsert(key, value, store_id, status=Status.COMMITTED)
         else:
-            reference_node_hash = await data_store.get_terminal_node_for_seed(store_id, bytes32.zeros)
+            reference_node_hash = hashes[-1]
             insert_result = await data_store.insert(
                 key=key,
                 value=value,
@@ -732,11 +646,11 @@ async def test_autoinsert_balances_gaps(data_store: DataStore, store_id: bytes32
                 side=Side.LEFT,
                 status=Status.COMMITTED,
             )
-            ancestors = await data_store.get_ancestors_optimized(insert_result.node_hash, store_id)
+            ancestors = await data_store.get_ancestors(insert_result.node_hash, store_id)
             assert len(ancestors) == i
         hashes.append(insert_result.node_hash)

-    heights = {node_hash: len(await data_store.get_ancestors_optimized(node_hash, store_id)) for node_hash in hashes}
+    heights = {node_hash: len(await data_store.get_ancestors(node_hash, store_id)) for node_hash in hashes}
     too_tall = {hash: height for hash, height in heights.items() if height > 14}
     assert too_tall == {}
     assert 11 <= statistics.mean(heights.values()) <= 12
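For context on the 11-12 bound these balance tests assert: with 2000 inserted leaves, a well-balanced binary tree keeps every leaf near depth log2(2000), which is what the mean-height assertion encodes. A quick arithmetic check:

```python
import math

# Why the assertions above expect mean heights of 11-12 and cap them at 14:
# a balanced binary tree over n leaves has depth close to log2(n).
n = 2000
print(math.log2(n))  # ~10.97, so a healthy tree keeps leaves near depth 11
```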
@@ -874,24 +788,24 @@ async def test_proof_of_inclusion_by_hash(data_store: DataStore, store_id: bytes
     await _debug_dump(db=data_store.db_wrapper)

     expected_layers = [
-        ProofOfInclusionLayer(
+        chia_rs.datalayer.ProofOfInclusionLayer(
            other_hash_side=Side.RIGHT,
            other_hash=bytes32.fromhex("fb66fe539b3eb2020dfbfadfd601fa318521292b41f04c2057c16fca6b947ca1"),
            combined_hash=bytes32.fromhex("36cb1fc56017944213055da8cb0178fb0938c32df3ec4472f5edf0dff85ba4a3"),
        ),
-        ProofOfInclusionLayer(
+        chia_rs.datalayer.ProofOfInclusionLayer(
            other_hash_side=Side.RIGHT,
            other_hash=bytes32.fromhex("6d3af8d93db948e8b6aa4386958e137c6be8bab726db86789594b3588b35adcd"),
            combined_hash=bytes32.fromhex("5f67a0ab1976e090b834bf70e5ce2a0f0a9cd474e19a905348c44ae12274d30b"),
        ),
-        ProofOfInclusionLayer(
+        chia_rs.datalayer.ProofOfInclusionLayer(
            other_hash_side=Side.LEFT,
            other_hash=bytes32.fromhex("c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2"),
            combined_hash=bytes32.fromhex("7a5193a4e31a0a72f6623dfeb2876022ab74a48abb5966088a1c6f5451cc5d81"),
        ),
     ]

-    assert proof == ProofOfInclusion(node_hash=node.hash, layers=expected_layers)
+    assert proof == chia_rs.datalayer.ProofOfInclusion(node_hash=node.hash, layers=expected_layers)


 @pytest.mark.anyio
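These hunks swap the pure-Python ProofOfInclusionLayer/ProofOfInclusion classes for their chia_rs.datalayer equivalents; the proof shape (a leaf hash plus other_hash_side/other_hash layers up to the root) is unchanged. A generic sketch of how such layered proofs verify, with a hypothetical sha256 combiner standing in for the datalayer's actual internal-node hash:

```python
import hashlib

def combine(left: bytes, right: bytes) -> bytes:
    # Hypothetical combiner for illustration only; the real datalayer
    # internal-node hash uses its own domain separation.
    return hashlib.sha256(left + right).digest()

def verify(leaf_hash: bytes, layers: list[tuple[str, bytes]], root: bytes) -> bool:
    current = leaf_hash
    for other_side, other_hash in layers:
        if other_side == "LEFT":
            current = combine(other_hash, current)  # sibling sits on the left
        else:
            current = combine(current, other_hash)  # sibling sits on the right
    return current == root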
@@ -904,26 +818,7 @@ async def test_proof_of_inclusion_by_hash_no_ancestors(data_store: DataStore, st

     proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)

-    assert proof == ProofOfInclusion(node_hash=node.hash, layers=[])
-
-
-@pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash_program(data_store: DataStore, store_id: bytes32) -> None:
-    """The proof of inclusion program has the expected Python equivalence."""
-
-    await add_01234567_example(data_store=data_store, store_id=store_id)
-    node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
-
-    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
-
-    assert proof.as_program() == [
-        b"\x04",
-        [
-            bytes32.fromhex("fb66fe539b3eb2020dfbfadfd601fa318521292b41f04c2057c16fca6b947ca1"),
-            bytes32.fromhex("6d3af8d93db948e8b6aa4386958e137c6be8bab726db86789594b3588b35adcd"),
-            bytes32.fromhex("c852ecd8fb61549a0a42f9eb9dde65e6c94a01934dbd9c1d35ab94e2a0ae58e2"),
-        ],
-    ]
+    assert proof == chia_rs.datalayer.ProofOfInclusion(node_hash=node.hash, layers=[])


 @pytest.mark.anyio
@@ -939,27 +834,6 @@ async def test_proof_of_inclusion_by_hash_equals_by_key(data_store: DataStore, s
     assert proof_by_hash == proof_by_key


-@pytest.mark.anyio
-async def test_proof_of_inclusion_by_hash_bytes(data_store: DataStore, store_id: bytes32) -> None:
-    """The proof of inclusion provided by the data store is able to be converted to a
-    program and subsequently to bytes.
-    """
-    await add_01234567_example(data_store=data_store, store_id=store_id)
-    node = await data_store.get_node_by_key(key=b"\x04", store_id=store_id)
-
-    proof = await data_store.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
-
-    expected = (
-        b"\xff\x04\xff\xff\xa0\xfbf\xfeS\x9b>\xb2\x02\r\xfb\xfa\xdf\xd6\x01\xfa1\x85!)"
-        b"+A\xf0L W\xc1o\xcak\x94|\xa1\xff\xa0m:\xf8\xd9=\xb9H\xe8\xb6\xaaC\x86\x95"
-        b"\x8e\x13|k\xe8\xba\xb7&\xdb\x86x\x95\x94\xb3X\x8b5\xad\xcd\xff\xa0\xc8R\xec"
-        b"\xd8\xfbaT\x9a\nB\xf9\xeb\x9d\xdee\xe6\xc9J\x01\x93M\xbd\x9c\x1d5\xab\x94"
-        b"\xe2\xa0\xaeX\xe2\x80\x80"
-    )
-
-    assert bytes(proof.as_program()) == expected
-
-
 # @pytest.mark.anyio
 # async def test_create_first_pair(data_store: DataStore, store_id: bytes) -> None:
 #     key = SExp.to([1, 2])
@@ -1036,46 +910,6 @@ async def test_check_roots_are_incrementing_gap(raw_data_store: DataStore) -> No
     await raw_data_store._check_roots_are_incrementing()


-@pytest.mark.anyio
-async def test_check_hashes_internal(raw_data_store: DataStore) -> None:
-    async with raw_data_store.db_wrapper.writer() as writer:
-        await writer.execute(
-            "INSERT INTO node(hash, node_type, left, right) VALUES(:hash, :node_type, :left, :right)",
-            {
-                "hash": a_bytes_32,
-                "node_type": NodeType.INTERNAL,
-                "left": a_bytes_32,
-                "right": a_bytes_32,
-            },
-        )
-
-    with pytest.raises(
-        NodeHashError,
-        match=r"\n +000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f$",
-    ):
-        await raw_data_store._check_hashes()
-
-
-@pytest.mark.anyio
-async def test_check_hashes_terminal(raw_data_store: DataStore) -> None:
-    async with raw_data_store.db_wrapper.writer() as writer:
-        await writer.execute(
-            "INSERT INTO node(hash, node_type, key, value) VALUES(:hash, :node_type, :key, :value)",
-            {
-                "hash": a_bytes_32,
-                "node_type": NodeType.TERMINAL,
-                "key": Program.to((1, 2)).as_bin(),
-                "value": Program.to((1, 2)).as_bin(),
-            },
-        )
-
-    with pytest.raises(
-        NodeHashError,
-        match=r"\n +000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f$",
-    ):
-        await raw_data_store._check_hashes()
-
-
 @pytest.mark.anyio
 async def test_root_state(data_store: DataStore, store_id: bytes32) -> None:
     key = b"\x01\x02"
@@ -1127,28 +961,29 @@ async def test_kv_diff(data_store: DataStore, store_id: bytes32) -> None:
     insertions = 0
     expected_diff: set[DiffData] = set()
     root_start = None
+
     for i in range(500):
         key = (i + 100).to_bytes(4, byteorder="big")
         value = (i + 200).to_bytes(4, byteorder="big")
         seed = leaf_hash(key=key, value=value)
-        node_hash = await data_store.get_terminal_node_for_seed(store_id, seed)
+        node = await data_store.get_terminal_node_for_seed(seed, store_id)
+        side_seed = bytes(seed)[0]
+        side = None if node is None else (Side.LEFT if side_seed < 128 else Side.RIGHT)
+
         if random.randint(0, 4) > 0 or insertions < 10:
             insertions += 1
-            side = None if node_hash is None else data_store.get_side_for_seed(seed)
-
+            reference_node_hash = node.hash if node is not None else None
             await data_store.insert(
                 key=key,
                 value=value,
                 store_id=store_id,
-                reference_node_hash=node_hash,
-                side=side,
                 status=Status.COMMITTED,
+                reference_node_hash=reference_node_hash,
+                side=side,
             )
             if i > 200:
                 expected_diff.add(DiffData(OperationType.INSERT, key, value))
         else:
-            assert node_hash is not None
-            node = await data_store.get_node(node_hash)
             assert isinstance(node, TerminalNode)
             await data_store.delete(key=node.key, store_id=store_id, status=Status.COMMITTED)
             if i > 200:
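The rewritten loop above derives the insertion side from the first byte of the seed hash instead of calling the removed data_store.get_side_for_seed helper. The rule itself is tiny; a standalone restatement:

```python
# Standalone restatement of the side selection used above: the first
# byte of the seed splits the 0-255 range evenly between the two sides.
def side_for_seed(seed: bytes) -> str:
    return "LEFT" if seed[0] < 128 else "RIGHT"

assert side_for_seed(b"\x00" + b"rest") == "LEFT"
assert side_for_seed(b"\xff" + b"rest") == "RIGHT"
```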
@@ -1275,6 +1110,39 @@ async def test_subscribe_unsubscribe(data_store: DataStore, store_id: bytes32) -
     ]


+@pytest.mark.anyio
+async def test_unsubscribe_clears_databases(data_store: DataStore, store_id: bytes32) -> None:
+    num_inserts = 100
+    await data_store.subscribe(Subscription(store_id, []))
+    for value in range(num_inserts):
+        await data_store.insert(
+            key=value.to_bytes(4, byteorder="big"),
+            value=value.to_bytes(4, byteorder="big"),
+            store_id=store_id,
+            reference_node_hash=None,
+            side=None,
+            status=Status.COMMITTED,
+        )
+    await data_store.add_node_hashes(store_id)
+
+    tables = ["ids", "nodes"]
+    for table in tables:
+        async with data_store.db_wrapper.reader() as reader:
+            async with reader.execute(f"SELECT COUNT(*) FROM {table}") as cursor:
+                row_count = await cursor.fetchone()
+                assert row_count is not None
+                assert row_count[0] > 0
+
+    await data_store.unsubscribe(store_id)
+
+    for table in tables:
+        async with data_store.db_wrapper.reader() as reader:
+            async with reader.execute(f"SELECT COUNT(*) FROM {table}") as cursor:
+                row_count = await cursor.fetchone()
+                assert row_count is not None
+                assert row_count[0] == 0
+
+
 @pytest.mark.anyio
 async def test_server_selection(data_store: DataStore, store_id: bytes32) -> None:
     start_timestamp = 1000
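The new test_unsubscribe_clears_databases above asserts emptiness by counting rows per table before and after the unsubscribe. The same check pattern, reduced to the standard-library sqlite3 module so it runs standalone (table name illustrative):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE nodes (hash BLOB)")
conn.execute("INSERT INTO nodes VALUES (x'00')")

(count,) = conn.execute("SELECT COUNT(*) FROM nodes").fetchone()
assert count > 0  # populated before the cleanup

conn.execute("DELETE FROM nodes")
(count,) = conn.execute("SELECT COUNT(*) FROM nodes").fetchone()
assert count == 0  # empty afterwards, as unsubscribe should leave it
```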
@@ -1410,16 +1278,71 @@ async def test_server_http_ban(
     assert sinfo.ignore_till == start_timestamp  # we don't increase on second failure


-@pytest.mark.parametrize(
-    "test_delta",
-    [True, False],
-)
+async def get_first_generation(data_store: DataStore, node_hash: bytes32, store_id: bytes32) -> Optional[int]:
+    async with data_store.db_wrapper.reader() as reader:
+        cursor = await reader.execute(
+            "SELECT generation FROM nodes WHERE hash = ? AND store_id = ?",
+            (
+                node_hash,
+                store_id,
+            ),
+        )
+
+        row = await cursor.fetchone()
+        if row is None:
+            return None
+
+        return int(row[0])
+
+
+async def write_tree_to_file_old_format(
+    data_store: DataStore,
+    root: Root,
+    node_hash: bytes32,
+    store_id: bytes32,
+    writer: BinaryIO,
+    merkle_blob: Optional[MerkleBlob] = None,
+    hash_to_index: Optional[dict[bytes32, TreeIndex]] = None,
+) -> None:
+    if node_hash == bytes32.zeros:
+        return
+
+    if merkle_blob is None:
+        merkle_blob = await data_store.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
+    if hash_to_index is None:
+        hash_to_index = merkle_blob.get_hashes_indexes()
+
+    generation = await get_first_generation(data_store, node_hash, store_id)
+    # Root's generation is not the first time we see this hash, so it's not a new delta.
+    if root.generation != generation:
+        return
+
+    raw_index = hash_to_index[node_hash]
+    raw_node = merkle_blob.get_raw_node(raw_index)
+
+    if isinstance(raw_node, chia_rs.datalayer.InternalNode):
+        left_hash = merkle_blob.get_hash_at_index(raw_node.left)
+        right_hash = merkle_blob.get_hash_at_index(raw_node.right)
+        await write_tree_to_file_old_format(data_store, root, left_hash, store_id, writer, merkle_blob, hash_to_index)
+        await write_tree_to_file_old_format(data_store, root, right_hash, store_id, writer, merkle_blob, hash_to_index)
+        to_write = bytes(SerializedNode(False, bytes(left_hash), bytes(right_hash)))
+    elif isinstance(raw_node, chia_rs.datalayer.LeafNode):
+        node = await data_store.get_terminal_node(raw_node.key, raw_node.value, store_id)
+        to_write = bytes(SerializedNode(True, node.key, node.value))
+    else:
+        raise Exception(f"Node is neither InternalNode nor TerminalNode: {raw_node}")
+
+    writer.write(len(to_write).to_bytes(4, byteorder="big"))
+    writer.write(to_write)
+
+
+@pytest.mark.parametrize(argnames="test_delta", argvalues=["full", "delta", "old"])
 @boolean_datacases(name="group_files_by_store", false="group by singleton", true="don't group by singleton")
 @pytest.mark.anyio
 async def test_data_server_files(
     data_store: DataStore,
     store_id: bytes32,
-    test_delta: bool,
+    test_delta: str,
     group_files_by_store: bool,
     tmp_path: Path,
 ) -> None:
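write_tree_to_file_old_format above emits each serialized node as a 4-byte big-endian length followed by the payload; that framing is the whole per-node file-format contract the "old" test case exercises. A minimal standalone round-trip of the same framing:

```python
import io
from typing import BinaryIO

def write_frame(writer: BinaryIO, payload: bytes) -> None:
    # Same framing as the helper above: 4-byte big-endian length, then payload.
    writer.write(len(payload).to_bytes(4, byteorder="big"))
    writer.write(payload)

def read_frame(reader: BinaryIO) -> bytes:
    size = int.from_bytes(reader.read(4), byteorder="big")
    return reader.read(size)

buffer = io.BytesIO()
write_frame(buffer, b"serialized-node")
buffer.seek(0)
assert read_frame(buffer) == b"serialized-node"
```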
@@ -1428,45 +1351,70 @@ async def test_data_server_files(
     num_ops_per_batch = 100

     db_uri = generate_in_memory_db_uri()
-    async with DataStore.managed(database=db_uri, uri=True) as data_store_server:
+    async with DataStore.managed(
+        database=db_uri,
+        uri=True,
+        merkle_blobs_path=tmp_path.joinpath("merkle-blobs"),
+        key_value_blobs_path=tmp_path.joinpath("key-value-blobs"),
+    ) as data_store_server:
         await data_store_server.create_tree(store_id, status=Status.COMMITTED)
         random = Random()
         random.seed(100, version=2)

         keys: list[bytes] = []
         counter = 0
-
-        for batch in range(num_batches):
-            changelist: list[dict[str, Any]] = []
-            for operation in range(num_ops_per_batch):
-                if random.randint(0, 4) > 0 or len(keys) == 0:
-                    key = counter.to_bytes(4, byteorder="big")
-                    value = (2 * counter).to_bytes(4, byteorder="big")
-                    keys.append(key)
-                    changelist.append({"action": "insert", "key": key, "value": value})
+        num_repeats = 2
+
+        # Repeat twice to guarantee there will be hashes from the old file format
+        for _ in range(num_repeats):
+            for batch in range(num_batches):
+                changelist: list[dict[str, Any]] = []
+                if batch == num_batches - 1:
+                    for key in keys:
+                        changelist.append({"action": "delete", "key": key})
+                    keys = []
+                    counter = 0
                 else:
-                    key = random.choice(keys)
-                    keys.remove(key)
-                    changelist.append({"action": "delete", "key": key})
-                counter += 1
-            await data_store_server.insert_batch(store_id, changelist, status=Status.COMMITTED)
-            root = await data_store_server.get_tree_root(store_id)
-            await write_files_for_root(
-                data_store_server, store_id, root, tmp_path, 0, group_by_store=group_files_by_store
-            )
-            roots.append(root)
+                    for operation in range(num_ops_per_batch):
+                        if random.randint(0, 4) > 0 or len(keys) == 0:
+                            key = counter.to_bytes(4, byteorder="big")
+                            value = (2 * counter).to_bytes(4, byteorder="big")
+                            keys.append(key)
+                            changelist.append({"action": "insert", "key": key, "value": value})
+                        else:
+                            key = random.choice(keys)
+                            keys.remove(key)
+                            changelist.append({"action": "delete", "key": key})
+                        counter += 1
+
+                await data_store_server.insert_batch(store_id, changelist, status=Status.COMMITTED)
+                root = await data_store_server.get_tree_root(store_id)
+                await data_store_server.add_node_hashes(store_id)
+                if test_delta == "old":
+                    node_hash = root.node_hash if root.node_hash is not None else bytes32.zeros
+                    filename = get_delta_filename_path(
+                        tmp_path, store_id, node_hash, root.generation, group_files_by_store
+                    )
+                    filename.parent.mkdir(parents=True, exist_ok=True)
+                    with open(filename, "xb") as writer:
+                        await write_tree_to_file_old_format(data_store_server, root, node_hash, store_id, writer)
+                else:
+                    await write_files_for_root(
+                        data_store_server, store_id, root, tmp_path, 0, group_by_store=group_files_by_store
+                    )
+                roots.append(root)

     generation = 1
-    assert len(roots) == num_batches
+    assert len(roots) == num_batches * num_repeats
     for root in roots:
-        assert root.node_hash is not None
-        if not test_delta:
-            filename = get_full_tree_filename_path(tmp_path, store_id, root.node_hash, generation, group_files_by_store)
+        node_hash = root.node_hash if root.node_hash is not None else bytes32.zeros
+        if test_delta == "full":
+            filename = get_full_tree_filename_path(tmp_path, store_id, node_hash, generation, group_files_by_store)
             assert filename.exists()
         else:
-            filename = get_delta_filename_path(tmp_path, store_id, root.node_hash, generation, group_files_by_store)
+            filename = get_delta_filename_path(tmp_path, store_id, node_hash, generation, group_files_by_store)
             assert filename.exists()
-        await insert_into_data_store_from_file(data_store, store_id, root.node_hash, tmp_path.joinpath(filename))
+        await data_store.insert_into_data_store_from_file(store_id, root.node_hash, tmp_path.joinpath(filename))
         current_root = await data_store.get_tree_root(store_id=store_id)
         assert current_root.node_hash == root.node_hash
         generation += 1
@@ -1551,6 +1499,20 @@ class BatchesInsertBenchmarkCase:
         return f"count={self.count},batch_count={self.batch_count}"


+@dataclass
+class BatchUpdateBenchmarkCase:
+    pre: int
+    num_inserts: int
+    num_deletes: int
+    num_upserts: int
+    limit: float
+    marks: Marks = ()
+
+    @property
+    def id(self) -> str:
+        return f"pre={self.pre},inserts={self.num_inserts},deletes={self.num_deletes},upserts={self.num_upserts}"
+
+
 @datacases(
     BatchInsertBenchmarkCase(
         pre=0,
@@ -1589,16 +1551,55 @@ async def test_benchmark_batch_insert_speed(
     r.seed("shadowlands", version=2)

     changelist = [
+        {"action": "insert", "key": x.to_bytes(32, byteorder="big", signed=False), "value": r.randbytes(1200)}
+        for x in range(case.pre + case.count)
+    ]
+
+    pre = changelist[: case.pre]
+    batch = changelist[case.pre : case.pre + case.count]
+
+    if case.pre > 0:
+        await data_store.insert_batch(
+            store_id=store_id,
+            changelist=pre,
+            status=Status.COMMITTED,
+        )
+
+    with benchmark_runner.assert_runtime(seconds=case.limit):
+        await data_store.insert_batch(
+            store_id=store_id,
+            changelist=batch,
+        )
+
+
+@datacases(
+    BatchUpdateBenchmarkCase(
+        pre=1_000,
+        num_inserts=1_000,
+        num_deletes=500,
+        num_upserts=500,
+        limit=36,
+    ),
+)
+@pytest.mark.anyio
+async def test_benchmark_batch_update_speed(
+    data_store: DataStore,
+    store_id: bytes32,
+    benchmark_runner: BenchmarkRunner,
+    case: BatchUpdateBenchmarkCase,
+) -> None:
+    r = random.Random()
+    r.seed("shadowlands", version=2)
+
+    pre = [
         {
             "action": "insert",
             "key": x.to_bytes(32, byteorder="big", signed=False),
             "value": bytes(r.getrandbits(8) for _ in range(1200)),
         }
-        for x in range(case.pre + case.count)
+        for x in range(case.pre)
     ]
-
-    pre = changelist[: case.pre]
-    batch = changelist[case.pre : case.pre + case.count]
+    batch = []

     if case.pre > 0:
         await data_store.insert_batch(
@@ -1607,6 +1608,44 @@ async def test_benchmark_batch_insert_speed(
             status=Status.COMMITTED,
         )

+    keys = [x.to_bytes(32, byteorder="big", signed=False) for x in range(case.pre)]
+    for operation in range(case.num_inserts):
+        key = (operation + case.pre).to_bytes(32, byteorder="big", signed=False)
+        batch.append(
+            {
+                "action": "insert",
+                "key": key,
+                "value": bytes(r.getrandbits(8) for _ in range(1200)),
+            }
+        )
+        keys.append(key)
+
+    if case.num_deletes > 0:
+        r.shuffle(keys)
+        assert len(keys) >= case.num_deletes
+        batch.extend(
+            {
+                "action": "delete",
+                "key": key,
+            }
+            for key in keys[: case.num_deletes]
+        )
+        keys = keys[case.num_deletes :]
+
+    if case.num_upserts > 0:
+        assert len(keys) > 0
+        r.shuffle(keys)
+        batch.extend(
+            [
+                {
+                    "action": "upsert",
+                    "key": keys[operation % len(keys)],
+                    "value": bytes(r.getrandbits(8) for _ in range(1200)),
+                }
+                for operation in range(case.num_upserts)
+            ]
+        )
+
     with benchmark_runner.assert_runtime(seconds=case.limit):
         await data_store.insert_batch(
             store_id=store_id,
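The new benchmark body above builds one changelist mixing inserts, deletes, and upserts before timing a single insert_batch call. The changelist is just a list of plain dicts; a standalone sketch of its shape (sizes and counts illustrative):

```python
import random
from typing import Any

r = random.Random()
r.seed("shadowlands", version=2)

keys = [x.to_bytes(32, byteorder="big") for x in range(10)]
batch: list[dict[str, Any]] = [
    {"action": "insert", "key": (10).to_bytes(32, byteorder="big"), "value": r.randbytes(32)}
]
batch.extend({"action": "delete", "key": key} for key in keys[:5])
batch.extend({"action": "upsert", "key": key, "value": r.randbytes(32)} for key in keys[5:])
```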
@@ -1614,6 +1653,22 @@
         )


+@datacases(
+    BatchInsertBenchmarkCase(
+        pre=0,
+        count=50,
+        limit=2,
+    ),
+)
+@pytest.mark.anyio
+async def test_benchmark_tool(
+    benchmark_runner: BenchmarkRunner,
+    case: BatchInsertBenchmarkCase,
+) -> None:
+    with benchmark_runner.assert_runtime(seconds=case.limit):
+        await generate_datastore(case.count)
+
+
 @datacases(
     BatchesInsertBenchmarkCase(
         count=50,
@@ -1637,7 +1692,7 @@ async def test_benchmark_batch_insert_speed_multiple_batches(
             {
                 "action": "insert",
                 "key": x.to_bytes(32, byteorder="big", signed=False),
-                "value": bytes(r.getrandbits(8) for _ in range(10000)),
+                "value": r.randbytes(10000),
             }
             for x in range(batch * case.count, (batch + 1) * case.count)
         ]
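r.randbytes(10000) replaces the bytes(r.getrandbits(8) ...) generator expression here and in the earlier benchmark. Worth noting: Random.randbytes (Python 3.9+) is deterministic under a fixed seed but does not reproduce the old idiom's byte stream, so seeded benchmark payloads change in content, not just in construction speed. A standalone check:

```python
import random

r = random.Random()
r.seed("shadowlands", version=2)

old_style = bytes(r.getrandbits(8) for _ in range(16))
new_style = r.randbytes(16)

# Both draw deterministically from the seeded generator, but they
# consume it differently, so the two payloads differ in content.
assert len(old_style) == len(new_style) == 16
```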
@@ -1648,189 +1703,6 @@ async def test_benchmark_batch_insert_speed_multiple_batches(
         )


-@pytest.mark.anyio
-async def test_delete_store_data(raw_data_store: DataStore) -> None:
-    store_id = bytes32.zeros
-    store_id_2 = bytes32(b"\0" * 31 + b"\1")
-    await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
-    await raw_data_store.create_tree(store_id=store_id_2, status=Status.COMMITTED)
-    total_keys = 4
-    keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)]
-    batch1 = [
-        {"action": "insert", "key": keys[0], "value": keys[0]},
-        {"action": "insert", "key": keys[1], "value": keys[1]},
-    ]
-    batch2 = batch1.copy()
-    batch1.append({"action": "insert", "key": keys[2], "value": keys[2]})
-    batch2.append({"action": "insert", "key": keys[3], "value": keys[3]})
-    assert batch1 != batch2
-    await raw_data_store.insert_batch(store_id, batch1, status=Status.COMMITTED)
-    await raw_data_store.insert_batch(store_id_2, batch2, status=Status.COMMITTED)
-    keys_values_before = await raw_data_store.get_keys_values(store_id_2)
-    async with raw_data_store.db_wrapper.reader() as reader:
-        result = await reader.execute("SELECT * FROM node")
-        nodes = await result.fetchall()
-    kv_nodes_before = {}
-    for node in nodes:
-        if node["key"] is not None:
-            kv_nodes_before[node["key"]] = node["value"]
-    assert [kv_nodes_before[key] for key in keys] == keys
-    await raw_data_store.delete_store_data(store_id)
-    # Deleting from `node` table doesn't alter other stores.
-    keys_values_after = await raw_data_store.get_keys_values(store_id_2)
-    assert keys_values_before == keys_values_after
-    async with raw_data_store.db_wrapper.reader() as reader:
-        result = await reader.execute("SELECT * FROM node")
-        nodes = await result.fetchall()
-    kv_nodes_after = {}
-    for node in nodes:
-        if node["key"] is not None:
-            kv_nodes_after[node["key"]] = node["value"]
-    for i in range(total_keys):
-        if i != 2:
-            assert kv_nodes_after[keys[i]] == keys[i]
-        else:
-            # `keys[2]` was only present in the first store.
-            assert keys[i] not in kv_nodes_after
-    assert not await raw_data_store.store_id_exists(store_id)
-    await raw_data_store.delete_store_data(store_id_2)
-    async with raw_data_store.db_wrapper.reader() as reader:
-        async with reader.execute("SELECT COUNT(*) FROM node") as cursor:
-            row_count = await cursor.fetchone()
-            assert row_count is not None
-            assert row_count[0] == 0
-    assert not await raw_data_store.store_id_exists(store_id_2)
-
-
-@pytest.mark.anyio
-async def test_delete_store_data_multiple_stores(raw_data_store: DataStore) -> None:
-    # Make sure inserting and deleting the same data works
-    for repetition in range(2):
-        num_stores = 50
-        total_keys = 150
-        keys_deleted_per_store = 3
-        store_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)]
-        for store_id in store_ids:
-            await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
-        original_keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)]
-        batches = []
-        for i in range(num_stores):
-            batch = [
-                {"action": "insert", "key": key, "value": key} for key in original_keys[i * keys_deleted_per_store :]
-            ]
-            batches.append(batch)
-
-        for store_id, batch in zip(store_ids, batches):
-            await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED)
-
-        for tree_index in range(num_stores):
-            async with raw_data_store.db_wrapper.reader() as reader:
-                result = await reader.execute("SELECT * FROM node")
-                nodes = await result.fetchall()
-
-            keys = {node["key"] for node in nodes if node["key"] is not None}
-            assert len(keys) == total_keys - tree_index * keys_deleted_per_store
-            keys_after_index = set(original_keys[tree_index * keys_deleted_per_store :])
-            keys_before_index = set(original_keys[: tree_index * keys_deleted_per_store])
-            assert keys_after_index.issubset(keys)
-            assert keys.isdisjoint(keys_before_index)
-            await raw_data_store.delete_store_data(store_ids[tree_index])
-
-        async with raw_data_store.db_wrapper.reader() as reader:
-            async with reader.execute("SELECT COUNT(*) FROM node") as cursor:
-                row_count = await cursor.fetchone()
-                assert row_count is not None
-                assert row_count[0] == 0
-
-
-@pytest.mark.parametrize("common_keys_count", [1, 250, 499])
-@pytest.mark.anyio
-async def test_delete_store_data_with_common_values(raw_data_store: DataStore, common_keys_count: int) -> None:
-    store_id_1 = bytes32(b"\x00" * 31 + b"\x01")
-    store_id_2 = bytes32(b"\x00" * 31 + b"\x02")
-
-    await raw_data_store.create_tree(store_id=store_id_1, status=Status.COMMITTED)
-    await raw_data_store.create_tree(store_id=store_id_2, status=Status.COMMITTED)
-
-    key_offset = 1000
-    total_keys_per_store = 500
-    assert common_keys_count < key_offset
-    common_keys = {key.to_bytes(4, byteorder="big") for key in range(common_keys_count)}
-    unique_keys_1 = {
-        (key + key_offset).to_bytes(4, byteorder="big") for key in range(total_keys_per_store - common_keys_count)
-    }
-    unique_keys_2 = {
-        (key + (2 * key_offset)).to_bytes(4, byteorder="big") for key in range(total_keys_per_store - common_keys_count)
-    }
-
-    batch1 = [{"action": "insert", "key": key, "value": key} for key in common_keys.union(unique_keys_1)]
-    batch2 = [{"action": "insert", "key": key, "value": key} for key in common_keys.union(unique_keys_2)]
-
-    await raw_data_store.insert_batch(store_id_1, batch1, status=Status.COMMITTED)
-    await raw_data_store.insert_batch(store_id_2, batch2, status=Status.COMMITTED)
-
-    await raw_data_store.delete_store_data(store_id_1)
-    async with raw_data_store.db_wrapper.reader() as reader:
-        result = await reader.execute("SELECT * FROM node")
-        nodes = await result.fetchall()
-
-    keys = {node["key"] for node in nodes if node["key"] is not None}
-    # Since one store got all its keys deleted, we're left only with the keys of the other store.
-    assert len(keys) == total_keys_per_store
-    assert keys.intersection(unique_keys_1) == set()
-    assert keys.symmetric_difference(common_keys.union(unique_keys_2)) == set()
-
-
-@pytest.mark.anyio
-@pytest.mark.parametrize("pending_status", [Status.PENDING, Status.PENDING_BATCH])
-async def test_delete_store_data_protects_pending_roots(raw_data_store: DataStore, pending_status: Status) -> None:
-    num_stores = 5
-    total_keys = 15
-    store_ids = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_stores)]
-    for store_id in store_ids:
-        await raw_data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
-    original_keys = [key.to_bytes(4, byteorder="big") for key in range(total_keys)]
-    batches = []
-    keys_per_pending_root = 2
-
-    for i in range(num_stores - 1):
-        start_index = i * keys_per_pending_root
-        end_index = (i + 1) * keys_per_pending_root
-        batch = [{"action": "insert", "key": key, "value": key} for key in original_keys[start_index:end_index]]
-        batches.append(batch)
-    for store_id, batch in zip(store_ids, batches):
-        await raw_data_store.insert_batch(store_id, batch, status=pending_status)
-
-    store_id = store_ids[-1]
-    batch = [{"action": "insert", "key": key, "value": key} for key in original_keys]
-    await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED)
-
-    async with raw_data_store.db_wrapper.reader() as reader:
-        result = await reader.execute("SELECT * FROM node")
-        nodes = await result.fetchall()
-
-    keys = {node["key"] for node in nodes if node["key"] is not None}
-    assert keys == set(original_keys)
-
-    await raw_data_store.delete_store_data(store_id)
-    async with raw_data_store.db_wrapper.reader() as reader:
-        result = await reader.execute("SELECT * FROM node")
-        nodes = await result.fetchall()
-
-    keys = {node["key"] for node in nodes if node["key"] is not None}
-    assert keys == set(original_keys[: (num_stores - 1) * keys_per_pending_root])
-
-    for index in range(num_stores - 1):
-        store_id = store_ids[index]
-        root = await raw_data_store.get_pending_root(store_id)
-        assert root is not None
-        await raw_data_store.change_root_status(root, Status.COMMITTED)
-        kv = await raw_data_store.get_keys_values(store_id=store_id)
-        start_index = index * keys_per_pending_root
-        end_index = (index + 1) * keys_per_pending_root
-        assert {pair.key for pair in kv} == set(original_keys[start_index:end_index])
-
-
 @pytest.mark.anyio
 @boolean_datacases(name="group_files_by_store", true="group by singleton", false="don't group by singleton")
 @pytest.mark.parametrize("max_full_files", [1, 2, 5])
@@ -1843,7 +1715,6 @@ async def test_insert_from_delta_file(
     group_files_by_store: bool,
     max_full_files: int,
 ) -> None:
-    await data_store.create_tree(store_id=store_id, status=Status.COMMITTED)
     num_files = 5
     for generation in range(num_files):
         key = generation.to_bytes(4, byteorder="big")
@@ -1854,30 +1725,34 @@
             store_id=store_id,
             status=Status.COMMITTED,
         )
+        await data_store.add_node_hashes(store_id)

     root = await data_store.get_tree_root(store_id=store_id)
-    assert root.generation == num_files + 1
+    assert root.generation == num_files
     root_hashes = []

     tmp_path_1 = tmp_path.joinpath("1")
     tmp_path_2 = tmp_path.joinpath("2")

-    for generation in range(1, num_files + 2):
+    for generation in range(1, num_files + 1):
         root = await data_store.get_tree_root(store_id=store_id, generation=generation)
         await write_files_for_root(data_store, store_id, root, tmp_path_1, 0, False, group_files_by_store)
         root_hashes.append(bytes32.zeros if root.node_hash is None else root.node_hash)
     store_path = tmp_path_1.joinpath(f"{store_id}") if group_files_by_store else tmp_path_1
     with os.scandir(store_path) as entries:
         filenames = {entry.name for entry in entries}
-        assert len(filenames) == 2 * (num_files + 1)
+        assert len(filenames) == 2 * num_files
     for filename in filenames:
         if "full" in filename:
             store_path.joinpath(filename).unlink()
     with os.scandir(store_path) as entries:
         filenames = {entry.name for entry in entries}
-        assert len(filenames) == num_files + 1
+        assert len(filenames) == num_files
     kv_before = await data_store.get_keys_values(store_id=store_id)
     await data_store.rollback_to_generation(store_id, 0)
+    with contextlib.suppress(FileNotFoundError):
+        shutil.rmtree(data_store.merkle_blobs_path)
+
     root = await data_store.get_tree_root(store_id=store_id)
     assert root.generation == 0
     os.rename(store_path, tmp_path_2)
@@ -1950,12 +1825,13 @@ async def test_insert_from_delta_file(
     assert success

     root = await data_store.get_tree_root(store_id=store_id)
-    assert root.generation == num_files + 1
+    assert root.generation == num_files
     with os.scandir(store_path) as entries:
         filenames = {entry.name for entry in entries}
-        assert len(filenames) == num_files + 1 + max_full_files  # 6 deltas and max_full_files full files
+        assert len(filenames) == num_files + max_full_files - 1
     kv = await data_store.get_keys_values(store_id=store_id)
-    assert kv == kv_before
+    # order agnostic comparison of the list
+    assert set(kv) == set(kv_before)


 @pytest.mark.anyio
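The rollback paths added in this test (and again further below) wipe the on-disk merkle blob cache with shutil.rmtree under contextlib.suppress(FileNotFoundError), making the cleanup a no-op when the directory was never created. The idiom in isolation:

```python
import contextlib
import shutil
import tempfile
from pathlib import Path

base = Path(tempfile.mkdtemp())
missing = base / "merkle-blobs"  # never created

# Without suppress(), rmtree on a missing path raises FileNotFoundError.
with contextlib.suppress(FileNotFoundError):
    shutil.rmtree(missing)

shutil.rmtree(base)  # tidy up the real temporary directory
```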
@@ -1993,7 +1869,7 @@ async def test_get_node_by_key_with_overlapping_keys(raw_data_store: DataStore)
             if random.randint(0, 4) == 0:
                 batch = [{"action": "delete", "key": key}]
                 await raw_data_store.insert_batch(store_id, batch, status=Status.COMMITTED)
-                with pytest.raises(KeyNotFoundError, match=f"Key not found: {key.hex()}"):
+                with pytest.raises(chia_rs.datalayer.UnknownKeyError):
                     await raw_data_store.get_node_by_key(store_id=store_id, key=key)


@@ -2013,6 +1889,7 @@ async def test_insert_from_delta_file_correct_file_exists(
             store_id=store_id,
             status=Status.COMMITTED,
         )
+        await data_store.add_node_hashes(store_id)

     root = await data_store.get_tree_root(store_id=store_id)
     assert root.generation == num_files + 1
@@ -2023,18 +1900,20 @@
         root_hashes.append(bytes32.zeros if root.node_hash is None else root.node_hash)
     store_path = tmp_path.joinpath(f"{store_id}") if group_files_by_store else tmp_path
     with os.scandir(store_path) as entries:
-        filenames = {entry.name for entry in entries}
+        filenames = {entry.name for entry in entries if entry.name.endswith(".dat")}
         assert len(filenames) == 2 * (num_files + 1)
     for filename in filenames:
         if "full" in filename:
             store_path.joinpath(filename).unlink()
     with os.scandir(store_path) as entries:
-        filenames = {entry.name for entry in entries}
+        filenames = {entry.name for entry in entries if entry.name.endswith(".dat")}
         assert len(filenames) == num_files + 1
     kv_before = await data_store.get_keys_values(store_id=store_id)
     await data_store.rollback_to_generation(store_id, 0)
     root = await data_store.get_tree_root(store_id=store_id)
     assert root.generation == 0
+    with contextlib.suppress(FileNotFoundError):
+        shutil.rmtree(data_store.merkle_blobs_path)

     sinfo = ServerInfo("http://127.0.0.1/8003", 0, 0)
     success = await insert_from_delta_file(
@@ -2056,10 +1935,11 @@
     root = await data_store.get_tree_root(store_id=store_id)
     assert root.generation == num_files + 1
     with os.scandir(store_path) as entries:
-        filenames = {entry.name for entry in entries}
+        filenames = {entry.name for entry in entries if entry.name.endswith(".dat")}
         assert len(filenames) == num_files + 2  # 1 full and 6 deltas
     kv = await data_store.get_keys_values(store_id=store_id)
-    assert kv == kv_before
+    # order agnostic comparison of the list
+    assert set(kv) == set(kv_before)


 @pytest.mark.anyio
@@ -2079,6 +1959,7 @@ async def test_insert_from_delta_file_incorrect_file_exists(
         store_id=store_id,
         status=Status.COMMITTED,
     )
+    await data_store.add_node_hashes(store_id)

     root = await data_store.get_tree_root(store_id=store_id)
     assert root.generation == 2
@@ -2087,7 +1968,7 @@ async def test_insert_from_delta_file_incorrect_file_exists(
     incorrect_root_hash = bytes32([0] * 31 + [1])
     store_path = tmp_path.joinpath(f"{store_id}") if group_files_by_store else tmp_path
     with os.scandir(store_path) as entries:
-        filenames = [entry.name for entry in entries]
+        filenames = [entry.name for entry in entries if entry.name.endswith(".dat")]
         assert len(filenames) == 2
     os.rename(
         store_path.joinpath(filenames[0]),
@@ -2119,7 +2000,7 @@ async def test_insert_from_delta_file_incorrect_file_exists(
     root = await data_store.get_tree_root(store_id=store_id)
     assert root.generation == 1
     with os.scandir(store_path) as entries:
-        filenames = [entry.name for entry in entries]
+        filenames = [entry.name for entry in entries if entry.name.endswith(".dat")]
         assert len(filenames) == 0


@@ -2130,7 +2011,7 @@ async def test_insert_key_already_present(data_store: DataStore, store_id: bytes
     await data_store.insert(
         key=key, value=value, store_id=store_id, reference_node_hash=None, side=None, status=Status.COMMITTED
     )
-    with pytest.raises(Exception, match=f"Key already present: {key.hex()}"):
+    with pytest.raises(KeyAlreadyPresentError):
         await data_store.insert(key=key, value=value, store_id=store_id, reference_node_hash=None, side=None)


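This hunk and the next replace matching on the exception message ("Key already present: ...") with a typed KeyAlreadyPresentError, which keeps the tests stable if the message wording ever changes. A generic sketch of the pattern with a stand-in exception class:

```python
import pytest

class KeyAlreadyPresentError(Exception):
    """Stand-in for the real exception type; illustrative only."""

def insert(store: dict[bytes, bytes], key: bytes, value: bytes) -> None:
    if key in store:
        raise KeyAlreadyPresentError(key.hex())
    store[key] = value

def test_duplicate_insert_raises() -> None:
    store: dict[bytes, bytes] = {}
    insert(store, b"foo", b"bar")
    with pytest.raises(KeyAlreadyPresentError):  # match on type, not message text
        insert(store, b"foo", b"bar")
```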
@@ -2145,7 +2026,7 @@ async def test_batch_insert_key_already_present(
     value = b"bar"
     changelist = [{"action": "insert", "key": key, "value": value}]
     await data_store.insert_batch(store_id, changelist, Status.COMMITTED, use_batch_autoinsert)
-    with pytest.raises(Exception, match=f"Key already present: {key.hex()}"):
+    with pytest.raises(KeyAlreadyPresentError):
         await data_store.insert_batch(store_id, changelist, Status.COMMITTED, use_batch_autoinsert)


@@ -2188,7 +2069,7 @@ async def test_update_keys(data_store: DataStore, store_id: bytes32, use_upsert:


 @pytest.mark.anyio
-async def test_migration_unknown_version(data_store: DataStore) -> None:
+async def test_migration_unknown_version(data_store: DataStore, tmp_path: Path) -> None:
     async with data_store.db_wrapper.writer() as writer:
         await writer.execute(
             "INSERT INTO schema(version_id) VALUES(:version_id)",
@@ -2197,228 +2078,284 @@ async def test_migration_unknown_version(data_store: DataStore) -> None:
             },
         )
     with pytest.raises(Exception, match="Unknown version"):
-        await data_store.migrate_db()
-
-
-async def _check_ancestors(
-    data_store: DataStore, store_id: bytes32, root_hash: bytes32
-) -> dict[bytes32, Optional[bytes32]]:
-    ancestors: dict[bytes32, Optional[bytes32]] = {}
-    root_node: Node = await data_store.get_node(root_hash)
-    queue: list[Node] = [root_node]
-
-    while queue:
-        node = queue.pop(0)
-        if isinstance(node, InternalNode):
-            left_node = await data_store.get_node(node.left_hash)
-            right_node = await data_store.get_node(node.right_hash)
-            ancestors[left_node.hash] = node.hash
-            ancestors[right_node.hash] = node.hash
-            queue.append(left_node)
-            queue.append(right_node)
-
-    ancestors[root_hash] = None
-    for node_hash, ancestor_hash in ancestors.items():
-        ancestor_node = await data_store._get_one_ancestor(node_hash, store_id)
-        if ancestor_hash is None:
-            assert ancestor_node is None
-        else:
-            assert ancestor_node is not None
-            assert ancestor_node.hash == ancestor_hash
+        await data_store.migrate_db(tmp_path)

-    return ancestors
+
+@boolean_datacases(name="group_files_by_store", false="group by singleton", true="don't group by singleton")
+@pytest.mark.anyio
+async def test_migration(
+    data_store: DataStore,
+    store_id: bytes32,
+    group_files_by_store: bool,
+    tmp_path: Path,
+) -> None:
+    num_batches = 10
+    num_ops_per_batch = 100
+    keys: list[bytes] = []
+    counter = 0
+    random = Random()
+    random.seed(100, version=2)
+
+    for batch in range(num_batches):
+        changelist: list[dict[str, Any]] = []
+        for operation in range(num_ops_per_batch):
+            if random.randint(0, 4) > 0 or len(keys) == 0:
+                key = counter.to_bytes(4, byteorder="big")
+                value = (2 * counter).to_bytes(4, byteorder="big")
+                keys.append(key)
+                changelist.append({"action": "insert", "key": key, "value": value})
+            else:
+                key = random.choice(keys)
+                keys.remove(key)
+                changelist.append({"action": "delete", "key": key})
+            counter += 1
+        await data_store.insert_batch(store_id, changelist, status=Status.COMMITTED)
+        root = await data_store.get_tree_root(store_id)
+        await data_store.add_node_hashes(store_id)
+        await write_files_for_root(data_store, store_id, root, tmp_path, 0, group_by_store=group_files_by_store)
+
+    kv_before = await data_store.get_keys_values(store_id=store_id)
+    async with data_store.db_wrapper.writer(foreign_key_enforcement_enabled=False) as writer:
+        tables = [table for table in table_columns.keys() if table != "root"]
+        for table in tables:
+            await writer.execute(f"DELETE FROM {table}")
+
+    with contextlib.suppress(FileNotFoundError):
+        shutil.rmtree(data_store.merkle_blobs_path)
+    with contextlib.suppress(FileNotFoundError):
+        shutil.rmtree(data_store.key_value_blobs_path)
+
+    data_store.recent_merkle_blobs = LRUCache(capacity=128)
+    assert await data_store.get_keys_values(store_id=store_id) == []
+    await data_store.migrate_db(tmp_path)
+    # order agnostic comparison of the list
+    assert set(await data_store.get_keys_values(store_id=store_id)) == set(kv_before)


 @pytest.mark.anyio
-async def test_build_ancestor_table(data_store: DataStore, store_id: bytes32) -> None:
-    num_values = 1000
+@pytest.mark.parametrize("num_keys", [10, 1000])
+async def test_get_existing_hashes(
+    data_store: DataStore,
+    store_id: bytes32,
+    num_keys: int,
+) -> None:
     changelist: list[dict[str, Any]] = []
-    for value in range(num_values):
-        value_bytes = value.to_bytes(4, byteorder="big")
-        changelist.append({"action": "upsert", "key": value_bytes, "value": value_bytes})
-    await data_store.insert_batch(
-        store_id=store_id,
-        changelist=changelist,
-        status=Status.PENDING,
-    )
+    for i in range(num_keys):
+        key = i.to_bytes(4, byteorder="big")
+        value = (2 * i).to_bytes(4, byteorder="big")
+        changelist.append({"action": "insert", "key": key, "value": value})
+    await data_store.insert_batch(store_id, changelist, status=Status.COMMITTED)
+    await data_store.add_node_hashes(store_id)

-    pending_root = await data_store.get_pending_root(store_id=store_id)
-    assert pending_root is not None
-    assert pending_root.node_hash is not None
-    await data_store.change_root_status(pending_root, Status.COMMITTED)
-    await data_store.build_ancestor_table_for_latest_root(store_id=store_id)
-
-    assert pending_root.node_hash is not None
-    await _check_ancestors(data_store, store_id, pending_root.node_hash)
+    root = await data_store.get_tree_root(store_id=store_id)
+    merkle_blob = await data_store.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
+    hash_to_index = merkle_blob.get_hashes_indexes()
+    existing_hashes = list(hash_to_index.keys())
+    not_existing_hashes = [bytes32(i.to_bytes(32, byteorder="big")) for i in range(num_keys)]
+    result = await data_store.get_existing_hashes(existing_hashes + not_existing_hashes, store_id)
+    assert result == set(existing_hashes)


 @pytest.mark.anyio
-async def test_sparse_ancestor_table(data_store: DataStore, store_id: bytes32) -> None:
-    num_values = 100
-    for value in range(num_values):
-        value_bytes = value.to_bytes(4, byteorder="big")
-        await data_store.autoinsert(
-            key=value_bytes,
-            value=value_bytes,
-            store_id=store_id,
-            status=Status.COMMITTED,
-        )
-    root = await data_store.get_tree_root(store_id=store_id)
-    assert root.node_hash is not None
-    ancestors = await _check_ancestors(data_store, store_id, root.node_hash)
+@pytest.mark.parametrize(argnames="size_offset", argvalues=[-1, 0, 1])
+async def test_basic_key_value_db_vs_disk_cutoff(
+    data_store: DataStore,
+    store_id: bytes32,
+    seeded_random: random.Random,
+    size_offset: int,
+) -> None:
+    size = data_store.prefer_db_kv_blob_length + size_offset

-    # Check the ancestor table is sparse
-    root_generation = root.generation
-    current_generation_count = 0
-    previous_generation_count = 0
-    for node_hash, ancestor_hash in ancestors.items():
-        async with data_store.db_wrapper.reader() as reader:
-            if ancestor_hash is not None:
-                cursor = await reader.execute(
-                    "SELECT MAX(generation) AS generation FROM ancestors WHERE hash == :hash AND ancestor == :ancestor",
-                    {"hash": node_hash, "ancestor": ancestor_hash},
-                )
-            else:
-                cursor = await reader.execute(
-                    "SELECT MAX(generation) AS generation FROM ancestors WHERE hash == :hash AND ancestor IS NULL",
-                    {"hash": node_hash},
-                )
+    blob = bytes(seeded_random.getrandbits(8) for _ in range(size))
+    blob_hash = bytes32(sha256(blob).digest())
+    async with data_store.db_wrapper.writer() as writer:
+        with data_store.manage_kv_files(store_id):
+            await data_store.add_kvid(blob=blob, store_id=store_id, writer=writer)
+
+    file_exists = data_store.get_key_value_path(store_id=store_id, blob_hash=blob_hash).exists()
+    async with data_store.db_wrapper.writer() as writer:
+        async with writer.execute(
+            "SELECT blob FROM ids WHERE hash = :blob_hash",
+            {"blob_hash": blob_hash},
+        ) as cursor:
             row = await cursor.fetchone()
             assert row is not None
-            generation = row["generation"]
-            assert generation <= root_generation
-            if generation == root_generation:
-                current_generation_count += 1
-            else:
-                previous_generation_count += 1
+            db_blob: Optional[bytes] = row["blob"]

-    assert current_generation_count == 15
-    assert previous_generation_count == 184
+    if size_offset <= 0:
+        assert not file_exists
+        assert db_blob == blob
+    else:
+        assert file_exists
+        assert db_blob is None


-async def get_all_nodes(data_store: DataStore, store_id: bytes32) -> list[Node]:
-    root = await data_store.get_tree_root(store_id)
-    assert root.node_hash is not None
-    root_node = await data_store.get_node(root.node_hash)
-    nodes: list[Node] = []
-    queue: list[Node] = [root_node]
+@pytest.mark.anyio
+@pytest.mark.parametrize(argnames="size_offset", argvalues=[-1, 0, 1])
+@pytest.mark.parametrize(argnames="limit_change", argvalues=[-2, -1, 1, 2])
+async def test_changing_key_value_db_vs_disk_cutoff(
+    data_store: DataStore,
+    store_id: bytes32,
+    seeded_random: random.Random,
+    size_offset: int,
+    limit_change: int,
+) -> None:
+    size = data_store.prefer_db_kv_blob_length + size_offset

-    while len(queue) > 0:
-        node = queue.pop(0)
-        nodes.append(node)
-        if isinstance(node, InternalNode):
-            left_node = await data_store.get_node(node.left_hash)
-            right_node = await data_store.get_node(node.right_hash)
-            queue.append(left_node)
-            queue.append(right_node)
+    blob = bytes(seeded_random.getrandbits(8) for _ in range(size))
+    async with data_store.db_wrapper.writer() as writer:
+        with data_store.manage_kv_files(store_id):
+            kv_id = await data_store.add_kvid(blob=blob, store_id=store_id, writer=writer)
+
+    data_store.prefer_db_kv_blob_length += limit_change
+    retrieved_blob = await data_store.get_blob_from_kvid(kv_id=kv_id, store_id=store_id)

-    return nodes
+    assert blob == retrieved_blob


 @pytest.mark.anyio
-async def test_get_nodes(data_store: DataStore, store_id: bytes32) -> None:
-    num_values = 50
-    changelist: list[dict[str, Any]] = []
+async def test_get_keys_both_disk_and_db(
+    data_store: DataStore,
+    store_id: bytes32,
+    seeded_random: random.Random,
+) -> None:
+    inserted_keys: set[bytes] = set()

-    for value in range(num_values):
-        value_bytes = value.to_bytes(4, byteorder="big")
-        changelist.append({"action": "upsert", "key": value_bytes, "value": value_bytes})
-    await data_store.insert_batch(
-        store_id=store_id,
-        changelist=changelist,
-        status=Status.COMMITTED,
-    )
+    for size_offset in [-1, 0, 1]:
+        size = data_store.prefer_db_kv_blob_length + size_offset
+
+        blob = bytes(seeded_random.getrandbits(8) for _ in range(size))
+        await data_store.insert(key=blob, value=b"", store_id=store_id, status=Status.COMMITTED)
+        inserted_keys.add(blob)

-    expected_nodes = await get_all_nodes(data_store, store_id)
-    nodes = await data_store.get_nodes([node.hash for node in expected_nodes])
-    assert nodes == expected_nodes
+    retrieved_keys = set(await data_store.get_keys(store_id=store_id))

-    node_hash = bytes32.zeros
-    node_hash_2 = bytes32([0] * 31 + [1])
-    with pytest.raises(Exception, match=f"^Nodes not found for hashes: {node_hash.hex()}, {node_hash_2.hex()}"):
-        await data_store.get_nodes([node_hash, node_hash_2] + [node.hash for node in expected_nodes])
+    assert retrieved_keys == inserted_keys


 @pytest.mark.anyio
-@pytest.mark.parametrize("pre", [0, 2048])
-@pytest.mark.parametrize("batch_size", [25, 100, 500])
-async def test_get_leaf_at_minimum_height(
+async def test_get_keys_values_both_disk_and_db(
     data_store: DataStore,
     store_id: bytes32,
-    pre: int,
-    batch_size: int,
+    seeded_random: random.Random,
 ) -> None:
-    num_values = 1000
-    value_offset = 1000000
-    all_min_leafs: set[TerminalNode] = set()
+    inserted_keys_values: dict[bytes, bytes] = {}
+
+    for size_offset in [-1, 0, 1]:
+        size = data_store.prefer_db_kv_blob_length + size_offset
+
+        key = bytes(seeded_random.getrandbits(8) for _ in range(size))
+        value = bytes(seeded_random.getrandbits(8) for _ in range(size))
+        await data_store.insert(key=key, value=value, store_id=store_id, status=Status.COMMITTED)
+        inserted_keys_values[key] = value
+
+    terminal_nodes = await data_store.get_keys_values(store_id=store_id)
+    retrieved_keys_values = {node.key: node.value for node in terminal_nodes}
+
+    assert retrieved_keys_values == inserted_keys_values
+

-    if pre > 0:
-        # This builds a complete binary tree, in order to test more than one batch in the queue before finding the leaf
+@pytest.mark.anyio
+@boolean_datacases(name="success", false="invalid file", true="valid file")
+async def test_db_data_insert_from_file(
+    data_store: DataStore,
+    store_id: bytes32,
+    tmp_path: Path,
+    seeded_random: random.Random,
+    success: bool,
+) -> None:
+    num_keys = 1000
+    db_uri = generate_in_memory_db_uri()
+
+    async with DataStore.managed(
+        database=db_uri,
+        uri=True,
+        merkle_blobs_path=tmp_path.joinpath("merkle-blobs-tmp"),
+        key_value_blobs_path=tmp_path.joinpath("key-value-blobs-tmp"),
+    ) as tmp_data_store:
+        await tmp_data_store.create_tree(store_id, status=Status.COMMITTED)
         changelist: list[dict[str, Any]] = []
+        for _ in range(num_keys):
+            use_file = seeded_random.choice([True, False])
+            assert tmp_data_store.prefer_db_kv_blob_length > 7
+            size = tmp_data_store.prefer_db_kv_blob_length + 1 if use_file else 8
+            key = seeded_random.randbytes(size)
+            value = seeded_random.randbytes(size)
+            changelist.append({"action": "insert", "key": key, "value": value})
+
+        await tmp_data_store.insert_batch(store_id, changelist, status=Status.COMMITTED)
+        root = await tmp_data_store.get_tree_root(store_id)
+        files_path = tmp_path.joinpath("files")
+        await write_files_for_root(tmp_data_store, store_id, root, files_path, 1000)
+        assert root.node_hash is not None
+        filename = get_delta_filename_path(files_path, store_id, root.node_hash, 1)
+        assert filename.exists()

-        for value in range(pre):
-            value_bytes = (value * value).to_bytes(8, byteorder="big")
-            changelist.append({"action": "upsert", "key": value_bytes, "value": value_bytes})
-        await data_store.insert_batch(
-            store_id=store_id,
-            changelist=changelist,
-            status=Status.COMMITTED,
-        )
+    root_hash = bytes32([0] * 31 + [1]) if not success else root.node_hash
+    sinfo = ServerInfo("http://127.0.0.1/8003", 0, 0)

-    for value in range(num_values):
-        value_bytes = value.to_bytes(4, byteorder="big")
-        # Use autoinsert instead of `insert_batch` to get a more randomly shaped tree
-        await data_store.autoinsert(
-            key=value_bytes,
-            value=value_bytes,
-            store_id=store_id,
-            status=Status.COMMITTED,
-        )
+    if not success:
+        target_filename_path = get_delta_filename_path(files_path, store_id, root_hash, 1)
+        shutil.copyfile(filename, target_filename_path)
+        assert target_filename_path.exists()

-        if (value + 1) % batch_size == 0:
-            hash_to_parent: dict[bytes32, InternalNode] = {}
-            root = await data_store.get_tree_root(store_id)
-            assert root.node_hash is not None
-            min_leaf = await data_store.get_leaf_at_minimum_height(root.node_hash, hash_to_parent)
-            all_nodes = await get_all_nodes(data_store, store_id)
-            heights: dict[bytes32, int] = {}
-            heights[root.node_hash] = 0
-            min_leaf_height = None
-
-            for node in all_nodes:
-                if isinstance(node, InternalNode):
-                    heights[node.left_hash] = heights[node.hash] + 1
-                    heights[node.right_hash] = heights[node.hash] + 1
-                else:
-                    if min_leaf_height is not None:
-                        min_leaf_height = min(min_leaf_height, heights[node.hash])
-                    else:
-                        min_leaf_height = heights[node.hash]
-
-            assert min_leaf_height is not None
-            if pre > 0:
-                assert min_leaf_height >= 11
-            for node in all_nodes:
-                if isinstance(node, TerminalNode):
-                    assert node == min_leaf
-                    assert heights[min_leaf.hash] == min_leaf_height
-                    break
-                if node.left_hash in hash_to_parent:
-                    assert hash_to_parent[node.left_hash] == node
-                if node.right_hash in hash_to_parent:
-                    assert hash_to_parent[node.right_hash] == node
-
-            # Push down the min height leaf, so on the next iteration we get a different leaf
-            pushdown_height = 20
-            for repeat in range(pushdown_height):
-                value_bytes = (value + (repeat + 1) * value_offset).to_bytes(4, byteorder="big")
-                await data_store.insert(
-                    key=value_bytes,
-                    value=value_bytes,
-                    store_id=store_id,
-                    reference_node_hash=min_leaf.hash,
-                    side=Side.RIGHT,
-                    status=Status.COMMITTED,
-                )
-            assert min_leaf not in all_min_leafs
-            all_min_leafs.add(min_leaf)
+    keys_value_path = data_store.key_value_blobs_path.joinpath(store_id.hex())
+    assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) == 0
+
+    is_success = await insert_from_delta_file(
+        data_store=data_store,
+        store_id=store_id,
+        existing_generation=0,
+        target_generation=1,
+        root_hashes=[root_hash],
+        server_info=sinfo,
+        client_foldername=files_path,
+        timeout=aiohttp.ClientTimeout(total=15, sock_connect=5),
+        log=log,
+        proxy_url="",
+        downloader=None,
+    )
+    assert is_success == success
+
+    async with data_store.db_wrapper.reader() as reader:
+        async with reader.execute("SELECT COUNT(*) FROM ids") as cursor:
+            row_count = await cursor.fetchone()
+            assert row_count is not None
+            if success:
+                assert row_count[0] > 0
+            else:
+                assert row_count[0] == 0
+
+    if success:
+        assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) > 0
+    else:
+        assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) == 0
+
+
+@pytest.mark.anyio
+async def test_manage_kv_files(
+    data_store: DataStore,
+    store_id: bytes32,
+    seeded_random: random.Random,
+) -> None:
+    num_keys = 1000
+    num_files = 0
+    keys_value_path = data_store.key_value_blobs_path.joinpath(store_id.hex())
+
+    with pytest.raises(Exception, match="Test exception"):
+        async with data_store.db_wrapper.writer() as writer:
+            with data_store.manage_kv_files(store_id):
+                for _ in range(num_keys):
+                    use_file = seeded_random.choice([True, False])
+                    assert data_store.prefer_db_kv_blob_length > 7
+                    size = data_store.prefer_db_kv_blob_length + 1 if use_file else 8
+                    key = seeded_random.randbytes(size)
+                    value = seeded_random.randbytes(size)
+                    await data_store.add_key_value(key, value, store_id, writer)
+                    num_files += 2 * use_file
+
+                assert num_files > 0
+                assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) == num_files
+                raise Exception("Test exception")
+
+    assert sum(1 for path in keys_value_path.rglob("*") if path.is_file()) == 0
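The key-value blob tests closing this hunk all revolve around one rule visible in the added assertions: blobs at or below prefer_db_kv_blob_length stay in the database's ids table, while strictly larger ones land in a file on disk, which is exactly what the size_offset in {-1, 0, 1} parametrization probes. A standalone restatement of that cutoff (the limit value here is illustrative, not chia's default):

```python
# Standalone restatement of the cutoff the tests probe with
# size_offset in (-1, 0, 1); the limit value is illustrative.
PREFER_DB_KV_BLOB_LENGTH = 1024

def stored_on_disk(blob: bytes, limit: int = PREFER_DB_KV_BLOB_LENGTH) -> bool:
    return len(blob) > limit

assert not stored_on_disk(b"x" * (PREFER_DB_KV_BLOB_LENGTH - 1))  # size_offset == -1 -> db
assert not stored_on_disk(b"x" * PREFER_DB_KV_BLOB_LENGTH)        # size_offset == 0 -> db
assert stored_on_disk(b"x" * (PREFER_DB_KV_BLOB_LENGTH + 1))      # size_offset == 1 -> disk
```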