chia-blockchain 2.5.6rc2__py3-none-any.whl → 2.5.7rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (371)
  1. chia/_tests/blockchain/blockchain_test_utils.py +6 -7
  2. chia/_tests/blockchain/test_augmented_chain.py +4 -3
  3. chia/_tests/blockchain/test_blockchain.py +10 -5
  4. chia/_tests/clvm/coin_store.py +1 -1
  5. chia/_tests/cmds/cmd_test_utils.py +84 -97
  6. chia/_tests/cmds/test_dev_gh.py +1 -1
  7. chia/_tests/cmds/test_farm_cmd.py +56 -2
  8. chia/_tests/cmds/wallet/test_consts.py +3 -1
  9. chia/_tests/cmds/wallet/test_did.py +3 -8
  10. chia/_tests/cmds/wallet/test_nft.py +6 -6
  11. chia/_tests/cmds/wallet/test_notifications.py +39 -21
  12. chia/_tests/cmds/wallet/test_vcs.py +2 -1
  13. chia/_tests/cmds/wallet/test_wallet.py +160 -136
  14. chia/_tests/conftest.py +51 -26
  15. chia/_tests/core/cmds/test_wallet.py +4 -3
  16. chia/_tests/core/consensus/test_pot_iterations.py +71 -24
  17. chia/_tests/core/custom_types/test_proof_of_space.py +60 -30
  18. chia/_tests/core/custom_types/test_spend_bundle.py +1 -4
  19. chia/_tests/core/data_layer/conftest.py +7 -2
  20. chia/_tests/core/data_layer/old_format/__init__.py +0 -0
  21. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-005876c1cdc4d5f1726551b207b9f63efc9cd2f72df80a3a26a1ba73d40d6745-delta-23-v1.0.dat +0 -0
  22. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-005876c1cdc4d5f1726551b207b9f63efc9cd2f72df80a3a26a1ba73d40d6745-full-23-v1.0.dat +0 -0
  23. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-01b36e72a975cdc00d6514eea81668d19e8ea3150217ae98cb3361688a016fab-delta-9-v1.0.dat +0 -0
  24. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-01b36e72a975cdc00d6514eea81668d19e8ea3150217ae98cb3361688a016fab-full-9-v1.0.dat +0 -0
  25. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-06147c3b12d73e9b83b686a8c10b4a36a513c8a93c0ff99ae197f06326278be9-delta-5-v1.0.dat +0 -0
  26. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-06147c3b12d73e9b83b686a8c10b4a36a513c8a93c0ff99ae197f06326278be9-full-5-v1.0.dat +0 -0
  27. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-073c051a5934ad3b8db39eee2189e4300e55f48aaa17ff4ae30eeae088ff544a-delta-22-v1.0.dat +0 -0
  28. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-073c051a5934ad3b8db39eee2189e4300e55f48aaa17ff4ae30eeae088ff544a-full-22-v1.0.dat +0 -0
  29. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-0cc077559b9c7b4aefe8f8f591c195e0779bebdf89f2ad8285a00ea5f859d965-delta-1-v1.0.dat +0 -0
  30. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-0cc077559b9c7b4aefe8f8f591c195e0779bebdf89f2ad8285a00ea5f859d965-full-1-v1.0.dat +0 -0
  31. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-16377275567b723b20936d3f1ec0a2fd83f6ac379b922351a5e4c54949069f3b-delta-2-v1.0.dat +0 -0
  32. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-16377275567b723b20936d3f1ec0a2fd83f6ac379b922351a5e4c54949069f3b-full-2-v1.0.dat +0 -0
  33. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-1cb824a7a5f02cd30ac6c38e8f6216780d9bfa2d24811d282a368dcd541438a7-delta-29-v1.0.dat +0 -0
  34. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-1cb824a7a5f02cd30ac6c38e8f6216780d9bfa2d24811d282a368dcd541438a7-full-29-v1.0.dat +0 -0
  35. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-27b89dc4809ebc5a3b87757d35e95e2761d978cf121e44fa2773a5c06e4cc7b5-delta-28-v1.0.dat +0 -0
  36. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-27b89dc4809ebc5a3b87757d35e95e2761d978cf121e44fa2773a5c06e4cc7b5-full-28-v1.0.dat +0 -0
  37. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-28a6b7c134abfaeb0ab58a018313f6c87a61a40a4d9ec9bedf53aa1d12f3ee37-delta-7-v1.0.dat +0 -0
  38. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-28a6b7c134abfaeb0ab58a018313f6c87a61a40a4d9ec9bedf53aa1d12f3ee37-full-7-v1.0.dat +0 -0
  39. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-30a6bfe7cecbeda259a295dc6de3a436357f52388c3b03d86901e7da68565aeb-delta-19-v1.0.dat +0 -0
  40. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-30a6bfe7cecbeda259a295dc6de3a436357f52388c3b03d86901e7da68565aeb-full-19-v1.0.dat +0 -0
  41. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-343a2bf9add798e3ac2e6a571823cf9fa7e8a1bed532143354ead2648bd036ef-delta-10-v1.0.dat +0 -0
  42. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-343a2bf9add798e3ac2e6a571823cf9fa7e8a1bed532143354ead2648bd036ef-full-10-v1.0.dat +0 -0
  43. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4d90efbc1fb3df324193831ea4a57dd5e10e67d9653343eb18d178272adb0447-delta-17-v1.0.dat +0 -0
  44. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4d90efbc1fb3df324193831ea4a57dd5e10e67d9653343eb18d178272adb0447-full-17-v1.0.dat +0 -0
  45. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4dd2ea099e91635c441f40b36d3f84078a2d818d2dc601c7278e72cbdfe3eca8-delta-20-v1.0.dat +0 -0
  46. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-4dd2ea099e91635c441f40b36d3f84078a2d818d2dc601c7278e72cbdfe3eca8-full-20-v1.0.dat +0 -0
  47. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-509effbdca78639023b933ce6c08a0465fb247e1cd5329e9e9c553940e4b6e46-delta-31-v1.0.dat +0 -0
  48. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-509effbdca78639023b933ce6c08a0465fb247e1cd5329e9e9c553940e4b6e46-full-31-v1.0.dat +0 -0
  49. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5379a4d9ff29c29d1ef0906d22e82c52472753d31806189ab813c43365341b78-delta-40-v1.0.dat +0 -0
  50. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5379a4d9ff29c29d1ef0906d22e82c52472753d31806189ab813c43365341b78-full-40-v1.0.dat +0 -0
  51. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-55908eda5686a8f89e4c50672cbe893ec1734fb23449dc03325efe7c414f9aa4-delta-49-v1.0.dat +0 -0
  52. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-55908eda5686a8f89e4c50672cbe893ec1734fb23449dc03325efe7c414f9aa4-full-49-v1.0.dat +0 -0
  53. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-57cc2691fb1fb986c99a58bcb0e029d0cd0cff41553d703147c54196d7d9ca63-delta-14-v1.0.dat +0 -0
  54. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-57cc2691fb1fb986c99a58bcb0e029d0cd0cff41553d703147c54196d7d9ca63-full-14-v1.0.dat +0 -0
  55. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5943bf8ae4f5e59969d8570e4f40a8223299febdcfbcf188b3b3e2ab11044e18-delta-34-v1.0.dat +0 -0
  56. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-5943bf8ae4f5e59969d8570e4f40a8223299febdcfbcf188b3b3e2ab11044e18-full-34-v1.0.dat +0 -0
  57. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6518527b7c939bee60ce6b024cbe90d3b9d8913c56b8ce11a4df5da7ff7db1c8-delta-8-v1.0.dat +0 -0
  58. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6518527b7c939bee60ce6b024cbe90d3b9d8913c56b8ce11a4df5da7ff7db1c8-full-8-v1.0.dat +0 -0
  59. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-66ff26a26620379e14a7c91252d27ee4dbe06ad69a3a390a88642fe757f2b288-delta-45-v1.0.dat +0 -0
  60. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-66ff26a26620379e14a7c91252d27ee4dbe06ad69a3a390a88642fe757f2b288-full-45-v1.0.dat +0 -0
  61. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6bd0a508ee2c4afbe9d4daa811139fd6e54e7f4e16850cbce999fa30f8bdccd2-delta-6-v1.0.dat +0 -0
  62. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6bd0a508ee2c4afbe9d4daa811139fd6e54e7f4e16850cbce999fa30f8bdccd2-full-6-v1.0.dat +0 -0
  63. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6ce850d0d77ca743fcc2fc792747472e5d2c1c0813aa43abbb370554428fc897-delta-48-v1.0.dat +0 -0
  64. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6ce850d0d77ca743fcc2fc792747472e5d2c1c0813aa43abbb370554428fc897-full-48-v1.0.dat +0 -0
  65. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6eb4ca2e1552b156c5969396b49070eb08ad6c96b347359387519be59f7ccaed-delta-26-v1.0.dat +0 -0
  66. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-6eb4ca2e1552b156c5969396b49070eb08ad6c96b347359387519be59f7ccaed-full-26-v1.0.dat +0 -0
  67. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-71c797fb7592d3f0a5a20c79ab8497ddaa0fd9ec17712e109d25c91b3f3c76e5-delta-3-v1.0.dat +0 -0
  68. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-71c797fb7592d3f0a5a20c79ab8497ddaa0fd9ec17712e109d25c91b3f3c76e5-full-3-v1.0.dat +0 -0
  69. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-73357026053d5a4969e7a6b9aeeef91c14cc6d5f32fc700fe6d21d2a1b22496c-delta-25-v1.0.dat +0 -0
  70. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-73357026053d5a4969e7a6b9aeeef91c14cc6d5f32fc700fe6d21d2a1b22496c-full-25-v1.0.dat +0 -0
  71. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-7c897e5c46e834ced65bde7de87716acfaa5dffbdb30b5cd9377d8c319df2034-delta-35-v1.0.dat +0 -0
  72. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-7c897e5c46e834ced65bde7de87716acfaa5dffbdb30b5cd9377d8c319df2034-full-35-v1.0.dat +0 -0
  73. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-87b8394d80d08117a5a1cd04ed8a682564eab7197a2c090159863591b5108874-delta-4-v1.0.dat +0 -0
  74. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-87b8394d80d08117a5a1cd04ed8a682564eab7197a2c090159863591b5108874-full-4-v1.0.dat +0 -0
  75. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-89eb40b9cc0921c5f5c3feb20927c13a9ada5760f82d219dcee153b7d400165c-delta-41-v1.0.dat +0 -0
  76. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-89eb40b9cc0921c5f5c3feb20927c13a9ada5760f82d219dcee153b7d400165c-full-41-v1.0.dat +0 -0
  77. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8b649433156b8c924436cdec9c6de26106fd6f73a0528570f48748f7b40d7f8a-delta-21-v1.0.dat +0 -0
  78. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8b649433156b8c924436cdec9c6de26106fd6f73a0528570f48748f7b40d7f8a-full-21-v1.0.dat +0 -0
  79. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8d364023a0834c8c3077e236a465493acbf488e4f9d1f4c6cc230343c10a8f7d-delta-42-v1.0.dat +0 -0
  80. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-8d364023a0834c8c3077e236a465493acbf488e4f9d1f4c6cc230343c10a8f7d-full-42-v1.0.dat +0 -0
  81. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-925689e24a3d98d98676d816cdd8b73e7b2df057d9d4503da9b27bf91d79666c-delta-38-v1.0.dat +0 -0
  82. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-925689e24a3d98d98676d816cdd8b73e7b2df057d9d4503da9b27bf91d79666c-full-38-v1.0.dat +0 -0
  83. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-937be3d428b19f521be4f98faecc3307ae11ee731c76992f417fa4268d13859e-delta-11-v1.0.dat +0 -0
  84. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-937be3d428b19f521be4f98faecc3307ae11ee731c76992f417fa4268d13859e-full-11-v1.0.dat +0 -0
  85. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-97f34af499b79e2111fc296a598fc9654c2467ea038dfea41fd58241fb3642de-delta-32-v1.0.dat +0 -0
  86. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-97f34af499b79e2111fc296a598fc9654c2467ea038dfea41fd58241fb3642de-full-32-v1.0.dat +0 -0
  87. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-9d1b737243b8a1d0022f2b36ac53333c6280354a74d77f2a3642dcab35204e59-delta-33-v1.0.dat +0 -0
  88. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-9d1b737243b8a1d0022f2b36ac53333c6280354a74d77f2a3642dcab35204e59-full-33-v1.0.dat +0 -0
  89. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-a6663f98ef6ddf6db55f01163e34bb2e87aa82f0347e79ce31e8dbfa390c480c-delta-47-v1.0.dat +0 -0
  90. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-a6663f98ef6ddf6db55f01163e34bb2e87aa82f0347e79ce31e8dbfa390c480c-full-47-v1.0.dat +0 -0
  91. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-aa77376d1ccd3664e5c6366e010c52a978fedbf40f5ce262fee71b2e7fe0c6a9-delta-50-v1.0.dat +0 -0
  92. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-aa77376d1ccd3664e5c6366e010c52a978fedbf40f5ce262fee71b2e7fe0c6a9-full-50-v1.0.dat +0 -0
  93. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b0f28514741ed1a71f5c6544bf92f9e0e493c5f3cf28328909771d8404eff626-delta-24-v1.0.dat +0 -0
  94. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b0f28514741ed1a71f5c6544bf92f9e0e493c5f3cf28328909771d8404eff626-full-24-v1.0.dat +0 -0
  95. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b3efee5358e6eb89ab3b60db2d128d57eef39e8538fb63c5632412d4f8e7d09e-delta-44-v1.0.dat +0 -0
  96. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-b3efee5358e6eb89ab3b60db2d128d57eef39e8538fb63c5632412d4f8e7d09e-full-44-v1.0.dat +0 -0
  97. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bb0b56b6eb7acbb4e80893b04c72412fe833418232e1ed7b06d97d7a7f08b4e1-delta-16-v1.0.dat +0 -0
  98. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bb0b56b6eb7acbb4e80893b04c72412fe833418232e1ed7b06d97d7a7f08b4e1-full-16-v1.0.dat +0 -0
  99. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bc45262b757ff494b53bd2a8fba0f5511cc1f9c2a2c5360e04ea8cebbf6409df-delta-13-v1.0.dat +0 -0
  100. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bc45262b757ff494b53bd2a8fba0f5511cc1f9c2a2c5360e04ea8cebbf6409df-full-13-v1.0.dat +0 -0
  101. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bd0494ba430aff13458b557113b073d226eaf11257dfe26ff3323fa1cfe1335b-delta-39-v1.0.dat +0 -0
  102. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-bd0494ba430aff13458b557113b073d226eaf11257dfe26ff3323fa1cfe1335b-full-39-v1.0.dat +0 -0
  103. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cd04f5fbba1553fa728b4dd8131d4723aaac288e0c7dc080447fbf0872c0a6eb-delta-36-v1.0.dat +0 -0
  104. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cd04f5fbba1553fa728b4dd8131d4723aaac288e0c7dc080447fbf0872c0a6eb-full-36-v1.0.dat +0 -0
  105. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cdd2399557fb3163a848f08831fdc833703354edb19a0d32a965fdb140f160c2-delta-18-v1.0.dat +0 -0
  106. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cdd2399557fb3163a848f08831fdc833703354edb19a0d32a965fdb140f160c2-full-18-v1.0.dat +0 -0
  107. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cf7a08fca7b1332095242e4d9800f4b94a3f4eaae88fe8407da42736d54b9e18-delta-37-v1.0.dat +0 -0
  108. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-cf7a08fca7b1332095242e4d9800f4b94a3f4eaae88fe8407da42736d54b9e18-full-37-v1.0.dat +0 -0
  109. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-d1f97465a9f52187e2ef3a0d811a1258f52380a65340c55f3e8e65b92753bc13-delta-15-v1.0.dat +0 -0
  110. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-d1f97465a9f52187e2ef3a0d811a1258f52380a65340c55f3e8e65b92753bc13-full-15-v1.0.dat +0 -0
  111. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e475eccd4ee597e5ff67b1a249e37d65d6e3f754c3f0379fdb43692513588fef-delta-46-v1.0.dat +0 -0
  112. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e475eccd4ee597e5ff67b1a249e37d65d6e3f754c3f0379fdb43692513588fef-full-46-v1.0.dat +0 -0
  113. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e82e63517d78fd65b23a05c3b9a98cf905ddad7026995a238bfe634006b84cd0-delta-27-v1.0.dat +0 -0
  114. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-e82e63517d78fd65b23a05c3b9a98cf905ddad7026995a238bfe634006b84cd0-full-27-v1.0.dat +0 -0
  115. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-ed2cf0fd6c0f6237c87c161e1fca303b3fbe6c04e01f652b88720b4572143349-delta-12-v1.0.dat +0 -0
  116. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-ed2cf0fd6c0f6237c87c161e1fca303b3fbe6c04e01f652b88720b4572143349-full-12-v1.0.dat +0 -0
  117. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f6e454eaf24a83c46a7bed4c19260a0a3ce0ed5c51739cb6d748d4913dc2ef58-delta-30-v1.0.dat +0 -0
  118. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f6e454eaf24a83c46a7bed4c19260a0a3ce0ed5c51739cb6d748d4913dc2ef58-full-30-v1.0.dat +0 -0
  119. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f7ad2bdf86d9609b4d6381086ec1e296bf558e2ff467ead29dd7fa6e31bacc56-delta-43-v1.0.dat +0 -0
  120. chia/_tests/core/data_layer/old_format/files/2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e2e612073746f7265206964-f7ad2bdf86d9609b4d6381086ec1e296bf558e2ff467ead29dd7fa6e31bacc56-full-43-v1.0.dat +0 -0
  121. chia/_tests/core/data_layer/old_format/files/__init__.py +0 -0
  122. chia/_tests/core/data_layer/old_format/old_db.sqlite +0 -0
  123. chia/_tests/core/data_layer/test_data_layer_util.py +18 -21
  124. chia/_tests/core/data_layer/test_data_rpc.py +77 -28
  125. chia/_tests/core/data_layer/test_data_store.py +637 -700
  126. chia/_tests/core/data_layer/test_data_store_schema.py +2 -209
  127. chia/_tests/core/full_node/ram_db.py +1 -1
  128. chia/_tests/core/full_node/stores/test_block_store.py +4 -10
  129. chia/_tests/core/full_node/stores/test_coin_store.py +1 -1
  130. chia/_tests/core/full_node/test_address_manager.py +3 -3
  131. chia/_tests/core/full_node/test_block_height_map.py +1 -1
  132. chia/_tests/core/full_node/test_full_node.py +91 -30
  133. chia/_tests/core/full_node/test_generator_tools.py +17 -10
  134. chia/_tests/core/mempool/test_mempool.py +190 -90
  135. chia/_tests/core/mempool/test_mempool_fee_estimator.py +2 -4
  136. chia/_tests/core/mempool/test_mempool_item_queries.py +1 -1
  137. chia/_tests/core/mempool/test_mempool_manager.py +134 -75
  138. chia/_tests/core/mempool/test_singleton_fast_forward.py +9 -27
  139. chia/_tests/core/server/serve.py +0 -2
  140. chia/_tests/core/server/test_rate_limits.py +400 -347
  141. chia/_tests/core/server/test_server.py +2 -2
  142. chia/_tests/core/services/test_services.py +7 -7
  143. chia/_tests/core/test_cost_calculation.py +31 -10
  144. chia/_tests/core/test_crawler.py +4 -4
  145. chia/_tests/core/test_db_conversion.py +7 -14
  146. chia/_tests/core/test_db_validation.py +2 -6
  147. chia/_tests/core/test_farmer_harvester_rpc.py +34 -1
  148. chia/_tests/core/test_full_node_rpc.py +28 -24
  149. chia/_tests/core/test_merkle_set.py +1 -4
  150. chia/_tests/core/test_seeder.py +1 -1
  151. chia/_tests/core/util/test_keychain.py +2 -2
  152. chia/_tests/core/util/test_lru_cache.py +16 -0
  153. chia/_tests/core/util/test_streamable.py +85 -4
  154. chia/_tests/environments/wallet.py +4 -1
  155. chia/_tests/farmer_harvester/test_farmer.py +8 -6
  156. chia/_tests/farmer_harvester/test_farmer_harvester.py +306 -8
  157. chia/_tests/farmer_harvester/test_filter_prefix_bits.py +3 -3
  158. chia/_tests/farmer_harvester/test_third_party_harvesters.py +11 -11
  159. chia/_tests/fee_estimation/test_fee_estimation_integration.py +2 -2
  160. chia/_tests/fee_estimation/test_fee_estimation_rpc.py +1 -1
  161. chia/_tests/fee_estimation/test_fee_estimation_unit_tests.py +1 -2
  162. chia/_tests/generator/test_rom.py +2 -1
  163. chia/_tests/harvester/__init__.py +0 -0
  164. chia/_tests/harvester/config.py +4 -0
  165. chia/_tests/harvester/test_harvester_api.py +157 -0
  166. chia/_tests/plot_sync/test_plot_sync.py +6 -3
  167. chia/_tests/plot_sync/test_receiver.py +16 -4
  168. chia/_tests/plot_sync/test_sender.py +8 -7
  169. chia/_tests/plot_sync/test_sync_simulated.py +15 -13
  170. chia/_tests/plot_sync/util.py +3 -2
  171. chia/_tests/plotting/test_plot_manager.py +21 -5
  172. chia/_tests/plotting/test_prover.py +106 -0
  173. chia/_tests/pools/test_pool_cmdline.py +7 -6
  174. chia/_tests/pools/test_pool_puzzles_lifecycle.py +10 -3
  175. chia/_tests/pools/test_pool_rpc.py +92 -64
  176. chia/_tests/solver/__init__.py +0 -0
  177. chia/_tests/solver/config.py +4 -0
  178. chia/_tests/solver/test_solver_service.py +29 -0
  179. chia/_tests/timelord/test_new_peak.py +1 -1
  180. chia/_tests/timelord/test_timelord.py +1 -1
  181. chia/_tests/util/benchmarks.py +5 -12
  182. chia/_tests/util/blockchain.py +1 -1
  183. chia/_tests/util/build_network_protocol_files.py +7 -0
  184. chia/_tests/util/network_protocol_data.py +26 -0
  185. chia/_tests/util/protocol_messages_bytes-v1.0 +0 -0
  186. chia/_tests/util/protocol_messages_json.py +19 -0
  187. chia/_tests/util/setup_nodes.py +21 -2
  188. chia/_tests/util/spend_sim.py +9 -3
  189. chia/_tests/util/test_condition_tools.py +3 -2
  190. chia/_tests/util/test_full_block_utils.py +10 -9
  191. chia/_tests/util/test_misc.py +10 -10
  192. chia/_tests/util/test_network.py +32 -1
  193. chia/_tests/util/test_network_protocol_files.py +333 -318
  194. chia/_tests/util/test_network_protocol_json.py +6 -0
  195. chia/_tests/util/test_network_protocol_test.py +27 -0
  196. chia/_tests/util/test_priority_mutex.py +1 -1
  197. chia/_tests/util/test_replace_str_to_bytes.py +6 -6
  198. chia/_tests/wallet/cat_wallet/test_cat_wallet.py +17 -13
  199. chia/_tests/wallet/cat_wallet/test_trades.py +55 -55
  200. chia/_tests/wallet/did_wallet/test_did.py +118 -1229
  201. chia/_tests/wallet/nft_wallet/config.py +1 -1
  202. chia/_tests/wallet/nft_wallet/test_nft_1_offers.py +73 -96
  203. chia/_tests/wallet/nft_wallet/test_nft_bulk_mint.py +15 -12
  204. chia/_tests/wallet/nft_wallet/test_nft_offers.py +67 -134
  205. chia/_tests/wallet/nft_wallet/test_nft_wallet.py +31 -26
  206. chia/_tests/wallet/rpc/test_wallet_rpc.py +765 -371
  207. chia/_tests/wallet/sync/test_wallet_sync.py +6 -0
  208. chia/_tests/wallet/test_new_wallet_protocol.py +1 -1
  209. chia/_tests/wallet/test_signer_protocol.py +2 -2
  210. chia/_tests/wallet/test_singleton_lifecycle_fast.py +3 -4
  211. chia/_tests/wallet/test_transaction_store.py +42 -33
  212. chia/_tests/wallet/test_wallet.py +22 -31
  213. chia/_tests/wallet/test_wallet_state_manager.py +14 -7
  214. chia/_tests/wallet/vc_wallet/test_vc_wallet.py +53 -32
  215. chia/apis.py +2 -0
  216. chia/cmds/beta.py +7 -3
  217. chia/cmds/chia.py +2 -0
  218. chia/cmds/cmd_classes.py +11 -27
  219. chia/cmds/cmds_util.py +3 -0
  220. chia/cmds/coin_funcs.py +27 -22
  221. chia/cmds/configure.py +42 -18
  222. chia/cmds/dev/data.py +22 -3
  223. chia/cmds/farm.py +32 -0
  224. chia/cmds/farm_funcs.py +54 -5
  225. chia/cmds/init_funcs.py +4 -0
  226. chia/cmds/keys_funcs.py +8 -10
  227. chia/cmds/peer_funcs.py +8 -10
  228. chia/cmds/plotnft_funcs.py +24 -16
  229. chia/cmds/rpc.py +11 -1
  230. chia/cmds/show_funcs.py +5 -5
  231. chia/cmds/solver.py +33 -0
  232. chia/cmds/solver_funcs.py +21 -0
  233. chia/cmds/wallet.py +1 -1
  234. chia/cmds/wallet_funcs.py +149 -96
  235. chia/consensus/block_body_validation.py +8 -9
  236. chia/consensus/block_creation.py +9 -10
  237. chia/consensus/block_header_validation.py +61 -69
  238. chia/{full_node → consensus}/block_height_map.py +2 -1
  239. chia/consensus/block_height_map_protocol.py +21 -0
  240. chia/consensus/block_rewards.py +12 -12
  241. chia/consensus/blockchain.py +8 -18
  242. chia/consensus/default_constants.py +6 -6
  243. chia/consensus/generator_tools.py +1 -1
  244. chia/consensus/get_block_challenge.py +24 -25
  245. chia/consensus/pos_quality.py +28 -2
  246. chia/consensus/pot_iterations.py +15 -17
  247. chia/daemon/keychain_proxy.py +5 -0
  248. chia/daemon/server.py +2 -3
  249. chia/data_layer/data_layer.py +32 -24
  250. chia/data_layer/data_layer_errors.py +5 -0
  251. chia/data_layer/data_layer_rpc_api.py +1 -1
  252. chia/data_layer/data_layer_service.py +8 -0
  253. chia/data_layer/data_layer_util.py +49 -89
  254. chia/data_layer/data_layer_wallet.py +20 -17
  255. chia/data_layer/data_store.py +1051 -1462
  256. chia/data_layer/download_data.py +44 -115
  257. chia/{server → data_layer}/start_data_layer.py +2 -1
  258. chia/data_layer/util/benchmark.py +38 -53
  259. chia/farmer/farmer.py +3 -0
  260. chia/farmer/farmer_api.py +104 -5
  261. chia/farmer/farmer_rpc_api.py +20 -0
  262. chia/farmer/farmer_rpc_client.py +6 -2
  263. chia/farmer/farmer_service.py +8 -0
  264. chia/{server → farmer}/start_farmer.py +4 -3
  265. chia/full_node/block_store.py +20 -10
  266. chia/full_node/coin_store.py +12 -4
  267. chia/full_node/eligible_coin_spends.py +17 -72
  268. chia/full_node/full_node.py +68 -71
  269. chia/full_node/full_node_api.py +26 -32
  270. chia/full_node/full_node_rpc_api.py +44 -32
  271. chia/full_node/full_node_rpc_client.py +67 -79
  272. chia/full_node/full_node_service.py +8 -0
  273. chia/full_node/full_node_store.py +5 -3
  274. chia/full_node/mempool.py +14 -14
  275. chia/full_node/mempool_manager.py +67 -89
  276. chia/{server → full_node}/start_full_node.py +1 -1
  277. chia/full_node/subscriptions.py +2 -2
  278. chia/full_node/weight_proof.py +14 -15
  279. chia/harvester/harvester.py +8 -1
  280. chia/harvester/harvester_api.py +178 -44
  281. chia/harvester/harvester_service.py +8 -0
  282. chia/{server → harvester}/start_harvester.py +1 -1
  283. chia/introducer/introducer_service.py +8 -0
  284. chia/{server → introducer}/start_introducer.py +1 -1
  285. chia/plot_sync/receiver.py +6 -1
  286. chia/plot_sync/sender.py +7 -4
  287. chia/plotting/cache.py +37 -28
  288. chia/plotting/check_plots.py +83 -48
  289. chia/plotting/create_plots.py +3 -4
  290. chia/plotting/manager.py +18 -13
  291. chia/plotting/prover.py +153 -0
  292. chia/plotting/util.py +14 -6
  293. chia/pools/pool_wallet.py +6 -4
  294. chia/protocols/harvester_protocol.py +14 -0
  295. chia/protocols/outbound_message.py +1 -0
  296. chia/protocols/pool_protocol.py +1 -1
  297. chia/protocols/protocol_message_types.py +7 -0
  298. chia/protocols/shared_protocol.py +2 -0
  299. chia/protocols/solver_protocol.py +18 -0
  300. chia/rpc/rpc_server.py +1 -1
  301. chia/seeder/crawl_store.py +4 -8
  302. chia/seeder/crawler.py +2 -2
  303. chia/seeder/crawler_service.py +8 -0
  304. chia/seeder/start_crawler.py +1 -1
  305. chia/server/address_manager.py +12 -15
  306. chia/server/introducer_peers.py +1 -1
  307. chia/server/node_discovery.py +9 -10
  308. chia/server/rate_limit_numbers.py +157 -168
  309. chia/server/rate_limits.py +44 -41
  310. chia/server/resolve_peer_info.py +5 -0
  311. chia/server/server.py +17 -7
  312. chia/server/start_service.py +0 -1
  313. chia/simulator/block_tools.py +92 -58
  314. chia/simulator/full_node_simulator.py +1 -1
  315. chia/simulator/setup_services.py +51 -15
  316. chia/solver/__init__.py +0 -0
  317. chia/solver/solver.py +100 -0
  318. chia/solver/solver_api.py +59 -0
  319. chia/solver/solver_rpc_api.py +31 -0
  320. chia/solver/solver_rpc_client.py +16 -0
  321. chia/solver/solver_service.py +8 -0
  322. chia/solver/start_solver.py +102 -0
  323. {mozilla-ca → chia/ssl}/cacert.pem +0 -27
  324. chia/ssl/create_ssl.py +3 -2
  325. chia/{server → timelord}/start_timelord.py +1 -1
  326. chia/timelord/timelord.py +12 -13
  327. chia/timelord/timelord_service.py +8 -0
  328. chia/types/blockchain_format/proof_of_space.py +61 -17
  329. chia/types/coin_spend.py +0 -8
  330. chia/types/internal_mempool_item.py +3 -3
  331. chia/types/mempool_item.py +15 -8
  332. chia/types/mempool_submission_status.py +1 -1
  333. chia/util/config.py +1 -3
  334. chia/util/db_wrapper.py +7 -8
  335. chia/util/initial-config.yaml +46 -0
  336. chia/util/lru_cache.py +8 -4
  337. chia/util/network.py +9 -0
  338. chia/util/service_groups.py +3 -1
  339. chia/util/streamable.py +38 -8
  340. chia/util/virtual_project_analysis.py +1 -1
  341. chia/wallet/cat_wallet/cat_outer_puzzle.py +7 -4
  342. chia/wallet/cat_wallet/cat_wallet.py +13 -7
  343. chia/wallet/cat_wallet/r_cat_wallet.py +4 -1
  344. chia/wallet/conditions.py +1 -3
  345. chia/wallet/did_wallet/did_wallet.py +27 -332
  346. chia/wallet/nft_wallet/nft_puzzle_utils.py +1 -1
  347. chia/wallet/nft_wallet/nft_wallet.py +9 -7
  348. chia/wallet/puzzle_drivers.py +7 -8
  349. chia/{server → wallet}/start_wallet.py +1 -1
  350. chia/wallet/trade_manager.py +12 -9
  351. chia/wallet/transaction_record.py +14 -51
  352. chia/wallet/util/clvm_streamable.py +28 -41
  353. chia/wallet/util/merkle_utils.py +2 -2
  354. chia/wallet/util/tx_config.py +3 -6
  355. chia/wallet/vc_wallet/cr_cat_wallet.py +12 -6
  356. chia/wallet/vc_wallet/vc_wallet.py +13 -15
  357. chia/wallet/wallet.py +5 -3
  358. chia/wallet/wallet_node.py +25 -30
  359. chia/wallet/wallet_request_types.py +538 -101
  360. chia/wallet/wallet_rpc_api.py +398 -570
  361. chia/wallet/wallet_rpc_client.py +144 -332
  362. chia/wallet/wallet_service.py +8 -0
  363. chia/wallet/wallet_state_manager.py +53 -42
  364. chia/wallet/wallet_transaction_store.py +13 -5
  365. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7rc2.dist-info}/METADATA +31 -31
  366. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7rc2.dist-info}/RECORD +369 -241
  367. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7rc2.dist-info}/WHEEL +1 -1
  368. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7rc2.dist-info}/entry_points.txt +8 -7
  369. chia/full_node/mempool_check_conditions.py +0 -102
  370. chia/server/aliases.py +0 -35
  371. {chia_blockchain-2.5.6rc2.dist-info → chia_blockchain-2.5.7rc2.dist-info/licenses}/LICENSE +0 -0
@@ -1,18 +1,37 @@
 from __future__ import annotations
 
 import contextlib
+import copy
+import itertools
 import logging
+import shutil
+import sqlite3
 from collections import defaultdict
-from collections.abc import AsyncIterator, Awaitable
-from contextlib import asynccontextmanager
-from dataclasses import dataclass, replace
+from collections.abc import AsyncIterator, Awaitable, Iterable, Iterator, Sequence
+from contextlib import asynccontextmanager, contextmanager
+from dataclasses import dataclass, field, replace
+from hashlib import sha256
 from pathlib import Path
 from typing import Any, BinaryIO, Callable, Optional, Union
 
 import aiosqlite
+import anyio.to_thread
+import chia_rs.datalayer
+import zstd
+from chia_rs.datalayer import (
+    DeltaFileCache,
+    DeltaReader,
+    KeyAlreadyPresentError,
+    KeyId,
+    MerkleBlob,
+    ProofOfInclusion,
+    TreeIndex,
+    ValueId,
+)
 from chia_rs.sized_bytes import bytes32
+from chia_rs.sized_ints import int64
 
-from chia.data_layer.data_layer_errors import KeyNotFoundError, NodeHashError, TreeGenerationIncrementingError
+from chia.data_layer.data_layer_errors import KeyNotFoundError, MerkleBlobNotFoundError, TreeGenerationIncrementingError
 from chia.data_layer.data_layer_util import (
     DiffData,
     InsertResult,
@@ -24,8 +43,6 @@ from chia.data_layer.data_layer_util import (
     Node,
     NodeType,
     OperationType,
-    ProofOfInclusion,
-    ProofOfInclusionLayer,
     Root,
     SerializedNode,
     ServerInfo,
@@ -34,15 +51,18 @@ from chia.data_layer.data_layer_util import (
     Subscription,
     TerminalNode,
     Unspecified,
+    get_delta_filename_path,
     get_hashes_for_page,
     internal_hash,
     key_hash,
     leaf_hash,
-    row_to_node,
     unspecified,
 )
-from chia.types.blockchain_format.program import Program
+from chia.util.batches import to_batches
+from chia.util.cpu import available_logical_cores
 from chia.util.db_wrapper import SQLITE_MAX_VARIABLE_NUMBER, DBWrapper2
+from chia.util.log_exceptions import log_exceptions
+from chia.util.lru_cache import LRUCache
 
 log = logging.getLogger(__name__)
 
@@ -50,17 +70,33 @@ log = logging.getLogger(__name__)
 # TODO: review exceptions for values that shouldn't be displayed
 # TODO: pick exception types other than Exception
 
+KeyOrValueId = int64
+
+default_prefer_file_kv_blob_length: int = 4096
+
 
 @dataclass
 class DataStore:
     """A key/value store with the pairs being terminal nodes in a CLVM object tree."""
 
     db_wrapper: DBWrapper2
+    recent_merkle_blobs: LRUCache[bytes32, MerkleBlob]
+    merkle_blobs_path: Path
+    key_value_blobs_path: Path
+    unconfirmed_keys_values: dict[bytes32, list[bytes32]] = field(default_factory=dict)
+    prefer_db_kv_blob_length: int = default_prefer_file_kv_blob_length
 
     @classmethod
     @contextlib.asynccontextmanager
     async def managed(
-        cls, database: Union[str, Path], uri: bool = False, sql_log_path: Optional[Path] = None
+        cls,
+        database: Union[str, Path],
+        merkle_blobs_path: Path,
+        key_value_blobs_path: Path,
+        uri: bool = False,
+        sql_log_path: Optional[Path] = None,
+        cache_capacity: int = 1,
+        prefer_db_kv_blob_length: int = default_prefer_file_kv_blob_length,
     ) -> AsyncIterator[DataStore]:
         async with DBWrapper2.managed(
             database=database,
@@ -75,46 +111,16 @@ class DataStore:
             row_factory=aiosqlite.Row,
             log_path=sql_log_path,
         ) as db_wrapper:
-            self = cls(db_wrapper=db_wrapper)
+            recent_merkle_blobs: LRUCache[bytes32, MerkleBlob] = LRUCache(capacity=cache_capacity)
+            self = cls(
+                db_wrapper=db_wrapper,
+                recent_merkle_blobs=recent_merkle_blobs,
+                merkle_blobs_path=merkle_blobs_path,
+                key_value_blobs_path=key_value_blobs_path,
+                prefer_db_kv_blob_length=prefer_db_kv_blob_length,
+            )
 
             async with db_wrapper.writer() as writer:
-                await writer.execute(
-                    f"""
-                    CREATE TABLE IF NOT EXISTS node(
-                        hash BLOB PRIMARY KEY NOT NULL CHECK(length(hash) == 32),
-                        node_type INTEGER NOT NULL CHECK(
-                            (
-                                node_type == {int(NodeType.INTERNAL)}
-                                AND left IS NOT NULL
-                                AND right IS NOT NULL
-                                AND key IS NULL
-                                AND value IS NULL
-                            )
-                            OR
-                            (
-                                node_type == {int(NodeType.TERMINAL)}
-                                AND left IS NULL
-                                AND right IS NULL
-                                AND key IS NOT NULL
-                                AND value IS NOT NULL
-                            )
-                        ),
-                        left BLOB REFERENCES node,
-                        right BLOB REFERENCES node,
-                        key BLOB,
-                        value BLOB
-                    )
-                    """
-                )
-                await writer.execute(
-                    """
-                    CREATE TRIGGER IF NOT EXISTS no_node_updates
-                    BEFORE UPDATE ON node
-                    BEGIN
-                        SELECT RAISE(FAIL, 'updates not allowed to the node table');
-                    END
-                    """
-                )
                 await writer.execute(
                     f"""
                     CREATE TABLE IF NOT EXISTS root(
@@ -124,25 +130,7 @@ class DataStore:
                         status INTEGER NOT NULL CHECK(
                             {" OR ".join(f"status == {status}" for status in Status)}
                         ),
-                        PRIMARY KEY(tree_id, generation),
-                        FOREIGN KEY(node_hash) REFERENCES node(hash)
-                    )
-                    """
-                )
-                # TODO: Add ancestor -> hash relationship, this might involve temporarily
-                #       deferring the foreign key enforcement due to the insertion order
-                #       and the node table also enforcing a similar relationship in the
-                #       other direction.
-                # FOREIGN KEY(ancestor) REFERENCES ancestors(ancestor)
-                await writer.execute(
-                    """
-                    CREATE TABLE IF NOT EXISTS ancestors(
-                        hash BLOB NOT NULL REFERENCES node,
-                        ancestor BLOB CHECK(length(ancestor) == 32),
-                        tree_id BLOB NOT NULL CHECK(length(tree_id) == 32),
-                        generation INTEGER NOT NULL,
-                        PRIMARY KEY(hash, tree_id, generation),
-                        FOREIGN KEY(ancestor) REFERENCES node(hash)
+                        PRIMARY KEY(tree_id, generation)
                     )
                     """
                 )
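
A brief note on the managed() changes above: the constructor now also takes directories for merkle blob files and key/value blob files, an LRU cache capacity for recently used MerkleBlob objects, and a length threshold above which key/value blobs are written to files instead of SQLite. A minimal usage sketch under those assumptions follows; the paths and cache size here are illustrative, not values taken from this release.

import asyncio
from pathlib import Path

from chia.data_layer.data_store import DataStore


async def main() -> None:
    base = Path("dl-example")  # hypothetical working directory, not part of the diff
    base.mkdir(parents=True, exist_ok=True)
    async with DataStore.managed(
        database=base / "data_layer.sqlite",
        merkle_blobs_path=base / "merkle-blobs",
        key_value_blobs_path=base / "key-value-blobs",
        cache_capacity=4,  # number of recent MerkleBlob objects kept in the LRU cache
    ) as data_store:
        ...  # use data_store here; the database is closed when the block exits


asyncio.run(main())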
@@ -173,7 +161,34 @@ class DataStore:
                 )
                 await writer.execute(
                     """
-                    CREATE INDEX IF NOT EXISTS node_key_index ON node(key)
+                    CREATE TABLE IF NOT EXISTS ids(
+                        kv_id INTEGER PRIMARY KEY,
+                        hash BLOB NOT NULL CHECK(length(store_id) == 32),
+                        blob BLOB,
+                        store_id BLOB NOT NULL CHECK(length(store_id) == 32)
+                    )
+                    """
+                )
+                await writer.execute(
+                    """
+                    CREATE TABLE IF NOT EXISTS nodes(
+                        store_id BLOB NOT NULL CHECK(length(store_id) == 32),
+                        hash BLOB NOT NULL,
+                        root_hash BLOB NOT NULL,
+                        generation INTEGER NOT NULL CHECK(generation >= 0),
+                        idx INTEGER NOT NULL,
+                        PRIMARY KEY(store_id, hash)
+                    )
+                    """
+                )
+                await writer.execute(
+                    """
+                    CREATE UNIQUE INDEX IF NOT EXISTS ids_hash_index ON ids(hash, store_id)
+                    """
+                )
+                await writer.execute(
+                    """
+                    CREATE INDEX IF NOT EXISTS nodes_generation_index ON nodes(generation)
                     """
                 )
 
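
The ids and nodes tables created above replace the old node/ancestors schema: each key or value blob is stored once per store, addressed by its sha256 hash, and deduplicated through the unique (hash, store_id) index. A self-contained sqlite3 sketch of that lookup (CHECK constraints omitted, plain sqlite3 instead of the project's aiosqlite wrapper):

import hashlib
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE ids(kv_id INTEGER PRIMARY KEY, hash BLOB NOT NULL, blob BLOB, store_id BLOB NOT NULL)"
)
conn.execute("CREATE UNIQUE INDEX ids_hash_index ON ids(hash, store_id)")

store_id = bytes(32)  # placeholder store id
blob = b"example value"
blob_hash = hashlib.sha256(blob).digest()

# Insert once; a second insert of the same (hash, store_id) would hit the unique index,
# which is how add_kvid later in this diff falls back to looking up the existing kv_id.
conn.execute("INSERT INTO ids (hash, blob, store_id) VALUES (?, ?, ?)", (blob_hash, blob, store_id))
row = conn.execute("SELECT kv_id FROM ids WHERE hash = ? AND store_id = ?", (blob_hash, store_id)).fetchone()
print(row[0])  # integer kv_id the merkle tree refers to instead of the raw blob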
@@ -184,19 +199,228 @@ class DataStore:
         async with self.db_wrapper.writer():
             yield
 
-    async def migrate_db(self) -> None:
+    async def insert_into_data_store_from_file(
+        self,
+        store_id: bytes32,
+        root_hash: Optional[bytes32],
+        filename: Path,
+        delta_reader: Optional[DeltaReader] = None,
+    ) -> Optional[DeltaReader]:
+        async with self.db_wrapper.writer():
+            with self.manage_kv_files(store_id):
+                if root_hash is None:
+                    merkle_blob = MerkleBlob(b"")
+                else:
+                    root = await self.get_tree_root(store_id=store_id)
+                    if delta_reader is None:
+                        delta_reader = DeltaReader(internal_nodes={}, leaf_nodes={})
+                        if root.node_hash is not None:
+                            delta_reader.collect_from_merkle_blob(
+                                self.get_merkle_path(store_id=store_id, root_hash=root.node_hash),
+                                indexes=[TreeIndex(0)],
+                            )
+
+                    internal_nodes, terminal_nodes = await self.read_from_file(filename, store_id)
+                    delta_reader.add_internal_nodes(internal_nodes)
+                    delta_reader.add_leaf_nodes(terminal_nodes)
+
+                    missing_hashes = await anyio.to_thread.run_sync(delta_reader.get_missing_hashes, root_hash)
+
+                    if len(missing_hashes) > 0:
+                        # TODO: consider adding transactions around this code
+                        merkle_blob_queries = await self.build_merkle_blob_queries_for_missing_hashes(
+                            missing_hashes, store_id
+                        )
+                        if len(merkle_blob_queries) > 0:
+                            jobs = [
+                                (self.get_merkle_path(store_id=store_id, root_hash=old_root_hash), indexes)
+                                for old_root_hash, indexes in merkle_blob_queries.items()
+                            ]
+                            await anyio.to_thread.run_sync(delta_reader.collect_from_merkle_blobs, jobs)
+                        await self.build_cache_and_collect_missing_hashes(root, root_hash, store_id, delta_reader)
+
+                    merkle_blob = delta_reader.create_merkle_blob_and_filter_unused_nodes(root_hash, set())
+
+                # Don't store these blob objects into cache, since their data structures are not calculated yet.
+                await self.insert_root_from_merkle_blob(merkle_blob, store_id, Status.COMMITTED, update_cache=False)
+        return delta_reader
+
+    async def build_merkle_blob_queries_for_missing_hashes(
+        self,
+        missing_hashes: set[bytes32],
+        store_id: bytes32,
+    ) -> defaultdict[bytes32, list[TreeIndex]]:
+        queries = defaultdict[bytes32, list[TreeIndex]](list)
+
+        batch_size = min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)
+
+        async with self.db_wrapper.reader() as reader:
+            for batch in to_batches(missing_hashes, batch_size):
+                placeholders = ",".join(["?"] * len(batch.entries))
+                query = f"""
+                    SELECT hash, root_hash, idx
+                    FROM nodes
+                    WHERE store_id = ? AND hash IN ({placeholders})
+                    LIMIT {len(batch.entries)}
+                    """
+
+                async with reader.execute(query, (store_id, *batch.entries)) as cursor:
+                    rows = await cursor.fetchall()
+                    for row in rows:
+                        root_hash_blob = bytes32(row["root_hash"])
+                        index = TreeIndex(row["idx"])
+                        queries[root_hash_blob].append(index)
+
+        return queries
+
+    async def build_cache_and_collect_missing_hashes(
+        self,
+        root: Root,
+        root_hash: bytes32,
+        store_id: bytes32,
+        delta_reader: DeltaReader,
+    ) -> None:
+        missing_hashes = delta_reader.get_missing_hashes(root_hash)
+
+        if len(missing_hashes) == 0:
+            return
+
+        async with self.db_wrapper.reader() as reader:
+            cursor = await reader.execute(
+                # TODO: the INDEXED BY seems like it shouldn't be needed, figure out why it is
+                # https://sqlite.org/lang_indexedby.html: admonished to omit all use of INDEXED BY
+                # https://sqlite.org/queryplanner-ng.html#howtofix
+                "SELECT MAX(generation) FROM nodes INDEXED BY nodes_generation_index WHERE store_id = ?",
+                (store_id,),
+            )
+            generation_row = await cursor.fetchone()
+            if generation_row is None or generation_row[0] is None:
+                current_generation = 0
+            else:
+                current_generation = generation_row[0]
+        generations: Sequence[int] = [current_generation]
+
+        while missing_hashes:
+            if current_generation >= root.generation:
+                raise Exception("Invalid delta file, cannot find all the required hashes")
+
+            current_generation = generations[-1] + 1
+
+            batch_size = available_logical_cores()
+            generations = range(
+                current_generation,
+                min(current_generation + batch_size, root.generation),
+            )
+            jobs: list[tuple[bytes32, Path]] = []
+            generations_by_root_hash: dict[bytes32, int] = {}
+            for generation in generations:
+                generation_root = await self.get_tree_root(store_id=store_id, generation=generation)
+                if generation_root.node_hash is None:
+                    # no need to process an empty generation
+                    continue
+                path = self.get_merkle_path(store_id=store_id, root_hash=generation_root.node_hash)
+                jobs.append((generation_root.node_hash, path))
+                generations_by_root_hash[generation_root.node_hash] = generation
+
+            found = await anyio.to_thread.run_sync(
+                delta_reader.collect_and_return_from_merkle_blobs,
+                jobs,
+                missing_hashes,
+            )
+            async with self.db_wrapper.writer() as writer:
+                await writer.executemany(
+                    """
+                    INSERT
+                    OR IGNORE INTO nodes(store_id, hash, root_hash, generation, idx)
+                    VALUES (?, ?, ?, ?, ?)
+                    """,
+                    (
+                        (store_id, hash, root_hash, generations_by_root_hash[root_hash], index.raw)
+                        for root_hash, map in found
+                        for hash, index in map.items()
+                    ),
+                )
+
+            missing_hashes = delta_reader.get_missing_hashes(root_hash)
+
+            log.info(f"Missing hashes: added old hashes from generation {current_generation}")
+
+    async def read_from_file(
+        self, filename: Path, store_id: bytes32
+    ) -> tuple[dict[bytes32, tuple[bytes32, bytes32]], dict[bytes32, tuple[KeyId, ValueId]]]:
+        internal_nodes: dict[bytes32, tuple[bytes32, bytes32]] = {}
+        terminal_nodes: dict[bytes32, tuple[KeyId, ValueId]] = {}
+
+        with open(filename, "rb") as reader:
+            async with self.db_wrapper.writer() as writer:
+                while True:
+                    chunk = b""
+                    while len(chunk) < 4:
+                        size_to_read = 4 - len(chunk)
+                        cur_chunk = reader.read(size_to_read)
+                        if cur_chunk is None or cur_chunk == b"":
+                            if size_to_read < 4:
+                                raise Exception("Incomplete read of length.")
+                            break
+                        chunk += cur_chunk
+                    if chunk == b"":
+                        break
+
+                    size = int.from_bytes(chunk, byteorder="big")
+                    serialize_nodes_bytes = b""
+                    while len(serialize_nodes_bytes) < size:
+                        size_to_read = size - len(serialize_nodes_bytes)
+                        cur_chunk = reader.read(size_to_read)
+                        if cur_chunk is None or cur_chunk == b"":
+                            raise Exception("Incomplete read of blob.")
+                        serialize_nodes_bytes += cur_chunk
+                    serialized_node = SerializedNode.from_bytes(serialize_nodes_bytes)
+
+                    node_type = NodeType.TERMINAL if serialized_node.is_terminal else NodeType.INTERNAL
+                    if node_type == NodeType.INTERNAL:
+                        node_hash = internal_hash(bytes32(serialized_node.value1), bytes32(serialized_node.value2))
+                        internal_nodes[node_hash] = (bytes32(serialized_node.value1), bytes32(serialized_node.value2))
+                    else:
+                        kid, vid = await self.add_key_value(
+                            serialized_node.value1,
+                            serialized_node.value2,
+                            store_id,
+                            writer=writer,
+                        )
+                        node_hash = leaf_hash(serialized_node.value1, serialized_node.value2)
+                        terminal_nodes[node_hash] = (kid, vid)
+
+        return internal_nodes, terminal_nodes
+
+    async def migrate_db(self, server_files_location: Path) -> None:
         async with self.db_wrapper.reader() as reader:
             cursor = await reader.execute("SELECT * FROM schema")
-            row = await cursor.fetchone()
-            if row is not None:
+            rows = await cursor.fetchall()
+            all_versions = {"v1.0", "v2.0"}
+
+            for row in rows:
                 version = row["version_id"]
-                if version != "v1.0":
+                if version not in all_versions:
                     raise Exception("Unknown version")
-                log.info(f"Found DB schema version {version}. No migration needed.")
-                return
+                if version == "v2.0":
+                    log.info(f"Found DB schema version {version}. No migration needed.")
+                    return
+
+        version = "v2.0"
+        old_tables = ["node", "root", "ancestors"]
+        all_stores = await self.get_store_ids()
+        all_roots: list[list[Root]] = []
+        for store_id in all_stores:
+            try:
+                root = await self.get_tree_root(store_id=store_id)
+                roots = await self.get_roots_between(store_id, 1, root.generation)
+                all_roots.append([*roots, root])
+            except Exception as e:
+                if "unable to find root for id, generation" in str(e):
+                    log.error(f"Cannot find roots for {store_id}. Skipping it.")
+
+        log.info(f"Initiating migration to version {version}. Found {len(all_roots)} stores to migrate")
 
-        version = "v1.0"
-        log.info(f"Initiating migration to version {version}")
         async with self.db_wrapper.writer(foreign_key_enforcement_enabled=False) as writer:
             await writer.execute(
                 f"""
@@ -207,16 +431,360 @@ class DataStore:
207
431
  status INTEGER NOT NULL CHECK(
208
432
  {" OR ".join(f"status == {status}" for status in Status)}
209
433
  ),
210
- PRIMARY KEY(tree_id, generation),
211
- FOREIGN KEY(node_hash) REFERENCES node(hash)
434
+ PRIMARY KEY(tree_id, generation)
212
435
  )
213
436
  """
214
437
  )
215
- await writer.execute("INSERT INTO new_root SELECT * FROM root")
216
- await writer.execute("DROP TABLE root")
438
+ for old_table in old_tables:
439
+ await writer.execute(f"DROP TABLE IF EXISTS {old_table}")
217
440
  await writer.execute("ALTER TABLE new_root RENAME TO root")
218
441
  await writer.execute("INSERT INTO schema (version_id) VALUES (?)", (version,))
219
- log.info(f"Finished migrating DB to version {version}")
442
+ log.info(f"Initialized new DB schema {version}.")
443
+
444
+ total_generations = 0
445
+ synced_generations = 0
446
+ for roots in all_roots:
447
+ assert len(roots) > 0
448
+ total_generations += len(roots)
449
+
450
+ for roots in all_roots:
451
+ store_id = roots[0].store_id
452
+ await self.create_tree(store_id=store_id, status=Status.COMMITTED)
453
+
454
+ expected_synced_generations = synced_generations + len(roots)
455
+ for root in roots:
456
+ recovery_filename: Optional[Path] = None
457
+
458
+ for group_by_store in (True, False):
459
+ filename = get_delta_filename_path(
460
+ server_files_location,
461
+ store_id,
462
+ bytes32.zeros if root.node_hash is None else root.node_hash,
463
+ root.generation,
464
+ group_by_store,
465
+ )
466
+
467
+ if filename.exists():
468
+ log.info(f"Found filename {filename}. Recovering data from it")
469
+ recovery_filename = filename
470
+ break
471
+
472
+ if recovery_filename is None:
473
+ log.error(f"Cannot find any recovery file for root {root}")
474
+ break
475
+
476
+ try:
477
+ await self.insert_into_data_store_from_file(store_id, root.node_hash, recovery_filename)
478
+ synced_generations += 1
479
+ log.info(
480
+ f"Successfully recovered root from {filename}. "
481
+ f"Total roots processed: {(synced_generations / total_generations * 100):.2f}%"
482
+ )
483
+ except Exception as e:
484
+ log.error(f"Cannot recover data from {filename}: {e}")
485
+ break
486
+
487
+ if synced_generations < expected_synced_generations:
488
+ log.error(
489
+ f"Could not recover {expected_synced_generations - synced_generations} generations. "
490
+ f"Consider resyncing the store {store_id} once the migration is complete."
491
+ )
492
+ # Reset the counter as if we synced correctly, so the percentages add to 100% at the end.
493
+ synced_generations = expected_synced_generations
494
+
495
+ async def get_merkle_blob(
496
+ self,
497
+ store_id: bytes32,
498
+ root_hash: Optional[bytes32],
499
+ read_only: bool = False,
500
+ update_cache: bool = True,
501
+ ) -> MerkleBlob:
502
+ if root_hash is None:
503
+ return MerkleBlob(blob=b"")
504
+ if self.recent_merkle_blobs.get_capacity() == 0:
505
+ update_cache = False
506
+
507
+ existing_blob = self.recent_merkle_blobs.get(root_hash)
508
+ if existing_blob is not None:
509
+ return existing_blob if read_only else copy.deepcopy(existing_blob)
510
+
511
+ try:
512
+ with log_exceptions(log=log, message="Error while getting merkle blob"):
513
+ path = self.get_merkle_path(store_id=store_id, root_hash=root_hash)
514
+ # TODO: consider file-system based locking of either the file or the store directory
515
+ merkle_blob = MerkleBlob.from_path(path)
516
+ except Exception as e:
517
+ raise MerkleBlobNotFoundError(root_hash=root_hash) from e
518
+
519
+ if update_cache:
520
+ self.recent_merkle_blobs.put(root_hash, copy.deepcopy(merkle_blob))
521
+
522
+ return merkle_blob
523
+
524
+ def get_bytes_path(self, bytes_: bytes) -> Path:
525
+ raw = bytes_.hex()
526
+ segment_sizes = [2, 2, 2]
527
+ start = 0
528
+ segments = []
529
+ for size in segment_sizes:
530
+ segments.append(raw[start : start + size])
531
+ start += size
532
+
533
+ return Path(*segments, raw)
534
+
535
+ def get_merkle_path(self, store_id: bytes32, root_hash: Optional[bytes32]) -> Path:
536
+ store_root = self.merkle_blobs_path.joinpath(store_id.hex())
537
+ if root_hash is None:
538
+ return store_root
539
+
540
+ return store_root.joinpath(self.get_bytes_path(bytes_=root_hash))
541
+
542
+ def get_key_value_path(self, store_id: bytes32, blob_hash: Optional[bytes32]) -> Path:
543
+ store_root = self.key_value_blobs_path.joinpath(store_id.hex())
544
+ if blob_hash is None:
545
+ return store_root
546
+
547
+ return store_root.joinpath(self.get_bytes_path(bytes_=blob_hash))
548
+
549
+ async def insert_root_from_merkle_blob(
550
+ self,
551
+ merkle_blob: MerkleBlob,
552
+ store_id: bytes32,
553
+ status: Status,
554
+ old_root: Optional[Root] = None,
555
+ update_cache: bool = True,
556
+ ) -> Root:
557
+ if not merkle_blob.empty():
558
+ merkle_blob.calculate_lazy_hashes()
559
+ if self.recent_merkle_blobs.get_capacity() == 0:
560
+ update_cache = False
561
+
562
+ root_hash = merkle_blob.get_root_hash()
563
+ if old_root is not None and old_root.node_hash == root_hash:
564
+ raise ValueError("Changelist resulted in no change to tree data")
565
+
566
+ if root_hash is not None:
567
+ log.info(f"inserting merkle blob: {len(merkle_blob)} bytes {root_hash.hex()}")
568
+ blob_path = self.get_merkle_path(store_id=store_id, root_hash=merkle_blob.get_root_hash())
569
+ if not blob_path.exists():
570
+ blob_path.parent.mkdir(parents=True, exist_ok=True)
571
+ # TODO: consider file-system based locking of either the file or the store directory
572
+ merkle_blob.to_path(blob_path)
573
+
574
+ if update_cache:
575
+ self.recent_merkle_blobs.put(root_hash, copy.deepcopy(merkle_blob))
576
+
577
+ return await self._insert_root(store_id, root_hash, status)
578
+
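(Annotation, not part of the diff: `insert_root_from_merkle_blob` above only writes the blob file when no file for that root hash exists yet, so identical roots are stored once and rewrites are skipped. A rough sketch of that write-once, content-addressed pattern under a temporary directory; names and layout are illustrative.)

    import tempfile
    from hashlib import sha256
    from pathlib import Path

    def store_once(base: Path, blob: bytes) -> Path:
        name = sha256(blob).hexdigest()
        path = base / name[:2] / name
        if not path.exists():
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_bytes(blob)  # content-addressed: identical bytes reuse the same file
        return path

    base = Path(tempfile.mkdtemp())
    first = store_once(base, b"serialized merkle blob")
    second = store_once(base, b"serialized merkle blob")
    assert first == second and first.exists()  # the second call finds the file and skips the write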
579
+ def _use_file_for_new_kv_blob(self, blob: bytes) -> bool:
580
+ return len(blob) > self.prefer_db_kv_blob_length
581
+
582
+ async def get_kvid(self, blob: bytes, store_id: bytes32) -> Optional[KeyOrValueId]:
583
+ blob_hash = bytes32(sha256(blob).digest())
584
+
585
+ async with self.db_wrapper.reader() as reader:
586
+ cursor = await reader.execute(
587
+ "SELECT kv_id FROM ids WHERE hash = ? AND store_id = ?",
588
+ (
589
+ blob_hash,
590
+ store_id,
591
+ ),
592
+ )
593
+ row = await cursor.fetchone()
594
+
595
+ if row is None:
596
+ return None
597
+
598
+ return KeyOrValueId(row[0])
599
+
600
+ def get_blob_from_file(self, blob_hash: bytes32, store_id: bytes32) -> bytes:
601
+ # TODO: seems that zstd needs hinting
602
+ # TODO: consider file-system based locking of either the file or the store directory
603
+ return zstd.decompress(self.get_key_value_path(store_id=store_id, blob_hash=blob_hash).read_bytes()) # type: ignore[no-any-return]
604
+
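(Annotation, not part of the diff: `_use_file_for_new_kv_blob` and `get_blob_from_file` above implement a size threshold: small key/value blobs stay in the `ids` table, larger ones are compressed and written to a per-store file named after their hash. A standalone sketch of that split, using zlib in place of zstd purely to stay in the standard library; the threshold value and function names are made up.)

    import zlib
    from hashlib import sha256
    from pathlib import Path
    from typing import Optional

    PREFER_DB_LENGTH = 128  # hypothetical cutoff; the real threshold is a store setting

    def column_value_for(blob: bytes, files_dir: Path) -> Optional[bytes]:
        # Small blobs are stored inline in the table column.
        if len(blob) <= PREFER_DB_LENGTH:
            return blob
        # Large blobs are compressed to a file keyed by their hash; the column stores NULL.
        blob_hash = sha256(blob).hexdigest()
        path = files_dir / blob_hash
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_bytes(zlib.compress(blob))
        return None

    def read_blob(blob_hash: str, column_value: Optional[bytes], files_dir: Path) -> bytes:
        if column_value is not None:
            return column_value
        return zlib.decompress((files_dir / blob_hash).read_bytes())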
605
+ async def get_blob_from_kvid(self, kv_id: KeyOrValueId, store_id: bytes32) -> Optional[bytes]:
606
+ async with self.db_wrapper.reader() as reader:
607
+ cursor = await reader.execute(
608
+ "SELECT hash, blob FROM ids WHERE kv_id = ? AND store_id = ?",
609
+ (
610
+ kv_id,
611
+ store_id,
612
+ ),
613
+ )
614
+ row = await cursor.fetchone()
615
+
616
+ if row is None:
617
+ return None
618
+
619
+ blob: bytes = row["blob"]
620
+ if blob is not None:
621
+ return blob
622
+
623
+ blob_hash = bytes32(row["hash"])
624
+ return self.get_blob_from_file(blob_hash, store_id)
625
+
626
+ async def get_terminal_node(self, kid: KeyId, vid: ValueId, store_id: bytes32) -> TerminalNode:
627
+ key = await self.get_blob_from_kvid(kid.raw, store_id)
628
+ value = await self.get_blob_from_kvid(vid.raw, store_id)
629
+ if key is None or value is None:
630
+ raise Exception("Cannot find the key/value pair")
631
+
632
+ return TerminalNode(hash=leaf_hash(key, value), key=key, value=value)
633
+
634
+ async def add_kvid(self, blob: bytes, store_id: bytes32, writer: aiosqlite.Connection) -> KeyOrValueId:
635
+ use_file = self._use_file_for_new_kv_blob(blob)
636
+ blob_hash = bytes32(sha256(blob).digest())
637
+ if use_file:
638
+ table_blob = None
639
+ else:
640
+ table_blob = blob
641
+ try:
642
+ row = await writer.execute_insert(
643
+ "INSERT INTO ids (hash, blob, store_id) VALUES (?, ?, ?)",
644
+ (
645
+ blob_hash,
646
+ table_blob,
647
+ store_id,
648
+ ),
649
+ )
650
+ except sqlite3.IntegrityError as e:
651
+ if "UNIQUE constraint failed" in str(e):
652
+ kv_id = await self.get_kvid(blob, store_id)
653
+ if kv_id is None:
654
+ raise Exception("Internal error") from e
655
+ return kv_id
656
+
657
+ raise
658
+
659
+ if row is None:
660
+ raise Exception("Internal error")
661
+ if use_file:
662
+ path = self.get_key_value_path(store_id=store_id, blob_hash=blob_hash)
663
+ path.parent.mkdir(parents=True, exist_ok=True)
664
+ self.unconfirmed_keys_values[store_id].append(blob_hash)
665
+ # TODO: consider file-system based locking of either the file or the store directory
666
+ path.write_bytes(zstd.compress(blob))
667
+ return KeyOrValueId(row[0])
668
+
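(Annotation, not part of the diff: `add_kvid` above deduplicates blobs by attempting the insert first and, on a UNIQUE constraint violation, looking up and returning the existing id. A minimal sqlite3 sketch of the same pattern; the table layout is simplified and is not the store's actual schema.)

    import sqlite3
    from hashlib import sha256

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE ids (kv_id INTEGER PRIMARY KEY, hash BLOB UNIQUE, blob BLOB)")

    def add_blob(blob: bytes) -> int:
        blob_hash = sha256(blob).digest()
        try:
            cursor = conn.execute("INSERT INTO ids (hash, blob) VALUES (?, ?)", (blob_hash, blob))
            return cursor.lastrowid
        except sqlite3.IntegrityError as e:
            if "UNIQUE constraint failed" not in str(e):
                raise
            # The blob was inserted before; resolve to its existing id instead of failing.
            row = conn.execute("SELECT kv_id FROM ids WHERE hash = ?", (blob_hash,)).fetchone()
            assert row is not None, "row must exist if the UNIQUE constraint fired"
            return row[0]

    first = add_blob(b"same key")
    second = add_blob(b"same key")
    assert first == second  # the second insert resolves to the existing id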
669
+ def delete_unconfirmed_kvids(self, store_id: bytes32) -> None:
670
+ for blob_hash in self.unconfirmed_keys_values[store_id]:
671
+ with log_exceptions(log=log, consume=True):
672
+ path = self.get_key_value_path(store_id=store_id, blob_hash=blob_hash)
673
+ try:
674
+ path.unlink()
675
+ except FileNotFoundError:
676
+ log.error(f"Cannot find key/value path {path} for hash {blob_hash}")
677
+ del self.unconfirmed_keys_values[store_id]
678
+
679
+ def confirm_all_kvids(self, store_id: bytes32) -> None:
680
+ del self.unconfirmed_keys_values[store_id]
681
+
682
+ @contextmanager
683
+ def manage_kv_files(self, store_id: bytes32) -> Iterator[None]:
684
+ if store_id not in self.unconfirmed_keys_values:
685
+ self.unconfirmed_keys_values[store_id] = []
686
+ else:
687
+ raise Exception("Internal error: unconfirmed keys values cache not cleaned")
688
+
689
+ try:
690
+ yield
691
+ except:
692
+ self.delete_unconfirmed_kvids(store_id)
693
+ raise
694
+ else:
695
+ self.confirm_all_kvids(store_id)
696
+
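(Annotation, not part of the diff: `manage_kv_files` above tracks every key/value file written during a batch and deletes them again if the surrounding operation raises, so a failed transaction leaves no orphaned blob files behind. A generic standalone sketch of that cleanup-on-error pattern; `manage_files` is a made-up helper name.)

    from contextlib import contextmanager
    from pathlib import Path
    from typing import Iterator

    @contextmanager
    def manage_files(written: list[Path]) -> Iterator[list[Path]]:
        try:
            yield written          # the caller appends every path it writes
        except BaseException:
            for path in written:   # roll back: remove files created by the failed operation
                path.unlink(missing_ok=True)
            raise
        # on success the files are kept ("confirmed")

    workdir = Path("kv-files")
    workdir.mkdir(exist_ok=True)
    try:
        with manage_files([]) as written:
            target = workdir / "aa.bin"
            target.write_bytes(b"payload")
            written.append(target)
            raise RuntimeError("simulated failure inside the transaction")
    except RuntimeError:
        pass
    assert not (workdir / "aa.bin").exists()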
697
+ async def add_key_value(
698
+ self, key: bytes, value: bytes, store_id: bytes32, writer: aiosqlite.Connection
699
+ ) -> tuple[KeyId, ValueId]:
700
+ kid = KeyId(await self.add_kvid(key, store_id, writer=writer))
701
+ vid = ValueId(await self.add_kvid(value, store_id, writer=writer))
702
+
703
+ return (kid, vid)
704
+
705
+ async def get_terminal_node_by_hash(
706
+ self,
707
+ node_hash: bytes32,
708
+ store_id: bytes32,
709
+ root_hash: Union[bytes32, Unspecified] = unspecified,
710
+ ) -> TerminalNode:
711
+ resolved_root_hash: Optional[bytes32]
712
+ if root_hash is unspecified:
713
+ root = await self.get_tree_root(store_id=store_id)
714
+ resolved_root_hash = root.node_hash
715
+ else:
716
+ resolved_root_hash = root_hash
717
+
718
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=resolved_root_hash)
719
+ kid, vid = merkle_blob.get_node_by_hash(node_hash)
720
+ return await self.get_terminal_node(kid, vid, store_id)
721
+
722
+ async def get_terminal_nodes_by_hashes(
723
+ self,
724
+ node_hashes: list[bytes32],
725
+ store_id: bytes32,
726
+ root_hash: Union[bytes32, Unspecified] = unspecified,
727
+ ) -> list[TerminalNode]:
728
+ resolved_root_hash: Optional[bytes32]
729
+ if root_hash is unspecified:
730
+ root = await self.get_tree_root(store_id=store_id)
731
+ resolved_root_hash = root.node_hash
732
+ else:
733
+ resolved_root_hash = root_hash
734
+
735
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=resolved_root_hash)
736
+ kv_ids: list[tuple[KeyId, ValueId]] = []
737
+ for node_hash in node_hashes:
738
+ kid, vid = merkle_blob.get_node_by_hash(node_hash)
739
+ kv_ids.append((kid, vid))
740
+ kv_ids_unpacked = (KeyOrValueId(id.raw) for kv_id in kv_ids for id in kv_id)
741
+ table_blobs = await self.get_table_blobs(kv_ids_unpacked, store_id)
742
+
743
+ terminal_nodes: list[TerminalNode] = []
744
+ for kid, vid in kv_ids:
745
+ terminal_nodes.append(self.get_terminal_node_from_table_blobs(kid, vid, table_blobs, store_id))
746
+
747
+ return terminal_nodes
748
+
749
+ async def get_existing_hashes(self, node_hashes: list[bytes32], store_id: bytes32) -> set[bytes32]:
750
+ result: set[bytes32] = set()
751
+ batch_size = min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)
752
+
753
+ async with self.db_wrapper.reader() as reader:
754
+ for i in range(0, len(node_hashes), batch_size):
755
+ chunk = node_hashes[i : i + batch_size]
756
+ placeholders = ",".join(["?"] * len(chunk))
757
+ query = f"SELECT hash FROM nodes WHERE store_id = ? AND hash IN ({placeholders}) LIMIT {len(chunk)}"
758
+
759
+ async with reader.execute(query, (store_id, *chunk)) as cursor:
760
+ rows = await cursor.fetchall()
761
+ result.update(row["hash"] for row in rows)
762
+
763
+ return result
764
+
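(Annotation, not part of the diff: `get_existing_hashes` above chunks the hash list so the number of bound parameters stays under SQLite's variable limit, running one `IN (...)` query per chunk and merging the results. A standalone sqlite3 sketch of the same batching; the batch size and table here are arbitrary.)

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE nodes (hash BLOB PRIMARY KEY)")
    conn.executemany("INSERT INTO nodes (hash) VALUES (?)", [(bytes([i]) * 4,) for i in range(10)])

    def existing_hashes(hashes: list[bytes], batch_size: int = 3) -> set[bytes]:
        found: set[bytes] = set()
        for i in range(0, len(hashes), batch_size):
            chunk = hashes[i : i + batch_size]
            placeholders = ",".join("?" * len(chunk))
            rows = conn.execute(f"SELECT hash FROM nodes WHERE hash IN ({placeholders})", chunk).fetchall()
            found.update(row[0] for row in rows)
        return found

    wanted = [bytes([i]) * 4 for i in range(12)]
    print(len(existing_hashes(wanted)))  # 10 of the 12 requested hashes exist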
765
+ async def add_node_hashes(self, store_id: bytes32, generation: Optional[int] = None) -> None:
766
+ root = await self.get_tree_root(store_id=store_id, generation=generation)
767
+ if root.node_hash is None:
768
+ return
769
+
770
+ merkle_blob = await self.get_merkle_blob(
771
+ store_id=store_id, root_hash=root.node_hash, read_only=True, update_cache=False
772
+ )
773
+ hash_to_index = merkle_blob.get_hashes_indexes()
774
+
775
+ existing_hashes = await self.get_existing_hashes(list(hash_to_index.keys()), store_id)
776
+ async with self.db_wrapper.writer() as writer:
777
+ await writer.executemany(
778
+ """
779
+ INSERT INTO nodes(store_id, hash, root_hash, generation, idx)
780
+ VALUES (?, ?, ?, ?, ?)
781
+ """,
782
+ (
783
+ (store_id, hash, root.node_hash, root.generation, index.raw)
784
+ for hash, index in hash_to_index.items()
785
+ if hash not in existing_hashes
786
+ ),
787
+ )
220
788
 
221
789
  async def _insert_root(
222
790
  self,
@@ -225,11 +793,7 @@ class DataStore:
225
793
  status: Status,
226
794
  generation: Optional[int] = None,
227
795
  ) -> Root:
228
- # This should be replaced by an SQLite schema level check.
229
- # https://github.com/Chia-Network/chia-blockchain/pull/9284
230
- store_id = bytes32(store_id)
231
-
232
- async with self.db_wrapper.writer() as writer:
796
+ async with self.db_wrapper.writer_maybe_transaction() as writer:
233
797
  if generation is None:
234
798
  try:
235
799
  existing_generation = await self.get_tree_generation(store_id=store_id)
@@ -254,173 +818,8 @@ class DataStore:
254
818
  """,
255
819
  new_root.to_row(),
256
820
  )
257
-
258
- # `node_hash` is now a root, so it has no ancestor.
259
- # Don't change the ancestor table unless the root is committed.
260
- if node_hash is not None and status == Status.COMMITTED:
261
- values = {
262
- "hash": node_hash,
263
- "tree_id": store_id,
264
- "generation": generation,
265
- }
266
- await writer.execute(
267
- """
268
- INSERT INTO ancestors(hash, ancestor, tree_id, generation)
269
- VALUES (:hash, NULL, :tree_id, :generation)
270
- """,
271
- values,
272
- )
273
-
274
821
  return new_root
275
822
 
276
- async def _insert_node(
277
- self,
278
- node_hash: bytes32,
279
- node_type: NodeType,
280
- left_hash: Optional[bytes32],
281
- right_hash: Optional[bytes32],
282
- key: Optional[bytes],
283
- value: Optional[bytes],
284
- ) -> None:
285
- # TODO: can we get sqlite to do this check?
286
- values = {
287
- "hash": node_hash,
288
- "node_type": node_type,
289
- "left": left_hash,
290
- "right": right_hash,
291
- "key": key,
292
- "value": value,
293
- }
294
-
295
- async with self.db_wrapper.writer() as writer:
296
- try:
297
- await writer.execute(
298
- """
299
- INSERT INTO node(hash, node_type, left, right, key, value)
300
- VALUES(:hash, :node_type, :left, :right, :key, :value)
301
- """,
302
- values,
303
- )
304
- except aiosqlite.IntegrityError as e:
305
- if not e.args[0].startswith("UNIQUE constraint"):
306
- # UNIQUE constraint failed: node.hash
307
- raise
308
-
309
- async with writer.execute(
310
- "SELECT * FROM node WHERE hash == :hash LIMIT 1",
311
- {"hash": node_hash},
312
- ) as cursor:
313
- result = await cursor.fetchone()
314
-
315
- if result is None:
316
- # some ideas for causes:
317
- # an sqlite bug
318
- # bad queries in this function
319
- # unexpected db constraints
320
- raise Exception("Unable to find conflicting row") from e # pragma: no cover
321
-
322
- result_dict = dict(result)
323
- if result_dict != values:
324
- raise Exception(
325
- f"Requested insertion of node with matching hash but other values differ: {node_hash}"
326
- ) from None
327
-
328
- async def insert_node(self, node_type: NodeType, value1: bytes, value2: bytes) -> None:
329
- if node_type == NodeType.INTERNAL:
330
- left_hash = bytes32(value1)
331
- right_hash = bytes32(value2)
332
- node_hash = internal_hash(left_hash, right_hash)
333
- await self._insert_node(node_hash, node_type, bytes32(value1), bytes32(value2), None, None)
334
- else:
335
- node_hash = leaf_hash(key=value1, value=value2)
336
- await self._insert_node(node_hash, node_type, None, None, value1, value2)
337
-
338
- async def _insert_internal_node(self, left_hash: bytes32, right_hash: bytes32) -> bytes32:
339
- node_hash: bytes32 = internal_hash(left_hash=left_hash, right_hash=right_hash)
340
-
341
- await self._insert_node(
342
- node_hash=node_hash,
343
- node_type=NodeType.INTERNAL,
344
- left_hash=left_hash,
345
- right_hash=right_hash,
346
- key=None,
347
- value=None,
348
- )
349
-
350
- return node_hash
351
-
352
- async def _insert_ancestor_table(
353
- self,
354
- left_hash: bytes32,
355
- right_hash: bytes32,
356
- store_id: bytes32,
357
- generation: int,
358
- ) -> None:
359
- node_hash = internal_hash(left_hash=left_hash, right_hash=right_hash)
360
-
361
- async with self.db_wrapper.writer() as writer:
362
- for hash in (left_hash, right_hash):
363
- values = {
364
- "hash": hash,
365
- "ancestor": node_hash,
366
- "tree_id": store_id,
367
- "generation": generation,
368
- }
369
- try:
370
- await writer.execute(
371
- """
372
- INSERT INTO ancestors(hash, ancestor, tree_id, generation)
373
- VALUES (:hash, :ancestor, :tree_id, :generation)
374
- """,
375
- values,
376
- )
377
- except aiosqlite.IntegrityError as e:
378
- if not e.args[0].startswith("UNIQUE constraint"):
379
- # UNIQUE constraint failed: ancestors.hash, ancestors.tree_id, ancestors.generation
380
- raise
381
-
382
- async with writer.execute(
383
- """
384
- SELECT *
385
- FROM ancestors
386
- WHERE hash == :hash AND generation == :generation AND tree_id == :tree_id
387
- LIMIT 1
388
- """,
389
- {"hash": hash, "generation": generation, "tree_id": store_id},
390
- ) as cursor:
391
- result = await cursor.fetchone()
392
-
393
- if result is None:
394
- # some ideas for causes:
395
- # an sqlite bug
396
- # bad queries in this function
397
- # unexpected db constraints
398
- raise Exception("Unable to find conflicting row") from e # pragma: no cover
399
-
400
- result_dict = dict(result)
401
- if result_dict != values:
402
- raise Exception(
403
- "Requested insertion of ancestor, where ancestor differ, but other values are identical: "
404
- f"{hash} {generation} {store_id}"
405
- ) from None
406
-
407
- async def _insert_terminal_node(self, key: bytes, value: bytes) -> bytes32:
408
- # forcing type hint here for:
409
- # https://github.com/Chia-Network/clvm/pull/102
410
- # https://github.com/Chia-Network/clvm/pull/106
411
- node_hash: bytes32 = Program.to((key, value)).get_tree_hash()
412
-
413
- await self._insert_node(
414
- node_hash=node_hash,
415
- node_type=NodeType.TERMINAL,
416
- left_hash=None,
417
- right_hash=None,
418
- key=key,
419
- value=value,
420
- )
421
-
422
- return node_hash
423
-
424
823
  async def get_pending_root(self, store_id: bytes32) -> Optional[Root]:
425
824
  async with self.db_wrapper.reader() as reader:
426
825
  cursor = await reader.execute(
@@ -478,21 +877,6 @@ class DataStore:
478
877
  root.generation,
479
878
  ),
480
879
  )
481
- # `node_hash` is now a root, so it has no ancestor.
482
- # Don't change the ancestor table unless the root is committed.
483
- if root.node_hash is not None and status == Status.COMMITTED:
484
- values = {
485
- "hash": root.node_hash,
486
- "tree_id": root.store_id,
487
- "generation": root.generation,
488
- }
489
- await writer.execute(
490
- """
491
- INSERT INTO ancestors(hash, ancestor, tree_id, generation)
492
- VALUES (:hash, NULL, :tree_id, :generation)
493
- """,
494
- values,
495
- )
496
880
 
497
881
  async def check(self) -> None:
498
882
  for check in self._checks:
@@ -518,30 +902,7 @@ class DataStore:
518
902
  if len(bad_trees) > 0:
519
903
  raise TreeGenerationIncrementingError(store_ids=bad_trees)
520
904
 
521
- async def _check_hashes(self) -> None:
522
- async with self.db_wrapper.reader() as reader:
523
- cursor = await reader.execute("SELECT * FROM node")
524
-
525
- bad_node_hashes: list[bytes32] = []
526
- async for row in cursor:
527
- node = row_to_node(row=row)
528
- if isinstance(node, InternalNode):
529
- expected_hash = internal_hash(left_hash=node.left_hash, right_hash=node.right_hash)
530
- elif isinstance(node, TerminalNode):
531
- expected_hash = Program.to((node.key, node.value)).get_tree_hash()
532
- else:
533
- raise Exception(f"Internal error, unknown node type: {node!r}")
534
-
535
- if node.hash != expected_hash:
536
- bad_node_hashes.append(node.hash)
537
-
538
- if len(bad_node_hashes) > 0:
539
- raise NodeHashError(node_hashes=bad_node_hashes)
540
-
541
- _checks: tuple[Callable[[DataStore], Awaitable[None]], ...] = (
542
- _check_roots_are_incrementing,
543
- _check_hashes,
544
- )
905
+ _checks: tuple[Callable[[DataStore], Awaitable[None]], ...] = (_check_roots_are_incrementing,)
545
906
 
546
907
  async def create_tree(self, store_id: bytes32, status: Status = Status.PENDING) -> bool:
547
908
  await self._insert_root(store_id=store_id, node_hash=None, status=status)
@@ -634,165 +995,62 @@ class DataStore:
634
995
 
635
996
  return roots
636
997
 
637
- async def get_last_tree_root_by_hash(
638
- self, store_id: bytes32, hash: Optional[bytes32], max_generation: Optional[int] = None
639
- ) -> Optional[Root]:
640
- async with self.db_wrapper.reader() as reader:
641
- max_generation_str = "AND generation < :max_generation " if max_generation is not None else ""
642
- node_hash_str = "AND node_hash == :node_hash " if hash is not None else "AND node_hash is NULL "
643
- cursor = await reader.execute(
644
- "SELECT * FROM root WHERE tree_id == :tree_id "
645
- f"{max_generation_str}"
646
- f"{node_hash_str}"
647
- "ORDER BY generation DESC LIMIT 1",
648
- {"tree_id": store_id, "node_hash": hash, "max_generation": max_generation},
649
- )
650
- row = await cursor.fetchone()
651
-
652
- if row is None:
653
- return None
654
- return Root.from_row(row=row)
655
-
656
998
  async def get_ancestors(
657
999
  self,
658
1000
  node_hash: bytes32,
659
1001
  store_id: bytes32,
660
1002
  root_hash: Optional[bytes32] = None,
661
- ) -> list[InternalNode]:
662
- async with self.db_wrapper.reader() as reader:
663
- if root_hash is None:
664
- root = await self.get_tree_root(store_id=store_id)
665
- root_hash = root.node_hash
666
- if root_hash is None:
667
- raise Exception(f"Root hash is unspecified for store ID: {store_id.hex()}")
668
- cursor = await reader.execute(
669
- """
670
- WITH RECURSIVE
671
- tree_from_root_hash(hash, node_type, left, right, key, value, depth) AS (
672
- SELECT node.*, 0 AS depth FROM node WHERE node.hash == :root_hash
673
- UNION ALL
674
- SELECT node.*, tree_from_root_hash.depth + 1 AS depth FROM node, tree_from_root_hash
675
- WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
676
- ),
677
- ancestors(hash, node_type, left, right, key, value, depth) AS (
678
- SELECT node.*, NULL AS depth FROM node
679
- WHERE node.left == :reference_hash OR node.right == :reference_hash
680
- UNION ALL
681
- SELECT node.*, NULL AS depth FROM node, ancestors
682
- WHERE node.left == ancestors.hash OR node.right == ancestors.hash
683
- )
684
- SELECT * FROM tree_from_root_hash INNER JOIN ancestors
685
- WHERE tree_from_root_hash.hash == ancestors.hash
686
- ORDER BY tree_from_root_hash.depth DESC
687
- """,
688
- {"reference_hash": node_hash, "root_hash": root_hash},
689
- )
690
-
691
- # The resulting rows must represent internal nodes. InternalNode.from_row()
692
- # does some amount of validation in the sense that it will fail if left
693
- # or right can't turn into a bytes32 as expected. There is room for more
694
- # validation here if desired.
695
- ancestors = [InternalNode.from_row(row=row) async for row in cursor]
696
-
697
- return ancestors
698
-
699
- async def get_ancestors_optimized(
700
- self,
701
- node_hash: bytes32,
702
- store_id: bytes32,
703
1003
  generation: Optional[int] = None,
704
- root_hash: Optional[bytes32] = None,
705
1004
  ) -> list[InternalNode]:
706
1005
  async with self.db_wrapper.reader():
707
- nodes = []
708
1006
  if root_hash is None:
709
1007
  root = await self.get_tree_root(store_id=store_id, generation=generation)
710
1008
  root_hash = root.node_hash
711
-
712
1009
  if root_hash is None:
713
- return []
714
-
715
- while True:
716
- internal_node = await self._get_one_ancestor(node_hash, store_id, generation)
717
- if internal_node is None:
718
- break
719
- nodes.append(internal_node)
720
- node_hash = internal_node.hash
721
-
722
- if len(nodes) > 0:
723
- if root_hash != nodes[-1].hash:
724
- raise RuntimeError("Ancestors list didn't produce the root as top result.")
725
-
726
- return nodes
1010
+ raise Exception(f"Root hash is unspecified for store ID: {store_id.hex()}")
727
1011
 
728
- async def get_internal_nodes(self, store_id: bytes32, root_hash: Optional[bytes32] = None) -> list[InternalNode]:
729
- async with self.db_wrapper.reader() as reader:
730
- if root_hash is None:
731
- root = await self.get_tree_root(store_id=store_id)
732
- root_hash = root.node_hash
733
- cursor = await reader.execute(
734
- """
735
- WITH RECURSIVE
736
- tree_from_root_hash(hash, node_type, left, right, key, value) AS (
737
- SELECT node.* FROM node WHERE node.hash == :root_hash
738
- UNION ALL
739
- SELECT node.* FROM node, tree_from_root_hash WHERE node.hash == tree_from_root_hash.left
740
- OR node.hash == tree_from_root_hash.right
741
- )
742
- SELECT * FROM tree_from_root_hash
743
- WHERE node_type == :node_type
744
- """,
745
- {"root_hash": root_hash, "node_type": NodeType.INTERNAL},
1012
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=root_hash)
1013
+ reference_kid, _ = merkle_blob.get_node_by_hash(node_hash)
1014
+
1015
+ reference_index = merkle_blob.get_key_index(reference_kid)
1016
+ lineage = merkle_blob.get_lineage_with_indexes(reference_index)
1017
+ result: list[InternalNode] = []
1018
+ for index, node in itertools.islice(lineage, 1, None):
1019
+ assert isinstance(node, chia_rs.datalayer.InternalNode)
1020
+ result.append(
1021
+ InternalNode(
1022
+ hash=node.hash,
1023
+ left_hash=merkle_blob.get_hash_at_index(node.left),
1024
+ right_hash=merkle_blob.get_hash_at_index(node.right),
1025
+ )
746
1026
  )
1027
+ return result
747
1028
 
748
- internal_nodes: list[InternalNode] = []
749
- async for row in cursor:
750
- node = row_to_node(row=row)
751
- if not isinstance(node, InternalNode):
752
- raise Exception(f"Unexpected internal node found: {node.hash.hex()}")
753
- internal_nodes.append(node)
1029
+ def get_terminal_node_from_table_blobs(
1030
+ self,
1031
+ kid: KeyId,
1032
+ vid: ValueId,
1033
+ table_blobs: dict[KeyOrValueId, tuple[bytes32, Optional[bytes]]],
1034
+ store_id: bytes32,
1035
+ ) -> TerminalNode:
1036
+ key = table_blobs[KeyOrValueId(kid.raw)][1]
1037
+ if key is None:
1038
+ key_hash = table_blobs[KeyOrValueId(kid.raw)][0]
1039
+ key = self.get_blob_from_file(key_hash, store_id)
754
1040
 
755
- return internal_nodes
1041
+ value = table_blobs[KeyOrValueId(vid.raw)][1]
1042
+ if value is None:
1043
+ value_hash = table_blobs[KeyOrValueId(vid.raw)][0]
1044
+ value = self.get_blob_from_file(value_hash, store_id)
756
1045
 
757
- async def get_keys_values_cursor(
758
- self,
759
- reader: aiosqlite.Connection,
760
- root_hash: Optional[bytes32],
761
- only_keys: bool = False,
762
- ) -> aiosqlite.Cursor:
763
- select_clause = "SELECT hash, key" if only_keys else "SELECT *"
764
- maybe_value = "" if only_keys else "value, "
765
- select_node_clause = "node.hash, node.node_type, node.left, node.right, node.key" if only_keys else "node.*"
766
- return await reader.execute(
767
- f"""
768
- WITH RECURSIVE
769
- tree_from_root_hash(hash, node_type, left, right, key, {maybe_value}depth, rights) AS (
770
- SELECT {select_node_clause}, 0 AS depth, 0 AS rights FROM node WHERE node.hash == :root_hash
771
- UNION ALL
772
- SELECT
773
- {select_node_clause},
774
- tree_from_root_hash.depth + 1 AS depth,
775
- CASE
776
- WHEN node.hash == tree_from_root_hash.right
777
- THEN tree_from_root_hash.rights + (1 << (62 - tree_from_root_hash.depth))
778
- ELSE tree_from_root_hash.rights
779
- END AS rights
780
- FROM node, tree_from_root_hash
781
- WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
782
- )
783
- {select_clause} FROM tree_from_root_hash
784
- WHERE node_type == :node_type
785
- ORDER BY depth ASC, rights ASC
786
- """,
787
- {"root_hash": root_hash, "node_type": NodeType.TERMINAL},
788
- )
1046
+ return TerminalNode(hash=leaf_hash(key, value), key=key, value=value)
789
1047
 
790
1048
  async def get_keys_values(
791
1049
  self,
792
1050
  store_id: bytes32,
793
1051
  root_hash: Union[bytes32, Unspecified] = unspecified,
794
1052
  ) -> list[TerminalNode]:
795
- async with self.db_wrapper.reader() as reader:
1053
+ async with self.db_wrapper.reader():
796
1054
  resolved_root_hash: Optional[bytes32]
797
1055
  if root_hash is unspecified:
798
1056
  root = await self.get_tree_root(store_id=store_id)
@@ -800,25 +1058,18 @@ class DataStore:
800
1058
  else:
801
1059
  resolved_root_hash = root_hash
802
1060
 
803
- cursor = await self.get_keys_values_cursor(reader, resolved_root_hash)
1061
+ try:
1062
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=resolved_root_hash)
1063
+ except MerkleBlobNotFoundError:
1064
+ return []
1065
+
1066
+ kv_ids = merkle_blob.get_keys_values()
1067
+ kv_ids_unpacked = (KeyOrValueId(id.raw) for pair in kv_ids.items() for id in pair)
1068
+ table_blobs = await self.get_table_blobs(kv_ids_unpacked, store_id)
1069
+
804
1070
  terminal_nodes: list[TerminalNode] = []
805
- async for row in cursor:
806
- if row["depth"] > 62:
807
- # TODO: Review the value and implementation of left-to-right order
808
- # reporting. Initial use is for balanced insertion with the
809
- # work done in the query.
810
-
811
- # This is limited based on the choice of 63 for the maximum left
812
- # shift in the query. This is in turn based on the SQLite integers
813
- # ranging in size up to signed 8 bytes, 64 bits. If we exceed this then
814
- # we no longer guarantee the left-to-right ordering of the node
815
- # list. While 63 allows for a lot of nodes in a balanced tree, in
816
- # the worst case it allows only 62 terminal nodes.
817
- raise Exception("Tree depth exceeded 62, unable to guarantee left-to-right node order.")
818
- node = row_to_node(row=row)
819
- if not isinstance(node, TerminalNode):
820
- raise Exception(f"Unexpected internal node found: {node.hash.hex()}")
821
- terminal_nodes.append(node)
1071
+ for kid, vid in kv_ids.items():
1072
+ terminal_nodes.append(self.get_terminal_node_from_table_blobs(kid, vid, table_blobs, store_id))
822
1073
 
823
1074
  return terminal_nodes
824
1075
 
@@ -827,7 +1078,7 @@ class DataStore:
827
1078
  store_id: bytes32,
828
1079
  root_hash: Union[bytes32, Unspecified] = unspecified,
829
1080
  ) -> KeysValuesCompressed:
830
- async with self.db_wrapper.reader() as reader:
1081
+ async with self.db_wrapper.reader():
831
1082
  resolved_root_hash: Optional[bytes32]
832
1083
  if root_hash is unspecified:
833
1084
  root = await self.get_tree_root(store_id=store_id)
@@ -835,36 +1086,26 @@ class DataStore:
835
1086
  else:
836
1087
  resolved_root_hash = root_hash
837
1088
 
838
- cursor = await self.get_keys_values_cursor(reader, resolved_root_hash)
839
1089
  keys_values_hashed: dict[bytes32, bytes32] = {}
840
1090
  key_hash_to_length: dict[bytes32, int] = {}
841
1091
  leaf_hash_to_length: dict[bytes32, int] = {}
842
- async for row in cursor:
843
- if row["depth"] > 62:
844
- raise Exception("Tree depth exceeded 62, unable to guarantee left-to-right node order.")
845
- node = row_to_node(row=row)
846
- if not isinstance(node, TerminalNode):
847
- raise Exception(f"Unexpected internal node found: {node.hash.hex()}")
848
- keys_values_hashed[key_hash(node.key)] = leaf_hash(node.key, node.value)
849
- key_hash_to_length[key_hash(node.key)] = len(node.key)
850
- leaf_hash_to_length[leaf_hash(node.key, node.value)] = len(node.key) + len(node.value)
1092
+ if resolved_root_hash is not None:
1093
+ try:
1094
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=resolved_root_hash)
1095
+ except MerkleBlobNotFoundError:
1096
+ return KeysValuesCompressed({}, {}, {}, resolved_root_hash)
851
1097
 
852
- return KeysValuesCompressed(keys_values_hashed, key_hash_to_length, leaf_hash_to_length, resolved_root_hash)
1098
+ kv_ids = merkle_blob.get_keys_values()
1099
+ kv_ids_unpacked = (KeyOrValueId(id.raw) for pair in kv_ids.items() for id in pair)
1100
+ table_blobs = await self.get_table_blobs(kv_ids_unpacked, store_id)
853
1101
 
854
- async def get_leaf_hashes_by_hashed_key(
855
- self, store_id: bytes32, root_hash: Optional[bytes32] = None
856
- ) -> dict[bytes32, bytes32]:
857
- result: dict[bytes32, bytes32] = {}
858
- async with self.db_wrapper.reader() as reader:
859
- if root_hash is None:
860
- root = await self.get_tree_root(store_id=store_id)
861
- root_hash = root.node_hash
1102
+ for kid, vid in kv_ids.items():
1103
+ node = self.get_terminal_node_from_table_blobs(kid, vid, table_blobs, store_id)
1104
+ keys_values_hashed[key_hash(node.key)] = leaf_hash(node.key, node.value)
1105
+ key_hash_to_length[key_hash(node.key)] = len(node.key)
1106
+ leaf_hash_to_length[leaf_hash(node.key, node.value)] = len(node.key) + len(node.value)
862
1107
 
863
- cursor = await self.get_keys_values_cursor(reader, root_hash, True)
864
- async for row in cursor:
865
- result[key_hash(row["key"])] = bytes32(row["hash"])
866
-
867
- return result
1108
+ return KeysValuesCompressed(keys_values_hashed, key_hash_to_length, leaf_hash_to_length, resolved_root_hash)
868
1109
 
869
1110
  async def get_keys_paginated(
870
1111
  self,
@@ -877,11 +1118,12 @@ class DataStore:
877
1118
  pagination_data = get_hashes_for_page(page, keys_values_compressed.key_hash_to_length, max_page_size)
878
1119
 
879
1120
  keys: list[bytes] = []
1121
+ leaf_hashes: list[bytes32] = []
880
1122
  for hash in pagination_data.hashes:
881
1123
  leaf_hash = keys_values_compressed.keys_values_hashed[hash]
882
- node = await self.get_node(leaf_hash)
883
- assert isinstance(node, TerminalNode)
884
- keys.append(node.key)
1124
+ leaf_hashes.append(leaf_hash)
1125
+ nodes = await self.get_terminal_nodes_by_hashes(leaf_hashes, store_id, root_hash)
1126
+ keys = [node.key for node in nodes]
885
1127
 
886
1128
  return KeysPaginationData(
887
1129
  pagination_data.total_pages,
@@ -900,12 +1142,7 @@ class DataStore:
900
1142
  keys_values_compressed = await self.get_keys_values_compressed(store_id, root_hash)
901
1143
  pagination_data = get_hashes_for_page(page, keys_values_compressed.leaf_hash_to_length, max_page_size)
902
1144
 
903
- keys_values: list[TerminalNode] = []
904
- for hash in pagination_data.hashes:
905
- node = await self.get_node(hash)
906
- assert isinstance(node, TerminalNode)
907
- keys_values.append(node)
908
-
1145
+ keys_values = await self.get_terminal_nodes_by_hashes(pagination_data.hashes, store_id, root_hash)
909
1146
  return KeysValuesPaginationData(
910
1147
  pagination_data.total_pages,
911
1148
  pagination_data.total_bytes,
@@ -942,14 +1179,25 @@ class DataStore:
942
1179
 
943
1180
  pagination_data = get_hashes_for_page(page, lengths, max_page_size)
944
1181
  kv_diff: list[DiffData] = []
945
-
1182
+ insertion_hashes: list[bytes32] = []
1183
+ deletion_hashes: list[bytes32] = []
946
1184
  for hash in pagination_data.hashes:
947
- node = await self.get_node(hash)
948
- assert isinstance(node, TerminalNode)
949
1185
  if hash in insertions:
950
- kv_diff.append(DiffData(OperationType.INSERT, node.key, node.value))
1186
+ insertion_hashes.append(hash)
951
1187
  else:
952
- kv_diff.append(DiffData(OperationType.DELETE, node.key, node.value))
1188
+ deletion_hashes.append(hash)
1189
+ if hash2 != bytes32.zeros:
1190
+ insertion_nodes = await self.get_terminal_nodes_by_hashes(insertion_hashes, store_id, hash2)
1191
+ else:
1192
+ insertion_nodes = []
1193
+ if hash1 != bytes32.zeros:
1194
+ deletion_nodes = await self.get_terminal_nodes_by_hashes(deletion_hashes, store_id, hash1)
1195
+ else:
1196
+ deletion_nodes = []
1197
+ for node in insertion_nodes:
1198
+ kv_diff.append(DiffData(OperationType.INSERT, node.key, node.value))
1199
+ for node in deletion_nodes:
1200
+ kv_diff.append(DiffData(OperationType.DELETE, node.key, node.value))
953
1201
 
954
1202
  return KVDiffPaginationData(
955
1203
  pagination_data.total_pages,
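(Annotation, not part of the diff: the paginated diff assembly above splits the page's hashes into insertions and deletions and resolves each group against the corresponding root. Conceptually, entries present only under the new root are inserts, entries present only under the old root are deletes, and a changed value appears as a delete plus an insert. A generic sketch of that idea over plain dicts; it is illustrative and does not operate on store roots.)

    from typing import NamedTuple

    class Diff(NamedTuple):
        op: str        # "insert" or "delete"
        key: bytes
        value: bytes

    def kv_diff(old: dict[bytes, bytes], new: dict[bytes, bytes]) -> list[Diff]:
        diffs: list[Diff] = []
        for key, value in new.items():
            if old.get(key) != value:
                diffs.append(Diff("insert", key, value))
        for key, value in old.items():
            if new.get(key) != value:
                diffs.append(Diff("delete", key, value))
        return diffs

    old = {b"a": b"1", b"b": b"2"}
    new = {b"a": b"1", b"b": b"3", b"c": b"4"}
    print(kv_diff(old, new))
    # the changed value for b yields one delete and one insert; c yields an insert only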
@@ -957,380 +1205,128 @@ class DataStore:
957
1205
  kv_diff,
958
1206
  )
959
1207
 
960
- async def get_node_type(self, node_hash: bytes32) -> NodeType:
961
- async with self.db_wrapper.reader() as reader:
962
- cursor = await reader.execute(
963
- "SELECT node_type FROM node WHERE hash == :hash LIMIT 1",
964
- {"hash": node_hash},
965
- )
966
- raw_node_type = await cursor.fetchone()
967
-
968
- if raw_node_type is None:
969
- raise Exception(f"No node found for specified hash: {node_hash.hex()}")
970
-
971
- return NodeType(raw_node_type["node_type"])
972
-
973
- async def get_terminal_node_for_seed(
974
- self, store_id: bytes32, seed: bytes32, root_hash: Optional[bytes32] = None
975
- ) -> Optional[bytes32]:
976
- path = "".join(reversed("".join(f"{b:08b}" for b in seed)))
977
- async with self.db_wrapper.reader() as reader:
978
- if root_hash is None:
979
- root = await self.get_tree_root(store_id)
980
- root_hash = root.node_hash
981
- if root_hash is None:
982
- return None
983
-
984
- async with reader.execute(
985
- """
986
- WITH RECURSIVE
987
- random_leaf(hash, node_type, left, right, depth, side) AS (
988
- SELECT
989
- node.hash AS hash,
990
- node.node_type AS node_type,
991
- node.left AS left,
992
- node.right AS right,
993
- 1 AS depth,
994
- SUBSTR(:path, 1, 1) as side
995
- FROM node
996
- WHERE node.hash == :root_hash
997
- UNION ALL
998
- SELECT
999
- node.hash AS hash,
1000
- node.node_type AS node_type,
1001
- node.left AS left,
1002
- node.right AS right,
1003
- random_leaf.depth + 1 AS depth,
1004
- SUBSTR(:path, random_leaf.depth + 1, 1) as side
1005
- FROM node, random_leaf
1006
- WHERE (
1007
- (random_leaf.side == "0" AND node.hash == random_leaf.left)
1008
- OR (random_leaf.side != "0" AND node.hash == random_leaf.right)
1009
- )
1010
- )
1011
- SELECT hash AS hash FROM random_leaf
1012
- WHERE node_type == :node_type
1013
- LIMIT 1
1014
- """,
1015
- {"root_hash": root_hash, "node_type": NodeType.TERMINAL, "path": path},
1016
- ) as cursor:
1017
- row = await cursor.fetchone()
1018
- if row is None:
1019
- # No cover since this is an error state that should be unreachable given the code
1020
- # above has already verified that there is a non-empty tree.
1021
- raise Exception("No terminal node found for seed") # pragma: no cover
1022
- return bytes32(row["hash"])
1023
-
1024
- def get_side_for_seed(self, seed: bytes32) -> Side:
1025
- side_seed = bytes(seed)[0]
1026
- return Side.LEFT if side_seed < 128 else Side.RIGHT
1027
-
1028
1208
  async def autoinsert(
1029
1209
  self,
1030
1210
  key: bytes,
1031
1211
  value: bytes,
1032
1212
  store_id: bytes32,
1033
- use_optimized: bool = True,
1034
1213
  status: Status = Status.PENDING,
1035
1214
  root: Optional[Root] = None,
1036
1215
  ) -> InsertResult:
1037
- async with self.db_wrapper.writer():
1038
- if root is None:
1039
- root = await self.get_tree_root(store_id=store_id)
1040
-
1041
- was_empty = root.node_hash is None
1042
-
1043
- if was_empty:
1044
- reference_node_hash = None
1045
- side = None
1046
- else:
1047
- seed = leaf_hash(key=key, value=value)
1048
- reference_node_hash = await self.get_terminal_node_for_seed(store_id, seed, root_hash=root.node_hash)
1049
- side = self.get_side_for_seed(seed)
1050
-
1051
- return await self.insert(
1052
- key=key,
1053
- value=value,
1054
- store_id=store_id,
1055
- reference_node_hash=reference_node_hash,
1056
- side=side,
1057
- use_optimized=use_optimized,
1058
- status=status,
1059
- root=root,
1060
- )
1061
-
1062
- async def get_keys_values_dict(
1063
- self,
1064
- store_id: bytes32,
1065
- root_hash: Union[bytes32, Unspecified] = unspecified,
1066
- ) -> dict[bytes, bytes]:
1067
- pairs = await self.get_keys_values(store_id=store_id, root_hash=root_hash)
1068
- return {node.key: node.value for node in pairs}
1216
+ return await self.insert(
1217
+ key=key,
1218
+ value=value,
1219
+ store_id=store_id,
1220
+ reference_node_hash=None,
1221
+ side=None,
1222
+ status=status,
1223
+ root=root,
1224
+ )
1069
1225
 
1070
1226
  async def get_keys(
1071
1227
  self,
1072
1228
  store_id: bytes32,
1073
1229
  root_hash: Union[bytes32, Unspecified] = unspecified,
1074
1230
  ) -> list[bytes]:
1075
- async with self.db_wrapper.reader() as reader:
1231
+ async with self.db_wrapper.reader():
1076
1232
  if root_hash is unspecified:
1077
1233
  root = await self.get_tree_root(store_id=store_id)
1078
1234
  resolved_root_hash = root.node_hash
1079
1235
  else:
1080
1236
  resolved_root_hash = root_hash
1081
- cursor = await reader.execute(
1082
- """
1083
- WITH RECURSIVE
1084
- tree_from_root_hash(hash, node_type, left, right, key) AS (
1085
- SELECT node.hash, node.node_type, node.left, node.right, node.key
1086
- FROM node WHERE node.hash == :root_hash
1087
- UNION ALL
1088
- SELECT
1089
- node.hash, node.node_type, node.left, node.right, node.key FROM node, tree_from_root_hash
1090
- WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
1091
- )
1092
- SELECT key FROM tree_from_root_hash WHERE node_type == :node_type
1093
- """,
1094
- {"root_hash": resolved_root_hash, "node_type": NodeType.TERMINAL},
1095
- )
1096
-
1097
- keys: list[bytes] = [row["key"] async for row in cursor]
1098
-
1099
- return keys
1100
-
1101
- async def get_ancestors_common(
1102
- self,
1103
- node_hash: bytes32,
1104
- store_id: bytes32,
1105
- root_hash: Optional[bytes32],
1106
- generation: Optional[int] = None,
1107
- use_optimized: bool = True,
1108
- ) -> list[InternalNode]:
1109
- if use_optimized:
1110
- ancestors: list[InternalNode] = await self.get_ancestors_optimized(
1111
- node_hash=node_hash,
1112
- store_id=store_id,
1113
- generation=generation,
1114
- root_hash=root_hash,
1115
- )
1116
- else:
1117
- ancestors = await self.get_ancestors_optimized(
1118
- node_hash=node_hash,
1119
- store_id=store_id,
1120
- generation=generation,
1121
- root_hash=root_hash,
1122
- )
1123
- ancestors_2: list[InternalNode] = await self.get_ancestors(
1124
- node_hash=node_hash, store_id=store_id, root_hash=root_hash
1125
- )
1126
- if ancestors != ancestors_2:
1127
- raise RuntimeError("Ancestors optimized didn't produce the expected result.")
1128
-
1129
- if len(ancestors) >= 62:
1130
- raise RuntimeError("Tree exceeds max height of 62.")
1131
- return ancestors
1132
-
1133
- async def update_ancestor_hashes_on_insert(
1134
- self,
1135
- store_id: bytes32,
1136
- left: bytes32,
1137
- right: bytes32,
1138
- traversal_node_hash: bytes32,
1139
- ancestors: list[InternalNode],
1140
- status: Status,
1141
- root: Root,
1142
- ) -> Root:
1143
- # update ancestors after inserting root, to keep table constraints.
1144
- insert_ancestors_cache: list[tuple[bytes32, bytes32, bytes32]] = []
1145
- new_generation = root.generation + 1
1146
- # create first new internal node
1147
- new_hash = await self._insert_internal_node(left_hash=left, right_hash=right)
1148
- insert_ancestors_cache.append((left, right, store_id))
1149
-
1150
- # create updated replacements for the rest of the internal nodes
1151
- for ancestor in ancestors:
1152
- if not isinstance(ancestor, InternalNode):
1153
- raise Exception(f"Expected an internal node but got: {type(ancestor).__name__}")
1154
-
1155
- if ancestor.left_hash == traversal_node_hash:
1156
- left = new_hash
1157
- right = ancestor.right_hash
1158
- elif ancestor.right_hash == traversal_node_hash:
1159
- left = ancestor.left_hash
1160
- right = new_hash
1161
-
1162
- traversal_node_hash = ancestor.hash
1163
-
1164
- new_hash = await self._insert_internal_node(left_hash=left, right_hash=right)
1165
- insert_ancestors_cache.append((left, right, store_id))
1166
-
1167
- new_root = await self._insert_root(
1168
- store_id=store_id,
1169
- node_hash=new_hash,
1170
- status=status,
1171
- generation=new_generation,
1172
- )
1173
1237
 
1174
- if status == Status.COMMITTED:
1175
- for left_hash, right_hash, store_id in insert_ancestors_cache:
1176
- await self._insert_ancestor_table(left_hash, right_hash, store_id, new_generation)
1238
+ try:
1239
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=resolved_root_hash)
1240
+ except MerkleBlobNotFoundError:
1241
+ return []
1177
1242
 
1178
- return new_root
1243
+ kv_ids = merkle_blob.get_keys_values()
1244
+ raw_key_ids = (KeyOrValueId(id.raw) for id in kv_ids.keys())
1245
+ table_blobs = await self.get_table_blobs(raw_key_ids, store_id)
1246
+ keys: list[bytes] = []
1247
+ for kid in kv_ids.keys():
1248
+ blob_hash, blob = table_blobs[KeyOrValueId(kid.raw)]
1249
+ if blob is None:
1250
+ blob = self.get_blob_from_file(blob_hash, store_id)
1251
+ keys.append(blob)
1252
+
1253
+ return keys
1254
+
1255
+ def get_reference_kid_side(self, merkle_blob: MerkleBlob, seed: bytes32) -> tuple[KeyId, Side]:
1256
+ side_seed = bytes(seed)[0]
1257
+ side = Side.LEFT if side_seed < 128 else Side.RIGHT
1258
+ reference_node = merkle_blob.get_random_leaf_node(seed)
1259
+ kid = reference_node.key
1260
+ return (kid, side)
1261
+
1262
+ async def get_terminal_node_from_kid(self, merkle_blob: MerkleBlob, kid: KeyId, store_id: bytes32) -> TerminalNode:
1263
+ index = merkle_blob.get_key_index(kid)
1264
+ raw_node = merkle_blob.get_raw_node(index)
1265
+ assert isinstance(raw_node, chia_rs.datalayer.LeafNode)
1266
+ return await self.get_terminal_node(raw_node.key, raw_node.value, store_id)
1267
+
1268
+ async def get_terminal_node_for_seed(self, seed: bytes32, store_id: bytes32) -> Optional[TerminalNode]:
1269
+ root = await self.get_tree_root(store_id=store_id)
1270
+ if root is None or root.node_hash is None:
1271
+ return None
1272
+
1273
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
1274
+ assert not merkle_blob.empty()
1275
+ kid, _ = self.get_reference_kid_side(merkle_blob, seed)
1276
+ return await self.get_terminal_node_from_kid(merkle_blob, kid, store_id)
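(Annotation, not part of the diff: `get_reference_kid_side` above derives both the insertion side and the reference leaf deterministically from a seed hash; the first byte picks left (below 128) or right, and the remaining bytes drive the pseudo-random leaf walk inside the Rust MerkleBlob, which is not reproduced here. A tiny sketch of the side choice; the sha256 call is only a stand-in for the leaf-hash seed.)

    from hashlib import sha256

    def side_for_seed(seed: bytes) -> str:
        # The first byte splits the seed space evenly: values below 128 go left.
        return "LEFT" if seed[0] < 128 else "RIGHT"

    seed = sha256(b"key-bytes" + b"value-bytes").digest()  # stand-in for the leaf-hash seed
    print(side_for_seed(seed))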
1179
1277
 
1180
1278
  async def insert(
1181
1279
  self,
1182
1280
  key: bytes,
1183
1281
  value: bytes,
1184
1282
  store_id: bytes32,
1185
- reference_node_hash: Optional[bytes32],
1186
- side: Optional[Side],
1187
- use_optimized: bool = True,
1283
+ reference_node_hash: Optional[bytes32] = None,
1284
+ side: Optional[Side] = None,
1188
1285
  status: Status = Status.PENDING,
1189
1286
  root: Optional[Root] = None,
1190
1287
  ) -> InsertResult:
1191
- async with self.db_wrapper.writer():
1192
- if root is None:
1193
- root = await self.get_tree_root(store_id=store_id)
1288
+ async with self.db_wrapper.writer() as writer:
1289
+ with self.manage_kv_files(store_id):
1290
+ if root is None:
1291
+ root = await self.get_tree_root(store_id=store_id)
1292
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
1194
1293
 
1195
- try:
1196
- await self.get_node_by_key(key=key, store_id=store_id)
1197
- raise Exception(f"Key already present: {key.hex()}")
1198
- except KeyNotFoundError:
1199
- pass
1200
-
1201
- was_empty = root.node_hash is None
1202
- if reference_node_hash is None:
1203
- if not was_empty:
1204
- raise Exception(f"Reference node hash must be specified for non-empty tree: {store_id.hex()}")
1205
- else:
1206
- reference_node_type = await self.get_node_type(node_hash=reference_node_hash)
1207
- if reference_node_type == NodeType.INTERNAL:
1208
- raise Exception("can not insert a new key/value on an internal node")
1294
+ kid, vid = await self.add_key_value(key, value, store_id, writer=writer)
1295
+ hash = leaf_hash(key, value)
1296
+ reference_kid = None
1297
+ if reference_node_hash is not None:
1298
+ reference_kid, _ = merkle_blob.get_node_by_hash(reference_node_hash)
1209
1299
 
1210
- # create new terminal node
1211
- new_terminal_node_hash = await self._insert_terminal_node(key=key, value=value)
1300
+ was_empty = root.node_hash is None
1301
+ if not was_empty and reference_kid is None:
1302
+ if side is not None:
1303
+ raise Exception("Side specified without reference node hash")
1212
1304
 
1213
- if was_empty:
1214
- if side is not None:
1215
- raise Exception(f"Tree was empty so side must be unspecified, got: {side!r}")
1305
+ seed = leaf_hash(key=key, value=value)
1306
+ reference_kid, side = self.get_reference_kid_side(merkle_blob, seed)
1216
1307
 
1217
- new_root = await self._insert_root(
1218
- store_id=store_id,
1219
- node_hash=new_terminal_node_hash,
1220
- status=status,
1221
- )
1222
- else:
1223
- if side is None:
1224
- raise Exception("Tree was not empty, side must be specified.")
1225
- if reference_node_hash is None:
1226
- raise Exception("Tree was not empty, reference node hash must be specified.")
1227
- if root.node_hash is None:
1228
- raise Exception("Internal error.")
1229
-
1230
- if side == Side.LEFT:
1231
- left = new_terminal_node_hash
1232
- right = reference_node_hash
1233
- elif side == Side.RIGHT:
1234
- left = reference_node_hash
1235
- right = new_terminal_node_hash
1236
- else:
1237
- raise Exception(f"Internal error, unknown side: {side!r}")
1238
-
1239
- ancestors = await self.get_ancestors_common(
1240
- node_hash=reference_node_hash,
1241
- store_id=store_id,
1242
- root_hash=root.node_hash,
1243
- generation=root.generation,
1244
- use_optimized=use_optimized,
1245
- )
1246
- new_root = await self.update_ancestor_hashes_on_insert(
1247
- store_id=store_id,
1248
- left=left,
1249
- right=right,
1250
- traversal_node_hash=reference_node_hash,
1251
- ancestors=ancestors,
1252
- status=status,
1253
- root=root,
1254
- )
1308
+ merkle_blob.insert(kid, vid, hash, reference_kid, side)
1255
1309
 
1256
- return InsertResult(node_hash=new_terminal_node_hash, root=new_root)
1310
+ new_root = await self.insert_root_from_merkle_blob(merkle_blob, store_id, status)
1311
+ return InsertResult(node_hash=hash, root=new_root)
1257
1312
 
1258
1313
  async def delete(
1259
1314
  self,
1260
1315
  key: bytes,
1261
1316
  store_id: bytes32,
1262
- use_optimized: bool = True,
1263
1317
  status: Status = Status.PENDING,
1264
1318
  root: Optional[Root] = None,
1265
1319
  ) -> Optional[Root]:
1266
- root_hash = None if root is None else root.node_hash
1267
1320
  async with self.db_wrapper.writer():
1268
- try:
1269
- node = await self.get_node_by_key(key=key, store_id=store_id)
1270
- node_hash = node.hash
1271
- assert isinstance(node, TerminalNode)
1272
- except KeyNotFoundError:
1273
- log.debug(f"Request to delete an unknown key ignored: {key.hex()}")
1274
- return root
1275
-
1276
- ancestors: list[InternalNode] = await self.get_ancestors_common(
1277
- node_hash=node_hash,
1278
- store_id=store_id,
1279
- root_hash=root_hash,
1280
- use_optimized=use_optimized,
1281
- )
1282
-
1283
- if len(ancestors) == 0:
1284
- # the only node is being deleted
1285
- return await self._insert_root(
1286
- store_id=store_id,
1287
- node_hash=None,
1288
- status=status,
1289
- )
1290
-
1291
- parent = ancestors[0]
1292
- other_hash = parent.other_child_hash(hash=node_hash)
1293
-
1294
- if len(ancestors) == 1:
1295
- # the parent is the root so the other side will become the new root
1296
- return await self._insert_root(
1297
- store_id=store_id,
1298
- node_hash=other_hash,
1299
- status=status,
1300
- )
1301
-
1302
- old_child_hash = parent.hash
1303
- new_child_hash = other_hash
1304
1321
  if root is None:
1305
- new_generation = await self.get_tree_generation(store_id) + 1
1306
- else:
1307
- new_generation = root.generation + 1
1308
- # update ancestors after inserting root, to keep table constraints.
1309
- insert_ancestors_cache: list[tuple[bytes32, bytes32, bytes32]] = []
1310
- # more parents to handle so let's traverse them
1311
- for ancestor in ancestors[1:]:
1312
- if ancestor.left_hash == old_child_hash:
1313
- left_hash = new_child_hash
1314
- right_hash = ancestor.right_hash
1315
- elif ancestor.right_hash == old_child_hash:
1316
- left_hash = ancestor.left_hash
1317
- right_hash = new_child_hash
1318
- else:
1319
- raise Exception("Internal error.")
1322
+ root = await self.get_tree_root(store_id=store_id)
1323
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
1320
1324
 
1321
- new_child_hash = await self._insert_internal_node(left_hash=left_hash, right_hash=right_hash)
1322
- insert_ancestors_cache.append((left_hash, right_hash, store_id))
1323
- old_child_hash = ancestor.hash
1325
+ kid = await self.get_kvid(key, store_id)
1326
+ if kid is not None:
1327
+ merkle_blob.delete(KeyId(kid))
1324
1328
 
1325
- new_root = await self._insert_root(
1326
- store_id=store_id,
1327
- node_hash=new_child_hash,
1328
- status=status,
1329
- generation=new_generation,
1330
- )
1331
- if status == Status.COMMITTED:
1332
- for left_hash, right_hash, store_id in insert_ancestors_cache:
1333
- await self._insert_ancestor_table(left_hash, right_hash, store_id, new_generation)
1329
+ new_root = await self.insert_root_from_merkle_blob(merkle_blob, store_id, status)
1334
1330
 
1335
1331
  return new_root
1336
1332
 
@@ -1339,151 +1335,21 @@ class DataStore:
1339
1335
  key: bytes,
1340
1336
  new_value: bytes,
1341
1337
  store_id: bytes32,
1342
- use_optimized: bool = True,
1343
1338
  status: Status = Status.PENDING,
1344
1339
  root: Optional[Root] = None,
1345
1340
  ) -> InsertResult:
1346
- async with self.db_wrapper.writer():
1347
- if root is None:
1348
- root = await self.get_tree_root(store_id=store_id)
1349
-
1350
- try:
1351
- old_node = await self.get_node_by_key(key=key, store_id=store_id)
1352
- except KeyNotFoundError:
1353
- log.debug(f"Key not found: {key.hex()}. Doing an autoinsert instead")
1354
- return await self.autoinsert(
1355
- key=key,
1356
- value=new_value,
1357
- store_id=store_id,
1358
- use_optimized=use_optimized,
1359
- status=status,
1360
- root=root,
1361
- )
1362
- if old_node.value == new_value:
1363
- log.debug(f"New value matches old value in upsert operation: {key.hex()}. Ignoring upsert")
1364
- return InsertResult(leaf_hash(key, new_value), root)
1365
-
1366
- # create new terminal node
1367
- new_terminal_node_hash = await self._insert_terminal_node(key=key, value=new_value)
1368
-
1369
- ancestors = await self.get_ancestors_common(
1370
- node_hash=old_node.hash,
1371
- store_id=store_id,
1372
- root_hash=root.node_hash,
1373
- generation=root.generation,
1374
- use_optimized=use_optimized,
1375
- )
1376
-
1377
- # Store contains only the old root, replace it with a new root having the terminal node.
1378
- if len(ancestors) == 0:
1379
- new_root = await self._insert_root(
1380
- store_id=store_id,
1381
- node_hash=new_terminal_node_hash,
1382
- status=status,
1383
- )
1384
- else:
1385
- parent = ancestors[0]
1386
- if parent.left_hash == old_node.hash:
1387
- left = new_terminal_node_hash
1388
- right = parent.right_hash
1389
- elif parent.right_hash == old_node.hash:
1390
- left = parent.left_hash
1391
- right = new_terminal_node_hash
1392
- else:
1393
- raise Exception("Internal error.")
1394
-
1395
- new_root = await self.update_ancestor_hashes_on_insert(
1396
- store_id=store_id,
1397
- left=left,
1398
- right=right,
1399
- traversal_node_hash=parent.hash,
1400
- ancestors=ancestors[1:],
1401
- status=status,
1402
- root=root,
1403
- )
1404
-
1405
- return InsertResult(node_hash=new_terminal_node_hash, root=new_root)
1406
-
1407
- async def clean_node_table(self, writer: Optional[aiosqlite.Connection] = None) -> None:
1408
- query = """
1409
- WITH RECURSIVE pending_nodes AS (
1410
- SELECT node_hash AS hash FROM root
1411
- WHERE status IN (:pending_status, :pending_batch_status)
1412
- UNION ALL
1413
- SELECT n.left FROM node n
1414
- INNER JOIN pending_nodes pn ON n.hash = pn.hash
1415
- WHERE n.left IS NOT NULL
1416
- UNION ALL
1417
- SELECT n.right FROM node n
1418
- INNER JOIN pending_nodes pn ON n.hash = pn.hash
1419
- WHERE n.right IS NOT NULL
1420
- )
1421
- DELETE FROM node
1422
- WHERE hash IN (
1423
- SELECT n.hash FROM node n
1424
- LEFT JOIN ancestors a ON n.hash = a.hash
1425
- LEFT JOIN pending_nodes pn ON n.hash = pn.hash
1426
- WHERE a.hash IS NULL AND pn.hash IS NULL
1427
- )
1428
- """
1429
- params = {"pending_status": Status.PENDING.value, "pending_batch_status": Status.PENDING_BATCH.value}
1430
- if writer is None:
1431
- async with self.db_wrapper.writer(foreign_key_enforcement_enabled=False) as writer:
1432
- await writer.execute(query, params)
1433
- else:
1434
- await writer.execute(query, params)
1435
-
1436
- async def get_nodes(self, node_hashes: list[bytes32]) -> list[Node]:
1437
- query_parameter_place_holders = ",".join("?" for _ in node_hashes)
1438
- async with self.db_wrapper.reader() as reader:
1439
- # TODO: handle SQLITE_MAX_VARIABLE_NUMBER
1440
- cursor = await reader.execute(
1441
- f"SELECT * FROM node WHERE hash IN ({query_parameter_place_holders})",
1442
- [*node_hashes],
1443
- )
1444
- rows = await cursor.fetchall()
1445
-
1446
- hash_to_node = {row["hash"]: row_to_node(row=row) for row in rows}
1447
-
1448
- missing_hashes = [node_hash.hex() for node_hash in node_hashes if node_hash not in hash_to_node]
1449
- if missing_hashes:
1450
- raise Exception(f"Nodes not found for hashes: {', '.join(missing_hashes)}")
1451
-
1452
- return [hash_to_node[node_hash] for node_hash in node_hashes]
1453
-
1454
- async def get_leaf_at_minimum_height(
1455
- self, root_hash: bytes32, hash_to_parent: dict[bytes32, InternalNode]
1456
- ) -> TerminalNode:
1457
- queue: list[bytes32] = [root_hash]
1458
- batch_size = min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)
1459
-
1460
- while True:
1461
- assert len(queue) > 0
1462
- nodes = await self.get_nodes(queue[:batch_size])
1463
- queue = queue[batch_size:]
1341
+ async with self.db_wrapper.writer() as writer:
1342
+ with self.manage_kv_files(store_id):
1343
+ if root is None:
1344
+ root = await self.get_tree_root(store_id=store_id)
1345
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
1464
1346
 
1465
- for node in nodes:
1466
- if isinstance(node, TerminalNode):
1467
- return node
1468
- hash_to_parent[node.left_hash] = node
1469
- hash_to_parent[node.right_hash] = node
1470
- queue.append(node.left_hash)
1471
- queue.append(node.right_hash)
1347
+ kid, vid = await self.add_key_value(key, new_value, store_id, writer=writer)
1348
+ hash = leaf_hash(key, new_value)
1349
+ merkle_blob.upsert(kid, vid, hash)
1472
1350
 
1473
- async def batch_upsert(
1474
- self,
1475
- hash: bytes32,
1476
- to_update_hashes: set[bytes32],
1477
- pending_upsert_new_hashes: dict[bytes32, bytes32],
1478
- ) -> bytes32:
1479
- if hash not in to_update_hashes:
1480
- return hash
1481
- node = await self.get_node(hash)
1482
- if isinstance(node, TerminalNode):
1483
- return pending_upsert_new_hashes[hash]
1484
- new_left_hash = await self.batch_upsert(node.left_hash, to_update_hashes, pending_upsert_new_hashes)
1485
- new_right_hash = await self.batch_upsert(node.right_hash, to_update_hashes, pending_upsert_new_hashes)
1486
- return await self._insert_internal_node(new_left_hash, new_right_hash)
1351
+ new_root = await self.insert_root_from_merkle_blob(merkle_blob, store_id, status)
1352
+ return InsertResult(node_hash=hash, root=new_root)
1487
1353
 
1488
1354
  async def insert_batch(
1489
1355
  self,
@@ -1492,339 +1358,90 @@ class DataStore:
  status: Status = Status.PENDING,
  enable_batch_autoinsert: bool = True,
  ) -> Optional[bytes32]:
- async with self.transaction():
- old_root = await self.get_tree_root(store_id)
- pending_root = await self.get_pending_root(store_id=store_id)
- if pending_root is None:
- latest_local_root: Optional[Root] = old_root
- else:
- if pending_root.status == Status.PENDING_BATCH:
- # We have an unfinished batch, continue the current batch on top of it.
- if pending_root.generation != old_root.generation + 1:
+ async with self.db_wrapper.writer() as writer:
+ with self.manage_kv_files(store_id):
+ old_root = await self.get_tree_root(store_id=store_id)
+ pending_root = await self.get_pending_root(store_id=store_id)
+ if pending_root is not None:
+ if pending_root.status == Status.PENDING_BATCH:
+ # We have an unfinished batch, continue the current batch on top of it.
+ if pending_root.generation != old_root.generation + 1:
+ raise Exception("Internal error")
+ old_root = pending_root
+ await self.clear_pending_roots(store_id)
+ else:
  raise Exception("Internal error")
- await self.change_root_status(pending_root, Status.COMMITTED)
- await self.build_ancestor_table_for_latest_root(store_id=store_id)
- latest_local_root = pending_root
- else:
- raise Exception("Internal error")
-
- assert latest_local_root is not None
-
- key_hash_frequency: dict[bytes32, int] = {}
- first_action: dict[bytes32, str] = {}
- last_action: dict[bytes32, str] = {}
 
- for change in changelist:
- key = change["key"]
- hash = key_hash(key)
- key_hash_frequency[hash] = key_hash_frequency.get(hash, 0) + 1
- if hash not in first_action:
- first_action[hash] = change["action"]
- last_action[hash] = change["action"]
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=old_root.node_hash)
 
- pending_autoinsert_hashes: list[bytes32] = []
- pending_upsert_new_hashes: dict[bytes32, bytes32] = {}
- leaf_hashes = await self.get_leaf_hashes_by_hashed_key(store_id)
+ key_hash_frequency: dict[bytes32, int] = {}
+ first_action: dict[bytes32, str] = {}
+ last_action: dict[bytes32, str] = {}
 
- for change in changelist:
- if change["action"] == "insert":
- key = change["key"]
- value = change["value"]
- reference_node_hash = change.get("reference_node_hash", None)
- side = change.get("side", None)
- if reference_node_hash is None and side is None:
- hash = key_hash(key)
- # The key is not referenced in any other operation but this autoinsert, hence the order
- # of performing these should not matter. We perform all these autoinserts as a batch
- # at the end, to speed up the tree processing operations.
- # Additionally, if the first action is a delete, we can still perform the autoinsert at the
- # end, since the order will be preserved.
- if enable_batch_autoinsert:
- if key_hash_frequency[hash] == 1 or (
- key_hash_frequency[hash] == 2 and first_action[hash] == "delete"
- ):
- old_node = await self.maybe_get_node_from_key_hash(leaf_hashes, hash)
- terminal_node_hash = await self._insert_terminal_node(key, value)
-
- if old_node is None:
- pending_autoinsert_hashes.append(terminal_node_hash)
- else:
- if key_hash_frequency[hash] == 1:
- raise Exception(f"Key already present: {key.hex()}")
- else:
- pending_upsert_new_hashes[old_node.hash] = terminal_node_hash
- continue
- insert_result = await self.autoinsert(
- key, value, store_id, True, Status.COMMITTED, root=latest_local_root
- )
- latest_local_root = insert_result.root
- else:
- if reference_node_hash is None or side is None:
- raise Exception("Provide both reference_node_hash and side or neither.")
- insert_result = await self.insert(
- key,
- value,
- store_id,
- reference_node_hash,
- side,
- True,
- Status.COMMITTED,
- root=latest_local_root,
- )
- latest_local_root = insert_result.root
- elif change["action"] == "delete":
- key = change["key"]
- hash = key_hash(key)
- if key_hash_frequency[hash] == 2 and last_action[hash] == "insert" and enable_batch_autoinsert:
- continue
- latest_local_root = await self.delete(key, store_id, True, Status.COMMITTED, root=latest_local_root)
- elif change["action"] == "upsert":
+ for change in changelist:
  key = change["key"]
- new_value = change["value"]
  hash = key_hash(key)
- if key_hash_frequency[hash] == 1 and enable_batch_autoinsert:
- terminal_node_hash = await self._insert_terminal_node(key, new_value)
- old_node = await self.maybe_get_node_from_key_hash(leaf_hashes, hash)
- if old_node is not None:
- pending_upsert_new_hashes[old_node.hash] = terminal_node_hash
+ key_hash_frequency[hash] = key_hash_frequency.get(hash, 0) + 1
+ if hash not in first_action:
+ first_action[hash] = change["action"]
+ last_action[hash] = change["action"]
+
+ batch_keys_values: list[tuple[KeyId, ValueId]] = []
+ batch_hashes: list[bytes32] = []
+
+ for change in changelist:
+ if change["action"] == "insert":
+ key = change["key"]
+ value = change["value"]
+
+ reference_node_hash = change.get("reference_node_hash", None)
+ side = change.get("side", None)
+ reference_kid: Optional[KeyId] = None
+ if reference_node_hash is not None:
+ reference_kid, _ = merkle_blob.get_node_by_hash(reference_node_hash)
+
+ key_hashed = key_hash(key)
+ kid, vid = await self.add_key_value(key, value, store_id, writer=writer)
+ try:
+ merkle_blob.get_key_index(kid)
+ except chia_rs.datalayer.UnknownKeyError:
+ pass
  else:
- pending_autoinsert_hashes.append(terminal_node_hash)
- continue
- insert_result = await self.upsert(
- key, new_value, store_id, True, Status.COMMITTED, root=latest_local_root
- )
- latest_local_root = insert_result.root
- else:
- raise Exception(f"Operation in batch is not insert or delete: {change}")
-
- if len(pending_upsert_new_hashes) > 0:
- to_update_hashes: set[bytes32] = set(pending_upsert_new_hashes.keys())
- to_update_queue: list[bytes32] = list(pending_upsert_new_hashes.keys())
- batch_size = min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)
-
- while len(to_update_queue) > 0:
- nodes = await self._get_one_ancestor_multiple_hashes(to_update_queue[:batch_size], store_id)
- to_update_queue = to_update_queue[batch_size:]
- for node in nodes:
- if node.hash not in to_update_hashes:
- to_update_hashes.add(node.hash)
- to_update_queue.append(node.hash)
-
- assert latest_local_root is not None
- assert latest_local_root.node_hash is not None
- new_root_hash = await self.batch_upsert(
- latest_local_root.node_hash,
- to_update_hashes,
- pending_upsert_new_hashes,
- )
- latest_local_root = await self._insert_root(store_id, new_root_hash, Status.COMMITTED)
-
- # Start with the leaf nodes and pair them to form new nodes at the next level up, repeating this process
- # in a bottom-up fashion until a single root node remains. This constructs a balanced tree from the leaves.
- while len(pending_autoinsert_hashes) > 1:
- new_hashes: list[bytes32] = []
- for i in range(0, len(pending_autoinsert_hashes) - 1, 2):
- internal_node_hash = await self._insert_internal_node(
- pending_autoinsert_hashes[i], pending_autoinsert_hashes[i + 1]
- )
- new_hashes.append(internal_node_hash)
- if len(pending_autoinsert_hashes) % 2 != 0:
- new_hashes.append(pending_autoinsert_hashes[-1])
-
- pending_autoinsert_hashes = new_hashes
-
- if len(pending_autoinsert_hashes):
- subtree_hash = pending_autoinsert_hashes[0]
- if latest_local_root is None or latest_local_root.node_hash is None:
- await self._insert_root(store_id=store_id, node_hash=subtree_hash, status=Status.COMMITTED)
- else:
- hash_to_parent: dict[bytes32, InternalNode] = {}
- min_height_leaf = await self.get_leaf_at_minimum_height(latest_local_root.node_hash, hash_to_parent)
- ancestors: list[InternalNode] = []
- hash = min_height_leaf.hash
- while hash in hash_to_parent:
- node = hash_to_parent[hash]
- ancestors.append(node)
- hash = node.hash
-
- await self.update_ancestor_hashes_on_insert(
- store_id=store_id,
- left=min_height_leaf.hash,
- right=subtree_hash,
- traversal_node_hash=min_height_leaf.hash,
- ancestors=ancestors,
- status=Status.COMMITTED,
- root=latest_local_root,
- )
-
- root = await self.get_tree_root(store_id=store_id)
- if root.node_hash == old_root.node_hash:
- if len(changelist) != 0:
- await self.rollback_to_generation(store_id, old_root.generation)
- raise ValueError("Changelist resulted in no change to tree data")
- # We delete all "temporary" records stored in root and ancestor tables and store only the final result.
- await self.rollback_to_generation(store_id, old_root.generation)
- await self.insert_root_with_ancestor_table(store_id=store_id, node_hash=root.node_hash, status=status)
- if status in {Status.PENDING, Status.PENDING_BATCH}:
- new_root = await self.get_pending_root(store_id=store_id)
- assert new_root is not None
- elif status == Status.COMMITTED:
- new_root = await self.get_tree_root(store_id=store_id)
- else:
- raise Exception(f"No known status: {status}")
- if new_root.node_hash != root.node_hash:
- raise RuntimeError(
- f"Tree root mismatches after batch update: Expected: {root.node_hash}. Got: {new_root.node_hash}"
- )
- if new_root.generation != old_root.generation + 1:
- raise RuntimeError(
- "Didn't get the expected generation after batch update: "
- f"Expected: {old_root.generation + 1}. Got: {new_root.generation}"
- )
- return root.node_hash
-
- async def _get_one_ancestor(
- self,
- node_hash: bytes32,
- store_id: bytes32,
- generation: Optional[int] = None,
- ) -> Optional[InternalNode]:
- async with self.db_wrapper.reader() as reader:
- if generation is None:
- generation = await self.get_tree_generation(store_id=store_id)
- cursor = await reader.execute(
- """
- SELECT * from node INNER JOIN (
- SELECT ancestors.ancestor AS hash, MAX(ancestors.generation) AS generation
- FROM ancestors
- WHERE ancestors.hash == :hash
- AND ancestors.tree_id == :tree_id
- AND ancestors.generation <= :generation
- GROUP BY hash
- ) asc on asc.hash == node.hash
- """,
- {"hash": node_hash, "tree_id": store_id, "generation": generation},
- )
- row = await cursor.fetchone()
- if row is None:
- return None
- return InternalNode.from_row(row=row)
-
- async def _get_one_ancestor_multiple_hashes(
- self,
- node_hashes: list[bytes32],
- store_id: bytes32,
- generation: Optional[int] = None,
- ) -> list[InternalNode]:
- async with self.db_wrapper.reader() as reader:
- node_hashes_place_holders = ",".join("?" for _ in node_hashes)
- if generation is None:
- generation = await self.get_tree_generation(store_id=store_id)
- cursor = await reader.execute(
- f"""
- SELECT * from node INNER JOIN (
- SELECT ancestors.ancestor AS hash, MAX(ancestors.generation) AS generation
- FROM ancestors
- WHERE ancestors.hash IN ({node_hashes_place_holders})
- AND ancestors.tree_id == ?
- AND ancestors.generation <= ?
- GROUP BY hash
- ) asc on asc.hash == node.hash
- """,
- [*node_hashes, store_id, generation],
- )
- rows = await cursor.fetchall()
- return [InternalNode.from_row(row=row) for row in rows]
-
- async def build_ancestor_table_for_latest_root(self, store_id: bytes32) -> None:
- async with self.db_wrapper.writer():
- root = await self.get_tree_root(store_id=store_id)
- if root.node_hash is None:
- return
- previous_root = await self.get_tree_root(
- store_id=store_id,
- generation=max(root.generation - 1, 0),
- )
-
- if previous_root.node_hash is not None:
- previous_internal_nodes: list[InternalNode] = await self.get_internal_nodes(
- store_id=store_id,
- root_hash=previous_root.node_hash,
- )
- known_hashes: set[bytes32] = {node.hash for node in previous_internal_nodes}
- else:
- known_hashes = set()
- internal_nodes: list[InternalNode] = await self.get_internal_nodes(
- store_id=store_id,
- root_hash=root.node_hash,
- )
- for node in internal_nodes:
- # We already have the same values in ancestor tables, if we have the same internal node.
- # Don't reinsert it so we can save DB space.
- if node.hash not in known_hashes:
- await self._insert_ancestor_table(node.left_hash, node.right_hash, store_id, root.generation)
-
- async def insert_root_with_ancestor_table(
- self, store_id: bytes32, node_hash: Optional[bytes32], status: Status = Status.PENDING
- ) -> None:
- async with self.db_wrapper.writer():
- await self._insert_root(store_id=store_id, node_hash=node_hash, status=status)
- # Don't update the ancestor table for non-committed status.
- if status == Status.COMMITTED:
- await self.build_ancestor_table_for_latest_root(store_id=store_id)
-
- async def get_node_by_key_latest_generation(self, key: bytes, store_id: bytes32) -> TerminalNode:
- async with self.db_wrapper.reader() as reader:
- root = await self.get_tree_root(store_id=store_id)
- if root.node_hash is None:
- raise KeyNotFoundError(key=key)
-
- cursor = await reader.execute(
- """
- SELECT a.hash FROM ancestors a
- JOIN node n ON a.hash = n.hash
- WHERE n.key = :key
- AND a.tree_id = :tree_id
- ORDER BY a.generation DESC
- LIMIT 1
- """,
- {"key": key, "tree_id": store_id},
- )
-
- row = await cursor.fetchone()
- if row is None:
- raise KeyNotFoundError(key=key)
-
- node = await self.get_node(row["hash"])
- node_hash = node.hash
- while True:
- internal_node = await self._get_one_ancestor(node_hash, store_id)
- if internal_node is None:
- break
- node_hash = internal_node.hash
-
- if node_hash != root.node_hash:
- raise KeyNotFoundError(key=key)
- assert isinstance(node, TerminalNode)
- return node
-
- async def maybe_get_node_from_key_hash(
- self, leaf_hashes: dict[bytes32, bytes32], hash: bytes32
- ) -> Optional[TerminalNode]:
- if hash in leaf_hashes:
- leaf_hash = leaf_hashes[hash]
- node = await self.get_node(leaf_hash)
- assert isinstance(node, TerminalNode)
- return node
+ raise KeyAlreadyPresentError(kid)
+ hash = leaf_hash(key, value)
+
+ if reference_node_hash is None and side is None:
+ if enable_batch_autoinsert and reference_kid is None:
+ if key_hash_frequency[key_hashed] == 1 or (
+ key_hash_frequency[key_hashed] == 2 and first_action[key_hashed] == "delete"
+ ):
+ batch_keys_values.append((kid, vid))
+ batch_hashes.append(hash)
+ continue
+ if not merkle_blob.empty():
+ seed = leaf_hash(key=key, value=value)
+ reference_kid, side = self.get_reference_kid_side(merkle_blob, seed)
+
+ merkle_blob.insert(kid, vid, hash, reference_kid, side)
+ elif change["action"] == "delete":
+ key = change["key"]
+ deletion_kid = await self.get_kvid(key, store_id)
+ if deletion_kid is not None:
+ merkle_blob.delete(KeyId(deletion_kid))
+ elif change["action"] == "upsert":
+ key = change["key"]
+ new_value = change["value"]
+ kid, vid = await self.add_key_value(key, new_value, store_id, writer=writer)
+ hash = leaf_hash(key, new_value)
+ merkle_blob.upsert(kid, vid, hash)
+ else:
+ raise Exception(f"Operation in batch is not insert or delete: {change}")
 
- return None
+ if len(batch_keys_values) > 0:
+ merkle_blob.batch_insert(batch_keys_values, batch_hashes)
 
- async def maybe_get_node_by_key(self, key: bytes, store_id: bytes32) -> Optional[TerminalNode]:
- try:
- node = await self.get_node_by_key_latest_generation(key, store_id)
- return node
- except KeyNotFoundError:
- return None
+ new_root = await self.insert_root_from_merkle_blob(merkle_blob, store_id, status, old_root)
+ return new_root.node_hash
 
  async def get_node_by_key(
  self,
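The new insert_batch pre-scans the changelist (key_hash_frequency, first_action, last_action) and only defers a plain insert into the final batch_insert call when its key appears once, or twice with a delete first; everything else is applied in changelist order. A self-contained sketch of that decision, with sha256 standing in for key_hash (an assumption for illustration, not the real key_hash):

    # Sketch of the changelist pre-scan that decides which inserts can be
    # deferred into one batch at the end. sha256 stands in for key_hash.
    from hashlib import sha256
    from typing import Any


    def plan_batch(changelist: list[dict[str, Any]]) -> tuple[list[bytes], list[bytes]]:
        key_hash_frequency: dict[bytes, int] = {}
        first_action: dict[bytes, str] = {}

        for change in changelist:
            h = sha256(change["key"]).digest()
            key_hash_frequency[h] = key_hash_frequency.get(h, 0) + 1
            first_action.setdefault(h, change["action"])

        deferred: list[bytes] = []   # keys safe to insert in one batch at the end
        immediate: list[bytes] = []  # keys that must be applied in changelist order

        for change in changelist:
            if change["action"] != "insert":
                continue
            h = sha256(change["key"]).digest()
            if key_hash_frequency[h] == 1 or (key_hash_frequency[h] == 2 and first_action[h] == "delete"):
                deferred.append(change["key"])
            else:
                immediate.append(change["key"])
        return deferred, immediate


    if __name__ == "__main__":
        changes = [
            {"action": "insert", "key": b"a"},
            {"action": "delete", "key": b"b"},
            {"action": "insert", "key": b"b"},  # delete-then-insert: still batchable
            {"action": "insert", "key": b"c"},
            {"action": "delete", "key": b"c"},  # insert-then-delete: order matters
        ]
        print(plan_batch(changes))
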
@@ -1832,56 +1449,49 @@ class DataStore:
  store_id: bytes32,
  root_hash: Union[bytes32, Unspecified] = unspecified,
  ) -> TerminalNode:
- if root_hash is unspecified:
- return await self.get_node_by_key_latest_generation(key, store_id)
-
- nodes = await self.get_keys_values(store_id=store_id, root_hash=root_hash)
-
- for node in nodes:
- if node.key == key:
- return node
-
- raise KeyNotFoundError(key=key)
-
- async def get_node(self, node_hash: bytes32) -> Node:
- async with self.db_wrapper.reader() as reader:
- cursor = await reader.execute("SELECT * FROM node WHERE hash == :hash LIMIT 1", {"hash": node_hash})
- row = await cursor.fetchone()
+ async with self.db_wrapper.reader():
+ resolved_root_hash: Optional[bytes32]
+ if root_hash is unspecified:
+ root = await self.get_tree_root(store_id=store_id)
+ resolved_root_hash = root.node_hash
+ else:
+ resolved_root_hash = root_hash
 
- if row is None:
- raise Exception(f"Node not found for requested hash: {node_hash.hex()}")
+ try:
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=resolved_root_hash)
+ except MerkleBlobNotFoundError:
+ raise KeyNotFoundError(key=key)
 
- node = row_to_node(row=row)
- return node
+ kvid = await self.get_kvid(key, store_id)
+ if kvid is None:
+ raise KeyNotFoundError(key=key)
+ kid = KeyId(kvid)
+ return await self.get_terminal_node_from_kid(merkle_blob, kid, store_id)
 
  async def get_tree_as_nodes(self, store_id: bytes32) -> Node:
- async with self.db_wrapper.reader() as reader:
+ async with self.db_wrapper.reader():
  root = await self.get_tree_root(store_id=store_id)
  # TODO: consider actual proper behavior
  assert root.node_hash is not None
- root_node = await self.get_node(node_hash=root.node_hash)
 
- cursor = await reader.execute(
- """
- WITH RECURSIVE
- tree_from_root_hash(hash, node_type, left, right, key, value) AS (
- SELECT node.* FROM node WHERE node.hash == :root_hash
- UNION ALL
- SELECT node.* FROM node, tree_from_root_hash
- WHERE node.hash == tree_from_root_hash.left OR node.hash == tree_from_root_hash.right
- )
- SELECT * FROM tree_from_root_hash
- """,
- {"root_hash": root_node.hash},
- )
- nodes = [row_to_node(row=row) async for row in cursor]
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
+
+ nodes = merkle_blob.get_nodes_with_indexes()
  hash_to_node: dict[bytes32, Node] = {}
- for node in reversed(nodes):
- if isinstance(node, InternalNode):
- node = replace(node, left=hash_to_node[node.left_hash], right=hash_to_node[node.right_hash])
- hash_to_node[node.hash] = node
+ tree_node: Node
+ for _, node in reversed(nodes):
+ if isinstance(node, chia_rs.datalayer.InternalNode):
+ left_hash = merkle_blob.get_hash_at_index(node.left)
+ right_hash = merkle_blob.get_hash_at_index(node.right)
+ tree_node = InternalNode.from_child_nodes(
+ left=hash_to_node[left_hash], right=hash_to_node[right_hash]
+ )
+ else:
+ assert isinstance(node, chia_rs.datalayer.LeafNode)
+ tree_node = await self.get_terminal_node(node.key, node.value, store_id)
+ hash_to_node[node.hash] = tree_node
 
- root_node = hash_to_node[root_node.hash]
+ root_node = hash_to_node[root.node_hash]
 
  return root_node
 
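get_tree_as_nodes above walks the merkle blob's (index, node) pairs in reverse so that every child is already present in hash_to_node when its parent is rebuilt. A stand-alone sketch of the same bottom-up reconstruction over plain dataclasses (illustrative stand-ins, not the chia_rs.datalayer types):

    # Bottom-up tree reconstruction: iterate a parent-before-child listing in
    # reverse so children exist in hash_to_node before their parent is built.
    from __future__ import annotations

    from dataclasses import dataclass
    from typing import Union


    @dataclass(frozen=True)
    class Leaf:
        hash: str
        value: bytes


    @dataclass(frozen=True)
    class Internal:
        hash: str
        left: str   # child hash
        right: str  # child hash


    @dataclass(frozen=True)
    class LinkedInternal:
        hash: str
        left: Union["LinkedInternal", Leaf]
        right: Union["LinkedInternal", Leaf]


    def rebuild(nodes: list[Union[Leaf, Internal]]) -> Union[LinkedInternal, Leaf]:
        """nodes is ordered root first, with children listed after their parent."""
        hash_to_node: dict[str, Union[LinkedInternal, Leaf]] = {}
        for node in reversed(nodes):
            if isinstance(node, Internal):
                hash_to_node[node.hash] = LinkedInternal(
                    hash=node.hash,
                    left=hash_to_node[node.left],
                    right=hash_to_node[node.right],
                )
            else:
                hash_to_node[node.hash] = node
        return hash_to_node[nodes[0].hash]


    if __name__ == "__main__":
        flat = [Internal("root", "l", "r"), Leaf("l", b"left"), Leaf("r", b"right")]
        print(rebuild(flat))
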
@@ -1890,66 +1500,86 @@ class DataStore:
  node_hash: bytes32,
  store_id: bytes32,
  root_hash: Optional[bytes32] = None,
- use_optimized: bool = False,
  ) -> ProofOfInclusion:
- """Collect the information for a proof of inclusion of a hash in the Merkle
- tree.
- """
-
- # Ideally this would use get_ancestors_common, but this _common function has this interesting property
- # when used with use_optimized=False - it will compare both methods in this case and raise an exception.
- # this is undesirable in the DL Offers flow where PENDING roots can cause the optimized code to fail.
- if use_optimized:
- ancestors = await self.get_ancestors_optimized(node_hash=node_hash, store_id=store_id, root_hash=root_hash)
- else:
- ancestors = await self.get_ancestors(node_hash=node_hash, store_id=store_id, root_hash=root_hash)
-
- layers: list[ProofOfInclusionLayer] = []
- child_hash = node_hash
- for parent in ancestors:
- layer = ProofOfInclusionLayer.from_internal_node(internal_node=parent, traversal_child_hash=child_hash)
- layers.append(layer)
- child_hash = parent.hash
-
- proof_of_inclusion = ProofOfInclusion(node_hash=node_hash, layers=layers)
-
- if len(ancestors) > 0:
- expected_root = ancestors[-1].hash
- else:
- expected_root = node_hash
-
- if expected_root != proof_of_inclusion.root_hash:
- raise Exception(
- f"Incorrect root, expected: {expected_root.hex()}"
- f"\n has: {proof_of_inclusion.root_hash.hex()}"
- )
-
- return proof_of_inclusion
+ if root_hash is None:
+ root = await self.get_tree_root(store_id=store_id)
+ root_hash = root.node_hash
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=root_hash)
+ kid, _ = merkle_blob.get_node_by_hash(node_hash)
+ return merkle_blob.get_proof_of_inclusion(kid)
 
  async def get_proof_of_inclusion_by_key(
  self,
  key: bytes,
  store_id: bytes32,
  ) -> ProofOfInclusion:
- """Collect the information for a proof of inclusion of a key and its value in
- the Merkle tree.
- """
- async with self.db_wrapper.reader():
- node = await self.get_node_by_key(key=key, store_id=store_id)
- return await self.get_proof_of_inclusion_by_hash(node_hash=node.hash, store_id=store_id)
+ root = await self.get_tree_root(store_id=store_id)
+ merkle_blob = await self.get_merkle_blob(store_id=store_id, root_hash=root.node_hash)
+ kvid = await self.get_kvid(key, store_id)
+ if kvid is None:
+ raise Exception(f"Cannot find key: {key.hex()}")
+ kid = KeyId(kvid)
+ return merkle_blob.get_proof_of_inclusion(kid)
+
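Both proof methods above now return the merkle blob's ProofOfInclusion directly. For readers unfamiliar with how such a proof is consumed, here is a generic Merkle-path verification sketch; the sha256(left + right) pairing rule is a simplification for illustration and is not the DataLayer's exact node-hash construction:

    # Generic Merkle-path verification sketch (simplified hashing).
    from hashlib import sha256


    def verify_path(leaf_hash: bytes, layers: list[tuple[str, bytes]], expected_root: bytes) -> bool:
        """layers is a list of (side, sibling_hash); side says where the sibling
        sits relative to the running hash while climbing toward the root."""
        running = leaf_hash
        for side, sibling in layers:
            if side == "left":
                running = sha256(sibling + running).digest()
            else:
                running = sha256(running + sibling).digest()
        return running == expected_root


    if __name__ == "__main__":
        leaf = sha256(b"leaf").digest()
        sibling = sha256(b"sibling").digest()
        root = sha256(leaf + sibling).digest()
        print(verify_path(leaf, [("right", sibling)], root))  # True
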
+ async def get_nodes_for_file(
+ self,
+ root: Root,
+ node_hash: bytes32,
+ store_id: bytes32,
+ deltas_only: bool,
+ delta_file_cache: DeltaFileCache,
+ tree_nodes: list[SerializedNode],
+ ) -> None:
+ if deltas_only:
+ if delta_file_cache.seen_previous_hash(node_hash):
+ return
 
- async def get_first_generation(self, node_hash: bytes32, store_id: bytes32) -> int:
- async with self.db_wrapper.reader() as reader:
- cursor = await reader.execute(
- "SELECT MIN(generation) AS generation FROM ancestors WHERE hash == :hash AND tree_id == :tree_id",
- {"hash": node_hash, "tree_id": store_id},
+ raw_index = delta_file_cache.get_index(node_hash)
+ raw_node = delta_file_cache.get_raw_node(raw_index)
+
+ if isinstance(raw_node, chia_rs.datalayer.InternalNode):
+ left_hash = delta_file_cache.get_hash_at_index(raw_node.left)
+ right_hash = delta_file_cache.get_hash_at_index(raw_node.right)
+ await self.get_nodes_for_file(root, left_hash, store_id, deltas_only, delta_file_cache, tree_nodes)
+ await self.get_nodes_for_file(root, right_hash, store_id, deltas_only, delta_file_cache, tree_nodes)
+ tree_nodes.append(SerializedNode(False, bytes(left_hash), bytes(right_hash)))
+ elif isinstance(raw_node, chia_rs.datalayer.LeafNode):
+ tree_nodes.append(
+ SerializedNode(
+ True,
+ raw_node.key.to_bytes(),
+ raw_node.value.to_bytes(),
+ )
  )
- row = await cursor.fetchone()
- if row is None:
- raise RuntimeError("Hash not found in ancestor table.")
+ else:
+ raise Exception(f"Node is neither InternalNode nor TerminalNode: {raw_node}")
+
+ async def get_table_blobs(
+ self, kv_ids_iter: Iterable[KeyOrValueId], store_id: bytes32
+ ) -> dict[KeyOrValueId, tuple[bytes32, Optional[bytes]]]:
+ result: dict[KeyOrValueId, tuple[bytes32, Optional[bytes]]] = {}
+ batch_size = min(500, SQLITE_MAX_VARIABLE_NUMBER - 10)
+ kv_ids = list(dict.fromkeys(kv_ids_iter))
+
+ async with self.db_wrapper.reader() as reader:
+ for i in range(0, len(kv_ids), batch_size):
+ chunk = kv_ids[i : i + batch_size]
+ placeholders = ",".join(["?"] * len(chunk))
+ query = f"""
+ SELECT hash, blob, kv_id
+ FROM ids
+ WHERE store_id = ? AND kv_id IN ({placeholders})
+ LIMIT {len(chunk)}
+ """
+
+ async with reader.execute(query, (store_id, *chunk)) as cursor:
+ rows = await cursor.fetchall()
+ result.update({row["kv_id"]: (row["hash"], row["blob"]) for row in rows})
 
- generation = row["generation"]
- return int(generation)
+ if len(result) != len(kv_ids):
+ raise Exception("Cannot retrieve all the requested kv_ids")
+
+ return result
 
  async def write_tree_to_file(
  self,
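get_table_blobs above fetches blobs in chunks of at most min(500, SQLITE_MAX_VARIABLE_NUMBER - 10) bound parameters per statement. A sketch of the same chunked IN-clause pattern against the sqlite3 standard library (the table and column names are made up for the example):

    # Chunked IN-clause queries to stay under SQLite's bound-parameter limit.
    import sqlite3

    BATCH_SIZE = 500  # stay well below the per-statement parameter limit


    def fetch_blobs(conn: sqlite3.Connection, ids: list[int]) -> dict[int, bytes]:
        result: dict[int, bytes] = {}
        unique_ids = list(dict.fromkeys(ids))  # de-duplicate, keep order
        for i in range(0, len(unique_ids), BATCH_SIZE):
            chunk = unique_ids[i : i + BATCH_SIZE]
            placeholders = ",".join("?" * len(chunk))
            cursor = conn.execute(
                f"SELECT id, blob FROM example_ids WHERE id IN ({placeholders})",
                chunk,
            )
            result.update({row[0]: row[1] for row in cursor})
        if len(result) != len(unique_ids):
            raise Exception("Cannot retrieve all the requested ids")
        return result


    if __name__ == "__main__":
        conn = sqlite3.connect(":memory:")
        conn.execute("CREATE TABLE example_ids (id INTEGER PRIMARY KEY, blob BLOB)")
        conn.executemany(
            "INSERT INTO example_ids VALUES (?, ?)",
            [(n, n.to_bytes(2, "big")) for n in range(1200)],
        )
        print(len(fetch_blobs(conn, list(range(1200)))))  # 1200, fetched in three chunks
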
@@ -1962,24 +1592,42 @@ class DataStore:
  if node_hash == bytes32.zeros:
  return
 
- if deltas_only:
- generation = await self.get_first_generation(node_hash, store_id)
- # Root's generation is not the first time we see this hash, so it's not a new delta.
- if root.generation != generation:
- return
- node = await self.get_node(node_hash)
- to_write = b""
- if isinstance(node, InternalNode):
- await self.write_tree_to_file(root, node.left_hash, store_id, deltas_only, writer)
- await self.write_tree_to_file(root, node.right_hash, store_id, deltas_only, writer)
- to_write = bytes(SerializedNode(False, bytes(node.left_hash), bytes(node.right_hash)))
- elif isinstance(node, TerminalNode):
- to_write = bytes(SerializedNode(True, node.key, node.value))
- else:
- raise Exception(f"Node is neither InternalNode nor TerminalNode: {node}")
+ with log_exceptions(log=log, message="Error while getting merkle blob"):
+ root_path = self.get_merkle_path(store_id=store_id, root_hash=root.node_hash)
+ delta_file_cache = DeltaFileCache(root_path)
 
- writer.write(len(to_write).to_bytes(4, byteorder="big"))
- writer.write(to_write)
+ if root.generation > 0:
+ previous_root = await self.get_tree_root(store_id=store_id, generation=root.generation - 1)
+ if previous_root.node_hash is not None:
+ with log_exceptions(log=log, message="Error while getting previous merkle blob"):
+ previous_root_path = self.get_merkle_path(store_id=store_id, root_hash=previous_root.node_hash)
+ delta_file_cache.load_previous_hashes(previous_root_path)
+
+ tree_nodes: list[SerializedNode] = []
+
+ await self.get_nodes_for_file(root, node_hash, store_id, deltas_only, delta_file_cache, tree_nodes)
+ kv_ids = (
+ KeyOrValueId.from_bytes(raw_id)
+ for node in tree_nodes
+ if node.is_terminal
+ for raw_id in (node.value1, node.value2)
+ )
+ table_blobs = await self.get_table_blobs(kv_ids, store_id)
+
+ for node in tree_nodes:
+ if node.is_terminal:
+ blobs = []
+ for raw_id in (node.value1, node.value2):
+ id = KeyOrValueId.from_bytes(raw_id)
+ blob_hash, blob = table_blobs[id]
+ if blob is None:
+ blob = self.get_blob_from_file(blob_hash, store_id)
+ blobs.append(blob)
+ to_write = bytes(SerializedNode(True, blobs[0], blobs[1]))
+ else:
+ to_write = bytes(node)
+ writer.write(len(to_write).to_bytes(4, byteorder="big"))
+ writer.write(to_write)
 
  async def update_subscriptions_from_wallet(self, store_id: bytes32, new_urls: list[str]) -> None:
  async with self.db_wrapper.writer() as writer:
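write_tree_to_file above frames every serialized node as a 4-byte big-endian length prefix followed by the payload. A minimal sketch of writing that framing and reading it back:

    # 4-byte big-endian length-prefixed record framing, written and read back.
    import io


    def write_record(writer: io.BufferedIOBase, payload: bytes) -> None:
        writer.write(len(payload).to_bytes(4, byteorder="big"))
        writer.write(payload)


    def read_records(reader: io.BufferedIOBase) -> list[bytes]:
        records: list[bytes] = []
        while True:
            header = reader.read(4)
            if len(header) == 0:
                return records
            if len(header) != 4:
                raise ValueError("Truncated length prefix")
            size = int.from_bytes(header, byteorder="big")
            payload = reader.read(size)
            if len(payload) != size:
                raise ValueError("Truncated record payload")
            records.append(payload)


    if __name__ == "__main__":
        buffer = io.BytesIO()
        for blob in (b"internal-node", b"terminal-node"):
            write_record(buffer, blob)
        buffer.seek(0)
        print(read_records(buffer))  # [b'internal-node', b'terminal-node']
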
@@ -2064,94 +1712,36 @@ class DataStore:
  },
  )
 
- async def delete_store_data(self, store_id: bytes32) -> None:
- async with self.db_wrapper.writer(foreign_key_enforcement_enabled=False) as writer:
- await self.clean_node_table(writer)
- cursor = await writer.execute(
- """
- WITH RECURSIVE all_nodes AS (
- SELECT a.hash, n.left, n.right
- FROM ancestors AS a
- JOIN node AS n ON a.hash = n.hash
- WHERE a.tree_id = :tree_id
- ),
- pending_nodes AS (
- SELECT node_hash AS hash FROM root
- WHERE status IN (:pending_status, :pending_batch_status)
- UNION ALL
- SELECT n.left FROM node n
- INNER JOIN pending_nodes pn ON n.hash = pn.hash
- WHERE n.left IS NOT NULL
- UNION ALL
- SELECT n.right FROM node n
- INNER JOIN pending_nodes pn ON n.hash = pn.hash
- WHERE n.right IS NOT NULL
- )
-
- SELECT hash, left, right
- FROM all_nodes
- WHERE hash NOT IN (SELECT hash FROM ancestors WHERE tree_id != :tree_id)
- AND hash NOT IN (SELECT hash from pending_nodes)
- """,
- {
- "tree_id": store_id,
- "pending_status": Status.PENDING.value,
- "pending_batch_status": Status.PENDING_BATCH.value,
- },
- )
- to_delete: dict[bytes, tuple[bytes, bytes]] = {}
- ref_counts: dict[bytes, int] = {}
- async for row in cursor:
- hash = row["hash"]
- left = row["left"]
- right = row["right"]
- if hash in to_delete:
- prev_left, prev_right = to_delete[hash]
- assert prev_left == left
- assert prev_right == right
- continue
- to_delete[hash] = (left, right)
- if left is not None:
- ref_counts[left] = ref_counts.get(left, 0) + 1
- if right is not None:
- ref_counts[right] = ref_counts.get(right, 0) + 1
-
- await writer.execute("DELETE FROM ancestors WHERE tree_id == ?", (store_id,))
- await writer.execute("DELETE FROM root WHERE tree_id == ?", (store_id,))
- queue = [hash for hash in to_delete if ref_counts.get(hash, 0) == 0]
- while queue:
- hash = queue.pop(0)
- if hash not in to_delete:
- continue
- await writer.execute("DELETE FROM node WHERE hash == ?", (hash,))
-
- left, right = to_delete[hash]
- if left is not None:
- ref_counts[left] -= 1
- if ref_counts[left] == 0:
- queue.append(left)
-
- if right is not None:
- ref_counts[right] -= 1
- if ref_counts[right] == 0:
- queue.append(right)
-
  async def unsubscribe(self, store_id: bytes32) -> None:
  async with self.db_wrapper.writer() as writer:
  await writer.execute(
  "DELETE FROM subscriptions WHERE tree_id == :tree_id",
  {"tree_id": store_id},
  )
+ await writer.execute(
+ "DELETE FROM ids WHERE store_id == :store_id",
+ {"store_id": store_id},
+ )
+ await writer.execute(
+ "DELETE FROM nodes WHERE store_id == :store_id",
+ {"store_id": store_id},
+ )
+
+ with contextlib.suppress(FileNotFoundError):
+ shutil.rmtree(self.get_merkle_path(store_id=store_id, root_hash=None))
+
+ with contextlib.suppress(FileNotFoundError):
+ shutil.rmtree(self.get_key_value_path(store_id=store_id, blob_hash=None))
 
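unsubscribe above removes the per-store merkle and key/value directories with shutil.rmtree wrapped in contextlib.suppress(FileNotFoundError), so a store that never wrote any files can still be unsubscribed cleanly. A small runnable sketch of that idempotent cleanup (the paths below are a temporary directory, not the store's real layout):

    # Idempotent directory cleanup: rmtree under suppress(FileNotFoundError)
    # succeeds whether or not the directory was ever created.
    import contextlib
    import shutil
    import tempfile
    from pathlib import Path


    def remove_store_dirs(base: Path, store_name: str) -> None:
        for subdir in ("merkle", "key_value"):
            with contextlib.suppress(FileNotFoundError):
                shutil.rmtree(base / subdir / store_name)


    if __name__ == "__main__":
        with tempfile.TemporaryDirectory() as tmp:
            base = Path(tmp)
            (base / "merkle" / "store-a").mkdir(parents=True)
            remove_store_dirs(base, "store-a")  # removes the existing directory
            remove_store_dirs(base, "store-a")  # second call is a no-op, no error
            print("cleanup ok")
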
  async def rollback_to_generation(self, store_id: bytes32, target_generation: int) -> None:
  async with self.db_wrapper.writer() as writer:
  await writer.execute(
- "DELETE FROM ancestors WHERE tree_id == :tree_id AND generation > :target_generation",
+ "DELETE FROM root WHERE tree_id == :tree_id AND generation > :target_generation",
  {"tree_id": store_id, "target_generation": target_generation},
  )
  await writer.execute(
- "DELETE FROM root WHERE tree_id == :tree_id AND generation > :target_generation",
- {"tree_id": store_id, "target_generation": target_generation},
+ "DELETE FROM nodes WHERE store_id == :store_id AND generation > :target_generation",
+ {"store_id": store_id, "target_generation": target_generation},
  )
 
  async def update_server_info(self, store_id: bytes32, server_info: ServerInfo) -> None:
@@ -2228,13 +1818,12 @@ class DataStore:
  )
  else:
  subscriptions.append(Subscription(store_id, []))
- else:
- if url is not None and num_consecutive_failures is not None and ignore_till is not None:
- new_servers_info = subscription.servers_info
- new_servers_info.append(ServerInfo(url, num_consecutive_failures, ignore_till))
- new_subscription = replace(subscription, servers_info=new_servers_info)
- subscriptions.remove(subscription)
- subscriptions.append(new_subscription)
+ elif url is not None and num_consecutive_failures is not None and ignore_till is not None:
+ new_servers_info = subscription.servers_info
+ new_servers_info.append(ServerInfo(url, num_consecutive_failures, ignore_till))
+ new_subscription = replace(subscription, servers_info=new_servers_info)
+ subscriptions.remove(subscription)
+ subscriptions.append(new_subscription)
 
  return subscriptions