cnhkmcp 2.1.3__py3-none-any.whl → 2.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (194) hide show
  1. cnhkmcp/__init__.py +126 -0
  2. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/README.md +38 -0
  3. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/ace.log +0 -0
  4. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/config.json +6 -0
  5. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/ace_lib.py +1514 -0
  6. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_datasets.py +157 -0
  7. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_documentation.py +132 -0
  8. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_operators.py +99 -0
  9. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/helpful_functions.py +180 -0
  10. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/icon.ico +0 -0
  11. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/icon.png +0 -0
  12. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_10_Steps_to_Start_on_BRAIN_documentation.json +14 -0
  13. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Intermediate_Pack_-_Improve_your_Alpha_2_2_documentation.json +174 -0
  14. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Intermediate_Pack_-_Understand_Results_1_2_documentation.json +167 -0
  15. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Introduction_to_Alphas_documentation.json +145 -0
  16. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Introduction_to_BRAIN_Expression_Language_documentation.json +107 -0
  17. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_WorldQuant_Challenge_documentation.json +56 -0
  18. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001__Read_this_First_-_Starter_Pack_documentation.json +404 -0
  19. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002_How_to_choose_the_Simulation_Settings_documentation.json +268 -0
  20. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002_Simulate_your_first_Alpha_documentation.json +88 -0
  21. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Beginners_documentation.json +254 -0
  22. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Bronze_Users_documentation.json +114 -0
  23. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Silver_Users_documentation.json +79 -0
  24. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__How_BRAIN_works_documentation.json +184 -0
  25. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/003_Clear_these_tests_before_submitting_an_Alpha_documentation.json +388 -0
  26. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/003_Parameters_in_the_Simulation_results_documentation.json +243 -0
  27. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Group_Data_Fields_documentation.json +69 -0
  28. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_How_to_use_the_Data_Explorer_documentation.json +142 -0
  29. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Model77_dataset_documentation.json +14 -0
  30. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Sentiment1_dataset_documentation.json +14 -0
  31. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Understanding_Data_in_BRAIN_Key_Concepts_and_Tips_documentation.json +182 -0
  32. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Vector_Data_Fields_documentation.json +30 -0
  33. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Crowding_Risk-Neutralized_Alphas_documentation.json +64 -0
  34. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_D0_documentation.json +66 -0
  35. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Double_Neutralization_documentation.json +53 -0
  36. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Fast_D1_Documentation_documentation.json +304 -0
  37. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Investability_Constrained_Metrics_documentation.json +129 -0
  38. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Must-read_posts_How_to_improve_your_Alphas_documentation.json +14 -0
  39. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Neutralization_documentation.json +29 -0
  40. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_RAM_Risk-Neutralized_Alphas_documentation.json +64 -0
  41. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Risk_Neutralization_Default_setting_documentation.json +75 -0
  42. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Risk_Neutralized_Alphas_documentation.json +171 -0
  43. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Statistical_Risk-Neutralized_Alphas_documentation.json +51 -0
  44. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_EUR_TOP2500_Universe_documentation.json +35 -0
  45. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_GLB_TOPDIV3000_Universe_documentation.json +48 -0
  46. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_Started_China_Research_for_Consultants_Gold_documentation.json +142 -0
  47. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_started_on_Illiquid_Universes_Gold_documentation.json +46 -0
  48. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_started_with_USA_TOPSP500_universe_Gold_documentation.json +62 -0
  49. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Global_Alphas_Gold_documentation.json +66 -0
  50. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_India_Alphas_documentation.json +35 -0
  51. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Dos_and_Don_ts_documentation.json +35 -0
  52. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Features_documentation.json +239 -0
  53. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Simulation_Features_documentation.json +149 -0
  54. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Submission_Tests_documentation.json +363 -0
  55. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Finding_Consultant_Alphas_documentation.json +333 -0
  56. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Power_Pool_Alphas_documentation.json +14 -0
  57. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Research_Advisory_Program_documentation.json +35 -0
  58. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Starting_Guide_for_Research_Consultants_documentation.json +14 -0
  59. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Visualization_Tool_documentation.json +99 -0
  60. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Your_Advisor_-_Kunqi_Jiang_documentation.json +53 -0
  61. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007__Brain_Genius_documentation.json +288 -0
  62. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007__Single_Dataset_Alphas_documentation.json +41 -0
  63. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Advisory_Theme_Calendar_documentation.json +14 -0
  64. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Multiplier_Rules_documentation.json +14 -0
  65. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Overview_of_Themes_documentation.json +14 -0
  66. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Theme_Calendar_documentation.json +14 -0
  67. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Combo_Expression_documentation.json +272 -0
  68. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Global_SuperAlphas_documentation.json +14 -0
  69. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Helpful_Tips_documentation.json +58 -0
  70. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Selection_Expression_documentation.json +1546 -0
  71. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_SuperAlpha_Operators_documentation.json +890 -0
  72. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_SuperAlpha_Results_documentation.json +83 -0
  73. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_What_is_a_SuperAlpha_documentation.json +261 -0
  74. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010_BRAIN_API_documentation.json +515 -0
  75. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010_Documentation_for_ACE_API_Library_Gold_documentation.json +27 -0
  76. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010__Understanding_simulation_limits_documentation.json +210 -0
  77. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/arithmetic_operators.json +209 -0
  78. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/cross_sectional_operators.json +98 -0
  79. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/group_operators.json +121 -0
  80. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/logical_operators.json +145 -0
  81. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/reduce_operators.json +156 -0
  82. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/special_operators.json +35 -0
  83. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/test.txt +1 -0
  84. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/time_series_operators.json +386 -0
  85. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/transformational_operators.json +61 -0
  86. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/vector_operators.json +38 -0
  87. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/main.py +576 -0
  88. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/process_knowledge_base.py +281 -0
  89. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/rag_engine.py +408 -0
  90. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/requirements.txt +7 -0
  91. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/run.bat +3 -0
  92. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/_manifest.json +302 -0
  93. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/_meta.json +1 -0
  94. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/chroma.sqlite3 +0 -0
  95. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242//321/211/320/266/320/246/321/206/320/274/320/261/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +265 -0
  96. cnhkmcp/untracked/APP/.gitignore +32 -0
  97. cnhkmcp/untracked/APP/MODULAR_STRUCTURE.md +112 -0
  98. cnhkmcp/untracked/APP/README.md +309 -0
  99. cnhkmcp/untracked/APP/Tranformer/Transformer.py +4989 -0
  100. cnhkmcp/untracked/APP/Tranformer/ace.log +0 -0
  101. cnhkmcp/untracked/APP/Tranformer/ace_lib.py +1514 -0
  102. cnhkmcp/untracked/APP/Tranformer/helpful_functions.py +180 -0
  103. cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates.json +7187 -0
  104. cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates_/321/207/320/264/342/225/221/321/204/342/225/233/320/233.json +654 -0
  105. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_error.json +1 -0
  106. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_success.json +47312 -0
  107. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_/321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/277/321/207/320/253/342/224/244/321/206/320/236/320/265/321/210/342/225/234/342/225/234/321/205/320/225/320/265Machine_lib.json +22 -0
  108. cnhkmcp/untracked/APP/Tranformer/parsetab.py +60 -0
  109. cnhkmcp/untracked/APP/Tranformer/template_summary.txt +3182 -0
  110. cnhkmcp/untracked/APP/Tranformer/transformer_config.json +7 -0
  111. cnhkmcp/untracked/APP/Tranformer/validator.py +889 -0
  112. cnhkmcp/untracked/APP/ace.log +69 -0
  113. cnhkmcp/untracked/APP/ace_lib.py +1514 -0
  114. cnhkmcp/untracked/APP/blueprints/__init__.py +6 -0
  115. cnhkmcp/untracked/APP/blueprints/feature_engineering.py +347 -0
  116. cnhkmcp/untracked/APP/blueprints/idea_house.py +221 -0
  117. cnhkmcp/untracked/APP/blueprints/inspiration_house.py +432 -0
  118. cnhkmcp/untracked/APP/blueprints/paper_analysis.py +570 -0
  119. cnhkmcp/untracked/APP/custom_templates/templates.json +1257 -0
  120. cnhkmcp/untracked/APP/give_me_idea/BRAIN_Alpha_Template_Expert_SystemPrompt.md +400 -0
  121. cnhkmcp/untracked/APP/give_me_idea/ace_lib.py +1514 -0
  122. cnhkmcp/untracked/APP/give_me_idea/alpha_data_specific_template_master.py +252 -0
  123. cnhkmcp/untracked/APP/give_me_idea/fetch_all_datasets.py +157 -0
  124. cnhkmcp/untracked/APP/give_me_idea/fetch_all_operators.py +99 -0
  125. cnhkmcp/untracked/APP/give_me_idea/helpful_functions.py +180 -0
  126. cnhkmcp/untracked/APP/give_me_idea/what_is_Alpha_template.md +11 -0
  127. cnhkmcp/untracked/APP/helpful_functions.py +180 -0
  128. cnhkmcp/untracked/APP/hkSimulator/ace_lib.py +1501 -0
  129. cnhkmcp/untracked/APP/hkSimulator/autosimulator.py +447 -0
  130. cnhkmcp/untracked/APP/hkSimulator/helpful_functions.py +180 -0
  131. cnhkmcp/untracked/APP/mirror_config.txt +20 -0
  132. cnhkmcp/untracked/APP/operaters.csv +129 -0
  133. cnhkmcp/untracked/APP/requirements.txt +53 -0
  134. cnhkmcp/untracked/APP/run_app.bat +28 -0
  135. cnhkmcp/untracked/APP/run_app.sh +34 -0
  136. cnhkmcp/untracked/APP/setup_tsinghua.bat +39 -0
  137. cnhkmcp/untracked/APP/setup_tsinghua.sh +43 -0
  138. cnhkmcp/untracked/APP/simulator/alpha_submitter.py +404 -0
  139. cnhkmcp/untracked/APP/simulator/simulator_wqb.py +618 -0
  140. cnhkmcp/untracked/APP/ssrn-3332513.pdf +109188 -19
  141. cnhkmcp/untracked/APP/static/brain.js +589 -0
  142. cnhkmcp/untracked/APP/static/decoder.js +1540 -0
  143. cnhkmcp/untracked/APP/static/feature_engineering.js +1729 -0
  144. cnhkmcp/untracked/APP/static/idea_house.js +937 -0
  145. cnhkmcp/untracked/APP/static/inspiration.js +465 -0
  146. cnhkmcp/untracked/APP/static/inspiration_house.js +868 -0
  147. cnhkmcp/untracked/APP/static/paper_analysis.js +390 -0
  148. cnhkmcp/untracked/APP/static/script.js +3082 -0
  149. cnhkmcp/untracked/APP/static/simulator.js +597 -0
  150. cnhkmcp/untracked/APP/static/styles.css +3127 -0
  151. cnhkmcp/untracked/APP/static/usage_widget.js +508 -0
  152. cnhkmcp/untracked/APP/templates/alpha_inspector.html +511 -0
  153. cnhkmcp/untracked/APP/templates/feature_engineering.html +960 -0
  154. cnhkmcp/untracked/APP/templates/idea_house.html +564 -0
  155. cnhkmcp/untracked/APP/templates/index.html +932 -0
  156. cnhkmcp/untracked/APP/templates/inspiration_house.html +861 -0
  157. cnhkmcp/untracked/APP/templates/paper_analysis.html +91 -0
  158. cnhkmcp/untracked/APP/templates/simulator.html +343 -0
  159. cnhkmcp/untracked/APP/templates/transformer_web.html +580 -0
  160. cnhkmcp/untracked/APP/usage.md +351 -0
  161. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/ace_lib.py +1514 -0
  162. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/brain_alpha_inspector.py +712 -0
  163. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/helpful_functions.py +180 -0
  164. cnhkmcp/untracked/APP//321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +2460 -0
  165. cnhkmcp/untracked/__init__.py +0 -0
  166. cnhkmcp/untracked/arXiv_API_Tool_Manual.md +490 -0
  167. cnhkmcp/untracked/arxiv_api.py +229 -0
  168. cnhkmcp/untracked/back_up/forum_functions.py +998 -0
  169. cnhkmcp/untracked/back_up/platform_functions.py +2886 -0
  170. cnhkmcp/untracked/brain-consultant.md +31 -0
  171. cnhkmcp/untracked/forum_functions.py +407 -0
  172. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/forum_functions.py +407 -0
  173. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/platform_functions.py +2601 -0
  174. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/user_config.json +31 -0
  175. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/210/320/276/320/271AI/321/210/320/277/342/225/227/321/210/342/224/220/320/251/321/204/342/225/225/320/272/321/206/320/246/320/227/321/206/320/261/320/263/321/206/320/255/320/265/321/205/320/275/320/266/321/204/342/225/235/320/252/321/204/342/225/225/320/233/321/210/342/225/234/342/225/234/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270.md +101 -0
  176. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +190 -0
  177. cnhkmcp/untracked/platform_functions.py +2601 -0
  178. cnhkmcp/untracked/sample_mcp_config.json +11 -0
  179. cnhkmcp/untracked/user_config.json +31 -0
  180. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/222/321/210/320/220/320/223/321/206/320/246/320/227/321/206/320/261/320/263_BRAIN_Alpha_Test_Requirements_and_Tips.md +202 -0
  181. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Alpha_explaination_workflow.md +56 -0
  182. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_6_Tips_Datafield_Exploration_Guide.md +194 -0
  183. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_Alpha_Improvement_Workflow.md +101 -0
  184. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Dataset_Exploration_Expert_Manual.md +436 -0
  185. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_daily_report_workflow.md +128 -0
  186. cnhkmcp/untracked//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +192 -0
  187. {cnhkmcp-2.1.3.dist-info → cnhkmcp-2.1.5.dist-info}/METADATA +1 -1
  188. cnhkmcp-2.1.5.dist-info/RECORD +192 -0
  189. cnhkmcp-2.1.5.dist-info/top_level.txt +1 -0
  190. cnhkmcp-2.1.3.dist-info/RECORD +0 -6
  191. cnhkmcp-2.1.3.dist-info/top_level.txt +0 -1
  192. {cnhkmcp-2.1.3.dist-info → cnhkmcp-2.1.5.dist-info}/WHEEL +0 -0
  193. {cnhkmcp-2.1.3.dist-info → cnhkmcp-2.1.5.dist-info}/entry_points.txt +0 -0
  194. {cnhkmcp-2.1.3.dist-info → cnhkmcp-2.1.5.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,2460 @@
1
+ """
2
+ BRAIN Expression Template Decoder - Flask Web Application
3
+ A complete web application for decoding string templates with WorldQuant BRAIN integration
4
+ """
5
+
6
+ # Auto-install dependencies if missing
7
+ import subprocess
8
+ import sys
9
+ import os
10
+
11
def install_requirements():
    """Install required packages from requirements.txt if they're missing"""
    print("🔍 Checking and installing required dependencies...")
    print("📋 Verifying packages needed for BRAIN Expression Template Decoder...")

    # Resolve everything relative to this file, not the current working dir.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    requirements_path = os.path.join(base_dir, 'requirements.txt')

    if not os.path.exists(requirements_path):
        print("❌ Error: requirements.txt not found!")
        print(f"Looking for: {requirements_path}")
        return False

    # Default pip index is the Tsinghua mirror; an optional mirror_config.txt
    # next to this script (first non-comment line starting with 'http')
    # overrides it.
    index_url = 'https://pypi.tuna.tsinghua.edu.cn/simple'
    mirror_file = os.path.join(base_dir, 'mirror_config.txt')
    if os.path.exists(mirror_file):
        try:
            with open(mirror_file, 'r', encoding='utf-8') as handle:
                for raw in handle:
                    candidate = raw.strip()
                    if candidate and not candidate.startswith('#') and candidate.startswith('http'):
                        index_url = candidate
                        break
        except Exception as e:
            print(f"Warning: Could not read mirror configuration: {e}")

    # (import name, pip distribution name) pairs to probe for.
    required = (
        ('flask', 'flask'),
        ('flask_cors', 'flask-cors'),
        ('requests', 'requests'),
        ('pandas', 'pandas'),
        ('PyPDF2', 'PyPDF2'),
        ('docx', 'python-docx'),
        ('pdfplumber', 'pdfplumber'),
        ('fitz', 'PyMuPDF'),
        ('cozepy', 'cozepy'),
        ('lxml', 'lxml'),
        ('bs4', 'beautifulsoup4'),
    )

    missing = []
    for module_name, dist_name in required:
        try:
            __import__(module_name)
        except ImportError:
            missing.append(dist_name)
            print(f"Missing package: {dist_name} (import name: {module_name})")

    if not missing:
        print("✅ All required dependencies are already installed!")
        return True

    print(f"⚠️ Missing packages detected: {', '.join(missing)}")
    print("📦 Installing dependencies from requirements.txt...")
    print(f"🌐 Using mirror: {index_url}")

    try:
        # Install everything from requirements.txt via the configured mirror.
        subprocess.check_call([
            sys.executable, '-m', 'pip', 'install',
            '-i', index_url,
            '-r', requirements_path,
        ])
        print("✅ All dependencies installed successfully!")
        return True
    except subprocess.CalledProcessError:
        print(f"❌ Error: Failed to install dependencies using {index_url}")
        print("🔄 Trying with default PyPI...")
        try:
            # Fallback: let pip use its default index.
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', requirements_path])
            print("✅ All dependencies installed successfully!")
            return True
        except subprocess.CalledProcessError:
            print("❌ Error: Failed to install dependencies. Please run manually:")
            print(f" {sys.executable} -m pip install -i {index_url} -r requirements.txt")
            return False
93
+
94
+ # Check and install dependencies before importing
95
+ # This will run every time the module is imported, but only install if needed
96
def check_and_install_dependencies():
    """Check and install dependencies if needed"""
    # A module-level flag makes repeated imports idempotent: the pip probe
    # runs at most once per process.
    if globals().get('_dependencies_checked'):
        return True
    if not install_requirements():
        print("\nPlease install the dependencies manually and try again.")
        return False
    globals()['_dependencies_checked'] = True
    return True
106
+
107
# Always run the dependency check when this module is imported
print("🚀 Initializing BRAIN Expression Template Decoder...")
if not check_and_install_dependencies():
    # When executed directly, a failed dependency install is fatal; when
    # imported as a library we warn and continue so partial functionality
    # remains available.
    if __name__ == "__main__":
        sys.exit(1)
    else:
        print("⚠️ Warning: Some dependencies may be missing. Please run 'pip install -r requirements.txt'")
        print("🔄 Continuing with import, but some features may not work properly.")
115
+
116
# Now import the packages (third-party imports are deferred until after the
# auto-install step above so they have a chance to be present).
try:
    from flask import Flask, render_template, request, jsonify, session as flask_session, Response, stream_with_context, send_from_directory
    from flask_cors import CORS
    import requests
    import json
    import time
    import os
    import threading
    import queue
    import uuid
    from datetime import datetime
    print("📚 Core packages imported successfully!")

    # Import ace_lib for simulation options
    try:
        # ace_lib lives in the sibling 'hkSimulator' directory; extend the
        # import path so it can be found.
        sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'hkSimulator'))
        from ace_lib import get_instrument_type_region_delay
        print("✅ Imported get_instrument_type_region_delay from ace_lib")
    except ImportError as e:
        # Optional dependency: simulation-option lookup degrades gracefully.
        print(f"⚠️ Warning: Could not import get_instrument_type_region_delay: {e}")
        get_instrument_type_region_delay = None

except ImportError as e:
    print(f"❌ Failed to import core packages: {e}")
    print("Please run: pip install -r requirements.txt")
    # Fatal either way: exit when run as a script, re-raise when imported.
    if __name__ == "__main__":
        sys.exit(1)
    raise
146
+
147
# Flask application and CORS setup.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — override in production (e.g. via an
# environment variable) or Flask session cookies are forgeable.
app.secret_key = 'brain_template_decoder_secret_key_change_in_production'
CORS(app)

print("🌐 Flask application initialized with CORS support!")

# BRAIN API configuration
BRAIN_API_BASE = 'https://api.worldquantbrain.com'

# Store BRAIN sessions (in production, use proper session management like Redis)
# Maps session_id -> {'session', 'username', 'timestamp', ...}; in-memory only,
# so sessions are lost on process restart.
brain_sessions = {}

print("🧠 BRAIN API integration configured!")
160
+
161
def sign_in_to_brain(username, password):
    """Sign in to BRAIN API with retry logic and biometric authentication support.

    Args:
        username: BRAIN account username (sent as HTTP Basic auth).
        password: BRAIN account password.

    Returns:
        requests.Session: authenticated session on success, OR
        dict: when the API demands biometric ("persona") verification, a dict
            with keys 'requires_biometric', 'biometric_url', 'session' and
            'location' so the caller can finish the flow later.

    Raises:
        requests.HTTPError: immediately on invalid credentials, or after all
            retries are exhausted for other HTTP errors.
        Exception: other errors (e.g. missing Location header) once retries
            are exhausted.
    """
    from urllib.parse import urljoin

    # Create a session to persistently store the headers
    session = requests.Session()
    # Save credentials into the session
    session.auth = (username, password)

    retry_count = 0
    max_retries = 3

    while retry_count < max_retries:
        try:
            # Send a POST request to the /authentication API
            response = session.post(f'{BRAIN_API_BASE}/authentication')

            # A 401 means either bad credentials or that the account requires
            # biometric ("persona") verification — distinguished via the
            # WWW-Authenticate header.
            if response.status_code == requests.codes.unauthorized:
                if response.headers.get("WWW-Authenticate") == "persona":
                    # Get biometric auth URL
                    location = response.headers.get("Location")
                    if location:
                        biometric_url = urljoin(response.url, location)

                        # Return special response indicating biometric auth is needed
                        return {
                            'requires_biometric': True,
                            'biometric_url': biometric_url,
                            'session': session,
                            'location': location
                        }
                    else:
                        raise Exception("Biometric authentication required but no Location header provided")
                else:
                    # Regular authentication failure
                    print("Incorrect username or password")
                    raise requests.HTTPError(
                        "Authentication failed: Invalid username or password",
                        response=response,
                    )

            # If we get here, authentication was successful
            response.raise_for_status()
            print("Authentication successful.")
            return session

        except requests.HTTPError as e:
            # NOTE(review): credential failure is detected by matching the
            # message text raised above — brittle if the wording changes.
            if "Invalid username or password" in str(e) or "Authentication failed" in str(e):
                raise  # Don't retry for invalid credentials
            print(f"HTTP error occurred: {e}")
            retry_count += 1
            if retry_count < max_retries:
                print(f"Retrying... Attempt {retry_count + 1} of {max_retries}")
                time.sleep(10)
            else:
                print("Max retries reached. Authentication failed.")
                raise
        except Exception as e:
            # Network failures and other unexpected errors get the same
            # retry treatment.
            print(f"Error during authentication: {e}")
            retry_count += 1
            if retry_count < max_retries:
                print(f"Retrying... Attempt {retry_count + 1} of {max_retries}")
                time.sleep(10)
            else:
                print("Max retries reached. Authentication failed.")
                raise
228
+
229
+ # Routes
230
@app.route('/')
def index():
    """Serve the main application page."""
    page = 'index.html'
    return render_template(page)
234
+
235
@app.route('/simulator')
def simulator():
    """Serve the user-friendly simulator interface."""
    page = 'simulator.html'
    return render_template(page)
239
+
240
@app.route('/api/simulator/logs', methods=['GET'])
def get_simulator_logs():
    """Get available log files in the simulator directory"""
    try:
        import glob
        import os
        from datetime import datetime

        # Search both the script directory and its 'simulator' subdirectory
        # for wqb*.log files.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        search_dirs = [base_dir, os.path.join(base_dir, 'simulator')]

        entries = []
        for folder in search_dirs:
            if not os.path.exists(folder):
                continue
            for path in glob.glob(os.path.join(folder, 'wqb*.log')):
                try:
                    info = os.stat(path)
                    entries.append({
                        'filename': os.path.basename(path),
                        'path': path,
                        'size': f"{info.st_size / 1024:.1f} KB",
                        'modified': datetime.fromtimestamp(info.st_mtime).strftime('%Y-%m-%d %H:%M:%S'),
                        'mtime': info.st_mtime,
                    })
                except Exception as e:
                    print(f"Error reading log file {path}: {e}")

        # Newest first; the most recent file is reported as 'latest'.
        entries.sort(key=lambda item: item['mtime'], reverse=True)

        return jsonify({
            'logs': entries,
            'latest': entries[0]['filename'] if entries else None,
            'count': len(entries),
        })

    except Exception as e:
        return jsonify({'error': f'Error getting log files: {str(e)}'}), 500
285
+
286
@app.route('/api/transformer_candidates')
def get_transformer_candidates():
    """Get Alpha candidates generated by Transformer"""
    try:
        # Note: Folder name is 'Tranformer' (missing 's') based on user context
        candidates_path = os.path.join(
            os.path.dirname(__file__), 'Tranformer', 'output', 'Alpha_candidates.json'
        )

        # Guard clause: report a missing file instead of raising.
        if not os.path.exists(candidates_path):
            return jsonify({"error": "File not found", "path": candidates_path})

        with open(candidates_path, 'r', encoding='utf-8') as f:
            return jsonify(json.load(f))
    except Exception as e:
        return jsonify({"error": str(e)})
302
+
303
@app.route('/api/simulator/logs/<filename>', methods=['GET'])
def get_simulator_log_content(filename):
    """Get content of a specific log file.

    Args:
        filename: bare file name of the log; must match 'wqb*.log'.

    Returns:
        JSON {'content', 'filename', 'size'} on success; JSON error with
        status 400 (bad name), 404 (not found) or 500 (read failure).
    """
    try:
        import os

        # Security: only allow log files with safe names. The basename check
        # rejects any path-separator payload (e.g. backslash traversal on
        # Windows, which Flask's default '/'-excluding converter does not
        # block) in addition to the original prefix/suffix check.
        if (os.path.basename(filename) != filename
                or not filename.startswith('wqb')
                or not filename.endswith('.log')):
            return jsonify({'error': 'Invalid log file name'}), 400

        script_dir = os.path.dirname(os.path.abspath(__file__))
        simulator_dir = os.path.join(script_dir, 'simulator')

        # Look for the file in both candidate directories.
        log_path = None
        for directory in [script_dir, simulator_dir]:
            potential_path = os.path.join(directory, filename)
            if os.path.exists(potential_path):
                log_path = potential_path
                break

        if not log_path:
            return jsonify({'error': 'Log file not found'}), 404

        # Read file content with multiple encoding attempts — logs may come
        # from Chinese-locale Windows machines (gbk/gb2312/big5).
        content = None
        encodings_to_try = ['utf-8', 'gbk', 'gb2312', 'big5', 'latin-1', 'cp1252']

        for encoding in encodings_to_try:
            try:
                with open(log_path, 'r', encoding=encoding) as f:
                    content = f.read()
                print(f"Successfully read log file with {encoding} encoding")
                break
            except UnicodeDecodeError:
                continue
            except Exception as e:
                print(f"Error reading with {encoding}: {e}")
                continue

        if content is None:
            # Last resort: read as binary and decode with error handling
            try:
                with open(log_path, 'rb') as f:
                    raw_content = f.read()
                content = raw_content.decode('utf-8', errors='replace')
                print("Used UTF-8 with error replacement for log content")
            except Exception as e:
                content = f"Error: Could not decode file content - {str(e)}"

        # 'size' is the decoded character count, not the on-disk byte size.
        response = jsonify({
            'content': content,
            'filename': filename,
            'size': len(content)
        })
        response.headers['Content-Type'] = 'application/json; charset=utf-8'
        return response

    except Exception as e:
        return jsonify({'error': f'Error reading log file: {str(e)}'}), 500
363
+
364
@app.route('/api/simulator/test-connection', methods=['POST'])
def test_simulator_connection():
    """Test BRAIN API connection for simulator"""
    try:
        payload = request.get_json()
        username = payload.get('username')
        password = payload.get('password')

        if not (username and password):
            return jsonify({'error': 'Username and password required'}), 400

        # Reuse the shared sign-in helper.
        auth_result = sign_in_to_brain(username, password)

        # A dict result means the account needs biometric verification, which
        # this lightweight endpoint cannot drive.
        if isinstance(auth_result, dict) and auth_result.get('requires_biometric'):
            return jsonify({
                'success': False,
                'error': 'Biometric authentication required. Please use the main interface first to complete authentication.',
                'requires_biometric': True
            })

        # Probe a simple endpoint to verify the session actually works.
        brain_session = auth_result
        probe = brain_session.get(f'{BRAIN_API_BASE}/data-fields/open')
        if probe.ok:
            return jsonify({
                'success': True,
                'message': 'Connection successful'
            })
        return jsonify({
            'success': False,
            'error': f'API test failed: {probe.status_code}'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'error': f'Connection failed: {str(e)}'
        })
406
+
407
@app.route('/api/simulator/run', methods=['POST'])
def run_simulator_with_params():
    """Run simulator with user-provided parameters in a new terminal.

    Reads a multipart form (JSON expressions file + credentials + tuning
    knobs), writes the expressions plus a generated driver script into the
    'simulator' directory, then launches that script in a fresh console
    window (cmd on Windows, a terminal emulator on Unix). The driver deletes
    the temp files itself when it finishes.
    """
    try:
        import subprocess
        import threading
        import json
        import os
        import tempfile  # NOTE(review): imported but unused in this handler
        import sys
        import time

        # Get form data
        json_file = request.files.get('jsonFile')
        username = request.form.get('username')
        password = request.form.get('password')
        start_position = int(request.form.get('startPosition', 0))
        concurrent_count = int(request.form.get('concurrentCount', 3))
        random_shuffle = request.form.get('randomShuffle') == 'true'
        use_multi_sim = request.form.get('useMultiSim') == 'true'
        alpha_count_per_slot = int(request.form.get('alphaCountPerSlot', 3))

        if not json_file or not username or not password:
            return jsonify({'error': 'Missing required parameters'}), 400

        # Validate and read JSON file
        try:
            json_content = json_file.read().decode('utf-8')
            expressions_data = json.loads(json_content)
            if not isinstance(expressions_data, list):
                return jsonify({'error': 'JSON file must contain an array of expressions'}), 400
        except Exception as e:
            return jsonify({'error': f'Invalid JSON file: {str(e)}'}), 400

        # Get paths
        script_dir = os.path.dirname(os.path.abspath(__file__))
        simulator_dir = os.path.join(script_dir, 'simulator')

        # Create temporary files for the automated run.
        # NOTE(review): int(time.time()) can collide for requests within the
        # same second; a uuid suffix would be safer.
        temp_json_path = os.path.join(simulator_dir, f'temp_expressions_{int(time.time())}.json')
        temp_script_path = os.path.join(simulator_dir, f'temp_automated_{int(time.time())}.py')
        temp_batch_path = os.path.join(simulator_dir, f'temp_run_{int(time.time())}.bat')

        try:
            # Save the JSON data to temporary file
            with open(temp_json_path, 'w', encoding='utf-8') as f:
                json.dump(expressions_data, f, ensure_ascii=False, indent=2)

            # Create the automated script that calls automated_main.
            # NOTE(review): username/password are interpolated verbatim into a
            # plain-text script on disk — quotes/backslashes in the password
            # will break the generated code, and credentials are briefly
            # persisted in cleartext; consider repr()/json.dumps escaping.
            script_content = f'''
import asyncio
import sys
import os
import json

# Add current directory to path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

import simulator_wqb

async def run_automated():
    """Run the automated simulator with parameters from web interface"""
    try:
        # Load JSON data
        with open(r"{temp_json_path}", 'r', encoding='utf-8') as f:
            json_content = f.read()

        # Call automated_main with parameters
        result = await simulator_wqb.automated_main(
            json_file_content=json_content,
            username="{username}",
            password="{password}",
            start_position={start_position},
            concurrent_count={concurrent_count},
            random_shuffle={random_shuffle},
            use_multi_sim={use_multi_sim},
            alpha_count_per_slot={alpha_count_per_slot}
        )

        if result['success']:
            print("\\n" + "="*60)
            print("🎉 WEB INTERFACE AUTOMATION Finished, Go to the webpage to check your result 🎉")
            print("="*60)
            print(f"Total simulations: {{result['results']['total']}}")
            print("="*60)
        else:
            print("\\n" + "="*60)
            print("❌ WEB INTERFACE AUTOMATION FAILED")
            print("="*60)
            print(f"Error: {{result['error']}}")
            print("="*60)

    except Exception as e:
        print(f"\\n❌ Script execution error: {{e}}")

    finally:
        # Clean up temporary files
        try:
            if os.path.exists(r"{temp_json_path}"):
                os.remove(r"{temp_json_path}")
            if os.path.exists(r"{temp_script_path}"):
                os.remove(r"{temp_script_path}")
            if os.path.exists(r"{temp_batch_path}"):
                os.remove(r"{temp_batch_path}")
        except:
            pass

    print("\\n🔄 Press any key to close this window...")
    input()

if __name__ == '__main__':
    asyncio.run(run_automated())
'''

            # Save the script
            with open(temp_script_path, 'w', encoding='utf-8') as f:
                f.write(script_content)

            # Create batch file for Windows
            batch_content = f'''@echo off
cd /d "{simulator_dir}"
"{sys.executable}" "{os.path.basename(temp_script_path)}"
'''
            with open(temp_batch_path, 'w', encoding='utf-8') as f:
                f.write(batch_content)

            # Launch in new terminal
            def launch_simulator():
                try:
                    if os.name == 'nt':  # Windows
                        # Use cmd /c to execute batch file properly
                        subprocess.Popen(
                            f'cmd.exe /c "{temp_batch_path}"',
                            creationflags=subprocess.CREATE_NEW_CONSOLE
                        )
                    else:  # Unix-like systems
                        # Try different terminal emulators
                        terminals = ['gnome-terminal', 'xterm', 'konsole', 'terminal']
                        for terminal in terminals:
                            try:
                                if terminal == 'gnome-terminal':
                                    subprocess.Popen([
                                        terminal, '--working-directory', simulator_dir,
                                        '--', sys.executable, os.path.basename(temp_script_path)
                                    ])
                                else:
                                    # Use bash -c to handle shell commands like &&
                                    command = f'cd "{simulator_dir}" && "{sys.executable}" "{os.path.basename(temp_script_path)}"'
                                    subprocess.Popen([
                                        terminal, '-e',
                                        'bash', '-c', command
                                    ])
                                break
                            except FileNotFoundError:
                                continue
                        else:
                            # Fallback: run in background if no terminal found
                            subprocess.Popen([
                                sys.executable, temp_script_path
                            ], cwd=simulator_dir)
                except Exception as e:
                    print(f"Error launching simulator: {e}")

            # Start the simulator in a separate thread so the HTTP response
            # returns immediately.
            thread = threading.Thread(target=launch_simulator)
            thread.daemon = True
            thread.start()

            return jsonify({
                'success': True,
                'message': 'Simulator launched in new terminal window',
                'parameters': {
                    'expressions_count': len(expressions_data),
                    'concurrent_count': concurrent_count,
                    'use_multi_sim': use_multi_sim,
                    'alpha_count_per_slot': alpha_count_per_slot if use_multi_sim else None
                }
            })

        except Exception as e:
            # Clean up on error — the generated script never ran, so it cannot
            # delete these itself.
            try:
                if os.path.exists(temp_json_path):
                    os.remove(temp_json_path)
                if os.path.exists(temp_script_path):
                    os.remove(temp_script_path)
                if os.path.exists(temp_batch_path):
                    os.remove(temp_batch_path)
            except:
                pass
            raise e

    except Exception as e:
        return jsonify({'error': f'Failed to run simulator: {str(e)}'}), 500
601
+
602
@app.route('/api/simulator/stop', methods=['POST'])
def stop_simulator():
    """Stop running simulator"""
    try:
        # Placeholder only: real process management (tracking PIDs of launched
        # terminals) would be required to actually terminate simulations.
        return jsonify({'success': True, 'message': 'Stop signal sent'})
    except Exception as e:
        return jsonify({'error': f'Failed to stop simulator: {str(e)}'}), 500
614
+
615
@app.route('/api/authenticate', methods=['POST'])
def authenticate():
    """Authenticate with BRAIN API.

    On success stores the requests.Session in the in-memory brain_sessions
    map and returns its id plus cached simulation options. If the account
    needs biometric verification, returns success=False with the URL the
    user must visit; /api/complete-biometric finishes that flow.
    """
    try:
        data = request.get_json()
        username = data.get('username')
        password = data.get('password')

        if not username or not password:
            return jsonify({'error': 'Username and password required'}), 400

        # Authenticate with BRAIN
        result = sign_in_to_brain(username, password)

        # Check if biometric authentication is required
        if isinstance(result, dict) and result.get('requires_biometric'):
            # Store the session temporarily with biometric pending status
            session_id = f"{username}_{int(time.time())}_biometric_pending"
            brain_sessions[session_id] = {
                'session': result['session'],
                'username': username,
                'timestamp': time.time(),
                'biometric_pending': True,
                'biometric_location': result['location']
            }

            # Store session ID in Flask session
            flask_session['brain_session_id'] = session_id

            return jsonify({
                'success': False,
                'requires_biometric': True,
                'biometric_url': result['biometric_url'],
                'session_id': session_id,
                'message': 'Please complete biometric authentication by visiting the provided URL'
            })

        # Regular successful authentication
        brain_session = result

        # Fetch simulation options (cached on the session entry below).
        valid_options = get_valid_simulation_options(brain_session)

        # Store session
        session_id = f"{username}_{int(time.time())}"
        brain_sessions[session_id] = {
            'session': brain_session,
            'username': username,
            'timestamp': time.time(),
            'options': valid_options
        }

        # Store session ID in Flask session
        flask_session['brain_session_id'] = session_id

        return jsonify({
            'success': True,
            'session_id': session_id,
            'message': 'Authentication successful',
            'options': valid_options
        })

    except requests.HTTPError as e:
        resp = getattr(e, 'response', None)
        status_code = getattr(resp, 'status_code', None)

        # Common: wrong username/password
        if status_code == 401 or 'Invalid username or password' in str(e):
            return jsonify({
                'error': '用户名或密码错误',
                'hint': '请检查账号密码是否正确;如果你的账号需要生物验证(persona),请按弹出的生物验证流程完成后再点“Complete Authentication”。'
            }), 401

        # Upstream/network/server issues
        return jsonify({
            'error': 'Authentication failed',
            'detail': str(e)
        }), 502
    except Exception as e:
        return jsonify({'error': f'Authentication error: {str(e)}'}), 500
695
+
696
@app.route('/api/complete-biometric', methods=['POST'])
def complete_biometric():
    """Complete biometric authentication after user has done it in browser.

    Requires a session created by /api/authenticate with biometric_pending
    set. Polls the BRAIN authentication endpoint (201 = verified, 401 = user
    has not finished in the browser yet), then re-registers the session under
    a new id without the pending suffix.
    """
    try:
        from urllib.parse import urljoin

        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]

        # Check if this session is waiting for biometric completion
        if not session_info.get('biometric_pending'):
            return jsonify({'error': 'Session is not pending biometric authentication'}), 400

        brain_session = session_info['session']
        location = session_info['biometric_location']

        # Complete the biometric authentication following the reference pattern
        try:
            # Construct the full URL for biometric authentication
            auth_url = urljoin(f'{BRAIN_API_BASE}/authentication', location)

            # Keep trying until biometric auth succeeds (like in reference code)
            max_attempts = 5
            attempt = 0

            while attempt < max_attempts:
                bio_response = brain_session.post(auth_url)
                if bio_response.status_code == 201:
                    # Biometric authentication successful
                    break
                elif bio_response.status_code == 401:
                    # Biometric authentication not complete yet
                    attempt += 1
                    if attempt >= max_attempts:
                        return jsonify({
                            'success': False,
                            'error': 'Biometric authentication not completed. Please try again.'
                        })
                    time.sleep(2)  # Wait a bit before retrying
                else:
                    # Other error — raising here is caught by the HTTPError
                    # handler below.
                    bio_response.raise_for_status()

            # Update session info - remove biometric pending status
            session_info['biometric_pending'] = False
            del session_info['biometric_location']

            # Create a new session ID without the biometric_pending suffix
            new_session_id = f"{session_info['username']}_{int(time.time())}"
            brain_sessions[new_session_id] = {
                'session': brain_session,
                'username': session_info['username'],
                'timestamp': time.time()
            }

            # Remove old session
            del brain_sessions[session_id]

            # Update Flask session
            flask_session['brain_session_id'] = new_session_id

            return jsonify({
                'success': True,
                'session_id': new_session_id,
                'message': 'Biometric authentication completed successfully'
            })

        except requests.HTTPError as e:
            return jsonify({
                'success': False,
                'error': f'Failed to complete biometric authentication: {str(e)}'
            })

    except Exception as e:
        return jsonify({
            'success': False,
            'error': f'Error completing biometric authentication: {str(e)}'
        })
777
+
778
@app.route('/api/operators', methods=['GET'])
def get_operators():
    """Get user operators from BRAIN API.

    Tries one unpaginated request first; if the API answers with a paginated
    {'count', 'results'} envelope, walks the remaining pages. On any error,
    falls back to explicit limit/offset pagination from scratch. Returns a
    JSON list trimmed to name/category plus optional descriptive fields.
    """
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # First try without pagination parameters (most APIs return all operators at once)
        try:
            response = brain_session.get(f'{BRAIN_API_BASE}/operators')
            response.raise_for_status()

            data = response.json()

            # If it's a list, we got all operators
            if isinstance(data, list):
                all_operators = data
                print(f"Fetched {len(all_operators)} operators from BRAIN API (direct)")
            # If it's a dict with results, handle pagination
            elif isinstance(data, dict) and 'results' in data:
                all_operators = []
                total_count = data.get('count', len(data['results']))
                print(f"Found {total_count} total operators, fetching all...")

                # Get first batch
                all_operators.extend(data['results'])

                # Get remaining batches if needed
                limit = 100
                offset = len(data['results'])

                while len(all_operators) < total_count:
                    params = {'limit': limit, 'offset': offset}
                    batch_response = brain_session.get(f'{BRAIN_API_BASE}/operators', params=params)
                    batch_response.raise_for_status()
                    batch_data = batch_response.json()

                    if isinstance(batch_data, dict) and 'results' in batch_data:
                        batch_operators = batch_data['results']
                        if not batch_operators:  # No more data
                            break
                        all_operators.extend(batch_operators)
                        offset += len(batch_operators)
                    else:
                        break

                print(f"Fetched {len(all_operators)} operators from BRAIN API (paginated)")
            else:
                # Unknown format, treat as empty
                all_operators = []
                print("Unknown response format for operators API")

        except Exception as e:
            print(f"Error fetching operators: {str(e)}")
            # Fallback: try with explicit pagination
            all_operators = []
            limit = 100
            offset = 0

            while True:
                params = {'limit': limit, 'offset': offset}
                response = brain_session.get(f'{BRAIN_API_BASE}/operators', params=params)
                response.raise_for_status()

                data = response.json()
                if isinstance(data, list):
                    all_operators.extend(data)
                    # A short page signals the end of the data.
                    if len(data) < limit:
                        break
                elif isinstance(data, dict) and 'results' in data:
                    batch_operators = data['results']
                    all_operators.extend(batch_operators)
                    if len(batch_operators) < limit:
                        break
                else:
                    break

                offset += limit

            print(f"Fetched {len(all_operators)} operators from BRAIN API (fallback)")

        # Extract name, category, description, definition and other fields (if available)
        # NOTE(review): assumes every operator dict carries 'name' and
        # 'category'; a missing key raises KeyError and surfaces as a 500.
        filtered_operators = []
        for op in all_operators:
            operator_data = {
                'name': op['name'],
                'category': op['category']
            }
            # Include description if available
            if 'description' in op and op['description']:
                operator_data['description'] = op['description']
            # Include definition if available
            if 'definition' in op and op['definition']:
                operator_data['definition'] = op['definition']
            # Include usage count if available
            if 'usageCount' in op:
                operator_data['usageCount'] = op['usageCount']
            # Include other useful fields if available
            if 'example' in op and op['example']:
                operator_data['example'] = op['example']
            filtered_operators.append(operator_data)

        return jsonify(filtered_operators)

    except Exception as e:
        print(f"Error fetching operators: {str(e)}")
        return jsonify({'error': f'Failed to fetch operators: {str(e)}'}), 500
889
+
890
@app.route('/api/simulation-options', methods=['GET'])
def get_simulation_options():
    """Get valid simulation options from BRAIN"""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]

        # Serve from the per-session cache when populated.
        cached = session_info.get('options')
        if cached:
            return jsonify(cached)

        # Cache miss: fetch from BRAIN and remember for next time.
        valid_options = get_valid_simulation_options(session_info['session'])
        session_info['options'] = valid_options
        return jsonify(valid_options)

    except Exception as e:
        print(f"Error fetching simulation options: {str(e)}")
        return jsonify({'error': f'Failed to fetch simulation options: {str(e)}'}), 500
916
+
917
@app.route('/api/datasets', methods=['GET'])
def get_datasets():
    """Fetch datasets from the BRAIN API, combining theme=false and theme=true."""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        brain_session = brain_sessions[session_id]['session']

        # Query parameters with the same defaults the UI uses.
        region = request.args.get('region', 'USA')
        delay = request.args.get('delay', '1')
        universe = request.args.get('universe', 'TOP3000')
        instrument_type = request.args.get('instrument_type', 'EQUITY')

        # The BRAIN API splits themed and non-themed datasets; query both
        # variants and merge the result lists.
        combined = []
        for theme in ('false', 'true'):
            url = (
                f"{BRAIN_API_BASE}/data-sets?instrumentType={instrument_type}"
                f"&region={region}&delay={delay}&universe={universe}&theme={theme}"
            )
            resp = brain_session.get(url)
            resp.raise_for_status()
            combined.extend(resp.json().get('results', []))

        return jsonify({'results': combined, 'count': len(combined)})

    except Exception as e:
        print(f"Error fetching datasets: {str(e)}")
        return jsonify({'error': f'Failed to fetch datasets: {str(e)}'}), 500
954
+
955
@app.route('/api/datafields', methods=['GET'])
def get_datafields():
    """Get data fields from BRAIN API.

    Pages through the /data-fields endpoint 50 records at a time, retrying on
    HTTP 429 rate limits, and returns a trimmed list of field dicts
    (id, description, type, coverage, userCount, alphaCount).

    Query params: region, delay, universe, dataset_id, search (all optional).
    """
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # Get parameters
        region = request.args.get('region', 'USA')
        delay = request.args.get('delay', '1')
        universe = request.args.get('universe', 'TOP3000')
        dataset_id = request.args.get('dataset_id', 'fundamental6')
        # Bug fix: 'search' was hard-coded to '' which made the search branch
        # below unreachable; now honour the query parameter (default '' keeps
        # existing callers' behavior identical).
        search = request.args.get('search', '')

        # Build URL template based on notebook implementation
        if len(search) == 0:
            url_template = f"{BRAIN_API_BASE}/data-fields?" + \
                f"&instrumentType=EQUITY" + \
                f"&region={region}&delay={delay}&universe={universe}&dataset.id={dataset_id}&limit=50" + \
                "&offset={x}"
            # Get total record count from the first page.
            first_response = brain_session.get(url_template.format(x=0))
            first_response.raise_for_status()
            count = first_response.json()['count']
        else:
            url_template = f"{BRAIN_API_BASE}/data-fields?" + \
                f"&instrumentType=EQUITY" + \
                f"&region={region}&delay={delay}&universe={universe}&limit=50" + \
                f"&search={search}" + \
                "&offset={x}"
            count = 100  # Default for search queries (endpoint count not used)

        # Fetch all data fields in batches of 50, backing off on 429s.
        datafields_list = []
        for x in range(0, count, 50):
            response = brain_session.get(url_template.format(x=x))
            while response.status_code == 429:
                print("status_code 429, sleep 3 seconds")
                time.sleep(3)
                response = brain_session.get(url_template.format(x=x))
            response.raise_for_status()
            datafields_list.append(response.json()['results'])

        # Flatten the per-page lists into one list of field records.
        datafields_list_flat = [item for sublist in datafields_list for item in sublist]

        # Keep only the attributes the frontend needs.
        filtered_fields = [
            {
                'id': field['id'],
                'description': field['description'],
                'type': field['type'],
                'coverage': field.get('coverage', 0),
                'userCount': field.get('userCount', 0),
                'alphaCount': field.get('alphaCount', 0)
            }
            for field in datafields_list_flat
        ]

        return jsonify(filtered_fields)

    except Exception as e:
        return jsonify({'error': f'Failed to fetch data fields: {str(e)}'}), 500
1022
+
1023
@app.route('/api/dataset-description', methods=['GET'])
def get_dataset_description():
    """Fetch the free-text description of a single dataset from the BRAIN API."""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        brain_session = brain_sessions[session_id]['session']

        # Query parameters (defaults mirror the frontend).
        region = request.args.get('region', 'USA')
        delay = request.args.get('delay', '1')
        universe = request.args.get('universe', 'TOP3000')
        dataset_id = request.args.get('dataset_id', 'analyst10')

        url = (
            f"{BRAIN_API_BASE}/data-sets/{dataset_id}?"
            f"instrumentType=EQUITY&region={region}&delay={delay}&universe={universe}"
        )
        print(f"Getting dataset description from: {url}")

        response = brain_session.get(url)
        response.raise_for_status()

        description = response.json().get('description', 'No description available')
        print(f"Dataset description retrieved: {description[:100]}...")

        return jsonify({
            'success': True,
            'description': description,
            'dataset_id': dataset_id
        })

    except Exception as e:
        print(f"Dataset description error: {str(e)}")
        return jsonify({'error': f'Failed to get dataset description: {str(e)}'}), 500
1064
+
1065
@app.route('/api/status', methods=['GET'])
def check_status():
    """Report whether the caller's BRAIN session is still usable."""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'valid': False})

        session_info = brain_sessions[session_id]

        # Expire sessions older than 24 hours and drop the record.
        if time.time() - session_info['timestamp'] > 86400:
            del brain_sessions[session_id]
            return jsonify({'valid': False})

        # A session awaiting biometric confirmation is not yet valid.
        if session_info.get('biometric_pending'):
            return jsonify({
                'valid': False,
                'biometric_pending': True,
                'username': session_info['username'],
                'message': 'Biometric authentication pending'
            })

        return jsonify({
            'valid': True,
            'username': session_info['username']
        })

    except Exception as e:
        return jsonify({'error': f'Status check failed: {str(e)}'}), 500
1095
+
1096
@app.route('/api/logout', methods=['POST'])
def logout():
    """Drop the server-side BRAIN session and clear the cookie-backed reference."""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')

        # Remove the in-memory session record, if any.
        if session_id and session_id in brain_sessions:
            del brain_sessions[session_id]

        # Clear the Flask session reference as well (no-op when absent).
        flask_session.pop('brain_session_id', None)

        return jsonify({'success': True, 'message': 'Logged out successfully'})

    except Exception as e:
        return jsonify({'error': f'Logout failed: {str(e)}'}), 500
1111
+
1112
@app.route('/api/test-expression', methods=['POST'])
def test_expression():
    """Test an expression using BRAIN API simulation.

    Accepts a JSON simulation payload, fills in missing type/settings with
    defaults, validates neutralization against the session's cached options,
    then POSTs to the BRAIN /simulations endpoint and reports the outcome.
    Responses always carry success/status/message/full_response keys.
    """
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # Get the simulation data from request
        simulation_data = request.get_json()

        # Ensure required fields are present
        if 'type' not in simulation_data:
            simulation_data['type'] = 'REGULAR'

        # Ensure settings have required fields
        if 'settings' not in simulation_data:
            simulation_data['settings'] = {}

        # Set default values for missing settings
        default_settings = {
            'instrumentType': 'EQUITY',
            'region': 'USA',
            'universe': 'TOP3000',
            'delay': 1,
            'decay': 15,
            'neutralization': 'SUBINDUSTRY',
            'truncation': 0.08,
            'pasteurization': 'ON',
            'testPeriod': 'P1Y6M',
            'unitHandling': 'VERIFY',
            'nanHandling': 'OFF',
            'language': 'FASTEXPR',
            'visualization': False
        }

        # Only fill gaps: caller-supplied settings always win over defaults.
        for key, value in default_settings.items():
            if key not in simulation_data['settings']:
                simulation_data['settings'][key] = value

        # Convert string boolean values to actual boolean
        # (the frontend may send 'true'/'false' strings for visualization).
        if isinstance(simulation_data['settings'].get('visualization'), str):
            viz_value = simulation_data['settings']['visualization'].lower()
            simulation_data['settings']['visualization'] = viz_value == 'true'

        # Validate settings against cached options
        # (options are cached by /api/simulation-options; skip if not cached).
        valid_options = session_info.get('options')
        if valid_options:
            settings = simulation_data['settings']
            inst_type = settings.get('instrumentType', 'EQUITY')
            region = settings.get('region')
            neut = settings.get('neutralization')

            # Check if this specific neutralization is allowed for this region
            allowed_neuts = valid_options.get(inst_type, {}).get(region, {}).get('neutralizations', [])

            if neut and allowed_neuts and neut not in allowed_neuts:
                print(f"Warning: {neut} is invalid for {region}. Auto-correcting.")
                # Auto-correct to the first valid one if available
                if allowed_neuts:
                    print(f"Auto-correcting neutralization to {allowed_neuts[0]}")
                    settings['neutralization'] = allowed_neuts[0]
                else:
                    del settings['neutralization']

        # Send simulation request (following notebook pattern)
        try:
            message = {}
            simulation_response = brain_session.post(f'{BRAIN_API_BASE}/simulations', json=simulation_data)

            # Check if we got a Location header (following notebook pattern)
            # BRAIN returns the simulation's status URL in the Location header.
            if 'Location' in simulation_response.headers:
                # Follow the location to get the actual status
                message = brain_session.get(simulation_response.headers['Location']).json()

                # Check if simulation is running or completed
                # ('progress' is only present while the simulation is in flight).
                if 'progress' in message.keys():
                    info_to_print = "Simulation is running"
                    return jsonify({
                        'success': True,
                        'status': 'RUNNING',
                        'message': info_to_print,
                        'full_response': message
                    })
                else:
                    # Return the full message as in notebook
                    return jsonify({
                        'success': message.get('status') != 'ERROR',
                        'status': message.get('status', 'UNKNOWN'),
                        'message': str(message),
                        'full_response': message
                    })
            else:
                # Try to get error from response body (following notebook pattern)
                try:
                    message = simulation_response.json()
                    return jsonify({
                        'success': False,
                        'status': 'ERROR',
                        'message': str(message),
                        'full_response': message
                    })
                except:
                    # Response body was not JSON; report a generic failure.
                    return jsonify({
                        'success': False,
                        'status': 'ERROR',
                        'message': 'web Connection Error',
                        'full_response': {}
                    })

        except Exception as e:
            # Network/transport failure talking to BRAIN; surface as ERROR.
            return jsonify({
                'success': False,
                'status': 'ERROR',
                'message': 'web Connection Error',
                'full_response': {'error': str(e)}
            })

    except Exception as e:
        import traceback
        return jsonify({
            'success': False,
            'status': 'ERROR',
            'message': f'Test expression failed: {str(e)}',
            'full_response': {'error': str(e), 'traceback': traceback.format_exc()}
        }), 500
1241
+
1242
@app.route('/api/test-operators', methods=['GET'])
def test_operators():
    """Debug endpoint: summarise the raw shape of the BRAIN /operators response."""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        brain_session = brain_sessions[session_id]['session']

        response = brain_session.get(f'{BRAIN_API_BASE}/operators')
        response.raise_for_status()
        data = response.json()

        is_list = isinstance(data, list)
        is_dict = isinstance(data, dict)

        # Sample a few items rather than dumping the whole payload.
        if is_list:
            sample = data[:3]
        elif is_dict:
            sample = data.get('results', [])[:3]
        else:
            sample = None

        return jsonify({
            'type': str(type(data)),
            'is_list': is_list,
            'is_dict': is_dict,
            'length': len(data) if is_list else None,
            'keys': list(data.keys()) if is_dict else None,
            'count_key': data.get('count') if is_dict else None,
            'first_few_items': sample
        })

    except Exception as e:
        return jsonify({'error': f'Test failed: {str(e)}'}), 500
1274
+
1275
# Import blueprints
try:
    from blueprints import idea_house_bp, paper_analysis_bp, feature_engineering_bp, inspiration_house_bp
    print("📦 Blueprints imported successfully!")
    _blueprints_imported = True
except ImportError as e:
    print(f"❌ Failed to import blueprints: {e}")
    print("Some features may not be available.")
    _blueprints_imported = False

# Register blueprints
# Bug fix: registration previously ran unconditionally, so a failed import
# above crashed startup with a NameError instead of degrading gracefully.
if _blueprints_imported:
    app.register_blueprint(idea_house_bp, url_prefix='/idea-house')
    app.register_blueprint(paper_analysis_bp, url_prefix='/paper-analysis')
    app.register_blueprint(feature_engineering_bp, url_prefix='/feature-engineering')
    app.register_blueprint(inspiration_house_bp, url_prefix='/inspiration-house')

    print("🔧 All blueprints registered successfully!")
    print(" - Idea House: /idea-house")
    print(" - Paper Analysis: /paper-analysis")
    print(" - Feature Engineering: /feature-engineering")
    print(" - Inspiration House: /inspiration-house")

# Template Management Routes
# Get the directory where this script is located for templates
script_dir = os.path.dirname(os.path.abspath(__file__))
TEMPLATES_DIR = os.path.join(script_dir, 'custom_templates')

# Ensure templates directory exists
if not os.path.exists(TEMPLATES_DIR):
    os.makedirs(TEMPLATES_DIR)
    print(f"📁 Created templates directory: {TEMPLATES_DIR}")
else:
    print(f"📁 Templates directory ready: {TEMPLATES_DIR}")

print("✅ BRAIN Expression Template Decoder fully initialized!")
print("🎯 Ready to process templates and integrate with BRAIN API!")
1309
+
1310
@app.route('/api/templates', methods=['GET'])
def get_templates():
    """Return every saved custom template (empty list when none exist)."""
    try:
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')

        # The template store is a single JSON file; absent file means no templates.
        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                return jsonify(json.load(f))
        return jsonify([])
    except Exception as e:
        return jsonify({'error': f'Error loading templates: {str(e)}'}), 500
1324
+
1325
@app.route('/api/templates', methods=['POST'])
def save_template():
    """Create a custom template, or update the existing one with the same name."""
    try:
        payload = request.get_json()
        name = payload.get('name', '').strip()
        description = payload.get('description', '').strip()
        expression = payload.get('expression', '').strip()
        template_configurations = payload.get('templateConfigurations', {})

        if not name or not expression:
            return jsonify({'error': 'Name and expression are required'}), 400

        # Load the current template store (a single JSON file).
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        templates = []
        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                templates = json.load(f)

        record = {
            'name': name,
            'description': description,
            'expression': expression,
            'templateConfigurations': template_configurations,
            'createdAt': datetime.now().isoformat()
        }

        # A template with this name may already exist; if so, replace it in place.
        match_index = next((i for i, t in enumerate(templates) if t['name'] == name), None)

        if match_index is None:
            templates.append(record)
            message = f'Template "{name}" saved successfully'
        else:
            # Preserve the original creation time and stamp the update time.
            if 'createdAt' in templates[match_index]:
                record['createdAt'] = templates[match_index]['createdAt']
            record['updatedAt'] = datetime.now().isoformat()
            templates[match_index] = record
            message = f'Template "{name}" updated successfully'

        with open(templates_file, 'w', encoding='utf-8') as f:
            json.dump(templates, f, indent=2, ensure_ascii=False)

        return jsonify({'success': True, 'message': message})

    except Exception as e:
        return jsonify({'error': f'Error saving template: {str(e)}'}), 500
1377
+
1378
@app.route('/api/templates/<int:template_id>', methods=['DELETE'])
def delete_template(template_id):
    """Delete the template at positional index *template_id* in the store."""
    try:
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        templates = []
        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                templates = json.load(f)

        # The id is an index into the stored list, not a stable identifier.
        if not (0 <= template_id < len(templates)):
            return jsonify({'error': 'Template not found'}), 404

        removed = templates.pop(template_id)

        with open(templates_file, 'w', encoding='utf-8') as f:
            json.dump(templates, f, indent=2, ensure_ascii=False)

        return jsonify({'success': True, 'message': f'Template "{removed["name"]}" deleted successfully'})

    except Exception as e:
        return jsonify({'error': f'Error deleting template: {str(e)}'}), 500
1402
+
1403
@app.route('/api/templates/export', methods=['GET'])
def export_templates():
    """Export the full template list as a JSON document."""
    try:
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')

        # No store yet means nothing to export.
        if not os.path.exists(templates_file):
            return jsonify([])

        with open(templates_file, 'r', encoding='utf-8') as f:
            return jsonify(json.load(f))

    except Exception as e:
        return jsonify({'error': f'Error exporting templates: {str(e)}'}), 500
1418
+
1419
@app.route('/api/templates/import', methods=['POST'])
def import_templates():
    """Import templates from a JSON payload, optionally overwriting duplicates."""
    try:
        payload = request.get_json()
        incoming = payload.get('templates', [])
        overwrite = payload.get('overwrite', False)

        if not isinstance(incoming, list):
            return jsonify({'error': 'Invalid template format'}), 400

        # Keep only well-formed entries: dicts with non-blank name/expression.
        valid_templates = []
        for entry in incoming:
            if not isinstance(entry, dict):
                continue
            if 'name' not in entry or 'expression' not in entry:
                continue
            if not entry['name'].strip() or not entry['expression'].strip():
                continue
            valid_templates.append({
                'name': entry['name'].strip(),
                'description': entry.get('description', '').strip(),
                'expression': entry['expression'].strip(),
                'templateConfigurations': entry.get('templateConfigurations', {}),
                'createdAt': entry.get('createdAt', datetime.now().isoformat())
            })

        if not valid_templates:
            return jsonify({'error': 'No valid templates found'}), 400

        # Load the existing store.
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        existing_templates = []
        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                existing_templates = json.load(f)

        # Split imports into brand-new names and duplicates of stored ones;
        # duplicates are replaced in place only when overwrite was requested.
        duplicates = []
        new_templates = []
        for tpl in valid_templates:
            idx = next((i for i, t in enumerate(existing_templates) if t['name'] == tpl['name']), None)
            if idx is None:
                new_templates.append(tpl)
            else:
                duplicates.append(tpl['name'])
                if overwrite:
                    existing_templates[idx] = tpl

        existing_templates.extend(new_templates)

        with open(templates_file, 'w', encoding='utf-8') as f:
            json.dump(existing_templates, f, indent=2, ensure_ascii=False)

        return jsonify({
            'success': True,
            'imported': len(new_templates),
            'duplicates': duplicates,
            'overwritten': len(duplicates) if overwrite else 0
        })

    except Exception as e:
        return jsonify({'error': f'Error importing templates: {str(e)}'}), 500
1487
+
1488
@app.route('/api/run-simulator', methods=['POST'])
def run_simulator():
    """Run the simulator_wqb.py script"""
    try:
        import subprocess
        import threading

        # Locate simulator/simulator_wqb.py next to this file.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        work_dir = os.path.join(base_dir, 'simulator')
        target = os.path.join(work_dir, 'simulator_wqb.py')

        if not os.path.exists(target):
            return jsonify({'error': 'simulator_wqb.py not found in simulator folder'}), 404

        def launch():
            """Spawn the script in a fresh terminal for the current platform."""
            try:
                if os.name == 'nt':
                    # Windows: keep the console open with `cmd /k`.
                    subprocess.Popen(['cmd', '/k', 'python', 'simulator_wqb.py'],
                                     cwd=work_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                    return
                if sys.platform == 'darwin':
                    # macOS: drive Terminal.app via AppleScript.
                    osa = f'''
                    tell application "Terminal"
                        do script "cd '{work_dir}' && python3 simulator_wqb.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', osa])
                    return
                # Linux: probe common terminal emulators in order.
                for term in ('gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator'):
                    try:
                        if term == 'gnome-terminal':
                            subprocess.Popen([term, '--working-directory', work_dir,
                                              '--', 'python3', 'simulator_wqb.py'])
                        else:
                            subprocess.Popen([term, '-e',
                                              f'cd "{work_dir}" && python3 simulator_wqb.py'])
                        return
                    except FileNotFoundError:
                        continue
                # No emulator available: run headless as a last resort.
                print("Warning: No terminal emulator found, running in background")
                subprocess.Popen([sys.executable, 'simulator_wqb.py'], cwd=work_dir)
            except Exception as e:
                print(f"Error running simulator: {e}")

        worker = threading.Thread(target=launch)
        worker.daemon = True
        worker.start()

        return jsonify({
            'success': True,
            'message': 'Simulator script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to run simulator: {str(e)}'}), 500
1555
+
1556
@app.route('/api/open-submitter', methods=['POST'])
def open_submitter():
    """Run the alpha_submitter.py script"""
    try:
        import subprocess
        import threading

        # Locate simulator/alpha_submitter.py next to this file.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        work_dir = os.path.join(base_dir, 'simulator')
        target = os.path.join(work_dir, 'alpha_submitter.py')

        if not os.path.exists(target):
            return jsonify({'error': 'alpha_submitter.py not found in simulator folder'}), 404

        def launch():
            """Spawn the script in a fresh terminal for the current platform."""
            try:
                if os.name == 'nt':
                    # Windows: keep the console open with `cmd /k`.
                    subprocess.Popen(['cmd', '/k', 'python', 'alpha_submitter.py'],
                                     cwd=work_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                    return
                if sys.platform == 'darwin':
                    # macOS: drive Terminal.app via AppleScript.
                    osa = f'''
                    tell application "Terminal"
                        do script "cd '{work_dir}' && python3 alpha_submitter.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', osa])
                    return
                # Linux: probe common terminal emulators in order.
                for term in ('gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator'):
                    try:
                        if term == 'gnome-terminal':
                            subprocess.Popen([term, '--working-directory', work_dir,
                                              '--', 'python3', 'alpha_submitter.py'])
                        else:
                            subprocess.Popen([term, '-e',
                                              f'cd "{work_dir}" && python3 alpha_submitter.py'])
                        return
                    except FileNotFoundError:
                        continue
                # No emulator available: run headless as a last resort.
                print("Warning: No terminal emulator found, running in background")
                subprocess.Popen([sys.executable, 'alpha_submitter.py'], cwd=work_dir)
            except Exception as e:
                print(f"Error running submitter: {e}")

        worker = threading.Thread(target=launch)
        worker.daemon = True
        worker.start()

        return jsonify({
            'success': True,
            'message': 'Alpha submitter script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to open submitter: {str(e)}'}), 500
1623
+
1624
@app.route('/api/open-hk-simulator', methods=['POST'])
def open_hk_simulator():
    """Run the autosimulator.py script from hkSimulator folder"""
    try:
        import subprocess
        import threading

        # Locate hkSimulator/autosimulator.py next to this file.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        work_dir = os.path.join(base_dir, 'hkSimulator')
        target = os.path.join(work_dir, 'autosimulator.py')

        if not os.path.exists(target):
            return jsonify({'error': 'autosimulator.py not found in hkSimulator folder'}), 404

        def launch():
            """Spawn the script in a fresh terminal for the current platform."""
            try:
                if os.name == 'nt':
                    # Windows: keep the console open with `cmd /k`.
                    subprocess.Popen(['cmd', '/k', 'python', 'autosimulator.py'],
                                     cwd=work_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                    return
                if sys.platform == 'darwin':
                    # macOS: drive Terminal.app via AppleScript.
                    osa = f'''
                    tell application "Terminal"
                        do script "cd '{work_dir}' && python3 autosimulator.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', osa])
                    return
                # Linux: probe common terminal emulators in order.
                for term in ('gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator'):
                    try:
                        if term == 'gnome-terminal':
                            subprocess.Popen([term, '--working-directory', work_dir,
                                              '--', 'python3', 'autosimulator.py'])
                        else:
                            subprocess.Popen([term, '-e',
                                              f'cd "{work_dir}" && python3 autosimulator.py'])
                        return
                    except FileNotFoundError:
                        continue
                # No emulator available: run headless as a last resort.
                print("Warning: No terminal emulator found, running in background")
                subprocess.Popen([sys.executable, 'autosimulator.py'], cwd=work_dir)
            except Exception as e:
                print(f"Error running HK simulator: {e}")

        worker = threading.Thread(target=launch)
        worker.daemon = True
        worker.start()

        return jsonify({
            'success': True,
            'message': 'HK simulator script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to open HK simulator: {str(e)}'}), 500
1691
+
1692
@app.route('/api/open-transformer', methods=['POST'])
def open_transformer():
    """Run the Transformer.py script from the Tranformer folder in a new terminal.

    Fixed for consistency with the other launcher endpoints
    (/api/run-simulator, /api/open-submitter, /api/open-hk-simulator):
    - added the macOS (Terminal.app via AppleScript) branch that was missing,
      so macOS no longer falls through to the Linux emulator probe;
    - replaced the non-standard 'terminal' emulator name with
      'x-terminal-emulator', matching the sibling handlers.
    """
    try:
        import subprocess
        import threading

        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        transformer_path = os.path.join(transformer_dir, 'Transformer.py')

        if not os.path.exists(transformer_path):
            return jsonify({'error': 'Transformer.py not found in Tranformer folder'}), 404

        def run_script():
            """Spawn Transformer.py in a fresh terminal for the current platform."""
            try:
                if os.name == 'nt':
                    # Windows: keep the console open with `cmd /k`.
                    subprocess.Popen(['cmd', '/k', 'python', 'Transformer.py'],
                                     cwd=transformer_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                elif sys.platform == 'darwin':
                    # macOS: drive Terminal.app via AppleScript.
                    script = f'''
                    tell application "Terminal"
                        do script "cd '{transformer_dir}' && python3 Transformer.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', script])
                else:
                    # Linux: probe common terminal emulators in order.
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', transformer_dir, '--', 'python3', 'Transformer.py'])
                            else:
                                subprocess.Popen([terminal, '-e', f'cd "{transformer_dir}" && python3 "Transformer.py"'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # No emulator available: run headless as a last resort.
                        subprocess.Popen([sys.executable, 'Transformer.py'], cwd=transformer_dir)
            except Exception as e:
                print(f"Error running Transformer: {e}")

        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({'success': True, 'message': 'Transformer script started in new terminal window'})

    except Exception as e:
        return jsonify({'error': f'Failed to open Transformer: {str(e)}'}), 500
1736
+
1737
+
1738
@app.route('/api/usage-doc', methods=['GET'])
def get_usage_doc():
    """Return usage.md as raw markdown text for in-app help display."""
    try:
        usage_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage.md')
        if not os.path.exists(usage_path):
            return jsonify({'success': False, 'error': 'usage.md not found'}), 404

        # Send the raw markdown; rendering happens client-side.
        with open(usage_path, 'r', encoding='utf-8') as f:
            markdown = f.read()

        return jsonify({'success': True, 'markdown': markdown})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
1753
+
1754
# Global task manager for Transformer Web.
# Maps task_id (uuid4 string) -> {'queue': queue.Queue of log lines,
# 'status': str, 'output_dir': str}; the worker thread later adds
# 'process' (subprocess.Popen) and 'return_code' (int) keys.
transformer_tasks = {}
1756
+
1757
@app.route('/transformer-web')
def transformer_web():
    """Render the Transformer Web UI page."""
    return render_template('transformer_web.html')
1760
+
1761
@app.route('/api/test-llm-connection', methods=['POST'])
def test_llm_connection():
    """Verify the supplied LLM credentials by issuing a tiny chat completion.

    Expects a JSON body with 'apiKey', 'baseUrl' and 'model'.
    Returns {'success': True} on success, {'success': False, 'error': ...} on
    any failure (network, auth, bad model, ...).
    """
    # BUGFIX: request.json is None when no JSON body was sent; the original
    # would raise AttributeError on data.get(). Fall back to an empty dict.
    data = request.json or {}
    api_key = data.get('apiKey')
    base_url = data.get('baseUrl')
    model = data.get('model')

    try:
        import openai
        client = openai.OpenAI(api_key=api_key, base_url=base_url)
        # Cheapest possible round-trip: a five-token completion.
        # (The response itself is not needed, only that the call succeeds.)
        client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=5
        )
        return jsonify({'success': True})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})
1780
+
1781
@app.route('/api/get-default-template-summary')
def get_default_template_summary():
    """Extract the default template_summary string from Tranformer/Transformer.py."""
    try:
        import re

        base_dir = os.path.dirname(os.path.abspath(__file__))
        source_path = os.path.join(base_dir, 'Tranformer', 'Transformer.py')

        # Read the script as plain text so it is never imported
        # (importing would trigger its side effects).
        with open(source_path, 'r', encoding='utf-8') as src:
            source_text = src.read()

        # Pull the triple-quoted template_summary assignment out of the source.
        found = re.search(r'template_summary\s*=\s*"""(.*?)"""', source_text, re.DOTALL)
        if not found:
            return jsonify({'success': False, 'error': 'Could not find template_summary in Transformer.py'})
        return jsonify({'success': True, 'summary': found.group(1)})

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})
1802
+
1803
@app.route('/api/run-transformer-web', methods=['POST'])
def run_transformer_web():
    """Start a Transformer run as a background subprocess and return a task id.

    Writes a per-task config JSON (and optional template-summary file),
    launches Tranformer/Transformer.py with it, and streams the subprocess
    stdout into a queue that /api/stream-transformer-logs/<task_id> drains.

    Returns {'success': True, 'taskId': <uuid>}.
    """
    data = request.json
    task_id = str(uuid.uuid4())

    script_dir = os.path.dirname(os.path.abspath(__file__))
    transformer_dir = os.path.join(script_dir, 'Tranformer')

    # Persist the pasted template summary (if any) so the subprocess can read it.
    template_summary_content = data.get('template_summary_content')
    template_summary_path = None
    if template_summary_content:
        template_summary_path = os.path.join(transformer_dir, f'temp_summary_{task_id}.txt')
        with open(template_summary_path, 'w', encoding='utf-8') as f:
            f.write(template_summary_content)

    # Per-task config consumed by Transformer.py (passed as its argv[1]).
    config = {
        "LLM_model_name": data.get('LLM_model_name'),
        "LLM_API_KEY": data.get('LLM_API_KEY'),
        "llm_base_url": data.get('llm_base_url'),
        "username": data.get('username'),
        "password": data.get('password'),
        "template_summary_path": template_summary_path,
        "alpha_id": data.get('alpha_id'),
        "top_n_datafield": int(data.get('top_n_datafield', 50)),
        "user_region": data.get('region'),
        "user_universe": data.get('universe'),
        "user_delay": int(data.get('delay')) if data.get('delay') else None,
        "user_category": data.get('category'),
        "user_data_type": data.get('data_type', 'MATRIX')
    }

    config_path = os.path.join(transformer_dir, f'config_{task_id}.json')
    with open(config_path, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=4)

    transformer_script = os.path.join(transformer_dir, 'Transformer.py')

    # Queue used to hand log lines from the worker thread to the SSE endpoint.
    log_queue = queue.Queue()

    def run_process():
        try:
            # Force UTF-8 encoding for the subprocess output to avoid
            # UnicodeEncodeError on Windows.
            env = os.environ.copy()
            env["PYTHONIOENCODING"] = "utf-8"

            process = subprocess.Popen(
                [sys.executable, '-u', transformer_script, config_path],
                cwd=transformer_dir,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                encoding='utf-8',
                errors='replace',
                env=env
            )
            transformer_tasks[task_id]['process'] = process

            for line in iter(process.stdout.readline, ''):
                log_queue.put(line)

            process.stdout.close()
            process.wait()
            transformer_tasks[task_id]['return_code'] = process.returncode
        except Exception as e:
            log_queue.put(f"Error running process: {str(e)}")
            transformer_tasks[task_id]['return_code'] = 1
        finally:
            log_queue.put(None)  # Signal end of stream to the log consumer.
            # Best-effort cleanup of the per-task temp files.
            try:
                if os.path.exists(config_path):
                    os.remove(config_path)
                if template_summary_path and os.path.exists(template_summary_path):
                    os.remove(template_summary_path)
            except OSError:
                pass

    # BUGFIX: register the task entry *before* starting the worker thread.
    # Previously the dict was assigned after thread.start(), so a fast-starting
    # worker could raise KeyError on transformer_tasks[task_id], and the later
    # assignment could clobber the 'process' key the worker had already set.
    transformer_tasks[task_id] = {
        'queue': log_queue,
        'status': 'running',
        'output_dir': os.path.join(transformer_dir, 'output')
    }

    thread = threading.Thread(target=run_process)
    thread.start()

    return jsonify({'success': True, 'taskId': task_id})
1898
+
1899
@app.route('/api/transformer/login-and-fetch-options', methods=['POST'])
def transformer_login_and_fetch_options():
    """Authenticate against the BRAIN API and return simulation options.

    Expects JSON body with 'username' and 'password'. On success returns
    {'success': True,
     'options': {region: {delay_str: [universes]}},   # EQUITY only
     'categories': [...]}                              # data categories
    """
    data = request.json
    username = data.get('username')
    password = data.get('password')

    if not username or not password:
        return jsonify({'success': False, 'error': 'Username and password are required'})

    try:
        # Add Tranformer to path to import ace_lib
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import SingleSession, get_instrument_type_region_delay

        # Use SingleSession for consistency with ace_lib; force
        # re-authentication with the credentials supplied in this request.
        session = SingleSession()
        session.auth = (username, password)

        brain_api_url = "https://api.worldquantbrain.com"
        response = session.post(brain_api_url + "/authentication")

        if response.status_code == 201:
            pass  # Auth success
        elif response.status_code == 401:
            return jsonify({'success': False, 'error': 'Authentication failed: Invalid credentials'})
        else:
            return jsonify({'success': False, 'error': f'Authentication failed: {response.status_code} {response.text}'})

        # Now fetch the available instrument/region/delay/universe combinations.
        df = get_instrument_type_region_delay(session)

        # Fetch categories; the endpoint may return a plain list or a paged dict.
        # (Duplicate brain_api_url re-assignment removed; the value is unchanged.)
        categories_resp = session.get(brain_api_url + "/data-categories")
        categories = []
        if categories_resp.status_code == 200:
            categories_data = categories_resp.json()
            if isinstance(categories_data, list):
                categories = categories_data
            elif isinstance(categories_data, dict):
                categories = categories_data.get('results', [])

        # Convert DataFrame to Region -> Delay -> [Universe] for the frontend.
        # We only care about EQUITY for now as per previous code.
        df_equity = df[df['InstrumentType'] == 'EQUITY']

        options = {}
        for _, row in df_equity.iterrows():
            region = row['Region']
            delay = row['Delay']
            universes = row['Universe']  # This is a list

            if region not in options:
                options[region] = {}

            # JSON object keys must be strings, so stringify the delay.
            delay_str = str(delay)
            if delay_str not in options[region]:
                options[region][delay_str] = universes

        return jsonify({
            'success': True,
            'options': options,
            'categories': categories
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})
1975
+
1976
@app.route('/api/stream-transformer-logs/<task_id>')
def stream_transformer_logs(task_id):
    """Stream a Transformer task's log lines to the browser via Server-Sent Events.

    Each SSE 'data:' payload is JSON of the form
    {'status': 'running'|'completed'|'error', 'log': <line or ''>}.
    A None sentinel in the task's queue (pushed by the worker thread) marks
    end-of-stream.
    """
    def generate():
        if task_id not in transformer_tasks:
            yield f"data: {json.dumps({'status': 'error', 'log': 'Task not found'})}\n\n"
            return

        q = transformer_tasks[task_id]['queue']

        while True:
            try:
                # Block briefly so keep-alives can be emitted while idle.
                line = q.get(timeout=1)
                if line is None:
                    # Worker finished: report final status from the exit code.
                    return_code = transformer_tasks[task_id].get('return_code', 0)
                    status = 'completed' if return_code == 0 else 'error'
                    yield f"data: {json.dumps({'status': status, 'log': ''})}\n\n"
                    break
                yield f"data: {json.dumps({'status': status, 'log': ''})}\n\n" if False else f"data: {json.dumps({'status': 'running', 'log': line})}\n\n"
            except queue.Empty:
                # Check if process is still running
                if 'process' in transformer_tasks[task_id]:
                    proc = transformer_tasks[task_id]['process']
                    # Process exited and the queue drained without the sentinel
                    # being seen yet: finish the stream from the exit code.
                    if proc.poll() is not None and q.empty():
                        return_code = proc.returncode
                        status = 'completed' if return_code == 0 else 'error'
                        yield f"data: {json.dumps({'status': status, 'log': ''})}\n\n"
                        break
                yield f"data: {json.dumps({'status': 'running', 'log': ''})}\n\n"  # Keep alive

    return Response(stream_with_context(generate()), mimetype='text/event-stream')
2006
+
2007
@app.route('/api/download-transformer-result/<task_id>/<file_type>')
def download_transformer_result(task_id, file_type):
    """Download one of the Transformer output JSON files for a finished task."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    output_dir = os.path.join(script_dir, 'Tranformer', 'output')

    # Whitelist of downloadable artifacts keyed by the URL's file_type segment;
    # anything else is rejected (also prevents path traversal via file_type).
    filenames = {
        'candidates': 'Alpha_candidates.json',
        'success': 'Alpha_generated_expressions_success.json',
        'error': 'Alpha_generated_expressions_error.json',
    }
    filename = filenames.get(file_type)
    if filename is None:
        return "Invalid file type", 400

    return send_from_directory(output_dir, filename, as_attachment=True)
2023
+
2024
# --- 缘分一道桥 (Alpha Inspector) Routes ---

# Add '缘分一道桥' to sys.path to allow importing brain_alpha_inspector
yuanfen_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '缘分一道桥')
if yuanfen_dir not in sys.path:
    sys.path.append(yuanfen_dir)

try:
    import brain_alpha_inspector
except ImportError as e:
    # Degrade gracefully: the /api/yuanfen/ routes below check for None and
    # respond with 'Module not loaded' instead of crashing at import time.
    print(f"Warning: Could not import brain_alpha_inspector: {e}")
    brain_alpha_inspector = None
2036
+
2037
@app.route('/alpha_inspector')
def alpha_inspector_page():
    """Render the Alpha Inspector UI page."""
    return render_template('alpha_inspector.html')
2040
+
2041
@app.route('/api/yuanfen/login', methods=['POST'])
def yuanfen_login():
    """Log in to BRAIN via brain_alpha_inspector and cache the session.

    Returns {'success': True, 'session_id': <uuid>} on success; the frontend
    passes the session_id back to the other /api/yuanfen/ endpoints.
    """
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    data = request.json
    username = data.get('username')
    password = data.get('password')

    try:
        session = brain_alpha_inspector.brain_login(username, password)
        session_id = str(uuid.uuid4())
        # NOTE(review): this stores the raw session object, while
        # get_active_session() reads brain_sessions[session_id]['session'] —
        # confirm the two call sites agree on the stored shape.
        brain_sessions[session_id] = session
        return jsonify({'success': True, 'session_id': session_id})
    except Exception as e:
        return jsonify({'success': False, 'message': str(e)})
2057
+
2058
@app.route('/api/yuanfen/fetch_alphas', methods=['POST'])
def yuanfen_fetch_alphas():
    """Fetch and analyze alphas, streaming progress as newline-delimited JSON.

    Two modes: 'ids' (explicit alpha ids in 'alpha_ids') or 'date_range'
    (default, using 'start_date'/'end_date'). Each streamed line is one JSON
    object whose 'type' is 'progress', 'result' or 'error'.
    """
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    data = request.json
    session_id = data.get('session_id')
    mode = data.get('mode', 'date_range')

    session = brain_sessions.get(session_id)
    if not session:
        return jsonify({'success': False, 'message': 'Invalid session'})

    def generate():
        try:
            alphas = []
            if mode == 'ids':
                alpha_ids_str = data.get('alpha_ids', '')
                import re
                # Accept ids separated by commas, whitespace or newlines.
                alpha_ids = [x.strip() for x in re.split(r'[,\s\n]+', alpha_ids_str) if x.strip()]
                yield json.dumps({"type": "progress", "message": f"Fetching {len(alpha_ids)} alphas by ID..."}) + "\n"
                alphas = brain_alpha_inspector.fetch_alphas_by_ids(session, alpha_ids)
            else:
                start_date = data.get('start_date')
                end_date = data.get('end_date')
                yield json.dumps({"type": "progress", "message": f"Fetching alphas from {start_date} to {end_date}..."}) + "\n"
                alphas = brain_alpha_inspector.fetch_alphas_by_date_range(session, start_date, end_date)
            yield json.dumps({"type": "progress", "message": f"Found {len(alphas)} alphas. Fetching operators..."}) + "\n"

            # 2. Fetch Operators (needed for parsing)
            operators = brain_alpha_inspector.fetch_operators(session)

            # 2.5 Fetch Simulation Options (for validation)
            simulation_options = None
            if brain_alpha_inspector.get_instrument_type_region_delay:
                yield json.dumps({"type": "progress", "message": "Fetching simulation options..."}) + "\n"
                try:
                    simulation_options = brain_alpha_inspector.get_instrument_type_region_delay(session)
                except Exception as e:
                    # Options are optional; analysis proceeds without validation.
                    print(f"Error fetching simulation options: {e}")

            yield json.dumps({"type": "progress", "message": f"Analyzing {len(alphas)} alphas..."}) + "\n"

            # 3. Analyze each alpha
            analyzed_alphas = []
            for i, alpha in enumerate(alphas):
                alpha_id = alpha.get('id', 'Unknown')
                yield json.dumps({"type": "progress", "message": f"Processing alpha {i+1}/{len(alphas)}: {alpha_id}"}) + "\n"

                result = brain_alpha_inspector.get_alpha_variants(session, alpha, operators, simulation_options)
                # Keep only alphas that parsed cleanly and produced variants.
                if result['valid'] and result['variants']:
                    analyzed_alphas.append(result)

            yield json.dumps({"type": "result", "success": True, "alphas": analyzed_alphas}) + "\n"

        except Exception as e:
            print(f"Error in fetch_alphas: {e}")
            yield json.dumps({"type": "error", "message": str(e)}) + "\n"

    return Response(stream_with_context(generate()), mimetype='application/x-ndjson')
2118
+
2119
@app.route('/api/yuanfen/simulate', methods=['POST'])
def yuanfen_simulate():
    """Run a single simulation payload through brain_alpha_inspector."""
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    data = request.json
    # The full simulation payload; the alpha id is embedded in it already.
    payload = data.get('payload')

    session = brain_sessions.get(data.get('session_id'))
    if not session:
        return jsonify({'success': False, 'message': 'Invalid session'})

    try:
        ok, result_or_msg = brain_alpha_inspector.run_simulation_payload(session, payload)
    except Exception as e:
        return jsonify({'success': False, 'message': str(e)})

    if ok:
        return jsonify({'success': True, 'result': result_or_msg})
    return jsonify({'success': False, 'message': result_or_msg})
2143
+
2144
def process_options_dataframe(df):
    """
    Transforms the options DataFrame into a nested dictionary:
    {
        "EQUITY": {
            "USA": {
                "delays": [0, 1],
                "universes": ["TOP3000", ...],
                "neutralizations": ["MARKET", "INDUSTRY", ...]
            },
            "TWN": { ... }
        }
    }
    Returns {} when df is None or empty.
    """
    result = {}
    if df is None or df.empty:
        return result

    def _append_unique(bucket, value):
        # Preserve first-seen order while deduplicating.
        if value not in bucket:
            bucket.append(value)

    def _extend_unique(bucket, values):
        # Accept either a list of values or a single scalar string;
        # anything else (e.g. a missing column -> None) is ignored.
        if isinstance(values, list):
            for v in values:
                _append_unique(bucket, v)
        elif isinstance(values, str):
            _append_unique(bucket, values)

    for _, row in df.iterrows():
        inst = row.get('InstrumentType', 'EQUITY')
        region = row.get('Region')

        entry = result.setdefault(inst, {}).setdefault(region, {
            "delays": [],
            "universes": [],
            "neutralizations": []
        })

        delay = row.get('Delay')
        if delay is not None:
            _append_unique(entry['delays'], delay)

        _extend_unique(entry['universes'], row.get('Universe'))
        _extend_unique(entry['neutralizations'], row.get('Neutralization'))

    return result
2199
+
2200
def get_valid_simulation_options(session):
    """Fetch valid simulation options from BRAIN.

    Returns the nested dict produced by process_options_dataframe, or {} when
    the fetch fails or ace_lib is unavailable.
    """
    try:
        # NOTE(review): get_instrument_type_region_delay must be bound at
        # module scope for this truthiness check to work; if it was never
        # imported here, the resulting NameError is swallowed by the except
        # below and {} is returned — confirm the intended import path.
        if get_instrument_type_region_delay:
            print("Fetching simulation options using ace_lib...")
            df = get_instrument_type_region_delay(session)
            return process_options_dataframe(df)
        else:
            print("ace_lib not available, skipping options fetch")
            return {}
    except Exception as e:
        print(f"Error fetching options: {e}")
        return {}
2213
+
2214
+ # --- Inspiration Master Routes ---
2215
+
2216
def get_active_session():
    """Helper to get active session from header or SingleSession.

    Returns a session object, or None if no authenticated session is found.
    """
    # Check header first
    session_id = request.headers.get('Session-ID')
    if session_id and session_id in brain_sessions:
        # NOTE(review): expects brain_sessions values shaped {'session': ...};
        # yuanfen_login stores the session object directly — confirm which
        # writer populates the entries this path reads.
        return brain_sessions[session_id]['session']

    # Fallback to SingleSession
    script_dir = os.path.dirname(os.path.abspath(__file__))
    transformer_dir = os.path.join(script_dir, 'Tranformer')
    if transformer_dir not in sys.path:
        sys.path.append(transformer_dir)
    from ace_lib import SingleSession
    s = SingleSession()
    # Only treat the singleton as active if credentials are already attached.
    if hasattr(s, 'auth') and s.auth:
        return s
    return None
2233
+
2234
@app.route('/api/check_login', methods=['GET'])
def check_login():
    """Report whether an authenticated BRAIN session is currently available."""
    try:
        active = get_active_session()
        logged_in = bool(active)
    except Exception as e:
        print(f"Check login error: {e}")
        logged_in = False
    return jsonify({'logged_in': logged_in})
2245
+
2246
@app.route('/api/inspiration/options', methods=['GET'])
def inspiration_options():
    """Return InstrumentType -> Region -> {delays, universes} for the Inspiration UI."""
    try:
        # Use the same path logic as the main login
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import get_instrument_type_region_delay

        active = get_active_session()
        if not active:
            return jsonify({'error': 'Not logged in'}), 401

        df = get_instrument_type_region_delay(active)

        result = {}
        for _, row in df.iterrows():
            entry = result.setdefault(row['InstrumentType'], {}).setdefault(
                row['Region'], {"delays": [], "universes": []})

            # Deduplicate while keeping first-seen order.
            if row['Delay'] not in entry['delays']:
                entry['delays'].append(row['Delay'])

            universes = row['Universe']
            # Universe may be a single value or a list of values.
            candidates = universes if isinstance(universes, list) else [universes]
            for universe in candidates:
                if universe not in entry['universes']:
                    entry['universes'].append(universe)

        return jsonify(result)
    except Exception as e:
        return jsonify({'error': str(e)}), 500
2288
+
2289
@app.route('/api/inspiration/datasets', methods=['POST'])
def inspiration_datasets():
    """List datasets for a region/delay/universe, optionally filtered by a search term."""
    data = request.json
    region = data.get('region')
    delay = data.get('delay')
    universe = data.get('universe')
    search = data.get('search', '')

    try:
        # Use the same path logic as the main login
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import get_datasets

        active = get_active_session()
        if not active:
            return jsonify({'error': 'Not logged in'}), 401

        df = get_datasets(active, region=region, delay=int(delay), universe=universe)

        if search:
            needle = search.lower()

            def column_matches(column):
                # Case-insensitive substring match; NaN cells never match.
                return df[column].str.lower().str.contains(needle, na=False)

            df = df[column_matches('id') | column_matches('name') | column_matches('description')]

        # Return all results instead of limiting to 50
        # Use to_json to handle NaN values correctly (converts to null)
        json_str = df.to_json(orient='records', date_format='iso')
        return Response(json_str, mimetype='application/json')
    except Exception as e:
        return jsonify({'error': str(e)}), 500
2327
+
2328
@app.route('/api/inspiration/test_llm', methods=['POST'])
def inspiration_test_llm():
    """Check LLM connectivity: try listing models, then fall back to a 1-token chat call."""
    data = request.json
    api_key = data.get('apiKey')
    base_url = data.get('baseUrl')
    model = data.get('model')

    try:
        import openai
        client = openai.OpenAI(api_key=api_key, base_url=base_url)
        # Prefer the cheapest probe first: listing models costs no tokens.
        try:
            client.models.list()
            return jsonify({'success': True})
        except Exception:
            # Some gateways restrict models.list; fall back to a minimal completion.
            try:
                client.chat.completions.create(
                    model=model,
                    messages=[{"role": "user", "content": "hi"}],
                    max_tokens=1
                )
                return jsonify({'success': True})
            except Exception as e2:
                return jsonify({'success': False, 'error': str(e2)})

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})
2357
+
2358
@app.route('/api/inspiration/generate', methods=['POST'])
def inspiration_generate():
    """Generate Alpha template ideas for a dataset via an LLM.

    Builds a prompt from the available REGULAR operators and the dataset's
    datafields, then calls the configured chat model. On context-length style
    failures it retries with progressively smaller operator/datafield subsets
    (halving each time), up to max_retries extra attempts.
    """
    data = request.json
    api_key = data.get('apiKey')
    base_url = data.get('baseUrl')
    model = data.get('model')
    region = data.get('region')
    delay = data.get('delay')
    universe = data.get('universe')
    dataset_id = data.get('datasetId')

    try:
        import openai
        # Use the same path logic as the main login
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import get_operators, get_datafields

        s = get_active_session()
        if not s:
            return jsonify({'error': 'Not logged in'}), 401

        operators_df = get_operators(s)
        operators_df = operators_df[operators_df['scope'] == 'REGULAR']

        datafields_df = get_datafields(s, region=region, delay=int(delay), universe=universe, dataset_id=dataset_id, data_type="ALL")

        # (Removed a redundant recomputation of script_dir; same value as above.)
        prompt_path = os.path.join(script_dir, "give_me_idea", "what_is_Alpha_template.md")
        try:
            with open(prompt_path, "r", encoding="utf-8") as f:
                system_prompt = f.read()
        except (OSError, UnicodeDecodeError):
            # BUGFIX: was a bare `except:` that swallowed everything, including
            # KeyboardInterrupt. Only a missing/unreadable prompt file should
            # trigger the generic fallback prompt.
            system_prompt = "You are a helpful assistant for generating Alpha templates."

        client = openai.OpenAI(api_key=api_key, base_url=base_url)

        max_retries = 5
        n_ops = len(operators_df)
        n_fields = len(datafields_df)

        last_error = None

        for attempt in range(max_retries + 1):
            ops_subset = operators_df.head(n_ops)
            fields_subset = datafields_df.head(n_fields)

            operators_info = ops_subset[['name', 'category', 'description']].to_string()
            datafields_info = fields_subset[['id', 'description', 'subcategory']].to_string()

            user_prompt = f"""
Here is the information about available operators (first {n_ops} rows):
{operators_info}

Here is the information about the dataset '{dataset_id}' (first {n_fields} rows):
{datafields_info}

Please come up with several Alpha templates based on this information.
Specify the AI answer in Chinese.
"""
            try:
                completion = client.chat.completions.create(
                    model=model,
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": user_prompt}
                    ],
                    temperature=0.3,
                )
                return jsonify({'result': completion.choices[0].message.content})

            except Exception as e:
                error_msg = str(e)
                last_error = error_msg
                # Heuristic: shrink the prompt on context/token-limit style errors;
                # any other error aborts the retry loop immediately.
                if "token limit" in error_msg or "context_length_exceeded" in error_msg or "400" in error_msg:
                    n_ops = max(1, n_ops // 2)
                    n_fields = max(1, n_fields // 2)
                    if n_ops == 1 and n_fields == 1:
                        break
                else:
                    break

        return jsonify({'error': f"Failed after retries. Last error: {last_error}"})

    except Exception as e:
        return jsonify({'error': str(e)}), 500
2447
+
2448
if __name__ == '__main__':
    print("Starting BRAIN Expression Template Decoder Web Application...")
    print("Starting in safe mode: binding only to localhost (127.0.0.1)")
    # Allow an explicit override only via an environment variable (not recommended)
    bind_host = os.environ.get('BRAIN_BIND_HOST', '127.0.0.1')
    if bind_host not in ('127.0.0.1', 'localhost'):
        # Refuse to expose the app beyond the local machine.
        print(f"Refusing to bind to non-localhost address: {bind_host}")
        print("To override (not recommended), set environment variable BRAIN_BIND_HOST")
        sys.exit(1)

    print(f"Application will run on http://{bind_host}:5000")
    print("BRAIN API integration included - no separate proxy needed!")
    app.run(debug=False, host=bind_host, port=5000)