cnhkmcp-2.1.3-py3-none-any.whl → cnhkmcp-2.1.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnhkmcp/__init__.py +126 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/README.md +38 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/ace.log +0 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/config.json +6 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/ace_lib.py +1514 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_datasets.py +157 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_documentation.py +132 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_operators.py +99 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/helpful_functions.py +180 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/icon.ico +0 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/icon.png +0 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_10_Steps_to_Start_on_BRAIN_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Intermediate_Pack_-_Improve_your_Alpha_2_2_documentation.json +174 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Intermediate_Pack_-_Understand_Results_1_2_documentation.json +167 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Introduction_to_Alphas_documentation.json +145 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Introduction_to_BRAIN_Expression_Language_documentation.json +107 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_WorldQuant_Challenge_documentation.json +56 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001__Read_this_First_-_Starter_Pack_documentation.json +404 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002_How_to_choose_the_Simulation_Settings_documentation.json +268 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002_Simulate_your_first_Alpha_documentation.json +88 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Beginners_documentation.json +254 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Bronze_Users_documentation.json +114 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Silver_Users_documentation.json +79 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__How_BRAIN_works_documentation.json +184 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/003_Clear_these_tests_before_submitting_an_Alpha_documentation.json +388 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/003_Parameters_in_the_Simulation_results_documentation.json +243 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Group_Data_Fields_documentation.json +69 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_How_to_use_the_Data_Explorer_documentation.json +142 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Model77_dataset_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Sentiment1_dataset_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Understanding_Data_in_BRAIN_Key_Concepts_and_Tips_documentation.json +182 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Vector_Data_Fields_documentation.json +30 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Crowding_Risk-Neutralized_Alphas_documentation.json +64 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_D0_documentation.json +66 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Double_Neutralization_documentation.json +53 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Fast_D1_Documentation_documentation.json +304 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Investability_Constrained_Metrics_documentation.json +129 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Must-read_posts_How_to_improve_your_Alphas_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Neutralization_documentation.json +29 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_RAM_Risk-Neutralized_Alphas_documentation.json +64 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Risk_Neutralization_Default_setting_documentation.json +75 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Risk_Neutralized_Alphas_documentation.json +171 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Statistical_Risk-Neutralized_Alphas_documentation.json +51 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_EUR_TOP2500_Universe_documentation.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_GLB_TOPDIV3000_Universe_documentation.json +48 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_Started_China_Research_for_Consultants_Gold_documentation.json +142 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_started_on_Illiquid_Universes_Gold_documentation.json +46 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_started_with_USA_TOPSP500_universe_Gold_documentation.json +62 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Global_Alphas_Gold_documentation.json +66 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_India_Alphas_documentation.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Dos_and_Don_ts_documentation.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Features_documentation.json +239 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Simulation_Features_documentation.json +149 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Submission_Tests_documentation.json +363 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Finding_Consultant_Alphas_documentation.json +333 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Power_Pool_Alphas_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Research_Advisory_Program_documentation.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Starting_Guide_for_Research_Consultants_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Visualization_Tool_documentation.json +99 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Your_Advisor_-_Kunqi_Jiang_documentation.json +53 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007__Brain_Genius_documentation.json +288 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007__Single_Dataset_Alphas_documentation.json +41 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Advisory_Theme_Calendar_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Multiplier_Rules_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Overview_of_Themes_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Theme_Calendar_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Combo_Expression_documentation.json +272 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Global_SuperAlphas_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Helpful_Tips_documentation.json +58 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Selection_Expression_documentation.json +1546 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_SuperAlpha_Operators_documentation.json +890 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_SuperAlpha_Results_documentation.json +83 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_What_is_a_SuperAlpha_documentation.json +261 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010_BRAIN_API_documentation.json +515 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010_Documentation_for_ACE_API_Library_Gold_documentation.json +27 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010__Understanding_simulation_limits_documentation.json +210 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/arithmetic_operators.json +209 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/cross_sectional_operators.json +98 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/group_operators.json +121 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/logical_operators.json +145 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/reduce_operators.json +156 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/special_operators.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/test.txt +1 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/time_series_operators.json +386 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/transformational_operators.json +61 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/vector_operators.json +38 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/main.py +576 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/process_knowledge_base.py +281 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/rag_engine.py +408 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/requirements.txt +7 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/run.bat +3 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/_manifest.json +302 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/_meta.json +1 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/chroma.sqlite3 +0 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242//321/211/320/266/320/246/321/206/320/274/320/261/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +265 -0
- cnhkmcp/untracked/APP/.gitignore +32 -0
- cnhkmcp/untracked/APP/MODULAR_STRUCTURE.md +112 -0
- cnhkmcp/untracked/APP/README.md +309 -0
- cnhkmcp/untracked/APP/Tranformer/Transformer.py +4989 -0
- cnhkmcp/untracked/APP/Tranformer/ace.log +0 -0
- cnhkmcp/untracked/APP/Tranformer/ace_lib.py +1514 -0
- cnhkmcp/untracked/APP/Tranformer/helpful_functions.py +180 -0
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates.json +7187 -0
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates_/321/207/320/264/342/225/221/321/204/342/225/233/320/233.json +654 -0
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_error.json +1 -0
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_success.json +47312 -0
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_/321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/277/321/207/320/253/342/224/244/321/206/320/236/320/265/321/210/342/225/234/342/225/234/321/205/320/225/320/265Machine_lib.json +22 -0
- cnhkmcp/untracked/APP/Tranformer/parsetab.py +60 -0
- cnhkmcp/untracked/APP/Tranformer/template_summary.txt +3182 -0
- cnhkmcp/untracked/APP/Tranformer/transformer_config.json +7 -0
- cnhkmcp/untracked/APP/Tranformer/validator.py +889 -0
- cnhkmcp/untracked/APP/ace.log +69 -0
- cnhkmcp/untracked/APP/ace_lib.py +1514 -0
- cnhkmcp/untracked/APP/blueprints/__init__.py +6 -0
- cnhkmcp/untracked/APP/blueprints/feature_engineering.py +347 -0
- cnhkmcp/untracked/APP/blueprints/idea_house.py +221 -0
- cnhkmcp/untracked/APP/blueprints/inspiration_house.py +432 -0
- cnhkmcp/untracked/APP/blueprints/paper_analysis.py +570 -0
- cnhkmcp/untracked/APP/custom_templates/templates.json +1257 -0
- cnhkmcp/untracked/APP/give_me_idea/BRAIN_Alpha_Template_Expert_SystemPrompt.md +400 -0
- cnhkmcp/untracked/APP/give_me_idea/ace_lib.py +1514 -0
- cnhkmcp/untracked/APP/give_me_idea/alpha_data_specific_template_master.py +252 -0
- cnhkmcp/untracked/APP/give_me_idea/fetch_all_datasets.py +157 -0
- cnhkmcp/untracked/APP/give_me_idea/fetch_all_operators.py +99 -0
- cnhkmcp/untracked/APP/give_me_idea/helpful_functions.py +180 -0
- cnhkmcp/untracked/APP/give_me_idea/what_is_Alpha_template.md +11 -0
- cnhkmcp/untracked/APP/helpful_functions.py +180 -0
- cnhkmcp/untracked/APP/hkSimulator/ace_lib.py +1501 -0
- cnhkmcp/untracked/APP/hkSimulator/autosimulator.py +447 -0
- cnhkmcp/untracked/APP/hkSimulator/helpful_functions.py +180 -0
- cnhkmcp/untracked/APP/mirror_config.txt +20 -0
- cnhkmcp/untracked/APP/operaters.csv +129 -0
- cnhkmcp/untracked/APP/requirements.txt +53 -0
- cnhkmcp/untracked/APP/run_app.bat +28 -0
- cnhkmcp/untracked/APP/run_app.sh +34 -0
- cnhkmcp/untracked/APP/setup_tsinghua.bat +39 -0
- cnhkmcp/untracked/APP/setup_tsinghua.sh +43 -0
- cnhkmcp/untracked/APP/simulator/alpha_submitter.py +404 -0
- cnhkmcp/untracked/APP/simulator/simulator_wqb.py +618 -0
- cnhkmcp/untracked/APP/simulator/wqb20260107015647.log +57 -0
- cnhkmcp/untracked/APP/ssrn-3332513.pdf +109188 -19
- cnhkmcp/untracked/APP/static/brain.js +589 -0
- cnhkmcp/untracked/APP/static/decoder.js +1540 -0
- cnhkmcp/untracked/APP/static/feature_engineering.js +1729 -0
- cnhkmcp/untracked/APP/static/idea_house.js +937 -0
- cnhkmcp/untracked/APP/static/inspiration.js +465 -0
- cnhkmcp/untracked/APP/static/inspiration_house.js +868 -0
- cnhkmcp/untracked/APP/static/paper_analysis.js +390 -0
- cnhkmcp/untracked/APP/static/script.js +3082 -0
- cnhkmcp/untracked/APP/static/simulator.js +597 -0
- cnhkmcp/untracked/APP/static/styles.css +3127 -0
- cnhkmcp/untracked/APP/static/usage_widget.js +508 -0
- cnhkmcp/untracked/APP/templates/alpha_inspector.html +511 -0
- cnhkmcp/untracked/APP/templates/feature_engineering.html +960 -0
- cnhkmcp/untracked/APP/templates/idea_house.html +564 -0
- cnhkmcp/untracked/APP/templates/index.html +932 -0
- cnhkmcp/untracked/APP/templates/inspiration_house.html +861 -0
- cnhkmcp/untracked/APP/templates/paper_analysis.html +91 -0
- cnhkmcp/untracked/APP/templates/simulator.html +343 -0
- cnhkmcp/untracked/APP/templates/transformer_web.html +580 -0
- cnhkmcp/untracked/APP/usage.md +351 -0
- cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/ace_lib.py +1514 -0
- cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/brain_alpha_inspector.py +712 -0
- cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/helpful_functions.py +180 -0
- cnhkmcp/untracked/APP//321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +2460 -0
- cnhkmcp/untracked/__init__.py +0 -0
- cnhkmcp/untracked/arXiv_API_Tool_Manual.md +490 -0
- cnhkmcp/untracked/arxiv_api.py +229 -0
- cnhkmcp/untracked/forum_functions.py +998 -0
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/forum_functions.py +407 -0
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/platform_functions.py +2601 -0
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/user_config.json +31 -0
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/210/320/276/320/271AI/321/210/320/277/342/225/227/321/210/342/224/220/320/251/321/204/342/225/225/320/272/321/206/320/246/320/227/321/206/320/261/320/263/321/206/320/255/320/265/321/205/320/275/320/266/321/204/342/225/235/320/252/321/204/342/225/225/320/233/321/210/342/225/234/342/225/234/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270.md +101 -0
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +190 -0
- cnhkmcp/untracked/platform_functions.py +2886 -0
- cnhkmcp/untracked/sample_mcp_config.json +11 -0
- cnhkmcp/untracked/user_config.json +31 -0
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/222/321/210/320/220/320/223/321/206/320/246/320/227/321/206/320/261/320/263_BRAIN_Alpha_Test_Requirements_and_Tips.md +202 -0
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Alpha_explaination_workflow.md +56 -0
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_6_Tips_Datafield_Exploration_Guide.md +194 -0
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_Alpha_Improvement_Workflow.md +101 -0
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Dataset_Exploration_Expert_Manual.md +436 -0
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_daily_report_workflow.md +128 -0
- cnhkmcp/untracked//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +190 -0
- {cnhkmcp-2.1.3.dist-info → cnhkmcp-2.1.4.dist-info}/METADATA +1 -1
- cnhkmcp-2.1.4.dist-info/RECORD +190 -0
- cnhkmcp-2.1.4.dist-info/top_level.txt +1 -0
- cnhkmcp-2.1.3.dist-info/RECORD +0 -6
- cnhkmcp-2.1.3.dist-info/top_level.txt +0 -1
- {cnhkmcp-2.1.3.dist-info → cnhkmcp-2.1.4.dist-info}/WHEEL +0 -0
- {cnhkmcp-2.1.3.dist-info → cnhkmcp-2.1.4.dist-info}/entry_points.txt +0 -0
- {cnhkmcp-2.1.3.dist-info → cnhkmcp-2.1.4.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,2601 @@

```python
#!/usr/bin/env python3
"""
WorldQuant BRAIN MCP Server - Python Version
A comprehensive Model Context Protocol (MCP) server for WorldQuant BRAIN platform integration.
"""

import json
import time
import asyncio
import logging
from typing import Dict, List, Optional, Any, Union, Tuple
import re
import base64
from bs4 import BeautifulSoup
from dataclasses import dataclass, asdict
from datetime import datetime, timedelta
import os
import sys
import math
from time import sleep

import requests
import pandas as pd
from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel, Field, EmailStr

from pathlib import Path

# Import the new forum client
from forum_functions import forum_client

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Pydantic models for type safety
class AuthCredentials(BaseModel):
    email: EmailStr
    password: str

class SimulationSettings(BaseModel):
    instrumentType: str = "EQUITY"
    region: str = "USA"
    universe: str = "TOP3000"
    delay: int = 1
    decay: float = 0.0
    neutralization: str = "NONE"
    truncation: float = 0.0
    pasteurization: str = "ON"
    unitHandling: str = "VERIFY"
    nanHandling: str = "OFF"
    language: str = "FASTEXPR"
    visualization: bool = True
    testPeriod: str = "P0Y0M"
    selectionHandling: str = "POSITIVE"
    selectionLimit: int = 1000
    maxTrade: str = "OFF"
    componentActivation: str = "IS"

class SimulationData(BaseModel):
    type: str = "REGULAR"  # "REGULAR" or "SUPER"
    settings: SimulationSettings
    regular: Optional[str] = None
    combo: Optional[str] = None
    selection: Optional[str] = None
```
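As defined above, a REGULAR simulation carries its expression in `regular`, while SUPER simulations use `combo`/`selection`; the SUPER-only settings (`selectionHandling`, `selectionLimit`, `componentActivation`) are stripped for REGULAR runs later in `create_simulation`. A minimal sketch of composing a request payload from these models (the expression is a placeholder, not a recommended alpha):

```python
# Sketch only: assumes the pydantic models above are importable.
settings = SimulationSettings(region="USA", universe="TOP3000", neutralization="INDUSTRY")
sim = SimulationData(
    type="REGULAR",            # SUPER simulations would set `combo`/`selection` instead
    settings=settings,
    regular="rank(-returns)",  # hypothetical FASTEXPR expression
)
# pydantic v2; mirrors the None-filtering done in create_simulation below
payload = sim.model_dump(exclude_none=True)
```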
```python
class BrainApiClient:
    """WorldQuant BRAIN API client with comprehensive functionality."""

    def __init__(self):
        self.base_url = "https://api.worldquantbrain.com"
        self.session = requests.Session()
        self.auth_credentials = None
        self.is_authenticating = False

        # Configure session
        self.session.timeout = 30
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })

    def log(self, message: str, level: str = "INFO"):
        """Log messages to stderr to avoid MCP protocol interference."""
        try:
            # Try to print with original message first
            print(f"[{level}] {message}", file=sys.stderr)
        except UnicodeEncodeError:
            # Fallback: remove problematic characters and try again
            try:
                safe_message = message.encode('ascii', 'ignore').decode('ascii')
                print(f"[{level}] {safe_message}", file=sys.stderr)
            except Exception:
                # Final fallback: just print the level and a safe message
                print(f"[{level}] Log message", file=sys.stderr)
        except Exception:
            # Final fallback: just print the level and a safe message
            print(f"[{level}] Log message", file=sys.stderr)

    async def authenticate(self, email: str, password: str) -> Dict[str, Any]:
        """Authenticate with WorldQuant BRAIN platform with biometric support."""
        self.log("🔐 Starting Authentication process...", "INFO")

        try:
            # Store credentials for potential re-authentication
            self.auth_credentials = {'email': email, 'password': password}

            # Clear any existing session data
            self.session.cookies.clear()
            self.session.auth = None

            # Create Basic Authentication header (base64 encoded credentials)
            import base64
            credentials = f"{email}:{password}"
            encoded_credentials = base64.b64encode(credentials.encode()).decode()

            # Send POST request with Basic Authentication header
            headers = {
                'Authorization': f'Basic {encoded_credentials}'
            }

            response = self.session.post('https://api.worldquantbrain.com/authentication', headers=headers)

            # Check for successful authentication (status code 201)
            if response.status_code == 201:
                self.log("Authentication successful", "SUCCESS")

                # Check if JWT token was automatically stored by session
                jwt_token = self.session.cookies.get('t')
                if jwt_token:
                    self.log("JWT token automatically stored by session", "SUCCESS")
                else:
                    self.log("⚠️ No JWT token found in session", "WARNING")

                # Return success response
                return {
                    'user': {'email': email},
                    'status': 'authenticated',
                    'permissions': ['read', 'write'],
                    'message': 'Authentication successful',
                    'status_code': response.status_code,
                    'has_jwt': jwt_token is not None
                }

            # Check if biometric authentication is required (401 with persona)
            elif response.status_code == 401:
                www_auth = response.headers.get("WWW-Authenticate")
                location = response.headers.get("Location")

                if www_auth == "persona" and location:
                    self.log("🔴 Biometric authentication required", "INFO")

                    # Handle biometric authentication
                    from urllib.parse import urljoin
                    biometric_url = urljoin(response.url, location)

                    return await self._handle_biometric_auth(biometric_url, email)
                else:
                    raise Exception("Incorrect email or password")
            else:
                raise Exception(f"Authentication failed with status code: {response.status_code}")

        except requests.HTTPError as e:
            self.log(f"❌ HTTP error during authentication: {e}", "ERROR")
            raise
        except Exception as e:
            self.log(f"❌ Authentication failed: {str(e)}", "ERROR")
            raise
```
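The handshake above reduces to a Basic-auth POST that, on a 201, leaves a JWT in the session's `t` cookie; a 401 with `WWW-Authenticate: persona` signals the biometric flow. A standalone sketch of the same exchange using plain `requests` (placeholder credentials; endpoint and cookie name taken from the code above):

```python
import requests

session = requests.Session()
# requests' auth tuple produces the same Basic Authorization header built manually above
resp = session.post(
    "https://api.worldquantbrain.com/authentication",
    auth=("user@example.com", "password"),  # placeholder credentials
)
if resp.status_code == 201:
    print("authenticated; JWT cookie:", session.cookies.get("t"))
elif resp.status_code == 401 and resp.headers.get("WWW-Authenticate") == "persona":
    print("biometric step required at:", resp.headers.get("Location"))
```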
```python
    async def _handle_biometric_auth(self, biometric_url: str, email: str) -> Dict[str, Any]:
        """Handle biometric authentication using browser automation."""
        self.log("🌐 Starting biometric authentication...", "INFO")

        try:
            # Import playwright for browser automation
            from playwright.async_api import async_playwright
            import time

            async with async_playwright() as p:
                browser = await p.chromium.launch(headless=False)
                page = await browser.new_page()

                self.log("🌐 Opening browser for biometric authentication...", "INFO")
                await page.goto(biometric_url)
                self.log("Browser page loaded successfully", "SUCCESS")

                # Print instructions
                print("\n" + "="*60, file=sys.stderr)
                print("BIOMETRIC AUTHENTICATION REQUIRED", file=sys.stderr)
                print("="*60, file=sys.stderr)
                print("Browser window is open with biometric authentication page", file=sys.stderr)
                print("Complete the biometric authentication in the browser", file=sys.stderr)
                print("The system will automatically check when you're done...", file=sys.stderr)
                print("="*60, file=sys.stderr)

                # Keep checking until authentication is complete
                max_attempts = 60  # 5 minutes maximum (60 * 5 seconds)
                attempt = 0

                while attempt < max_attempts:
                    time.sleep(5)  # Check every 5 seconds
                    attempt += 1

                    # Check if authentication completed
                    check_response = self.session.post(biometric_url)
                    self.log(f"🔄 Checking authentication status (attempt {attempt}/{max_attempts}): {check_response.status_code}", "INFO")

                    if check_response.status_code == 201:
                        self.log("Biometric authentication successful!", "SUCCESS")

                        await browser.close()

                        # Check JWT token
                        jwt_token = self.session.cookies.get('t')
                        if jwt_token:
                            self.log("JWT token received", "SUCCESS")

                        # Return success response
                        return {
                            'user': {'email': email},
                            'status': 'authenticated',
                            'permissions': ['read', 'write'],
                            'message': 'Biometric authentication successful',
                            'status_code': check_response.status_code,
                            'has_jwt': jwt_token is not None
                        }

                await browser.close()
                raise Exception("Biometric authentication timed out")

        except Exception as e:
            self.log(f"❌ Biometric authentication failed: {str(e)}", "ERROR")
            raise

    async def is_authenticated(self) -> bool:
        """Check if currently authenticated using JWT token."""
        try:
            # Check if we have a JWT token in cookies
            jwt_token = self.session.cookies.get('t')
            if not jwt_token:
                self.log("❌ No JWT token found", "INFO")
                return False

            # Test authentication with a simple API call
            response = self.session.get(f"{self.base_url}/authentication")
            if response.status_code == 200:
                return True
            elif response.status_code == 401:
                self.log("❌ JWT token expired or invalid (401)", "INFO")
                return False
            else:
                self.log(f"⚠️ Unexpected status code during auth check: {response.status_code}", "WARNING")
                return False
        except Exception as e:
            self.log(f"❌ Error checking authentication: {str(e)}", "ERROR")
            return False

    async def ensure_authenticated(self):
        """Ensure authentication is valid, re-authenticate if needed."""
        if not await self.is_authenticated():
            if not self.auth_credentials:
                self.log("No credentials in memory, loading from config...", "INFO")
                config = load_config()
                creds = config.get("credentials", {})
                email = creds.get("email")
                password = creds.get("password")
                if not email or not password:
                    raise Exception("Authentication credentials not found in config. Please authenticate first.")
                self.auth_credentials = {'email': email, 'password': password}

            self.log("🔄 Re-authenticating...", "INFO")
            await self.authenticate(self.auth_credentials['email'], self.auth_credentials['password'])

    async def get_authentication_status(self) -> Optional[Dict[str, Any]]:
        """Get current authentication status and user info."""
        try:
            response = self.session.get(f"{self.base_url}/users/self")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get auth status: {str(e)}", "ERROR")
            return None

    async def create_simulation(self, simulation_data: SimulationData) -> Dict[str, str]:
        """Create a new simulation on BRAIN platform."""
        await self.ensure_authenticated()

        try:
            self.log("🚀 Creating simulation...", "INFO")

            # Prepare settings based on simulation type
            settings_dict = simulation_data.settings.model_dump()

            # Remove fields based on simulation type
            if simulation_data.type == "REGULAR":
                # Remove SUPER-specific fields for REGULAR
                settings_dict.pop('selectionHandling', None)
                settings_dict.pop('selectionLimit', None)
                settings_dict.pop('componentActivation', None)

            # Filter out None values from settings
            settings_dict = {k: v for k, v in settings_dict.items() if v is not None}

            # Prepare simulation payload
            payload = {
                'type': simulation_data.type,
                'settings': settings_dict
            }

            # Add type-specific fields
            if simulation_data.type == "REGULAR":
                if simulation_data.regular:
                    payload['regular'] = simulation_data.regular
            elif simulation_data.type == "SUPER":
                if simulation_data.combo:
                    payload['combo'] = simulation_data.combo
                if simulation_data.selection:
                    payload['selection'] = simulation_data.selection

            # Filter out None values from entire payload
            payload = {k: v for k, v in payload.items() if v is not None}

            response = self.session.post(f"{self.base_url}/simulations", json=payload)
            response.raise_for_status()

            location = response.headers.get('Location', '')
            simulation_id = location.split('/')[-1] if location else None

            self.log(f"Simulation created with ID: {simulation_id}", "SUCCESS")

            while True:
                simulation_progress = self.session.get(location)
                if simulation_progress.headers.get("Retry-After", 0) == 0:
                    break
                print("Sleeping for " + simulation_progress.headers["Retry-After"] + " seconds")
                sleep(float(simulation_progress.headers["Retry-After"]))
            print("Alpha done simulating, getting alpha details")
            alpha_id = simulation_progress.json()["alpha"]
            alpha = self.session.get("https://api.worldquantbrain.com/alphas/" + alpha_id)
            result = alpha.json()
            result['note'] = "If you got a negative alpha Sharpe, you can add a minus sign in front of the last line of the alpha to flip it, then plan the next step."
            return result

        except Exception as e:
            self.log(f"❌ Failed to create simulation: {str(e)}", "ERROR")
            raise
```
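`create_simulation` polls the `Location` URL returned by `POST /simulations` until the server stops sending a `Retry-After` header; note that `headers.get("Retry-After", 0) == 0` only matches when the header is absent, since a present header value is a string. The same polling pattern in isolation (a sketch; `progress_url` stands for the Location header value):

```python
import requests
from time import sleep

def wait_for_simulation(session: requests.Session, progress_url: str) -> dict:
    """Poll a BRAIN simulation progress URL until Retry-After disappears.

    Sketch of the loop used in create_simulation above; `progress_url`
    is the Location header returned by POST /simulations.
    """
    while True:
        progress = session.get(progress_url)
        retry_after = progress.headers.get("Retry-After")  # e.g. "2.5", or None when done
        if retry_after is None:
            return progress.json()  # finished payload carries the alpha id under "alpha"
        sleep(float(retry_after))
```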
```python
    async def get_alpha_details(self, alpha_id: str) -> Dict[str, Any]:
        """Get detailed information about an alpha."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/alphas/{alpha_id}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get alpha details: {str(e)}", "ERROR")
            raise

    async def get_datasets(self, instrument_type: str = "EQUITY", region: str = "USA",
                           delay: int = 1, universe: str = "TOP3000", theme: str = "false",
                           search: Optional[str] = None) -> Dict[str, Any]:
        """Get available datasets."""
        await self.ensure_authenticated()

        try:
            params = {
                'instrumentType': instrument_type,
                'region': region,
                'delay': delay,
                'universe': universe,
                'theme': theme
            }

            if search:
                params['search'] = search

            response = self.session.get(f"{self.base_url}/data-sets", params=params)
            response.raise_for_status()
            response_json = response.json()
            response_json['extraNote'] = "If your returned result is 0, you may want to check your parameters by using the get_platform_setting_options tool to get the correct parameters"
            return response_json
        except Exception as e:
            self.log(f"Failed to get datasets: {str(e)}", "ERROR")
            raise

    async def get_datafields(self, instrument_type: str = "EQUITY", region: str = "USA",
                             delay: int = 1, universe: str = "TOP3000", theme: str = "false",
                             dataset_id: Optional[str] = None, data_type: str = "",
                             search: Optional[str] = None) -> Dict[str, Any]:
        """Get available data fields."""
        await self.ensure_authenticated()

        try:
            params = {
                'instrumentType': instrument_type,
                'region': region,
                'delay': delay,
                'universe': universe,
                'limit': '50',
                'offset': '0'
            }

            if data_type != 'ALL':
                params['type'] = data_type

            if dataset_id:
                params['dataset.id'] = dataset_id
            if search:
                params['search'] = search

            response = self.session.get(f"{self.base_url}/data-fields", params=params)
            response.raise_for_status()
            response_json = response.json()
            response_json['extraNote'] = "If your returned result is 0, you may want to check your parameters by using the get_platform_setting_options tool to get the correct parameters"
            return response_json
        except Exception as e:
            self.log(f"Failed to get datafields: {str(e)}", "ERROR")
            raise
```
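Both lookups are thin wrappers over `GET /data-sets` and `GET /data-fields` with the query parameters shown. A hedged usage sketch (credentials are placeholders, and `fundamental6` is only an illustrative dataset id, not guaranteed to exist for every account):

```python
import asyncio

async def main():
    client = BrainApiClient()
    await client.authenticate("user@example.com", "password")  # placeholders

    # Datasets available for delay-1 USA TOP3000 equities
    datasets = await client.get_datasets(region="USA", universe="TOP3000", delay=1)

    # Matrix-type fields from one dataset; "fundamental6" is illustrative
    fields = await client.get_datafields(dataset_id="fundamental6", data_type="MATRIX")
    print(datasets.get("count"), fields.get("count"))

asyncio.run(main())
```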
```python
    async def get_alpha_pnl(self, alpha_id: str) -> Dict[str, Any]:
        """Get PnL data for an alpha with retry logic."""
        await self.ensure_authenticated()

        max_retries = 5
        retry_delay = 2  # seconds

        for attempt in range(max_retries):
            try:
                self.log(f"Attempting to get PnL for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")

                response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/pnl")
                response.raise_for_status()

                # Some alphas may return 204 No Content or an empty body
                text = (response.text or "").strip()
                if not text:
                    if attempt < max_retries - 1:
                        self.log(f"Empty PnL response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 1.5  # Exponential backoff
                        continue
                    else:
                        self.log(f"Empty PnL response after {max_retries} attempts for {alpha_id}", "WARNING")
                        return {}

                try:
                    pnl_data = response.json()
                    if pnl_data:
                        self.log(f"Successfully retrieved PnL data for alpha {alpha_id}", "SUCCESS")
                        return pnl_data
                    else:
                        if attempt < max_retries - 1:
                            self.log(f"Empty PnL JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
                            retry_delay *= 1.5
                            continue
                        else:
                            self.log(f"Empty PnL JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                            return {}

                except Exception as parse_err:
                    if attempt < max_retries - 1:
                        self.log(f"PnL JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 1.5
                        continue
                    else:
                        self.log(f"PnL JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                        return {}

            except Exception as e:
                if attempt < max_retries - 1:
                    self.log(f"Failed to get alpha PnL for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                    await asyncio.sleep(retry_delay)
                    retry_delay *= 1.5
                    continue
                else:
                    self.log(f"Failed to get alpha PnL for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                    raise

        # This should never be reached, but just in case
        return {}
```
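With `max_retries = 5`, a 2-second initial delay, and a 1.5x multiplier, the loop above sleeps at most four times between attempts, roughly 16 seconds of waiting in the worst case:

```python
# Backoff schedule used by get_alpha_pnl: 4 sleeps between 5 attempts
delays = [2 * 1.5 ** i for i in range(4)]  # [2.0, 3.0, 4.5, 6.75]
print(delays, sum(delays))                 # total worst-case wait = 16.25 s
```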
```python
    async def get_user_alphas(
        self,
        stage: str = "OS",
        limit: int = 30,
        offset: int = 0,
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
        submission_start_date: Optional[str] = None,
        submission_end_date: Optional[str] = None,
        order: Optional[str] = None,
        hidden: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """Get user's alphas with advanced filtering."""
        await self.ensure_authenticated()

        try:
            params = {
                "stage": stage,
                "limit": limit,
                "offset": offset,
            }
            if start_date:
                params["dateCreated>"] = start_date
            if end_date:
                params["dateCreated<"] = end_date
            if submission_start_date:
                params["dateSubmitted>"] = submission_start_date
            if submission_end_date:
                params["dateSubmitted<"] = submission_end_date
            if order:
                params["order"] = order
            if hidden is not None:
                params["hidden"] = str(hidden).lower()

            response = self.session.get(f"{self.base_url}/users/self/alphas", params=params)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get user alphas: {str(e)}", "ERROR")
            raise
```
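Note that the filter keys embed the comparison operator in the parameter name (`dateCreated>`, `dateSubmitted<`), which `requests` URL-encodes automatically. A usage sketch, inside an async context (dates illustrative; the `order` value is an assumption, since the accepted sort keys are not shown in this diff):

```python
# Fetch OS alphas submitted in a given window (sketch; run inside an async function)
alphas = await client.get_user_alphas(
    stage="OS",
    limit=100,
    submission_start_date="2025-08-14T00:00:00Z",
    submission_end_date="2025-08-18T23:59:59Z",
    order="-dateSubmitted",  # assumed ordering key, not confirmed by this excerpt
)
for a in alphas.get("results", []):
    print(a.get("id"), a.get("type"))
```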
```python
    async def submit_alpha(self, alpha_id: str) -> bool:
        """Submit an alpha for production."""
        await self.ensure_authenticated()

        try:
            self.log(f"📤 Submitting alpha {alpha_id} for production...", "INFO")

            response = self.session.post(f"{self.base_url}/alphas/{alpha_id}/submit")
            response.raise_for_status()

            self.log(f"Alpha {alpha_id} submitted successfully", "SUCCESS")
            return response.__dict__

        except Exception as e:
            self.log(f"❌ Failed to submit alpha: {str(e)}", "ERROR")
            return False

    async def get_events(self) -> Dict[str, Any]:
        """Get available events and competitions."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/events")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get events: {str(e)}", "ERROR")
            raise

    async def get_leaderboard(self, user_id: Optional[str] = None) -> Dict[str, Any]:
        """Get leaderboard data."""
        await self.ensure_authenticated()

        try:
            params = {}

            if user_id:
                params['user'] = user_id
            else:
                # Get current user ID if not specified
                user_response = self.session.get(f"{self.base_url}/users/self")
                if user_response.status_code == 200:
                    user_data = user_response.json()
                    params['user'] = user_data.get('id')

            response = self.session.get(f"{self.base_url}/consultant/boards/leader", params=params)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get leaderboard: {str(e)}", "ERROR")
            raise

    def _is_atom(self, detail: Optional[Dict[str, Any]]) -> bool:
        """Match atom detection used in extract_regular_alphas.py:
        - Primary signal: 'classifications' entries containing 'SINGLE_DATA_SET'
        - Fallbacks: tags list contains 'atom' or classification id/name contains 'ATOM'
        """
        if not detail or not isinstance(detail, dict):
            return False

        classifications = detail.get('classifications') or []
        for c in classifications:
            cid = (c.get('id') or c.get('name') or '')
            if isinstance(cid, str) and 'SINGLE_DATA_SET' in cid:
                return True

        # Fallbacks
        tags = detail.get('tags') or []
        if isinstance(tags, list):
            for t in tags:
                if isinstance(t, str) and t.strip().lower() == 'atom':
                    return True

        for c in classifications:
            cid = (c.get('id') or c.get('name') or '')
            if isinstance(cid, str) and 'ATOM' in cid.upper():
                return True

        return False

    async def value_factor_trendScore(self, start_date: str, end_date: str) -> Dict[str, Any]:
        """Compute the diversity score for regular alphas in a date range.

        Description:
        This function calculates the diversity of the user's submissions; the diversity
        gives a good read on the value factor's trend. A user's value factor is defined
        by this diversity score, which measures three key aspects of work output: the
        proportion of works with the "Atom" tag (S_A), the breadth of pyramids covered
        (S_P), and how evenly works are distributed across those pyramids (S_H).
        Calculated as their product, it rewards strong performance across all three
        dimensions (more Atom-tagged works, wider pyramid coverage, and balanced
        distribution), with weakness in any area lowering the total score significantly.

        Inputs (hints for AI callers):
        - start_date (str): ISO UTC start datetime, e.g. '2025-08-14T00:00:00Z'
        - end_date (str): ISO UTC end datetime, e.g. '2025-08-18T23:59:59Z'
        - Note: this tool always uses 'OS' (submission dates) to define the window;
          callers do not need to supply a stage.
        - Note: P_max (total number of possible pyramids) is derived from the platform
          pyramid-multipliers endpoint and not supplied by callers.

        Returns (compact JSON): {
            'diversity_score': float,
            'N': int,      # total regular alphas in window
            'A': int,      # number of Atom-tagged works (is_single_data_set)
            'P': int,      # pyramid coverage count in the sample
            'P_max': int,  # used max for normalization
            'S_A': float, 'S_P': float, 'S_H': float,
            'per_pyramid_counts': {pyramid_name: count}
        }
        """
        # Fetch user alphas (always use OS / submission dates per product policy)
        await self.ensure_authenticated()
        alphas_resp = await self.get_user_alphas(stage='OS', limit=500, submission_start_date=start_date, submission_end_date=end_date)

        if not isinstance(alphas_resp, dict) or 'results' not in alphas_resp:
            return {'error': 'Unexpected response from get_user_alphas', 'raw': alphas_resp}

        alphas = alphas_resp['results']
        regular = [a for a in alphas if a.get('type') == 'REGULAR']

        # Fetch details for each regular alpha
        pyramid_list = []
        atom_count = 0
        per_pyramid = {}
        for a in regular:
            try:
                detail = await self.get_alpha_details(a.get('id'))
            except Exception:
                continue

            is_atom = self._is_atom(detail)
            if is_atom:
                atom_count += 1

            # Extract pyramids
            ps = []
            if isinstance(detail.get('pyramids'), list):
                ps = [p.get('name') for p in detail.get('pyramids') if p.get('name')]
            else:
                pt = detail.get('pyramidThemes') or {}
                pss = pt.get('pyramids') if isinstance(pt, dict) else None
                if pss and isinstance(pss, list):
                    ps = [p.get('name') for p in pss if p.get('name')]

            for p in ps:
                pyramid_list.append(p)
                per_pyramid[p] = per_pyramid.get(p, 0) + 1

        N = len(regular)
        A = atom_count
        P = len(per_pyramid)

        # Determine P_max similarly to the script: use pyramid multipliers if available
        P_max = None
        try:
            pm = await self.get_pyramid_multipliers()
            if isinstance(pm, dict) and 'pyramids' in pm:
                pyramids_list = pm.get('pyramids') or []
                P_max = len(pyramids_list)
        except Exception:
            P_max = None

        if not P_max or P_max <= 0:
            P_max = max(P, 1)

        # Component scores
        S_A = (A / N) if N > 0 else 0.0
        S_P = (P / P_max) if P_max > 0 else 0.0

        # Entropy
        S_H = 0.0
        if P <= 1 or not per_pyramid:
            S_H = 0.0
        else:
            total_occ = sum(per_pyramid.values())
            H = 0.0
            for cnt in per_pyramid.values():
                q = cnt / total_occ if total_occ > 0 else 0
                if q > 0:
                    H -= q * math.log2(q)
            max_H = math.log2(P) if P > 0 else 1
            S_H = (H / max_H) if max_H > 0 else 0.0

        diversity_score = S_A * S_P * S_H

        return {
            'diversity_score': diversity_score,
            'N': N,
            'A': A,
            'P': P,
            'P_max': P_max,
            'S_A': S_A,
            'S_P': S_P,
            'S_H': S_H,
            'per_pyramid_counts': per_pyramid
        }
```
|
|
720
|
+
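    # Worked sketch of the scoring arithmetic above, using hypothetical counts
    # (10 regular alphas, 4 Atom-tagged, spread over 3 of 12 pyramids):
    #
    #   import math
    #   N, A = 10, 4
    #   per_pyramid = {'Analyst': 5, 'Price Volume': 3, 'Fundamental': 2}
    #   P, P_max = len(per_pyramid), 12
    #   S_A = A / N                                   # 0.4   (Atom proportion)
    #   S_P = P / P_max                               # 0.25  (pyramid breadth)
    #   total = sum(per_pyramid.values())
    #   H = -sum((c / total) * math.log2(c / total) for c in per_pyramid.values())
    #   S_H = H / math.log2(P)                        # ~0.937 (normalized entropy)
    #   diversity_score = S_A * S_P * S_H             # ~0.094; any weak factor drags it down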
    async def get_operators(self) -> Dict[str, Any]:
        """Get available operators for alpha creation."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/operators")
            response.raise_for_status()
            operators_data = response.json()

            # Ensure we return a dictionary format even if API returns a list
            if isinstance(operators_data, list):
                return {"operators": operators_data, "count": len(operators_data)}
            else:
                return operators_data
        except Exception as e:
            self.log(f"Failed to get operators: {str(e)}", "ERROR")
            raise
    async def run_selection(
        self,
        selection: str,
        instrument_type: str = "EQUITY",
        region: str = "USA",
        delay: int = 1,
        selection_limit: int = 1000,
        selection_handling: str = "POSITIVE"
    ) -> Dict[str, Any]:
        """Run a selection query to filter instruments."""
        await self.ensure_authenticated()

        try:
            selection_data = {
                "selection": selection,
                "instrumentType": instrument_type,
                "region": region,
                "delay": delay,
                "selectionLimit": selection_limit,
                "selectionHandling": selection_handling
            }

            response = self.session.get(f"{self.base_url}/simulations/super-selection", params=selection_data)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to run selection: {str(e)}", "ERROR")
            raise
    async def get_user_profile(self, user_id: str = "self") -> Dict[str, Any]:
        """Get user profile information."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/users/{user_id}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get user profile: {str(e)}", "ERROR")
            raise
    async def get_documentations(self) -> Dict[str, Any]:
        """Get available documentations and learning materials."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/tutorials")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get documentations: {str(e)}", "ERROR")
            raise
    async def get_messages(self, limit: Optional[int] = None, offset: int = 0) -> Dict[str, Any]:
        """Get messages for the current user with optional pagination.

        Image / large binary payload mitigation:
        Some messages embed base64 encoded images (e.g. <img src="data:image/png;base64,..."/>).
        Returning full base64 can explode token usage for an LLM client. We post-process each
        message description and (by default) extract embedded base64 images to disk and replace
        them with lightweight placeholders while preserving context.

        Strategies (selected via the BRAIN_MESSAGE_IMAGE_MODE environment variable; the
        public API itself takes no extra parameter):
        - placeholder (default): save images to message_images/ and replace with marker text.
        - ignore: strip image tags entirely, leaving a note.
        - keep: leave description unchanged (unsafe for LLM token limits).

        A message dict gains an 'extracted_images' list when images are processed.
        """
        await self.ensure_authenticated()

        import re, base64, pathlib

        image_handling = os.environ.get("BRAIN_MESSAGE_IMAGE_MODE", "placeholder").lower()
        save_dir = pathlib.Path("message_images")

        from typing import Tuple

        def process_description(desc: str, message_id: str) -> Tuple[str, List[str]]:
            try:
                if not desc or image_handling == "keep":
                    return desc, []
                attachments: List[str] = []
                # Regex to capture full <img ...> tag with data URI
                img_tag_pattern = re.compile(r"<img[^>]+src=\"(data:image/[^\"]+)\"[^>]*>", re.IGNORECASE)
                # Iterate over unique matches to avoid double work
                matches = list(img_tag_pattern.finditer(desc))
                if not matches:
                    # Additional heuristic: very long base64-looking token inside quotes followed by </img>
                    # (legacy format noted by user sample). Replace with placeholder.
                    heuristic_pattern = re.compile(r"([A-Za-z0-9+/]{500,}={0,2})\"\s*</img>")
                    if image_handling != "keep" and heuristic_pattern.search(desc):
                        placeholder = "[Embedded image removed - large base64 sequence truncated]"
                        return heuristic_pattern.sub(placeholder + "</img>", desc), []
                    return desc, []

                # Ensure save directory exists only if we will store something
                if image_handling == "placeholder" and not save_dir.exists():
                    try:
                        save_dir.mkdir(parents=True, exist_ok=True)
                    except Exception as e:
                        self.log(f"Could not create image save directory: {e}", "WARNING")

                new_desc = desc
                for idx, match in enumerate(matches, start=1):
                    data_uri = match.group(1)  # data:image/...;base64,XXXX
                    if not data_uri.lower().startswith("data:image"):
                        continue
                    # Split header and base64 payload
                    if "," not in data_uri:
                        continue
                    header, b64_data = data_uri.split(",", 1)
                    mime_part = header.split(";")[0]  # data:image/png
                    ext = "png"
                    if "/" in mime_part:
                        ext = mime_part.split("/")[1]
                    safe_ext = (ext or "img").split("?")[0]
                    placeholder_text = "[Embedded image]"
                    if image_handling == "ignore":
                        replacement = f"[Image removed: {safe_ext}]"
                    elif image_handling == "placeholder":
                        # Try to decode & save
                        file_name = f"{message_id}_{idx}.{safe_ext}"
                        file_path = save_dir / file_name
                        try:
                            # Guard extremely large strings (>5MB ~ 6.7M base64 chars) to avoid memory blow-up
                            if len(b64_data) > 7_000_000:
                                raise ValueError("Image too large to decode safely")
                            with open(file_path, "wb") as f:
                                f.write(base64.b64decode(b64_data))
                            attachments.append(str(file_path))
                            replacement = f"[Image extracted -> {file_path}]"
                        except Exception as e:
                            self.log(f"Failed to decode embedded image in message {message_id}: {e}", "WARNING")
                            replacement = "[Image extraction failed - content omitted]"
                    else:  # keep
                        replacement = placeholder_text  # shouldn't be used due to the early return, but safe
                    # Replace only the matched tag, one occurrence at a time, by
                    # operating on new_desc with the exact matched string.
                    original_tag = match.group(0)
                    new_desc = new_desc.replace(original_tag, replacement, 1)
                return new_desc, attachments
            except UnicodeEncodeError as ue:
                self.log(f"Unicode encoding error in process_description: {ue}", "WARNING")
                return desc, []
            except Exception as e:
                self.log(f"Error in process_description: {e}", "WARNING")
                return desc, []

        try:
            params = {}
            if limit is not None:
                params['limit'] = limit
            if offset > 0:
                params['offset'] = offset

            response = self.session.get(f"{self.base_url}/users/self/messages", params=params)
            response.raise_for_status()
            data = response.json()

            # Post-process results for image handling
            results = data.get('results', [])
            for msg in results:
                try:
                    desc = msg.get('description')
                    processed_desc, attachments = process_description(desc, msg.get('id', 'msg'))
                    if attachments or desc != processed_desc:
                        msg['description'] = processed_desc
                        if attachments:
                            msg['extracted_images'] = attachments
                        else:
                            # If changed but no attachments (ignore mode), mark as sanitized
                            msg['sanitized'] = True
                except UnicodeEncodeError as ue:
                    self.log(f"Unicode encoding error sanitizing message {msg.get('id')}: {ue}", "WARNING")
                    # Keep original description if encoding fails
                    continue
                except Exception as inner_e:
                    self.log(f"Failed to sanitize message {msg.get('id')}: {inner_e}", "WARNING")
            data['results'] = results
            data['image_handling'] = image_handling
            return data
        except UnicodeEncodeError as ue:
            self.log(f"Failed to get messages due to encoding error: {str(ue)}", "ERROR")
            raise
        except Exception as e:
            self.log(f"Failed to get messages: {str(e)}", "ERROR")
            raise
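    # Because the strategy is chosen by an environment variable rather than an
    # argument, callers toggle it before invoking the method. A minimal sketch:
    #
    #   os.environ["BRAIN_MESSAGE_IMAGE_MODE"] = "ignore"
    #   data = await brain_client.get_messages(limit=20)
    #   for msg in data.get("results", []):
    #       # In "ignore" mode image tags become short notes and the message
    #       # is flagged via msg.get("sanitized").
    #       print(msg.get("id"), msg.get("sanitized", False))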
    async def get_glossary_terms(self, email: str, password: str) -> List[Dict[str, str]]:
        """Get glossary terms from the forum."""
        try:
            return await forum_client.get_glossary_terms(email, password)
        except Exception as e:
            self.log(f"Failed to get glossary terms: {str(e)}", "ERROR")
            raise

    async def search_forum_posts(self, email: str, password: str, search_query: str,
                                 max_results: int = 50) -> Dict[str, Any]:
        """Search forum posts."""
        try:
            return await forum_client.search_forum_posts(email, password, search_query, max_results)
        except Exception as e:
            self.log(f"Failed to search forum posts: {str(e)}", "ERROR")
            raise

    async def read_forum_post(self, email: str, password: str, article_id: str,
                              include_comments: bool = True) -> Dict[str, Any]:
        """Get a full forum post, optionally including comments."""
        try:
            return await forum_client.read_full_forum_post(email, password, article_id, include_comments)
        except Exception as e:
            self.log(f"Failed to read forum post: {str(e)}", "ERROR")
            raise
    async def get_alpha_yearly_stats(self, alpha_id: str) -> Dict[str, Any]:
        """Get yearly statistics for an alpha with retry logic."""
        await self.ensure_authenticated()

        max_retries = 5
        retry_delay = 2  # seconds

        for attempt in range(max_retries):
            try:
                self.log(f"Attempting to get yearly stats for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")

                response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/yearly-stats")
                response.raise_for_status()

                # Check if response has content
                text = (response.text or "").strip()
                if not text:
                    if attempt < max_retries - 1:
                        self.log(f"Empty yearly stats response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 1.5  # Exponential backoff
                        continue
                    else:
                        self.log(f"Empty yearly stats response after {max_retries} attempts for {alpha_id}", "WARNING")
                        return {}

                try:
                    yearly_stats = response.json()
                    if yearly_stats:
                        self.log(f"Successfully retrieved yearly stats for alpha {alpha_id}", "SUCCESS")
                        return yearly_stats
                    else:
                        if attempt < max_retries - 1:
                            self.log(f"Empty yearly stats JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
                            retry_delay *= 1.5
                            continue
                        else:
                            self.log(f"Empty yearly stats JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                            return {}

                except Exception as parse_err:
                    if attempt < max_retries - 1:
                        self.log(f"Yearly stats JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 1.5
                        continue
                    else:
                        self.log(f"Yearly stats JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                        return {}

            except Exception as e:
                if attempt < max_retries - 1:
                    self.log(f"Failed to get alpha yearly stats for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                    await asyncio.sleep(retry_delay)
                    retry_delay *= 1.5
                    continue
                else:
                    self.log(f"Failed to get alpha yearly stats for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                    raise

        # This should never be reached, but just in case
        return {}
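    # For reference, the retry cadence above is geometric: a 2 s initial delay
    # multiplied by 1.5 after each failed attempt, i.e. sleeps of roughly
    # 2.0, 3.0, 4.5 and 6.75 seconds across the five attempts. The correlation
    # fetchers below instead use a fixed 20 s delay between attempts.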
    async def get_production_correlation(self, alpha_id: str) -> Dict[str, Any]:
        """Get production correlation data for an alpha with retry logic."""
        await self.ensure_authenticated()

        max_retries = 5
        retry_delay = 20  # seconds

        for attempt in range(max_retries):
            try:
                self.log(f"Attempting to get production correlation for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")

                response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/prod")
                response.raise_for_status()

                # Check if response has content
                text = (response.text or "").strip()
                if not text:
                    if attempt < max_retries - 1:
                        self.log(f"Empty production correlation response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        continue
                    else:
                        self.log(f"Empty production correlation response after {max_retries} attempts for {alpha_id}", "WARNING")
                        return {}

                try:
                    correlation_data = response.json()
                    if correlation_data:
                        self.log(f"Successfully retrieved production correlation for alpha {alpha_id}", "SUCCESS")
                        return correlation_data
                    else:
                        if attempt < max_retries - 1:
                            self.log(f"Empty production correlation JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
                            continue
                        else:
                            self.log(f"Empty production correlation JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                            return {}

                except Exception as parse_err:
                    if attempt < max_retries - 1:
                        self.log(f"Production correlation JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        continue
                    else:
                        self.log(f"Production correlation JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                        return {}

            except Exception as e:
                if attempt < max_retries - 1:
                    self.log(f"Failed to get production correlation for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                    await asyncio.sleep(retry_delay)
                    continue
                else:
                    self.log(f"Failed to get production correlation for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                    raise

        # This should never be reached, but just in case
        return {}

    async def get_self_correlation(self, alpha_id: str) -> Dict[str, Any]:
        """Get self-correlation data for an alpha with retry logic."""
        await self.ensure_authenticated()

        max_retries = 5
        retry_delay = 20  # seconds

        for attempt in range(max_retries):
            try:
                self.log(f"Attempting to get self correlation for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")

                response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/self")
                response.raise_for_status()

                # Check if response has content
                text = (response.text or "").strip()
                if not text:
                    if attempt < max_retries - 1:
                        self.log(f"Empty self correlation response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        continue
                    else:
                        self.log(f"Empty self correlation response after {max_retries} attempts for {alpha_id}", "WARNING")
                        return {}

                try:
                    correlation_data = response.json()
                    if correlation_data:
                        self.log(f"Successfully retrieved self correlation for alpha {alpha_id}", "SUCCESS")
                        return correlation_data
                    else:
                        if attempt < max_retries - 1:
                            self.log(f"Empty self correlation JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
                            continue
                        else:
                            self.log(f"Empty self correlation JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                            return {}

                except Exception as parse_err:
                    if attempt < max_retries - 1:
                        self.log(f"Self correlation JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        continue
                    else:
                        self.log(f"Self correlation JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                        return {}

            except Exception as e:
                if attempt < max_retries - 1:
                    self.log(f"Failed to get self correlation for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                    await asyncio.sleep(retry_delay)
                    continue
                else:
                    self.log(f"Failed to get self correlation for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                    raise

        # This should never be reached, but just in case
        return {}
    async def check_correlation(self, alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
        """Check alpha correlation against production alphas, self alphas, or both."""
        await self.ensure_authenticated()

        try:
            results = {
                'alpha_id': alpha_id,
                'threshold': threshold,
                'correlation_type': correlation_type,
                'checks': {}
            }

            # Determine which correlations to check
            check_types = []
            if correlation_type == "both":
                check_types = ["production", "self"]
            else:
                check_types = [correlation_type]

            all_passed = True

            for check_type in check_types:
                if check_type == "production":
                    correlation_data = await self.get_production_correlation(alpha_id)
                elif check_type == "self":
                    correlation_data = await self.get_self_correlation(alpha_id)
                else:
                    continue

                # Analyze correlation data (robust to schema/records format)
                if isinstance(correlation_data, dict):
                    # Prefer strict access to schema.max or top-level max; otherwise error
                    schema = correlation_data.get('schema') or {}
                    if isinstance(schema, dict) and 'max' in schema:
                        max_correlation = float(schema['max'])
                    elif 'max' in correlation_data:
                        # Some endpoints place max at top-level
                        max_correlation = float(correlation_data['max'])
                    else:
                        # Attempt to derive from records; if none found, raise error instead of defaulting
                        records = correlation_data.get('records') or []
                        if isinstance(records, list) and records:
                            candidate_max = None
                            for row in records:
                                if isinstance(row, (list, tuple)):
                                    for v in row:
                                        try:
                                            vf = float(v)
                                            if -1.0 <= vf <= 1.0:
                                                candidate_max = vf if candidate_max is None else max(candidate_max, vf)
                                        except Exception:
                                            continue
                                elif isinstance(row, dict):
                                    for key in ('correlation', 'prodCorrelation', 'selfCorrelation', 'max'):
                                        try:
                                            vf = float(row.get(key))
                                            if -1.0 <= vf <= 1.0:
                                                candidate_max = vf if candidate_max is None else max(candidate_max, vf)
                                        except Exception:
                                            continue
                            if candidate_max is None:
                                raise ValueError("Unable to derive max correlation from records")
                            max_correlation = float(candidate_max)
                        else:
                            raise KeyError("Correlation response missing 'schema.max' or top-level 'max' and no 'records' to derive from")
                else:
                    raise TypeError("Correlation data is not a dictionary")

                passes_check = max_correlation < threshold

                results['checks'][check_type] = {
                    'max_correlation': max_correlation,
                    'passes_check': passes_check,
                    'correlation_data': correlation_data
                }

                if not passes_check:
                    all_passed = False

            results['all_passed'] = all_passed

            return results

        except Exception as e:
            self.log(f"Failed to check correlation: {str(e)}", "ERROR")
            raise
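    # The fallback chain above (schema.max, then top-level max, then scanning
    # records) in sketch form, with hypothetical payloads shaped to match each
    # branch (not real API responses):
    #
    #   with_schema  = {'schema': {'max': 0.41}, 'records': []}    # -> 0.41 via schema['max']
    #   with_top_max = {'max': 0.55}                               # -> 0.55 via top-level 'max'
    #   records_only = {'records': [{'correlation': 0.12},
    #                               {'correlation': 0.63}]}        # -> 0.63 derived from records
    #
    # Each derived value is then compared against the threshold (default 0.7);
    # a max correlation at or above the threshold fails the check.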
    async def get_submission_check(self, alpha_id: str) -> Dict[str, Any]:
        """Comprehensive pre-submission check."""
        await self.ensure_authenticated()

        try:
            # Get correlation checks using the unified function
            correlation_checks = await self.check_correlation(alpha_id, correlation_type="both")

            # Get alpha details for additional validation
            alpha_details = await self.get_alpha_details(alpha_id)

            # Compile comprehensive check results
            checks = {
                'correlation_checks': correlation_checks,
                'alpha_details': alpha_details,
                'all_passed': correlation_checks['all_passed']
            }

            return checks
        except Exception as e:
            self.log(f"Failed to get submission check: {str(e)}", "ERROR")
            raise
    async def set_alpha_properties(self, alpha_id: str, name: Optional[str] = None,
                                   color: Optional[str] = None, tags: Optional[List[str]] = None,
                                   selection_desc: str = "None", combo_desc: str = "None") -> Dict[str, Any]:
        """Update alpha properties (name, color, tags, descriptions)."""
        await self.ensure_authenticated()

        try:
            data = {}
            if name:
                data['name'] = name
            if color:
                data['color'] = color
            if tags:
                data['tags'] = tags
            if selection_desc:
                data['selectionDesc'] = selection_desc
            if combo_desc:
                data['comboDesc'] = combo_desc

            response = self.session.patch(f"{self.base_url}/alphas/{alpha_id}", json=data)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to set alpha properties: {str(e)}", "ERROR")
            raise
    async def get_record_sets(self, alpha_id: str) -> Dict[str, Any]:
        """List available record sets for an alpha."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get record sets: {str(e)}", "ERROR")
            raise

    async def get_record_set_data(self, alpha_id: str, record_set_name: str) -> Dict[str, Any]:
        """Get data from a specific record set."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/{record_set_name}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get record set data: {str(e)}", "ERROR")
            raise

    async def get_user_activities(self, user_id: str, grouping: Optional[str] = None) -> Dict[str, Any]:
        """Get user activity diversity data."""
        await self.ensure_authenticated()

        try:
            params = {}
            if grouping:
                params['grouping'] = grouping

            response = self.session.get(f"{self.base_url}/users/{user_id}/activities", params=params)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get user activities: {str(e)}", "ERROR")
            raise
    async def get_pyramid_multipliers(self) -> Dict[str, Any]:
        """Get current pyramid multipliers showing BRAIN's encouragement levels."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/users/self/activities/pyramid-multipliers")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get pyramid multipliers: {str(e)}", "ERROR")
            raise
    async def get_pyramid_alphas(self, start_date: Optional[str] = None,
                                 end_date: Optional[str] = None) -> Dict[str, Any]:
        """Get user's current alpha distribution across pyramid categories."""
        await self.ensure_authenticated()

        try:
            params = {}
            if start_date:
                params['startDate'] = start_date
            if end_date:
                params['endDate'] = end_date

            # Try the user-specific activities endpoint first (like pyramid-multipliers)
            response = self.session.get(f"{self.base_url}/users/self/activities/pyramid-alphas", params=params)

            # If that fails, try alternative endpoints
            if response.status_code == 404:
                # Try alternative endpoint structure
                response = self.session.get(f"{self.base_url}/users/self/pyramid/alphas", params=params)

                if response.status_code == 404:
                    # Try yet another alternative
                    response = self.session.get(f"{self.base_url}/activities/pyramid-alphas", params=params)

                    if response.status_code == 404:
                        # Return an informative error with what we tried
                        return {
                            "error": "Pyramid alphas endpoint not found",
                            "tried_endpoints": [
                                "/users/self/activities/pyramid-alphas",
                                "/users/self/pyramid/alphas",
                                "/activities/pyramid-alphas"
                            ],
                            "suggestion": "This endpoint may not be available in the current API version"
                        }

            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get pyramid alphas: {str(e)}", "ERROR")
            raise
    async def get_user_competitions(self, user_id: Optional[str] = None) -> Dict[str, Any]:
        """Get list of competitions that the user is participating in."""
        await self.ensure_authenticated()

        try:
            if not user_id:
                # Get current user ID if not specified
                user_response = self.session.get(f"{self.base_url}/users/self")
                if user_response.status_code == 200:
                    user_data = user_response.json()
                    user_id = user_data.get('id')
                else:
                    user_id = 'self'

            response = self.session.get(f"{self.base_url}/users/{user_id}/competitions")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get user competitions: {str(e)}", "ERROR")
            raise
    async def get_competition_details(self, competition_id: str) -> Dict[str, Any]:
        """Get detailed information about a specific competition."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/competitions/{competition_id}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get competition details: {str(e)}", "ERROR")
            raise

    async def get_competition_agreement(self, competition_id: str) -> Dict[str, Any]:
        """Get the rules, terms, and agreement for a specific competition."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/competitions/{competition_id}/agreement")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get competition agreement: {str(e)}", "ERROR")
            raise
    async def get_platform_setting_options(self) -> Dict[str, Any]:
        """Get available instrument types, regions, delays, and universes."""
        await self.ensure_authenticated()

        try:
            # Use OPTIONS method on the simulations endpoint to get configuration options
            response = self.session.options(f"{self.base_url}/simulations")
            response.raise_for_status()

            # Parse the settings structure from the response
            settings_data = response.json()
            settings_options = settings_data['actions']['POST']['settings']['children']

            # Extract instrument configuration options
            instrument_type_data = {}
            region_data = {}
            universe_data = {}
            delay_data = {}
            neutralization_data = {}

            # Parse each setting type
            for key, setting in settings_options.items():
                if setting['type'] == 'choice':
                    if setting['label'] == 'Instrument type':
                        instrument_type_data = setting['choices']
                    elif setting['label'] == 'Region':
                        region_data = setting['choices']['instrumentType']
                    elif setting['label'] == 'Universe':
                        universe_data = setting['choices']['instrumentType']
                    elif setting['label'] == 'Delay':
                        delay_data = setting['choices']['instrumentType']
                    elif setting['label'] == 'Neutralization':
                        neutralization_data = setting['choices']['instrumentType']

            # Build comprehensive instrument options
            data_list = []

            for instrument_type in instrument_type_data:
                for region in region_data[instrument_type['value']]:
                    for delay in delay_data[instrument_type['value']]['region'][region['value']]:
                        row = {
                            'InstrumentType': instrument_type['value'],
                            'Region': region['value'],
                            'Delay': delay['value']
                        }
                        row['Universe'] = [
                            item['value'] for item in universe_data[instrument_type['value']]['region'][region['value']]
                        ]
                        row['Neutralization'] = [
                            item['value'] for item in neutralization_data[instrument_type['value']]['region'][region['value']]
                        ]
                        data_list.append(row)

            # Return structured data
            return {
                'instrument_options': data_list,
                'total_combinations': len(data_list),
                'instrument_types': [item['value'] for item in instrument_type_data],
                'regions_by_type': {
                    item['value']: [r['value'] for r in region_data[item['value']]]
                    for item in instrument_type_data
                }
            }

        except Exception as e:
            self.log(f"Failed to get platform setting options: {str(e)}", "ERROR")
            raise
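    # The parsing above assumes a particular shape for the OPTIONS response.
    # A rough, hypothetical skeleton of that shape, inferred from the parsing
    # code rather than from API documentation:
    #
    #   {"actions": {"POST": {"settings": {"children": {
    #       "instrumentType": {"type": "choice", "label": "Instrument type",
    #                          "choices": [{"value": "EQUITY"}]},
    #       "region":         {"type": "choice", "label": "Region",
    #                          "choices": {"instrumentType": {"EQUITY": [{"value": "USA"}]}}},
    #       "delay":          {"type": "choice", "label": "Delay",
    #                          "choices": {"instrumentType": {"EQUITY": {"region": {"USA": [{"value": 1}]}}}}},
    #   }}}}}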
    async def performance_comparison(self, alpha_id: str, team_id: Optional[str] = None,
                                     competition: Optional[str] = None) -> Dict[str, Any]:
        """Get performance comparison data for an alpha."""
        await self.ensure_authenticated()

        try:
            params = {"teamId": team_id, "competition": competition}
            params = {k: v for k, v in params.items() if v is not None}

            response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/performance-comparison", params=params)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get performance comparison: {str(e)}", "ERROR")
            raise
    # --- Helper function for data flattening ---

    async def expand_nested_data(self, data: List[Dict[str, Any]], preserve_original: bool = True) -> List[Dict[str, Any]]:
        """Flatten complex nested data structures into tabular format."""
        try:
            df = pd.json_normalize(data, sep='_')
            if preserve_original:
                original_df = pd.DataFrame(data)
                df = pd.concat([original_df, df], axis=1)
                df = df.loc[:, ~df.columns.duplicated()]
            return df.to_dict(orient='records')
        except Exception as e:
            self.log(f"Failed to expand nested data: {str(e)}", "ERROR")
            raise
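    # What the flattening does, as a standalone sketch on a hypothetical
    # nested record:
    #
    #   data = [{"id": "a1", "is": {"sharpe": 1.2, "fitness": 0.9}}]
    #   pd.json_normalize(data, sep="_").to_dict(orient="records")
    #   # -> [{'id': 'a1', 'is_sharpe': 1.2, 'is_fitness': 0.9}]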
    # --- New documentation endpoint ---

    async def get_documentation_page(self, page_id: str) -> Dict[str, Any]:
        """Retrieve detailed content of a specific documentation page/article."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/tutorial-pages/{page_id}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get documentation page: {str(e)}", "ERROR")
            raise

brain_client = BrainApiClient()
# --- Configuration Management ---

def _resolve_config_path(for_write: bool = False) -> str:
    """
    Resolve the configuration file path.

    Checks for a file specified by the MCP_CONFIG_FILE environment variable,
    then falls back to a user_config.json next to this module. If for_write
    is True, it ensures the directory exists.
    """
    if 'MCP_CONFIG_FILE' in os.environ:
        return os.environ['MCP_CONFIG_FILE']

    config_path = Path(__file__).parent / "user_config.json"

    if for_write:
        try:
            config_path.parent.mkdir(parents=True, exist_ok=True)
        except (IOError, OSError) as e:
            logger.warning(f"Could not create config directory {config_path.parent}: {e}")
            # Fall back to a temporary file if the package directory is not writable
            import tempfile
            return tempfile.NamedTemporaryFile(delete=False).name

    return str(config_path)

def load_config() -> Dict[str, Any]:
    """Load configuration from file."""
    config_file = _resolve_config_path()
    if os.path.exists(config_file):
        try:
            with open(config_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (IOError, json.JSONDecodeError) as e:
            logger.error(f"Error loading config file {config_file}: {e}")
    return {}

def save_config(config: Dict[str, Any]):
    """Save configuration to file using the resolved config path.

    This function uses the write-enabled path resolver to handle cases
    where the default config directory is not writable.
    """
    config_file = _resolve_config_path(for_write=True)
    try:
        with open(config_file, 'w', encoding='utf-8') as f:
            json.dump(config, f, indent=2)
    except IOError as e:
        logger.error(f"Error saving config file to {config_file}: {e}")
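# Resolution order in practice: an explicit MCP_CONFIG_FILE wins over the
# packaged default. A brief sketch (the path is hypothetical):
#
#   os.environ["MCP_CONFIG_FILE"] = "/tmp/brain_mcp_config.json"
#   save_config({"credentials": {"email": "user@example.com", "password": "..."}})
#   load_config()["credentials"]["email"]   # -> 'user@example.com'
#
# Without MCP_CONFIG_FILE set, both calls use user_config.json next to this module.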
# --- MCP Tool Definitions ---

mcp = FastMCP(
    "brain-platform-mcp",
    "A server for interacting with the WorldQuant BRAIN platform",
)

@mcp.tool()
async def authenticate(email: Optional[str] = "", password: Optional[str] = "") -> Dict[str, Any]:
    """
    🔐 Authenticate with WorldQuant BRAIN platform.

    This is the first step in any BRAIN workflow. You must authenticate before using any other tools.

    Args:
        email: Your BRAIN platform email address (optional if in config or .brain_credentials)
        password: Your BRAIN platform password (optional if in config or .brain_credentials)

    Returns:
        Authentication result with user info and permissions
    """
    try:
        # Load config to get credentials if not provided
        config = load_config()
        credentials = config.get("credentials", {})
        email = email or credentials.get("email")
        password = password or credentials.get("password")
        if not email or not password:
            return {"error": "Authentication credentials not provided or found in config."}

        auth_result = await brain_client.authenticate(email, password)

        # Save successful credentials
        if auth_result.get('status') == 'authenticated':
            if 'credentials' not in config:
                config['credentials'] = {}
            config['credentials']['email'] = email
            config['credentials']['password'] = password
            save_config(config)

        return auth_result
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
@mcp.tool()
async def manage_config(action: str = "get", settings: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    🔧 Manage configuration settings - get or update configuration.

    Args:
        action: Action to perform ("get" to retrieve config, "set" to update config)
        settings: Configuration settings to update (required when action="set")

    Returns:
        Current or updated configuration including authentication status
    """
    if action == "get":
        config = load_config()
        auth_status = await brain_client.get_authentication_status()

        return {
            "config": config,
            "auth_status": auth_status,
            "is_authenticated": await brain_client.is_authenticated()
        }

    elif action == "set":
        if settings is None:
            return {"error": "Settings parameter is required when action='set'"}

        config = load_config()
        config.update(settings)
        save_config(config)
        return config

    else:
        return {"error": f"Invalid action '{action}'. Use 'get' or 'set'."}
# --- Simulation Tools ---

@mcp.tool()
async def create_simulation(
    type: str = "REGULAR",
    instrument_type: str = "EQUITY",
    region: str = "USA",
    universe: str = "TOP3000",
    delay: int = 1,
    decay: float = 0.0,
    neutralization: str = "NONE",
    truncation: float = 0.0,
    test_period: str = "P0Y0M",
    unit_handling: str = "VERIFY",
    nan_handling: str = "OFF",
    language: str = "FASTEXPR",
    visualization: bool = True,
    regular: Optional[str] = None,
    combo: Optional[str] = None,
    selection: Optional[str] = None,
    pasteurization: str = "ON",
    max_trade: str = "OFF",
    selection_handling: str = "POSITIVE",
    selection_limit: int = 1000,
    component_activation: str = "IS",
) -> Dict[str, Any]:
    """
    🚀 Create a new simulation on BRAIN platform.

    This tool creates and starts a simulation with your alpha code. Use this after you have your alpha formula ready.

    Args:
        type: Simulation type ("REGULAR" or "SUPER")
        instrument_type: Type of instruments (e.g., "EQUITY")
        region: Market region (e.g., "USA")
        universe: Universe of stocks (e.g., "TOP3000")
        delay: Data delay (0 or 1)
        decay: Decay value for the simulation
        neutralization: Neutralization method
        truncation: Truncation value
        test_period: Test period as an ISO 8601 duration (e.g., "P1Y6M" for 1 year 6 months; default "P0Y0M")
        unit_handling: Unit handling method
        nan_handling: NaN handling method
        language: Expression language (e.g., "FASTEXPR")
        visualization: Enable visualization
        regular: Regular simulation code (for REGULAR type)
        combo: Combo code (for SUPER type)
        selection: Selection code (for SUPER type)

    Returns:
        Simulation creation result with ID and location
    """
    try:
        settings = SimulationSettings(
            instrumentType=instrument_type,
            region=region,
            universe=universe,
            delay=delay,
            decay=decay,
            neutralization=neutralization,
            truncation=truncation,
            testPeriod=test_period,
            unitHandling=unit_handling,
            nanHandling=nan_handling,
            language=language,
            visualization=visualization,
            pasteurization=pasteurization,
            maxTrade=max_trade,
            selectionHandling=selection_handling,
            selectionLimit=selection_limit,
            componentActivation=component_activation,
        )

        sim_data = SimulationData(
            type=type,
            settings=settings,
            regular=regular,
            combo=combo,
            selection=selection
        )

        return await brain_client.create_simulation(sim_data)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
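# A typical invocation for a simple REGULAR simulation, as a sketch (the
# expression "rank(-returns)" is a hypothetical placeholder, not a
# recommended alpha):
#
#   result = await create_simulation(
#       type="REGULAR",
#       region="USA",
#       universe="TOP3000",
#       delay=1,
#       regular="rank(-returns)",
#   )
#   # result is expected to contain the simulation ID/location on success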
# --- Alpha and Data Retrieval Tools ---

@mcp.tool()
async def get_alpha_details(alpha_id: str) -> Dict[str, Any]:
    """
    📋 Get detailed information about an alpha.

    Args:
        alpha_id: The ID of the alpha to retrieve

    Returns:
        Detailed alpha information
    """
    try:
        return await brain_client.get_alpha_details(alpha_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
@mcp.tool()
async def get_datasets(
    instrument_type: str = "EQUITY",
    region: str = "USA",
    delay: int = 1,
    universe: str = "TOP3000",
    theme: str = "false",
    search: Optional[str] = None,
) -> Dict[str, Any]:
    """
    📚 Get available datasets for research.

    Use this to discover what data is available for your alpha research.

    Args:
        instrument_type: Type of instruments (e.g., "EQUITY")
        region: Market region (e.g., "USA")
        delay: Data delay (0 or 1)
        universe: Universe of stocks (e.g., "TOP3000")
        theme: Theme filter
        search: Search term to filter datasets

    Returns:
        Available datasets
    """
    try:
        return await brain_client.get_datasets(instrument_type, region, delay, universe, theme, search)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
@mcp.tool()
async def get_datafields(
    instrument_type: str = "EQUITY",
    region: str = "USA",
    delay: int = 1,
    universe: str = "TOP3000",
    theme: str = "false",
    dataset_id: Optional[str] = None,
    data_type: str = "",
    search: Optional[str] = None,
) -> Dict[str, Any]:
    """
    🔍 Get available data fields for alpha construction.

    Use this to find specific data fields you can use in your alpha formulas.

    Args:
        instrument_type: Type of instruments (e.g., "EQUITY")
        region: Market region (e.g., "USA")
        delay: Data delay (0 or 1)
        universe: Universe of stocks (e.g., "TOP3000")
        theme: Theme filter
        dataset_id: Specific dataset ID to filter by
        data_type: Type of data (e.g., "MATRIX", "VECTOR", "GROUP")
        search: Search term to filter fields

    Returns:
        Available data fields
    """
    try:
        return await brain_client.get_datafields(instrument_type, region, delay, universe, theme, dataset_id, data_type, search)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
@mcp.tool()
async def get_alpha_pnl(alpha_id: str) -> Dict[str, Any]:
    """
    📈 Get PnL (Profit and Loss) data for an alpha.

    Args:
        alpha_id: The ID of the alpha

    Returns:
        PnL data for the alpha
    """
    try:
        return await brain_client.get_alpha_pnl(alpha_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
@mcp.tool()
async def get_user_alphas(
    stage: str = "IS",
    limit: int = 30,
    offset: int = 0,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    submission_start_date: Optional[str] = None,
    submission_end_date: Optional[str] = None,
    order: Optional[str] = None,
    hidden: Optional[bool] = None,
) -> Dict[str, Any]:
    """
    👤 Get user's alphas with advanced filtering, pagination, and sorting.

    This tool retrieves a list of your alphas, allowing for detailed filtering based on stage,
    creation date, submission date, and visibility. It also supports pagination and custom sorting.

    Args:
        stage (str): The stage of the alphas to retrieve.
            - "IS": In-Sample (alphas that have not been submitted).
            - "OS": Out-of-Sample (alphas that have been submitted).
            Defaults to "IS".
        limit (int): The maximum number of alphas to return in a single request.
            For example, `limit=50` will return at most 50 alphas. Defaults to 30.
        offset (int): The number of alphas to skip from the beginning of the list.
            Used for pagination. For example, `limit=50, offset=50` will retrieve alphas 51-100.
            Defaults to 0.
        start_date (Optional[str]): The earliest creation date for the alphas to be included.
            Filters for alphas created on or after this date.
            Example format: "2023-01-01T00:00:00Z".
        end_date (Optional[str]): The latest creation date for the alphas to be included.
            Filters for alphas created before this date.
            Example format: "2023-12-31T23:59:59Z".
        submission_start_date (Optional[str]): The earliest submission date for the alphas.
            Only applies to "OS" alphas. Filters for alphas submitted on or after this date.
            Example format: "2024-01-01T00:00:00Z".
        submission_end_date (Optional[str]): The latest submission date for the alphas.
            Only applies to "OS" alphas. Filters for alphas submitted before this date.
            Example format: "2024-06-30T23:59:59Z".
        order (Optional[str]): The sorting order for the returned alphas.
            Prefix with a hyphen (-) for descending order.
            Examples: "name" (sort by name ascending), "-dateSubmitted" (sort by submission date descending).
        hidden (Optional[bool]): Filter alphas based on their visibility.
            - `True`: Only return hidden alphas.
            - `False`: Only return non-hidden alphas.
            If not provided, both hidden and non-hidden alphas are returned.

    Returns:
        Dict[str, Any]: A dictionary containing a list of alpha details under the 'results' key,
        along with pagination information. If an error occurs, it returns a dictionary with an 'error' key.
    """
    try:
        return await brain_client.get_user_alphas(
            stage=stage, limit=limit, offset=offset, start_date=start_date,
            end_date=end_date, submission_start_date=submission_start_date,
            submission_end_date=submission_end_date, order=order, hidden=hidden
        )
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
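# The limit/offset semantics above compose into a simple pagination loop;
# a sketch:
#
#   async def fetch_all_os_alphas():
#       all_alphas, offset, page = [], 0, 50
#       while True:
#           resp = await get_user_alphas(stage="OS", limit=page, offset=offset,
#                                        order="-dateSubmitted")
#           batch = resp.get("results", [])
#           all_alphas.extend(batch)
#           if len(batch) < page:   # short page -> no more results
#               break
#           offset += page
#       return all_alphas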
@mcp.tool()
async def submit_alpha(alpha_id: str) -> Dict[str, Any]:
    """
    📤 Submit an alpha for production.

    Use this when your alpha is ready for production deployment.

    Args:
        alpha_id: The ID of the alpha to submit

    Returns:
        Submission result
    """
    try:
        success = await brain_client.submit_alpha(alpha_id)
        return {"success": success}
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
@mcp.tool()
async def value_factor_trendScore(start_date: str, end_date: str) -> Dict[str, Any]:
    """Compute and return the diversity score for REGULAR alphas in a submission-date window.

    This function calculates the diversity of the user's submissions; checking that diversity
    gives a good picture of the value factor's trend.
    This MCP tool wraps BrainApiClient.value_factor_trendScore and always uses submission dates (OS).

    Inputs:
    - start_date: ISO UTC start datetime (e.g. '2025-08-14T00:00:00Z')
    - end_date: ISO UTC end datetime (e.g. '2025-08-18T23:59:59Z')
    - p_max: optional integer total number of pyramid categories for normalization
      (documented by the underlying client; not exposed as a parameter of this wrapper)

    Returns: compact JSON with diversity_score, N, A, P, P_max, S_A, S_P, S_H, per_pyramid_counts
    """
    try:
        return await brain_client.value_factor_trendScore(start_date=start_date, end_date=end_date)
    except Exception as e:
        return {"error": str(e)}

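# Illustrative usage sketch (not part of the package): score one submission
# window via the client call wrapped above. The dates reuse the docstring's
# example format; the printed keys are the documented return fields.
async def _example_weekly_diversity() -> None:
    score = await brain_client.value_factor_trendScore(
        start_date="2025-08-14T00:00:00Z",
        end_date="2025-08-18T23:59:59Z",
    )
    print(score.get("diversity_score"), score.get("per_pyramid_counts"))
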
# --- Community and Events Tools ---

@mcp.tool()
async def get_events() -> Dict[str, Any]:
    """
    🏆 Get available events and competitions.

    Returns:
        Available events and competitions
    """
    try:
        return await brain_client.get_events()
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_leaderboard(user_id: Optional[str] = None) -> Dict[str, Any]:
    """
    🏅 Get leaderboard data.

    Args:
        user_id: Optional user ID to filter results

    Returns:
        Leaderboard data
    """
    try:
        return await brain_client.get_leaderboard(user_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

# --- Platform Data Tools ---

@mcp.tool()
async def get_operators() -> Dict[str, Any]:
    """
    🔧 Get available operators for alpha creation.

    Returns:
        Dictionary containing operators list and count
    """
    try:
        operators = await brain_client.get_operators()
        if isinstance(operators, list):
            return {"results": operators, "count": len(operators)}
        return operators
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def run_selection(
    selection: str,
    instrument_type: str = "EQUITY",
    region: str = "USA",
    delay: int = 1,
    selection_limit: int = 1000,
    selection_handling: str = "POSITIVE",
) -> Dict[str, Any]:
    """
    🎯 Run a selection query to filter instruments.

    Args:
        selection: Selection criteria
        instrument_type: Type of instruments
        region: Geographic region
        delay: Delay setting
        selection_limit: Maximum number of results
        selection_handling: How to handle selection results

    Returns:
        Selection results
    """
    try:
        return await brain_client.run_selection(
            selection, instrument_type, region, delay, selection_limit, selection_handling
        )
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_user_profile(user_id: str = "self") -> Dict[str, Any]:
    """
    👤 Get user profile information.

    Args:
        user_id: User ID (default: "self" for current user)

    Returns:
        User profile data
    """
    try:
        return await brain_client.get_user_profile(user_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_documentations() -> Dict[str, Any]:
    """
    📚 Get available documentation and learning materials.

    Returns:
        List of documentation items
    """
    try:
        return await brain_client.get_documentations()
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

# --- Message and Forum Tools ---

@mcp.tool()
async def get_messages(limit: Optional[int] = None, offset: int = 0) -> Dict[str, Any]:
    """
    💬 Get messages for the current user with optional pagination.

    Args:
        limit: Maximum number of messages to return (e.g., 10 for top 10 messages)
        offset: Number of messages to skip (for pagination)

    Returns:
        Messages for the current user, optionally limited by count
    """
    try:
        return await brain_client.get_messages(limit, offset)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_glossary_terms(email: str = "", password: str = "") -> List[Dict[str, str]]:
    """
    📚 Get glossary terms from WorldQuant BRAIN forum.

    Note: This uses Playwright and is implemented in forum_functions.py

    Args:
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)

    Returns:
        A list of glossary terms with definitions
    """
    try:
        config = load_config()
        credentials = config.get("credentials", {})
        email = email or credentials.get("email")
        password = password or credentials.get("password")
        if not email or not password:
            raise ValueError("Authentication credentials not provided or found in config.")

        return await brain_client.get_glossary_terms(email, password)
    except Exception as e:
        logger.error(f"Error in get_glossary_terms tool: {e}")
        return [{"error": str(e)}]

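# Illustrative sketch (not part of the package): the credential fallback above
# implies a config.json of roughly this shape. Only the "credentials", "email",
# and "password" keys are grounded in the code; everything else about the file
# is an assumption.
#
#     {
#         "credentials": {
#             "email": "you@example.com",
#             "password": "********"
#         }
#     }
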
@mcp.tool()
async def search_forum_posts(search_query: str, email: str = "", password: str = "",
                             max_results: int = 50) -> Dict[str, Any]:
    """
    🔍 Search forum posts on WorldQuant BRAIN support site.

    Note: This uses Playwright and is implemented in forum_functions.py

    Args:
        search_query: Search term or phrase
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)
        max_results: Maximum number of results to return (default: 50)

    Returns:
        Search results with analysis
    """
    try:
        config = load_config()
        credentials = config.get("credentials", {})
        email = email or credentials.get("email")
        password = password or credentials.get("password")
        if not email or not password:
            return {"error": "Authentication credentials not provided or found in config."}

        return await brain_client.search_forum_posts(email, password, search_query, max_results)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def read_forum_post(article_id: str, email: str = "", password: str = "",
                          include_comments: bool = True) -> Dict[str, Any]:
    """
    📄 Get a specific forum post by article ID.

    Note: This uses Playwright and is implemented in forum_functions.py

    Args:
        article_id: The article ID to retrieve (e.g., "32984819083415-新人求模板")
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)
        include_comments: Whether to also fetch the post's comments (default: True)

    Returns:
        Forum post content with comments
    """
    try:
        config = load_config()
        credentials = config.get("credentials", {})
        email = email or credentials.get("email")
        password = password or credentials.get("password")
        if not email or not password:
            return {"error": "Authentication credentials not provided or found in config."}

        return await brain_client.read_forum_post(email, password, article_id, include_comments)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_alpha_yearly_stats(alpha_id: str) -> Dict[str, Any]:
    """Get yearly statistics for an alpha."""
    try:
        return await brain_client.get_alpha_yearly_stats(alpha_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def check_correlation(alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
    """Check alpha correlation against production alphas, self alphas, or both."""
    try:
        return await brain_client.check_correlation(alpha_id, correlation_type, threshold)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_submission_check(alpha_id: str) -> Dict[str, Any]:
    """Comprehensive pre-submission check."""
    try:
        return await brain_client.get_submission_check(alpha_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

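# Illustrative workflow sketch (not part of the package): run the correlation
# check and the comprehensive pre-submission check before submitting. The
# gating on 'error' keys is an assumption; the calls are the ones wrapped above.
async def _example_preflight_and_submit(alpha_id: str) -> Dict[str, Any]:
    correlation = await brain_client.check_correlation(alpha_id, "both", 0.7)
    checks = await brain_client.get_submission_check(alpha_id)
    if "error" in correlation or "error" in checks:
        # Surface the failing reports instead of submitting
        return {"submitted": False, "correlation": correlation, "checks": checks}
    return {"submitted": await brain_client.submit_alpha(alpha_id)}
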
@mcp.tool()
async def set_alpha_properties(alpha_id: str, name: Optional[str] = None,
                               color: Optional[str] = None, tags: Optional[List[str]] = None,
                               selection_desc: str = "None", combo_desc: str = "None") -> Dict[str, Any]:
    """Update alpha properties (name, color, tags, descriptions)."""
    try:
        return await brain_client.set_alpha_properties(alpha_id, name, color, tags, selection_desc, combo_desc)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_record_sets(alpha_id: str) -> Dict[str, Any]:
    """List available record sets for an alpha."""
    try:
        return await brain_client.get_record_sets(alpha_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_record_set_data(alpha_id: str, record_set_name: str) -> Dict[str, Any]:
    """Get data from a specific record set."""
    try:
        return await brain_client.get_record_set_data(alpha_id, record_set_name)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_user_activities(user_id: str, grouping: Optional[str] = None) -> Dict[str, Any]:
    """Get user activity diversity data."""
    try:
        return await brain_client.get_user_activities(user_id, grouping)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_pyramid_multipliers() -> Dict[str, Any]:
    """Get current pyramid multipliers showing BRAIN's encouragement levels."""
    try:
        return await brain_client.get_pyramid_multipliers()
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_pyramid_alphas(start_date: Optional[str] = None,
                             end_date: Optional[str] = None) -> Dict[str, Any]:
    """Get user's current alpha distribution across pyramid categories."""
    try:
        return await brain_client.get_pyramid_alphas(start_date, end_date)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

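# Illustrative sketch (not part of the package): fetch the platform's pyramid
# multipliers alongside the user's own distribution so the two can be compared.
# Only the two calls are grounded in the code above; the payload shapes are not
# assumed here.
async def _example_pyramid_overview() -> None:
    multipliers = await brain_client.get_pyramid_multipliers()
    distribution = await brain_client.get_pyramid_alphas()
    print(multipliers)
    print(distribution)
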
@mcp.tool()
async def get_user_competitions(user_id: Optional[str] = None) -> Dict[str, Any]:
    """Get list of competitions that the user is participating in."""
    try:
        return await brain_client.get_user_competitions(user_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_competition_details(competition_id: str) -> Dict[str, Any]:
    """Get detailed information about a specific competition."""
    try:
        return await brain_client.get_competition_details(competition_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_competition_agreement(competition_id: str) -> Dict[str, Any]:
    """Get the rules, terms, and agreement for a specific competition."""
    try:
        return await brain_client.get_competition_agreement(competition_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

@mcp.tool()
async def get_platform_setting_options() -> Dict[str, Any]:
    """Discover valid simulation setting options (instrument types, regions, delays, universes, neutralization).

    Use this when a simulation request might contain an invalid/mismatched setting. If an AI or user supplies
    incorrect parameters (e.g., wrong region for an instrument type), call this tool to retrieve the authoritative
    option sets and correct the inputs before proceeding.

    Returns:
        A structured list of valid combinations and choice lists to validate or fix simulation settings.
    """
    try:
        return await brain_client.get_platform_setting_options()
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

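# Illustrative sketch (not part of the package): consult the valid option sets
# before simulating, as the docstring above recommends. The structure of the
# returned options is not specified here, so this shows the call order only.
async def _example_validate_before_simulation() -> Dict[str, Any]:
    options = await brain_client.get_platform_setting_options()
    # Inspect `options` and correct any region/universe/delay mismatch
    # before issuing the simulation request.
    return options
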
@mcp.tool()
async def performance_comparison(alpha_id: str, team_id: Optional[str] = None,
                                 competition: Optional[str] = None) -> Dict[str, Any]:
    """Get performance comparison data for an alpha."""
    try:
        return await brain_client.performance_comparison(alpha_id, team_id, competition)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

# --- Dataframe Tool ---

@mcp.tool()
async def expand_nested_data(data: List[Dict[str, Any]], preserve_original: bool = True) -> List[Dict[str, Any]]:
    """Flatten complex nested data structures into tabular format."""
    try:
        return await brain_client.expand_nested_data(data, preserve_original)
    except Exception as e:
        return [{"error": f"An unexpected error occurred: {str(e)}"}]

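# Illustrative sketch (not part of the package): the kind of nested-to-tabular
# flattening expand_nested_data performs. This standalone helper is NOT the
# brain_client implementation, only a minimal example of the idea.
def _example_flatten(record: Dict[str, Any], prefix: str = "") -> Dict[str, Any]:
    flat: Dict[str, Any] = {}
    for key, value in record.items():
        name = f"{prefix}{key}"
        if isinstance(value, dict):
            flat.update(_example_flatten(value, f"{name}."))  # recurse into nested dicts
        else:
            flat[name] = value
    return flat

# e.g. _example_flatten({"is": {"sharpe": 1.2}}) == {"is.sharpe": 1.2}
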
# --- Documentation Tool ---

@mcp.tool()
async def get_documentation_page(page_id: str) -> Dict[str, Any]:
    """Retrieve detailed content of a specific documentation page/article."""
    try:
        return await brain_client.get_documentation_page(page_id)
    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

# --- Advanced Simulation Tools ---

@mcp.tool()
async def create_multi_simulation(
    alpha_expressions: List[str],
    instrument_type: str = "EQUITY",
    region: str = "USA",
    universe: str = "TOP3000",
    delay: int = 1,
    decay: float = 0.0,
    neutralization: str = "NONE",
    truncation: float = 0.0,
    test_period: str = "P0Y0M",
    unit_handling: str = "VERIFY",
    nan_handling: str = "OFF",
    language: str = "FASTEXPR",
    visualization: bool = True,
    pasteurization: str = "ON",
    max_trade: str = "OFF"
) -> Dict[str, Any]:
    """
    🚀 Create multiple regular alpha simulations on BRAIN platform in a single request.

    This tool creates a multisimulation with multiple regular alpha expressions,
    waits for all simulations to complete, and returns detailed results for each alpha.

    ⏰ NOTE: Multisimulations can take 8+ minutes to complete. This tool will wait
    for the entire process and return comprehensive results.
    Call get_platform_setting_options to get the valid options for the simulation.

    Args:
        alpha_expressions: List of alpha expressions (2-8 expressions required)
        instrument_type: Type of instruments (default: "EQUITY")
        region: Market region (default: "USA")
        universe: Universe of stocks (default: "TOP3000")
        delay: Data delay (default: 1)
        decay: Decay value (default: 0.0)
        neutralization: Neutralization method (default: "NONE")
        truncation: Truncation value (default: 0.0)
        test_period: Test period (default: "P0Y0M")
        unit_handling: Unit handling method (default: "VERIFY")
        nan_handling: NaN handling method (default: "OFF")
        language: Expression language (default: "FASTEXPR")
        visualization: Enable visualization (default: True)
        pasteurization: Pasteurization setting (default: "ON")
        max_trade: Max trade setting (default: "OFF")

    Returns:
        Dictionary containing multisimulation results and individual alpha details
    """
    try:
        # Validate input
        if len(alpha_expressions) < 2:
            return {"error": "At least 2 alpha expressions are required"}
        if len(alpha_expressions) > 8:
            return {"error": "Maximum 8 alpha expressions allowed per request"}

        # Create multisimulation data
        multisimulation_data = []
        for alpha_expr in alpha_expressions:
            simulation_item = {
                'type': 'REGULAR',
                'settings': {
                    'instrumentType': instrument_type,
                    'region': region,
                    'universe': universe,
                    'delay': delay,
                    'decay': decay,
                    'neutralization': neutralization,
                    'truncation': truncation,
                    'pasteurization': pasteurization,
                    'unitHandling': unit_handling,
                    'nanHandling': nan_handling,
                    'language': language,
                    'visualization': visualization,
                    'testPeriod': test_period,
                    'maxTrade': max_trade
                },
                'regular': alpha_expr
            }
            multisimulation_data.append(simulation_item)

        # Send multisimulation request
        response = brain_client.session.post(f"{brain_client.base_url}/simulations", json=multisimulation_data)

        if response.status_code != 201:
            return {"error": f"Failed to create multisimulation. Status: {response.status_code}"}

        # Get multisimulation location
        location = response.headers.get('Location', '')
        if not location:
            return {"error": "No location header in multisimulation response"}

        # Wait for children to appear and get results
        return await _wait_for_multisimulation_completion(location, len(alpha_expressions))

    except Exception as e:
        return {"error": f"Error creating multisimulation: {str(e)}"}

async def _wait_for_multisimulation_completion(location: str, expected_children: int) -> Dict[str, Any]:
    """Wait for multisimulation to complete and return results"""
    try:
        # Simple progress indicator for users
        print("Waiting for multisimulation to complete... (this may take several minutes)")
        print(f"Expected {expected_children} alpha simulations")
        print()
        # Wait for children to appear - tolerant enough for 8+ minute multisimulations
        children = []
        max_wait_attempts = 200  # Sized for multisimulations that run 8+ minutes
        wait_attempt = 0

        while wait_attempt < max_wait_attempts and len(children) == 0:
            wait_attempt += 1

            try:
                multisim_response = brain_client.session.get(location)
                if multisim_response.status_code == 200:
                    multisim_data = multisim_response.json()
                    children = multisim_data.get('children', [])

                    if children:
                        break
                    else:
                        # Wait before next attempt - use longer intervals for multisimulations
                        retry_after = multisim_response.headers.get("Retry-After", 5)
                        wait_time = float(retry_after)
                        await asyncio.sleep(wait_time)
                else:
                    await asyncio.sleep(5)
            except Exception:
                await asyncio.sleep(5)

        if not children:
            return {"error": f"Children did not appear within {max_wait_attempts} attempts (multisimulation may still be processing)"}

        # Process each child to get alpha results
        alpha_results = []
        for i, child_id in enumerate(children):
            try:
                # The children are full URLs, not just IDs
                child_url = child_id if child_id.startswith('http') else f"{brain_client.base_url}/simulations/{child_id}"

                # Wait for this alpha to complete - more tolerant timing
                finished = False
                max_alpha_attempts = 100  # Sized for longer alpha processing
                alpha_attempt = 0

                while not finished and alpha_attempt < max_alpha_attempts:
                    alpha_attempt += 1

                    try:
                        alpha_progress = brain_client.session.get(child_url)
                        if alpha_progress.status_code == 200:
                            alpha_data = alpha_progress.json()
                            # Header values arrive as strings, so convert before comparing
                            retry_after = float(alpha_progress.headers.get("Retry-After", 0))

                            if retry_after == 0:
                                finished = True
                                break
                            else:
                                await asyncio.sleep(retry_after)
                        else:
                            await asyncio.sleep(5)
                    except Exception:
                        await asyncio.sleep(5)

                if finished:
                    # Get alpha details from the completed simulation
                    alpha_id = alpha_data.get("alpha")
                    if alpha_id:
                        # Now get the actual alpha details from the alpha endpoint
                        alpha_details = brain_client.session.get(f"{brain_client.base_url}/alphas/{alpha_id}")
                        if alpha_details.status_code == 200:
                            alpha_detail_data = alpha_details.json()
                            alpha_results.append({
                                'alpha_id': alpha_id,
                                'location': child_url,
                                'details': alpha_detail_data
                            })
                        else:
                            alpha_results.append({
                                'alpha_id': alpha_id,
                                'location': child_url,
                                'error': f'Failed to get alpha details: {alpha_details.status_code}'
                            })
                    else:
                        alpha_results.append({
                            'location': child_url,
                            'error': 'No alpha ID found in completed simulation'
                        })
                else:
                    alpha_results.append({
                        'location': child_url,
                        'error': f'Alpha simulation did not complete within {max_alpha_attempts} attempts'
                    })

            except Exception as e:
                alpha_results.append({
                    'location': f"child_{i+1}",
                    'error': str(e)
                })

        # Return comprehensive results
        print(f"Multisimulation completed! Retrieved {len(alpha_results)} alpha results")
        return {
            'success': True,
            'message': f'Successfully created {expected_children} regular alpha simulations',
            'total_requested': expected_children,
            'total_created': len(alpha_results),
            'multisimulation_id': location.split('/')[-1],
            'multisimulation_location': location,
            'alpha_results': alpha_results
        }

    except Exception as e:
        return {"error": f"Error waiting for multisimulation completion: {str(e)}"}

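# Illustrative usage sketch (not part of the package): batching expressions
# through create_multi_simulation. The expressions are hypothetical; the 2-8
# bound and the result keys are the ones documented above.
#
#     results = await create_multi_simulation(
#         alpha_expressions=["rank(close)", "rank(-returns)"],
#         region="USA", universe="TOP3000",
#     )
#     for item in results.get("alpha_results", []):
#         print(item.get("alpha_id"), item.get("error"))
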
# --- Payment and Financial Tools ---

@mcp.tool()
async def get_daily_and_quarterly_payment(email: str = "", password: str = "") -> Dict[str, Any]:
    """
    Get daily and quarterly payment information from WorldQuant BRAIN platform.

    This function retrieves both base payments (daily alpha performance payments) and
    other payments (competition rewards, quarterly payments, referrals, etc.).

    Args:
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)

    Returns:
        Dictionary containing base payment and other payment data with summaries and detailed records
    """
    try:
        config = load_config()
        credentials = config.get("credentials", {})
        email = email or credentials.get("email")
        password = password or credentials.get("password")
        if not email or not password:
            return {"error": "Authentication credentials not provided or found in config."}

        await brain_client.authenticate(email, password)

        # Get base payments
        try:
            base_response = brain_client.session.get(f"{brain_client.base_url}/users/self/activities/base-payment")
            base_response.raise_for_status()
            base_payments = base_response.json()
        except Exception:
            base_payments = "no data"

        # Get other payments
        try:
            other_response = brain_client.session.get(f"{brain_client.base_url}/users/self/activities/other-payment")
            other_response.raise_for_status()
            other_payments = other_response.json()
        except Exception:
            other_payments = "no data"
        return {
            "base_payments": base_payments,
            "other_payments": other_payments
        }

    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}

from typing import Sequence

@mcp.tool()
async def lookINTO_SimError_message(locations: Sequence[str]) -> Dict[str, Any]:
    """
    Fetch and parse error/status from multiple simulation locations (URLs).

    Args:
        locations: List of simulation result URLs (e.g., /simulations/{id})

    Returns:
        Dict with a 'results' key holding a list of dicts, each containing the
        location, the error message, and the raw response.
    """
    results = []
    for loc in locations:
        try:
            resp = brain_client.session.get(loc)
            if resp.status_code != 200:
                results.append({
                    "location": loc,
                    "error": f"HTTP {resp.status_code}",
                    "raw": resp.text
                })
                continue
            data = resp.json() if resp.text else {}
            # Try to extract error message or status
            error_msg = data.get("error") or data.get("message")
            # If alpha ID is missing, include that info
            if not data.get("alpha"):
                error_msg = error_msg or ("Simulation did not get through; if you are running a "
                                          "multisimulation, check the other children locations in your request")
            results.append({
                "location": loc,
                "error": error_msg,
                "raw": data
            })
        except Exception as e:
            results.append({
                "location": loc,
                "error": str(e),
                "raw": None
            })
    return {"results": results}

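# Illustrative usage sketch (not part of the package): feeding child simulation
# locations into lookINTO_SimError_message after a failed multisimulation. The
# URL below is a hypothetical placeholder.
#
#     report = await lookINTO_SimError_message(
#         ["https://api.worldquantbrain.com/simulations/abc123"]
#     )
#     for entry in report["results"]:
#         print(entry["location"], entry["error"])
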

# --- Main entry point ---
if __name__ == "__main__":
    print("running the server")
    mcp.run()