cnhkmcp 2.1.2__py3-none-any.whl → 2.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnhkmcp/__init__.py +126 -125
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/config.json +1 -1
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/ace_lib.py +4 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_10_Steps_to_Start_on_BRAIN_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Intermediate_Pack_-_Improve_your_Alpha_2_2_documentation.json +174 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Intermediate_Pack_-_Understand_Results_1_2_documentation.json +167 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Introduction_to_Alphas_documentation.json +145 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_Introduction_to_BRAIN_Expression_Language_documentation.json +107 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001_WorldQuant_Challenge_documentation.json +56 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/001__Read_this_First_-_Starter_Pack_documentation.json +404 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002_How_to_choose_the_Simulation_Settings_documentation.json +268 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002_Simulate_your_first_Alpha_documentation.json +88 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Beginners_documentation.json +254 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Bronze_Users_documentation.json +114 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__Alpha_Examples_for_Silver_Users_documentation.json +79 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/002__How_BRAIN_works_documentation.json +184 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/003_Clear_these_tests_before_submitting_an_Alpha_documentation.json +388 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/003_Parameters_in_the_Simulation_results_documentation.json +243 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Group_Data_Fields_documentation.json +69 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_How_to_use_the_Data_Explorer_documentation.json +142 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Model77_dataset_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Sentiment1_dataset_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Understanding_Data_in_BRAIN_Key_Concepts_and_Tips_documentation.json +182 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/004_Vector_Data_Fields_documentation.json +30 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Crowding_Risk-Neutralized_Alphas_documentation.json +64 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_D0_documentation.json +66 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Double_Neutralization_documentation.json +53 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Fast_D1_Documentation_documentation.json +304 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Investability_Constrained_Metrics_documentation.json +129 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Must-read_posts_How_to_improve_your_Alphas_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Neutralization_documentation.json +29 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_RAM_Risk-Neutralized_Alphas_documentation.json +64 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Risk_Neutralization_Default_setting_documentation.json +75 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Risk_Neutralized_Alphas_documentation.json +171 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/005_Statistical_Risk-Neutralized_Alphas_documentation.json +51 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_EUR_TOP2500_Universe_documentation.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_GLB_TOPDIV3000_Universe_documentation.json +48 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_Started_China_Research_for_Consultants_Gold_documentation.json +142 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_started_on_Illiquid_Universes_Gold_documentation.json +46 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Getting_started_with_USA_TOPSP500_universe_Gold_documentation.json +62 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_Global_Alphas_Gold_documentation.json +66 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/006_India_Alphas_documentation.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Dos_and_Don_ts_documentation.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Features_documentation.json +239 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Simulation_Features_documentation.json +149 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Consultant_Submission_Tests_documentation.json +363 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Finding_Consultant_Alphas_documentation.json +333 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Power_Pool_Alphas_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Research_Advisory_Program_documentation.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Starting_Guide_for_Research_Consultants_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Visualization_Tool_documentation.json +99 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007_Your_Advisor_-_Kunqi_Jiang_documentation.json +53 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007__Brain_Genius_documentation.json +288 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/007__Single_Dataset_Alphas_documentation.json +41 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Advisory_Theme_Calendar_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Multiplier_Rules_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Overview_of_Themes_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/008_Theme_Calendar_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Combo_Expression_documentation.json +272 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Global_SuperAlphas_documentation.json +14 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Helpful_Tips_documentation.json +58 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_Selection_Expression_documentation.json +1546 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_SuperAlpha_Operators_documentation.json +890 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_SuperAlpha_Results_documentation.json +83 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/009_What_is_a_SuperAlpha_documentation.json +261 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010_BRAIN_API_documentation.json +515 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010_Documentation_for_ACE_API_Library_Gold_documentation.json +27 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/010__Understanding_simulation_limits_documentation.json +210 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/arithmetic_operators.json +209 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/cross_sectional_operators.json +98 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/group_operators.json +121 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/logical_operators.json +145 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/reduce_operators.json +156 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/special_operators.json +35 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/time_series_operators.json +386 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/transformational_operators.json +61 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/vector_operators.json +38 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/_manifest.json +302 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/_meta.json +1 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/chroma.sqlite3 +0 -0
- cnhkmcp/untracked/APP/Tranformer/Transformer.py +5 -1
- cnhkmcp/untracked/APP/Tranformer/ace_lib.py +4 -0
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates.json +6008 -1242
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_error.json +1 -1034
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_success.json +47310 -442
- cnhkmcp/untracked/APP/ace_lib.py +4 -0
- cnhkmcp/untracked/APP/give_me_idea/ace_lib.py +4 -0
- cnhkmcp/untracked/APP/hkSimulator/ace_lib.py +4 -0
- cnhkmcp/untracked/APP/simulator/wqb20260107015647.log +57 -0
- cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/ace_lib.py +4 -0
- cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/brain_alpha_inspector.py +2 -2
- cnhkmcp/untracked/APP//321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +4 -0
- cnhkmcp/untracked/__init__.py +0 -0
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/platform_functions.py +352 -166
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.4.dist-info}/METADATA +1 -1
- cnhkmcp-2.1.4.dist-info/RECORD +190 -0
- cnhkmcp-2.1.2.dist-info/RECORD +0 -111
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.4.dist-info}/WHEEL +0 -0
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.4.dist-info}/entry_points.txt +0 -0
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.4.dist-info}/licenses/LICENSE +0 -0
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.4.dist-info}/top_level.txt +0 -0
@@ -16,6 +16,7 @@ from dataclasses import dataclass, asdict
 from datetime import datetime, timedelta
 import os
 import sys
+import math
 from time import sleep
 
 import requests
@@ -80,7 +81,20 @@ class BrainApiClient:
 
     def log(self, message: str, level: str = "INFO"):
         """Log messages to stderr to avoid MCP protocol interference."""
-
+        try:
+            # Try to print with original message first
+            print(f"[{level}] {message}", file=sys.stderr)
+        except UnicodeEncodeError:
+            # Fallback: remove problematic characters and try again
+            try:
+                safe_message = message.encode('ascii', 'ignore').decode('ascii')
+                print(f"[{level}] {safe_message}", file=sys.stderr)
+            except Exception:
+                # Final fallback: just print the level and a safe message
+                print(f"[{level}] Log message", file=sys.stderr)
+        except Exception:
+            # Final fallback: just print the level and a safe message
+            print(f"[{level}] Log message", file=sys.stderr)
 
     async def authenticate(self, email: str, password: str) -> Dict[str, Any]:
         """Authenticate with WorldQuant BRAIN platform with biometric support."""
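The `log` helper above writes to stderr because an MCP server uses stdout for the protocol stream, and the nested `try/except` degrades gracefully on consoles that cannot encode characters such as the ❌ and 📤 markers used elsewhere in this file. A minimal standalone sketch of the same pattern (not the package's actual helper):

```python
import sys

def log(message: str, level: str = "INFO") -> None:
    """Write a log line to stderr, degrading gracefully on narrow encodings."""
    try:
        print(f"[{level}] {message}", file=sys.stderr)
    except UnicodeEncodeError:
        # Drop non-ASCII characters (e.g. emoji) rather than crash the server.
        safe_message = message.encode("ascii", "ignore").decode("ascii")
        print(f"[{level}] {safe_message}", file=sys.stderr)

log("❌ Failed to create simulation: timeout", "ERROR")
```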
@@ -322,7 +336,9 @@ class BrainApiClient:
             print("Alpha done simulating, getting alpha details")
             alpha_id = simulation_progress.json()["alpha"]
             alpha = self.session.get("https://api.worldquantbrain.com/alphas/" + alpha_id)
-
+            result = alpha.json()
+            result['note'] = "if you got a negative alpha sharpe, you can just add a minus sign in front of the last line of the Alpha to flip then think the next step."
+            return result
 
         except Exception as e:
             self.log(f"❌ Failed to create simulation: {str(e)}", "ERROR")
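The `note` attached to the simulation result above tells the client that an alpha with negative Sharpe can be flipped by negating the final expression line. As a purely illustrative sketch (the expression below is invented, not taken from the package), that flip could be done on a multi-line expression string like this:

```python
def flip_last_line(expression: str) -> str:
    """Prefix the final line of an alpha expression with a minus sign."""
    lines = expression.rstrip().splitlines()
    lines[-1] = "-(" + lines[-1] + ")"  # wrap in parentheses to keep precedence intact
    return "\n".join(lines)

# Hypothetical expression, for illustration only.
alpha = "gap = close - open;\nrank(gap)"
print(flip_last_line(alpha))
# gap = close - open;
# -(rank(gap))
```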
@@ -414,12 +430,13 @@ class BrainApiClient:
                 response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/pnl")
                 response.raise_for_status()
 
+                # Some alphas may return 204 No Content or an empty body
                 text = (response.text or "").strip()
                 if not text:
                     if attempt < max_retries - 1:
                         self.log(f"Empty PnL response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                         await asyncio.sleep(retry_delay)
-                        retry_delay *= 1.5
+                        retry_delay *= 1.5  # Exponential backoff
                         continue
                     else:
                         self.log(f"Empty PnL response after {max_retries} attempts for {alpha_id}", "WARNING")
@@ -440,7 +457,7 @@ class BrainApiClient:
                             self.log(f"Empty PnL JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                             return {}
 
-                except
+                except Exception as parse_err:
                     if attempt < max_retries - 1:
                         self.log(f"PnL JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                         await asyncio.sleep(retry_delay)
@@ -450,7 +467,7 @@ class BrainApiClient:
                         self.log(f"PnL JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                         return {}
 
-            except
+            except Exception as e:
                 if attempt < max_retries - 1:
                     self.log(f"Failed to get alpha PnL for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                     await asyncio.sleep(retry_delay)
@@ -460,6 +477,7 @@ class BrainApiClient:
                     self.log(f"Failed to get alpha PnL for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                     raise
 
+        # This should never be reached, but just in case
         return {}
 
     async def get_user_alphas(
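Several methods in this file now follow the same retry shape: up to `max_retries` attempts, an initial `retry_delay`, and `retry_delay *= 1.5` between failures. A generic standalone sketch of that multiplicative backoff (a reimplementation of the pattern for illustration, not code from the package):

```python
import asyncio
from typing import Awaitable, Callable, TypeVar

T = TypeVar("T")

async def retry_with_backoff(
    fetch: Callable[[], Awaitable[T]],
    max_retries: int = 5,
    retry_delay: float = 2.0,
    backoff: float = 1.5,
) -> T:
    """Retry an async callable, multiplying the delay after each failure."""
    for attempt in range(max_retries):
        try:
            return await fetch()
        except Exception:
            if attempt == max_retries - 1:
                raise
            await asyncio.sleep(retry_delay)
            retry_delay *= backoff  # same multiplicative backoff as the hunks above

async def _demo() -> str:
    calls = {"n": 0}

    async def flaky() -> str:
        calls["n"] += 1
        if calls["n"] < 3:
            raise RuntimeError("empty response")
        return "ok"

    return await retry_with_backoff(flaky, retry_delay=0.01)

print(asyncio.run(_demo()))  # ok
```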
@@ -508,12 +526,17 @@ class BrainApiClient:
         await self.ensure_authenticated()
 
         try:
+            self.log(f"📤 Submitting alpha {alpha_id} for production...", "INFO")
+
             response = self.session.post(f"{self.base_url}/alphas/{alpha_id}/submit")
             response.raise_for_status()
-
+
+            self.log(f"Alpha {alpha_id} submitted successfully", "SUCCESS")
+            return response.__dict__
+
         except Exception as e:
-            self.log(f"Failed to submit alpha: {str(e)}", "ERROR")
-
+            self.log(f"❌ Failed to submit alpha: {str(e)}", "ERROR")
+            return False
 
     async def get_events(self) -> Dict[str, Any]:
         """Get available events and competitions."""
@@ -701,7 +724,13 @@ class BrainApiClient:
         try:
             response = self.session.get(f"{self.base_url}/operators")
             response.raise_for_status()
-
+            operators_data = response.json()
+
+            # Ensure we return a dictionary format even if API returns a list
+            if isinstance(operators_data, list):
+                return {"operators": operators_data, "count": len(operators_data)}
+            else:
+                return operators_data
         except Exception as e:
             self.log(f"Failed to get operators: {str(e)}", "ERROR")
             raise
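`get_operators` now normalizes the payload so callers always receive a dictionary whether the `/operators` endpoint returns a bare list or an object. A standalone sketch of that normalization step (the sample payloads are invented for illustration):

```python
from typing import Any, Dict

def normalize_operators(payload: Any) -> Dict[str, Any]:
    """Wrap a list payload in a dict; pass dict payloads through unchanged."""
    if isinstance(payload, list):
        return {"operators": payload, "count": len(payload)}
    return payload

print(normalize_operators([{"name": "rank"}, {"name": "ts_mean"}]))
# {'operators': [{'name': 'rank'}, {'name': 'ts_mean'}], 'count': 2}
print(normalize_operators({"operators": [], "count": 0}))
# {'operators': [], 'count': 0}
```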
@@ -761,85 +790,135 @@ class BrainApiClient:
 
     async def get_messages(self, limit: Optional[int] = None, offset: int = 0) -> Dict[str, Any]:
         """Get messages for the current user with optional pagination.
-
-
-
+
+        Image / large binary payload mitigation:
+        Some messages embed base64 encoded images (e.g. <img src="data:image/png;base64,..."/>).
+        Returning full base64 can explode token usage for an LLM client. We post-process each
+        message description and (by default) extract embedded base64 images to disk and replace
+        them with lightweight placeholders while preserving context.
+
+        Strategies (environment driven in future – currently parameterless public API):
+        - placeholder (default): save images to message_images/ and replace with marker text.
+        - ignore: strip image tags entirely, leaving a note.
+        - keep: leave description unchanged (unsafe for LLM token limits).
+
+        A message dict gains an 'extracted_images' list when images are processed.
         """
+        await self.ensure_authenticated()
+
+        import re, base64, pathlib
+
+        image_handling = os.environ.get("BRAIN_MESSAGE_IMAGE_MODE", "placeholder").lower()
+        save_dir = pathlib.Path("message_images")
+
         from typing import Tuple
-
         def process_description(desc: str, message_id: str) -> Tuple[str, List[str]]:
-
-
-
-
-
-
-
-
-
+            try:
+                if not desc or image_handling == "keep":
+                    return desc, []
+                attachments: List[str] = []
+                # Regex to capture full <img ...> tag with data URI
+                img_tag_pattern = re.compile(r"<img[^>]+src=\"(data:image/[^\"]+)\"[^>]*>", re.IGNORECASE)
+                # Iterate over unique matches to avoid double work
+                matches = list(img_tag_pattern.finditer(desc))
+                if not matches:
+                    # Additional heuristic: very long base64-looking token inside quotes followed by </img>
+                    # (legacy format noted by user sample). Replace with placeholder.
+                    heuristic_pattern = re.compile(r"([A-Za-z0-9+/]{500,}={0,2})\"\s*</img>")
+                    if image_handling != "keep" and heuristic_pattern.search(desc):
+                        placeholder = "[Embedded image removed - large base64 sequence truncated]"
+                        return heuristic_pattern.sub(placeholder + "</img>", desc), []
+                    return desc, []
+
+                # Ensure save directory exists only if we will store something
+                if image_handling == "placeholder" and not save_dir.exists():
                     try:
-
-                    header, encoded = src.split(',', 1)
-                    ext = header.split(';')[0].split('/')[1]
-                    safe_ext = re.sub(r'[^a-zA-Z0-9]', '', ext)
-
-                    # Decode and save image
-                    content = base64.b64decode(encoded)
-                    file_name = f"{message_id}_img_{idx}.{safe_ext}"
-                    with open(file_name, "wb") as f:
-                        f.write(content)
-
-                    # Update HTML and add attachment info
-                    img_tag['src'] = file_name
-                    attachments.append(f"Saved embedded image to ./{file_name}")
-
+                        save_dir.mkdir(parents=True, exist_ok=True)
                     except Exception as e:
-
-
-                desc = str(soup)
+                        self.log(f"Could not create image save directory: {e}", "WARNING")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                new_desc = desc
+                for idx, match in enumerate(matches, start=1):
+                    data_uri = match.group(1)  # data:image/...;base64,XXXX
+                    if not data_uri.lower().startswith("data:image"):
+                        continue
+                    # Split header and base64 payload
+                    if "," not in data_uri:
+                        continue
+                    header, b64_data = data_uri.split(",", 1)
+                    mime_part = header.split(";")[0]  # data:image/png
+                    ext = "png"
+                    if "/" in mime_part:
+                        ext = mime_part.split("/")[1]
+                    safe_ext = (ext or "img").split("?")[0]
+                    placeholder_text = "[Embedded image]"
+                    if image_handling == "ignore":
+                        replacement = f"[Image removed: {safe_ext}]"
+                    elif image_handling == "placeholder":
+                        # Try decode & save
+                        file_name = f"{message_id}_{idx}.{safe_ext}"
+                        file_path = save_dir / file_name
+                        try:
+                            # Guard extremely large strings (>5MB ~ 6.7M base64 chars) to avoid memory blow
+                            if len(b64_data) > 7_000_000:
+                                raise ValueError("Image too large to decode safely")
+                            with open(file_path, "wb") as f:
+                                f.write(base64.b64decode(b64_data))
+                            attachments.append(str(file_path))
+                            replacement = f"[Image extracted -> {file_path}]"
+                        except Exception as e:
+                            self.log(f"Failed to decode embedded image in message {message_id}: {e}", "WARNING")
+                            replacement = "[Image extraction failed - content omitted]"
+                    else:  # keep
+                        replacement = placeholder_text  # shouldn't be used since early return, but safe
+                    # Replace only the matched tag (not global) – use re.sub with count=1 on substring slice
+                    # Safer to operate on new_desc using the exact matched string
+                    original_tag = match.group(0)
+                    new_desc = new_desc.replace(original_tag, replacement, 1)
+                return new_desc, attachments
+            except UnicodeEncodeError as ue:
+                self.log(f"Unicode encoding error in process_description: {ue}", "WARNING")
+                return desc, []
+            except Exception as e:
+                self.log(f"Error in process_description: {e}", "WARNING")
+                return desc, []
 
-        await self.ensure_authenticated()
-
         try:
-            params = {
-
-
+            params = {}
+            if limit is not None:
+                params['limit'] = limit
+            if offset > 0:
+                params['offset'] = offset
+
             response = self.session.get(f"{self.base_url}/users/self/messages", params=params)
             response.raise_for_status()
-
-
-            # Process descriptions and attachments
-            for msg in messages_data.get("results", []):
-                try:
-                    msg_id = msg.get("id", "unknown_id")
-                    new_desc, attachments = process_description(msg.get("description", ""), msg_id)
-                    msg["description"] = new_desc
-                    if attachments:
-                        msg["attachments_info"] = attachments
-                except Exception as e:
-                    self.log(f"Error processing message {msg.get('id')}: {e}", "ERROR")
+            data = response.json()
 
-
-
+            # Post-process results for image handling
+            results = data.get('results', [])
+            for msg in results:
+                try:
+                    desc = msg.get('description')
+                    processed_desc, attachments = process_description(desc, msg.get('id', 'msg'))
+                    if attachments or desc != processed_desc:
+                        msg['description'] = processed_desc
+                        if attachments:
+                            msg['extracted_images'] = attachments
+                        else:
+                            # If changed but no attachments (ignore mode) mark sanitized
+                            msg['sanitized'] = True
+                except UnicodeEncodeError as ue:
+                    self.log(f"Unicode encoding error sanitizing message {msg.get('id')}: {ue}", "WARNING")
+                    # Keep original description if encoding fails
+                    continue
+                except Exception as inner_e:
+                    self.log(f"Failed to sanitize message {msg.get('id')}: {inner_e}", "WARNING")
+            data['results'] = results
+            data['image_handling'] = image_handling
+            return data
+        except UnicodeEncodeError as ue:
+            self.log(f"Failed to get messages due to encoding error: {str(ue)}", "ERROR")
+            raise
        except Exception as e:
             self.log(f"Failed to get messages: {str(e)}", "ERROR")
             raise
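The new `process_description` helper locates `<img src="data:image/...;base64,...">` tags, writes the decoded bytes under `message_images/`, and swaps each tag for a short placeholder so the message stays small for an LLM client. A trimmed, standalone sketch of the same regex-and-decode idea (the sample HTML, message id, and output directory below are invented):

```python
import base64
import pathlib
import re
from typing import List, Tuple

IMG_TAG = re.compile(r"<img[^>]+src=\"(data:image/[^\"]+)\"[^>]*>", re.IGNORECASE)

def extract_images(desc: str, message_id: str, save_dir: pathlib.Path) -> Tuple[str, List[str]]:
    """Replace embedded data-URI images with placeholders; return (text, saved files)."""
    saved: List[str] = []
    for idx, match in enumerate(IMG_TAG.finditer(desc), start=1):
        header, b64_data = match.group(1).split(",", 1)
        ext = header.split(";")[0].split("/")[-1] or "img"
        path = save_dir / f"{message_id}_{idx}.{ext}"
        save_dir.mkdir(parents=True, exist_ok=True)
        path.write_bytes(base64.b64decode(b64_data))
        saved.append(str(path))
        desc = desc.replace(match.group(0), f"[Image extracted -> {path}]", 1)
    return desc, saved

# Tiny fake payload: the "image" is just the bytes b"demo".
sample = '<p>Hi</p><img src="data:image/png;base64,ZGVtbw==" alt="x">'
text, files = extract_images(sample, "msg1", pathlib.Path("message_images"))
print(text)
print(files)
```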
@@ -871,11 +950,11 @@ class BrainApiClient:
             raise
 
     async def get_alpha_yearly_stats(self, alpha_id: str) -> Dict[str, Any]:
-        """Get yearly statistics for an alpha."""
+        """Get yearly statistics for an alpha with retry logic."""
         await self.ensure_authenticated()
 
         max_retries = 5
-        retry_delay = 2
+        retry_delay = 2  # seconds
 
         for attempt in range(max_retries):
             try:
@@ -884,55 +963,62 @@ class BrainApiClient:
                 response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/yearly-stats")
                 response.raise_for_status()
 
+                # Check if response has content
                 text = (response.text or "").strip()
                 if not text:
                     if attempt < max_retries - 1:
-                        self.log(f"Empty yearly stats response for {alpha_id}, retrying...", "WARNING")
+                        self.log(f"Empty yearly stats response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                         await asyncio.sleep(retry_delay)
-                        retry_delay *= 1.5
+                        retry_delay *= 1.5  # Exponential backoff
                         continue
                     else:
+                        self.log(f"Empty yearly stats response after {max_retries} attempts for {alpha_id}", "WARNING")
                         return {}
 
                 try:
-
-                    if
-
+                    yearly_stats = response.json()
+                    if yearly_stats:
+                        self.log(f"Successfully retrieved yearly stats for alpha {alpha_id}", "SUCCESS")
+                        return yearly_stats
                     else:
                         if attempt < max_retries - 1:
-                            self.log(f"Empty yearly stats JSON for {alpha_id}, retrying...", "WARNING")
+                            self.log(f"Empty yearly stats JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                             await asyncio.sleep(retry_delay)
                             retry_delay *= 1.5
                             continue
                         else:
+                            self.log(f"Empty yearly stats JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                             return {}
 
-                except
+                except Exception as parse_err:
                     if attempt < max_retries - 1:
-                        self.log(f"Yearly stats JSON parse failed for {alpha_id}, retrying...", "WARNING")
+                        self.log(f"Yearly stats JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                         await asyncio.sleep(retry_delay)
                         retry_delay *= 1.5
                         continue
                     else:
-
+                        self.log(f"Yearly stats JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
+                        return {}
 
-            except
+            except Exception as e:
                 if attempt < max_retries - 1:
-                    self.log(f"Failed to get yearly stats for {alpha_id}, retrying: {e}", "WARNING")
+                    self.log(f"Failed to get alpha yearly stats for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                     await asyncio.sleep(retry_delay)
                     retry_delay *= 1.5
                     continue
                 else:
+                    self.log(f"Failed to get alpha yearly stats for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                     raise
 
+        # This should never be reached, but just in case
        return {}
 
     async def get_production_correlation(self, alpha_id: str) -> Dict[str, Any]:
-        """Get production correlation data for an alpha."""
+        """Get production correlation data for an alpha with retry logic."""
         await self.ensure_authenticated()
 
         max_retries = 5
-        retry_delay =
+        retry_delay = 20  # seconds
 
         for attempt in range(max_retries):
             try:
@@ -953,44 +1039,46 @@ class BrainApiClient:
                        return {}
 
                 try:
-
-                    if
-
+                    correlation_data = response.json()
+                    if correlation_data:
+                        self.log(f"Successfully retrieved production correlation for alpha {alpha_id}", "SUCCESS")
+                        return correlation_data
                     else:
                         if attempt < max_retries - 1:
-                            self.log(f"Empty production correlation JSON for {alpha_id}, retrying...", "WARNING")
+                            self.log(f"Empty production correlation JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                             await asyncio.sleep(retry_delay)
-                            retry_delay *= 1.5
                             continue
                         else:
+                            self.log(f"Empty production correlation JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                             return {}
 
-                except
+                except Exception as parse_err:
                     if attempt < max_retries - 1:
-                        self.log(f"Production correlation JSON parse failed for {alpha_id}, retrying...", "WARNING")
+                        self.log(f"Production correlation JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                         await asyncio.sleep(retry_delay)
-                        retry_delay *= 1.5
                         continue
                     else:
-
+                        self.log(f"Production correlation JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
+                        return {}
 
-            except
+            except Exception as e:
                 if attempt < max_retries - 1:
-                    self.log(f"Failed to get production correlation for {alpha_id}, retrying: {e}", "WARNING")
+                    self.log(f"Failed to get production correlation for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                     await asyncio.sleep(retry_delay)
-                    retry_delay *= 1.5
                     continue
                 else:
+                    self.log(f"Failed to get production correlation for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                     raise
 
+        # This should never be reached, but just in case
        return {}
 
     async def get_self_correlation(self, alpha_id: str) -> Dict[str, Any]:
-        """Get self
+        """Get self-correlation data for an alpha with retry logic."""
         await self.ensure_authenticated()
 
         max_retries = 5
-        retry_delay =
+        retry_delay = 20  # seconds
 
         for attempt in range(max_retries):
             try:
@@ -1011,36 +1099,38 @@ class BrainApiClient:
                        return {}
 
                 try:
-
-                    if
-
+                    correlation_data = response.json()
+                    if correlation_data:
+                        self.log(f"Successfully retrieved self correlation for alpha {alpha_id}", "SUCCESS")
+                        return correlation_data
                     else:
                         if attempt < max_retries - 1:
-                            self.log(f"Empty self correlation JSON for {alpha_id}, retrying...", "WARNING")
+                            self.log(f"Empty self correlation JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
-                            retry_delay *= 1.5
                             continue
                         else:
+                            self.log(f"Empty self correlation JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                             return {}
 
-                except
+                except Exception as parse_err:
                     if attempt < max_retries - 1:
-                        self.log(f"Self correlation JSON parse failed for {alpha_id}, retrying...", "WARNING")
+                        self.log(f"Self correlation JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                         await asyncio.sleep(retry_delay)
-                        retry_delay *= 1.5
                         continue
                     else:
-
+                        self.log(f"Self correlation JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
+                        return {}
 
-            except
+            except Exception as e:
                 if attempt < max_retries - 1:
-                    self.log(f"Failed to get self correlation for {alpha_id}, retrying: {e}", "WARNING")
+                    self.log(f"Failed to get self correlation for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                     await asyncio.sleep(retry_delay)
-                    retry_delay *= 1.5
                     continue
                 else:
+                    self.log(f"Failed to get self correlation for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                     raise
 
+        # This should never be reached, but just in case
        return {}
 
     async def check_correlation(self, alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
@@ -1048,22 +1138,84 @@ class BrainApiClient:
         await self.ensure_authenticated()
 
         try:
-            results = {
-
-
-
-
-
-
-
-
-
-
-
-
-
+            results = {
+                'alpha_id': alpha_id,
+                'threshold': threshold,
+                'correlation_type': correlation_type,
+                'checks': {}
+            }
+
+            # Determine which correlations to check
+            check_types = []
+            if correlation_type == "both":
+                check_types = ["production", "self"]
+            else:
+                check_types = [correlation_type]
+
+            all_passed = True
+
+            for check_type in check_types:
+                if check_type == "production":
+                    correlation_data = await self.get_production_correlation(alpha_id)
+                elif check_type == "self":
+                    correlation_data = await self.get_self_correlation(alpha_id)
+                else:
+                    continue
+
+                # Analyze correlation data (robust to schema/records format)
+                if isinstance(correlation_data, dict):
+                    # Prefer strict access to schema.max or top-level max; otherwise error
+                    schema = correlation_data.get('schema') or {}
+                    if isinstance(schema, dict) and 'max' in schema:
+                        max_correlation = float(schema['max'])
+                    elif 'max' in correlation_data:
+                        # Some endpoints place max at top-level
+                        max_correlation = float(correlation_data['max'])
+                    else:
+                        # Attempt to derive from records; if none found, raise error instead of defaulting
+                        records = correlation_data.get('records') or []
+                        if isinstance(records, list) and records:
+                            candidate_max = None
+                            for row in records:
+                                if isinstance(row, (list, tuple)):
+                                    for v in row:
+                                        try:
+                                            vf = float(v)
+                                            if -1.0 <= vf <= 1.0:
+                                                candidate_max = vf if candidate_max is None else max(candidate_max, vf)
+                                        except Exception:
+                                            continue
+                                elif isinstance(row, dict):
+                                    for key in ('correlation', 'prodCorrelation', 'selfCorrelation', 'max'):
+                                        try:
+                                            vf = float(row.get(key))
+                                            if -1.0 <= vf <= 1.0:
+                                                candidate_max = vf if candidate_max is None else max(candidate_max, vf)
+                                        except Exception:
+                                            continue
+                            if candidate_max is None:
+                                raise ValueError("Unable to derive max correlation from records")
+                            max_correlation = float(candidate_max)
+                        else:
+                            raise KeyError("Correlation response missing 'schema.max' or top-level 'max' and no 'records' to derive from")
+                else:
+                    raise TypeError("Correlation data is not a dictionary")
 
+                passes_check = max_correlation < threshold
+
+                results['checks'][check_type] = {
+                    'max_correlation': max_correlation,
+                    'passes_check': passes_check,
+                    'correlation_data': correlation_data
+                }
+
+                if not passes_check:
+                    all_passed = False
+
+            results['all_passed'] = all_passed
+
            return results
+
        except Exception as e:
             self.log(f"Failed to check correlation: {str(e)}", "ERROR")
             raise
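`check_correlation` now reads the maximum correlation from `schema.max`, falls back to a top-level `max`, and otherwise scans `records`, raising instead of silently defaulting. A standalone sketch of that extraction order on an invented payload shape (the real API response fields may differ):

```python
from typing import Any, Dict

def extract_max_correlation(payload: Dict[str, Any]) -> float:
    """Return the max correlation using schema.max, then top-level max, then records."""
    schema = payload.get("schema") or {}
    if isinstance(schema, dict) and "max" in schema:
        return float(schema["max"])
    if "max" in payload:
        return float(payload["max"])
    candidates = []
    for row in payload.get("records") or []:
        if isinstance(row, (list, tuple)):
            values = list(row)
        elif isinstance(row, dict):
            values = list(row.values())
        else:
            continue
        for v in values:
            try:
                vf = float(v)
            except (TypeError, ValueError):
                continue
            if -1.0 <= vf <= 1.0:
                candidates.append(vf)
    if not candidates:
        raise KeyError("no usable correlation values in payload")
    return max(candidates)

print(extract_max_correlation({"schema": {"max": 0.42}}))          # 0.42
print(extract_max_correlation({"records": [[0.1, 0.35], [0.2]]}))  # 0.35
threshold = 0.7
print(extract_max_correlation({"max": 0.65}) < threshold)          # True (passes the check)
```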
@@ -1073,20 +1225,22 @@ class BrainApiClient:
         await self.ensure_authenticated()
 
         try:
-            #
-
+            # Get correlation checks using the unified function
+            correlation_checks = await self.check_correlation(alpha_id, correlation_type="both")
 
-
-
-            correlation = await self.check_correlation(alpha_id)
+            # Get alpha details for additional validation
+            alpha_details = await self.get_alpha_details(alpha_id)
 
-
-
-
-
+            # Compile comprehensive check results
+            checks = {
+                'correlation_checks': correlation_checks,
+                'alpha_details': alpha_details,
+                'all_passed': correlation_checks['all_passed']
            }
+
+            return checks
        except Exception as e:
-            self.log(f"Failed submission check: {str(e)}", "ERROR")
+            self.log(f"Failed to get submission check: {str(e)}", "ERROR")
             raise
 
     async def set_alpha_properties(self, alpha_id: str, name: Optional[str] = None,
@@ -1096,18 +1250,19 @@ class BrainApiClient:
         await self.ensure_authenticated()
 
         try:
-
-
-
-
-
-
-
-
-
-
+            data = {}
+            if name:
+                data['name'] = name
+            if color:
+                data['color'] = color
+            if tags:
+                data['tags'] = tags
+            if selection_desc:
+                data['selectionDesc'] = selection_desc
+            if combo_desc:
+                data['comboDesc'] = combo_desc
 
-            response = self.session.patch(f"{self.base_url}/alphas/{alpha_id}", json=
+            response = self.session.patch(f"{self.base_url}/alphas/{alpha_id}", json=data)
             response.raise_for_status()
             return response.json()
         except Exception as e:
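`set_alpha_properties` builds the PATCH body from only the arguments that were supplied, mapping the snake_case parameters to the camelCase field names (`selectionDesc`, `comboDesc`) the API expects. A standalone sketch of the same payload construction (the example values are invented):

```python
from typing import Any, Dict, List, Optional

def build_alpha_patch(
    name: Optional[str] = None,
    color: Optional[str] = None,
    tags: Optional[List[str]] = None,
    selection_desc: Optional[str] = None,
    combo_desc: Optional[str] = None,
) -> Dict[str, Any]:
    """Include only the fields that were actually provided."""
    data: Dict[str, Any] = {}
    if name:
        data["name"] = name
    if color:
        data["color"] = color
    if tags:
        data["tags"] = tags
    if selection_desc:
        data["selectionDesc"] = selection_desc
    if combo_desc:
        data["comboDesc"] = combo_desc
    return data

print(build_alpha_patch(name="momentum_v2", tags=["experiment"]))
# {'name': 'momentum_v2', 'tags': ['experiment']}
```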
@@ -1174,11 +1329,35 @@ class BrainApiClient:
         try:
             params = {}
             if start_date:
-                params[
+                params['startDate'] = start_date
             if end_date:
-                params[
-
+                params['endDate'] = end_date
+
+            # Try the user-specific activities endpoint first (like pyramid-multipliers)
             response = self.session.get(f"{self.base_url}/users/self/activities/pyramid-alphas", params=params)
+
+            # If that fails, try alternative endpoints
+            if response.status_code == 404:
+                # Try alternative endpoint structure
+                response = self.session.get(f"{self.base_url}/users/self/pyramid/alphas", params=params)
+
+                if response.status_code == 404:
+                    # Try yet another alternative
+                    response = self.session.get(f"{self.base_url}/activities/pyramid-alphas", params=params)
+
+                    if response.status_code == 404:
+                        # Return an informative error with what we tried
+                        return {
+                            "error": "Pyramid alphas endpoint not found",
+                            "tried_endpoints": [
+                                "/users/self/activities/pyramid-alphas",
+                                "/users/self/pyramid/alphas",
+                                "/activities/pyramid-alphas",
+                                "/pyramid/alphas"
+                            ],
+                            "suggestion": "This endpoint may not be available in the current API version"
+                        }
+
             response.raise_for_status()
             return response.json()
         except Exception as e:
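The pyramid-alphas lookup now walks a list of candidate endpoints and keeps the first response that is not a 404, returning a descriptive error dict if every path is missing. A standalone sketch of that fallback loop against a stubbed session (the paths are copied from the diff; the stub and its payload are invented):

```python
from typing import Any, Callable, Dict, List, Optional

class FakeResponse:
    def __init__(self, status_code: int, payload: Optional[Dict[str, Any]] = None):
        self.status_code = status_code
        self._payload = payload or {}

    def json(self) -> Dict[str, Any]:
        return self._payload

def first_available(get: Callable[[str], FakeResponse], paths: List[str]) -> Dict[str, Any]:
    """Return the JSON of the first non-404 path, or an informative error."""
    for path in paths:
        response = get(path)
        if response.status_code != 404:
            return response.json()
    return {"error": "endpoint not found", "tried_endpoints": paths}

paths = [
    "/users/self/activities/pyramid-alphas",
    "/users/self/pyramid/alphas",
    "/activities/pyramid-alphas",
]

# Stub: only the last path exists in this fake API.
def fake_get(path: str) -> FakeResponse:
    if path == "/activities/pyramid-alphas":
        return FakeResponse(200, {"results": []})
    return FakeResponse(404)

print(first_available(fake_get, paths))  # {'results': []}
```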
@@ -1451,20 +1630,27 @@ async def manage_config(action: str = "get", settings: Optional[Dict[str, Any]]
    Returns:
        Current or updated configuration including authentication status
    """
-
+    if action == "get":
+        config = load_config()
+        auth_status = await brain_client.get_authentication_status()
+
+        return {
+            "config": config,
+            "auth_status": auth_status,
+            "is_authenticated": await brain_client.is_authenticated()
+        }
 
-
+    elif action == "set":
+        if settings is None:
+            return {"error": "Settings parameter is required when action='set'"}
+
+        config = load_config()
        config.update(settings)
        save_config(config)
-
-        is_authed = await brain_client.is_authenticated()
-        config['isAuthenticated'] = is_authed
+        return config
 
-
-
-        config['password'] = '********'
-
-        return config
+    else:
+        return {"error": f"Invalid action '{action}'. Use 'get' or 'set'."}
 
 # --- Simulation Tools ---
 
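`manage_config` now branches explicitly on `action`: `get` returns the stored config plus live authentication status, `set` requires `settings` and persists the merge, and anything else yields an error dict. A synchronous standalone sketch of that dispatch with an in-memory store (the `load_config`/`save_config` helpers below stand in for the package's own, and the authentication status is stubbed out):

```python
from typing import Any, Dict, Optional

_STORE: Dict[str, Any] = {"region": "US"}

def load_config() -> Dict[str, Any]:
    return dict(_STORE)

def save_config(config: Dict[str, Any]) -> None:
    _STORE.clear()
    _STORE.update(config)

def manage_config(action: str = "get", settings: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Dispatch on action the way the rewritten tool does: get, set, or error."""
    if action == "get":
        return {"config": load_config(), "is_authenticated": False}  # auth status stubbed
    elif action == "set":
        if settings is None:
            return {"error": "Settings parameter is required when action='set'"}
        config = load_config()
        config.update(settings)
        save_config(config)
        return config
    else:
        return {"error": f"Invalid action '{action}'. Use 'get' or 'set'."}

print(manage_config("set", {"universe": "TOP3000"}))
print(manage_config("get"))
print(manage_config("delete"))
```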