cnhkmcp 2.1.2__py3-none-any.whl → 2.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.3.dist-info}/METADATA +1 -1
  2. cnhkmcp-2.1.3.dist-info/RECORD +6 -0
  3. cnhkmcp-2.1.3.dist-info/top_level.txt +1 -0
  4. cnhkmcp/__init__.py +0 -125
  5. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/README.md +0 -38
  6. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/ace.log +0 -0
  7. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/config.json +0 -6
  8. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/ace_lib.py +0 -1510
  9. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_datasets.py +0 -157
  10. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_documentation.py +0 -132
  11. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_operators.py +0 -99
  12. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/helpful_functions.py +0 -180
  13. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/icon.ico +0 -0
  14. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/icon.png +0 -0
  15. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/test.txt +0 -1
  16. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/main.py +0 -576
  17. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/process_knowledge_base.py +0 -281
  18. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/rag_engine.py +0 -408
  19. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/requirements.txt +0 -7
  20. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/run.bat +0 -3
  21. cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242//321/211/320/266/320/246/321/206/320/274/320/261/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +0 -265
  22. cnhkmcp/untracked/APP/.gitignore +0 -32
  23. cnhkmcp/untracked/APP/MODULAR_STRUCTURE.md +0 -112
  24. cnhkmcp/untracked/APP/README.md +0 -309
  25. cnhkmcp/untracked/APP/Tranformer/Transformer.py +0 -4985
  26. cnhkmcp/untracked/APP/Tranformer/ace.log +0 -0
  27. cnhkmcp/untracked/APP/Tranformer/ace_lib.py +0 -1510
  28. cnhkmcp/untracked/APP/Tranformer/helpful_functions.py +0 -180
  29. cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates.json +0 -2421
  30. cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates_/321/207/320/264/342/225/221/321/204/342/225/233/320/233.json +0 -654
  31. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_error.json +0 -1034
  32. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_success.json +0 -444
  33. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_/321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/277/321/207/320/253/342/224/244/321/206/320/236/320/265/321/210/342/225/234/342/225/234/321/205/320/225/320/265Machine_lib.json +0 -22
  34. cnhkmcp/untracked/APP/Tranformer/parsetab.py +0 -60
  35. cnhkmcp/untracked/APP/Tranformer/template_summary.txt +0 -3182
  36. cnhkmcp/untracked/APP/Tranformer/transformer_config.json +0 -7
  37. cnhkmcp/untracked/APP/Tranformer/validator.py +0 -889
  38. cnhkmcp/untracked/APP/ace.log +0 -69
  39. cnhkmcp/untracked/APP/ace_lib.py +0 -1510
  40. cnhkmcp/untracked/APP/blueprints/__init__.py +0 -6
  41. cnhkmcp/untracked/APP/blueprints/feature_engineering.py +0 -347
  42. cnhkmcp/untracked/APP/blueprints/idea_house.py +0 -221
  43. cnhkmcp/untracked/APP/blueprints/inspiration_house.py +0 -432
  44. cnhkmcp/untracked/APP/blueprints/paper_analysis.py +0 -570
  45. cnhkmcp/untracked/APP/custom_templates/templates.json +0 -1257
  46. cnhkmcp/untracked/APP/give_me_idea/BRAIN_Alpha_Template_Expert_SystemPrompt.md +0 -400
  47. cnhkmcp/untracked/APP/give_me_idea/ace_lib.py +0 -1510
  48. cnhkmcp/untracked/APP/give_me_idea/alpha_data_specific_template_master.py +0 -252
  49. cnhkmcp/untracked/APP/give_me_idea/fetch_all_datasets.py +0 -157
  50. cnhkmcp/untracked/APP/give_me_idea/fetch_all_operators.py +0 -99
  51. cnhkmcp/untracked/APP/give_me_idea/helpful_functions.py +0 -180
  52. cnhkmcp/untracked/APP/give_me_idea/what_is_Alpha_template.md +0 -11
  53. cnhkmcp/untracked/APP/helpful_functions.py +0 -180
  54. cnhkmcp/untracked/APP/hkSimulator/ace_lib.py +0 -1497
  55. cnhkmcp/untracked/APP/hkSimulator/autosimulator.py +0 -447
  56. cnhkmcp/untracked/APP/hkSimulator/helpful_functions.py +0 -180
  57. cnhkmcp/untracked/APP/mirror_config.txt +0 -20
  58. cnhkmcp/untracked/APP/operaters.csv +0 -129
  59. cnhkmcp/untracked/APP/requirements.txt +0 -53
  60. cnhkmcp/untracked/APP/run_app.bat +0 -28
  61. cnhkmcp/untracked/APP/run_app.sh +0 -34
  62. cnhkmcp/untracked/APP/setup_tsinghua.bat +0 -39
  63. cnhkmcp/untracked/APP/setup_tsinghua.sh +0 -43
  64. cnhkmcp/untracked/APP/simulator/alpha_submitter.py +0 -404
  65. cnhkmcp/untracked/APP/simulator/simulator_wqb.py +0 -618
  66. cnhkmcp/untracked/APP/ssrn-3332513.pdf +6 -109201
  67. cnhkmcp/untracked/APP/static/brain.js +0 -589
  68. cnhkmcp/untracked/APP/static/decoder.js +0 -1540
  69. cnhkmcp/untracked/APP/static/feature_engineering.js +0 -1729
  70. cnhkmcp/untracked/APP/static/idea_house.js +0 -937
  71. cnhkmcp/untracked/APP/static/inspiration.js +0 -465
  72. cnhkmcp/untracked/APP/static/inspiration_house.js +0 -868
  73. cnhkmcp/untracked/APP/static/paper_analysis.js +0 -390
  74. cnhkmcp/untracked/APP/static/script.js +0 -3082
  75. cnhkmcp/untracked/APP/static/simulator.js +0 -597
  76. cnhkmcp/untracked/APP/static/styles.css +0 -3127
  77. cnhkmcp/untracked/APP/static/usage_widget.js +0 -508
  78. cnhkmcp/untracked/APP/templates/alpha_inspector.html +0 -511
  79. cnhkmcp/untracked/APP/templates/feature_engineering.html +0 -960
  80. cnhkmcp/untracked/APP/templates/idea_house.html +0 -564
  81. cnhkmcp/untracked/APP/templates/index.html +0 -932
  82. cnhkmcp/untracked/APP/templates/inspiration_house.html +0 -861
  83. cnhkmcp/untracked/APP/templates/paper_analysis.html +0 -91
  84. cnhkmcp/untracked/APP/templates/simulator.html +0 -343
  85. cnhkmcp/untracked/APP/templates/transformer_web.html +0 -580
  86. cnhkmcp/untracked/APP/usage.md +0 -351
  87. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/ace_lib.py +0 -1510
  88. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/brain_alpha_inspector.py +0 -712
  89. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/helpful_functions.py +0 -180
  90. cnhkmcp/untracked/APP//321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +0 -2456
  91. cnhkmcp/untracked/arXiv_API_Tool_Manual.md +0 -490
  92. cnhkmcp/untracked/arxiv_api.py +0 -229
  93. cnhkmcp/untracked/forum_functions.py +0 -998
  94. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/forum_functions.py +0 -407
  95. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/platform_functions.py +0 -2415
  96. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/user_config.json +0 -31
  97. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/210/320/276/320/271AI/321/210/320/277/342/225/227/321/210/342/224/220/320/251/321/204/342/225/225/320/272/321/206/320/246/320/227/321/206/320/261/320/263/321/206/320/255/320/265/321/205/320/275/320/266/321/204/342/225/235/320/252/321/204/342/225/225/320/233/321/210/342/225/234/342/225/234/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270.md +0 -101
  98. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +0 -190
  99. cnhkmcp/untracked/platform_functions.py +0 -2886
  100. cnhkmcp/untracked/sample_mcp_config.json +0 -11
  101. cnhkmcp/untracked/user_config.json +0 -31
  102. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/222/321/210/320/220/320/223/321/206/320/246/320/227/321/206/320/261/320/263_BRAIN_Alpha_Test_Requirements_and_Tips.md +0 -202
  103. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Alpha_explaination_workflow.md +0 -56
  104. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_6_Tips_Datafield_Exploration_Guide.md +0 -194
  105. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_Alpha_Improvement_Workflow.md +0 -101
  106. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Dataset_Exploration_Expert_Manual.md +0 -436
  107. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_daily_report_workflow.md +0 -128
  108. cnhkmcp/untracked//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +0 -190
  109. cnhkmcp-2.1.2.dist-info/RECORD +0 -111
  110. cnhkmcp-2.1.2.dist-info/top_level.txt +0 -1
  111. {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.3.dist-info}/WHEEL +0 -0
  112. {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.3.dist-info}/entry_points.txt +0 -0
  113. {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.3.dist-info}/licenses/LICENSE +0 -0
@@ -1,2415 +0,0 @@
1
- #!/usr/bin/env python3
2
- """
3
- WorldQuant BRAIN MCP Server - Python Version
4
- A comprehensive Model Context Protocol (MCP) server for WorldQuant BRAIN platform integration.
5
- """
6
-
7
- import json
8
- import time
9
- import asyncio
10
- import logging
11
- from typing import Dict, List, Optional, Any, Union, Tuple
12
- import re
13
- import base64
14
- from bs4 import BeautifulSoup
15
- from dataclasses import dataclass, asdict
16
- from datetime import datetime, timedelta
17
- import os
18
- import sys
19
- from time import sleep
20
-
21
- import requests
22
- import pandas as pd
23
- from mcp.server.fastmcp import FastMCP
24
- from pydantic import BaseModel, Field, EmailStr
25
-
26
- from pathlib import Path
27
-
28
- # Import the new forum client
29
- from forum_functions import forum_client
30
-
31
- # Configure logging
32
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
33
- logger = logging.getLogger(__name__)
34
-
35
- # Pydantic models for type safety
36
class AuthCredentials(BaseModel):
    """Login credentials for the WorldQuant BRAIN platform."""

    # Validated as an email address by pydantic's EmailStr type.
    email: EmailStr
    password: str
-
40
class SimulationSettings(BaseModel):
    """Settings payload for a BRAIN simulation request.

    Field names mirror the platform API's camelCase keys exactly, since the
    model is serialized straight into the request body. The last three fields
    (selectionHandling, selectionLimit, componentActivation) apply only to
    SUPER simulations and are stripped by the client for REGULAR runs.
    """

    instrumentType: str = "EQUITY"
    region: str = "USA"
    universe: str = "TOP3000"
    delay: int = 1
    decay: float = 0.0
    neutralization: str = "NONE"
    truncation: float = 0.0
    pasteurization: str = "ON"
    unitHandling: str = "VERIFY"
    nanHandling: str = "OFF"
    language: str = "FASTEXPR"
    visualization: bool = True
    testPeriod: str = "P0Y0M"
    selectionHandling: str = "POSITIVE"
    selectionLimit: int = 1000
    maxTrade: str = "OFF"
    componentActivation: str = "IS"
58
-
59
class SimulationData(BaseModel):
    """Top-level simulation request: type, settings and expression fields.

    REGULAR simulations use ``regular``; SUPER simulations use ``combo``
    and ``selection``. Unused expression fields stay None and are filtered
    out of the payload by the client.
    """

    type: str = "REGULAR"  # "REGULAR" or "SUPER"
    settings: SimulationSettings
    regular: Optional[str] = None
    combo: Optional[str] = None
    selection: Optional[str] = None
65
-
66
- class BrainApiClient:
67
- """WorldQuant BRAIN API client with comprehensive functionality."""
68
-
69
- def __init__(self):
70
- self.base_url = "https://api.worldquantbrain.com"
71
- self.session = requests.Session()
72
- self.auth_credentials = None
73
- self.is_authenticating = False
74
-
75
- # Configure session
76
- self.session.timeout = 30
77
- self.session.headers.update({
78
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
79
- })
80
-
81
- def log(self, message: str, level: str = "INFO"):
82
- """Log messages to stderr to avoid MCP protocol interference."""
83
- print(f"[{level}] {message}", file=sys.stderr)
84
-
85
- async def authenticate(self, email: str, password: str) -> Dict[str, Any]:
86
- """Authenticate with WorldQuant BRAIN platform with biometric support."""
87
- self.log("🔐 Starting Authentication process...", "INFO")
88
-
89
- try:
90
- # Store credentials for potential re-authentication
91
- self.auth_credentials = {'email': email, 'password': password}
92
-
93
- # Clear any existing session data
94
- self.session.cookies.clear()
95
- self.session.auth = None
96
-
97
- # Create Basic Authentication header (base64 encoded credentials)
98
- import base64
99
- credentials = f"{email}:{password}"
100
- encoded_credentials = base64.b64encode(credentials.encode()).decode()
101
-
102
- # Send POST request with Basic Authentication header
103
- headers = {
104
- 'Authorization': f'Basic {encoded_credentials}'
105
- }
106
-
107
- response = self.session.post('https://api.worldquantbrain.com/authentication', headers=headers)
108
-
109
- # Check for successful authentication (status code 201)
110
- if response.status_code == 201:
111
- self.log("Authentication successful", "SUCCESS")
112
-
113
- # Check if JWT token was automatically stored by session
114
- jwt_token = self.session.cookies.get('t')
115
- if jwt_token:
116
- self.log("JWT token automatically stored by session", "SUCCESS")
117
- else:
118
- self.log("⚠️ No JWT token found in session", "WARNING")
119
-
120
- # Return success response
121
- return {
122
- 'user': {'email': email},
123
- 'status': 'authenticated',
124
- 'permissions': ['read', 'write'],
125
- 'message': 'Authentication successful',
126
- 'status_code': response.status_code,
127
- 'has_jwt': jwt_token is not None
128
- }
129
-
130
- # Check if biometric authentication is required (401 with persona)
131
- elif response.status_code == 401:
132
- www_auth = response.headers.get("WWW-Authenticate")
133
- location = response.headers.get("Location")
134
-
135
- if www_auth == "persona" and location:
136
- self.log("🔴 Biometric authentication required", "INFO")
137
-
138
- # Handle biometric authentication
139
- from urllib.parse import urljoin
140
- biometric_url = urljoin(response.url, location)
141
-
142
- return await self._handle_biometric_auth(biometric_url, email)
143
- else:
144
- raise Exception("Incorrect email or password")
145
- else:
146
- raise Exception(f"Authentication failed with status code: {response.status_code}")
147
-
148
- except requests.HTTPError as e:
149
- self.log(f"❌ HTTP error during authentication: {e}", "ERROR")
150
- raise
151
- except Exception as e:
152
- self.log(f"❌ Authentication failed: {str(e)}", "ERROR")
153
- raise
154
-
155
- async def _handle_biometric_auth(self, biometric_url: str, email: str) -> Dict[str, Any]:
156
- """Handle biometric authentication using browser automation."""
157
- self.log("🌐 Starting biometric authentication...", "INFO")
158
-
159
- try:
160
- # Import playwright for browser automation
161
- from playwright.async_api import async_playwright
162
- import time
163
-
164
- async with async_playwright() as p:
165
- browser = await p.chromium.launch(headless=False)
166
- page = await browser.new_page()
167
-
168
- self.log("🌐 Opening browser for biometric authentication...", "INFO")
169
- await page.goto(biometric_url)
170
- self.log("Browser page loaded successfully", "SUCCESS")
171
-
172
- # Print instructions
173
- print("\n" + "="*60, file=sys.stderr)
174
- print("BIOMETRIC AUTHENTICATION REQUIRED", file=sys.stderr)
175
- print("="*60, file=sys.stderr)
176
- print("Browser window is open with biometric authentication page", file=sys.stderr)
177
- print("Complete the biometric authentication in the browser", file=sys.stderr)
178
- print("The system will automatically check when you're done...", file=sys.stderr)
179
- print("="*60, file=sys.stderr)
180
-
181
- # Keep checking until authentication is complete
182
- max_attempts = 60 # 5 minutes maximum (60 * 5 seconds)
183
- attempt = 0
184
-
185
- while attempt < max_attempts:
186
- time.sleep(5) # Check every 5 seconds
187
- attempt += 1
188
-
189
- # Check if authentication completed
190
- check_response = self.session.post(biometric_url)
191
- self.log(f"🔄 Checking authentication status (attempt {attempt}/{max_attempts}): {check_response.status_code}", "INFO")
192
-
193
- if check_response.status_code == 201:
194
- self.log("Biometric authentication successful!", "SUCCESS")
195
-
196
- await browser.close()
197
-
198
- # Check JWT token
199
- jwt_token = self.session.cookies.get('t')
200
- if jwt_token:
201
- self.log("JWT token received", "SUCCESS")
202
-
203
- # Return success response
204
- return {
205
- 'user': {'email': email},
206
- 'status': 'authenticated',
207
- 'permissions': ['read', 'write'],
208
- 'message': 'Biometric authentication successful',
209
- 'status_code': check_response.status_code,
210
- 'has_jwt': jwt_token is not None
211
- }
212
-
213
- await browser.close()
214
- raise Exception("Biometric authentication timed out")
215
-
216
- except Exception as e:
217
- self.log(f"❌ Biometric authentication failed: {str(e)}", "ERROR")
218
- raise
219
-
220
- async def is_authenticated(self) -> bool:
221
- """Check if currently authenticated using JWT token."""
222
- try:
223
- # Check if we have a JWT token in cookies
224
- jwt_token = self.session.cookies.get('t')
225
- if not jwt_token:
226
- self.log("❌ No JWT token found", "INFO")
227
- return False
228
-
229
- # Test authentication with a simple API call
230
- response = self.session.get(f"{self.base_url}/authentication")
231
- if response.status_code == 200:
232
- return True
233
- elif response.status_code == 401:
234
- self.log("❌ JWT token expired or invalid (401)", "INFO")
235
- return False
236
- else:
237
- self.log(f"⚠️ Unexpected status code during auth check: {response.status_code}", "WARNING")
238
- return False
239
- except Exception as e:
240
- self.log(f"❌ Error checking authentication: {str(e)}", "ERROR")
241
- return False
242
-
243
- async def ensure_authenticated(self):
244
- """Ensure authentication is valid, re-authenticate if needed."""
245
- if not await self.is_authenticated():
246
- if not self.auth_credentials:
247
- self.log("No credentials in memory, loading from config...", "INFO")
248
- config = load_config()
249
- creds = config.get("credentials", {})
250
- email = creds.get("email")
251
- password = creds.get("password")
252
- if not email or not password:
253
- raise Exception("Authentication credentials not found in config. Please authenticate first.")
254
- self.auth_credentials = {'email': email, 'password': password}
255
-
256
- self.log("🔄 Re-authenticating...", "INFO")
257
- await self.authenticate(self.auth_credentials['email'], self.auth_credentials['password'])
258
-
259
- async def get_authentication_status(self) -> Optional[Dict[str, Any]]:
260
- """Get current authentication status and user info."""
261
- try:
262
- response = self.session.get(f"{self.base_url}/users/self")
263
- response.raise_for_status()
264
- return response.json()
265
- except Exception as e:
266
- self.log(f"Failed to get auth status: {str(e)}", "ERROR")
267
- return None
268
-
269
- async def create_simulation(self, simulation_data: SimulationData) -> Dict[str, str]:
270
- """Create a new simulation on BRAIN platform."""
271
- await self.ensure_authenticated()
272
-
273
- try:
274
- self.log("🚀 Creating simulation...", "INFO")
275
-
276
- # Prepare settings based on simulation type
277
- settings_dict = simulation_data.settings.model_dump()
278
-
279
- # Remove fields based on simulation type
280
- if simulation_data.type == "REGULAR":
281
- # Remove SUPER-specific fields for REGULAR
282
- settings_dict.pop('selectionHandling', None)
283
- settings_dict.pop('selectionLimit', None)
284
- settings_dict.pop('componentActivation', None)
285
-
286
- # Filter out None values from settings
287
- settings_dict = {k: v for k, v in settings_dict.items() if v is not None}
288
-
289
- # Prepare simulation payload
290
- payload = {
291
- 'type': simulation_data.type,
292
- 'settings': settings_dict
293
- }
294
-
295
- # Add type-specific fields
296
- if simulation_data.type == "REGULAR":
297
- if simulation_data.regular:
298
- payload['regular'] = simulation_data.regular
299
- elif simulation_data.type == "SUPER":
300
- if simulation_data.combo:
301
- payload['combo'] = simulation_data.combo
302
- if simulation_data.selection:
303
- payload['selection'] = simulation_data.selection
304
-
305
- # Filter out None values from entire payload
306
- payload = {k: v for k, v in payload.items() if v is not None}
307
-
308
- response = self.session.post(f"{self.base_url}/simulations", json=payload)
309
- response.raise_for_status()
310
-
311
- location = response.headers.get('Location', '')
312
- simulation_id = location.split('/')[-1] if location else None
313
-
314
- self.log(f"Simulation created with ID: {simulation_id}", "SUCCESS")
315
-
316
- while True:
317
- simulation_progress = self.session.get(location)
318
- if simulation_progress.headers.get("Retry-After", 0) == 0:
319
- break
320
- print("Sleeping for " + simulation_progress.headers["Retry-After"] + " seconds")
321
- sleep(float(simulation_progress.headers["Retry-After"]))
322
- print("Alpha done simulating, getting alpha details")
323
- alpha_id = simulation_progress.json()["alpha"]
324
- alpha = self.session.get("https://api.worldquantbrain.com/alphas/" + alpha_id)
325
- return alpha.json()
326
-
327
- except Exception as e:
328
- self.log(f"❌ Failed to create simulation: {str(e)}", "ERROR")
329
- raise
330
-
331
- async def get_alpha_details(self, alpha_id: str) -> Dict[str, Any]:
332
- """Get detailed information about an alpha."""
333
- await self.ensure_authenticated()
334
-
335
- try:
336
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}")
337
- response.raise_for_status()
338
- return response.json()
339
- except Exception as e:
340
- self.log(f"Failed to get alpha details: {str(e)}", "ERROR")
341
- raise
342
-
343
- async def get_datasets(self, instrument_type: str = "EQUITY", region: str = "USA",
344
- delay: int = 1, universe: str = "TOP3000", theme: str = "false", search: Optional[str] = None) -> Dict[str, Any]:
345
- """Get available datasets."""
346
- await self.ensure_authenticated()
347
-
348
- try:
349
- params = {
350
- 'instrumentType': instrument_type,
351
- 'region': region,
352
- 'delay': delay,
353
- 'universe': universe,
354
- 'theme': theme
355
- }
356
-
357
- if search:
358
- params['search'] = search
359
-
360
- response = self.session.get(f"{self.base_url}/data-sets", params=params)
361
- response.raise_for_status()
362
- response_json = response.json()
363
- response_json['extraNote'] = "if your returned result is 0, you may want to check your parameter by using get_platform_setting_options tool to got correct parameter"
364
- return response_json
365
- except Exception as e:
366
- self.log(f"Failed to get datasets: {str(e)}", "ERROR")
367
- raise
368
-
369
- async def get_datafields(self, instrument_type: str = "EQUITY", region: str = "USA",
370
- delay: int = 1, universe: str = "TOP3000", theme: str = "false",
371
- dataset_id: Optional[str] = None, data_type: str = "",
372
- search: Optional[str] = None) -> Dict[str, Any]:
373
- """Get available data fields."""
374
- await self.ensure_authenticated()
375
-
376
- try:
377
- params = {
378
- 'instrumentType': instrument_type,
379
- 'region': region,
380
- 'delay': delay,
381
- 'universe': universe,
382
- 'limit': '50',
383
- 'offset': '0'
384
- }
385
-
386
- if data_type != 'ALL':
387
- params['type'] = data_type
388
-
389
- if dataset_id:
390
- params['dataset.id'] = dataset_id
391
- if search:
392
- params['search'] = search
393
-
394
- response = self.session.get(f"{self.base_url}/data-fields", params=params)
395
- response.raise_for_status()
396
- response_json = response.json()
397
- response_json['extraNote'] = "if your returned result is 0, you may want to check your parameter by using get_platform_setting_options tool to got correct parameter"
398
- return response_json
399
- except Exception as e:
400
- self.log(f"Failed to get datafields: {str(e)}", "ERROR")
401
- raise
402
-
403
- async def get_alpha_pnl(self, alpha_id: str) -> Dict[str, Any]:
404
- """Get PnL data for an alpha with retry logic."""
405
- await self.ensure_authenticated()
406
-
407
- max_retries = 5
408
- retry_delay = 2 # seconds
409
-
410
- for attempt in range(max_retries):
411
- try:
412
- self.log(f"Attempting to get PnL for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")
413
-
414
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/pnl")
415
- response.raise_for_status()
416
-
417
- text = (response.text or "").strip()
418
- if not text:
419
- if attempt < max_retries - 1:
420
- self.log(f"Empty PnL response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
421
- await asyncio.sleep(retry_delay)
422
- retry_delay *= 1.5
423
- continue
424
- else:
425
- self.log(f"Empty PnL response after {max_retries} attempts for {alpha_id}", "WARNING")
426
- return {}
427
-
428
- try:
429
- pnl_data = response.json()
430
- if pnl_data:
431
- self.log(f"Successfully retrieved PnL data for alpha {alpha_id}", "SUCCESS")
432
- return pnl_data
433
- else:
434
- if attempt < max_retries - 1:
435
- self.log(f"Empty PnL JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
436
- await asyncio.sleep(retry_delay)
437
- retry_delay *= 1.5
438
- continue
439
- else:
440
- self.log(f"Empty PnL JSON after {max_retries} attempts for {alpha_id}", "WARNING")
441
- return {}
442
-
443
- except json.JSONDecodeError as parse_err:
444
- if attempt < max_retries - 1:
445
- self.log(f"PnL JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
446
- await asyncio.sleep(retry_delay)
447
- retry_delay *= 1.5
448
- continue
449
- else:
450
- self.log(f"PnL JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
451
- return {}
452
-
453
- except requests.RequestException as e:
454
- if attempt < max_retries - 1:
455
- self.log(f"Failed to get alpha PnL for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
456
- await asyncio.sleep(retry_delay)
457
- retry_delay *= 1.5
458
- continue
459
- else:
460
- self.log(f"Failed to get alpha PnL for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
461
- raise
462
-
463
- return {}
464
-
465
- async def get_user_alphas(
466
- self,
467
- stage: str = "OS",
468
- limit: int = 30,
469
- offset: int = 0,
470
- start_date: Optional[str] = None,
471
- end_date: Optional[str] = None,
472
- submission_start_date: Optional[str] = None,
473
- submission_end_date: Optional[str] = None,
474
- order: Optional[str] = None,
475
- hidden: Optional[bool] = None,
476
- ) -> Dict[str, Any]:
477
- """Get user's alphas with advanced filtering."""
478
- await self.ensure_authenticated()
479
-
480
- try:
481
- params = {
482
- "stage": stage,
483
- "limit": limit,
484
- "offset": offset,
485
- }
486
- if start_date:
487
- params["dateCreated>"] = start_date
488
- if end_date:
489
- params["dateCreated<"] = end_date
490
- if submission_start_date:
491
- params["dateSubmitted>"] = submission_start_date
492
- if submission_end_date:
493
- params["dateSubmitted<"] = submission_end_date
494
- if order:
495
- params["order"] = order
496
- if hidden is not None:
497
- params["hidden"] = str(hidden).lower()
498
-
499
- response = self.session.get(f"{self.base_url}/users/self/alphas", params=params)
500
- response.raise_for_status()
501
- return response.json()
502
- except Exception as e:
503
- self.log(f"Failed to get user alphas: {str(e)}", "ERROR")
504
- raise
505
-
506
- async def submit_alpha(self, alpha_id: str) -> bool:
507
- """Submit an alpha for production."""
508
- await self.ensure_authenticated()
509
-
510
- try:
511
- response = self.session.post(f"{self.base_url}/alphas/{alpha_id}/submit")
512
- response.raise_for_status()
513
- return True
514
- except Exception as e:
515
- self.log(f"Failed to submit alpha: {str(e)}", "ERROR")
516
- raise
517
-
518
- async def get_events(self) -> Dict[str, Any]:
519
- """Get available events and competitions."""
520
- await self.ensure_authenticated()
521
-
522
- try:
523
- response = self.session.get(f"{self.base_url}/events")
524
- response.raise_for_status()
525
- return response.json()
526
- except Exception as e:
527
- self.log(f"Failed to get events: {str(e)}", "ERROR")
528
- raise
529
-
530
- async def get_leaderboard(self, user_id: Optional[str] = None) -> Dict[str, Any]:
531
- """Get leaderboard data."""
532
- await self.ensure_authenticated()
533
-
534
- try:
535
- params = {}
536
-
537
- if user_id:
538
- params['user'] = user_id
539
- else:
540
- # Get current user ID if not specified
541
- user_response = self.session.get(f"{self.base_url}/users/self")
542
- if user_response.status_code == 200:
543
- user_data = user_response.json()
544
- params['user'] = user_data.get('id')
545
-
546
- response = self.session.get(f"{self.base_url}/consultant/boards/leader", params=params)
547
- response.raise_for_status()
548
- return response.json()
549
- except Exception as e:
550
- self.log(f"Failed to get leaderboard: {str(e)}", "ERROR")
551
- raise
552
-
553
- def _is_atom(self, detail: Optional[Dict[str, Any]]) -> bool:
554
- """Match atom detection used in extract_regular_alphas.py:
555
- - Primary signal: 'classifications' entries containing 'SINGLE_DATA_SET'
556
- - Fallbacks: tags list contains 'atom' or classification id/name contains 'ATOM'
557
- """
558
- if not detail or not isinstance(detail, dict):
559
- return False
560
-
561
- classifications = detail.get('classifications') or []
562
- for c in classifications:
563
- cid = (c.get('id') or c.get('name') or '')
564
- if isinstance(cid, str) and 'SINGLE_DATA_SET' in cid:
565
- return True
566
-
567
- # Fallbacks
568
- tags = detail.get('tags') or []
569
- if isinstance(tags, list):
570
- for t in tags:
571
- if isinstance(t, str) and t.strip().lower() == 'atom':
572
- return True
573
-
574
- for c in classifications:
575
- cid = (c.get('id') or c.get('name') or '')
576
- if isinstance(cid, str) and 'ATOM' in cid.upper():
577
- return True
578
-
579
- return False
580
-
581
- async def value_factor_trendScore(self, start_date: str, end_date: str) -> Dict[str, Any]:
582
- """Compute diversity score for regular alphas in a date range.
583
-
584
- Description:
585
- This function calculate the diversity of the users' submission, by checking the diversity, we can have a good understanding on the valuefactor's trend.
586
- value factor of a user is defiend by This diversity score, which measures three key aspects of work output: the proportion of works
587
- with the "Atom" tag (S_A), atom proportion, the breadth of pyramids covered (S_P), and how evenly works
588
- are distributed across those pyramids (S_H). Calculated as their product, it rewards
589
- strong performance across all three dimensions—encouraging more Atom-tagged works,
590
- wider pyramid coverage, and balanced distribution—with weaknesses in any area lowering
591
- the total score significantly.
592
-
593
- Inputs (hints for AI callers):
594
- - start_date (str): ISO UTC start datetime, e.g. '2025-08-14T00:00:00Z'
595
- - end_date (str): ISO UTC end datetime, e.g. '2025-08-18T23:59:59Z'
596
- - Note: this tool always uses 'OS' (submission dates) to define the window; callers do not need to supply a stage.
597
- - Note: P_max (total number of possible pyramids) is derived from the platform
598
- pyramid-multipliers endpoint and not supplied by callers.
599
-
600
- Returns (compact JSON): {
601
- 'diversity_score': float,
602
- 'N': int, # total regular alphas in window
603
- 'A': int, # number of Atom-tagged works (is_single_data_set)
604
- 'P': int, # pyramid coverage count in the sample
605
- 'P_max': int, # used max for normalization
606
- 'S_A': float, 'S_P': float, 'S_H': float,
607
- 'per_pyramid_counts': {pyramid_name: count}
608
- }
609
- """
610
- # Fetch user alphas (always use OS / submission dates per product policy)
611
- await self.ensure_authenticated()
612
- alphas_resp = await self.get_user_alphas(stage='OS', limit=500, submission_start_date=start_date, submission_end_date=end_date)
613
-
614
- if not isinstance(alphas_resp, dict) or 'results' not in alphas_resp:
615
- return {'error': 'Unexpected response from get_user_alphas', 'raw': alphas_resp}
616
-
617
- alphas = alphas_resp['results']
618
- regular = [a for a in alphas if a.get('type') == 'REGULAR']
619
-
620
- # Fetch details for each regular alpha
621
- pyramid_list = []
622
- atom_count = 0
623
- per_pyramid = {}
624
- for a in regular:
625
- try:
626
- detail = await self.get_alpha_details(a.get('id'))
627
- except Exception:
628
- continue
629
-
630
- is_atom = self._is_atom(detail)
631
- if is_atom:
632
- atom_count += 1
633
-
634
- # Extract pyramids
635
- ps = []
636
- if isinstance(detail.get('pyramids'), list):
637
- ps = [p.get('name') for p in detail.get('pyramids') if p.get('name')]
638
- else:
639
- pt = detail.get('pyramidThemes') or {}
640
- pss = pt.get('pyramids') if isinstance(pt, dict) else None
641
- if pss and isinstance(pss, list):
642
- ps = [p.get('name') for p in pss if p.get('name')]
643
-
644
- for p in ps:
645
- pyramid_list.append(p)
646
- per_pyramid[p] = per_pyramid.get(p, 0) + 1
647
-
648
- N = len(regular)
649
- A = atom_count
650
- P = len(per_pyramid)
651
-
652
- # Determine P_max similarly to the script: use pyramid multipliers if available
653
- P_max = None
654
- try:
655
- pm = await self.get_pyramid_multipliers()
656
- if isinstance(pm, dict) and 'pyramids' in pm:
657
- pyramids_list = pm.get('pyramids') or []
658
- P_max = len(pyramids_list)
659
- except Exception:
660
- P_max = None
661
-
662
- if not P_max or P_max <= 0:
663
- P_max = max(P, 1)
664
-
665
- # Component scores
666
- S_A = (A / N) if N > 0 else 0.0
667
- S_P = (P / P_max) if P_max > 0 else 0.0
668
-
669
- # Entropy
670
- S_H = 0.0
671
- if P <= 1 or not per_pyramid:
672
- S_H = 0.0
673
- else:
674
- total_occ = sum(per_pyramid.values())
675
- H = 0.0
676
- for cnt in per_pyramid.values():
677
- q = cnt / total_occ if total_occ > 0 else 0
678
- if q > 0:
679
- H -= q * math.log2(q)
680
- max_H = math.log2(P) if P > 0 else 1
681
- S_H = (H / max_H) if max_H > 0 else 0.0
682
-
683
- diversity_score = S_A * S_P * S_H
684
-
685
- return {
686
- 'diversity_score': diversity_score,
687
- 'N': N,
688
- 'A': A,
689
- 'P': P,
690
- 'P_max': P_max,
691
- 'S_A': S_A,
692
- 'S_P': S_P,
693
- 'S_H': S_H,
694
- 'per_pyramid_counts': per_pyramid
695
- }
696
-
697
- async def get_operators(self) -> Dict[str, Any]:
698
- """Get available operators for alpha creation."""
699
- await self.ensure_authenticated()
700
-
701
- try:
702
- response = self.session.get(f"{self.base_url}/operators")
703
- response.raise_for_status()
704
- return response.json()
705
- except Exception as e:
706
- self.log(f"Failed to get operators: {str(e)}", "ERROR")
707
- raise
708
-
709
- async def run_selection(
710
- self,
711
- selection: str,
712
- instrument_type: str = "EQUITY",
713
- region: str = "USA",
714
- delay: int = 1,
715
- selection_limit: int = 1000,
716
- selection_handling: str = "POSITIVE"
717
- ) -> Dict[str, Any]:
718
- """Run a selection query to filter instruments."""
719
- await self.ensure_authenticated()
720
-
721
- try:
722
- selection_data = {
723
- "selection": selection,
724
- "instrumentType": instrument_type,
725
- "region": region,
726
- "delay": delay,
727
- "selectionLimit": selection_limit,
728
- "selectionHandling": selection_handling
729
- }
730
-
731
- response = self.session.get(f"{self.base_url}/simulations/super-selection", params=selection_data)
732
- response.raise_for_status()
733
- return response.json()
734
- except Exception as e:
735
- self.log(f"Failed to run selection: {str(e)}", "ERROR")
736
- raise
737
-
738
- async def get_user_profile(self, user_id: str = "self") -> Dict[str, Any]:
739
- """Get user profile information."""
740
- await self.ensure_authenticated()
741
-
742
- try:
743
- response = self.session.get(f"{self.base_url}/users/{user_id}")
744
- response.raise_for_status()
745
- return response.json()
746
- except Exception as e:
747
- self.log(f"Failed to get user profile: {str(e)}", "ERROR")
748
- raise
749
-
750
- async def get_documentations(self) -> Dict[str, Any]:
751
- """Get available documentations and learning materials."""
752
- await self.ensure_authenticated()
753
-
754
- try:
755
- response = self.session.get(f"{self.base_url}/tutorials")
756
- response.raise_for_status()
757
- return response.json()
758
- except Exception as e:
759
- self.log(f"Failed to get documentations: {str(e)}", "ERROR")
760
- raise
761
-
762
- async def get_messages(self, limit: Optional[int] = None, offset: int = 0) -> Dict[str, Any]:
763
- """Get messages for the current user with optional pagination.
764
-
765
- This function retrieves messages, processes their descriptions to extract
766
- and format embedded JSON, and handles file attachments by saving them locally.
767
- """
768
- from typing import Tuple
769
-
770
- def process_description(desc: str, message_id: str) -> Tuple[str, List[str]]:
771
- """
772
- Processes message description to handle HTML, embedded images, and JSON.
773
- """
774
- attachments = []
775
-
776
- # Handle embedded images
777
- soup = BeautifulSoup(desc, 'html.parser')
778
- for idx, img_tag in enumerate(soup.find_all('img')):
779
- src = img_tag.get('src', '')
780
- if src.startswith('data:image'):
781
- try:
782
- # Extract image data
783
- header, encoded = src.split(',', 1)
784
- ext = header.split(';')[0].split('/')[1]
785
- safe_ext = re.sub(r'[^a-zA-Z0-9]', '', ext)
786
-
787
- # Decode and save image
788
- content = base64.b64decode(encoded)
789
- file_name = f"{message_id}_img_{idx}.{safe_ext}"
790
- with open(file_name, "wb") as f:
791
- f.write(content)
792
-
793
- # Update HTML and add attachment info
794
- img_tag['src'] = file_name
795
- attachments.append(f"Saved embedded image to ./{file_name}")
796
-
797
- except Exception as e:
798
- attachments.append(f"Could not process embedded image: {e}")
799
-
800
- desc = str(soup)
801
-
802
- # Handle JSON content
803
- try:
804
- json_part_match = re.search(r'```json\n({.*?})\n```', desc, re.DOTALL)
805
- if json_part_match:
806
- json_str = json_part_match.group(1)
807
- desc = desc.replace(json_part_match.group(0), "").strip()
808
-
809
- try:
810
- data = json.loads(json_str)
811
- formatted_json = json.dumps(data, indent=2)
812
- desc += f"\n\n---\n**Details**\n```json\n{formatted_json}\n```"
813
- except json.JSONDecodeError:
814
- desc += f"\n\n---\n**Details (raw)**\n{json_str}"
815
- except Exception:
816
- pass
817
-
818
- return desc, attachments
819
-
820
- await self.ensure_authenticated()
821
-
822
- try:
823
- params = {"limit": limit, "offset": offset}
824
- params = {k: v for k, v in params.items() if v is not None}
825
-
826
- response = self.session.get(f"{self.base_url}/users/self/messages", params=params)
827
- response.raise_for_status()
828
- messages_data = response.json()
829
-
830
- # Process descriptions and attachments
831
- for msg in messages_data.get("results", []):
832
- try:
833
- msg_id = msg.get("id", "unknown_id")
834
- new_desc, attachments = process_description(msg.get("description", ""), msg_id)
835
- msg["description"] = new_desc
836
- if attachments:
837
- msg["attachments_info"] = attachments
838
- except Exception as e:
839
- self.log(f"Error processing message {msg.get('id')}: {e}", "ERROR")
840
-
841
- return messages_data
842
-
843
- except Exception as e:
844
- self.log(f"Failed to get messages: {str(e)}", "ERROR")
845
- raise
846
-
847
- async def get_glossary_terms(self, email: str, password: str) -> List[Dict[str, str]]:
848
- """Get glossary terms from forum."""
849
- try:
850
- return await forum_client.get_glossary_terms(email, password)
851
- except Exception as e:
852
- self.log(f"Failed to get glossary terms: {str(e)}", "ERROR")
853
- raise
854
-
855
- async def search_forum_posts(self, email: str, password: str, search_query: str,
856
- max_results: int = 50) -> Dict[str, Any]:
857
- """Search forum posts."""
858
- try:
859
- return await forum_client.search_forum_posts(email, password, search_query, max_results)
860
- except Exception as e:
861
- self.log(f"Failed to search forum posts: {str(e)}", "ERROR")
862
- raise
863
-
864
- async def read_forum_post(self, email: str, password: str, article_id: str,
865
- include_comments: bool = True) -> Dict[str, Any]:
866
- """Get forum post."""
867
- try:
868
- return await forum_client.read_full_forum_post(email, password, article_id, include_comments)
869
- except Exception as e:
870
- self.log(f"Failed to read forum post: {str(e)}", "ERROR")
871
- raise
872
-
873
- async def get_alpha_yearly_stats(self, alpha_id: str) -> Dict[str, Any]:
874
- """Get yearly statistics for an alpha."""
875
- await self.ensure_authenticated()
876
-
877
- max_retries = 5
878
- retry_delay = 2
879
-
880
- for attempt in range(max_retries):
881
- try:
882
- self.log(f"Attempting to get yearly stats for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")
883
-
884
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/yearly-stats")
885
- response.raise_for_status()
886
-
887
- text = (response.text or "").strip()
888
- if not text:
889
- if attempt < max_retries - 1:
890
- self.log(f"Empty yearly stats response for {alpha_id}, retrying...", "WARNING")
891
- await asyncio.sleep(retry_delay)
892
- retry_delay *= 1.5
893
- continue
894
- else:
895
- return {}
896
-
897
- try:
898
- stats_data = response.json()
899
- if stats_data:
900
- return stats_data
901
- else:
902
- if attempt < max_retries - 1:
903
- self.log(f"Empty yearly stats JSON for {alpha_id}, retrying...", "WARNING")
904
- await asyncio.sleep(retry_delay)
905
- retry_delay *= 1.5
906
- continue
907
- else:
908
- return {}
909
-
910
- except json.JSONDecodeError as parse_err:
911
- if attempt < max_retries - 1:
912
- self.log(f"Yearly stats JSON parse failed for {alpha_id}, retrying...", "WARNING")
913
- await asyncio.sleep(retry_delay)
914
- retry_delay *= 1.5
915
- continue
916
- else:
917
- raise
918
-
919
- except requests.RequestException as e:
920
- if attempt < max_retries - 1:
921
- self.log(f"Failed to get yearly stats for {alpha_id}, retrying: {e}", "WARNING")
922
- await asyncio.sleep(retry_delay)
923
- retry_delay *= 1.5
924
- continue
925
- else:
926
- raise
927
-
928
- return {}
929
-
930
- async def get_production_correlation(self, alpha_id: str) -> Dict[str, Any]:
931
- """Get production correlation data for an alpha."""
932
- await self.ensure_authenticated()
933
-
934
- max_retries = 5
935
- retry_delay = 2
936
-
937
- for attempt in range(max_retries):
938
- try:
939
- self.log(f"Attempting to get production correlation for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")
940
-
941
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/prod")
942
- response.raise_for_status()
943
-
944
- # Check if response has content
945
- text = (response.text or "").strip()
946
- if not text:
947
- if attempt < max_retries - 1:
948
- self.log(f"Empty production correlation response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
949
- await asyncio.sleep(retry_delay)
950
- continue
951
- else:
952
- self.log(f"Empty production correlation response after {max_retries} attempts for {alpha_id}", "WARNING")
953
- return {}
954
-
955
- try:
956
- corr_data = response.json()
957
- if corr_data:
958
- return corr_data
959
- else:
960
- if attempt < max_retries - 1:
961
- self.log(f"Empty production correlation JSON for {alpha_id}, retrying...", "WARNING")
962
- await asyncio.sleep(retry_delay)
963
- retry_delay *= 1.5
964
- continue
965
- else:
966
- return {}
967
-
968
- except json.JSONDecodeError:
969
- if attempt < max_retries - 1:
970
- self.log(f"Production correlation JSON parse failed for {alpha_id}, retrying...", "WARNING")
971
- await asyncio.sleep(retry_delay)
972
- retry_delay *= 1.5
973
- continue
974
- else:
975
- raise
976
-
977
- except requests.RequestException as e:
978
- if attempt < max_retries - 1:
979
- self.log(f"Failed to get production correlation for {alpha_id}, retrying: {e}", "WARNING")
980
- await asyncio.sleep(retry_delay)
981
- retry_delay *= 1.5
982
- continue
983
- else:
984
- raise
985
-
986
- return {}
987
-
988
- async def get_self_correlation(self, alpha_id: str) -> Dict[str, Any]:
989
- """Get self correlation data for an alpha."""
990
- await self.ensure_authenticated()
991
-
992
- max_retries = 5
993
- retry_delay = 2
994
-
995
- for attempt in range(max_retries):
996
- try:
997
- self.log(f"Attempting to get self correlation for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")
998
-
999
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/self")
1000
- response.raise_for_status()
1001
-
1002
- # Check if response has content
1003
- text = (response.text or "").strip()
1004
- if not text:
1005
- if attempt < max_retries - 1:
1006
- self.log(f"Empty self correlation response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
1007
- await asyncio.sleep(retry_delay)
1008
- continue
1009
- else:
1010
- self.log(f"Empty self correlation response after {max_retries} attempts for {alpha_id}", "WARNING")
1011
- return {}
1012
-
1013
- try:
1014
- corr_data = response.json()
1015
- if corr_data:
1016
- return corr_data
1017
- else:
1018
- if attempt < max_retries - 1:
1019
- self.log(f"Empty self correlation JSON for {alpha_id}, retrying...", "WARNING")
1020
- await asyncio.sleep(retry_delay)
1021
- retry_delay *= 1.5
1022
- continue
1023
- else:
1024
- return {}
1025
-
1026
- except json.JSONDecodeError:
1027
- if attempt < max_retries - 1:
1028
- self.log(f"Self correlation JSON parse failed for {alpha_id}, retrying...", "WARNING")
1029
- await asyncio.sleep(retry_delay)
1030
- retry_delay *= 1.5
1031
- continue
1032
- else:
1033
- raise
1034
-
1035
- except requests.RequestException as e:
1036
- if attempt < max_retries - 1:
1037
- self.log(f"Failed to get self correlation for {alpha_id}, retrying: {e}", "WARNING")
1038
- await asyncio.sleep(retry_delay)
1039
- retry_delay *= 1.5
1040
- continue
1041
- else:
1042
- raise
1043
-
1044
- return {}
1045
-
1046
- async def check_correlation(self, alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
1047
- """Check alpha correlation against production alphas, self alphas, or both."""
1048
- await self.ensure_authenticated()
1049
-
1050
- try:
1051
- results = {}
1052
- if correlation_type in ["production", "both"]:
1053
- results["production"] = await self.get_production_correlation(alpha_id)
1054
- if correlation_type in ["self", "both"]:
1055
- results["self"] = await self.get_self_correlation(alpha_id)
1056
-
1057
- # Add analysis based on threshold
1058
- for key, data in results.items():
1059
- if "correlation" in data and "sharpe" in data["correlation"]:
1060
- high_corr = [item for item in data["correlation"]["sharpe"] if abs(item.get("corr", 0)) > threshold]
1061
- data["analysis"] = {
1062
- "highly_correlated_count": len(high_corr),
1063
- "highly_correlated_alphas": high_corr
1064
- }
1065
-
1066
- return results
1067
- except Exception as e:
1068
- self.log(f"Failed to check correlation: {str(e)}", "ERROR")
1069
- raise
1070
-
1071
- async def get_submission_check(self, alpha_id: str) -> Dict[str, Any]:
1072
- """Comprehensive pre-submission check."""
1073
- await self.ensure_authenticated()
1074
-
1075
- try:
1076
- # This endpoint might not exist, so we simulate it by calling other functions
1077
- # In a real scenario, this would be a single API call
1078
-
1079
- pnl_data = await self.get_alpha_pnl(alpha_id)
1080
- yearly_stats = await self.get_alpha_yearly_stats(alpha_id)
1081
- correlation = await self.check_correlation(alpha_id)
1082
-
1083
- return {
1084
- "pnl_summary": pnl_data.get("pnlSummary", {}),
1085
- "yearly_stats": yearly_stats,
1086
- "correlation": correlation
1087
- }
1088
- except Exception as e:
1089
- self.log(f"Failed submission check: {str(e)}", "ERROR")
1090
- raise
1091
-
1092
- async def set_alpha_properties(self, alpha_id: str, name: Optional[str] = None,
1093
- color: Optional[str] = None, tags: Optional[List[str]] = None,
1094
- selection_desc: str = "None", combo_desc: str = "None") -> Dict[str, Any]:
1095
- """Update alpha properties (name, color, tags, descriptions)."""
1096
- await self.ensure_authenticated()
1097
-
1098
- try:
1099
- payload = {
1100
- "name": name,
1101
- "color": color,
1102
- "tags": tags,
1103
- "descriptions": {
1104
- "selection": selection_desc,
1105
- "combo": combo_desc
1106
- }
1107
- }
1108
- payload = {k: v for k, v in payload.items() if v is not None}
1109
-
1110
- response = self.session.patch(f"{self.base_url}/alphas/{alpha_id}", json=payload)
1111
- response.raise_for_status()
1112
- return response.json()
1113
- except Exception as e:
1114
- self.log(f"Failed to set alpha properties: {str(e)}", "ERROR")
1115
- raise
1116
-
1117
- async def get_record_sets(self, alpha_id: str) -> Dict[str, Any]:
1118
- """List available record sets for an alpha."""
1119
- await self.ensure_authenticated()
1120
-
1121
- try:
1122
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets")
1123
- response.raise_for_status()
1124
- return response.json()
1125
- except Exception as e:
1126
- self.log(f"Failed to get record sets: {str(e)}", "ERROR")
1127
- raise
1128
-
1129
- async def get_record_set_data(self, alpha_id: str, record_set_name: str) -> Dict[str, Any]:
1130
- """Get data from a specific record set."""
1131
- await self.ensure_authenticated()
1132
-
1133
- try:
1134
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/{record_set_name}")
1135
- response.raise_for_status()
1136
- return response.json()
1137
- except Exception as e:
1138
- self.log(f"Failed to get record set data: {str(e)}", "ERROR")
1139
- raise
1140
-
1141
- async def get_user_activities(self, user_id: str, grouping: Optional[str] = None) -> Dict[str, Any]:
1142
- """Get user activity diversity data."""
1143
- await self.ensure_authenticated()
1144
-
1145
- try:
1146
- params = {}
1147
- if grouping:
1148
- params['grouping'] = grouping
1149
-
1150
- response = self.session.get(f"{self.base_url}/users/{user_id}/activities", params=params)
1151
- response.raise_for_status()
1152
- return response.json()
1153
- except Exception as e:
1154
- self.log(f"Failed to get user activities: {str(e)}", "ERROR")
1155
- raise
1156
-
1157
- async def get_pyramid_multipliers(self) -> Dict[str, Any]:
1158
- """Get current pyramid multipliers showing BRAIN's encouragement levels."""
1159
- await self.ensure_authenticated()
1160
-
1161
- try:
1162
- response = self.session.get(f"{self.base_url}/users/self/activities/pyramid-multipliers")
1163
- response.raise_for_status()
1164
- return response.json()
1165
- except Exception as e:
1166
- self.log(f"Failed to get pyramid multipliers: {str(e)}", "ERROR")
1167
- raise
1168
-
1169
- async def get_pyramid_alphas(self, start_date: Optional[str] = None,
1170
- end_date: Optional[str] = None) -> Dict[str, Any]:
1171
- """Get user's current alpha distribution across pyramid categories."""
1172
- await self.ensure_authenticated()
1173
-
1174
- try:
1175
- params = {}
1176
- if start_date:
1177
- params["startDate"] = start_date
1178
- if end_date:
1179
- params["endDate"] = end_date
1180
-
1181
- response = self.session.get(f"{self.base_url}/users/self/activities/pyramid-alphas", params=params)
1182
- response.raise_for_status()
1183
- return response.json()
1184
- except Exception as e:
1185
- self.log(f"Failed to get pyramid alphas: {str(e)}", "ERROR")
1186
- raise
1187
-
1188
- async def get_user_competitions(self, user_id: Optional[str] = None) -> Dict[str, Any]:
1189
- """Get list of competitions that the user is participating in."""
1190
- await self.ensure_authenticated()
1191
-
1192
- try:
1193
- if not user_id:
1194
- # Get current user ID if not specified
1195
- user_response = self.session.get(f"{self.base_url}/users/self")
1196
- if user_response.status_code == 200:
1197
- user_data = user_response.json()
1198
- user_id = user_data.get('id')
1199
- else:
1200
- user_id = 'self'
1201
-
1202
- response = self.session.get(f"{self.base_url}/users/{user_id}/competitions")
1203
- response.raise_for_status()
1204
- return response.json()
1205
- except Exception as e:
1206
- self.log(f"Failed to get user competitions: {str(e)}", "ERROR")
1207
- raise
1208
-
1209
- async def get_competition_details(self, competition_id: str) -> Dict[str, Any]:
1210
- """Get detailed information about a specific competition."""
1211
- await self.ensure_authenticated()
1212
-
1213
- try:
1214
- response = self.session.get(f"{self.base_url}/competitions/{competition_id}")
1215
- response.raise_for_status()
1216
- return response.json()
1217
- except Exception as e:
1218
- self.log(f"Failed to get competition details: {str(e)}", "ERROR")
1219
- raise
1220
-
1221
- async def get_competition_agreement(self, competition_id: str) -> Dict[str, Any]:
1222
- """Get the rules, terms, and agreement for a specific competition."""
1223
- await self.ensure_authenticated()
1224
-
1225
- try:
1226
- response = self.session.get(f"{self.base_url}/competitions/{competition_id}/agreement")
1227
- response.raise_for_status()
1228
- return response.json()
1229
- except Exception as e:
1230
- self.log(f"Failed to get competition agreement: {str(e)}", "ERROR")
1231
- raise
1232
-
1233
- async def get_platform_setting_options(self) -> Dict[str, Any]:
1234
- """Get available instrument types, regions, delays, and universes."""
1235
- await self.ensure_authenticated()
1236
-
1237
- try:
1238
- # Use OPTIONS method on simulations endpoint to get configuration options
1239
- response = self.session.options(f"{self.base_url}/simulations")
1240
- response.raise_for_status()
1241
-
1242
- # Parse the settings structure from the response
1243
- settings_data = response.json()
1244
- settings_options = settings_data['actions']['POST']['settings']['children']
1245
-
1246
- # Extract instrument configuration options
1247
- instrument_type_data = {}
1248
- region_data = {}
1249
- universe_data = {}
1250
- delay_data = {}
1251
- neutralization_data = {}
1252
-
1253
- # Parse each setting type
1254
- for key, setting in settings_options.items():
1255
- if setting['type'] == 'choice':
1256
- if setting['label'] == 'Instrument type':
1257
- instrument_type_data = setting['choices']
1258
- elif setting['label'] == 'Region':
1259
- region_data = setting['choices']['instrumentType']
1260
- elif setting['label'] == 'Universe':
1261
- universe_data = setting['choices']['instrumentType']
1262
- elif setting['label'] == 'Delay':
1263
- delay_data = setting['choices']['instrumentType']
1264
- elif setting['label'] == 'Neutralization':
1265
- neutralization_data = setting['choices']['instrumentType']
1266
-
1267
- # Build comprehensive instrument options
1268
- data_list = []
1269
-
1270
- for instrument_type in instrument_type_data:
1271
- for region in region_data[instrument_type['value']]:
1272
- for delay in delay_data[instrument_type['value']]['region'][region['value']]:
1273
- row = {
1274
- 'InstrumentType': instrument_type['value'],
1275
- 'Region': region['value'],
1276
- 'Delay': delay['value']
1277
- }
1278
- row['Universe'] = [
1279
- item['value'] for item in universe_data[instrument_type['value']]['region'][region['value']]
1280
- ]
1281
- row['Neutralization'] = [
1282
- item['value'] for item in neutralization_data[instrument_type['value']]['region'][region['value']]
1283
- ]
1284
- data_list.append(row)
1285
-
1286
- # Return structured data
1287
- return {
1288
- 'instrument_options': data_list,
1289
- 'total_combinations': len(data_list),
1290
- 'instrument_types': [item['value'] for item in instrument_type_data],
1291
- 'regions_by_type': {
1292
- item['value']: [r['value'] for r in region_data[item['value']]]
1293
- for item in instrument_type_data
1294
- }
1295
- }
1296
-
1297
- except Exception as e:
1298
- self.log(f"Failed to get instrument options: {str(e)}", "ERROR")
1299
- raise
1300
-
1301
- async def performance_comparison(self, alpha_id: str, team_id: Optional[str] = None,
1302
- competition: Optional[str] = None) -> Dict[str, Any]:
1303
- """Get performance comparison data for an alpha."""
1304
- await self.ensure_authenticated()
1305
-
1306
- try:
1307
- params = {"teamId": team_id, "competition": competition}
1308
- params = {k: v for k, v in params.items() if v is not None}
1309
-
1310
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/performance-comparison", params=params)
1311
- response.raise_for_status()
1312
- return response.json()
1313
- except Exception as e:
1314
- self.log(f"Failed to get performance comparison: {str(e)}", "ERROR")
1315
- raise
1316
-
1317
- # --- Helper function for data flattening ---
1318
-
1319
- async def expand_nested_data(self, data: List[Dict[str, Any]], preserve_original: bool = True) -> List[Dict[str, Any]]:
1320
- """Flatten complex nested data structures into tabular format."""
1321
- try:
1322
- df = pd.json_normalize(data, sep='_')
1323
- if preserve_original:
1324
- original_df = pd.DataFrame(data)
1325
- df = pd.concat([original_df, df], axis=1)
1326
- df = df.loc[:,~df.columns.duplicated()]
1327
- return df.to_dict(orient='records')
1328
- except Exception as e:
1329
- self.log(f"Failed to expand nested data: {str(e)}", "ERROR")
1330
- raise
1331
-
1332
- # --- New documentation endpoint ---
1333
-
1334
- async def get_documentation_page(self, page_id: str) -> Dict[str, Any]:
1335
- """Retrieve detailed content of a specific documentation page/article."""
1336
- await self.ensure_authenticated()
1337
-
1338
- try:
1339
- response = self.session.get(f"{self.base_url}/tutorial-pages/{page_id}")
1340
- response.raise_for_status()
1341
- return response.json()
1342
- except Exception as e:
1343
- self.log(f"Failed to get documentation page: {str(e)}", "ERROR")
1344
- raise
1345
-
1346
- brain_client = BrainApiClient()
1347
-
1348
- # --- Configuration Management ---
1349
-
1350
- def _resolve_config_path(for_write: bool = False) -> str:
1351
- """
1352
- Resolve the configuration file path.
1353
-
1354
- Checks for a file specified by the MCP_CONFIG_FILE environment variable,
1355
- then falls back to ~/.brain_mcp_config.json. If for_write is True,
1356
- it ensures the directory exists.
1357
- """
1358
- if 'MCP_CONFIG_FILE' in os.environ:
1359
- return os.environ['MCP_CONFIG_FILE']
1360
-
1361
- config_path = Path(__file__).parent / "user_config.json"
1362
-
1363
- if for_write:
1364
- try:
1365
- config_path.parent.mkdir(parents=True, exist_ok=True)
1366
- except (IOError, OSError) as e:
1367
- logger.warning(f"Could not create config directory {config_path.parent}: {e}")
1368
- # Fallback to a temporary file if home is not writable
1369
- import tempfile
1370
- return tempfile.NamedTemporaryFile(delete=False).name
1371
-
1372
- return str(config_path)
1373
-
1374
- def load_config() -> Dict[str, Any]:
1375
- """Load configuration from file."""
1376
- config_file = _resolve_config_path()
1377
- if os.path.exists(config_file):
1378
- try:
1379
- with open(config_file, 'r', encoding='utf-8') as f:
1380
- return json.load(f)
1381
- except (IOError, json.JSONDecodeError) as e:
1382
- logger.error(f"Error loading config file {config_file}: {e}")
1383
- return {}
1384
-
1385
- def save_config(config: Dict[str, Any]):
1386
- """Save configuration to file using the resolved config path.
1387
-
1388
- This function now uses the write-enabled path resolver to handle
1389
- cases where the default home directory is not writable.
1390
- """
1391
- config_file = _resolve_config_path(for_write=True)
1392
- try:
1393
- with open(config_file, 'w', encoding='utf-8') as f:
1394
- json.dump(config, f, indent=2)
1395
- except IOError as e:
1396
- logger.error(f"Error saving config file to {config_file}: {e}")
1397
-
1398
- # --- MCP Tool Definitions ---
1399
-
1400
- mcp = FastMCP(
1401
- "brain-platform-mcp",
1402
- "A server for interacting with the WorldQuant BRAIN platform",
1403
- )
1404
-
1405
- @mcp.tool()
1406
- async def authenticate(email: Optional[str] = "", password: Optional[str] = "") -> Dict[str, Any]:
1407
- """
1408
- 🔐 Authenticate with WorldQuant BRAIN platform.
1409
-
1410
- This is the first step in any BRAIN workflow. You must authenticate before using any other tools.
1411
-
1412
- Args:
1413
- email: Your BRAIN platform email address (optional if in config or .brain_credentials)
1414
- password: Your BRAIN platform password (optional if in config or .brain_credentials)
1415
-
1416
- Returns:
1417
- Authentication result with user info and permissions
1418
- """
1419
- try:
1420
- # Load config to get credentials if not provided
1421
- config = load_config()
1422
- credentials = config.get("credentials", {})
1423
- email = email or credentials.get("email")
1424
- password = password or credentials.get("password")
1425
- if not email or not password:
1426
- return {"error": "Authentication credentials not provided or found in config."}
1427
-
1428
- auth_result = await brain_client.authenticate(email, password)
1429
-
1430
- # Save successful credentials
1431
- if auth_result.get('status') == 'authenticated':
1432
- if 'credentials' not in config:
1433
- config['credentials'] = {}
1434
- config['credentials']['email'] = email
1435
- config['credentials']['password'] = password
1436
- save_config(config)
1437
-
1438
- return auth_result
1439
- except Exception as e:
1440
- return {"error": f"An unexpected error occurred: {str(e)}"}
1441
-
1442
- @mcp.tool()
1443
- async def manage_config(action: str = "get", settings: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
1444
- """
1445
- 🔧 Manage configuration settings - get or update configuration.
1446
-
1447
- Args:
1448
- action: Action to perform ("get" to retrieve config, "set" to update config)
1449
- settings: Configuration settings to update (required when action="set")
1450
-
1451
- Returns:
1452
- Current or updated configuration including authentication status
1453
- """
1454
- config = load_config()
1455
-
1456
- if action == "set" and settings:
1457
- config.update(settings)
1458
- save_config(config)
1459
-
1460
- is_authed = await brain_client.is_authenticated()
1461
- config['isAuthenticated'] = is_authed
1462
-
1463
- # Mask password for security
1464
- if 'password' in config:
1465
- config['password'] = '********'
1466
-
1467
- return config
1468
-
1469
- # --- Simulation Tools ---
1470
-
1471
- @mcp.tool()
1472
- async def create_simulation(
1473
- type: str = "REGULAR",
1474
- instrument_type: str = "EQUITY",
1475
- region: str = "USA",
1476
- universe: str = "TOP3000",
1477
- delay: int = 1,
1478
- decay: float = 0.0,
1479
- neutralization: str = "NONE",
1480
- truncation: float = 0.0,
1481
- test_period: str = "P0Y0M",
1482
- unit_handling: str = "VERIFY",
1483
- nan_handling: str = "OFF",
1484
- language: str = "FASTEXPR",
1485
- visualization: bool = True,
1486
- regular: Optional[str] = None,
1487
- combo: Optional[str] = None,
1488
- selection: Optional[str] = None,
1489
- pasteurization: str = "ON",
1490
- max_trade: str = "OFF",
1491
- selection_handling: str = "POSITIVE",
1492
- selection_limit: int = 1000,
1493
- component_activation: str = "IS",
1494
- ) -> Dict[str, Any]:
1495
- """
1496
- 🚀 Create a new simulation on BRAIN platform.
1497
-
1498
- This tool creates and starts a simulation with your alpha code. Use this after you have your alpha formula ready.
1499
-
1500
- Args:
1501
- type: Simulation type ("REGULAR" or "SUPER")
1502
- instrument_type: Type of instruments (e.g., "EQUITY")
1503
- region: Market region (e.g., "USA")
1504
- universe: Universe of stocks (e.g., "TOP3000")
1505
- delay: Data delay (0 or 1)
1506
- decay: Decay value for the simulation
1507
- neutralization: Neutralization method
1508
- truncation: Truncation value
1509
- test_period: Test period (e.g., "P0Y0M" for 1 year 6 months)
1510
- unit_handling: Unit handling method
1511
- nan_handling: NaN handling method
1512
- language: Expression language (e.g., "FASTEXPR")
1513
- visualization: Enable visualization
1514
- regular: Regular simulation code (for REGULAR type)
1515
- combo: Combo code (for SUPER type)
1516
- selection: Selection code (for SUPER type)
1517
-
1518
- Returns:
1519
- Simulation creation result with ID and location
1520
- """
1521
- try:
1522
- settings = SimulationSettings(
1523
- instrumentType=instrument_type,
1524
- region=region,
1525
- universe=universe,
1526
- delay=delay,
1527
- decay=decay,
1528
- neutralization=neutralization,
1529
- truncation=truncation,
1530
- testPeriod=test_period,
1531
- unitHandling=unit_handling,
1532
- nanHandling=nan_handling,
1533
- language=language,
1534
- visualization=visualization,
1535
- pasteurization=pasteurization,
1536
- maxTrade=max_trade,
1537
- selectionHandling=selection_handling,
1538
- selectionLimit=selection_limit,
1539
- componentActivation=component_activation,
1540
- )
1541
-
1542
- sim_data = SimulationData(
1543
- type=type,
1544
- settings=settings,
1545
- regular=regular,
1546
- combo=combo,
1547
- selection=selection
1548
- )
1549
-
1550
- return await brain_client.create_simulation(sim_data)
1551
- except Exception as e:
1552
- return {"error": f"An unexpected error occurred: {str(e)}"}
1553
-
1554
- # --- Alpha and Data Retrieval Tools ---
1555
-
1556
- @mcp.tool()
1557
- async def get_alpha_details(alpha_id: str) -> Dict[str, Any]:
1558
- """
1559
- 📋 Get detailed information about an alpha.
1560
-
1561
- Args:
1562
- alpha_id: The ID of the alpha to retrieve
1563
-
1564
- Returns:
1565
- Detailed alpha information
1566
- """
1567
- try:
1568
- return await brain_client.get_alpha_details(alpha_id)
1569
- except Exception as e:
1570
- return {"error": f"An unexpected error occurred: {str(e)}"}
1571
-
1572
- @mcp.tool()
1573
- async def get_datasets(
1574
- instrument_type: str = "EQUITY",
1575
- region: str = "USA",
1576
- delay: int = 1,
1577
- universe: str = "TOP3000",
1578
- theme: str = "false",
1579
- search: Optional[str] = None,
1580
- ) -> Dict[str, Any]:
1581
- """
1582
- 📚 Get available datasets for research.
1583
-
1584
- Use this to discover what data is available for your alpha research.
1585
-
1586
- Args:
1587
- instrument_type: Type of instruments (e.g., "EQUITY")
1588
- region: Market region (e.g., "USA")
1589
- delay: Data delay (0 or 1)
1590
- universe: Universe of stocks (e.g., "TOP3000")
1591
- theme: Theme filter
1592
-
1593
- Returns:
1594
- Available datasets
1595
- """
1596
- try:
1597
- return await brain_client.get_datasets(instrument_type, region, delay, universe, theme, search)
1598
- except Exception as e:
1599
- return {"error": f"An unexpected error occurred: {str(e)}"}
1600
-
1601
- @mcp.tool()
1602
- async def get_datafields(
1603
- instrument_type: str = "EQUITY",
1604
- region: str = "USA",
1605
- delay: int = 1,
1606
- universe: str = "TOP3000",
1607
- theme: str = "false",
1608
- dataset_id: Optional[str] = None,
1609
- data_type: str = "",
1610
- search: Optional[str] = None,
1611
- ) -> Dict[str, Any]:
1612
- """
1613
- 🔍 Get available data fields for alpha construction.
1614
-
1615
- Use this to find specific data fields you can use in your alpha formulas.
1616
-
1617
- Args:
1618
- instrument_type: Type of instruments (e.g., "EQUITY")
1619
- region: Market region (e.g., "USA")
1620
- delay: Data delay (0 or 1)
1621
- universe: Universe of stocks (e.g., "TOP3000")
1622
- theme: Theme filter
1623
- dataset_id: Specific dataset ID to filter by
1624
- data_type: Type of data (e.g., "MATRIX",'VECTOR','GROUP')
1625
- search: Search term to filter fields
1626
-
1627
- Returns:
1628
- Available data fields
1629
- """
1630
- try:
1631
- return await brain_client.get_datafields(instrument_type, region, delay, universe, theme, dataset_id, data_type, search)
1632
- except Exception as e:
1633
- return {"error": f"An unexpected error occurred: {str(e)}"}
1634
-
1635
- @mcp.tool()
1636
- async def get_alpha_pnl(alpha_id: str) -> Dict[str, Any]:
1637
- """
1638
- 📈 Get PnL (Profit and Loss) data for an alpha.
1639
-
1640
- Args:
1641
- alpha_id: The ID of the alpha
1642
-
1643
- Returns:
1644
- PnL data for the alpha
1645
- """
1646
- try:
1647
- return await brain_client.get_alpha_pnl(alpha_id)
1648
- except Exception as e:
1649
- return {"error": f"An unexpected error occurred: {str(e)}"}
1650
-
1651
- @mcp.tool()
1652
- async def get_user_alphas(
1653
- stage: str = "IS",
1654
- limit: int = 30,
1655
- offset: int = 0,
1656
- start_date: Optional[str] = None,
1657
- end_date: Optional[str] = None,
1658
- submission_start_date: Optional[str] = None,
1659
- submission_end_date: Optional[str] = None,
1660
- order: Optional[str] = None,
1661
- hidden: Optional[bool] = None,
1662
- ) -> Dict[str, Any]:
1663
- """
1664
- 👤 Get user's alphas with advanced filtering, pagination, and sorting.
1665
-
1666
- This tool retrieves a list of your alphas, allowing for detailed filtering based on stage,
1667
- creation date, submission date, and visibility. It also supports pagination and custom sorting.
1668
-
1669
- Args:
1670
- stage (str): The stage of the alphas to retrieve.
1671
- - "IS": In-Sample (alphas that have not been submitted).
1672
- - "OS": Out-of-Sample (alphas that have been submitted).
1673
- Defaults to "IS".
1674
- limit (int): The maximum number of alphas to return in a single request.
1675
- For example, `limit=50` will return at most 50 alphas. Defaults to 30.
1676
- offset (int): The number of alphas to skip from the beginning of the list.
1677
- Used for pagination. For example, `limit=50, offset=50` will retrieve alphas 51-100.
1678
- Defaults to 0.
1679
- start_date (Optional[str]): The earliest creation date for the alphas to be included.
1680
- Filters for alphas created on or after this date.
1681
- Example format: "2023-01-01T00:00:00Z".
1682
- end_date (Optional[str]): The latest creation date for the alphas to be included.
1683
- Filters for alphas created before this date.
1684
- Example format: "2023-12-31T23:59:59Z".
1685
- submission_start_date (Optional[str]): The earliest submission date for the alphas.
1686
- Only applies to "OS" alphas. Filters for alphas submitted on or after this date.
1687
- Example format: "2024-01-01T00:00:00Z".
1688
- submission_end_date (Optional[str]): The latest submission date for the alphas.
1689
- Only applies to "OS" alphas. Filters for alphas submitted before this date.
1690
- Example format: "2024-06-30T23:59:59Z".
1691
- order (Optional[str]): The sorting order for the returned alphas.
1692
- Prefix with a hyphen (-) for descending order.
1693
- Examples: "name" (sort by name ascending), "-dateSubmitted" (sort by submission date descending).
1694
- hidden (Optional[bool]): Filter alphas based on their visibility.
1695
- - `True`: Only return hidden alphas.
1696
- - `False`: Only return non-hidden alphas.
1697
- If not provided, both hidden and non-hidden alphas are returned.
1698
-
1699
- Returns:
1700
- Dict[str, Any]: A dictionary containing a list of alpha details under the 'results' key,
1701
- along with pagination information. If an error occurs, it returns a dictionary with an 'error' key.
1702
- """
1703
- try:
1704
- return await brain_client.get_user_alphas(
1705
- stage=stage, limit=limit, offset=offset, start_date=start_date,
1706
- end_date=end_date, submission_start_date=submission_start_date,
1707
- submission_end_date=submission_end_date, order=order, hidden=hidden
1708
- )
1709
- except Exception as e:
1710
- return {"error": f"An unexpected error occurred: {str(e)}"}
1711
-
1712
- @mcp.tool()
1713
- async def submit_alpha(alpha_id: str) -> Dict[str, Any]:
1714
- """
1715
- 📤 Submit an alpha for production.
1716
-
1717
- Use this when your alpha is ready for production deployment.
1718
-
1719
- Args:
1720
- alpha_id: The ID of the alpha to submit
1721
-
1722
- Returns:
1723
- Submission result
1724
- """
1725
- try:
1726
- success = await brain_client.submit_alpha(alpha_id)
1727
- return {"success": success}
1728
- except Exception as e:
1729
- return {"error": f"An unexpected error occurred: {str(e)}"}
1730
-
1731
- @mcp.tool()
1732
- async def value_factor_trendScore(start_date: str, end_date: str) -> Dict[str, Any]:
1733
- """Compute and return the diversity score for REGULAR alphas in a submission-date window.
1734
- This function calculate the diversity of the users' submission, by checking the diversity, we can have a good understanding on the valuefactor's trend.
1735
- This MCP tool wraps BrainApiClient.value_factor_trendScore and always uses submission dates (OS).
1736
-
1737
- Inputs:
1738
- - start_date: ISO UTC start datetime (e.g. '2025-08-14T00:00:00Z')
1739
- - end_date: ISO UTC end datetime (e.g. '2025-08-18T23:59:59Z')
1740
- - p_max: optional integer total number of pyramid categories for normalization
1741
-
1742
- Returns: compact JSON with diversity_score, N, A, P, P_max, S_A, S_P, S_H, per_pyramid_counts
1743
- """
1744
- try:
1745
- return await brain_client.value_factor_trendScore(start_date=start_date, end_date=end_date)
1746
- except Exception as e:
1747
- return {"error": str(e)}
1748
-
1749
- # --- Community and Events Tools ---
1750
-
1751
- @mcp.tool()
1752
- async def get_events() -> Dict[str, Any]:
1753
- """
1754
- 🏆 Get available events and competitions.
1755
-
1756
- Returns:
1757
- Available events and competitions
1758
- """
1759
- try:
1760
- return await brain_client.get_events()
1761
- except Exception as e:
1762
- return {"error": f"An unexpected error occurred: {str(e)}"}
1763
-
1764
- @mcp.tool()
1765
- async def get_leaderboard(user_id: Optional[str] = None) -> Dict[str, Any]:
1766
- """
1767
- 🏅 Get leaderboard data.
1768
-
1769
- Args:
1770
- user_id: Optional user ID to filter results
1771
-
1772
- Returns:
1773
- Leaderboard data
1774
- """
1775
- try:
1776
- return await brain_client.get_leaderboard(user_id)
1777
- except Exception as e:
1778
- return {"error": f"An unexpected error occurred: {str(e)}"}
1779
-
1780
-
1781
- # --- Forum Tools ---
1782
-
1783
- @mcp.tool()
1784
- async def get_operators() -> Dict[str, Any]:
1785
- """
1786
- 🔧 Get available operators for alpha creation.
1787
-
1788
- Returns:
1789
- Dictionary containing operators list and count
1790
- """
1791
- try:
1792
- operators = await brain_client.get_operators()
1793
- if isinstance(operators, list):
1794
- return {"results": operators, "count": len(operators)}
1795
- return operators
1796
- except Exception as e:
1797
- return {"error": f"An unexpected error occurred: {str(e)}"}
1798
-
1799
- @mcp.tool()
1800
- async def run_selection(
1801
- selection: str,
1802
- instrument_type: str = "EQUITY",
1803
- region: str = "USA",
1804
- delay: int = 1,
1805
- selection_limit: int = 1000,
1806
- selection_handling: str = "POSITIVE",
1807
- ) -> Dict[str, Any]:
1808
- """
1809
- 🎯 Run a selection query to filter instruments.
1810
-
1811
- Args:
1812
- selection: Selection criteria
1813
- instrument_type: Type of instruments
1814
- region: Geographic region
1815
- delay: Delay setting
1816
- selection_limit: Maximum number of results
1817
- selection_handling: How to handle selection results
1818
-
1819
- Returns:
1820
- Selection results
1821
- """
1822
- try:
1823
- return await brain_client.run_selection(
1824
- selection, instrument_type, region, delay, selection_limit, selection_handling
1825
- )
1826
- except Exception as e:
1827
- return {"error": f"An unexpected error occurred: {str(e)}"}
1828
-
1829
- @mcp.tool()
1830
- async def get_user_profile(user_id: str = "self") -> Dict[str, Any]:
1831
- """
1832
- 👤 Get user profile information.
1833
-
1834
- Args:
1835
- user_id: User ID (default: "self" for current user)
1836
-
1837
- Returns:
1838
- User profile data
1839
- """
1840
- try:
1841
- return await brain_client.get_user_profile(user_id)
1842
- except Exception as e:
1843
- return {"error": f"An unexpected error occurred: {str(e)}"}
1844
-
1845
- @mcp.tool()
1846
- async def get_documentations() -> Dict[str, Any]:
1847
- """
1848
- 📚 Get available documentations and learning materials.
1849
-
1850
- Returns:
1851
- List of documentations
1852
- """
1853
- try:
1854
- return await brain_client.get_documentations()
1855
- except Exception as e:
1856
- return {"error": f"An unexpected error occurred: {str(e)}"}
1857
-
1858
- # --- Message and Forum Tools ---
1859
-
1860
- @mcp.tool()
1861
- async def get_messages(limit: Optional[int] = None, offset: int = 0) -> Dict[str, Any]:
1862
- """
1863
- 💬 Get messages for the current user with optional pagination.
1864
-
1865
- Args:
1866
- limit: Maximum number of messages to return (e.g., 10 for top 10 messages)
1867
- offset: Number of messages to skip (for pagination)
1868
-
1869
- Returns:
1870
- Messages for the current user, optionally limited by count
1871
- """
1872
- try:
1873
- return await brain_client.get_messages(limit, offset)
1874
- except Exception as e:
1875
- return {"error": f"An unexpected error occurred: {str(e)}"}
1876
-
1877
- @mcp.tool()
1878
- async def get_glossary_terms(email: str = "", password: str = "") -> List[Dict[str, str]]:
1879
- """
1880
- 📚 Get glossary terms from WorldQuant BRAIN forum.
1881
-
1882
- Note: This uses Playwright and is implemented in forum_functions.py
1883
-
1884
- Args:
1885
- email: Your BRAIN platform email address (optional if in config)
1886
- password: Your BRAIN platform password (optional if in config)
1887
-
1888
- Returns:
1889
- A list of glossary terms with definitions
1890
- """
1891
- try:
1892
- config = load_config()
1893
- credentials = config.get("credentials", {})
1894
- email = email or credentials.get("email")
1895
- password = password or credentials.get("password")
1896
- if not email or not password:
1897
- raise ValueError("Authentication credentials not provided or found in config.")
1898
-
1899
- return await brain_client.get_glossary_terms(email, password)
1900
- except Exception as e:
1901
- logger.error(f"Error in get_glossary_terms tool: {e}")
1902
- return [{"error": str(e)}]
1903
-
1904
- @mcp.tool()
1905
- async def search_forum_posts(search_query: str, email: str = "", password: str = "",
1906
- max_results: int = 50) -> Dict[str, Any]:
1907
- """
1908
- 🔍 Search forum posts on WorldQuant BRAIN support site.
1909
-
1910
- Note: This uses Playwright and is implemented in forum_functions.py
1911
-
1912
- Args:
1913
- search_query: Search term or phrase
1914
- email: Your BRAIN platform email address (optional if in config)
1915
- password: Your BRAIN platform password (optional if in config)
1916
- max_results: Maximum number of results to return (default: 50)
1917
-
1918
- Returns:
1919
- Search results with analysis
1920
- """
1921
- try:
1922
- config = load_config()
1923
- credentials = config.get("credentials", {})
1924
- email = email or credentials.get("email")
1925
- password = password or credentials.get("password")
1926
- if not email or not password:
1927
- return {"error": "Authentication credentials not provided or found in config."}
1928
-
1929
- return await brain_client.search_forum_posts(email, password, search_query, max_results)
1930
- except Exception as e:
1931
- return {"error": f"An unexpected error occurred: {str(e)}"}
1932
-
1933
- @mcp.tool()
1934
- async def read_forum_post(article_id: str, email: str = "", password: str = "",
1935
- include_comments: bool = True) -> Dict[str, Any]:
1936
- """
1937
- 📄 Get a specific forum post by article ID.
1938
-
1939
- Note: This uses Playwright and is implemented in forum_functions.py
1940
-
1941
- Args:
1942
- article_id: The article ID to retrieve (e.g., "32984819083415-新人求模板")
1943
- email: Your BRAIN platform email address (optional if in config)
1944
- password: Your BRAIN platform password (optional if in config)
1945
-
1946
- Returns:
1947
- Forum post content with comments
1948
- """
1949
- try:
1950
- config = load_config()
1951
- credentials = config.get("credentials", {})
1952
- email = email or credentials.get("email")
1953
- password = password or credentials.get("password")
1954
- if not email or not password:
1955
- return {"error": "Authentication credentials not provided or found in config."}
1956
-
1957
- return await brain_client.read_forum_post(email, password, article_id, include_comments)
1958
- except Exception as e:
1959
- return {"error": f"An unexpected error occurred: {str(e)}"}
1960
-
1961
- @mcp.tool()
1962
- async def get_alpha_yearly_stats(alpha_id: str) -> Dict[str, Any]:
1963
- """Get yearly statistics for an alpha."""
1964
- try:
1965
- return await brain_client.get_alpha_yearly_stats(alpha_id)
1966
- except Exception as e:
1967
- return {"error": f"An unexpected error occurred: {str(e)}"}
1968
-
1969
- @mcp.tool()
1970
- async def check_correlation(alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
1971
- """Check alpha correlation against production alphas, self alphas, or both."""
1972
- try:
1973
- return await brain_client.check_correlation(alpha_id, correlation_type, threshold)
1974
- except Exception as e:
1975
- return {"error": f"An unexpected error occurred: {str(e)}"}
1976
-
1977
- @mcp.tool()
1978
- async def get_submission_check(alpha_id: str) -> Dict[str, Any]:
1979
- """Comprehensive pre-submission check."""
1980
- try:
1981
- return await brain_client.get_submission_check(alpha_id)
1982
- except Exception as e:
1983
- return {"error": f"An unexpected error occurred: {str(e)}"}
1984
-
1985
- @mcp.tool()
1986
- async def set_alpha_properties(alpha_id: str, name: Optional[str] = None,
1987
- color: Optional[str] = None, tags: Optional[List[str]] = None,
1988
- selection_desc: str = "None", combo_desc: str = "None") -> Dict[str, Any]:
1989
- """Update alpha properties (name, color, tags, descriptions)."""
1990
- try:
1991
- return await brain_client.set_alpha_properties(alpha_id, name, color, tags, selection_desc, combo_desc)
1992
- except Exception as e:
1993
- return {"error": f"An unexpected error occurred: {str(e)}"}
1994
-
1995
- @mcp.tool()
1996
- async def get_record_sets(alpha_id: str) -> Dict[str, Any]:
1997
- """List available record sets for an alpha."""
1998
- try:
1999
- return await brain_client.get_record_sets(alpha_id)
2000
- except Exception as e:
2001
- return {"error": f"An unexpected error occurred: {str(e)}"}
2002
-
2003
- @mcp.tool()
2004
- async def get_record_set_data(alpha_id: str, record_set_name: str) -> Dict[str, Any]:
2005
- """Get data from a specific record set."""
2006
- try:
2007
- return await brain_client.get_record_set_data(alpha_id, record_set_name)
2008
- except Exception as e:
2009
- return {"error": f"An unexpected error occurred: {str(e)}"}
2010
-
2011
- @mcp.tool()
2012
- async def get_user_activities(user_id: str, grouping: Optional[str] = None) -> Dict[str, Any]:
2013
- """Get user activity diversity data."""
2014
- try:
2015
- return await brain_client.get_user_activities(user_id, grouping)
2016
- except Exception as e:
2017
- return {"error": f"An unexpected error occurred: {str(e)}"}
2018
-
2019
- @mcp.tool()
2020
- async def get_pyramid_multipliers() -> Dict[str, Any]:
2021
- """Get current pyramid multipliers showing BRAIN's encouragement levels."""
2022
- try:
2023
- return await brain_client.get_pyramid_multipliers()
2024
- except Exception as e:
2025
- return {"error": f"An unexpected error occurred: {str(e)}"}
2026
-
2027
- @mcp.tool()
2028
- async def get_pyramid_alphas(start_date: Optional[str] = None,
2029
- end_date: Optional[str] = None) -> Dict[str, Any]:
2030
- """Get user's current alpha distribution across pyramid categories."""
2031
- try:
2032
- return await brain_client.get_pyramid_alphas(start_date, end_date)
2033
- except Exception as e:
2034
- return {"error": f"An unexpected error occurred: {str(e)}"}
2035
-
2036
- @mcp.tool()
2037
- async def get_user_competitions(user_id: Optional[str] = None) -> Dict[str, Any]:
2038
- """Get list of competitions that the user is participating in."""
2039
- try:
2040
- return await brain_client.get_user_competitions(user_id)
2041
- except Exception as e:
2042
- return {"error": f"An unexpected error occurred: {str(e)}"}
2043
-
2044
- @mcp.tool()
2045
- async def get_competition_details(competition_id: str) -> Dict[str, Any]:
2046
- """Get detailed information about a specific competition."""
2047
- try:
2048
- return await brain_client.get_competition_details(competition_id)
2049
- except Exception as e:
2050
- return {"error": f"An unexpected error occurred: {str(e)}"}
2051
-
2052
- @mcp.tool()
2053
- async def get_competition_agreement(competition_id: str) -> Dict[str, Any]:
2054
- """Get the rules, terms, and agreement for a specific competition."""
2055
- try:
2056
- return await brain_client.get_competition_agreement(competition_id)
2057
- except Exception as e:
2058
- return {"error": f"An unexpected error occurred: {str(e)}"}
2059
-
2060
- @mcp.tool()
2061
- async def get_platform_setting_options() -> Dict[str, Any]:
2062
- """Discover valid simulation setting options (instrument types, regions, delays, universes, neutralization).
2063
-
2064
- Use this when a simulation request might contain an invalid/mismatched setting. If an AI or user supplies
2065
- incorrect parameters (e.g., wrong region for an instrument type), call this tool to retrieve the authoritative
2066
- option sets and correct the inputs before proceeding.
2067
-
2068
- Returns:
2069
- A structured list of valid combinations and choice lists to validate or fix simulation settings.
2070
- """
2071
- try:
2072
- return await brain_client.get_platform_setting_options()
2073
- except Exception as e:
2074
- return {"error": f"An unexpected error occurred: {str(e)}"}
2075
-
2076
- @mcp.tool()
2077
- async def performance_comparison(alpha_id: str, team_id: Optional[str] = None,
2078
- competition: Optional[str] = None) -> Dict[str, Any]:
2079
- """Get performance comparison data for an alpha."""
2080
- try:
2081
- return await brain_client.performance_comparison(alpha_id, team_id, competition)
2082
- except Exception as e:
2083
- return {"error": f"An unexpected error occurred: {str(e)}"}
2084
-
2085
- # --- Dataframe Tool ---
2086
-
2087
- @mcp.tool()
2088
- async def expand_nested_data(data: List[Dict[str, Any]], preserve_original: bool = True) -> List[Dict[str, Any]]:
2089
- """Flatten complex nested data structures into tabular format."""
2090
- try:
2091
- return await brain_client.expand_nested_data(data, preserve_original)
2092
- except Exception as e:
2093
- return [{"error": f"An unexpected error occurred: {str(e)}"}]
2094
-
2095
- # --- Documentation Tool ---
2096
-
2097
- @mcp.tool()
2098
- async def get_documentation_page(page_id: str) -> Dict[str, Any]:
2099
- """Retrieve detailed content of a specific documentation page/article."""
2100
- try:
2101
- return await brain_client.get_documentation_page(page_id)
2102
- except Exception as e:
2103
- return {"error": f"An unexpected error occurred: {str(e)}"}
2104
-
2105
- # --- Advanced Simulation Tools ---
2106
-
2107
- @mcp.tool()
2108
- async def create_multi_simulation(
2109
- alpha_expressions: List[str],
2110
- instrument_type: str = "EQUITY",
2111
- region: str = "USA",
2112
- universe: str = "TOP3000",
2113
- delay: int = 1,
2114
- decay: float = 0.0,
2115
- neutralization: str = "NONE",
2116
- truncation: float = 0.0,
2117
- test_period: str = "P0Y0M",
2118
- unit_handling: str = "VERIFY",
2119
- nan_handling: str = "OFF",
2120
- language: str = "FASTEXPR",
2121
- visualization: bool = True,
2122
- pasteurization: str = "ON",
2123
- max_trade: str = "OFF"
2124
- ) -> Dict[str, Any]:
2125
- """
2126
- 🚀 Create multiple regular alpha simulations on BRAIN platform in a single request.
2127
-
2128
- This tool creates a multisimulation with multiple regular alpha expressions,
2129
- waits for all simulations to complete, and returns detailed results for each alpha.
2130
-
2131
- ⏰ NOTE: Multisimulations can take 8+ minutes to complete. This tool will wait
2132
- for the entire process and return comprehensive results.
2133
- Call get_platform_setting_options to get the valid options for the simulation.
2134
- Args:
2135
- alpha_expressions: List of alpha expressions (2-8 expressions required)
2136
- instrument_type: Type of instruments (default: "EQUITY")
2137
- region: Market region (default: "USA")
2138
- universe: Universe of stocks (default: "TOP3000")
2139
- delay: Data delay (default: 1)
2140
- decay: Decay value (default: 0.0)
2141
- neutralization: Neutralization method (default: "NONE")
2142
- truncation: Truncation value (default: 0.0)
2143
- test_period: Test period (default: "P0Y0M")
2144
- unit_handling: Unit handling method (default: "VERIFY")
2145
- nan_handling: NaN handling method (default: "OFF")
2146
- language: Expression language (default: "FASTEXPR")
2147
- visualization: Enable visualization (default: True)
2148
- pasteurization: Pasteurization setting (default: "ON")
2149
- max_trade: Max trade setting (default: "OFF")
2150
-
2151
- Returns:
2152
- Dictionary containing multisimulation results and individual alpha details
2153
- """
2154
- try:
2155
- # Validate input
2156
- if len(alpha_expressions) < 2:
2157
- return {"error": "At least 2 alpha expressions are required"}
2158
- if len(alpha_expressions) > 8:
2159
- return {"error": "Maximum 8 alpha expressions allowed per request"}
2160
-
2161
- # Create multisimulation data
2162
- multisimulation_data = []
2163
- for alpha_expr in alpha_expressions:
2164
- simulation_item = {
2165
- 'type': 'REGULAR',
2166
- 'settings': {
2167
- 'instrumentType': instrument_type,
2168
- 'region': region,
2169
- 'universe': universe,
2170
- 'delay': delay,
2171
- 'decay': decay,
2172
- 'neutralization': neutralization,
2173
- 'truncation': truncation,
2174
- 'pasteurization': pasteurization,
2175
- 'unitHandling': unit_handling,
2176
- 'nanHandling': nan_handling,
2177
- 'language': language,
2178
- 'visualization': visualization,
2179
- 'testPeriod': test_period,
2180
- 'maxTrade': max_trade
2181
- },
2182
- 'regular': alpha_expr
2183
- }
2184
- multisimulation_data.append(simulation_item)
2185
-
2186
- # Send multisimulation request
2187
- response = brain_client.session.post(f"{brain_client.base_url}/simulations", json=multisimulation_data)
2188
-
2189
- if response.status_code != 201:
2190
- return {"error": f"Failed to create multisimulation. Status: {response.status_code}"}
2191
-
2192
- # Get multisimulation location
2193
- location = response.headers.get('Location', '')
2194
- if not location:
2195
- return {"error": "No location header in multisimulation response"}
2196
-
2197
- # Wait for children to appear and get results
2198
- return await _wait_for_multisimulation_completion(location, len(alpha_expressions))
2199
-
2200
- except Exception as e:
2201
- return {"error": f"Error creating multisimulation: {str(e)}"}
2202
-
2203
async def _wait_for_multisimulation_completion(location: str, expected_children: int) -> Dict[str, Any]:
    """
    Poll a multisimulation until its children appear, then collect each child's alpha results.

    Args:
        location: URL of the parent multisimulation (taken from the Location header
            of the simulation-creation response).
        expected_children: Number of child simulations that were requested.

    Returns:
        On success, a dictionary with 'success', request/created counts, the
        multisimulation id and location, and a per-child 'alpha_results' list.
        On failure, a dictionary with an 'error' key.
    """
    try:
        # Simple progress indicator for users
        print(f"Waiting for multisimulation to complete... (this may take several minutes)")
        print(f"Expected {expected_children} alpha simulations")
        print()

        # Wait for children to appear - generous limit for 8+ minute multisimulations.
        max_wait_attempts = 200
        children = await _poll_for_children(location, max_wait_attempts)
        if not children:
            return {"error": f"Children did not appear within {max_wait_attempts} attempts (multisimulation may still be processing)"}

        # Process each child to get alpha results; one child failing must not
        # abort collection of the others.
        alpha_results = []
        for i, child_id in enumerate(children):
            try:
                # The children are full URLs, not just IDs
                child_url = child_id if child_id.startswith('http') else f"{brain_client.base_url}/simulations/{child_id}"
                alpha_results.append(await _collect_child_result(child_url))
            except Exception as e:
                alpha_results.append({
                    'location': f"child_{i+1}",
                    'error': str(e)
                })

        print(f"Multisimulation completed! Retrieved {len(alpha_results)} alpha results")
        return {
            'success': True,
            'message': f'Successfully created {expected_children} regular alpha simulations',
            'total_requested': expected_children,
            'total_created': len(alpha_results),
            'multisimulation_id': location.split('/')[-1],
            'multisimulation_location': location,
            'alpha_results': alpha_results
        }

    except Exception as e:
        return {"error": f"Error waiting for multisimulation completion: {str(e)}"}


async def _poll_for_children(location: str, max_attempts: int) -> list:
    """Poll the parent multisimulation until its 'children' list is non-empty.

    Returns the (possibly empty) children list after at most *max_attempts* polls.
    """
    children = []
    for _ in range(max_attempts):
        try:
            multisim_response = brain_client.session.get(location)
            if multisim_response.status_code == 200:
                children = multisim_response.json().get('children', [])
                if children:
                    break
                # Honor the server-suggested back-off; default to 5s between polls.
                await asyncio.sleep(float(multisim_response.headers.get("Retry-After", 5)))
            else:
                await asyncio.sleep(5)
        except Exception:
            # Transient network/JSON error: back off and retry.
            await asyncio.sleep(5)
    return children


async def _collect_child_result(child_url: str, max_attempts: int = 100) -> Dict[str, Any]:
    """Wait for one child simulation to finish, then fetch its alpha details.

    Returns a result dict containing 'location' plus either 'alpha_id'/'details'
    on success or an 'error' message on failure. Never raises for HTTP-level
    failures; only unexpected exceptions propagate to the caller.
    """
    finished = False
    alpha_data: Dict[str, Any] = {}
    for _ in range(max_attempts):
        try:
            alpha_progress = brain_client.session.get(child_url)
            if alpha_progress.status_code == 200:
                alpha_data = alpha_progress.json()
                # BUGFIX: the header value is a *string*; the original compared it
                # to int 0, so "Retry-After: 0" never registered as finished.
                retry_after = float(alpha_progress.headers.get("Retry-After", 0))
                if retry_after == 0:
                    # A zero/absent Retry-After means the simulation is done.
                    finished = True
                    break
                await asyncio.sleep(retry_after)
            else:
                await asyncio.sleep(5)
        except Exception:
            await asyncio.sleep(5)

    if not finished:
        return {
            'location': child_url,
            'error': f'Alpha simulation did not complete within {max_attempts} attempts'
        }

    alpha_id = alpha_data.get("alpha")
    if not alpha_id:
        return {
            'location': child_url,
            'error': 'No alpha ID found in completed simulation'
        }

    # Now get the actual alpha details from the alpha endpoint
    alpha_details = brain_client.session.get(f"{brain_client.base_url}/alphas/{alpha_id}")
    if alpha_details.status_code == 200:
        return {
            'alpha_id': alpha_id,
            'location': child_url,
            'details': alpha_details.json()
        }
    return {
        'alpha_id': alpha_id,
        'location': child_url,
        'error': f'Failed to get alpha details: {alpha_details.status_code}'
    }
2321
- # --- Payment and Financial Tools ---
2322
-
2323
@mcp.tool()
async def get_daily_and_quarterly_payment(email: str = "", password: str = "") -> Dict[str, Any]:
    """
    Get daily and quarterly payment information from WorldQuant BRAIN platform.

    This function retrieves both base payments (daily alpha performance payments) and
    other payments (competition rewards, quarterly payments, referrals, etc.).
    Each payment feed is fetched best-effort: if one endpoint fails, its entry is
    reported as "no data" instead of failing the whole call.

    Args:
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)

    Returns:
        Dictionary containing base payment and other payment data with summaries
        and detailed records, or a dictionary with an 'error' key.
    """
    try:
        config = load_config()
        credentials = config.get("credentials", {})
        email = email or credentials.get("email")
        password = password or credentials.get("password")
        if not email or not password:
            return {"error": "Authentication credentials not provided or found in config."}

        await brain_client.authenticate(email, password)

        # Get base payments (best-effort).
        # BUGFIX: narrowed the original bare `except:` clauses, which would also
        # swallow KeyboardInterrupt/SystemExit.
        try:
            base_response = brain_client.session.get(f"{brain_client.base_url}/users/self/activities/base-payment")
            base_response.raise_for_status()
            base_payments = base_response.json()
        except Exception:
            base_payments = "no data"

        # Get other payments (best-effort).
        try:
            other_response = brain_client.session.get(f"{brain_client.base_url}/users/self/activities/other-payment")
            other_response.raise_for_status()
            other_payments = other_response.json()
        except Exception:
            other_payments = "no data"

        return {
            "base_payments": base_payments,
            "other_payments": other_payments
        }

    except Exception as e:
        return {"error": f"An unexpected error occurred: {str(e)}"}
2370
-
2371
- from typing import Sequence
2372
@mcp.tool()
async def lookINTO_SimError_message(locations: Sequence[str]) -> dict:
    """
    Fetch and parse error/status from multiple simulation locations (URLs).
    Args:
        locations: List of simulation result URLs (e.g., /simulations/{id})
    Returns:
        List of dicts with location, error message, and raw response
    """
    results = []
    for loc in locations:
        entry = {"location": loc}
        try:
            response = brain_client.session.get(loc)
            if response.status_code != 200:
                # Non-OK responses are reported with their raw body text.
                entry["error"] = f"HTTP {response.status_code}"
                entry["raw"] = response.text
            else:
                payload = response.json() if response.text else {}
                # Prefer an explicit error field, then a generic message.
                message = payload.get("error") or payload.get("message")
                # A missing alpha ID means the simulation never produced a result.
                if not payload.get("alpha"):
                    message = message or "Simulation did not get through, if you are running a multisimulation, check the other children location in your request"
                entry["error"] = message
                entry["raw"] = payload
        except Exception as exc:
            entry["error"] = str(exc)
            entry["raw"] = None
        results.append(entry)
    return {"results": results}
2410
-
2411
-
2412
# --- Main entry point ---
if __name__ == "__main__":
    # Launch the MCP server; mcp.run() blocks until the process is stopped.
    print("running the server")
    mcp.run()