cnhkmcp-2.1.4-py3-none-any.whl → cnhkmcp-2.1.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnhkmcp/__init__.py +126 -126
- cnhkmcp/untracked/back_up/forum_functions.py +998 -0
- cnhkmcp/untracked/back_up/platform_functions.py +2886 -0
- cnhkmcp/untracked/brain-consultant.md +31 -0
- cnhkmcp/untracked/forum_functions.py +350 -941
- cnhkmcp/untracked/platform_functions.py +445 -730
- cnhkmcp/untracked/skills/Claude_Skill_Creation_Guide.md +140 -0
- cnhkmcp/untracked/skills/expression_verifier/SKILL.md +51 -0
- cnhkmcp/untracked/skills/expression_verifier/scripts/validator.py +889 -0
- cnhkmcp/untracked/skills/expression_verifier/scripts/verify_expr.py +52 -0
- cnhkmcp/untracked/skills/pull_BRAINSkill/SKILL.md +51 -0
- cnhkmcp/untracked/skills/pull_BRAINSkill/scripts/pull_skills.py +188 -0
- cnhkmcp/untracked/<non-ASCII filename, octal-escaped in the diff>.py +3 -1
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.6.dist-info}/METADATA +1 -1
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.6.dist-info}/RECORD +19 -13
- cnhkmcp/untracked/APP/Tranformer/ace.log +0 -0
- cnhkmcp/untracked/APP/Tranformer/parsetab.py +0 -60
- cnhkmcp/untracked/APP/simulator/wqb20260107015647.log +0 -57
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.6.dist-info}/WHEEL +0 -0
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.6.dist-info}/entry_points.txt +0 -0
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.6.dist-info}/licenses/LICENSE +0 -0
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,2886 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
WorldQuant BRAIN MCP Server - Python Version
A comprehensive Model Context Protocol (MCP) server for WorldQuant BRAIN platform integration.
"""

# Ensure proper encoding handling for Windows
import sys
import os

# Note: We'll handle encoding issues in individual functions rather than
# overriding system streams to avoid conflicts with MCP server

import json
import time
import asyncio
import logging
from typing import Dict, List, Optional, Any, Union
from dataclasses import dataclass, asdict
from datetime import datetime, timedelta
import math
from time import sleep

import requests
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel, Field, EmailStr

from pathlib import Path


# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Pydantic models for type safety
class AuthCredentials(BaseModel):
    email: EmailStr
    password: str

class SimulationSettings(BaseModel):
    instrumentType: str = "EQUITY"
    region: str = "USA"
    universe: str = "TOP3000"
    delay: int = 1
    decay: float = 0.0
    neutralization: str = "NONE"
    truncation: float = 0.0
    pasteurization: str = "ON"
    unitHandling: str = "VERIFY"
    nanHandling: str = "OFF"
    language: str = "FASTEXPR"
    visualization: bool = True
    testPeriod: str = "P0Y0M"
    selectionHandling: str = "POSITIVE"
    selectionLimit: int = 1000
    maxTrade: str = "OFF"
    componentActivation: str = "IS"

class SimulationData(BaseModel):
    type: str = "REGULAR"  # "REGULAR" or "SUPER"
    settings: SimulationSettings
    regular: Optional[str] = None
    combo: Optional[str] = None
    selection: Optional[str] = None

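# A minimal construction sketch for the models above (illustrative only; the
# expression string is a made-up example, not part of this package):
#
#     data = SimulationData(
#         settings=SimulationSettings(region="USA", universe="TOP3000"),
#         regular="rank(-returns)",  # hypothetical FASTEXPR alpha expression
#     )
#     # Pydantic fills the remaining settings with the defaults shown above.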
class BrainApiClient:
    """WorldQuant BRAIN API client with comprehensive functionality."""

    def __init__(self):
        self.base_url = "https://api.worldquantbrain.com"
        self.session = requests.Session()
        self.auth_credentials = None
        self.is_authenticating = False

        # Configure session. Note: requests.Session does not enforce a
        # session-wide timeout; this attribute only takes effect when passed
        # per request.
        self.session.timeout = 30
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        })

    def log(self, message: str, level: str = "INFO"):
        """Log messages to stderr to avoid MCP protocol interference."""
        try:
            # Try to print the original message first
            print(f"[{level}] {message}", file=sys.stderr)
        except UnicodeEncodeError:
            # Fallback: remove problematic characters and try again
            try:
                safe_message = message.encode('ascii', 'ignore').decode('ascii')
                print(f"[{level}] {safe_message}", file=sys.stderr)
            except Exception:
                # Final fallback: just print the level and a safe message
                print(f"[{level}] Log message", file=sys.stderr)
        except Exception:
            # Final fallback: just print the level and a safe message
            print(f"[{level}] Log message", file=sys.stderr)

    async def authenticate(self, email: str, password: str) -> Dict[str, Any]:
        """Authenticate with WorldQuant BRAIN platform with biometric support."""
        self.log("🔐 Starting authentication process...", "INFO")

        try:
            # Store credentials for potential re-authentication
            self.auth_credentials = {'email': email, 'password': password}

            # Clear any existing session data
            self.session.cookies.clear()
            self.session.auth = None

            # Create Basic Authentication header (base64-encoded credentials)
            import base64
            credentials = f"{email}:{password}"
            encoded_credentials = base64.b64encode(credentials.encode()).decode()

            # Send POST request with Basic Authentication header
            headers = {
                'Authorization': f'Basic {encoded_credentials}'
            }

            response = self.session.post('https://api.worldquantbrain.com/authentication', headers=headers)

            # Check for successful authentication (status code 201)
            if response.status_code == 201:
                self.log("Authentication successful", "SUCCESS")

                # Check if the JWT token was automatically stored by the session
                jwt_token = self.session.cookies.get('t')
                if jwt_token:
                    self.log("JWT token automatically stored by session", "SUCCESS")
                else:
                    self.log("⚠️ No JWT token found in session", "WARNING")

                # Return success response
                return {
                    'user': {'email': email},
                    'status': 'authenticated',
                    'permissions': ['read', 'write'],
                    'message': 'Authentication successful',
                    'status_code': response.status_code,
                    'has_jwt': jwt_token is not None
                }

            # Check if biometric authentication is required (401 with persona)
            elif response.status_code == 401:
                www_auth = response.headers.get("WWW-Authenticate")
                location = response.headers.get("Location")

                if www_auth == "persona" and location:
                    self.log("🔴 Biometric authentication required", "INFO")

                    # Handle biometric authentication
                    from urllib.parse import urljoin
                    biometric_url = urljoin(response.url, location)

                    return await self._handle_biometric_auth(biometric_url, email)
                else:
                    raise Exception("Incorrect email or password")
            else:
                raise Exception(f"Authentication failed with status code: {response.status_code}")

        except requests.HTTPError as e:
            self.log(f"❌ HTTP error during authentication: {e}", "ERROR")
            raise
        except Exception as e:
            self.log(f"❌ Authentication failed: {str(e)}", "ERROR")
            raise

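    # A minimal end-to-end sketch of the credential flow above (illustrative;
    # assumes real credentials and a fresh event loop):
    #
    #     import asyncio
    #
    #     async def main():
    #         client = BrainApiClient()
    #         info = await client.authenticate("user@example.com", "password")
    #         print(info["status"], info["has_jwt"])
    #
    #     asyncio.run(main())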
    async def _handle_biometric_auth(self, biometric_url: str, email: str) -> Dict[str, Any]:
        """Handle biometric authentication using browser automation."""
        self.log("🌐 Starting biometric authentication...", "INFO")

        try:
            # Import selenium for browser automation
            from selenium import webdriver
            from selenium.webdriver.chrome.options import Options
            import time

            # Setup Chrome options
            options = Options()
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')

            driver = None
            try:
                # Open browser with timeout
                driver = webdriver.Chrome(options=options)
                # Cap the page-load wait so driver.get() doesn't block forever
                driver.set_page_load_timeout(80)  # wait at most 80 seconds

                self.log("🌐 Opening browser for biometric authentication...", "INFO")

                # Try to open the URL but handle timeout
                try:
                    driver.get(biometric_url)
                    self.log("Browser page loaded successfully", "SUCCESS")
                except Exception as timeout_error:
                    self.log(f"⚠️ Page load timeout (expected): {str(timeout_error)[:50]}...", "WARNING")
                    self.log("Browser window is open for biometric authentication", "INFO")

                # Print instructions
                print("\n" + "="*60, file=sys.stderr)
                print("BIOMETRIC AUTHENTICATION REQUIRED", file=sys.stderr)
                print("="*60, file=sys.stderr)
                print("Browser window is open with biometric authentication page", file=sys.stderr)
                print("Complete the biometric authentication in the browser", file=sys.stderr)
                print("The system will automatically check when you're done...", file=sys.stderr)
                print("="*60, file=sys.stderr)

                # Keep checking until authentication is complete
                max_attempts = 60  # 5 minutes maximum (60 * 5 seconds)
                attempt = 0

                while attempt < max_attempts:
                    time.sleep(5)  # Check every 5 seconds
                    attempt += 1

                    # Check if authentication completed
                    check_response = self.session.post(biometric_url)
                    self.log(f"🔄 Checking authentication status (attempt {attempt}/{max_attempts}): {check_response.status_code}", "INFO")

                    if check_response.status_code == 201:
                        self.log("Biometric authentication successful!", "SUCCESS")

                        # Close browser
                        driver.quit()

                        # Check JWT token
                        jwt_token = self.session.cookies.get('t')
                        if jwt_token:
                            self.log("JWT token received", "SUCCESS")

                        # Return success response
                        return {
                            'user': {'email': email},
                            'status': 'authenticated',
                            'permissions': ['read', 'write'],
                            'message': 'Biometric authentication successful',
                            'status_code': check_response.status_code,
                            'has_jwt': jwt_token is not None
                        }

                # If we get here, authentication timed out
                if driver:
                    driver.quit()
                raise Exception("Biometric authentication timed out")

            except Exception as driver_error:
                if driver:
                    try:
                        driver.quit()
                    except:
                        pass
                raise Exception(f"Browser automation error: {driver_error}")

        except Exception as e:
            self.log(f"❌ Biometric authentication failed: {str(e)}", "ERROR")
            raise

|
+
async def is_authenticated(self) -> bool:
|
|
268
|
+
"""Check if currently authenticated using JWT token."""
|
|
269
|
+
try:
|
|
270
|
+
# Check if we have a JWT token in cookies
|
|
271
|
+
jwt_token = self.session.cookies.get('t')
|
|
272
|
+
if not jwt_token:
|
|
273
|
+
self.log("❌ No JWT token found", "INFO")
|
|
274
|
+
return False
|
|
275
|
+
|
|
276
|
+
# Test authentication with a simple API call
|
|
277
|
+
response = self.session.get(f"{self.base_url}/authentication")
|
|
278
|
+
if response.status_code == 200:
|
|
279
|
+
return True
|
|
280
|
+
elif response.status_code == 401:
|
|
281
|
+
self.log("❌ JWT token expired or invalid (401)", "INFO")
|
|
282
|
+
return False
|
|
283
|
+
else:
|
|
284
|
+
self.log(f"⚠️ Unexpected status code during auth check: {response.status_code}", "WARNING")
|
|
285
|
+
return False
|
|
286
|
+
except Exception as e:
|
|
287
|
+
self.log(f"❌ Error checking authentication: {str(e)}", "ERROR")
|
|
288
|
+
return False
|
|
289
|
+
|
|
290
|
+
async def ensure_authenticated(self):
|
|
291
|
+
"""Ensure authentication is valid, re-authenticate if needed."""
|
|
292
|
+
if not await self.is_authenticated() and self.auth_credentials:
|
|
293
|
+
self.log("🔄 Re-authenticating...", "INFO")
|
|
294
|
+
await self.authenticate(self.auth_credentials['email'], self.auth_credentials['password'])
|
|
295
|
+
elif not self.auth_credentials:
|
|
296
|
+
raise Exception("Not authenticated and no stored credentials available. Please call authenticate() first.")
|
|
297
|
+
|
|
298
|
+
async def get_authentication_status(self) -> Optional[Dict[str, Any]]:
|
|
299
|
+
"""Get current authentication status and user info."""
|
|
300
|
+
try:
|
|
301
|
+
response = self.session.get(f"{self.base_url}/users/self")
|
|
302
|
+
response.raise_for_status()
|
|
303
|
+
return response.json()
|
|
304
|
+
except Exception as e:
|
|
305
|
+
self.log(f"Failed to get auth status: {str(e)}", "ERROR")
|
|
306
|
+
return None
|
|
307
|
+
|
|
    async def create_simulation(self, simulation_data: SimulationData) -> Dict[str, str]:
        """Create a new simulation on the BRAIN platform."""
        await self.ensure_authenticated()

        try:
            self.log("🚀 Creating simulation...", "INFO")

            # Prepare settings based on simulation type
            settings_dict = simulation_data.settings.dict()

            # Remove fields based on simulation type
            if simulation_data.type == "REGULAR":
                # Remove SUPER-specific fields for REGULAR
                settings_dict.pop('selectionHandling', None)
                settings_dict.pop('selectionLimit', None)
                settings_dict.pop('componentActivation', None)
            elif simulation_data.type == "SUPER":
                # SUPER type keeps all fields
                pass

            # Filter out None values from settings
            settings_dict = {k: v for k, v in settings_dict.items() if v is not None}

            # Prepare simulation payload
            payload = {
                'type': simulation_data.type,
                'settings': settings_dict
            }

            # Add type-specific fields
            if simulation_data.type == "REGULAR":
                if simulation_data.regular:
                    payload['regular'] = simulation_data.regular
            elif simulation_data.type == "SUPER":
                if simulation_data.combo:
                    payload['combo'] = simulation_data.combo
                if simulation_data.selection:
                    payload['selection'] = simulation_data.selection

            # Filter out None values from the entire payload
            payload = {k: v for k, v in payload.items() if v is not None}

            # Debug: print payload for troubleshooting
            # print("📋 Sending payload:")
            # print(json.dumps(payload, indent=2))

            response = self.session.post(f"{self.base_url}/simulations", json=payload)
            response.raise_for_status()

            # Handle empty response body - extract simulation ID from Location header
            location = response.headers.get('Location', '')
            simulation_id = location.split('/')[-1] if location else None

            self.log(f"Simulation created with ID: {simulation_id}", "SUCCESS")

            # Poll the simulation until the Retry-After header disappears.
            # Log to stderr rather than printing to stdout, which would
            # interfere with the MCP protocol.
            while True:
                simulation_progress = self.session.get(location)
                retry_after = simulation_progress.headers.get("Retry-After")
                if not retry_after:
                    break
                self.log(f"Sleeping for {retry_after} seconds", "INFO")
                sleep(float(retry_after))
            self.log("Alpha done simulating, getting alpha details", "INFO")
            alpha_id = simulation_progress.json()["alpha"]
            alpha = self.session.get(f"{self.base_url}/alphas/{alpha_id}")
            result = alpha.json()
            result['note'] = "If you got a negative alpha Sharpe, you can add a minus sign in front of the last line of the alpha to flip it, then think about the next step."
            return result

        except Exception as e:
            self.log(f"❌ Failed to create simulation: {str(e)}", "ERROR")
            raise

    # get_simulation_status function removed as requested
    # wait_for_simulation function removed as requested

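    # The polling loop above follows the platform's long-poll convention: keep
    # GET-ing the simulation's Location URL and sleeping for however many
    # seconds the Retry-After header requests; when the header disappears, the
    # result is ready. A minimal standalone sketch of the same pattern
    # (illustrative; assumes `session` is an authenticated requests.Session
    # and `location` came from a simulation POST):
    #
    #     from time import sleep
    #
    #     def wait_for_result(session, location):
    #         while True:
    #             progress = session.get(location)
    #             retry_after = progress.headers.get("Retry-After")
    #             if not retry_after:          # header gone -> finished
    #                 return progress.json()
    #             sleep(float(retry_after))    # server-suggested delay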
    async def get_alpha_details(self, alpha_id: str) -> Dict[str, Any]:
        """Get detailed information about an alpha."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/alphas/{alpha_id}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get alpha details: {str(e)}", "ERROR")
            raise

    def _is_atom(self, detail: Optional[Dict[str, Any]]) -> bool:
        """Match atom detection used in extract_regular_alphas.py:
        - Primary signal: 'classifications' entries containing 'SINGLE_DATA_SET'
        - Fallbacks: tags list contains 'atom' or classification id/name contains 'ATOM'
        """
        if not detail or not isinstance(detail, dict):
            return False

        classifications = detail.get('classifications') or []
        for c in classifications:
            cid = (c.get('id') or c.get('name') or '')
            if isinstance(cid, str) and 'SINGLE_DATA_SET' in cid:
                return True

        # Fallbacks
        tags = detail.get('tags') or []
        if isinstance(tags, list):
            for t in tags:
                if isinstance(t, str) and t.strip().lower() == 'atom':
                    return True

        for c in classifications:
            cid = (c.get('id') or c.get('name') or '')
            if isinstance(cid, str) and 'ATOM' in cid.upper():
                return True

        return False

    async def get_datasets(self, instrument_type: str = "EQUITY", region: str = "USA",
                           delay: int = 1, universe: str = "TOP3000", theme: str = "false", search: Optional[str] = None) -> Dict[str, Any]:
        """Get available datasets."""
        await self.ensure_authenticated()

        try:
            params = {
                'instrumentType': instrument_type,
                'region': region,
                'delay': delay,
                'universe': universe,
                'theme': theme
            }

            if search:
                params['search'] = search

            response = self.session.get(f"{self.base_url}/data-sets", params=params)
            response.raise_for_status()
            response = response.json()
            response['extraNote'] = "If the returned result count is 0, check your parameters with the get_platform_setting_options tool to get the correct values."
            return response
        except Exception as e:
            self.log(f"Failed to get datasets: {str(e)}", "ERROR")
            raise

    async def get_datafields(self, instrument_type: str = "EQUITY", region: str = "USA",
                             delay: int = 1, universe: str = "TOP3000", theme: str = "false",
                             dataset_id: Optional[str] = None, data_type: str = "",
                             search: Optional[str] = None) -> Dict[str, Any]:
        """Get available data fields."""
        await self.ensure_authenticated()

        try:
            params = {
                'instrumentType': instrument_type,
                'region': region,
                'delay': delay,
                'universe': universe,
                'limit': '50',
                'offset': '0'
            }

            if data_type != 'ALL':
                params['type'] = data_type

            if dataset_id:
                params['dataset.id'] = dataset_id
            if search:
                params['search'] = search

            response = self.session.get(f"{self.base_url}/data-fields", params=params)
            response.raise_for_status()
            response = response.json()
            response['extraNote'] = "If the returned result count is 0, check your parameters with the get_platform_setting_options tool to get the correct values."
            return response
        except Exception as e:
            self.log(f"Failed to get datafields: {str(e)}", "ERROR")
            raise

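    # A minimal usage sketch for the two lookups above (illustrative; the
    # dataset id 'fundamental6' is an assumption about platform data, not a
    # guarantee, and `client` is assumed to be an authenticated BrainApiClient):
    #
    #     datasets = await client.get_datasets(region="USA", universe="TOP3000")
    #     fields = await client.get_datafields(dataset_id="fundamental6",
    #                                          data_type="MATRIX")
    #     print(fields.get("count"), "matrix fields found")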
    async def get_alpha_pnl(self, alpha_id: str) -> Dict[str, Any]:
        """Get PnL data for an alpha with retry logic."""
        await self.ensure_authenticated()

        max_retries = 5
        retry_delay = 2  # seconds

        for attempt in range(max_retries):
            try:
                self.log(f"Attempting to get PnL for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")

                response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/pnl")
                response.raise_for_status()

                # Some alphas may return 204 No Content or an empty body
                text = (response.text or "").strip()
                if not text:
                    if attempt < max_retries - 1:
                        self.log(f"Empty PnL response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 1.5  # Exponential backoff
                        continue
                    else:
                        self.log(f"Empty PnL response after {max_retries} attempts for {alpha_id}", "WARNING")
                        return {}

                try:
                    pnl_data = response.json()
                    if pnl_data:
                        self.log(f"Successfully retrieved PnL data for alpha {alpha_id}", "SUCCESS")
                        return pnl_data
                    else:
                        if attempt < max_retries - 1:
                            self.log(f"Empty PnL JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
                            retry_delay *= 1.5
                            continue
                        else:
                            self.log(f"Empty PnL JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                            return {}

                except Exception as parse_err:
                    if attempt < max_retries - 1:
                        self.log(f"PnL JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 1.5
                        continue
                    else:
                        self.log(f"PnL JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                        return {}

            except Exception as e:
                if attempt < max_retries - 1:
                    self.log(f"Failed to get alpha PnL for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                    await asyncio.sleep(retry_delay)
                    retry_delay *= 1.5
                    continue
                else:
                    self.log(f"Failed to get alpha PnL for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                    raise

        # This should never be reached, but just in case
        return {}

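    # get_alpha_pnl, get_alpha_yearly_stats, get_production_correlation and
    # get_self_correlation below all repeat the same retry-with-backoff shape.
    # A generic helper like this sketch could factor it out (an illustrative
    # refactoring idea only, not part of this package):
    #
    #     async def _get_json_with_retry(self, url: str, max_retries: int = 5,
    #                                    retry_delay: float = 2.0) -> Dict[str, Any]:
    #         for attempt in range(max_retries):
    #             try:
    #                 response = self.session.get(url)
    #                 response.raise_for_status()
    #                 if (response.text or "").strip():
    #                     data = response.json()
    #                     if data:
    #                         return data
    #             except Exception:
    #                 if attempt == max_retries - 1:
    #                     raise
    #             await asyncio.sleep(retry_delay)
    #             retry_delay *= 1.5  # exponential backoff
    #         return {}  # all attempts yielded empty bodies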
    async def get_user_alphas(
        self,
        stage: str = "OS",
        limit: int = 30,
        offset: int = 0,
        start_date: Optional[str] = None,
        end_date: Optional[str] = None,
        submission_start_date: Optional[str] = None,
        submission_end_date: Optional[str] = None,
        order: Optional[str] = None,
        hidden: Optional[bool] = None,
    ) -> Dict[str, Any]:
        """Get user's alphas with advanced filtering."""
        await self.ensure_authenticated()

        try:
            params = {
                "stage": stage,
                "limit": limit,
                "offset": offset,
            }
            if start_date:
                params["dateCreated>"] = start_date
            if end_date:
                params["dateCreated<"] = end_date
            if submission_start_date:
                params["dateSubmitted>"] = submission_start_date
            if submission_end_date:
                params["dateSubmitted<"] = submission_end_date
            if order:
                params["order"] = order
            if hidden is not None:
                params["hidden"] = str(hidden).lower()

            response = self.session.get(f"{self.base_url}/users/self/alphas", params=params)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get user alphas: {str(e)}", "ERROR")
            raise

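    # The date filters above use the platform's comparison-suffixed query keys
    # (e.g. 'dateCreated>'). A hedged usage sketch (date values are
    # placeholders; `client` is assumed to be an authenticated BrainApiClient):
    #
    #     recent = await client.get_user_alphas(
    #         stage="IS",
    #         start_date="2025-01-01T00:00:00Z",   # sent as dateCreated>
    #         end_date="2025-02-01T00:00:00Z",     # sent as dateCreated<
    #         order="-dateCreated",
    #     )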
    async def submit_alpha(self, alpha_id: str) -> Union[Dict[str, Any], bool]:
        """Submit an alpha for production.

        Returns the raw response attributes on success, or False on failure.
        """
        await self.ensure_authenticated()

        try:
            self.log(f"📤 Submitting alpha {alpha_id} for production...", "INFO")

            response = self.session.post(f"{self.base_url}/alphas/{alpha_id}/submit")
            response.raise_for_status()

            self.log(f"Alpha {alpha_id} submitted successfully", "SUCCESS")
            return response.__dict__

        except Exception as e:
            self.log(f"❌ Failed to submit alpha: {str(e)}", "ERROR")
            return False

    async def get_events(self) -> Dict[str, Any]:
        """Get available events and competitions."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/events")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get events: {str(e)}", "ERROR")
            raise

    async def get_leaderboard(self, user_id: Optional[str] = None) -> Dict[str, Any]:
        """Get leaderboard data."""
        await self.ensure_authenticated()

        try:
            params = {}

            if user_id:
                params['user'] = user_id
            else:
                # Get current user ID if not specified
                user_response = self.session.get(f"{self.base_url}/users/self")
                if user_response.status_code == 200:
                    user_data = user_response.json()
                    params['user'] = user_data.get('id')

            response = self.session.get(f"{self.base_url}/consultant/boards/leader", params=params)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get leaderboard: {str(e)}", "ERROR")
            raise

    async def get_operators(self) -> Dict[str, Any]:
        """Get available operators for alpha creation."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/operators")
            response.raise_for_status()
            operators_data = response.json()

            # Ensure we return a dictionary format even if the API returns a list
            if isinstance(operators_data, list):
                return {"operators": operators_data, "count": len(operators_data)}
            else:
                return operators_data
        except Exception as e:
            self.log(f"Failed to get operators: {str(e)}", "ERROR")
            raise

    async def run_selection(
        self,
        selection: str,
        instrument_type: str = "EQUITY",
        region: str = "USA",
        delay: int = 1,
        selection_limit: int = 1000,
        selection_handling: str = "POSITIVE"
    ) -> Dict[str, Any]:
        """Run a selection query to filter instruments."""
        await self.ensure_authenticated()

        try:
            selection_data = {
                "selection": selection,
                "instrumentType": instrument_type,
                "region": region,
                "delay": delay,
                "selectionLimit": selection_limit,
                "selectionHandling": selection_handling
            }

            response = self.session.get(f"{self.base_url}/simulations/super-selection", params=selection_data)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to run selection: {str(e)}", "ERROR")
            raise

    async def get_user_profile(self, user_id: str = "self") -> Dict[str, Any]:
        """Get user profile information."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/users/{user_id}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get user profile: {str(e)}", "ERROR")
            raise

    async def get_documentations(self) -> Dict[str, Any]:
        """Get available documentation and learning materials."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/tutorials")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get documentations: {str(e)}", "ERROR")
            raise

    # get_messages_summary function removed as requested

    async def get_messages(self, limit: Optional[int] = None, offset: int = 0) -> Dict[str, Any]:
        """Get messages for the current user with optional pagination.

        Image / large binary payload mitigation:
        Some messages embed base64-encoded images (e.g. <img src="data:image/png;base64,..."/>).
        Returning full base64 can explode token usage for an LLM client. We post-process each
        message description and (by default) extract embedded base64 images to disk and replace
        them with lightweight placeholders while preserving context.

        Strategies (environment driven in future – currently parameterless public API):
        - placeholder (default): save images to message_images/ and replace with marker text.
        - ignore: strip image tags entirely, leaving a note.
        - keep: leave description unchanged (unsafe for LLM token limits).

        A message dict gains an 'extracted_images' list when images are processed.
        """
        await self.ensure_authenticated()

        import re, base64, pathlib

        image_handling = os.environ.get("BRAIN_MESSAGE_IMAGE_MODE", "placeholder").lower()
        save_dir = pathlib.Path("message_images")

        from typing import Tuple
        def process_description(desc: str, message_id: str) -> Tuple[str, List[str]]:
            try:
                if not desc or image_handling == "keep":
                    return desc, []
                attachments: List[str] = []
                # Regex to capture a full <img ...> tag with a data URI
                img_tag_pattern = re.compile(r"<img[^>]+src=\"(data:image/[^\"]+)\"[^>]*>", re.IGNORECASE)
                # Iterate over unique matches to avoid double work
                matches = list(img_tag_pattern.finditer(desc))
                if not matches:
                    # Additional heuristic: very long base64-looking token inside quotes followed by </img>
                    # (legacy format noted by user sample). Replace with a placeholder.
                    heuristic_pattern = re.compile(r"([A-Za-z0-9+/]{500,}={0,2})\"\s*</img>")
                    if image_handling != "keep" and heuristic_pattern.search(desc):
                        placeholder = "[Embedded image removed - large base64 sequence truncated]"
                        return heuristic_pattern.sub(placeholder + "</img>", desc), []
                    return desc, []

                # Ensure the save directory exists only if we will store something
                if image_handling == "placeholder" and not save_dir.exists():
                    try:
                        save_dir.mkdir(parents=True, exist_ok=True)
                    except Exception as e:
                        self.log(f"Could not create image save directory: {e}", "WARNING")

                new_desc = desc
                for idx, match in enumerate(matches, start=1):
                    data_uri = match.group(1)  # data:image/...;base64,XXXX
                    if not data_uri.lower().startswith("data:image"):
                        continue
                    # Split header and base64 payload
                    if "," not in data_uri:
                        continue
                    header, b64_data = data_uri.split(",", 1)
                    mime_part = header.split(";")[0]  # data:image/png
                    ext = "png"
                    if "/" in mime_part:
                        ext = mime_part.split("/")[1]
                    safe_ext = (ext or "img").split("?")[0]
                    placeholder_text = "[Embedded image]"
                    if image_handling == "ignore":
                        replacement = f"[Image removed: {safe_ext}]"
                    elif image_handling == "placeholder":
                        # Try to decode & save
                        file_name = f"{message_id}_{idx}.{safe_ext}"
                        file_path = save_dir / file_name
                        try:
                            # Guard extremely large strings (>5MB ~ 6.7M base64 chars) to avoid memory blow-up
                            if len(b64_data) > 7_000_000:
                                raise ValueError("Image too large to decode safely")
                            with open(file_path, "wb") as f:
                                f.write(base64.b64decode(b64_data))
                            attachments.append(str(file_path))
                            replacement = f"[Image extracted -> {file_path}]"
                        except Exception as e:
                            self.log(f"Failed to decode embedded image in message {message_id}: {e}", "WARNING")
                            replacement = "[Image extraction failed - content omitted]"
                    else:  # keep
                        replacement = placeholder_text  # shouldn't be reached due to the early return, but safe
                    # Replace only this matched tag (count=1), operating on new_desc with the exact matched string
                    original_tag = match.group(0)
                    new_desc = new_desc.replace(original_tag, replacement, 1)
                return new_desc, attachments
            except UnicodeEncodeError as ue:
                self.log(f"Unicode encoding error in process_description: {ue}", "WARNING")
                return desc, []
            except Exception as e:
                self.log(f"Error in process_description: {e}", "WARNING")
                return desc, []

        try:
            params = {}
            if limit is not None:
                params['limit'] = limit
            if offset > 0:
                params['offset'] = offset

            response = self.session.get(f"{self.base_url}/users/self/messages", params=params)
            response.raise_for_status()
            data = response.json()

            # Post-process results for image handling
            results = data.get('results', [])
            for msg in results:
                try:
                    desc = msg.get('description')
                    processed_desc, attachments = process_description(desc, msg.get('id', 'msg'))
                    if attachments or desc != processed_desc:
                        msg['description'] = processed_desc
                        if attachments:
                            msg['extracted_images'] = attachments
                        else:
                            # If changed but there are no attachments (ignore mode), mark as sanitized
                            msg['sanitized'] = True
                except UnicodeEncodeError as ue:
                    self.log(f"Unicode encoding error sanitizing message {msg.get('id')}: {ue}", "WARNING")
                    # Keep the original description if encoding fails
                    continue
                except Exception as inner_e:
                    self.log(f"Failed to sanitize message {msg.get('id')}: {inner_e}", "WARNING")
            data['results'] = results
            data['image_handling'] = image_handling
            return data
        except UnicodeEncodeError as ue:
            self.log(f"Failed to get messages due to encoding error: {str(ue)}", "ERROR")
            raise
        except Exception as e:
            self.log(f"Failed to get messages: {str(e)}", "ERROR")
            raise

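    # A self-contained sketch of the extraction regex above on a toy payload
    # (the sample HTML and base64 string are made up for illustration):
    #
    #     import re
    #     sample = '<p>hi</p><img alt="chart" src="data:image/png;base64,iVBORw0KGgo=">'
    #     pattern = re.compile(r"<img[^>]+src=\"(data:image/[^\"]+)\"[^>]*>", re.IGNORECASE)
    #     m = pattern.search(sample)
    #     header, b64_data = m.group(1).split(",", 1)
    #     # header  -> 'data:image/png;base64'
    #     # b64_data -> 'iVBORw0KGgo='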
    async def get_glossary_terms(self, email: str, password: str, headless: bool = False) -> Dict[str, Any]:
        """Get glossary terms from the forum."""
        try:
            # Import and use forum functions
            from forum_functions import forum_client
            return await forum_client.get_glossary_terms(email, password, headless)
        except ImportError:
            self.log("Forum functions not available - install selenium and run forum_functions.py", "WARNING")
            return {"error": "Forum functions require selenium. Use forum_functions.py directly."}
        except Exception as e:
            self.log(f"Glossary extraction failed: {str(e)}", "ERROR")
            return {"error": str(e)}

    async def search_forum_posts(self, email: str, password: str, search_query: str,
                                 max_results: int = 50, headless: bool = True) -> Dict[str, Any]:
        """Search forum posts."""
        try:
            # Import and use forum functions
            from forum_functions import forum_client
            return await forum_client.search_forum_posts(email, password, search_query, max_results, headless)
        except ImportError:
            self.log("Forum functions not available - install selenium and run forum_functions.py", "WARNING")
            return {"error": "Forum functions require selenium. Use forum_functions.py directly."}
        except Exception as e:
            self.log(f"Forum search failed: {str(e)}", "ERROR")
            return {"error": str(e)}

    async def read_forum_post(self, email: str, password: str, article_id: str,
                              headless: bool = False) -> Dict[str, Any]:
        """Read a full forum post, including comments."""
        try:
            # Import and use forum functions
            from forum_functions import forum_client
            return await forum_client.read_full_forum_post(email, password, article_id, headless, include_comments=True)
        except ImportError:
            self.log("Forum functions not available - install selenium and run forum_functions.py", "WARNING")
            return {"error": "Forum functions require selenium. Use forum_functions.py directly."}
        except Exception as e:
            self.log(f"Forum post retrieval failed: {str(e)}", "ERROR")
            return {"error": str(e)}

    async def get_alpha_yearly_stats(self, alpha_id: str) -> Dict[str, Any]:
        """Get yearly statistics for an alpha with retry logic."""
        await self.ensure_authenticated()

        max_retries = 5
        retry_delay = 2  # seconds

        for attempt in range(max_retries):
            try:
                self.log(f"Attempting to get yearly stats for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")

                response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/yearly-stats")
                response.raise_for_status()

                # Check if response has content
                text = (response.text or "").strip()
                if not text:
                    if attempt < max_retries - 1:
                        self.log(f"Empty yearly stats response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 1.5  # Exponential backoff
                        continue
                    else:
                        self.log(f"Empty yearly stats response after {max_retries} attempts for {alpha_id}", "WARNING")
                        return {}

                try:
                    yearly_stats = response.json()
                    if yearly_stats:
                        self.log(f"Successfully retrieved yearly stats for alpha {alpha_id}", "SUCCESS")
                        return yearly_stats
                    else:
                        if attempt < max_retries - 1:
                            self.log(f"Empty yearly stats JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
                            retry_delay *= 1.5
                            continue
                        else:
                            self.log(f"Empty yearly stats JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                            return {}

                except Exception as parse_err:
                    if attempt < max_retries - 1:
                        self.log(f"Yearly stats JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 1.5
                        continue
                    else:
                        self.log(f"Yearly stats JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                        return {}

            except Exception as e:
                if attempt < max_retries - 1:
                    self.log(f"Failed to get alpha yearly stats for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                    await asyncio.sleep(retry_delay)
                    retry_delay *= 1.5
                    continue
                else:
                    self.log(f"Failed to get alpha yearly stats for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                    raise

        # This should never be reached, but just in case
        return {}

    async def get_production_correlation(self, alpha_id: str) -> Dict[str, Any]:
        """Get production correlation data for an alpha with retry logic."""
        await self.ensure_authenticated()

        max_retries = 5
        retry_delay = 20  # seconds

        for attempt in range(max_retries):
            try:
                self.log(f"Attempting to get production correlation for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")

                response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/prod")
                response.raise_for_status()

                # Check if response has content
                text = (response.text or "").strip()
                if not text:
                    if attempt < max_retries - 1:
                        self.log(f"Empty production correlation response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        continue
                    else:
                        self.log(f"Empty production correlation response after {max_retries} attempts for {alpha_id}", "WARNING")
                        return {}

                try:
                    correlation_data = response.json()
                    if correlation_data:
                        self.log(f"Successfully retrieved production correlation for alpha {alpha_id}", "SUCCESS")
                        return correlation_data
                    else:
                        if attempt < max_retries - 1:
                            self.log(f"Empty production correlation JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
                            continue
                        else:
                            self.log(f"Empty production correlation JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                            return {}

                except Exception as parse_err:
                    if attempt < max_retries - 1:
                        self.log(f"Production correlation JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        continue
                    else:
                        self.log(f"Production correlation JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                        return {}

            except Exception as e:
                if attempt < max_retries - 1:
                    self.log(f"Failed to get production correlation for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                    await asyncio.sleep(retry_delay)
                    continue
                else:
                    self.log(f"Failed to get production correlation for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                    raise

        # This should never be reached, but just in case
        return {}

    async def get_self_correlation(self, alpha_id: str) -> Dict[str, Any]:
        """Get self-correlation data for an alpha with retry logic."""
        await self.ensure_authenticated()

        max_retries = 5
        retry_delay = 20  # seconds

        for attempt in range(max_retries):
            try:
                self.log(f"Attempting to get self correlation for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")

                response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/self")
                response.raise_for_status()

                # Check if response has content
                text = (response.text or "").strip()
                if not text:
                    if attempt < max_retries - 1:
                        self.log(f"Empty self correlation response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        continue
                    else:
                        self.log(f"Empty self correlation response after {max_retries} attempts for {alpha_id}", "WARNING")
                        return {}

                try:
                    correlation_data = response.json()
                    if correlation_data:
                        self.log(f"Successfully retrieved self correlation for alpha {alpha_id}", "SUCCESS")
                        return correlation_data
                    else:
                        if attempt < max_retries - 1:
                            self.log(f"Empty self correlation JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
                            await asyncio.sleep(retry_delay)
                            continue
                        else:
                            self.log(f"Empty self correlation JSON after {max_retries} attempts for {alpha_id}", "WARNING")
                            return {}

                except Exception as parse_err:
                    if attempt < max_retries - 1:
                        self.log(f"Self correlation JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
                        await asyncio.sleep(retry_delay)
                        continue
                    else:
                        self.log(f"Self correlation JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
                        return {}

            except Exception as e:
                if attempt < max_retries - 1:
                    self.log(f"Failed to get self correlation for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
                    await asyncio.sleep(retry_delay)
                    continue
                else:
                    self.log(f"Failed to get self correlation for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
                    raise

        # This should never be reached, but just in case
        return {}

    async def check_correlation(self, alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
        """Check alpha correlation against production alphas, self alphas, or both."""
        await self.ensure_authenticated()

        try:
            results = {
                'alpha_id': alpha_id,
                'threshold': threshold,
                'correlation_type': correlation_type,
                'checks': {}
            }

            # Determine which correlations to check
            check_types = []
            if correlation_type == "both":
                check_types = ["production", "self"]
            else:
                check_types = [correlation_type]

            all_passed = True

            for check_type in check_types:
                if check_type == "production":
                    correlation_data = await self.get_production_correlation(alpha_id)
                elif check_type == "self":
                    correlation_data = await self.get_self_correlation(alpha_id)
                else:
                    continue

                # Analyze correlation data (robust to schema/records format)
                if isinstance(correlation_data, dict):
                    # Prefer strict access to schema.max or top-level max; otherwise error
                    schema = correlation_data.get('schema') or {}
                    if isinstance(schema, dict) and 'max' in schema:
                        max_correlation = float(schema['max'])
                    elif 'max' in correlation_data:
                        # Some endpoints place max at top level
                        max_correlation = float(correlation_data['max'])
                    else:
                        # Attempt to derive from records; if none found, raise instead of defaulting
                        records = correlation_data.get('records') or []
                        if isinstance(records, list) and records:
                            candidate_max = None
                            for row in records:
                                if isinstance(row, (list, tuple)):
                                    for v in row:
                                        try:
                                            vf = float(v)
                                            if -1.0 <= vf <= 1.0:
                                                candidate_max = vf if candidate_max is None else max(candidate_max, vf)
                                        except Exception:
                                            continue
                                elif isinstance(row, dict):
                                    for key in ('correlation', 'prodCorrelation', 'selfCorrelation', 'max'):
                                        try:
                                            vf = float(row.get(key))
                                            if -1.0 <= vf <= 1.0:
                                                candidate_max = vf if candidate_max is None else max(candidate_max, vf)
                                        except Exception:
                                            continue
                            if candidate_max is None:
                                raise ValueError("Unable to derive max correlation from records")
                            max_correlation = float(candidate_max)
                        else:
                            raise KeyError("Correlation response missing 'schema.max' or top-level 'max' and no 'records' to derive from")
                else:
                    raise TypeError("Correlation data is not a dictionary")

                passes_check = max_correlation < threshold

                results['checks'][check_type] = {
                    'max_correlation': max_correlation,
                    'passes_check': passes_check,
                    'correlation_data': correlation_data
                }

                if not passes_check:
                    all_passed = False

            results['all_passed'] = all_passed

            return results

        except Exception as e:
            self.log(f"Failed to check correlation: {str(e)}", "ERROR")
            raise

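    # A usage sketch for the correlation gate above (illustrative; the alpha
    # id is a placeholder and `client` is assumed to be an authenticated
    # BrainApiClient):
    #
    #     result = await client.check_correlation("aBc123",
    #                                             correlation_type="both",
    #                                             threshold=0.7)
    #     if result["all_passed"]:
    #         print("max correlations below 0.7; submission worth considering")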
    async def get_submission_check(self, alpha_id: str) -> Dict[str, Any]:
        """Comprehensive pre-submission check."""
        await self.ensure_authenticated()

        try:
            # Get correlation checks using the unified function
            correlation_checks = await self.check_correlation(alpha_id, correlation_type="both")

            # Get alpha details for additional validation
            alpha_details = await self.get_alpha_details(alpha_id)

            # Compile comprehensive check results
            checks = {
                'correlation_checks': correlation_checks,
                'alpha_details': alpha_details,
                'all_passed': correlation_checks['all_passed']
            }

            return checks
        except Exception as e:
            self.log(f"Failed to get submission check: {str(e)}", "ERROR")
            raise

    async def set_alpha_properties(self, alpha_id: str, name: Optional[str] = None,
                                   color: Optional[str] = None, tags: Optional[List[str]] = None,
                                   selection_desc: str = "None", combo_desc: str = "None") -> Dict[str, Any]:
        """Update alpha properties (name, color, tags, descriptions).

        Note: selection_desc and combo_desc default to the literal string "None",
        so those fields are always included in the PATCH body unless passed as "".
        """
        await self.ensure_authenticated()

        try:
            data = {}
            if name:
                data['name'] = name
            if color:
                data['color'] = color
            if tags:
                data['tags'] = tags
            if selection_desc:
                data['selectionDesc'] = selection_desc
            if combo_desc:
                data['comboDesc'] = combo_desc

            response = self.session.patch(f"{self.base_url}/alphas/{alpha_id}", json=data)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to set alpha properties: {str(e)}", "ERROR")
            raise

    async def get_record_sets(self, alpha_id: str) -> Dict[str, Any]:
        """List available record sets for an alpha."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get record sets: {str(e)}", "ERROR")
            raise

    async def get_record_set_data(self, alpha_id: str, record_set_name: str) -> Dict[str, Any]:
        """Get data from a specific record set."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/{record_set_name}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get record set data: {str(e)}", "ERROR")
            raise

    async def get_user_activities(self, user_id: str, grouping: Optional[str] = None) -> Dict[str, Any]:
        """Get user activity diversity data."""
        await self.ensure_authenticated()

        try:
            params = {}
            if grouping:
                params['grouping'] = grouping

            response = self.session.get(f"{self.base_url}/users/{user_id}/activities", params=params)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get user activities: {str(e)}", "ERROR")
            raise

    async def get_pyramid_multipliers(self) -> Dict[str, Any]:
        """Get current pyramid multipliers showing BRAIN's encouragement levels."""
        await self.ensure_authenticated()

        try:
            # Use the correct endpoint without parameters
            response = self.session.get(f"{self.base_url}/users/self/activities/pyramid-multipliers")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get pyramid multipliers: {str(e)}", "ERROR")
            raise

1264
|
+
    async def value_factor_trendScore(self, start_date: str, end_date: str) -> Dict[str, Any]:
        """Compute the diversity score for regular alphas in a date range.

        Description:
            This function calculates the diversity of a user's submissions; tracking that
            diversity gives a good read on the trend of the user's value factor.
            A user's value factor is defined by this diversity score, which measures three
            key aspects of work output: the proportion of works with the "Atom" tag (S_A),
            the breadth of pyramids covered (S_P), and how evenly works are distributed
            across those pyramids (S_H). Calculated as their product, it rewards strong
            performance across all three dimensions (more Atom-tagged works, wider pyramid
            coverage, and balanced distribution), with weakness in any one area lowering
            the total score significantly.

        Inputs (hints for AI callers):
            - start_date (str): ISO UTC start datetime, e.g. '2025-08-14T00:00:00Z'
            - end_date (str): ISO UTC end datetime, e.g. '2025-08-18T23:59:59Z'
            - Note: this tool always uses 'OS' (submission dates) to define the window; callers do not need to supply a stage.
            - Note: P_max (total number of possible pyramids) is derived from the platform
              pyramid-multipliers endpoint and not supplied by callers.

        Returns (compact JSON): {
            'diversity_score': float,
            'N': int,      # total regular alphas in window
            'A': int,      # number of Atom-tagged works (is_single_data_set)
            'P': int,      # pyramid coverage count in the sample
            'P_max': int,  # max used for normalization
            'S_A': float, 'S_P': float, 'S_H': float,
            'per_pyramid_counts': {pyramid_name: count}
        }
        """
        # Fetch user alphas (always use OS / submission dates per product policy)
        await self.ensure_authenticated()
        alphas_resp = await self.get_user_alphas(stage='OS', limit=500, submission_start_date=start_date, submission_end_date=end_date)

        if not isinstance(alphas_resp, dict) or 'results' not in alphas_resp:
            return {'error': 'Unexpected response from get_user_alphas', 'raw': alphas_resp}

        alphas = alphas_resp['results']
        regular = [a for a in alphas if a.get('type') == 'REGULAR']

        # Fetch details for each regular alpha
        pyramid_list = []
        atom_count = 0
        per_pyramid = {}
        for a in regular:
            try:
                detail = await self.get_alpha_details(a.get('id'))
            except Exception:
                continue

            is_atom = self._is_atom(detail)
            if is_atom:
                atom_count += 1

            # Extract pyramids
            ps = []
            if isinstance(detail.get('pyramids'), list):
                ps = [p.get('name') for p in detail.get('pyramids') if p.get('name')]
            else:
                pt = detail.get('pyramidThemes') or {}
                pss = pt.get('pyramids') if isinstance(pt, dict) else None
                if pss and isinstance(pss, list):
                    ps = [p.get('name') for p in pss if p.get('name')]

            for p in ps:
                pyramid_list.append(p)
                per_pyramid[p] = per_pyramid.get(p, 0) + 1

        N = len(regular)
        A = atom_count
        P = len(per_pyramid)

        # Determine P_max similarly to the script: use pyramid multipliers if available
        P_max = None
        try:
            pm = await self.get_pyramid_multipliers()
            if isinstance(pm, dict) and 'pyramids' in pm:
                pyramids_list = pm.get('pyramids') or []
                P_max = len(pyramids_list)
        except Exception:
            P_max = None

        if not P_max or P_max <= 0:
            P_max = max(P, 1)

        # Component scores
        S_A = (A / N) if N > 0 else 0.0
        S_P = (P / P_max) if P_max > 0 else 0.0

        # Entropy
        S_H = 0.0
        if P <= 1 or not per_pyramid:
            S_H = 0.0
        else:
            total_occ = sum(per_pyramid.values())
            H = 0.0
            for cnt in per_pyramid.values():
                q = cnt / total_occ if total_occ > 0 else 0
                if q > 0:
                    H -= q * math.log2(q)
            max_H = math.log2(P) if P > 0 else 1
            S_H = (H / max_H) if max_H > 0 else 0.0

        diversity_score = S_A * S_P * S_H

        return {
            'diversity_score': diversity_score,
            'N': N,
            'A': A,
            'P': P,
            'P_max': P_max,
            'S_A': S_A,
            'S_P': S_P,
            'S_H': S_H,
            'per_pyramid_counts': per_pyramid
        }

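    # A worked example of the diversity score above, using made-up counts purely
    # to show the arithmetic (illustrative numbers, not real platform data): with
    # N=10 regular alphas, A=4 Atom-tagged works, per-pyramid counts
    # {'X': 6, 'Y': 2, 'Z': 2} and P_max=20:
    #   S_A = 4/10 = 0.4
    #   S_P = 3/20 = 0.15
    #   H   = -(0.6*log2(0.6) + 2 * 0.2*log2(0.2)) ≈ 1.371
    #   S_H = H / log2(3) ≈ 1.371 / 1.585 ≈ 0.865
    #   diversity_score = 0.4 * 0.15 * 0.865 ≈ 0.052
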
    async def get_pyramid_alphas(self, start_date: Optional[str] = None,
                                 end_date: Optional[str] = None) -> Dict[str, Any]:
        """Get user's current alpha distribution across pyramid categories."""
        await self.ensure_authenticated()

        try:
            params = {}
            if start_date:
                params['startDate'] = start_date
            if end_date:
                params['endDate'] = end_date

            # Try the user-specific activities endpoint first (like pyramid-multipliers)
            response = self.session.get(f"{self.base_url}/users/self/activities/pyramid-alphas", params=params)

            # If that fails, try alternative endpoints
            if response.status_code == 404:
                # Try alternative endpoint structure
                response = self.session.get(f"{self.base_url}/users/self/pyramid/alphas", params=params)

            if response.status_code == 404:
                # Try yet another alternative
                response = self.session.get(f"{self.base_url}/activities/pyramid-alphas", params=params)

            if response.status_code == 404:
                # Return an informative error with what we tried
                return {
                    "error": "Pyramid alphas endpoint not found",
                    "tried_endpoints": [
                        "/users/self/activities/pyramid-alphas",
                        "/users/self/pyramid/alphas",
                        "/activities/pyramid-alphas"
                    ],
                    "suggestion": "This endpoint may not be available in the current API version"
                }

            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get pyramid alphas: {str(e)}", "ERROR")
            raise

    async def get_user_competitions(self, user_id: Optional[str] = None) -> Dict[str, Any]:
        """Get list of competitions that the user is participating in."""
        await self.ensure_authenticated()

        try:
            if not user_id:
                # Get current user ID if not specified
                user_response = self.session.get(f"{self.base_url}/users/self")
                if user_response.status_code == 200:
                    user_data = user_response.json()
                    user_id = user_data.get('id')
                else:
                    user_id = 'self'

            response = self.session.get(f"{self.base_url}/users/{user_id}/competitions")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get user competitions: {str(e)}", "ERROR")
            raise

    async def get_competition_details(self, competition_id: str) -> Dict[str, Any]:
        """Get detailed information about a specific competition."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/competitions/{competition_id}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get competition details: {str(e)}", "ERROR")
            raise

    async def get_competition_agreement(self, competition_id: str) -> Dict[str, Any]:
        """Get the rules, terms, and agreement for a specific competition."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/competitions/{competition_id}/agreement")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get competition agreement: {str(e)}", "ERROR")
            raise

    async def get_platform_setting_options(self) -> Dict[str, Any]:
        """Get available instrument types, regions, delays, and universes."""
        await self.ensure_authenticated()

        try:
            # Use OPTIONS method on simulations endpoint to get configuration options
            response = self.session.options(f"{self.base_url}/simulations")
            response.raise_for_status()

            # Parse the settings structure from the response
            settings_data = response.json()
            settings_options = settings_data['actions']['POST']['settings']['children']

            # Extract instrument configuration options
            instrument_type_data = {}
            region_data = {}
            universe_data = {}
            delay_data = {}
            neutralization_data = {}

            # Parse each setting type
            for key, setting in settings_options.items():
                if setting['type'] == 'choice':
                    if setting['label'] == 'Instrument type':
                        instrument_type_data = setting['choices']
                    elif setting['label'] == 'Region':
                        region_data = setting['choices']['instrumentType']
                    elif setting['label'] == 'Universe':
                        universe_data = setting['choices']['instrumentType']
                    elif setting['label'] == 'Delay':
                        delay_data = setting['choices']['instrumentType']
                    elif setting['label'] == 'Neutralization':
                        neutralization_data = setting['choices']['instrumentType']

            # Build comprehensive instrument options
            data_list = []

            for instrument_type in instrument_type_data:
                for region in region_data[instrument_type['value']]:
                    for delay in delay_data[instrument_type['value']]['region'][region['value']]:
                        row = {
                            'InstrumentType': instrument_type['value'],
                            'Region': region['value'],
                            'Delay': delay['value']
                        }
                        row['Universe'] = [
                            item['value'] for item in universe_data[instrument_type['value']]['region'][region['value']]
                        ]
                        row['Neutralization'] = [
                            item['value'] for item in neutralization_data[instrument_type['value']]['region'][region['value']]
                        ]
                        data_list.append(row)

            # Return structured data
            return {
                'instrument_options': data_list,
                'total_combinations': len(data_list),
                'instrument_types': [item['value'] for item in instrument_type_data],
                'regions_by_type': {
                    item['value']: [r['value'] for r in region_data[item['value']]]
                    for item in instrument_type_data
                }
            }

        except Exception as e:
            self.log(f"Failed to get instrument options: {str(e)}", "ERROR")
            raise

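    # A minimal validation sketch built on the structure returned above, assuming
    # `options` is the dict from get_platform_setting_options(); the helper name
    # is hypothetical and only shows how the 'instrument_options' rows can vet a
    # requested (instrument_type, region, delay, universe) combination:
    #
    #   def is_valid_combination(options, instrument_type, region, delay, universe) -> bool:
    #       for row in options.get('instrument_options', []):
    #           if (row['InstrumentType'] == instrument_type
    #                   and row['Region'] == region
    #                   and row['Delay'] == delay):
    #               return universe in row['Universe']
    #       return False
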
    async def performance_comparison(self, alpha_id: str, team_id: Optional[str] = None,
                                     competition: Optional[str] = None) -> Dict[str, Any]:
        """Get performance comparison data for an alpha."""
        await self.ensure_authenticated()

        try:
            params = {}
            if team_id:
                params['team_id'] = team_id
            if competition:
                params['competition'] = competition

            response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/performance-comparison", params=params)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get performance comparison: {str(e)}", "ERROR")
            raise

    # combine_test_results function removed as requested

    async def expand_nested_data(self, data: List[Dict[str, Any]], preserve_original: bool = True) -> List[Dict[str, Any]]:
        """Flatten complex nested data structures into tabular format."""
        try:
            expanded_data = []

            for item in data:
                expanded_item = {}

                for key, value in item.items():
                    if isinstance(value, dict):
                        # Expand nested dictionary into prefixed columns
                        for nested_key, nested_value in value.items():
                            expanded_key = f"{key}_{nested_key}"
                            expanded_item[expanded_key] = nested_value

                        # Preserve the original nested dict if requested
                        if preserve_original:
                            expanded_item[key] = value
                    elif isinstance(value, list):
                        # Stringify list values for tabular output (empty lists
                        # become an empty string so the column type stays consistent)
                        expanded_item[key] = str(value) if value else ""

                        # Preserve the original list if requested
                        if preserve_original:
                            expanded_item[key] = value
                    else:
                        # Simple value
                        expanded_item[key] = value

                expanded_data.append(expanded_item)

            return expanded_data
        except Exception as e:
            self.log(f"Failed to expand nested data: {str(e)}", "ERROR")
            raise

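    # A short illustration of the flattening above with preserve_original=False,
    # using a made-up record:
    #   {'id': 'A1', 'is': {'sharpe': 1.2, 'fitness': 0.9}, 'tags': ['x']}
    # expands to
    #   {'id': 'A1', 'is_sharpe': 1.2, 'is_fitness': 0.9, 'tags': "['x']"}
    # i.e. nested dicts become prefixed columns and lists are stringified.
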
    # generate_alpha_links function removed as requested

    async def read_specific_documentation(self, page_id: str) -> Dict[str, Any]:
        """Retrieve detailed content of a specific documentation page/article."""
        await self.ensure_authenticated()

        try:
            response = self.session.get(f"{self.base_url}/tutorial-pages/{page_id}")
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.log(f"Failed to get documentation page: {str(e)}", "ERROR")
            raise

    # Badge status function removed as requested

# Initialize MCP server
mcp = FastMCP('brain_mcp_server')

# Initialize API client
brain_client = BrainApiClient()

# Configuration management
CONFIG_FILE = "user_config.json"

def _resolve_config_path(for_write: bool = False) -> str:
    """
    Resolve the config file path with this priority:
    1) BRAIN_CONFIG_PATH (file or directory)
    2) Directory of running script when available, else current working directory
    3) Current working directory

    When for_write=True, returns the preferred path even if it doesn't exist yet.
    """
    # 1) Explicit override via env var
    env_path = os.environ.get("BRAIN_CONFIG_PATH")
    if env_path:
        p = Path(env_path).expanduser()
        target = p / CONFIG_FILE if p.is_dir() else p
        # For read, only if it exists; for write, allow regardless
        if for_write or target.exists():
            return str(target.resolve())

    # 2) Script/module directory when available, else CWD (works in notebooks)
    base_dir = Path.cwd()
    try:
        # __file__ is not defined in notebooks; this will fail there and keep CWD
        script_dir = Path(__file__).resolve().parent  # type: ignore[name-defined]
        base_dir = script_dir
    except Exception:
        # Fall back to current working directory for notebooks/REPL
        pass

    module_path = base_dir / CONFIG_FILE
    if not for_write and module_path.exists():
        return str(module_path.resolve())

    # 3) Fallback to CWD for backward compatibility
    cwd_path = Path.cwd() / CONFIG_FILE
    if not for_write and cwd_path.exists():
        return str(cwd_path.resolve())

    # For writes (or when nothing exists), prefer the module/base directory
    return str(module_path.resolve())

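# A quick sketch of the resolution order above (paths are hypothetical):
# with BRAIN_CONFIG_PATH unset, reads come from <module dir>/user_config.json
# if present, else ./user_config.json; setting the env var redirects both
# reads and writes.
#
#   import os
#   os.environ["BRAIN_CONFIG_PATH"] = "/tmp/brain"       # an existing directory...
#   _resolve_config_path(for_write=True)                  # -> /tmp/brain/user_config.json
#   os.environ["BRAIN_CONFIG_PATH"] = "/tmp/brain.json"   # ...or a file path
#   _resolve_config_path(for_write=True)                  # -> /tmp/brain.json
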
def load_config() -> Dict[str, Any]:
    """Load configuration from file with robust path resolution.

    Looks for the config in this order: BRAIN_CONFIG_PATH -> module directory -> CWD.
    Returns an empty dict when not found or on error.
    """
    path = _resolve_config_path(for_write=False)
    if os.path.exists(path):
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            logger.error(f"Failed to load config from '{path}': {e}")
    return {}


def save_config(config: Dict[str, Any]):
    """Save configuration to file using the resolved config path.

    Uses BRAIN_CONFIG_PATH if set; otherwise writes next to this module.
    Ensures the target directory exists.
    """
    try:
        path = _resolve_config_path(for_write=True)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w', encoding='utf-8') as f:
            json.dump(config, f, indent=2, ensure_ascii=False)
    except Exception as e:
        logger.error(f"Failed to save config: {e}")

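# A minimal round-trip sketch for the two helpers above; the credential values
# are placeholders, and the shape mirrors what the authenticate tool below
# reads back out of the stored config.
#
#   cfg = load_config()
#   cfg.setdefault('credentials', {})['email'] = 'user@example.com'
#   cfg['credentials']['password'] = '...'
#   save_config(cfg)
#   assert load_config()['credentials']['email'] == 'user@example.com'
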
# MCP Tools

@mcp.tool()
async def authenticate(email: Optional[str] = "", password: Optional[str] = "") -> Dict[str, Any]:
    """
    🔐 Authenticate with WorldQuant BRAIN platform.

    This is the first step in any BRAIN workflow. You must authenticate before using any other tools.

    Args:
        email: Your BRAIN platform email address (optional if in config or .brain_credentials)
        password: Your BRAIN platform password (optional if in config or .brain_credentials)

    Returns:
        Authentication result with user info and permissions
    """
    try:
        config = load_config()
        if 'credentials' in config:
            if not email:
                email = config['credentials'].get('email', '')
            if not password:
                password = config['credentials'].get('password', '')

        if not email or not password:
            return {"error": "Email and password required. Either provide them as arguments, configure them in user_config.json, or create a .brain_credentials file in your home directory with format: [\"email\", \"password\"]"}

        result = await brain_client.authenticate(email, password)

        # Save credentials to config for future use
        config = load_config()
        if 'credentials' not in config:
            config['credentials'] = {}
        config['credentials']['email'] = email
        config['credentials']['password'] = password
        save_config(config)

        return result
    except Exception as e:
        return {"error": str(e)}

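# Example of the ~/.brain_credentials fallback mentioned in the error message
# above: a JSON file holding a two-element array (values are placeholders).
#
#   ["user@example.com", "my-password"]
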
@mcp.tool()
async def value_factor_trendScore(start_date: str, end_date: str) -> Dict[str, Any]:
    """Compute and return the diversity score for REGULAR alphas in a submission-date window.

    This tool calculates the diversity of the user's submissions; tracking that diversity
    gives a good read on the trend of the user's value factor.
    This MCP tool wraps BrainApiClient.value_factor_trendScore and always uses submission dates (OS).

    Inputs:
        - start_date: ISO UTC start datetime (e.g. '2025-08-14T00:00:00Z')
        - end_date: ISO UTC end datetime (e.g. '2025-08-18T23:59:59Z')
        - Note: P_max (the total number of pyramid categories used for normalization)
          is derived from the platform and not supplied by callers.

    Returns: compact JSON with diversity_score, N, A, P, P_max, S_A, S_P, S_H, per_pyramid_counts
    """
    try:
        return await brain_client.value_factor_trendScore(start_date=start_date, end_date=end_date)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def manage_config(action: str = "get", settings: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """
    🔧 Manage configuration settings - get or update configuration.

    Args:
        action: Action to perform ("get" to retrieve config, "set" to update config)
        settings: Configuration settings to update (required when action="set")

    Returns:
        Current or updated configuration including authentication status
    """
    if action == "get":
        config = load_config()
        auth_status = await brain_client.get_authentication_status()

        return {
            "config": config,
            "auth_status": auth_status,
            "is_authenticated": await brain_client.is_authenticated()
        }

    elif action == "set":
        if settings is None:
            return {"error": "Settings parameter is required when action='set'"}

        config = load_config()
        config.update(settings)
        save_config(config)
        return config

    else:
        return {"error": f"Invalid action '{action}'. Use 'get' or 'set'."}

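# Example payloads for the tool above (key and value are illustrative):
# action="get" takes no settings; action="set" shallow-merges the given keys
# into the stored config via config.update(settings).
#
#   await manage_config(action="set", settings={"default_region": "USA"})
#   await manage_config(action="get")
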
@mcp.tool()
async def create_simulation(
    type: str = "REGULAR",
    instrument_type: str = "EQUITY",
    region: str = "USA",
    universe: str = "TOP3000",
    delay: int = 1,
    decay: float = 0.0,
    neutralization: str = "NONE",
    truncation: float = 0.0,
    test_period: str = "P0Y0M",
    unit_handling: str = "VERIFY",
    nan_handling: str = "OFF",
    language: str = "FASTEXPR",
    visualization: bool = True,
    regular: Optional[str] = None,
    combo: Optional[str] = None,
    selection: Optional[str] = None,
    pasteurization: str = "ON",
    max_trade: str = "OFF",
    selection_handling: str = "POSITIVE",
    selection_limit: int = 1000,
    component_activation: str = "IS"
) -> Dict[str, Any]:
    """
    🚀 Create a new simulation on BRAIN platform.

    This tool creates and starts a simulation with your alpha code. Use this after you have your alpha formula ready.

    Args:
        type: Simulation type ("REGULAR" or "SUPER")
        instrument_type: Type of instruments (e.g., "EQUITY")
        region: Market region (e.g., "USA")
        universe: Universe of stocks (e.g., "TOP3000")
        delay: Data delay (0 or 1)
        decay: Decay value for the simulation
        neutralization: Neutralization method
        truncation: Truncation value
        test_period: Test period as an ISO-8601 duration (e.g., "P0Y0M" for none, "P1Y6M" for 1 year 6 months)
        unit_handling: Unit handling method
        nan_handling: NaN handling method
        language: Expression language (e.g., "FASTEXPR")
        visualization: Enable visualization
        regular: Regular simulation code (for REGULAR type)
        combo: Combo code (for SUPER type)
        selection: Selection code (for SUPER type)

    Returns:
        Simulation creation result with ID and location
    """
    try:
        settings = SimulationSettings(
            instrumentType=instrument_type,
            region=region,
            universe=universe,
            delay=delay,
            decay=decay,
            neutralization=neutralization,
            truncation=truncation,
            testPeriod=test_period,
            unitHandling=unit_handling,
            nanHandling=nan_handling,
            language=language,
            visualization=visualization
        )

        simulation_data = SimulationData(
            type=type,
            settings=settings,
            regular=regular,
            combo=combo,
            selection=selection
        )

        result = await brain_client.create_simulation(simulation_data)
        return result
    except Exception as e:
        return {"error": str(e), "note": "You need to call three MCP tools - get_operators, get_platform_setting_options and get_datafields - to check whether you correctly use the operators, set the simulation settings, and reference existing data fields."}

# get_simulation_status MCP tool removed as requested
# wait_for_simulation MCP tool removed as requested

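# A minimal invocation sketch for create_simulation, assuming authentication
# has already succeeded; the expression and neutralization value are
# illustrative, and field/operator names should be validated first with
# get_datafields, get_operators and get_platform_setting_options.
#
#   result = await create_simulation(
#       type="REGULAR",
#       region="USA",
#       universe="TOP3000",
#       delay=1,
#       neutralization="INDUSTRY",
#       regular="rank(-returns)",
#   )
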
@mcp.tool()
async def get_alpha_details(alpha_id: str) -> Dict[str, Any]:
    """
    📋 Get detailed information about an alpha.

    Args:
        alpha_id: The ID of the alpha to retrieve

    Returns:
        Detailed alpha information
    """
    try:
        return await brain_client.get_alpha_details(alpha_id)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_datasets(
    instrument_type: str = "EQUITY",
    region: str = "USA",
    delay: int = 1,
    universe: str = "TOP3000",
    theme: str = "false",
    search: Optional[str] = None
) -> Dict[str, Any]:
    """
    📚 Get available datasets for research.

    Use this to discover what data is available for your alpha research.

    Args:
        instrument_type: Type of instruments (e.g., "EQUITY")
        region: Market region (e.g., "USA")
        delay: Data delay (0 or 1)
        universe: Universe of stocks (e.g., "TOP3000")
        theme: Theme filter
        search: Search term to filter datasets

    Returns:
        Available datasets
    """
    try:
        return await brain_client.get_datasets(instrument_type, region, delay, universe, theme, search)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_datafields(
    instrument_type: str = "EQUITY",
    region: str = "USA",
    delay: int = 1,
    universe: str = "TOP3000",
    theme: str = "false",
    dataset_id: Optional[str] = None,
    data_type: str = "",
    search: Optional[str] = None
) -> Dict[str, Any]:
    """
    🔍 Get available data fields for alpha construction.

    Use this to find specific data fields you can use in your alpha formulas.

    Args:
        instrument_type: Type of instruments (e.g., "EQUITY")
        region: Market region (e.g., "USA")
        delay: Data delay (0 or 1)
        universe: Universe of stocks (e.g., "TOP3000")
        theme: Theme filter
        dataset_id: Specific dataset ID to filter by
        data_type: Type of data (e.g., "MATRIX")
        search: Search term to filter fields

    Returns:
        Available data fields
    """
    try:
        return await brain_client.get_datafields(
            instrument_type, region, delay, universe, theme,
            dataset_id, data_type, search
        )
    except Exception as e:
        return {"error": str(e)}

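# A typical discovery flow with the two tools above (the dataset ID is
# illustrative, not guaranteed to exist): list datasets for a region, then
# pull the matrix fields of one dataset.
#
#   datasets = await get_datasets(region="USA", universe="TOP3000")
#   fields = await get_datafields(dataset_id="fundamental6", data_type="MATRIX")
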
@mcp.tool()
async def get_alpha_pnl(alpha_id: str) -> Dict[str, Any]:
    """
    📈 Get PnL (Profit and Loss) data for an alpha.

    Args:
        alpha_id: The ID of the alpha

    Returns:
        PnL data for the alpha
    """
    try:
        return await brain_client.get_alpha_pnl(alpha_id)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_user_alphas(
    stage: str = "IS",
    limit: int = 30,
    offset: int = 0,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    submission_start_date: Optional[str] = None,
    submission_end_date: Optional[str] = None,
    order: Optional[str] = None,
    hidden: Optional[bool] = None,
) -> Dict[str, Any]:
    """
    👤 Get user's alphas with advanced filtering, pagination, and sorting.

    This tool retrieves a list of your alphas, allowing for detailed filtering based on stage,
    creation date, submission date, and visibility. It also supports pagination and custom sorting.

    Args:
        stage (str): The stage of the alphas to retrieve.
            - "IS": In-Sample (alphas that have not been submitted).
            - "OS": Out-of-Sample (alphas that have been submitted).
            Defaults to "IS".
        limit (int): The maximum number of alphas to return in a single request.
            For example, `limit=50` will return at most 50 alphas. Defaults to 30.
        offset (int): The number of alphas to skip from the beginning of the list.
            Used for pagination. For example, `limit=50, offset=50` will retrieve alphas 51-100.
            Defaults to 0.
        start_date (Optional[str]): The earliest creation date for the alphas to be included.
            Filters for alphas created on or after this date.
            Example format: "2023-01-01T00:00:00Z".
        end_date (Optional[str]): The latest creation date for the alphas to be included.
            Filters for alphas created before this date.
            Example format: "2023-12-31T23:59:59Z".
        submission_start_date (Optional[str]): The earliest submission date for the alphas.
            Only applies to "OS" alphas. Filters for alphas submitted on or after this date.
            Example format: "2024-01-01T00:00:00Z".
        submission_end_date (Optional[str]): The latest submission date for the alphas.
            Only applies to "OS" alphas. Filters for alphas submitted before this date.
            Example format: "2024-06-30T23:59:59Z".
        order (Optional[str]): The sorting order for the returned alphas.
            Prefix with a hyphen (-) for descending order.
            Examples: "name" (sort by name ascending), "-dateSubmitted" (sort by submission date descending).
        hidden (Optional[bool]): Filter alphas based on their visibility.
            - `True`: Only return hidden alphas.
            - `False`: Only return non-hidden alphas.
            If not provided, both hidden and non-hidden alphas are returned.

    Returns:
        Dict[str, Any]: A dictionary containing a list of alpha details under the 'results' key,
        along with pagination information. If an error occurs, it returns a dictionary with an 'error' key.
    """
    try:
        return await brain_client.get_user_alphas(
            stage=stage,
            limit=limit,
            offset=offset,
            start_date=start_date,
            end_date=end_date,
            submission_start_date=submission_start_date,
            submission_end_date=submission_end_date,
            order=order,
            hidden=hidden,
        )
    except Exception as e:
        return {"error": str(e)}

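# A pagination sketch for get_user_alphas, relying on the 'results' key
# described above; it walks all submitted alphas 50 at a time, newest
# submissions first.
#
#   offset = 0
#   while True:
#       page = await get_user_alphas(stage="OS", limit=50, offset=offset,
#                                    order="-dateSubmitted")
#       results = page.get("results", [])
#       if not results:
#           break
#       offset += len(results)
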
@mcp.tool()
async def submit_alpha(alpha_id: str) -> Dict[str, Any]:
    """
    📤 Submit an alpha for production.

    Use this when your alpha is ready for production deployment.

    Args:
        alpha_id: The ID of the alpha to submit

    Returns:
        Submission result
    """
    try:
        success = await brain_client.submit_alpha(alpha_id)
        return {"submit_result": success, "alpha_id": alpha_id}
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_events() -> Dict[str, Any]:
    """
    🏆 Get available events and competitions.

    Returns:
        Available events and competitions
    """
    try:
        return await brain_client.get_events()
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_leaderboard(user_id: Optional[str] = None) -> Dict[str, Any]:
    """
    🏅 Get leaderboard data.

    Args:
        user_id: Optional user ID to filter results

    Returns:
        Leaderboard data
    """
    try:
        return await brain_client.get_leaderboard(user_id)
    except Exception as e:
        return {"error": str(e)}

# batch_process_alphas MCP tool removed as requested

@mcp.tool()
async def save_simulation_data(simulation_id: str, filename: str) -> Dict[str, Any]:
    """
    💾 Save simulation data to a file.

    Args:
        simulation_id: The simulation ID
        filename: Filename to save the data

    Returns:
        Save operation result
    """
    try:
        # Get simulation data
        simulation_data = await brain_client.get_simulation_status(simulation_id)

        # Save to file
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(simulation_data, f, indent=2)

        return {"success": True, "filename": filename, "simulation_id": simulation_id}
    except Exception as e:
        return {"error": str(e)}


@mcp.tool()
async def get_operators() -> Dict[str, Any]:
    """
    🔧 Get available operators for alpha creation.

    Returns:
        Dictionary containing operators list and count
    """
    try:
        return await brain_client.get_operators()
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def run_selection(
    selection: str,
    instrument_type: str = "EQUITY",
    region: str = "USA",
    delay: int = 1,
    selection_limit: int = 1000,
    selection_handling: str = "POSITIVE"
) -> Dict[str, Any]:
    """
    🎯 Run a selection query to filter instruments.

    Args:
        selection: Selection criteria
        instrument_type: Type of instruments
        region: Geographic region
        delay: Delay setting
        selection_limit: Maximum number of results
        selection_handling: How to handle selection results

    Returns:
        Selection results
    """
    try:
        return await brain_client.run_selection(
            selection, instrument_type, region, delay, selection_limit, selection_handling
        )
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_user_profile(user_id: str = "self") -> Dict[str, Any]:
    """
    👤 Get user profile information.

    Args:
        user_id: User ID (default: "self" for current user)

    Returns:
        User profile data
    """
    try:
        return await brain_client.get_user_profile(user_id)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_documentations() -> Dict[str, Any]:
    """
    📚 Get available documentations and learning materials.

    Returns:
        List of documentations
    """
    try:
        return await brain_client.get_documentations()
    except Exception as e:
        return {"error": str(e)}

# get_messages_summary MCP tool removed as requested

@mcp.tool()
async def get_messages(limit: Optional[int] = 0, offset: int = 0) -> Dict[str, Any]:
    """
    Get messages for the current user with optional pagination.

    Args:
        limit: Maximum number of messages to return (e.g., 10 for the top 10 messages).
            Can be None, 0, or an empty string (all meaning no limit), an integer,
            or a string that can be converted to int.
        offset: Number of messages to skip (for pagination).
            Can be an integer or a string that can be converted to int.

    Returns:
        Messages for the current user, optionally limited by count
    """
    # Wrap the entire function in a try-catch to handle any encoding issues
    try:
        # Enhanced parameter validation and conversion
        validated_limit = None
        validated_offset = 0

        # Validate and convert limit parameter
        if limit is not None:
            if isinstance(limit, str):
                if limit.strip() == "":
                    # Empty string means no limit
                    validated_limit = 0
                else:
                    try:
                        validated_limit = int(limit)
                        if validated_limit < 0:
                            return {"error": f"Limit must be non-negative, got: {limit}"}
                    except ValueError:
                        return {"error": f"Invalid limit value '{limit}'. Must be a number or empty string."}
            elif isinstance(limit, (int, float)):
                validated_limit = int(limit)
                if validated_limit < 0:
                    return {"error": f"Limit must be non-negative, got: {limit}"}
            else:
                return {"error": f"Invalid limit type {type(limit).__name__}. Expected int, float, str, or None."}

        # Validate and convert offset parameter
        if isinstance(offset, str):
            try:
                validated_offset = int(offset)
            except ValueError:
                return {"error": f"Invalid offset value '{offset}'. Must be a number."}
        elif isinstance(offset, (int, float)):
            validated_offset = int(offset)
        else:
            return {"error": f"Invalid offset type {type(offset).__name__}. Expected int, float, or str."}

        if validated_offset < 0:
            return {"error": f"Offset must be non-negative, got: {offset}"}

        # Log the validated parameters for debugging (without emojis to avoid encoding issues)
        try:
            print(f"get_messages called with validated parameters: limit={validated_limit}, offset={validated_offset}")
        except Exception:
            print(f"get_messages called with parameters: limit={validated_limit}, offset={validated_offset}")

        # Call the brain client with validated parameters
        result = await brain_client.get_messages(validated_limit, validated_offset)

        # Add validation info to the result
        if isinstance(result, dict) and "error" not in result:
            result["_validation"] = {
                "original_limit": limit,
                "original_offset": offset,
                "validated_limit": validated_limit,
                "validated_offset": validated_offset,
                "parameter_types": {
                    "limit": str(type(limit)),
                    "offset": str(type(offset))
                }
            }

        return result

    except UnicodeEncodeError as ue:
        # Handle encoding errors specifically
        error_msg = f"get_messages failed due to encoding error: {str(ue)}"
        try:
            print(f"ENCODING ERROR: {error_msg}")
        except Exception:
            print(f"get_messages encoding error: {str(ue)}")
        return {
            "error": error_msg,
            "error_type": "UnicodeEncodeError",
            "original_params": {
                "limit": limit,
                "offset": offset,
                "limit_type": str(type(limit)),
                "offset_type": str(type(offset))
            }
        }
    except Exception as e:
        error_msg = f"get_messages failed: {str(e)}"
        try:
            print(f"ERROR: {error_msg}")
        except Exception:
            print(f"get_messages failed: {str(e)}")
        return {
            "error": error_msg,
            "error_type": type(e).__name__,
            "original_params": {
                "limit": limit,
                "offset": offset,
                "limit_type": str(type(limit)),
                "offset_type": str(type(offset))
            }
        }

@mcp.tool()
async def get_glossary_terms(email: str = "", password: str = "", headless: bool = False) -> Dict[str, Any]:
    """
    📚 Get glossary terms from WorldQuant BRAIN forum.

    Note: This requires Selenium and is implemented in forum_functions.py

    Args:
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)
        headless: Run browser in headless mode (default: False)

    Returns:
        Glossary terms with definitions
    """
    try:
        # Load config to get credentials if not provided
        config = load_config()

        # Use provided credentials or fall back to config
        if not email and 'credentials' in config:
            email = config['credentials'].get('email', '')
        if not password and 'credentials' in config:
            password = config['credentials'].get('password', '')

        if not email or not password:
            return {"error": "Email and password required. Either provide them as arguments or configure them in user_config.json"}

        return await brain_client.get_glossary_terms(email, password, headless)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def search_forum_posts(search_query: str, email: str = "", password: str = "",
                             max_results: int = 50, headless: bool = True) -> Dict[str, Any]:
    """
    🔍 Search forum posts on WorldQuant BRAIN support site.

    Note: This requires Selenium and is implemented in forum_functions.py

    Args:
        search_query: Search term or phrase
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)
        max_results: Maximum number of results to return (default: 50)
        headless: Run browser in headless mode (default: True)

    Returns:
        Search results with analysis
    """
    try:
        # Load config to get credentials if not provided
        config = load_config()

        # Use provided credentials or fall back to config
        if not email and 'credentials' in config:
            email = config['credentials'].get('email', '')
        if not password and 'credentials' in config:
            password = config['credentials'].get('password', '')

        if not email or not password:
            return {"error": "Email and password required. Either provide them as arguments or configure them in user_config.json"}

        return await brain_client.search_forum_posts(email, password, search_query, max_results, headless)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def read_forum_post(article_id: str, email: str = "", password: str = "",
                          headless: bool = False) -> Dict[str, Any]:
    """
    📄 Get a specific forum post by article ID.

    Note: This requires Selenium and is implemented in forum_functions.py

    Args:
        article_id: The article ID to retrieve (e.g., "32984819083415-新人求模板")
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)
        headless: Run browser in headless mode (default: False)

    Returns:
        Forum post content with comments
    """
    try:
        # Load config to get credentials if not provided
        config = load_config()

        # Use provided credentials or fall back to config
        if not email and 'credentials' in config:
            email = config['credentials'].get('email', '')
        if not password and 'credentials' in config:
            password = config['credentials'].get('password', '')

        if not email or not password:
            return {"error": "Email and password required. Either provide them as arguments or configure them in user_config.json"}

        # Import and use forum functions directly
        from forum_functions import forum_client
        return await forum_client.read_full_forum_post(email, password, article_id, headless, include_comments=True)
    except ImportError:
        return {"error": "Forum functions require selenium. Use forum_functions.py directly."}
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_alpha_yearly_stats(alpha_id: str) -> Dict[str, Any]:
    """Get yearly statistics for an alpha."""
    try:
        return await brain_client.get_alpha_yearly_stats(alpha_id)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def check_correlation(alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
    """Check alpha correlation against production alphas, self alphas, or both."""
    try:
        return await brain_client.check_correlation(alpha_id, correlation_type, threshold)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_submission_check(alpha_id: str) -> Dict[str, Any]:
    """Comprehensive pre-submission check."""
    try:
        return await brain_client.get_submission_check(alpha_id)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def set_alpha_properties(alpha_id: str, name: Optional[str] = None,
                               color: Optional[str] = None, tags: Optional[List[str]] = None,
                               selection_desc: str = "None", combo_desc: str = "None") -> Dict[str, Any]:
    """Update alpha properties (name, color, tags, descriptions)."""
    try:
        return await brain_client.set_alpha_properties(alpha_id, name, color, tags, selection_desc, combo_desc)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_record_sets(alpha_id: str) -> Dict[str, Any]:
    """List available record sets for an alpha."""
    try:
        return await brain_client.get_record_sets(alpha_id)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_record_set_data(alpha_id: str, record_set_name: str) -> Dict[str, Any]:
    """Get data from a specific record set."""
    try:
        return await brain_client.get_record_set_data(alpha_id, record_set_name)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_user_activities(user_id: str, grouping: Optional[str] = None) -> Dict[str, Any]:
    """Get user activity diversity data."""
    try:
        return await brain_client.get_user_activities(user_id, grouping)
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
async def get_pyramid_multipliers() -> Dict[str, Any]:
    """Get current pyramid multipliers showing BRAIN's encouragement levels."""
    try:
        return await brain_client.get_pyramid_multipliers()
    except Exception as e:
        return {"error": str(e)}

@mcp.tool()
|
|
2463
|
+
async def get_pyramid_alphas(start_date: Optional[str] = None,
|
|
2464
|
+
end_date: Optional[str] = None) -> Dict[str, Any]:
|
|
2465
|
+
"""Get user's current alpha distribution across pyramid categories."""
|
|
2466
|
+
try:
|
|
2467
|
+
return await brain_client.get_pyramid_alphas(start_date, end_date)
|
|
2468
|
+
except Exception as e:
|
|
2469
|
+
return {"error": str(e)}
|
|
2470
|
+
|
|
2471
|
+
@mcp.tool()
|
|
2472
|
+
async def get_user_competitions(user_id: Optional[str] = None) -> Dict[str, Any]:
|
|
2473
|
+
"""Get list of competitions that the user is participating in."""
|
|
2474
|
+
try:
|
|
2475
|
+
return await brain_client.get_user_competitions(user_id)
|
|
2476
|
+
except Exception as e:
|
|
2477
|
+
return {"error": str(e)}
|
|
2478
|
+
|
|
2479
|
+
@mcp.tool()
|
|
2480
|
+
async def get_competition_details(competition_id: str) -> Dict[str, Any]:
|
|
2481
|
+
"""Get detailed information about a specific competition."""
|
|
2482
|
+
try:
|
|
2483
|
+
return await brain_client.get_competition_details(competition_id)
|
|
2484
|
+
except Exception as e:
|
|
2485
|
+
return {"error": str(e)}
|
|
2486
|
+
|
|
2487
|
+
@mcp.tool()
|
|
2488
|
+
async def get_competition_agreement(competition_id: str) -> Dict[str, Any]:
|
|
2489
|
+
"""Get the rules, terms, and agreement for a specific competition."""
|
|
2490
|
+
try:
|
|
2491
|
+
return await brain_client.get_competition_agreement(competition_id)
|
|
2492
|
+
except Exception as e:
|
|
2493
|
+
return {"error": str(e)}
|
|
2494
|
+
|
|
2495
|
+
@mcp.tool()
|
|
2496
|
+
async def get_platform_setting_options() -> Dict[str, Any]:
|
|
2497
|
+
"""Discover valid simulation setting options (instrument types, regions, delays, universes, neutralization).
|
|
2498
|
+
|
|
2499
|
+
Use this when a simulation request might contain an invalid/mismatched setting. If an AI or user supplies
|
|
2500
|
+
incorrect parameters (e.g., wrong region for an instrument type), call this tool to retrieve the authoritative
|
|
2501
|
+
option sets and correct the inputs before proceeding.
|
|
2502
|
+
|
|
2503
|
+
Returns:
|
|
2504
|
+
A structured list of valid combinations and choice lists to validate or fix simulation settings.
|
|
2505
|
+
"""
|
|
2506
|
+
try:
|
|
2507
|
+
return await brain_client.get_platform_setting_options()
|
|
2508
|
+
except Exception as e:
|
|
2509
|
+
return {"error": str(e)}
|
|
2510
|
+
|
|
2511
|
+
@mcp.tool()
|
|
2512
|
+
async def performance_comparison(alpha_id: str, team_id: Optional[str] = None,
|
|
2513
|
+
competition: Optional[str] = None) -> Dict[str, Any]:
|
|
2514
|
+
"""Get performance comparison data for an alpha."""
|
|
2515
|
+
try:
|
|
2516
|
+
return await brain_client.performance_comparison(alpha_id, team_id, competition)
|
|
2517
|
+
except Exception as e:
|
|
2518
|
+
return {"error": str(e)}
|
|
2519
|
+
|
|
2520
|
+
# combine_test_results MCP tool removed as requested
|
|
2521
|
+
|
|
2522
|
+
@mcp.tool()
|
|
2523
|
+
async def expand_nested_data(data: List[Dict[str, Any]], preserve_original: bool = True) -> List[Dict[str, Any]]:
|
|
2524
|
+
"""Flatten complex nested data structures into tabular format."""
|
|
2525
|
+
try:
|
|
2526
|
+
return await brain_client.expand_nested_data(data, preserve_original)
|
|
2527
|
+
except Exception as e:
|
|
2528
|
+
return {"error": str(e)}
|
|
2529
|
+
|
|
2530
|
+
# generate_alpha_links MCP tool removed as requested
|
|
2531
|
+
|
|
@mcp.tool()
async def read_specific_documentation(page_id: str) -> Dict[str, Any]:
    """Retrieve detailed content of a specific documentation page/article."""
    try:
        return await brain_client.read_specific_documentation(page_id)
    except Exception as e:
        return {"error": str(e)}

# Badge status MCP tool removed as requested

@mcp.tool()
async def create_multiSim(
    alpha_expressions: List[str],
    instrument_type: str = "EQUITY",
    region: str = "USA",
    universe: str = "TOP3000",
    delay: int = 1,
    decay: float = 0.0,
    neutralization: str = "NONE",
    truncation: float = 0.0,
    test_period: str = "P0Y0M",
    unit_handling: str = "VERIFY",
    nan_handling: str = "OFF",
    language: str = "FASTEXPR",
    visualization: bool = True,
    pasteurization: str = "ON",
    max_trade: str = "OFF"
) -> Dict[str, Any]:
    """
    🚀 Create multiple regular alpha simulations on the BRAIN platform in a single request.

    This tool creates a multisimulation with multiple regular alpha expressions,
    waits for all simulations to complete, and returns detailed results for each alpha.

    ⏰ NOTE: Multisimulations can take 8+ minutes to complete. This tool will wait
    for the entire process and return comprehensive results.

    Call get_platform_setting_options to get the valid options for the simulation.

    Args:
        alpha_expressions: List of alpha expressions (2-8 expressions required)
        instrument_type: Type of instruments (default: "EQUITY")
        region: Market region (default: "USA")
        universe: Universe of stocks (default: "TOP3000")
        delay: Data delay (default: 1)
        decay: Decay value (default: 0.0)
        neutralization: Neutralization method (default: "NONE")
        truncation: Truncation value (default: 0.0)
        test_period: Test period (default: "P0Y0M")
        unit_handling: Unit handling method (default: "VERIFY")
        nan_handling: NaN handling method (default: "OFF")
        language: Expression language (default: "FASTEXPR")
        visualization: Enable visualization (default: True)
        pasteurization: Pasteurization setting (default: "ON")
        max_trade: Max trade setting (default: "OFF")

    Returns:
        Dictionary containing multisimulation results and individual alpha details
    """
    try:
        # Validate input
        if len(alpha_expressions) < 2:
            return {"error": "At least 2 alpha expressions are required"}
        if len(alpha_expressions) > 8:
            return {"error": "Maximum 8 alpha expressions allowed per request"}

        # Build one REGULAR simulation payload per expression
        multisimulation_data = []
        for alpha_expr in alpha_expressions:
            simulation_item = {
                'type': 'REGULAR',
                'settings': {
                    'instrumentType': instrument_type,
                    'region': region,
                    'universe': universe,
                    'delay': delay,
                    'decay': decay,
                    'neutralization': neutralization,
                    'truncation': truncation,
                    'pasteurization': pasteurization,
                    'unitHandling': unit_handling,
                    'nanHandling': nan_handling,
                    'language': language,
                    'visualization': visualization,
                    'testPeriod': test_period,
                    'maxTrade': max_trade
                },
                'regular': alpha_expr
            }
            multisimulation_data.append(simulation_item)

        # Send multisimulation request
        response = brain_client.session.post(f"{brain_client.base_url}/simulations", json=multisimulation_data)

        if response.status_code != 201:
            return {"error": f"Failed to create multisimulation. Status: {response.status_code}. Call the get_operators, get_platform_setting_options, and get_datafields MCP tools to verify that the operators, simulation settings, and data fields you used are valid."}

        # Get multisimulation location
        location = response.headers.get('Location', '')
        if not location:
            return {"error": "No Location header in multisimulation response"}

        # Wait for children to appear and get results
        return await _wait_for_multisimulation_completion(location, len(alpha_expressions))

    except Exception as e:
        return {"error": f"Error creating multisimulation: {str(e)}. Call the get_operators, get_platform_setting_options, and get_datafields MCP tools to verify that the operators, simulation settings, and data fields you used are valid."}

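# Hedged usage sketch (illustrative only; assumes an authenticated
# brain_client session, and the two FASTEXPR expressions are placeholders,
# not recommended alphas):
async def _example_create_multisim() -> None:
    result = await create_multiSim(
        alpha_expressions=["rank(close)", "-rank(volume)"],
        region="USA",
        universe="TOP3000",
    )
    if "error" in result:
        print(result["error"])
    else:
        print(result["total_created"], "alphas simulated")
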
async def _wait_for_multisimulation_completion(location: str, expected_children: int) -> Dict[str, Any]:
    """Wait for a multisimulation to complete and return results."""
    try:
        # Simple progress indicator for users
        print("Waiting for multisimulation to complete... (this may take several minutes)")
        print(f"Expected {expected_children} alpha simulations")
        print()

        # Wait for children to appear - tolerant of 8+ minute multisimulations
        children = []
        max_wait_attempts = 200  # sized for 8+ minute multisimulations
        wait_attempt = 0

        while wait_attempt < max_wait_attempts and len(children) == 0:
            wait_attempt += 1

            try:
                multisim_response = brain_client.session.get(location)
                if multisim_response.status_code == 200:
                    multisim_data = multisim_response.json()
                    children = multisim_data.get('children', [])

                    if children:
                        break
                    else:
                        # Wait before the next attempt; honor the server's Retry-After hint
                        retry_after = multisim_response.headers.get("Retry-After", 5)
                        wait_time = float(retry_after)
                        await asyncio.sleep(wait_time)
                else:
                    await asyncio.sleep(5)
            except Exception:
                await asyncio.sleep(5)

        if not children:
            return {"error": f"Children did not appear within {max_wait_attempts} attempts (multisimulation may still be processing)"}

        # Process each child to get alpha results
        alpha_results = []
        for i, child_id in enumerate(children):
            try:
                # The children are full URLs, not just IDs
                child_url = child_id if child_id.startswith('http') else f"{brain_client.base_url}/simulations/{child_id}"

                # Wait for this alpha to complete - tolerant timing
                finished = False
                max_alpha_attempts = 100  # sized for longer alpha processing
                alpha_attempt = 0

                while not finished and alpha_attempt < max_alpha_attempts:
                    alpha_attempt += 1

                    try:
                        alpha_progress = brain_client.session.get(child_url)
                        if alpha_progress.status_code == 200:
                            alpha_data = alpha_progress.json()
                            # Header values arrive as strings, so compare numerically;
                            # a missing or zero Retry-After means the simulation is done.
                            retry_after = float(alpha_progress.headers.get("Retry-After", 0))

                            if retry_after == 0:
                                finished = True
                                break
                            else:
                                await asyncio.sleep(retry_after)
                        else:
                            await asyncio.sleep(5)
                    except Exception:
                        await asyncio.sleep(5)

                if finished:
                    # Get the alpha ID from the completed simulation
                    alpha_id = alpha_data.get("alpha")
                    if alpha_id:
                        # Now get the actual alpha details from the alpha endpoint
                        alpha_details = brain_client.session.get(f"{brain_client.base_url}/alphas/{alpha_id}")
                        if alpha_details.status_code == 200:
                            alpha_detail_data = alpha_details.json()
                            alpha_results.append({
                                'alpha_id': alpha_id,
                                'location': child_url,
                                'details': alpha_detail_data
                            })
                        else:
                            alpha_results.append({
                                'alpha_id': alpha_id,
                                'location': child_url,
                                'error': f'Failed to get alpha details: {alpha_details.status_code}'
                            })
                    else:
                        alpha_results.append({
                            'location': child_url,
                            'error': 'No alpha ID found in completed simulation'
                        })
                else:
                    alpha_results.append({
                        'location': child_url,
                        'error': f'Alpha simulation did not complete within {max_alpha_attempts} attempts'
                    })

            except Exception as e:
                alpha_results.append({
                    'location': f"child_{i+1}",
                    'error': str(e)
                })

        # Return comprehensive results
        print(f"Multisimulation completed! Retrieved {len(alpha_results)} alpha results")
        return {
            'success': True,
            'message': f'Successfully created {expected_children} regular alpha simulations',
            'total_requested': expected_children,
            'total_created': len(alpha_results),
            'multisimulation_id': location.split('/')[-1],
            'multisimulation_location': location,
            'alpha_results': alpha_results,
            'note': "If an alpha has a negative Sharpe, you can flip its sign by adding a minus sign in front of the expression's last line, then plan the next step."
        }

    except Exception as e:
        return {"error": f"Error waiting for multisimulation completion: {str(e)}. Call the get_operators, get_platform_setting_options, and get_datafields MCP tools to verify that the operators, simulation settings, and data fields you used are valid."}

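# Hedged post-processing sketch (illustrative only): pulling a Sharpe figure
# out of each completed alpha. The 'is' (in-sample) / 'sharpe' path assumed
# here mirrors the platform's stats block but is an assumption, not a
# guaranteed schema.
def _example_summarize_results(result: Dict[str, Any]) -> List[Dict[str, Any]]:
    summary = []
    for item in result.get("alpha_results", []):
        stats = item.get("details", {}).get("is", {})
        summary.append({
            "alpha_id": item.get("alpha_id"),
            "sharpe": stats.get("sharpe"),
            "error": item.get("error"),
        })
    return summary
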
@mcp.tool()
async def get_daily_and_quarterly_payment(email: str = "", password: str = "") -> Dict[str, Any]:
    """
    Get daily and quarterly payment information from the WorldQuant BRAIN platform.

    This function retrieves both base payments (daily alpha performance payments) and
    other payments (competition rewards, quarterly payments, referrals, etc.).

    Args:
        email: Your BRAIN platform email address (optional if in config)
        password: Your BRAIN platform password (optional if in config)

    Returns:
        Dictionary containing base payment and other payment data with summaries and detailed records
    """
    try:
        # Authenticate if credentials provided
        if email and password:
            auth_result = await brain_client.authenticate(email, password)
            if auth_result.get('status') != 'authenticated':
                return {"error": f"Authentication failed: {auth_result.get('message', 'Unknown error')}"}
        else:
            # Try to use existing session or config
            config = await manage_config("get")
            if not config.get('is_authenticated'):
                return {"error": "Not authenticated. Please provide email and password or authenticate first."}

        # Set required Accept header for API v3.0
        header = {"Accept": "application/json;version=3.0"}

        # Get base payment data
        base_payment_response = brain_client.session.get(
            'https://api.worldquantbrain.com/users/self/activities/base-payment', headers=header
        )

        if base_payment_response.status_code != 200:
            return {"error": f"Failed to get base payment data: {base_payment_response.status_code}"}

        base_payment_data = base_payment_response.json()

        # Get other payment data
        other_payment_response = brain_client.session.get(
            'https://api.worldquantbrain.com/users/self/activities/other-payment', headers=header
        )

        if other_payment_response.status_code != 200:
            return {"error": f"Failed to get other payment data: {other_payment_response.status_code}"}

        other_payment_data = other_payment_response.json()

        # Return comprehensive payment information
        return {
            "success": True,
            "base_payment": {
                "summary": {
                    "yesterday": base_payment_data.get("yesterday"),
                    "current_quarter": base_payment_data.get("current"),
                    "previous_quarter": base_payment_data.get("previous"),
                    "year_to_date": base_payment_data.get("ytd"),
                    "total_all_time": base_payment_data.get("total"),
                    "currency": base_payment_data.get("currency")
                },
                "daily_records": base_payment_data.get("records", {}).get("records", []),
                "schema": base_payment_data.get("records", {}).get("schema")
            },
            "other_payment": {
                "total_all_time": other_payment_data.get("total"),
                "special_payments": other_payment_data.get("records", {}).get("records", []),
                "schema": other_payment_data.get("records", {}).get("schema"),
                "currency": other_payment_data.get("currency")
            },
            "timestamp": datetime.now().isoformat()
        }

    except Exception as e:
        return {"error": f"Error retrieving payment information: {str(e)}"}

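# Hedged tabulation sketch (illustrative only): loading the daily records into
# a DataFrame for inspection. Record field names are whatever the returned
# schema reports; the to-DataFrame step itself assumes nothing about them.
async def _example_payment_table() -> None:
    payments = await get_daily_and_quarterly_payment()
    if payments.get("success"):
        records = payments["base_payment"]["daily_records"]
        df = pd.DataFrame(records)
        print(df.head())
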
# New MCP tool: get_SimError_detail
from typing import Sequence

@mcp.tool()
async def get_SimError_detail(locations: Sequence[str]) -> dict:
    """
    Fetch and parse error/status from multiple simulation locations (URLs).

    Args:
        locations: List of simulation result URLs (e.g., /simulations/{id})

    Returns:
        Dict with a "results" list of dicts, each holding the location, an error message, and the raw response
    """
    results = []
    for loc in locations:
        try:
            resp = brain_client.session.get(loc)
            if resp.status_code != 200:
                results.append({
                    "location": loc,
                    "error": f"HTTP {resp.status_code}",
                    "raw": resp.text
                })
                continue
            data = resp.json() if resp.text else {}
            # Try to extract an error message or status
            error_msg = data.get("error") or data.get("message")
            # If the alpha ID is missing, include that info
            if not data.get("alpha"):
                error_msg = error_msg or "Simulation did not complete; if this was a multisimulation, check the other children locations in your request"
            results.append({
                "location": loc,
                "error": error_msg,
                "raw": data
            })
        except Exception as e:
            results.append({
                "location": loc,
                "error": str(e),
                "raw": None
            })
    return {"results": results}

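# Hedged usage sketch (illustrative only; the simulation IDs below are
# placeholders, not real resources):
async def _example_sim_error_lookup() -> None:
    report = await get_SimError_detail([
        "https://api.worldquantbrain.com/simulations/abc123",
        "https://api.worldquantbrain.com/simulations/def456",
    ])
    for entry in report["results"]:
        print(entry["location"], "->", entry["error"])
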
if __name__ == "__main__":
    try:
        print("WorldQuant BRAIN MCP Server Starting...", file=sys.stderr)
        mcp.run()
    except Exception as e:
        print(f"Failed to start MCP server: {e}", file=sys.stderr)
        sys.exit(1)