cnhkmcp-2.1.4-py3-none-any.whl → cnhkmcp-2.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnhkmcp/__init__.py +126 -126
- cnhkmcp/untracked/back_up/forum_functions.py +998 -0
- cnhkmcp/untracked/back_up/platform_functions.py +2886 -0
- cnhkmcp/untracked/brain-consultant.md +31 -0
- cnhkmcp/untracked/forum_functions.py +350 -941
- cnhkmcp/untracked/platform_functions.py +445 -730
- cnhkmcp/untracked//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +3 -1
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.5.dist-info}/METADATA +1 -1
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.5.dist-info}/RECORD +13 -11
- cnhkmcp/untracked/APP/simulator/wqb20260107015647.log +0 -57
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.5.dist-info}/WHEEL +0 -0
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.5.dist-info}/entry_points.txt +0 -0
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.5.dist-info}/licenses/LICENSE +0 -0
- {cnhkmcp-2.1.4.dist-info → cnhkmcp-2.1.5.dist-info}/top_level.txt +0 -0
cnhkmcp/__init__.py

@@ -1,40 +1,33 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """
 WorldQuant BRAIN MCP Server - Python Version
 A comprehensive Model Context Protocol (MCP) server for WorldQuant BRAIN platform integration.
 """

-# Ensure proper encoding handling for Windows
-import sys
-import os
-
-# Note: We'll handle encoding issues in individual functions rather than
-# overriding system streams to avoid conflicts with MCP server
-
 import json
 import time
 import asyncio
 import logging
-from typing import Dict, List, Optional, Any, Union
+from typing import Dict, List, Optional, Any, Union, Tuple
+import re
+import base64
+from bs4 import BeautifulSoup
 from dataclasses import dataclass, asdict
 from datetime import datetime, timedelta
+import os
+import sys
 import math
 from time import sleep

 import requests
 import pandas as pd
-from selenium import webdriver
-from selenium.webdriver.chrome.options import Options
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.ui import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
-from bs4 import BeautifulSoup
 from mcp.server.fastmcp import FastMCP
 from pydantic import BaseModel, Field, EmailStr

 from pathlib import Path

+# Import the new forum client
+from forum_functions import forum_client

 # Configure logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -178,33 +171,18 @@ class BrainApiClient:
         self.log("🌐 Starting biometric authentication...", "INFO")

         try:
-            # Import
-            from
-            from selenium.webdriver.chrome.options import Options
+            # Import playwright for browser automation
+            from playwright.async_api import async_playwright
             import time

-
-
-
-
-
-            driver = None
-            try:
-                # Open browser with timeout
-                driver = webdriver.Chrome(options=options)
-                # Set a short timeout so it doesn't wait forever
-                driver.set_page_load_timeout(80) # Only wait 5 seconds
-
+            async with async_playwright() as p:
+                browser = await p.chromium.launch(headless=False)
+                page = await browser.new_page()
+
                 self.log("🌐 Opening browser for biometric authentication...", "INFO")
-
-
-
-                driver.get(biometric_url)
-                self.log("Browser page loaded successfully", "SUCCESS")
-            except Exception as timeout_error:
-                self.log(f"⚠️ Page load timeout (expected): {str(timeout_error)[:50]}...", "WARNING")
-                self.log("Browser window is open for biometric authentication", "INFO")
-
+                await page.goto(biometric_url)
+                self.log("Browser page loaded successfully", "SUCCESS")
+
                 # Print instructions
                 print("\n" + "="*60, file=sys.stderr)
                 print("BIOMETRIC AUTHENTICATION REQUIRED", file=sys.stderr)
@@ -213,24 +191,23 @@ class BrainApiClient:
                 print("Complete the biometric authentication in the browser", file=sys.stderr)
                 print("The system will automatically check when you're done...", file=sys.stderr)
                 print("="*60, file=sys.stderr)
-
+
                 # Keep checking until authentication is complete
                 max_attempts = 60 # 5 minutes maximum (60 * 5 seconds)
                 attempt = 0
-
+
                 while attempt < max_attempts:
                     time.sleep(5) # Check every 5 seconds
                     attempt += 1
-
+
                     # Check if authentication completed
                     check_response = self.session.post(biometric_url)
                     self.log(f"🔄 Checking authentication status (attempt {attempt}/{max_attempts}): {check_response.status_code}", "INFO")
-
+
                     if check_response.status_code == 201:
                         self.log("Biometric authentication successful!", "SUCCESS")
-
-
-                        driver.quit()
+
+                        await browser.close()

                         # Check JWT token
                         jwt_token = self.session.cookies.get('t')
@@ -247,19 +224,9 @@ class BrainApiClient:
                             'has_jwt': jwt_token is not None
                         }

-
-                if driver:
-                    driver.quit()
+                await browser.close()
                 raise Exception("Biometric authentication timed out")
-
-            except Exception as driver_error:
-                if driver:
-                    try:
-                        driver.quit()
-                    except:
-                        pass
-                raise Exception(f"Browser automation error: {driver_error}")
-
+
         except Exception as e:
             self.log(f"❌ Biometric authentication failed: {str(e)}", "ERROR")
             raise
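Note: the hunks above swap the Selenium-driven biometric flow for Playwright. The snippet below is a minimal standalone sketch of the same pattern, not the package's own code: a visible Chromium window is opened, the biometric URL is polled every 5 seconds until the platform answers 201, then the browser is closed. The function name is invented for illustration; it assumes `playwright` is installed along with its Chromium build (`playwright install chromium`).

    import asyncio
    import requests
    from playwright.async_api import async_playwright

    async def biometric_login(biometric_url: str, session: requests.Session,
                              max_attempts: int = 60) -> bool:
        """Open a visible browser so the user can finish biometric auth,
        then poll the same URL until the platform reports success (HTTP 201)."""
        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=False)  # user must see the page
            page = await browser.new_page()
            await page.goto(biometric_url)
            try:
                for _ in range(max_attempts):       # 60 * 5 s = 5 minutes, as in the diff
                    await asyncio.sleep(5)
                    if session.post(biometric_url).status_code == 201:
                        return True                 # JWT cookie is now on the session
                return False
            finally:
                await browser.close()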
@@ -289,11 +256,19 @@ class BrainApiClient:

     async def ensure_authenticated(self):
         """Ensure authentication is valid, re-authenticate if needed."""
-        if not await self.is_authenticated()
+        if not await self.is_authenticated():
+            if not self.auth_credentials:
+                self.log("No credentials in memory, loading from config...", "INFO")
+                config = load_config()
+                creds = config.get("credentials", {})
+                email = creds.get("email")
+                password = creds.get("password")
+                if not email or not password:
+                    raise Exception("Authentication credentials not found in config. Please authenticate first.")
+                self.auth_credentials = {'email': email, 'password': password}
+
             self.log("🔄 Re-authenticating...", "INFO")
             await self.authenticate(self.auth_credentials['email'], self.auth_credentials['password'])
-        elif not self.auth_credentials:
-            raise Exception("Not authenticated and no stored credentials available. Please call authenticate() first.")

     async def get_authentication_status(self) -> Optional[Dict[str, Any]]:
         """Get current authentication status and user info."""
@@ -313,7 +288,7 @@ class BrainApiClient:
             self.log("🚀 Creating simulation...", "INFO")

             # Prepare settings based on simulation type
-            settings_dict = simulation_data.settings.
+            settings_dict = simulation_data.settings.model_dump()

             # Remove fields based on simulation type
             if simulation_data.type == "REGULAR":
@@ -321,9 +296,6 @@ class BrainApiClient:
                 settings_dict.pop('selectionHandling', None)
                 settings_dict.pop('selectionLimit', None)
                 settings_dict.pop('componentActivation', None)
-            elif simulation_data.type == "SUPER":
-                # SUPER type keeps all fields
-                pass

             # Filter out None values from settings
             settings_dict = {k: v for k, v in settings_dict.items() if v is not None}
@@ -347,21 +319,14 @@ class BrainApiClient:
             # Filter out None values from entire payload
             payload = {k: v for k, v in payload.items() if v is not None}

-            # Debug: print payload for troubleshooting
-            # print("📋 Sending payload:")
-            # print(json.dumps(payload, indent=2))
-
             response = self.session.post(f"{self.base_url}/simulations", json=payload)
             response.raise_for_status()

-            # Handle empty response body - extract simulation ID from Location header
             location = response.headers.get('Location', '')
             simulation_id = location.split('/')[-1] if location else None

             self.log(f"Simulation created with ID: {simulation_id}", "SUCCESS")

-
-            finished = False
             while True:
                 simulation_progress = self.session.get(location)
                 if simulation_progress.headers.get("Retry-After", 0) == 0:
@@ -379,9 +344,6 @@ class BrainApiClient:
             self.log(f"❌ Failed to create simulation: {str(e)}", "ERROR")
             raise

-    # get_simulation_status function removed as requested
-    # wait_for_simulation function removed as requested
-
     async def get_alpha_details(self, alpha_id: str) -> Dict[str, Any]:
         """Get detailed information about an alpha."""
         await self.ensure_authenticated()
@@ -393,36 +355,8 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get alpha details: {str(e)}", "ERROR")
             raise
-
-    def _is_atom(self, detail: Optional[Dict[str, Any]]) -> bool:
-        """Match atom detection used in extract_regular_alphas.py:
-        - Primary signal: 'classifications' entries containing 'SINGLE_DATA_SET'
-        - Fallbacks: tags list contains 'atom' or classification id/name contains 'ATOM'
-        """
-        if not detail or not isinstance(detail, dict):
-            return False
-
-        classifications = detail.get('classifications') or []
-        for c in classifications:
-            cid = (c.get('id') or c.get('name') or '')
-            if isinstance(cid, str) and 'SINGLE_DATA_SET' in cid:
-                return True
-
-        # Fallbacks
-        tags = detail.get('tags') or []
-        if isinstance(tags, list):
-            for t in tags:
-                if isinstance(t, str) and t.strip().lower() == 'atom':
-                    return True
-
-        for c in classifications:
-            cid = (c.get('id') or c.get('name') or '')
-            if isinstance(cid, str) and 'ATOM' in cid.upper():
-                return True
-
-        return False

-    async def get_datasets(self, instrument_type: str = "EQUITY", region: str = "USA",
+    async def get_datasets(self, instrument_type: str = "EQUITY", region: str = "USA",
                            delay: int = 1, universe: str = "TOP3000", theme: str = "false", search: Optional[str] = None) -> Dict[str, Any]:
         """Get available datasets."""
         await self.ensure_authenticated()
@@ -441,9 +375,9 @@ class BrainApiClient:

             response = self.session.get(f"{self.base_url}/data-sets", params=params)
             response.raise_for_status()
-
-
-            return
+            response_json = response.json()
+            response_json['extraNote'] = "if your returned result is 0, you may want to check your parameter by using get_platform_setting_options tool to got correct parameter"
+            return response_json
         except Exception as e:
             self.log(f"Failed to get datasets: {str(e)}", "ERROR")
             raise
@@ -475,9 +409,9 @@ class BrainApiClient:

             response = self.session.get(f"{self.base_url}/data-fields", params=params)
             response.raise_for_status()
-
-
-            return
+            response_json = response.json()
+            response_json['extraNote'] = "if your returned result is 0, you may want to check your parameter by using get_platform_setting_options tool to got correct parameter"
+            return response_json
         except Exception as e:
             self.log(f"Failed to get datafields: {str(e)}", "ERROR")
             raise
@@ -639,6 +573,150 @@ class BrainApiClient:
             self.log(f"Failed to get leaderboard: {str(e)}", "ERROR")
             raise

+    def _is_atom(self, detail: Optional[Dict[str, Any]]) -> bool:
+        """Match atom detection used in extract_regular_alphas.py:
+        - Primary signal: 'classifications' entries containing 'SINGLE_DATA_SET'
+        - Fallbacks: tags list contains 'atom' or classification id/name contains 'ATOM'
+        """
+        if not detail or not isinstance(detail, dict):
+            return False
+
+        classifications = detail.get('classifications') or []
+        for c in classifications:
+            cid = (c.get('id') or c.get('name') or '')
+            if isinstance(cid, str) and 'SINGLE_DATA_SET' in cid:
+                return True
+
+        # Fallbacks
+        tags = detail.get('tags') or []
+        if isinstance(tags, list):
+            for t in tags:
+                if isinstance(t, str) and t.strip().lower() == 'atom':
+                    return True
+
+        for c in classifications:
+            cid = (c.get('id') or c.get('name') or '')
+            if isinstance(cid, str) and 'ATOM' in cid.upper():
+                return True
+
+        return False
+
+    async def value_factor_trendScore(self, start_date: str, end_date: str) -> Dict[str, Any]:
+        """Compute diversity score for regular alphas in a date range.
+
+        Description:
+        This function calculate the diversity of the users' submission, by checking the diversity, we can have a good understanding on the valuefactor's trend.
+        value factor of a user is defiend by This diversity score, which measures three key aspects of work output: the proportion of works
+        with the "Atom" tag (S_A), atom proportion, the breadth of pyramids covered (S_P), and how evenly works
+        are distributed across those pyramids (S_H). Calculated as their product, it rewards
+        strong performance across all three dimensions—encouraging more Atom-tagged works,
+        wider pyramid coverage, and balanced distribution—with weaknesses in any area lowering
+        the total score significantly.
+
+        Inputs (hints for AI callers):
+        - start_date (str): ISO UTC start datetime, e.g. '2025-08-14T00:00:00Z'
+        - end_date (str): ISO UTC end datetime, e.g. '2025-08-18T23:59:59Z'
+        - Note: this tool always uses 'OS' (submission dates) to define the window; callers do not need to supply a stage.
+        - Note: P_max (total number of possible pyramids) is derived from the platform
+          pyramid-multipliers endpoint and not supplied by callers.
+
+        Returns (compact JSON): {
+            'diversity_score': float,
+            'N': int,  # total regular alphas in window
+            'A': int,  # number of Atom-tagged works (is_single_data_set)
+            'P': int,  # pyramid coverage count in the sample
+            'P_max': int,  # used max for normalization
+            'S_A': float, 'S_P': float, 'S_H': float,
+            'per_pyramid_counts': {pyramid_name: count}
+        }
+        """
+        # Fetch user alphas (always use OS / submission dates per product policy)
+        await self.ensure_authenticated()
+        alphas_resp = await self.get_user_alphas(stage='OS', limit=500, submission_start_date=start_date, submission_end_date=end_date)
+
+        if not isinstance(alphas_resp, dict) or 'results' not in alphas_resp:
+            return {'error': 'Unexpected response from get_user_alphas', 'raw': alphas_resp}
+
+        alphas = alphas_resp['results']
+        regular = [a for a in alphas if a.get('type') == 'REGULAR']
+
+        # Fetch details for each regular alpha
+        pyramid_list = []
+        atom_count = 0
+        per_pyramid = {}
+        for a in regular:
+            try:
+                detail = await self.get_alpha_details(a.get('id'))
+            except Exception:
+                continue
+
+            is_atom = self._is_atom(detail)
+            if is_atom:
+                atom_count += 1
+
+            # Extract pyramids
+            ps = []
+            if isinstance(detail.get('pyramids'), list):
+                ps = [p.get('name') for p in detail.get('pyramids') if p.get('name')]
+            else:
+                pt = detail.get('pyramidThemes') or {}
+                pss = pt.get('pyramids') if isinstance(pt, dict) else None
+                if pss and isinstance(pss, list):
+                    ps = [p.get('name') for p in pss if p.get('name')]
+
+            for p in ps:
+                pyramid_list.append(p)
+                per_pyramid[p] = per_pyramid.get(p, 0) + 1
+
+        N = len(regular)
+        A = atom_count
+        P = len(per_pyramid)
+
+        # Determine P_max similarly to the script: use pyramid multipliers if available
+        P_max = None
+        try:
+            pm = await self.get_pyramid_multipliers()
+            if isinstance(pm, dict) and 'pyramids' in pm:
+                pyramids_list = pm.get('pyramids') or []
+                P_max = len(pyramids_list)
+        except Exception:
+            P_max = None
+
+        if not P_max or P_max <= 0:
+            P_max = max(P, 1)
+
+        # Component scores
+        S_A = (A / N) if N > 0 else 0.0
+        S_P = (P / P_max) if P_max > 0 else 0.0
+
+        # Entropy
+        S_H = 0.0
+        if P <= 1 or not per_pyramid:
+            S_H = 0.0
+        else:
+            total_occ = sum(per_pyramid.values())
+            H = 0.0
+            for cnt in per_pyramid.values():
+                q = cnt / total_occ if total_occ > 0 else 0
+                if q > 0:
+                    H -= q * math.log2(q)
+            max_H = math.log2(P) if P > 0 else 1
+            S_H = (H / max_H) if max_H > 0 else 0.0
+
+        diversity_score = S_A * S_P * S_H
+
+        return {
+            'diversity_score': diversity_score,
+            'N': N,
+            'A': A,
+            'P': P,
+            'P_max': P_max,
+            'S_A': S_A,
+            'S_P': S_P,
+            'S_H': S_H,
+            'per_pyramid_counts': per_pyramid
+        }
+
     async def get_operators(self) -> Dict[str, Any]:
         """Get available operators for alpha creation."""
         await self.ensure_authenticated()
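Note: the new value_factor_trendScore combines three components — S_A = A/N (Atom proportion), S_P = P/P_max (pyramid coverage) and S_H = H/log2(P) (normalized Shannon entropy of the per-pyramid counts) — into diversity_score = S_A * S_P * S_H. The small self-contained sketch below, with made-up counts, reproduces that arithmetic outside the client class:

    import math

    # Toy example (invented numbers): 10 REGULAR alphas, 4 tagged Atom,
    # spread over 3 of 12 possible pyramids.
    N, A, P_max = 10, 4, 12
    per_pyramid = {"Analyst": 5, "Model": 3, "News": 2}
    P = len(per_pyramid)

    S_A = A / N                                    # 0.4   (Atom proportion)
    S_P = P / P_max                                # 0.25  (pyramid coverage)

    total = sum(per_pyramid.values())
    H = -sum((c / total) * math.log2(c / total) for c in per_pyramid.values())
    S_H = H / math.log2(P)                         # ≈ 0.94 (evenness of the spread)

    diversity_score = S_A * S_P * S_H              # ≈ 0.094
    print(round(S_A, 3), round(S_P, 3), round(S_H, 3), round(diversity_score, 3))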
@@ -656,7 +734,7 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get operators: {str(e)}", "ERROR")
             raise
-
+
     async def run_selection(
         self,
         selection: str,
@@ -697,7 +775,7 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get user profile: {str(e)}", "ERROR")
             raise
-
+
     async def get_documentations(self) -> Dict[str, Any]:
         """Get available documentations and learning materials."""
         await self.ensure_authenticated()
@@ -709,9 +787,7 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get documentations: {str(e)}", "ERROR")
             raise
-
-    # get_messages_summary function removed as requested
-
+
     async def get_messages(self, limit: Optional[int] = None, offset: int = 0) -> Dict[str, Any]:
         """Get messages for the current user with optional pagination.

@@ -847,47 +923,32 @@ class BrainApiClient:
             self.log(f"Failed to get messages: {str(e)}", "ERROR")
             raise

-    async def get_glossary_terms(self, email: str, password: str
+    async def get_glossary_terms(self, email: str, password: str) -> List[Dict[str, str]]:
         """Get glossary terms from forum."""
         try:
-
-            from forum_functions import forum_client
-            return await forum_client.get_glossary_terms(email, password, headless)
-        except ImportError:
-            self.log("Forum functions not available - install selenium and run forum_functions.py", "WARNING")
-            return {"error": "Forum functions require selenium. Use forum_functions.py directly."}
+            return await forum_client.get_glossary_terms(email, password)
         except Exception as e:
-            self.log(f"
-
+            self.log(f"Failed to get glossary terms: {str(e)}", "ERROR")
+            raise

     async def search_forum_posts(self, email: str, password: str, search_query: str,
-
+                                 max_results: int = 50) -> Dict[str, Any]:
         """Search forum posts."""
         try:
-
-            from forum_functions import forum_client
-            return await forum_client.search_forum_posts(email, password, search_query, max_results, headless)
-        except ImportError:
-            self.log("Forum functions not available - install selenium and run forum_functions.py", "WARNING")
-            return {"error": "Forum functions require selenium. Use forum_functions.py directly."}
+            return await forum_client.search_forum_posts(email, password, search_query, max_results)
         except Exception as e:
-            self.log(f"
-
+            self.log(f"Failed to search forum posts: {str(e)}", "ERROR")
+            raise

     async def read_forum_post(self, email: str, password: str, article_id: str,
-
+                              include_comments: bool = True) -> Dict[str, Any]:
         """Get forum post."""
         try:
-
-            from forum_functions import forum_client
-            return await forum_client.read_full_forum_post(email, password, article_id, headless, include_comments=True)
-        except ImportError:
-            self.log("Forum functions not available - install selenium and run forum_functions.py", "WARNING")
-            return {"error": "Forum functions require selenium. Use forum_functions.py directly."}
+            return await forum_client.read_full_forum_post(email, password, article_id, include_comments)
         except Exception as e:
-            self.log(f"
-
-
+            self.log(f"Failed to read forum post: {str(e)}", "ERROR")
+            raise
+
     async def get_alpha_yearly_stats(self, alpha_id: str) -> Dict[str, Any]:
         """Get yearly statistics for an alpha with retry logic."""
         await self.ensure_authenticated()
@@ -951,7 +1012,7 @@ class BrainApiClient:

         # This should never be reached, but just in case
         return {}
-
+
     async def get_production_correlation(self, alpha_id: str) -> Dict[str, Any]:
         """Get production correlation data for an alpha with retry logic."""
         await self.ensure_authenticated()
@@ -1183,8 +1244,8 @@ class BrainApiClient:
             raise

     async def set_alpha_properties(self, alpha_id: str, name: Optional[str] = None,
-
-
+                                   color: Optional[str] = None, tags: Optional[List[str]] = None,
+                                   selection_desc: str = "None", combo_desc: str = "None") -> Dict[str, Any]:
         """Update alpha properties (name, color, tags, descriptions)."""
         await self.ensure_authenticated()

@@ -1253,7 +1314,6 @@ class BrainApiClient:
         await self.ensure_authenticated()

         try:
-            # Use the correct endpoint without parameters
             response = self.session.get(f"{self.base_url}/users/self/activities/pyramid-multipliers")
             response.raise_for_status()
             return response.json()
@@ -1261,125 +1321,9 @@ class BrainApiClient:
             self.log(f"Failed to get pyramid multipliers: {str(e)}", "ERROR")
             raise

-    async def
-
-
-        Description:
-        This function calculate the diversity of the users' submission, by checking the diversity, we can have a good understanding on the valuefactor's trend.
-        value factor of a user is defiend by This diversity score, which measures three key aspects of work output: the proportion of works
-        with the "Atom" tag (S_A), atom proportion, the breadth of pyramids covered (S_P), and how evenly works
-        are distributed across those pyramids (S_H). Calculated as their product, it rewards
-        strong performance across all three dimensions—encouraging more Atom-tagged works,
-        wider pyramid coverage, and balanced distribution—with weaknesses in any area lowering
-        the total score significantly.
-
-        Inputs (hints for AI callers):
-        - start_date (str): ISO UTC start datetime, e.g. '2025-08-14T00:00:00Z'
-        - end_date (str): ISO UTC end datetime, e.g. '2025-08-18T23:59:59Z'
-        - Note: this tool always uses 'OS' (submission dates) to define the window; callers do not need to supply a stage.
-        - Note: P_max (total number of possible pyramids) is derived from the platform
-          pyramid-multipliers endpoint and not supplied by callers.
-
-        Returns (compact JSON): {
-            'diversity_score': float,
-            'N': int,  # total regular alphas in window
-            'A': int,  # number of Atom-tagged works (is_single_data_set)
-            'P': int,  # pyramid coverage count in the sample
-            'P_max': int,  # used max for normalization
-            'S_A': float, 'S_P': float, 'S_H': float,
-            'per_pyramid_counts': {pyramid_name: count}
-        }
-        """
-        # Fetch user alphas (always use OS / submission dates per product policy)
-        await self.ensure_authenticated()
-        alphas_resp = await self.get_user_alphas(stage='OS', limit=500, submission_start_date=start_date, submission_end_date=end_date)
-
-        if not isinstance(alphas_resp, dict) or 'results' not in alphas_resp:
-            return {'error': 'Unexpected response from get_user_alphas', 'raw': alphas_resp}
-
-        alphas = alphas_resp['results']
-        regular = [a for a in alphas if a.get('type') == 'REGULAR']
-
-        # Fetch details for each regular alpha
-        pyramid_list = []
-        atom_count = 0
-        per_pyramid = {}
-        for a in regular:
-            try:
-                detail = await self.get_alpha_details(a.get('id'))
-            except Exception:
-                continue
-
-            is_atom = self._is_atom(detail)
-            if is_atom:
-                atom_count += 1
-
-            # Extract pyramids
-            ps = []
-            if isinstance(detail.get('pyramids'), list):
-                ps = [p.get('name') for p in detail.get('pyramids') if p.get('name')]
-            else:
-                pt = detail.get('pyramidThemes') or {}
-                pss = pt.get('pyramids') if isinstance(pt, dict) else None
-                if pss and isinstance(pss, list):
-                    ps = [p.get('name') for p in pss if p.get('name')]
-
-            for p in ps:
-                pyramid_list.append(p)
-                per_pyramid[p] = per_pyramid.get(p, 0) + 1
-
-        N = len(regular)
-        A = atom_count
-        P = len(per_pyramid)
-
-        # Determine P_max similarly to the script: use pyramid multipliers if available
-        P_max = None
-        try:
-            pm = await self.get_pyramid_multipliers()
-            if isinstance(pm, dict) and 'pyramids' in pm:
-                pyramids_list = pm.get('pyramids') or []
-                P_max = len(pyramids_list)
-        except Exception:
-            P_max = None
-
-        if not P_max or P_max <= 0:
-            P_max = max(P, 1)
-
-        # Component scores
-        S_A = (A / N) if N > 0 else 0.0
-        S_P = (P / P_max) if P_max > 0 else 0.0
-
-        # Entropy
-        S_H = 0.0
-        if P <= 1 or not per_pyramid:
-            S_H = 0.0
-        else:
-            total_occ = sum(per_pyramid.values())
-            H = 0.0
-            for cnt in per_pyramid.values():
-                q = cnt / total_occ if total_occ > 0 else 0
-                if q > 0:
-                    H -= q * math.log2(q)
-            max_H = math.log2(P) if P > 0 else 1
-            S_H = (H / max_H) if max_H > 0 else 0.0
-
-        diversity_score = S_A * S_P * S_H
-
-        return {
-            'diversity_score': diversity_score,
-            'N': N,
-            'A': A,
-            'P': P,
-            'P_max': P_max,
-            'S_A': S_A,
-            'S_P': S_P,
-            'S_H': S_H,
-            'per_pyramid_counts': per_pyramid
-        }
-
-    async def get_pyramid_alphas(self, start_date: Optional[str] = None,
-                                 end_date: Optional[str] = None) -> Dict[str, Any]:
-        """Get user's current alpha distribution across pyramid categories."""
+    async def get_pyramid_alphas(self, start_date: Optional[str] = None,
+                                 end_date: Optional[str] = None) -> Dict[str, Any]:
+        """Get user's current alpha distribution across pyramid categories."""
         await self.ensure_authenticated()

         try:
@@ -1419,7 +1363,7 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get pyramid alphas: {str(e)}", "ERROR")
             raise
-
+
     async def get_user_competitions(self, user_id: Optional[str] = None) -> Dict[str, Any]:
         """Get list of competitions that the user is participating in."""
         await self.ensure_authenticated()
@@ -1440,7 +1384,7 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get user competitions: {str(e)}", "ERROR")
             raise
-
+
     async def get_competition_details(self, competition_id: str) -> Dict[str, Any]:
         """Get detailed information about a specific competition."""
         await self.ensure_authenticated()
@@ -1452,7 +1396,7 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get competition details: {str(e)}", "ERROR")
             raise
-
+
     async def get_competition_agreement(self, competition_id: str) -> Dict[str, Any]:
         """Get the rules, terms, and agreement for a specific competition."""
         await self.ensure_authenticated()
@@ -1532,18 +1476,15 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get instrument options: {str(e)}", "ERROR")
             raise
-
+
     async def performance_comparison(self, alpha_id: str, team_id: Optional[str] = None,
-
+                                     competition: Optional[str] = None) -> Dict[str, Any]:
         """Get performance comparison data for an alpha."""
         await self.ensure_authenticated()

         try:
-            params = {}
-            if
-                params['team_id'] = team_id
-            if competition:
-                params['competition'] = competition
+            params = {"teamId": team_id, "competition": competition}
+            params = {k: v for k, v in params.items() if v is not None}

             response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/performance-comparison", params=params)
             response.raise_for_status()
@@ -1551,48 +1492,25 @@ class BrainApiClient:
         except Exception as e:
             self.log(f"Failed to get performance comparison: {str(e)}", "ERROR")
             raise
-
-    #
-
+
+    # --- Helper function for data flattening ---
+
     async def expand_nested_data(self, data: List[Dict[str, Any]], preserve_original: bool = True) -> List[Dict[str, Any]]:
         """Flatten complex nested data structures into tabular format."""
         try:
-
-
-
-
-
-
-                    if isinstance(value, dict):
-                        # Expand nested dictionary
-                        for nested_key, nested_value in value.items():
-                            expanded_key = f"{key}_{nested_key}"
-                            expanded_item[expanded_key] = nested_value
-
-                        # Preserve original if requested
-                        if preserve_original:
-                            expanded_item[key] = value
-                    elif isinstance(value, list):
-                        # Handle list values
-                        expanded_item[key] = str(value) if value else []
-
-                        # Preserve original if requested
-                        if preserve_original:
-                            expanded_item[key] = value
-                    else:
-                        # Simple value
-                        expanded_item[key] = value
-
-                expanded_data.append(expanded_item)
-
-            return expanded_data
+            df = pd.json_normalize(data, sep='_')
+            if preserve_original:
+                original_df = pd.DataFrame(data)
+                df = pd.concat([original_df, df], axis=1)
+                df = df.loc[:,~df.columns.duplicated()]
+            return df.to_dict(orient='records')
         except Exception as e:
             self.log(f"Failed to expand nested data: {str(e)}", "ERROR")
             raise
-
-    #
-
-    async def
+
+    # --- New documentation endpoint ---
+
+    async def get_documentation_page(self, page_id: str) -> Dict[str, Any]:
         """Retrieve detailed content of a specific documentation page/article."""
         await self.ensure_authenticated()

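Note: the rewritten expand_nested_data delegates flattening to pandas.json_normalize. The short snippet below (the sample records are invented) illustrates the behaviour it relies on: nested dictionaries become parent_child columns joined by `sep`.

    import pandas as pd

    records = [
        {"id": "a1", "settings": {"region": "USA", "delay": 1}, "tags": ["atom"]},
        {"id": "a2", "settings": {"region": "EUR", "delay": 0}, "tags": []},
    ]

    # Nested dicts are flattened into columns joined by `sep`.
    flat = pd.json_normalize(records, sep="_")
    print(sorted(flat.columns))   # ['id', 'settings_delay', 'settings_region', 'tags']
    print(flat.to_dict(orient="records"))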
@@ -1604,88 +1522,64 @@ class BrainApiClient:
             self.log(f"Failed to get documentation page: {str(e)}", "ERROR")
             raise

-# Badge status function removed as requested
-
-# Initialize MCP server
-mcp = FastMCP('brain_mcp_server')
-
-# Initialize API client
 brain_client = BrainApiClient()

-# Configuration
-CONFIG_FILE = "user_config.json"
+# --- Configuration Management ---

 def _resolve_config_path(for_write: bool = False) -> str:
     """
-    Resolve the
-
-
-
-
-    When for_write=True, returns the preferred path even if it doesn't exist yet.
+    Resolve the configuration file path.
+
+    Checks for a file specified by the MCP_CONFIG_FILE environment variable,
+    then falls back to ~/.brain_mcp_config.json. If for_write is True,
+    it ensures the directory exists.
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    except Exception:
-        # Fall back to current working directory for notebooks/REPL
-        pass
-
-    module_path = base_dir / CONFIG_FILE
-    if not for_write and module_path.exists():
-        return str(module_path.resolve())
-
-    # 3) Fallback to CWD for backward compatibility
-    cwd_path = Path.cwd() / CONFIG_FILE
-    if not for_write and cwd_path.exists():
-        return str(cwd_path.resolve())
-
-    # For writes (or when nothing exists), prefer the module/base directory
-    return str(module_path.resolve())
+    if 'MCP_CONFIG_FILE' in os.environ:
+        return os.environ['MCP_CONFIG_FILE']
+
+    config_path = Path(__file__).parent / "user_config.json"
+
+    if for_write:
+        try:
+            config_path.parent.mkdir(parents=True, exist_ok=True)
+        except (IOError, OSError) as e:
+            logger.warning(f"Could not create config directory {config_path.parent}: {e}")
+            # Fallback to a temporary file if home is not writable
+            import tempfile
+            return tempfile.NamedTemporaryFile(delete=False).name
+
+    return str(config_path)

 def load_config() -> Dict[str, Any]:
-    """Load configuration from file
-
-
-    Returns an empty dict when not found or on error.
-    """
-    path = _resolve_config_path(for_write=False)
-    if os.path.exists(path):
+    """Load configuration from file."""
+    config_file = _resolve_config_path()
+    if os.path.exists(config_file):
         try:
-            with open(
+            with open(config_file, 'r', encoding='utf-8') as f:
                 return json.load(f)
-        except
-            logger.error(f"
+        except (IOError, json.JSONDecodeError) as e:
+            logger.error(f"Error loading config file {config_file}: {e}")
     return {}

-
 def save_config(config: Dict[str, Any]):
     """Save configuration to file using the resolved config path.
-
-
-
+
+    This function now uses the write-enabled path resolver to handle
+    cases where the default home directory is not writable.
     """
+    config_file = _resolve_config_path(for_write=True)
     try:
-
-
-
-
-    except Exception as e:
-        logger.error(f"Failed to save config: {e}")
+        with open(config_file, 'w', encoding='utf-8') as f:
+            json.dump(config, f, indent=2)
+    except IOError as e:
+        logger.error(f"Error saving config file to {config_file}: {e}")

-# MCP
+# --- MCP Tool Definitions ---
+
+mcp = FastMCP(
+    "brain-platform-mcp",
+    "A server for interacting with the WorldQuant BRAIN platform",
+)

 @mcp.tool()
 async def authenticate(email: Optional[str] = "", password: Optional[str] = "") -> Dict[str, Any]:
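Note: configuration handling is now resolved in one place — an MCP_CONFIG_FILE environment variable wins, otherwise user_config.json next to the module is used (the docstring in the hunk also mentions a ~/.brain_mcp_config.json fallback, but the code shown only uses the module directory). A hedged sketch of that lookup plus a round-trip, with invented values; the helper name below is not from the package:

    import json
    import os
    from pathlib import Path

    def resolve_config_path() -> Path:
        """Mirror of the resolver in the hunk: env override first, then the module directory."""
        override = os.environ.get("MCP_CONFIG_FILE")
        return Path(override) if override else Path(__file__).parent / "user_config.json"

    # Round-trip with invented credentials, for illustration only.
    path = resolve_config_path()
    path.write_text(json.dumps({"credentials": {"email": "user@example.com"}}, indent=2),
                    encoding="utf-8")
    print(json.loads(path.read_text(encoding="utf-8"))["credentials"]["email"])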
@@ -1702,48 +1596,27 @@ async def authenticate(email: Optional[str] = "", password: Optional[str] = "")
|
|
|
1702
1596
|
Authentication result with user info and permissions
|
|
1703
1597
|
"""
|
|
1704
1598
|
try:
|
|
1599
|
+
# Load config to get credentials if not provided
|
|
1705
1600
|
config = load_config()
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
if not password:
|
|
1710
|
-
password = config['credentials'].get('password', '')
|
|
1711
|
-
|
|
1601
|
+
credentials = config.get("credentials", {})
|
|
1602
|
+
email = email or credentials.get("email")
|
|
1603
|
+
password = password or credentials.get("password")
|
|
1712
1604
|
if not email or not password:
|
|
1713
|
-
return {"error": "
|
|
1605
|
+
return {"error": "Authentication credentials not provided or found in config."}
|
|
1714
1606
|
|
|
1715
|
-
|
|
1607
|
+
auth_result = await brain_client.authenticate(email, password)
|
|
1716
1608
|
|
|
1717
|
-
# Save credentials
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
return
|
|
1726
|
-
except Exception as e:
|
|
1727
|
-
return {"error": str(e)}
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
@mcp.tool()
|
|
1731
|
-
async def value_factor_trendScore(start_date: str, end_date: str) -> Dict[str, Any]:
|
|
1732
|
-
"""Compute and return the diversity score for REGULAR alphas in a submission-date window.
|
|
1733
|
-
This function calculate the diversity of the users' submission, by checking the diversity, we can have a good understanding on the valuefactor's trend.
|
|
1734
|
-
This MCP tool wraps BrainApiClient.value_factor_trendScore and always uses submission dates (OS).
|
|
1735
|
-
|
|
1736
|
-
Inputs:
|
|
1737
|
-
- start_date: ISO UTC start datetime (e.g. '2025-08-14T00:00:00Z')
|
|
1738
|
-
- end_date: ISO UTC end datetime (e.g. '2025-08-18T23:59:59Z')
|
|
1739
|
-
- p_max: optional integer total number of pyramid categories for normalization
|
|
1740
|
-
|
|
1741
|
-
Returns: compact JSON with diversity_score, N, A, P, P_max, S_A, S_P, S_H, per_pyramid_counts
|
|
1742
|
-
"""
|
|
1743
|
-
try:
|
|
1744
|
-
return await brain_client.value_factor_trendScore(start_date=start_date, end_date=end_date)
|
|
1609
|
+
# Save successful credentials
|
|
1610
|
+
if auth_result.get('status') == 'authenticated':
|
|
1611
|
+
if 'credentials' not in config:
|
|
1612
|
+
config['credentials'] = {}
|
|
1613
|
+
config['credentials']['email'] = email
|
|
1614
|
+
config['credentials']['password'] = password
|
|
1615
|
+
save_config(config)
|
|
1616
|
+
|
|
1617
|
+
return auth_result
|
|
1745
1618
|
except Exception as e:
|
|
1746
|
-
return {"error": str(e)}
|
|
1619
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
1747
1620
|
|
|
1748
1621
|
@mcp.tool()
|
|
1749
1622
|
async def manage_config(action: str = "get", settings: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
|
|
@@ -1779,6 +1652,8 @@ async def manage_config(action: str = "get", settings: Optional[Dict[str, Any]]
|
|
|
1779
1652
|
else:
|
|
1780
1653
|
return {"error": f"Invalid action '{action}'. Use 'get' or 'set'."}
|
|
1781
1654
|
|
|
1655
|
+
# --- Simulation Tools ---
|
|
1656
|
+
|
|
1782
1657
|
@mcp.tool()
|
|
1783
1658
|
async def create_simulation(
|
|
1784
1659
|
type: str = "REGULAR",
|
|
@@ -1801,7 +1676,7 @@ async def create_simulation(
|
|
|
1801
1676
|
max_trade: str = "OFF",
|
|
1802
1677
|
selection_handling: str = "POSITIVE",
|
|
1803
1678
|
selection_limit: int = 1000,
|
|
1804
|
-
component_activation: str = "IS"
|
|
1679
|
+
component_activation: str = "IS",
|
|
1805
1680
|
) -> Dict[str, Any]:
|
|
1806
1681
|
"""
|
|
1807
1682
|
🚀 Create a new simulation on BRAIN platform.
|
|
@@ -1842,10 +1717,15 @@ async def create_simulation(
|
|
|
1842
1717
|
unitHandling=unit_handling,
|
|
1843
1718
|
nanHandling=nan_handling,
|
|
1844
1719
|
language=language,
|
|
1845
|
-
visualization=visualization
|
|
1720
|
+
visualization=visualization,
|
|
1721
|
+
pasteurization=pasteurization,
|
|
1722
|
+
maxTrade=max_trade,
|
|
1723
|
+
selectionHandling=selection_handling,
|
|
1724
|
+
selectionLimit=selection_limit,
|
|
1725
|
+
componentActivation=component_activation,
|
|
1846
1726
|
)
|
|
1847
1727
|
|
|
1848
|
-
|
|
1728
|
+
sim_data = SimulationData(
|
|
1849
1729
|
type=type,
|
|
1850
1730
|
settings=settings,
|
|
1851
1731
|
regular=regular,
|
|
@@ -1853,13 +1733,11 @@ async def create_simulation(
|
|
|
1853
1733
|
selection=selection
|
|
1854
1734
|
)
|
|
1855
1735
|
|
|
1856
|
-
|
|
1857
|
-
return result
|
|
1736
|
+
return await brain_client.create_simulation(sim_data)
|
|
1858
1737
|
except Exception as e:
|
|
1859
|
-
return {"error":
|
|
1738
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
1860
1739
|
|
|
1861
|
-
#
|
|
1862
|
-
# wait_for_simulation MCP tool removed as requested
|
|
1740
|
+
# --- Alpha and Data Retrieval Tools ---
|
|
1863
1741
|
|
|
1864
1742
|
@mcp.tool()
|
|
1865
1743
|
async def get_alpha_details(alpha_id: str) -> Dict[str, Any]:
|
|
@@ -1875,7 +1753,7 @@ async def get_alpha_details(alpha_id: str) -> Dict[str, Any]:
|
|
|
1875
1753
|
try:
|
|
1876
1754
|
return await brain_client.get_alpha_details(alpha_id)
|
|
1877
1755
|
except Exception as e:
|
|
1878
|
-
return {"error": str(e)}
|
|
1756
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
1879
1757
|
|
|
1880
1758
|
@mcp.tool()
|
|
1881
1759
|
async def get_datasets(
|
|
@@ -1884,7 +1762,7 @@ async def get_datasets(
|
|
|
1884
1762
|
delay: int = 1,
|
|
1885
1763
|
universe: str = "TOP3000",
|
|
1886
1764
|
theme: str = "false",
|
|
1887
|
-
search: Optional[str] = None
|
|
1765
|
+
search: Optional[str] = None,
|
|
1888
1766
|
) -> Dict[str, Any]:
|
|
1889
1767
|
"""
|
|
1890
1768
|
📚 Get available datasets for research.
|
|
@@ -1902,9 +1780,9 @@ async def get_datasets(
|
|
|
1902
1780
|
Available datasets
|
|
1903
1781
|
"""
|
|
1904
1782
|
try:
|
|
1905
|
-
return await brain_client.get_datasets(instrument_type, region, delay, universe, theme,search)
|
|
1783
|
+
return await brain_client.get_datasets(instrument_type, region, delay, universe, theme, search)
|
|
1906
1784
|
except Exception as e:
|
|
1907
|
-
return {"error": str(e)}
|
|
1785
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
1908
1786
|
|
|
1909
1787
|
@mcp.tool()
|
|
1910
1788
|
async def get_datafields(
|
|
@@ -1915,7 +1793,7 @@ async def get_datafields(
|
|
|
1915
1793
|
theme: str = "false",
|
|
1916
1794
|
dataset_id: Optional[str] = None,
|
|
1917
1795
|
data_type: str = "",
|
|
1918
|
-
search: Optional[str] = None
|
|
1796
|
+
search: Optional[str] = None,
|
|
1919
1797
|
) -> Dict[str, Any]:
|
|
1920
1798
|
"""
|
|
1921
1799
|
🔍 Get available data fields for alpha construction.
|
|
@@ -1929,19 +1807,16 @@ async def get_datafields(
|
|
|
1929
1807
|
universe: Universe of stocks (e.g., "TOP3000")
|
|
1930
1808
|
theme: Theme filter
|
|
1931
1809
|
dataset_id: Specific dataset ID to filter by
|
|
1932
|
-
data_type: Type of data (e.g., "MATRIX")
|
|
1810
|
+
data_type: Type of data (e.g., "MATRIX",'VECTOR','GROUP')
|
|
1933
1811
|
search: Search term to filter fields
|
|
1934
1812
|
|
|
1935
1813
|
Returns:
|
|
1936
1814
|
Available data fields
|
|
1937
1815
|
"""
|
|
1938
1816
|
try:
|
|
1939
|
-
return await brain_client.get_datafields(
|
|
1940
|
-
instrument_type, region, delay, universe, theme,
|
|
1941
|
-
dataset_id, data_type, search
|
|
1942
|
-
)
|
|
1817
|
+
return await brain_client.get_datafields(instrument_type, region, delay, universe, theme, dataset_id, data_type, search)
|
|
1943
1818
|
except Exception as e:
|
|
1944
|
-
return {"error": str(e)}
|
|
1819
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
1945
1820
|
|
|
1946
1821
|
@mcp.tool()
|
|
1947
1822
|
async def get_alpha_pnl(alpha_id: str) -> Dict[str, Any]:
|
|
@@ -1957,7 +1832,7 @@ async def get_alpha_pnl(alpha_id: str) -> Dict[str, Any]:
|
|
|
1957
1832
|
try:
|
|
1958
1833
|
return await brain_client.get_alpha_pnl(alpha_id)
|
|
1959
1834
|
except Exception as e:
|
|
1960
|
-
return {"error": str(e)}
|
|
1835
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
1961
1836
|
|
|
1962
1837
|
@mcp.tool()
|
|
1963
1838
|
async def get_user_alphas(
|
|
@@ -2013,18 +1888,12 @@ async def get_user_alphas(
|
|
|
2013
1888
|
"""
|
|
2014
1889
|
try:
|
|
2015
1890
|
return await brain_client.get_user_alphas(
|
|
2016
|
-
stage=stage,
|
|
2017
|
-
|
|
2018
|
-
|
|
2019
|
-
start_date=start_date,
|
|
2020
|
-
end_date=end_date,
|
|
2021
|
-
submission_start_date=submission_start_date,
|
|
2022
|
-
submission_end_date=submission_end_date,
|
|
2023
|
-
order=order,
|
|
2024
|
-
hidden=hidden,
|
|
1891
|
+
stage=stage, limit=limit, offset=offset, start_date=start_date,
|
|
1892
|
+
end_date=end_date, submission_start_date=submission_start_date,
|
|
1893
|
+
submission_end_date=submission_end_date, order=order, hidden=hidden
|
|
2025
1894
|
)
|
|
2026
1895
|
except Exception as e:
|
|
2027
|
-
return {"error": str(e)}
|
|
1896
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
2028
1897
|
|
|
2029
1898
|
@mcp.tool()
|
|
2030
1899
|
async def submit_alpha(alpha_id: str) -> Dict[str, Any]:
|
|
@@ -2041,10 +1910,30 @@ async def submit_alpha(alpha_id: str) -> Dict[str, Any]:
|
|
|
2041
1910
|
"""
|
|
2042
1911
|
try:
|
|
2043
1912
|
success = await brain_client.submit_alpha(alpha_id)
|
|
2044
|
-
return {"
|
|
1913
|
+
return {"success": success}
|
|
1914
|
+
except Exception as e:
|
|
1915
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
1916
|
+
|
|
1917
|
+
@mcp.tool()
|
|
1918
|
+
async def value_factor_trendScore(start_date: str, end_date: str) -> Dict[str, Any]:
|
|
1919
|
+
"""Compute and return the diversity score for REGULAR alphas in a submission-date window.
|
|
1920
|
+
This function calculate the diversity of the users' submission, by checking the diversity, we can have a good understanding on the valuefactor's trend.
|
|
1921
|
+
This MCP tool wraps BrainApiClient.value_factor_trendScore and always uses submission dates (OS).
|
|
1922
|
+
|
|
1923
|
+
Inputs:
|
|
1924
|
+
- start_date: ISO UTC start datetime (e.g. '2025-08-14T00:00:00Z')
|
|
1925
|
+
- end_date: ISO UTC end datetime (e.g. '2025-08-18T23:59:59Z')
|
|
1926
|
+
- p_max: optional integer total number of pyramid categories for normalization
|
|
1927
|
+
|
|
1928
|
+
Returns: compact JSON with diversity_score, N, A, P, P_max, S_A, S_P, S_H, per_pyramid_counts
|
|
1929
|
+
"""
|
|
1930
|
+
try:
|
|
1931
|
+
return await brain_client.value_factor_trendScore(start_date=start_date, end_date=end_date)
|
|
2045
1932
|
except Exception as e:
|
|
2046
1933
|
return {"error": str(e)}
|
|
2047
1934
|
|
|
1935
|
+
# --- Community and Events Tools ---
|
|
1936
|
+
|
|
2048
1937
|
@mcp.tool()
|
|
2049
1938
|
async def get_events() -> Dict[str, Any]:
|
|
2050
1939
|
"""
|
|
@@ -2056,7 +1945,7 @@ async def get_events() -> Dict[str, Any]:
|
|
|
2056
1945
|
try:
|
|
2057
1946
|
return await brain_client.get_events()
|
|
2058
1947
|
except Exception as e:
|
|
2059
|
-
return {"error": str(e)}
|
|
1948
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
2060
1949
|
|
|
2061
1950
|
@mcp.tool()
|
|
2062
1951
|
async def get_leaderboard(user_id: Optional[str] = None) -> Dict[str, Any]:
|
|
@@ -2072,35 +1961,10 @@ async def get_leaderboard(user_id: Optional[str] = None) -> Dict[str, Any]:
|
|
|
2072
1961
|
try:
|
|
2073
1962
|
return await brain_client.get_leaderboard(user_id)
|
|
2074
1963
|
except Exception as e:
|
|
2075
|
-
return {"error": str(e)}
|
|
2076
|
-
|
|
2077
|
-
# batch_process_alphas MCP tool removed as requested
|
|
2078
|
-
|
|
2079
|
-
@mcp.tool()
|
|
2080
|
-
async def save_simulation_data(simulation_id: str, filename: str) -> Dict[str, Any]:
|
|
2081
|
-
"""
|
|
2082
|
-
💾 Save simulation data to a file.
|
|
2083
|
-
|
|
2084
|
-
Args:
|
|
2085
|
-
simulation_id: The simulation ID
|
|
2086
|
-
filename: Filename to save the data
|
|
2087
|
-
|
|
2088
|
-
Returns:
|
|
2089
|
-
Save operation result
|
|
2090
|
-
"""
|
|
2091
|
-
try:
|
|
2092
|
-
# Get simulation data
|
|
2093
|
-
simulation_data = await brain_client.get_simulation_status(simulation_id)
|
|
2094
|
-
|
|
2095
|
-
# Save to file
|
|
2096
|
-
with open(filename, 'w', encoding='utf-8') as f:
|
|
2097
|
-
json.dump(simulation_data, f, indent=2)
|
|
2098
|
-
|
|
2099
|
-
return {"success": True, "filename": filename, "simulation_id": simulation_id}
|
|
2100
|
-
except Exception as e:
|
|
2101
|
-
return {"error": str(e)}
|
|
1964
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
2102
1965
|
|
|
2103
1966
|
|
|
1967
|
+
# --- Forum Tools ---
|
|
2104
1968
|
|
|
2105
1969
|
@mcp.tool()
|
|
2106
1970
|
async def get_operators() -> Dict[str, Any]:
|
|
@@ -2111,9 +1975,12 @@ async def get_operators() -> Dict[str, Any]:
|
|
|
2111
1975
|
Dictionary containing operators list and count
|
|
2112
1976
|
"""
|
|
2113
1977
|
try:
|
|
2114
|
-
|
|
1978
|
+
operators = await brain_client.get_operators()
|
|
1979
|
+
if isinstance(operators, list):
|
|
1980
|
+
return {"results": operators, "count": len(operators)}
|
|
1981
|
+
return operators
|
|
2115
1982
|
except Exception as e:
|
|
2116
|
-
return {"error": str(e)}
|
|
1983
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
2117
1984
|
|
|
2118
1985
|
@mcp.tool()
|
|
2119
1986
|
async def run_selection(
|
|
@@ -2122,7 +1989,7 @@ async def run_selection(
|
|
|
2122
1989
|
region: str = "USA",
|
|
2123
1990
|
delay: int = 1,
|
|
2124
1991
|
selection_limit: int = 1000,
|
|
2125
|
-
selection_handling: str = "POSITIVE"
|
|
1992
|
+
selection_handling: str = "POSITIVE",
|
|
2126
1993
|
) -> Dict[str, Any]:
|
|
2127
1994
|
"""
|
|
2128
1995
|
🎯 Run a selection query to filter instruments.
|
|
@@ -2143,7 +2010,7 @@ async def run_selection(
|
|
|
2143
2010
|
selection, instrument_type, region, delay, selection_limit, selection_handling
|
|
2144
2011
|
)
|
|
2145
2012
|
except Exception as e:
|
|
2146
|
-
return {"error": str(e)}
|
|
2013
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
2147
2014
|
|
|
2148
2015
|
@mcp.tool()
|
|
2149
2016
|
async def get_user_profile(user_id: str = "self") -> Dict[str, Any]:
|
|
@@ -2159,7 +2026,7 @@ async def get_user_profile(user_id: str = "self") -> Dict[str, Any]:
|
|
|
2159
2026
|
try:
|
|
2160
2027
|
return await brain_client.get_user_profile(user_id)
|
|
2161
2028
|
except Exception as e:
|
|
2162
|
-
return {"error": str(e)}
|
|
2029
|
+
return {"error": f"An unexpected error occurred: {str(e)}"}
|
|
2163
2030
|
|
|
2164
2031
|
@mcp.tool()
|
|
2165
2032
|
async def get_documentations() -> Dict[str, Any]:
|
|
@@ -2172,226 +2039,110 @@ async def get_documentations() -> Dict[str, Any]:
     try:
         return await brain_client.get_documentations()
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
-#
+# --- Message and Forum Tools ---
 
 @mcp.tool()
-async def get_messages(limit: Optional[int] =
+async def get_messages(limit: Optional[int] = None, offset: int = 0) -> Dict[str, Any]:
     """
-    Get messages for the current user with optional pagination.
+    💬 Get messages for the current user with optional pagination.
 
     Args:
         limit: Maximum number of messages to return (e.g., 10 for top 10 messages)
-               Can be None (no limit), an integer, or a string that can be converted to int
         offset: Number of messages to skip (for pagination)
-                Can be an integer or a string that can be converted to int
 
     Returns:
         Messages for the current user, optionally limited by count
     """
-    # Wrap the entire function in a try-catch to handle any encoding issues
     try:
-
-        validated_limit = None
-        validated_offset = 0
-
-        # Validate and convert limit parameter
-        if limit is not None:
-            if isinstance(limit, str):
-                if limit.strip() == "":
-                    # Empty string means no limit
-                    validated_limit = 0
-                else:
-                    try:
-                        validated_limit = int(limit)
-                        if validated_limit < 0:
-                            return {"error": f"Limit must be non-negative, got: {limit}"}
-                    except ValueError:
-                        return {"error": f"Invalid limit value '{limit}'. Must be a number or empty string."}
-            elif isinstance(limit, (int, float)):
-                validated_limit = int(limit)
-                if validated_limit < 0:
-                    return {"error": f"Limit must be non-negative, got: {limit}"}
-            else:
-                return {"error": f"Invalid limit type {type(limit).__name__}. Expected int, float, str, or None."}
-
-        # Validate and convert offset parameter
-        if isinstance(offset, str):
-            try:
-                validated_offset = int(offset)
-            except ValueError:
-                return {"error": f"Invalid offset value '{offset}'. Must be a number."}
-        elif isinstance(offset, (int, float)):
-            validated_offset = int(offset)
-        else:
-            return {"error": f"Invalid offset type {type(offset).__name__}. Expected int, float, or str."}
-
-        if validated_offset < 0:
-            return {"error": f"Offset must be non-negative, got: {offset}"}
-
-        # Log the validated parameters for debugging (without emojis to avoid encoding issues)
-        try:
-            print(f"get_messages called with validated parameters: limit={validated_limit}, offset={validated_offset}")
-        except Exception:
-            print(f"get_messages called with parameters: limit={validated_limit}, offset={validated_offset}")
-
-        # Call the brain client with validated parameters
-        result = await brain_client.get_messages(validated_limit, validated_offset)
-
-        # Add validation info to the result
-        if isinstance(result, dict) and "error" not in result:
-            result["_validation"] = {
-                "original_limit": limit,
-                "original_offset": offset,
-                "validated_limit": validated_limit,
-                "validated_offset": validated_offset,
-                "parameter_types": {
-                    "limit": str(type(limit)),
-                    "offset": str(type(offset))
-                }
-            }
-
-        return result
-
-    except UnicodeEncodeError as ue:
-        # Handle encoding errors specifically
-        error_msg = f"get_messages failed due to encoding error: {str(ue)}"
-        try:
-            print(f"ENCODING ERROR: {error_msg}")
-        except Exception:
-            print(f"get_messages encoding error: {str(ue)}")
-        return {
-            "error": error_msg,
-            "error_type": "UnicodeEncodeError",
-            "original_params": {
-                "limit": limit,
-                "offset": offset,
-                "limit_type": str(type(limit)),
-                "offset_type": str(type(offset))
-            }
-        }
+        return await brain_client.get_messages(limit, offset)
     except Exception as e:
-
-        try:
-            print(f"ERROR: {error_msg}")
-        except Exception:
-            print(f"get_messages failed: {str(e)}")
-        return {
-            "error": error_msg,
-            "error_type": type(e).__name__,
-            "original_params": {
-                "limit": limit,
-                "offset": offset,
-                "limit_type": str(type(limit)),
-                "offset_type": str(type(offset))
-            }
-        }
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
-async def get_glossary_terms(email: str = "", password: str = ""
+async def get_glossary_terms(email: str = "", password: str = "") -> List[Dict[str, str]]:
     """
     📚 Get glossary terms from WorldQuant BRAIN forum.
 
-    Note: This
+    Note: This uses Playwright and is implemented in forum_functions.py
 
     Args:
         email: Your BRAIN platform email address (optional if in config)
         password: Your BRAIN platform password (optional if in config)
-        headless: Run browser in headless mode (default: False)
 
     Returns:
-
+        A list of glossary terms with definitions
     """
     try:
-        # Load config to get credentials if not provided
         config = load_config()
-
-
-
-        email = config['credentials'].get('email', '')
-        if not password and 'credentials' in config:
-            password = config['credentials'].get('password', '')
-
+        credentials = config.get("credentials", {})
+        email = email or credentials.get("email")
+        password = password or credentials.get("password")
         if not email or not password:
-
+            raise ValueError("Authentication credentials not provided or found in config.")
 
-        return await brain_client.get_glossary_terms(email, password
+        return await brain_client.get_glossary_terms(email, password)
     except Exception as e:
-
+        logger.error(f"Error in get_glossary_terms tool: {e}")
+        return [{"error": str(e)}]
 
 @mcp.tool()
 async def search_forum_posts(search_query: str, email: str = "", password: str = "",
-
+                             max_results: int = 50) -> Dict[str, Any]:
     """
     🔍 Search forum posts on WorldQuant BRAIN support site.
 
-    Note: This
+    Note: This uses Playwright and is implemented in forum_functions.py
 
     Args:
+        search_query: Search term or phrase
         email: Your BRAIN platform email address (optional if in config)
         password: Your BRAIN platform password (optional if in config)
-        search_query: Search term or phrase
         max_results: Maximum number of results to return (default: 50)
-        headless: Run browser in headless mode (default: True)
 
     Returns:
         Search results with analysis
     """
     try:
-        # Load config to get credentials if not provided
         config = load_config()
-
-
-
-        email = config['credentials'].get('email', '')
-        if not password and 'credentials' in config:
-            password = config['credentials'].get('password', '')
-
+        credentials = config.get("credentials", {})
+        email = email or credentials.get("email")
+        password = password or credentials.get("password")
         if not email or not password:
-            return {"error": "
-
-        return await brain_client.search_forum_posts(email, password, search_query, max_results
+            return {"error": "Authentication credentials not provided or found in config."}
+
+        return await brain_client.search_forum_posts(email, password, search_query, max_results)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def read_forum_post(article_id: str, email: str = "", password: str = "",
-
+                          include_comments: bool = True) -> Dict[str, Any]:
     """
     📄 Get a specific forum post by article ID.
 
-    Note: This
+    Note: This uses Playwright and is implemented in forum_functions.py
 
     Args:
         article_id: The article ID to retrieve (e.g., "32984819083415-新人求模板")
         email: Your BRAIN platform email address (optional if in config)
         password: Your BRAIN platform password (optional if in config)
-        headless: Run browser in headless mode (default: False)
 
     Returns:
         Forum post content with comments
     """
     try:
-        # Load config to get credentials if not provided
         config = load_config()
-
-
-
-        email = config['credentials'].get('email', '')
-        if not password and 'credentials' in config:
-            password = config['credentials'].get('password', '')
-
+        credentials = config.get("credentials", {})
+        email = email or credentials.get("email")
+        password = password or credentials.get("password")
         if not email or not password:
-            return {"error": "
-
-
-        from forum_functions import forum_client
-        return await forum_client.read_full_forum_post(email, password, article_id, headless, include_comments=True)
-    except ImportError:
-        return {"error": "Forum functions require selenium. Use forum_functions.py directly."}
+            return {"error": "Authentication credentials not provided or found in config."}
+
+        return await brain_client.read_forum_post(email, password, article_id, include_comments)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_alpha_yearly_stats(alpha_id: str) -> Dict[str, Any]:
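The forum tools in this hunk now resolve credentials the same way: an explicit argument wins, otherwise the value comes from load_config(), and missing credentials fail fast. A short sketch of that fallback, assuming the config dict has the shape {"credentials": {"email": ..., "password": ...}}; the helper name resolve_credentials is illustrative only and not part of the package:

    # Illustrative helper mirroring the credential fallback used by
    # get_glossary_terms, search_forum_posts and read_forum_post.
    from typing import Dict, Tuple

    def resolve_credentials(config: Dict, email: str = "", password: str = "") -> Tuple[str, str]:
        credentials = config.get("credentials", {})
        email = email or credentials.get("email", "")
        password = password or credentials.get("password", "")
        if not email or not password:
            raise ValueError("Authentication credentials not provided or found in config.")
        return email, password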
@@ -2399,7 +2150,7 @@ async def get_alpha_yearly_stats(alpha_id: str) -> Dict[str, Any]:
     try:
         return await brain_client.get_alpha_yearly_stats(alpha_id)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def check_correlation(alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
@@ -2407,7 +2158,7 @@ async def check_correlation(alpha_id: str, correlation_type: str = "both", thres
     try:
         return await brain_client.check_correlation(alpha_id, correlation_type, threshold)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_submission_check(alpha_id: str) -> Dict[str, Any]:
@@ -2415,17 +2166,17 @@ async def get_submission_check(alpha_id: str) -> Dict[str, Any]:
     try:
         return await brain_client.get_submission_check(alpha_id)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def set_alpha_properties(alpha_id: str, name: Optional[str] = None,
-
-
+                               color: Optional[str] = None, tags: Optional[List[str]] = None,
+                               selection_desc: str = "None", combo_desc: str = "None") -> Dict[str, Any]:
     """Update alpha properties (name, color, tags, descriptions)."""
     try:
         return await brain_client.set_alpha_properties(alpha_id, name, color, tags, selection_desc, combo_desc)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_record_sets(alpha_id: str) -> Dict[str, Any]:
@@ -2433,7 +2184,7 @@ async def get_record_sets(alpha_id: str) -> Dict[str, Any]:
     try:
         return await brain_client.get_record_sets(alpha_id)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_record_set_data(alpha_id: str, record_set_name: str) -> Dict[str, Any]:
@@ -2441,7 +2192,7 @@ async def get_record_set_data(alpha_id: str, record_set_name: str) -> Dict[str,
     try:
         return await brain_client.get_record_set_data(alpha_id, record_set_name)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_user_activities(user_id: str, grouping: Optional[str] = None) -> Dict[str, Any]:
@@ -2449,7 +2200,7 @@ async def get_user_activities(user_id: str, grouping: Optional[str] = None) -> D
     try:
         return await brain_client.get_user_activities(user_id, grouping)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_pyramid_multipliers() -> Dict[str, Any]:
@@ -2457,24 +2208,24 @@ async def get_pyramid_multipliers() -> Dict[str, Any]:
     try:
         return await brain_client.get_pyramid_multipliers()
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_pyramid_alphas(start_date: Optional[str] = None,
-
+                             end_date: Optional[str] = None) -> Dict[str, Any]:
     """Get user's current alpha distribution across pyramid categories."""
     try:
         return await brain_client.get_pyramid_alphas(start_date, end_date)
     except Exception as e:
-        return {"error": str(e)}
-
+        return {"error": f"An unexpected error occurred: {str(e)}"}
+
 @mcp.tool()
 async def get_user_competitions(user_id: Optional[str] = None) -> Dict[str, Any]:
     """Get list of competitions that the user is participating in."""
     try:
         return await brain_client.get_user_competitions(user_id)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_competition_details(competition_id: str) -> Dict[str, Any]:
@@ -2482,7 +2233,7 @@ async def get_competition_details(competition_id: str) -> Dict[str, Any]:
     try:
         return await brain_client.get_competition_details(competition_id)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_competition_agreement(competition_id: str) -> Dict[str, Any]:
@@ -2490,7 +2241,7 @@ async def get_competition_agreement(competition_id: str) -> Dict[str, Any]:
     try:
         return await brain_client.get_competition_agreement(competition_id)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def get_platform_setting_options() -> Dict[str, Any]:
@@ -2506,18 +2257,18 @@ async def get_platform_setting_options() -> Dict[str, Any]:
     try:
         return await brain_client.get_platform_setting_options()
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
 @mcp.tool()
 async def performance_comparison(alpha_id: str, team_id: Optional[str] = None,
-
+                                 competition: Optional[str] = None) -> Dict[str, Any]:
     """Get performance comparison data for an alpha."""
     try:
         return await brain_client.performance_comparison(alpha_id, team_id, competition)
     except Exception as e:
-        return {"error": str(e)}
-
-#
+        return {"error": f"An unexpected error occurred: {str(e)}"}
+
+# --- Dataframe Tool ---
 
 @mcp.tool()
 async def expand_nested_data(data: List[Dict[str, Any]], preserve_original: bool = True) -> List[Dict[str, Any]]:
@@ -2525,22 +2276,22 @@ async def expand_nested_data(data: List[Dict[str, Any]], preserve_original: bool
     try:
         return await brain_client.expand_nested_data(data, preserve_original)
     except Exception as e:
-        return {"error": str(e)}
-
-#
+        return [{"error": f"An unexpected error occurred: {str(e)}"}]
+
+# --- Documentation Tool ---
 
 @mcp.tool()
-async def
+async def get_documentation_page(page_id: str) -> Dict[str, Any]:
     """Retrieve detailed content of a specific documentation page/article."""
     try:
-        return await brain_client.
+        return await brain_client.get_documentation_page(page_id)
     except Exception as e:
-        return {"error": str(e)}
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
-#
+# --- Advanced Simulation Tools ---
 
 @mcp.tool()
-async def
+async def create_multi_simulation(
     alpha_expressions: List[str],
     instrument_type: str = "EQUITY",
     region: str = "USA",
@@ -2622,7 +2373,7 @@ async def create_multiSim(
         response = brain_client.session.post(f"{brain_client.base_url}/simulations", json=multisimulation_data)
 
         if response.status_code != 201:
-            return {"error": f"Failed to create multisimulation. Status: {response.status_code}
+            return {"error": f"Failed to create multisimulation. Status: {response.status_code}"}
 
         # Get multisimulation location
         location = response.headers.get('Location', '')
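This hunk only closes the unterminated f-string in the error message, but it sits in the submission flow the renamed create_multi_simulation tool relies on: POST the payload to /simulations, treat anything other than HTTP 201 as failure, and take the job URL from the Location header before waiting on it. A rough sketch of that flow with requests; the 201 check and the Location header come from the diff, while the terminal status names checked below are assumptions, not taken from this diff:

    # Sketch of the submit-then-poll flow around /simulations.
    import time
    import requests

    def submit_and_wait(session: requests.Session, base_url: str, payload: dict,
                        poll_interval: float = 5.0, timeout: float = 600.0) -> dict:
        response = session.post(f"{base_url}/simulations", json=payload)
        if response.status_code != 201:
            return {"error": f"Failed to create multisimulation. Status: {response.status_code}"}
        location = response.headers.get('Location', '')
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            status = session.get(location).json()
            if status.get("status") in ("COMPLETE", "ERROR", "FAIL"):  # assumed terminal states
                return status
            time.sleep(poll_interval)
        return {"error": "Timed out waiting for multisimulation completion"}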
@@ -2633,7 +2384,7 @@ async def create_multiSim(
         return await _wait_for_multisimulation_completion(location, len(alpha_expressions))
 
     except Exception as e:
-        return {"error": f"Error creating multisimulation: {str(e)}
+        return {"error": f"Error creating multisimulation: {str(e)}"}
 
 async def _wait_for_multisimulation_completion(location: str, expected_children: int) -> Dict[str, Any]:
     """Wait for multisimulation to complete and return results"""
@@ -2748,12 +2499,12 @@ async def _wait_for_multisimulation_completion(location: str, expected_children:
             'total_created': len(alpha_results),
             'multisimulation_id': location.split('/')[-1],
             'multisimulation_location': location,
-            'alpha_results': alpha_results
-            'note': "if you got a negative alpha sharpe, you can just add a minus sign in front of the last line of the Alpha to flip then think the next step."
+            'alpha_results': alpha_results
         }
 
     except Exception as e:
-        return {"error": f"Error waiting for multisimulation completion: {str(e)}
+        return {"error": f"Error waiting for multisimulation completion: {str(e)}"}
+
+# --- Payment and Financial Tools ---
 
 @mcp.tool()
 async def get_daily_and_quarterly_payment(email: str = "", password: str = "") -> Dict[str, Any]:
@@ -2771,74 +2522,41 @@ async def get_daily_and_quarterly_payment(email: str = "", password: str = "") -
         Dictionary containing base payment and other payment data with summaries and detailed records
     """
     try:
-
-
-
-
-
-
-
-
-        if not config.get('is_authenticated'):
-            return {"error": "Not authenticated. Please provide email and password or authenticate first."}
-
-
-        # Set required Accept header for API v3.0
-        header = {"Accept": "application/json;version=3.0"}
-
-        # Get base payment data
-        base_payment_response = brain_client.session.get(
-            'https://api.worldquantbrain.com/users/self/activities/base-payment', headers=header
-        )
-
-        if base_payment_response.status_code != 200:
-            return {"error": f"Failed to get base payment data: {base_payment_response.status_code}"}
-
-        base_payment_data = base_payment_response.json()
-
-        # Get other payment data
-        other_payment_response = brain_client.session.get(
-            'https://api.worldquantbrain.com/users/self/activities/other-payment', headers=header
-        )
-
-        if other_payment_response.status_code != 200:
-            return {"error": f"Failed to get other payment data: {other_payment_response.status_code}"}
-
-        other_payment_data = other_payment_response.json()
+        config = load_config()
+        credentials = config.get("credentials", {})
+        email = email or credentials.get("email")
+        password = password or credentials.get("password")
+        if not email or not password:
+            return {"error": "Authentication credentials not provided or found in config."}
+
+        await brain_client.authenticate(email, password)
 
-        #
+        # Get base payments
+        try:
+            base_response = brain_client.session.get(f"{brain_client.base_url}/users/self/activities/base-payment")
+            base_response.raise_for_status()
+            base_payments = base_response.json()
+        except:
+            base_payments = "no data"
+
+        try:
+            # Get other payments
+            other_response = brain_client.session.get(f"{brain_client.base_url}/users/self/activities/other-payment")
+            other_response.raise_for_status()
+            other_payments = other_response.json()
+        except:
+            other_payments = "no data"
         return {
-            "
-            "
-            "summary": {
-                "yesterday": base_payment_data.get("yesterday"),
-                "current_quarter": base_payment_data.get("current"),
-                "previous_quarter": base_payment_data.get("previous"),
-                "year_to_date": base_payment_data.get("ytd"),
-                "total_all_time": base_payment_data.get("total"),
-                "currency": base_payment_data.get("currency")
-            },
-            "daily_records": base_payment_data.get("records", {}).get("records", []),
-            "schema": base_payment_data.get("records", {}).get("schema")
-            },
-            "other_payment": {
-                "total_all_time": other_payment_data.get("total"),
-                "special_payments": other_payment_data.get("records", {}).get("records", []),
-                "schema": other_payment_data.get("records", {}).get("schema"),
-                "currency": other_payment_data.get("currency")
-            },
-            "timestamp": datetime.now().isoformat()
+            "base_payments": base_payments,
+            "other_payments": other_payments
         }
 
     except Exception as e:
-        return {"error": f"
-
+        return {"error": f"An unexpected error occurred: {str(e)}"}
 
-
-# New MCP tool: get_SimError_detail
 from typing import Sequence
 @mcp.tool()
-async def
+async def lookINTO_SimError_message(locations: Sequence[str]) -> dict:
     """
     Fetch and parse error/status from multiple simulation locations (URLs).
     Args:
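The rewritten payment tool above drops the hand-built summary dictionary and instead fetches each activities endpoint independently, degrading to the string "no data" whenever a request fails. A compact sketch of that per-endpoint fallback; fetch_activity is an illustrative name, and narrowing the bare except: from the diff to requests.RequestException is an editorial choice, not what the package does:

    # Illustrative per-endpoint fetch with the "no data" fallback used by
    # get_daily_and_quarterly_payment.
    import requests

    def fetch_activity(session: requests.Session, base_url: str, activity: str):
        try:
            response = session.get(f"{base_url}/users/self/activities/{activity}")
            response.raise_for_status()
            return response.json()
        except requests.RequestException:
            return "no data"

The narrower exception keeps the same graceful degradation while not swallowing signals such as KeyboardInterrupt.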
@@ -2876,11 +2594,8 @@ async def get_SimError_detail(locations: Sequence[str]) -> dict:
         })
     return {"results": results}
 
+
+# --- Main entry point ---
 if __name__ == "__main__":
-
-
-        mcp.run()
-    except Exception as e:
-        print(f"Failed to start MCP server: {e}", file=sys.stderr)
-        sys.exit(1)
-
+    print("running the server")
+    mcp.run()