cnhkmcp-1.2.4-py3-none-any.whl → cnhkmcp-1.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cnhkmcp/__init__.py CHANGED
@@ -50,7 +50,7 @@ from .untracked.forum_functions import (
50
50
  read_full_forum_post
51
51
  )
52
52
 
53
- __version__ = "1.2.4"
53
+ __version__ = "1.2.6"
54
54
  __author__ = "CNHK"
55
55
  __email__ = "cnhk@example.com"
56
56
 
cnhkmcp/untracked/forum_functions.py CHANGED
@@ -989,83 +989,10 @@ Edge WebDriver not found. Please install Edge WebDriver:
989
989
  # Initialize forum client
990
990
  forum_client = ForumClient()
991
991
 
992
- # MCP Tools for Forum Functions
993
- if forum_mcp:
994
- @forum_mcp.tool()
995
- async def get_glossary_terms(email: str, password: str, headless: bool = False) -> Dict[str, Any]:
996
- """
997
- 📚 Extract glossary terms from WorldQuant BRAIN forum.
998
-
999
- Args:
1000
- email: Your BRAIN platform email address
1001
- password: Your BRAIN platform password
1002
- headless: Run browser in headless mode (default: False)
1003
-
1004
- Returns:
1005
- Glossary terms with definitions
1006
- """
1007
- try:
1008
- return await forum_client.get_glossary_terms(email, password, headless)
1009
- except Exception as e:
1010
- return {"error": str(e)}
1011
-
1012
- @forum_mcp.tool()
1013
- async def search_forum_posts(email: str, password: str, search_query: str,
1014
- max_results: int = 50, headless: bool = True) -> Dict[str, Any]:
1015
- """
1016
- 🔍 Search forum posts on WorldQuant BRAIN support site.
1017
-
1018
- Args:
1019
- email: Your BRAIN platform email address
1020
- password: Your BRAIN platform password
1021
- search_query: Search term or phrase
1022
- max_results: Maximum number of results to return (default: 50)
1023
- headless: Run browser in headless mode (default: True)
1024
-
1025
- Returns:
1026
- Search results with analysis. Each result item now includes:
1027
- - title: Post title
1028
- - link: Absolute URL to the post (may be a tracking redirect)
1029
- - description_html: Raw HTML snippet/preview
1030
- - description_text: Text version of snippet
1031
- - votes: Vote count (raw text, may include locale suffix)
1032
- - comments: Comment count (raw text)
1033
- - author: Parsed author name (or 'Unknown')
1034
- - date: Parsed date/datetime string when available
1035
- - snippet: Alias of description_text (backward compatibility)
1036
- - metadata: Compact string summary (author/date/votes/comments)
1037
- - page: Page number where the result appeared
1038
- - index: Index within that page (0-based)
1039
- """
1040
- try:
1041
- return await forum_client.search_forum_posts(email, password, search_query, max_results, headless)
1042
- except Exception as e:
1043
- return {"error": str(e)}
1044
-
1045
- @forum_mcp.tool()
1046
- async def read_full_forum_post(email: str, password: str, post_url_or_id: str,
1047
- headless: bool = False, include_comments: bool = True) -> Dict[str, Any]:
1048
- """
1049
- 📖 Read a full forum post with optional comments.
1050
-
1051
- Args:
1052
- email: Your BRAIN platform email address
1053
- password: Your BRAIN platform password
1054
- post_url_or_id: URL or ID of the post to read
1055
- headless: Run browser in headless mode (default: False)
1056
- include_comments: Include comments in the result (default: True)
1057
-
1058
- Returns:
1059
- Complete forum post with all content
1060
- """
1061
- try:
1062
- return await forum_client.read_full_forum_post(email, password, post_url_or_id, headless, include_comments)
1063
- except Exception as e:
1064
- return {"error": str(e)}
992
+ # MCP Tools for Forum Functions - REMOVED (duplicate with platform_functions.py)
993
+ # These tools are already properly integrated in the main platform_functions.py
1065
994
 
1066
995
  if __name__ == "__main__":
1067
996
  print("📚 WorldQuant BRAIN Forum Functions Server Starting...", file=sys.stderr)
1068
- if forum_mcp:
1069
- forum_mcp.run()
1070
- else:
1071
- print("FastMCP is not available. Please install it to run the server.", file=sys.stderr)
997
+ print("Note: Forum tools are now integrated in the main platform_functions.py", file=sys.stderr)
998
+ print("This file provides the ForumClient class for internal use.", file=sys.stderr)
cnhkmcp/untracked/platform_functions.py CHANGED
@@ -109,12 +109,12 @@ class BrainApiClient:
109
109
 
110
110
  # Check for successful authentication (status code 201)
111
111
  if response.status_code == 201:
112
- self.log("Authentication successful", "SUCCESS")
112
+ self.log("Authentication successful", "SUCCESS")
113
113
 
114
114
  # Check if JWT token was automatically stored by session
115
115
  jwt_token = self.session.cookies.get('t')
116
116
  if jwt_token:
117
- self.log("JWT token automatically stored by session", "SUCCESS")
117
+ self.log("JWT token automatically stored by session", "SUCCESS")
118
118
  else:
119
119
  self.log("⚠️ No JWT token found in session", "WARNING")
120
120
 
@@ -180,18 +180,18 @@ class BrainApiClient:
180
180
  # Try to open the URL but handle timeout
181
181
  try:
182
182
  driver.get(biometric_url)
183
- self.log("Browser page loaded successfully", "SUCCESS")
183
+ self.log("Browser page loaded successfully", "SUCCESS")
184
184
  except Exception as timeout_error:
185
185
  self.log(f"⚠️ Page load timeout (expected): {str(timeout_error)[:50]}...", "WARNING")
186
- self.log("Browser window is open for biometric authentication", "INFO")
186
+ self.log("Browser window is open for biometric authentication", "INFO")
187
187
 
188
188
  # Print instructions
189
189
  print("\n" + "="*60, file=sys.stderr)
190
- print("🔒 BIOMETRIC AUTHENTICATION REQUIRED", file=sys.stderr)
190
+ print("BIOMETRIC AUTHENTICATION REQUIRED", file=sys.stderr)
191
191
  print("="*60, file=sys.stderr)
192
- print("🌐 Browser window is open with biometric authentication page", file=sys.stderr)
193
- print("🔧 Complete the biometric authentication in the browser", file=sys.stderr)
194
- print("The system will automatically check when you're done...", file=sys.stderr)
192
+ print("Browser window is open with biometric authentication page", file=sys.stderr)
193
+ print("Complete the biometric authentication in the browser", file=sys.stderr)
194
+ print("The system will automatically check when you're done...", file=sys.stderr)
195
195
  print("="*60, file=sys.stderr)
196
196
 
197
197
  # Keep checking until authentication is complete
@@ -207,7 +207,7 @@ class BrainApiClient:
207
207
  self.log(f"🔄 Checking authentication status (attempt {attempt}/{max_attempts}): {check_response.status_code}", "INFO")
208
208
 
209
209
  if check_response.status_code == 201:
210
- self.log("Biometric authentication successful!", "SUCCESS")
210
+ self.log("Biometric authentication successful!", "SUCCESS")
211
211
 
212
212
  # Close browser
213
213
  driver.quit()
@@ -215,7 +215,7 @@ class BrainApiClient:
215
215
  # Check JWT token
216
216
  jwt_token = self.session.cookies.get('t')
217
217
  if jwt_token:
218
- self.log("JWT token received", "SUCCESS")
218
+ self.log("JWT token received", "SUCCESS")
219
219
 
220
220
  # Return success response
221
221
  return {
@@ -338,7 +338,7 @@ class BrainApiClient:
338
338
  location = response.headers.get('Location', '')
339
339
  simulation_id = location.split('/')[-1] if location else None
340
340
 
341
- self.log(f"Simulation created with ID: {simulation_id}", "SUCCESS")
341
+ self.log(f"Simulation created with ID: {simulation_id}", "SUCCESS")
342
342
 
343
343
 
344
344
  finished = False
@@ -358,7 +358,7 @@ class BrainApiClient:
358
358
  raise
359
359
 
360
360
  # get_simulation_status function removed as requested
361
- # wait_for_simulation_completion function removed as requested
361
+ # wait_for_simulation function removed as requested
362
362
 
363
363
  async def get_alpha_details(self, alpha_id: str) -> Dict[str, Any]:
364
364
  """Get detailed information about an alpha."""
@@ -433,24 +433,68 @@ class BrainApiClient:
433
433
  raise
434
434
 
435
435
  async def get_alpha_pnl(self, alpha_id: str) -> Dict[str, Any]:
436
- """Get PnL data for an alpha."""
436
+ """Get PnL data for an alpha with retry logic."""
437
437
  await self.ensure_authenticated()
438
438
 
439
- try:
440
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/pnl")
441
- response.raise_for_status()
442
- # Some alphas may return 204 No Content or an empty body
443
- text = (response.text or "").strip()
444
- if not text:
445
- return {}
439
+ max_retries = 5
440
+ retry_delay = 2 # seconds
441
+
442
+ for attempt in range(max_retries):
446
443
  try:
447
- return response.json()
448
- except Exception as parse_err:
449
- self.log(f"PnL JSON parse failed for {alpha_id}: {parse_err}", "WARNING")
450
- return {}
451
- except Exception as e:
452
- self.log(f"Failed to get alpha PnL: {str(e)}", "ERROR")
453
- raise
444
+ self.log(f"Attempting to get PnL for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")
445
+
446
+ response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/pnl")
447
+ response.raise_for_status()
448
+
449
+ # Some alphas may return 204 No Content or an empty body
450
+ text = (response.text or "").strip()
451
+ if not text:
452
+ if attempt < max_retries - 1:
453
+ self.log(f"Empty PnL response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
454
+ await asyncio.sleep(retry_delay)
455
+ retry_delay *= 1.5 # Exponential backoff
456
+ continue
457
+ else:
458
+ self.log(f"Empty PnL response after {max_retries} attempts for {alpha_id}", "WARNING")
459
+ return {}
460
+
461
+ try:
462
+ pnl_data = response.json()
463
+ if pnl_data:
464
+ self.log(f"Successfully retrieved PnL data for alpha {alpha_id}", "SUCCESS")
465
+ return pnl_data
466
+ else:
467
+ if attempt < max_retries - 1:
468
+ self.log(f"Empty PnL JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
469
+ await asyncio.sleep(retry_delay)
470
+ retry_delay *= 1.5
471
+ continue
472
+ else:
473
+ self.log(f"Empty PnL JSON after {max_retries} attempts for {alpha_id}", "WARNING")
474
+ return {}
475
+
476
+ except Exception as parse_err:
477
+ if attempt < max_retries - 1:
478
+ self.log(f"PnL JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
479
+ await asyncio.sleep(retry_delay)
480
+ retry_delay *= 1.5
481
+ continue
482
+ else:
483
+ self.log(f"PnL JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
484
+ return {}
485
+
486
+ except Exception as e:
487
+ if attempt < max_retries - 1:
488
+ self.log(f"Failed to get alpha PnL for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
489
+ await asyncio.sleep(retry_delay)
490
+ retry_delay *= 1.5
491
+ continue
492
+ else:
493
+ self.log(f"Failed to get alpha PnL for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
494
+ raise
495
+
496
+ # This should never be reached, but just in case
497
+ return {}
454
498
 
455
499
  async def get_user_alphas(
456
500
  self,
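The retry logic added to get_alpha_pnl above (5 attempts, 2-second initial delay, 1.5× backoff; empty bodies and JSON parse failures return {} after the last attempt, request errors re-raise) is repeated almost verbatim for the yearly-stats and correlation endpoints later in this diff. The following is only a sketch of how that pattern could be factored into one helper; it is not something the package ships.

# Sketch only, not part of cnhkmcp: a generic version of the retry pattern
# used by get_alpha_pnl above. `session` is assumed to be a requests.Session
# and `log` a callable like BrainApiClient.log(message, level).
import asyncio
from typing import Any, Callable, Dict

async def fetch_json_with_retry(session, url: str, log: Callable[[str, str], None],
                                max_retries: int = 5,
                                retry_delay: float = 2.0,
                                backoff: float = 1.5) -> Dict[str, Any]:
    for attempt in range(1, max_retries + 1):
        try:
            response = session.get(url)
            response.raise_for_status()
            text = (response.text or "").strip()
            if text:
                try:
                    data = response.json()
                    if data:
                        return data
                except ValueError:
                    pass  # malformed JSON: treat like an empty body and retry
        except Exception as exc:
            if attempt == max_retries:
                log(f"{url} failed after {max_retries} attempts: {exc}", "ERROR")
                raise
        if attempt < max_retries:
            await asyncio.sleep(retry_delay)
            retry_delay *= backoff
    log(f"No usable data from {url} after {max_retries} attempts", "WARNING")
    return {}

Under these assumptions, the PnL call would reduce to something like await fetch_json_with_retry(self.session, f"{self.base_url}/alphas/{alpha_id}/recordsets/pnl", self.log).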
@@ -503,7 +547,7 @@ class BrainApiClient:
503
547
  response = self.session.post(f"{self.base_url}/alphas/{alpha_id}/submit")
504
548
  response.raise_for_status()
505
549
 
506
- self.log(f"Alpha {alpha_id} submitted successfully", "SUCCESS")
550
+ self.log(f"Alpha {alpha_id} submitted successfully", "SUCCESS")
507
551
  return True
508
552
 
509
553
  except Exception as e:
@@ -781,44 +825,188 @@ class BrainApiClient:
781
825
  return {"error": str(e)}
782
826
 
783
827
  async def get_alpha_yearly_stats(self, alpha_id: str) -> Dict[str, Any]:
784
- """Get yearly statistics for an alpha."""
828
+ """Get yearly statistics for an alpha with retry logic."""
785
829
  await self.ensure_authenticated()
786
830
 
787
- try:
788
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/yearly-stats")
789
- response.raise_for_status()
790
- return response.json()
791
- except Exception as e:
792
- self.log(f"Failed to get alpha yearly stats: {str(e)}", "ERROR")
793
- raise
831
+ max_retries = 5
832
+ retry_delay = 2 # seconds
833
+
834
+ for attempt in range(max_retries):
835
+ try:
836
+ self.log(f"Attempting to get yearly stats for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")
837
+
838
+ response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/recordsets/yearly-stats")
839
+ response.raise_for_status()
840
+
841
+ # Check if response has content
842
+ text = (response.text or "").strip()
843
+ if not text:
844
+ if attempt < max_retries - 1:
845
+ self.log(f"Empty yearly stats response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
846
+ await asyncio.sleep(retry_delay)
847
+ retry_delay *= 1.5 # Exponential backoff
848
+ continue
849
+ else:
850
+ self.log(f"Empty yearly stats response after {max_retries} attempts for {alpha_id}", "WARNING")
851
+ return {}
852
+
853
+ try:
854
+ yearly_stats = response.json()
855
+ if yearly_stats:
856
+ self.log(f"Successfully retrieved yearly stats for alpha {alpha_id}", "SUCCESS")
857
+ return yearly_stats
858
+ else:
859
+ if attempt < max_retries - 1:
860
+ self.log(f"Empty yearly stats JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
861
+ await asyncio.sleep(retry_delay)
862
+ retry_delay *= 1.5
863
+ continue
864
+ else:
865
+ self.log(f"Empty yearly stats JSON after {max_retries} attempts for {alpha_id}", "WARNING")
866
+ return {}
867
+
868
+ except Exception as parse_err:
869
+ if attempt < max_retries - 1:
870
+ self.log(f"Yearly stats JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
871
+ await asyncio.sleep(retry_delay)
872
+ retry_delay *= 1.5
873
+ continue
874
+ else:
875
+ self.log(f"Yearly stats JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
876
+ return {}
877
+
878
+ except Exception as e:
879
+ if attempt < max_retries - 1:
880
+ self.log(f"Failed to get alpha yearly stats for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
881
+ await asyncio.sleep(retry_delay)
882
+ retry_delay *= 1.5
883
+ continue
884
+ else:
885
+ self.log(f"Failed to get alpha yearly stats for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
886
+ raise
887
+
888
+ # This should never be reached, but just in case
889
+ return {}
794
890
 
795
891
  async def get_production_correlation(self, alpha_id: str) -> Dict[str, Any]:
796
- """Get production correlation data for an alpha."""
892
+ """Get production correlation data for an alpha with retry logic."""
797
893
  await self.ensure_authenticated()
798
894
 
799
- try:
800
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/prod")
801
- response.raise_for_status()
802
- if response.text:
803
- return response.json()
804
- return {} # Return empty dict for empty response
805
- except Exception as e:
806
- self.log(f"Failed to get production correlation: {str(e)}", "ERROR")
807
- raise
895
+ max_retries = 5
896
+ retry_delay = 20 # seconds
897
+
898
+ for attempt in range(max_retries):
899
+ try:
900
+ self.log(f"Attempting to get production correlation for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")
901
+
902
+ response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/prod")
903
+ response.raise_for_status()
904
+
905
+ # Check if response has content
906
+ text = (response.text or "").strip()
907
+ if not text:
908
+ if attempt < max_retries - 1:
909
+ self.log(f"Empty production correlation response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
910
+ await asyncio.sleep(retry_delay)
911
+ continue
912
+ else:
913
+ self.log(f"Empty production correlation response after {max_retries} attempts for {alpha_id}", "WARNING")
914
+ return {}
915
+
916
+ try:
917
+ correlation_data = response.json()
918
+ if correlation_data:
919
+ self.log(f"Successfully retrieved production correlation for alpha {alpha_id}", "SUCCESS")
920
+ return correlation_data
921
+ else:
922
+ if attempt < max_retries - 1:
923
+ self.log(f"Empty production correlation JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
924
+ await asyncio.sleep(retry_delay)
925
+ continue
926
+ else:
927
+ self.log(f"Empty production correlation JSON after {max_retries} attempts for {alpha_id}", "WARNING")
928
+ return {}
929
+
930
+ except Exception as parse_err:
931
+ if attempt < max_retries - 1:
932
+ self.log(f"Production correlation JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
933
+ await asyncio.sleep(retry_delay)
934
+ continue
935
+ else:
936
+ self.log(f"Production correlation JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
937
+ return {}
938
+
939
+ except Exception as e:
940
+ if attempt < max_retries - 1:
941
+ self.log(f"Failed to get production correlation for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
942
+ await asyncio.sleep(retry_delay)
943
+ continue
944
+ else:
945
+ self.log(f"Failed to get production correlation for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
946
+ raise
947
+
948
+ # This should never be reached, but just in case
949
+ return {}
808
950
 
809
951
  async def get_self_correlation(self, alpha_id: str) -> Dict[str, Any]:
810
- """Get self-correlation data for an alpha."""
952
+ """Get self-correlation data for an alpha with retry logic."""
811
953
  await self.ensure_authenticated()
812
954
 
813
- try:
814
- response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/self")
815
- response.raise_for_status()
816
- if response.text:
817
- return response.json()
818
- return {} # Return empty dict for empty response
819
- except Exception as e:
820
- self.log(f"Failed to get self correlation: {str(e)}", "ERROR")
821
- raise
955
+ max_retries = 5
956
+ retry_delay = 20 # seconds
957
+
958
+ for attempt in range(max_retries):
959
+ try:
960
+ self.log(f"Attempting to get self correlation for alpha {alpha_id} (attempt {attempt + 1}/{max_retries})", "INFO")
961
+
962
+ response = self.session.get(f"{self.base_url}/alphas/{alpha_id}/correlations/self")
963
+ response.raise_for_status()
964
+
965
+ # Check if response has content
966
+ text = (response.text or "").strip()
967
+ if not text:
968
+ if attempt < max_retries - 1:
969
+ self.log(f"Empty self correlation response for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
970
+ await asyncio.sleep(retry_delay)
971
+ continue
972
+ else:
973
+ self.log(f"Empty self correlation response after {max_retries} attempts for {alpha_id}", "WARNING")
974
+ return {}
975
+
976
+ try:
977
+ correlation_data = response.json()
978
+ if correlation_data:
979
+ self.log(f"Successfully retrieved self correlation for alpha {alpha_id}", "SUCCESS")
980
+ return correlation_data
981
+ else:
982
+ if attempt < max_retries - 1:
983
+ self.log(f"Empty self correlation JSON for {alpha_id}, retrying in {retry_delay} seconds...", "WARNING")
984
+ await asyncio.sleep(retry_delay)
985
+ continue
986
+ else:
987
+ self.log(f"Empty self correlation JSON after {max_retries} attempts for {alpha_id}", "WARNING")
988
+ return {}
989
+
990
+ except Exception as parse_err:
991
+ if attempt < max_retries - 1:
992
+ self.log(f"Self correlation JSON parse failed for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds...", "WARNING")
993
+ await asyncio.sleep(retry_delay)
994
+ continue
995
+ else:
996
+ self.log(f"Self correlation JSON parse failed for {alpha_id} after {max_retries} attempts: {parse_err}", "WARNING")
997
+ return {}
998
+
999
+ except Exception as e:
1000
+ if attempt < max_retries - 1:
1001
+ self.log(f"Failed to get self correlation for {alpha_id} (attempt {attempt + 1}), retrying in {retry_delay} seconds: {str(e)}", "WARNING")
1002
+ await asyncio.sleep(retry_delay)
1003
+ continue
1004
+ else:
1005
+ self.log(f"Failed to get self correlation for {alpha_id} after {max_retries} attempts: {str(e)}", "ERROR")
1006
+ raise
1007
+
1008
+ # This should never be reached, but just in case
1009
+ return {}
822
1010
 
823
1011
  async def check_correlation(self, alpha_id: str, correlation_type: str = "both", threshold: float = 0.7) -> Dict[str, Any]:
824
1012
  """Check alpha correlation against production alphas, self alphas, or both."""
@@ -1370,7 +1558,7 @@ async def create_simulation(
1370
1558
  decay: float = 0.0,
1371
1559
  neutralization: str = "NONE",
1372
1560
  truncation: float = 0.0,
1373
- test_period: str = "P1Y6M",
1561
+ test_period: str = "P0Y0M",
1374
1562
  unit_handling: str = "VERIFY",
1375
1563
  nan_handling: str = "OFF",
1376
1564
  language: str = "FASTEXPR",
@@ -1398,7 +1586,7 @@ async def create_simulation(
1398
1586
  decay: Decay value for the simulation
1399
1587
  neutralization: Neutralization method
1400
1588
  truncation: Truncation value
1401
- test_period: Test period (e.g., "P1Y6M" for 1 year 6 months)
1589
+ test_period: Test period as an ISO 8601 duration (e.g., "P1Y6M" for 1 year 6 months; default "P0Y0M")
1402
1590
  unit_handling: Unit handling method
1403
1591
  nan_handling: NaN handling method
1404
1592
  language: Expression language (e.g., "FASTEXPR")
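The default test_period changes from "P1Y6M" to "P0Y0M" in this release, so callers that relied on the old implicit 18-month out-of-sample window must now request one explicitly. A hypothetical call sketch is shown below; only the parameters visible in this hunk come from the source, and the name of the expression argument (regular) is an assumption, since the earlier part of the signature is not shown here.

# Hypothetical call sketch: `regular` is an assumed name for the alpha expression
# parameter (not visible in this hunk), and create_simulation (the tool defined
# above) is assumed to be in scope; the expression itself is a placeholder.
async def run_with_test_period() -> dict:
    return await create_simulation(
        regular="rank(-returns)",   # placeholder FASTEXPR expression
        neutralization="SUBINDUSTRY",
        truncation=0.08,
        test_period="P1Y6M",        # explicit ISO 8601 duration; the default is now "P0Y0M"
    )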
@@ -2025,6 +2213,298 @@ async def get_documentation_page(page_id: str) -> Dict[str, Any]:
2025
2213
 
2026
2214
  # Badge status MCP tool removed as requested
2027
2215
 
2216
+ @mcp.tool()
2217
+ async def create_multi_regularAlpha_simulation(
2218
+ alpha_expressions: List[str],
2219
+ instrument_type: str = "EQUITY",
2220
+ region: str = "USA",
2221
+ universe: str = "TOP3000",
2222
+ delay: int = 1,
2223
+ decay: float = 0.0,
2224
+ neutralization: str = "NONE",
2225
+ truncation: float = 0.0,
2226
+ test_period: str = "P0Y0M",
2227
+ unit_handling: str = "VERIFY",
2228
+ nan_handling: str = "OFF",
2229
+ language: str = "FASTEXPR",
2230
+ visualization: bool = True,
2231
+ pasteurization: str = "ON",
2232
+ max_trade: str = "OFF"
2233
+ ) -> Dict[str, Any]:
2234
+ """
2235
+ 🚀 Create multiple regular alpha simulations on BRAIN platform in a single request.
2236
+
2237
+ This tool creates a multisimulation with multiple regular alpha expressions,
2238
+ waits for all simulations to complete, and returns detailed results for each alpha.
2239
+
2240
+ ⏰ NOTE: Multisimulations can take 8+ minutes to complete. This tool will wait
2241
+ for the entire process and return comprehensive results.
2242
+ Call get_platform_setting_options to get the valid options for the simulation.
2243
+ Args:
2244
+ alpha_expressions: List of alpha expressions (2-8 expressions required)
2245
+ instrument_type: Type of instruments (default: "EQUITY")
2246
+ region: Market region (default: "USA")
2247
+ universe: Universe of stocks (default: "TOP3000")
2248
+ delay: Data delay (default: 1)
2249
+ decay: Decay value (default: 0.0)
2250
+ neutralization: Neutralization method (default: "NONE")
2251
+ truncation: Truncation value (default: 0.0)
2252
+ test_period: Test period (default: "P0Y0M")
2253
+ unit_handling: Unit handling method (default: "VERIFY")
2254
+ nan_handling: NaN handling method (default: "OFF")
2255
+ language: Expression language (default: "FASTEXPR")
2256
+ visualization: Enable visualization (default: True)
2257
+ pasteurization: Pasteurization setting (default: "ON")
2258
+ max_trade: Max trade setting (default: "OFF")
2259
+
2260
+ Returns:
2261
+ Dictionary containing multisimulation results and individual alpha details
2262
+ """
2263
+ try:
2264
+ # Validate input
2265
+ if len(alpha_expressions) < 2:
2266
+ return {"error": "At least 2 alpha expressions are required"}
2267
+ if len(alpha_expressions) > 8:
2268
+ return {"error": "Maximum 8 alpha expressions allowed per request"}
2269
+
2270
+ # Create multisimulation data
2271
+ multisimulation_data = []
2272
+ for alpha_expr in alpha_expressions:
2273
+ simulation_item = {
2274
+ 'type': 'REGULAR',
2275
+ 'settings': {
2276
+ 'instrumentType': instrument_type,
2277
+ 'region': region,
2278
+ 'universe': universe,
2279
+ 'delay': delay,
2280
+ 'decay': decay,
2281
+ 'neutralization': neutralization,
2282
+ 'truncation': truncation,
2283
+ 'pasteurization': pasteurization,
2284
+ 'unitHandling': unit_handling,
2285
+ 'nanHandling': nan_handling,
2286
+ 'language': language,
2287
+ 'visualization': visualization,
2288
+ 'testPeriod': test_period,
2289
+ 'maxTrade': max_trade
2290
+ },
2291
+ 'regular': alpha_expr
2292
+ }
2293
+ multisimulation_data.append(simulation_item)
2294
+
2295
+ # Send multisimulation request
2296
+ response = brain_client.session.post(f"{brain_client.base_url}/simulations", json=multisimulation_data)
2297
+
2298
+ if response.status_code != 201:
2299
+ return {"error": f"Failed to create multisimulation. Status: {response.status_code}"}
2300
+
2301
+ # Get multisimulation location
2302
+ location = response.headers.get('Location', '')
2303
+ if not location:
2304
+ return {"error": "No location header in multisimulation response"}
2305
+
2306
+ # Wait for children to appear and get results
2307
+ return await _wait_for_multisimulation_completion(location, len(alpha_expressions))
2308
+
2309
+ except Exception as e:
2310
+ return {"error": f"Error creating multisimulation: {str(e)}"}
2311
+
2312
+ async def _wait_for_multisimulation_completion(location: str, expected_children: int) -> Dict[str, Any]:
2313
+ """Wait for multisimulation to complete and return results"""
2314
+ try:
2315
+ # Simple progress indicator for users
2316
+ print(f"Waiting for multisimulation to complete... (this may take several minutes)")
2317
+ print(f"Expected {expected_children} alpha simulations")
2318
+ print()
2319
+ # Wait for children to appear - much more tolerant for 8+ minute multisimulations
2320
+ children = []
2321
+ max_wait_attempts = 200 # Increased significantly for 8+ minute multisimulations
2322
+ wait_attempt = 0
2323
+
2324
+ while wait_attempt < max_wait_attempts and len(children) == 0:
2325
+ wait_attempt += 1
2326
+
2327
+ try:
2328
+ multisim_response = brain_client.session.get(location)
2329
+ if multisim_response.status_code == 200:
2330
+ multisim_data = multisim_response.json()
2331
+ children = multisim_data.get('children', [])
2332
+
2333
+ if children:
2334
+ break
2335
+ else:
2336
+ # Wait before next attempt - use longer intervals for multisimulations
2337
+ retry_after = multisim_response.headers.get("Retry-After", 5)
2338
+ wait_time = float(retry_after)
2339
+ await asyncio.sleep(wait_time)
2340
+ else:
2341
+ await asyncio.sleep(5)
2342
+ except Exception as e:
2343
+ await asyncio.sleep(5)
2344
+
2345
+ if not children:
2346
+ return {"error": f"Children did not appear within {max_wait_attempts} attempts (multisimulation may still be processing)"}
2347
+
2348
+ # Process each child to get alpha results
2349
+ alpha_results = []
2350
+ for i, child_id in enumerate(children):
2351
+ try:
2352
+ # The children are full URLs, not just IDs
2353
+ child_url = child_id if child_id.startswith('http') else f"{brain_client.base_url}/simulations/{child_id}"
2354
+
2355
+ # Wait for this alpha to complete - more tolerant timing
2356
+ finished = False
2357
+ max_alpha_attempts = 100 # Increased for longer alpha processing
2358
+ alpha_attempt = 0
2359
+
2360
+ while not finished and alpha_attempt < max_alpha_attempts:
2361
+ alpha_attempt += 1
2362
+
2363
+ try:
2364
+ alpha_progress = brain_client.session.get(child_url)
2365
+ if alpha_progress.status_code == 200:
2366
+ alpha_data = alpha_progress.json()
2367
+ retry_after = alpha_progress.headers.get("Retry-After", 0)
2368
+
2369
+ if retry_after == 0:
2370
+ finished = True
2371
+ break
2372
+ else:
2373
+ wait_time = float(retry_after)
2374
+ await asyncio.sleep(wait_time)
2375
+ else:
2376
+ await asyncio.sleep(5)
2377
+ except Exception as e:
2378
+ await asyncio.sleep(5)
2379
+
2380
+ if finished:
2381
+ # Get alpha details from the completed simulation
2382
+ alpha_id = alpha_data.get("alpha")
2383
+ if alpha_id:
2384
+ # Now get the actual alpha details from the alpha endpoint
2385
+ alpha_details = brain_client.session.get(f"{brain_client.base_url}/alphas/{alpha_id}")
2386
+ if alpha_details.status_code == 200:
2387
+ alpha_detail_data = alpha_details.json()
2388
+ alpha_results.append({
2389
+ 'alpha_id': alpha_id,
2390
+ 'location': child_url,
2391
+ 'details': alpha_detail_data
2392
+ })
2393
+ else:
2394
+ alpha_results.append({
2395
+ 'alpha_id': alpha_id,
2396
+ 'location': child_url,
2397
+ 'error': f'Failed to get alpha details: {alpha_details.status_code}'
2398
+ })
2399
+ else:
2400
+ alpha_results.append({
2401
+ 'location': child_url,
2402
+ 'error': 'No alpha ID found in completed simulation'
2403
+ })
2404
+ else:
2405
+ alpha_results.append({
2406
+ 'location': child_url,
2407
+ 'error': f'Alpha simulation did not complete within {max_alpha_attempts} attempts'
2408
+ })
2409
+
2410
+ except Exception as e:
2411
+ alpha_results.append({
2412
+ 'location': f"child_{i+1}",
2413
+ 'error': str(e)
2414
+ })
2415
+
2416
+ # Return comprehensive results
2417
+ print(f"Multisimulation completed! Retrieved {len(alpha_results)} alpha results")
2418
+ return {
2419
+ 'success': True,
2420
+ 'message': f'Successfully created {expected_children} regular alpha simulations',
2421
+ 'total_requested': expected_children,
2422
+ 'total_created': len(alpha_results),
2423
+ 'multisimulation_id': location.split('/')[-1],
2424
+ 'multisimulation_location': location,
2425
+ 'alpha_results': alpha_results
2426
+ }
2427
+
2428
+ except Exception as e:
2429
+ return {"error": f"Error waiting for multisimulation completion: {str(e)}"}
2430
+
2431
+ @mcp.tool()
2432
+ async def get_daily_and_quarterly_payment(email: str = "", password: str = "") -> Dict[str, Any]:
2433
+ """
2434
+ Get daily and quarterly payment information from WorldQuant BRAIN platform.
2435
+
2436
+ This function retrieves both base payments (daily alpha performance payments) and
2437
+ other payments (competition rewards, quarterly payments, referrals, etc.).
2438
+
2439
+ Args:
2440
+ email: Your BRAIN platform email address (optional if in config)
2441
+ password: Your BRAIN platform password (optional if in config)
2442
+
2443
+ Returns:
2444
+ Dictionary containing base payment and other payment data with summaries and detailed records
2445
+ """
2446
+ try:
2447
+ # Initialize client
2448
+ brain_client = BrainApiClient()
2449
+
2450
+ # Authenticate if credentials provided
2451
+ if email and password:
2452
+ auth_result = await brain_client.authenticate(email, password)
2453
+ if auth_result.get('status') != 'authenticated':
2454
+ return {"error": f"Authentication failed: {auth_result.get('message', 'Unknown error')}"}
2455
+ else:
2456
+ # Try to use existing session or config
2457
+ config = await manage_config("get")
2458
+ if not config.get('is_authenticated'):
2459
+ return {"error": "Not authenticated. Please provide email and password or authenticate first."}
2460
+
2461
+ # Get base payment data
2462
+ base_payment_response = brain_client.session.get(
2463
+ 'https://api.worldquantbrain.com/users/self/activities/base-payment'
2464
+ )
2465
+
2466
+ if base_payment_response.status_code != 200:
2467
+ return {"error": f"Failed to get base payment data: {base_payment_response.status_code}"}
2468
+
2469
+ base_payment_data = base_payment_response.json()
2470
+
2471
+ # Get other payment data
2472
+ other_payment_response = brain_client.session.get(
2473
+ 'https://api.worldquantbrain.com/users/self/activities/other-payment'
2474
+ )
2475
+
2476
+ if other_payment_response.status_code != 200:
2477
+ return {"error": f"Failed to get other payment data: {other_payment_response.status_code}"}
2478
+
2479
+ other_payment_data = other_payment_response.json()
2480
+
2481
+ # Return comprehensive payment information
2482
+ return {
2483
+ "success": True,
2484
+ "base_payment": {
2485
+ "summary": {
2486
+ "yesterday": base_payment_data.get("yesterday"),
2487
+ "current_quarter": base_payment_data.get("current"),
2488
+ "previous_quarter": base_payment_data.get("previous"),
2489
+ "year_to_date": base_payment_data.get("ytd"),
2490
+ "total_all_time": base_payment_data.get("total"),
2491
+ "currency": base_payment_data.get("currency")
2492
+ },
2493
+ "daily_records": base_payment_data.get("records", {}).get("records", []),
2494
+ "schema": base_payment_data.get("records", {}).get("schema")
2495
+ },
2496
+ "other_payment": {
2497
+ "total_all_time": other_payment_data.get("total"),
2498
+ "special_payments": other_payment_data.get("records", {}).get("records", []),
2499
+ "schema": other_payment_data.get("records", {}).get("schema"),
2500
+ "currency": other_payment_data.get("currency")
2501
+ },
2502
+ "timestamp": datetime.now().isoformat()
2503
+ }
2504
+
2505
+ except Exception as e:
2506
+ return {"error": f"Error retrieving payment information: {str(e)}"}
2507
+
2028
2508
  if __name__ == "__main__":
2029
2509
  print("🧠 WorldQuant BRAIN MCP Server Starting...", file=sys.stderr)
2030
2510
  mcp.run()
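The two tools added in the large hunk above are registered on the existing mcp server, so they are invoked through an MCP client rather than imported directly. The sketch below shows only the argument payload a client might send to create_multi_regularAlpha_simulation; the argument names come from the tool signature above, while the expressions and any transport details are placeholders.

# Sketch only: argument names match the tool signature above; the expression
# strings are placeholders, and how the call is issued depends on your MCP client.
tool_name = "create_multi_regularAlpha_simulation"
tool_arguments = {
    "alpha_expressions": [        # the tool requires 2-8 expressions
        "rank(-returns)",
        "rank(close / open)",
    ],
    "region": "USA",
    "universe": "TOP3000",
    "neutralization": "SUBINDUSTRY",
    "truncation": 0.08,
    "test_period": "P0Y0M",
}
# Expect the call to block: the tool polls the multisimulation and each child
# simulation until they finish (the docstring above warns this can take 8+ minutes)
# and returns per-alpha details under "alpha_results".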
cnhkmcp-1.2.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cnhkmcp
3
- Version: 1.2.4
3
+ Version: 1.2.6
4
4
  Summary: A comprehensive Model Context Protocol (MCP) server for quantitative trading platform integration
5
5
  Home-page: https://github.com/cnhk/cnhkmcp
6
6
  Author: CNHK
cnhkmcp-1.2.6.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
1
+ cnhkmcp/__init__.py,sha256=jqCFXwAan-B5a2BJ_At7_gfrb9lkBOvHO06O6VX3_uA,2758
2
+ cnhkmcp/untracked/forum_functions.py,sha256=QW-CplAsqDkw-Wcwq-1tuZBq48dEO-vXZ8xw7X65EuE,42303
3
+ cnhkmcp/untracked/platform_functions.py,sha256=-R_CFxpIRZ7q3YXdD7DUavqTd6f09kWaQLVDwzSFGGE,105521
4
+ cnhkmcp/untracked/user_config.json,sha256=_INn1X1qIsITrmEno-BRlQOAGm9wnNCw-6B333DEvnk,695
5
+ cnhkmcp-1.2.6.dist-info/licenses/LICENSE,sha256=QLxO2eNMnJQEdI_R1UV2AOD-IvuA8zVrkHWA4D9gtoc,1081
6
+ cnhkmcp-1.2.6.dist-info/METADATA,sha256=rlHVfLo9I8KPoxLpJ8HZ9pngFaBMBwi8-KqzBcBSYAM,5171
7
+ cnhkmcp-1.2.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
8
+ cnhkmcp-1.2.6.dist-info/entry_points.txt,sha256=lTQieVyIvjhSMK4fT-XwnccY-JBC1H4vVQ3V9dDM-Pc,70
9
+ cnhkmcp-1.2.6.dist-info/top_level.txt,sha256=x--ibUcSgOS9Z_RWK2Qc-vfs7DaXQN-WMaaxEETJ1Bw,8
10
+ cnhkmcp-1.2.6.dist-info/RECORD,,
cnhkmcp-1.2.4.dist-info/RECORD REMOVED
@@ -1,10 +0,0 @@
1
- cnhkmcp/__init__.py,sha256=-laBHbRG7AtBQp4pu6I9nSFtNBoBp_p35pQzNcf2Jkw,2758
2
- cnhkmcp/untracked/forum_functions.py,sha256=78wzvN_UYWwbWU40q8_FJNSFPJnND6W9ZRey6MSSiEk,45516
3
- cnhkmcp/untracked/platform_functions.py,sha256=o0l1MNcUUmvCRgjhxbn0QaxdZSLyV_C1DI-2DaXx9Eg,81739
4
- cnhkmcp/untracked/user_config.json,sha256=_INn1X1qIsITrmEno-BRlQOAGm9wnNCw-6B333DEvnk,695
5
- cnhkmcp-1.2.4.dist-info/licenses/LICENSE,sha256=QLxO2eNMnJQEdI_R1UV2AOD-IvuA8zVrkHWA4D9gtoc,1081
6
- cnhkmcp-1.2.4.dist-info/METADATA,sha256=a86cCJaC48vkhIHafwnyDZYliEfK6Ht6qGZdO7qCsy8,5171
7
- cnhkmcp-1.2.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
8
- cnhkmcp-1.2.4.dist-info/entry_points.txt,sha256=lTQieVyIvjhSMK4fT-XwnccY-JBC1H4vVQ3V9dDM-Pc,70
9
- cnhkmcp-1.2.4.dist-info/top_level.txt,sha256=x--ibUcSgOS9Z_RWK2Qc-vfs7DaXQN-WMaaxEETJ1Bw,8
10
- cnhkmcp-1.2.4.dist-info/RECORD,,