cnhkmcp 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95) hide show
  1. cnhkmcp/__init__.py +125 -0
  2. cnhkmcp/untracked/APP/.gitignore +32 -0
  3. cnhkmcp/untracked/APP/MODULAR_STRUCTURE.md +112 -0
  4. cnhkmcp/untracked/APP/README.md +309 -0
  5. cnhkmcp/untracked/APP/Tranformer/Transformer.py +2192 -0
  6. cnhkmcp/untracked/APP/Tranformer/ace.log +0 -0
  7. cnhkmcp/untracked/APP/Tranformer/ace_lib.py +1489 -0
  8. cnhkmcp/untracked/APP/Tranformer/helpful_functions.py +180 -0
  9. cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates.json +1786 -0
  10. cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates_/321/207/320/264/342/225/221/321/204/342/225/233/320/233.json +654 -0
  11. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_error.json +261 -0
  12. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_success.json +170 -0
  13. cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_/321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/277/321/207/320/253/342/224/244/321/206/320/236/320/265/321/210/342/225/234/342/225/234/321/205/320/225/320/265Machine_lib.json +22 -0
  14. cnhkmcp/untracked/APP/Tranformer/parsetab.py +60 -0
  15. cnhkmcp/untracked/APP/Tranformer/template_summary.txt +408 -0
  16. cnhkmcp/untracked/APP/Tranformer/transformer_config.json +7 -0
  17. cnhkmcp/untracked/APP/Tranformer/validator.py +889 -0
  18. cnhkmcp/untracked/APP/ace.log +65 -0
  19. cnhkmcp/untracked/APP/ace_lib.py +1489 -0
  20. cnhkmcp/untracked/APP/blueprints/__init__.py +6 -0
  21. cnhkmcp/untracked/APP/blueprints/feature_engineering.py +347 -0
  22. cnhkmcp/untracked/APP/blueprints/idea_house.py +221 -0
  23. cnhkmcp/untracked/APP/blueprints/inspiration_house.py +432 -0
  24. cnhkmcp/untracked/APP/blueprints/paper_analysis.py +570 -0
  25. cnhkmcp/untracked/APP/custom_templates/templates.json +1257 -0
  26. cnhkmcp/untracked/APP/give_me_idea/BRAIN_Alpha_Template_Expert_SystemPrompt.md +400 -0
  27. cnhkmcp/untracked/APP/give_me_idea/ace_lib.py +1489 -0
  28. cnhkmcp/untracked/APP/give_me_idea/alpha_data_specific_template_master.py +247 -0
  29. cnhkmcp/untracked/APP/give_me_idea/helpful_functions.py +180 -0
  30. cnhkmcp/untracked/APP/give_me_idea/what_is_Alpha_template.md +11 -0
  31. cnhkmcp/untracked/APP/helpful_functions.py +180 -0
  32. cnhkmcp/untracked/APP/hkSimulator/ace.log +0 -0
  33. cnhkmcp/untracked/APP/hkSimulator/ace_lib.py +1476 -0
  34. cnhkmcp/untracked/APP/hkSimulator/autosim_20251205_145240.log +0 -0
  35. cnhkmcp/untracked/APP/hkSimulator/autosim_20251215_030103.log +0 -0
  36. cnhkmcp/untracked/APP/hkSimulator/autosimulator.py +447 -0
  37. cnhkmcp/untracked/APP/hkSimulator/helpful_functions.py +180 -0
  38. cnhkmcp/untracked/APP/mirror_config.txt +20 -0
  39. cnhkmcp/untracked/APP/operaters.csv +129 -0
  40. cnhkmcp/untracked/APP/requirements.txt +53 -0
  41. cnhkmcp/untracked/APP/run_app.bat +28 -0
  42. cnhkmcp/untracked/APP/run_app.sh +34 -0
  43. cnhkmcp/untracked/APP/setup_tsinghua.bat +39 -0
  44. cnhkmcp/untracked/APP/setup_tsinghua.sh +43 -0
  45. cnhkmcp/untracked/APP/simulator/alpha_submitter.py +404 -0
  46. cnhkmcp/untracked/APP/simulator/simulator_wqb.py +618 -0
  47. cnhkmcp/untracked/APP/ssrn-3332513.pdf +109188 -19
  48. cnhkmcp/untracked/APP/static/brain.js +528 -0
  49. cnhkmcp/untracked/APP/static/decoder.js +1540 -0
  50. cnhkmcp/untracked/APP/static/feature_engineering.js +1729 -0
  51. cnhkmcp/untracked/APP/static/idea_house.js +937 -0
  52. cnhkmcp/untracked/APP/static/inspiration.js +465 -0
  53. cnhkmcp/untracked/APP/static/inspiration_house.js +868 -0
  54. cnhkmcp/untracked/APP/static/paper_analysis.js +390 -0
  55. cnhkmcp/untracked/APP/static/script.js +2942 -0
  56. cnhkmcp/untracked/APP/static/simulator.js +597 -0
  57. cnhkmcp/untracked/APP/static/styles.css +3127 -0
  58. cnhkmcp/untracked/APP/static/usage_widget.js +508 -0
  59. cnhkmcp/untracked/APP/templates/alpha_inspector.html +511 -0
  60. cnhkmcp/untracked/APP/templates/feature_engineering.html +960 -0
  61. cnhkmcp/untracked/APP/templates/idea_house.html +564 -0
  62. cnhkmcp/untracked/APP/templates/index.html +911 -0
  63. cnhkmcp/untracked/APP/templates/inspiration_house.html +861 -0
  64. cnhkmcp/untracked/APP/templates/paper_analysis.html +91 -0
  65. cnhkmcp/untracked/APP/templates/simulator.html +343 -0
  66. cnhkmcp/untracked/APP/templates/transformer_web.html +580 -0
  67. cnhkmcp/untracked/APP/usage.md +351 -0
  68. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/ace_lib.py +1489 -0
  69. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/brain_alpha_inspector.py +712 -0
  70. cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/helpful_functions.py +180 -0
  71. cnhkmcp/untracked/APP//321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +2393 -0
  72. cnhkmcp/untracked/arXiv_API_Tool_Manual.md +490 -0
  73. cnhkmcp/untracked/arxiv_api.py +229 -0
  74. cnhkmcp/untracked/forum_functions.py +998 -0
  75. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/forum_functions.py +407 -0
  76. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/platform_functions.py +2415 -0
  77. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/user_config.json +31 -0
  78. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/210/320/276/320/271AI/321/210/320/277/342/225/227/321/210/342/224/220/320/251/321/204/342/225/225/320/272/321/206/320/246/320/227/321/206/320/261/320/263/321/206/320/255/320/265/321/205/320/275/320/266/321/204/342/225/235/320/252/321/204/342/225/225/320/233/321/210/342/225/234/342/225/234/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270.md +101 -0
  79. cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +190 -0
  80. cnhkmcp/untracked/platform_functions.py +2886 -0
  81. cnhkmcp/untracked/sample_mcp_config.json +11 -0
  82. cnhkmcp/untracked/user_config.json +31 -0
  83. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/222/321/210/320/220/320/223/321/206/320/246/320/227/321/206/320/261/320/263_BRAIN_Alpha_Test_Requirements_and_Tips.md +202 -0
  84. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Alpha_explaination_workflow.md +56 -0
  85. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_6_Tips_Datafield_Exploration_Guide.md +194 -0
  86. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_Alpha_Improvement_Workflow.md +101 -0
  87. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Dataset_Exploration_Expert_Manual.md +436 -0
  88. cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_daily_report_workflow.md +128 -0
  89. cnhkmcp/untracked//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +190 -0
  90. cnhkmcp-2.0.1.dist-info/METADATA +187 -0
  91. cnhkmcp-2.0.1.dist-info/RECORD +95 -0
  92. cnhkmcp-2.0.1.dist-info/WHEEL +5 -0
  93. cnhkmcp-2.0.1.dist-info/entry_points.txt +2 -0
  94. cnhkmcp-2.0.1.dist-info/licenses/LICENSE +21 -0
  95. cnhkmcp-2.0.1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2393 @@
1
+ """
2
+ BRAIN Expression Template Decoder - Flask Web Application
3
+ A complete web application for decoding string templates with WorldQuant BRAIN integration
4
+ """
5
+
6
+ # Auto-install dependencies if missing
7
+ import subprocess
8
+ import sys
9
+ import os
10
+
11
def install_requirements():
    """Install any packages from requirements.txt that are not importable yet.

    Reads an optional pip index URL from mirror_config.txt (first
    non-comment http line wins), probes a fixed list of import names, and
    runs ``pip install -r requirements.txt`` only when something is missing.
    Returns True on success (or nothing to do), False on failure.
    """
    print("🔍 Checking and installing required dependencies...")
    print("📋 Verifying packages needed for BRAIN Expression Template Decoder...")

    # Everything is resolved relative to the directory holding this script.
    script_dir = os.path.dirname(os.path.abspath(__file__))

    req_file = os.path.join(script_dir, 'requirements.txt')
    if not os.path.exists(req_file):
        print("❌ Error: requirements.txt not found!")
        print(f"Looking for: {req_file}")
        return False

    # Optional mirror override; defaults to the Tsinghua PyPI mirror.
    index_url = 'https://pypi.tuna.tsinghua.edu.cn/simple'
    mirror_config_file = os.path.join(script_dir, 'mirror_config.txt')
    if os.path.exists(mirror_config_file):
        try:
            with open(mirror_config_file, 'r', encoding='utf-8') as fh:
                for raw in fh:
                    candidate = raw.strip()
                    if candidate and not candidate.startswith('#') and candidate.startswith('http'):
                        index_url = candidate
                        break
        except Exception as e:
            print(f"Warning: Could not read mirror configuration: {e}")

    # (import name, pip distribution name) pairs to probe, in order.
    probes = [
        ('flask', 'flask'),
        ('flask_cors', 'flask-cors'),
        ('requests', 'requests'),
        ('pandas', 'pandas'),
        ('PyPDF2', 'PyPDF2'),
        ('docx', 'python-docx'),
        ('pdfplumber', 'pdfplumber'),
        ('fitz', 'PyMuPDF'),
        ('cozepy', 'cozepy'),
        ('lxml', 'lxml'),
        ('bs4', 'beautifulsoup4'),
    ]

    missing = []
    for import_name, pip_name in probes:
        try:
            __import__(import_name)
        except ImportError:
            missing.append(pip_name)
            print(f"Missing package: {pip_name} (import name: {import_name})")

    if not missing:
        print("✅ All required dependencies are already installed!")
        return True

    print(f"⚠️ Missing packages detected: {', '.join(missing)}")
    print("📦 Installing dependencies from requirements.txt...")
    print(f"🌐 Using mirror: {index_url}")
    try:
        # First attempt goes through the configured mirror.
        subprocess.check_call([
            sys.executable, '-m', 'pip', 'install',
            '-i', index_url,
            '-r', req_file
        ])
        print("✅ All dependencies installed successfully!")
        return True
    except subprocess.CalledProcessError:
        print(f"❌ Error: Failed to install dependencies using {index_url}")
        print("🔄 Trying with default PyPI...")
        try:
            # Fallback: let pip use its default index.
            subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', req_file])
            print("✅ All dependencies installed successfully!")
            return True
        except subprocess.CalledProcessError:
            print("❌ Error: Failed to install dependencies. Please run manually:")
            print(f" {sys.executable} -m pip install -i {index_url} -r requirements.txt")
            return False
93
+
94
+ # Check and install dependencies before importing
95
+ # This will run every time the module is imported, but only install if needed
96
def check_and_install_dependencies():
    """Run install_requirements() at most once per process.

    Success is memoized in the module global ``_dependencies_checked`` so
    repeated imports of this module skip the (slow) probe/install step.
    """
    if globals().get('_dependencies_checked'):
        return True
    if not install_requirements():
        print("\nPlease install the dependencies manually and try again.")
        return False
    globals()['_dependencies_checked'] = True
    return True
106
+
107
# Run the dependency check on every import; installation only happens when
# something is actually missing.
print("🚀 Initializing BRAIN Expression Template Decoder...")
if not check_and_install_dependencies():
    # As a script: abort. As an import: warn and continue so the caller can
    # decide what to do with the degraded module.
    if __name__ == "__main__":
        sys.exit(1)
    print("⚠️ Warning: Some dependencies may be missing. Please run 'pip install -r requirements.txt'")
    print("🔄 Continuing with import, but some features may not work properly.")
115
+
116
# Import the runtime dependencies now that the installer has had its chance.
try:
    from flask import Flask, render_template, request, jsonify, session as flask_session, Response, stream_with_context, send_from_directory
    from flask_cors import CORS
    import requests
    import json
    import time
    import os
    import threading
    import queue
    import uuid
    from datetime import datetime
    print("📚 Core packages imported successfully!")

    # ace_lib lives in the hkSimulator subdirectory; degrade to None when it
    # cannot be imported so the rest of the app still works.
    try:
        sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'hkSimulator'))
        from ace_lib import get_instrument_type_region_delay
        print("✅ Imported get_instrument_type_region_delay from ace_lib")
    except ImportError as e:
        print(f"⚠️ Warning: Could not import get_instrument_type_region_delay: {e}")
        get_instrument_type_region_delay = None

except ImportError as e:
    print(f"❌ Failed to import core packages: {e}")
    print("Please run: pip install -r requirements.txt")
    if __name__ == "__main__":
        sys.exit(1)
    raise
146
+
147
# ---------------------------------------------------------------------------
# Flask application setup
# ---------------------------------------------------------------------------
app = Flask(__name__)
app.secret_key = 'brain_template_decoder_secret_key_change_in_production'
CORS(app)

print("🌐 Flask application initialized with CORS support!")

# Base URL of the WorldQuant BRAIN REST API.
BRAIN_API_BASE = 'https://api.worldquantbrain.com'

# Authenticated BRAIN sessions keyed by session id
# (in production, use proper session management like Redis).
brain_sessions = {}

print("🧠 BRAIN API integration configured!")
160
+
161
def sign_in_to_brain(username, password):
    """Sign in to the BRAIN API with retries and biometric-auth support.

    Returns a ``requests.Session`` on plain success, or a dict with
    ``requires_biometric=True`` (plus the biometric URL and the session)
    when the API answers 401 with ``WWW-Authenticate: persona``.
    Raises immediately on bad credentials; other failures are retried up
    to three times with a 10 s pause between attempts.
    """
    from urllib.parse import urljoin

    # Persistent session carrying HTTP basic credentials on every request.
    session = requests.Session()
    session.auth = (username, password)

    max_retries = 3
    for attempt in range(max_retries):
        try:
            response = session.post(f'{BRAIN_API_BASE}/authentication')

            if response.status_code == requests.codes.unauthorized:
                if response.headers.get("WWW-Authenticate") == "persona":
                    # Interactive biometric step required by the platform.
                    location = response.headers.get("Location")
                    if not location:
                        # No redirect target: treated as a transient error
                        # and retried by the generic handler below.
                        raise Exception("Biometric authentication required but no Location header provided")
                    return {
                        'requires_biometric': True,
                        'biometric_url': urljoin(response.url, location),
                        'session': session,
                        'location': location,
                    }
                # Plain 401: bad credentials — never retried.
                print("Incorrect username or password")
                raise requests.HTTPError(
                    "Authentication failed: Invalid username or password",
                    response=response,
                )

            # Any other non-2xx becomes an HTTPError handled below.
            response.raise_for_status()
            print("Authentication successful.")
            return session

        except requests.HTTPError as e:
            if "Invalid username or password" in str(e) or "Authentication failed" in str(e):
                raise  # credential errors are permanent
            print(f"HTTP error occurred: {e}")
            if attempt + 1 < max_retries:
                print(f"Retrying... Attempt {attempt + 2} of {max_retries}")
                time.sleep(10)
            else:
                print("Max retries reached. Authentication failed.")
                raise
        except Exception as e:
            print(f"Error during authentication: {e}")
            if attempt + 1 < max_retries:
                print(f"Retrying... Attempt {attempt + 2} of {max_retries}")
                time.sleep(10)
            else:
                print("Max retries reached. Authentication failed.")
                raise
228
+
229
+ # Routes
230
@app.route('/')
def index():
    """Serve the main application page."""
    return render_template('index.html')
234
+
235
@app.route('/simulator')
def simulator():
    """Serve the user-friendly simulator interface."""
    return render_template('simulator.html')
239
+
240
@app.route('/api/simulator/logs', methods=['GET'])
def get_simulator_logs():
    """List wqb*.log files found next to this app or under ./simulator.

    Returns JSON with the file list (newest first), the latest filename,
    and a total count.
    """
    try:
        import glob
        import os
        from datetime import datetime

        base_dir = os.path.dirname(os.path.abspath(__file__))
        search_dirs = [base_dir, os.path.join(base_dir, 'simulator')]

        entries = []
        for directory in search_dirs:
            if not os.path.exists(directory):
                continue
            for log_file in glob.glob(os.path.join(directory, 'wqb*.log')):
                try:
                    info = os.stat(log_file)
                    entries.append({
                        'filename': os.path.basename(log_file),
                        'path': log_file,
                        'size': f"{info.st_size / 1024:.1f} KB",
                        'modified': datetime.fromtimestamp(info.st_mtime).strftime('%Y-%m-%d %H:%M:%S'),
                        'mtime': info.st_mtime
                    })
                except Exception as e:
                    print(f"Error reading log file {log_file}: {e}")

        # Newest first; head of the list is the "latest" log.
        entries.sort(key=lambda entry: entry['mtime'], reverse=True)

        return jsonify({
            'logs': entries,
            'latest': entries[0]['filename'] if entries else None,
            'count': len(entries)
        })

    except Exception as e:
        return jsonify({'error': f'Error getting log files: {str(e)}'}), 500
285
+
286
@app.route('/api/transformer_candidates')
def get_transformer_candidates():
    """Return the Alpha candidates produced by the Transformer pipeline."""
    try:
        # The folder really is spelled 'Tranformer' (sic, missing 's') on disk.
        file_path = os.path.join(os.path.dirname(__file__), 'Tranformer', 'output', 'Alpha_candidates.json')

        if not os.path.exists(file_path):
            return jsonify({"error": "File not found", "path": file_path})

        with open(file_path, 'r', encoding='utf-8') as f:
            return jsonify(json.load(f))
    except Exception as e:
        return jsonify({"error": str(e)})
302
+
303
@app.route('/api/simulator/logs/<filename>', methods=['GET'])
def get_simulator_log_content(filename):
    """Get content of a specific log file.

    Only plain ``wqb*.log`` filenames that live directly in the app
    directory or its ``simulator`` subdirectory are served. The file is
    decoded by trying a list of common encodings, with a bytes-level
    UTF-8 "replace" fallback as last resort.
    """
    try:
        import os

        # Security: require a bare wqb*.log filename with no path
        # separators or parent references. The old prefix/suffix check
        # alone allowed traversal like 'wqb\\..\\..\\secret.log' on
        # Windows, where backslash is a path separator in os.path.join.
        if (not filename.startswith('wqb')
                or not filename.endswith('.log')
                or os.path.basename(filename) != filename
                or '..' in filename
                or '/' in filename
                or '\\' in filename):
            return jsonify({'error': 'Invalid log file name'}), 400

        script_dir = os.path.dirname(os.path.abspath(__file__))
        simulator_dir = os.path.join(script_dir, 'simulator')

        # Look for the file in both candidate directories.
        log_path = None
        for directory in [script_dir, simulator_dir]:
            potential_path = os.path.join(directory, filename)
            if os.path.exists(potential_path):
                log_path = potential_path
                break

        if not log_path:
            return jsonify({'error': 'Log file not found'}), 404

        # Read file content, trying several encodings in turn (the logs may
        # have been written under various Windows/Chinese locales).
        content = None
        encodings_to_try = ['utf-8', 'gbk', 'gb2312', 'big5', 'latin-1', 'cp1252']
        for encoding in encodings_to_try:
            try:
                with open(log_path, 'r', encoding=encoding) as f:
                    content = f.read()
                print(f"Successfully read log file with {encoding} encoding")
                break
            except UnicodeDecodeError:
                continue
            except Exception as e:
                print(f"Error reading with {encoding}: {e}")
                continue

        if content is None:
            # Last resort: read raw bytes and substitute undecodable chars.
            try:
                with open(log_path, 'rb') as f:
                    raw_content = f.read()
                content = raw_content.decode('utf-8', errors='replace')
                print("Used UTF-8 with error replacement for log content")
            except Exception as e:
                content = f"Error: Could not decode file content - {str(e)}"

        response = jsonify({
            'content': content,
            'filename': filename,
            'size': len(content)
        })
        response.headers['Content-Type'] = 'application/json; charset=utf-8'
        return response

    except Exception as e:
        return jsonify({'error': f'Error reading log file: {str(e)}'}), 500
363
+
364
@app.route('/api/simulator/test-connection', methods=['POST'])
def test_simulator_connection():
    """Test BRAIN API connectivity with the credentials from the request."""
    try:
        payload = request.get_json()
        username = payload.get('username')
        password = payload.get('password')

        if not username or not password:
            return jsonify({'error': 'Username and password required'}), 400

        # Reuse the shared sign-in helper.
        result = sign_in_to_brain(username, password)

        # A dict result means the platform demanded biometric verification.
        if isinstance(result, dict) and result.get('requires_biometric'):
            return jsonify({
                'success': False,
                'error': 'Biometric authentication required. Please use the main interface first to complete authentication.',
                'requires_biometric': True
            })

        # Probe a lightweight endpoint to confirm the session really works.
        probe = result.get(f'{BRAIN_API_BASE}/data-fields/open')
        if probe.ok:
            return jsonify({
                'success': True,
                'message': 'Connection successful'
            })
        return jsonify({
            'success': False,
            'error': f'API test failed: {probe.status_code}'
        })

    except Exception as e:
        return jsonify({
            'success': False,
            'error': f'Connection failed: {str(e)}'
        })
406
+
407
@app.route('/api/simulator/run', methods=['POST'])
def run_simulator_with_params():
    """Run the simulator with user-provided parameters in a new terminal.

    Accepts a multipart form (JSON expressions file, BRAIN credentials,
    tuning options), writes a temporary driver script into the simulator
    directory, and launches it in a fresh console so the user can watch
    progress. Returns JSON describing the launch or an error.

    Fixes over the original: credentials are embedded into the generated
    script via ``!r`` (repr) so quotes/backslashes in them cannot break or
    inject into the generated source, and a single timestamp is used for
    all three temp files so their names cannot straddle a second boundary.
    NOTE(review): the temp script still holds the password in plain text on
    disk until it cleans itself up — acceptable only for local single-user use.
    """
    try:
        import subprocess
        import threading
        import json
        import os
        import sys
        import time

        # Get form data.
        json_file = request.files.get('jsonFile')
        username = request.form.get('username')
        password = request.form.get('password')
        start_position = int(request.form.get('startPosition', 0))
        concurrent_count = int(request.form.get('concurrentCount', 3))
        random_shuffle = request.form.get('randomShuffle') == 'true'
        use_multi_sim = request.form.get('useMultiSim') == 'true'
        alpha_count_per_slot = int(request.form.get('alphaCountPerSlot', 3))

        if not json_file or not username or not password:
            return jsonify({'error': 'Missing required parameters'}), 400

        # Validate and parse the uploaded JSON file.
        try:
            json_content = json_file.read().decode('utf-8')
            expressions_data = json.loads(json_content)
            if not isinstance(expressions_data, list):
                return jsonify({'error': 'JSON file must contain an array of expressions'}), 400
        except Exception as e:
            return jsonify({'error': f'Invalid JSON file: {str(e)}'}), 400

        # Resolve working directories.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        simulator_dir = os.path.join(script_dir, 'simulator')

        # One timestamp for all three temp files so the names always match.
        stamp = int(time.time())
        temp_json_path = os.path.join(simulator_dir, f'temp_expressions_{stamp}.json')
        temp_script_path = os.path.join(simulator_dir, f'temp_automated_{stamp}.py')
        temp_batch_path = os.path.join(simulator_dir, f'temp_run_{stamp}.bat')

        try:
            # Save the expressions to the temporary JSON file.
            with open(temp_json_path, 'w', encoding='utf-8') as f:
                json.dump(expressions_data, f, ensure_ascii=False, indent=2)

            # Generate the driver script that calls automated_main. The
            # credentials are injected with !r so any quoting in them stays
            # a valid Python string literal.
            script_content = f'''
import asyncio
import sys
import os
import json

# Add current directory to path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

import simulator_wqb

async def run_automated():
    """Run the automated simulator with parameters from web interface"""
    try:
        # Load JSON data
        with open(r"{temp_json_path}", 'r', encoding='utf-8') as f:
            json_content = f.read()

        # Call automated_main with parameters
        result = await simulator_wqb.automated_main(
            json_file_content=json_content,
            username={username!r},
            password={password!r},
            start_position={start_position},
            concurrent_count={concurrent_count},
            random_shuffle={random_shuffle},
            use_multi_sim={use_multi_sim},
            alpha_count_per_slot={alpha_count_per_slot}
        )

        if result['success']:
            print("\\n" + "="*60)
            print("🎉 WEB INTERFACE AUTOMATION SUCCESS 🎉")
            print("="*60)
            print(f"✅ Total simulations: {{result['results']['total']}}")
            print(f"✅ Successful: {{result['results']['successful']}}")
            print(f"❌ Failed: {{result['results']['failed']}}")
            if result['results']['alphaIds']:
                print(f"📊 Generated {{len(result['results']['alphaIds'])}} Alpha IDs")
            print("="*60)
        else:
            print("\\n" + "="*60)
            print("❌ WEB INTERFACE AUTOMATION FAILED")
            print("="*60)
            print(f"Error: {{result['error']}}")
            print("="*60)

    except Exception as e:
        print(f"\\n❌ Script execution error: {{e}}")

    finally:
        # Clean up temporary files
        try:
            if os.path.exists(r"{temp_json_path}"):
                os.remove(r"{temp_json_path}")
            if os.path.exists(r"{temp_script_path}"):
                os.remove(r"{temp_script_path}")
            if os.path.exists(r"{temp_batch_path}"):
                os.remove(r"{temp_batch_path}")
        except:
            pass

    print("\\n🔄 Press any key to close this window...")
    input()

if __name__ == '__main__':
    asyncio.run(run_automated())
'''

            # Save the driver script.
            with open(temp_script_path, 'w', encoding='utf-8') as f:
                f.write(script_content)

            # Batch wrapper for Windows consoles.
            batch_content = f'''@echo off
cd /d "{simulator_dir}"
"{sys.executable}" "{os.path.basename(temp_script_path)}"
'''
            with open(temp_batch_path, 'w', encoding='utf-8') as f:
                f.write(batch_content)

            def launch_simulator():
                """Open the driver script in a new terminal window (best effort)."""
                try:
                    if os.name == 'nt':  # Windows
                        # cmd /c runs the batch file in a brand-new console.
                        subprocess.Popen(
                            f'cmd.exe /c "{temp_batch_path}"',
                            creationflags=subprocess.CREATE_NEW_CONSOLE
                        )
                    else:  # Unix-like systems
                        # Try common terminal emulators until one launches.
                        terminals = ['gnome-terminal', 'xterm', 'konsole', 'terminal']
                        for terminal in terminals:
                            try:
                                if terminal == 'gnome-terminal':
                                    subprocess.Popen([
                                        terminal, '--working-directory', simulator_dir,
                                        '--', sys.executable, os.path.basename(temp_script_path)
                                    ])
                                else:
                                    # bash -c handles the `cd && run` composition.
                                    command = f'cd "{simulator_dir}" && "{sys.executable}" "{os.path.basename(temp_script_path)}"'
                                    subprocess.Popen([
                                        terminal, '-e',
                                        'bash', '-c', command
                                    ])
                                break
                            except FileNotFoundError:
                                continue
                        else:
                            # Fallback: run headless when no emulator is found.
                            subprocess.Popen([
                                sys.executable, temp_script_path
                            ], cwd=simulator_dir)
                except Exception as e:
                    print(f"Error launching simulator: {e}")

            # Launch from a daemon thread so the request returns immediately.
            thread = threading.Thread(target=launch_simulator)
            thread.daemon = True
            thread.start()

            return jsonify({
                'success': True,
                'message': 'Simulator launched in new terminal window',
                'parameters': {
                    'expressions_count': len(expressions_data),
                    'concurrent_count': concurrent_count,
                    'use_multi_sim': use_multi_sim,
                    'alpha_count_per_slot': alpha_count_per_slot if use_multi_sim else None
                }
            })

        except Exception as e:
            # Remove any temp files we managed to create before failing.
            try:
                if os.path.exists(temp_json_path):
                    os.remove(temp_json_path)
                if os.path.exists(temp_script_path):
                    os.remove(temp_script_path)
                if os.path.exists(temp_batch_path):
                    os.remove(temp_batch_path)
            except:
                pass
            raise e

    except Exception as e:
        return jsonify({'error': f'Failed to run simulator: {str(e)}'}), 500
605
+
606
@app.route('/api/simulator/stop', methods=['POST'])
def stop_simulator():
    """Acknowledge a stop request for the simulator (placeholder).

    Real process management is not implemented yet; this endpoint only
    returns a success payload so the UI can update its state.
    """
    try:
        payload = {'success': True, 'message': 'Stop signal sent'}
        return jsonify(payload)
    except Exception as e:
        return jsonify({'error': f'Failed to stop simulator: {str(e)}'}), 500
618
+
619
@app.route('/api/authenticate', methods=['POST'])
def authenticate():
    """Authenticate with BRAIN API.

    Expects a JSON body with 'username' and 'password'.  On plain success,
    stores the authenticated requests session plus its simulation options in
    the module-level ``brain_sessions`` dict and returns the session id.
    When the upstream sign-in reports that biometric verification is needed,
    a provisional session (suffix ``_biometric_pending``) is stored instead
    and the caller is given the biometric URL to visit; the flow is finished
    later via /api/complete-biometric.
    """
    try:
        data = request.get_json()
        username = data.get('username')
        password = data.get('password')

        if not username or not password:
            return jsonify({'error': 'Username and password required'}), 400

        # Authenticate with BRAIN.  sign_in_to_brain either returns a ready
        # session object or a dict describing a pending biometric step.
        result = sign_in_to_brain(username, password)

        # Check if biometric authentication is required
        if isinstance(result, dict) and result.get('requires_biometric'):
            # Store the session temporarily with biometric pending status;
            # the id suffix marks it as not yet usable for API calls.
            session_id = f"{username}_{int(time.time())}_biometric_pending"
            brain_sessions[session_id] = {
                'session': result['session'],
                'username': username,
                'timestamp': time.time(),
                'biometric_pending': True,
                'biometric_location': result['location']
            }

            # Store session ID in Flask session
            flask_session['brain_session_id'] = session_id

            return jsonify({
                'success': False,
                'requires_biometric': True,
                'biometric_url': result['biometric_url'],
                'session_id': session_id,
                'message': 'Please complete biometric authentication by visiting the provided URL'
            })

        # Regular successful authentication
        brain_session = result

        # Fetch simulation options (cached on the session record and used
        # later to validate/auto-correct simulation settings)
        valid_options = get_valid_simulation_options(brain_session)

        # Store session
        session_id = f"{username}_{int(time.time())}"
        brain_sessions[session_id] = {
            'session': brain_session,
            'username': username,
            'timestamp': time.time(),
            'options': valid_options
        }

        # Store session ID in Flask session
        flask_session['brain_session_id'] = session_id

        return jsonify({
            'success': True,
            'session_id': session_id,
            'message': 'Authentication successful',
            'options': valid_options
        })

    except requests.HTTPError as e:
        # Distinguish bad credentials (401) from upstream/network failures.
        resp = getattr(e, 'response', None)
        status_code = getattr(resp, 'status_code', None)

        # Common: wrong username/password
        if status_code == 401 or 'Invalid username or password' in str(e):
            return jsonify({
                'error': '用户名或密码错误',
                'hint': '请检查账号密码是否正确;如果你的账号需要生物验证(persona),请按弹出的生物验证流程完成后再点“Complete Authentication”。'
            }), 401

        # Upstream/network/server issues
        return jsonify({
            'error': 'Authentication failed',
            'detail': str(e)
        }), 502
    except Exception as e:
        return jsonify({'error': f'Authentication error: {str(e)}'}), 500
699
+
700
@app.route('/api/complete-biometric', methods=['POST'])
def complete_biometric():
    """Complete biometric authentication after the user has done it in browser.

    Looks up the provisional session created by /api/authenticate (flagged
    ``biometric_pending``), polls the stored authentication location until
    the upstream confirms completion (HTTP 201), then replaces the
    provisional session record with a normal one under a fresh session id.
    """
    try:
        from urllib.parse import urljoin

        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]

        # Check if this session is waiting for biometric completion
        if not session_info.get('biometric_pending'):
            return jsonify({'error': 'Session is not pending biometric authentication'}), 400

        brain_session = session_info['session']
        location = session_info['biometric_location']

        # Complete the biometric authentication following the reference pattern
        try:
            # Construct the full URL for biometric authentication from the
            # (possibly relative) location stored at sign-in time
            auth_url = urljoin(f'{BRAIN_API_BASE}/authentication', location)

            # Keep trying until biometric auth succeeds (like in reference code);
            # 401 means the user has not finished the browser flow yet.
            max_attempts = 5
            attempt = 0

            while attempt < max_attempts:
                bio_response = brain_session.post(auth_url)
                if bio_response.status_code == 201:
                    # Biometric authentication successful
                    break
                elif bio_response.status_code == 401:
                    # Biometric authentication not complete yet
                    attempt += 1
                    if attempt >= max_attempts:
                        return jsonify({
                            'success': False,
                            'error': 'Biometric authentication not completed. Please try again.'
                        })
                    time.sleep(2)  # Wait a bit before retrying
                else:
                    # Other error — surface it as requests.HTTPError below
                    bio_response.raise_for_status()

            # Update session info - remove biometric pending status
            session_info['biometric_pending'] = False
            del session_info['biometric_location']

            # Create a new session ID without the biometric_pending suffix
            new_session_id = f"{session_info['username']}_{int(time.time())}"
            brain_sessions[new_session_id] = {
                'session': brain_session,
                'username': session_info['username'],
                'timestamp': time.time()
            }

            # Remove old session
            del brain_sessions[session_id]

            # Update Flask session
            flask_session['brain_session_id'] = new_session_id

            return jsonify({
                'success': True,
                'session_id': new_session_id,
                'message': 'Biometric authentication completed successfully'
            })

        except requests.HTTPError as e:
            # Non-401 upstream failure from the polling loop
            return jsonify({
                'success': False,
                'error': f'Failed to complete biometric authentication: {str(e)}'
            })

    except Exception as e:
        return jsonify({
            'success': False,
            'error': f'Error completing biometric authentication: {str(e)}'
        })
781
+
782
@app.route('/api/operators', methods=['GET'])
def get_operators():
    """Get user operators from BRAIN API.

    Strategy: first request /operators with no pagination parameters (many
    deployments return the full list at once); if the response is a paged
    dict, walk the remaining pages.  If that whole attempt raises, fall back
    to explicit limit/offset pagination from scratch.  Each operator is then
    trimmed to the fields the frontend needs before being returned.
    """
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # First try without pagination parameters (most APIs return all operators at once)
        try:
            response = brain_session.get(f'{BRAIN_API_BASE}/operators')
            response.raise_for_status()

            data = response.json()

            # If it's a list, we got all operators
            if isinstance(data, list):
                all_operators = data
                print(f"Fetched {len(all_operators)} operators from BRAIN API (direct)")
            # If it's a dict with results, handle pagination
            elif isinstance(data, dict) and 'results' in data:
                all_operators = []
                total_count = data.get('count', len(data['results']))
                print(f"Found {total_count} total operators, fetching all...")

                # Get first batch
                all_operators.extend(data['results'])

                # Get remaining batches if needed
                limit = 100
                offset = len(data['results'])

                while len(all_operators) < total_count:
                    params = {'limit': limit, 'offset': offset}
                    batch_response = brain_session.get(f'{BRAIN_API_BASE}/operators', params=params)
                    batch_response.raise_for_status()
                    batch_data = batch_response.json()

                    if isinstance(batch_data, dict) and 'results' in batch_data:
                        batch_operators = batch_data['results']
                        if not batch_operators:  # No more data
                            break
                        all_operators.extend(batch_operators)
                        offset += len(batch_operators)
                    else:
                        # Unexpected page shape — stop rather than loop forever
                        break

                print(f"Fetched {len(all_operators)} operators from BRAIN API (paginated)")
            else:
                # Unknown format, treat as empty
                all_operators = []
                print("Unknown response format for operators API")

        except Exception as e:
            print(f"Error fetching operators: {str(e)}")
            # Fallback: try with explicit pagination
            all_operators = []
            limit = 100
            offset = 0

            while True:
                params = {'limit': limit, 'offset': offset}
                response = brain_session.get(f'{BRAIN_API_BASE}/operators', params=params)
                response.raise_for_status()

                data = response.json()
                if isinstance(data, list):
                    all_operators.extend(data)
                    if len(data) < limit:
                        break
                elif isinstance(data, dict) and 'results' in data:
                    batch_operators = data['results']
                    all_operators.extend(batch_operators)
                    if len(batch_operators) < limit:
                        break
                else:
                    break

                offset += limit

            print(f"Fetched {len(all_operators)} operators from BRAIN API (fallback)")

        # Extract name, category, description, definition and other fields (if available)
        filtered_operators = []
        for op in all_operators:
            operator_data = {
                'name': op['name'],
                'category': op['category']
            }
            # Include description if available
            if 'description' in op and op['description']:
                operator_data['description'] = op['description']
            # Include definition if available
            if 'definition' in op and op['definition']:
                operator_data['definition'] = op['definition']
            # Include usage count if available
            if 'usageCount' in op:
                operator_data['usageCount'] = op['usageCount']
            # Include other useful fields if available
            if 'example' in op and op['example']:
                operator_data['example'] = op['example']
            filtered_operators.append(operator_data)

        return jsonify(filtered_operators)

    except Exception as e:
        print(f"Error fetching operators: {str(e)}")
        return jsonify({'error': f'Failed to fetch operators: {str(e)}'}), 500
893
+
894
@app.route('/api/datafields', methods=['GET'])
def get_datafields():
    """Get data fields from BRAIN API.

    Query parameters: region, delay, universe, dataset_id (defaults match
    the UI).  Fields are paged 50 at a time; every page is fetched and a
    trimmed record per field is returned as a JSON list.

    Fix: the first request (which supplies the total count) previously had
    its results discarded, and offset 0 was fetched again inside the page
    loop — one redundant network round trip per call.  The first page is
    now reused.
    """
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # Get parameters
        region = request.args.get('region', 'USA')
        delay = request.args.get('delay', '1')
        universe = request.args.get('universe', 'TOP3000')
        dataset_id = request.args.get('dataset_id', 'fundamental6')
        search = ''  # search is not currently exposed by the frontend

        # Build URL template based on notebook implementation
        pages = []
        if len(search) == 0:
            url_template = f"{BRAIN_API_BASE}/data-fields?" + \
                f"&instrumentType=EQUITY" + \
                f"&region={region}&delay={delay}&universe={universe}&dataset.id={dataset_id}&limit=50" + \
                "&offset={x}"
            # The first request yields both the total count and the first
            # page of results; keep the results instead of refetching them.
            first_response = brain_session.get(url_template.format(x=0))
            first_response.raise_for_status()
            first_json = first_response.json()
            count = first_json['count']
            pages.append(first_json['results'])
            next_offset = 50
        else:
            url_template = f"{BRAIN_API_BASE}/data-fields?" + \
                f"&instrumentType=EQUITY" + \
                f"&region={region}&delay={delay}&universe={universe}&limit=50" + \
                f"&search={search}" + \
                "&offset={x}"
            count = 100  # Default for search queries
            next_offset = 0

        # Fetch the remaining pages in batches of 50
        for x in range(next_offset, count, 50):
            response = brain_session.get(url_template.format(x=x))
            response.raise_for_status()
            pages.append(response.json()['results'])

        # Flatten the page lists
        datafields_list_flat = [item for sublist in pages for item in sublist]

        # Keep only the fields the frontend needs
        filtered_fields = [
            {
                'id': field['id'],
                'description': field['description'],
                'type': field['type'],
                'coverage': field.get('coverage', 0),
                'userCount': field.get('userCount', 0),
                'alphaCount': field.get('alphaCount', 0)
            }
            for field in datafields_list_flat
        ]

        return jsonify(filtered_fields)

    except Exception as e:
        return jsonify({'error': f'Failed to fetch data fields: {str(e)}'}), 500
957
+
958
@app.route('/api/dataset-description', methods=['GET'])
def get_dataset_description():
    """Fetch a dataset's description text from the BRAIN API.

    Query parameters region/delay/universe/dataset_id default to the same
    values as the UI; returns the description string (or a placeholder when
    the dataset record has none).
    """
    try:
        sid = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not sid or sid not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        brain_session = brain_sessions[sid]['session']

        # Request parameters (same defaults as before)
        region = request.args.get('region', 'USA')
        delay = request.args.get('delay', '1')
        universe = request.args.get('universe', 'TOP3000')
        dataset_id = request.args.get('dataset_id', 'analyst10')

        # Build URL for dataset description
        url = (
            f"{BRAIN_API_BASE}/data-sets/{dataset_id}?"
            f"instrumentType=EQUITY&region={region}&delay={delay}&universe={universe}"
        )
        print(f"Getting dataset description from: {url}")

        resp = brain_session.get(url)
        resp.raise_for_status()

        description = resp.json().get('description', 'No description available')
        print(f"Dataset description retrieved: {description[:100]}...")

        return jsonify({
            'success': True,
            'description': description,
            'dataset_id': dataset_id
        })

    except Exception as e:
        print(f"Dataset description error: {str(e)}")
        return jsonify({'error': f'Failed to get dataset description: {str(e)}'}), 500
999
+
1000
@app.route('/api/status', methods=['GET'])
def check_status():
    """Report whether the caller's BRAIN session is still usable.

    A session is invalid when unknown or older than 24 hours (stale ones
    are evicted); a biometric-pending session is reported separately so the
    UI can resume that flow.
    """
    try:
        sid = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not sid or sid not in brain_sessions:
            return jsonify({'valid': False})

        info = brain_sessions[sid]

        # Evict sessions older than 24 hours (86400 s)
        if time.time() - info['timestamp'] > 86400:
            del brain_sessions[sid]
            return jsonify({'valid': False})

        # A session stuck in the biometric step is not yet valid
        if info.get('biometric_pending'):
            return jsonify({
                'valid': False,
                'biometric_pending': True,
                'username': info['username'],
                'message': 'Biometric authentication pending'
            })

        return jsonify({'valid': True, 'username': info['username']})

    except Exception as e:
        return jsonify({'error': f'Status check failed: {str(e)}'}), 500
1030
+
1031
@app.route('/api/logout', methods=['POST'])
def logout():
    """Drop the stored BRAIN session and clear the Flask session reference."""
    try:
        sid = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if sid:
            # pop with default: removing an already-gone session is a no-op
            brain_sessions.pop(sid, None)

        flask_session.pop('brain_session_id', None)

        return jsonify({'success': True, 'message': 'Logged out successfully'})

    except Exception as e:
        return jsonify({'error': f'Logout failed: {str(e)}'}), 500
1046
+
1047
@app.route('/api/test-expression', methods=['POST'])
def test_expression():
    """Test an expression using BRAIN API simulation.

    Fills missing simulation settings with defaults, validates the
    neutralization against the per-region options cached at login, POSTs
    to /simulations, and reports the immediate status (RUNNING / upstream
    status / ERROR) back to the caller.

    Fixes: bare ``except:`` narrowed to ``except Exception``; dead
    ``del settings['neutralization']`` branch removed (it was guarded by a
    condition that already required ``allowed_neuts`` to be truthy);
    default-fill loop replaced with ``setdefault``.
    """
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # Get the simulation data from request
        simulation_data = request.get_json()

        # Ensure required fields are present
        if 'type' not in simulation_data:
            simulation_data['type'] = 'REGULAR'

        if 'settings' not in simulation_data:
            simulation_data['settings'] = {}

        # Set default values for missing settings
        default_settings = {
            'instrumentType': 'EQUITY',
            'region': 'USA',
            'universe': 'TOP3000',
            'delay': 1,
            'decay': 15,
            'neutralization': 'SUBINDUSTRY',
            'truncation': 0.08,
            'pasteurization': 'ON',
            'testPeriod': 'P1Y6M',
            'unitHandling': 'VERIFY',
            'nanHandling': 'OFF',
            'language': 'FASTEXPR',
            'visualization': False
        }
        for key, value in default_settings.items():
            simulation_data['settings'].setdefault(key, value)

        # Convert string boolean values to actual boolean
        if isinstance(simulation_data['settings'].get('visualization'), str):
            viz_value = simulation_data['settings']['visualization'].lower()
            simulation_data['settings']['visualization'] = viz_value == 'true'

        # Validate settings against cached options
        valid_options = session_info.get('options')
        if valid_options:
            settings = simulation_data['settings']
            inst_type = settings.get('instrumentType', 'EQUITY')
            region = settings.get('region')
            neut = settings.get('neutralization')

            # Check if this specific neutralization is allowed for this region
            allowed_neuts = valid_options.get(inst_type, {}).get(region, {}).get('neutralizations', [])

            if neut and allowed_neuts and neut not in allowed_neuts:
                # Auto-correct to the first valid neutralization
                print(f"Warning: {neut} is invalid for {region}. Auto-correcting.")
                print(f"Auto-correcting neutralization to {allowed_neuts[0]}")
                settings['neutralization'] = allowed_neuts[0]

        # Send simulation request (following notebook pattern)
        try:
            message = {}
            simulation_response = brain_session.post(f'{BRAIN_API_BASE}/simulations', json=simulation_data)

            # A Location header means the simulation was accepted
            if 'Location' in simulation_response.headers:
                # Follow the location to get the actual status
                message = brain_session.get(simulation_response.headers['Location']).json()

                # 'progress' present => still running
                if 'progress' in message:
                    return jsonify({
                        'success': True,
                        'status': 'RUNNING',
                        'message': "Simulation is running",
                        'full_response': message
                    })

                # Completed (or errored) — relay the upstream status
                return jsonify({
                    'success': message.get('status') != 'ERROR',
                    'status': message.get('status', 'UNKNOWN'),
                    'message': str(message),
                    'full_response': message
                })

            # No Location header: try to surface the error body
            try:
                message = simulation_response.json()
                return jsonify({
                    'success': False,
                    'status': 'ERROR',
                    'message': str(message),
                    'full_response': message
                })
            except Exception:
                # Response body was not JSON
                return jsonify({
                    'success': False,
                    'status': 'ERROR',
                    'message': 'web Connection Error',
                    'full_response': {}
                })

        except Exception as e:
            return jsonify({
                'success': False,
                'status': 'ERROR',
                'message': 'web Connection Error',
                'full_response': {'error': str(e)}
            })

    except Exception as e:
        import traceback
        return jsonify({
            'success': False,
            'status': 'ERROR',
            'message': f'Test expression failed: {str(e)}',
            'full_response': {'error': str(e), 'traceback': traceback.format_exc()}
        }), 500
1176
+
1177
@app.route('/api/test-operators', methods=['GET'])
def test_operators():
    """Debug endpoint: summarize the raw shape of the BRAIN /operators response."""
    try:
        sid = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not sid or sid not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        brain_session = brain_sessions[sid]['session']

        # Raw upstream response, no pagination parameters
        resp = brain_session.get(f'{BRAIN_API_BASE}/operators')
        resp.raise_for_status()
        data = resp.json()

        is_list = isinstance(data, list)
        is_dict = isinstance(data, dict)

        # Show a small preview of the payload for debugging
        if is_list:
            preview = data[:3]
        elif is_dict:
            preview = data.get('results', [])[:3]
        else:
            preview = None

        return jsonify({
            'type': str(type(data)),
            'is_list': is_list,
            'is_dict': is_dict,
            'length': len(data) if is_list else None,
            'keys': list(data.keys()) if is_dict else None,
            'count_key': data.get('count') if is_dict else None,
            'first_few_items': preview
        })

    except Exception as e:
        return jsonify({'error': f'Test failed: {str(e)}'}), 500
1209
+
1210
# Import blueprints.  Track success so registration can be skipped when the
# import fails — previously a failed import still fell through to
# app.register_blueprint(...) and crashed with NameError.
try:
    from blueprints import idea_house_bp, paper_analysis_bp, feature_engineering_bp, inspiration_house_bp
    print("📦 Blueprints imported successfully!")
    _blueprints_imported = True
except ImportError as e:
    print(f"❌ Failed to import blueprints: {e}")
    print("Some features may not be available.")
    _blueprints_imported = False

# Register blueprints (only when the import succeeded)
if _blueprints_imported:
    app.register_blueprint(idea_house_bp, url_prefix='/idea-house')
    app.register_blueprint(paper_analysis_bp, url_prefix='/paper-analysis')
    app.register_blueprint(feature_engineering_bp, url_prefix='/feature-engineering')
    app.register_blueprint(inspiration_house_bp, url_prefix='/inspiration-house')

    print("🔧 All blueprints registered successfully!")
    print(" - Idea House: /idea-house")
    print(" - Paper Analysis: /paper-analysis")
    print(" - Feature Engineering: /feature-engineering")
    print(" - Inspiration House: /inspiration-house")

# Template Management Routes
# Get the directory where this script is located for templates
script_dir = os.path.dirname(os.path.abspath(__file__))
TEMPLATES_DIR = os.path.join(script_dir, 'custom_templates')

# Ensure templates directory exists
if not os.path.exists(TEMPLATES_DIR):
    os.makedirs(TEMPLATES_DIR)
    print(f"📁 Created templates directory: {TEMPLATES_DIR}")
else:
    print(f"📁 Templates directory ready: {TEMPLATES_DIR}")

print("✅ BRAIN Expression Template Decoder fully initialized!")
print("🎯 Ready to process templates and integrate with BRAIN API!")
1244
+
1245
@app.route('/api/templates', methods=['GET'])
def get_templates():
    """Return every saved custom template as a JSON list (empty when none exist)."""
    try:
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')

        # Missing file simply means no templates have been saved yet
        if not os.path.exists(templates_file):
            return jsonify([])

        with open(templates_file, 'r', encoding='utf-8') as fh:
            return jsonify(json.load(fh))

    except Exception as e:
        return jsonify({'error': f'Error loading templates: {str(e)}'}), 500
1259
+
1260
@app.route('/api/templates', methods=['POST'])
def save_template():
    """Create a custom template, or overwrite the one with the same name.

    Requires non-empty 'name' and 'expression' in the JSON body.  Updates
    keep the original createdAt timestamp and add an updatedAt stamp.
    """
    try:
        payload = request.get_json()
        name = payload.get('name', '').strip()
        description = payload.get('description', '').strip()
        expression = payload.get('expression', '').strip()
        template_configurations = payload.get('templateConfigurations', {})

        if not name or not expression:
            return jsonify({'error': 'Name and expression are required'}), 400

        # Load whatever is on disk already
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        templates = []
        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as fh:
                templates = json.load(fh)

        record = {
            'name': name,
            'description': description,
            'expression': expression,
            'templateConfigurations': template_configurations,
            'createdAt': datetime.now().isoformat()
        }

        # Locate an existing template with the same name, if any
        match = next((i for i, t in enumerate(templates) if t['name'] == name), None)

        if match is None:
            templates.append(record)
            message = f'Template "{name}" saved successfully'
        else:
            # Preserve the original creation time and stamp the update time
            if 'createdAt' in templates[match]:
                record['createdAt'] = templates[match]['createdAt']
            record['updatedAt'] = datetime.now().isoformat()
            templates[match] = record
            message = f'Template "{name}" updated successfully'

        with open(templates_file, 'w', encoding='utf-8') as fh:
            json.dump(templates, fh, indent=2, ensure_ascii=False)

        return jsonify({'success': True, 'message': message})

    except Exception as e:
        return jsonify({'error': f'Error saving template: {str(e)}'}), 500
1312
+
1313
@app.route('/api/templates/<int:template_id>', methods=['DELETE'])
def delete_template(template_id):
    """Delete the custom template at the given list index (404 if out of range)."""
    try:
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        templates = []
        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as fh:
                templates = json.load(fh)

        # Guard clause: index must fall inside the current list
        if not (0 <= template_id < len(templates)):
            return jsonify({'error': 'Template not found'}), 404

        removed = templates.pop(template_id)

        with open(templates_file, 'w', encoding='utf-8') as fh:
            json.dump(templates, fh, indent=2, ensure_ascii=False)

        return jsonify({'success': True, 'message': f'Template "{removed["name"]}" deleted successfully'})

    except Exception as e:
        return jsonify({'error': f'Error deleting template: {str(e)}'}), 500
1337
+
1338
@app.route('/api/templates/export', methods=['GET'])
def export_templates():
    """Export the full custom-template list as JSON (empty list when none exist)."""
    try:
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')

        if not os.path.exists(templates_file):
            return jsonify([])

        with open(templates_file, 'r', encoding='utf-8') as fh:
            return jsonify(json.load(fh))

    except Exception as e:
        return jsonify({'error': f'Error exporting templates: {str(e)}'}), 500
1353
+
1354
@app.route('/api/templates/import', methods=['POST'])
def import_templates():
    """Import templates from JSON.

    Body: ``{"templates": [...], "overwrite": bool}``.  Each entry must
    carry a non-empty string 'name' and 'expression'; malformed entries are
    skipped.  Duplicate names are reported and replaced only when
    ``overwrite`` is true.

    Fix: non-string name/expression/description previously raised
    AttributeError on ``.strip()``, turning one bad entry into a 500 for
    the whole import; such entries are now skipped like other invalid ones.
    """
    try:
        data = request.get_json()
        imported_templates = data.get('templates', [])
        overwrite = data.get('overwrite', False)

        if not isinstance(imported_templates, list):
            return jsonify({'error': 'Invalid template format'}), 400

        # Validate template structure; skip anything malformed
        valid_templates = []
        for template in imported_templates:
            if not isinstance(template, dict):
                continue
            name = template.get('name')
            expression = template.get('expression')
            if not (isinstance(name, str) and isinstance(expression, str)):
                continue
            if not name.strip() or not expression.strip():
                continue
            description = template.get('description', '')
            if not isinstance(description, str):
                description = ''
            valid_templates.append({
                'name': name.strip(),
                'description': description.strip(),
                'expression': expression.strip(),
                'templateConfigurations': template.get('templateConfigurations', {}),
                'createdAt': template.get('createdAt', datetime.now().isoformat())
            })

        if not valid_templates:
            return jsonify({'error': 'No valid templates found'}), 400

        # Load existing templates
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        existing_templates = []
        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                existing_templates = json.load(f)

        # Handle duplicates: report every clash, replace only on overwrite
        duplicates = []
        new_templates = []
        for template in valid_templates:
            existing_index = next(
                (i for i, t in enumerate(existing_templates) if t['name'] == template['name']),
                None
            )
            if existing_index is not None:
                duplicates.append(template['name'])
                if overwrite:
                    existing_templates[existing_index] = template
            else:
                new_templates.append(template)

        # Add new templates and persist
        existing_templates.extend(new_templates)
        with open(templates_file, 'w', encoding='utf-8') as f:
            json.dump(existing_templates, f, indent=2, ensure_ascii=False)

        return jsonify({
            'success': True,
            'imported': len(new_templates),
            'duplicates': duplicates,
            'overwritten': len(duplicates) if overwrite else 0
        })

    except Exception as e:
        return jsonify({'error': f'Error importing templates: {str(e)}'}), 500
1422
+
1423
@app.route('/api/run-simulator', methods=['POST'])
def run_simulator():
    """Launch simulator/simulator_wqb.py in a new terminal window.

    The script is now started with ``sys.executable`` on every platform so
    it runs under the same interpreter (and virtualenv) as this server,
    matching the other launcher in this file, instead of whatever bare
    ``python``/``python3`` resolves to on PATH.
    """
    try:
        import subprocess
        import threading

        # Locate the script (it lives in the simulator/ subfolder)
        script_dir = os.path.dirname(os.path.abspath(__file__))
        simulator_dir = os.path.join(script_dir, 'simulator')
        simulator_path = os.path.join(simulator_dir, 'simulator_wqb.py')

        if not os.path.exists(simulator_path):
            return jsonify({'error': 'simulator_wqb.py not found in simulator folder'}), 404

        def run_script():
            # Best-effort launch in a visible terminal; falls back to a
            # background process when no terminal emulator is available.
            try:
                if os.name == 'nt':
                    # Windows: cmd /k keeps the console open after the script exits
                    subprocess.Popen(['cmd', '/k', sys.executable, 'simulator_wqb.py'],
                                     cwd=simulator_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                elif sys.platform == 'darwin':
                    # macOS: drive Terminal.app via AppleScript
                    script = f'''
                    tell application "Terminal"
                        do script "cd '{simulator_dir}' && '{sys.executable}' simulator_wqb.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', script])
                else:
                    # Linux: try common terminal emulators in order
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', simulator_dir,
                                                  '--', sys.executable, 'simulator_wqb.py'])
                            else:
                                subprocess.Popen([terminal, '-e',
                                                  f'cd "{simulator_dir}" && "{sys.executable}" simulator_wqb.py'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # Fallback: Run in background
                        print("Warning: No terminal emulator found, running in background")
                        subprocess.Popen([sys.executable, 'simulator_wqb.py'], cwd=simulator_dir)
            except Exception as e:
                print(f"Error running simulator: {e}")

        # Fire and forget; the HTTP request returns immediately
        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({
            'success': True,
            'message': 'Simulator script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to run simulator: {str(e)}'}), 500
1490
+
1491
@app.route('/api/open-submitter', methods=['POST'])
def open_submitter():
    """Launch simulator/alpha_submitter.py in a new terminal window.

    Mirrors run_simulator(): responds immediately and starts the script
    from a daemon thread. (Fix: dropped the unused ``from pathlib import
    Path`` import.)
    """
    try:
        import subprocess
        import threading

        # Resolve the script location (now in the simulator subfolder).
        script_dir = os.path.dirname(os.path.abspath(__file__))
        simulator_dir = os.path.join(script_dir, 'simulator')
        submitter_path = os.path.join(simulator_dir, 'alpha_submitter.py')

        if not os.path.exists(submitter_path):
            return jsonify({'error': 'alpha_submitter.py not found in simulator folder'}), 404

        def run_script():
            # Best-effort launch in a platform-appropriate terminal.
            try:
                if os.name == 'nt':
                    # Windows: open a persistent cmd console.
                    subprocess.Popen(['cmd', '/k', 'python', 'alpha_submitter.py'],
                                     cwd=simulator_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                elif sys.platform == 'darwin':
                    # macOS: drive Terminal.app via AppleScript.
                    script = f'''
                    tell application "Terminal"
                        do script "cd '{simulator_dir}' && python3 alpha_submitter.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', script])
                else:
                    # Linux: try common terminal emulators in order.
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', simulator_dir,
                                                  '--', 'python3', 'alpha_submitter.py'])
                            else:
                                subprocess.Popen([terminal, '-e',
                                                  f'cd "{simulator_dir}" && python3 alpha_submitter.py'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # Fallback: Run in background
                        print("Warning: No terminal emulator found, running in background")
                        subprocess.Popen([sys.executable, 'alpha_submitter.py'], cwd=simulator_dir)
            except Exception as e:
                print(f"Error running submitter: {e}")

        # Detach so the response can return right away.
        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({
            'success': True,
            'message': 'Alpha submitter script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to open submitter: {str(e)}'}), 500
1558
+
1559
@app.route('/api/open-hk-simulator', methods=['POST'])
def open_hk_simulator():
    """Launch hkSimulator/autosimulator.py in a new terminal window.

    Mirrors run_simulator(): responds immediately and starts the script
    from a daemon thread. (Fix: dropped the unused ``from pathlib import
    Path`` import.)
    """
    try:
        import subprocess
        import threading

        # Resolve the script location (hkSimulator subfolder).
        script_dir = os.path.dirname(os.path.abspath(__file__))
        hk_simulator_dir = os.path.join(script_dir, 'hkSimulator')
        autosimulator_path = os.path.join(hk_simulator_dir, 'autosimulator.py')

        if not os.path.exists(autosimulator_path):
            return jsonify({'error': 'autosimulator.py not found in hkSimulator folder'}), 404

        def run_script():
            # Best-effort launch in a platform-appropriate terminal.
            try:
                if os.name == 'nt':
                    # Windows: open a persistent cmd console.
                    subprocess.Popen(['cmd', '/k', 'python', 'autosimulator.py'],
                                     cwd=hk_simulator_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                elif sys.platform == 'darwin':
                    # macOS: drive Terminal.app via AppleScript.
                    script = f'''
                    tell application "Terminal"
                        do script "cd '{hk_simulator_dir}' && python3 autosimulator.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', script])
                else:
                    # Linux: try common terminal emulators in order.
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', hk_simulator_dir,
                                                  '--', 'python3', 'autosimulator.py'])
                            else:
                                subprocess.Popen([terminal, '-e',
                                                  f'cd "{hk_simulator_dir}" && python3 autosimulator.py'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # Fallback: Run in background
                        print("Warning: No terminal emulator found, running in background")
                        subprocess.Popen([sys.executable, 'autosimulator.py'], cwd=hk_simulator_dir)
            except Exception as e:
                print(f"Error running HK simulator: {e}")

        # Detach so the response can return right away.
        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({
            'success': True,
            'message': 'HK simulator script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to open HK simulator: {str(e)}'}), 500
1626
+
1627
@app.route('/api/open-transformer', methods=['POST'])
def open_transformer():
    """Run the Transformer.py script from the Tranformer folder in a new terminal.

    Consistency fix: the three sibling launchers (run_simulator,
    open_submitter, open_hk_simulator) have a dedicated macOS branch that
    opens Terminal.app via AppleScript; this one was missing it and would
    have fallen through to the Linux emulator probe on darwin. The branch
    is added here to match the siblings; behavior elsewhere is unchanged.
    """
    try:
        import subprocess
        import threading

        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        transformer_path = os.path.join(transformer_dir, 'Transformer.py')

        if not os.path.exists(transformer_path):
            return jsonify({'error': 'Transformer.py not found in Tranformer folder'}), 404

        def run_script():
            try:
                if os.name == 'nt':
                    # Windows: open a persistent cmd console.
                    subprocess.Popen(['cmd', '/k', 'python', 'Transformer.py'],
                                     cwd=transformer_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                elif sys.platform == 'darwin':
                    # macOS: drive Terminal.app via AppleScript (same as siblings).
                    script = f'''
                    tell application "Terminal"
                        do script "cd '{transformer_dir}' && python3 Transformer.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', script])
                else:
                    # Other POSIX: probe terminal emulators in order.
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'terminal']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', transformer_dir, '--', 'python3', 'Transformer.py'])
                            else:
                                subprocess.Popen([terminal, '-e', f'cd "{transformer_dir}" && python3 "Transformer.py"'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # No emulator found: run headless in the background.
                        subprocess.Popen([sys.executable, 'Transformer.py'], cwd=transformer_dir)
            except Exception as e:
                print(f"Error running Transformer: {e}")

        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({'success': True, 'message': 'Transformer script started in new terminal window'})

    except Exception as e:
        return jsonify({'error': f'Failed to open Transformer: {str(e)}'}), 500
1671
+
1672
+
1673
@app.route('/api/usage-doc', methods=['GET'])
def get_usage_doc():
    """Return usage.md as raw markdown text for in-app help display."""
    try:
        doc_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'usage.md')
        if not os.path.exists(doc_path):
            return jsonify({'success': False, 'error': 'usage.md not found'}), 404

        with open(doc_path, 'r', encoding='utf-8') as handle:
            markdown_text = handle.read()

        return jsonify({'success': True, 'markdown': markdown_text})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
1688
+
1689
# Global task manager for Transformer Web.
# Maps task_id -> {'queue': Queue, 'status': str, 'output_dir': str};
# the worker thread later adds 'process' and 'return_code' keys
# (see run_transformer_web / stream_transformer_logs).
transformer_tasks = {}
1691
+
1692
@app.route('/transformer-web')
def transformer_web():
    """Serve the Transformer Web front-end page."""
    page = render_template('transformer_web.html')
    return page
1695
+
1696
@app.route('/api/test-llm-connection', methods=['POST'])
def test_llm_connection():
    """Probe the configured LLM endpoint with a minimal chat completion.

    Expects JSON {apiKey, baseUrl, model}; returns {'success': bool} with
    an 'error' string on failure. (Fix: the completion response was bound
    to an unused local; the value is discarded now — only success/failure
    of the call matters.)
    """
    data = request.json
    api_key = data.get('apiKey')
    base_url = data.get('baseUrl')
    model = data.get('model')

    try:
        import openai
        client = openai.OpenAI(api_key=api_key, base_url=base_url)
        # Simple test call — cheapest possible round-trip (5 tokens).
        client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=5
        )
        return jsonify({'success': True})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})
1715
+
1716
@app.route('/api/get-default-template-summary')
def get_default_template_summary():
    """Extract the built-in template_summary string from Transformer.py."""
    try:
        base_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_path = os.path.join(base_dir, 'Tranformer', 'Transformer.py')

        # Read the file directly to avoid import issues/side effects.
        with open(transformer_path, 'r', encoding='utf-8') as handle:
            source_text = handle.read()

        # Pull the triple-quoted template_summary assignment out with a regex.
        import re
        found = re.search(r'template_summary\s*=\s*"""(.*?)"""', source_text, re.DOTALL)
        if not found:
            return jsonify({'success': False, 'error': 'Could not find template_summary in Transformer.py'})
        return jsonify({'success': True, 'summary': found.group(1)})

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})
1737
+
1738
@app.route('/api/run-transformer-web', methods=['POST'])
def run_transformer_web():
    """Start a Transformer run as a background subprocess.

    Writes a per-task JSON config (and, optionally, an ad-hoc template
    summary file) for the subprocess, registers the task in
    transformer_tasks, then streams the subprocess's stdout into a queue
    consumed by /api/stream-transformer-logs/<task_id>. Returns
    {'success': True, 'taskId': ...} immediately.

    BUG FIX: the task entry is now registered in transformer_tasks BEFORE
    the worker thread starts. Previously it was assigned after
    thread.start(), so a fast worker could execute
    ``transformer_tasks[task_id]['process'] = process`` before the entry
    existed and die with a KeyError.
    """
    data = request.json
    task_id = str(uuid.uuid4())

    script_dir = os.path.dirname(os.path.abspath(__file__))
    transformer_dir = os.path.join(script_dir, 'Tranformer')

    # Handle template summary content
    template_summary_content = data.get('template_summary_content')
    template_summary_path = None

    if template_summary_content:
        template_summary_path = os.path.join(transformer_dir, f'temp_summary_{task_id}.txt')
        with open(template_summary_path, 'w', encoding='utf-8') as f:
            f.write(template_summary_content)

    # Create a temporary config file
    config = {
        "LLM_model_name": data.get('LLM_model_name'),
        "LLM_API_KEY": data.get('LLM_API_KEY'),
        "llm_base_url": data.get('llm_base_url'),
        "username": data.get('username'),
        "password": data.get('password'),
        "template_summary_path": template_summary_path,
        "alpha_id": data.get('alpha_id'),
        "top_n_datafield": int(data.get('top_n_datafield', 50)),
        "user_region": data.get('region'),
        "user_universe": data.get('universe'),
        "user_delay": int(data.get('delay')) if data.get('delay') else None,
        "user_category": data.get('category'),
        "user_data_type": data.get('data_type', 'MATRIX')
    }

    config_path = os.path.join(transformer_dir, f'config_{task_id}.json')

    with open(config_path, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=4)

    # Start the process
    transformer_script = os.path.join(transformer_dir, 'Transformer.py')

    # Use a queue to store logs
    log_queue = queue.Queue()

    # Register the task before the worker thread can touch it (see docstring).
    transformer_tasks[task_id] = {
        'queue': log_queue,
        'status': 'running',
        'output_dir': os.path.join(transformer_dir, 'output')
    }

    def run_process():
        try:
            # Force UTF-8 encoding for the subprocess output to avoid
            # UnicodeEncodeError on Windows.
            env = os.environ.copy()
            env["PYTHONIOENCODING"] = "utf-8"

            process = subprocess.Popen(
                [sys.executable, '-u', transformer_script, config_path],
                cwd=transformer_dir,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                encoding='utf-8',
                errors='replace',
                env=env
            )

            transformer_tasks[task_id]['process'] = process

            for line in iter(process.stdout.readline, ''):
                log_queue.put(line)

            process.stdout.close()
            process.wait()
            transformer_tasks[task_id]['return_code'] = process.returncode
        except Exception as e:
            log_queue.put(f"Error running process: {str(e)}")
            transformer_tasks[task_id]['return_code'] = 1
        finally:
            log_queue.put(None)  # Signal end
            # Clean up config file and temp summary file (best effort).
            try:
                if os.path.exists(config_path):
                    os.remove(config_path)
                if template_summary_path and os.path.exists(template_summary_path):
                    os.remove(template_summary_path)
            except OSError:
                pass

    thread = threading.Thread(target=run_process)
    thread.start()

    return jsonify({'success': True, 'taskId': task_id})
1833
+
1834
@app.route('/api/transformer/login-and-fetch-options', methods=['POST'])
def transformer_login_and_fetch_options():
    """Authenticate against BRAIN, then return simulation options.

    On success the response is::

        {'success': True,
         'options': {region: {delay_str: [universes...]}},   # EQUITY only
         'categories': [...]}

    (Fix: removed a redundant second assignment of ``brain_api_url`` —
    the same constant was assigned twice.)
    """
    data = request.json
    username = data.get('username')
    password = data.get('password')

    if not username or not password:
        return jsonify({'success': False, 'error': 'Username and password are required'})

    try:
        # Add Tranformer to path to import ace_lib
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import SingleSession, get_instrument_type_region_delay

        # Use SingleSession for consistency with ace_lib
        session = SingleSession()
        # Force re-authentication
        session.auth = (username, password)

        brain_api_url = "https://api.worldquantbrain.com"
        response = session.post(brain_api_url + "/authentication")

        if response.status_code == 201:
            # Auth success
            pass
        elif response.status_code == 401:
            return jsonify({'success': False, 'error': 'Authentication failed: Invalid credentials'})
        else:
            return jsonify({'success': False, 'error': f'Authentication failed: {response.status_code} {response.text}'})

        # Now fetch options
        df = get_instrument_type_region_delay(session)

        # Fetch categories (reuses brain_api_url defined above)
        categories_resp = session.get(brain_api_url + "/data-categories")
        categories = []
        if categories_resp.status_code == 200:
            categories_data = categories_resp.json()
            # The endpoint may return a bare list or a paginated dict.
            if isinstance(categories_data, list):
                categories = categories_data
            elif isinstance(categories_data, dict):
                categories = categories_data.get('results', [])

        # Convert DataFrame to a nested dictionary structure for the frontend
        # Structure: Region -> Delay -> Universe
        # We only care about EQUITY for now as per previous code
        df_equity = df[df['InstrumentType'] == 'EQUITY']

        options = {}
        for _, row in df_equity.iterrows():
            region = row['Region']
            delay = row['Delay']
            universes = row['Universe']  # This is a list

            if region not in options:
                options[region] = {}

            # Convert delay to string for JSON keys
            delay_str = str(delay)
            if delay_str not in options[region]:
                options[region][delay_str] = universes

        return jsonify({
            'success': True,
            'options': options,
            'categories': categories
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})
1910
+
1911
@app.route('/api/stream-transformer-logs/<task_id>')
def stream_transformer_logs(task_id):
    """Stream a Transformer task's log lines to the browser via SSE.

    Emits events of the form ``data: {"status": ..., "log": ...}`` where
    status is 'running', 'completed', or 'error'. End-of-stream is signalled
    either by the None sentinel the worker pushes onto the task queue, or —
    if the sentinel is missed — by the subprocess having exited with the
    queue drained.
    """
    def generate():
        if task_id not in transformer_tasks:
            yield f"data: {json.dumps({'status': 'error', 'log': 'Task not found'})}\n\n"
            return

        q = transformer_tasks[task_id]['queue']

        while True:
            try:
                line = q.get(timeout=1)
                if line is None:
                    # Sentinel pushed by the worker's finally block: done.
                    return_code = transformer_tasks[task_id].get('return_code', 0)
                    status = 'completed' if return_code == 0 else 'error'
                    yield f"data: {json.dumps({'status': status, 'log': ''})}\n\n"
                    break
                yield f"data: {json.dumps({'status': 'running', 'log': line})}\n\n"
            except queue.Empty:
                # Check if process is still running
                if 'process' in transformer_tasks[task_id]:
                    proc = transformer_tasks[task_id]['process']
                    # Exited AND nothing left to drain -> treat as finished.
                    if proc.poll() is not None and q.empty():
                        return_code = proc.returncode
                        status = 'completed' if return_code == 0 else 'error'
                        yield f"data: {json.dumps({'status': status, 'log': ''})}\n\n"
                        break
                yield f"data: {json.dumps({'status': 'running', 'log': ''})}\n\n"  # Keep alive
    return Response(stream_with_context(generate()), mimetype='text/event-stream')
1941
+
1942
@app.route('/api/download-transformer-result/<task_id>/<file_type>')
def download_transformer_result(task_id, file_type):
    """Download one of the Transformer output JSON files as an attachment."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    output_dir = os.path.join(base_dir, 'Tranformer', 'output')

    # Map the URL file_type segment to the concrete output filename.
    filenames = {
        'candidates': 'Alpha_candidates.json',
        'success': 'Alpha_generated_expressions_success.json',
        'error': 'Alpha_generated_expressions_error.json',
    }
    filename = filenames.get(file_type)
    if filename is None:
        return "Invalid file type", 400

    return send_from_directory(output_dir, filename, as_attachment=True)
1958
+
1959
# --- Alpha Inspector ("缘分一道桥") Routes ---

# Add the '缘分一道桥' folder to sys.path so brain_alpha_inspector can be
# imported from it.
yuanfen_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '缘分一道桥')
if yuanfen_dir not in sys.path:
    sys.path.append(yuanfen_dir)

try:
    import brain_alpha_inspector
except ImportError as e:
    # The routes below check for None and degrade gracefully when the
    # module is unavailable.
    print(f"Warning: Could not import brain_alpha_inspector: {e}")
    brain_alpha_inspector = None
1971
+
1972
@app.route('/alpha_inspector')
def alpha_inspector_page():
    """Serve the Alpha Inspector UI page."""
    page = render_template('alpha_inspector.html')
    return page
1975
+
1976
@app.route('/api/yuanfen/login', methods=['POST'])
def yuanfen_login():
    """Authenticate against BRAIN and cache the session under a fresh ID."""
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    credentials = request.json

    try:
        session = brain_alpha_inspector.brain_login(credentials.get('username'),
                                                    credentials.get('password'))
        session_id = str(uuid.uuid4())
        brain_sessions[session_id] = session
        return jsonify({'success': True, 'session_id': session_id})
    except Exception as e:
        return jsonify({'success': False, 'message': str(e)})
1992
+
1993
@app.route('/api/yuanfen/fetch_alphas', methods=['POST'])
def yuanfen_fetch_alphas():
    """Fetch and analyze alphas, streaming progress as NDJSON.

    Request JSON selects the mode: 'ids' (explicit alpha ID list) or
    'date_range' (start_date/end_date, the default). Events emitted:
    {"type": "progress", ...}, then one {"type": "result", ...} or
    {"type": "error", ...}.
    """
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    data = request.json
    session_id = data.get('session_id')
    mode = data.get('mode', 'date_range')

    session = brain_sessions.get(session_id)
    if not session:
        return jsonify({'success': False, 'message': 'Invalid session'})

    def generate():
        try:
            # 1. Fetch the alpha records themselves.
            alphas = []
            if mode == 'ids':
                alpha_ids_str = data.get('alpha_ids', '')
                import re
                # Accept comma-, whitespace-, or newline-separated IDs.
                alpha_ids = [x.strip() for x in re.split(r'[,\s\n]+', alpha_ids_str) if x.strip()]
                yield json.dumps({"type": "progress", "message": f"Fetching {len(alpha_ids)} alphas by ID..."}) + "\n"
                alphas = brain_alpha_inspector.fetch_alphas_by_ids(session, alpha_ids)
            else:
                start_date = data.get('start_date')
                end_date = data.get('end_date')
                yield json.dumps({"type": "progress", "message": f"Fetching alphas from {start_date} to {end_date}..."}) + "\n"
                alphas = brain_alpha_inspector.fetch_alphas_by_date_range(session, start_date, end_date)
            yield json.dumps({"type": "progress", "message": f"Found {len(alphas)} alphas. Fetching operators..."}) + "\n"

            # 2. Fetch Operators (needed for parsing)
            operators = brain_alpha_inspector.fetch_operators(session)

            # 2.5 Fetch Simulation Options (for validation); optional —
            # analysis proceeds with None if the fetch fails.
            simulation_options = None
            if brain_alpha_inspector.get_instrument_type_region_delay:
                yield json.dumps({"type": "progress", "message": "Fetching simulation options..."}) + "\n"
                try:
                    simulation_options = brain_alpha_inspector.get_instrument_type_region_delay(session)
                except Exception as e:
                    print(f"Error fetching simulation options: {e}")

            yield json.dumps({"type": "progress", "message": f"Analyzing {len(alphas)} alphas..."}) + "\n"

            # 3. Analyze each alpha
            analyzed_alphas = []
            for i, alpha in enumerate(alphas):
                alpha_id = alpha.get('id', 'Unknown')
                yield json.dumps({"type": "progress", "message": f"Processing alpha {i+1}/{len(alphas)}: {alpha_id}"}) + "\n"

                result = brain_alpha_inspector.get_alpha_variants(session, alpha, operators, simulation_options)
                # Keep only alphas that parsed cleanly and produced variants.
                if result['valid'] and result['variants']:
                    analyzed_alphas.append(result)

            yield json.dumps({"type": "result", "success": True, "alphas": analyzed_alphas}) + "\n"

        except Exception as e:
            print(f"Error in fetch_alphas: {e}")
            yield json.dumps({"type": "error", "message": str(e)}) + "\n"

    return Response(stream_with_context(generate()), mimetype='application/x-ndjson')
2053
+
2054
@app.route('/api/yuanfen/simulate', methods=['POST'])
def yuanfen_simulate():
    """Run one full simulation payload through brain_alpha_inspector."""
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    params = request.json
    session = brain_sessions.get(params.get('session_id'))
    if not session:
        return jsonify({'success': False, 'message': 'Invalid session'})

    # The full simulation payload (the alpha_id alone would not suffice).
    payload = params.get('payload')

    try:
        ok, outcome = brain_alpha_inspector.run_simulation_payload(session, payload)
        if ok:
            return jsonify({'success': True, 'result': outcome})
        return jsonify({'success': False, 'message': outcome})
    except Exception as e:
        return jsonify({'success': False, 'message': str(e)})
2078
+
2079
def process_options_dataframe(df):
    """Flatten the simulation-options DataFrame into a nested dict.

    Returns::

        {
            "EQUITY": {
                "USA": {
                    "delays": [0, 1],
                    "universes": ["TOP3000", ...],
                    "neutralizations": ["MARKET", "INDUSTRY", ...]
                },
                "TWN": { ... }
            }
        }

    Values are de-duplicated in order of first appearance; a None or empty
    DataFrame yields {}. (Improvement: the identical list-or-string
    aggregation that was duplicated for 'Universe' and 'Neutralization' is
    factored into one helper.)
    """
    def _extend_unique(bucket, values):
        # Original semantics: a list is extended element-wise, a bare string
        # is treated as a one-element list, anything else (None, NaN, ...)
        # is ignored. Already-present values are skipped.
        if isinstance(values, str):
            values = [values]
        elif not isinstance(values, list):
            return
        for value in values:
            if value not in bucket:
                bucket.append(value)

    result = {}
    if df is None or df.empty:
        return result

    for _, row in df.iterrows():
        inst = row.get('InstrumentType', 'EQUITY')
        region = row.get('Region')

        entry = result.setdefault(inst, {}).setdefault(region, {
            "delays": [],
            "universes": [],
            "neutralizations": []
        })

        # Delays are scalar per row.
        delay = row.get('Delay')
        if delay is not None and delay not in entry['delays']:
            entry['delays'].append(delay)

        _extend_unique(entry['universes'], row.get('Universe'))
        _extend_unique(entry['neutralizations'], row.get('Neutralization'))

    return result
2134
+
2135
def get_valid_simulation_options(session):
    """Fetch valid simulation options from BRAIN.

    Returns the nested dict built by process_options_dataframe, or {} when
    ace_lib is unavailable or the fetch fails.
    """
    try:
        if not get_instrument_type_region_delay:
            print("ace_lib not available, skipping options fetch")
            return {}
        print("Fetching simulation options using ace_lib...")
        options_df = get_instrument_type_region_delay(session)
        return process_options_dataframe(options_df)
    except Exception as e:
        print(f"Error fetching options: {e}")
        return {}
2148
+
2149
+ # --- Inspiration Master Routes ---
2150
+
2151
def get_active_session():
    """Helper to get the active BRAIN session.

    Looks up the 'Session-ID' request header in brain_sessions first, then
    falls back to the ace_lib SingleSession singleton if it carries auth.
    Returns None when no usable session is found.

    BUG FIX: brain_sessions entries come in two shapes — yuanfen_login
    stores the raw session object, while this helper previously assumed a
    {'session': ...} dict and would raise TypeError on the raw form. Both
    shapes are now accepted.
    """
    # Check header first
    session_id = request.headers.get('Session-ID')
    if session_id and session_id in brain_sessions:
        entry = brain_sessions[session_id]
        if isinstance(entry, dict):
            return entry.get('session')
        return entry

    # Fallback to SingleSession
    script_dir = os.path.dirname(os.path.abspath(__file__))
    transformer_dir = os.path.join(script_dir, 'Tranformer')
    if transformer_dir not in sys.path:
        sys.path.append(transformer_dir)
    from ace_lib import SingleSession
    s = SingleSession()
    if hasattr(s, 'auth') and s.auth:
        return s
    return None
2168
+
2169
@app.route('/api/check_login', methods=['GET'])
def check_login():
    """Report whether any BRAIN session (header or singleton) is active."""
    try:
        active = get_active_session()
        logged_in = bool(active)
    except Exception as e:
        print(f"Check login error: {e}")
        logged_in = False
    return jsonify({'logged_in': logged_in})
2180
+
2181
@app.route('/api/inspiration/options', methods=['GET'])
def inspiration_options():
    """Return {instrument: {region: {'delays': [...], 'universes': [...]}}}."""
    try:
        # Use the same path logic as the main login to import ace_lib.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(base_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import get_instrument_type_region_delay

        active = get_active_session()
        if not active:
            return jsonify({'error': 'Not logged in'}), 401

        options_df = get_instrument_type_region_delay(active)

        catalog = {}
        for _, row in options_df.iterrows():
            entry = catalog.setdefault(row['InstrumentType'], {}).setdefault(
                row['Region'], {"delays": [], "universes": []})

            if row['Delay'] not in entry['delays']:
                entry['delays'].append(row['Delay'])

            # 'Universe' may be a single value or a list of values.
            universes = row['Universe']
            candidates = universes if isinstance(universes, list) else [universes]
            for universe in candidates:
                if universe not in entry['universes']:
                    entry['universes'].append(universe)

        return jsonify(catalog)
    except Exception as e:
        return jsonify({'error': str(e)}), 500
2223
+
2224
@app.route('/api/inspiration/datasets', methods=['POST'])
def inspiration_datasets():
    """List up to 50 datasets for a region/delay/universe, optionally filtered."""
    params = request.json
    region = params.get('region')
    delay = params.get('delay')
    universe = params.get('universe')
    search = params.get('search', '')

    try:
        # Use the same path logic as the main login to import ace_lib.
        base_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(base_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import get_datasets

        active = get_active_session()
        if not active:
            return jsonify({'error': 'Not logged in'}), 401

        datasets_df = get_datasets(active, region=region, delay=int(delay), universe=universe)

        if search:
            # Case-insensitive substring match over id, name, description.
            needle = search.lower()
            text_match = (
                datasets_df['id'].str.lower().str.contains(needle, na=False)
                | datasets_df['name'].str.lower().str.contains(needle, na=False)
                | datasets_df['description'].str.lower().str.contains(needle, na=False)
            )
            datasets_df = datasets_df[text_match]

        return jsonify(datasets_df.head(50).to_dict(orient='records'))
    except Exception as e:
        return jsonify({'error': str(e)}), 500
2260
+
2261
@app.route('/api/inspiration/test_llm', methods=['POST'])
def inspiration_test_llm():
    """Validate LLM credentials: try models.list, else a 1-token completion."""
    params = request.json
    api_key = params.get('apiKey')
    base_url = params.get('baseUrl')
    model = params.get('model')

    try:
        import openai
        client = openai.OpenAI(api_key=api_key, base_url=base_url)
        try:
            # Cheapest check first: listing models spends no tokens.
            client.models.list()
            return jsonify({'success': True})
        except Exception:
            # Some providers restrict models.list; fall back to a minimal
            # chat completion instead.
            try:
                client.chat.completions.create(
                    model=model,
                    messages=[{"role": "user", "content": "hi"}],
                    max_tokens=1
                )
                return jsonify({'success': True})
            except Exception as e2:
                return jsonify({'success': False, 'error': str(e2)})

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})
2290
+
2291
@app.route('/api/inspiration/generate', methods=['POST'])
def inspiration_generate():
    """Generate Alpha template ideas with an LLM.

    Feeds the LLM the REGULAR-scope operators plus the chosen dataset's
    datafields. On errors that look like context-length overflows, both
    tables are halved and the call retried (up to max_retries); any other
    error aborts immediately.
    """
    data = request.json
    api_key = data.get('apiKey')
    base_url = data.get('baseUrl')
    model = data.get('model')
    region = data.get('region')
    delay = data.get('delay')
    universe = data.get('universe')
    dataset_id = data.get('datasetId')

    try:
        import openai
        # Use the same path logic as the main login
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import get_operators, get_datafields

        s = get_active_session()
        if not s:
            return jsonify({'error': 'Not logged in'}), 401

        operators_df = get_operators(s)
        # Only REGULAR-scope operators are offered to the LLM.
        operators_df = operators_df[operators_df['scope'] == 'REGULAR']

        datafields_df = get_datafields(s, region=region, delay=int(delay), universe=universe, dataset_id=dataset_id)

        script_dir = os.path.dirname(os.path.abspath(__file__))
        prompt_path = os.path.join(script_dir, "give_me_idea", "what_is_Alpha_template.md")
        try:
            with open(prompt_path, "r", encoding="utf-8") as f:
                system_prompt = f.read()
        except:
            # Fall back to a generic system prompt if the file is missing.
            system_prompt = "You are a helpful assistant for generating Alpha templates."

        client = openai.OpenAI(api_key=api_key, base_url=base_url)

        max_retries = 5
        # Start with the full tables; halved on apparent token-limit errors.
        n_ops = len(operators_df)
        n_fields = len(datafields_df)

        last_error = None

        for attempt in range(max_retries + 1):
            ops_subset = operators_df.head(n_ops)
            fields_subset = datafields_df.head(n_fields)

            operators_info = ops_subset[['name', 'category', 'description']].to_string()
            datafields_info = fields_subset[['id', 'description', 'subcategory']].to_string()

            user_prompt = f"""
            Here is the information about available operators (first {n_ops} rows):
            {operators_info}

            Here is the information about the dataset '{dataset_id}' (first {n_fields} rows):
            {datafields_info}

            Please come up with several Alpha templates based on this information.
            Specify the AI answer in Chinese.
            """
            try:
                completion = client.chat.completions.create(
                    model=model,
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": user_prompt}
                    ],
                    temperature=0.3,
                )
                return jsonify({'result': completion.choices[0].message.content})

            except Exception as e:
                error_msg = str(e)
                last_error = error_msg
                # Heuristic context-overflow detection; halve both tables and
                # retry, giving up once both are down to a single row.
                if "token limit" in error_msg or "context_length_exceeded" in error_msg or "400" in error_msg:
                    n_ops = max(1, n_ops // 2)
                    n_fields = max(1, n_fields // 2)
                    if n_ops == 1 and n_fields == 1:
                        break
                else:
                    break

        return jsonify({'error': f"Failed after retries. Last error: {last_error}"})

    except Exception as e:
        return jsonify({'error': str(e)}), 500
2380
+
2381
if __name__ == '__main__':
    # Announce startup; the server intentionally binds to localhost only.
    print("Starting BRAIN Expression Template Decoder Web Application...")
    print("Starting in safe mode: binding only to localhost (127.0.0.1)")

    # An explicit override is possible only through an environment
    # variable (discouraged); anything non-local is rejected outright.
    host = os.environ.get('BRAIN_BIND_HOST', '127.0.0.1')
    allowed_hosts = {'127.0.0.1', 'localhost'}
    if host not in allowed_hosts:
        print(f"Refusing to bind to non-localhost address: {host}")
        print("To override (not recommended), set environment variable BRAIN_BIND_HOST")
        sys.exit(1)

    print(f"Application will run on http://{host}:5000")
    print("BRAIN API integration included - no separate proxy needed!")
    app.run(debug=False, host=host, port=5000)