cnhkmcp-2.1.2-py3-none-any.whl → cnhkmcp-2.1.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.3.dist-info}/METADATA +1 -1
- cnhkmcp-2.1.3.dist-info/RECORD +6 -0
- cnhkmcp-2.1.3.dist-info/top_level.txt +1 -0
- cnhkmcp/__init__.py +0 -125
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/README.md +0 -38
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/ace.log +0 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/config.json +0 -6
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/ace_lib.py +0 -1510
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_datasets.py +0 -157
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_documentation.py +0 -132
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/fetch_all_operators.py +0 -99
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/get_knowledgeBase_tool/helpful_functions.py +0 -180
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/icon.ico +0 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/icon.png +0 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/knowledge/test.txt +0 -1
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/main.py +0 -576
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/process_knowledge_base.py +0 -281
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/rag_engine.py +0 -408
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/requirements.txt +0 -7
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/run.bat +0 -3
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242//321/211/320/266/320/246/321/206/320/274/320/261/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +0 -265
- cnhkmcp/untracked/APP/.gitignore +0 -32
- cnhkmcp/untracked/APP/MODULAR_STRUCTURE.md +0 -112
- cnhkmcp/untracked/APP/README.md +0 -309
- cnhkmcp/untracked/APP/Tranformer/Transformer.py +0 -4985
- cnhkmcp/untracked/APP/Tranformer/ace.log +0 -0
- cnhkmcp/untracked/APP/Tranformer/ace_lib.py +0 -1510
- cnhkmcp/untracked/APP/Tranformer/helpful_functions.py +0 -180
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates.json +0 -2421
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_candidates_/321/207/320/264/342/225/221/321/204/342/225/233/320/233.json +0 -654
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_error.json +0 -1034
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_success.json +0 -444
- cnhkmcp/untracked/APP/Tranformer/output/Alpha_generated_expressions_/321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/277/321/207/320/253/342/224/244/321/206/320/236/320/265/321/210/342/225/234/342/225/234/321/205/320/225/320/265Machine_lib.json +0 -22
- cnhkmcp/untracked/APP/Tranformer/parsetab.py +0 -60
- cnhkmcp/untracked/APP/Tranformer/template_summary.txt +0 -3182
- cnhkmcp/untracked/APP/Tranformer/transformer_config.json +0 -7
- cnhkmcp/untracked/APP/Tranformer/validator.py +0 -889
- cnhkmcp/untracked/APP/ace.log +0 -69
- cnhkmcp/untracked/APP/ace_lib.py +0 -1510
- cnhkmcp/untracked/APP/blueprints/__init__.py +0 -6
- cnhkmcp/untracked/APP/blueprints/feature_engineering.py +0 -347
- cnhkmcp/untracked/APP/blueprints/idea_house.py +0 -221
- cnhkmcp/untracked/APP/blueprints/inspiration_house.py +0 -432
- cnhkmcp/untracked/APP/blueprints/paper_analysis.py +0 -570
- cnhkmcp/untracked/APP/custom_templates/templates.json +0 -1257
- cnhkmcp/untracked/APP/give_me_idea/BRAIN_Alpha_Template_Expert_SystemPrompt.md +0 -400
- cnhkmcp/untracked/APP/give_me_idea/ace_lib.py +0 -1510
- cnhkmcp/untracked/APP/give_me_idea/alpha_data_specific_template_master.py +0 -252
- cnhkmcp/untracked/APP/give_me_idea/fetch_all_datasets.py +0 -157
- cnhkmcp/untracked/APP/give_me_idea/fetch_all_operators.py +0 -99
- cnhkmcp/untracked/APP/give_me_idea/helpful_functions.py +0 -180
- cnhkmcp/untracked/APP/give_me_idea/what_is_Alpha_template.md +0 -11
- cnhkmcp/untracked/APP/helpful_functions.py +0 -180
- cnhkmcp/untracked/APP/hkSimulator/ace_lib.py +0 -1497
- cnhkmcp/untracked/APP/hkSimulator/autosimulator.py +0 -447
- cnhkmcp/untracked/APP/hkSimulator/helpful_functions.py +0 -180
- cnhkmcp/untracked/APP/mirror_config.txt +0 -20
- cnhkmcp/untracked/APP/operaters.csv +0 -129
- cnhkmcp/untracked/APP/requirements.txt +0 -53
- cnhkmcp/untracked/APP/run_app.bat +0 -28
- cnhkmcp/untracked/APP/run_app.sh +0 -34
- cnhkmcp/untracked/APP/setup_tsinghua.bat +0 -39
- cnhkmcp/untracked/APP/setup_tsinghua.sh +0 -43
- cnhkmcp/untracked/APP/simulator/alpha_submitter.py +0 -404
- cnhkmcp/untracked/APP/simulator/simulator_wqb.py +0 -618
- cnhkmcp/untracked/APP/ssrn-3332513.pdf +6 -109201
- cnhkmcp/untracked/APP/static/brain.js +0 -589
- cnhkmcp/untracked/APP/static/decoder.js +0 -1540
- cnhkmcp/untracked/APP/static/feature_engineering.js +0 -1729
- cnhkmcp/untracked/APP/static/idea_house.js +0 -937
- cnhkmcp/untracked/APP/static/inspiration.js +0 -465
- cnhkmcp/untracked/APP/static/inspiration_house.js +0 -868
- cnhkmcp/untracked/APP/static/paper_analysis.js +0 -390
- cnhkmcp/untracked/APP/static/script.js +0 -3082
- cnhkmcp/untracked/APP/static/simulator.js +0 -597
- cnhkmcp/untracked/APP/static/styles.css +0 -3127
- cnhkmcp/untracked/APP/static/usage_widget.js +0 -508
- cnhkmcp/untracked/APP/templates/alpha_inspector.html +0 -511
- cnhkmcp/untracked/APP/templates/feature_engineering.html +0 -960
- cnhkmcp/untracked/APP/templates/idea_house.html +0 -564
- cnhkmcp/untracked/APP/templates/index.html +0 -932
- cnhkmcp/untracked/APP/templates/inspiration_house.html +0 -861
- cnhkmcp/untracked/APP/templates/paper_analysis.html +0 -91
- cnhkmcp/untracked/APP/templates/simulator.html +0 -343
- cnhkmcp/untracked/APP/templates/transformer_web.html +0 -580
- cnhkmcp/untracked/APP/usage.md +0 -351
- cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/ace_lib.py +0 -1510
- cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/brain_alpha_inspector.py +0 -712
- cnhkmcp/untracked/APP//321/207/342/225/235/320/250/321/205/320/230/320/226/321/204/342/225/225/320/220/321/211/320/221/320/243/321/206/320/261/320/265/helpful_functions.py +0 -180
- cnhkmcp/untracked/APP//321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +0 -2456
- cnhkmcp/untracked/arXiv_API_Tool_Manual.md +0 -490
- cnhkmcp/untracked/arxiv_api.py +0 -229
- cnhkmcp/untracked/forum_functions.py +0 -998
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/forum_functions.py +0 -407
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/platform_functions.py +0 -2415
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/user_config.json +0 -31
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/210/320/276/320/271AI/321/210/320/277/342/225/227/321/210/342/224/220/320/251/321/204/342/225/225/320/272/321/206/320/246/320/227/321/206/320/261/320/263/321/206/320/255/320/265/321/205/320/275/320/266/321/204/342/225/235/320/252/321/204/342/225/225/320/233/321/210/342/225/234/342/225/234/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270.md +0 -101
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +0 -190
- cnhkmcp/untracked/platform_functions.py +0 -2886
- cnhkmcp/untracked/sample_mcp_config.json +0 -11
- cnhkmcp/untracked/user_config.json +0 -31
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/320/237/320/222/321/210/320/220/320/223/321/206/320/246/320/227/321/206/320/261/320/263_BRAIN_Alpha_Test_Requirements_and_Tips.md +0 -202
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Alpha_explaination_workflow.md +0 -56
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_6_Tips_Datafield_Exploration_Guide.md +0 -194
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_BRAIN_Alpha_Improvement_Workflow.md +0 -101
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_Dataset_Exploration_Expert_Manual.md +0 -436
- cnhkmcp/untracked//321/207/320/264/342/225/221/321/204/342/225/233/320/233/321/205/342/225/226/320/265/321/204/342/225/234/320/254/321/206/342/225/241/320/221_daily_report_workflow.md +0 -128
- cnhkmcp/untracked//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +0 -190
- cnhkmcp-2.1.2.dist-info/RECORD +0 -111
- cnhkmcp-2.1.2.dist-info/top_level.txt +0 -1
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.3.dist-info}/WHEEL +0 -0
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.3.dist-info}/entry_points.txt +0 -0
- {cnhkmcp-2.1.2.dist-info → cnhkmcp-2.1.3.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,2456 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
BRAIN Expression Template Decoder - Flask Web Application
|
|
3
|
-
A complete web application for decoding string templates with WorldQuant BRAIN integration
|
|
4
|
-
"""
|
|
5
|
-
|
|
6
|
-
# Auto-install dependencies if missing
|
|
7
|
-
import subprocess
|
|
8
|
-
import sys
|
|
9
|
-
import os
|
|
10
|
-
|
|
11
|
-
def install_requirements():
|
|
12
|
-
"""Install required packages from requirements.txt if they're missing"""
|
|
13
|
-
print("🔍 Checking and installing required dependencies...")
|
|
14
|
-
print("📋 Verifying packages needed for BRAIN Expression Template Decoder...")
|
|
15
|
-
|
|
16
|
-
# Get the directory where this script is located
|
|
17
|
-
script_dir = os.path.dirname(os.path.abspath(__file__))
|
|
18
|
-
|
|
19
|
-
# Check if requirements.txt exists in the script directory
|
|
20
|
-
req_file = os.path.join(script_dir, 'requirements.txt')
|
|
21
|
-
if not os.path.exists(req_file):
|
|
22
|
-
print("❌ Error: requirements.txt not found!")
|
|
23
|
-
print(f"Looking for: {req_file}")
|
|
24
|
-
return False
|
|
25
|
-
|
|
26
|
-
# Read mirror configuration if it exists
|
|
27
|
-
mirror_url = 'https://pypi.tuna.tsinghua.edu.cn/simple' # Default to Tsinghua
|
|
28
|
-
mirror_config_file = os.path.join(script_dir, 'mirror_config.txt')
|
|
29
|
-
|
|
30
|
-
if os.path.exists(mirror_config_file):
|
|
31
|
-
try:
|
|
32
|
-
with open(mirror_config_file, 'r', encoding='utf-8') as f:
|
|
33
|
-
for line in f:
|
|
34
|
-
line = line.strip()
|
|
35
|
-
if line and not line.startswith('#') and line.startswith('http'):
|
|
36
|
-
mirror_url = line
|
|
37
|
-
break
|
|
38
|
-
except Exception as e:
|
|
39
|
-
print(f"Warning: Could not read mirror configuration: {e}")
|
|
40
|
-
|
|
41
|
-
# Try to import the main packages to check if they're installed
|
|
42
|
-
packages_to_check = {
|
|
43
|
-
'flask': 'flask',
|
|
44
|
-
'flask_cors': 'flask-cors',
|
|
45
|
-
'requests': 'requests',
|
|
46
|
-
'pandas': 'pandas',
|
|
47
|
-
'PyPDF2': 'PyPDF2',
|
|
48
|
-
'docx': 'python-docx',
|
|
49
|
-
'pdfplumber': 'pdfplumber',
|
|
50
|
-
'fitz': 'PyMuPDF',
|
|
51
|
-
'cozepy': 'cozepy',
|
|
52
|
-
'lxml': 'lxml',
|
|
53
|
-
'bs4': 'beautifulsoup4'
|
|
54
|
-
}
|
|
55
|
-
|
|
56
|
-
missing_packages = []
|
|
57
|
-
for import_name, pip_name in packages_to_check.items():
|
|
58
|
-
try:
|
|
59
|
-
__import__(import_name)
|
|
60
|
-
except ImportError:
|
|
61
|
-
missing_packages.append(pip_name)
|
|
62
|
-
print(f"Missing package: {pip_name} (import name: {import_name})")
|
|
63
|
-
|
|
64
|
-
if missing_packages:
|
|
65
|
-
print(f"⚠️ Missing packages detected: {', '.join(missing_packages)}")
|
|
66
|
-
print("📦 Installing dependencies from requirements.txt...")
|
|
67
|
-
print(f"🌐 Using mirror: {mirror_url}")
|
|
68
|
-
|
|
69
|
-
try:
|
|
70
|
-
# Install all requirements using configured mirror
|
|
71
|
-
subprocess.check_call([
|
|
72
|
-
sys.executable, '-m', 'pip', 'install',
|
|
73
|
-
'-i', mirror_url,
|
|
74
|
-
'-r', req_file
|
|
75
|
-
])
|
|
76
|
-
print("✅ All dependencies installed successfully!")
|
|
77
|
-
return True
|
|
78
|
-
except subprocess.CalledProcessError:
|
|
79
|
-
print(f"❌ Error: Failed to install dependencies using {mirror_url}")
|
|
80
|
-
print("🔄 Trying with default PyPI...")
|
|
81
|
-
try:
|
|
82
|
-
# Fallback to default PyPI
|
|
83
|
-
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-r', req_file])
|
|
84
|
-
print("✅ All dependencies installed successfully!")
|
|
85
|
-
return True
|
|
86
|
-
except subprocess.CalledProcessError:
|
|
87
|
-
print("❌ Error: Failed to install dependencies. Please run manually:")
|
|
88
|
-
print(f" {sys.executable} -m pip install -i {mirror_url} -r requirements.txt")
|
|
89
|
-
return False
|
|
90
|
-
else:
|
|
91
|
-
print("✅ All required dependencies are already installed!")
|
|
92
|
-
return True
|
|
93
|
-
|
|
94
|
-
# Check and install dependencies before importing
|
|
95
|
-
# This will run every time the module is imported, but only install if needed
|
|
96
|
-
def check_and_install_dependencies():
|
|
97
|
-
"""Check and install dependencies if needed"""
|
|
98
|
-
if not globals().get('_dependencies_checked'):
|
|
99
|
-
if install_requirements():
|
|
100
|
-
globals()['_dependencies_checked'] = True
|
|
101
|
-
return True
|
|
102
|
-
else:
|
|
103
|
-
print("\nPlease install the dependencies manually and try again.")
|
|
104
|
-
return False
|
|
105
|
-
return True
|
|
106
|
-
|
|
107
|
-
# Always run the dependency check when this module is imported
|
|
108
|
-
print("🚀 Initializing BRAIN Expression Template Decoder...")
|
|
109
|
-
if not check_and_install_dependencies():
|
|
110
|
-
if __name__ == "__main__":
|
|
111
|
-
sys.exit(1)
|
|
112
|
-
else:
|
|
113
|
-
print("⚠️ Warning: Some dependencies may be missing. Please run 'pip install -r requirements.txt'")
|
|
114
|
-
print("🔄 Continuing with import, but some features may not work properly.")
|
|
115
|
-
|
|
116
|
-
# Now import the packages
|
|
117
|
-
try:
|
|
118
|
-
from flask import Flask, render_template, request, jsonify, session as flask_session, Response, stream_with_context, send_from_directory
|
|
119
|
-
from flask_cors import CORS
|
|
120
|
-
import requests
|
|
121
|
-
import json
|
|
122
|
-
import time
|
|
123
|
-
import os
|
|
124
|
-
import threading
|
|
125
|
-
import queue
|
|
126
|
-
import uuid
|
|
127
|
-
from datetime import datetime
|
|
128
|
-
print("📚 Core packages imported successfully!")
|
|
129
|
-
|
|
130
|
-
# Import ace_lib for simulation options
|
|
131
|
-
try:
|
|
132
|
-
# Try importing from hkSimulator package
|
|
133
|
-
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'hkSimulator'))
|
|
134
|
-
from ace_lib import get_instrument_type_region_delay
|
|
135
|
-
print("✅ Imported get_instrument_type_region_delay from ace_lib")
|
|
136
|
-
except ImportError as e:
|
|
137
|
-
print(f"⚠️ Warning: Could not import get_instrument_type_region_delay: {e}")
|
|
138
|
-
get_instrument_type_region_delay = None
|
|
139
|
-
|
|
140
|
-
except ImportError as e:
|
|
141
|
-
print(f"❌ Failed to import core packages: {e}")
|
|
142
|
-
print("Please run: pip install -r requirements.txt")
|
|
143
|
-
if __name__ == "__main__":
|
|
144
|
-
sys.exit(1)
|
|
145
|
-
raise
|
|
146
|
-
|
|
147
|
-
app = Flask(__name__)
|
|
148
|
-
app.secret_key = 'brain_template_decoder_secret_key_change_in_production'
|
|
149
|
-
CORS(app)
|
|
150
|
-
|
|
151
|
-
print("🌐 Flask application initialized with CORS support!")
|
|
152
|
-
|
|
153
|
-
# BRAIN API configuration
|
|
154
|
-
BRAIN_API_BASE = 'https://api.worldquantbrain.com'
|
|
155
|
-
|
|
156
|
-
# Store BRAIN sessions (in production, use proper session management like Redis)
|
|
157
|
-
brain_sessions = {}
|
|
158
|
-
|
|
159
|
-
print("🧠 BRAIN API integration configured!")
|
|
160
|
-
|
|
161
|
-
def sign_in_to_brain(username, password):
|
|
162
|
-
"""Sign in to BRAIN API with retry logic and biometric authentication support"""
|
|
163
|
-
from urllib.parse import urljoin
|
|
164
|
-
|
|
165
|
-
# Create a session to persistently store the headers
|
|
166
|
-
session = requests.Session()
|
|
167
|
-
# Save credentials into the session
|
|
168
|
-
session.auth = (username, password)
|
|
169
|
-
|
|
170
|
-
retry_count = 0
|
|
171
|
-
max_retries = 3
|
|
172
|
-
|
|
173
|
-
while retry_count < max_retries:
|
|
174
|
-
try:
|
|
175
|
-
# Send a POST request to the /authentication API
|
|
176
|
-
response = session.post(f'{BRAIN_API_BASE}/authentication')
|
|
177
|
-
|
|
178
|
-
# Check if biometric authentication is needed
|
|
179
|
-
if response.status_code == requests.codes.unauthorized:
|
|
180
|
-
if response.headers.get("WWW-Authenticate") == "persona":
|
|
181
|
-
# Get biometric auth URL
|
|
182
|
-
location = response.headers.get("Location")
|
|
183
|
-
if location:
|
|
184
|
-
biometric_url = urljoin(response.url, location)
|
|
185
|
-
|
|
186
|
-
# Return special response indicating biometric auth is needed
|
|
187
|
-
return {
|
|
188
|
-
'requires_biometric': True,
|
|
189
|
-
'biometric_url': biometric_url,
|
|
190
|
-
'session': session,
|
|
191
|
-
'location': location
|
|
192
|
-
}
|
|
193
|
-
else:
|
|
194
|
-
raise Exception("Biometric authentication required but no Location header provided")
|
|
195
|
-
else:
|
|
196
|
-
# Regular authentication failure
|
|
197
|
-
print("Incorrect username or password")
|
|
198
|
-
raise requests.HTTPError(
|
|
199
|
-
"Authentication failed: Invalid username or password",
|
|
200
|
-
response=response,
|
|
201
|
-
)
|
|
202
|
-
|
|
203
|
-
# If we get here, authentication was successful
|
|
204
|
-
response.raise_for_status()
|
|
205
|
-
print("Authentication successful.")
|
|
206
|
-
return session
|
|
207
|
-
|
|
208
|
-
except requests.HTTPError as e:
|
|
209
|
-
if "Invalid username or password" in str(e) or "Authentication failed" in str(e):
|
|
210
|
-
raise # Don't retry for invalid credentials
|
|
211
|
-
print(f"HTTP error occurred: {e}")
|
|
212
|
-
retry_count += 1
|
|
213
|
-
if retry_count < max_retries:
|
|
214
|
-
print(f"Retrying... Attempt {retry_count + 1} of {max_retries}")
|
|
215
|
-
time.sleep(10)
|
|
216
|
-
else:
|
|
217
|
-
print("Max retries reached. Authentication failed.")
|
|
218
|
-
raise
|
|
219
|
-
except Exception as e:
|
|
220
|
-
print(f"Error during authentication: {e}")
|
|
221
|
-
retry_count += 1
|
|
222
|
-
if retry_count < max_retries:
|
|
223
|
-
print(f"Retrying... Attempt {retry_count + 1} of {max_retries}")
|
|
224
|
-
time.sleep(10)
|
|
225
|
-
else:
|
|
226
|
-
print("Max retries reached. Authentication failed.")
|
|
227
|
-
raise
|
|
228
|
-
|
|
229
|
-
# Routes
|
|
230
|
-
@app.route('/')
|
|
231
|
-
def index():
|
|
232
|
-
"""Main application page"""
|
|
233
|
-
return render_template('index.html')
|
|
234
|
-
|
|
235
|
-
@app.route('/simulator')
|
|
236
|
-
def simulator():
|
|
237
|
-
"""User-friendly simulator interface"""
|
|
238
|
-
return render_template('simulator.html')
|
|
239
|
-
|
|
240
|
-
@app.route('/api/simulator/logs', methods=['GET'])
|
|
241
|
-
def get_simulator_logs():
|
|
242
|
-
"""Get available log files in the simulator directory"""
|
|
243
|
-
try:
|
|
244
|
-
import glob
|
|
245
|
-
import os
|
|
246
|
-
from datetime import datetime
|
|
247
|
-
|
|
248
|
-
# Look for log files in the current directory and simulator directory
|
|
249
|
-
script_dir = os.path.dirname(os.path.abspath(__file__))
|
|
250
|
-
simulator_dir = os.path.join(script_dir, 'simulator')
|
|
251
|
-
|
|
252
|
-
log_files = []
|
|
253
|
-
|
|
254
|
-
# Check both current directory and simulator directory
|
|
255
|
-
for directory in [script_dir, simulator_dir]:
|
|
256
|
-
if os.path.exists(directory):
|
|
257
|
-
pattern = os.path.join(directory, 'wqb*.log')
|
|
258
|
-
for log_file in glob.glob(pattern):
|
|
259
|
-
try:
|
|
260
|
-
stat = os.stat(log_file)
|
|
261
|
-
log_files.append({
|
|
262
|
-
'filename': os.path.basename(log_file),
|
|
263
|
-
'path': log_file,
|
|
264
|
-
'size': f"{stat.st_size / 1024:.1f} KB",
|
|
265
|
-
'modified': datetime.fromtimestamp(stat.st_mtime).strftime('%Y-%m-%d %H:%M:%S'),
|
|
266
|
-
'mtime': stat.st_mtime
|
|
267
|
-
})
|
|
268
|
-
except Exception as e:
|
|
269
|
-
print(f"Error reading log file {log_file}: {e}")
|
|
270
|
-
|
|
271
|
-
# Sort by modification time (newest first)
|
|
272
|
-
log_files.sort(key=lambda x: x['mtime'], reverse=True)
|
|
273
|
-
|
|
274
|
-
# Find the latest log file
|
|
275
|
-
latest = log_files[0]['filename'] if log_files else None
|
|
276
|
-
|
|
277
|
-
return jsonify({
|
|
278
|
-
'logs': log_files,
|
|
279
|
-
'latest': latest,
|
|
280
|
-
'count': len(log_files)
|
|
281
|
-
})
|
|
282
|
-
|
|
283
|
-
except Exception as e:
|
|
284
|
-
return jsonify({'error': f'Error getting log files: {str(e)}'}), 500
|
|
285
|
-
|
|
286
|
-
@app.route('/api/transformer_candidates')
|
|
287
|
-
def get_transformer_candidates():
|
|
288
|
-
"""Get Alpha candidates generated by Transformer"""
|
|
289
|
-
try:
|
|
290
|
-
# Path to the Transformer output file
|
|
291
|
-
# Note: Folder name is 'Tranformer' (missing 's') based on user context
|
|
292
|
-
file_path = os.path.join(os.path.dirname(__file__), 'Tranformer', 'output', 'Alpha_candidates.json')
|
|
293
|
-
|
|
294
|
-
if os.path.exists(file_path):
|
|
295
|
-
with open(file_path, 'r', encoding='utf-8') as f:
|
|
296
|
-
data = json.load(f)
|
|
297
|
-
return jsonify(data)
|
|
298
|
-
else:
|
|
299
|
-
return jsonify({"error": "File not found", "path": file_path})
|
|
300
|
-
except Exception as e:
|
|
301
|
-
return jsonify({"error": str(e)})
|
|
302
|
-
|
|
303
|
-
@app.route('/api/simulator/logs/<filename>', methods=['GET'])
|
|
304
|
-
def get_simulator_log_content(filename):
|
|
305
|
-
"""Get content of a specific log file"""
|
|
306
|
-
try:
|
|
307
|
-
import os
|
|
308
|
-
|
|
309
|
-
# Security: only allow log files with safe names
|
|
310
|
-
if not filename.startswith('wqb') or not filename.endswith('.log'):
|
|
311
|
-
return jsonify({'error': 'Invalid log file name'}), 400
|
|
312
|
-
|
|
313
|
-
script_dir = os.path.dirname(os.path.abspath(__file__))
|
|
314
|
-
simulator_dir = os.path.join(script_dir, 'simulator')
|
|
315
|
-
|
|
316
|
-
# Look for the file in both directories
|
|
317
|
-
log_path = None
|
|
318
|
-
for directory in [script_dir, simulator_dir]:
|
|
319
|
-
potential_path = os.path.join(directory, filename)
|
|
320
|
-
if os.path.exists(potential_path):
|
|
321
|
-
log_path = potential_path
|
|
322
|
-
break
|
|
323
|
-
|
|
324
|
-
if not log_path:
|
|
325
|
-
return jsonify({'error': 'Log file not found'}), 404
|
|
326
|
-
|
|
327
|
-
# Read file content with multiple encoding attempts
|
|
328
|
-
content = None
|
|
329
|
-
encodings_to_try = ['utf-8', 'gbk', 'gb2312', 'big5', 'latin-1', 'cp1252']
|
|
330
|
-
|
|
331
|
-
for encoding in encodings_to_try:
|
|
332
|
-
try:
|
|
333
|
-
with open(log_path, 'r', encoding=encoding) as f:
|
|
334
|
-
content = f.read()
|
|
335
|
-
print(f"Successfully read log file with {encoding} encoding")
|
|
336
|
-
break
|
|
337
|
-
except UnicodeDecodeError:
|
|
338
|
-
continue
|
|
339
|
-
except Exception as e:
|
|
340
|
-
print(f"Error reading with {encoding}: {e}")
|
|
341
|
-
continue
|
|
342
|
-
|
|
343
|
-
if content is None:
|
|
344
|
-
# Last resort: read as binary and decode with error handling
|
|
345
|
-
try:
|
|
346
|
-
with open(log_path, 'rb') as f:
|
|
347
|
-
raw_content = f.read()
|
|
348
|
-
content = raw_content.decode('utf-8', errors='replace')
|
|
349
|
-
print("Used UTF-8 with error replacement for log content")
|
|
350
|
-
except Exception as e:
|
|
351
|
-
content = f"Error: Could not decode file content - {str(e)}"
|
|
352
|
-
|
|
353
|
-
response = jsonify({
|
|
354
|
-
'content': content,
|
|
355
|
-
'filename': filename,
|
|
356
|
-
'size': len(content)
|
|
357
|
-
})
|
|
358
|
-
response.headers['Content-Type'] = 'application/json; charset=utf-8'
|
|
359
|
-
return response
|
|
360
|
-
|
|
361
|
-
except Exception as e:
|
|
362
|
-
return jsonify({'error': f'Error reading log file: {str(e)}'}), 500
|
|
363
|
-
|
|
364
|
-
@app.route('/api/simulator/test-connection', methods=['POST'])
|
|
365
|
-
def test_simulator_connection():
|
|
366
|
-
"""Test BRAIN API connection for simulator"""
|
|
367
|
-
try:
|
|
368
|
-
data = request.get_json()
|
|
369
|
-
username = data.get('username')
|
|
370
|
-
password = data.get('password')
|
|
371
|
-
|
|
372
|
-
if not username or not password:
|
|
373
|
-
return jsonify({'error': 'Username and password required'}), 400
|
|
374
|
-
|
|
375
|
-
# Test connection using the existing sign_in_to_brain function
|
|
376
|
-
result = sign_in_to_brain(username, password)
|
|
377
|
-
|
|
378
|
-
# Handle biometric authentication requirement
|
|
379
|
-
if isinstance(result, dict) and result.get('requires_biometric'):
|
|
380
|
-
return jsonify({
|
|
381
|
-
'success': False,
|
|
382
|
-
'error': 'Biometric authentication required. Please use the main interface first to complete authentication.',
|
|
383
|
-
'requires_biometric': True
|
|
384
|
-
})
|
|
385
|
-
|
|
386
|
-
# Test a simple API call to verify connection
|
|
387
|
-
brain_session = result
|
|
388
|
-
response = brain_session.get(f'{BRAIN_API_BASE}/data-fields/open')
|
|
389
|
-
|
|
390
|
-
if response.ok:
|
|
391
|
-
return jsonify({
|
|
392
|
-
'success': True,
|
|
393
|
-
'message': 'Connection successful'
|
|
394
|
-
})
|
|
395
|
-
else:
|
|
396
|
-
return jsonify({
|
|
397
|
-
'success': False,
|
|
398
|
-
'error': f'API test failed: {response.status_code}'
|
|
399
|
-
})
|
|
400
|
-
|
|
401
|
-
except Exception as e:
|
|
402
|
-
return jsonify({
|
|
403
|
-
'success': False,
|
|
404
|
-
'error': f'Connection failed: {str(e)}'
|
|
405
|
-
})
|
|
406
|
-
|
|
407
|
-
@app.route('/api/simulator/run', methods=['POST'])
|
|
408
|
-
def run_simulator_with_params():
|
|
409
|
-
"""Run simulator with user-provided parameters in a new terminal"""
|
|
410
|
-
try:
|
|
411
|
-
import subprocess
|
|
412
|
-
import threading
|
|
413
|
-
import json
|
|
414
|
-
import os
|
|
415
|
-
import tempfile
|
|
416
|
-
import sys
|
|
417
|
-
import time
|
|
418
|
-
|
|
419
|
-
# Get form data
|
|
420
|
-
json_file = request.files.get('jsonFile')
|
|
421
|
-
username = request.form.get('username')
|
|
422
|
-
password = request.form.get('password')
|
|
423
|
-
start_position = int(request.form.get('startPosition', 0))
|
|
424
|
-
concurrent_count = int(request.form.get('concurrentCount', 3))
|
|
425
|
-
random_shuffle = request.form.get('randomShuffle') == 'true'
|
|
426
|
-
use_multi_sim = request.form.get('useMultiSim') == 'true'
|
|
427
|
-
alpha_count_per_slot = int(request.form.get('alphaCountPerSlot', 3))
|
|
428
|
-
|
|
429
|
-
if not json_file or not username or not password:
|
|
430
|
-
return jsonify({'error': 'Missing required parameters'}), 400
|
|
431
|
-
|
|
432
|
-
# Validate and read JSON file
|
|
433
|
-
try:
|
|
434
|
-
json_content = json_file.read().decode('utf-8')
|
|
435
|
-
expressions_data = json.loads(json_content)
|
|
436
|
-
if not isinstance(expressions_data, list):
|
|
437
|
-
return jsonify({'error': 'JSON file must contain an array of expressions'}), 400
|
|
438
|
-
except Exception as e:
|
|
439
|
-
return jsonify({'error': f'Invalid JSON file: {str(e)}'}), 400
|
|
440
|
-
|
|
441
|
-
# Get paths
|
|
442
|
-
script_dir = os.path.dirname(os.path.abspath(__file__))
|
|
443
|
-
simulator_dir = os.path.join(script_dir, 'simulator')
|
|
444
|
-
|
|
445
|
-
# Create temporary files for the automated run
|
|
446
|
-
temp_json_path = os.path.join(simulator_dir, f'temp_expressions_{int(time.time())}.json')
|
|
447
|
-
temp_script_path = os.path.join(simulator_dir, f'temp_automated_{int(time.time())}.py')
|
|
448
|
-
temp_batch_path = os.path.join(simulator_dir, f'temp_run_{int(time.time())}.bat')
|
|
449
|
-
|
|
450
|
-
try:
|
|
451
|
-
# Save the JSON data to temporary file
|
|
452
|
-
with open(temp_json_path, 'w', encoding='utf-8') as f:
|
|
453
|
-
json.dump(expressions_data, f, ensure_ascii=False, indent=2)
|
|
454
|
-
|
|
455
|
-
# Create the automated script that calls automated_main
|
|
456
|
-
script_content = f'''
|
|
457
|
-
import asyncio
|
|
458
|
-
import sys
|
|
459
|
-
import os
|
|
460
|
-
import json
|
|
461
|
-
|
|
462
|
-
# Add current directory to path
|
|
463
|
-
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
|
464
|
-
|
|
465
|
-
import simulator_wqb
|
|
466
|
-
|
|
467
|
-
async def run_automated():
|
|
468
|
-
"""Run the automated simulator with parameters from web interface"""
|
|
469
|
-
try:
|
|
470
|
-
# Load JSON data
|
|
471
|
-
with open(r"{temp_json_path}", 'r', encoding='utf-8') as f:
|
|
472
|
-
json_content = f.read()
|
|
473
|
-
|
|
474
|
-
# Call automated_main with parameters
|
|
475
|
-
result = await simulator_wqb.automated_main(
|
|
476
|
-
json_file_content=json_content,
|
|
477
|
-
username="{username}",
|
|
478
|
-
password="{password}",
|
|
479
|
-
start_position={start_position},
|
|
480
|
-
concurrent_count={concurrent_count},
|
|
481
|
-
random_shuffle={random_shuffle},
|
|
482
|
-
use_multi_sim={use_multi_sim},
|
|
483
|
-
alpha_count_per_slot={alpha_count_per_slot}
|
|
484
|
-
)
|
|
485
|
-
|
|
486
|
-
if result['success']:
|
|
487
|
-
print("\\n" + "="*60)
|
|
488
|
-
print("🎉 WEB INTERFACE AUTOMATION Finished, Go to the webpage to check your result 🎉")
|
|
489
|
-
print("="*60)
|
|
490
|
-
print(f"Total simulations: {{result['results']['total']}}")
|
|
491
|
-
print("="*60)
|
|
492
|
-
else:
|
|
493
|
-
print("\\n" + "="*60)
|
|
494
|
-
print("❌ WEB INTERFACE AUTOMATION FAILED")
|
|
495
|
-
print("="*60)
|
|
496
|
-
print(f"Error: {{result['error']}}")
|
|
497
|
-
print("="*60)
|
|
498
|
-
|
|
499
|
-
except Exception as e:
|
|
500
|
-
print(f"\\n❌ Script execution error: {{e}}")
|
|
501
|
-
|
|
502
|
-
finally:
|
|
503
|
-
# Clean up temporary files
|
|
504
|
-
try:
|
|
505
|
-
if os.path.exists(r"{temp_json_path}"):
|
|
506
|
-
os.remove(r"{temp_json_path}")
|
|
507
|
-
if os.path.exists(r"{temp_script_path}"):
|
|
508
|
-
os.remove(r"{temp_script_path}")
|
|
509
|
-
if os.path.exists(r"{temp_batch_path}"):
|
|
510
|
-
os.remove(r"{temp_batch_path}")
|
|
511
|
-
except:
|
|
512
|
-
pass
|
|
513
|
-
|
|
514
|
-
print("\\n🔄 Press any key to close this window...")
|
|
515
|
-
input()
|
|
516
|
-
|
|
517
|
-
if __name__ == '__main__':
|
|
518
|
-
asyncio.run(run_automated())
|
|
519
|
-
'''
|
|
520
|
-
|
|
521
|
-
# Save the script
|
|
522
|
-
with open(temp_script_path, 'w', encoding='utf-8') as f:
|
|
523
|
-
f.write(script_content)
|
|
524
|
-
|
|
525
|
-
# Create batch file for Windows
|
|
526
|
-
batch_content = f'''@echo off
|
|
527
|
-
cd /d "{simulator_dir}"
|
|
528
|
-
"{sys.executable}" "{os.path.basename(temp_script_path)}"
|
|
529
|
-
'''
|
|
530
|
-
with open(temp_batch_path, 'w', encoding='utf-8') as f:
|
|
531
|
-
f.write(batch_content)
|
|
532
|
-
|
|
533
|
-
# Launch in new terminal
|
|
534
|
-
def launch_simulator():
|
|
535
|
-
try:
|
|
536
|
-
if os.name == 'nt': # Windows
|
|
537
|
-
# Use cmd /c to execute batch file properly
|
|
538
|
-
subprocess.Popen(
|
|
539
|
-
f'cmd.exe /c "{temp_batch_path}"',
|
|
540
|
-
creationflags=subprocess.CREATE_NEW_CONSOLE
|
|
541
|
-
)
|
|
542
|
-
else: # Unix-like systems
|
|
543
|
-
# Try different terminal emulators
|
|
544
|
-
terminals = ['gnome-terminal', 'xterm', 'konsole', 'terminal']
|
|
545
|
-
for terminal in terminals:
|
|
546
|
-
try:
|
|
547
|
-
if terminal == 'gnome-terminal':
|
|
548
|
-
subprocess.Popen([
|
|
549
|
-
terminal, '--working-directory', simulator_dir,
|
|
550
|
-
'--', sys.executable, os.path.basename(temp_script_path)
|
|
551
|
-
])
|
|
552
|
-
else:
|
|
553
|
-
# Use bash -c to handle shell commands like &&
|
|
554
|
-
command = f'cd "{simulator_dir}" && "{sys.executable}" "{os.path.basename(temp_script_path)}"'
|
|
555
|
-
subprocess.Popen([
|
|
556
|
-
terminal, '-e',
|
|
557
|
-
'bash', '-c', command
|
|
558
|
-
])
|
|
559
|
-
break
|
|
560
|
-
except FileNotFoundError:
|
|
561
|
-
continue
|
|
562
|
-
else:
|
|
563
|
-
# Fallback: run in background if no terminal found
|
|
564
|
-
subprocess.Popen([
|
|
565
|
-
sys.executable, temp_script_path
|
|
566
|
-
], cwd=simulator_dir)
|
|
567
|
-
except Exception as e:
|
|
568
|
-
print(f"Error launching simulator: {e}")
|
|
569
|
-
|
|
570
|
-
# Start the simulator in a separate thread
|
|
571
|
-
thread = threading.Thread(target=launch_simulator)
|
|
572
|
-
thread.daemon = True
|
|
573
|
-
thread.start()
|
|
574
|
-
|
|
575
|
-
return jsonify({
|
|
576
|
-
'success': True,
|
|
577
|
-
'message': 'Simulator launched in new terminal window',
|
|
578
|
-
'parameters': {
|
|
579
|
-
'expressions_count': len(expressions_data),
|
|
580
|
-
'concurrent_count': concurrent_count,
|
|
581
|
-
'use_multi_sim': use_multi_sim,
|
|
582
|
-
'alpha_count_per_slot': alpha_count_per_slot if use_multi_sim else None
|
|
583
|
-
}
|
|
584
|
-
})
|
|
585
|
-
|
|
586
|
-
except Exception as e:
|
|
587
|
-
# Clean up on error
|
|
588
|
-
try:
|
|
589
|
-
if os.path.exists(temp_json_path):
|
|
590
|
-
os.remove(temp_json_path)
|
|
591
|
-
if os.path.exists(temp_script_path):
|
|
592
|
-
os.remove(temp_script_path)
|
|
593
|
-
if os.path.exists(temp_batch_path):
|
|
594
|
-
os.remove(temp_batch_path)
|
|
595
|
-
except:
|
|
596
|
-
pass
|
|
597
|
-
raise e
|
|
598
|
-
|
|
599
|
-
except Exception as e:
|
|
600
|
-
return jsonify({'error': f'Failed to run simulator: {str(e)}'}), 500
|
|
601
|
-
|
|
602
|
-
@app.route('/api/simulator/stop', methods=['POST'])
|
|
603
|
-
def stop_simulator():
|
|
604
|
-
"""Stop running simulator"""
|
|
605
|
-
try:
|
|
606
|
-
# This is a placeholder - in a production environment, you'd want to
|
|
607
|
-
# implement proper process management to stop running simulations
|
|
608
|
-
return jsonify({
|
|
609
|
-
'success': True,
|
|
610
|
-
'message': 'Stop signal sent'
|
|
611
|
-
})
|
|
612
|
-
except Exception as e:
|
|
613
|
-
return jsonify({'error': f'Failed to stop simulator: {str(e)}'}), 500
|
|
614
|
-
|
|
615
|
-
@app.route('/api/authenticate', methods=['POST'])
|
|
616
|
-
def authenticate():
|
|
617
|
-
"""Authenticate with BRAIN API"""
|
|
618
|
-
try:
|
|
619
|
-
data = request.get_json()
|
|
620
|
-
username = data.get('username')
|
|
621
|
-
password = data.get('password')
|
|
622
|
-
|
|
623
|
-
if not username or not password:
|
|
624
|
-
return jsonify({'error': 'Username and password required'}), 400
|
|
625
|
-
|
|
626
|
-
# Authenticate with BRAIN
|
|
627
|
-
result = sign_in_to_brain(username, password)
|
|
628
|
-
|
|
629
|
-
# Check if biometric authentication is required
|
|
630
|
-
if isinstance(result, dict) and result.get('requires_biometric'):
|
|
631
|
-
# Store the session temporarily with biometric pending status
|
|
632
|
-
session_id = f"{username}_{int(time.time())}_biometric_pending"
|
|
633
|
-
brain_sessions[session_id] = {
|
|
634
|
-
'session': result['session'],
|
|
635
|
-
'username': username,
|
|
636
|
-
'timestamp': time.time(),
|
|
637
|
-
'biometric_pending': True,
|
|
638
|
-
'biometric_location': result['location']
|
|
639
|
-
}
|
|
640
|
-
|
|
641
|
-
# Store session ID in Flask session
|
|
642
|
-
flask_session['brain_session_id'] = session_id
|
|
643
|
-
|
|
644
|
-
return jsonify({
|
|
645
|
-
'success': False,
|
|
646
|
-
'requires_biometric': True,
|
|
647
|
-
'biometric_url': result['biometric_url'],
|
|
648
|
-
'session_id': session_id,
|
|
649
|
-
'message': 'Please complete biometric authentication by visiting the provided URL'
|
|
650
|
-
})
|
|
651
|
-
|
|
652
|
-
# Regular successful authentication
|
|
653
|
-
brain_session = result
|
|
654
|
-
|
|
655
|
-
# Fetch simulation options
|
|
656
|
-
valid_options = get_valid_simulation_options(brain_session)
|
|
657
|
-
|
|
658
|
-
# Store session
|
|
659
|
-
session_id = f"{username}_{int(time.time())}"
|
|
660
|
-
brain_sessions[session_id] = {
|
|
661
|
-
'session': brain_session,
|
|
662
|
-
'username': username,
|
|
663
|
-
'timestamp': time.time(),
|
|
664
|
-
'options': valid_options
|
|
665
|
-
}
|
|
666
|
-
|
|
667
|
-
# Store session ID in Flask session
|
|
668
|
-
flask_session['brain_session_id'] = session_id
|
|
669
|
-
|
|
670
|
-
return jsonify({
|
|
671
|
-
'success': True,
|
|
672
|
-
'session_id': session_id,
|
|
673
|
-
'message': 'Authentication successful',
|
|
674
|
-
'options': valid_options
|
|
675
|
-
})
|
|
676
|
-
|
|
677
|
-
except requests.HTTPError as e:
|
|
678
|
-
resp = getattr(e, 'response', None)
|
|
679
|
-
status_code = getattr(resp, 'status_code', None)
|
|
680
|
-
|
|
681
|
-
# Common: wrong username/password
|
|
682
|
-
if status_code == 401 or 'Invalid username or password' in str(e):
|
|
683
|
-
return jsonify({
|
|
684
|
-
'error': '用户名或密码错误',
|
|
685
|
-
'hint': '请检查账号密码是否正确;如果你的账号需要生物验证(persona),请按弹出的生物验证流程完成后再点“Complete Authentication”。'
|
|
686
|
-
}), 401
|
|
687
|
-
|
|
688
|
-
# Upstream/network/server issues
|
|
689
|
-
return jsonify({
|
|
690
|
-
'error': 'Authentication failed',
|
|
691
|
-
'detail': str(e)
|
|
692
|
-
}), 502
|
|
693
|
-
except Exception as e:
|
|
694
|
-
return jsonify({'error': f'Authentication error: {str(e)}'}), 500
|
|
695
|
-
|
|
696
|
-
@app.route('/api/complete-biometric', methods=['POST'])
|
|
697
|
-
def complete_biometric():
|
|
698
|
-
"""Complete biometric authentication after user has done it in browser"""
|
|
699
|
-
try:
|
|
700
|
-
from urllib.parse import urljoin
|
|
701
|
-
|
|
702
|
-
session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
|
|
703
|
-
if not session_id or session_id not in brain_sessions:
|
|
704
|
-
return jsonify({'error': 'Invalid or expired session'}), 401
|
|
705
|
-
|
|
706
|
-
session_info = brain_sessions[session_id]
|
|
707
|
-
|
|
708
|
-
# Check if this session is waiting for biometric completion
|
|
709
|
-
if not session_info.get('biometric_pending'):
|
|
710
|
-
return jsonify({'error': 'Session is not pending biometric authentication'}), 400
|
|
711
|
-
|
|
712
|
-
brain_session = session_info['session']
|
|
713
|
-
location = session_info['biometric_location']
|
|
714
|
-
|
|
715
|
-
# Complete the biometric authentication following the reference pattern
|
|
716
|
-
try:
|
|
717
|
-
# Construct the full URL for biometric authentication
|
|
718
|
-
auth_url = urljoin(f'{BRAIN_API_BASE}/authentication', location)
|
|
719
|
-
|
|
720
|
-
# Keep trying until biometric auth succeeds (like in reference code)
|
|
721
|
-
max_attempts = 5
|
|
722
|
-
attempt = 0
|
|
723
|
-
|
|
724
|
-
while attempt < max_attempts:
|
|
725
|
-
bio_response = brain_session.post(auth_url)
|
|
726
|
-
if bio_response.status_code == 201:
|
|
727
|
-
# Biometric authentication successful
|
|
728
|
-
break
|
|
729
|
-
elif bio_response.status_code == 401:
|
|
730
|
-
# Biometric authentication not complete yet
|
|
731
|
-
attempt += 1
|
|
732
|
-
if attempt >= max_attempts:
|
|
733
|
-
return jsonify({
|
|
734
|
-
'success': False,
|
|
735
|
-
'error': 'Biometric authentication not completed. Please try again.'
|
|
736
|
-
})
|
|
737
|
-
time.sleep(2) # Wait a bit before retrying
|
|
738
|
-
else:
|
|
739
|
-
# Other error
|
|
740
|
-
bio_response.raise_for_status()
|
|
741
|
-
|
|
742
|
-
# Update session info - remove biometric pending status
|
|
743
|
-
session_info['biometric_pending'] = False
|
|
744
|
-
del session_info['biometric_location']
|
|
745
|
-
|
|
746
|
-
# Create a new session ID without the biometric_pending suffix
|
|
747
|
-
new_session_id = f"{session_info['username']}_{int(time.time())}"
|
|
748
|
-
brain_sessions[new_session_id] = {
|
|
749
|
-
'session': brain_session,
|
|
750
|
-
'username': session_info['username'],
|
|
751
|
-
'timestamp': time.time()
|
|
752
|
-
}
|
|
753
|
-
|
|
754
|
-
# Remove old session
|
|
755
|
-
del brain_sessions[session_id]
|
|
756
|
-
|
|
757
|
-
# Update Flask session
|
|
758
|
-
flask_session['brain_session_id'] = new_session_id
|
|
759
|
-
|
|
760
|
-
return jsonify({
|
|
761
|
-
'success': True,
|
|
762
|
-
'session_id': new_session_id,
|
|
763
|
-
'message': 'Biometric authentication completed successfully'
|
|
764
|
-
})
|
|
765
|
-
|
|
766
|
-
except requests.HTTPError as e:
|
|
767
|
-
return jsonify({
|
|
768
|
-
'success': False,
|
|
769
|
-
'error': f'Failed to complete biometric authentication: {str(e)}'
|
|
770
|
-
})
|
|
771
|
-
|
|
772
|
-
except Exception as e:
|
|
773
|
-
return jsonify({
|
|
774
|
-
'success': False,
|
|
775
|
-
'error': f'Error completing biometric authentication: {str(e)}'
|
|
776
|
-
})
|
|
777
|
-
|
|
778
|
-
@app.route('/api/operators', methods=['GET'])
|
|
779
|
-
def get_operators():
|
|
780
|
-
"""Get user operators from BRAIN API"""
|
|
781
|
-
try:
|
|
782
|
-
session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
|
|
783
|
-
if not session_id or session_id not in brain_sessions:
|
|
784
|
-
return jsonify({'error': 'Invalid or expired session'}), 401
|
|
785
|
-
|
|
786
|
-
session_info = brain_sessions[session_id]
|
|
787
|
-
brain_session = session_info['session']
|
|
788
|
-
|
|
789
|
-
# First try without pagination parameters (most APIs return all operators at once)
|
|
790
|
-
try:
|
|
791
|
-
response = brain_session.get(f'{BRAIN_API_BASE}/operators')
|
|
792
|
-
response.raise_for_status()
|
|
793
|
-
|
|
794
|
-
data = response.json()
|
|
795
|
-
|
|
796
|
-
# If it's a list, we got all operators
|
|
797
|
-
if isinstance(data, list):
|
|
798
|
-
all_operators = data
|
|
799
|
-
print(f"Fetched {len(all_operators)} operators from BRAIN API (direct)")
|
|
800
|
-
# If it's a dict with results, handle pagination
|
|
801
|
-
elif isinstance(data, dict) and 'results' in data:
|
|
802
|
-
all_operators = []
|
|
803
|
-
total_count = data.get('count', len(data['results']))
|
|
804
|
-
print(f"Found {total_count} total operators, fetching all...")
|
|
805
|
-
|
|
806
|
-
# Get first batch
|
|
807
|
-
all_operators.extend(data['results'])
|
|
808
|
-
|
|
809
|
-
# Get remaining batches if needed
|
|
810
|
-
limit = 100
|
|
811
|
-
offset = len(data['results'])
|
|
812
|
-
|
|
813
|
-
while len(all_operators) < total_count:
|
|
814
|
-
params = {'limit': limit, 'offset': offset}
|
|
815
|
-
batch_response = brain_session.get(f'{BRAIN_API_BASE}/operators', params=params)
|
|
816
|
-
batch_response.raise_for_status()
|
|
817
|
-
batch_data = batch_response.json()
|
|
818
|
-
|
|
819
|
-
if isinstance(batch_data, dict) and 'results' in batch_data:
|
|
820
|
-
batch_operators = batch_data['results']
|
|
821
|
-
if not batch_operators: # No more data
|
|
822
|
-
break
|
|
823
|
-
all_operators.extend(batch_operators)
|
|
824
|
-
offset += len(batch_operators)
|
|
825
|
-
else:
|
|
826
|
-
break
|
|
827
|
-
|
|
828
|
-
print(f"Fetched {len(all_operators)} operators from BRAIN API (paginated)")
|
|
829
|
-
else:
|
|
830
|
-
# Unknown format, treat as empty
|
|
831
|
-
all_operators = []
|
|
832
|
-
print("Unknown response format for operators API")
|
|
833
|
-
|
|
834
|
-
except Exception as e:
|
|
835
|
-
print(f"Error fetching operators: {str(e)}")
|
|
836
|
-
# Fallback: try with explicit pagination
|
|
837
|
-
all_operators = []
|
|
838
|
-
limit = 100
|
|
839
|
-
offset = 0
|
|
840
|
-
|
|
841
|
-
while True:
|
|
842
|
-
params = {'limit': limit, 'offset': offset}
|
|
843
|
-
response = brain_session.get(f'{BRAIN_API_BASE}/operators', params=params)
|
|
844
|
-
response.raise_for_status()
|
|
845
|
-
|
|
846
|
-
data = response.json()
|
|
847
|
-
if isinstance(data, list):
|
|
848
|
-
all_operators.extend(data)
|
|
849
|
-
if len(data) < limit:
|
|
850
|
-
break
|
|
851
|
-
elif isinstance(data, dict) and 'results' in data:
|
|
852
|
-
batch_operators = data['results']
|
|
853
|
-
all_operators.extend(batch_operators)
|
|
854
|
-
if len(batch_operators) < limit:
|
|
855
|
-
break
|
|
856
|
-
else:
|
|
857
|
-
break
|
|
858
|
-
|
|
859
|
-
offset += limit
|
|
860
|
-
|
|
861
|
-
print(f"Fetched {len(all_operators)} operators from BRAIN API (fallback)")
|
|
862
|
-
|
|
863
|
-
# Extract name, category, description, definition and other fields (if available)
|
|
864
|
-
filtered_operators = []
|
|
865
|
-
for op in all_operators:
|
|
866
|
-
operator_data = {
|
|
867
|
-
'name': op['name'],
|
|
868
|
-
'category': op['category']
|
|
869
|
-
}
|
|
870
|
-
# Include description if available
|
|
871
|
-
if 'description' in op and op['description']:
|
|
872
|
-
operator_data['description'] = op['description']
|
|
873
|
-
# Include definition if available
|
|
874
|
-
if 'definition' in op and op['definition']:
|
|
875
|
-
operator_data['definition'] = op['definition']
|
|
876
|
-
# Include usage count if available
|
|
877
|
-
if 'usageCount' in op:
|
|
878
|
-
operator_data['usageCount'] = op['usageCount']
|
|
879
|
-
# Include other useful fields if available
|
|
880
|
-
if 'example' in op and op['example']:
|
|
881
|
-
operator_data['example'] = op['example']
|
|
882
|
-
filtered_operators.append(operator_data)
|
|
883
|
-
|
|
884
|
-
return jsonify(filtered_operators)
|
|
885
|
-
|
|
886
|
-
except Exception as e:
|
|
887
|
-
print(f"Error fetching operators: {str(e)}")
|
|
888
|
-
return jsonify({'error': f'Failed to fetch operators: {str(e)}'}), 500
|
|
889
|
-
|
|
890
|
-
@app.route('/api/simulation-options', methods=['GET'])
|
|
891
|
-
def get_simulation_options():
|
|
892
|
-
"""Get valid simulation options from BRAIN"""
|
|
893
|
-
try:
|
|
894
|
-
session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
|
|
895
|
-
if not session_id or session_id not in brain_sessions:
|
|
896
|
-
return jsonify({'error': 'Invalid or expired session'}), 401
|
|
897
|
-
|
|
898
|
-
session_info = brain_sessions[session_id]
|
|
899
|
-
|
|
900
|
-
# Return cached options if available
|
|
901
|
-
if 'options' in session_info and session_info['options']:
|
|
902
|
-
return jsonify(session_info['options'])
|
|
903
|
-
|
|
904
|
-
# Otherwise fetch them
|
|
905
|
-
brain_session = session_info['session']
|
|
906
|
-
valid_options = get_valid_simulation_options(brain_session)
|
|
907
|
-
|
|
908
|
-
# Cache them
|
|
909
|
-
session_info['options'] = valid_options
|
|
910
|
-
|
|
911
|
-
return jsonify(valid_options)
|
|
912
|
-
|
|
913
|
-
except Exception as e:
|
|
914
|
-
print(f"Error fetching simulation options: {str(e)}")
|
|
915
|
-
return jsonify({'error': f'Failed to fetch simulation options: {str(e)}'}), 500
|
|
916
|
-
|
|
917
|
-
@app.route('/api/datasets', methods=['GET'])
|
|
918
|
-
def get_datasets():
|
|
919
|
-
"""Get datasets from BRAIN API"""
|
|
920
|
-
try:
|
|
921
|
-
session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
|
|
922
|
-
if not session_id or session_id not in brain_sessions:
|
|
923
|
-
return jsonify({'error': 'Invalid or expired session'}), 401
|
|
924
|
-
|
|
925
|
-
session_info = brain_sessions[session_id]
|
|
926
|
-
brain_session = session_info['session']
|
|
927
|
-
|
|
928
|
-
# Get parameters
|
|
929
|
-
region = request.args.get('region', 'USA')
|
|
930
|
-
delay = request.args.get('delay', '1')
|
|
931
|
-
universe = request.args.get('universe', 'TOP3000')
|
|
932
|
-
instrument_type = request.args.get('instrument_type', 'EQUITY')
|
|
933
|
-
|
|
934
|
-
# Fetch datasets (theme=false)
|
|
935
|
-
url_false = f"{BRAIN_API_BASE}/data-sets?instrumentType={instrument_type}®ion={region}&delay={delay}&universe={universe}&theme=false"
|
|
936
|
-
response_false = brain_session.get(url_false)
|
|
937
|
-
response_false.raise_for_status()
|
|
938
|
-
datasets_false = response_false.json().get('results', [])
|
|
939
|
-
|
|
940
|
-
# Fetch datasets (theme=true)
|
|
941
|
-
url_true = f"{BRAIN_API_BASE}/data-sets?instrumentType={instrument_type}®ion={region}&delay={delay}&universe={universe}&theme=true"
|
|
942
|
-
response_true = brain_session.get(url_true)
|
|
943
|
-
response_true.raise_for_status()
|
|
944
|
-
datasets_true = response_true.json().get('results', [])
|
|
945
|
-
|
|
946
|
-
# Combine results
|
|
947
|
-
all_datasets = datasets_false + datasets_true
|
|
948
|
-
|
|
949
|
-
return jsonify({'results': all_datasets, 'count': len(all_datasets)})
|
|
950
|
-
|
|
951
|
-
except Exception as e:
|
|
952
|
-
print(f"Error fetching datasets: {str(e)}")
|
|
953
|
-
return jsonify({'error': f'Failed to fetch datasets: {str(e)}'}), 500
|
|
954
|
-
|
|
955
|
-
@app.route('/api/datafields', methods=['GET'])
|
|
956
|
-
def get_datafields():
|
|
957
|
-
"""Get data fields from BRAIN API"""
|
|
958
|
-
try:
|
|
959
|
-
session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
|
|
960
|
-
if not session_id or session_id not in brain_sessions:
|
|
961
|
-
return jsonify({'error': 'Invalid or expired session'}), 401
|
|
962
|
-
|
|
963
|
-
session_info = brain_sessions[session_id]
|
|
964
|
-
brain_session = session_info['session']
|
|
965
|
-
|
|
966
|
-
# Get parameters
|
|
967
|
-
region = request.args.get('region', 'USA')
|
|
968
|
-
delay = request.args.get('delay', '1')
|
|
969
|
-
universe = request.args.get('universe', 'TOP3000')
|
|
970
|
-
dataset_id = request.args.get('dataset_id', 'fundamental6')
|
|
971
|
-
search = ''
|
|
972
|
-
|
|
973
|
-
# Build URL template based on notebook implementation
|
|
974
|
-
if len(search) == 0:
|
|
975
|
-
url_template = f"{BRAIN_API_BASE}/data-fields?" + \
|
|
976
|
-
f"&instrumentType=EQUITY" + \
|
|
977
|
-
f"®ion={region}&delay={delay}&universe={universe}&dataset.id={dataset_id}&limit=50" + \
|
|
978
|
-
"&offset={x}"
|
|
979
|
-
# Get count from first request
|
|
980
|
-
first_response = brain_session.get(url_template.format(x=0))
|
|
981
|
-
first_response.raise_for_status()
|
|
982
|
-
count = first_response.json()['count']
|
|
983
|
-
else:
|
|
984
|
-
url_template = f"{BRAIN_API_BASE}/data-fields?" + \
|
|
985
|
-
f"&instrumentType=EQUITY" + \
|
|
986
|
-
f"®ion={region}&delay={delay}&universe={universe}&limit=50" + \
|
|
987
|
-
f"&search={search}" + \
|
|
988
|
-
"&offset={x}"
|
|
989
|
-
count = 100 # Default for search queries
|
|
990
|
-
|
|
991
|
-
# Fetch all data fields in batches
|
|
992
|
-
datafields_list = []
|
|
993
|
-
for x in range(0, count, 50):
|
|
994
|
-
response = brain_session.get(url_template.format(x=x))
|
|
995
|
-
response.raise_for_status()
|
|
996
|
-
datafields_list.append(response.json()['results'])
|
|
997
|
-
|
|
998
|
-
# Flatten the list
|
|
999
|
-
datafields_list_flat = [item for sublist in datafields_list for item in sublist]
|
|
1000
|
-
|
|
1001
|
-
# Filter fields to only include necessary information
|
|
1002
|
-
filtered_fields = [
|
|
1003
|
-
{
|
|
1004
|
-
'id': field['id'],
|
|
1005
|
-
'description': field['description'],
|
|
1006
|
-
'type': field['type'],
|
|
1007
|
-
'coverage': field.get('coverage', 0),
|
|
1008
|
-
'userCount': field.get('userCount', 0),
|
|
1009
|
-
'alphaCount': field.get('alphaCount', 0)
|
|
1010
|
-
}
|
|
1011
|
-
for field in datafields_list_flat
|
|
1012
|
-
]
|
|
1013
|
-
|
|
1014
|
-
return jsonify(filtered_fields)
|
|
1015
|
-
|
|
1016
|
-
except Exception as e:
|
|
1017
|
-
return jsonify({'error': f'Failed to fetch data fields: {str(e)}'}), 500
|
|
1018
|
-
|
|
1019
|
-
@app.route('/api/dataset-description', methods=['GET'])
def get_dataset_description():
    """Get dataset description from BRAIN API"""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # Get parameters
        region = request.args.get('region', 'USA')
        delay = request.args.get('delay', '1')
        universe = request.args.get('universe', 'TOP3000')
        dataset_id = request.args.get('dataset_id', 'analyst10')

        # Build URL for dataset description
        url = (f"{BRAIN_API_BASE}/data-sets/{dataset_id}?"
               f"instrumentType=EQUITY&region={region}&delay={delay}&universe={universe}")

        print(f"Getting dataset description from: {url}")

        # Make request to BRAIN API
        response = brain_session.get(url)
        response.raise_for_status()

        data = response.json()
        description = data.get('description', 'No description available')

        print(f"Dataset description retrieved: {description[:100]}...")

        return jsonify({
            'success': True,
            'description': description,
            'dataset_id': dataset_id
        })

    except Exception as e:
        print(f"Dataset description error: {str(e)}")
        return jsonify({'error': f'Failed to get dataset description: {str(e)}'}), 500

@app.route('/api/status', methods=['GET'])
def check_status():
    """Check if session is still valid"""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'valid': False})

        session_info = brain_sessions[session_id]
        # Check if the session is not too old (24 hours)
        if time.time() - session_info['timestamp'] > 86400:
            del brain_sessions[session_id]
            return jsonify({'valid': False})

        # Check if biometric authentication is pending
        if session_info.get('biometric_pending'):
            return jsonify({
                'valid': False,
                'biometric_pending': True,
                'username': session_info['username'],
                'message': 'Biometric authentication pending'
            })

        return jsonify({
            'valid': True,
            'username': session_info['username']
        })

    except Exception as e:
        return jsonify({'error': f'Status check failed: {str(e)}'}), 500

@app.route('/api/logout', methods=['POST'])
def logout():
    """Logout and clean up session"""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if session_id and session_id in brain_sessions:
            del brain_sessions[session_id]

        if 'brain_session_id' in flask_session:
            flask_session.pop('brain_session_id')

        return jsonify({'success': True, 'message': 'Logged out successfully'})

    except Exception as e:
        return jsonify({'error': f'Logout failed: {str(e)}'}), 500

@app.route('/api/test-expression', methods=['POST'])
def test_expression():
    """Test an expression using BRAIN API simulation"""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # Get the simulation data from the request
        simulation_data = request.get_json()

        # Ensure required fields are present
        if 'type' not in simulation_data:
            simulation_data['type'] = 'REGULAR'

        # Ensure settings have required fields
        if 'settings' not in simulation_data:
            simulation_data['settings'] = {}

        # Set default values for missing settings
        default_settings = {
            'instrumentType': 'EQUITY',
            'region': 'USA',
            'universe': 'TOP3000',
            'delay': 1,
            'decay': 15,
            'neutralization': 'SUBINDUSTRY',
            'truncation': 0.08,
            'pasteurization': 'ON',
            'testPeriod': 'P1Y6M',
            'unitHandling': 'VERIFY',
            'nanHandling': 'OFF',
            'language': 'FASTEXPR',
            'visualization': False
        }

        for key, value in default_settings.items():
            if key not in simulation_data['settings']:
                simulation_data['settings'][key] = value

        # Convert string boolean values to actual booleans
        if isinstance(simulation_data['settings'].get('visualization'), str):
            viz_value = simulation_data['settings']['visualization'].lower()
            simulation_data['settings']['visualization'] = viz_value == 'true'

        # Validate settings against cached options
        valid_options = session_info.get('options')
        if valid_options:
            settings = simulation_data['settings']
            inst_type = settings.get('instrumentType', 'EQUITY')
            region = settings.get('region')
            neut = settings.get('neutralization')

            # Check if this specific neutralization is allowed for this region
            allowed_neuts = valid_options.get(inst_type, {}).get(region, {}).get('neutralizations', [])

            if neut and allowed_neuts and neut not in allowed_neuts:
                print(f"Warning: {neut} is invalid for {region}. Auto-correcting.")
                # Auto-correct to the first valid one if available
                if allowed_neuts:
                    print(f"Auto-correcting neutralization to {allowed_neuts[0]}")
                    settings['neutralization'] = allowed_neuts[0]
                else:
                    del settings['neutralization']

        # Send simulation request (following notebook pattern)
        try:
            message = {}
            simulation_response = brain_session.post(f'{BRAIN_API_BASE}/simulations', json=simulation_data)

            # Check if we got a Location header (following notebook pattern)
            if 'Location' in simulation_response.headers:
                # Follow the location to get the actual status
                message = brain_session.get(simulation_response.headers['Location']).json()

                # Check if the simulation is running or completed
                if 'progress' in message:
                    return jsonify({
                        'success': True,
                        'status': 'RUNNING',
                        'message': 'Simulation is running',
                        'full_response': message
                    })
                else:
                    # Return the full message as in the notebook
                    return jsonify({
                        'success': message.get('status') != 'ERROR',
                        'status': message.get('status', 'UNKNOWN'),
                        'message': str(message),
                        'full_response': message
                    })
            else:
                # Try to get the error from the response body (following notebook pattern)
                try:
                    message = simulation_response.json()
                    return jsonify({
                        'success': False,
                        'status': 'ERROR',
                        'message': str(message),
                        'full_response': message
                    })
                except Exception:
                    return jsonify({
                        'success': False,
                        'status': 'ERROR',
                        'message': 'Web connection error',
                        'full_response': {}
                    })

        except Exception as e:
            return jsonify({
                'success': False,
                'status': 'ERROR',
                'message': 'Web connection error',
                'full_response': {'error': str(e)}
            })

    except Exception as e:
        import traceback
        return jsonify({
            'success': False,
            'status': 'ERROR',
            'message': f'Test expression failed: {str(e)}',
            'full_response': {'error': str(e), 'traceback': traceback.format_exc()}
        }), 500

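Editor's note: the endpoint above uses the asynchronous-job convention it describes: POST `/simulations` answers with a `Location` header, and the payload behind that URL carries a `progress` field until the job settles. The handler returns immediately with status RUNNING; a sketch of a blocking variant that polls the same Location URL until completion follows (the helper name, poll interval, and timeout are assumptions for illustration).

```python
import time
import requests

def wait_for_simulation(session: requests.Session, location: str,
                        poll_seconds: float = 5.0, timeout: float = 600.0) -> dict:
    """Poll a simulation's Location URL until it stops reporting progress.

    Sketch only, mirroring the convention used by the endpoint above.
    """
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        message = session.get(location).json()
        # While the job is running the payload carries a 'progress' field;
        # once that disappears the payload is the final result.
        if "progress" not in message:
            return message
        time.sleep(poll_seconds)
    raise TimeoutError(f"simulation at {location} did not finish in {timeout}s")
```
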
@app.route('/api/test-operators', methods=['GET'])
def test_operators():
    """Test endpoint to check raw BRAIN operators API response"""
    try:
        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
        if not session_id or session_id not in brain_sessions:
            return jsonify({'error': 'Invalid or expired session'}), 401

        session_info = brain_sessions[session_id]
        brain_session = session_info['session']

        # Get raw response from BRAIN API
        response = brain_session.get(f'{BRAIN_API_BASE}/operators')
        response.raise_for_status()

        data = response.json()

        # Return raw response info for debugging
        result = {
            'type': str(type(data)),
            'is_list': isinstance(data, list),
            'is_dict': isinstance(data, dict),
            'length': len(data) if isinstance(data, list) else None,
            'keys': list(data.keys()) if isinstance(data, dict) else None,
            'count_key': data.get('count') if isinstance(data, dict) else None,
            'first_few_items': data[:3] if isinstance(data, list) else (data.get('results', [])[:3] if isinstance(data, dict) else None)
        }

        return jsonify(result)

    except Exception as e:
        return jsonify({'error': f'Test failed: {str(e)}'}), 500

# Import and register blueprints
try:
    from blueprints import idea_house_bp, paper_analysis_bp, feature_engineering_bp, inspiration_house_bp
    print("📦 Blueprints imported successfully!")

    # Registration lives inside the try block so a failed import does not
    # crash on undefined blueprint names.
    app.register_blueprint(idea_house_bp, url_prefix='/idea-house')
    app.register_blueprint(paper_analysis_bp, url_prefix='/paper-analysis')
    app.register_blueprint(feature_engineering_bp, url_prefix='/feature-engineering')
    app.register_blueprint(inspiration_house_bp, url_prefix='/inspiration-house')

    print("🔧 All blueprints registered successfully!")
    print("   - Idea House: /idea-house")
    print("   - Paper Analysis: /paper-analysis")
    print("   - Feature Engineering: /feature-engineering")
    print("   - Inspiration House: /inspiration-house")
except ImportError as e:
    print(f"❌ Failed to import blueprints: {e}")
    print("Some features may not be available.")

# Template Management Routes
# Get the directory where this script is located for templates
script_dir = os.path.dirname(os.path.abspath(__file__))
TEMPLATES_DIR = os.path.join(script_dir, 'custom_templates')

# Ensure the templates directory exists
if not os.path.exists(TEMPLATES_DIR):
    os.makedirs(TEMPLATES_DIR)
    print(f"📁 Created templates directory: {TEMPLATES_DIR}")
else:
    print(f"📁 Templates directory ready: {TEMPLATES_DIR}")

print("✅ BRAIN Expression Template Decoder fully initialized!")
print("🎯 Ready to process templates and integrate with BRAIN API!")

@app.route('/api/templates', methods=['GET'])
def get_templates():
    """Get all custom templates"""
    try:
        templates = []
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')

        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                templates = json.load(f)

        return jsonify(templates)
    except Exception as e:
        return jsonify({'error': f'Error loading templates: {str(e)}'}), 500

@app.route('/api/templates', methods=['POST'])
def save_template():
    """Save a new custom template"""
    try:
        data = request.get_json()
        name = data.get('name', '').strip()
        description = data.get('description', '').strip()
        expression = data.get('expression', '').strip()
        template_configurations = data.get('templateConfigurations', {})

        if not name or not expression:
            return jsonify({'error': 'Name and expression are required'}), 400

        # Load existing templates
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        templates = []

        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                templates = json.load(f)

        # Check for duplicate names
        existing_index = next((i for i, t in enumerate(templates) if t['name'] == name), None)

        new_template = {
            'name': name,
            'description': description,
            'expression': expression,
            'templateConfigurations': template_configurations,
            'createdAt': datetime.now().isoformat()
        }

        if existing_index is not None:
            # Update the existing template but preserve createdAt if it exists
            if 'createdAt' in templates[existing_index]:
                new_template['createdAt'] = templates[existing_index]['createdAt']
            new_template['updatedAt'] = datetime.now().isoformat()
            templates[existing_index] = new_template
            message = f'Template "{name}" updated successfully'
        else:
            # Add a new template
            templates.append(new_template)
            message = f'Template "{name}" saved successfully'

        # Save to file
        with open(templates_file, 'w', encoding='utf-8') as f:
            json.dump(templates, f, indent=2, ensure_ascii=False)

        return jsonify({'success': True, 'message': message})

    except Exception as e:
        return jsonify({'error': f'Error saving template: {str(e)}'}), 500

@app.route('/api/templates/<int:template_id>', methods=['DELETE'])
def delete_template(template_id):
    """Delete a custom template"""
    try:
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        templates = []

        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                templates = json.load(f)

        if 0 <= template_id < len(templates):
            deleted_template = templates.pop(template_id)

            # Save updated templates
            with open(templates_file, 'w', encoding='utf-8') as f:
                json.dump(templates, f, indent=2, ensure_ascii=False)

            return jsonify({'success': True, 'message': f'Template "{deleted_template["name"]}" deleted successfully'})
        else:
            return jsonify({'error': 'Template not found'}), 404

    except Exception as e:
        return jsonify({'error': f'Error deleting template: {str(e)}'}), 500

@app.route('/api/templates/export', methods=['GET'])
def export_templates():
    """Export all templates as JSON"""
    try:
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        templates = []

        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                templates = json.load(f)

        return jsonify(templates)

    except Exception as e:
        return jsonify({'error': f'Error exporting templates: {str(e)}'}), 500

@app.route('/api/templates/import', methods=['POST'])
def import_templates():
    """Import templates from JSON"""
    try:
        data = request.get_json()
        imported_templates = data.get('templates', [])
        overwrite = data.get('overwrite', False)

        if not isinstance(imported_templates, list):
            return jsonify({'error': 'Invalid template format'}), 400

        # Validate template structure
        valid_templates = []
        for template in imported_templates:
            if (isinstance(template, dict) and
                    'name' in template and 'expression' in template and
                    template['name'].strip() and template['expression'].strip()):
                valid_templates.append({
                    'name': template['name'].strip(),
                    'description': template.get('description', '').strip(),
                    'expression': template['expression'].strip(),
                    'templateConfigurations': template.get('templateConfigurations', {}),
                    'createdAt': template.get('createdAt', datetime.now().isoformat())
                })

        if not valid_templates:
            return jsonify({'error': 'No valid templates found'}), 400

        # Load existing templates
        templates_file = os.path.join(TEMPLATES_DIR, 'templates.json')
        existing_templates = []

        if os.path.exists(templates_file):
            with open(templates_file, 'r', encoding='utf-8') as f:
                existing_templates = json.load(f)

        # Handle duplicates
        duplicates = []
        new_templates = []

        for template in valid_templates:
            existing_index = next((i for i, t in enumerate(existing_templates) if t['name'] == template['name']), None)

            if existing_index is not None:
                duplicates.append(template['name'])
                if overwrite:
                    existing_templates[existing_index] = template
            else:
                new_templates.append(template)

        # Add new templates
        existing_templates.extend(new_templates)

        # Save to file
        with open(templates_file, 'w', encoding='utf-8') as f:
            json.dump(existing_templates, f, indent=2, ensure_ascii=False)

        result = {
            'success': True,
            'imported': len(new_templates),
            'duplicates': duplicates,
            'overwritten': len(duplicates) if overwrite else 0
        }

        return jsonify(result)

    except Exception as e:
        return jsonify({'error': f'Error importing templates: {str(e)}'}), 500

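Editor's note: for reference, the import endpoint above expects a JSON body with a `templates` list and an optional `overwrite` flag. A minimal sketch of calling it with `requests` follows; the localhost address, port, and the sample template values are assumptions for illustration only.

```python
import requests

payload = {
    "overwrite": False,  # keep existing templates on name collisions
    "templates": [
        {
            "name": "example-template",
            "description": "illustrative entry only",
            "expression": "rank(close)",
            "templateConfigurations": {},
        }
    ],
}

# Assumed local dev address; adjust to wherever the app is served.
resp = requests.post("http://127.0.0.1:5000/api/templates/import", json=payload)
print(resp.json())  # e.g. {'success': True, 'imported': 1, 'duplicates': [], 'overwritten': 0}
```
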
@app.route('/api/run-simulator', methods=['POST'])
def run_simulator():
    """Run the simulator_wqb.py script"""
    try:
        import subprocess
        import threading

        # Get the script path (now in the simulator subfolder)
        script_dir = os.path.dirname(os.path.abspath(__file__))
        simulator_dir = os.path.join(script_dir, 'simulator')
        simulator_path = os.path.join(simulator_dir, 'simulator_wqb.py')

        # Check if the script exists
        if not os.path.exists(simulator_path):
            return jsonify({'error': 'simulator_wqb.py not found in simulator folder'}), 404

        # Run the script in a new terminal window
        def run_script():
            try:
                if os.name == 'nt':
                    # Windows: use cmd
                    subprocess.Popen(['cmd', '/k', 'python', 'simulator_wqb.py'],
                                     cwd=simulator_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                elif sys.platform == 'darwin':
                    # macOS: use AppleScript to drive Terminal.app
                    script = f'''
                    tell application "Terminal"
                        do script "cd '{simulator_dir}' && python3 simulator_wqb.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', script])
                else:
                    # Linux: try multiple terminal emulators
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', simulator_dir,
                                                  '--', 'python3', 'simulator_wqb.py'])
                            else:
                                subprocess.Popen([terminal, '-e',
                                                  f'cd "{simulator_dir}" && python3 simulator_wqb.py'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # Fallback: run in the background
                        print("Warning: No terminal emulator found, running in background")
                        subprocess.Popen([sys.executable, 'simulator_wqb.py'], cwd=simulator_dir)
            except Exception as e:
                print(f"Error running simulator: {e}")

        # Start the script in a separate thread
        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({
            'success': True,
            'message': 'Simulator script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to run simulator: {str(e)}'}), 500

@app.route('/api/open-submitter', methods=['POST'])
def open_submitter():
    """Run the alpha_submitter.py script"""
    try:
        import subprocess
        import threading

        # Get the script path (now in the simulator subfolder)
        script_dir = os.path.dirname(os.path.abspath(__file__))
        simulator_dir = os.path.join(script_dir, 'simulator')
        submitter_path = os.path.join(simulator_dir, 'alpha_submitter.py')

        # Check if the script exists
        if not os.path.exists(submitter_path):
            return jsonify({'error': 'alpha_submitter.py not found in simulator folder'}), 404

        # Run the script in a new terminal window
        def run_script():
            try:
                if os.name == 'nt':
                    # Windows: use cmd
                    subprocess.Popen(['cmd', '/k', 'python', 'alpha_submitter.py'],
                                     cwd=simulator_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                elif sys.platform == 'darwin':
                    # macOS: use AppleScript to drive Terminal.app
                    script = f'''
                    tell application "Terminal"
                        do script "cd '{simulator_dir}' && python3 alpha_submitter.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', script])
                else:
                    # Linux: try multiple terminal emulators
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', simulator_dir,
                                                  '--', 'python3', 'alpha_submitter.py'])
                            else:
                                subprocess.Popen([terminal, '-e',
                                                  f'cd "{simulator_dir}" && python3 alpha_submitter.py'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # Fallback: run in the background
                        print("Warning: No terminal emulator found, running in background")
                        subprocess.Popen([sys.executable, 'alpha_submitter.py'], cwd=simulator_dir)
            except Exception as e:
                print(f"Error running submitter: {e}")

        # Start the script in a separate thread
        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({
            'success': True,
            'message': 'Alpha submitter script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to open submitter: {str(e)}'}), 500

@app.route('/api/open-hk-simulator', methods=['POST'])
def open_hk_simulator():
    """Run the autosimulator.py script from the hkSimulator folder"""
    try:
        import subprocess
        import threading

        # Get the script path (hkSimulator subfolder)
        script_dir = os.path.dirname(os.path.abspath(__file__))
        hk_simulator_dir = os.path.join(script_dir, 'hkSimulator')
        autosimulator_path = os.path.join(hk_simulator_dir, 'autosimulator.py')

        # Check if the script exists
        if not os.path.exists(autosimulator_path):
            return jsonify({'error': 'autosimulator.py not found in hkSimulator folder'}), 404

        # Run the script in a new terminal window
        def run_script():
            try:
                if os.name == 'nt':
                    # Windows: use cmd
                    subprocess.Popen(['cmd', '/k', 'python', 'autosimulator.py'],
                                     cwd=hk_simulator_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                elif sys.platform == 'darwin':
                    # macOS: use AppleScript to drive Terminal.app
                    script = f'''
                    tell application "Terminal"
                        do script "cd '{hk_simulator_dir}' && python3 autosimulator.py"
                        activate
                    end tell
                    '''
                    subprocess.Popen(['osascript', '-e', script])
                else:
                    # Linux: try multiple terminal emulators
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', hk_simulator_dir,
                                                  '--', 'python3', 'autosimulator.py'])
                            else:
                                subprocess.Popen([terminal, '-e',
                                                  f'cd "{hk_simulator_dir}" && python3 autosimulator.py'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        # Fallback: run in the background
                        print("Warning: No terminal emulator found, running in background")
                        subprocess.Popen([sys.executable, 'autosimulator.py'], cwd=hk_simulator_dir)
            except Exception as e:
                print(f"Error running HK simulator: {e}")

        # Start the script in a separate thread
        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({
            'success': True,
            'message': 'HK simulator script started in new terminal window'
        })

    except Exception as e:
        return jsonify({'error': f'Failed to open HK simulator: {str(e)}'}), 500

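Editor's note: run_simulator, open_submitter, and open_hk_simulator above differ only in the folder and script they launch. A sketch of the shared cross-platform logic pulled into one helper follows; the `launch_in_terminal` name is an assumption for illustration (the package keeps the three copies inline).

```python
import os
import subprocess
import sys

def launch_in_terminal(workdir: str, script_name: str) -> None:
    """Open script_name in a new terminal window, per platform.

    Hypothetical helper mirroring the branching used by the three
    launcher endpoints above.
    """
    if os.name == 'nt':
        # Windows: a fresh cmd console that stays open after the script exits.
        subprocess.Popen(['cmd', '/k', 'python', script_name],
                         cwd=workdir,
                         creationflags=subprocess.CREATE_NEW_CONSOLE)
    elif sys.platform == 'darwin':
        # macOS: drive Terminal.app through AppleScript.
        apple_script = (
            f'tell application "Terminal"\n'
            f'    do script "cd \'{workdir}\' && python3 {script_name}"\n'
            f'    activate\n'
            f'end tell'
        )
        subprocess.Popen(['osascript', '-e', apple_script])
    else:
        # Linux: probe common terminal emulators in order.
        for terminal in ('gnome-terminal', 'xterm', 'konsole', 'x-terminal-emulator'):
            try:
                if terminal == 'gnome-terminal':
                    subprocess.Popen([terminal, '--working-directory', workdir,
                                      '--', 'python3', script_name])
                else:
                    subprocess.Popen([terminal, '-e',
                                      f'cd "{workdir}" && python3 {script_name}'])
                break
            except FileNotFoundError:
                continue
        else:
            # No emulator found: fall back to a background process.
            subprocess.Popen([sys.executable, script_name], cwd=workdir)
```
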
@app.route('/api/open-transformer', methods=['POST'])
def open_transformer():
    """Run the Transformer.py script from the Tranformer folder in a new terminal."""
    try:
        import subprocess
        import threading

        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        transformer_path = os.path.join(transformer_dir, 'Transformer.py')

        if not os.path.exists(transformer_path):
            return jsonify({'error': 'Transformer.py not found in Tranformer folder'}), 404

        def run_script():
            try:
                if os.name == 'nt':
                    subprocess.Popen(['cmd', '/k', 'python', 'Transformer.py'],
                                     cwd=transformer_dir,
                                     creationflags=subprocess.CREATE_NEW_CONSOLE)
                else:
                    terminals = ['gnome-terminal', 'xterm', 'konsole', 'terminal']
                    for terminal in terminals:
                        try:
                            if terminal == 'gnome-terminal':
                                subprocess.Popen([terminal, '--working-directory', transformer_dir, '--', 'python3', 'Transformer.py'])
                            else:
                                subprocess.Popen([terminal, '-e', f'cd "{transformer_dir}" && python3 "Transformer.py"'])
                            break
                        except FileNotFoundError:
                            continue
                    else:
                        subprocess.Popen([sys.executable, 'Transformer.py'], cwd=transformer_dir)
            except Exception as e:
                print(f"Error running Transformer: {e}")

        thread = threading.Thread(target=run_script)
        thread.daemon = True
        thread.start()

        return jsonify({'success': True, 'message': 'Transformer script started in new terminal window'})

    except Exception as e:
        return jsonify({'error': f'Failed to open Transformer: {str(e)}'}), 500

@app.route('/api/usage-doc', methods=['GET'])
def get_usage_doc():
    """Return usage.md as raw markdown text for in-app help display."""
    try:
        base_dir = os.path.dirname(os.path.abspath(__file__))
        usage_path = os.path.join(base_dir, 'usage.md')
        if not os.path.exists(usage_path):
            return jsonify({'success': False, 'error': 'usage.md not found'}), 404

        with open(usage_path, 'r', encoding='utf-8') as f:
            content = f.read()

        return jsonify({'success': True, 'markdown': content})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

# Global task manager for Transformer Web
transformer_tasks = {}

@app.route('/transformer-web')
def transformer_web():
    return render_template('transformer_web.html')

@app.route('/api/test-llm-connection', methods=['POST'])
def test_llm_connection():
    data = request.json
    api_key = data.get('apiKey')
    base_url = data.get('baseUrl')
    model = data.get('model')

    try:
        import openai
        client = openai.OpenAI(api_key=api_key, base_url=base_url)
        # Simple test call
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=5
        )
        return jsonify({'success': True})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})

@app.route('/api/get-default-template-summary')
def get_default_template_summary():
    try:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')

        # Read the file directly to avoid import issues/side effects
        transformer_path = os.path.join(transformer_dir, 'Transformer.py')
        with open(transformer_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Extract the template_summary variable using a regex
        import re
        match = re.search(r'template_summary\s*=\s*"""(.*?)"""', content, re.DOTALL)
        if match:
            return jsonify({'success': True, 'summary': match.group(1)})
        else:
            return jsonify({'success': False, 'error': 'Could not find template_summary in Transformer.py'})

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})

@app.route('/api/run-transformer-web', methods=['POST'])
def run_transformer_web():
    data = request.json
    task_id = str(uuid.uuid4())

    script_dir = os.path.dirname(os.path.abspath(__file__))
    transformer_dir = os.path.join(script_dir, 'Tranformer')

    # Handle template summary content
    template_summary_content = data.get('template_summary_content')
    template_summary_path = None

    if template_summary_content:
        template_summary_path = os.path.join(transformer_dir, f'temp_summary_{task_id}.txt')
        with open(template_summary_path, 'w', encoding='utf-8') as f:
            f.write(template_summary_content)

    # Create a temporary config file
    config = {
        "LLM_model_name": data.get('LLM_model_name'),
        "LLM_API_KEY": data.get('LLM_API_KEY'),
        "llm_base_url": data.get('llm_base_url'),
        "username": data.get('username'),
        "password": data.get('password'),
        "template_summary_path": template_summary_path,
        "alpha_id": data.get('alpha_id'),
        "top_n_datafield": int(data.get('top_n_datafield', 50)),
        "user_region": data.get('region'),
        "user_universe": data.get('universe'),
        "user_delay": int(data.get('delay')) if data.get('delay') else None,
        "user_category": data.get('category'),
        "user_data_type": data.get('data_type', 'MATRIX')
    }

    config_path = os.path.join(transformer_dir, f'config_{task_id}.json')

    with open(config_path, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=4)

    # Start the process
    transformer_script = os.path.join(transformer_dir, 'Transformer.py')

    # Use a queue to store logs
    log_queue = queue.Queue()

    def run_process():
        try:
            # Force UTF-8 encoding for the subprocess output to avoid UnicodeEncodeError on Windows
            env = os.environ.copy()
            env["PYTHONIOENCODING"] = "utf-8"

            process = subprocess.Popen(
                [sys.executable, '-u', transformer_script, config_path],
                cwd=transformer_dir,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                encoding='utf-8',
                errors='replace',
                env=env
            )

            transformer_tasks[task_id]['process'] = process

            for line in iter(process.stdout.readline, ''):
                log_queue.put(line)

            process.stdout.close()
            process.wait()
            transformer_tasks[task_id]['return_code'] = process.returncode
        except Exception as e:
            log_queue.put(f"Error running process: {str(e)}")
            transformer_tasks[task_id]['return_code'] = 1
        finally:
            log_queue.put(None)  # Signal end
            # Clean up the config file and temp summary file
            try:
                if os.path.exists(config_path):
                    os.remove(config_path)
                if template_summary_path and os.path.exists(template_summary_path):
                    os.remove(template_summary_path)
            except Exception:
                pass

    # Register the task before starting the worker thread, so run_process
    # can safely write its process handle into transformer_tasks[task_id].
    transformer_tasks[task_id] = {
        'queue': log_queue,
        'status': 'running',
        'output_dir': os.path.join(transformer_dir, 'output')
    }

    thread = threading.Thread(target=run_process)
    thread.start()

    return jsonify({'success': True, 'taskId': task_id})

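Editor's note: run_transformer_web hands the child process's stdout to a worker thread that pushes each line into a `queue.Queue`, using `None` as the end-of-stream sentinel; the stream endpoint further below drains that queue. A self-contained sketch of the same producer/consumer pattern follows; the echoed command is just a stand-in.

```python
import queue
import subprocess
import sys
import threading

log_queue: "queue.Queue" = queue.Queue()

def pump(cmd: list) -> None:
    """Forward each stdout line into log_queue; None marks the end."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, text=True)
    for line in iter(proc.stdout.readline, ''):
        log_queue.put(line)
    proc.stdout.close()
    proc.wait()
    log_queue.put(None)  # sentinel: the consumer stops here

# Stand-in command; any long-running script would do.
threading.Thread(target=pump,
                 args=([sys.executable, '-c', 'print("hi")'],),
                 daemon=True).start()

while True:
    item = log_queue.get()
    if item is None:
        break
    print(item, end='')
```
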
@app.route('/api/transformer/login-and-fetch-options', methods=['POST'])
def transformer_login_and_fetch_options():
    data = request.json
    username = data.get('username')
    password = data.get('password')

    if not username or not password:
        return jsonify({'success': False, 'error': 'Username and password are required'})

    try:
        # Add Tranformer to the path to import ace_lib
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import SingleSession, get_instrument_type_region_delay

        # Use SingleSession for consistency with ace_lib
        session = SingleSession()
        # Force re-authentication
        session.auth = (username, password)

        brain_api_url = "https://api.worldquantbrain.com"
        response = session.post(brain_api_url + "/authentication")

        if response.status_code == 201:
            # Auth success
            pass
        elif response.status_code == 401:
            return jsonify({'success': False, 'error': 'Authentication failed: Invalid credentials'})
        else:
            return jsonify({'success': False, 'error': f'Authentication failed: {response.status_code} {response.text}'})

        # Now fetch the options
        df = get_instrument_type_region_delay(session)

        # Fetch categories
        categories_resp = session.get(brain_api_url + "/data-categories")
        categories = []
        if categories_resp.status_code == 200:
            categories_data = categories_resp.json()
            if isinstance(categories_data, list):
                categories = categories_data
            elif isinstance(categories_data, dict):
                categories = categories_data.get('results', [])

        # Convert the DataFrame to a nested dictionary structure for the frontend
        # Structure: Region -> Delay -> Universe
        # We only care about EQUITY for now, as per previous code

        df_equity = df[df['InstrumentType'] == 'EQUITY']

        options = {}
        for _, row in df_equity.iterrows():
            region = row['Region']
            delay = row['Delay']
            universes = row['Universe']  # This is a list

            if region not in options:
                options[region] = {}

            # Convert delay to a string for JSON keys
            delay_str = str(delay)
            if delay_str not in options[region]:
                options[region][delay_str] = universes

        return jsonify({
            'success': True,
            'options': options,
            'categories': categories
        })

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)})

@app.route('/api/stream-transformer-logs/<task_id>')
def stream_transformer_logs(task_id):
    def generate():
        if task_id not in transformer_tasks:
            yield f"data: {json.dumps({'status': 'error', 'log': 'Task not found'})}\n\n"
            return

        q = transformer_tasks[task_id]['queue']

        while True:
            try:
                line = q.get(timeout=1)
                if line is None:
                    return_code = transformer_tasks[task_id].get('return_code', 0)
                    status = 'completed' if return_code == 0 else 'error'
                    yield f"data: {json.dumps({'status': status, 'log': ''})}\n\n"
                    break
                yield f"data: {json.dumps({'status': 'running', 'log': line})}\n\n"
            except queue.Empty:
                # Check if the process is still running
                if 'process' in transformer_tasks[task_id]:
                    proc = transformer_tasks[task_id]['process']
                    if proc.poll() is not None and q.empty():
                        return_code = proc.returncode
                        status = 'completed' if return_code == 0 else 'error'
                        yield f"data: {json.dumps({'status': status, 'log': ''})}\n\n"
                        break
                yield f"data: {json.dumps({'status': 'running', 'log': ''})}\n\n"  # keep-alive

    return Response(stream_with_context(generate()), mimetype='text/event-stream')

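Editor's note: stream_transformer_logs emits Server-Sent Events, one `data: {json}` frame per log line plus empty keep-alive frames. A sketch of a client draining that stream with `requests` follows; the localhost URL and the `<task-id>` placeholder are assumptions, to be replaced with a real `taskId` returned by /api/run-transformer-web.

```python
import json
import requests

# Placeholder address and task id.
url = "http://127.0.0.1:5000/api/stream-transformer-logs/<task-id>"

with requests.get(url, stream=True) as resp:
    for raw in resp.iter_lines(decode_unicode=True):
        # SSE frames are separated by blank lines; payload lines start "data: ".
        if not raw or not raw.startswith("data: "):
            continue
        event = json.loads(raw[len("data: "):])
        if event["log"]:
            print(event["log"], end="")
        if event["status"] in ("completed", "error"):
            break
```
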
@app.route('/api/download-transformer-result/<task_id>/<file_type>')
def download_transformer_result(task_id, file_type):
    script_dir = os.path.dirname(os.path.abspath(__file__))
    transformer_dir = os.path.join(script_dir, 'Tranformer')
    output_dir = os.path.join(transformer_dir, 'output')

    if file_type == 'candidates':
        filename = 'Alpha_candidates.json'
    elif file_type == 'success':
        filename = 'Alpha_generated_expressions_success.json'
    elif file_type == 'error':
        filename = 'Alpha_generated_expressions_error.json'
    else:
        return "Invalid file type", 400

    return send_from_directory(output_dir, filename, as_attachment=True)

# --- 缘分一道桥 (Alpha Inspector) Routes ---

# Add '缘分一道桥' to sys.path to allow importing brain_alpha_inspector
yuanfen_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '缘分一道桥')
if yuanfen_dir not in sys.path:
    sys.path.append(yuanfen_dir)

try:
    import brain_alpha_inspector
except ImportError as e:
    print(f"Warning: Could not import brain_alpha_inspector: {e}")
    brain_alpha_inspector = None

@app.route('/alpha_inspector')
def alpha_inspector_page():
    return render_template('alpha_inspector.html')

@app.route('/api/yuanfen/login', methods=['POST'])
def yuanfen_login():
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    data = request.json
    username = data.get('username')
    password = data.get('password')

    try:
        session = brain_alpha_inspector.brain_login(username, password)
        session_id = str(uuid.uuid4())
        # Note: this stores the raw session object, unlike the dict entries
        # created by the main login flow; the /api/yuanfen/* routes read it
        # back directly via brain_sessions.get(session_id).
        brain_sessions[session_id] = session
        return jsonify({'success': True, 'session_id': session_id})
    except Exception as e:
        return jsonify({'success': False, 'message': str(e)})

@app.route('/api/yuanfen/fetch_alphas', methods=['POST'])
def yuanfen_fetch_alphas():
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    data = request.json
    session_id = data.get('session_id')
    mode = data.get('mode', 'date_range')

    session = brain_sessions.get(session_id)
    if not session:
        return jsonify({'success': False, 'message': 'Invalid session'})

    def generate():
        try:
            alphas = []
            if mode == 'ids':
                alpha_ids_str = data.get('alpha_ids', '')
                import re
                alpha_ids = [x.strip() for x in re.split(r'[,\s\n]+', alpha_ids_str) if x.strip()]
                yield json.dumps({"type": "progress", "message": f"Fetching {len(alpha_ids)} alphas by ID..."}) + "\n"
                alphas = brain_alpha_inspector.fetch_alphas_by_ids(session, alpha_ids)
            else:
                start_date = data.get('start_date')
                end_date = data.get('end_date')
                yield json.dumps({"type": "progress", "message": f"Fetching alphas from {start_date} to {end_date}..."}) + "\n"
                alphas = brain_alpha_inspector.fetch_alphas_by_date_range(session, start_date, end_date)
            yield json.dumps({"type": "progress", "message": f"Found {len(alphas)} alphas. Fetching operators..."}) + "\n"

            # 2. Fetch operators (needed for parsing)
            operators = brain_alpha_inspector.fetch_operators(session)

            # 2.5 Fetch simulation options (for validation)
            simulation_options = None
            if brain_alpha_inspector.get_instrument_type_region_delay:
                yield json.dumps({"type": "progress", "message": "Fetching simulation options..."}) + "\n"
                try:
                    simulation_options = brain_alpha_inspector.get_instrument_type_region_delay(session)
                except Exception as e:
                    print(f"Error fetching simulation options: {e}")

            yield json.dumps({"type": "progress", "message": f"Analyzing {len(alphas)} alphas..."}) + "\n"

            # 3. Analyze each alpha
            analyzed_alphas = []
            for i, alpha in enumerate(alphas):
                alpha_id = alpha.get('id', 'Unknown')
                yield json.dumps({"type": "progress", "message": f"Processing alpha {i+1}/{len(alphas)}: {alpha_id}"}) + "\n"

                result = brain_alpha_inspector.get_alpha_variants(session, alpha, operators, simulation_options)
                if result['valid'] and result['variants']:
                    analyzed_alphas.append(result)

            yield json.dumps({"type": "result", "success": True, "alphas": analyzed_alphas}) + "\n"

        except Exception as e:
            print(f"Error in fetch_alphas: {e}")
            yield json.dumps({"type": "error", "message": str(e)}) + "\n"

    return Response(stream_with_context(generate()), mimetype='application/x-ndjson')

@app.route('/api/yuanfen/simulate', methods=['POST'])
def yuanfen_simulate():
    if not brain_alpha_inspector:
        return jsonify({'success': False, 'message': 'Module not loaded'})

    data = request.json
    session_id = data.get('session_id')
    # alpha_id = data.get('alpha_id')  # Not strictly needed if we have the full payload
    payload = data.get('payload')  # The full simulation payload

    session = brain_sessions.get(session_id)
    if not session:
        return jsonify({'success': False, 'message': 'Invalid session'})

    try:
        success, result_or_msg = brain_alpha_inspector.run_simulation_payload(session, payload)

        if success:
            return jsonify({'success': True, 'result': result_or_msg})
        else:
            return jsonify({'success': False, 'message': result_or_msg})

    except Exception as e:
        return jsonify({'success': False, 'message': str(e)})

def process_options_dataframe(df):
    """
    Transforms the options DataFrame into a nested dictionary:
    {
        "EQUITY": {
            "USA": {
                "delays": [0, 1],
                "universes": ["TOP3000", ...],
                "neutralizations": ["MARKET", "INDUSTRY", ...]
            },
            "TWN": { ... }
        }
    }
    """
    result = {}
    if df is None or df.empty:
        return result

    for _, row in df.iterrows():
        inst = row.get('InstrumentType', 'EQUITY')
        region = row.get('Region')

        if inst not in result:
            result[inst] = {}
        if region not in result[inst]:
            result[inst][region] = {
                "delays": [],
                "universes": [],
                "neutralizations": []
            }

        # Aggregate unique values
        delay = row.get('Delay')
        if delay is not None and delay not in result[inst][region]['delays']:
            result[inst][region]['delays'].append(delay)

        universes = row.get('Universe')
        if isinstance(universes, list):
            for u in universes:
                if u not in result[inst][region]['universes']:
                    result[inst][region]['universes'].append(u)
        elif isinstance(universes, str):
            if universes not in result[inst][region]['universes']:
                result[inst][region]['universes'].append(universes)

        neutralizations = row.get('Neutralization')
        if isinstance(neutralizations, list):
            for n in neutralizations:
                if n not in result[inst][region]['neutralizations']:
                    result[inst][region]['neutralizations'].append(n)
        elif isinstance(neutralizations, str):
            if neutralizations not in result[inst][region]['neutralizations']:
                result[inst][region]['neutralizations'].append(neutralizations)

    return result

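Editor's note: a quick sanity check of process_options_dataframe with a hand-built DataFrame; the two rows below are made up for illustration and are not real BRAIN data.

```python
import pandas as pd

df = pd.DataFrame([
    {"InstrumentType": "EQUITY", "Region": "USA", "Delay": 1,
     "Universe": ["TOP3000", "TOP1000"], "Neutralization": ["MARKET", "INDUSTRY"]},
    {"InstrumentType": "EQUITY", "Region": "USA", "Delay": 0,
     "Universe": ["TOP3000"], "Neutralization": ["SUBINDUSTRY"]},
])

# Rows for the same (InstrumentType, Region) pair are merged, keeping
# first-seen order and de-duplicating each list.
print(process_options_dataframe(df))
# {'EQUITY': {'USA': {'delays': [1, 0],
#                     'universes': ['TOP3000', 'TOP1000'],
#                     'neutralizations': ['MARKET', 'INDUSTRY', 'SUBINDUSTRY']}}}
```
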
def get_valid_simulation_options(session):
    """Fetch valid simulation options from BRAIN."""
    try:
        if get_instrument_type_region_delay:
            print("Fetching simulation options using ace_lib...")
            df = get_instrument_type_region_delay(session)
            return process_options_dataframe(df)
        else:
            print("ace_lib not available, skipping options fetch")
            return {}
    except Exception as e:
        print(f"Error fetching options: {e}")
        return {}

# --- Inspiration Master Routes ---

def get_active_session():
    """Helper to get the active session from the header or SingleSession"""
    # Check the header first
    session_id = request.headers.get('Session-ID')
    if session_id and session_id in brain_sessions:
        return brain_sessions[session_id]['session']

    # Fall back to SingleSession
    script_dir = os.path.dirname(os.path.abspath(__file__))
    transformer_dir = os.path.join(script_dir, 'Tranformer')
    if transformer_dir not in sys.path:
        sys.path.append(transformer_dir)
    from ace_lib import SingleSession
    s = SingleSession()
    if hasattr(s, 'auth') and s.auth:
        return s
    return None

@app.route('/api/check_login', methods=['GET'])
def check_login():
    try:
        s = get_active_session()
        if s:
            return jsonify({'logged_in': True})
        else:
            return jsonify({'logged_in': False})
    except Exception as e:
        print(f"Check login error: {e}")
        return jsonify({'logged_in': False})

@app.route('/api/inspiration/options', methods=['GET'])
def inspiration_options():
    try:
        # Use the same path logic as the main login
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import get_instrument_type_region_delay

        s = get_active_session()
        if not s:
            return jsonify({'error': 'Not logged in'}), 401

        df = get_instrument_type_region_delay(s)

        result = {}
        for _, row in df.iterrows():
            inst = row['InstrumentType']
            region = row['Region']
            delay = row['Delay']
            univs = row['Universe']

            if inst not in result:
                result[inst] = {}
            if region not in result[inst]:
                result[inst][region] = {"delays": [], "universes": []}

            if delay not in result[inst][region]['delays']:
                result[inst][region]['delays'].append(delay)

            if isinstance(univs, list):
                for u in univs:
                    if u not in result[inst][region]['universes']:
                        result[inst][region]['universes'].append(u)
            else:
                if univs not in result[inst][region]['universes']:
                    result[inst][region]['universes'].append(univs)

        return jsonify(result)
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/inspiration/datasets', methods=['POST'])
def inspiration_datasets():
    data = request.json
    region = data.get('region')
    delay = data.get('delay')
    universe = data.get('universe')
    search = data.get('search', '')

    try:
        # Use the same path logic as the main login
        script_dir = os.path.dirname(os.path.abspath(__file__))
        transformer_dir = os.path.join(script_dir, 'Tranformer')
        if transformer_dir not in sys.path:
            sys.path.append(transformer_dir)

        from ace_lib import get_datasets

        s = get_active_session()
        if not s:
            return jsonify({'error': 'Not logged in'}), 401

        df = get_datasets(s, region=region, delay=int(delay), universe=universe)

        if search:
            search = search.lower()
            mask = (
                df['id'].str.lower().str.contains(search, na=False) |
                df['name'].str.lower().str.contains(search, na=False) |
                df['description'].str.lower().str.contains(search, na=False)
            )
            df = df[mask]

        # Return all results instead of limiting to 50.
        # Use to_json to handle NaN values correctly (converts to null).
        json_str = df.to_json(orient='records', date_format='iso')
        return Response(json_str, mimetype='application/json')
    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/api/inspiration/test_llm', methods=['POST'])
|
|
2325
|
-
def inspiration_test_llm():
|
|
2326
|
-
data = request.json
|
|
2327
|
-
api_key = data.get('apiKey')
|
|
2328
|
-
base_url = data.get('baseUrl')
|
|
2329
|
-
model = data.get('model')
|
|
2330
|
-
|
|
2331
|
-
try:
|
|
2332
|
-
import openai
|
|
2333
|
-
client = openai.OpenAI(api_key=api_key, base_url=base_url)
|
|
2334
|
-
# Simple call to list models or chat completion
|
|
2335
|
-
# Using a very cheap/fast call if possible, or just listing models
|
|
2336
|
-
try:
|
|
2337
|
-
client.models.list()
|
|
2338
|
-
return jsonify({'success': True})
|
|
2339
|
-
except Exception as e:
|
|
2340
|
-
# Fallback to a simple completion if models.list is restricted
|
|
2341
|
-
try:
|
|
2342
|
-
client.chat.completions.create(
|
|
2343
|
-
model=model,
|
|
2344
|
-
messages=[{"role": "user", "content": "hi"}],
|
|
2345
|
-
max_tokens=1
|
|
2346
|
-
)
|
|
2347
|
-
return jsonify({'success': True})
|
|
2348
|
-
except Exception as e2:
|
|
2349
|
-
return jsonify({'success': False, 'error': str(e2)})
|
|
2350
|
-
|
|
2351
|
-
except Exception as e:
|
|
2352
|
-
return jsonify({'success': False, 'error': str(e)})
|
|
2353
|
-
|
|
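`inspiration_test_llm` validates credentials in two steps: the cheap `models.list()` call first, then a one-token chat completion for providers that gate the models endpoint. The same pattern factored into a standalone helper, as a sketch against the `openai>=1.x` client (`check_llm_credentials` is a hypothetical name, not part of the package):

```python
import openai

def check_llm_credentials(api_key, base_url, model):
    # Returns (ok, error_message_or_None).
    client = openai.OpenAI(api_key=api_key, base_url=base_url)
    try:
        # Cheapest possible probe: no tokens consumed.
        client.models.list()
        return True, None
    except Exception:
        # Some gateways restrict /models; fall back to a one-token completion.
        try:
            client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": "hi"}],
                max_tokens=1,
            )
            return True, None
        except Exception as exc:
            return False, str(exc)
```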
2354 | - | @app.route('/api/inspiration/generate', methods=['POST'])
2355 | - | def inspiration_generate():
2356 | - |     data = request.json
2357 | - |     api_key = data.get('apiKey')
2358 | - |     base_url = data.get('baseUrl')
2359 | - |     model = data.get('model')
2360 | - |     region = data.get('region')
2361 | - |     delay = data.get('delay')
2362 | - |     universe = data.get('universe')
2363 | - |     dataset_id = data.get('datasetId')
2364 | - |
2365 | - |     try:
2366 | - |         import openai
2367 | - |         # Use the same path logic as the main login
2368 | - |         script_dir = os.path.dirname(os.path.abspath(__file__))
2369 | - |         transformer_dir = os.path.join(script_dir, 'Tranformer')
2370 | - |         if transformer_dir not in sys.path:
2371 | - |             sys.path.append(transformer_dir)
2372 | - |
2373 | - |         from ace_lib import get_operators, get_datafields
2374 | - |
2375 | - |         s = get_active_session()
2376 | - |         if not s:
2377 | - |             return jsonify({'error': 'Not logged in'}), 401
2378 | - |
2379 | - |         operators_df = get_operators(s)
2380 | - |         operators_df = operators_df[operators_df['scope'] == 'REGULAR']
2381 | - |
2382 | - |         datafields_df = get_datafields(s, region=region, delay=int(delay), universe=universe, dataset_id=dataset_id, data_type="ALL")
2383 | - |
2384 | - |         script_dir = os.path.dirname(os.path.abspath(__file__))
2385 | - |         prompt_path = os.path.join(script_dir, "give_me_idea", "what_is_Alpha_template.md")
2386 | - |         try:
2387 | - |             with open(prompt_path, "r", encoding="utf-8") as f:
2388 | - |                 system_prompt = f.read()
2389 | - |         except:
2390 | - |             system_prompt = "You are a helpful assistant for generating Alpha templates."
2391 | - |
2392 | - |         client = openai.OpenAI(api_key=api_key, base_url=base_url)
2393 | - |
2394 | - |         max_retries = 5
2395 | - |         n_ops = len(operators_df)
2396 | - |         n_fields = len(datafields_df)
2397 | - |
2398 | - |         last_error = None
2399 | - |
2400 | - |         for attempt in range(max_retries + 1):
2401 | - |             ops_subset = operators_df.head(n_ops)
2402 | - |             fields_subset = datafields_df.head(n_fields)
2403 | - |
2404 | - |             operators_info = ops_subset[['name', 'category', 'description']].to_string()
2405 | - |             datafields_info = fields_subset[['id', 'description', 'subcategory']].to_string()
2406 | - |
2407 | - |             user_prompt = f"""
2408 | - | Here is the information about available operators (first {n_ops} rows):
2409 | - | {operators_info}
2410 | - |
2411 | - | Here is the information about the dataset '{dataset_id}' (first {n_fields} rows):
2412 | - | {datafields_info}
2413 | - |
2414 | - | Please come up with several Alpha templates based on this information.
2415 | - | Specify the AI answer in Chinese.
2416 | - | """
2417 | - |             try:
2418 | - |                 completion = client.chat.completions.create(
2419 | - |                     model=model,
2420 | - |                     messages=[
2421 | - |                         {"role": "system", "content": system_prompt},
2422 | - |                         {"role": "user", "content": user_prompt}
2423 | - |                     ],
2424 | - |                     temperature=0.3,
2425 | - |                 )
2426 | - |                 return jsonify({'result': completion.choices[0].message.content})
2427 | - |
2428 | - |             except Exception as e:
2429 | - |                 error_msg = str(e)
2430 | - |                 last_error = error_msg
2431 | - |                 if "token limit" in error_msg or "context_length_exceeded" in error_msg or "400" in error_msg:
2432 | - |                     n_ops = max(1, n_ops // 2)
2433 | - |                     n_fields = max(1, n_fields // 2)
2434 | - |                     if n_ops == 1 and n_fields == 1:
2435 | - |                         break
2436 | - |                 else:
2437 | - |                     break
2438 | - |
2439 | - |         return jsonify({'error': f"Failed after retries. Last error: {last_error}"})
2440 | - |
2441 | - |     except Exception as e:
2442 | - |         return jsonify({'error': str(e)}), 500
2443 | - |
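The retry loop in `inspiration_generate` handles context-length failures by halving both row budgets (`n_ops`, `n_fields`), rebuilding the prompt, and trying again, stopping on any other error or once both budgets bottom out at 1. The same strategy in isolation, as a sketch: `build_prompt` and `call_model` are hypothetical stand-ins for the route's f-string and `chat.completions.create` call, and the removed code also treats a bare "400" in the message as a size error.

```python
def generate_with_shrinking_context(call_model, build_prompt,
                                    n_ops, n_fields, max_retries=5):
    last_error = None
    for _ in range(max_retries + 1):
        try:
            # Rebuild the prompt from the current row budgets and call the model.
            return call_model(build_prompt(n_ops, n_fields))
        except Exception as exc:
            last_error = str(exc)
            if ("context_length_exceeded" not in last_error
                    and "token limit" not in last_error):
                break  # not a size problem; retrying the same call won't help
            if n_ops == 1 and n_fields == 1:
                break  # already at the minimum budget
            # Halve both budgets and retry with a smaller prompt.
            n_ops, n_fields = max(1, n_ops // 2), max(1, n_fields // 2)
    raise RuntimeError(f"Failed after retries. Last error: {last_error}")
```

Halving both budgets shrinks the prompt geometrically, so even a very large operator/datafield dump fits within a handful of retries.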
2444 | - | if __name__ == '__main__':
2445 | - |     print("Starting BRAIN Expression Template Decoder Web Application...")
2446 | - |     print("Starting in safe mode: binding only to localhost (127.0.0.1)")
2447 | - |     # Allow an explicit override only via an environment variable (not recommended)
2448 | - |     bind_host = os.environ.get('BRAIN_BIND_HOST', '127.0.0.1')
2449 | - |     if bind_host not in ('127.0.0.1', 'localhost'):
2450 | - |         print(f"Refusing to bind to non-localhost address: {bind_host}")
2451 | - |         print("To override (not recommended), set environment variable BRAIN_BIND_HOST")
2452 | - |         sys.exit(1)
2453 | - |
2454 | - |     print(f"Application will run on http://{bind_host}:5000")
2455 | - |     print("BRAIN API integration included - no separate proxy needed!")
2456 | - |     app.run(debug=False, host=bind_host, port=5000)
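The removed `__main__` block refuses to serve on anything but loopback: `BRAIN_BIND_HOST` can be set explicitly, but any non-local value aborts startup. The same guard as a reusable helper (a sketch assuming a Flask `app`; `run_local_only` is a hypothetical name):

```python
import os
import sys

def run_local_only(app, port=5000):
    # Bind to loopback by default; exit rather than expose the dev server
    # if the environment override points anywhere non-local.
    bind_host = os.environ.get('BRAIN_BIND_HOST', '127.0.0.1')
    if bind_host not in ('127.0.0.1', 'localhost'):
        print(f"Refusing to bind to non-localhost address: {bind_host}")
        sys.exit(1)
    app.run(debug=False, host=bind_host, port=port)
```

Running the script normally serves on http://127.0.0.1:5000; pointing `BRAIN_BIND_HOST` at an external address aborts at startup by design, since Flask's built-in server is not meant to be internet-facing.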