cnhkmcp 2.2.0__py3-none-any.whl → 2.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnhkmcp/__init__.py +1 -1
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/README.md +1 -1
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/config.json +2 -2
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/main.py +1 -1
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/vector_db/chroma.sqlite3 +0 -0
- cnhkmcp/untracked/APP/Tranformer/Transformer.py +2 -2
- cnhkmcp/untracked/APP/Tranformer/transformer_config.json +1 -1
- cnhkmcp/untracked/APP/blueprints/feature_engineering.py +2 -2
- cnhkmcp/untracked/APP/blueprints/inspiration_house.py +4 -4
- cnhkmcp/untracked/APP/blueprints/paper_analysis.py +3 -3
- cnhkmcp/untracked/APP/give_me_idea/BRAIN_Alpha_Template_Expert_SystemPrompt.md +34 -73
- cnhkmcp/untracked/APP/give_me_idea/alpha_data_specific_template_master.py +2 -2
- cnhkmcp/untracked/APP/give_me_idea/what_is_Alpha_template.md +366 -1
- cnhkmcp/untracked/APP/simulator/wqb20260130130030.log +210 -0
- cnhkmcp/untracked/APP/simulator/wqb20260130131757.log +104 -0
- cnhkmcp/untracked/APP/simulator/wqb20260130172245.log +70 -0
- cnhkmcp/untracked/APP/static/inspiration.js +350 -14
- cnhkmcp/untracked/APP/templates/index.html +18 -3
- cnhkmcp/untracked/APP/templates/transformer_web.html +1 -1
- cnhkmcp/untracked/APP/trailSomeAlphas/README.md +38 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/ace.log +66 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/enhance_template.py +588 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/requirements.txt +3 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/run_pipeline.py +1051 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/run_pipeline_step_by_step.ipynb +5258 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-data-feature-engineering/OUTPUT_TEMPLATE.md +325 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-data-feature-engineering/SKILL.md +503 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-data-feature-engineering/examples.md +244 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-data-feature-engineering/output_report/ASI_delay1_analyst11_ideas.md +285 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-data-feature-engineering/output_report/GLB_delay1_fundamental72_ideas.md +362 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-data-feature-engineering/reference.md +399 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/SKILL.md +40 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/config.json +6 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709385783386000.json +388 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709386274840400.json +131 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709386838244700.json +1926 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709387369198500.json +31 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709387908905800.json +1926 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709388486243600.json +240 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709389024058600.json +1926 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709389549608700.json +41 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709390068714000.json +110 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709390591996900.json +36 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709391129137100.json +31 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709391691643500.json +41 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709392192099200.json +31 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709392703423500.json +46 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769709393213729400.json +246 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710186683932500.json +388 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710187165414300.json +131 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710187665211700.json +1926 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710188149193400.json +31 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710188667627400.json +1926 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710189220822000.json +240 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710189726189500.json +1926 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710190248066100.json +41 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710190768298700.json +110 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710191282588100.json +36 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710191838960900.json +31 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710192396688000.json +41 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710192941922400.json +31 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710193473524600.json +46 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710194001961200.json +246 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710420975888800.json +46 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710421647590100.json +196 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710422131378500.json +5 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710422644184400.json +196 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710423702350600.json +196 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_1_idea_1769710424244661800.json +5 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/analyst11_ASI_delay1.csv +211 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/analyst11_ASI_delay1/final_expressions.json +7062 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/final_expressions.json +138 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759441444909600.json +38 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759441920092000.json +14 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759442418767100.json +14 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759442902507600.json +14 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759443377036200.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759443845377000.json +14 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759444313546700.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759444784598600.json +14 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759445274311200.json +14 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759445747421700.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759446222137800.json +22 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759446686222600.json +14 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759447154698500.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759447629677000.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759448102331200.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_1_idea_1769759448573382000.json +14 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental72_GLB_delay1/fundamental72_GLB_delay1.csv +330 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/scripts/ace.log +3 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/scripts/ace_lib.py +1514 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/scripts/fetch_dataset.py +119 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/scripts/helpful_functions.py +180 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/scripts/implement_idea.py +236 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/scripts/merge_expression_list.py +90 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/scripts/parsetab.py +60 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/template_final_enhance/op/321/206/320/220/342/225/227/321/207/342/225/227/320/243.md +434 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/template_final_enhance/sample_prompt.md +62 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/template_final_enhance//321/205/320/235/320/245/321/205/320/253/320/260/321/205/320/275/320/240/321/206/320/220/320/255/321/210/320/220/320/223/321/211/320/220/342/225/227/321/210/342/225/233/320/241/321/211/320/243/342/225/233.md +354 -0
- cnhkmcp/untracked/APP/usage.md +2 -2
- cnhkmcp/untracked/APP//321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +400 -9
- cnhkmcp/untracked/back_up/platform_functions.py +2 -2
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/platform_functions.py +2 -2
- cnhkmcp/untracked/platform_functions.py +2 -2
- cnhkmcp/untracked/skills/alpha-expression-verifier/scripts/validator.py +889 -0
- cnhkmcp/untracked/skills/brain-feature-implementation/scripts/implement_idea.py +4 -3
- cnhkmcp/untracked/skills/brain-improve-alpha-performance/arXiv_API_Tool_Manual.md +490 -0
- cnhkmcp/untracked/skills/brain-improve-alpha-performance/reference.md +1 -1
- cnhkmcp/untracked/skills/brain-improve-alpha-performance/scripts/arxiv_api.py +229 -0
- cnhkmcp/untracked//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +35 -11
- cnhkmcp/vector_db/_manifest.json +1 -0
- cnhkmcp/vector_db/_meta.json +1 -0
- {cnhkmcp-2.2.0.dist-info → cnhkmcp-2.3.1.dist-info}/METADATA +1 -1
- {cnhkmcp-2.2.0.dist-info → cnhkmcp-2.3.1.dist-info}/RECORD +121 -33
- /cnhkmcp/untracked/{skills/expression_verifier → APP/trailSomeAlphas/skills/brain-feature-implementation}/scripts/validator.py +0 -0
- /cnhkmcp/untracked/skills/{expression_verifier → alpha-expression-verifier}/SKILL.md +0 -0
- /cnhkmcp/untracked/skills/{expression_verifier → alpha-expression-verifier}/scripts/verify_expr.py +0 -0
- {cnhkmcp-2.2.0.dist-info → cnhkmcp-2.3.1.dist-info}/WHEEL +0 -0
- {cnhkmcp-2.2.0.dist-info → cnhkmcp-2.3.1.dist-info}/entry_points.txt +0 -0
- {cnhkmcp-2.2.0.dist-info → cnhkmcp-2.3.1.dist-info}/licenses/LICENSE +0 -0
- {cnhkmcp-2.2.0.dist-info → cnhkmcp-2.3.1.dist-info}/top_level.txt +0 -0
@@ -115,12 +115,15 @@ if not check_and_install_dependencies():
 
 # Now import the packages
 try:
-    from flask import Flask, render_template, request, jsonify, session as flask_session, Response, stream_with_context, send_from_directory
+    from flask import Flask, render_template, request, jsonify, session as flask_session, Response, stream_with_context, send_from_directory, send_file, after_this_request
+    from werkzeug.utils import secure_filename
     from flask_cors import CORS
     import requests
     import json
     import time
     import os
+    import zipfile
+    import tempfile
     import threading
     import queue
     import uuid
@@ -660,6 +663,7 @@ def authenticate():
     brain_sessions[session_id] = {
         'session': brain_session,
         'username': username,
+        'password': password,
         'timestamp': time.time(),
         'options': valid_options
     }

@@ -748,6 +752,7 @@ def complete_biometric():
     brain_sessions[new_session_id] = {
         'session': brain_session,
         'username': session_info['username'],
+        'password': session_info.get('password'),
         'timestamp': time.time()
     }
 
@@ -1754,6 +1759,12 @@ def get_usage_doc():
 # Global task manager for Transformer Web
 transformer_tasks = {}
 
+# Global task manager for Inspiration direct pipeline
+inspiration_pipeline_tasks = {}
+
+# Global task manager for template enhancement
+inspiration_enhance_tasks = {}
+
 @app.route('/transformer-web')
 def transformer_web():
     return render_template('transformer_web.html')
@@ -2365,6 +2376,7 @@ def inspiration_generate():
     delay = data.get('delay')
     universe = data.get('universe')
     dataset_id = data.get('datasetId')
+    data_type = data.get('dataType') or 'MATRIX'
 
     try:
         import openai

@@ -2380,11 +2392,24 @@ def inspiration_generate():
         if not s:
             return jsonify({'error': 'Not logged in'}), 401
 
+        if data_type not in ("MATRIX", "VECTOR"):
+            data_type = "MATRIX"
+
         operators_df = get_operators(s)
         operators_df = operators_df[operators_df['scope'] == 'REGULAR']
 
-        datafields_df = get_datafields(s, region=region, delay=int(delay), universe=universe, dataset_id=dataset_id, data_type=
+        datafields_df = get_datafields(s, region=region, delay=int(delay), universe=universe, dataset_id=dataset_id, data_type=data_type)
 
+        # count the datatype of the datafields_df, if most of them are VECTOR, then we keep the VECTOR category operators in the operators_df, otherwise we remove them
+        datatype_counts = datafields_df['type'].value_counts().to_dict()
+        vector_count = datatype_counts.get('VECTOR', 0)
+        total_fields = sum(datatype_counts.values())
+        if total_fields > 0 and vector_count > (total_fields / 2):
+            # keep VECTOR operators
+            pass
+            print("Keeping VECTOR operators because majority of datafields are VECTOR type")
+        operators_df = operators_df[operators_df['category'] != 'Vector']
+
         script_dir = os.path.dirname(os.path.abspath(__file__))
         prompt_path = os.path.join(script_dir, "give_me_idea", "what_is_Alpha_template.md")
         try:
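
The VECTOR-majority check added above boils down to a `value_counts()` tally on the `type` column. A minimal standalone sketch of that check, using a toy DataFrame in place of the real `get_datafields()` result (the field values here are hypothetical):

```python
import pandas as pd

# Toy stand-in for the datafields returned by get_datafields(); not real BRAIN fields.
datafields_df = pd.DataFrame({
    "id": ["fld_a", "fld_b", "fld_c"],
    "type": ["VECTOR", "VECTOR", "MATRIX"],
})

datatype_counts = datafields_df["type"].value_counts().to_dict()
vector_count = datatype_counts.get("VECTOR", 0)
total_fields = sum(datatype_counts.values())

# True when more than half of the fields are VECTOR-typed.
if total_fields > 0 and vector_count > total_fields / 2:
    print("Majority of datafields are VECTOR type")
```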
@@ -2392,7 +2417,7 @@ def inspiration_generate():
                 system_prompt = f.read()
         except:
             system_prompt = "You are a helpful assistant for generating Alpha templates."
-
+
         client = openai.OpenAI(api_key=api_key, base_url=base_url)
 
         max_retries = 5
@@ -2405,9 +2430,35 @@ def inspiration_generate():
         ops_subset = operators_df.head(n_ops)
         fields_subset = datafields_df.head(n_fields)
 
-
-
-
+        # Render subsets as Markdown tables (with robust fallbacks)
+        try:
+            operators_info = ops_subset[['name', 'category', 'description']].to_markdown(index=False)
+        except Exception:
+            try:
+                from tabulate import tabulate
+                operators_info = tabulate(
+                    ops_subset[['name', 'category', 'description']].fillna(''),
+                    headers='keys',
+                    tablefmt='github',
+                    showindex=False
+                )
+            except Exception:
+                operators_info = ops_subset[['name', 'category', 'description']].to_string(index=False)
+
+        try:
+            datafields_info = fields_subset[['id', 'description', 'subcategory']].to_markdown(index=False)
+        except Exception:
+            try:
+                from tabulate import tabulate
+                datafields_info = tabulate(
+                    fields_subset[['id', 'description', 'subcategory']].fillna(''),
+                    headers='keys',
+                    tablefmt='github',
+                    showindex=False
+                )
+            except Exception:
+                datafields_info = fields_subset[['id', 'description', 'subcategory']].to_string(index=False)
+
         user_prompt = f"""
 Here is the information about available operators (first {n_ops} rows):
 {operators_info}
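
`DataFrame.to_markdown()` delegates to the optional `tabulate` package and raises `ImportError` when that package is not installed, which is why the hunk above wraps it in nested try/except blocks. A minimal sketch of the same fallback chain (the column names and rows are placeholders, not the real operator metadata):

```python
import pandas as pd

df = pd.DataFrame({
    "name": ["op_a", "op_b"],
    "description": ["first placeholder operator", "second placeholder operator"],
})

try:
    # Needs the optional 'tabulate' dependency.
    table_text = df.to_markdown(index=False)
except Exception:
    # Plain-text fallback when tabulate is unavailable.
    table_text = df.to_string(index=False)

print(table_text)
```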
@@ -2415,8 +2466,8 @@ Here is the information about available operators (first {n_ops} rows):
 Here is the information about the dataset '{dataset_id}' (first {n_fields} rows):
 {datafields_info}
 
-Please come up with
-
+Please come up with as much diverse Alpha templates as you can based on above information. And do remember to make some innovation of the templates.
+Answer in Chinese.
 """
         try:
             completion = client.chat.completions.create(

@@ -2425,7 +2476,7 @@ Specify the AI answer in Chinese.
                     {"role": "system", "content": system_prompt},
                     {"role": "user", "content": user_prompt}
                 ],
-                temperature=
+                temperature=1,
             )
             return jsonify({'result': completion.choices[0].message.content})
 
@@ -2445,6 +2496,346 @@ Specify the AI answer in Chinese.
     except Exception as e:
         return jsonify({'error': str(e)}), 500
 
+
+def _safe_dataset_id(dataset_id: str) -> str:
+    return "".join([c for c in str(dataset_id) if c.isalnum() or c in ("-", "_")])
+
+
+def _get_pipeline_paths(dataset_id: str, region: str, delay: int):
+    script_dir = os.path.dirname(os.path.abspath(__file__))
+    trail_dir = os.path.join(script_dir, 'trailSomeAlphas')
+    run_pipeline_path = os.path.join(trail_dir, 'run_pipeline.py')
+    data_dir = os.path.join(trail_dir, 'skills', 'brain-feature-implementation', 'data')
+    dataset_folder = f"{_safe_dataset_id(dataset_id)}_{region}_delay{delay}"
+    output_folder = os.path.join(data_dir, dataset_folder)
+    return run_pipeline_path, trail_dir, output_folder, dataset_folder
+
+
+@app.route('/api/inspiration/run-pipeline', methods=['POST'])
+def inspiration_run_pipeline():
+    try:
+        data = request.get_json() or {}
+        dataset_id = data.get('datasetId')
+        data_category = data.get('dataCategory')
+        region = data.get('region')
+        delay = data.get('delay')
+        universe = data.get('universe')
+        data_type = data.get('dataType') or 'MATRIX'
+        api_key = data.get('apiKey')
+        base_url = data.get('baseUrl')
+        model = data.get('model')
+
+        if not dataset_id or not data_category or not region or delay is None or not universe:
+            return jsonify({'success': False, 'error': 'Missing required parameters'}), 400
+
+        run_pipeline_path, trail_dir, output_folder, dataset_folder = _get_pipeline_paths(dataset_id, region, int(delay))
+        if not os.path.exists(run_pipeline_path):
+            return jsonify({'success': False, 'error': f'run_pipeline.py not found: {run_pipeline_path}'}), 404
+
+        task_id = str(uuid.uuid4())
+        log_queue = queue.Queue()
+        inspiration_pipeline_tasks[task_id] = {
+            'queue': log_queue,
+            'status': 'running',
+            'output_folder': output_folder,
+            'dataset_folder': dataset_folder
+        }
+
+        session_id = request.headers.get('Session-ID') or flask_session.get('brain_session_id')
+        session_info = brain_sessions.get(session_id) if session_id else None
+
+        def run_process():
+            try:
+                if data_type not in ("MATRIX", "VECTOR"):
+                    dt = "MATRIX"
+                else:
+                    dt = str(data_type)
+
+                cmd = [
+                    sys.executable,
+                    run_pipeline_path,
+                    '--data-category', str(data_category),
+                    '--region', str(region),
+                    '--delay', str(delay),
+                    '--dataset-id', str(dataset_id),
+                    '--universe', str(universe),
+                    '--data-type', dt,
+                ]
+
+                if api_key:
+                    cmd.extend(['--moonshot-api-key', str(api_key)])
+                if model:
+                    cmd.extend(['--moonshot-model', str(model)])
+
+                env = os.environ.copy()
+                if api_key:
+                    env['MOONSHOT_API_KEY'] = str(api_key)
+                if base_url:
+                    env['MOONSHOT_BASE_URL'] = str(base_url)
+                if model:
+                    env['MOONSHOT_MODEL'] = str(model)
+                if session_info and session_info.get('username') and session_info.get('password'):
+                    env['BRAIN_USERNAME'] = session_info['username']
+                    env['BRAIN_PASSWORD'] = session_info['password']
+
+                proc = subprocess.Popen(
+                    cmd,
+                    cwd=trail_dir,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT,
+                    text=True,
+                    encoding='utf-8',
+                    errors='replace',
+                    bufsize=1,
+                    env=env
+                )
+
+                if proc.stdout:
+                    for line in proc.stdout:
+                        log_queue.put(line.rstrip('\n'))
+
+                exit_code = proc.wait()
+                success = exit_code == 0
+                inspiration_pipeline_tasks[task_id]['status'] = 'completed' if success else 'failed'
+                log_queue.put({
+                    '__event__': 'done',
+                    'success': success,
+                    'exit_code': exit_code,
+                    'dataset_folder': dataset_folder
+                })
+            except Exception as e:
+                inspiration_pipeline_tasks[task_id]['status'] = 'failed'
+                log_queue.put({
+                    '__event__': 'done',
+                    'success': False,
+                    'error': str(e),
+                    'dataset_folder': dataset_folder
+                })
+
+        thread = threading.Thread(target=run_process)
+        thread.daemon = True
+        thread.start()
+
+        return jsonify({'success': True, 'taskId': task_id})
+
+    except Exception as e:
+        return jsonify({'success': False, 'error': str(e)}), 500
+
+
+@app.route('/api/inspiration/stream-pipeline/<task_id>')
+def inspiration_stream_pipeline(task_id):
+    task = inspiration_pipeline_tasks.get(task_id)
+    if not task:
+        return jsonify({'success': False, 'error': 'Task not found'}), 404
+
+    def generate():
+        q = task['queue']
+        while True:
+            item = q.get()
+            if isinstance(item, dict) and item.get('__event__') == 'done':
+                yield f"event: done\ndata: {json.dumps(item, ensure_ascii=False)}\n\n"
+                break
+
+            payload = {'line': item}
+            yield f"data: {json.dumps(payload, ensure_ascii=False)}\n\n"
+
+    return Response(stream_with_context(generate()), mimetype='text/event-stream')
+
+
+@app.route('/api/inspiration/download-pipeline/<task_id>')
+def inspiration_download_pipeline(task_id):
+    task = inspiration_pipeline_tasks.get(task_id)
+    if not task:
+        return jsonify({'success': False, 'error': 'Task not found'}), 404
+
+    output_folder = task.get('output_folder')
+    if not output_folder or not os.path.isdir(output_folder):
+        return jsonify({'success': False, 'error': 'Output folder not found'}), 404
+
+    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.zip')
+    temp.close()
+
+    with zipfile.ZipFile(temp.name, 'w', zipfile.ZIP_DEFLATED) as zf:
+        base_name = os.path.basename(output_folder.rstrip(os.sep))
+        for root, _, files in os.walk(output_folder):
+            for filename in files:
+                abs_path = os.path.join(root, filename)
+                rel_path = os.path.relpath(abs_path, output_folder)
+                arcname = os.path.join(base_name, rel_path)
+                zf.write(abs_path, arcname=arcname)
+
+    @after_this_request
+    def _cleanup_zip(response):
+        try:
+            os.remove(temp.name)
+        except Exception:
+            pass
+        return response
+
+    download_name = f"{os.path.basename(output_folder)}.zip"
+    return send_file(temp.name, as_attachment=True, download_name=download_name)
+
+
+@app.route('/api/inspiration/enhance-template', methods=['POST'])
+def inspiration_enhance_template():
+    try:
+        idea_files = request.files.getlist('ideaFiles')
+        api_key = request.form.get('apiKey')
+        base_url = request.form.get('baseUrl')
+        model = request.form.get('model')
+
+        if not idea_files or not api_key:
+            return jsonify({'success': False, 'error': 'Missing ideaFiles or apiKey'}), 400
+
+        script_dir = os.path.dirname(os.path.abspath(__file__))
+        trail_dir = os.path.join(script_dir, 'trailSomeAlphas')
+        enhance_script = os.path.join(trail_dir, 'enhance_template.py')
+        if not os.path.exists(enhance_script):
+            return jsonify({'success': False, 'error': f'enhance_template.py not found: {enhance_script}'}), 404
+
+        task_id = str(uuid.uuid4())
+        log_queue = queue.Queue()
+        task_root = tempfile.mkdtemp(prefix='enhance_batch_')
+
+        saved_files = []
+        for idx, idea_file in enumerate(idea_files, start=1):
+            name = secure_filename(idea_file.filename or f'idea_{idx}.json')
+            file_dir = os.path.join(task_root, f"{idx:02d}_{os.path.splitext(name)[0]}")
+            os.makedirs(file_dir, exist_ok=True)
+            idea_path = os.path.join(file_dir, name)
+            idea_file.save(idea_path)
+            saved_files.append((name, idea_path))
+
+        inspiration_enhance_tasks[task_id] = {
+            'queue': log_queue,
+            'status': 'running',
+            'task_root': task_root,
+            'saved_files': saved_files
+        }
+
+        def run_process():
+            try:
+                total = len(saved_files)
+                completed_ok = True
+
+                for idx, (name, idea_path) in enumerate(saved_files, start=1):
+                    env = os.environ.copy()
+                    env['IDEA_JSON'] = idea_path
+                    env['MOONSHOT_API_KEY'] = api_key
+                    if base_url:
+                        env['MOONSHOT_BASE_URL'] = base_url
+                    if model:
+                        env['MOONSHOT_MODEL'] = model
+                    env['PYTHONIOENCODING'] = 'utf-8'
+
+                    log_queue.put(f"=== 开始处理: {name} ({idx}/{total}) ===")
+                    proc = subprocess.Popen(
+                        [sys.executable, enhance_script],
+                        cwd=trail_dir,
+                        stdout=subprocess.PIPE,
+                        stderr=subprocess.STDOUT,
+                        text=True,
+                        encoding='utf-8',
+                        errors='replace',
+                        bufsize=1,
+                        env=env
+                    )
+
+                    if proc.stdout:
+                        for line in proc.stdout:
+                            log_queue.put({'line': line.rstrip('\n'), 'file': name})
+
+                    exit_code = proc.wait()
+                    success = exit_code == 0
+                    if not success:
+                        completed_ok = False
+                    log_queue.put({'__event__': 'file_done', 'type': 'file_done', 'file': name, 'success': success})
+
+                inspiration_enhance_tasks[task_id]['status'] = 'completed' if completed_ok else 'failed'
+                log_queue.put({
+                    '__event__': 'done',
+                    'success': completed_ok,
+                    'total': total
+                })
+            except Exception as e:
+                inspiration_enhance_tasks[task_id]['status'] = 'failed'
+                log_queue.put({
+                    '__event__': 'done',
+                    'success': False,
+                    'error': str(e)
+                })
+
+        thread = threading.Thread(target=run_process)
+        thread.daemon = True
+        thread.start()
+
+        return jsonify({'success': True, 'taskId': task_id})
+
+    except Exception as e:
+        return jsonify({'success': False, 'error': str(e)}), 500
+
+
+@app.route('/api/inspiration/stream-enhance/<task_id>')
+def inspiration_stream_enhance(task_id):
+    task = inspiration_enhance_tasks.get(task_id)
+    if not task:
+        return jsonify({'success': False, 'error': 'Task not found'}), 404
+
+    def generate():
+        q = task['queue']
+        while True:
+            item = q.get()
+            if isinstance(item, dict) and item.get('__event__') == 'done':
+                yield f"event: done\ndata: {json.dumps(item, ensure_ascii=False)}\n\n"
+                break
+
+            if isinstance(item, dict) and item.get('__event__') == 'file_done':
+                payload = {'type': 'file_done', 'file': item.get('file'), 'success': item.get('success')}
+                yield f"data: {json.dumps(payload, ensure_ascii=False)}\n\n"
+                continue
+
+            if isinstance(item, dict):
+                payload = item
+            else:
+                payload = {'line': item}
+            yield f"data: {json.dumps(payload, ensure_ascii=False)}\n\n"
+
+    return Response(stream_with_context(generate()), mimetype='text/event-stream')
+
+
+@app.route('/api/inspiration/download-enhance/<task_id>')
+def inspiration_download_enhance(task_id):
+    task = inspiration_enhance_tasks.get(task_id)
+    if not task:
+        return jsonify({'success': False, 'error': 'Task not found'}), 404
+
+    task_root = task.get('task_root')
+    if not task_root or not os.path.isdir(task_root):
+        return jsonify({'success': False, 'error': 'Task output not found'}), 404
+
+    temp = tempfile.NamedTemporaryFile(delete=False, suffix='.zip')
+    temp.close()
+
+    with zipfile.ZipFile(temp.name, 'w', zipfile.ZIP_DEFLATED) as zf:
+        base_name = os.path.basename(task_root.rstrip(os.sep))
+        for root, _, files in os.walk(task_root):
+            for filename in files:
+                abs_path = os.path.join(root, filename)
+                rel_path = os.path.relpath(abs_path, task_root)
+                arcname = os.path.join(base_name, rel_path)
+                zf.write(abs_path, arcname=arcname)
+
+    @after_this_request
+    def _cleanup_zip(response):
+        try:
+            os.remove(temp.name)
+        except Exception:
+            pass
+        return response
+
+    download_name = f"{os.path.basename(task_root)}.zip"
+    return send_file(temp.name, as_attachment=True, download_name=download_name)
+
 if __name__ == '__main__':
     print("Starting BRAIN Expression Template Decoder Web Application...")
     print("Starting in safe mode: binding only to localhost (127.0.0.1)")
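
The new `/api/inspiration/run-pipeline` and `/api/inspiration/stream-pipeline/<task_id>` routes above follow a worker-thread-plus-queue pattern: a background thread pushes subprocess log lines onto a `queue.Queue`, and a Server-Sent Events endpoint drains the queue until a `done` sentinel arrives. A minimal, self-contained sketch of that pattern (the route name and worker below are illustrative, not the package's code):

```python
import json
import queue
import threading
import time

from flask import Flask, Response, stream_with_context

app = Flask(__name__)
log_queue = queue.Queue()


def worker():
    # Stand-in for the subprocess whose output run_pipeline.py would stream.
    for i in range(3):
        log_queue.put(f"step {i}")
        time.sleep(0.1)
    log_queue.put({"__event__": "done", "success": True})  # completion sentinel


@app.route("/stream-demo")  # hypothetical route, for illustration only
def stream_demo():
    threading.Thread(target=worker, daemon=True).start()

    def generate():
        while True:
            item = log_queue.get()
            if isinstance(item, dict) and item.get("__event__") == "done":
                yield f"event: done\ndata: {json.dumps(item)}\n\n"
                break
            yield f"data: {json.dumps({'line': item})}\n\n"

    return Response(stream_with_context(generate()), mimetype="text/event-stream")
```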
@@ -423,7 +423,7 @@ class BrainApiClient:
         return False
 
     async def get_datasets(self, instrument_type: str = "EQUITY", region: str = "USA",
-                           delay: int = 1, universe: str = "TOP3000", theme: str = "
+                           delay: int = 1, universe: str = "TOP3000", theme: str = "ALL", search: Optional[str] = None) -> Dict[str, Any]:
         """Get available datasets."""
         await self.ensure_authenticated()
 

@@ -1883,7 +1883,7 @@ async def get_datasets(
     region: str = "USA",
     delay: int = 1,
     universe: str = "TOP3000",
-    theme: str = "
+    theme: str = "ALL",
     search: Optional[str] = None
 ) -> Dict[str, Any]:
     """

@@ -357,7 +357,7 @@ class BrainApiClient:
             raise
 
     async def get_datasets(self, instrument_type: str = "EQUITY", region: str = "USA",
-                           delay: int = 1, universe: str = "TOP3000", theme: str = "
+                           delay: int = 1, universe: str = "TOP3000", theme: str = "ALL", search: Optional[str] = None) -> Dict[str, Any]:
         """Get available datasets."""
         await self.ensure_authenticated()
 

@@ -1761,7 +1761,7 @@ async def get_datasets(
    region: str = "USA",
    delay: int = 1,
    universe: str = "TOP3000",
-    theme: str = "
+    theme: str = "ALL",
    search: Optional[str] = None,
 ) -> Dict[str, Any]:
     """

@@ -357,7 +357,7 @@ class BrainApiClient:
             raise
 
     async def get_datasets(self, instrument_type: str = "EQUITY", region: str = "USA",
-                           delay: int = 1, universe: str = "TOP3000", theme: str = "
+                           delay: int = 1, universe: str = "TOP3000", theme: str = "ALL", search: Optional[str] = None) -> Dict[str, Any]:
         """Get available datasets."""
         await self.ensure_authenticated()
 

@@ -1761,7 +1761,7 @@ async def get_datasets(
    region: str = "USA",
    delay: int = 1,
    universe: str = "TOP3000",
-    theme: str = "
+    theme: str = "ALL",
    search: Optional[str] = None,
 ) -> Dict[str, Any]:
     """