cnhkmcp 2.3.2__py3-none-any.whl → 2.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnhkmcp/__init__.py +1 -1
- cnhkmcp/untracked/AI/321/206/320/231/320/243/321/205/342/225/226/320/265/321/204/342/225/221/342/225/221/BRAIN_AI/321/206/320/231/320/243/321/205/342/225/226/320/265/321/204/342/225/221/342/225/221Mac_Linux/321/207/320/231/320/230/321/206/320/254/320/274.zip +0 -0
- cnhkmcp/untracked/AI/321/206/320/231/320/243/321/205/342/225/226/320/265/321/204/342/225/221/342/225/221//321/205/320/237/320/234/321/205/320/227/342/225/227/321/205/320/276/320/231/321/210/320/263/320/225AI/321/206/320/231/320/243/321/205/342/225/226/320/265/321/204/342/225/221/342/225/221_Windows/321/207/320/231/320/230/321/206/320/254/320/274.exe +0 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242/main.py +7 -0
- cnhkmcp/untracked/AI/321/206/320/261/320/234/321/211/320/255/320/262/321/206/320/237/320/242/321/204/342/225/227/342/225/242//321/211/320/266/320/246/321/206/320/274/320/261/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +8 -0
- cnhkmcp/untracked/APP/ace_lib.py +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/ace.log +1 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-data-feature-engineering/output_report/GLB_delay1_fundamental28_ideas.md +384 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/final_expressions.json +41 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874844124598400.json +7 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874844589448700.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874845048996700.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874845510819100.json +12 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874845978315000.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874846459411100.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874846924915700.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874847399137200.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874847858960800.json +10 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874848327921300.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874848810818000.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874849327754300.json +7 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874849795807500.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874850272279500.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874850757124200.json +7 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_1_idea_1769874851224506800.json +8 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/data/fundamental28_GLB_delay1/fundamental28_GLB_delay1.csv +930 -0
- cnhkmcp/untracked/APP/trailSomeAlphas/skills/brain-feature-implementation/scripts/ace.log +1 -0
- cnhkmcp/untracked/APP//321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/231/320/243/321/205/342/225/235/320/220/321/206/320/230/320/241.py +8 -0
- cnhkmcp/untracked/arxiv_api.py +7 -0
- cnhkmcp/untracked/back_up/forum_functions.py +8 -0
- cnhkmcp/untracked/forum_functions.py +8 -0
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/forum_functions.py +8 -0
- cnhkmcp/untracked/mcp/321/206/320/246/320/227/321/204/342/225/227/342/225/242/321/210/320/276/342/225/221/321/205/320/255/320/253/321/207/320/231/320/2302_/321/205/320/266/320/222/321/206/320/256/320/254/321/205/320/236/320/257/321/207/320/231/320/230/321/205/320/240/320/277/321/205/320/232/320/270/321/204/342/225/225/320/235/321/204/342/225/221/320/226/321/206/342/225/241/320/237/321/210/320/267/320/230/321/205/320/251/320/270/321/205/342/226/221/342/226/222/321/210/320/277/320/245/321/210/342/224/220/320/251/321/204/342/225/225/320/272/platform_functions.py +8 -1
- cnhkmcp/untracked/platform_functions.py +7 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/.gitignore +14 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/SKILL.md +76 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/ace.log +0 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/ace_lib.py +1512 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/config.json +6 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/fundamental28_GLB_1_idea_1769874845978315000.json +10 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/helpful_functions.py +180 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/scripts/__init__.py +0 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/scripts/build_alpha_list.py +86 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/scripts/fetch_sim_options.py +51 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/scripts/load_credentials.py +93 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/scripts/parse_idea_file.py +85 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/scripts/process_template.py +80 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/scripts/resolve_settings.py +94 -0
- cnhkmcp/untracked/skills/brain-inspectTemplate-create-Setting/sim_options_snapshot.json +414 -0
- cnhkmcp/untracked//321/211/320/225/320/235/321/207/342/225/234/320/276/321/205/320/231/320/235/321/210/342/224/220/320/240/321/210/320/261/320/234/321/206/320/230/320/241_/321/205/320/276/320/231/321/210/320/263/320/225/321/205/342/224/220/320/225/321/210/320/266/320/221/321/204/342/225/233/320/255/321/210/342/225/241/320/246/321/205/320/234/320/225.py +8 -0
- {cnhkmcp-2.3.2.dist-info → cnhkmcp-2.3.4.dist-info}/METADATA +1 -1
- {cnhkmcp-2.3.2.dist-info → cnhkmcp-2.3.4.dist-info}/RECORD +56 -22
- {cnhkmcp-2.3.2.dist-info → cnhkmcp-2.3.4.dist-info}/WHEEL +0 -0
- {cnhkmcp-2.3.2.dist-info → cnhkmcp-2.3.4.dist-info}/entry_points.txt +0 -0
- {cnhkmcp-2.3.2.dist-info → cnhkmcp-2.3.4.dist-info}/licenses/LICENSE +0 -0
- {cnhkmcp-2.3.2.dist-info → cnhkmcp-2.3.4.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
{
|
|
2
|
+
"template": "divide({value_03501a}, {value_02300q})",
|
|
3
|
+
"idea": "**Concept**: Equity Capital Structure Ratio\n- **Sample Fields Used**: `value_03501a`, `value_02300q`\n- **Definition**: Common equity as a proportion of total assets (Equity/Assets ratio)\n- **Why This Feature**: Measures financial leverage and capital structure conservatism; higher equity indicates lower leverage risk\n- **Logical Meaning**: Ownership cushion against asset value declines; inverse of leverage ratio\n- **is filling nan necessary**: we have some operators to fill nan value like ts_backfill() or group_mean() etc. however, in some cases, if the nan value itself has some meaning, then we should not fill it blindly since it may introduce some bias. so before filling nan value, we should think about whether the nan value has some meaning in the specific scenario. Annual equity vs quarterly assets creates frequency mismatch; do not interpolate annual data to quarterly.\n- **Directionality**: Higher values indicate less leveraged, more conservative capital structure (typically lower risk)\n- **Boundary Conditions**: Values near 1 indicate no debt; near 0 indicate highly leveraged or negative equity situations",
|
|
4
|
+
"expression_list": [
|
|
5
|
+
"divide(fnd28_bdea_value_03501a, fnd28_bsassetq_value_02300q)",
|
|
6
|
+
"divide(fnd28_bdea_value_03501a, fnd28_nddq1_value_02300q)",
|
|
7
|
+
"divide(fnd28_fsa1_value_03501a, fnd28_bsassetq_value_02300q)",
|
|
8
|
+
"divide(fnd28_fsa1_value_03501a, fnd28_nddq1_value_02300q)"
|
|
9
|
+
]
|
|
10
|
+
}
|
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
from typing import Union
|
|
4
|
+
|
|
5
|
+
import pandas as pd
|
|
6
|
+
from pandas.io.formats.style import Styler
|
|
7
|
+
|
|
8
|
+
brain_api_url = os.environ.get("BRAIN_API_URL", "https://api.worldquantbrain.com")
|
|
9
|
+
brain_url = os.environ.get("BRAIN_URL", "https://platform.worldquantbrain.com")
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def make_clickable_alpha_id(alpha_id: str) -> str:
    """Render an alpha ID as a clickable HTML anchor.

    Args:
        alpha_id (str): The ID of the alpha.

    Returns:
        str: An HTML ``<a>`` tag linking to the alpha's page on the platform.
    """
    # brain_url is the module-level platform base URL.
    target = f"{brain_url}/alpha/{alpha_id}"
    return f'<a href="{target}">{alpha_id}</a>'
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def prettify_result(
    result: list, detailed_tests_view: bool = False, clickable_alpha_id: bool = False
) -> Union[pd.DataFrame, Styler]:
    """
    Combine and format simulation results into a single DataFrame for analysis.

    Alphas without in-sample stats are dropped; the remaining rows are sorted
    by fitness (best first), joined with their expressions and pivoted test
    results, and PENDING test columns are removed.

    Args:
        result (list): A list of dictionaries containing simulation results.
        detailed_tests_view (bool, optional): If True, include detailed test results
            (limit/result/value dicts) instead of just the PASS/FAIL outcome. Defaults to False.
        clickable_alpha_id (bool, optional): If True, make alpha IDs clickable. Defaults to False.

    Returns:
        pandas.DataFrame or pandas.io.formats.style.Styler: A DataFrame containing formatted results,
        optionally wrapped in a Styler with clickable alpha IDs.
    """
    # Keep only entries whose in-sample stats arrived; rank by fitness, best first.
    list_of_is_stats = [result[x]["is_stats"] for x in range(len(result)) if result[x]["is_stats"] is not None]
    is_stats_df = pd.concat(list_of_is_stats).reset_index(drop=True)
    is_stats_df = is_stats_df.sort_values("fitness", ascending=False)

    # SUPER alphas carry a selection/combo pair; regular alphas a single expression.
    expressions = {
        result[x]["alpha_id"]: (
            {
                "selection": result[x]["simulate_data"]["selection"],
                "combo": result[x]["simulate_data"]["combo"],
            }
            if result[x]["simulate_data"]["type"] == "SUPER"
            else result[x]["simulate_data"]["regular"]
        )
        for x in range(len(result))
        if result[x]["is_stats"] is not None
    }
    expression_df = pd.DataFrame(list(expressions.items()), columns=["alpha_id", "expression"])

    # Pivot per-test rows into one column per test name; WARNING rows are
    # dropped first so each (alpha_id, name) pair stays unique for pivot().
    list_of_is_tests = [result[x]["is_tests"] for x in range(len(result)) if result[x]["is_tests"] is not None]
    is_tests_df = pd.concat(list_of_is_tests, sort=True).reset_index(drop=True)
    is_tests_df = is_tests_df[is_tests_df["result"] != "WARNING"]
    if detailed_tests_view:
        cols = ["limit", "result", "value"]
        is_tests_df["details"] = is_tests_df[cols].to_dict(orient="records")
        is_tests_df = is_tests_df.pivot(index="alpha_id", columns="name", values="details").reset_index()
    else:
        is_tests_df = is_tests_df.pivot(index="alpha_id", columns="name", values="result").reset_index()

    # Join stats + expressions + tests, drop any column still PENDING, then
    # convert camelCase column names to snake_case.
    alpha_stats = pd.merge(is_stats_df, expression_df, on="alpha_id")
    alpha_stats = pd.merge(alpha_stats, is_tests_df, on="alpha_id")
    alpha_stats = alpha_stats.drop(columns=alpha_stats.columns[(alpha_stats == "PENDING").any()])
    alpha_stats.columns = alpha_stats.columns.str.replace("(?<=[a-z])(?=[A-Z])", "_", regex=True).str.lower()
    if clickable_alpha_id:
        return alpha_stats.style.format({"alpha_id": lambda x: make_clickable_alpha_id(str(x))})
    return alpha_stats
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def concat_pnl(result: list) -> pd.DataFrame:
    """Stack the per-alpha PnL frames from *result* into one DataFrame.

    Args:
        result (list): Simulation result dicts, each carrying a "pnl" entry.

    Returns:
        pandas.DataFrame: All non-null PnL frames concatenated, with the
        original index moved into a column via ``reset_index()``.
    """
    frames = [entry["pnl"] for entry in result if entry["pnl"] is not None]
    return pd.concat(frames).reset_index()
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def concat_is_tests(result: list) -> pd.DataFrame:
    """Stack per-alpha in-sample test frames from *result* into one DataFrame.

    Args:
        result (list): Simulation result dicts, each carrying an "is_tests" entry.

    Returns:
        pandas.DataFrame: All non-null test frames concatenated with columns
        sorted and a fresh 0..n-1 index.
    """
    frames = [entry["is_tests"] for entry in result if entry["is_tests"] is not None]
    return pd.concat(frames, sort=True).reset_index(drop=True)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def save_simulation_result(result: dict) -> None:
    """Persist one alpha's simulation result as JSON under 'simulation_results/'.

    The file is named "<alpha_id>_<region>" (no extension), derived from the
    result's "id" and its settings' "region".

    Args:
        result (dict): A dictionary containing the simulation result for an alpha.
    """
    folder_path = "simulation_results/"
    os.makedirs(folder_path, exist_ok=True)
    target = os.path.join(folder_path, f"{result['id']}_{result['settings']['region']}")
    with open(target, "w", encoding="utf-8") as fh:
        json.dump(result, fh)
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def save_pnl(pnl_df: pd.DataFrame, alpha_id: str, region: str) -> None:
    """Write an alpha's PnL frame to 'alphas_pnl/<alpha_id>_<region>.csv'.

    Args:
        pnl_df (pandas.DataFrame): The DataFrame containing PnL data.
        alpha_id (str): The ID of the alpha.
        region (str): The region for which the PnL data was generated.
    """
    folder = "alphas_pnl/"
    os.makedirs(folder, exist_ok=True)
    # Default to_csv keeps the index column, matching previous output files.
    pnl_df.to_csv(os.path.join(folder, f"{alpha_id}_{region}.csv"))
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def save_yearly_stats(yearly_stats: pd.DataFrame, alpha_id: str, region: str):
    """Write an alpha's yearly statistics to 'yearly_stats/<alpha_id>_<region>.csv'.

    Args:
        yearly_stats (pandas.DataFrame): The DataFrame containing yearly statistics.
        alpha_id (str): The ID of the alpha.
        region (str): The region for which the statistics were generated.
    """
    folder = "yearly_stats/"
    os.makedirs(folder, exist_ok=True)
    # index=False: yearly stats carry their own year column, so the
    # positional index adds nothing.
    yearly_stats.to_csv(os.path.join(folder, f"{alpha_id}_{region}.csv"), index=False)
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def expand_dict_columns(data: pd.DataFrame) -> pd.DataFrame:
    """
    Expand dictionary columns in a DataFrame into separate columns.

    Dict-valued columns are detected by inspecting the first row; each such
    column ``col`` contributes new columns named ``"{col}_{key}"``. The
    original dict columns are kept alongside the expanded ones.

    Args:
        data (pandas.DataFrame): The input DataFrame, possibly with dict columns.

    Returns:
        pandas.DataFrame: A new DataFrame with expanded columns appended, or
        the input unchanged when it is empty or holds no dict columns.
    """
    # Fix: the previous implementation crashed on an empty frame
    # (``iloc[0]`` -> IndexError) and on frames without any dict column
    # (``pd.concat([])`` -> ValueError). Both now return the input as-is.
    if data.empty:
        return data
    dict_columns = [col for col in data.columns if isinstance(data[col].iloc[0], dict)]
    if not dict_columns:
        return data
    new_columns = pd.concat(
        [data[col].apply(pd.Series).rename(columns=lambda x: f"{col}_{x}") for col in dict_columns],
        axis=1,
    )
    return pd.concat([data, new_columns], axis=1)
|
|
File without changes
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import json
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
SKILL_DIR = Path(__file__).resolve().parents[1]
|
|
10
|
+
if str(SKILL_DIR) not in sys.path:
|
|
11
|
+
sys.path.insert(0, str(SKILL_DIR))
|
|
12
|
+
|
|
13
|
+
import ace_lib
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _load_json(path: Path) -> dict[str, Any]:
    """Read *path* as UTF-8 text and parse it as JSON."""
    raw = path.read_text(encoding="utf-8")
    return json.loads(raw)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def main() -> None:
    """CLI: expand an idea context into simulation-ready alpha payloads.

    Reads expressions from --idea (idea_context.json), applies the resolved
    settings supplied as a JSON string via --settings_json, and appends the
    generated alphas to --out (alpha_list.json).
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--idea", required=True, help="idea_context.json")
    ap.add_argument("--settings_json", required=True, help="JSON string of settings config")
    ap.add_argument("--out", required=True, help="alpha_list.json (will append if exists)")
    args = ap.parse_args()

    idea_ctx = _load_json(Path(args.idea).resolve())

    try:
        settings_doc = json.loads(args.settings_json)
    except json.JSONDecodeError as e:
        # Fix: chain the decode error (`from e`) so the offending position
        # is preserved in the traceback instead of being discarded.
        raise ValueError(f"Invalid JSON string provided for --settings_json: {e}") from e

    # Support wrapped key "resolved" or flat dict
    resolved_raw = settings_doc.get("resolved", settings_doc)
    if not isinstance(resolved_raw, dict):
        raise ValueError("Settings file must contain settings dict or 'resolved' key")

    # Robustness: Normalize keys to lowercase to handle PascalCase (Region vs region)
    # and camelCase (nanHandling vs nanhandling) inconsistencies.
    resolved = {k.lower(): v for k, v in resolved_raw.items()}

    out_path = Path(args.out).resolve()
    existing_alphas = []
    if out_path.exists():
        try:
            existing_alphas = _load_json(out_path)
            if not isinstance(existing_alphas, list):
                print(f"Warning: Existing file {out_path} is not a list. Overwriting.")
                existing_alphas = []
        except Exception as e:
            # Best-effort append: an unreadable file is replaced, not fatal.
            print(f"Warning: Could not read existing {out_path}: {e}. Overwriting.")
            existing_alphas = []

    expressions = idea_ctx.get("expression_list") or []
    if not isinstance(expressions, list) or not all(isinstance(x, str) for x in expressions):
        raise ValueError("idea_context.json must contain expression_list: list[str]")

    # Required settings (region/universe/delay/neutralization) raise KeyError
    # when absent; optional ones fall back to platform defaults.
    new_alphas = [
        ace_lib.generate_alpha(
            regular=expr,
            alpha_type="REGULAR",
            region=resolved["region"],
            universe=resolved["universe"],
            delay=int(resolved["delay"]),
            decay=int(resolved.get("decay", 0)),
            neutralization=resolved["neutralization"],
            truncation=float(resolved.get("truncation", 0.08)),
            pasteurization=resolved.get("pasteurization", "ON"),
            test_period=resolved.get("testperiod", "P0Y0M0D"),
            unit_handling=resolved.get("unithandling", "VERIFY"),
            nan_handling=resolved.get("nanhandling", "OFF"),
            max_trade=resolved.get("maxtrade", "OFF"),
            visualization=bool(resolved.get("visualization", False)),
        )
        for expr in expressions
    ]

    final_list = existing_alphas + new_alphas

    out_path.write_text(json.dumps(final_list, ensure_ascii=False, indent=2), encoding="utf-8")
    print(f"Wrote {len(new_alphas)} new alphas. Total: {len(final_list)} alphas in {out_path}")
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
if __name__ == "__main__":
|
|
86
|
+
main()
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import json
|
|
5
|
+
import sys
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
SKILL_DIR = Path(__file__).resolve().parents[1]
|
|
9
|
+
if str(SKILL_DIR) not in sys.path:
|
|
10
|
+
sys.path.insert(0, str(SKILL_DIR))
|
|
11
|
+
|
|
12
|
+
import ace_lib
|
|
13
|
+
from scripts.load_credentials import load_credentials
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def main() -> None:
    """CLI: fetch the platform's instrument/region/delay options and snapshot them to JSON.

    Requires network access and valid BRAIN credentials (resolved via
    load_credentials). The snapshot records the API base URL used plus one
    row per available simulation option combination.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "--out",
        required=True,
        help="Output snapshot JSON file (e.g., sim_options_snapshot.json)",
    )
    args = ap.parse_args()

    # Credentials live at the skill root (one level above scripts/).
    skill_dir = Path(__file__).resolve().parents[1]
    creds = load_credentials(skill_dir=skill_dir)

    # Ensure ace_lib uses configured API base for normal requests.
    # NOTE: get_instrument_type_region_delay historically hard-coded the URL; we patch ace_lib to respect brain_api_url.
    ace_lib.brain_api_url = creds.brain_api_url

    def _get_credentials():
        return creds.username, creds.password

    # Monkeypatch so ace_lib authenticates with our resolved credentials
    # instead of prompting interactively.
    ace_lib.get_credentials = _get_credentials  # type: ignore[assignment]

    s = ace_lib.start_session()
    df = ace_lib.get_instrument_type_region_delay(s)

    payload = {
        "brain_api_url": creds.brain_api_url,
        "rows": df.to_dict(orient="records"),
    }

    out_path = Path(args.out).resolve()
    out_path.write_text(json.dumps(payload, ensure_ascii=False, indent=2), encoding="utf-8")
    print(f"Wrote simulation options snapshot: {out_path} ({len(df)} rows)")
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
if __name__ == "__main__":
|
|
51
|
+
main()
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass(frozen=True)
class BrainCredentials:
    """Immutable bundle of BRAIN platform login details and endpoint URLs."""

    # Login identity (email address or username on the platform).
    username: str
    # Plain-text password; callers must never log or print this value.
    password: str
    # REST API base; overridable via BRAIN_API_URL env var or config.
    brain_api_url: str = "https://api.worldquantbrain.com"
    # Web UI base; overridable via BRAIN_URL env var or config.
    brain_url: str = "https://platform.worldquantbrain.com"
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _read_json_file(path: Path) -> dict:
    """Parse *path* as JSON, returning an empty dict when the file is absent."""
    try:
        raw = path.read_text(encoding="utf-8")
    except FileNotFoundError:
        return {}
    return json.loads(raw)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def load_credentials(
    *,
    skill_dir: Path,
    config_filename: str = "config.json",
    allow_env: bool = True,
    allow_home_secrets: bool = True,
) -> BrainCredentials:
    """Resolve BRAIN credentials without ever printing secrets.

    Sources are tried in order:
      1. environment variables (when ``allow_env``)
      2. the skill-local config file (``config_filename`` under ``skill_dir``)
      3. ``~/secrets/platform-brain.json`` (when ``allow_home_secrets``)

    Recognized env vars: BRAIN_USERNAME or BRAIN_EMAIL, BRAIN_PASSWORD, and
    optionally BRAIN_API_URL / BRAIN_URL for endpoint overrides.

    Raises:
        RuntimeError: when no source yields both a username and a password.
    """
    api_base = os.environ.get("BRAIN_API_URL", "https://api.worldquantbrain.com")
    web_base = os.environ.get("BRAIN_URL", "https://platform.worldquantbrain.com")

    # 1) Environment variables.
    if allow_env:
        user = os.environ.get("BRAIN_USERNAME", os.environ.get("BRAIN_EMAIL", ""))
        pwd = os.environ.get("BRAIN_PASSWORD", "")
        if user and pwd:
            return BrainCredentials(
                username=user,
                password=pwd,
                brain_api_url=api_base,
                brain_url=web_base,
            )

    # 2) Skill-local config; it may also override the endpoint URLs, and
    # those overrides deliberately carry through to the home-secrets branch.
    cfg_path = skill_dir / config_filename
    cfg = _read_json_file(cfg_path)
    user = (cfg.get("username") or cfg.get("email") or "").strip()
    pwd = (cfg.get("password") or "").strip()
    api_base = (cfg.get("BRAIN_API_URL") or api_base).strip() or api_base
    web_base = (cfg.get("BRAIN_URL") or web_base).strip() or web_base
    if user and pwd:
        return BrainCredentials(username=user, password=pwd, brain_api_url=api_base, brain_url=web_base)

    # 3) Shared per-user secrets file.
    if allow_home_secrets:
        secret = _read_json_file(Path.home() / "secrets" / "platform-brain.json")
        user = (secret.get("email") or secret.get("username") or "").strip()
        pwd = (secret.get("password") or "").strip()
        if user and pwd:
            return BrainCredentials(
                username=user,
                password=pwd,
                brain_api_url=api_base,
                brain_url=web_base,
            )

    raise RuntimeError(
        "Missing BRAIN credentials. Provide either: "
        "(1) env vars BRAIN_USERNAME/BRAIN_EMAIL + BRAIN_PASSWORD, or "
        f"(2) {cfg_path} with username/password, or "
        "(3) ~/secrets/platform-brain.json with email/password. "
        "See config.example.json for the expected schema."
    )
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import json
|
|
5
|
+
import re
|
|
6
|
+
import sys
|
|
7
|
+
from dataclasses import dataclass, asdict
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
SKILL_DIR = Path(__file__).resolve().parents[1]
|
|
12
|
+
if str(SKILL_DIR) not in sys.path:
|
|
13
|
+
sys.path.insert(0, str(SKILL_DIR))
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass(frozen=True)
class IdeaContext:
    """Normalized view of one idea JSON file plus filename-derived metadata."""

    # Absolute path of the source idea JSON (kept for traceability).
    input_file: str
    # Dataset identifier parsed from the filename (e.g. "fundamental28").
    dataset_id: str
    # Region code parsed from the filename (e.g. "GLB").
    region: str
    # Delay parsed from the filename (0 or 1).
    delay: int
    # Raw expression template from the idea file, e.g. "divide({a}, {b})".
    template: str
    # Free-text description of the idea.
    idea: str
    # Concrete, simulation-ready expressions derived from the template.
    expression_list: list[str]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
# Matches idea filenames like "fundamental28_GLB_1_idea_1769874844124598400.json",
# capturing dataset, region, and delay (0 or 1); the trailing digits are a
# timestamp and are not captured.
_FILENAME_RE = re.compile(
    r"^(?P<dataset>[^_]+)_(?P<region>[A-Za-z]+)_(?P<delay>[01])_idea_\d+\.json$"
)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def parse_filename_metadata(path: Path) -> tuple[str, str, int]:
    """Extract (dataset, region, delay) from an idea file's name.

    Raises:
        ValueError: if the name does not match the expected
        ``<dataset>_<region>_<delay>_idea_<ts>.json`` pattern.
    """
    match = _FILENAME_RE.match(path.name)
    if match is None:
        raise ValueError(
            "Unsupported filename format. Expected like: fundamental28_GLB_1_idea_<ts>.json; "
            f"got: {path.name}"
        )
    return match.group("dataset"), match.group("region"), int(match.group("delay"))
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def load_idea_json(path: Path) -> dict[str, Any]:
    """Read and parse the idea JSON file at *path* (UTF-8)."""
    text = path.read_text(encoding="utf-8")
    return json.loads(text)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def build_context(path: Path) -> IdeaContext:
    """Assemble an IdeaContext from one idea JSON file.

    Combines filename-derived metadata (dataset/region/delay) with the file's
    template, idea text, and expression list.

    Raises:
        ValueError: if ``expression_list`` is not a list of strings.
    """
    dataset, region, delay = parse_filename_metadata(path)
    payload = load_idea_json(path)

    exprs = payload.get("expression_list") or []
    exprs_ok = isinstance(exprs, list) and all(isinstance(item, str) for item in exprs)
    if not exprs_ok:
        raise ValueError("expression_list must be a list of strings")

    return IdeaContext(
        input_file=str(path),
        dataset_id=dataset,
        region=region,
        delay=delay,
        template=str(payload.get("template") or ""),
        idea=str(payload.get("idea") or ""),
        expression_list=exprs,
    )
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def main() -> None:
    """CLI entry point: parse one idea JSON and write its idea_context.json."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", required=True, help="Path to idea JSON")
    ap.add_argument("--out", required=True, help="Output idea_context.json")
    args = ap.parse_args()

    input_path = Path(args.input).resolve()
    out_path = Path(args.out).resolve()

    ctx = build_context(input_path)
    # Dataclass -> plain dict for serialization; keep non-ASCII readable.
    out_path.write_text(json.dumps(asdict(ctx), ensure_ascii=False, indent=2), encoding="utf-8")
    print(f"Wrote idea context: {out_path}")
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
if __name__ == "__main__":
|
|
85
|
+
main()
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import argparse
|
|
4
|
+
import shutil
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
SKILL_DIR = Path(__file__).resolve().parents[1]
|
|
10
|
+
SCRIPTS_DIR = SKILL_DIR / "scripts"
|
|
11
|
+
PYTHON_EXE = sys.executable
|
|
12
|
+
|
|
13
|
+
def run_step(script_name: str, args: list[str]) -> None:
    """Execute a helper script from the scripts directory, aborting on failure.

    The script runs under the current interpreter with the skill root as its
    working directory; a non-zero exit terminates this process with status 1.
    """
    print(f"Running: {script_name} {' '.join(args)}")
    command = [PYTHON_EXE, str(SCRIPTS_DIR / script_name), *args]
    try:
        subprocess.run(command, check=True, cwd=SKILL_DIR)
    except subprocess.CalledProcessError as e:
        print(f"Error running {script_name}: {e}")
        sys.exit(1)
|
|
23
|
+
|
|
24
|
+
def main() -> None:
    """Orchestrate step 1 of template processing for a single idea file.

    Parses the idea JSON into a context, ensures a simulation-options
    snapshot exists (fetching over the network only when missing or forced),
    resolves candidate settings, then stops and prints instructions so an
    AI/agent can choose the final setting combinations.
    """
    parser = argparse.ArgumentParser(description="Process a single template file and generate artifacts in a dedicated folder.")
    parser.add_argument("--file", required=True, help="Path to the input idea JSON file.")
    parser.add_argument("--force-fetch-options", action="store_true", help="Force fetching new simulation options even if snapshot exists.")
    args = parser.parse_args()

    input_path = Path(args.file).resolve()
    if not input_path.exists():
        print(f"Error: Input file not found: {input_path}")
        sys.exit(1)

    # 1. Create output directory
    # One folder per input file (named after its stem) keeps runs isolated.
    output_dir = SKILL_DIR / "processed_templates" / input_path.stem
    output_dir.mkdir(parents=True, exist_ok=True)
    print(f"Output directory: {output_dir}")

    # 2. Parse idea file
    idea_context_path = output_dir / "idea_context.json"
    run_step("parse_idea_file.py", ["--input", str(input_path), "--out", str(idea_context_path)])

    # 3. Handle Simulation Options
    # Strategy: Use shared snapshot in skill root if available to save time/network, unless forced.
    global_options_path = SKILL_DIR / "sim_options_snapshot.json"

    if args.force_fetch_options or not global_options_path.exists():
        print("Fetching simulation options (network required)...")
        run_step("fetch_sim_options.py", ["--out", str(global_options_path)])

    # Optional: Copy snapshot to output dir for full reproducibility?
    # Let's verify if the user wants strictly isolated execution.
    # For now, we pass the global path to resolve_settings.

    # 4. Resolve Candidates (Not final choice)
    candidates_path = output_dir / "settings_candidates.json"
    run_step("resolve_settings.py", [
        "--idea", str(idea_context_path),
        "--options", str(global_options_path),
        "--out", str(candidates_path)
    ])

    print("\n-------------------------------------------------------------")
    print("STEP 1 COMPLETE: Candidates Generated")
    print(f"Candidates file: {candidates_path}")
    print("-------------------------------------------------------------")
    print("ACTION REQUIRED FOR AI/AGENT:")
    print("1. Read 'idea_context.json' (for intent) and 'settings_candidates.json' (for valid options).")
    print("2. Decide on ONE OR MORE combinations of settings (Universe, Neutralization, Decay, NaN).")
    print("3. For EACH chosen setting combination, run 'build_alpha_list.py' directly with JSON string:")
    print(f"   python scripts/build_alpha_list.py --idea {idea_context_path} --out {output_dir}/alpha_list.json --settings_json '{{...json...}}'")
    print("   (The script will APPEND new alphas to alpha_list.json)")
    print("-------------------------------------------------------------")

    # Stop here to let AI decide.
    sys.exit(0)
|
|
78
|
+
|
|
79
|
+
if __name__ == "__main__":
|
|
80
|
+
main()
|