cnhkmcp 2.1.6__py3-none-any.whl → 2.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnhkmcp/__init__.py +1 -1
- cnhkmcp/untracked/AI打工人/BRAIN_AI打工人Mac_Linux/版本.zip +0 -0
- cnhkmcp/untracked/AI打工人/双击安装AI打工人_Windows/版本.exe +0 -0
- cnhkmcp/untracked/skills/brain-calculate-alpha-selfcorrQuick/SKILL.md +25 -0
- cnhkmcp/untracked/skills/brain-calculate-alpha-selfcorrQuick/reference.md +59 -0
- cnhkmcp/untracked/skills/brain-calculate-alpha-selfcorrQuick/scripts/requirements.txt +4 -0
- cnhkmcp/untracked/skills/brain-calculate-alpha-selfcorrQuick/scripts/skill.py +734 -0
- cnhkmcp/untracked/skills/brain-datafield-exploration-general/SKILL.md +45 -0
- cnhkmcp/untracked/skills/brain-datafield-exploration-general/reference.md +194 -0
- cnhkmcp/untracked/skills/brain-dataset-exploration-general/SKILL.md +39 -0
- cnhkmcp/untracked/skills/brain-dataset-exploration-general/reference.md +436 -0
- cnhkmcp/untracked/skills/brain-explain-alphas/SKILL.md +39 -0
- cnhkmcp/untracked/skills/brain-explain-alphas/reference.md +56 -0
- cnhkmcp/untracked/skills/brain-how-to-pass-AlphaTest/SKILL.md +72 -0
- cnhkmcp/untracked/skills/brain-how-to-pass-AlphaTest/reference.md +202 -0
- cnhkmcp/untracked/skills/brain-improve-alpha-performance/SKILL.md +44 -0
- cnhkmcp/untracked/skills/brain-improve-alpha-performance/reference.md +101 -0
- cnhkmcp/untracked/skills/brain-nextMove-analysis/SKILL.md +37 -0
- cnhkmcp/untracked/skills/brain-nextMove-analysis/reference.md +128 -0
- {cnhkmcp-2.1.6.dist-info → cnhkmcp-2.1.8.dist-info}/METADATA +1 -1
- {cnhkmcp-2.1.6.dist-info → cnhkmcp-2.1.8.dist-info}/RECORD +25 -7
- {cnhkmcp-2.1.6.dist-info → cnhkmcp-2.1.8.dist-info}/WHEEL +0 -0
- {cnhkmcp-2.1.6.dist-info → cnhkmcp-2.1.8.dist-info}/entry_points.txt +0 -0
- {cnhkmcp-2.1.6.dist-info → cnhkmcp-2.1.8.dist-info}/licenses/LICENSE +0 -0
- {cnhkmcp-2.1.6.dist-info → cnhkmcp-2.1.8.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,734 @@
+#!/usr/bin/env python3
+"""
+Alpha Self and PPAC Correlation Calculator Skill
+Calculates self-correlation and PPAC correlation for WorldQuant BRAIN alphas.
+"""
+
+import subprocess
+import pkg_resources
+import sys
+import requests
+import pandas as pd
+import logging
+import time
+import pickle
+from collections import defaultdict
+import numpy as np
+from tqdm import tqdm
+from pathlib import Path
+from concurrent.futures import ThreadPoolExecutor, as_completed
+import json
+import os
+import argparse
+from datetime import datetime
+from typing import Optional, Tuple, Dict, List, Union
+from requests import Response
+
+# Default parameters
+DEFAULT_START_DATE = "01-10"
+DEFAULT_END_DATE = "01-11"
+DEFAULT_SHARPE_THRESHOLD = -1.0
+DEFAULT_FITNESS_THRESHOLD = -1.0
+DEFAULT_REGION = "IND"
+DEFAULT_ALPHA_NUM = 100
+DEFAULT_MAX_WORKERS = 5
+
+# Required packages
+REQUIRED_PACKAGES = [
+    "requests>=2.32.0",
+    "pandas>=2.0.0",
+    "numpy>=1.24.0",
+    "tqdm>=4.65.0"
+]
+
+def check_and_install_requirements():
+    """Check for and, if needed, install the required Python packages."""
+    missing_packages = []
+
+    for package in REQUIRED_PACKAGES:
+        # Parse the package name and version requirement
+        if '>=' in package:
+            pkg_name, min_version = package.split('>=')
+        else:
+            pkg_name = package
+            min_version = None
+
+        try:
+            # Check whether the package is installed
+            installed_version = pkg_resources.get_distribution(pkg_name).version
+            if min_version:
+                # Check whether the installed version satisfies the requirement
+                if pkg_resources.parse_version(installed_version) < pkg_resources.parse_version(min_version):
+                    missing_packages.append(package)
+        except pkg_resources.DistributionNotFound:
+            missing_packages.append(package)
+        except Exception:
+            missing_packages.append(package)
+
+    if missing_packages:
+        print(f"Found {len(missing_packages)} missing or outdated packages:")
+        for pkg in missing_packages:
+            print(f"  - {pkg}")
+
+        # Ask the user whether to install them
+        response = input("\nInstall the missing packages automatically? (y/n): ").strip().lower()
+        if response == 'y':
+            print("Installing missing packages...")
+            for pkg in missing_packages:
+                try:
+                    print(f"Installing {pkg}...")
+                    subprocess.check_call([sys.executable, "-m", "pip", "install", pkg])
+                except subprocess.CalledProcessError as e:
+                    print(f"Failed to install {pkg}: {e}")
+                    return False
+            print("All packages installed!")
+            return True
+        else:
+            print("Please install the missing packages manually:")
+            print(f"pip install {' '.join(missing_packages)}")
+            return False
+    else:
+        # Everything is installed; print nothing to keep output (and token usage) small
+        return True
+
+# ===== Sign-in function =====
+def sign_in(username, password):
+    s = requests.Session()
+    s.auth = (username, password)
+    try:
+        response = s.post('https://api.worldquantbrain.com/authentication')
+        response.raise_for_status()
+        logging.info("Successfully signed in")
+        return s
+    except requests.exceptions.RequestException as e:
+        logging.error(f"Login failed: {e}")
+        return None
+
+# ===== Functions for the first algorithm =====
+def get_submit_alphas(session, start_date, end_date, sharpe_th, fitness_th, region, alpha_num, tag=None):
+    """Fetch alphas that are eligible for submission."""
+    output = []  # Records that pass the checks
+    count = 0  # Number of alphas processed
+
+    # Get current year for date filtering
+    current_year = datetime.now().year
+
+    # Fetch the data in pages of 100
+    for i in range(0, alpha_num, 100):
+        print(f"Processing offset: {i}")
+        # Build the API request URL
+        base_url = f"https://api.worldquantbrain.com/users/self/alphas?limit=100&offset={i}&status=UNSUBMITTED%1FIS_FAIL&dateCreated%3E={current_year}-{start_date}T00:00:00-05:00&dateCreated%3C={current_year}-{end_date}T00:00:00-05:00&is.fitness%3E={fitness_th}&is.sharpe%3E={sharpe_th}&settings.region={region}&order=-is.sharpe&hidden=false&type!=SUPER"
+        # Append the tag filter if one was given
+        if tag:
+            base_url += f"&tags={tag}"
+        url = base_url
+
+        try:
+            response = session.get(url)  # Send the GET request
+            if response.status_code == 200:  # Request succeeded
+                alpha_list = response.json().get("results", [])  # Returned list of alphas
+                for alpha in alpha_list:
+                    # Extract the alpha's fields
+                    alpha_id = alpha.get("id")
+                    name = alpha.get("name")
+                    dateCreated = alpha.get("dateCreated")
+                    sharpe = alpha.get("is", {}).get("sharpe")
+                    fitness = alpha.get("is", {}).get("fitness")
+                    turnover = alpha.get("is", {}).get("turnover")
+                    margin = alpha.get("is", {}).get("margin")
+                    longCount = alpha.get("is", {}).get("longCount")
+                    shortCount = alpha.get("is", {}).get("shortCount")
+                    decay = alpha.get("settings", {}).get("decay")
+                    exp = alpha.get("regular", {}).get("code")
+
+                    # New: extract the neutralization setting
+                    neutralization = alpha.get("settings", {}).get("neutralization", "NONE")
+                    # Map the neutralization code to a readable name
+                    neutralization_map = {
+                        "SUBINDUSTRY": "Subindustry",
+                        "STATISTICAL": "Statistical",
+                        "SLOW": "Slow Factors",
+                        "SLOW_AND_FAST": "Slow + Fast Factors",
+                        "SECTOR": "Sector",
+                        "NONE": "None",
+                        "MARKET": "Market",
+                        "INDUSTRY": "Industry",
+                        "FAST": "Fast Factors",
+                        "CROWDING": "Crowding Factors",
+                        "COUNTRY": "Country/Region"
+                    }
+                    neutralization_name = neutralization_map.get(neutralization, neutralization)
+
+                    count += 1  # Bump the processed count
+
+                    # Determine whether the alpha passes its checks
+                    checks = alpha.get("is", {}).get("checks", [])
+                    checks_df = pd.DataFrame(checks)
+                    check_status = "Check FAIL"  # Default check status is fail
+
+                    # If there are any check items
+                    if not checks_df.empty:
+                        if "result" in checks_df.columns:
+                            # Mark "Check OK" when no check fails and longCount + shortCount > 100
+                            if not any(checks_df["result"].eq("FAIL")) and ((longCount or 0) + (shortCount or 0) > 100):
+                                check_status = "Check OK"
+
+                    # Build the record dict
+                    rec = {
+                        "alpha_id": alpha_id,
+                        "check_status": check_status,
+                        "sharpe": sharpe,
+                        "turnover": f"{turnover:.2%}" if turnover is not None else None,
+                        "fitness": fitness,
+                        "margin": f"{margin * 10000:.2f}‱" if margin is not None else None,  # Shown per ten thousand (basis points)
+                        "longCount": longCount,
+                        "shortCount": shortCount,
+                        "dateCreated": dateCreated,
+                        "decay": decay,
+                        "exp": exp,
+                        "neutralization": neutralization,  # Neutralization code
+                        "neutralization_name": neutralization_name  # Readable neutralization name
+                    }
+
+                    # Only records marked "Check OK" are kept in the output
+                    if check_status == "Check OK":
+                        output.append(rec)
+            else:
+                # On failure, print the error details
+                print(f"Request failed with status code: {response.status_code}")
+                print(f"Response body: {response.text}")
+        except Exception as e:
+            # Catch any exception and report it
+            print(f"Error while processing offset {i}: {e}")
+
+    print(f"Total alphas processed: {count}")
+    print(f"Alphas passing the checks: {len(output)}")
+    return output
+
+# ===== Sharpe-based ranking =====
+def rank_alphas_by_sharpe(alpha_data):
+    """Rank alphas by Sharpe ratio."""
+    if not alpha_data:
+        print("No qualifying alpha data; nothing to rank")
+        return pd.DataFrame()
+
+    df = pd.DataFrame(alpha_data)
+    # Sort by Sharpe ratio, descending
+    df = df.sort_values(by='sharpe', ascending=False)
+    # Add a rank column
+    df['Rank'] = range(1, len(df) + 1)
+
+    # Reorder the columns
+    columns_order = ["exp", "check_status", "alpha_id", "Rank", "sharpe", "turnover",
+                     "fitness", "margin", "dateCreated", "longCount", "shortCount", "decay",
+                     "neutralization", "neutralization_name"]
+    df = df[columns_order]
+    return df
+
+# ===== Functions for the second algorithm =====
+def save_obj(obj: object, name: str) -> None:
+    with open(name + '.pickle', 'wb') as f:
+        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
+
+def load_obj(name: str) -> object:
+    with open(name + '.pickle', 'rb') as f:
+        return pickle.load(f)
+
+def wait_get(session, url: str, max_retries: int = 10) -> "Response":
+    retries = 0
+    while retries < max_retries:
+        while True:
+            simulation_progress = session.get(url)
+            if simulation_progress.headers.get("Retry-After", 0) == 0:
+                break
+            time.sleep(float(simulation_progress.headers["Retry-After"]))
+        if simulation_progress.status_code < 400:
+            break
+        else:
+            time.sleep(2 ** retries)
+            retries += 1
+    return simulation_progress
+
+def _get_alpha_pnl(session, alpha_id: str) -> pd.DataFrame:
+    pnl = wait_get(session, "https://api.worldquantbrain.com/alphas/" + alpha_id + "/recordsets/pnl").json()
+    df = pd.DataFrame(pnl['records'], columns=[item['name'] for item in pnl['schema']['properties']])
+    df = df.rename(columns={'date': 'Date', 'pnl': alpha_id})
+    df = df[['Date', alpha_id]]
+    return df
+
+def get_alpha_pnls(session,
+                   alphas: list[dict],
+                   alpha_pnls: Optional[pd.DataFrame] = None,
+                   alpha_ids: Optional[dict[str, list]] = None) -> Tuple[dict[str, list], pd.DataFrame]:
+    if alpha_ids is None:
+        alpha_ids = defaultdict(list)
+    if alpha_pnls is None:
+        alpha_pnls = pd.DataFrame()
+
+    new_alphas = [item for item in alphas if item['id'] not in alpha_pnls.columns]
+    if not new_alphas:
+        return alpha_ids, alpha_pnls
+
+    for item_alpha in new_alphas:
+        alpha_ids[item_alpha['settings']['region']].append(item_alpha['id'])
+
+    fetch_pnl_func = lambda alpha_id: _get_alpha_pnl(session, alpha_id).set_index('Date')
+    with ThreadPoolExecutor(max_workers=10) as executor:
+        results = executor.map(fetch_pnl_func, [item['id'] for item in new_alphas])
+    alpha_pnls = pd.concat([alpha_pnls] + list(results), axis=1)
+    alpha_pnls.sort_index(inplace=True)
+    return alpha_ids, alpha_pnls
+
+def get_os_alphas(session, limit: int = 100, get_first: bool = False) -> List[Dict]:
+    fetched_alphas = []
+    offset = 0
+    retries = 0
+    total_alphas = 100
+    while len(fetched_alphas) < total_alphas:
+        print(f"Fetching alphas from offset {offset} to {offset + limit}")
+        url = f"https://api.worldquantbrain.com/users/self/alphas?stage=OS&limit={limit}&offset={offset}&order=-dateSubmitted"
+        res = wait_get(session, url).json()
+        if offset == 0:
+            total_alphas = res['count']
+        alphas = res["results"]
+        fetched_alphas.extend(alphas)
+        if len(alphas) < limit:
+            break
+        offset += limit
+        if get_first:
+            break
+    return fetched_alphas[:total_alphas]
+
+def calc_self_corr(session,
+                   alpha_id: str,
+                   os_alpha_rets: pd.DataFrame | None = None,
+                   os_alpha_ids: dict[str, str] | None = None,
+                   alpha_result: dict | None = None,
+                   return_alpha_pnls: bool = False,
+                   alpha_pnls: pd.DataFrame | None = None) -> float | tuple[float, pd.DataFrame]:
+    if alpha_result is None:
+        alpha_result = wait_get(session, f"https://api.worldquantbrain.com/alphas/{alpha_id}").json()
+    if alpha_pnls is not None:
+        if len(alpha_pnls) == 0:
+            alpha_pnls = None
+    if alpha_pnls is None:
+        _, alpha_pnls = get_alpha_pnls(session, [alpha_result])
+        alpha_pnls = alpha_pnls[alpha_id]
+    alpha_rets = alpha_pnls - alpha_pnls.ffill().shift(1)
+    alpha_rets = alpha_rets[pd.to_datetime(alpha_rets.index) > pd.to_datetime(alpha_rets.index).max() - pd.DateOffset(years=4)]
+    self_corr = os_alpha_rets[os_alpha_ids[alpha_result['settings']['region']]].corrwith(alpha_rets).max()
+    if np.isnan(self_corr):
+        self_corr = 0
+    return self_corr
+
+def download_data(session, data_path: Path, flag_increment=True):
+    if flag_increment:
+        try:
+            os_alpha_ids = load_obj(str(data_path / 'os_alpha_ids'))
+            os_alpha_pnls = load_obj(str(data_path / 'os_alpha_pnls'))
+            ppac_alpha_ids = load_obj(str(data_path / 'ppac_alpha_ids'))
+            exist_alpha = [alpha for ids in os_alpha_ids.values() for alpha in ids]
+            print("Loaded cached alpha data")
+        except (FileNotFoundError, EOFError, pickle.UnpicklingError) as e:
+            # Normal on a first run, or when the cache files are corrupted
+            os_alpha_ids = None
+            os_alpha_pnls = None
+            exist_alpha = []
+            ppac_alpha_ids = []
+            if isinstance(e, FileNotFoundError):
+                print("First run; downloading base data...")
+            else:
+                print("Cache files may be corrupted; re-downloading data...")
+        except Exception as e:
+            # Any other failure
+            print(f"Problem loading cached data; re-downloading: {str(e)[:50]}...")
+            os_alpha_ids = None
+            os_alpha_pnls = None
+            exist_alpha = []
+            ppac_alpha_ids = []
+    else:
+        os_alpha_ids = None
+        os_alpha_pnls = None
+        exist_alpha = []
+        ppac_alpha_ids = []
+
+    if os_alpha_ids is None:
+        print("Downloading OS alpha data (the first run fetches the full history)...")
+        alphas = get_os_alphas(session, limit=100, get_first=False)
+    else:
+        alphas = get_os_alphas(session, limit=30, get_first=True)
+
+    alphas = [item for item in alphas if item['id'] not in exist_alpha]
+    ppac_alpha_ids += [item['id'] for item in alphas for item_match in item['classifications'] if
+                       item_match['name'] == 'Power Pool Alpha']
+
+    os_alpha_ids, os_alpha_pnls = get_alpha_pnls(session, alphas, alpha_pnls=os_alpha_pnls, alpha_ids=os_alpha_ids)
+
+    try:
+        save_obj(os_alpha_ids, str(data_path / 'os_alpha_ids'))
+        save_obj(os_alpha_pnls, str(data_path / 'os_alpha_pnls'))
+        save_obj(ppac_alpha_ids, str(data_path / 'ppac_alpha_ids'))
+        print('Data saved to the cache files')
+    except Exception as e:
+        print(f"Problem saving the cache files (this run is unaffected): {str(e)[:50]}...")
+
+    if alphas:
+        print(f'Newly downloaded alphas: {len(alphas)}, total alphas now: {os_alpha_pnls.shape[1]}')
+    else:
+        print(f'No new alphas to download; using the cached data: {os_alpha_pnls.shape[1]} alphas')
+    return os_alpha_ids, os_alpha_pnls
+
+def load_data(data_path: Path, tag='PPAC'):
+    try:
+        os_alpha_ids = load_obj(str(data_path / 'os_alpha_ids'))
+        os_alpha_pnls = load_obj(str(data_path / 'os_alpha_pnls'))
+        ppac_alpha_ids = load_obj(str(data_path / 'ppac_alpha_ids'))
+
+        # Validate the loaded data
+        if not os_alpha_ids or (hasattr(os_alpha_pnls, 'empty') and os_alpha_pnls.empty):
+            raise ValueError("Cache files are empty or invalid")
+
+    except (FileNotFoundError, EOFError, pickle.UnpicklingError, ValueError) as e:
+        print(f"Could not load cached data: {str(e)[:50]}...")
+        # Return empty data; the caller must handle this case
+        return {}, pd.DataFrame()
+    except Exception as e:
+        print(f"Unexpected error while loading data: {str(e)[:50]}...")
+        return {}, pd.DataFrame()
+
+    if tag == 'PPAC':
+        for item in os_alpha_ids:
+            os_alpha_ids[item] = [alpha for alpha in os_alpha_ids[item] if alpha in ppac_alpha_ids]
+    elif tag == 'SelfCorr':
+        for item in os_alpha_ids:
+            os_alpha_ids[item] = [alpha for alpha in os_alpha_ids[item] if alpha not in ppac_alpha_ids]
+    else:
+        os_alpha_ids = os_alpha_ids  # Any other tag keeps every alpha
+
+    exist_alpha = [alpha for ids in os_alpha_ids.values() for alpha in ids]
+    if not exist_alpha:
+        print("Warning: no alphas matched the requested filter")
+        return os_alpha_ids, pd.DataFrame()
+
+    os_alpha_pnls = os_alpha_pnls[exist_alpha]
+    os_alpha_rets = os_alpha_pnls - os_alpha_pnls.ffill().shift(1)
+    os_alpha_rets = os_alpha_rets[pd.to_datetime(os_alpha_rets.index) > pd.to_datetime(os_alpha_rets.index).max() - pd.DateOffset(years=4)]
+    return os_alpha_ids, os_alpha_rets
+
+# ===== New function: compute PPAC self-correlation =====
+def calculate_ppac_correlation_for_alphas(session, data_path, alpha_df, tag='PPAC',
+                                          max_workers=5):
+    """Compute the PPAC self-correlation for a list of alphas."""
+    # Download and load the base data needed for the PPAC correlation
+    print("\nDownloading base data for the PPAC correlation calculation...")
+    download_data(session, data_path, flag_increment=True)
+    print("\nLoading PPAC correlation data...")
+    os_alpha_ids, os_alpha_rets = load_data(data_path, tag=tag)
+
+    # Check whether the data loaded successfully
+    if os_alpha_rets.empty:
+        print("Warning: not enough PPAC alpha data to compute correlations")
+        print("On a first run the download may still be in progress, or the current region may have too few PPAC alphas")
+        # Return the original DataFrame without PPAC correlation data
+        return alpha_df.copy()
+
+    # Check whether the target region has data
+    target_region = alpha_df['alpha_id'].apply(
+        lambda x: wait_get(session, f"https://api.worldquantbrain.com/alphas/{x}").json()['settings']['region']
+    ).iloc[0] if not alpha_df.empty else None
+
+    if target_region and target_region not in os_alpha_ids:
+        print(f"Warning: no PPAC alpha data found for region '{target_region}'")
+        print("Attempting a full download to fetch data for that region...")
+        # Force a full download
+        download_data(session, data_path, flag_increment=False)
+        # Reload the data
+        os_alpha_ids, os_alpha_rets = load_data(data_path, tag=tag)
+
+        # Check whether the reloaded data is valid
+        if os_alpha_rets.empty:
+            print("Warning: still not enough PPAC alpha data after reloading")
+            return alpha_df.copy()
+
+        # Check again
+        if target_region not in os_alpha_ids:
+            print(f"Warning: even after a full download, no PPAC alpha data found for region '{target_region}'")
+            print("You may have no PPAC alphas in that region, or the data has not synced yet")
+            # Return the original DataFrame without PPAC correlation data
+            return alpha_df.copy()
+
+    # Compute the PPAC self-correlation for each alpha
+    print(f"\nComputing PPAC self-correlation for {len(alpha_df)} alphas...")
+    alpha_ids = alpha_df['alpha_id'].tolist()
+    ppac_corr_results = []
+
+    def process_alpha(alpha_id):
+        try:
+            ppac_corr = calc_self_corr(session=session,
+                                       alpha_id=alpha_id,
+                                       os_alpha_rets=os_alpha_rets,
+                                       os_alpha_ids=os_alpha_ids)
+            return alpha_id, ppac_corr
+        except Exception as e:
+            print(f"Failed to compute PPAC self-correlation for alpha {alpha_id}: {e}")
+            return alpha_id, None
+
+    # Run in parallel with a thread pool
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = [executor.submit(process_alpha, alpha_id) for alpha_id in alpha_ids]
+        for future in tqdm(as_completed(futures), total=len(futures), desc="Computing PPAC self-correlation"):
+            alpha_id, ppac_corr = future.result()
+            if ppac_corr is not None:
+                ppac_corr_results.append({"alpha_id": alpha_id, "ppac_correlation": ppac_corr})
+
+    # Build the result DataFrame
+    if ppac_corr_results:
+        ppac_corr_df = pd.DataFrame(ppac_corr_results)
+        # Merge into the original DataFrame
+        result_df = alpha_df.merge(ppac_corr_df, on='alpha_id', how='left')
+    else:
+        print("Warning: could not compute the PPAC correlation for any alpha")
+        result_df = alpha_df.copy()
+
+    return result_df
+
+# ===== Combined function =====
+def calculate_self_correlation_for_alphas(session, data_path, alpha_df, tag='SelfCorr',
+                                          max_workers=5):
+    """Compute the self-correlation for a list of alphas."""
+    # Download and load the base data needed for the self-correlation
+    print("\nDownloading base data for the self-correlation calculation...")
+    download_data(session, data_path, flag_increment=True)
+    print("\nLoading self-correlation data...")
+    os_alpha_ids, os_alpha_rets = load_data(data_path, tag=tag)
+
+    # Check whether the data loaded successfully
+    if os_alpha_rets.empty:
+        print("Warning: not enough OS alpha data to compute self-correlations")
+        print("On a first run the download may still be in progress; please retry later")
+        # Return the original DataFrame without correlation data
+        return alpha_df.copy()
+
+    # Check whether the target region has data
+    target_region = alpha_df['alpha_id'].apply(
+        lambda x: wait_get(session, f"https://api.worldquantbrain.com/alphas/{x}").json()['settings']['region']
+    ).iloc[0] if not alpha_df.empty else None
+
+    if target_region and target_region not in os_alpha_ids:
+        print(f"Warning: no OS alpha data found for region '{target_region}'")
+        print("Attempting a full download to fetch data for that region...")
+        # Force a full download
+        download_data(session, data_path, flag_increment=False)
+        # Reload the data
+        os_alpha_ids, os_alpha_rets = load_data(data_path, tag=tag)
+
+        # Check whether the reloaded data is valid
+        if os_alpha_rets.empty:
+            print("Warning: still not enough OS alpha data after reloading")
+            return alpha_df.copy()
+
+        # Check again
+        if target_region not in os_alpha_ids:
+            print(f"Warning: even after a full download, no OS alpha data found for region '{target_region}'")
+            print("You may have no OS alphas in that region, or the data has not synced yet")
+            # Return the original DataFrame without correlation data
+            return alpha_df.copy()
+
+    # Compute the self-correlation for each alpha
+    print(f"\nComputing self-correlation for {len(alpha_df)} alphas...")
+    alpha_ids = alpha_df['alpha_id'].tolist()
+    self_corr_results = []
+
+    def process_alpha(alpha_id):
+        try:
+            self_corr = calc_self_corr(session=session,
+                                       alpha_id=alpha_id,
+                                       os_alpha_rets=os_alpha_rets,
+                                       os_alpha_ids=os_alpha_ids)
+            return alpha_id, self_corr
+        except Exception as e:
+            print(f"Failed to compute self-correlation for alpha {alpha_id}: {e}")
+            return alpha_id, None
+
+    # Run in parallel with a thread pool
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = [executor.submit(process_alpha, alpha_id) for alpha_id in alpha_ids]
+        for future in tqdm(as_completed(futures), total=len(futures), desc="Computing self-correlation"):
+            alpha_id, self_corr = future.result()
+            if self_corr is not None:
+                self_corr_results.append({"alpha_id": alpha_id, "self_correlation": self_corr})
+
+    # Build the result DataFrame
+    if self_corr_results:
+        self_corr_df = pd.DataFrame(self_corr_results)
+        # Merge into the original DataFrame
+        result_df = alpha_df.merge(self_corr_df, on='alpha_id', how='left')
+    else:
+        print("Warning: could not compute the self-correlation for any alpha")
+        result_df = alpha_df.copy()
+
+    return result_df
+
+# ===== Main function =====
+def main():
+    parser = argparse.ArgumentParser(description='Calculate alpha self-correlation and PPAC correlation')
+    parser.add_argument('--start-date', default=DEFAULT_START_DATE, help='Start date in MM-DD format')
+    parser.add_argument('--end-date', default=DEFAULT_END_DATE, help='End date in MM-DD format')
+    parser.add_argument('--region', default=DEFAULT_REGION, help='Market region (e.g., IND, USA, EUR)')
+    parser.add_argument('--sharpe-threshold', type=float, default=DEFAULT_SHARPE_THRESHOLD, help='Sharpe ratio threshold')
+    parser.add_argument('--fitness-threshold', type=float, default=DEFAULT_FITNESS_THRESHOLD, help='Fitness threshold')
+    parser.add_argument('--alpha-num', type=int, default=DEFAULT_ALPHA_NUM, help='Number of alphas to retrieve')
+    parser.add_argument('--username', help='BRAIN platform email')
+    parser.add_argument('--password', help='BRAIN platform password')
+    parser.add_argument('--output', help='Output Excel file name (default: auto-generated)')
+    parser.add_argument('--max-workers', type=int, default=DEFAULT_MAX_WORKERS, help='Maximum workers for correlation calculation')
+
+    args = parser.parse_args()
+
+    # Check and install the required packages
+    if not check_and_install_requirements():
+        print("Required dependencies are missing; exiting.")
+        return 1
+
+    # Configuration
+    class cfg:
+        username = args.username or ""
+        password = args.password or ""
+        data_path = Path('.')
+
+    # If no credentials provided, try to get from environment or config
+    if not cfg.username or not cfg.password:
+        # Try to get from environment variables
+        cfg.username = os.environ.get('BRAIN_USERNAME', '')
+        cfg.password = os.environ.get('BRAIN_PASSWORD', '')
+
+    if not cfg.username or not cfg.password:
+        print("Error: a username and password are required")
+        print("Provide them via --username and --password, or set the BRAIN_USERNAME and BRAIN_PASSWORD environment variables")
+        return 1
+
+    # Generate the output file name
+    if args.output:
+        output_file = args.output
+    else:
+        output_file = f"alpha_results_{args.start_date}_{args.region}.xlsx"
+
+    # Sign in
+    print("Signing in to WorldQuant BRAIN...")
+    session = sign_in(cfg.username, cfg.password)
+    if not session:
+        print("Login failed; please check your username and password")
+        return 1
+
+    # Step 1: fetch the qualifying alphas
+    print("\nFetching qualifying alphas...")
+    alpha_data = get_submit_alphas(session=session,
+                                   start_date=args.start_date,
+                                   end_date=args.end_date,
+                                   sharpe_th=args.sharpe_threshold,
+                                   fitness_th=args.fitness_threshold,
+                                   region=args.region,
+                                   alpha_num=args.alpha_num,
+                                   )
+    if not alpha_data:
+        print("No qualifying alphas found")
+        return 0
+
+    # Step 2: rank by Sharpe ratio
+    print("\nRanking by Sharpe ratio...")
+    alpha_df = rank_alphas_by_sharpe(alpha_data)
+    if alpha_df.empty:
+        print("No qualifying alphas found")
+        return 0
+
+    # Step 3: compute the plain self-correlation for these alphas
+    result_df = calculate_self_correlation_for_alphas(session=session,
+                                                      data_path=cfg.data_path,
+                                                      alpha_df=alpha_df,
+                                                      tag='SelfCorr',
+                                                      max_workers=args.max_workers)
+
+    # Step 4: compute the PPAC self-correlation for these alphas
+    result_df = calculate_ppac_correlation_for_alphas(session=session,
+                                                      data_path=cfg.data_path,
+                                                      alpha_df=result_df,  # Use the result of the previous step
+                                                      tag='PPAC',
+                                                      max_workers=args.max_workers)
+
+    # Step 5: save the results to Excel
+    # Select the columns to output
+    output_columns = ["alpha_id", "exp", "check_status", "Rank", "sharpe",
+                      "self_correlation", "ppac_correlation", "turnover", "fitness", "margin",
+                      "dateCreated", "longCount", "shortCount", "decay",
+                      "neutralization", "neutralization_name"]
+    # Keep only the columns that actually exist
+    available_columns = [col for col in output_columns if col in result_df.columns]
+    result_df = result_df[available_columns]
+
+    # Save to Excel
+    with pd.ExcelWriter(output_file) as writer:
+        result_df.to_excel(writer, sheet_name='Alpha Results', index=False)
+    print(f"\nResults saved to: {output_file}")
+
+    # Print the first 10 results
+    print("\nFirst 10 alpha results:")
+    try:
+        # Try a normal print first
+        print(result_df.head(10).to_string(index=False))
+    except UnicodeEncodeError:
+        # Encoding problem; fall back to a simplified view
+        print("Note: falling back to a simplified display because of an encoding problem")
+        # Build a simplified view that avoids special characters
+        simple_df = result_df.head(10).copy()
+        # Truncate columns that may contain special characters
+        if 'exp' in simple_df.columns:
+            simple_df['exp'] = simple_df['exp'].apply(lambda x: str(x)[:50] + '...' if len(str(x)) > 50 else str(x))
+        # Print only a subset of the columns
+        safe_columns = ['alpha_id', 'check_status', 'Rank', 'sharpe', 'self_correlation', 'ppac_correlation']
+        available_columns = [col for col in safe_columns if col in simple_df.columns]
+        if available_columns:
+            print(simple_df[available_columns].to_string(index=False))
+        else:
+            print("Could not display the results; please check the generated Excel file")
+
+    # Print summary statistics
+    print("\nSummary statistics:")
+    print(f"Total alphas: {len(result_df)}")
+    if 'self_correlation' in result_df.columns:
+        print(f"Mean self-correlation: {result_df['self_correlation'].mean():.4f}")
+        print(f"Max self-correlation: {result_df['self_correlation'].max():.4f}")
+        print(f"Min self-correlation: {result_df['self_correlation'].min():.4f}")
+    if 'ppac_correlation' in result_df.columns:
+        print(f"Mean PPAC correlation: {result_df['ppac_correlation'].mean():.4f}")
+        print(f"Max PPAC correlation: {result_df['ppac_correlation'].max():.4f}")
+        print(f"Min PPAC correlation: {result_df['ppac_correlation'].min():.4f}")
+
+    # Distribution of neutralization settings
+    if 'neutralization_name' in result_df.columns:
+        print("\nNeutralization setting distribution:")
+        print(result_df['neutralization_name'].value_counts())
+
+    return 0
+
+if __name__ == "__main__":
+    try:
+        sys.exit(main())
+    except KeyboardInterrupt:
+        print("\n\nInterrupted by user")
+        sys.exit(1)
+    except Exception as e:
+        print(f"\n[ERROR] The program failed: {str(e)}")
+        print("\nPossible causes:")
+        print("1. Network connectivity problems")
+        print("2. The BRAIN API service is temporarily unavailable")
+        print("3. Invalid input parameters")
+        print("4. Insufficient system resources")
+        print("\nSuggestions:")
+        print("1. Check your network connection")
+        print("2. Confirm your username and password are correct")
+        print("3. Try reducing the --alpha-num value")
+        print("4. Retry later")
+        print("\nDetailed error information:")
+        import traceback
+        traceback.print_exc()
+        sys.exit(1)
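
For reference, the new scripts/skill.py above is a standalone CLI. A minimal usage sketch (not part of the package; the credentials, region, and dates below are placeholder values) might look like this:

    import os
    import subprocess
    import sys

    # main() falls back to these environment variables when --username/--password
    # are not passed on the command line. The values here are placeholders.
    os.environ["BRAIN_USERNAME"] = "you@example.com"
    os.environ["BRAIN_PASSWORD"] = "********"

    # Every flag below is defined in the script's argparse configuration.
    subprocess.check_call([
        sys.executable, "skill.py",
        "--region", "USA",         # market region (default: IND)
        "--start-date", "01-10",   # MM-DD; the script assumes the current year
        "--end-date", "01-11",
        "--alpha-num", "100",      # number of alphas to retrieve
        "--max-workers", "5",      # thread-pool size for the correlation steps
    ])

On success the script writes an Excel workbook (alpha_results_<start-date>_<region>.xlsx unless --output is given) with self_correlation and ppac_correlation columns merged into the Sharpe-ranked alpha table.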