aigroup-econ-mcp 0.4.2__py3-none-any.whl → 1.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aigroup_econ_mcp/__init__.py +1 -1
- aigroup_econ_mcp/server.py +451 -451
- aigroup_econ_mcp/tools/__init__.py +8 -7
- aigroup_econ_mcp/tools/data_loader.py +51 -27
- aigroup_econ_mcp/tools/file_parser.py +1026 -828
- aigroup_econ_mcp/tools/ml_regularization.py +22 -8
- aigroup_econ_mcp/tools/panel_data.py +70 -4
- aigroup_econ_mcp/tools/time_series.py +53 -22
- aigroup_econ_mcp/tools/tool_descriptions.py +410 -0
- aigroup_econ_mcp/tools/tool_handlers.py +681 -43
- aigroup_econ_mcp/tools/tool_registry.py +328 -20
- aigroup_econ_mcp-1.3.3.dist-info/METADATA +525 -0
- {aigroup_econ_mcp-0.4.2.dist-info → aigroup_econ_mcp-1.3.3.dist-info}/RECORD +16 -15
- aigroup_econ_mcp-0.4.2.dist-info/METADATA +0 -360
- {aigroup_econ_mcp-0.4.2.dist-info → aigroup_econ_mcp-1.3.3.dist-info}/WHEEL +0 -0
- {aigroup_econ_mcp-0.4.2.dist-info → aigroup_econ_mcp-1.3.3.dist-info}/entry_points.txt +0 -0
- {aigroup_econ_mcp-0.4.2.dist-info → aigroup_econ_mcp-1.3.3.dist-info}/licenses/LICENSE +0 -0
aigroup_econ_mcp/tools/__init__.py
@@ -3,16 +3,17 @@
 """
 
 from . import regression, statistics, time_series, machine_learning, panel_data
-from . import validation, cache, monitoring, file_parser
+from . import validation, cache, monitoring, file_parser, tool_descriptions
 
 __all__ = [
-    "regression",
-    "statistics",
-    "time_series",
-    "machine_learning",
+    "regression",
+    "statistics",
+    "time_series",
+    "machine_learning",
     "panel_data",
     "validation",
-    "cache",
+    "cache",
     "monitoring",
-    "file_parser"
+    "file_parser",
+    "tool_descriptions"
 ]
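The practical effect of this hunk is that `tool_descriptions` joins the package's public surface. A minimal consumer-side sketch, relying only on names visible in the hunk above:

```python
# After upgrading to 1.3.3, tool_descriptions is imported eagerly by the
# package and advertised via __all__ (both facts are visible in the hunk).
from aigroup_econ_mcp import tools

assert "tool_descriptions" in tools.__all__   # added in 1.3.3
assert hasattr(tools, "tool_descriptions")    # eagerly imported by __init__.py
```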
aigroup_econ_mcp/tools/data_loader.py
@@ -1,11 +1,12 @@
 """
 数据加载辅助模块
-
+提供通用的文件加载功能,支持CSV、JSON和TXT格式
 """
 
 from typing import Dict, List, Union
 from pathlib import Path
 import pandas as pd
+from .file_parser import FileParser
 
 
 async def load_data_if_path(
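The remaining data_loader.py hunks all consume the object returned by `FileParser.parse_file_path(str(path), "auto")` through the keys `data`, `format`, `variables`, `n_variables` and `n_observations`. A hypothetical illustration of that shape, inferred only from how those keys are used below; the format, column names and values are invented:

```python
# Hypothetical illustration of the parse result consumed by data_loader.py.
# Only the key names come from the diff; the concrete values are invented.
parsed = {
    "format": "csv",                 # detected format ("auto" lets the parser decide)
    "variables": ["gdp", "cpi"],     # column names in file order
    "n_variables": 2,
    "n_observations": 3,
    "data": {                        # column-oriented values keyed by variable name
        "gdp": [1.0, 2.0, 3.0],
        "cpi": [4.0, 5.0, 6.0],
    },
}
```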
@@ -16,7 +17,7 @@ async def load_data_if_path(
     智能加载数据:如果是字符串则作为文件路径加载,否则直接返回
 
     Args:
-        data:
+        data: 数据字典或文件路径(支持CSV/JSON/TXT)
         ctx: MCP上下文对象(可选,用于日志)
 
     Returns:
@@ -40,23 +41,28 @@ async def load_data_if_path(
             if not path.exists():
                 raise ValueError(f"文件不存在: {data}")
 
-            #
-
+            # 使用FileParser解析文件(支持CSV/JSON/TXT自动检测)
+            parsed = FileParser.parse_file_path(str(path), "auto")
 
-            #
-            result =
+            # 返回数据字典
+            result = parsed["data"]
 
             if ctx:
-                await ctx.info(
+                await ctx.info(
+                    f"✅ {parsed['format'].upper()}文件加载成功:"
+                    f"{parsed['n_variables']}个变量,{parsed['n_observations']}个观测"
+                )
 
             return result
 
         except FileNotFoundError:
             raise ValueError(f"文件不存在: {data}")
         except Exception as e:
-            raise ValueError(f"
+            raise ValueError(f"文件读取失败: {str(e)}")
 
     # 其他类型报错
+    raise TypeError(f"不支持的数据类型: {type(data)},期望Dict或str")
+
 
 async def load_single_var_if_path(
     data: Union[List[float], str],
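A usage sketch of the rewritten branch, under the assumption that `FileParser` accepts a plain comma-separated file; the temporary file and its columns are invented for illustration:

```python
# Illustrative only: the temporary CSV and its columns are invented, and the
# dict pass-through branch comes from the docstring, not from this hunk.
import asyncio
import pathlib
import tempfile

from aigroup_econ_mcp.tools.data_loader import load_data_if_path

async def demo():
    # Dict input is returned unchanged, per the docstring's "否则直接返回" branch.
    inline = await load_data_if_path({"gdp": [1.0, 2.0, 3.0]})

    # String input is treated as a path and parsed by FileParser with "auto" detection.
    csv_path = pathlib.Path(tempfile.mkdtemp()) / "demo.csv"
    csv_path.write_text("gdp,cpi\n1.0,4.0\n2.0,5.0\n3.0,6.0\n", encoding="utf-8")
    from_file = await load_data_if_path(str(csv_path))

    return inline, from_file

print(asyncio.run(demo()))
```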
@@ -67,9 +73,9 @@ async def load_single_var_if_path(
     智能加载单变量数据:如果是字符串则作为文件路径加载,否则直接返回
 
     Args:
-        data:
+        data: 数据列表或文件路径(支持CSV/JSON/TXT)
         ctx: MCP上下文对象(可选,用于日志)
-        column_name:
+        column_name: 文件中要读取的列名(可选,默认读取第一列)
 
     Returns:
         数据列表
@@ -92,32 +98,41 @@ async def load_single_var_if_path(
             if not path.exists():
                 raise ValueError(f"文件不存在: {data}")
 
-            #
-
+            # 使用FileParser解析文件
+            parsed = FileParser.parse_file_path(str(path), "auto")
+            data_dict = parsed["data"]
 
             # 确定要读取的列
             if column_name:
-                if column_name not in
-                    raise ValueError(
-
+                if column_name not in data_dict:
+                    raise ValueError(
+                        f"列'{column_name}'不存在于文件中。"
+                        f"可用列: {list(data_dict.keys())}"
+                    )
+                result = data_dict[column_name]
             else:
                 # 默认读取第一列
-
+                first_col = parsed["variables"][0]
+                result = data_dict[first_col]
                 if ctx:
-                    await ctx.info(f"未指定列名,使用第一列: {
+                    await ctx.info(f"未指定列名,使用第一列: {first_col}")
 
             if ctx:
-                await ctx.info(
+                await ctx.info(
+                    f"✅ {parsed['format'].upper()}文件加载成功:{len(result)}个观测"
+                )
 
             return result
 
         except FileNotFoundError:
             raise ValueError(f"文件不存在: {data}")
         except Exception as e:
-            raise ValueError(f"
+            raise ValueError(f"文件读取失败: {str(e)}")
 
     # 其他类型报错
     raise TypeError(f"不支持的数据类型: {type(data)},期望List或str")
+
+
 async def load_x_data_if_path(
     data: Union[List[List[float]], str],
     ctx = None
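Under the same assumption about `FileParser`, a sketch of the two column-selection paths this hunk introduces: an explicit `column_name` versus the default first column. File contents and column names are invented:

```python
# Illustrative sketch; column_name is passed as a keyword since only its
# docstring entry, not the full signature, appears in this diff.
import asyncio
import pathlib
import tempfile

from aigroup_econ_mcp.tools.data_loader import load_single_var_if_path

async def demo():
    path = pathlib.Path(tempfile.mkdtemp()) / "series.csv"
    path.write_text("gdp,cpi\n1.0,4.0\n2.0,5.0\n", encoding="utf-8")

    by_name = await load_single_var_if_path(str(path), column_name="cpi")  # the "cpi" column
    default = await load_single_var_if_path(str(path))                     # falls back to the first column
    return by_name, default

print(asyncio.run(demo()))
```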
@@ -126,7 +141,7 @@ async def load_x_data_if_path(
     智能加载自变量数据:如果是字符串则作为文件路径加载,否则直接返回
 
     Args:
-        data:
+        data: 自变量数据(二维列表)或文件路径(支持CSV/JSON/TXT)
        ctx: MCP上下文对象(可选,用于日志)
 
     Returns:
@@ -150,22 +165,31 @@ async def load_x_data_if_path(
             if not path.exists():
                 raise ValueError(f"文件不存在: {data}")
 
-            #
-
+            # 使用FileParser解析文件
+            parsed = FileParser.parse_file_path(str(path), "auto")
+            data_dict = parsed["data"]
 
             # 转换为二维列表格式
-
+            variables = parsed["variables"]
+            n_obs = parsed["n_observations"]
+
+            result = []
+            for i in range(n_obs):
+                row = [data_dict[var][i] for var in variables]
+                result.append(row)
 
             if ctx:
-                await ctx.info(
+                await ctx.info(
+                    f"✅ 自变量{parsed['format'].upper()}文件加载成功:"
+                    f"{len(result)}个观测,{len(variables)}个自变量"
+                )
 
             return result
 
         except FileNotFoundError:
             raise ValueError(f"文件不存在: {data}")
         except Exception as e:
-            raise ValueError(f"
+            raise ValueError(f"自变量文件读取失败: {str(e)}")
 
     # 其他类型报错
-    raise TypeError(f"不支持的数据类型: {type(data)},期望List[List[float]]或str")
-    raise TypeError(f"不支持的数据类型: {type(data)},期望Dict或str")
+    raise TypeError(f"不支持的数据类型: {type(data)},期望List[List[float]]或str")