aigroup-econ-mcp 1.4.3__py3-none-any.whl → 2.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- PKG-INFO +344 -322
- README.md +335 -320
- __init__.py +1 -1
- aigroup_econ_mcp-2.0.1.dist-info/METADATA +732 -0
- aigroup_econ_mcp-2.0.1.dist-info/RECORD +170 -0
- cli.py +4 -0
- econometrics/advanced_methods/modern_computing_machine_learning/__init__.py +30 -0
- econometrics/advanced_methods/modern_computing_machine_learning/causal_forest.py +253 -0
- econometrics/advanced_methods/modern_computing_machine_learning/double_ml.py +268 -0
- econometrics/advanced_methods/modern_computing_machine_learning/gradient_boosting.py +249 -0
- econometrics/advanced_methods/modern_computing_machine_learning/hierarchical_clustering.py +243 -0
- econometrics/advanced_methods/modern_computing_machine_learning/kmeans_clustering.py +293 -0
- econometrics/advanced_methods/modern_computing_machine_learning/neural_network.py +264 -0
- econometrics/advanced_methods/modern_computing_machine_learning/random_forest.py +195 -0
- econometrics/advanced_methods/modern_computing_machine_learning/support_vector_machine.py +226 -0
- econometrics/advanced_methods/modern_computing_machine_learning/test_all_modules.py +329 -0
- econometrics/advanced_methods/modern_computing_machine_learning/test_report.md +107 -0
- econometrics/causal_inference/__init__.py +66 -0
- econometrics/causal_inference/causal_identification_strategy/__init__.py +104 -0
- econometrics/causal_inference/causal_identification_strategy/control_function.py +112 -0
- econometrics/causal_inference/causal_identification_strategy/difference_in_differences.py +107 -0
- econometrics/causal_inference/causal_identification_strategy/event_study.py +119 -0
- econometrics/causal_inference/causal_identification_strategy/first_difference.py +89 -0
- econometrics/causal_inference/causal_identification_strategy/fixed_effects.py +103 -0
- econometrics/causal_inference/causal_identification_strategy/hausman_test.py +69 -0
- econometrics/causal_inference/causal_identification_strategy/instrumental_variables.py +145 -0
- econometrics/causal_inference/causal_identification_strategy/mediation_analysis.py +121 -0
- econometrics/causal_inference/causal_identification_strategy/moderation_analysis.py +109 -0
- econometrics/causal_inference/causal_identification_strategy/propensity_score_matching.py +140 -0
- econometrics/causal_inference/causal_identification_strategy/random_effects.py +100 -0
- econometrics/causal_inference/causal_identification_strategy/regression_discontinuity.py +98 -0
- econometrics/causal_inference/causal_identification_strategy/synthetic_control.py +111 -0
- econometrics/causal_inference/causal_identification_strategy/triple_difference.py +86 -0
- econometrics/distribution_analysis/__init__.py +28 -0
- econometrics/distribution_analysis/oaxaca_blinder.py +184 -0
- econometrics/distribution_analysis/time_series_decomposition.py +152 -0
- econometrics/distribution_analysis/variance_decomposition.py +179 -0
- econometrics/missing_data/__init__.py +18 -0
- econometrics/missing_data/imputation_methods.py +219 -0
- econometrics/nonparametric/__init__.py +35 -0
- econometrics/nonparametric/gam_model.py +117 -0
- econometrics/nonparametric/kernel_regression.py +161 -0
- econometrics/nonparametric/quantile_regression.py +249 -0
- econometrics/nonparametric/spline_regression.py +100 -0
- econometrics/spatial_econometrics/__init__.py +68 -0
- econometrics/spatial_econometrics/geographically_weighted_regression.py +211 -0
- econometrics/spatial_econometrics/gwr_simple.py +154 -0
- econometrics/spatial_econometrics/spatial_autocorrelation.py +356 -0
- econometrics/spatial_econometrics/spatial_durbin_model.py +177 -0
- econometrics/spatial_econometrics/spatial_regression.py +315 -0
- econometrics/spatial_econometrics/spatial_weights.py +226 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/README.md +164 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/__init__.py +40 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/count_data_models.py +311 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/discrete_choice_models.py +294 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/limited_dependent_variable_models.py +282 -0
- econometrics/statistical_inference/__init__.py +21 -0
- econometrics/statistical_inference/bootstrap_methods.py +162 -0
- econometrics/statistical_inference/permutation_test.py +177 -0
- econometrics/survival_analysis/__init__.py +18 -0
- econometrics/survival_analysis/survival_models.py +259 -0
- econometrics/tests/causal_inference_tests/__init__.py +3 -0
- econometrics/tests/causal_inference_tests/detailed_test.py +441 -0
- econometrics/tests/causal_inference_tests/test_all_methods.py +418 -0
- econometrics/tests/causal_inference_tests/test_causal_identification_strategy.py +202 -0
- econometrics/tests/causal_inference_tests/test_difference_in_differences.py +53 -0
- econometrics/tests/causal_inference_tests/test_instrumental_variables.py +44 -0
- econometrics/tests/specific_data_modeling_tests/test_micro_discrete_limited_data.py +189 -0
- econometrics//321/206/320/254/320/272/321/205/342/225/235/320/220/321/205/320/237/320/241/321/205/320/264/320/267/321/207/342/226/222/342/225/227/321/204/342/225/235/320/250/321/205/320/225/320/230/321/207/342/225/221/320/267/321/205/320/230/320/226/321/206/320/256/320/240.md +544 -0
- pyproject.toml +9 -2
- server.py +15 -1
- tools/__init__.py +75 -1
- tools/causal_inference_adapter.py +658 -0
- tools/distribution_analysis_adapter.py +121 -0
- tools/gwr_simple_adapter.py +54 -0
- tools/machine_learning_adapter.py +567 -0
- tools/mcp_tool_groups/__init__.py +15 -1
- tools/mcp_tool_groups/causal_inference_tools.py +643 -0
- tools/mcp_tool_groups/distribution_analysis_tools.py +169 -0
- tools/mcp_tool_groups/machine_learning_tools.py +422 -0
- tools/mcp_tool_groups/microecon_tools.py +325 -0
- tools/mcp_tool_groups/missing_data_tools.py +117 -0
- tools/mcp_tool_groups/nonparametric_tools.py +225 -0
- tools/mcp_tool_groups/spatial_econometrics_tools.py +323 -0
- tools/mcp_tool_groups/statistical_inference_tools.py +131 -0
- tools/mcp_tools_registry.py +13 -3
- tools/microecon_adapter.py +412 -0
- tools/missing_data_adapter.py +73 -0
- tools/nonparametric_adapter.py +190 -0
- tools/spatial_econometrics_adapter.py +318 -0
- tools/statistical_inference_adapter.py +90 -0
- tools/survival_analysis_adapter.py +46 -0
- aigroup_econ_mcp-1.4.3.dist-info/METADATA +0 -710
- aigroup_econ_mcp-1.4.3.dist-info/RECORD +0 -92
- {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/WHEEL +0 -0
- {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/entry_points.txt +0 -0
- {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/licenses/LICENSE +0 -0
tools/mcp_tool_groups/microecon_tools.py
@@ -0,0 +1,325 @@
+"""
+Microeconometrics tool group
+MCP tools for discrete choice, count data, and limited dependent variable models
+"""
+
+from typing import List, Optional, Union, Dict, Any
+from mcp.server.fastmcp import Context
+from mcp.server.session import ServerSession
+
+from ..mcp_tools_registry import ToolGroup
+from ..microecon_adapter import (
+    logit_adapter,
+    probit_adapter,
+    multinomial_logit_adapter,
+    poisson_adapter,
+    negative_binomial_adapter,
+    tobit_adapter,
+    heckman_adapter
+)
+
+
+class MicroeconometricsTools(ToolGroup):
+    """Microeconometrics tool group"""
+
+    name = "MICROECONOMETRICS"
+    description = "Microeconometric model tools"
+    version = "1.0.0"
+
+    @classmethod
+    def get_tools(cls) -> List[Dict[str, Any]]:
+        """Return the list of tools"""
+        return [
+            {
+                "name": "micro_logit",
+                "handler": cls.logit_tool,
+                "description": "Logistic Regression Model"
+            },
+            {
+                "name": "micro_probit",
+                "handler": cls.probit_tool,
+                "description": "Probit Regression Model"
+            },
+            {
+                "name": "micro_multinomial_logit",
+                "handler": cls.multinomial_logit_tool,
+                "description": "Multinomial Logit Model"
+            },
+            {
+                "name": "micro_poisson",
+                "handler": cls.poisson_tool,
+                "description": "Poisson Regression Model"
+            },
+            {
+                "name": "micro_negative_binomial",
+                "handler": cls.negative_binomial_tool,
+                "description": "Negative Binomial Regression Model"
+            },
+            {
+                "name": "micro_tobit",
+                "handler": cls.tobit_tool,
+                "description": "Tobit Model (Censored Regression)"
+            },
+            {
+                "name": "micro_heckman",
+                "handler": cls.heckman_tool,
+                "description": "Heckman Selection Model"
+            }
+        ]
+
+    @classmethod
+    def get_help_text(cls) -> str:
+        """Return help documentation"""
+        return """
+Microeconometrics tool group - 7 models
+
+Discrete choice models:
+1. Logit Model - micro_logit
+   - Binary logistic regression
+   - For binary dependent variables
+
+2. Probit Model - micro_probit
+   - Probit regression
+   - Based on a normal distribution assumption
+
+3. Multinomial Logit - micro_multinomial_logit
+   - Multinomial logit model
+   - For multi-category outcomes
+
+Count data models:
+4. Poisson Model - micro_poisson
+   - Poisson regression
+   - For count data
+
+5. Negative Binomial - micro_negative_binomial
+   - Negative binomial regression
+   - Handles overdispersion
+
+Limited dependent variable models:
+6. Tobit Model - micro_tobit
+   - Tobit model (censored regression)
+   - For limited dependent variables
+
+7. Heckman Model - micro_heckman
+   - Heckman sample selection model
+   - Corrects sample selection bias
+"""
+
+    @staticmethod
+    async def logit_tool(
+        X_data: Optional[List] = None,
+        y_data: Optional[List[int]] = None,
+        file_path: Optional[str] = None,
+        feature_names: Optional[List[str]] = None,
+        output_format: str = 'json',
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Logit regression analysis"""
+        try:
+            if ctx:
+                await ctx.info("Starting Logit regression analysis...")
+
+            result = logit_adapter(
+                X_data=X_data, y_data=y_data, file_path=file_path,
+                feature_names=feature_names, output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Logit analysis complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def probit_tool(
+        X_data: Optional[List] = None,
+        y_data: Optional[List[int]] = None,
+        file_path: Optional[str] = None,
+        feature_names: Optional[List[str]] = None,
+        output_format: str = 'json',
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Probit regression analysis"""
+        try:
+            if ctx:
+                await ctx.info("Starting Probit regression analysis...")
+
+            result = probit_adapter(
+                X_data=X_data, y_data=y_data, file_path=file_path,
+                feature_names=feature_names, output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Probit analysis complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def multinomial_logit_tool(
+        X_data: Optional[List] = None,
+        y_data: Optional[List[int]] = None,
+        file_path: Optional[str] = None,
+        feature_names: Optional[List[str]] = None,
+        output_format: str = 'json',
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Multinomial Logit analysis"""
+        try:
+            if ctx:
+                await ctx.info("Starting Multinomial Logit analysis...")
+
+            result = multinomial_logit_adapter(
+                X_data=X_data, y_data=y_data, file_path=file_path,
+                feature_names=feature_names, output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Multinomial Logit analysis complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def poisson_tool(
+        X_data: Optional[List] = None,
+        y_data: Optional[List[int]] = None,
+        file_path: Optional[str] = None,
+        feature_names: Optional[List[str]] = None,
+        output_format: str = 'json',
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Poisson regression analysis"""
+        try:
+            if ctx:
+                await ctx.info("Starting Poisson regression analysis...")
+
+            result = poisson_adapter(
+                X_data=X_data, y_data=y_data, file_path=file_path,
+                feature_names=feature_names, output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Poisson analysis complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def negative_binomial_tool(
+        X_data: Optional[List] = None,
+        y_data: Optional[List[int]] = None,
+        file_path: Optional[str] = None,
+        feature_names: Optional[List[str]] = None,
+        distr: str = 'nb2',
+        output_format: str = 'json',
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Negative binomial regression analysis"""
+        try:
+            if ctx:
+                await ctx.info("Starting Negative Binomial regression analysis...")
+
+            result = negative_binomial_adapter(
+                X_data=X_data, y_data=y_data, file_path=file_path,
+                feature_names=feature_names, distr=distr,
+                output_format=output_format, save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Negative Binomial analysis complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def tobit_tool(
+        X_data: Optional[List] = None,
+        y_data: Optional[List[float]] = None,
+        file_path: Optional[str] = None,
+        feature_names: Optional[List[str]] = None,
+        lower_bound: float = 0.0,
+        upper_bound: Optional[float] = None,
+        output_format: str = 'json',
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Tobit model analysis"""
+        try:
+            if ctx:
+                await ctx.info("Starting Tobit model analysis...")
+
+            result = tobit_adapter(
+                X_data=X_data, y_data=y_data, file_path=file_path,
+                feature_names=feature_names, lower_bound=lower_bound,
+                upper_bound=upper_bound, output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Tobit analysis complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def heckman_tool(
+        X_select_data: Optional[List] = None,
+        Z_data: Optional[List] = None,
+        y_data: Optional[List[float]] = None,
+        s_data: Optional[List[int]] = None,
+        file_path: Optional[str] = None,
+        selection_feature_names: Optional[List[str]] = None,
+        outcome_feature_names: Optional[List[str]] = None,
+        output_format: str = 'json',
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Heckman sample selection model analysis"""
+        try:
+            if ctx:
+                await ctx.info("Starting Heckman selection model analysis...")
+
+            result = heckman_adapter(
+                X_select_data=X_select_data, Z_data=Z_data,
+                y_data=y_data, s_data=s_data, file_path=file_path,
+                selection_feature_names=selection_feature_names,
+                outcome_feature_names=outcome_feature_names,
+                output_format=output_format, save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Heckman analysis complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
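Note (not part of the diff): every handler above repeats the same wrapper pattern — optionally report progress through the MCP context, call a synchronous adapter, and re-raise after logging on failure. The sketch below is a minimal, self-contained illustration of that pattern factored into a reusable decorator. The stand-in context class and the toy adapter are hypothetical; the real Context/ServerSession types come from the MCP SDK and are not used here.

import asyncio
import functools

class _StubCtx:
    """Stand-in for the MCP Context used above (assumed to expose async info/error)."""
    async def info(self, msg: str) -> None:
        print(f"[info] {msg}")
    async def error(self, msg: str) -> None:
        print(f"[error] {msg}")

def reports_progress(label: str):
    """Wrap a synchronous adapter with the start/complete/error reporting seen in each tool."""
    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, ctx=None, **kwargs):
            try:
                if ctx:
                    await ctx.info(f"Starting {label}...")
                result = func(*args, **kwargs)  # adapters above are plain callables
                if ctx:
                    await ctx.info(f"{label} complete")
                return result
            except Exception as e:
                if ctx:
                    await ctx.error(f"Error: {e}")
                raise
        return wrapper
    return decorator

@reports_progress("Logit regression analysis")
def toy_adapter(rows):
    return f"fitted on {len(rows)} rows"

# asyncio.run(toy_adapter([1, 2, 3], ctx=_StubCtx())) prints the progress messages
# and returns "fitted on 3 rows".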
tools/mcp_tool_groups/missing_data_tools.py
@@ -0,0 +1,117 @@
+"""
+Missing data handling tool group
+"""
+
+from typing import List, Optional, Dict, Any
+from mcp.server.fastmcp import Context
+from mcp.server.session import ServerSession
+
+from ..mcp_tools_registry import ToolGroup
+from ..missing_data_adapter import (
+    simple_imputation_adapter,
+    multiple_imputation_adapter
+)
+
+
+class MissingDataTools(ToolGroup):
+    """Missing data handling tool group"""
+
+    name = "MISSING DATA HANDLING"
+    description = "Missing data imputation and handling tools"
+    version = "1.0.0"
+
+    @classmethod
+    def get_tools(cls) -> List[Dict[str, Any]]:
+        """Return the list of tools"""
+        return [
+            {
+                "name": "missing_data_simple_imputation",
+                "handler": cls.simple_imputation_tool,
+                "description": "Simple Imputation (Mean/Median/Mode/Constant)"
+            },
+            {
+                "name": "missing_data_multiple_imputation",
+                "handler": cls.multiple_imputation_tool,
+                "description": "Multiple Imputation (MICE)"
+            }
+        ]
+
+    @classmethod
+    def get_help_text(cls) -> str:
+        return """
+Missing data handling tool group - 2 tools
+
+1. Simple Imputation (missing_data_simple_imputation)
+   - Simple imputation methods
+   - Mean/median/mode/constant fill
+   - Based on: sklearn.impute
+
+2. Multiple Imputation (missing_data_multiple_imputation)
+   - Multiple imputation (MICE)
+   - Iterative imputation algorithm
+   - Based on: sklearn.impute
+"""
+
+    @staticmethod
+    async def simple_imputation_tool(
+        data: List[List[float]],
+        strategy: str = "mean",
+        fill_value: Optional[float] = None,
+        output_format: str = "json",
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Simple imputation"""
+        try:
+            if ctx:
+                await ctx.info(f"Starting simple imputation ({strategy})...")
+
+            result = simple_imputation_adapter(
+                data=data,
+                strategy=strategy,
+                fill_value=fill_value,
+                output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Simple imputation complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def multiple_imputation_tool(
+        data: List[List[float]],
+        n_imputations: int = 5,
+        max_iter: int = 10,
+        random_state: Optional[int] = None,
+        output_format: str = "json",
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Multiple imputation (MICE)"""
+        try:
+            if ctx:
+                await ctx.info(f"Starting multiple imputation (n={n_imputations})...")
+
+            result = multiple_imputation_adapter(
+                data=data,
+                n_imputations=n_imputations,
+                max_iter=max_iter,
+                random_state=random_state,
+                output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Multiple imputation complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
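Note (not part of the diff): the help text states that both imputation tools are built on sklearn.impute. As a point of reference, the snippet below shows how a mean fill and a MICE-style multiple imputation can be produced directly with scikit-learn; the toy matrix and the choice of five completed datasets (mirroring the tool's n_imputations=5 default) are illustrative assumptions, not code from the package.

import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401  (enables IterativeImputer)
from sklearn.impute import SimpleImputer, IterativeImputer

# Toy matrix with missing entries
data = np.array([[1.0, 2.0, np.nan],
                 [3.0, np.nan, 6.0],
                 [np.nan, 5.0, 9.0],
                 [4.0, 8.0, 12.0]])

# Simple imputation: fill each missing cell with its column mean
mean_filled = SimpleImputer(strategy="mean").fit_transform(data)

# MICE-style multiple imputation: model each column from the others, draw from the
# posterior, and repeat with different seeds to obtain several completed datasets
completed = [
    IterativeImputer(max_iter=10, sample_posterior=True, random_state=seed).fit_transform(data)
    for seed in range(5)
]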
tools/mcp_tool_groups/nonparametric_tools.py
@@ -0,0 +1,225 @@
+"""
+Nonparametric and semiparametric methods tool group
+Includes kernel regression, quantile regression, spline regression, and GAM
+"""
+
+from typing import List, Optional, Dict, Any
+from mcp.server.fastmcp import Context
+from mcp.server.session import ServerSession
+
+from ..mcp_tools_registry import ToolGroup
+from ..nonparametric_adapter import (
+    kernel_regression_adapter,
+    quantile_regression_adapter,
+    spline_regression_adapter,
+    gam_adapter
+)
+
+
+class NonparametricTools(ToolGroup):
+    """Nonparametric and semiparametric methods tool group"""
+
+    name = "NONPARAMETRIC & SEMIPARAMETRIC METHODS"
+    description = "Nonparametric and semiparametric analysis tools"
+    version = "1.0.0"
+
+    @classmethod
+    def get_tools(cls) -> List[Dict[str, Any]]:
+        """Return the list of tools"""
+        return [
+            {
+                "name": "nonparametric_kernel_regression",
+                "handler": cls.kernel_regression_tool,
+                "description": "Kernel Regression (Nonparametric)"
+            },
+            {
+                "name": "nonparametric_quantile_regression",
+                "handler": cls.quantile_regression_tool,
+                "description": "Quantile Regression"
+            },
+            {
+                "name": "nonparametric_spline_regression",
+                "handler": cls.spline_regression_tool,
+                "description": "Spline Regression"
+            },
+            {
+                "name": "nonparametric_gam_model",
+                "handler": cls.gam_tool,
+                "description": "Generalized Additive Model (GAM)"
+            }
+        ]
+
+    @classmethod
+    def get_help_text(cls) -> str:
+        """Return help documentation"""
+        return """
+Nonparametric and semiparametric methods tool group - 4 tools
+
+1. Kernel Regression (nonparametric_kernel_regression)
+   - Kernel regression estimation
+   - Supported kernels: Gaussian, Epanechnikov, Uniform, Triangular, Biweight
+   - Bandwidth selection: cross-validation, AIC, normal reference rule
+   - Based on: statsmodels.nonparametric
+
+2. Quantile Regression (nonparametric_quantile_regression)
+   - Quantile regression
+   - Estimates conditional quantiles
+   - Robust to outliers
+   - Based on: statsmodels.regression.quantile_regression
+
+3. Spline Regression (nonparametric_spline_regression)
+   - Spline regression
+   - Flexible nonlinear fitting
+   - Based on: sklearn.preprocessing
+
+4. Generalized Additive Model (nonparametric_gam_model)
+   - Generalized additive model
+   - Sum of multiple smooth functions
+   - Based on: pygam
+"""
+
+    @staticmethod
+    async def kernel_regression_tool(
+        y_data: Optional[List[float]] = None,
+        x_data: Optional[List[List[float]]] = None,
+        file_path: Optional[str] = None,
+        kernel_type: str = "gaussian",
+        bandwidth: Optional[List[float]] = None,
+        bandwidth_method: str = "cv_ls",
+        variable_type: Optional[str] = None,
+        output_format: str = "json",
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Kernel regression analysis"""
+        try:
+            if ctx:
+                await ctx.info(f"Starting kernel regression ({kernel_type} kernel)...")
+
+            result = kernel_regression_adapter(
+                y_data=y_data,
+                x_data=x_data,
+                file_path=file_path,
+                kernel_type=kernel_type,
+                bandwidth=bandwidth,
+                bandwidth_method=bandwidth_method,
+                variable_type=variable_type,
+                output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Kernel regression complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def quantile_regression_tool(
+        y_data: Optional[List[float]] = None,
+        x_data: Optional[List[List[float]]] = None,
+        file_path: Optional[str] = None,
+        quantile: float = 0.5,
+        feature_names: Optional[List[str]] = None,
+        confidence_level: float = 0.95,
+        output_format: str = "json",
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Quantile regression analysis"""
+        try:
+            if ctx:
+                await ctx.info(f"Starting quantile regression (τ={quantile})...")
+
+            result = quantile_regression_adapter(
+                y_data=y_data,
+                x_data=x_data,
+                file_path=file_path,
+                quantile=quantile,
+                feature_names=feature_names,
+                confidence_level=confidence_level,
+                output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Quantile regression complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def spline_regression_tool(
+        y_data: List[float],
+        x_data: List[float],
+        n_knots: int = 5,
+        degree: int = 3,
+        knots: str = "uniform",
+        output_format: str = "json",
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Spline regression"""
+        try:
+            if ctx:
+                await ctx.info(f"Starting spline regression (degree={degree}, knots={n_knots})...")
+
+            result = spline_regression_adapter(
+                y_data=y_data,
+                x_data=x_data,
+                n_knots=n_knots,
+                degree=degree,
+                knots=knots,
+                output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("Spline regression complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
+
+    @staticmethod
+    async def gam_tool(
+        y_data: List[float],
+        x_data: List[List[float]],
+        problem_type: str = "regression",
+        n_splines: int = 10,
+        lam: float = 0.6,
+        output_format: str = "json",
+        save_path: Optional[str] = None,
+        ctx: Context[ServerSession, None] = None
+    ) -> str:
+        """Generalized additive model (GAM)"""
+        try:
+            if ctx:
+                await ctx.info(f"Starting GAM model ({problem_type})...")
+
+            result = gam_adapter(
+                y_data=y_data,
+                x_data=x_data,
+                problem_type=problem_type,
+                n_splines=n_splines,
+                lam=lam,
+                output_format=output_format,
+                save_path=save_path
+            )
+
+            if ctx:
+                await ctx.info("GAM model complete")
+
+            return result
+        except Exception as e:
+            if ctx:
+                await ctx.error(f"Error: {str(e)}")
+            raise
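Note (not part of the diff): the quantile regression tool is documented as wrapping statsmodels.regression.quantile_regression. A minimal standalone sketch of that underlying estimator is shown below on synthetic data (all values here are illustrative assumptions); the tool's quantile=0.5 default corresponds to fitting the conditional median.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
x = rng.normal(size=200)
# Heteroskedastic noise so different quantiles have visibly different fits
y = 1.0 + 2.0 * x + rng.normal(size=200) * (1.0 + 0.5 * np.abs(x))

X = sm.add_constant(x)
median_fit = sm.QuantReg(y, X).fit(q=0.5)  # conditional median, as with quantile=0.5
upper_fit = sm.QuantReg(y, X).fit(q=0.9)   # upper conditional decile

print(median_fit.params)  # intercept and slope at the median
print(upper_fit.params)   # 90th-percentile fit, shifted upward by the noise spread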