aigroup-econ-mcp 1.4.3__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (97)
  1. PKG-INFO +344 -322
  2. README.md +335 -320
  3. __init__.py +1 -1
  4. aigroup_econ_mcp-2.0.1.dist-info/METADATA +732 -0
  5. aigroup_econ_mcp-2.0.1.dist-info/RECORD +170 -0
  6. cli.py +4 -0
  7. econometrics/advanced_methods/modern_computing_machine_learning/__init__.py +30 -0
  8. econometrics/advanced_methods/modern_computing_machine_learning/causal_forest.py +253 -0
  9. econometrics/advanced_methods/modern_computing_machine_learning/double_ml.py +268 -0
  10. econometrics/advanced_methods/modern_computing_machine_learning/gradient_boosting.py +249 -0
  11. econometrics/advanced_methods/modern_computing_machine_learning/hierarchical_clustering.py +243 -0
  12. econometrics/advanced_methods/modern_computing_machine_learning/kmeans_clustering.py +293 -0
  13. econometrics/advanced_methods/modern_computing_machine_learning/neural_network.py +264 -0
  14. econometrics/advanced_methods/modern_computing_machine_learning/random_forest.py +195 -0
  15. econometrics/advanced_methods/modern_computing_machine_learning/support_vector_machine.py +226 -0
  16. econometrics/advanced_methods/modern_computing_machine_learning/test_all_modules.py +329 -0
  17. econometrics/advanced_methods/modern_computing_machine_learning/test_report.md +107 -0
  18. econometrics/causal_inference/__init__.py +66 -0
  19. econometrics/causal_inference/causal_identification_strategy/__init__.py +104 -0
  20. econometrics/causal_inference/causal_identification_strategy/control_function.py +112 -0
  21. econometrics/causal_inference/causal_identification_strategy/difference_in_differences.py +107 -0
  22. econometrics/causal_inference/causal_identification_strategy/event_study.py +119 -0
  23. econometrics/causal_inference/causal_identification_strategy/first_difference.py +89 -0
  24. econometrics/causal_inference/causal_identification_strategy/fixed_effects.py +103 -0
  25. econometrics/causal_inference/causal_identification_strategy/hausman_test.py +69 -0
  26. econometrics/causal_inference/causal_identification_strategy/instrumental_variables.py +145 -0
  27. econometrics/causal_inference/causal_identification_strategy/mediation_analysis.py +121 -0
  28. econometrics/causal_inference/causal_identification_strategy/moderation_analysis.py +109 -0
  29. econometrics/causal_inference/causal_identification_strategy/propensity_score_matching.py +140 -0
  30. econometrics/causal_inference/causal_identification_strategy/random_effects.py +100 -0
  31. econometrics/causal_inference/causal_identification_strategy/regression_discontinuity.py +98 -0
  32. econometrics/causal_inference/causal_identification_strategy/synthetic_control.py +111 -0
  33. econometrics/causal_inference/causal_identification_strategy/triple_difference.py +86 -0
  34. econometrics/distribution_analysis/__init__.py +28 -0
  35. econometrics/distribution_analysis/oaxaca_blinder.py +184 -0
  36. econometrics/distribution_analysis/time_series_decomposition.py +152 -0
  37. econometrics/distribution_analysis/variance_decomposition.py +179 -0
  38. econometrics/missing_data/__init__.py +18 -0
  39. econometrics/missing_data/imputation_methods.py +219 -0
  40. econometrics/nonparametric/__init__.py +35 -0
  41. econometrics/nonparametric/gam_model.py +117 -0
  42. econometrics/nonparametric/kernel_regression.py +161 -0
  43. econometrics/nonparametric/quantile_regression.py +249 -0
  44. econometrics/nonparametric/spline_regression.py +100 -0
  45. econometrics/spatial_econometrics/__init__.py +68 -0
  46. econometrics/spatial_econometrics/geographically_weighted_regression.py +211 -0
  47. econometrics/spatial_econometrics/gwr_simple.py +154 -0
  48. econometrics/spatial_econometrics/spatial_autocorrelation.py +356 -0
  49. econometrics/spatial_econometrics/spatial_durbin_model.py +177 -0
  50. econometrics/spatial_econometrics/spatial_regression.py +315 -0
  51. econometrics/spatial_econometrics/spatial_weights.py +226 -0
  52. econometrics/specific_data_modeling/micro_discrete_limited_data/README.md +164 -0
  53. econometrics/specific_data_modeling/micro_discrete_limited_data/__init__.py +40 -0
  54. econometrics/specific_data_modeling/micro_discrete_limited_data/count_data_models.py +311 -0
  55. econometrics/specific_data_modeling/micro_discrete_limited_data/discrete_choice_models.py +294 -0
  56. econometrics/specific_data_modeling/micro_discrete_limited_data/limited_dependent_variable_models.py +282 -0
  57. econometrics/statistical_inference/__init__.py +21 -0
  58. econometrics/statistical_inference/bootstrap_methods.py +162 -0
  59. econometrics/statistical_inference/permutation_test.py +177 -0
  60. econometrics/survival_analysis/__init__.py +18 -0
  61. econometrics/survival_analysis/survival_models.py +259 -0
  62. econometrics/tests/causal_inference_tests/__init__.py +3 -0
  63. econometrics/tests/causal_inference_tests/detailed_test.py +441 -0
  64. econometrics/tests/causal_inference_tests/test_all_methods.py +418 -0
  65. econometrics/tests/causal_inference_tests/test_causal_identification_strategy.py +202 -0
  66. econometrics/tests/causal_inference_tests/test_difference_in_differences.py +53 -0
  67. econometrics/tests/causal_inference_tests/test_instrumental_variables.py +44 -0
  68. econometrics/tests/specific_data_modeling_tests/test_micro_discrete_limited_data.py +189 -0
  69. econometrics/未开发大类优先级分析.md +544 -0
  70. pyproject.toml +9 -2
  71. server.py +15 -1
  72. tools/__init__.py +75 -1
  73. tools/causal_inference_adapter.py +658 -0
  74. tools/distribution_analysis_adapter.py +121 -0
  75. tools/gwr_simple_adapter.py +54 -0
  76. tools/machine_learning_adapter.py +567 -0
  77. tools/mcp_tool_groups/__init__.py +15 -1
  78. tools/mcp_tool_groups/causal_inference_tools.py +643 -0
  79. tools/mcp_tool_groups/distribution_analysis_tools.py +169 -0
  80. tools/mcp_tool_groups/machine_learning_tools.py +422 -0
  81. tools/mcp_tool_groups/microecon_tools.py +325 -0
  82. tools/mcp_tool_groups/missing_data_tools.py +117 -0
  83. tools/mcp_tool_groups/nonparametric_tools.py +225 -0
  84. tools/mcp_tool_groups/spatial_econometrics_tools.py +323 -0
  85. tools/mcp_tool_groups/statistical_inference_tools.py +131 -0
  86. tools/mcp_tools_registry.py +13 -3
  87. tools/microecon_adapter.py +412 -0
  88. tools/missing_data_adapter.py +73 -0
  89. tools/nonparametric_adapter.py +190 -0
  90. tools/spatial_econometrics_adapter.py +318 -0
  91. tools/statistical_inference_adapter.py +90 -0
  92. tools/survival_analysis_adapter.py +46 -0
  93. aigroup_econ_mcp-1.4.3.dist-info/METADATA +0 -710
  94. aigroup_econ_mcp-1.4.3.dist-info/RECORD +0 -92
  95. {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/WHEEL +0 -0
  96. {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/entry_points.txt +0 -0
  97. {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/licenses/LICENSE +0 -0
tools/mcp_tool_groups/distribution_analysis_tools.py
@@ -0,0 +1,169 @@
+ """
+ Distribution analysis and decomposition tool group
+ Includes Oaxaca-Blinder decomposition, variance decomposition, and time series decomposition
+ """
+
+ from typing import List, Optional, Dict, Any
+ from mcp.server.fastmcp import Context
+ from mcp.server.session import ServerSession
+
+ from ..mcp_tools_registry import ToolGroup
+ from ..distribution_analysis_adapter import (
+     oaxaca_blinder_adapter,
+     variance_decomposition_adapter,
+     time_series_decomposition_adapter
+ )
+
+
+ class DistributionAnalysisTools(ToolGroup):
+     """Distribution analysis and decomposition tool group"""
+
+     name = "DISTRIBUTION ANALYSIS & DECOMPOSITION"
+     description = "Distribution analysis and decomposition tools"
+     version = "1.0.0"
+
+     @classmethod
+     def get_tools(cls) -> List[Dict[str, Any]]:
+         """Return the tool list"""
+         return [
+             {
+                 "name": "decomposition_oaxaca_blinder",
+                 "handler": cls.oaxaca_blinder_tool,
+                 "description": "Oaxaca-Blinder Decomposition"
+             },
+             {
+                 "name": "decomposition_variance_anova",
+                 "handler": cls.variance_decomposition_tool,
+                 "description": "Variance Decomposition (ANOVA)"
+             },
+             {
+                 "name": "decomposition_time_series",
+                 "handler": cls.time_series_decomposition_tool,
+                 "description": "Time Series Decomposition (Trend-Seasonal-Random)"
+             }
+         ]
+
+     @classmethod
+     def get_help_text(cls) -> str:
+         """Return the help text"""
+         return """
+ Distribution analysis & decomposition tool group - 3 tools
+
+ 1. Oaxaca-Blinder Decomposition (decomposition_oaxaca_blinder)
+    - Decomposes the mean difference between two groups
+    - Endowment effect vs. coefficient effect
+    - Applications: wage gap and employment gap analysis
+
+ 2. Variance Decomposition (decomposition_variance_anova)
+    - One-way analysis of variance
+    - Between-group vs. within-group variance
+    - F-test and effect size estimation
+
+ 3. Time Series Decomposition (decomposition_time_series)
+    - Trend-seasonal-random decomposition
+    - Additive/multiplicative model
+    - Classical or STL decomposition
+ """
+
+     @staticmethod
+     async def oaxaca_blinder_tool(
+         y_a: List[float],
+         x_a: List[List[float]],
+         y_b: List[float],
+         x_b: List[List[float]],
+         feature_names: Optional[List[str]] = None,
+         weight_matrix: str = "pooled",
+         output_format: str = "json",
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Oaxaca-Blinder decomposition"""
+         try:
+             if ctx:
+                 await ctx.info("Starting Oaxaca-Blinder decomposition...")
+
+             result = oaxaca_blinder_adapter(
+                 y_a=y_a,
+                 x_a=x_a,
+                 y_b=y_b,
+                 x_b=x_b,
+                 feature_names=feature_names,
+                 weight_matrix=weight_matrix,
+                 output_format=output_format,
+                 save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Oaxaca-Blinder decomposition complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def variance_decomposition_tool(
+         values: List[float],
+         groups: List[str],
+         group_names: Optional[List[str]] = None,
+         output_format: str = "json",
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Variance decomposition (ANOVA)"""
+         try:
+             if ctx:
+                 await ctx.info("Starting variance decomposition (ANOVA)...")
+
+             result = variance_decomposition_adapter(
+                 values=values,
+                 groups=groups,
+                 group_names=group_names,
+                 output_format=output_format,
+                 save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Variance decomposition complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def time_series_decomposition_tool(
+         data: List[float],
+         period: int = 12,
+         model: str = "additive",
+         method: str = "classical",
+         extrapolate_trend: str = "freq",
+         output_format: str = "json",
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Time series decomposition"""
+         try:
+             if ctx:
+                 await ctx.info(f"Starting time series decomposition ({method})...")
+
+             result = time_series_decomposition_adapter(
+                 data=data,
+                 period=period,
+                 model=model,
+                 method=method,
+                 extrapolate_trend=extrapolate_trend,
+                 output_format=output_format,
+                 save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Time series decomposition complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
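
Note: the handlers above are plain static methods and ctx defaults to None, so they can be exercised without a live MCP session. A minimal sketch (not part of the diff; it assumes the top-level tools package layout shown in the file list) driving the ANOVA tool:

    import asyncio

    from tools.mcp_tool_groups.distribution_analysis_tools import DistributionAnalysisTools

    # Six observations in three groups; the tool returns a JSON string by default.
    values = [5.1, 4.9, 6.2, 6.8, 8.0, 7.7]
    groups = ["a", "a", "b", "b", "c", "c"]

    result_json = asyncio.run(
        DistributionAnalysisTools.variance_decomposition_tool(values=values, groups=groups)
    )
    print(result_json)

For decomposition_oaxaca_blinder, the identity behind the "endowment vs. coefficient" bullet is the standard one: mean(y_a) - mean(y_b) = (mean(x_a) - mean(x_b))'b* (endowment part) plus the remaining coefficient part, where the reference coefficients b* are selected by weight_matrix (default "pooled").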
tools/mcp_tool_groups/machine_learning_tools.py
@@ -0,0 +1,422 @@
+ """
+ Machine learning tool group
+ MCP tools for eight machine learning models
+ """
+
+ from typing import List, Optional, Union, Dict, Any
+ from mcp.server.fastmcp import Context
+ from mcp.server.session import ServerSession
+
+ from ..mcp_tools_registry import ToolGroup
+ from ..machine_learning_adapter import (
+     random_forest_adapter,
+     gradient_boosting_adapter,
+     svm_adapter,
+     neural_network_adapter,
+     kmeans_clustering_adapter,
+     hierarchical_clustering_adapter,
+     double_ml_adapter,
+     causal_forest_adapter
+ )
+
+
+ class MachineLearningTools(ToolGroup):
+     """Machine learning tool group"""
+
+     name = "MACHINE LEARNING"
+     description = "Machine learning model tools"
+     version = "1.0.0"
+
+     @classmethod
+     def get_tools(cls) -> List[Dict[str, Any]]:
+         """Return the tool list"""
+         return [
+             {
+                 "name": "ml_random_forest",
+                 "handler": cls.random_forest_tool,
+                 "description": "Random Forest Analysis (Regression/Classification)"
+             },
+             {
+                 "name": "ml_gradient_boosting",
+                 "handler": cls.gradient_boosting_tool,
+                 "description": "Gradient Boosting Machine Analysis"
+             },
+             {
+                 "name": "ml_support_vector_machine",
+                 "handler": cls.svm_tool,
+                 "description": "Support Vector Machine Analysis"
+             },
+             {
+                 "name": "ml_neural_network",
+                 "handler": cls.neural_network_tool,
+                 "description": "Neural Network (MLP) Analysis"
+             },
+             {
+                 "name": "ml_kmeans_clustering",
+                 "handler": cls.kmeans_tool,
+                 "description": "K-Means Clustering Analysis"
+             },
+             {
+                 "name": "ml_hierarchical_clustering",
+                 "handler": cls.hierarchical_clustering_tool,
+                 "description": "Hierarchical Clustering Analysis"
+             },
+             {
+                 "name": "ml_double_machine_learning",
+                 "handler": cls.double_ml_tool,
+                 "description": "Double/Debiased Machine Learning for Causal Inference"
+             },
+             {
+                 "name": "ml_causal_forest",
+                 "handler": cls.causal_forest_tool,
+                 "description": "Causal Forest for Heterogeneous Treatment Effects"
+             }
+         ]
+
+     @classmethod
+     def get_help_text(cls) -> str:
+         """Return the help text"""
+         return """
+ Machine learning tool group - 8 machine learning models
+
+ Supervised learning:
+ 1. Random Forest - ml_random_forest
+    - Random forest regression/classification
+    - Supports feature importance analysis
+
+ 2. Gradient Boosting - ml_gradient_boosting
+    - Gradient boosting machine (sklearn/XGBoost backends)
+    - High-performance ensemble learning
+
+ 3. Support Vector Machine - ml_support_vector_machine
+    - SVM regression/classification
+    - Choice of several kernel functions
+
+ 4. Neural Network - ml_neural_network
+    - Multilayer perceptron (MLP)
+    - Configurable network architecture
+
+ Unsupervised learning:
+ 5. K-Means Clustering - ml_kmeans_clustering
+    - K-means clustering
+    - Cluster quality evaluation
+
+ 6. Hierarchical Clustering - ml_hierarchical_clustering
+    - Hierarchical clustering
+    - Dendrogram visualization
+
+ Causal inference:
+ 7. Double Machine Learning - ml_double_machine_learning
+    - Double/debiased machine learning
+    - Treatment effect estimation
+
+ 8. Causal Forest - ml_causal_forest
+    - Causal forest
+    - Heterogeneous treatment effect estimation
+ """
+
+     @staticmethod
+     async def random_forest_tool(
+         X_data: Optional[List] = None,
+         y_data: Optional[List[float]] = None,
+         file_path: Optional[str] = None,
+         feature_names: Optional[List[str]] = None,
+         problem_type: str = 'regression',
+         test_size: float = 0.2,
+         n_estimators: int = 100,
+         max_depth: Optional[int] = None,
+         random_state: int = 42,
+         output_format: str = 'json',
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Random forest analysis"""
+         try:
+             if ctx:
+                 await ctx.info("Starting Random Forest analysis...")
+
+             result = random_forest_adapter(
+                 X_data=X_data, y_data=y_data, file_path=file_path,
+                 feature_names=feature_names, problem_type=problem_type,
+                 test_size=test_size, n_estimators=n_estimators,
+                 max_depth=max_depth, random_state=random_state,
+                 output_format=output_format, save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Random Forest analysis complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def gradient_boosting_tool(
+         X_data: Optional[List] = None,
+         y_data: Optional[List[float]] = None,
+         file_path: Optional[str] = None,
+         feature_names: Optional[List[str]] = None,
+         algorithm: str = 'sklearn',
+         problem_type: str = 'regression',
+         test_size: float = 0.2,
+         n_estimators: int = 100,
+         learning_rate: float = 0.1,
+         max_depth: int = 3,
+         random_state: int = 42,
+         output_format: str = 'json',
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Gradient boosting analysis"""
+         try:
+             if ctx:
+                 await ctx.info("Starting Gradient Boosting analysis...")
+
+             result = gradient_boosting_adapter(
+                 X_data=X_data, y_data=y_data, file_path=file_path,
+                 feature_names=feature_names, algorithm=algorithm,
+                 problem_type=problem_type, test_size=test_size,
+                 n_estimators=n_estimators, learning_rate=learning_rate,
+                 max_depth=max_depth, random_state=random_state,
+                 output_format=output_format, save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Gradient Boosting analysis complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def svm_tool(
+         X_data: Optional[List] = None,
+         y_data: Optional[List[float]] = None,
+         file_path: Optional[str] = None,
+         feature_names: Optional[List[str]] = None,
+         problem_type: str = 'regression',
+         kernel: str = 'rbf',
+         test_size: float = 0.2,
+         C: float = 1.0,
+         gamma: str = 'scale',
+         random_state: int = 42,
+         output_format: str = 'json',
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Support vector machine analysis"""
+         try:
+             if ctx:
+                 await ctx.info("Starting SVM analysis...")
+
+             result = svm_adapter(
+                 X_data=X_data, y_data=y_data, file_path=file_path,
+                 feature_names=feature_names, problem_type=problem_type,
+                 kernel=kernel, test_size=test_size, C=C, gamma=gamma,
+                 random_state=random_state, output_format=output_format,
+                 save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("SVM analysis complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def neural_network_tool(
+         X_data: Optional[List] = None,
+         y_data: Optional[List[float]] = None,
+         file_path: Optional[str] = None,
+         feature_names: Optional[List[str]] = None,
+         problem_type: str = 'regression',
+         hidden_layer_sizes: tuple = (100,),
+         activation: str = 'relu',
+         solver: str = 'adam',
+         test_size: float = 0.2,
+         alpha: float = 0.0001,
+         learning_rate: str = 'constant',
+         learning_rate_init: float = 0.001,
+         max_iter: int = 200,
+         random_state: int = 42,
+         output_format: str = 'json',
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Neural network analysis"""
+         try:
+             if ctx:
+                 await ctx.info("Starting Neural Network analysis...")
+
+             result = neural_network_adapter(
+                 X_data=X_data, y_data=y_data, file_path=file_path,
+                 feature_names=feature_names, problem_type=problem_type,
+                 hidden_layer_sizes=hidden_layer_sizes, activation=activation,
+                 solver=solver, test_size=test_size, alpha=alpha,
+                 learning_rate=learning_rate, learning_rate_init=learning_rate_init,
+                 max_iter=max_iter, random_state=random_state,
+                 output_format=output_format, save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Neural Network analysis complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def kmeans_tool(
+         X_data: Optional[List] = None,
+         file_path: Optional[str] = None,
+         feature_names: Optional[List[str]] = None,
+         n_clusters: int = 8,
+         init: str = 'k-means++',
+         n_init: int = 10,
+         max_iter: int = 300,
+         random_state: int = 42,
+         algorithm: str = 'lloyd',
+         use_minibatch: bool = False,
+         batch_size: int = 1000,
+         output_format: str = 'json',
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """K-means clustering analysis"""
+         try:
+             if ctx:
+                 await ctx.info("Starting K-Means Clustering analysis...")
+
+             result = kmeans_clustering_adapter(
+                 X_data=X_data, file_path=file_path, feature_names=feature_names,
+                 n_clusters=n_clusters, init=init, n_init=n_init,
+                 max_iter=max_iter, random_state=random_state,
+                 algorithm=algorithm, use_minibatch=use_minibatch,
+                 batch_size=batch_size, output_format=output_format,
+                 save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("K-Means Clustering analysis complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def hierarchical_clustering_tool(
+         X_data: Optional[List] = None,
+         file_path: Optional[str] = None,
+         feature_names: Optional[List[str]] = None,
+         n_clusters: int = 2,
+         linkage: str = 'ward',
+         metric: str = 'euclidean',
+         output_format: str = 'json',
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Hierarchical clustering analysis"""
+         try:
+             if ctx:
+                 await ctx.info("Starting Hierarchical Clustering analysis...")
+
+             result = hierarchical_clustering_adapter(
+                 X_data=X_data, file_path=file_path, feature_names=feature_names,
+                 n_clusters=n_clusters, linkage=linkage, metric=metric,
+                 output_format=output_format, save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Hierarchical Clustering analysis complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def double_ml_tool(
+         X_data: Optional[List] = None,
+         y_data: Optional[List[float]] = None,
+         d_data: Optional[List[float]] = None,
+         file_path: Optional[str] = None,
+         feature_names: Optional[List[str]] = None,
+         treatment_type: str = 'continuous',
+         n_folds: int = 5,
+         random_state: int = 42,
+         output_format: str = 'json',
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Double machine learning analysis"""
+         try:
+             if ctx:
+                 await ctx.info("Starting Double Machine Learning analysis...")
+
+             result = double_ml_adapter(
+                 X_data=X_data, y_data=y_data, d_data=d_data,
+                 file_path=file_path, feature_names=feature_names,
+                 treatment_type=treatment_type, n_folds=n_folds,
+                 random_state=random_state, output_format=output_format,
+                 save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Double Machine Learning analysis complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
+
+     @staticmethod
+     async def causal_forest_tool(
+         X_data: Optional[List] = None,
+         y_data: Optional[List[float]] = None,
+         w_data: Optional[List[float]] = None,
+         file_path: Optional[str] = None,
+         feature_names: Optional[List[str]] = None,
+         n_estimators: int = 100,
+         min_samples_leaf: int = 5,
+         max_depth: Optional[int] = None,
+         random_state: int = 42,
+         honest: bool = True,
+         output_format: str = 'json',
+         save_path: Optional[str] = None,
+         ctx: Context[ServerSession, None] = None
+     ) -> str:
+         """Causal forest analysis"""
+         try:
+             if ctx:
+                 await ctx.info("Starting Causal Forest analysis...")
+
+             result = causal_forest_adapter(
+                 X_data=X_data, y_data=y_data, w_data=w_data,
+                 file_path=file_path, feature_names=feature_names,
+                 n_estimators=n_estimators, min_samples_leaf=min_samples_leaf,
+                 max_depth=max_depth, random_state=random_state,
+                 honest=honest, output_format=output_format,
+                 save_path=save_path
+             )
+
+             if ctx:
+                 await ctx.info("Causal Forest analysis complete")
+
+             return result
+         except Exception as e:
+             if ctx:
+                 await ctx.error(f"Error: {str(e)}")
+             raise
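
Note: as with the decomposition group, these handlers can be called directly without an MCP session. A minimal sketch (not part of the diff; synthetic data; top-level import path assumed) for the double-ML tool, whose signature above takes covariates X_data, outcome y_data, and treatment d_data:

    import asyncio
    import random

    from tools.mcp_tool_groups.machine_learning_tools import MachineLearningTools

    random.seed(0)
    X_data = [[random.gauss(0, 1) for _ in range(3)] for _ in range(200)]
    d_data = [row[0] + random.gauss(0, 1) for row in X_data]   # treatment driven by X
    y_data = [2.0 * d + row[1] + random.gauss(0, 1)            # true effect of d is 2.0
              for d, row in zip(d_data, X_data)]

    result_json = asyncio.run(
        MachineLearningTools.double_ml_tool(
            X_data=X_data, y_data=y_data, d_data=d_data,
            treatment_type="continuous", n_folds=5,
        )
    )
    print(result_json)

If double_ml_adapter follows the usual cross-fitted partialling-out recipe, the reported effect should land near 2.0; the adapter's output schema is not shown in this diff.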