aigroup-econ-mcp 1.3.3__py3-none-any.whl → 1.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (120) hide show
  1. .gitignore +253 -0
  2. PKG-INFO +710 -0
  3. README.md +672 -0
  4. __init__.py +14 -0
  5. aigroup_econ_mcp-1.4.3.dist-info/METADATA +710 -0
  6. aigroup_econ_mcp-1.4.3.dist-info/RECORD +92 -0
  7. aigroup_econ_mcp-1.4.3.dist-info/entry_points.txt +2 -0
  8. aigroup_econ_mcp-1.4.3.dist-info/licenses/LICENSE +21 -0
  9. cli.py +28 -0
  10. econometrics/README.md +18 -0
  11. econometrics/__init__.py +191 -0
  12. econometrics/advanced_methods/modern_computing_machine_learning/__init__.py +0 -0
  13. econometrics/basic_parametric_estimation/__init__.py +31 -0
  14. econometrics/basic_parametric_estimation/gmm/__init__.py +13 -0
  15. econometrics/basic_parametric_estimation/gmm/gmm_model.py +256 -0
  16. econometrics/basic_parametric_estimation/mle/__init__.py +13 -0
  17. econometrics/basic_parametric_estimation/mle/mle_model.py +241 -0
  18. econometrics/basic_parametric_estimation/ols/__init__.py +13 -0
  19. econometrics/basic_parametric_estimation/ols/ols_model.py +141 -0
  20. econometrics/causal_inference/causal_identification_strategy/__init__.py +0 -0
  21. econometrics/missing_data/missing_data_measurement_error/__init__.py +0 -0
  22. econometrics/model_specification_diagnostics_robust_inference/README.md +173 -0
  23. econometrics/model_specification_diagnostics_robust_inference/__init__.py +78 -0
  24. econometrics/model_specification_diagnostics_robust_inference/diagnostic_tests/__init__.py +20 -0
  25. econometrics/model_specification_diagnostics_robust_inference/diagnostic_tests/diagnostic_tests_model.py +149 -0
  26. econometrics/model_specification_diagnostics_robust_inference/generalized_least_squares/__init__.py +15 -0
  27. econometrics/model_specification_diagnostics_robust_inference/generalized_least_squares/gls_model.py +130 -0
  28. econometrics/model_specification_diagnostics_robust_inference/model_selection/__init__.py +18 -0
  29. econometrics/model_specification_diagnostics_robust_inference/model_selection/model_selection_model.py +286 -0
  30. econometrics/model_specification_diagnostics_robust_inference/regularization/__init__.py +15 -0
  31. econometrics/model_specification_diagnostics_robust_inference/regularization/regularization_model.py +177 -0
  32. econometrics/model_specification_diagnostics_robust_inference/robust_errors/__init__.py +15 -0
  33. econometrics/model_specification_diagnostics_robust_inference/robust_errors/robust_errors_model.py +122 -0
  34. econometrics/model_specification_diagnostics_robust_inference/simultaneous_equations/__init__.py +15 -0
  35. econometrics/model_specification_diagnostics_robust_inference/simultaneous_equations/simultaneous_equations_model.py +246 -0
  36. econometrics/model_specification_diagnostics_robust_inference/weighted_least_squares/__init__.py +15 -0
  37. econometrics/model_specification_diagnostics_robust_inference/weighted_least_squares/wls_model.py +127 -0
  38. econometrics/nonparametric/nonparametric_semiparametric_methods/__init__.py +0 -0
  39. econometrics/spatial_econometrics/spatial_econometrics_new/__init__.py +0 -0
  40. econometrics/specific_data_modeling/micro_discrete_limited_data/__init__.py +0 -0
  41. econometrics/specific_data_modeling/survival_duration_data/__init__.py +0 -0
  42. econometrics/specific_data_modeling/time_series_panel_data/__init__.py +143 -0
  43. econometrics/specific_data_modeling/time_series_panel_data/arima_model.py +104 -0
  44. econometrics/specific_data_modeling/time_series_panel_data/cointegration_vecm.py +334 -0
  45. econometrics/specific_data_modeling/time_series_panel_data/dynamic_panel_models.py +653 -0
  46. econometrics/specific_data_modeling/time_series_panel_data/exponential_smoothing.py +176 -0
  47. econometrics/specific_data_modeling/time_series_panel_data/garch_model.py +198 -0
  48. econometrics/specific_data_modeling/time_series_panel_data/panel_diagnostics.py +125 -0
  49. econometrics/specific_data_modeling/time_series_panel_data/panel_var.py +60 -0
  50. econometrics/specific_data_modeling/time_series_panel_data/structural_break_tests.py +87 -0
  51. econometrics/specific_data_modeling/time_series_panel_data/time_varying_parameter_models.py +106 -0
  52. econometrics/specific_data_modeling/time_series_panel_data/unit_root_tests.py +204 -0
  53. econometrics/specific_data_modeling/time_series_panel_data/var_svar_model.py +372 -0
  54. econometrics/statistical_inference/statistical_inference_techniques/__init__.py +0 -0
  55. econometrics/statistics/distribution_decomposition_methods/__init__.py +0 -0
  56. econometrics/tests/basic_parametric_estimation_tests/__init__.py +3 -0
  57. econometrics/tests/basic_parametric_estimation_tests/test_gmm.py +128 -0
  58. econometrics/tests/basic_parametric_estimation_tests/test_mle.py +127 -0
  59. econometrics/tests/basic_parametric_estimation_tests/test_ols.py +100 -0
  60. econometrics/tests/model_specification_diagnostics_tests/__init__.py +3 -0
  61. econometrics/tests/model_specification_diagnostics_tests/test_diagnostic_tests.py +86 -0
  62. econometrics/tests/model_specification_diagnostics_tests/test_robust_errors.py +89 -0
  63. econometrics/tests/specific_data_modeling_tests/__init__.py +3 -0
  64. econometrics/tests/specific_data_modeling_tests/test_arima.py +98 -0
  65. econometrics/tests/specific_data_modeling_tests/test_dynamic_panel.py +198 -0
  66. econometrics/tests/specific_data_modeling_tests/test_exponential_smoothing.py +105 -0
  67. econometrics/tests/specific_data_modeling_tests/test_garch.py +118 -0
  68. econometrics/tests/specific_data_modeling_tests/test_unit_root.py +156 -0
  69. econometrics/tests/specific_data_modeling_tests/test_var.py +124 -0
  70. prompts/__init__.py +0 -0
  71. prompts/analysis_guides.py +43 -0
  72. pyproject.toml +78 -0
  73. resources/MCP_MASTER_GUIDE.md +422 -0
  74. resources/MCP_TOOLS_DATA_FORMAT_GUIDE.md +185 -0
  75. resources/__init__.py +0 -0
  76. server.py +83 -0
  77. tools/README.md +88 -0
  78. tools/__init__.py +45 -0
  79. tools/data_loader.py +213 -0
  80. tools/decorators.py +38 -0
  81. tools/econometrics_adapter.py +286 -0
  82. tools/mcp_tool_groups/__init__.py +1 -0
  83. tools/mcp_tool_groups/basic_parametric_tools.py +173 -0
  84. tools/mcp_tool_groups/model_specification_tools.py +402 -0
  85. tools/mcp_tool_groups/time_series_tools.py +494 -0
  86. tools/mcp_tools_registry.py +114 -0
  87. tools/model_specification_adapter.py +369 -0
  88. tools/output_formatter.py +563 -0
  89. tools/time_series_panel_data_adapter.py +858 -0
  90. tools/time_series_panel_data_tools.py +65 -0
  91. aigroup_econ_mcp/__init__.py +0 -19
  92. aigroup_econ_mcp/cli.py +0 -82
  93. aigroup_econ_mcp/config.py +0 -561
  94. aigroup_econ_mcp/server.py +0 -452
  95. aigroup_econ_mcp/tools/__init__.py +0 -19
  96. aigroup_econ_mcp/tools/base.py +0 -470
  97. aigroup_econ_mcp/tools/cache.py +0 -533
  98. aigroup_econ_mcp/tools/data_loader.py +0 -195
  99. aigroup_econ_mcp/tools/file_parser.py +0 -1027
  100. aigroup_econ_mcp/tools/machine_learning.py +0 -60
  101. aigroup_econ_mcp/tools/ml_ensemble.py +0 -210
  102. aigroup_econ_mcp/tools/ml_evaluation.py +0 -272
  103. aigroup_econ_mcp/tools/ml_models.py +0 -54
  104. aigroup_econ_mcp/tools/ml_regularization.py +0 -186
  105. aigroup_econ_mcp/tools/monitoring.py +0 -555
  106. aigroup_econ_mcp/tools/optimized_example.py +0 -229
  107. aigroup_econ_mcp/tools/panel_data.py +0 -619
  108. aigroup_econ_mcp/tools/regression.py +0 -214
  109. aigroup_econ_mcp/tools/statistics.py +0 -154
  110. aigroup_econ_mcp/tools/time_series.py +0 -698
  111. aigroup_econ_mcp/tools/timeout.py +0 -283
  112. aigroup_econ_mcp/tools/tool_descriptions.py +0 -410
  113. aigroup_econ_mcp/tools/tool_handlers.py +0 -1016
  114. aigroup_econ_mcp/tools/tool_registry.py +0 -478
  115. aigroup_econ_mcp/tools/validation.py +0 -482
  116. aigroup_econ_mcp-1.3.3.dist-info/METADATA +0 -525
  117. aigroup_econ_mcp-1.3.3.dist-info/RECORD +0 -30
  118. aigroup_econ_mcp-1.3.3.dist-info/entry_points.txt +0 -2
  119. /aigroup_econ_mcp-1.3.3.dist-info/licenses/LICENSE → /LICENSE +0 -0
  120. {aigroup_econ_mcp-1.3.3.dist-info → aigroup_econ_mcp-1.4.3.dist-info}/WHEEL +0 -0
@@ -0,0 +1,494 @@
1
+ """
2
+ 时间序列和面板数据工具组
3
+ 包含 ARIMA、GARCH、单位根检验、VAR/SVAR、协整分析、动态面板模型等
4
+ """
5
+
6
+ from typing import List, Optional, Union, Dict, Any
7
+ from mcp.server.fastmcp import Context
8
+ from mcp.server.session import ServerSession
9
+
10
+ from ..mcp_tools_registry import ToolGroup
11
+ from ..time_series_panel_data_adapter import (
12
+ arima_adapter,
13
+ exp_smoothing_adapter,
14
+ garch_adapter,
15
+ unit_root_adapter,
16
+ var_svar_adapter,
17
+ cointegration_adapter,
18
+ dynamic_panel_adapter,
19
+ panel_diagnostics_adapter,
20
+ panel_var_adapter,
21
+ structural_break_adapter,
22
+ time_varying_parameter_adapter
23
+ )
24
+
25
+
26
class TimeSeriesTools(ToolGroup):
    """Time-series and panel-data tool group.

    Every tool is an async static method that validates its inline input,
    delegates the computation to the matching adapter function and reports
    progress through the optional MCP context (``ctx``).  Tools accept either
    inline data or a ``file_path`` pointing at a data file; exactly how the
    file is parsed is the adapter's responsibility.
    """

    name = "TIME SERIES & PANEL DATA"
    description = "时间序列分析和面板数据模型工具"
    version = "2.0.0"

    @classmethod
    def get_tools(cls) -> List[Dict[str, Any]]:
        """Return the registration records for every tool in this group."""
        return [
            {
                "name": "time_series_arima_model",
                "handler": cls.arima_tool,
                "description": "ARIMA Time Series Model"
            },
            {
                "name": "time_series_exponential_smoothing",
                "handler": cls.exp_smoothing_tool,
                "description": "Exponential Smoothing Model"
            },
            {
                "name": "time_series_garch_model",
                "handler": cls.garch_tool,
                "description": "GARCH Volatility Model"
            },
            {
                "name": "time_series_unit_root_tests",
                "handler": cls.unit_root_tool,
                "description": "Unit Root Tests (ADF/PP/KPSS)"
            },
            {
                "name": "time_series_var_svar_model",
                "handler": cls.var_svar_tool,
                "description": "VAR/SVAR Model"
            },
            {
                "name": "time_series_cointegration_analysis",
                "handler": cls.cointegration_tool,
                "description": "Cointegration Analysis"
            },
            {
                "name": "panel_data_dynamic_model",
                "handler": cls.dynamic_panel_tool,
                "description": "Dynamic Panel Data Model"
            },
            {
                "name": "panel_data_diagnostics",
                "handler": cls.panel_diagnostics_tool,
                "description": "Panel Data Diagnostic Tests"
            },
            {
                "name": "panel_var_model",
                "handler": cls.panel_var_tool,
                "description": "Panel VAR Model"
            },
            {
                "name": "structural_break_tests",
                "handler": cls.structural_break_tool,
                "description": "Structural Break Tests"
            },
            {
                "name": "time_varying_parameter_models",
                "handler": cls.time_varying_parameter_tool,
                "description": "Time-Varying Parameter Models"
            }
        ]

    @classmethod
    def get_help_text(cls) -> str:
        """Return the human-readable help text for this group."""
        return """
4. ARIMA Model (time_series_arima_model)
   - Order: (p,d,q) parameters
   - Forecasting: multi-step prediction

5. Exponential Smoothing (time_series_exponential_smoothing)
   - Components: trend, seasonal
   - Forecasting: multi-step prediction

6. GARCH Model (time_series_garch_model)
   - Volatility: conditional variance modeling
   - Order: (p,q) parameters

7. Unit Root Tests (time_series_unit_root_tests)
   - Tests: ADF, PP, KPSS
   - Stationarity: check for unit roots

8. VAR/SVAR Model (time_series_var_svar_model)
   - Models: VAR, SVAR
   - Multivariate: multiple time series analysis

9. Cointegration Analysis (time_series_cointegration_analysis)
   - Tests: Engle-Granger, Johansen
   - Models: VECM
   - Long-run: equilibrium relationships

10. Dynamic Panel Models (panel_data_dynamic_model)
    - Models: Difference GMM, System GMM
    - Panel: cross-sectional and time series data

11. Panel Data Diagnostics (panel_data_diagnostics)
    - Tests: Hausman, Pooling F, LM, Within Correlation
    - Model Selection: FE vs RE vs Pooled

12. Panel VAR Model (panel_var_model)
    - Panel Vector Autoregression
    - Individual and Time Effects

13. Structural Break Tests (structural_break_tests)
    - Tests: Chow, Quandt-Andrews, Bai-Perron
    - Detect: structural changes in time series

14. Time-Varying Parameter Models (time_varying_parameter_models)
    - Models: TAR, STAR, Markov Switching
    - Regime-switching: threshold-based transitions
"""

    @staticmethod
    def _coerce_to_matrix(
        data: Optional[Union[List[float], List[List[float]]]],
        error_message: str
    ) -> Optional[List[List[float]]]:
        """Normalize inline multivariate data into a 2D row-per-observation list.

        A flat list of numbers is wrapped into a single row; a well-formed 2D
        numeric list is returned unchanged.  ``None`` passes through so callers
        can rely on ``file_path`` input instead.

        Fixes the original per-tool validation, which tested ``len(data) > 0``
        (always true at that point) instead of the first row's length and so
        raised an uncaught IndexError for ``[]`` or ``[[]]`` inputs.

        Raises:
            ValueError: with ``error_message`` if ``data`` is empty or is not
                a 1D or 2D list of numbers.
        """
        if data is None:
            return None
        if not data:
            raise ValueError(error_message)
        first = data[0]
        if isinstance(first, (int, float)):
            # One-dimensional input: treat it as a single observation row.
            return [data]
        if isinstance(first, list) and first and isinstance(first[0], (int, float)):
            return data
        raise ValueError(error_message)

    @staticmethod
    async def arima_tool(
        data: Optional[List[float]] = None,
        file_path: Optional[str] = None,
        order: tuple = (1, 1, 1),
        forecast_steps: int = 1,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """ARIMA Time Series Model.

        Args:
            data: Univariate series; mutually exclusive with ``file_path``.
            file_path: Path to a data file parsed by the adapter.
            order: ARIMA (p, d, q) order.
            forecast_steps: Number of out-of-sample steps to forecast.
            output_format: Adapter output format (e.g. ``"json"``).
            save_path: Optional path to persist the formatted result.
            ctx: Optional MCP context for progress reporting.

        Returns:
            The adapter's formatted result string.
        """
        try:
            if ctx:
                await ctx.info("Starting ARIMA model analysis...")
            result = arima_adapter(data, file_path, order, forecast_steps, output_format, save_path)
            if ctx:
                await ctx.info("ARIMA analysis complete")
            return result
        except Exception as e:
            # Surface the failure to the client before re-raising to MCP.
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def exp_smoothing_tool(
        data: Optional[List[float]] = None,
        file_path: Optional[str] = None,
        trend: bool = True,
        seasonal: bool = False,
        seasonal_periods: Optional[int] = None,
        forecast_steps: int = 1,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Exponential Smoothing Model.

        Args:
            data: Univariate series; mutually exclusive with ``file_path``.
            file_path: Path to a data file parsed by the adapter.
            trend: Include a trend component.
            seasonal: Include a seasonal component.
            seasonal_periods: Season length; required by the adapter when
                ``seasonal`` is enabled (not validated here).
            forecast_steps: Number of out-of-sample steps to forecast.
            output_format: Adapter output format.
            save_path: Optional path to persist the formatted result.
            ctx: Optional MCP context for progress reporting.
        """
        try:
            if ctx:
                await ctx.info("Starting exponential smoothing analysis...")
            result = exp_smoothing_adapter(data, file_path, trend, seasonal, seasonal_periods, forecast_steps, output_format, save_path)
            if ctx:
                await ctx.info("Exponential smoothing complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def garch_tool(
        data: Optional[List[float]] = None,
        file_path: Optional[str] = None,
        order: tuple = (1, 1),
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """GARCH Volatility Model.

        Args:
            data: Univariate (return) series; mutually exclusive with
                ``file_path``.
            file_path: Path to a data file parsed by the adapter.
            order: GARCH (p, q) order.
            output_format: Adapter output format.
            save_path: Optional path to persist the formatted result.
            ctx: Optional MCP context for progress reporting.
        """
        try:
            if ctx:
                await ctx.info("Starting GARCH model analysis...")
            result = garch_adapter(data, file_path, order, output_format, save_path)
            if ctx:
                await ctx.info("GARCH analysis complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def unit_root_tool(
        data: Optional[List[float]] = None,
        file_path: Optional[str] = None,
        test_type: str = "adf",
        max_lags: Optional[int] = None,
        regression_type: str = "c",
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Unit Root Tests (ADF / PP / KPSS).

        Args:
            data: Univariate series; mutually exclusive with ``file_path``.
            file_path: Path to a data file parsed by the adapter.
            test_type: One of the adapter-supported tests, e.g. ``"adf"``.
            max_lags: Maximum lag order; adapter default when ``None``.
            regression_type: Deterministic term specification (e.g. ``"c"``).
            output_format: Adapter output format.
            save_path: Optional path to persist the formatted result.
            ctx: Optional MCP context for progress reporting.
        """
        try:
            if ctx:
                await ctx.info(f"Starting {test_type.upper()} unit root test...")
            result = unit_root_adapter(data, file_path, test_type, max_lags, regression_type, output_format, save_path)
            if ctx:
                await ctx.info(f"{test_type.upper()} test complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def var_svar_tool(
        data: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        model_type: str = "var",
        lags: int = 1,
        variables: Optional[List[str]] = None,
        a_matrix: Optional[List[List[float]]] = None,
        b_matrix: Optional[List[List[float]]] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """
        VAR/SVAR Model

        Data format:
        - data: multivariate time series as a 2D list
          - each inner list holds all variable values at one time point
          - example: [[var1_t1, var2_t1, var3_t1], [var1_t2, var2_t2, var3_t2], ...]
        - variables: variable names, e.g. ["GDP", "Inflation", "Interest"]

        Example call:
        {
            "data": [[1.0, 2.5, 1.8], [1.2, 2.7, 2.0], [1.4, 2.9, 2.2]],
            "model_type": "var",
            "lags": 1,
            "variables": ["GDP", "Inflation", "Interest"],
            "output_format": "json"
        }
        """
        try:
            if ctx:
                await ctx.info(f"Starting {model_type.upper()} model analysis...")

            # Normalize/validate inline data (file_path input skips this).
            data = TimeSeriesTools._coerce_to_matrix(
                data, "数据格式不正确,应为二维列表"
            )

            result = var_svar_adapter(data, file_path, model_type, lags, variables, a_matrix, b_matrix, output_format, save_path)
            if ctx:
                await ctx.info(f"{model_type.upper()} analysis complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def panel_diagnostics_tool(
        test_type: str = "hausman",
        fe_coefficients: Optional[List[float]] = None,
        re_coefficients: Optional[List[float]] = None,
        fe_covariance: Optional[List[List[float]]] = None,
        re_covariance: Optional[List[List[float]]] = None,
        pooled_ssrs: Optional[float] = None,
        fixed_ssrs: Optional[float] = None,
        random_ssrs: Optional[float] = None,
        n_individuals: Optional[int] = None,
        n_params: Optional[int] = None,
        n_obs: Optional[int] = None,
        n_periods: Optional[int] = None,
        residuals: Optional[List[List[float]]] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Panel Data Diagnostic Tests (Hausman, pooling F, LM, ...).

        Which of the optional statistics are required depends on
        ``test_type``; the adapter performs that validation.
        """
        try:
            if ctx:
                await ctx.info(f"Starting {test_type} panel diagnostic test...")
            result = panel_diagnostics_adapter(
                test_type, fe_coefficients, re_coefficients, fe_covariance, re_covariance,
                pooled_ssrs, fixed_ssrs, random_ssrs, n_individuals, n_params, n_obs,
                n_periods, residuals, output_format, save_path
            )
            if ctx:
                await ctx.info(f"{test_type} test complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def panel_var_tool(
        data: Optional[List[List[float]]] = None,
        entity_ids: Optional[List[int]] = None,
        time_periods: Optional[List[int]] = None,
        file_path: Optional[str] = None,
        lags: int = 1,
        variables: Optional[List[str]] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Panel VAR Model.

        Args:
            data: Multivariate panel observations as a 2D list.
            entity_ids: Entity identifier per observation.
            time_periods: Time identifier per observation.
            file_path: Path to a data file parsed by the adapter.
            lags: VAR lag order.
            variables: Optional variable names.
            output_format: Adapter output format.
            save_path: Optional path to persist the formatted result.
            ctx: Optional MCP context for progress reporting.
        """
        try:
            if ctx:
                await ctx.info("Starting Panel VAR model analysis...")
            result = panel_var_adapter(
                data, entity_ids, time_periods, file_path, lags, variables,
                output_format, save_path
            )
            if ctx:
                await ctx.info("Panel VAR analysis complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def structural_break_tool(
        data: Optional[List[float]] = None,
        file_path: Optional[str] = None,
        test_type: str = "chow",
        break_point: Optional[int] = None,
        max_breaks: int = 5,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Structural Break Tests (Chow, Quandt-Andrews, Bai-Perron).

        Args:
            data: Univariate series; mutually exclusive with ``file_path``.
            file_path: Path to a data file parsed by the adapter.
            test_type: Break test to run, e.g. ``"chow"``.
            break_point: Hypothesized break index (Chow test).
            max_breaks: Maximum number of breaks to search for.
            output_format: Adapter output format.
            save_path: Optional path to persist the formatted result.
            ctx: Optional MCP context for progress reporting.
        """
        try:
            if ctx:
                await ctx.info(f"Starting {test_type} structural break test...")
            result = structural_break_adapter(
                data, file_path, test_type, break_point, max_breaks,
                output_format, save_path
            )
            if ctx:
                await ctx.info(f"{test_type} test complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def time_varying_parameter_tool(
        y_data: Optional[List[float]] = None,
        x_data: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        model_type: str = "tar",
        threshold_variable: Optional[List[float]] = None,
        n_regimes: int = 2,
        star_type: str = "logistic",
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Time-Varying Parameter Models (TAR / STAR / Markov switching).

        Args:
            y_data: Dependent-variable series.
            x_data: Regressor series, one inner list per regressor.
            file_path: Path to a data file parsed by the adapter.
            model_type: ``"tar"``, ``"star"`` or another adapter-supported model.
            threshold_variable: Series driving the regime transitions.
            n_regimes: Number of regimes.
            star_type: Transition function for STAR models.
            output_format: Adapter output format.
            save_path: Optional path to persist the formatted result.
            ctx: Optional MCP context for progress reporting.
        """
        try:
            if ctx:
                await ctx.info(f"Starting {model_type.upper()} time-varying parameter model...")
            result = time_varying_parameter_adapter(
                y_data, x_data, file_path, model_type, threshold_variable,
                n_regimes, star_type, output_format, save_path
            )
            if ctx:
                await ctx.info(f"{model_type.upper()} model complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def cointegration_tool(
        data: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        analysis_type: str = "johansen",
        variables: Optional[List[str]] = None,
        coint_rank: int = 1,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Cointegration Analysis (Engle-Granger, Johansen, VECM).

        Args:
            data: Multivariate series as a 2D list, one time point per row.
            file_path: Path to a data file parsed by the adapter.
            analysis_type: ``"johansen"``, ``"engle_granger"``, etc.
            variables: Optional variable names.
            coint_rank: Cointegration rank used for VECM estimation.
            output_format: Adapter output format.
            save_path: Optional path to persist the formatted result.
            ctx: Optional MCP context for progress reporting.
        """
        try:
            if ctx:
                await ctx.info(f"Starting {analysis_type} cointegration analysis...")

            # Normalize/validate inline data (file_path input skips this).
            data = TimeSeriesTools._coerce_to_matrix(
                data, "数据格式不正确,应为二维列表,每行代表一个时间点的多个变量值"
            )

            result = cointegration_adapter(data, file_path, analysis_type, variables, coint_rank, output_format, save_path)
            if ctx:
                await ctx.info(f"{analysis_type} analysis complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def dynamic_panel_tool(
        y_data: Optional[List[float]] = None,
        x_data: Optional[List[List[float]]] = None,
        entity_ids: Optional[List[int]] = None,
        time_periods: Optional[List[int]] = None,
        file_path: Optional[str] = None,
        model_type: str = "diff_gmm",
        lags: int = 1,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """
        Dynamic Panel Data Model

        Data format:
        - y_data: dependent variable, flat list holding the stacked time
          series of every entity
        - x_data: regressors, 2D list where each inner list is the complete
          time series of one regressor
        - entity_ids: flat list mapping each observation to its entity
        - time_periods: flat list mapping each observation to its time point

        Important: all inputs must contain the same number of observations.

        Example call:
        {
            "y_data": [1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8],
            "x_data": [[1.5, 1.7, 1.9, 2.1, 2.3, 2.5, 2.7, 2.9, 3.1, 3.3]],
            "entity_ids": [1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
            "time_periods": [1, 2, 3, 4, 5, 1, 2, 3, 4, 5],
            "model_type": "diff_gmm",
            "lags": 1,
            "output_format": "json"
        }
        """
        try:
            if ctx:
                await ctx.info(f"Starting {model_type} dynamic panel model...")
            result = dynamic_panel_adapter(y_data, x_data, entity_ids, time_periods, file_path, model_type, lags, output_format, save_path)
            if ctx:
                await ctx.info(f"{model_type} model complete")
            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise
@@ -0,0 +1,114 @@
1
+ """
2
+ MCP 工具注册中心
3
+ 自动发现和注册所有工具组件
4
+ """
5
+
6
+ from typing import Dict, List, Callable, Any
7
+ import importlib
8
+ import inspect
9
+ from pathlib import Path
10
+
11
+
12
class ToolGroup:
    """Base class for a group of related MCP tools.

    Subclasses declare the metadata attributes below and implement
    :meth:`get_tools`; :meth:`get_help_text` may be overridden to provide
    richer documentation than the default one-liner.
    """

    # Group metadata displayed in registries and help output.
    name: str = ""
    description: str = ""
    version: str = "1.0.0"

    @classmethod
    def get_tools(cls) -> List[Dict[str, Any]]:
        """Return every tool provided by this group.

        Expected return format::

            [
                {
                    "name": "tool_name",
                    "handler": async_function,
                    "description": "Tool description"
                },
                ...
            ]
        """
        raise NotImplementedError

    @classmethod
    def get_help_text(cls) -> str:
        """Return a one-line "<name> - <description>" help string."""
        return cls.name + " - " + cls.description
41
+
42
+
43
class ToolRegistry:
    """Central registry that collects tool groups and indexes their tools."""

    def __init__(self):
        # Registered group instances, in registration order.
        self.tool_groups: List[ToolGroup] = []
        # Tool name -> {"handler", "description", "group"}.
        self.tools: Dict[str, Dict[str, Any]] = {}

    def register_group(self, group: ToolGroup):
        """Register a tool group and index every tool it exposes.

        NOTE(review): a tool whose name collides with an earlier registration
        silently overwrites it — confirm this is intended.
        """
        self.tool_groups.append(group)

        for tool in group.get_tools():
            self.tools[tool["name"]] = {
                "handler": tool["handler"],
                "description": tool.get("description", ""),
                "group": group.name
            }

    def auto_discover_groups(self, base_path: Optional[str] = None):
        """Discover and register all tool groups under ``mcp_tool_groups``.

        Scans ``<base_path>/mcp_tool_groups`` for ``*_tools.py`` modules and
        registers every concrete :class:`ToolGroup` subclass found in them.
        Discovery is best-effort: a module that fails to import (or whose
        group raises during registration) is reported and skipped so the
        remaining groups still load.

        Args:
            base_path: Directory containing ``mcp_tool_groups``; defaults to
                this module's own directory.  Note that only the directory
                scan honors ``base_path`` — the import itself always uses the
                fixed ``tools.mcp_tool_groups`` package path.
        """
        if base_path is None:
            base_path = Path(__file__).parent

        # Scan the mcp_tool_groups directory; nothing to do if it is absent.
        groups_dir = Path(base_path) / "mcp_tool_groups"
        if not groups_dir.exists():
            return

        # Import every tool-group module and register its group classes.
        for module_file in groups_dir.glob("*_tools.py"):
            module_name = module_file.stem
            try:
                module = importlib.import_module(f"tools.mcp_tool_groups.{module_name}")

                # Register each concrete ToolGroup subclass (skip the base).
                for name, obj in inspect.getmembers(module, inspect.isclass):
                    if (issubclass(obj, ToolGroup) and
                        obj is not ToolGroup and
                        hasattr(obj, 'get_tools')):

                        self.register_group(obj())

            except Exception as e:
                print(f"Failed to load tool group from {module_name}: {e}")

    def get_all_tools(self) -> Dict[str, Dict[str, Any]]:
        """Return the live mapping of all registered tools (not a copy)."""
        return self.tools

    def get_tool_names(self) -> List[str]:
        """Return the names of all registered tools."""
        return list(self.tools.keys())

    def get_help_text(self) -> str:
        """Assemble the full help document from every registered group."""
        help_lines = ["Econometrics Tools Guide (Component-Based Architecture):\n"]

        for group in self.tool_groups:
            help_lines.append(f"\n{group.name}")
            help_lines.append("=" * len(group.name))
            help_lines.append(group.get_help_text())
            help_lines.append("")

        return "\n".join(help_lines)
111
+
112
+
113
# Global registry singleton shared by the MCP server and tool modules.
registry = ToolRegistry()