aigroup-econ-mcp 1.3.3__py3-none-any.whl → 1.4.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- .gitignore +253 -0
- PKG-INFO +710 -0
- README.md +672 -0
- __init__.py +14 -0
- aigroup_econ_mcp-1.4.3.dist-info/METADATA +710 -0
- aigroup_econ_mcp-1.4.3.dist-info/RECORD +92 -0
- aigroup_econ_mcp-1.4.3.dist-info/entry_points.txt +2 -0
- aigroup_econ_mcp-1.4.3.dist-info/licenses/LICENSE +21 -0
- cli.py +28 -0
- econometrics/README.md +18 -0
- econometrics/__init__.py +191 -0
- econometrics/advanced_methods/modern_computing_machine_learning/__init__.py +0 -0
- econometrics/basic_parametric_estimation/__init__.py +31 -0
- econometrics/basic_parametric_estimation/gmm/__init__.py +13 -0
- econometrics/basic_parametric_estimation/gmm/gmm_model.py +256 -0
- econometrics/basic_parametric_estimation/mle/__init__.py +13 -0
- econometrics/basic_parametric_estimation/mle/mle_model.py +241 -0
- econometrics/basic_parametric_estimation/ols/__init__.py +13 -0
- econometrics/basic_parametric_estimation/ols/ols_model.py +141 -0
- econometrics/causal_inference/causal_identification_strategy/__init__.py +0 -0
- econometrics/missing_data/missing_data_measurement_error/__init__.py +0 -0
- econometrics/model_specification_diagnostics_robust_inference/README.md +173 -0
- econometrics/model_specification_diagnostics_robust_inference/__init__.py +78 -0
- econometrics/model_specification_diagnostics_robust_inference/diagnostic_tests/__init__.py +20 -0
- econometrics/model_specification_diagnostics_robust_inference/diagnostic_tests/diagnostic_tests_model.py +149 -0
- econometrics/model_specification_diagnostics_robust_inference/generalized_least_squares/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/generalized_least_squares/gls_model.py +130 -0
- econometrics/model_specification_diagnostics_robust_inference/model_selection/__init__.py +18 -0
- econometrics/model_specification_diagnostics_robust_inference/model_selection/model_selection_model.py +286 -0
- econometrics/model_specification_diagnostics_robust_inference/regularization/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/regularization/regularization_model.py +177 -0
- econometrics/model_specification_diagnostics_robust_inference/robust_errors/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/robust_errors/robust_errors_model.py +122 -0
- econometrics/model_specification_diagnostics_robust_inference/simultaneous_equations/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/simultaneous_equations/simultaneous_equations_model.py +246 -0
- econometrics/model_specification_diagnostics_robust_inference/weighted_least_squares/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/weighted_least_squares/wls_model.py +127 -0
- econometrics/nonparametric/nonparametric_semiparametric_methods/__init__.py +0 -0
- econometrics/spatial_econometrics/spatial_econometrics_new/__init__.py +0 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/__init__.py +0 -0
- econometrics/specific_data_modeling/survival_duration_data/__init__.py +0 -0
- econometrics/specific_data_modeling/time_series_panel_data/__init__.py +143 -0
- econometrics/specific_data_modeling/time_series_panel_data/arima_model.py +104 -0
- econometrics/specific_data_modeling/time_series_panel_data/cointegration_vecm.py +334 -0
- econometrics/specific_data_modeling/time_series_panel_data/dynamic_panel_models.py +653 -0
- econometrics/specific_data_modeling/time_series_panel_data/exponential_smoothing.py +176 -0
- econometrics/specific_data_modeling/time_series_panel_data/garch_model.py +198 -0
- econometrics/specific_data_modeling/time_series_panel_data/panel_diagnostics.py +125 -0
- econometrics/specific_data_modeling/time_series_panel_data/panel_var.py +60 -0
- econometrics/specific_data_modeling/time_series_panel_data/structural_break_tests.py +87 -0
- econometrics/specific_data_modeling/time_series_panel_data/time_varying_parameter_models.py +106 -0
- econometrics/specific_data_modeling/time_series_panel_data/unit_root_tests.py +204 -0
- econometrics/specific_data_modeling/time_series_panel_data/var_svar_model.py +372 -0
- econometrics/statistical_inference/statistical_inference_techniques/__init__.py +0 -0
- econometrics/statistics/distribution_decomposition_methods/__init__.py +0 -0
- econometrics/tests/basic_parametric_estimation_tests/__init__.py +3 -0
- econometrics/tests/basic_parametric_estimation_tests/test_gmm.py +128 -0
- econometrics/tests/basic_parametric_estimation_tests/test_mle.py +127 -0
- econometrics/tests/basic_parametric_estimation_tests/test_ols.py +100 -0
- econometrics/tests/model_specification_diagnostics_tests/__init__.py +3 -0
- econometrics/tests/model_specification_diagnostics_tests/test_diagnostic_tests.py +86 -0
- econometrics/tests/model_specification_diagnostics_tests/test_robust_errors.py +89 -0
- econometrics/tests/specific_data_modeling_tests/__init__.py +3 -0
- econometrics/tests/specific_data_modeling_tests/test_arima.py +98 -0
- econometrics/tests/specific_data_modeling_tests/test_dynamic_panel.py +198 -0
- econometrics/tests/specific_data_modeling_tests/test_exponential_smoothing.py +105 -0
- econometrics/tests/specific_data_modeling_tests/test_garch.py +118 -0
- econometrics/tests/specific_data_modeling_tests/test_unit_root.py +156 -0
- econometrics/tests/specific_data_modeling_tests/test_var.py +124 -0
- prompts/__init__.py +0 -0
- prompts/analysis_guides.py +43 -0
- pyproject.toml +78 -0
- resources/MCP_MASTER_GUIDE.md +422 -0
- resources/MCP_TOOLS_DATA_FORMAT_GUIDE.md +185 -0
- resources/__init__.py +0 -0
- server.py +83 -0
- tools/README.md +88 -0
- tools/__init__.py +45 -0
- tools/data_loader.py +213 -0
- tools/decorators.py +38 -0
- tools/econometrics_adapter.py +286 -0
- tools/mcp_tool_groups/__init__.py +1 -0
- tools/mcp_tool_groups/basic_parametric_tools.py +173 -0
- tools/mcp_tool_groups/model_specification_tools.py +402 -0
- tools/mcp_tool_groups/time_series_tools.py +494 -0
- tools/mcp_tools_registry.py +114 -0
- tools/model_specification_adapter.py +369 -0
- tools/output_formatter.py +563 -0
- tools/time_series_panel_data_adapter.py +858 -0
- tools/time_series_panel_data_tools.py +65 -0
- aigroup_econ_mcp/__init__.py +0 -19
- aigroup_econ_mcp/cli.py +0 -82
- aigroup_econ_mcp/config.py +0 -561
- aigroup_econ_mcp/server.py +0 -452
- aigroup_econ_mcp/tools/__init__.py +0 -19
- aigroup_econ_mcp/tools/base.py +0 -470
- aigroup_econ_mcp/tools/cache.py +0 -533
- aigroup_econ_mcp/tools/data_loader.py +0 -195
- aigroup_econ_mcp/tools/file_parser.py +0 -1027
- aigroup_econ_mcp/tools/machine_learning.py +0 -60
- aigroup_econ_mcp/tools/ml_ensemble.py +0 -210
- aigroup_econ_mcp/tools/ml_evaluation.py +0 -272
- aigroup_econ_mcp/tools/ml_models.py +0 -54
- aigroup_econ_mcp/tools/ml_regularization.py +0 -186
- aigroup_econ_mcp/tools/monitoring.py +0 -555
- aigroup_econ_mcp/tools/optimized_example.py +0 -229
- aigroup_econ_mcp/tools/panel_data.py +0 -619
- aigroup_econ_mcp/tools/regression.py +0 -214
- aigroup_econ_mcp/tools/statistics.py +0 -154
- aigroup_econ_mcp/tools/time_series.py +0 -698
- aigroup_econ_mcp/tools/timeout.py +0 -283
- aigroup_econ_mcp/tools/tool_descriptions.py +0 -410
- aigroup_econ_mcp/tools/tool_handlers.py +0 -1016
- aigroup_econ_mcp/tools/tool_registry.py +0 -478
- aigroup_econ_mcp/tools/validation.py +0 -482
- aigroup_econ_mcp-1.3.3.dist-info/METADATA +0 -525
- aigroup_econ_mcp-1.3.3.dist-info/RECORD +0 -30
- aigroup_econ_mcp-1.3.3.dist-info/entry_points.txt +0 -2
- /aigroup_econ_mcp-1.3.3.dist-info/licenses/LICENSE → /LICENSE +0 -0
- {aigroup_econ_mcp-1.3.3.dist-info → aigroup_econ_mcp-1.4.3.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,858 @@
|
|
|
1
|
+
"""
|
|
2
|
+
时间序列和面板数据模型适配器
|
|
3
|
+
将econometrics/specific_data_modeling/time_series_panel_data中的模型适配为MCP工具
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from typing import List, Optional, Union, Dict, Any
|
|
7
|
+
import sys
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
import json
|
|
10
|
+
|
|
11
|
+
# 确保可以导入econometrics模块
|
|
12
|
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
13
|
+
|
|
14
|
+
# 导入时间序列和面板数据模型
|
|
15
|
+
from econometrics.specific_data_modeling.time_series_panel_data.arima_model import (
|
|
16
|
+
arima_model as core_arima_model,
|
|
17
|
+
ARIMAResult as CoreARIMAResult
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
from econometrics.specific_data_modeling.time_series_panel_data.exponential_smoothing import (
|
|
21
|
+
exponential_smoothing_model as core_exponential_smoothing_model,
|
|
22
|
+
ExponentialSmoothingResult as CoreExponentialSmoothingResult
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
from econometrics.specific_data_modeling.time_series_panel_data.var_svar_model import (
|
|
26
|
+
var_model as core_var_model,
|
|
27
|
+
svar_model as core_svar_model,
|
|
28
|
+
VARResult as CoreVARResult
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
from econometrics.specific_data_modeling.time_series_panel_data.garch_model import (
|
|
32
|
+
garch_model as core_garch_model,
|
|
33
|
+
GARCHResult as CoreGARCHResult
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
from econometrics.specific_data_modeling.time_series_panel_data.cointegration_vecm import (
|
|
37
|
+
engle_granger_cointegration_test as core_engle_granger_cointegration_test,
|
|
38
|
+
johansen_cointegration_test as core_johansen_cointegration_test,
|
|
39
|
+
vecm_model as core_vecm_model,
|
|
40
|
+
CointegrationResult as CoreCointegrationResult,
|
|
41
|
+
VECMResult as CoreVECMResult
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
from econometrics.specific_data_modeling.time_series_panel_data.unit_root_tests import (
|
|
45
|
+
adf_test as core_adf_test,
|
|
46
|
+
pp_test as core_pp_test,
|
|
47
|
+
kpss_test as core_kpss_test,
|
|
48
|
+
UnitRootTestResult as CoreUnitRootTestResult
|
|
49
|
+
)
|
|
50
|
+
|
|
51
|
+
from econometrics.specific_data_modeling.time_series_panel_data.dynamic_panel_models import (
|
|
52
|
+
diff_gmm_model as core_diff_gmm_model,
|
|
53
|
+
sys_gmm_model as core_sys_gmm_model,
|
|
54
|
+
DynamicPanelResult as CoreDynamicPanelResult
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
from econometrics.specific_data_modeling.time_series_panel_data.panel_diagnostics import (
|
|
58
|
+
hausman_test as core_hausman_test,
|
|
59
|
+
pooling_f_test as core_pooling_f_test,
|
|
60
|
+
lm_test as core_lm_test,
|
|
61
|
+
within_correlation_test as core_within_correlation_test,
|
|
62
|
+
PanelDiagnosticResult as CorePanelDiagnosticResult
|
|
63
|
+
)
|
|
64
|
+
|
|
65
|
+
from econometrics.specific_data_modeling.time_series_panel_data.panel_var import (
|
|
66
|
+
panel_var_model as core_panel_var_model,
|
|
67
|
+
PanelVARResult as CorePanelVARResult
|
|
68
|
+
)
|
|
69
|
+
|
|
70
|
+
from econometrics.specific_data_modeling.time_series_panel_data.structural_break_tests import (
|
|
71
|
+
chow_test as core_chow_test,
|
|
72
|
+
quandt_andrews_test as core_quandt_andrews_test,
|
|
73
|
+
bai_perron_test as core_bai_perron_test,
|
|
74
|
+
StructuralBreakResult as CoreStructuralBreakResult
|
|
75
|
+
)
|
|
76
|
+
|
|
77
|
+
from econometrics.specific_data_modeling.time_series_panel_data.time_varying_parameter_models import (
|
|
78
|
+
tar_model as core_tar_model,
|
|
79
|
+
star_model as core_star_model,
|
|
80
|
+
markov_switching_model as core_markov_switching_model,
|
|
81
|
+
TimeVaryingParameterResult as CoreTimeVaryingParameterResult
|
|
82
|
+
)
|
|
83
|
+
|
|
84
|
+
# 导入数据加载和格式化组件
|
|
85
|
+
from .data_loader import DataLoader
|
|
86
|
+
from .output_formatter import OutputFormatter
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
class TimeSeriesPanelDataAdapter:
|
|
90
|
+
"""
|
|
91
|
+
时间序列和面板数据模型适配器
|
|
92
|
+
将core算法适配为MCP工具,支持文件输入和多种输出格式
|
|
93
|
+
"""
|
|
94
|
+
|
|
95
|
+
@staticmethod
|
|
96
|
+
def arima_model(
|
|
97
|
+
data: Optional[List[float]] = None,
|
|
98
|
+
file_path: Optional[str] = None,
|
|
99
|
+
order: tuple = (1, 1, 1),
|
|
100
|
+
forecast_steps: int = 1,
|
|
101
|
+
output_format: str = "json",
|
|
102
|
+
save_path: Optional[str] = None
|
|
103
|
+
) -> str:
|
|
104
|
+
"""
|
|
105
|
+
ARIMA模型适配器
|
|
106
|
+
|
|
107
|
+
Args:
|
|
108
|
+
data: 时间序列数据
|
|
109
|
+
file_path: 数据文件路径
|
|
110
|
+
order: (p,d,q) 参数设置
|
|
111
|
+
forecast_steps: 预测步数
|
|
112
|
+
output_format: 输出格式 ("json", "markdown", "html")
|
|
113
|
+
save_path: 保存路径
|
|
114
|
+
|
|
115
|
+
Returns:
|
|
116
|
+
str: 格式化的分析结果
|
|
117
|
+
"""
|
|
118
|
+
# 1. 数据准备
|
|
119
|
+
if file_path:
|
|
120
|
+
data_dict = DataLoader.load_from_file(file_path)
|
|
121
|
+
data = data_dict["data"]
|
|
122
|
+
elif data is None:
|
|
123
|
+
raise ValueError("Must provide either file_path or data")
|
|
124
|
+
|
|
125
|
+
# 2. 调用核心算法
|
|
126
|
+
result: CoreARIMAResult = core_arima_model(
|
|
127
|
+
data=data,
|
|
128
|
+
order=order,
|
|
129
|
+
forecast_steps=forecast_steps
|
|
130
|
+
)
|
|
131
|
+
|
|
132
|
+
# 3. 格式化输出
|
|
133
|
+
if output_format == "json":
|
|
134
|
+
return json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
135
|
+
else:
|
|
136
|
+
try:
|
|
137
|
+
|
|
138
|
+
formatted = OutputFormatter.format_arima_result(result, output_format)
|
|
139
|
+
|
|
140
|
+
except Exception as e:
|
|
141
|
+
|
|
142
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
143
|
+
|
|
144
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
145
|
+
if save_path:
|
|
146
|
+
OutputFormatter.save_to_file(formatted, save_path)
|
|
147
|
+
return f"ARIMA分析完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
|
|
148
|
+
return formatted
|
|
149
|
+
|
|
150
|
+
@staticmethod
|
|
151
|
+
def exponential_smoothing_model(
|
|
152
|
+
data: Optional[List[float]] = None,
|
|
153
|
+
file_path: Optional[str] = None,
|
|
154
|
+
trend: bool = True,
|
|
155
|
+
seasonal: bool = False,
|
|
156
|
+
seasonal_periods: Optional[int] = None,
|
|
157
|
+
forecast_steps: int = 1,
|
|
158
|
+
output_format: str = "json",
|
|
159
|
+
save_path: Optional[str] = None
|
|
160
|
+
) -> str:
|
|
161
|
+
"""
|
|
162
|
+
指数平滑模型适配器
|
|
163
|
+
|
|
164
|
+
Args:
|
|
165
|
+
data: 时间序列数据
|
|
166
|
+
file_path: 数据文件路径
|
|
167
|
+
trend: 是否包含趋势成分
|
|
168
|
+
seasonal: 是否包含季节成分
|
|
169
|
+
seasonal_periods: 季节周期长度
|
|
170
|
+
forecast_steps: 预测步数
|
|
171
|
+
output_format: 输出格式
|
|
172
|
+
save_path: 保存路径
|
|
173
|
+
|
|
174
|
+
Returns:
|
|
175
|
+
str: 格式化的分析结果
|
|
176
|
+
"""
|
|
177
|
+
# 1. 数据准备
|
|
178
|
+
if file_path:
|
|
179
|
+
data_dict = DataLoader.load_from_file(file_path)
|
|
180
|
+
data = data_dict["data"]
|
|
181
|
+
elif data is None:
|
|
182
|
+
raise ValueError("Must provide either file_path or data")
|
|
183
|
+
|
|
184
|
+
# 2. 调用核心算法
|
|
185
|
+
result: CoreExponentialSmoothingResult = core_exponential_smoothing_model(
|
|
186
|
+
data=data,
|
|
187
|
+
trend=trend,
|
|
188
|
+
seasonal=seasonal,
|
|
189
|
+
seasonal_periods=seasonal_periods,
|
|
190
|
+
forecast_steps=forecast_steps
|
|
191
|
+
)
|
|
192
|
+
|
|
193
|
+
# 3. 格式化输出
|
|
194
|
+
if output_format == "json":
|
|
195
|
+
return json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
196
|
+
else:
|
|
197
|
+
try:
|
|
198
|
+
|
|
199
|
+
formatted = OutputFormatter.format_exponential_smoothing_result(result, output_format)
|
|
200
|
+
|
|
201
|
+
except Exception as e:
|
|
202
|
+
|
|
203
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
204
|
+
|
|
205
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
206
|
+
if save_path:
|
|
207
|
+
OutputFormatter.save_to_file(formatted, save_path)
|
|
208
|
+
return f"指数平滑分析完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
|
|
209
|
+
return formatted
|
|
210
|
+
|
|
211
|
+
@staticmethod
|
|
212
|
+
def garch_model(
|
|
213
|
+
data: Optional[List[float]] = None,
|
|
214
|
+
file_path: Optional[str] = None,
|
|
215
|
+
order: tuple = (1, 1),
|
|
216
|
+
output_format: str = "json",
|
|
217
|
+
save_path: Optional[str] = None
|
|
218
|
+
) -> str:
|
|
219
|
+
"""
|
|
220
|
+
GARCH模型适配器
|
|
221
|
+
|
|
222
|
+
Args:
|
|
223
|
+
data: 时间序列数据
|
|
224
|
+
file_path: 数据文件路径
|
|
225
|
+
order: (p, q) 参数设置
|
|
226
|
+
output_format: 输出格式
|
|
227
|
+
save_path: 保存路径
|
|
228
|
+
|
|
229
|
+
Returns:
|
|
230
|
+
str: 格式化的分析结果
|
|
231
|
+
"""
|
|
232
|
+
# 1. 数据准备
|
|
233
|
+
if file_path:
|
|
234
|
+
data_dict = DataLoader.load_from_file(file_path)
|
|
235
|
+
data = data_dict["data"]
|
|
236
|
+
elif data is None:
|
|
237
|
+
raise ValueError("Must provide either file_path or data")
|
|
238
|
+
|
|
239
|
+
# 2. 调用核心算法
|
|
240
|
+
result: CoreGARCHResult = core_garch_model(
|
|
241
|
+
data=data,
|
|
242
|
+
order=order
|
|
243
|
+
)
|
|
244
|
+
|
|
245
|
+
# 3. 格式化输出
|
|
246
|
+
if output_format == "json":
|
|
247
|
+
return json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
248
|
+
else:
|
|
249
|
+
try:
|
|
250
|
+
|
|
251
|
+
formatted = OutputFormatter.format_garch_result(result, output_format)
|
|
252
|
+
|
|
253
|
+
except Exception as e:
|
|
254
|
+
|
|
255
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
256
|
+
|
|
257
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
258
|
+
if save_path:
|
|
259
|
+
OutputFormatter.save_to_file(formatted, save_path)
|
|
260
|
+
return f"GARCH分析完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
|
|
261
|
+
return formatted
|
|
262
|
+
|
|
263
|
+
@staticmethod
|
|
264
|
+
def unit_root_tests(
|
|
265
|
+
data: Optional[List[float]] = None,
|
|
266
|
+
file_path: Optional[str] = None,
|
|
267
|
+
test_type: str = "adf",
|
|
268
|
+
max_lags: Optional[int] = None,
|
|
269
|
+
regression_type: str = "c",
|
|
270
|
+
output_format: str = "json",
|
|
271
|
+
save_path: Optional[str] = None
|
|
272
|
+
) -> str:
|
|
273
|
+
"""
|
|
274
|
+
单位根检验适配器
|
|
275
|
+
|
|
276
|
+
Args:
|
|
277
|
+
data: 时间序列数据
|
|
278
|
+
file_path: 数据文件路径
|
|
279
|
+
test_type: 检验类型 ("adf", "pp", "kpss")
|
|
280
|
+
max_lags: 最大滞后阶数 (仅ADF检验)
|
|
281
|
+
regression_type: 回归类型 ("c"=常数, "ct"=常数和趋势, "nc"=无常数)
|
|
282
|
+
output_format: 输出格式
|
|
283
|
+
save_path: 保存路径
|
|
284
|
+
|
|
285
|
+
Returns:
|
|
286
|
+
str: 格式化的分析结果
|
|
287
|
+
"""
|
|
288
|
+
# 1. 数据准备
|
|
289
|
+
if file_path:
|
|
290
|
+
data_dict = DataLoader.load_from_file(file_path)
|
|
291
|
+
data = data_dict["data"]
|
|
292
|
+
elif data is None:
|
|
293
|
+
raise ValueError("Must provide either file_path or data")
|
|
294
|
+
|
|
295
|
+
# 2. 调用核心算法
|
|
296
|
+
result: CoreUnitRootTestResult = None
|
|
297
|
+
if test_type == "adf":
|
|
298
|
+
result = core_adf_test(data, max_lags=max_lags, regression_type=regression_type)
|
|
299
|
+
elif test_type == "pp":
|
|
300
|
+
result = core_pp_test(data, regression_type=regression_type)
|
|
301
|
+
elif test_type == "kpss":
|
|
302
|
+
result = core_kpss_test(data, regression_type=regression_type)
|
|
303
|
+
else:
|
|
304
|
+
raise ValueError(f"Unsupported test_type: {test_type}")
|
|
305
|
+
|
|
306
|
+
# 3. 格式化输出
|
|
307
|
+
if output_format == "json":
|
|
308
|
+
return json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
309
|
+
else:
|
|
310
|
+
try:
|
|
311
|
+
|
|
312
|
+
formatted = OutputFormatter.format_unit_root_test_result(result, output_format)
|
|
313
|
+
|
|
314
|
+
except Exception as e:
|
|
315
|
+
|
|
316
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
317
|
+
|
|
318
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
319
|
+
if save_path:
|
|
320
|
+
OutputFormatter.save_to_file(formatted, save_path)
|
|
321
|
+
return f"单位根检验({test_type.upper()})完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
|
|
322
|
+
return formatted
|
|
323
|
+
|
|
324
|
+
@staticmethod
|
|
325
|
+
def var_svar_model(
|
|
326
|
+
data: Optional[List[List[float]]] = None,
|
|
327
|
+
file_path: Optional[str] = None,
|
|
328
|
+
model_type: str = "var",
|
|
329
|
+
lags: int = 1,
|
|
330
|
+
variables: Optional[List[str]] = None,
|
|
331
|
+
a_matrix: Optional[List[List[float]]] = None,
|
|
332
|
+
b_matrix: Optional[List[List[float]]] = None,
|
|
333
|
+
output_format: str = "json",
|
|
334
|
+
save_path: Optional[str] = None
|
|
335
|
+
) -> str:
|
|
336
|
+
"""
|
|
337
|
+
VAR/SVAR模型适配器
|
|
338
|
+
|
|
339
|
+
Args:
|
|
340
|
+
data: 多元时间序列数据 (格式: 每个子列表代表一个时间点的所有变量值)
|
|
341
|
+
file_path: 数据文件路径
|
|
342
|
+
model_type: 模型类型 ("var", "svar")
|
|
343
|
+
lags: 滞后期数
|
|
344
|
+
variables: 变量名称列表
|
|
345
|
+
a_matrix: A约束矩阵 (仅SVAR)
|
|
346
|
+
b_matrix: B约束矩阵 (仅SVAR)
|
|
347
|
+
output_format: 输出格式
|
|
348
|
+
save_path: 保存路径
|
|
349
|
+
|
|
350
|
+
Returns:
|
|
351
|
+
str: 格式化的分析结果
|
|
352
|
+
"""
|
|
353
|
+
# 1. 数据准备
|
|
354
|
+
if file_path:
|
|
355
|
+
data_dict = DataLoader.load_from_file(file_path)
|
|
356
|
+
data = data_dict["data"]
|
|
357
|
+
variables = data_dict.get("variables") or variables
|
|
358
|
+
elif data is None:
|
|
359
|
+
raise ValueError("Must provide either file_path or data")
|
|
360
|
+
|
|
361
|
+
# 2. 数据格式转换:从时间点格式转换为变量格式
|
|
362
|
+
# 输入格式: [[var1_t1, var2_t1], [var1_t2, var2_t2], ...]
|
|
363
|
+
# 需要转换为: [[var1_t1, var1_t2, ...], [var2_t1, var2_t2, ...]]
|
|
364
|
+
if data and len(data) > 0:
|
|
365
|
+
n_vars = len(data[0])
|
|
366
|
+
n_obs = len(data)
|
|
367
|
+
|
|
368
|
+
# 转换数据格式
|
|
369
|
+
var_data = []
|
|
370
|
+
for var_idx in range(n_vars):
|
|
371
|
+
var_series = [data[t][var_idx] for t in range(n_obs)]
|
|
372
|
+
var_data.append(var_series)
|
|
373
|
+
|
|
374
|
+
# 如果没有提供变量名,自动生成
|
|
375
|
+
if variables is None:
|
|
376
|
+
variables = [f"Variable_{i}" for i in range(n_vars)]
|
|
377
|
+
|
|
378
|
+
# 检查变量数量是否与数据一致
|
|
379
|
+
if len(variables) != n_vars:
|
|
380
|
+
raise ValueError(f"变量名称数量({len(variables)})与数据列数({n_vars})不一致")
|
|
381
|
+
else:
|
|
382
|
+
raise ValueError("数据不能为空")
|
|
383
|
+
|
|
384
|
+
# 3. 调用核心算法
|
|
385
|
+
result: CoreVARResult = None
|
|
386
|
+
if model_type == "var":
|
|
387
|
+
result = core_var_model(var_data, lags=lags, variables=variables)
|
|
388
|
+
elif model_type == "svar":
|
|
389
|
+
result = core_svar_model(var_data, lags=lags, variables=variables, a_matrix=a_matrix, b_matrix=b_matrix)
|
|
390
|
+
else:
|
|
391
|
+
raise ValueError(f"Unsupported model_type: {model_type}")
|
|
392
|
+
|
|
393
|
+
# 4. 格式化输出
|
|
394
|
+
if output_format == "json":
|
|
395
|
+
return json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
396
|
+
else:
|
|
397
|
+
try:
|
|
398
|
+
|
|
399
|
+
formatted = OutputFormatter.format_var_result(result, output_format)
|
|
400
|
+
|
|
401
|
+
except Exception as e:
|
|
402
|
+
|
|
403
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
404
|
+
|
|
405
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
406
|
+
if save_path:
|
|
407
|
+
OutputFormatter.save_to_file(formatted, save_path)
|
|
408
|
+
return f"{model_type.upper()}分析完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
|
|
409
|
+
return formatted
|
|
410
|
+
|
|
411
|
+
@staticmethod
|
|
412
|
+
def cointegration_analysis(
|
|
413
|
+
data: Optional[List[List[float]]] = None,
|
|
414
|
+
file_path: Optional[str] = None,
|
|
415
|
+
analysis_type: str = "johansen",
|
|
416
|
+
variables: Optional[List[str]] = None,
|
|
417
|
+
coint_rank: int = 1,
|
|
418
|
+
output_format: str = "json",
|
|
419
|
+
save_path: Optional[str] = None
|
|
420
|
+
) -> str:
|
|
421
|
+
"""
|
|
422
|
+
协整分析适配器
|
|
423
|
+
|
|
424
|
+
Args:
|
|
425
|
+
data: 多元时间序列数据 (格式: 每个子列表代表一个时间点的所有变量值)
|
|
426
|
+
file_path: 数据文件路径
|
|
427
|
+
analysis_type: 分析类型 ("engle-granger", "johansen", "vecm")
|
|
428
|
+
variables: 变量名称列表
|
|
429
|
+
coint_rank: 协整秩 (仅VECM)
|
|
430
|
+
output_format: 输出格式
|
|
431
|
+
save_path: 保存路径
|
|
432
|
+
|
|
433
|
+
Returns:
|
|
434
|
+
str: 格式化的分析结果
|
|
435
|
+
"""
|
|
436
|
+
# 1. 数据准备
|
|
437
|
+
if file_path:
|
|
438
|
+
data_dict = DataLoader.load_from_file(file_path)
|
|
439
|
+
data = data_dict["data"]
|
|
440
|
+
variables = data_dict.get("variables") or variables
|
|
441
|
+
elif data is None:
|
|
442
|
+
raise ValueError("Must provide either file_path or data")
|
|
443
|
+
|
|
444
|
+
# 2. 数据格式转换:从时间点格式转换为变量格式
|
|
445
|
+
# 输入格式: [[var1_t1, var2_t1], [var1_t2, var2_t2], ...]
|
|
446
|
+
# 需要转换为: [[var1_t1, var1_t2, ...], [var2_t1, var2_t2, ...]]
|
|
447
|
+
if data and len(data) > 0:
|
|
448
|
+
n_vars = len(data[0])
|
|
449
|
+
n_obs = len(data)
|
|
450
|
+
|
|
451
|
+
# 转换数据格式
|
|
452
|
+
var_data = []
|
|
453
|
+
for var_idx in range(n_vars):
|
|
454
|
+
var_series = [data[t][var_idx] for t in range(n_obs)]
|
|
455
|
+
var_data.append(var_series)
|
|
456
|
+
|
|
457
|
+
# 如果没有提供变量名,自动生成
|
|
458
|
+
if variables is None:
|
|
459
|
+
variables = [f"Variable_{i}" for i in range(n_vars)]
|
|
460
|
+
|
|
461
|
+
# 检查变量数量是否与数据一致
|
|
462
|
+
if len(variables) != n_vars:
|
|
463
|
+
raise ValueError(f"变量名称数量({len(variables)})与数据列数({n_vars})不一致")
|
|
464
|
+
else:
|
|
465
|
+
raise ValueError("数据不能为空")
|
|
466
|
+
|
|
467
|
+
# 3. 调用核心算法
|
|
468
|
+
result = None
|
|
469
|
+
if analysis_type == "engle-granger":
|
|
470
|
+
result: CoreCointegrationResult = core_engle_granger_cointegration_test(var_data, variables=variables)
|
|
471
|
+
elif analysis_type == "johansen":
|
|
472
|
+
result: CoreCointegrationResult = core_johansen_cointegration_test(var_data, variables=variables)
|
|
473
|
+
elif analysis_type == "vecm":
|
|
474
|
+
result: CoreVECMResult = core_vecm_model(var_data, coint_rank=coint_rank, variables=variables)
|
|
475
|
+
else:
|
|
476
|
+
raise ValueError(f"Unsupported analysis_type: {analysis_type}")
|
|
477
|
+
|
|
478
|
+
# 4. 格式化输出
|
|
479
|
+
if output_format == "json":
|
|
480
|
+
return json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
481
|
+
else:
|
|
482
|
+
if analysis_type in ["engle-granger", "johansen"]:
|
|
483
|
+
try:
|
|
484
|
+
|
|
485
|
+
formatted = OutputFormatter.format_cointegration_result(result, output_format)
|
|
486
|
+
|
|
487
|
+
except Exception as e:
|
|
488
|
+
|
|
489
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
490
|
+
|
|
491
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
492
|
+
else: # vecm
|
|
493
|
+
try:
|
|
494
|
+
|
|
495
|
+
formatted = OutputFormatter.format_vecm_result(result, output_format)
|
|
496
|
+
|
|
497
|
+
except Exception as e:
|
|
498
|
+
|
|
499
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
500
|
+
|
|
501
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
502
|
+
|
|
503
|
+
if save_path:
|
|
504
|
+
OutputFormatter.save_to_file(formatted, save_path)
|
|
505
|
+
return f"{analysis_type.upper()}分析完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
|
|
506
|
+
return formatted
|
|
507
|
+
|
|
508
|
+
@staticmethod
|
|
509
|
+
def dynamic_panel_model(
|
|
510
|
+
y_data: Optional[List[float]] = None,
|
|
511
|
+
x_data: Optional[List[List[float]]] = None,
|
|
512
|
+
entity_ids: Optional[List[int]] = None,
|
|
513
|
+
time_periods: Optional[List[int]] = None,
|
|
514
|
+
file_path: Optional[str] = None,
|
|
515
|
+
model_type: str = "diff_gmm",
|
|
516
|
+
lags: int = 1,
|
|
517
|
+
output_format: str = "json",
|
|
518
|
+
save_path: Optional[str] = None
|
|
519
|
+
) -> str:
|
|
520
|
+
"""
|
|
521
|
+
动态面板模型适配器
|
|
522
|
+
|
|
523
|
+
Args:
|
|
524
|
+
y_data: 因变量数据
|
|
525
|
+
x_data: 自变量数据
|
|
526
|
+
entity_ids: 个体标识符
|
|
527
|
+
time_periods: 时间标识符
|
|
528
|
+
file_path: 数据文件路径
|
|
529
|
+
model_type: 模型类型 ("diff_gmm", "sys_gmm")
|
|
530
|
+
lags: 滞后期数
|
|
531
|
+
output_format: 输出格式
|
|
532
|
+
save_path: 保存路径
|
|
533
|
+
|
|
534
|
+
Returns:
|
|
535
|
+
str: 格式化的分析结果
|
|
536
|
+
"""
|
|
537
|
+
# 1. 数据准备
|
|
538
|
+
if file_path:
|
|
539
|
+
data_dict = DataLoader.load_from_file(file_path)
|
|
540
|
+
y_data = data_dict["y_data"]
|
|
541
|
+
x_data = data_dict["x_data"]
|
|
542
|
+
entity_ids = data_dict["entity_ids"]
|
|
543
|
+
time_periods = data_dict["time_periods"]
|
|
544
|
+
elif y_data is None or x_data is None or entity_ids is None or time_periods is None:
|
|
545
|
+
raise ValueError("Must provide either file_path or (y_data, x_data, entity_ids, time_periods)")
|
|
546
|
+
|
|
547
|
+
# 2. 调用核心算法(使用改进的手动实现)
|
|
548
|
+
try:
|
|
549
|
+
result: CoreDynamicPanelResult = None
|
|
550
|
+
if model_type == "diff_gmm":
|
|
551
|
+
result = core_diff_gmm_model(y_data, x_data, entity_ids, time_periods, lags=lags)
|
|
552
|
+
elif model_type == "sys_gmm":
|
|
553
|
+
result = core_sys_gmm_model(y_data, x_data, entity_ids, time_periods, lags=lags)
|
|
554
|
+
else:
|
|
555
|
+
raise ValueError(f"Unsupported model_type: {model_type}")
|
|
556
|
+
except Exception as e:
|
|
557
|
+
# 如果模型拟合失败,返回JSON格式的错误信息
|
|
558
|
+
error_info = {
|
|
559
|
+
"error": True,
|
|
560
|
+
"message": f"动态面板模型({model_type})拟合失败",
|
|
561
|
+
"details": str(e),
|
|
562
|
+
"suggestions": [
|
|
563
|
+
"数据格式问题 - 请检查数据维度是否一致",
|
|
564
|
+
"样本量不足 - 建议增加观测数量或减少滞后期数",
|
|
565
|
+
"多重共线性 - 尝试减少自变量数量或使用正则化",
|
|
566
|
+
"工具变量不足 - 确保有足够的工具变量",
|
|
567
|
+
"数值稳定性 - 尝试标准化数据或增加样本量"
|
|
568
|
+
],
|
|
569
|
+
"note": "当前使用手动实现的GMM算法,无需安装linearmodels包"
|
|
570
|
+
}
|
|
571
|
+
return json.dumps(error_info, ensure_ascii=False, indent=2)
|
|
572
|
+
|
|
573
|
+
# 3. 格式化输出
|
|
574
|
+
if output_format == "json":
|
|
575
|
+
return json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
576
|
+
else:
|
|
577
|
+
try:
|
|
578
|
+
|
|
579
|
+
formatted = OutputFormatter.format_dynamic_panel_result(result, output_format)
|
|
580
|
+
|
|
581
|
+
except Exception as e:
|
|
582
|
+
|
|
583
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
584
|
+
|
|
585
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
586
|
+
if save_path:
|
|
587
|
+
OutputFormatter.save_to_file(formatted, save_path)
|
|
588
|
+
return f"动态面板模型({model_type})分析完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
|
|
589
|
+
return formatted
|
|
590
|
+
|
|
591
|
+
@staticmethod
|
|
592
|
+
def panel_diagnostics(
|
|
593
|
+
test_type: str = "hausman",
|
|
594
|
+
fe_coefficients: Optional[List[float]] = None,
|
|
595
|
+
re_coefficients: Optional[List[float]] = None,
|
|
596
|
+
fe_covariance: Optional[List[List[float]]] = None,
|
|
597
|
+
re_covariance: Optional[List[List[float]]] = None,
|
|
598
|
+
pooled_ssrs: Optional[float] = None,
|
|
599
|
+
fixed_ssrs: Optional[float] = None,
|
|
600
|
+
random_ssrs: Optional[float] = None,
|
|
601
|
+
n_individuals: Optional[int] = None,
|
|
602
|
+
n_params: Optional[int] = None,
|
|
603
|
+
n_obs: Optional[int] = None,
|
|
604
|
+
n_periods: Optional[int] = None,
|
|
605
|
+
residuals: Optional[List[List[float]]] = None,
|
|
606
|
+
output_format: str = "json",
|
|
607
|
+
save_path: Optional[str] = None
|
|
608
|
+
) -> str:
|
|
609
|
+
"""
|
|
610
|
+
面板数据诊断检验适配器
|
|
611
|
+
|
|
612
|
+
Args:
|
|
613
|
+
test_type: 检验类型 ("hausman", "pooling_f", "lm", "within_correlation")
|
|
614
|
+
fe_coefficients: 固定效应模型系数 (Hausman)
|
|
615
|
+
re_coefficients: 随机效应模型系数 (Hausman)
|
|
616
|
+
fe_covariance: 固定效应模型协方差矩阵 (Hausman)
|
|
617
|
+
re_covariance: 随机效应模型协方差矩阵 (Hausman)
|
|
618
|
+
pooled_ssrs: 混合OLS模型残差平方和 (Pooling F, LM)
|
|
619
|
+
fixed_ssrs: 固定效应模型残差平方和 (Pooling F)
|
|
620
|
+
random_ssrs: 随机效应模型残差平方和 (LM)
|
|
621
|
+
n_individuals: 个体数量
|
|
622
|
+
n_params: 参数数量 (Pooling F)
|
|
623
|
+
n_obs: 观测数量
|
|
624
|
+
n_periods: 时间期数 (LM)
|
|
625
|
+
residuals: 面板数据残差 (Within Correlation)
|
|
626
|
+
output_format: 输出格式
|
|
627
|
+
save_path: 保存路径
|
|
628
|
+
|
|
629
|
+
Returns:
|
|
630
|
+
str: 格式化的分析结果
|
|
631
|
+
"""
|
|
632
|
+
# 调用核心算法
|
|
633
|
+
result: CorePanelDiagnosticResult = None
|
|
634
|
+
if test_type == "hausman":
|
|
635
|
+
if not all([fe_coefficients, re_coefficients, fe_covariance, re_covariance]):
|
|
636
|
+
raise ValueError("Hausman test requires fe_coefficients, re_coefficients, fe_covariance, re_covariance")
|
|
637
|
+
result = core_hausman_test(fe_coefficients, re_coefficients, fe_covariance, re_covariance)
|
|
638
|
+
elif test_type == "pooling_f":
|
|
639
|
+
if not all([pooled_ssrs is not None, fixed_ssrs is not None, n_individuals, n_params, n_obs]):
|
|
640
|
+
raise ValueError("Pooling F test requires pooled_ssrs, fixed_ssrs, n_individuals, n_params, n_obs")
|
|
641
|
+
result = core_pooling_f_test(pooled_ssrs, fixed_ssrs, n_individuals, n_params, n_obs)
|
|
642
|
+
elif test_type == "lm":
|
|
643
|
+
if not all([pooled_ssrs is not None, random_ssrs is not None, n_individuals, n_periods]):
|
|
644
|
+
raise ValueError("LM test requires pooled_ssrs, random_ssrs, n_individuals, n_periods")
|
|
645
|
+
result = core_lm_test(pooled_ssrs, random_ssrs, n_individuals, n_periods)
|
|
646
|
+
elif test_type == "within_correlation":
|
|
647
|
+
if residuals is None:
|
|
648
|
+
raise ValueError("Within correlation test requires residuals")
|
|
649
|
+
result = core_within_correlation_test(residuals)
|
|
650
|
+
else:
|
|
651
|
+
raise ValueError(f"Unsupported test_type: {test_type}")
|
|
652
|
+
|
|
653
|
+
# 格式化输出
|
|
654
|
+
if output_format == "json":
|
|
655
|
+
return json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
656
|
+
else:
|
|
657
|
+
try:
|
|
658
|
+
formatted = OutputFormatter.format_panel_diagnostic_result(result, output_format)
|
|
659
|
+
except Exception as e:
|
|
660
|
+
formatted = json.dumps(result.dict(), ensure_ascii=False, indent=2)
|
|
661
|
+
formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{formatted}"
|
|
662
|
+
if save_path:
|
|
663
|
+
OutputFormatter.save_to_file(formatted, save_path)
|
|
664
|
+
return f"面板数据诊断({test_type})完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
|
|
665
|
+
return formatted
|
|
666
|
+
|
|
667
|
+
@staticmethod
def panel_var_model(
    data: Optional[List[List[float]]] = None,
    entity_ids: Optional[List[int]] = None,
    time_periods: Optional[List[int]] = None,
    file_path: Optional[str] = None,
    lags: int = 1,
    variables: Optional[List[str]] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Adapter for the panel VAR model.

    Args:
        data: Multivariate panel data.
        entity_ids: Entity (individual) identifiers.
        time_periods: Time-period identifiers.
        file_path: Path to a data file; when given, its contents take
            precedence over the inline arguments.
        lags: Number of lags.
        variables: Variable names.
        output_format: Output format ("json" bypasses the formatter).
        save_path: Optional path to persist the formatted result.

    Returns:
        str: Formatted analysis result.

    Raises:
        ValueError: If neither file_path nor the (data, entity_ids,
            time_periods) triple is supplied.
    """
    # Resolve inputs: a file path wins; loaded identifiers/names fall back
    # to the explicitly passed ones when absent from the file.
    if file_path:
        loaded = DataLoader.load_from_file(file_path)
        data = loaded["data"]
        entity_ids = loaded.get("entity_ids") or entity_ids
        time_periods = loaded.get("time_periods") or time_periods
        variables = loaded.get("variables") or variables
    elif data is None or entity_ids is None or time_periods is None:
        raise ValueError("Must provide either file_path or (data, entity_ids, time_periods)")

    # Delegate the estimation to the core algorithm.
    result: CorePanelVARResult = core_panel_var_model(data, entity_ids, time_periods, lags, variables)

    # JSON output short-circuits the formatter entirely.
    if output_format == "json":
        return json.dumps(result.dict(), ensure_ascii=False, indent=2)

    try:
        formatted = OutputFormatter.format_panel_var_result(result, output_format)
    except Exception as e:
        # Formatter failed: fall back to JSON, prefixed with a warning.
        fallback = json.dumps(result.dict(), ensure_ascii=False, indent=2)
        formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{fallback}"

    if save_path:
        OutputFormatter.save_to_file(formatted, save_path)
        return f"面板VAR模型分析完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
    return formatted
|
721
|
+
@staticmethod
def structural_break_tests(
    data: Optional[List[float]] = None,
    file_path: Optional[str] = None,
    test_type: str = "chow",
    break_point: Optional[int] = None,
    max_breaks: int = 5,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Adapter for structural break tests.

    Args:
        data: Time-series data.
        file_path: Path to a data file; when given, overrides inline data.
        test_type: Test to run ("chow", "quandt_andrews", "bai_perron").
        break_point: Break position (Chow test only; defaults to the
            series midpoint when omitted).
        max_breaks: Maximum number of breaks (Bai-Perron test only).
        output_format: Output format ("json" bypasses the formatter).
        save_path: Optional path to persist the formatted result.

    Returns:
        str: Formatted analysis result.

    Raises:
        ValueError: If neither file_path nor data is supplied, or if
            test_type is not one of the supported tests.
    """
    # Resolve the input series.
    if file_path:
        data = DataLoader.load_from_file(file_path)["data"]
    elif data is None:
        raise ValueError("Must provide either file_path or data")

    # Dispatch to the requested core test.
    result: CoreStructuralBreakResult
    if test_type == "chow":
        # Chow needs a break point; default to the midpoint of the series.
        chow_break = break_point if break_point is not None else len(data) // 2
        result = core_chow_test(data, chow_break)
    elif test_type == "quandt_andrews":
        result = core_quandt_andrews_test(data)
    elif test_type == "bai_perron":
        result = core_bai_perron_test(data, max_breaks)
    else:
        raise ValueError(f"Unsupported test_type: {test_type}")

    # JSON output short-circuits the formatter entirely.
    if output_format == "json":
        return json.dumps(result.dict(), ensure_ascii=False, indent=2)

    try:
        formatted = OutputFormatter.format_structural_break_result(result, output_format)
    except Exception as e:
        # Formatter failed: fall back to JSON, prefixed with a warning.
        fallback = json.dumps(result.dict(), ensure_ascii=False, indent=2)
        formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{fallback}"

    if save_path:
        OutputFormatter.save_to_file(formatted, save_path)
        return f"结构突变检验({test_type})完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
    return formatted
|
|
779
|
+
|
|
780
|
+
@staticmethod
def time_varying_parameter_models(
    y_data: Optional[List[float]] = None,
    x_data: Optional[List[List[float]]] = None,
    file_path: Optional[str] = None,
    model_type: str = "tar",
    threshold_variable: Optional[List[float]] = None,
    n_regimes: int = 2,
    star_type: str = "logistic",
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Adapter for time-varying parameter models.

    Args:
        y_data: Dependent-variable data.
        x_data: Independent-variable data.
        file_path: Path to a data file; when given, overrides inline data.
        model_type: Model type ("tar", "star", "markov_switching").
        threshold_variable: Threshold variable (TAR/STAR only).
        n_regimes: Number of regimes (TAR/Markov-switching only).
        star_type: STAR transition type ("logistic", "exponential").
        output_format: Output format ("json" bypasses the formatter).
        save_path: Optional path to persist the formatted result.

    Returns:
        str: Formatted analysis result.

    Raises:
        ValueError: If neither file_path nor (y_data, x_data) is supplied,
            or if model_type is unsupported.
    """
    # Resolve inputs: a file path wins; a loaded threshold variable falls
    # back to the explicitly passed one when absent from the file.
    if file_path:
        loaded = DataLoader.load_from_file(file_path)
        y_data = loaded["y_data"]
        x_data = loaded["x_data"]
        threshold_variable = loaded.get("threshold_variable") or threshold_variable
    elif y_data is None or x_data is None:
        raise ValueError("Must provide either file_path or (y_data, x_data)")

    # TAR/STAR need a threshold variable; default to the lagged dependent
    # variable when none is supplied.
    # NOTE(review): y_data[:-1] is one element shorter than y_data/x_data —
    # assumes the core models reconcile the lengths internally; confirm.
    if threshold_variable is None and model_type in ("tar", "star"):
        threshold_variable = y_data[:-1]

    # Dispatch to the requested core model.
    result: CoreTimeVaryingParameterResult
    if model_type == "tar":
        result = core_tar_model(y_data, x_data, threshold_variable, n_regimes)
    elif model_type == "star":
        result = core_star_model(y_data, x_data, threshold_variable, star_type)
    elif model_type == "markov_switching":
        result = core_markov_switching_model(y_data, x_data, n_regimes)
    else:
        raise ValueError(f"Unsupported model_type: {model_type}")

    # JSON output short-circuits the formatter entirely.
    if output_format == "json":
        return json.dumps(result.dict(), ensure_ascii=False, indent=2)

    try:
        formatted = OutputFormatter.format_time_varying_parameter_result(result, output_format)
    except Exception as e:
        # Formatter failed: fall back to JSON, prefixed with a warning.
        fallback = json.dumps(result.dict(), ensure_ascii=False, indent=2)
        formatted = f"警告: {output_format}格式化失败({str(e)}),返回JSON格式\n\n{fallback}"

    if save_path:
        OutputFormatter.save_to_file(formatted, save_path)
        return f"时变参数模型({model_type})分析完成!\n\n{formatted}\n\n结果已保存到: {save_path}"
    return formatted
|
|
846
|
+
|
|
847
|
+
# Convenience aliases: module-level shortcuts to the adapter's static
# methods, so callers can invoke each tool without naming the class.
arima_adapter = TimeSeriesPanelDataAdapter.arima_model
exp_smoothing_adapter = TimeSeriesPanelDataAdapter.exponential_smoothing_model
garch_adapter = TimeSeriesPanelDataAdapter.garch_model
unit_root_adapter = TimeSeriesPanelDataAdapter.unit_root_tests
var_svar_adapter = TimeSeriesPanelDataAdapter.var_svar_model
cointegration_adapter = TimeSeriesPanelDataAdapter.cointegration_analysis
dynamic_panel_adapter = TimeSeriesPanelDataAdapter.dynamic_panel_model
panel_diagnostics_adapter = TimeSeriesPanelDataAdapter.panel_diagnostics
panel_var_adapter = TimeSeriesPanelDataAdapter.panel_var_model
structural_break_adapter = TimeSeriesPanelDataAdapter.structural_break_tests
time_varying_parameter_adapter = TimeSeriesPanelDataAdapter.time_varying_parameter_models
|