aigroup-econ-mcp 1.3.3 (py3-none-any.whl) → 2.0.1 (py3-none-any.whl)
This diff shows the changes between publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.
- .gitignore +253 -0
- PKG-INFO +732 -0
- README.md +687 -0
- __init__.py +14 -0
- aigroup_econ_mcp-2.0.1.dist-info/METADATA +732 -0
- aigroup_econ_mcp-2.0.1.dist-info/RECORD +170 -0
- aigroup_econ_mcp-2.0.1.dist-info/entry_points.txt +2 -0
- aigroup_econ_mcp-2.0.1.dist-info/licenses/LICENSE +21 -0
- cli.py +32 -0
- econometrics/README.md +18 -0
- econometrics/__init__.py +191 -0
- econometrics/advanced_methods/modern_computing_machine_learning/__init__.py +30 -0
- econometrics/advanced_methods/modern_computing_machine_learning/causal_forest.py +253 -0
- econometrics/advanced_methods/modern_computing_machine_learning/double_ml.py +268 -0
- econometrics/advanced_methods/modern_computing_machine_learning/gradient_boosting.py +249 -0
- econometrics/advanced_methods/modern_computing_machine_learning/hierarchical_clustering.py +243 -0
- econometrics/advanced_methods/modern_computing_machine_learning/kmeans_clustering.py +293 -0
- econometrics/advanced_methods/modern_computing_machine_learning/neural_network.py +264 -0
- econometrics/advanced_methods/modern_computing_machine_learning/random_forest.py +195 -0
- econometrics/advanced_methods/modern_computing_machine_learning/support_vector_machine.py +226 -0
- econometrics/advanced_methods/modern_computing_machine_learning/test_all_modules.py +329 -0
- econometrics/advanced_methods/modern_computing_machine_learning/test_report.md +107 -0
- econometrics/basic_parametric_estimation/__init__.py +31 -0
- econometrics/basic_parametric_estimation/gmm/__init__.py +13 -0
- econometrics/basic_parametric_estimation/gmm/gmm_model.py +256 -0
- econometrics/basic_parametric_estimation/mle/__init__.py +13 -0
- econometrics/basic_parametric_estimation/mle/mle_model.py +241 -0
- econometrics/basic_parametric_estimation/ols/__init__.py +13 -0
- econometrics/basic_parametric_estimation/ols/ols_model.py +141 -0
- econometrics/causal_inference/__init__.py +66 -0
- econometrics/causal_inference/causal_identification_strategy/__init__.py +104 -0
- econometrics/causal_inference/causal_identification_strategy/control_function.py +112 -0
- econometrics/causal_inference/causal_identification_strategy/difference_in_differences.py +107 -0
- econometrics/causal_inference/causal_identification_strategy/event_study.py +119 -0
- econometrics/causal_inference/causal_identification_strategy/first_difference.py +89 -0
- econometrics/causal_inference/causal_identification_strategy/fixed_effects.py +103 -0
- econometrics/causal_inference/causal_identification_strategy/hausman_test.py +69 -0
- econometrics/causal_inference/causal_identification_strategy/instrumental_variables.py +145 -0
- econometrics/causal_inference/causal_identification_strategy/mediation_analysis.py +121 -0
- econometrics/causal_inference/causal_identification_strategy/moderation_analysis.py +109 -0
- econometrics/causal_inference/causal_identification_strategy/propensity_score_matching.py +140 -0
- econometrics/causal_inference/causal_identification_strategy/random_effects.py +100 -0
- econometrics/causal_inference/causal_identification_strategy/regression_discontinuity.py +98 -0
- econometrics/causal_inference/causal_identification_strategy/synthetic_control.py +111 -0
- econometrics/causal_inference/causal_identification_strategy/triple_difference.py +86 -0
- econometrics/distribution_analysis/__init__.py +28 -0
- econometrics/distribution_analysis/oaxaca_blinder.py +184 -0
- econometrics/distribution_analysis/time_series_decomposition.py +152 -0
- econometrics/distribution_analysis/variance_decomposition.py +179 -0
- econometrics/missing_data/__init__.py +18 -0
- econometrics/missing_data/imputation_methods.py +219 -0
- econometrics/missing_data/missing_data_measurement_error/__init__.py +0 -0
- econometrics/model_specification_diagnostics_robust_inference/README.md +173 -0
- econometrics/model_specification_diagnostics_robust_inference/__init__.py +78 -0
- econometrics/model_specification_diagnostics_robust_inference/diagnostic_tests/__init__.py +20 -0
- econometrics/model_specification_diagnostics_robust_inference/diagnostic_tests/diagnostic_tests_model.py +149 -0
- econometrics/model_specification_diagnostics_robust_inference/generalized_least_squares/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/generalized_least_squares/gls_model.py +130 -0
- econometrics/model_specification_diagnostics_robust_inference/model_selection/__init__.py +18 -0
- econometrics/model_specification_diagnostics_robust_inference/model_selection/model_selection_model.py +286 -0
- econometrics/model_specification_diagnostics_robust_inference/regularization/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/regularization/regularization_model.py +177 -0
- econometrics/model_specification_diagnostics_robust_inference/robust_errors/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/robust_errors/robust_errors_model.py +122 -0
- econometrics/model_specification_diagnostics_robust_inference/simultaneous_equations/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/simultaneous_equations/simultaneous_equations_model.py +246 -0
- econometrics/model_specification_diagnostics_robust_inference/weighted_least_squares/__init__.py +15 -0
- econometrics/model_specification_diagnostics_robust_inference/weighted_least_squares/wls_model.py +127 -0
- econometrics/nonparametric/__init__.py +35 -0
- econometrics/nonparametric/gam_model.py +117 -0
- econometrics/nonparametric/kernel_regression.py +161 -0
- econometrics/nonparametric/nonparametric_semiparametric_methods/__init__.py +0 -0
- econometrics/nonparametric/quantile_regression.py +249 -0
- econometrics/nonparametric/spline_regression.py +100 -0
- econometrics/spatial_econometrics/__init__.py +68 -0
- econometrics/spatial_econometrics/geographically_weighted_regression.py +211 -0
- econometrics/spatial_econometrics/gwr_simple.py +154 -0
- econometrics/spatial_econometrics/spatial_autocorrelation.py +356 -0
- econometrics/spatial_econometrics/spatial_durbin_model.py +177 -0
- econometrics/spatial_econometrics/spatial_econometrics_new/__init__.py +0 -0
- econometrics/spatial_econometrics/spatial_regression.py +315 -0
- econometrics/spatial_econometrics/spatial_weights.py +226 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/README.md +164 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/__init__.py +40 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/count_data_models.py +311 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/discrete_choice_models.py +294 -0
- econometrics/specific_data_modeling/micro_discrete_limited_data/limited_dependent_variable_models.py +282 -0
- econometrics/specific_data_modeling/survival_duration_data/__init__.py +0 -0
- econometrics/specific_data_modeling/time_series_panel_data/__init__.py +143 -0
- econometrics/specific_data_modeling/time_series_panel_data/arima_model.py +104 -0
- econometrics/specific_data_modeling/time_series_panel_data/cointegration_vecm.py +334 -0
- econometrics/specific_data_modeling/time_series_panel_data/dynamic_panel_models.py +653 -0
- econometrics/specific_data_modeling/time_series_panel_data/exponential_smoothing.py +176 -0
- econometrics/specific_data_modeling/time_series_panel_data/garch_model.py +198 -0
- econometrics/specific_data_modeling/time_series_panel_data/panel_diagnostics.py +125 -0
- econometrics/specific_data_modeling/time_series_panel_data/panel_var.py +60 -0
- econometrics/specific_data_modeling/time_series_panel_data/structural_break_tests.py +87 -0
- econometrics/specific_data_modeling/time_series_panel_data/time_varying_parameter_models.py +106 -0
- econometrics/specific_data_modeling/time_series_panel_data/unit_root_tests.py +204 -0
- econometrics/specific_data_modeling/time_series_panel_data/var_svar_model.py +372 -0
- econometrics/statistical_inference/__init__.py +21 -0
- econometrics/statistical_inference/bootstrap_methods.py +162 -0
- econometrics/statistical_inference/permutation_test.py +177 -0
- econometrics/statistical_inference/statistical_inference_techniques/__init__.py +0 -0
- econometrics/statistics/distribution_decomposition_methods/__init__.py +0 -0
- econometrics/survival_analysis/__init__.py +18 -0
- econometrics/survival_analysis/survival_models.py +259 -0
- econometrics/tests/basic_parametric_estimation_tests/__init__.py +3 -0
- econometrics/tests/basic_parametric_estimation_tests/test_gmm.py +128 -0
- econometrics/tests/basic_parametric_estimation_tests/test_mle.py +127 -0
- econometrics/tests/basic_parametric_estimation_tests/test_ols.py +100 -0
- econometrics/tests/causal_inference_tests/__init__.py +3 -0
- econometrics/tests/causal_inference_tests/detailed_test.py +441 -0
- econometrics/tests/causal_inference_tests/test_all_methods.py +418 -0
- econometrics/tests/causal_inference_tests/test_causal_identification_strategy.py +202 -0
- econometrics/tests/causal_inference_tests/test_difference_in_differences.py +53 -0
- econometrics/tests/causal_inference_tests/test_instrumental_variables.py +44 -0
- econometrics/tests/model_specification_diagnostics_tests/__init__.py +3 -0
- econometrics/tests/model_specification_diagnostics_tests/test_diagnostic_tests.py +86 -0
- econometrics/tests/model_specification_diagnostics_tests/test_robust_errors.py +89 -0
- econometrics/tests/specific_data_modeling_tests/__init__.py +3 -0
- econometrics/tests/specific_data_modeling_tests/test_arima.py +98 -0
- econometrics/tests/specific_data_modeling_tests/test_dynamic_panel.py +198 -0
- econometrics/tests/specific_data_modeling_tests/test_exponential_smoothing.py +105 -0
- econometrics/tests/specific_data_modeling_tests/test_garch.py +118 -0
- econometrics/tests/specific_data_modeling_tests/test_micro_discrete_limited_data.py +189 -0
- econometrics/tests/specific_data_modeling_tests/test_unit_root.py +156 -0
- econometrics/tests/specific_data_modeling_tests/test_var.py +124 -0
- econometrics/未开发大类优先级分析.md +544 -0
- prompts/__init__.py +0 -0
- prompts/analysis_guides.py +43 -0
- pyproject.toml +85 -0
- resources/MCP_MASTER_GUIDE.md +422 -0
- resources/MCP_TOOLS_DATA_FORMAT_GUIDE.md +185 -0
- resources/__init__.py +0 -0
- server.py +97 -0
- tools/README.md +88 -0
- tools/__init__.py +119 -0
- tools/causal_inference_adapter.py +658 -0
- tools/data_loader.py +213 -0
- tools/decorators.py +38 -0
- tools/distribution_analysis_adapter.py +121 -0
- tools/econometrics_adapter.py +286 -0
- tools/gwr_simple_adapter.py +54 -0
- tools/machine_learning_adapter.py +567 -0
- tools/mcp_tool_groups/__init__.py +15 -0
- tools/mcp_tool_groups/basic_parametric_tools.py +173 -0
- tools/mcp_tool_groups/causal_inference_tools.py +643 -0
- tools/mcp_tool_groups/distribution_analysis_tools.py +169 -0
- tools/mcp_tool_groups/machine_learning_tools.py +422 -0
- tools/mcp_tool_groups/microecon_tools.py +325 -0
- tools/mcp_tool_groups/missing_data_tools.py +117 -0
- tools/mcp_tool_groups/model_specification_tools.py +402 -0
- tools/mcp_tool_groups/nonparametric_tools.py +225 -0
- tools/mcp_tool_groups/spatial_econometrics_tools.py +323 -0
- tools/mcp_tool_groups/statistical_inference_tools.py +131 -0
- tools/mcp_tool_groups/time_series_tools.py +494 -0
- tools/mcp_tools_registry.py +124 -0
- tools/microecon_adapter.py +412 -0
- tools/missing_data_adapter.py +73 -0
- tools/model_specification_adapter.py +369 -0
- tools/nonparametric_adapter.py +190 -0
- tools/output_formatter.py +563 -0
- tools/spatial_econometrics_adapter.py +318 -0
- tools/statistical_inference_adapter.py +90 -0
- tools/survival_analysis_adapter.py +46 -0
- tools/time_series_panel_data_adapter.py +858 -0
- tools/time_series_panel_data_tools.py +65 -0
- aigroup_econ_mcp/__init__.py +0 -19
- aigroup_econ_mcp/cli.py +0 -82
- aigroup_econ_mcp/config.py +0 -561
- aigroup_econ_mcp/server.py +0 -452
- aigroup_econ_mcp/tools/__init__.py +0 -19
- aigroup_econ_mcp/tools/base.py +0 -470
- aigroup_econ_mcp/tools/cache.py +0 -533
- aigroup_econ_mcp/tools/data_loader.py +0 -195
- aigroup_econ_mcp/tools/file_parser.py +0 -1027
- aigroup_econ_mcp/tools/machine_learning.py +0 -60
- aigroup_econ_mcp/tools/ml_ensemble.py +0 -210
- aigroup_econ_mcp/tools/ml_evaluation.py +0 -272
- aigroup_econ_mcp/tools/ml_models.py +0 -54
- aigroup_econ_mcp/tools/ml_regularization.py +0 -186
- aigroup_econ_mcp/tools/monitoring.py +0 -555
- aigroup_econ_mcp/tools/optimized_example.py +0 -229
- aigroup_econ_mcp/tools/panel_data.py +0 -619
- aigroup_econ_mcp/tools/regression.py +0 -214
- aigroup_econ_mcp/tools/statistics.py +0 -154
- aigroup_econ_mcp/tools/time_series.py +0 -698
- aigroup_econ_mcp/tools/timeout.py +0 -283
- aigroup_econ_mcp/tools/tool_descriptions.py +0 -410
- aigroup_econ_mcp/tools/tool_handlers.py +0 -1016
- aigroup_econ_mcp/tools/tool_registry.py +0 -478
- aigroup_econ_mcp/tools/validation.py +0 -482
- aigroup_econ_mcp-1.3.3.dist-info/METADATA +0 -525
- aigroup_econ_mcp-1.3.3.dist-info/RECORD +0 -30
- aigroup_econ_mcp-1.3.3.dist-info/entry_points.txt +0 -2
- /aigroup_econ_mcp-1.3.3.dist-info/licenses/LICENSE → /LICENSE +0 -0
- {aigroup_econ_mcp-1.3.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/WHEEL +0 -0
tools/mcp_tool_groups/causal_inference_tools.py (new file)

@@ -0,0 +1,643 @@

"""
Causal inference tool group
MCP tools for the 13 main causal identification strategies
"""

from typing import List, Optional, Union, Dict, Any
from mcp.server.fastmcp import Context
from mcp.server.session import ServerSession

from ..mcp_tools_registry import ToolGroup
from ..causal_inference_adapter import (
    did_adapter,
    iv_adapter,
    psm_adapter,
    fixed_effects_adapter,
    random_effects_adapter,
    rdd_adapter,
    synthetic_control_adapter,
    event_study_adapter,
    triple_difference_adapter,
    mediation_adapter,
    moderation_adapter,
    control_function_adapter,
    first_difference_adapter
)


class CausalInferenceTools(ToolGroup):
    """Causal inference tool group"""

    name = "CAUSAL INFERENCE"
    description = "Causal inference and identification strategy tools"
    version = "1.0.0"

    @classmethod
    def get_tools(cls) -> List[Dict[str, Any]]:
        """Return the tool list"""
        return [
            {
                "name": "causal_difference_in_differences",
                "handler": cls.did_tool,
                "description": "Difference-in-Differences (DID) Analysis"
            },
            {
                "name": "causal_instrumental_variables",
                "handler": cls.iv_tool,
                "description": "Instrumental Variables (IV/2SLS) Analysis"
            },
            {
                "name": "causal_propensity_score_matching",
                "handler": cls.psm_tool,
                "description": "Propensity Score Matching (PSM) Analysis"
            },
            {
                "name": "causal_fixed_effects",
                "handler": cls.fixed_effects_tool,
                "description": "Fixed Effects Model"
            },
            {
                "name": "causal_random_effects",
                "handler": cls.random_effects_tool,
                "description": "Random Effects Model"
            },
            {
                "name": "causal_regression_discontinuity",
                "handler": cls.rdd_tool,
                "description": "Regression Discontinuity Design (RDD)"
            },
            {
                "name": "causal_synthetic_control",
                "handler": cls.synthetic_control_tool,
                "description": "Synthetic Control Method"
            },
            {
                "name": "causal_event_study",
                "handler": cls.event_study_tool,
                "description": "Event Study Analysis"
            },
            {
                "name": "causal_triple_difference",
                "handler": cls.triple_difference_tool,
                "description": "Triple Difference (DDD) Analysis"
            },
            {
                "name": "causal_mediation_analysis",
                "handler": cls.mediation_tool,
                "description": "Mediation Effect Analysis"
            },
            {
                "name": "causal_moderation_analysis",
                "handler": cls.moderation_tool,
                "description": "Moderation Effect Analysis"
            },
            {
                "name": "causal_control_function",
                "handler": cls.control_function_tool,
                "description": "Control Function Approach"
            },
            {
                "name": "causal_first_difference",
                "handler": cls.first_difference_tool,
                "description": "First Difference Model"
            }
        ]

    @classmethod
    def get_help_text(cls) -> str:
        """Return help documentation"""
        return """
Causal inference tool group - 13 main causal identification strategies

1. Difference-in-Differences (DID) - causal_difference_in_differences
   - Difference-in-differences, used to evaluate the effect of policy interventions
   - Required data: treatment group, time period, outcome variable, covariates (optional)
   - Key assumption: parallel trends

2. Instrumental Variables (IV/2SLS) - causal_instrumental_variables
   - Instrumental variables, addresses endogeneity
   - Required data: dependent variable, endogenous regressors, instruments
   - Key assumptions: instrument relevance and exogeneity

3. Propensity Score Matching (PSM) - causal_propensity_score_matching
   - Propensity score matching, controls for confounders
   - Required data: treatment status, outcome variable, covariates
   - Methods: nearest-neighbor matching, etc.

4. Fixed Effects Model - causal_fixed_effects
   - Fixed effects model, controls for time-invariant individual heterogeneity
   - Required data: panel data (entity-time)
   - Application: panel data analysis

5. Random Effects Model - causal_random_effects
   - Random effects model, assumes individual effects are random
   - Required data: panel data (entity-time)
   - Application: panel data analysis

6. Regression Discontinuity Design (RDD) - causal_regression_discontinuity
   - Regression discontinuity design, exploits a cutoff in a continuous variable
   - Required data: running variable, outcome variable, cutoff value
   - Key assumption: continuity at the cutoff

7. Synthetic Control Method - causal_synthetic_control
   - Synthetic control method, constructs a counterfactual control group
   - Required data: multi-unit time series data
   - Applications: policy evaluation, comparative case studies

8. Event Study - causal_event_study
   - Event study, analyzes dynamic effects before and after treatment
   - Required data: panel data, event time
   - Application: testing the parallel trends assumption

9. Triple Difference (DDD) - causal_triple_difference
   - Triple difference, further controls for confounders
   - Required data: treatment group, time, cohort group
   - Application: complex policy evaluation

10. Mediation Analysis - causal_mediation_analysis
    - Mediation effect analysis, identifies causal mechanisms
    - Required data: outcome, treatment, mediator
    - Method: Baron-Kenny approach

11. Moderation Analysis - causal_moderation_analysis
    - Moderation effect analysis, tests conditional effects
    - Required data: outcome, predictor, moderator
    - Method: interaction-term regression

12. Control Function Approach - causal_control_function
    - Control function approach, addresses endogeneity
    - Required data: dependent variable, endogenous variables, exogenous variables
    - Application: handling endogeneity in nonlinear models

13. First Difference Model - causal_first_difference
    - First difference model, eliminates fixed effects
    - Required data: panel data
    - Application: short panel data analysis
"""

    @staticmethod
    async def did_tool(
        treatment: Optional[List[int]] = None,
        time_period: Optional[List[int]] = None,
        outcome: Optional[List[float]] = None,
        covariates: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Difference-in-Differences (DID) analysis"""
        try:
            if ctx:
                await ctx.info("Starting Difference-in-Differences analysis...")

            result = did_adapter(
                treatment=treatment,
                time_period=time_period,
                outcome=outcome,
                covariates=covariates,
                file_path=file_path,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("DID analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def iv_tool(
        y_data: Optional[List[float]] = None,
        x_data: Optional[List[List[float]]] = None,
        instruments: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        feature_names: Optional[List[str]] = None,
        instrument_names: Optional[List[str]] = None,
        constant: bool = True,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Instrumental Variables (IV/2SLS) analysis"""
        try:
            if ctx:
                await ctx.info("Starting Instrumental Variables analysis...")

            result = iv_adapter(
                y_data=y_data,
                x_data=x_data,
                instruments=instruments,
                file_path=file_path,
                feature_names=feature_names,
                instrument_names=instrument_names,
                constant=constant,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("IV/2SLS analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def psm_tool(
        treatment: Optional[List[int]] = None,
        outcome: Optional[List[float]] = None,
        covariates: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        matching_method: str = "nearest",
        k_neighbors: int = 1,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Propensity Score Matching (PSM) analysis"""
        try:
            if ctx:
                await ctx.info("Starting Propensity Score Matching analysis...")

            result = psm_adapter(
                treatment=treatment,
                outcome=outcome,
                covariates=covariates,
                file_path=file_path,
                matching_method=matching_method,
                k_neighbors=k_neighbors,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("PSM analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def fixed_effects_tool(
        y_data: Optional[List[float]] = None,
        x_data: Optional[List[List[float]]] = None,
        entity_ids: Optional[List[str]] = None,
        time_periods: Optional[List[str]] = None,
        file_path: Optional[str] = None,
        constant: bool = True,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Fixed effects model analysis"""
        try:
            if ctx:
                await ctx.info("Starting Fixed Effects Model analysis...")

            result = fixed_effects_adapter(
                y_data=y_data,
                x_data=x_data,
                entity_ids=entity_ids,
                time_periods=time_periods,
                file_path=file_path,
                constant=constant,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("Fixed Effects Model analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def random_effects_tool(
        y_data: Optional[List[float]] = None,
        x_data: Optional[List[List[float]]] = None,
        entity_ids: Optional[List[str]] = None,
        time_periods: Optional[List[str]] = None,
        file_path: Optional[str] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Random effects model analysis"""
        try:
            if ctx:
                await ctx.info("Starting Random Effects Model analysis...")

            result = random_effects_adapter(
                y_data=y_data,
                x_data=x_data,
                entity_ids=entity_ids,
                time_periods=time_periods,
                file_path=file_path,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("Random Effects Model analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def rdd_tool(
        running_variable: Optional[List[float]] = None,
        outcome: Optional[List[float]] = None,
        cutoff: float = 0.0,
        file_path: Optional[str] = None,
        bandwidth: Optional[float] = None,
        polynomial_order: int = 1,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Regression Discontinuity Design (RDD) analysis"""
        try:
            if ctx:
                await ctx.info("Starting Regression Discontinuity Design analysis...")

            result = rdd_adapter(
                running_variable=running_variable,
                outcome=outcome,
                cutoff=cutoff,
                file_path=file_path,
                bandwidth=bandwidth,
                polynomial_order=polynomial_order,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("RDD analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def synthetic_control_tool(
        outcome: Optional[List[float]] = None,
        treatment_period: int = 0,
        treated_unit: str = "unit_1",
        donor_units: Optional[List[str]] = None,
        time_periods: Optional[List[str]] = None,
        file_path: Optional[str] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Synthetic control method analysis"""
        try:
            if ctx:
                await ctx.info("Starting Synthetic Control Method analysis...")

            result = synthetic_control_adapter(
                outcome=outcome,
                treatment_period=treatment_period,
                treated_unit=treated_unit,
                donor_units=donor_units,
                time_periods=time_periods,
                file_path=file_path,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("Synthetic Control Method analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def event_study_tool(
        outcome: Optional[List[float]] = None,
        treatment: Optional[List[int]] = None,
        entity_ids: Optional[List[str]] = None,
        time_periods: Optional[List[str]] = None,
        event_time: Optional[List[int]] = None,
        file_path: Optional[str] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Event study analysis"""
        try:
            if ctx:
                await ctx.info("Starting Event Study analysis...")

            result = event_study_adapter(
                outcome=outcome,
                treatment=treatment,
                entity_ids=entity_ids,
                time_periods=time_periods,
                event_time=event_time,
                file_path=file_path,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("Event Study analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def triple_difference_tool(
        outcome: Optional[List[float]] = None,
        treatment_group: Optional[List[int]] = None,
        time_period: Optional[List[int]] = None,
        cohort_group: Optional[List[int]] = None,
        file_path: Optional[str] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Triple Difference (DDD) analysis"""
        try:
            if ctx:
                await ctx.info("Starting Triple Difference analysis...")

            result = triple_difference_adapter(
                outcome=outcome,
                treatment_group=treatment_group,
                time_period=time_period,
                cohort_group=cohort_group,
                file_path=file_path,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("Triple Difference analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def mediation_tool(
        outcome: Optional[List[float]] = None,
        treatment: Optional[List[float]] = None,
        mediator: Optional[List[float]] = None,
        covariates: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Mediation effect analysis"""
        try:
            if ctx:
                await ctx.info("Starting Mediation Analysis...")

            result = mediation_adapter(
                outcome=outcome,
                treatment=treatment,
                mediator=mediator,
                covariates=covariates,
                file_path=file_path,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("Mediation Analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def moderation_tool(
        outcome: Optional[List[float]] = None,
        predictor: Optional[List[float]] = None,
        moderator: Optional[List[float]] = None,
        covariates: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Moderation effect analysis"""
        try:
            if ctx:
                await ctx.info("Starting Moderation Analysis...")

            result = moderation_adapter(
                outcome=outcome,
                predictor=predictor,
                moderator=moderator,
                covariates=covariates,
                file_path=file_path,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("Moderation Analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def control_function_tool(
        y_data: Optional[List[float]] = None,
        x_data: Optional[List[float]] = None,
        z_data: Optional[List[List[float]]] = None,
        file_path: Optional[str] = None,
        constant: bool = True,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """Control function approach analysis"""
        try:
            if ctx:
                await ctx.info("Starting Control Function analysis...")

            result = control_function_adapter(
                y_data=y_data,
                x_data=x_data,
                z_data=z_data,
                file_path=file_path,
                constant=constant,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("Control Function analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise

    @staticmethod
    async def first_difference_tool(
        y_data: Optional[List[float]] = None,
        x_data: Optional[List[float]] = None,
        entity_ids: Optional[List[str]] = None,
        file_path: Optional[str] = None,
        output_format: str = "json",
        save_path: Optional[str] = None,
        ctx: Context[ServerSession, None] = None
    ) -> str:
        """First difference model analysis"""
        try:
            if ctx:
                await ctx.info("Starting First Difference Model analysis...")

            result = first_difference_adapter(
                y_data=y_data,
                x_data=x_data,
                entity_ids=entity_ids,
                file_path=file_path,
                output_format=output_format,
                save_path=save_path
            )

            if ctx:
                await ctx.info("First Difference Model analysis complete")

            return result
        except Exception as e:
            if ctx:
                await ctx.error(f"Error: {str(e)}")
            raise