aigroup-econ-mcp 1.3.3__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff shows the contents of publicly available package versions as they appear in their respective public registries and is provided for informational purposes only.
Files changed (198)
  1. .gitignore +253 -0
  2. PKG-INFO +732 -0
  3. README.md +687 -0
  4. __init__.py +14 -0
  5. aigroup_econ_mcp-2.0.1.dist-info/METADATA +732 -0
  6. aigroup_econ_mcp-2.0.1.dist-info/RECORD +170 -0
  7. aigroup_econ_mcp-2.0.1.dist-info/entry_points.txt +2 -0
  8. aigroup_econ_mcp-2.0.1.dist-info/licenses/LICENSE +21 -0
  9. cli.py +32 -0
  10. econometrics/README.md +18 -0
  11. econometrics/__init__.py +191 -0
  12. econometrics/advanced_methods/modern_computing_machine_learning/__init__.py +30 -0
  13. econometrics/advanced_methods/modern_computing_machine_learning/causal_forest.py +253 -0
  14. econometrics/advanced_methods/modern_computing_machine_learning/double_ml.py +268 -0
  15. econometrics/advanced_methods/modern_computing_machine_learning/gradient_boosting.py +249 -0
  16. econometrics/advanced_methods/modern_computing_machine_learning/hierarchical_clustering.py +243 -0
  17. econometrics/advanced_methods/modern_computing_machine_learning/kmeans_clustering.py +293 -0
  18. econometrics/advanced_methods/modern_computing_machine_learning/neural_network.py +264 -0
  19. econometrics/advanced_methods/modern_computing_machine_learning/random_forest.py +195 -0
  20. econometrics/advanced_methods/modern_computing_machine_learning/support_vector_machine.py +226 -0
  21. econometrics/advanced_methods/modern_computing_machine_learning/test_all_modules.py +329 -0
  22. econometrics/advanced_methods/modern_computing_machine_learning/test_report.md +107 -0
  23. econometrics/basic_parametric_estimation/__init__.py +31 -0
  24. econometrics/basic_parametric_estimation/gmm/__init__.py +13 -0
  25. econometrics/basic_parametric_estimation/gmm/gmm_model.py +256 -0
  26. econometrics/basic_parametric_estimation/mle/__init__.py +13 -0
  27. econometrics/basic_parametric_estimation/mle/mle_model.py +241 -0
  28. econometrics/basic_parametric_estimation/ols/__init__.py +13 -0
  29. econometrics/basic_parametric_estimation/ols/ols_model.py +141 -0
  30. econometrics/causal_inference/__init__.py +66 -0
  31. econometrics/causal_inference/causal_identification_strategy/__init__.py +104 -0
  32. econometrics/causal_inference/causal_identification_strategy/control_function.py +112 -0
  33. econometrics/causal_inference/causal_identification_strategy/difference_in_differences.py +107 -0
  34. econometrics/causal_inference/causal_identification_strategy/event_study.py +119 -0
  35. econometrics/causal_inference/causal_identification_strategy/first_difference.py +89 -0
  36. econometrics/causal_inference/causal_identification_strategy/fixed_effects.py +103 -0
  37. econometrics/causal_inference/causal_identification_strategy/hausman_test.py +69 -0
  38. econometrics/causal_inference/causal_identification_strategy/instrumental_variables.py +145 -0
  39. econometrics/causal_inference/causal_identification_strategy/mediation_analysis.py +121 -0
  40. econometrics/causal_inference/causal_identification_strategy/moderation_analysis.py +109 -0
  41. econometrics/causal_inference/causal_identification_strategy/propensity_score_matching.py +140 -0
  42. econometrics/causal_inference/causal_identification_strategy/random_effects.py +100 -0
  43. econometrics/causal_inference/causal_identification_strategy/regression_discontinuity.py +98 -0
  44. econometrics/causal_inference/causal_identification_strategy/synthetic_control.py +111 -0
  45. econometrics/causal_inference/causal_identification_strategy/triple_difference.py +86 -0
  46. econometrics/distribution_analysis/__init__.py +28 -0
  47. econometrics/distribution_analysis/oaxaca_blinder.py +184 -0
  48. econometrics/distribution_analysis/time_series_decomposition.py +152 -0
  49. econometrics/distribution_analysis/variance_decomposition.py +179 -0
  50. econometrics/missing_data/__init__.py +18 -0
  51. econometrics/missing_data/imputation_methods.py +219 -0
  52. econometrics/missing_data/missing_data_measurement_error/__init__.py +0 -0
  53. econometrics/model_specification_diagnostics_robust_inference/README.md +173 -0
  54. econometrics/model_specification_diagnostics_robust_inference/__init__.py +78 -0
  55. econometrics/model_specification_diagnostics_robust_inference/diagnostic_tests/__init__.py +20 -0
  56. econometrics/model_specification_diagnostics_robust_inference/diagnostic_tests/diagnostic_tests_model.py +149 -0
  57. econometrics/model_specification_diagnostics_robust_inference/generalized_least_squares/__init__.py +15 -0
  58. econometrics/model_specification_diagnostics_robust_inference/generalized_least_squares/gls_model.py +130 -0
  59. econometrics/model_specification_diagnostics_robust_inference/model_selection/__init__.py +18 -0
  60. econometrics/model_specification_diagnostics_robust_inference/model_selection/model_selection_model.py +286 -0
  61. econometrics/model_specification_diagnostics_robust_inference/regularization/__init__.py +15 -0
  62. econometrics/model_specification_diagnostics_robust_inference/regularization/regularization_model.py +177 -0
  63. econometrics/model_specification_diagnostics_robust_inference/robust_errors/__init__.py +15 -0
  64. econometrics/model_specification_diagnostics_robust_inference/robust_errors/robust_errors_model.py +122 -0
  65. econometrics/model_specification_diagnostics_robust_inference/simultaneous_equations/__init__.py +15 -0
  66. econometrics/model_specification_diagnostics_robust_inference/simultaneous_equations/simultaneous_equations_model.py +246 -0
  67. econometrics/model_specification_diagnostics_robust_inference/weighted_least_squares/__init__.py +15 -0
  68. econometrics/model_specification_diagnostics_robust_inference/weighted_least_squares/wls_model.py +127 -0
  69. econometrics/nonparametric/__init__.py +35 -0
  70. econometrics/nonparametric/gam_model.py +117 -0
  71. econometrics/nonparametric/kernel_regression.py +161 -0
  72. econometrics/nonparametric/nonparametric_semiparametric_methods/__init__.py +0 -0
  73. econometrics/nonparametric/quantile_regression.py +249 -0
  74. econometrics/nonparametric/spline_regression.py +100 -0
  75. econometrics/spatial_econometrics/__init__.py +68 -0
  76. econometrics/spatial_econometrics/geographically_weighted_regression.py +211 -0
  77. econometrics/spatial_econometrics/gwr_simple.py +154 -0
  78. econometrics/spatial_econometrics/spatial_autocorrelation.py +356 -0
  79. econometrics/spatial_econometrics/spatial_durbin_model.py +177 -0
  80. econometrics/spatial_econometrics/spatial_econometrics_new/__init__.py +0 -0
  81. econometrics/spatial_econometrics/spatial_regression.py +315 -0
  82. econometrics/spatial_econometrics/spatial_weights.py +226 -0
  83. econometrics/specific_data_modeling/micro_discrete_limited_data/README.md +164 -0
  84. econometrics/specific_data_modeling/micro_discrete_limited_data/__init__.py +40 -0
  85. econometrics/specific_data_modeling/micro_discrete_limited_data/count_data_models.py +311 -0
  86. econometrics/specific_data_modeling/micro_discrete_limited_data/discrete_choice_models.py +294 -0
  87. econometrics/specific_data_modeling/micro_discrete_limited_data/limited_dependent_variable_models.py +282 -0
  88. econometrics/specific_data_modeling/survival_duration_data/__init__.py +0 -0
  89. econometrics/specific_data_modeling/time_series_panel_data/__init__.py +143 -0
  90. econometrics/specific_data_modeling/time_series_panel_data/arima_model.py +104 -0
  91. econometrics/specific_data_modeling/time_series_panel_data/cointegration_vecm.py +334 -0
  92. econometrics/specific_data_modeling/time_series_panel_data/dynamic_panel_models.py +653 -0
  93. econometrics/specific_data_modeling/time_series_panel_data/exponential_smoothing.py +176 -0
  94. econometrics/specific_data_modeling/time_series_panel_data/garch_model.py +198 -0
  95. econometrics/specific_data_modeling/time_series_panel_data/panel_diagnostics.py +125 -0
  96. econometrics/specific_data_modeling/time_series_panel_data/panel_var.py +60 -0
  97. econometrics/specific_data_modeling/time_series_panel_data/structural_break_tests.py +87 -0
  98. econometrics/specific_data_modeling/time_series_panel_data/time_varying_parameter_models.py +106 -0
  99. econometrics/specific_data_modeling/time_series_panel_data/unit_root_tests.py +204 -0
  100. econometrics/specific_data_modeling/time_series_panel_data/var_svar_model.py +372 -0
  101. econometrics/statistical_inference/__init__.py +21 -0
  102. econometrics/statistical_inference/bootstrap_methods.py +162 -0
  103. econometrics/statistical_inference/permutation_test.py +177 -0
  104. econometrics/statistical_inference/statistical_inference_techniques/__init__.py +0 -0
  105. econometrics/statistics/distribution_decomposition_methods/__init__.py +0 -0
  106. econometrics/survival_analysis/__init__.py +18 -0
  107. econometrics/survival_analysis/survival_models.py +259 -0
  108. econometrics/tests/basic_parametric_estimation_tests/__init__.py +3 -0
  109. econometrics/tests/basic_parametric_estimation_tests/test_gmm.py +128 -0
  110. econometrics/tests/basic_parametric_estimation_tests/test_mle.py +127 -0
  111. econometrics/tests/basic_parametric_estimation_tests/test_ols.py +100 -0
  112. econometrics/tests/causal_inference_tests/__init__.py +3 -0
  113. econometrics/tests/causal_inference_tests/detailed_test.py +441 -0
  114. econometrics/tests/causal_inference_tests/test_all_methods.py +418 -0
  115. econometrics/tests/causal_inference_tests/test_causal_identification_strategy.py +202 -0
  116. econometrics/tests/causal_inference_tests/test_difference_in_differences.py +53 -0
  117. econometrics/tests/causal_inference_tests/test_instrumental_variables.py +44 -0
  118. econometrics/tests/model_specification_diagnostics_tests/__init__.py +3 -0
  119. econometrics/tests/model_specification_diagnostics_tests/test_diagnostic_tests.py +86 -0
  120. econometrics/tests/model_specification_diagnostics_tests/test_robust_errors.py +89 -0
  121. econometrics/tests/specific_data_modeling_tests/__init__.py +3 -0
  122. econometrics/tests/specific_data_modeling_tests/test_arima.py +98 -0
  123. econometrics/tests/specific_data_modeling_tests/test_dynamic_panel.py +198 -0
  124. econometrics/tests/specific_data_modeling_tests/test_exponential_smoothing.py +105 -0
  125. econometrics/tests/specific_data_modeling_tests/test_garch.py +118 -0
  126. econometrics/tests/specific_data_modeling_tests/test_micro_discrete_limited_data.py +189 -0
  127. econometrics/tests/specific_data_modeling_tests/test_unit_root.py +156 -0
  128. econometrics/tests/specific_data_modeling_tests/test_var.py +124 -0
  129. econometrics//321/206/320/254/320/272/321/205/342/225/235/320/220/321/205/320/237/320/241/321/205/320/264/320/267/321/207/342/226/222/342/225/227/321/204/342/225/235/320/250/321/205/320/225/320/230/321/207/342/225/221/320/267/321/205/320/230/320/226/321/206/320/256/320/240.md +544 -0
  130. prompts/__init__.py +0 -0
  131. prompts/analysis_guides.py +43 -0
  132. pyproject.toml +85 -0
  133. resources/MCP_MASTER_GUIDE.md +422 -0
  134. resources/MCP_TOOLS_DATA_FORMAT_GUIDE.md +185 -0
  135. resources/__init__.py +0 -0
  136. server.py +97 -0
  137. tools/README.md +88 -0
  138. tools/__init__.py +119 -0
  139. tools/causal_inference_adapter.py +658 -0
  140. tools/data_loader.py +213 -0
  141. tools/decorators.py +38 -0
  142. tools/distribution_analysis_adapter.py +121 -0
  143. tools/econometrics_adapter.py +286 -0
  144. tools/gwr_simple_adapter.py +54 -0
  145. tools/machine_learning_adapter.py +567 -0
  146. tools/mcp_tool_groups/__init__.py +15 -0
  147. tools/mcp_tool_groups/basic_parametric_tools.py +173 -0
  148. tools/mcp_tool_groups/causal_inference_tools.py +643 -0
  149. tools/mcp_tool_groups/distribution_analysis_tools.py +169 -0
  150. tools/mcp_tool_groups/machine_learning_tools.py +422 -0
  151. tools/mcp_tool_groups/microecon_tools.py +325 -0
  152. tools/mcp_tool_groups/missing_data_tools.py +117 -0
  153. tools/mcp_tool_groups/model_specification_tools.py +402 -0
  154. tools/mcp_tool_groups/nonparametric_tools.py +225 -0
  155. tools/mcp_tool_groups/spatial_econometrics_tools.py +323 -0
  156. tools/mcp_tool_groups/statistical_inference_tools.py +131 -0
  157. tools/mcp_tool_groups/time_series_tools.py +494 -0
  158. tools/mcp_tools_registry.py +124 -0
  159. tools/microecon_adapter.py +412 -0
  160. tools/missing_data_adapter.py +73 -0
  161. tools/model_specification_adapter.py +369 -0
  162. tools/nonparametric_adapter.py +190 -0
  163. tools/output_formatter.py +563 -0
  164. tools/spatial_econometrics_adapter.py +318 -0
  165. tools/statistical_inference_adapter.py +90 -0
  166. tools/survival_analysis_adapter.py +46 -0
  167. tools/time_series_panel_data_adapter.py +858 -0
  168. tools/time_series_panel_data_tools.py +65 -0
  169. aigroup_econ_mcp/__init__.py +0 -19
  170. aigroup_econ_mcp/cli.py +0 -82
  171. aigroup_econ_mcp/config.py +0 -561
  172. aigroup_econ_mcp/server.py +0 -452
  173. aigroup_econ_mcp/tools/__init__.py +0 -19
  174. aigroup_econ_mcp/tools/base.py +0 -470
  175. aigroup_econ_mcp/tools/cache.py +0 -533
  176. aigroup_econ_mcp/tools/data_loader.py +0 -195
  177. aigroup_econ_mcp/tools/file_parser.py +0 -1027
  178. aigroup_econ_mcp/tools/machine_learning.py +0 -60
  179. aigroup_econ_mcp/tools/ml_ensemble.py +0 -210
  180. aigroup_econ_mcp/tools/ml_evaluation.py +0 -272
  181. aigroup_econ_mcp/tools/ml_models.py +0 -54
  182. aigroup_econ_mcp/tools/ml_regularization.py +0 -186
  183. aigroup_econ_mcp/tools/monitoring.py +0 -555
  184. aigroup_econ_mcp/tools/optimized_example.py +0 -229
  185. aigroup_econ_mcp/tools/panel_data.py +0 -619
  186. aigroup_econ_mcp/tools/regression.py +0 -214
  187. aigroup_econ_mcp/tools/statistics.py +0 -154
  188. aigroup_econ_mcp/tools/time_series.py +0 -698
  189. aigroup_econ_mcp/tools/timeout.py +0 -283
  190. aigroup_econ_mcp/tools/tool_descriptions.py +0 -410
  191. aigroup_econ_mcp/tools/tool_handlers.py +0 -1016
  192. aigroup_econ_mcp/tools/tool_registry.py +0 -478
  193. aigroup_econ_mcp/tools/validation.py +0 -482
  194. aigroup_econ_mcp-1.3.3.dist-info/METADATA +0 -525
  195. aigroup_econ_mcp-1.3.3.dist-info/RECORD +0 -30
  196. aigroup_econ_mcp-1.3.3.dist-info/entry_points.txt +0 -2
  197. /aigroup_econ_mcp-1.3.3.dist-info/licenses/LICENSE → /LICENSE +0 -0
  198. {aigroup_econ_mcp-1.3.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/WHEEL +0 -0
tools/causal_inference_adapter.py
@@ -0,0 +1,658 @@
+ """
+ Causal inference method adapters
+ Provide a unified interface for calling the causal identification methods in econometrics/causal_inference
+ """
+
+ from typing import List, Optional, Union, Dict, Any
+ import json
+
+ # Import all causal inference methods
+ from econometrics.causal_inference.causal_identification_strategy.difference_in_differences import (
+     difference_in_differences, DIDResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.instrumental_variables import (
+     instrumental_variables_2sls, IVResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.propensity_score_matching import (
+     propensity_score_matching, PSMMatchResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.fixed_effects import (
+     fixed_effects_model, FixedEffectsResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.random_effects import (
+     random_effects_model, RandomEffectsResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.regression_discontinuity import (
+     regression_discontinuity, RDDResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.synthetic_control import (
+     synthetic_control_method, SyntheticControlResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.event_study import (
+     event_study, EventStudyResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.triple_difference import (
+     triple_difference, TripeDifferenceResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.mediation_analysis import (
+     mediation_analysis, MediationResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.moderation_analysis import (
+     moderation_analysis, ModerationResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.control_function import (
+     control_function_approach, ControlFunctionResult
+ )
+ from econometrics.causal_inference.causal_identification_strategy.first_difference import (
+     first_difference_model, FirstDifferenceResult
+ )
+
+ from .data_loader import DataLoader
+ from .output_formatter import OutputFormatter
+
+
+ def did_adapter(
+     treatment: Optional[List[int]] = None,
+     time_period: Optional[List[int]] = None,
+     outcome: Optional[List[float]] = None,
+     covariates: Optional[List[List[float]]] = None,
+     file_path: Optional[str] = None,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Difference-in-differences (DID) adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             treatment = data.get("treatment", treatment)
+             time_period = data.get("time_period", time_period)
+             outcome = data.get("outcome", outcome)
+             covariates = data.get("covariates", covariates)
+
+         # Call the core method
+         result: DIDResult = difference_in_differences(
+             treatment=treatment,
+             time_period=time_period,
+             outcome=outcome,
+             covariates=covariates
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"DID analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def iv_adapter(
+     y_data: Optional[List[float]] = None,
+     x_data: Optional[List[List[float]]] = None,
+     instruments: Optional[List[List[float]]] = None,
+     file_path: Optional[str] = None,
+     feature_names: Optional[List[str]] = None,
+     instrument_names: Optional[List[str]] = None,
+     constant: bool = True,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Instrumental variables (IV/2SLS) adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             y_data = data.get("y_data", y_data)
+             x_data = data.get("x_data", x_data)
+             instruments = data.get("instruments", instruments)
+
+         # Call the core method
+         result: IVResult = instrumental_variables_2sls(
+             y=y_data,
+             x=x_data,
+             instruments=instruments,
+             feature_names=feature_names,
+             instrument_names=instrument_names,
+             constant=constant
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"IV/2SLS analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def psm_adapter(
+     treatment: Optional[List[int]] = None,
+     outcome: Optional[List[float]] = None,
+     covariates: Optional[List[List[float]]] = None,
+     file_path: Optional[str] = None,
+     matching_method: str = "nearest",
+     k_neighbors: int = 1,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Propensity score matching (PSM) adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             treatment = data.get("treatment", treatment)
+             outcome = data.get("outcome", outcome)
+             covariates = data.get("covariates", covariates)
+
+         # Call the core method
+         result: PSMMatchResult = propensity_score_matching(
+             treatment=treatment,
+             outcome=outcome,
+             covariates=covariates,
+             matching_method=matching_method,
+             k_neighbors=k_neighbors
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"PSM analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def fixed_effects_adapter(
+     y_data: Optional[List[float]] = None,
+     x_data: Optional[List[List[float]]] = None,
+     entity_ids: Optional[List[str]] = None,
+     time_periods: Optional[List[str]] = None,
+     file_path: Optional[str] = None,
+     constant: bool = True,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Fixed effects model adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             y_data = data.get("y_data", y_data)
+             x_data = data.get("x_data", x_data)
+             entity_ids = data.get("entity_ids", entity_ids)
+             time_periods = data.get("time_periods", time_periods)
+
+         # Call the core method
+         result: FixedEffectsResult = fixed_effects_model(
+             y=y_data,
+             x=x_data,
+             entity_ids=entity_ids,
+             time_periods=time_periods,
+             constant=constant
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"Fixed effects model analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def random_effects_adapter(
+     y_data: Optional[List[float]] = None,
+     x_data: Optional[List[List[float]]] = None,
+     entity_ids: Optional[List[str]] = None,
+     time_periods: Optional[List[str]] = None,
+     file_path: Optional[str] = None,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Random effects model adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             y_data = data.get("y_data", y_data)
+             x_data = data.get("x_data", x_data)
+             entity_ids = data.get("entity_ids", entity_ids)
+             time_periods = data.get("time_periods", time_periods)
+
+         # Call the core method
+         result: RandomEffectsResult = random_effects_model(
+             y=y_data,
+             x=x_data,
+             entity_ids=entity_ids,
+             time_periods=time_periods
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"Random effects model analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def rdd_adapter(
+     running_variable: Optional[List[float]] = None,
+     outcome: Optional[List[float]] = None,
+     cutoff: float = 0.0,
+     file_path: Optional[str] = None,
+     bandwidth: Optional[float] = None,
+     polynomial_order: int = 1,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Regression discontinuity design (RDD) adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             running_variable = data.get("running_variable", running_variable)
+             outcome = data.get("outcome", outcome)
+             cutoff = data.get("cutoff", cutoff)
+
+         # Call the core method
+         result: RDDResult = regression_discontinuity(
+             running_variable=running_variable,
+             outcome=outcome,
+             cutoff=cutoff,
+             bandwidth=bandwidth,
+             polynomial_order=polynomial_order
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"RDD analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def synthetic_control_adapter(
+     outcome: Optional[List[float]] = None,
+     treatment_period: int = 0,
+     treated_unit: str = "unit_1",
+     donor_units: Optional[List[str]] = None,
+     time_periods: Optional[List[str]] = None,
+     file_path: Optional[str] = None,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Synthetic control method adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             outcome = data.get("outcome", outcome)
+             treatment_period = data.get("treatment_period", treatment_period)
+             treated_unit = data.get("treated_unit", treated_unit)
+             donor_units = data.get("donor_units", donor_units)
+             time_periods = data.get("time_periods", time_periods)
+
+         # Call the core method
+         result: SyntheticControlResult = synthetic_control_method(
+             outcome=outcome,
+             treatment_period=treatment_period,
+             treated_unit=treated_unit,
+             donor_units=donor_units,
+             time_periods=time_periods
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"Synthetic control analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def event_study_adapter(
+     outcome: Optional[List[float]] = None,
+     treatment: Optional[List[int]] = None,
+     entity_ids: Optional[List[str]] = None,
+     time_periods: Optional[List[str]] = None,
+     event_time: Optional[List[int]] = None,
+     file_path: Optional[str] = None,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Event study adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             outcome = data.get("outcome", outcome)
+             treatment = data.get("treatment", treatment)
+             entity_ids = data.get("entity_ids", entity_ids)
+             time_periods = data.get("time_periods", time_periods)
+             event_time = data.get("event_time", event_time)
+
+         # Call the core method
+         result: EventStudyResult = event_study(
+             outcome=outcome,
+             treatment=treatment,
+             entity_ids=entity_ids,
+             time_periods=time_periods,
+             event_time=event_time
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"Event study analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def triple_difference_adapter(
+     outcome: Optional[List[float]] = None,
+     treatment_group: Optional[List[int]] = None,
+     time_period: Optional[List[int]] = None,
+     cohort_group: Optional[List[int]] = None,
+     file_path: Optional[str] = None,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Triple difference (DDD) adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             outcome = data.get("outcome", outcome)
+             treatment_group = data.get("treatment_group", treatment_group)
+             time_period = data.get("time_period", time_period)
+             cohort_group = data.get("cohort_group", cohort_group)
+
+         # Call the core method
+         result: TripeDifferenceResult = triple_difference(
+             outcome=outcome,
+             treatment_group=treatment_group,
+             time_period=time_period,
+             cohort_group=cohort_group
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"DDD analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def mediation_adapter(
+     outcome: Optional[List[float]] = None,
+     treatment: Optional[List[float]] = None,
+     mediator: Optional[List[float]] = None,
+     covariates: Optional[List[List[float]]] = None,
+     file_path: Optional[str] = None,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Mediation analysis adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             outcome = data.get("outcome", outcome)
+             treatment = data.get("treatment", treatment)
+             mediator = data.get("mediator", mediator)
+             covariates = data.get("covariates", covariates)
+
+         # Call the core method
+         result: MediationResult = mediation_analysis(
+             outcome=outcome,
+             treatment=treatment,
+             mediator=mediator,
+             covariates=covariates
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"Mediation analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def moderation_adapter(
+     outcome: Optional[List[float]] = None,
+     predictor: Optional[List[float]] = None,
+     moderator: Optional[List[float]] = None,
+     covariates: Optional[List[List[float]]] = None,
+     file_path: Optional[str] = None,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Moderation analysis adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             outcome = data.get("outcome", outcome)
+             predictor = data.get("predictor", predictor)
+             moderator = data.get("moderator", moderator)
+             covariates = data.get("covariates", covariates)
+
+         # Call the core method
+         result: ModerationResult = moderation_analysis(
+             outcome=outcome,
+             predictor=predictor,
+             moderator=moderator,
+             covariates=covariates
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"Moderation analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def control_function_adapter(
+     y_data: Optional[List[float]] = None,
+     x_data: Optional[List[float]] = None,
+     z_data: Optional[List[List[float]]] = None,
+     file_path: Optional[str] = None,
+     constant: bool = True,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     Control function approach adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             y_data = data.get("y_data", y_data)
+             x_data = data.get("x_data", x_data)
+             z_data = data.get("z_data", z_data)
+
+         # Call the core method
+         result: ControlFunctionResult = control_function_approach(
+             y=y_data,
+             x=x_data,
+             z=z_data,
+             constant=constant
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"Control function analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
+
+
+ def first_difference_adapter(
+     y_data: Optional[List[float]] = None,
+     x_data: Optional[List[float]] = None,
+     entity_ids: Optional[List[str]] = None,
+     file_path: Optional[str] = None,
+     output_format: str = "json",
+     save_path: Optional[str] = None
+ ) -> str:
+     """
+     First difference model adapter
+     """
+     try:
+         # Load data from file
+         if file_path:
+             data = DataLoader.load_from_file(file_path)
+             y_data = data.get("y_data", y_data)
+             x_data = data.get("x_data", x_data)
+             entity_ids = data.get("entity_ids", entity_ids)
+
+         # Call the core method
+         result: FirstDifferenceResult = first_difference_model(
+             y=y_data,
+             x=x_data,
+             entity_ids=entity_ids
+         )
+
+         # Format the output
+         if output_format == "json":
+             output = result.model_dump_json(indent=2)
+         else:
+             output = str(result.model_dump())
+
+         # Save the result
+         if save_path:
+             OutputFormatter.save_to_file(output, save_path)
+
+         return output
+
+     except Exception as e:
+         error_msg = f"First difference model analysis error: {str(e)}"
+         return json.dumps({"error": error_msg}, indent=2)
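For orientation, below is a minimal usage sketch of one of the adapters added in this file. It is an illustration only: it assumes aigroup-econ-mcp 2.0.1 is installed and that the wheel's top-level `tools` package is importable as laid out in the file list above, and the data values are made up.

    import json

    from tools.causal_inference_adapter import did_adapter

    # Toy 2x2 difference-in-differences setup: four units observed before and after treatment (illustrative values).
    treatment = [0, 0, 1, 1, 0, 0, 1, 1]     # treated-group indicator per observation
    time_period = [0, 0, 0, 0, 1, 1, 1, 1]   # pre-/post-period indicator
    outcome = [2.0, 2.1, 2.2, 2.3, 2.5, 2.4, 3.4, 3.6]

    # The adapter returns a JSON string (the DIDResult serialized via model_dump_json).
    result_json = did_adapter(treatment=treatment, time_period=time_period, outcome=outcome)
    print(json.loads(result_json))

As the diff shows, every adapter follows the same pattern: data can be passed inline or loaded via `file_path`, `output_format` defaults to "json", `save_path` optionally writes the formatted result to disk, and any exception is returned as a JSON object with an "error" key rather than raised.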