aigroup-econ-mcp 1.4.3__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97) hide show
  1. PKG-INFO +344 -322
  2. README.md +335 -320
  3. __init__.py +1 -1
  4. aigroup_econ_mcp-2.0.1.dist-info/METADATA +732 -0
  5. aigroup_econ_mcp-2.0.1.dist-info/RECORD +170 -0
  6. cli.py +4 -0
  7. econometrics/advanced_methods/modern_computing_machine_learning/__init__.py +30 -0
  8. econometrics/advanced_methods/modern_computing_machine_learning/causal_forest.py +253 -0
  9. econometrics/advanced_methods/modern_computing_machine_learning/double_ml.py +268 -0
  10. econometrics/advanced_methods/modern_computing_machine_learning/gradient_boosting.py +249 -0
  11. econometrics/advanced_methods/modern_computing_machine_learning/hierarchical_clustering.py +243 -0
  12. econometrics/advanced_methods/modern_computing_machine_learning/kmeans_clustering.py +293 -0
  13. econometrics/advanced_methods/modern_computing_machine_learning/neural_network.py +264 -0
  14. econometrics/advanced_methods/modern_computing_machine_learning/random_forest.py +195 -0
  15. econometrics/advanced_methods/modern_computing_machine_learning/support_vector_machine.py +226 -0
  16. econometrics/advanced_methods/modern_computing_machine_learning/test_all_modules.py +329 -0
  17. econometrics/advanced_methods/modern_computing_machine_learning/test_report.md +107 -0
  18. econometrics/causal_inference/__init__.py +66 -0
  19. econometrics/causal_inference/causal_identification_strategy/__init__.py +104 -0
  20. econometrics/causal_inference/causal_identification_strategy/control_function.py +112 -0
  21. econometrics/causal_inference/causal_identification_strategy/difference_in_differences.py +107 -0
  22. econometrics/causal_inference/causal_identification_strategy/event_study.py +119 -0
  23. econometrics/causal_inference/causal_identification_strategy/first_difference.py +89 -0
  24. econometrics/causal_inference/causal_identification_strategy/fixed_effects.py +103 -0
  25. econometrics/causal_inference/causal_identification_strategy/hausman_test.py +69 -0
  26. econometrics/causal_inference/causal_identification_strategy/instrumental_variables.py +145 -0
  27. econometrics/causal_inference/causal_identification_strategy/mediation_analysis.py +121 -0
  28. econometrics/causal_inference/causal_identification_strategy/moderation_analysis.py +109 -0
  29. econometrics/causal_inference/causal_identification_strategy/propensity_score_matching.py +140 -0
  30. econometrics/causal_inference/causal_identification_strategy/random_effects.py +100 -0
  31. econometrics/causal_inference/causal_identification_strategy/regression_discontinuity.py +98 -0
  32. econometrics/causal_inference/causal_identification_strategy/synthetic_control.py +111 -0
  33. econometrics/causal_inference/causal_identification_strategy/triple_difference.py +86 -0
  34. econometrics/distribution_analysis/__init__.py +28 -0
  35. econometrics/distribution_analysis/oaxaca_blinder.py +184 -0
  36. econometrics/distribution_analysis/time_series_decomposition.py +152 -0
  37. econometrics/distribution_analysis/variance_decomposition.py +179 -0
  38. econometrics/missing_data/__init__.py +18 -0
  39. econometrics/missing_data/imputation_methods.py +219 -0
  40. econometrics/nonparametric/__init__.py +35 -0
  41. econometrics/nonparametric/gam_model.py +117 -0
  42. econometrics/nonparametric/kernel_regression.py +161 -0
  43. econometrics/nonparametric/quantile_regression.py +249 -0
  44. econometrics/nonparametric/spline_regression.py +100 -0
  45. econometrics/spatial_econometrics/__init__.py +68 -0
  46. econometrics/spatial_econometrics/geographically_weighted_regression.py +211 -0
  47. econometrics/spatial_econometrics/gwr_simple.py +154 -0
  48. econometrics/spatial_econometrics/spatial_autocorrelation.py +356 -0
  49. econometrics/spatial_econometrics/spatial_durbin_model.py +177 -0
  50. econometrics/spatial_econometrics/spatial_regression.py +315 -0
  51. econometrics/spatial_econometrics/spatial_weights.py +226 -0
  52. econometrics/specific_data_modeling/micro_discrete_limited_data/README.md +164 -0
  53. econometrics/specific_data_modeling/micro_discrete_limited_data/__init__.py +40 -0
  54. econometrics/specific_data_modeling/micro_discrete_limited_data/count_data_models.py +311 -0
  55. econometrics/specific_data_modeling/micro_discrete_limited_data/discrete_choice_models.py +294 -0
  56. econometrics/specific_data_modeling/micro_discrete_limited_data/limited_dependent_variable_models.py +282 -0
  57. econometrics/statistical_inference/__init__.py +21 -0
  58. econometrics/statistical_inference/bootstrap_methods.py +162 -0
  59. econometrics/statistical_inference/permutation_test.py +177 -0
  60. econometrics/survival_analysis/__init__.py +18 -0
  61. econometrics/survival_analysis/survival_models.py +259 -0
  62. econometrics/tests/causal_inference_tests/__init__.py +3 -0
  63. econometrics/tests/causal_inference_tests/detailed_test.py +441 -0
  64. econometrics/tests/causal_inference_tests/test_all_methods.py +418 -0
  65. econometrics/tests/causal_inference_tests/test_causal_identification_strategy.py +202 -0
  66. econometrics/tests/causal_inference_tests/test_difference_in_differences.py +53 -0
  67. econometrics/tests/causal_inference_tests/test_instrumental_variables.py +44 -0
  68. econometrics/tests/specific_data_modeling_tests/test_micro_discrete_limited_data.py +189 -0
  69. econometrics//321/206/320/254/320/272/321/205/342/225/235/320/220/321/205/320/237/320/241/321/205/320/264/320/267/321/207/342/226/222/342/225/227/321/204/342/225/235/320/250/321/205/320/225/320/230/321/207/342/225/221/320/267/321/205/320/230/320/226/321/206/320/256/320/240.md +544 -0
  70. pyproject.toml +9 -2
  71. server.py +15 -1
  72. tools/__init__.py +75 -1
  73. tools/causal_inference_adapter.py +658 -0
  74. tools/distribution_analysis_adapter.py +121 -0
  75. tools/gwr_simple_adapter.py +54 -0
  76. tools/machine_learning_adapter.py +567 -0
  77. tools/mcp_tool_groups/__init__.py +15 -1
  78. tools/mcp_tool_groups/causal_inference_tools.py +643 -0
  79. tools/mcp_tool_groups/distribution_analysis_tools.py +169 -0
  80. tools/mcp_tool_groups/machine_learning_tools.py +422 -0
  81. tools/mcp_tool_groups/microecon_tools.py +325 -0
  82. tools/mcp_tool_groups/missing_data_tools.py +117 -0
  83. tools/mcp_tool_groups/nonparametric_tools.py +225 -0
  84. tools/mcp_tool_groups/spatial_econometrics_tools.py +323 -0
  85. tools/mcp_tool_groups/statistical_inference_tools.py +131 -0
  86. tools/mcp_tools_registry.py +13 -3
  87. tools/microecon_adapter.py +412 -0
  88. tools/missing_data_adapter.py +73 -0
  89. tools/nonparametric_adapter.py +190 -0
  90. tools/spatial_econometrics_adapter.py +318 -0
  91. tools/statistical_inference_adapter.py +90 -0
  92. tools/survival_analysis_adapter.py +46 -0
  93. aigroup_econ_mcp-1.4.3.dist-info/METADATA +0 -710
  94. aigroup_econ_mcp-1.4.3.dist-info/RECORD +0 -92
  95. {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/WHEEL +0 -0
  96. {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/entry_points.txt +0 -0
  97. {aigroup_econ_mcp-1.4.3.dist-info → aigroup_econ_mcp-2.0.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,658 @@
1
+ """
2
+ 因果推断方法适配器
3
+ 提供统一的接口调用econometrics/causal_inference中的各种因果识别方法
4
+ """
5
+
6
+ from typing import List, Optional, Union, Dict, Any
7
+ import json
8
+
9
+ # 导入所有因果推断方法
10
+ from econometrics.causal_inference.causal_identification_strategy.difference_in_differences import (
11
+ difference_in_differences, DIDResult
12
+ )
13
+ from econometrics.causal_inference.causal_identification_strategy.instrumental_variables import (
14
+ instrumental_variables_2sls, IVResult
15
+ )
16
+ from econometrics.causal_inference.causal_identification_strategy.propensity_score_matching import (
17
+ propensity_score_matching, PSMMatchResult
18
+ )
19
+ from econometrics.causal_inference.causal_identification_strategy.fixed_effects import (
20
+ fixed_effects_model, FixedEffectsResult
21
+ )
22
+ from econometrics.causal_inference.causal_identification_strategy.random_effects import (
23
+ random_effects_model, RandomEffectsResult
24
+ )
25
+ from econometrics.causal_inference.causal_identification_strategy.regression_discontinuity import (
26
+ regression_discontinuity, RDDResult
27
+ )
28
+ from econometrics.causal_inference.causal_identification_strategy.synthetic_control import (
29
+ synthetic_control_method, SyntheticControlResult
30
+ )
31
+ from econometrics.causal_inference.causal_identification_strategy.event_study import (
32
+ event_study, EventStudyResult
33
+ )
34
+ from econometrics.causal_inference.causal_identification_strategy.triple_difference import (
35
+ triple_difference, TripeDifferenceResult
36
+ )
37
+ from econometrics.causal_inference.causal_identification_strategy.mediation_analysis import (
38
+ mediation_analysis, MediationResult
39
+ )
40
+ from econometrics.causal_inference.causal_identification_strategy.moderation_analysis import (
41
+ moderation_analysis, ModerationResult
42
+ )
43
+ from econometrics.causal_inference.causal_identification_strategy.control_function import (
44
+ control_function_approach, ControlFunctionResult
45
+ )
46
+ from econometrics.causal_inference.causal_identification_strategy.first_difference import (
47
+ first_difference_model, FirstDifferenceResult
48
+ )
49
+
50
+ from .data_loader import DataLoader
51
+ from .output_formatter import OutputFormatter
52
+
53
+
54
def did_adapter(
    treatment: Optional[List[int]] = None,
    time_period: Optional[List[int]] = None,
    outcome: Optional[List[float]] = None,
    covariates: Optional[List[List[float]]] = None,
    file_path: Optional[str] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Difference-in-Differences (DID) adapter.

    Runs ``difference_in_differences`` on data supplied directly as lists or
    loaded from ``file_path`` (values found in the file override the
    corresponding arguments).

    Args:
        treatment: Treatment-group indicator per observation.
        time_period: Post-period indicator per observation.
        outcome: Outcome variable per observation.
        covariates: Optional covariate rows, one inner list per observation.
        file_path: Optional data file; matching keys override the args above.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            treatment = data.get("treatment", treatment)
            time_period = data.get("time_period", time_period)
            outcome = data.get("outcome", outcome)
            covariates = data.get("covariates", covariates)

        # Call the core estimation routine.
        result: DIDResult = difference_in_differences(
            treatment=treatment,
            time_period=time_period,
            outcome=outcome,
            covariates=covariates
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"DID分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable instead of
        # emitting \uXXXX escapes.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
98
+
99
+
100
def iv_adapter(
    y_data: Optional[List[float]] = None,
    x_data: Optional[List[List[float]]] = None,
    instruments: Optional[List[List[float]]] = None,
    file_path: Optional[str] = None,
    feature_names: Optional[List[str]] = None,
    instrument_names: Optional[List[str]] = None,
    constant: bool = True,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Instrumental variables (IV/2SLS) adapter.

    Runs ``instrumental_variables_2sls`` on data supplied directly or loaded
    from ``file_path`` (file values override the corresponding arguments).

    Args:
        y_data: Dependent variable per observation.
        x_data: Endogenous/exogenous regressor rows.
        instruments: Instrument rows, one inner list per observation.
        file_path: Optional data file; matching keys override the args above.
        feature_names: Optional display names for the regressors.
        instrument_names: Optional display names for the instruments.
        constant: Whether to include an intercept term.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            y_data = data.get("y_data", y_data)
            x_data = data.get("x_data", x_data)
            instruments = data.get("instruments", instruments)

        # Call the core estimation routine.
        result: IVResult = instrumental_variables_2sls(
            y=y_data,
            x=x_data,
            instruments=instruments,
            feature_names=feature_names,
            instrument_names=instrument_names,
            constant=constant
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"IV/2SLS分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
147
+
148
+
149
def psm_adapter(
    treatment: Optional[List[int]] = None,
    outcome: Optional[List[float]] = None,
    covariates: Optional[List[List[float]]] = None,
    file_path: Optional[str] = None,
    matching_method: str = "nearest",
    k_neighbors: int = 1,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Propensity score matching (PSM) adapter.

    Runs ``propensity_score_matching`` on data supplied directly or loaded
    from ``file_path`` (file values override the corresponding arguments).

    Args:
        treatment: Treatment indicator per observation.
        outcome: Outcome variable per observation.
        covariates: Covariate rows used to estimate the propensity score.
        file_path: Optional data file; matching keys override the args above.
        matching_method: Matching strategy passed through (default "nearest").
        k_neighbors: Number of neighbors used in matching.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            treatment = data.get("treatment", treatment)
            outcome = data.get("outcome", outcome)
            covariates = data.get("covariates", covariates)

        # Call the core matching routine.
        result: PSMMatchResult = propensity_score_matching(
            treatment=treatment,
            outcome=outcome,
            covariates=covariates,
            matching_method=matching_method,
            k_neighbors=k_neighbors
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"PSM分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
194
+
195
+
196
def fixed_effects_adapter(
    y_data: Optional[List[float]] = None,
    x_data: Optional[List[List[float]]] = None,
    entity_ids: Optional[List[str]] = None,
    time_periods: Optional[List[str]] = None,
    file_path: Optional[str] = None,
    constant: bool = True,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Fixed effects panel model adapter.

    Runs ``fixed_effects_model`` on data supplied directly or loaded from
    ``file_path`` (file values override the corresponding arguments).

    Args:
        y_data: Dependent variable per observation.
        x_data: Regressor rows, one inner list per observation.
        entity_ids: Panel entity identifier per observation.
        time_periods: Panel time identifier per observation.
        file_path: Optional data file; matching keys override the args above.
        constant: Whether to include an intercept term.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            y_data = data.get("y_data", y_data)
            x_data = data.get("x_data", x_data)
            entity_ids = data.get("entity_ids", entity_ids)
            time_periods = data.get("time_periods", time_periods)

        # Call the core estimation routine.
        result: FixedEffectsResult = fixed_effects_model(
            y=y_data,
            x=x_data,
            entity_ids=entity_ids,
            time_periods=time_periods,
            constant=constant
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"固定效应模型分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
242
+
243
+
244
def random_effects_adapter(
    y_data: Optional[List[float]] = None,
    x_data: Optional[List[List[float]]] = None,
    entity_ids: Optional[List[str]] = None,
    time_periods: Optional[List[str]] = None,
    file_path: Optional[str] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Random effects panel model adapter.

    Runs ``random_effects_model`` on data supplied directly or loaded from
    ``file_path`` (file values override the corresponding arguments).

    Args:
        y_data: Dependent variable per observation.
        x_data: Regressor rows, one inner list per observation.
        entity_ids: Panel entity identifier per observation.
        time_periods: Panel time identifier per observation.
        file_path: Optional data file; matching keys override the args above.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            y_data = data.get("y_data", y_data)
            x_data = data.get("x_data", x_data)
            entity_ids = data.get("entity_ids", entity_ids)
            time_periods = data.get("time_periods", time_periods)

        # Call the core estimation routine.
        result: RandomEffectsResult = random_effects_model(
            y=y_data,
            x=x_data,
            entity_ids=entity_ids,
            time_periods=time_periods
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"随机效应模型分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
288
+
289
+
290
def rdd_adapter(
    running_variable: Optional[List[float]] = None,
    outcome: Optional[List[float]] = None,
    cutoff: float = 0.0,
    file_path: Optional[str] = None,
    bandwidth: Optional[float] = None,
    polynomial_order: int = 1,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Regression discontinuity design (RDD) adapter.

    Runs ``regression_discontinuity`` on data supplied directly or loaded
    from ``file_path`` (file values override the corresponding arguments,
    including ``cutoff``).

    Args:
        running_variable: Assignment/running variable per observation.
        outcome: Outcome variable per observation.
        cutoff: Threshold of the running variable (default 0.0).
        file_path: Optional data file; matching keys override the args above.
        bandwidth: Optional estimation bandwidth around the cutoff.
        polynomial_order: Order of the local polynomial fit.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            running_variable = data.get("running_variable", running_variable)
            outcome = data.get("outcome", outcome)
            cutoff = data.get("cutoff", cutoff)

        # Call the core estimation routine.
        result: RDDResult = regression_discontinuity(
            running_variable=running_variable,
            outcome=outcome,
            cutoff=cutoff,
            bandwidth=bandwidth,
            polynomial_order=polynomial_order
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"RDD分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
335
+
336
+
337
def synthetic_control_adapter(
    outcome: Optional[List[float]] = None,
    treatment_period: int = 0,
    treated_unit: str = "unit_1",
    donor_units: Optional[List[str]] = None,
    time_periods: Optional[List[str]] = None,
    file_path: Optional[str] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Synthetic control method adapter.

    Runs ``synthetic_control_method`` on data supplied directly or loaded
    from ``file_path`` (file values override the corresponding arguments).

    Args:
        outcome: Outcome series for the units.
        treatment_period: Index/period at which treatment begins.
        treated_unit: Identifier of the treated unit.
        donor_units: Identifiers of the donor-pool units.
        time_periods: Time labels for the outcome series.
        file_path: Optional data file; matching keys override the args above.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            outcome = data.get("outcome", outcome)
            treatment_period = data.get("treatment_period", treatment_period)
            treated_unit = data.get("treated_unit", treated_unit)
            donor_units = data.get("donor_units", donor_units)
            time_periods = data.get("time_periods", time_periods)

        # Call the core estimation routine.
        result: SyntheticControlResult = synthetic_control_method(
            outcome=outcome,
            treatment_period=treatment_period,
            treated_unit=treated_unit,
            donor_units=donor_units,
            time_periods=time_periods
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"合成控制法分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
384
+
385
+
386
def event_study_adapter(
    outcome: Optional[List[float]] = None,
    treatment: Optional[List[int]] = None,
    entity_ids: Optional[List[str]] = None,
    time_periods: Optional[List[str]] = None,
    event_time: Optional[List[int]] = None,
    file_path: Optional[str] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Event study adapter.

    Runs ``event_study`` on data supplied directly or loaded from
    ``file_path`` (file values override the corresponding arguments).

    Args:
        outcome: Outcome variable per observation.
        treatment: Treatment indicator per observation.
        entity_ids: Panel entity identifier per observation.
        time_periods: Panel time identifier per observation.
        event_time: Time relative to the event per observation.
        file_path: Optional data file; matching keys override the args above.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            outcome = data.get("outcome", outcome)
            treatment = data.get("treatment", treatment)
            entity_ids = data.get("entity_ids", entity_ids)
            time_periods = data.get("time_periods", time_periods)
            event_time = data.get("event_time", event_time)

        # Call the core estimation routine.
        result: EventStudyResult = event_study(
            outcome=outcome,
            treatment=treatment,
            entity_ids=entity_ids,
            time_periods=time_periods,
            event_time=event_time
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"事件研究法分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
433
+
434
+
435
def triple_difference_adapter(
    outcome: Optional[List[float]] = None,
    treatment_group: Optional[List[int]] = None,
    time_period: Optional[List[int]] = None,
    cohort_group: Optional[List[int]] = None,
    file_path: Optional[str] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Triple difference (DDD) adapter.

    Runs ``triple_difference`` on data supplied directly or loaded from
    ``file_path`` (file values override the corresponding arguments).

    Args:
        outcome: Outcome variable per observation.
        treatment_group: Treatment-group indicator per observation.
        time_period: Post-period indicator per observation.
        cohort_group: Third-difference group indicator per observation.
        file_path: Optional data file; matching keys override the args above.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            outcome = data.get("outcome", outcome)
            treatment_group = data.get("treatment_group", treatment_group)
            time_period = data.get("time_period", time_period)
            cohort_group = data.get("cohort_group", cohort_group)

        # Call the core estimation routine.
        # NOTE(review): "TripeDifferenceResult" is the name exported by the
        # upstream module (apparent typo for "Triple"); kept to match the import.
        result: TripeDifferenceResult = triple_difference(
            outcome=outcome,
            treatment_group=treatment_group,
            time_period=time_period,
            cohort_group=cohort_group
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"DDD分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
479
+
480
+
481
def mediation_adapter(
    outcome: Optional[List[float]] = None,
    treatment: Optional[List[float]] = None,
    mediator: Optional[List[float]] = None,
    covariates: Optional[List[List[float]]] = None,
    file_path: Optional[str] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Mediation analysis adapter.

    Runs ``mediation_analysis`` on data supplied directly or loaded from
    ``file_path`` (file values override the corresponding arguments).

    Args:
        outcome: Outcome variable per observation.
        treatment: Treatment variable per observation.
        mediator: Mediator variable per observation.
        covariates: Optional covariate rows, one inner list per observation.
        file_path: Optional data file; matching keys override the args above.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            outcome = data.get("outcome", outcome)
            treatment = data.get("treatment", treatment)
            mediator = data.get("mediator", mediator)
            covariates = data.get("covariates", covariates)

        # Call the core estimation routine.
        result: MediationResult = mediation_analysis(
            outcome=outcome,
            treatment=treatment,
            mediator=mediator,
            covariates=covariates
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"中介效应分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
525
+
526
+
527
def moderation_adapter(
    outcome: Optional[List[float]] = None,
    predictor: Optional[List[float]] = None,
    moderator: Optional[List[float]] = None,
    covariates: Optional[List[List[float]]] = None,
    file_path: Optional[str] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Moderation analysis adapter.

    Runs ``moderation_analysis`` on data supplied directly or loaded from
    ``file_path`` (file values override the corresponding arguments).

    Args:
        outcome: Outcome variable per observation.
        predictor: Focal predictor per observation.
        moderator: Moderator variable per observation.
        covariates: Optional covariate rows, one inner list per observation.
        file_path: Optional data file; matching keys override the args above.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            outcome = data.get("outcome", outcome)
            predictor = data.get("predictor", predictor)
            moderator = data.get("moderator", moderator)
            covariates = data.get("covariates", covariates)

        # Call the core estimation routine.
        result: ModerationResult = moderation_analysis(
            outcome=outcome,
            predictor=predictor,
            moderator=moderator,
            covariates=covariates
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"调节效应分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
571
+
572
+
573
def control_function_adapter(
    y_data: Optional[List[float]] = None,
    x_data: Optional[List[float]] = None,
    z_data: Optional[List[List[float]]] = None,
    file_path: Optional[str] = None,
    constant: bool = True,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    Control function approach adapter.

    Runs ``control_function_approach`` on data supplied directly or loaded
    from ``file_path`` (file values override the corresponding arguments).

    Args:
        y_data: Dependent variable per observation.
        x_data: Endogenous regressor per observation.
        z_data: Instrument rows, one inner list per observation.
        file_path: Optional data file; matching keys override the args above.
        constant: Whether to include an intercept term.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            y_data = data.get("y_data", y_data)
            x_data = data.get("x_data", x_data)
            z_data = data.get("z_data", z_data)

        # Call the core estimation routine.
        result: ControlFunctionResult = control_function_approach(
            y=y_data,
            x=x_data,
            z=z_data,
            constant=constant
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"控制函数法分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)
616
+
617
+
618
def first_difference_adapter(
    y_data: Optional[List[float]] = None,
    x_data: Optional[List[float]] = None,
    entity_ids: Optional[List[str]] = None,
    file_path: Optional[str] = None,
    output_format: str = "json",
    save_path: Optional[str] = None
) -> str:
    """
    First-difference panel model adapter.

    Runs ``first_difference_model`` on data supplied directly or loaded from
    ``file_path`` (file values override the corresponding arguments).

    Args:
        y_data: Dependent variable per observation.
        x_data: Regressor per observation.
        entity_ids: Panel entity identifier per observation.
        file_path: Optional data file; matching keys override the args above.
        output_format: "json" for pretty JSON, anything else for a dict repr.
        save_path: Optional path to also write the formatted output to.

    Returns:
        Formatted result string, or a JSON object with an "error" key on failure.
    """
    try:
        # Load from file; file values take precedence over direct arguments.
        if file_path:
            data = DataLoader.load_from_file(file_path)
            y_data = data.get("y_data", y_data)
            x_data = data.get("x_data", x_data)
            entity_ids = data.get("entity_ids", entity_ids)

        # Call the core estimation routine.
        result: FirstDifferenceResult = first_difference_model(
            y=y_data,
            x=x_data,
            entity_ids=entity_ids
        )

        # Format output.
        if output_format == "json":
            output = result.model_dump_json(indent=2)
        else:
            output = str(result.model_dump())

        # Optionally persist the result.
        if save_path:
            OutputFormatter.save_to_file(output, save_path)

        return output

    except Exception as e:
        error_msg = f"一阶差分模型分析错误: {str(e)}"
        # ensure_ascii=False keeps the Chinese message readable.
        return json.dumps({"error": error_msg}, indent=2, ensure_ascii=False)