panelbox 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90) hide show
  1. panelbox/__init__.py +67 -0
  2. panelbox/__version__.py +14 -0
  3. panelbox/cli/__init__.py +0 -0
  4. panelbox/cli/{commands}/__init__.py +0 -0
  5. panelbox/core/__init__.py +0 -0
  6. panelbox/core/base_model.py +164 -0
  7. panelbox/core/formula_parser.py +318 -0
  8. panelbox/core/panel_data.py +387 -0
  9. panelbox/core/results.py +366 -0
  10. panelbox/datasets/__init__.py +0 -0
  11. panelbox/datasets/{data}/__init__.py +0 -0
  12. panelbox/gmm/__init__.py +65 -0
  13. panelbox/gmm/difference_gmm.py +645 -0
  14. panelbox/gmm/estimator.py +562 -0
  15. panelbox/gmm/instruments.py +580 -0
  16. panelbox/gmm/results.py +550 -0
  17. panelbox/gmm/system_gmm.py +621 -0
  18. panelbox/gmm/tests.py +535 -0
  19. panelbox/models/__init__.py +11 -0
  20. panelbox/models/dynamic/__init__.py +0 -0
  21. panelbox/models/iv/__init__.py +0 -0
  22. panelbox/models/static/__init__.py +13 -0
  23. panelbox/models/static/fixed_effects.py +516 -0
  24. panelbox/models/static/pooled_ols.py +298 -0
  25. panelbox/models/static/random_effects.py +512 -0
  26. panelbox/report/__init__.py +61 -0
  27. panelbox/report/asset_manager.py +410 -0
  28. panelbox/report/css_manager.py +472 -0
  29. panelbox/report/exporters/__init__.py +15 -0
  30. panelbox/report/exporters/html_exporter.py +440 -0
  31. panelbox/report/exporters/latex_exporter.py +510 -0
  32. panelbox/report/exporters/markdown_exporter.py +446 -0
  33. panelbox/report/renderers/__init__.py +11 -0
  34. panelbox/report/renderers/static/__init__.py +0 -0
  35. panelbox/report/renderers/static_validation_renderer.py +341 -0
  36. panelbox/report/report_manager.py +502 -0
  37. panelbox/report/template_manager.py +337 -0
  38. panelbox/report/transformers/__init__.py +0 -0
  39. panelbox/report/transformers/static/__init__.py +0 -0
  40. panelbox/report/validation_transformer.py +449 -0
  41. panelbox/standard_errors/__init__.py +0 -0
  42. panelbox/templates/__init__.py +0 -0
  43. panelbox/templates/assets/css/base_styles.css +382 -0
  44. panelbox/templates/assets/css/report_components.css +747 -0
  45. panelbox/templates/assets/js/tab-navigation.js +161 -0
  46. panelbox/templates/assets/js/utils.js +276 -0
  47. panelbox/templates/common/footer.html +24 -0
  48. panelbox/templates/common/header.html +44 -0
  49. panelbox/templates/common/meta.html +5 -0
  50. panelbox/templates/validation/interactive/index.html +272 -0
  51. panelbox/templates/validation/interactive/partials/charts.html +58 -0
  52. panelbox/templates/validation/interactive/partials/methodology.html +201 -0
  53. panelbox/templates/validation/interactive/partials/overview.html +146 -0
  54. panelbox/templates/validation/interactive/partials/recommendations.html +101 -0
  55. panelbox/templates/validation/interactive/partials/test_results.html +231 -0
  56. panelbox/utils/__init__.py +0 -0
  57. panelbox/utils/formatting.py +172 -0
  58. panelbox/utils/matrix_ops.py +233 -0
  59. panelbox/utils/statistical.py +173 -0
  60. panelbox/validation/__init__.py +58 -0
  61. panelbox/validation/base.py +175 -0
  62. panelbox/validation/cointegration/__init__.py +0 -0
  63. panelbox/validation/cross_sectional_dependence/__init__.py +13 -0
  64. panelbox/validation/cross_sectional_dependence/breusch_pagan_lm.py +222 -0
  65. panelbox/validation/cross_sectional_dependence/frees.py +297 -0
  66. panelbox/validation/cross_sectional_dependence/pesaran_cd.py +188 -0
  67. panelbox/validation/heteroskedasticity/__init__.py +13 -0
  68. panelbox/validation/heteroskedasticity/breusch_pagan.py +222 -0
  69. panelbox/validation/heteroskedasticity/modified_wald.py +172 -0
  70. panelbox/validation/heteroskedasticity/white.py +208 -0
  71. panelbox/validation/instruments/__init__.py +0 -0
  72. panelbox/validation/robustness/__init__.py +0 -0
  73. panelbox/validation/serial_correlation/__init__.py +13 -0
  74. panelbox/validation/serial_correlation/baltagi_wu.py +220 -0
  75. panelbox/validation/serial_correlation/breusch_godfrey.py +260 -0
  76. panelbox/validation/serial_correlation/wooldridge_ar.py +200 -0
  77. panelbox/validation/specification/__init__.py +16 -0
  78. panelbox/validation/specification/chow.py +273 -0
  79. panelbox/validation/specification/hausman.py +264 -0
  80. panelbox/validation/specification/mundlak.py +331 -0
  81. panelbox/validation/specification/reset.py +273 -0
  82. panelbox/validation/unit_root/__init__.py +0 -0
  83. panelbox/validation/validation_report.py +257 -0
  84. panelbox/validation/validation_suite.py +401 -0
  85. panelbox-0.2.0.dist-info/METADATA +337 -0
  86. panelbox-0.2.0.dist-info/RECORD +90 -0
  87. panelbox-0.2.0.dist-info/WHEEL +5 -0
  88. panelbox-0.2.0.dist-info/entry_points.txt +2 -0
  89. panelbox-0.2.0.dist-info/licenses/LICENSE +21 -0
  90. panelbox-0.2.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,257 @@
1
+ """
2
+ Validation report container.
3
+ """
4
+
5
from typing import Any, Dict, List, Optional

from panelbox.validation.base import ValidationTestResult
7
+
8
+
9
class ValidationReport:
    """
    Container for validation test results.

    Collects the outcomes of specification, serial correlation,
    heteroskedasticity and cross-sectional dependence tests for a single
    estimated panel model, and renders them as a formatted text report.

    A test entry whose value is ``None`` (a placeholder for a test that
    could not be run) is skipped everywhere, consistently.

    Attributes
    ----------
    model_info : dict
        Information about the model being validated (keys such as
        ``model_type``, ``formula``, ``nobs``, ``n_entities``).
    specification_tests : dict
        Results of specification tests (Hausman, Mundlak, etc.)
    serial_tests : dict
        Results of serial correlation tests
    het_tests : dict
        Results of heteroskedasticity tests
    cd_tests : dict
        Results of cross-sectional dependence tests
    """

    def __init__(
        self,
        model_info: Dict[str, Any],
        specification_tests: Optional[Dict[str, "ValidationTestResult"]] = None,
        serial_tests: Optional[Dict[str, "ValidationTestResult"]] = None,
        het_tests: Optional[Dict[str, "ValidationTestResult"]] = None,
        cd_tests: Optional[Dict[str, "ValidationTestResult"]] = None
    ):
        self.model_info = model_info
        # Normalize None to empty dicts so iteration below never needs a guard.
        self.specification_tests = specification_tests or {}
        self.serial_tests = serial_tests or {}
        self.het_tests = het_tests or {}
        self.cd_tests = cd_tests or {}

    def __str__(self) -> str:
        """Full formatted report (same as ``summary()``)."""
        return self.summary()

    def __repr__(self) -> str:
        """Short repr with model type and total number of stored tests."""
        n_tests = (
            len(self.specification_tests) +
            len(self.serial_tests) +
            len(self.het_tests) +
            len(self.cd_tests)
        )
        return f"ValidationReport(model='{self.model_info.get('model_type')}', tests={n_tests})"

    def _categories(self):
        """Return (label, tests-dict) pairs for the four test categories."""
        return [
            ("specification", self.specification_tests),
            ("serial correlation", self.serial_tests),
            ("heteroskedasticity", self.het_tests),
            ("cross-sectional dependence", self.cd_tests),
        ]

    def summary(self, verbose: bool = True) -> str:
        """
        Generate formatted summary of all validation tests.

        Parameters
        ----------
        verbose : bool, default=True
            If True, include full details of each test
            If False, show only summary table

        Returns
        -------
        str
            Formatted validation report
        """
        lines = []
        lines.append("=" * 78)
        lines.append("MODEL VALIDATION REPORT")
        lines.append("=" * 78)
        lines.append("")

        # Model information (missing keys degrade gracefully to 'Unknown')
        lines.append("Model Information:")
        lines.append(f"  Type: {self.model_info.get('model_type', 'Unknown')}")
        lines.append(f"  Formula: {self.model_info.get('formula', 'Unknown')}")
        lines.append(f"  N obs: {self.model_info.get('nobs', 'Unknown')}")
        lines.append(f"  N entities: {self.model_info.get('n_entities', 'Unknown')}")
        lines.append("")

        # Summary table header
        lines.append("=" * 78)
        lines.append("VALIDATION TESTS SUMMARY")
        lines.append("=" * 78)
        lines.append(f"{'Test':<35} {'Statistic':<12} {'P-value':<10} {'Result':<10}")
        lines.append("-" * 78)

        def add_test_row(test_name, test_result):
            # Skip placeholder entries for tests that could not be run.
            if test_result is None:
                return
            stat_str = f"{test_result.statistic:.3f}"
            pval_str = f"{test_result.pvalue:.4f}"
            outcome = "REJECT" if test_result.reject_null else "OK"
            lines.append(f"{test_name:<35} {stat_str:<12} {pval_str:<10} {outcome:<10}")

        # Per-category sections of the summary table
        if self.specification_tests:
            lines.append("")
            lines.append("Specification Tests:")
            for name, result in self.specification_tests.items():
                add_test_row(f"  {name}", result)

        if self.serial_tests:
            lines.append("")
            lines.append("Serial Correlation Tests:")
            for name, result in self.serial_tests.items():
                add_test_row(f"  {name}", result)

        if self.het_tests:
            lines.append("")
            lines.append("Heteroskedasticity Tests:")
            for name, result in self.het_tests.items():
                add_test_row(f"  {name}", result)

        if self.cd_tests:
            lines.append("")
            lines.append("Cross-Sectional Dependence Tests:")
            for name, result in self.cd_tests.items():
                add_test_row(f"  {name}", result)

        lines.append("=" * 78)
        lines.append("")

        # Diagnostics summary: collect every test that rejected its null.
        problems = []
        for category, tests in self._categories():
            for name, result in tests.items():
                # Guard against None placeholders (same rule as add_test_row).
                if result is not None and result.reject_null:
                    problems.append(f"  - {name}: {category}")

        if problems:
            lines.append("⚠️ POTENTIAL ISSUES DETECTED:")
            lines.extend(problems)
            lines.append("")
            lines.append("Consider:")

            # Remedies keyed off the category label embedded in each problem line.
            if any("serial correlation" in p for p in problems):
                lines.append("  • Use clustered standard errors or HAC errors")

            if any("heteroskedasticity" in p for p in problems):
                lines.append("  • Use robust standard errors")

            if any("cross-sectional dependence" in p for p in problems):
                lines.append("  • Use Driscoll-Kraay standard errors")

            if any("specification" in p for p in problems):
                lines.append("  • Review model specification")
        else:
            lines.append("✓ No major issues detected in validation tests")

        lines.append("")
        lines.append("=" * 78)
        lines.append("")

        # Verbose output: each test's own detailed summary.
        if verbose and (self.specification_tests or self.serial_tests or
                        self.het_tests or self.cd_tests):
            lines.append("")
            lines.append("DETAILED TEST RESULTS")
            lines.append("=" * 78)
            lines.append("")

            for heading, tests in [
                ("SPECIFICATION TESTS", self.specification_tests),
                ("SERIAL CORRELATION TESTS", self.serial_tests),
                ("HETEROSKEDASTICITY TESTS", self.het_tests),
                ("CROSS-SECTIONAL DEPENDENCE TESTS", self.cd_tests)
            ]:
                if tests:
                    lines.append("")
                    lines.append(heading)
                    lines.append("-" * 78)
                    for name, result in tests.items():
                        if result is None:
                            continue
                        lines.append("")
                        lines.append(result.summary())

        return "\n".join(lines)

    def to_dict(self) -> Dict:
        """
        Export validation report to dictionary.

        Returns
        -------
        dict
            Dictionary with all test results. ``None`` placeholders are
            omitted from the output.
        """

        def test_to_dict(test):
            # Flatten one test result to plain, serializable fields.
            return {
                'statistic': test.statistic,
                'pvalue': test.pvalue,
                'df': test.df,
                'reject_null': test.reject_null,
                'conclusion': test.conclusion,
                'metadata': test.metadata
            }

        result = {'model_info': self.model_info}
        for key, tests in [
            ('specification_tests', self.specification_tests),
            ('serial_tests', self.serial_tests),
            ('het_tests', self.het_tests),
            ('cd_tests', self.cd_tests),
        ]:
            result[key] = {
                name: test_to_dict(t) for name, t in tests.items() if t is not None
            }
        return result

    def get_failed_tests(self) -> List[str]:
        """
        Get list of tests that rejected the null hypothesis.

        Returns
        -------
        list
            Names of tests that detected issues, as "category/name" strings
            (category abbreviations: spec, serial, het, cd).
        """
        failed = []

        for category, tests in [
            ("spec", self.specification_tests),
            ("serial", self.serial_tests),
            ("het", self.het_tests),
            ("cd", self.cd_tests)
        ]:
            for name, result in tests.items():
                # None placeholders are not failures.
                if result is not None and result.reject_null:
                    failed.append(f"{category}/{name}")

        return failed
@@ -0,0 +1,401 @@
1
+ """
2
+ Validation suite for panel models.
3
+ """
4
+
5
+ from typing import List, Optional, Dict, Union
6
+ import warnings
7
+
8
+ from panelbox.core.results import PanelResults
9
+ from panelbox.validation.validation_report import ValidationReport
10
+ from panelbox.validation.base import ValidationTestResult
11
+
12
+ # Import tests
13
+ from panelbox.validation.specification.hausman import HausmanTest
14
+ from panelbox.validation.specification.mundlak import MundlakTest
15
+ from panelbox.validation.specification.reset import RESETTest
16
+ from panelbox.validation.specification.chow import ChowTest
17
+ from panelbox.validation.serial_correlation.wooldridge_ar import WooldridgeARTest
18
+ from panelbox.validation.serial_correlation.breusch_godfrey import BreuschGodfreyTest
19
+ from panelbox.validation.serial_correlation.baltagi_wu import BaltagiWuTest
20
+ from panelbox.validation.heteroskedasticity.modified_wald import ModifiedWaldTest
21
+ from panelbox.validation.heteroskedasticity.breusch_pagan import BreuschPaganTest
22
+ from panelbox.validation.heteroskedasticity.white import WhiteTest
23
+ from panelbox.validation.cross_sectional_dependence.pesaran_cd import PesaranCDTest
24
+ from panelbox.validation.cross_sectional_dependence.breusch_pagan_lm import BreuschPaganLMTest
25
+ from panelbox.validation.cross_sectional_dependence.frees import FreesTest
26
+
27
+
28
class ValidationSuite:
    """
    Suite of validation tests for panel models.

    This class provides a unified interface to run various diagnostic tests
    on panel model results. Individual test failures are downgraded to
    warnings so one broken test does not abort the whole suite.

    Parameters
    ----------
    results : PanelResults
        Results from panel model estimation

    Examples
    --------
    >>> from panelbox.models.static.fixed_effects import FixedEffects
    >>> fe = FixedEffects("y ~ x1 + x2", data, "entity", "time")
    >>> results = fe.fit()
    >>>
    >>> from panelbox.validation.validation_suite import ValidationSuite
    >>> suite = ValidationSuite(results)
    >>> report = suite.run(tests='all')
    >>> print(report)
    """

    def __init__(self, results: PanelResults):
        """
        Initialize validation suite.

        Parameters
        ----------
        results : PanelResults
            Results from panel model estimation
        """
        self.results = results
        # Cached so test selection can branch on the model type cheaply.
        self.model_type = results.model_type

    def run(
        self,
        tests: Union[str, List[str]] = 'default',
        alpha: float = 0.05,
        verbose: bool = False
    ) -> ValidationReport:
        """
        Run validation tests.

        Parameters
        ----------
        tests : str or list of str, default='default'
            Which tests to run:
            - 'all': Run all available tests
            - 'default': Run recommended tests for this model type
            - 'specification': Specification tests only
            - 'serial': Serial correlation tests only
            - 'het': Heteroskedasticity tests only
            - 'cd': Cross-sectional dependence tests only
            - List of test names
        alpha : float, default=0.05
            Significance level for tests
        verbose : bool, default=False
            If True, print progress during testing

        Returns
        -------
        ValidationReport
            Report containing all test results
        """
        tests_to_run = self._determine_tests(tests)

        # One result dict per category; categories not selected stay empty.
        specification_tests = {}
        serial_tests = {}
        het_tests = {}
        cd_tests = {}

        if 'specification' in tests_to_run:
            specification_tests = self.run_specification_tests(alpha, verbose)

        if 'serial' in tests_to_run:
            serial_tests = self.run_serial_correlation_tests(alpha, verbose)

        if 'het' in tests_to_run:
            het_tests = self.run_heteroskedasticity_tests(alpha, verbose)

        if 'cd' in tests_to_run:
            cd_tests = self.run_cross_sectional_tests(alpha, verbose)

        model_info = {
            'model_type': self.model_type,
            'formula': self.results.formula,
            'nobs': self.results.nobs,
            'n_entities': self.results.n_entities,
            'n_periods': self.results.n_periods
        }

        return ValidationReport(
            model_info=model_info,
            specification_tests=specification_tests,
            serial_tests=serial_tests,
            het_tests=het_tests,
            cd_tests=cd_tests
        )

    def _run_one(self, results, key, runner, verbose, display=None):
        """
        Run a single diagnostic test and store its outcome.

        Parameters
        ----------
        results : dict
            Destination dict; on success the outcome is stored under *key*.
        key : str
            Dict key, also used in failure messages.
        runner : callable
            Zero-argument callable that builds and runs the test.
        verbose : bool
            Print progress/failure messages.
        display : str, optional
            Name used in the "Running ..." progress line when it differs
            from *key* (e.g. 'Wooldridge AR' vs 'Wooldridge').

        Notes
        -----
        Any exception is converted to a warning so one broken test cannot
        abort the whole suite.
        """
        name = display if display is not None else key
        try:
            if verbose:
                print(f"Running {name} test...")
            results[key] = runner()
        except Exception as e:
            if verbose:
                print(f"  Warning: {key} test failed: {e}")
            warnings.warn(f"{key} test failed: {e}")

    def run_specification_tests(
        self,
        alpha: float = 0.05,
        verbose: bool = False
    ) -> Dict[str, ValidationTestResult]:
        """
        Run specification tests.

        Parameters
        ----------
        alpha : float, default=0.05
            Significance level
        verbose : bool, default=False
            Print progress

        Returns
        -------
        dict
            Dictionary of test results
        """
        results = {}

        # Note: Hausman test requires two model results (FE and RE), so it
        # cannot be run from a single results object; run it separately.

        # Mundlak test only applies to RE models.
        if 'Random Effects' in self.model_type:
            self._run_one(results, 'Mundlak',
                          lambda: MundlakTest(self.results).run(alpha), verbose)

        # RESET test applies to all models.
        self._run_one(results, 'RESET',
                      lambda: RESETTest(self.results).run(alpha=alpha), verbose)

        # Chow test requires a break_point parameter; skipped by default so
        # users can run it separately with a specific break point.

        return results

    def run_serial_correlation_tests(
        self,
        alpha: float = 0.05,
        verbose: bool = False
    ) -> Dict[str, ValidationTestResult]:
        """
        Run serial correlation tests.

        Parameters
        ----------
        alpha : float, default=0.05
            Significance level
        verbose : bool, default=False
            Print progress

        Returns
        -------
        dict
            Dictionary of test results
        """
        results = {}

        # Wooldridge test only applies to FE models.
        if 'Fixed Effects' in self.model_type:
            self._run_one(results, 'Wooldridge',
                          lambda: WooldridgeARTest(self.results).run(alpha),
                          verbose, display='Wooldridge AR')

        # Breusch-Godfrey test applies to all models.
        self._run_one(results, 'Breusch-Godfrey',
                      lambda: BreuschGodfreyTest(self.results).run(lags=1, alpha=alpha),
                      verbose)

        # Baltagi-Wu LBI test applies to all models, especially unbalanced panels.
        self._run_one(results, 'Baltagi-Wu',
                      lambda: BaltagiWuTest(self.results).run(alpha=alpha),
                      verbose, display='Baltagi-Wu LBI')

        return results

    def run_heteroskedasticity_tests(
        self,
        alpha: float = 0.05,
        verbose: bool = False
    ) -> Dict[str, ValidationTestResult]:
        """
        Run heteroskedasticity tests.

        Parameters
        ----------
        alpha : float, default=0.05
            Significance level
        verbose : bool, default=False
            Print progress

        Returns
        -------
        dict
            Dictionary of test results
        """
        results = {}

        # Modified Wald test only applies to FE models.
        if 'Fixed Effects' in self.model_type:
            self._run_one(results, 'Modified Wald',
                          lambda: ModifiedWaldTest(self.results).run(alpha), verbose)

        # Breusch-Pagan LM test applies to all models.
        self._run_one(results, 'Breusch-Pagan',
                      lambda: BreuschPaganTest(self.results).run(alpha), verbose)

        # White test applies to all models; cross terms disabled for speed.
        self._run_one(results, 'White',
                      lambda: WhiteTest(self.results).run(alpha, cross_terms=False),
                      verbose)

        return results

    def run_cross_sectional_tests(
        self,
        alpha: float = 0.05,
        verbose: bool = False
    ) -> Dict[str, ValidationTestResult]:
        """
        Run cross-sectional dependence tests.

        Parameters
        ----------
        alpha : float, default=0.05
            Significance level
        verbose : bool, default=False
            Print progress

        Returns
        -------
        dict
            Dictionary of test results
        """
        results = {}

        # Pesaran CD test: suited to large N panels.
        self._run_one(results, 'Pesaran CD',
                      lambda: PesaranCDTest(self.results).run(alpha), verbose)

        # Breusch-Pagan LM test: suited to small-to-moderate N panels.
        self._run_one(results, 'Breusch-Pagan LM',
                      lambda: BreuschPaganLMTest(self.results).run(alpha=alpha),
                      verbose)

        # Frees test: non-parametric, robust to non-normality.
        self._run_one(results, 'Frees',
                      lambda: FreesTest(self.results).run(alpha=alpha), verbose)

        return results

    def _determine_tests(self, tests: Union[str, List[str]]) -> List[str]:
        """
        Determine which test categories to run.

        Parameters
        ----------
        tests : str or list
            Test specification ('all', 'default', a single category name,
            or an explicit list of category names)

        Returns
        -------
        list
            List of test categories

        Raises
        ------
        ValueError
            If *tests* is not a recognized keyword or a list.
        """
        if tests == 'all':
            return ['specification', 'serial', 'het', 'cd']
        elif tests == 'default':
            # Recommended tests based on model type.
            if 'Fixed Effects' in self.model_type:
                return ['serial', 'het', 'cd']
            elif 'Random Effects' in self.model_type:
                return ['cd']
            else:  # Pooled OLS
                return ['het', 'cd']
        elif tests in ('specification', 'serial', 'het', 'cd'):
            # A single category keyword selects exactly that category.
            return [tests]
        elif isinstance(tests, list):
            return tests
        else:
            raise ValueError(
                f"Invalid tests specification: {tests}. "
                "Use 'all', 'default', 'specification', 'serial', 'het', 'cd', "
                "or a list of test names"
            )