panelbox 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- panelbox/__init__.py +67 -0
- panelbox/__version__.py +14 -0
- panelbox/cli/__init__.py +0 -0
- panelbox/cli/{commands}/__init__.py +0 -0
- panelbox/core/__init__.py +0 -0
- panelbox/core/base_model.py +164 -0
- panelbox/core/formula_parser.py +318 -0
- panelbox/core/panel_data.py +387 -0
- panelbox/core/results.py +366 -0
- panelbox/datasets/__init__.py +0 -0
- panelbox/datasets/{data}/__init__.py +0 -0
- panelbox/gmm/__init__.py +65 -0
- panelbox/gmm/difference_gmm.py +645 -0
- panelbox/gmm/estimator.py +562 -0
- panelbox/gmm/instruments.py +580 -0
- panelbox/gmm/results.py +550 -0
- panelbox/gmm/system_gmm.py +621 -0
- panelbox/gmm/tests.py +535 -0
- panelbox/models/__init__.py +11 -0
- panelbox/models/dynamic/__init__.py +0 -0
- panelbox/models/iv/__init__.py +0 -0
- panelbox/models/static/__init__.py +13 -0
- panelbox/models/static/fixed_effects.py +516 -0
- panelbox/models/static/pooled_ols.py +298 -0
- panelbox/models/static/random_effects.py +512 -0
- panelbox/report/__init__.py +61 -0
- panelbox/report/asset_manager.py +410 -0
- panelbox/report/css_manager.py +472 -0
- panelbox/report/exporters/__init__.py +15 -0
- panelbox/report/exporters/html_exporter.py +440 -0
- panelbox/report/exporters/latex_exporter.py +510 -0
- panelbox/report/exporters/markdown_exporter.py +446 -0
- panelbox/report/renderers/__init__.py +11 -0
- panelbox/report/renderers/static/__init__.py +0 -0
- panelbox/report/renderers/static_validation_renderer.py +341 -0
- panelbox/report/report_manager.py +502 -0
- panelbox/report/template_manager.py +337 -0
- panelbox/report/transformers/__init__.py +0 -0
- panelbox/report/transformers/static/__init__.py +0 -0
- panelbox/report/validation_transformer.py +449 -0
- panelbox/standard_errors/__init__.py +0 -0
- panelbox/templates/__init__.py +0 -0
- panelbox/templates/assets/css/base_styles.css +382 -0
- panelbox/templates/assets/css/report_components.css +747 -0
- panelbox/templates/assets/js/tab-navigation.js +161 -0
- panelbox/templates/assets/js/utils.js +276 -0
- panelbox/templates/common/footer.html +24 -0
- panelbox/templates/common/header.html +44 -0
- panelbox/templates/common/meta.html +5 -0
- panelbox/templates/validation/interactive/index.html +272 -0
- panelbox/templates/validation/interactive/partials/charts.html +58 -0
- panelbox/templates/validation/interactive/partials/methodology.html +201 -0
- panelbox/templates/validation/interactive/partials/overview.html +146 -0
- panelbox/templates/validation/interactive/partials/recommendations.html +101 -0
- panelbox/templates/validation/interactive/partials/test_results.html +231 -0
- panelbox/utils/__init__.py +0 -0
- panelbox/utils/formatting.py +172 -0
- panelbox/utils/matrix_ops.py +233 -0
- panelbox/utils/statistical.py +173 -0
- panelbox/validation/__init__.py +58 -0
- panelbox/validation/base.py +175 -0
- panelbox/validation/cointegration/__init__.py +0 -0
- panelbox/validation/cross_sectional_dependence/__init__.py +13 -0
- panelbox/validation/cross_sectional_dependence/breusch_pagan_lm.py +222 -0
- panelbox/validation/cross_sectional_dependence/frees.py +297 -0
- panelbox/validation/cross_sectional_dependence/pesaran_cd.py +188 -0
- panelbox/validation/heteroskedasticity/__init__.py +13 -0
- panelbox/validation/heteroskedasticity/breusch_pagan.py +222 -0
- panelbox/validation/heteroskedasticity/modified_wald.py +172 -0
- panelbox/validation/heteroskedasticity/white.py +208 -0
- panelbox/validation/instruments/__init__.py +0 -0
- panelbox/validation/robustness/__init__.py +0 -0
- panelbox/validation/serial_correlation/__init__.py +13 -0
- panelbox/validation/serial_correlation/baltagi_wu.py +220 -0
- panelbox/validation/serial_correlation/breusch_godfrey.py +260 -0
- panelbox/validation/serial_correlation/wooldridge_ar.py +200 -0
- panelbox/validation/specification/__init__.py +16 -0
- panelbox/validation/specification/chow.py +273 -0
- panelbox/validation/specification/hausman.py +264 -0
- panelbox/validation/specification/mundlak.py +331 -0
- panelbox/validation/specification/reset.py +273 -0
- panelbox/validation/unit_root/__init__.py +0 -0
- panelbox/validation/validation_report.py +257 -0
- panelbox/validation/validation_suite.py +401 -0
- panelbox-0.2.0.dist-info/METADATA +337 -0
- panelbox-0.2.0.dist-info/RECORD +90 -0
- panelbox-0.2.0.dist-info/WHEEL +5 -0
- panelbox-0.2.0.dist-info/entry_points.txt +2 -0
- panelbox-0.2.0.dist-info/licenses/LICENSE +21 -0
- panelbox-0.2.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,449 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Validation Report Transformer.
|
|
3
|
+
|
|
4
|
+
Transforms ValidationReport objects into template-friendly data structures.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Dict, Any, List, Optional
|
|
8
|
+
from collections import defaultdict
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class ValidationTransformer:
    """
    Transforms ValidationReport into data suitable for HTML templates.

    Converts ValidationReport objects into structured dictionaries with
    all necessary data for rendering interactive and static reports.

    Parameters
    ----------
    validation_report : ValidationReport
        The validation report to transform.  It must expose the mapping
        attributes ``specification_tests``, ``serial_tests``, ``het_tests``
        and ``cd_tests`` (name -> test result) plus a ``model_info`` dict.

    Examples
    --------
    >>> from panelbox.validation import ValidationReport
    >>> report = ValidationReport(model_info={...}, specification_tests={...})
    >>> transformer = ValidationTransformer(report)
    >>> data = transformer.transform()
    >>> # Use data in report generation
    >>> report_mgr.generate_validation_report(data)
    """

    # Display labels paired positionally with the dicts returned by
    # _all_test_dicts().  The chart overview uses an abbreviated label for
    # the last category, so it keeps its own list in _prepare_chart_data().
    _CATEGORY_LABELS = [
        'Specification',
        'Serial Correlation',
        'Heteroskedasticity',
        'Cross-Sectional Dependence',
    ]

    def __init__(self, validation_report):
        """Initialize transformer with validation report."""
        self.report = validation_report

    def _all_test_dicts(self) -> List[Dict[str, Any]]:
        """
        Return the four test-result dicts in canonical category order.

        Single source of truth for category ordering — every method that
        walks the categories iterates this list instead of re-listing the
        report attributes.

        Returns
        -------
        list of dict
            ``[specification_tests, serial_tests, het_tests, cd_tests]``
        """
        return [
            self.report.specification_tests,
            self.report.serial_tests,
            self.report.het_tests,
            self.report.cd_tests,
        ]

    def transform(self, include_charts: bool = True) -> Dict[str, Any]:
        """
        Transform validation report into template data.

        Parameters
        ----------
        include_charts : bool, default=True
            Include chart data for interactive reports

        Returns
        -------
        dict
            Complete data structure for template rendering.  The ``charts``
            key is present only when ``include_charts`` is True.

        Examples
        --------
        >>> data = transformer.transform(include_charts=True)
        >>> print(data.keys())
        dict_keys(['model_info', 'tests', 'summary', 'recommendations', 'charts'])
        """
        data = {
            'model_info': self._transform_model_info(),
            'tests': self._transform_tests(),
            'summary': self._compute_summary(),
            'recommendations': self._generate_recommendations(),
        }

        if include_charts:
            data['charts'] = self._prepare_chart_data()

        return data

    def _transform_model_info(self) -> Dict[str, Any]:
        """
        Transform model information.

        Works on a copy so the original report's ``model_info`` is never
        mutated; adds thousands-separated display strings alongside the
        raw values.

        Returns
        -------
        dict
            Model information for template
        """
        info = self.report.model_info.copy()

        if 'nobs' in info:
            info['nobs_formatted'] = f"{info['nobs']:,}"

        if 'n_entities' in info:
            info['n_entities_formatted'] = f"{info['n_entities']:,}"

        # Guard against n_periods being present but None/0 (unbalanced panels).
        if 'n_periods' in info and info.get('n_periods'):
            info['n_periods_formatted'] = f"{info['n_periods']}"

        return info

    def _transform_tests(self) -> List[Dict[str, Any]]:
        """
        Transform test results into table-ready format.

        Returns
        -------
        list of dict
            One row per test, in category order, with both raw values and
            pre-formatted display strings for template tables.
        """
        tests = []

        for category_name, category_tests in zip(
            self._CATEGORY_LABELS, self._all_test_dicts()
        ):
            for test_name, test_result in category_tests.items():
                tests.append({
                    'category': category_name,
                    'name': test_name,
                    'statistic': test_result.statistic,
                    'statistic_formatted': f"{test_result.statistic:.3f}",
                    'pvalue': test_result.pvalue,
                    'pvalue_formatted': self._format_pvalue(test_result.pvalue),
                    'df': test_result.df,
                    'reject_null': test_result.reject_null,
                    'result': 'REJECT' if test_result.reject_null else 'ACCEPT',
                    'result_class': 'reject' if test_result.reject_null else 'accept',
                    'conclusion': test_result.conclusion,
                    'significance': self._get_significance_stars(test_result.pvalue),
                    'metadata': test_result.metadata or {},
                })

        return tests

    def _compute_summary(self) -> Dict[str, Any]:
        """
        Compute summary statistics.

        Returns
        -------
        dict
            Summary statistics for dashboard: totals, pass rate,
            per-category failure counts and an overall status label.
        """
        # Short keys used by the dashboard, aligned positionally with
        # _all_test_dicts().
        category_keys = [
            'specification', 'serial', 'heteroskedasticity', 'cross_sectional'
        ]

        failed_by_category: Dict[str, int] = {}
        total_tests = 0
        total_failed = 0

        for key, tests_dict in zip(category_keys, self._all_test_dicts()):
            n_failed = sum(1 for t in tests_dict.values() if t.reject_null)
            failed_by_category[key] = n_failed
            total_tests += len(tests_dict)
            total_failed += n_failed

        total_passed = total_tests - total_failed
        pass_rate = (total_passed / total_tests * 100) if total_tests > 0 else 0

        # Overall status thresholds: 0 / <=2 / <=4 / more failures.
        if total_failed == 0:
            overall_status, status_message = 'excellent', 'All tests passed'
        elif total_failed <= 2:
            overall_status, status_message = 'good', 'Minor issues detected'
        elif total_failed <= 4:
            overall_status, status_message = 'warning', 'Several issues detected'
        else:
            overall_status, status_message = 'critical', 'Multiple issues detected'

        return {
            'total_tests': total_tests,
            'total_passed': total_passed,
            'total_failed': total_failed,
            'pass_rate': pass_rate,
            'pass_rate_formatted': f"{pass_rate:.1f}%",
            'failed_by_category': failed_by_category,
            'overall_status': overall_status,
            'status_message': status_message,
            'has_issues': total_failed > 0,
        }

    def _generate_recommendations(self) -> List[Dict[str, Any]]:
        """
        Generate recommendations based on failed tests.

        Returns
        -------
        list of dict
            One recommendation per category with at least one failing
            test, sorted by severity (critical first).
        """
        # (test dict, display category, severity, issue template, suggestions).
        # Listed in the legacy emission order so the stable severity sort
        # keeps ties (e.g. the two 'high' entries) in the same order as before.
        rule_specs = [
            (self.report.serial_tests, 'Serial Correlation', 'high',
             "Detected serial correlation in {n} test(s)",
             [
                 'Use clustered standard errors at the entity level',
                 'Consider HAC (Heteroskedasticity and Autocorrelation Consistent) errors',
                 'Add lagged dependent variable if appropriate',
                 'Review model dynamics and time structure',
             ]),
            (self.report.het_tests, 'Heteroskedasticity', 'medium',
             "Detected heteroskedasticity in {n} test(s)",
             [
                 'Use robust (White) standard errors',
                 'Consider weighted least squares (WLS)',
                 'Apply log transformation to dependent variable',
                 'Check for outliers and influential observations',
             ]),
            (self.report.cd_tests, 'Cross-Sectional Dependence', 'high',
             "Detected cross-sectional dependence in {n} test(s)",
             [
                 'Use Driscoll-Kraay standard errors',
                 'Consider spatial econometric models if geographic data',
                 'Add time fixed effects to control common shocks',
                 'Use bootstrap methods robust to cross-sectional dependence',
             ]),
            (self.report.specification_tests, 'Model Specification', 'critical',
             "Specification concerns in {n} test(s)",
             [
                 'Review model specification (Fixed vs Random Effects)',
                 'Consider alternative estimators',
                 'Add or remove control variables',
                 'Test for omitted variable bias',
             ]),
        ]

        recommendations = []
        for tests_dict, category, severity, issue_template, suggestions in rule_specs:
            failing = [
                name for name, result in tests_dict.items() if result.reject_null
            ]
            if failing:
                recommendations.append({
                    'category': category,
                    'severity': severity,
                    'issue': issue_template.format(n=len(failing)),
                    'tests': failing,
                    'suggestions': suggestions,
                })

        # list.sort is stable, so equal severities keep insertion order.
        severity_order = {'critical': 0, 'high': 1, 'medium': 2, 'low': 3}
        recommendations.sort(key=lambda r: severity_order.get(r['severity'], 99))

        return recommendations

    def _prepare_chart_data(self) -> Dict[str, Any]:
        """
        Prepare data for interactive charts.

        Returns
        -------
        dict
            Chart data for Plotly: ``test_overview`` (pass/fail per
            category), ``pvalue_distribution`` and ``test_statistics``.
        """
        charts: Dict[str, Any] = {}

        # 1. Test Results Overview (bar chart).  Abbreviated label for the
        # cross-sectional category to keep chart axes compact; empty
        # categories are skipped entirely.
        overview_labels = [
            'Specification',
            'Serial Correlation',
            'Heteroskedasticity',
            'Cross-Sectional Dep.',
        ]
        test_categories: List[str] = []
        passed_counts: List[int] = []
        failed_counts: List[int] = []

        for label, tests_dict in zip(overview_labels, self._all_test_dicts()):
            if not tests_dict:
                continue
            n_failed = sum(1 for t in tests_dict.values() if t.reject_null)
            test_categories.append(label)
            passed_counts.append(len(tests_dict) - n_failed)
            failed_counts.append(n_failed)

        charts['test_overview'] = {
            'categories': test_categories,
            'passed': passed_counts,
            'failed': failed_counts,
        }

        # 2 & 3. Per-test p-values and statistics share one flattening pass.
        all_test_names: List[str] = []
        all_pvalues: List[float] = []
        all_statistics: List[float] = []

        for tests_dict in self._all_test_dicts():
            for name, result in tests_dict.items():
                all_test_names.append(name)
                all_pvalues.append(result.pvalue)
                all_statistics.append(result.statistic)

        charts['pvalue_distribution'] = {
            'test_names': all_test_names,
            'pvalues': all_pvalues,
        }

        # Copy the name list so the two chart dicts don't alias one object.
        charts['test_statistics'] = {
            'test_names': list(all_test_names),
            'statistics': all_statistics,
        }

        return charts

    @staticmethod
    def _format_pvalue(pvalue: float) -> str:
        """
        Format p-value for display.

        Parameters
        ----------
        pvalue : float
            P-value to format

        Returns
        -------
        str
            Scientific notation below 0.001, otherwise four decimals.
        """
        if pvalue < 0.001:
            return f"{pvalue:.2e}"
        return f"{pvalue:.4f}"

    @staticmethod
    def _get_significance_stars(pvalue: float) -> str:
        """
        Get significance stars based on p-value.

        Parameters
        ----------
        pvalue : float
            P-value

        Returns
        -------
        str
            '***' (<0.001), '**' (<0.01), '*' (<0.05), '.' (<0.1), else ''.
        """
        if pvalue < 0.001:
            return '***'
        elif pvalue < 0.01:
            return '**'
        elif pvalue < 0.05:
            return '*'
        elif pvalue < 0.1:
            return '.'
        return ''

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert to dictionary (alias for transform).

        Returns
        -------
        dict
            Complete data structure
        """
        return self.transform()

    def __repr__(self) -> str:
        """String representation."""
        n_tests = sum(len(d) for d in self._all_test_dicts())
        return f"ValidationTransformer(tests={n_tests})"
|
|
File without changes
|
|
File without changes
|