dasycaus 1.0.0__tar.gz → 1.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. {dasycaus-1.0.0/dasycaus.egg-info → dasycaus-1.0.1}/PKG-INFO +1 -1
  2. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/__init__.py +15 -2
  3. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/bootstrap.py +4 -4
  4. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/core.py +18 -13
  5. dasycaus-1.0.1/dasycaus/diagnostics.py +395 -0
  6. {dasycaus-1.0.0 → dasycaus-1.0.1/dasycaus.egg-info}/PKG-INFO +1 -1
  7. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus.egg-info/SOURCES.txt +1 -0
  8. {dasycaus-1.0.0 → dasycaus-1.0.1}/pyproject.toml +1 -1
  9. {dasycaus-1.0.0 → dasycaus-1.0.1}/LICENSE +0 -0
  10. {dasycaus-1.0.0 → dasycaus-1.0.1}/MANIFEST.in +0 -0
  11. {dasycaus-1.0.0 → dasycaus-1.0.1}/README.md +0 -0
  12. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/data_transform.py +0 -0
  13. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/lag_selection.py +0 -0
  14. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/utils.py +0 -0
  15. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus.egg-info/dependency_links.txt +0 -0
  16. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus.egg-info/requires.txt +0 -0
  17. {dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus.egg-info/top_level.txt +0 -0
  18. {dasycaus-1.0.0 → dasycaus-1.0.1}/examples/comprehensive_analysis.py +0 -0
  19. {dasycaus-1.0.0 → dasycaus-1.0.1}/examples/example_usage.py +0 -0
  20. {dasycaus-1.0.0 → dasycaus-1.0.1}/setup.cfg +0 -0
  21. {dasycaus-1.0.0 → dasycaus-1.0.1}/setup.py +0 -0
  22. {dasycaus-1.0.0 → dasycaus-1.0.1}/tests/__init__.py +0 -0
  23. {dasycaus-1.0.0 → dasycaus-1.0.1}/tests/test_core.py +0 -0
  24. {dasycaus-1.0.0 → dasycaus-1.0.1}/tests/test_data_transform.py +0 -0
{dasycaus-1.0.0/dasycaus.egg-info → dasycaus-1.0.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dasycaus
- Version: 1.0.0
+ Version: 1.0.1
  Summary: Dynamic Asymmetric Causality Tests for Time Series Analysis
  Author-email: "Dr. Merwan Roudane" <merwanroudane920@gmail.com>
  Maintainer-email: "Dr. Merwan Roudane" <merwanroudane920@gmail.com>
{dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/__init__.py
@@ -21,7 +21,7 @@ Based on:
  Journal of Economic Studies, 39(2), 144-160.
  """
 
- __version__ = "1.0.0"
+ __version__ = "1.0.1"
  __author__ = "Dr. Merwan Roudane"
  __email__ = "merwanroudane920@gmail.com"
 
@@ -53,6 +53,14 @@ from .utils import (
      plot_dynamic_causality
  )
 
+ from .diagnostics import (
+     doornik_hansen_test,
+     multivariate_arch_test,
+     multivariate_autocorrelation_test,
+     run_all_diagnostics,
+     print_diagnostic_results
+ )
+
  __all__ = [
      'dynamic_asymmetric_causality_test',
      'dynamic_symmetric_causality_test',
@@ -66,5 +74,10 @@ __all__ = [
      'information_criteria',
      'generate_subsamples',
      'compute_test_ratio',
-     'plot_dynamic_causality'
+     'plot_dynamic_causality',
+     'doornik_hansen_test',
+     'multivariate_arch_test',
+     'multivariate_autocorrelation_test',
+     'run_all_diagnostics',
+     'print_diagnostic_results'
  ]
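With 1.0.1 the diagnostics API is re-exported at package level, as the __all__ additions above show. A minimal usage sketch of the new surface (the residuals here are simulated Gaussian noise, purely illustrative):

    import numpy as np
    from dasycaus import run_all_diagnostics, print_diagnostic_results

    rng = np.random.default_rng(0)
    resid = rng.standard_normal((200, 2))   # placeholder (T, n) residuals
    print_diagnostic_results(run_all_diagnostics(resid, arch_lags=1))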
{dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/bootstrap.py
@@ -13,7 +13,7 @@ def bootstrap_critical_values(
      data: np.ndarray,
      lag_order: int,
      integration_order: int,
-     num_simulations: int = 1000,
+     num_simulations: int = 10000,
      significance_level: float = 0.05,
      random_seed: Optional[int] = None,
      leverage_adjustment: bool = True
@@ -29,7 +29,7 @@ def bootstrap_critical_values(
          Lag order for VAR model.
      integration_order : int
          Integration order (for adding extra lags).
-     num_simulations : int, default=1000
+     num_simulations : int, default=10000
          Number of bootstrap simulations.
      significance_level : float, default=0.05
          Significance level for critical value.
@@ -94,7 +94,7 @@ def leveraged_bootstrap(
      data: np.ndarray,
      lag_order: int,
      integration_order: int,
-     num_simulations: int = 1000,
+     num_simulations: int = 10000,
      random_seed: Optional[int] = None
  ) -> np.ndarray:
      """
@@ -108,7 +108,7 @@ def leveraged_bootstrap(
          Lag order for VAR model.
      integration_order : int
          Integration order.
-     num_simulations : int, default=1000
+     num_simulations : int, default=10000
          Number of bootstrap simulations.
      random_seed : int, optional
          Random seed for reproducibility.
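Both bootstrap entry points now default to 10,000 replications. A hedged sketch showing how to restore the old 1,000-replication behaviour when speed matters (the data array and its (T, 2) shape are illustrative):

    import numpy as np
    from dasycaus.bootstrap import bootstrap_critical_values

    rng = np.random.default_rng(42)
    data = rng.standard_normal((150, 2)).cumsum(axis=0)   # two I(1) series
    cv = bootstrap_critical_values(data, lag_order=2, integration_order=1,
                                   num_simulations=1000,  # override the new 10000 default
                                   random_seed=42)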
{dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus/core.py
@@ -19,7 +19,7 @@ def symmetric_causality_test(
      maxlags: int,
      integration_order: int = 1,
      info_criterion: str = 'hjc',
-     bootstrap_sims: int = 1000,
+     bootstrap_sims: int = 10000,
      significance_levels: List[float] = [0.05, 0.10],
      random_seed: Optional[int] = 30540
  ) -> Dict:
@@ -37,7 +37,7 @@ def symmetric_causality_test(
          Integration order of the variables (0=stationary, 1=I(1), 2=I(2)).
      info_criterion : str, default='hjc'
          Information criterion for lag selection: 'aic', 'aicc', 'sbc', 'hqc', 'hjc'.
-     bootstrap_sims : int, default=1000
+     bootstrap_sims : int, default=10000
          Number of bootstrap simulations for critical values.
      significance_levels : list, default=[0.05, 0.10]
          List of significance levels for testing.
@@ -113,10 +113,10 @@ def asymmetric_causality_test(
      component: str = 'positive',
      integration_order: int = 1,
      info_criterion: str = 'hjc',
-     bootstrap_sims: int = 1000,
+     bootstrap_sims: int = 10000,
      significance_levels: List[float] = [0.05, 0.10],
      random_seed: Optional[int] = 30540,
-     include_trend: bool = True
+     include_trend: bool = False
  ) -> Dict:
      """
      Conduct static asymmetric Granger causality test.
@@ -133,14 +133,16 @@ def asymmetric_causality_test(
          Integration order of the original variables.
      info_criterion : str, default='hjc'
          Information criterion for lag selection.
-     bootstrap_sims : int, default=1000
+     bootstrap_sims : int, default=10000
          Number of bootstrap simulations.
      significance_levels : list, default=[0.05, 0.10]
          List of significance levels.
      random_seed : int, optional
          Random seed for reproducibility.
-     include_trend : bool, default=True
+     include_trend : bool, default=False
          Whether to include deterministic trend in transformation.
+         Default False matches the GAUSS implementation (simple cumulative sums).
+         Set to True for paper equations (5)-(8) with deterministic trends.
 
      Returns
      -------
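A sketch of calling the static asymmetric test under the new include_trend=False default. The leading data and maxlags arguments are assumed to match the symmetric variant shown above; they are not visible in this hunk, and the data array is illustrative:

    import numpy as np
    from dasycaus.core import asymmetric_causality_test

    rng = np.random.default_rng(1)
    data = rng.standard_normal((120, 2)).cumsum(axis=0)   # (T, 2) I(1) data
    res = asymmetric_causality_test(data, 4,              # data, maxlags (assumed)
                                    component='positive',
                                    include_trend=False)  # GAUSS-style cumulative sums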
@@ -180,7 +182,7 @@ def dynamic_symmetric_causality_test(
      maxlags: int,
      integration_order: int = 1,
      info_criterion: str = 'hjc',
-     bootstrap_sims: int = 1000,
+     bootstrap_sims: int = 10000,
      significance_levels: List[float] = [0.05, 0.10],
      subsample_method: str = 'recursive',
      random_seed: Optional[int] = 30540
@@ -198,7 +200,7 @@ def dynamic_symmetric_causality_test(
          Integration order of the variables.
      info_criterion : str, default='hjc'
          Information criterion for lag selection.
-     bootstrap_sims : int, default=1000
+     bootstrap_sims : int, default=10000
          Number of bootstrap simulations.
      significance_levels : list, default=[0.05, 0.10]
          List of significance levels.
@@ -280,11 +282,11 @@ def dynamic_asymmetric_causality_test(
      component: str = 'positive',
      integration_order: int = 1,
      info_criterion: str = 'hjc',
-     bootstrap_sims: int = 1000,
+     bootstrap_sims: int = 10000,
      significance_levels: List[float] = [0.05, 0.10],
      subsample_method: str = 'recursive',
      random_seed: Optional[int] = 30540,
-     include_trend: bool = True
+     include_trend: bool = False
  ) -> Dict:
      """
      Conduct dynamic asymmetric Granger causality tests using subsamples.
@@ -301,7 +303,7 @@ def dynamic_asymmetric_causality_test(
          Integration order of the original variables.
      info_criterion : str, default='hjc'
          Information criterion for lag selection.
-     bootstrap_sims : int, default=1000
+     bootstrap_sims : int, default=10000
          Number of bootstrap simulations.
      significance_levels : list, default=[0.05, 0.10]
          List of significance levels.
@@ -309,8 +311,9 @@ def dynamic_asymmetric_causality_test(
          Method for creating subsamples: 'recursive' or 'rolling'.
      random_seed : int, optional
          Random seed for reproducibility.
-     include_trend : bool, default=True
+     include_trend : bool, default=False
          Whether to include deterministic trend in transformation.
+         Default False matches the GAUSS implementation.
 
      Returns
      -------
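The dynamic variant takes the same new defaults. A sketch using a rolling window; dynamic_asymmetric_causality_test is confirmed in __all__ above, but the leading data and maxlags arguments are again assumed from the symmetric variant, and the data array is illustrative:

    import numpy as np
    from dasycaus import dynamic_asymmetric_causality_test

    rng = np.random.default_rng(1)
    data = rng.standard_normal((120, 2)).cumsum(axis=0)
    res = dynamic_asymmetric_causality_test(data, 4,      # data, maxlags (assumed)
                                            component='negative',
                                            subsample_method='rolling',
                                            bootstrap_sims=2000,
                                            include_trend=False)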
@@ -378,7 +381,9 @@ def _compute_wald_statistic(
      # Estimate unrestricted VAR
      A_hat = np.linalg.lstsq(X, Y, rcond=None)[0]
      residuals = Y - X @ A_hat
-     Sigma_u = (residuals.T @ residuals) / (T - total_lags - 1)
+     T_eff = Y.shape[0]  # Effective sample size after lagging
+     # Use T_eff as denominator (matches GAUSS line 866: VARCOV = RES'RES/T)
+     Sigma_u = (residuals.T @ residuals) / T_eff
 
      # Create restriction matrix C
      # We test if variable 2 does not cause variable 1
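The denominator switch is a pure rescaling of the residual covariance: 1.0.0 applied a degrees-of-freedom correction, 1.0.1 divides by the effective sample size as the GAUSS code does. A small numeric sketch of the size of the effect (all values illustrative):

    import numpy as np

    rng = np.random.default_rng(3)
    T_eff, correction = 96, 6                      # illustrative: total_lags=5, plus 1
    resid = rng.standard_normal((T_eff, 2))
    old = resid.T @ resid / (T_eff - correction)   # 1.0.0-style denominator
    new = resid.T @ resid / T_eff                  # 1.0.1: RES'RES / T
    print(np.diag(old) / np.diag(new))             # constant ratio 96/90 ≈ 1.067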
dasycaus-1.0.1/dasycaus/diagnostics.py (new file)
@@ -0,0 +1,395 @@
+ """
+ Diagnostic tests module for VAR residuals.
+
+ This module implements diagnostic tests for VAR model residuals as used in
+ Hatemi-J's causality testing framework:
+ - Multivariate Normality Test (Doornik-Hansen, 2008)
+ - Multivariate ARCH Test (Hacker-Hatemi-J, 2005)
+ - Multivariate Autocorrelation Test (Hatemi-J, 2004)
+ """
+
+ import numpy as np
+ from typing import Dict, Optional, Tuple
+ from scipy.stats import chi2
+
+
+ def doornik_hansen_test(residuals: np.ndarray) -> Dict:
+     """
+     Multivariate normality test (Doornik and Hansen, 2008).
+
+     Tests the null hypothesis that residuals follow a multivariate normal distribution.
+
+     Parameters
+     ----------
+     residuals : np.ndarray
+         Residuals matrix with shape (T, n) where T is number of observations
+         and n is number of variables.
+
+     Returns
+     -------
+     dict
+         Dictionary containing:
+         - 'test_statistic': Overall test statistic
+         - 'p_value': P-value for the test
+         - 'df': Degrees of freedom
+         - 'reject_null_5pct': Boolean indicating rejection at 5% level
+         - 'individual_stats': Dict with per-variable statistics
+
+     References
+     ----------
+     Doornik, J.A. and Hansen, H. (2008). An omnibus test for univariate and
+     multivariate normality. Oxford Bulletin of Economics and Statistics, 70, 927-939.
+     """
+     T, n = residuals.shape
+
+     if T < 4:
+         raise ValueError("Need at least 4 observations for normality test")
+
+     # Standardize residuals
+     means = np.mean(residuals, axis=0)
+     stds = np.std(residuals, axis=0, ddof=1)
+     z = (residuals - means) / stds
+
+     # Compute skewness and kurtosis for each variable
+     skewness = np.zeros(n)
+     kurtosis = np.zeros(n)
+
+     for i in range(n):
+         m3 = np.mean(z[:, i]**3)
+         m4 = np.mean(z[:, i]**4)
+         skewness[i] = m3
+         kurtosis[i] = m4 - 3  # Excess kurtosis
+
+     # Transformation for skewness
+     beta = (3 * (T**2 + 27*T - 70) * (T+1) * (T+3)) / ((T-2) * (T+5) * (T+7) * (T+9))
+     w2 = -1 + np.sqrt(2 * (beta - 1))
+     delta = 1 / np.sqrt(np.log(w2))
+     alpha = np.sqrt(2 / (w2 - 1))
+
+     z_skew = np.zeros(n)
+     for i in range(n):
+         y = skewness[i] * alpha
+         z_skew[i] = delta * np.log(y + np.sqrt(y**2 + 1))
+
+     # Transformation for kurtosis
+     delta_k = (T - 3) * (T + 1) * (T**2 + 15*T - 4)
+     a = ((T-2) * (T+5) * (T+7) * (T**2 + 27*T - 70)) / (6 * delta_k)
+     c = ((T-7) * (T+5) * (T+7) * (T**2 + 2*T - 5)) / (6 * delta_k)
+     k = ((T+5) * (T+7) * (T**3 + 37*T**2 + 11*T - 313)) / (12 * delta_k)
+
+     z_kurt = np.zeros(n)
+     for i in range(n):
+         x = kurtosis[i] / np.sqrt(8 / T)
+         z_kurt[i] = (np.power(1 - 2*c, 0.5) / np.sqrt(2*a)) * \
+                     (np.power(x / (2*a) + 1 + k, 1/3) - 1)
+
+     # Combine statistics
+     individual_stats = {}
+     omnibus_stat = 0
+
+     for i in range(n):
+         var_stat = z_skew[i]**2 + z_kurt[i]**2
+         omnibus_stat += var_stat
+         individual_stats[f'variable_{i+1}'] = {
+             'statistic': var_stat,
+             'skewness': skewness[i],
+             'kurtosis': kurtosis[i]
+         }
+
+     # Degrees of freedom: 2 * n (2 per variable for skewness and kurtosis)
+     df = 2 * n
+     p_value = 1 - chi2.cdf(omnibus_stat, df)
+
+     return {
+         'test_statistic': float(omnibus_stat),
+         'p_value': float(p_value),
+         'df': df,
+         'reject_null_5pct': p_value < 0.05,
+         'individual_stats': individual_stats,
+         'test_name': 'Doornik-Hansen Multivariate Normality Test'
+     }
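A quick self-check of the new normality test on simulated Gaussian residuals (names illustrative). Under the null the statistic is approximately chi-squared with 2n degrees of freedom, so rejections at the 5% level should be rare:

    import numpy as np
    from dasycaus.diagnostics import doornik_hansen_test

    rng = np.random.default_rng(7)
    out = doornik_hansen_test(rng.standard_normal((500, 2)))
    print(out['test_statistic'], round(out['p_value'], 3), out['reject_null_5pct'])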
+
+
+ def multivariate_arch_test(
+     residuals: np.ndarray,
+     lags: int = 1
+ ) -> Dict:
+     """
+     Multivariate ARCH test (Hacker and Hatemi-J, 2005).
+
+     Tests the null hypothesis of no ARCH effects in VAR residuals.
+
+     Parameters
+     ----------
+     residuals : np.ndarray
+         Residuals matrix with shape (T, n).
+     lags : int, default=1
+         Number of lags to test for ARCH effects.
+
+     Returns
+     -------
+     dict
+         Dictionary containing:
+         - 'test_statistic': Test statistic
+         - 'p_value': P-value for the test
+         - 'df': Degrees of freedom
+         - 'reject_null_5pct': Boolean indicating rejection at 5% level
+
+     References
+     ----------
+     Hacker, S. and Hatemi-J, A. (2005). A multivariate test for ARCH effects.
+     Applied Economics Letters, 12(7), 411-417.
+     """
+     T, n = residuals.shape
+
+     if T <= lags:
+         raise ValueError(f"Need more than {lags} observations for ARCH test")
+
+     # Compute squared residuals
+     e_squared = residuals ** 2
+
+     # Stack into vector
+     e_vec = e_squared.flatten('F')  # Column-major order
+
+     # Create lagged matrix for regression
+     Y = e_vec[n*lags:]  # Dependent variable
+
+     # Create X matrix with lags
+     X_list = []
+     for lag in range(1, lags + 1):
+         start_idx = n*(lags - lag)
+         end_idx = start_idx + (T - lags)*n
+         X_list.append(e_vec[start_idx:end_idx].reshape(-1, 1))
+
+     if not X_list:
+         raise ValueError("Could not create lagged matrix")
+
+     X = np.hstack(X_list)
+     X = np.column_stack([np.ones(len(Y)), X])  # Add intercept
+
+     # OLS regression
+     try:
+         beta_hat = np.linalg.lstsq(X, Y, rcond=None)[0]
+         residuals_ols = Y - X @ beta_hat
+         SSR = np.sum(residuals_ols ** 2)
+         SST = np.sum((Y - np.mean(Y)) ** 2)
+         R_squared = 1 - SSR / SST
+     except np.linalg.LinAlgError:
+         # If singular matrix, return conservative result
+         return {
+             'test_statistic': 0.0,
+             'p_value': 1.0,
+             'df': n * lags,
+             'reject_null_5pct': False,
+             'test_name': 'Multivariate ARCH Test',
+             'warning': 'Singular matrix encountered'
+         }
+
+     # Test statistic: T * R^2
+     test_stat = (T - lags) * R_squared
+     df = n * lags
+     p_value = 1 - chi2.cdf(test_stat, df)
+
+     return {
+         'test_statistic': float(test_stat),
+         'p_value': float(p_value),
+         'df': df,
+         'reject_null_5pct': p_value < 0.05,
+         'R_squared': float(R_squared),
+         'test_name': 'Multivariate ARCH Test'
+     }
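A sketch exercising the ARCH test with injected volatility clustering; the ARCH(1)-style recursion below is hand-rolled for illustration, not part of the package. The first series should typically trigger a rejection:

    import numpy as np
    from dasycaus.diagnostics import multivariate_arch_test

    rng = np.random.default_rng(11)
    T = 400
    e = np.zeros(T)
    for t in range(1, T):
        # variance depends on the previous squared shock
        e[t] = rng.standard_normal() * np.sqrt(0.2 + 0.7 * e[t - 1] ** 2)
    resid = np.column_stack([e, rng.standard_normal(T)])
    print(multivariate_arch_test(resid, lags=1)['p_value'])   # expect a small p-value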
+
+
+ def multivariate_autocorrelation_test(
+     residuals: np.ndarray,
+     lags: int
+ ) -> Dict:
+     """
+     Multivariate autocorrelation test (Hatemi-J, 2004).
+
+     Tests the null hypothesis of no autocorrelation in VAR residuals.
+
+     Parameters
+     ----------
+     residuals : np.ndarray
+         Residuals matrix with shape (T, n).
+     lags : int
+         Number of lags to test for autocorrelation.
+
+     Returns
+     -------
+     dict
+         Dictionary containing:
+         - 'test_statistic': Test statistic
+         - 'p_value': P-value for the test
+         - 'df': Degrees of freedom
+         - 'reject_null_5pct': Boolean indicating rejection at 5% level
+
+     References
+     ----------
+     Hatemi-J, A. (2004). Multivariate tests for autocorrelation in the stable
+     and unstable VAR models. Economic Modelling, 21, 661-683.
+     """
+     T, n = residuals.shape
+
+     if T <= lags:
+         raise ValueError(f"Need more than {lags} observations for autocorrelation test")
+
+     # Compute residual covariance matrix
+     Sigma_0 = (residuals.T @ residuals) / T
+
+     # Compute autocov matrices and test statistic
+     test_stat = 0
+
+     for h in range(1, lags + 1):
+         # Autocovariance at lag h
+         Gamma_h = np.zeros((n, n))
+         for t in range(h, T):
+             Gamma_h += np.outer(residuals[t], residuals[t - h])
+         Gamma_h /= T
+
+         # Correlation matrix at lag h
+         try:
+             Sigma_0_inv = np.linalg.inv(Sigma_0)
+             D_h = Gamma_h @ Sigma_0_inv
+
+             # Add to test statistic
+             test_stat += T * np.trace(D_h.T @ D_h)
+         except np.linalg.LinAlgError:
+             # If singular, skip this lag
+             continue
+
+     # Degrees of freedom
+     df = n * n * lags
+     p_value = 1 - chi2.cdf(test_stat, df)
+
+     return {
+         'test_statistic': float(test_stat),
+         'p_value': float(p_value),
+         'df': df,
+         'reject_null_5pct': p_value < 0.05,
+         'test_name': 'Multivariate Autocorrelation Test'
+     }
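A similar sketch for the autocorrelation test, with an AR(1) filter applied to one series to induce serial correlation (illustrative only):

    import numpy as np
    from dasycaus.diagnostics import multivariate_autocorrelation_test

    rng = np.random.default_rng(5)
    T = 300
    u, x = rng.standard_normal(T), np.zeros(T)
    for t in range(1, T):
        x[t] = 0.6 * x[t - 1] + u[t]   # autocorrelated component
    resid = np.column_stack([x, rng.standard_normal(T)])
    out = multivariate_autocorrelation_test(resid, lags=4)
    print(out['test_statistic'], out['df'], out['reject_null_5pct'])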
+
+
+ def run_all_diagnostics(
+     residuals: np.ndarray,
+     arch_lags: int = 1,
+     autocorr_lags: Optional[int] = None
+ ) -> Dict:
+     """
+     Run all diagnostic tests on VAR residuals.
+
+     Parameters
+     ----------
+     residuals : np.ndarray
+         Residuals matrix with shape (T, n).
+     arch_lags : int, default=1
+         Number of lags for ARCH test.
+     autocorr_lags : int, optional
+         Number of lags for autocorrelation test. If None, uses arch_lags.
+
+     Returns
+     -------
+     dict
+         Dictionary containing results from all tests:
+         - 'normality': Results from Doornik-Hansen test
+         - 'arch': Results from multivariate ARCH test
+         - 'autocorrelation': Results from autocorrelation test
+         - 'summary': Overall summary
+     """
+     if autocorr_lags is None:
+         autocorr_lags = arch_lags
+
+     results = {}
+
+     # Run normality test
+     try:
+         results['normality'] = doornik_hansen_test(residuals)
+     except Exception as e:
+         results['normality'] = {
+             'error': str(e),
+             'test_name': 'Doornik-Hansen Multivariate Normality Test'
+         }
+
+     # Run ARCH test
+     try:
+         results['arch'] = multivariate_arch_test(residuals, arch_lags)
+     except Exception as e:
+         results['arch'] = {
+             'error': str(e),
+             'test_name': 'Multivariate ARCH Test'
+         }
+
+     # Run autocorrelation test
+     try:
+         results['autocorrelation'] = multivariate_autocorrelation_test(
+             residuals, autocorr_lags
+         )
+     except Exception as e:
+         results['autocorrelation'] = {
+             'error': str(e),
+             'test_name': 'Multivariate Autocorrelation Test'
+         }
+
+     # Create summary
+     summary = []
+     for test_name, test_result in results.items():
+         if 'error' in test_result:
+             summary.append(f"{test_name}: ERROR - {test_result['error']}")
+         else:
+             reject = test_result.get('reject_null_5pct', False)
+             p_val = test_result.get('p_value', np.nan)
+             summary.append(
+                 f"{test_name}: p-value={p_val:.4f}, "
+                 f"reject_5%={reject}"
+             )
+
+     results['summary'] = '\n'.join(summary)
+
+     return results
+
+
+ def print_diagnostic_results(results: Dict) -> None:
+     """
+     Print diagnostic test results in a formatted table.
+
+     Parameters
+     ----------
+     results : dict
+         Results from run_all_diagnostics function.
+     """
+     print("\n" + "="*70)
+     print("DIAGNOSTIC TESTS FOR VAR RESIDUALS")
+     print("="*70)
+
+     for test_key in ['normality', 'arch', 'autocorrelation']:
+         if test_key not in results:
+             continue
+
+         test_result = results[test_key]
+         test_name = test_result.get('test_name', test_key.upper())
+
+         print(f"\n{test_name}:")
+         print("-" * 70)
+
+         if 'error' in test_result:
+             print(f"  ERROR: {test_result['error']}")
+             continue
+
+         print(f"  Test Statistic: {test_result['test_statistic']:.4f}")
+         print(f"  Degrees of Freedom: {test_result['df']}")
+         print(f"  P-value: {test_result['p_value']:.4f}")
+         print(f"  Reject H0 at 5%: {test_result['reject_null_5pct']}")
+
+         # Additional info for specific tests
+         if test_key == 'arch' and 'R_squared' in test_result:
+             print(f"  R-squared: {test_result['R_squared']:.4f}")
+
+     print("\n" + "="*70)
+     print("INTERPRETATION:")
+     print("-" * 70)
+     print("Normality: H0 = Residuals are multivariate normal")
+     print("ARCH: H0 = No ARCH effects (constant variance)")
+     print("Autocorrelation: H0 = No autocorrelation")
+     print("="*70 + "\n")
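An end-to-end sketch tying the module together: residuals from a hand-rolled VAR(1) least-squares fit (the package's own estimation is internal and not shown in this diff) fed to the combined runner:

    import numpy as np
    from dasycaus.diagnostics import run_all_diagnostics, print_diagnostic_results

    rng = np.random.default_rng(2)
    data = rng.standard_normal((250, 2)).cumsum(axis=0)
    Y = data[1:]                                      # VAR(1) by OLS
    X = np.column_stack([np.ones(len(Y)), data[:-1]])
    resid = Y - X @ np.linalg.lstsq(X, Y, rcond=None)[0]
    print_diagnostic_results(run_all_diagnostics(resid, arch_lags=1, autocorr_lags=2))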
{dasycaus-1.0.0 → dasycaus-1.0.1/dasycaus.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dasycaus
- Version: 1.0.0
+ Version: 1.0.1
  Summary: Dynamic Asymmetric Causality Tests for Time Series Analysis
  Author-email: "Dr. Merwan Roudane" <merwanroudane920@gmail.com>
  Maintainer-email: "Dr. Merwan Roudane" <merwanroudane920@gmail.com>
{dasycaus-1.0.0 → dasycaus-1.0.1}/dasycaus.egg-info/SOURCES.txt
@@ -7,6 +7,7 @@ dasycaus/__init__.py
  dasycaus/bootstrap.py
  dasycaus/core.py
  dasycaus/data_transform.py
+ dasycaus/diagnostics.py
  dasycaus/lag_selection.py
  dasycaus/utils.py
  dasycaus.egg-info/PKG-INFO
{dasycaus-1.0.0 → dasycaus-1.0.1}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
  [project]
  name = "dasycaus"
- version = "1.0.0"
+ version = "1.0.1"
  description = "Dynamic Asymmetric Causality Tests for Time Series Analysis"
  readme = "README.md"
  authors = [