segmcoint 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
segmcoint/utils.py ADDED
@@ -0,0 +1,433 @@
1
+ """
2
+ Utility functions for segmented cointegration tests.
3
+
4
+ Shared helper routines used by both Kim (2003) and Martins & Rodrigues (2022)
5
+ test implementations.
6
+
7
+ References
8
+ ----------
9
+ Kim, J.-Y. (2003). Inference on Segmented Cointegration.
10
+ Econometric Theory, 19, 620-639.
11
+ Martins, L.F. and Rodrigues, P.M.M. (2022). Tests for Segmented
12
+ Cointegration. Empirical Economics, 63, 567-600.
13
+ """
14
+
15
+ import numpy as np
16
+ from numpy.linalg import inv
17
+ from scipy import stats
18
+ import warnings
19
+
20
+
21
def ols_residuals(y, X, model="none"):
    """
    Compute OLS residuals from a cointegrating regression.

    Supports the three deterministic specifications of Kim (2003,
    Eq. 2.1a-2.1c):
      - 'none' : x_{1t} = beta' x_{2t} + epsilon_t                (2.1a)
      - 'drift': x_{1t} = alpha + beta' x_{2t} + epsilon_t        (2.1b)
      - 'trend': x_{1t} = alpha + gamma*t + beta' x_{2t} + eps_t  (2.1c)

    Parameters
    ----------
    y : ndarray, shape (T,)
        Dependent variable (x_{1t}).
    X : ndarray, shape (T,) or (T, K)
        Regressor(s) (x_{2t}).
    model : {'none', 'drift', 'trend'}
        Deterministic specification for the cointegrating regression.

    Returns
    -------
    residuals : ndarray, shape (T,)
        OLS residuals epsilon_hat_t.
    beta_hat : ndarray
        Estimated coefficient vector (deterministic terms first,
        then the slope coefficients on X).

    Raises
    ------
    ValueError
        If `model` is not one of the recognised specifications.
    """
    n = len(y)
    y = np.asarray(y, dtype=np.float64).ravel()
    regressors = np.asarray(X, dtype=np.float64)
    if regressors.ndim == 1:
        regressors = regressors.reshape(-1, 1)

    # Prepend the deterministic terms implied by the chosen model.
    if model == "none":
        design = regressors.copy()
    elif model == "drift":
        design = np.column_stack([np.ones(n), regressors])
    elif model == "trend":
        design = np.column_stack([np.ones(n), np.arange(1, n + 1), regressors])
    else:
        raise ValueError(f"Unknown model: {model}. Use 'none', 'drift', or 'trend'.")

    # Standard normal-equations OLS.
    beta_hat = inv(design.T @ design) @ (design.T @ y)
    residuals = y - design @ beta_hat
    return residuals, beta_hat
64
+
65
+
66
def adf_regression(e, p=1, weighted=None):
    """
    Run an augmented Dickey-Fuller regression on a residual series.

    Implements the ADF regression:
        Delta e_t = zeta_1 * Delta e_{t-1} + ... + zeta_{p-1} * Delta e_{t-p+1}
                    + w_t(C_T) * (rho - 1) * e_{t-1} + epsilon_t

    as described in Kim (2003, Eq. 3.5).

    Parameters
    ----------
    e : ndarray, shape (T,)
        Residual series from the cointegrating regression.
    p : int
        Lag order for the augmented difference terms.
    weighted : ndarray or None, shape (T,)
        Weight series w_t(C_T) from Eq. (3.2). If None, all weights are 1.

    Returns
    -------
    rho_hat : float
        Estimated first-order autoregressive coefficient.
    t_stat : float
        t-statistic for testing rho = 1 (i.e., for rho - 1 = 0).
    sigma_e : float
        Estimated standard deviation of the regression error.
    zeta_hat : ndarray
        Estimated augmented lag coefficients (empty when p == 0).
    ssr : float
        Sum of squared residuals from the ADF regression.

    Raises
    ------
    ValueError
        If there are too few observations to leave positive degrees of
        freedom (T - 1 - p must exceed the p + 1 estimated parameters).
    """
    e = np.asarray(e, dtype=np.float64).ravel()
    T = len(e)

    if weighted is None:
        weighted = np.ones(T)
    else:
        weighted = np.asarray(weighted, dtype=np.float64).ravel()

    # Delta e_t, length T-1: de[i] = e[i+1] - e[i].
    de = np.diff(e)

    # One regression row per period t = p+1, ..., T-1 (0-indexed).
    n_obs = T - 1 - p
    n_params = p + 1  # p augmented lags + the error-correction term
    # Stricter than n_obs <= 0: without this, n_obs == n_params divides
    # by zero in sigma_e_sq below and smaller n_obs yields a singular fit.
    if n_obs <= n_params:
        raise ValueError(f"Insufficient observations: T={T}, p={p}")

    Y = de[p:]  # (n_obs,)

    # e_{t-1} aligned with Y: for de[p:], t runs p+1..T-1, so the lagged
    # level is e[p], ..., e[T-2].
    e_lag = e[p:-1]
    # w_t for the same periods t = p+1, ..., T-1.
    w_lag = weighted[p + 1: T]

    # Weighted error-correction term w_t(C_T) * e_{t-1}.
    we_lag = (w_lag * e_lag).reshape(-1, 1)

    # Augmented lags: Delta e_{t-j} for j = 1, ..., p; de[p-j : T-1-j]
    # lines row i up with Delta e_{p+1+i-j}.
    if p > 0:
        aug_lags = np.column_stack([de[p - j: T - 1 - j] for j in range(1, p + 1)])
        Z = np.column_stack([aug_lags, we_lag])
    else:
        Z = we_lag

    # Invert the Gram matrix once; reused for the coefficient covariance.
    ZtZ_inv = inv(Z.T @ Z)
    beta = ZtZ_inv @ (Z.T @ Y)
    residuals = Y - Z @ beta
    ssr = np.sum(residuals ** 2)
    sigma_e_sq = ssr / (n_obs - Z.shape[1])
    # Floor guards against a numerically zero variance (exact fit).
    sigma_e = np.sqrt(max(sigma_e_sq, 1e-15))

    # Last regressor is the error-correction term, so beta[-1] = rho - 1.
    gamma_hat = beta[-1]
    rho_hat = gamma_hat + 1.0

    # Conventional OLS standard error of gamma_hat.
    cov_beta = sigma_e_sq * ZtZ_inv
    se_gamma = np.sqrt(max(cov_beta[-1, -1], 1e-15))
    t_stat = gamma_hat / se_gamma

    zeta_hat = beta[:-1] if p > 0 else np.array([])

    return rho_hat, t_stat, sigma_e, zeta_hat, ssr
154
+
155
+
156
def ar1_regression(e, weighted=None):
    """
    Run a simple AR(1) regression on a residual series.

    Implements  e_t = rho * e_{t-1} + v_t  with optional weighting for
    segmented cointegration: w_t(C_T)*e_t is regressed on
    w_{t-1}(C_T)*e_{t-1} (per Kim 2003, p.625).

    Parameters
    ----------
    e : ndarray, shape (T,)
        Residual series.
    weighted : ndarray or None, shape (T,)
        Weight series w_t(C_T). If None, all weights are 1.

    Returns
    -------
    rho_hat : float
        Estimated AR(1) coefficient.
    s_sq : float
        Estimated variance of v_t.
    sigma_rho_sq : float
        Estimated variance of rho_hat.
    ssr : float
        Sum of squared residuals.
    """
    series = np.asarray(e, dtype=np.float64).ravel()
    n = len(series)

    if weighted is None:
        w = np.ones(n)
    else:
        w = np.asarray(weighted, dtype=np.float64).ravel()

    lhs = w[1:] * series[1:]
    rhs = w[:-1] * series[:-1]

    denom = np.sum(rhs ** 2)
    if denom < 1e-15:
        # Degenerate regressor: report a unit root with zero variance.
        return 1.0, 0.0, 0.0, 0.0

    rho_hat = np.sum(rhs * lhs) / denom
    resid = lhs - rho_hat * rhs
    ssr = np.sum(resid ** 2)
    # Effective sample size: only periods carrying positive weight count.
    n_active = np.sum(w[1:] > 0)
    s_sq = ssr / max(n_active - 1, 1)
    sigma_rho_sq = s_sq / denom

    return rho_hat, s_sq, sigma_rho_sq, ssr
207
+
208
+
209
def newey_west_lrv(v, q=None):
    """
    Newey-West long-run variance estimator.

    Computes the long-run variance lambda^2 and the variance gamma_0 as
    defined in Kim (2003, p.626):
        gamma_j    = (1/T_C) * sum w_t * v_hat_t * v_hat_{t-j}
        lambda_hat = gamma_0 + sum_{j=1}^{q} (1 - j/(1+q)) * gamma_j

    Parameters
    ----------
    v : ndarray, shape (T,)
        Residual series (v_hat).
    q : int or None
        Bandwidth for the Newey-West estimator.
        If None, uses floor(4*(T/100)^{2/9}).

    Returns
    -------
    lambda_sq : float
        Estimated long-run variance.
    gamma_0 : float
        Estimated variance (gamma_0).
    """
    u = np.asarray(v, dtype=np.float64).ravel()
    n = len(u)

    # Default bandwidth rule of thumb.
    bandwidth = int(np.floor(4.0 * (n / 100.0) ** (2.0 / 9.0))) if q is None else q

    gamma_0 = np.mean(u ** 2)

    # Bartlett-kernel weighted autocovariances; the factor 2 accounts
    # for the symmetric negative lags.
    lambda_sq = gamma_0
    for lag in range(1, bandwidth + 1):
        gamma_lag = np.mean(u[lag:] * u[:-lag])
        kernel = 1.0 - lag / (1.0 + bandwidth)
        lambda_sq += 2.0 * kernel * gamma_lag

    return lambda_sq, gamma_0
250
+
251
+
252
def select_lag_aic(e, max_p=12):
    """
    Select the ADF lag order by minimising the Akaike Information Criterion.

    Each candidate lag p in [0, max_p] is fitted with `adf_regression`
    and scored with the Gaussian-likelihood AIC:
        AIC(p) = n * log(SSR/n) + 2 * (p + 1)

    Parameters
    ----------
    e : ndarray, shape (T,)
        Residual series.
    max_p : int
        Maximum lag order to consider (capped at T // 4).

    Returns
    -------
    p_opt : int
        Lag order with the smallest AIC; 0 if no candidate is feasible.
    """
    e = np.asarray(e, dtype=np.float64).ravel()
    T = len(e)
    # Cap the search so each candidate regression keeps enough observations.
    max_p = min(max_p, T // 4)

    best_aic = np.inf
    p_opt = 0

    for p in range(0, max_p + 1):
        try:
            # Only ssr is needed for the criterion; the remaining outputs
            # of the regression are intentionally discarded.
            _, _, _, _, ssr = adf_regression(e, p=p)
        except (ValueError, np.linalg.LinAlgError):
            continue  # candidate regression infeasible; skip it
        n_obs = T - 1 - p
        n_params = p + 1
        if n_obs <= n_params:
            continue
        aic = n_obs * np.log(ssr / n_obs) + 2 * n_params
        if aic < best_aic:
            best_aic = aic
            p_opt = p

    return p_opt
290
+
291
+
292
def select_lag_bic(e, max_p=12):
    """
    Select the ADF lag order by minimising the Schwarz (BIC) criterion.

    Each candidate lag p in [0, max_p] is fitted with `adf_regression`
    and scored with the Gaussian-likelihood BIC:
        BIC(p) = n * log(SSR/n) + (p + 1) * log(n)

    Parameters
    ----------
    e : ndarray, shape (T,)
        Residual series.
    max_p : int
        Maximum lag order to consider (capped at T // 4).

    Returns
    -------
    p_opt : int
        Lag order with the smallest BIC; 0 if no candidate is feasible.
    """
    e = np.asarray(e, dtype=np.float64).ravel()
    T = len(e)
    # Cap the search so each candidate regression keeps enough observations.
    max_p = min(max_p, T // 4)

    best_bic = np.inf
    p_opt = 0

    for p in range(0, max_p + 1):
        try:
            # Only ssr is needed for the criterion; the remaining outputs
            # of the regression are intentionally discarded.
            _, _, _, _, ssr = adf_regression(e, p=p)
        except (ValueError, np.linalg.LinAlgError):
            continue  # candidate regression infeasible; skip it
        n_obs = T - 1 - p
        n_params = p + 1
        if n_obs <= n_params:
            continue
        bic = n_obs * np.log(ssr / n_obs) + n_params * np.log(n_obs)
        if bic < best_bic:
            best_bic = bic
            p_opt = p

    return p_opt
330
+
331
+
332
def generate_segmented_data(T, beta=1.0, rho=0.9, sigma_v=0.1,
                            sigma_u=0.1, n_break_start=None,
                            n_break_end=None, model="drift",
                            alpha=0.0, gamma=0.0, seed=None):
    """
    Generate data from a segmented cointegration DGP.

    Implements the model from Kim (2003, Section 4) and Martins &
    Rodrigues (2022, Eqs. 5.1-5.4):

        x_{1t} = alpha + gamma*t + beta * x_{2t} + epsilon_t
        x_{2t} = x_{2,t-1} + u_t
        epsilon_t = rho^{(t)} * epsilon_{t-1} + v_t

    where rho^{(t)} = rho for t in C_T (cointegration period)
    and rho^{(t)} = 1 for t in N_T (noncointegration period).

    Parameters
    ----------
    T : int
        Sample size.
    beta : float or ndarray
        Cointegrating coefficient(s).
    rho : float
        AR(1) root in the cointegration regime. Should satisfy |rho| < 1;
        a UserWarning is issued otherwise (the series is generated anyway).
    sigma_v : float
        Standard deviation of the innovation v_t in the error equation.
    sigma_u : float
        Standard deviation of the innovation u_t in x_{2t}.
    n_break_start : int or None
        Start of the noncointegration period (1-indexed).
        If None, defaults to int(0.4 * T).
    n_break_end : int or None
        End of the noncointegration period (1-indexed).
        If None, defaults to int(0.6 * T).
    model : str
        Deterministic specification: 'none', 'drift', 'trend'.
    alpha : float
        Intercept term.
    gamma : float
        Trend coefficient.
    seed : int or None
        Random seed.

    Returns
    -------
    y : ndarray, shape (T,)
        Dependent variable x_{1t}.
    X : ndarray, shape (T,) or (T, K)
        Regressor(s) x_{2t}.
    eps : ndarray, shape (T,)
        True error process epsilon_t.
    break_info : dict
        Dictionary with 'n_start', 'n_end', 'tau_0', 'tau_1'.

    Raises
    ------
    ValueError
        If `model` is not one of 'none', 'drift', 'trend'.
    """
    if abs(rho) >= 1.0:
        # The cointegration regime is only stationary for |rho| < 1; warn
        # rather than raise so boundary experiments still run.
        warnings.warn(
            f"rho={rho} does not satisfy |rho| < 1; the 'cointegration' "
            "regime will not be stationary.",
            UserWarning,
        )

    rng = np.random.default_rng(seed)

    if n_break_start is None:
        n_break_start = int(0.4 * T)
    if n_break_end is None:
        n_break_end = int(0.6 * T)

    beta = np.atleast_1d(np.asarray(beta, dtype=np.float64))
    K = len(beta)

    # x_{2t}: K independent random walks driven by u_t.
    u = rng.normal(0, sigma_u, size=(T, K))
    X = np.cumsum(u, axis=0)

    # epsilon_t: AR(1) whose root switches to unity inside the break window.
    # NOTE(review): the window test below uses 0-indexed t while the bounds
    # are documented as 1-indexed, so the switch is effectively shifted by
    # one period; kept as-is for backward compatibility with existing seeds.
    v = rng.normal(0, sigma_v, size=T)
    eps = np.zeros(T)
    for t in range(1, T):
        if n_break_start <= t < n_break_end:
            eps[t] = eps[t - 1] + v[t]        # noncointegration: unit root
        else:
            eps[t] = rho * eps[t - 1] + v[t]  # cointegration: stationary

    # Assemble y with the requested deterministic terms.
    if model == "none":
        y = X @ beta + eps
    elif model == "drift":
        y = alpha + X @ beta + eps
    elif model == "trend":
        y = alpha + gamma * np.arange(1, T + 1) + X @ beta + eps
    else:
        raise ValueError(f"Unknown model: {model}")

    # Return a 1-D regressor in the single-regressor case for convenience.
    if K == 1:
        X = X.ravel()

    break_info = {
        "n_start": n_break_start,
        "n_end": n_break_end,
        "tau_0": n_break_start / T,
        "tau_1": n_break_end / T,
    }

    return y, X, eps, break_info
@@ -0,0 +1,144 @@
1
+ Metadata-Version: 2.4
2
+ Name: segmcoint
3
+ Version: 1.0.0
4
+ Summary: Tests for Segmented Cointegration: Kim (2003) and Martins & Rodrigues (2022)
5
+ Author-email: Dr Merwan Roudane <merwanroudane920@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/merwanroudane/segmcoint
8
+ Project-URL: Repository, https://github.com/merwanroudane/segmcoint
9
+ Project-URL: Issues, https://github.com/merwanroudane/segmcoint/issues
10
+ Keywords: cointegration,segmented cointegration,unit root,structural breaks,time series,econometrics
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Science/Research
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
20
+ Classifier: Topic :: Scientific/Engineering
21
+ Requires-Python: >=3.9
22
+ Description-Content-Type: text/markdown
23
+ License-File: LICENSE
24
+ Requires-Dist: numpy>=1.21
25
+ Requires-Dist: scipy>=1.7
26
+ Requires-Dist: pandas>=1.3
27
+ Provides-Extra: dev
28
+ Requires-Dist: pytest>=7.0; extra == "dev"
29
+ Requires-Dist: pytest-cov>=4.0; extra == "dev"
30
+ Requires-Dist: matplotlib>=3.5; extra == "dev"
31
+ Dynamic: license-file
32
+
33
+ # segmcoint
34
+
35
+ **Tests for Segmented Cointegration**
36
+
37
+ [![Python 3.9+](https://img.shields.io/badge/python-3.9+-blue.svg)](https://www.python.org/downloads/)
38
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
39
+
40
+ A Python library implementing rigorous tests for segmented cointegration from two seminal econometric papers:
41
+
42
+ 1. **Kim, J.-Y. (2003)**. *Inference on Segmented Cointegration*. Econometric Theory, 19, 620–639.
43
+ 2. **Martins, L.F. and Rodrigues, P.M.M. (2022)**. *Tests for Segmented Cointegration: An Application to US Governments Budgets*. Empirical Economics, 63, 567–600.
44
+
45
+ ## Overview
46
+
47
+ Cointegration relations are often interpreted as long-run equilibrium relationships. In practice, however, real data frequently fail to confirm cointegration for well-understood economic relations. This package addresses the possibility that failure to confirm cointegration is due to **nonstationary deviations in a relatively small portion of the data** ("short-run"), while the cointegration relation prevails in the remaining periods—a situation called **segmented cointegration**.
48
+
49
+ ### Kim (2003) Tests
50
+
51
+ - Phillips–Perron–Ouliaris type tests: $Z^*_\rho(\mathcal{C})$ and $Z^*_t(\mathcal{C})$
52
+ - Augmented Dickey–Fuller type tests: $ADF^*_\rho(\mathcal{C})$ and $ADF^*_t(\mathcal{C})$
53
+ - Extremum estimator for break date identification (Eq. 3.16–3.17)
54
+ - Critical values from Tables 1 and 2 of the paper (for $\bar{\ell}(\mathcal{T}_N) = 0.3$)
55
+
56
+ ### Martins & Rodrigues (2022) Tests
57
+
58
+ - Wald-type statistics: $F_A(\tau, m^*)$ and $F_B(\tau, m^*)$ (Eq. 3.2)
59
+ - Supremum statistics: $\sup F_A(m^*)$ and $\sup F_B(m^*)$ (Eq. 3.3)
60
+ - Combined test: $W(m^*)$ (Eq. 3.4)
61
+ - Double maximum test: $W_{\max}$ (Eq. 3.5)
62
+ - Critical values from Table 1 of the paper
63
+ - Break date estimation (Remark 3)
64
+
65
+ ## Installation
66
+
67
+ ```bash
68
+ pip install .
69
+ ```
70
+
71
+ Or for development:
72
+
73
+ ```bash
74
+ pip install -e ".[dev]"
75
+ ```
76
+
77
+ ## Quick Start
78
+
79
+ ```python
80
+ import numpy as np
81
+ from segmcoint import kim_test, mr_test, generate_segmented_data
82
+
83
+ # Generate data with segmented cointegration
84
+ y, X, eps, info = generate_segmented_data(T=200, rho=0.85, seed=42)
85
+
86
+ # Kim (2003) tests
87
+ res_kim = kim_test(y, X, model='drift')
88
+ print(res_kim)
89
+
90
+ # Martins & Rodrigues (2022) tests
91
+ res_mr = mr_test(y, X, model='drift')
92
+ print(res_mr)
93
+ ```
94
+
95
+ ## Model Specifications
96
+
97
+ Three deterministic specifications are supported, following Kim (2003, Eq. 2.1a–2.1c):
98
+
99
+ | Model | Equation | Kim (2003) | M&R (2022) |
100
+ |----------|---------------------------------------------------------|------------|------------|
101
+ | `'none'` | $x_{1t} = \beta' x_{2t} + \varepsilon_t$ | Case I | No det. |
102
+ | `'drift'`| $x_{1t} = \alpha + \beta' x_{2t} + \varepsilon_t$ | Case II | Intercept |
103
+ | `'trend'`| $x_{1t} = \alpha + \gamma t + \beta' x_{2t} + \varepsilon_t$ | Case III | Int.+Trend |
104
+
105
+ ## API Reference
106
+
107
+ ### `kim_test(y, X, model, max_ell, step, ...)`
108
+
109
+ Computes the infimum test statistics from Kim (2003). Returns a `KimTestResult` object.
110
+
111
+ ### `mr_test(y, X, model, max_breaks, epsilon, step, ...)`
112
+
113
+ Computes the Wald-type test statistics from Martins & Rodrigues (2022). Returns an `MRTestResult` object.
114
+
115
+ ### `kim_break_estimator(y, X, model, max_ell, step)`
116
+
117
+ Extremum estimator for the noncointegration period (Kim 2003, Eq. 3.16–3.17).
118
+
119
+ ### `generate_segmented_data(T, beta, rho, ...)`
120
+
121
+ Generate simulated data from a segmented cointegration DGP.
122
+
123
+ ## Testing
124
+
125
+ ```bash
126
+ pytest tests/ -v
127
+ ```
128
+
129
+ ## Author
130
+
131
+ **Dr Merwan Roudane**
132
+ Email: merwanroudane920@gmail.com
133
+ GitHub: [https://github.com/merwanroudane/segmcoint](https://github.com/merwanroudane/segmcoint)
134
+
135
+ ## License
136
+
137
+ MIT License
138
+
139
+ ## References
140
+
141
+ - Kim, J.-Y. (2003). Inference on Segmented Cointegration. *Econometric Theory*, 19, 620–639.
142
+ - Martins, L.F. and Rodrigues, P.M.M. (2022). Tests for Segmented Cointegration: An Application to US Governments Budgets. *Empirical Economics*, 63, 567–600.
143
+ - Phillips, P.C.B. and Ouliaris, S. (1990). Asymptotic Properties of Residual Based Tests for Cointegration. *Econometrica*, 58, 165–193.
144
+ - Kejriwal, M., Perron, P., and Zhou, J. (2013). Wald Tests for Detecting Multiple Structural Changes in Persistence. *Econometric Theory*, 29, 289–323.
@@ -0,0 +1,10 @@
1
+ segmcoint/__init__.py,sha256=iHpkLjqsW-LTo-57FH98LaWT8ePdcVQaRY9GdDD1NHM,2575
2
+ segmcoint/kim2003.py,sha256=bjiJns95juZD-FBgwKZO8zFigmYOtEBAtxLYytyaB6c,23793
3
+ segmcoint/martins_rodrigues2022.py,sha256=KGfh3HNEsdofnS4PfspfDFNfLSt_z8Z5Y-dLcyme3O4,25050
4
+ segmcoint/simulation.py,sha256=MvKUT_e-t-U2qcVmIWLZzyD1fJFl1lFbjT6eLMrmhFw,10341
5
+ segmcoint/utils.py,sha256=KFBLs-aAswWh1VSxw89Ecy5alNWCMnXdEbaBvNVbHP0,12246
6
+ segmcoint-1.0.0.dist-info/licenses/LICENSE,sha256=eMovZHlHURcKezwsHc3CXOpWkbo_CoOrNKaNP39o-24,1074
7
+ segmcoint-1.0.0.dist-info/METADATA,sha256=meBUqE154ghfgQ71aAldmWlgWtz_Ja-dNU7uJwmVr4c,5825
8
+ segmcoint-1.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
9
+ segmcoint-1.0.0.dist-info/top_level.txt,sha256=X76niBdOCsL0M4SiCz3AGphXNxcXSU14X-SuzNJMZ_k,10
10
+ segmcoint-1.0.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Dr Merwan Roudane
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1 @@
1
+ segmcoint