space-graph 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,16 @@
1
+ GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007
2
+
3
+ Copyright (C) 2025 space_graph authors
4
+
5
+ This program is free software: you can redistribute it and/or modify
6
+ it under the terms of the GNU General Public License as published by
7
+ the Free Software Foundation, either version 3 of the License, or
8
+ (at your option) any later version.
9
+
10
+ This program is distributed in the hope that it will be useful,
11
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
12
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
+ GNU General Public License for more details.
14
+
15
+ You should have received a copy of the GNU General Public License
16
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
@@ -0,0 +1,81 @@
1
+ Metadata-Version: 2.4
2
+ Name: space-graph
3
+ Version: 0.1.0
4
+ Summary: Pure Python SPACE: sparse partial correlation estimation
5
+ License: GPL-3.0-or-later
6
+ Requires-Python: >=3.10
7
+ Description-Content-Type: text/markdown
8
+ License-File: LICENSE
9
+ Requires-Dist: numpy>=1.20
10
+ Provides-Extra: dev
11
+ Requires-Dist: pytest>=7; extra == "dev"
12
+ Provides-Extra: numba
13
+ Requires-Dist: numba>=0.57; extra == "numba"
14
+ Dynamic: license-file
15
+
16
+ # space-graph
17
+
18
+ [![PyPI version](https://img.shields.io/pypi/v/space-graph)](https://pypi.org/project/space-graph/)
19
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/downloads/)
20
+ [![License: GPL-3.0-or-later](https://img.shields.io/badge/license-GPL--3.0--or--later-blue.svg)](LICENSE)
21
+
22
+ Pure Python implementation of **SPACE** (Sparse Partial Correlation Estimation) from Peng et al. (2009), with no R or C dependencies.
23
+
24
+ Paper: [Sparse Partial Correlation Estimation for High-Dimensional Data](https://www.tandfonline.com/doi/abs/10.1198/jasa.2009.0126)
25
+
26
+ ## Install
27
+
28
+ ```bash
29
+ pip install space-graph
30
+ ```
31
+
32
+ Optional Numba (faster inner `jsrm` loop):
33
+
34
+ ```bash
35
+ pip install 'space-graph[numba]'
36
+ ```
37
+
38
+ From GitHub:
39
+
40
+ ```bash
41
+ pip install git+https://github.com/shahrozeabbas/space-graph.git
42
+ ```
43
+
44
+ ## Usage
45
+
46
+ ```python
47
+ import numpy as np
48
+ from space_graph import SPACE
49
+
50
+ X = np.random.randn(20, 5)
51
+ model = SPACE(
52
+ alpha=0.7,
53
+ max_outer_iter=2,
54
+ max_inner_iter=500,
55
+ tol=1e-6,
56
+ weight='uniform',
57
+ )
58
+ model.fit(X)
59
+ print(model.partial_correlation_)
60
+ ```
61
+
62
+ ## Penalty
63
+
64
+ The public parameter `alpha` in `[0, 1]` maps to inner penalties as `lam1 = alpha` and `lam2 = 1 - alpha`, matching the reference elastic-net-style JSRM solver.
65
+
66
+ ## Options
67
+
68
+ - **`tol`** (default `1e-6`): inner coordinate-descent stopping tolerance (and active-set threshold), same scale as the reference C code.
69
+ - **`weight`**: default **`uniform`** (unit weights). Use **`equal`** as an alias. Other modes: **`sig`**, **`degree`**, or a custom positive vector of length `p`.
70
+
71
+ ## Tests
72
+
73
+ ```bash
74
+ pytest
75
+ ```
76
+
77
+ Optional: build `libjsrm_test.so` from `../space/src/JSRM.c` to run the ctypes cross-check in `tests/test_space.py`.
78
+
79
+ ## License
80
+
81
+ GPL-3.0-or-later (same family as the original `space` R package).
@@ -0,0 +1,66 @@
1
+ # space-graph
2
+
3
+ [![PyPI version](https://img.shields.io/pypi/v/space-graph)](https://pypi.org/project/space-graph/)
4
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/downloads/)
5
+ [![License: GPL-3.0-or-later](https://img.shields.io/badge/license-GPL--3.0--or--later-blue.svg)](LICENSE)
6
+
7
+ Pure Python implementation of **SPACE** (Sparse Partial Correlation Estimation) from Peng et al. (2009), with no R or C dependencies.
8
+
9
+ Paper: [Sparse Partial Correlation Estimation for High-Dimensional Data](https://www.tandfonline.com/doi/abs/10.1198/jasa.2009.0126)
10
+
11
+ ## Install
12
+
13
+ ```bash
14
+ pip install space-graph
15
+ ```
16
+
17
+ Optional Numba (faster inner `jsrm` loop):
18
+
19
+ ```bash
20
+ pip install 'space-graph[numba]'
21
+ ```
22
+
23
+ From GitHub:
24
+
25
+ ```bash
26
+ pip install git+https://github.com/shahrozeabbas/space-graph.git
27
+ ```
28
+
29
+ ## Usage
30
+
31
+ ```python
32
+ import numpy as np
33
+ from space_graph import SPACE
34
+
35
+ X = np.random.randn(20, 5)
36
+ model = SPACE(
37
+ alpha=0.7,
38
+ max_outer_iter=2,
39
+ max_inner_iter=500,
40
+ tol=1e-6,
41
+ weight='uniform',
42
+ )
43
+ model.fit(X)
44
+ print(model.partial_correlation_)
45
+ ```
46
+
47
+ ## Penalty
48
+
49
+ The public parameter `alpha` in `[0, 1]` maps to inner penalties as `lam1 = alpha` and `lam2 = 1 - alpha`, matching the reference elastic-net-style JSRM solver.
50
+
51
+ ## Options
52
+
53
+ - **`tol`** (default `1e-6`): inner coordinate-descent stopping tolerance (and active-set threshold), same scale as the reference C code.
54
+ - **`weight`**: default **`uniform`** (unit weights). Use **`equal`** as an alias. Other modes: **`sig`**, **`degree`**, or a custom positive vector of length `p`.
55
+
56
+ ## Tests
57
+
58
+ ```bash
59
+ pytest
60
+ ```
61
+
62
+ Optional: build `libjsrm_test.so` from `../space/src/JSRM.c` to run the ctypes cross-check in `tests/test_space.py`.
63
+
64
+ ## License
65
+
66
+ GPL-3.0-or-later (same family as the original `space` R package).
@@ -0,0 +1,25 @@
1
+ [build-system]
2
+ requires = ['setuptools>=61', 'wheel']
3
+ build-backend = 'setuptools.build_meta'
4
+
5
+ [project]
6
+ name = 'space-graph'
7
+ version = '0.1.0'
8
+ description = 'Pure Python SPACE: sparse partial correlation estimation'
9
+ readme = 'README.md'
10
+ requires-python = '>=3.10'
11
+ license = { text = 'GPL-3.0-or-later' }
12
+ dependencies = [
13
+ 'numpy>=1.20',
14
+ ]
15
+
16
+ [project.optional-dependencies]
17
+ dev = ['pytest>=7']
18
+ numba = ['numba>=0.57']
19
+
20
+ [tool.setuptools]
21
+ packages = ['space_graph']
22
+
23
+ [tool.pytest.ini_options]
24
+ testpaths = ['tests']
25
+ pythonpath = ['.']
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,4 @@
1
"""space_graph: public package namespace (re-exports the SPACE estimator)."""

from .model import SPACE

__version__ = '0.1.0'
__all__ = ['SPACE', '__version__']
@@ -0,0 +1,183 @@
1
+ """
2
+ Numba-compiled JSRM shooting loop (optional).
3
+
4
+ Install with ``pip install 'space-graph[numba]'`` or ``pip install numba``.
5
+ If Numba is not installed, ``jsrm_shooting_loop`` is ``None`` and ``solver.jsrm``
6
+ uses the pure NumPy loop.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import numpy as np
12
+
13
+
14
+ def _try_build_shooting_loop():
15
+ try:
16
+ from numba import njit
17
+ except ImportError:
18
+ return None
19
+
20
+ @njit(cache=True)
21
+ def jsrm_shooting_loop(
22
+ Y_m: np.ndarray,
23
+ E_m: np.ndarray,
24
+ beta_new: np.ndarray,
25
+ beta_old: np.ndarray,
26
+ beta_last: np.ndarray,
27
+ B: np.ndarray,
28
+ B_s: np.ndarray,
29
+ lambda1: float,
30
+ lambda2: float,
31
+ n: int,
32
+ p: int,
33
+ n_iter: int,
34
+ change_i: int,
35
+ change_j: int,
36
+ beta_change: float,
37
+ tol: float,
38
+ ) -> None:
39
+ """In-place shooting iterations (matches ``solver`` Python loop)."""
40
+ eps1 = tol
41
+ maxdif_tol = tol
42
+ for _ in range(n_iter):
43
+ for ii in range(p):
44
+ for jj in range(p):
45
+ beta_last[ii, jj] = beta_new[ii, jj]
46
+
47
+ nrow_pick = 0
48
+ for j in range(p - 1, 0, -1):
49
+ for i in range(j - 1, -1, -1):
50
+ b = beta_new[i, j]
51
+ if b > eps1 or b < -eps1:
52
+ nrow_pick += 1
53
+
54
+ maxdif = -100.0
55
+
56
+ if nrow_pick > 0:
57
+ for j in range(p - 1, 0, -1):
58
+ for i in range(j - 1, -1, -1):
59
+ cur_i = i
60
+ cur_j = j
61
+ b = beta_new[cur_i, cur_j]
62
+ if not (b > eps1 or b < -eps1):
63
+ continue
64
+
65
+ beta_old[change_i, change_j] = beta_new[
66
+ change_i, change_j
67
+ ]
68
+ beta_old[change_j, change_i] = beta_new[
69
+ change_j, change_i
70
+ ]
71
+
72
+ c1 = beta_change * B[change_j, change_i]
73
+ c2 = beta_change * B[change_i, change_j]
74
+ for kk in range(n):
75
+ E_m[kk, change_i] += Y_m[kk, change_j] * c1
76
+ E_m[kk, change_j] += Y_m[kk, change_i] * c2
77
+
78
+ aij = 0.0
79
+ aji = 0.0
80
+ for kk in range(n):
81
+ aij += E_m[kk, cur_j] * Y_m[kk, cur_i]
82
+ aji += E_m[kk, cur_i] * Y_m[kk, cur_j]
83
+ aij *= B[cur_i, cur_j]
84
+ aji *= B[cur_j, cur_i]
85
+
86
+ b_s = B_s[cur_i, cur_j]
87
+ beta_next = (aij + aji) / b_s + beta_old[cur_i, cur_j]
88
+ temp1 = beta_next
89
+ if beta_next > 0.0:
90
+ temp = beta_next - lambda1 / b_s
91
+ else:
92
+ temp = -beta_next - lambda1 / b_s
93
+ if temp < 0.0:
94
+ temp = 0.0
95
+ else:
96
+ temp = temp / (1.0 + lambda2)
97
+ if temp1 < 0.0:
98
+ temp = -temp
99
+
100
+ beta_new[cur_i, cur_j] = temp
101
+ beta_new[cur_j, cur_i] = temp
102
+
103
+ beta_change = beta_old[cur_i, cur_j] - temp
104
+ change_i = cur_i
105
+ change_j = cur_j
106
+
107
+ maxdif = -100.0
108
+ for ii in range(p):
109
+ for jj in range(p):
110
+ d = beta_last[ii, jj] - beta_new[ii, jj]
111
+ if d < 0.0:
112
+ d = -d
113
+ if d > maxdif:
114
+ maxdif = d
115
+
116
+ if maxdif < maxdif_tol or nrow_pick < 1:
117
+ for ii in range(p):
118
+ for jj in range(p):
119
+ beta_last[ii, jj] = beta_new[ii, jj]
120
+
121
+ for cur_i in range(p - 1):
122
+ for cur_j in range(cur_i + 1, p):
123
+ beta_old[change_i, change_j] = beta_new[
124
+ change_i, change_j
125
+ ]
126
+ beta_old[change_j, change_i] = beta_new[
127
+ change_j, change_i
128
+ ]
129
+
130
+ if beta_change < -eps1 or beta_change > eps1:
131
+ c1 = beta_change * B[change_j, change_i]
132
+ c2 = beta_change * B[change_i, change_j]
133
+ for kk in range(n):
134
+ E_m[kk, change_i] += Y_m[kk, change_j] * c1
135
+ E_m[kk, change_j] += Y_m[kk, change_i] * c2
136
+
137
+ aij = 0.0
138
+ aji = 0.0
139
+ for kk in range(n):
140
+ aij += E_m[kk, cur_j] * Y_m[kk, cur_i]
141
+ aji += E_m[kk, cur_i] * Y_m[kk, cur_j]
142
+ aij *= B[cur_i, cur_j]
143
+ aji *= B[cur_j, cur_i]
144
+
145
+ b_s = B_s[cur_i, cur_j]
146
+ beta_next = (aij + aji) / b_s + beta_old[
147
+ cur_i, cur_j
148
+ ]
149
+ temp1 = beta_next
150
+ if beta_next > 0.0:
151
+ temp = beta_next - lambda1 / b_s
152
+ else:
153
+ temp = -beta_next - lambda1 / b_s
154
+ if temp < 0.0:
155
+ temp = 0.0
156
+ else:
157
+ temp = temp / (1.0 + lambda2)
158
+ if temp1 < 0.0:
159
+ temp = -temp
160
+
161
+ beta_new[cur_i, cur_j] = temp
162
+ beta_new[cur_j, cur_i] = temp
163
+
164
+ beta_change = beta_old[cur_i, cur_j] - temp
165
+ change_i = cur_i
166
+ change_j = cur_j
167
+
168
+ maxdif = -100.0
169
+ for ii in range(p):
170
+ for jj in range(p):
171
+ d = beta_last[ii, jj] - beta_new[ii, jj]
172
+ if d < 0.0:
173
+ d = -d
174
+ if d > maxdif:
175
+ maxdif = d
176
+
177
+ if maxdif < maxdif_tol:
178
+ return
179
+
180
+ return jsrm_shooting_loop
181
+
182
+
183
+ jsrm_shooting_loop = _try_build_shooting_loop()
@@ -0,0 +1,132 @@
1
+ """Public SPACE estimator."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Optional, Union
6
+
7
+ import numpy as np
8
+
9
+ from .penalties import alpha_to_penalties
10
+ from .solver import jsrm
11
+ from .utils import (
12
+ beta_coef_from_rho_upper,
13
+ inv_sig_diag_new,
14
+ partial_corr_to_precision,
15
+ standardize_columns_l2,
16
+ )
17
+ from .weights import WeightInput, rescale_degree_weights, resolve_weight
18
+
19
+
20
class SPACE:
    """
    Sparse partial correlation estimation (SPACE), joint sparse regression.

    Parameters
    ----------
    alpha : float in [0, 1]
        Mix parameter: inner penalties are ``lam1 = alpha``, ``lam2 = 1 - alpha``.
    weight : {'uniform', 'equal', 'sig', 'degree'} or ndarray of shape (p,)
        Node weights for the joint loss (see Peng et al. and R package).
        ``uniform`` and ``equal`` both mean unit weights (no reweighting).
    max_outer_iter : int
        Outer alternations for ``sig`` / weights (R ``iter``).
    max_inner_iter : int
        Max iterations for the inner JSRM solver.
    tol : float
        Inner solver tolerance: convergence and active-set threshold (default
        ``1e-6``, same scale as the reference C implementation).
    standardize : bool
        If True, center columns and scale to unit L2 norm before fitting.
    fit_sig : bool
        If True, estimate diagonal ``sig^{ii}`` each outer step (when not fixed).
    sig : ndarray of shape (p,) or None
        Initial or fixed ``sig^{ii}``. If provided and ``fit_sig`` is False, held fixed.
    """

    def __init__(
        self,
        alpha: float = 1.0,
        weight: WeightInput = 'uniform',
        max_outer_iter: int = 5,
        max_inner_iter: int = 1000,
        tol: float = 1e-6,
        standardize: bool = True,
        fit_sig: bool = True,
        sig: Optional[np.ndarray] = None,
    ):
        self.alpha = float(alpha)
        self.weight = weight
        self.max_outer_iter = int(max_outer_iter)
        self.max_inner_iter = int(max_inner_iter)
        self.tol = float(tol)
        self.standardize = standardize
        self.fit_sig = fit_sig
        self.sig_init = None if sig is None else np.asarray(sig, dtype=np.float64)
        if self.tol <= 0.0:
            raise ValueError('tol must be positive')

        # Fitted attributes -- all None until fit() runs.
        self.partial_correlation_: Optional[np.ndarray] = None
        self.precision_: Optional[np.ndarray] = None
        self.sig_: Optional[np.ndarray] = None
        self.weight_: Optional[np.ndarray] = None
        self._mean_: Optional[np.ndarray] = None
        self._scale_: Optional[np.ndarray] = None

    def fit(self, X: np.ndarray) -> 'SPACE':
        """Fit SPACE to data ``X`` of shape (n, p); returns ``self``."""
        X = np.asarray(X, dtype=np.float64)
        n, p = X.shape
        lam1, lam2 = alpha_to_penalties(self.alpha)

        if self.standardize:
            Xw, self._mean_, self._scale_ = standardize_columns_l2(X)
        else:
            Xw = X.copy()
            self._mean_ = np.zeros(p)
            self._scale_ = np.ones(p)

        # w_tag: 0 uniform, 1 sig-based, 2 degree-based, 3 custom vector
        # (see weights.resolve_weight).
        w_vec, w_update, w_tag = resolve_weight(self.weight, p)

        if self.sig_init is not None:
            sig = np.asarray(self.sig_init, dtype=np.float64).ravel()
            if sig.shape[0] != p:
                raise ValueError('sig must have length p')
            sig_update = self.fit_sig
        else:
            sig = np.ones(p, dtype=np.float64)
            sig_update = self.fit_sig

        for _ in range(self.max_outer_iter):
            # 'sig' weighting reuses the current sig^{ii} estimates as weights.
            if w_tag == 1:
                w_vec = sig.copy()
            Y_u = Xw * np.sqrt(w_vec)[None, :]
            sig_u = sig / w_vec

            # Floor guards against division by ~0 inside jsrm's B ratios.
            sigma_sr = np.sqrt(np.maximum(sig_u, 1e-15))
            par_cor = jsrm(
                Y_u,
                sigma_sr,
                lam1,
                lam2,
                self.max_inner_iter,
                tol=self.tol,
            )
            np.fill_diagonal(par_cor, 1.0)

            coef = par_cor[np.triu_indices(p, k=1)]
            beta_cur = beta_coef_from_rho_upper(coef, sig)

            # Nothing to alternate on: a single inner solve is the answer.
            if not w_update and not sig_update:
                break

            if sig_update:
                sig = inv_sig_diag_new(Xw, beta_cur)

            if w_update:
                if w_tag == 2:
                    w_vec = rescale_degree_weights(par_cor)

        # NOTE(review): par_cor comes from the last jsrm run, i.e. it uses the
        # sig/weights from *before* the final refresh above -- presumably
        # matching the R package's alternation; confirm against space.joint.
        self.partial_correlation_ = par_cor
        self.sig_ = sig
        self.weight_ = w_vec
        self.precision_ = partial_corr_to_precision(par_cor, sig)
        return self
@@ -0,0 +1,17 @@
1
+ """Map public `alpha` to JSRM lam1, lam2."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Tuple
6
+
7
+
8
def alpha_to_penalties(alpha: float) -> Tuple[float, float]:
    """
    Map the public ``alpha`` to the inner JSRM penalties.

    User contract: alpha in [0, 1] gives lam1 = alpha, lam2 = 1 - alpha.

    This matches the reference elastic-net-style solver where both penalties appear.

    Raises
    ------
    ValueError
        If ``alpha`` lies outside [0, 1] or is NaN.
    """
    a = float(alpha)
    # `not (0.0 <= a <= 1.0)` instead of `a < 0.0 or a > 1.0`: NaN fails
    # every comparison, so the old form silently accepted NaN penalties.
    if not (0.0 <= a <= 1.0):
        raise ValueError('alpha must be in [0, 1]')
    return a, 1.0 - a
@@ -0,0 +1,272 @@
1
+ """
2
+ JSRM inner solver: faithful port of `space/src/JSRM.c` active-shooting logic.
3
+
4
+ Y layout: `Y[k, j]` = sample k, variable j (same as C row-major `Y_m[k*p+j]`).
5
+
6
+ Performance: vectorized BLAS-friendly ops (``Y @ W`` for fitted values, column
7
+ dot products for ``Aij``/``Aji``, in-place column updates for residuals).
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import numpy as np
13
+
14
+ from .kernels import jsrm_shooting_loop
15
+
16
+ _DEFAULT_TOL = 1e-6
17
+
18
+
19
+ def _elastic_net_shrink(
20
+ beta_next: float,
21
+ b_s: float,
22
+ lambda1: float,
23
+ lambda2: float,
24
+ ) -> float:
25
+ """One coordinate elastic-net shrink (matches JSRM.c)."""
26
+ temp1 = beta_next
27
+ if beta_next > 0.0:
28
+ temp = beta_next - lambda1 / b_s
29
+ else:
30
+ temp = -beta_next - lambda1 / b_s
31
+ if temp < 0.0:
32
+ return 0.0
33
+ temp = temp / (1.0 + lambda2)
34
+ if temp1 < 0.0:
35
+ temp = -temp
36
+ return temp
37
+
38
+
39
+ def _aij_aji(
40
+ E_m: np.ndarray,
41
+ Y_m: np.ndarray,
42
+ cur_i: int,
43
+ cur_j: int,
44
+ B: np.ndarray,
45
+ ) -> tuple[float, float]:
46
+ """``Aij``, ``Aji`` as in JSRM (BLAS dot on columns)."""
47
+ aij = B[cur_i, cur_j] * float(np.dot(E_m[:, cur_j], Y_m[:, cur_i]))
48
+ aji = B[cur_j, cur_i] * float(np.dot(E_m[:, cur_i], Y_m[:, cur_j]))
49
+ return aij, aji
50
+
51
+
52
+ def _update_e_pair(
53
+ E_m: np.ndarray,
54
+ Y_m: np.ndarray,
55
+ change_i: int,
56
+ change_j: int,
57
+ beta_change: float,
58
+ B: np.ndarray,
59
+ ) -> None:
60
+ """Residual update equation (11) in-place."""
61
+ c1 = beta_change * B[change_j, change_i]
62
+ c2 = beta_change * B[change_i, change_j]
63
+ if c1 != 0.0:
64
+ E_m[:, change_i] += Y_m[:, change_j] * c1
65
+ if c2 != 0.0:
66
+ E_m[:, change_j] += Y_m[:, change_i] * c2
67
+
68
+
69
def jsrm(
    Y_data: np.ndarray,
    sigma_sr: np.ndarray,
    lam1: float,
    lam2: float,
    n_iter: int = 500,
    tol: float = _DEFAULT_TOL,
) -> np.ndarray:
    """
    Joint sparse regression model (SPACE inner problem).

    Parameters
    ----------
    Y_data : ndarray, shape (n, p)
        Data; columns centered to mean 0 inside (C behavior).
    sigma_sr : ndarray, shape (p,)
        sqrt(sig^{ii}) per variable (R `sig.use^0.5`).
    lam1, lam2 : float
        L1 and elastic-net L2 penalties.
    n_iter : int
        Max inner iterations (R `jsrm` uses 500).
    tol : float
        Convergence tolerance: stop when max coordinate change between sweeps
        is below ``tol`` (also used as the active-set threshold for nonzero
        ``beta``, matching the reference ``1e-6`` scale).

    Returns
    -------
    beta_new : ndarray, shape (p, p)
        Symmetric estimates; diagonal 0.
    """
    Y_data = np.asarray(Y_data, dtype=np.float64, order='C')
    sigma_sr = np.asarray(sigma_sr, dtype=np.float64).ravel()
    n, p = Y_data.shape
    if sigma_sr.shape[0] != p:
        raise ValueError('sigma_sr must have length p')

    lambda1 = float(lam1)
    lambda2 = float(lam2)
    tol = float(tol)
    if tol <= 0.0:
        raise ValueError('tol must be positive')
    eps1 = tol
    maxdif_tol = tol

    # Center columns (the reference C code also centers internally).
    Y_m = Y_data.copy()
    Y_m -= Y_m.mean(axis=0)
    normx = np.sum(Y_m * Y_m, axis=0)

    # B[i, j] = sigma_sr[i] / sigma_sr[j]; B_s is the per-pair curvature.
    B = sigma_sr[:, None] / sigma_sr[None, :]
    B_sq = B * B
    B_s = B_sq * normx[:, None] + B_sq.T * normx[None, :]

    # One-shot initial estimate: soft-threshold the Gram-based solution
    # for every upper-triangular pair at once.
    G = Y_m.T @ Y_m
    ui, uj = np.triu_indices(p, k=1)
    temp1_vec = G[ui, uj] * (B[uj, ui] + B[ui, uj])
    tt = np.abs(temp1_vec) - lambda1
    b_s_ij = B_s[ui, uj] * (1.0 + lambda2)
    bet = np.zeros(ui.shape[0], dtype=np.float64)
    m = tt >= 0.0
    bet[m] = tt[m] / b_s_ij[m]
    bet[m] *= np.sign(temp1_vec[m])

    beta_new = np.zeros((p, p), dtype=np.float64)
    beta_new[ui, uj] = bet
    beta_new[uj, ui] = bet
    np.fill_diagonal(beta_new, 0.0)

    # Residuals of the initial estimate: E = Y - Y @ (beta * B).
    W = beta_new * B
    E_m = Y_m - (Y_m @ W)

    beta_old = beta_new.copy()
    beta_last = np.empty((p, p), dtype=np.float64)

    # Locate the first active coordinate (scan order matches the C code:
    # descending j, then descending i within each column).
    found = False
    pick_i = pick_j = 0
    for j in range(p - 1, 0, -1):
        for i in range(j - 1, -1, -1):
            b = beta_new[i, j]
            if b > eps1 or b < -eps1:
                pick_i, pick_j = i, j
                found = True
                break
        if found:
            break

    # All-zero initial estimate: nothing to iterate on.
    if not found:
        return beta_new

    cur_i, cur_j = pick_i, pick_j

    # Warm-up update on the first active coordinate; beta_change/change_{i,j}
    # seed the deferred residual update inside the sweeps below.
    aij, aji = _aij_aji(E_m, Y_m, cur_i, cur_j, B)
    b_s = B_s[cur_i, cur_j]
    beta_next = (aij + aji) / b_s + beta_old[cur_i, cur_j]
    temp = _elastic_net_shrink(beta_next, b_s, lambda1, lambda2)

    beta_change = beta_old[cur_i, cur_j] - temp
    beta_new[cur_i, cur_j] = temp
    beta_new[cur_j, cur_i] = temp

    change_i = cur_i
    change_j = cur_j

    # Prefer the compiled Numba kernel when available (same algorithm).
    if jsrm_shooting_loop is not None:
        jsrm_shooting_loop(
            Y_m,
            E_m,
            beta_new,
            beta_old,
            beta_last,
            B,
            B_s,
            lambda1,
            lambda2,
            n,
            p,
            n_iter,
            change_i,
            change_j,
            beta_change,
            tol,
        )
        return beta_new

    nbeta = p * (p - 1) // 2
    pair_buf = np.empty((nbeta, 2), dtype=np.int32)

    for _ in range(n_iter):
        beta_last[:] = beta_new

        # Snapshot the active set (|beta| > eps1) at the top of the sweep.
        k = 0
        for j in range(p - 1, 0, -1):
            for i in range(j - 1, -1, -1):
                b = beta_new[i, j]
                if b > eps1 or b < -eps1:
                    pair_buf[k, 0] = i
                    pair_buf[k, 1] = j
                    k += 1
        nrow_pick = k
        maxdif = -100.0

        if nrow_pick > 0:
            # Active shooting: update only the snapshotted nonzero pairs.
            for t in range(nrow_pick):
                cur_i = int(pair_buf[t, 0])
                cur_j = int(pair_buf[t, 1])
                beta_old[change_i, change_j] = beta_new[change_i, change_j]
                beta_old[change_j, change_i] = beta_new[change_j, change_i]

                _update_e_pair(E_m, Y_m, change_i, change_j, beta_change, B)

                aij, aji = _aij_aji(E_m, Y_m, cur_i, cur_j, B)
                b_s = B_s[cur_i, cur_j]
                beta_next = (aij + aji) / b_s + beta_old[cur_i, cur_j]
                temp = _elastic_net_shrink(beta_next, b_s, lambda1, lambda2)

                beta_new[cur_i, cur_j] = temp
                beta_new[cur_j, cur_i] = temp

                beta_change = beta_old[cur_i, cur_j] - temp
                change_i = cur_i
                change_j = cur_j

            maxdif = float(np.max(np.abs(beta_last - beta_new)))

        if maxdif < maxdif_tol or nrow_pick < 1:
            # Active set converged (or empty): one full sweep over every
            # upper-triangular pair, then test global convergence.
            beta_last[:] = beta_new

            for cur_i in range(p - 1):
                for cur_j in range(cur_i + 1, p):
                    beta_old[change_i, change_j] = beta_new[
                        change_i, change_j
                    ]
                    beta_old[change_j, change_i] = beta_new[
                        change_j, change_i
                    ]

                    if beta_change < -eps1 or beta_change > eps1:
                        _update_e_pair(
                            E_m,
                            Y_m,
                            change_i,
                            change_j,
                            beta_change,
                            B,
                        )

                    aij, aji = _aij_aji(E_m, Y_m, cur_i, cur_j, B)
                    b_s = B_s[cur_i, cur_j]
                    beta_next = (aij + aji) / b_s + beta_old[cur_i, cur_j]
                    temp = _elastic_net_shrink(beta_next, b_s, lambda1, lambda2)

                    beta_new[cur_i, cur_j] = temp
                    beta_new[cur_j, cur_i] = temp

                    beta_change = beta_old[cur_i, cur_j] - temp
                    change_i = cur_i
                    change_j = cur_j

            maxdif = float(np.max(np.abs(beta_last - beta_new)))

            if maxdif < maxdif_tol:
                break

    return beta_new
@@ -0,0 +1,15 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+
5
+ import numpy as np
6
+
7
+
8
@dataclass
class SpaceState:
    """Internal state for one SPACE fit."""

    # NOTE(review): this dataclass is not referenced by the other modules in
    # view -- presumably a planned container mirroring SPACE's fitted
    # attributes; confirm before relying on it.
    # Fitted partial-correlation matrix (see model.SPACE.partial_correlation_).
    partial_correlation: np.ndarray
    # Diagonal sig^{ii} estimates (see model.SPACE.sig_).
    sig: np.ndarray
    # Node-weight vector used in the joint loss (see model.SPACE.weight_).
    weight: np.ndarray
    # Outer-alternation count.
    outer_iter: int
@@ -0,0 +1,62 @@
1
+ """Standardization and matrix transforms matching `space` R code."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import numpy as np
6
+
7
+
8
def standardize_columns_l2(X: np.ndarray) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Center each column to mean 0 and scale it to unit L2 norm.

    Returns ``(X_std, mean, scale)`` with ``X_std = (X - mean) / scale``.
    Near-zero columns (norm < 1e-15) keep scale 1.0 to avoid dividing by 0.
    """
    arr = np.asarray(X, dtype=np.float64)
    col_mean = arr.mean(axis=0)
    centered = arr - col_mean
    norms = np.sqrt((centered * centered).sum(axis=0))
    safe_norms = np.where(norms < 1e-15, 1.0, norms)
    return centered / safe_norms, col_mean, safe_norms
19
+
20
+
21
def beta_coef_from_rho_upper(coef: np.ndarray, sig_fit: np.ndarray) -> np.ndarray:
    """
    `Beta.coef` from space/R/space.R: coef is upper-triangle rho^{ij}, sig_fit is sigma^{ii}.

    Parameters
    ----------
    coef : ndarray, shape (p*(p-1)/2,)
        Upper-triangle partial correlations (row-major `triu_indices` order).
    sig_fit : ndarray, shape (p,)
        Diagonal estimates ``sigma^{ii}``.

    Returns
    -------
    ndarray, shape (p, p)
        ``beta[i, j] = rho[i, j] * sqrt(sig_fit[i] / sig_fit[j])``, zero diagonal.
    """
    p = sig_fit.shape[0]
    result = np.zeros((p, p), dtype=np.float64)
    result[np.triu_indices(p, k=1)] = coef
    result = result + result.T
    sqrt_sig = np.sqrt(sig_fit)
    # Row/column rescale via broadcasting.  The previous form
    # `(inv_sqrt[:, None] * result) @ np.diag(sqrt_sig)` built a dense p x p
    # diagonal and paid an O(p^3) matmul for what is an O(p^2) scaling.
    result = (result / sqrt_sig[:, None]) * sqrt_sig[None, :]
    return result.T
33
+
34
+
35
def inv_sig_diag_new(Y: np.ndarray, beta: np.ndarray) -> np.ndarray:
    """
    ``InvSig.diag.new`` from the R package: per-column inverse residual
    variance ``1 / colMeans((Y - Y @ Beta0)^2)`` where ``diag(Beta0) = 0``.
    """
    beta0 = beta.copy()
    np.fill_diagonal(beta0, 0.0)
    residual = Y - Y @ beta0
    return 1.0 / np.mean(residual * residual, axis=0)
44
+
45
+
46
def partial_corr_to_precision(
    parcor: np.ndarray, sig: np.ndarray
) -> np.ndarray:
    """
    Precision-like matrix from partial correlations and ``sig^{ii}``.

    Only the upper triangle of ``parcor`` is consumed; it is symmetrized and
    rescaled by ``sqrt(sig)`` row/column factors (NITK/R style).  The diagonal
    of the result is left at zero.
    """
    p = parcor.shape[0]
    upper = np.triu_indices(p, k=1)
    sym = np.zeros((p, p), dtype=np.float64)
    sym[upper] = parcor[upper]
    sym = sym + sym.T
    inv_sqrt = 1.0 / np.sqrt(sig)
    sqrt_sig = np.sqrt(sig)
    return (inv_sqrt[:, None] * sym * sqrt_sig[None, :]).T
@@ -0,0 +1,50 @@
1
+ """Weight modes for SPACE outer loop (R `space.joint` semantics)."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Literal, Union
6
+
7
+ import numpy as np
8
+
9
# Accepted weight specifications: one of the four named modes, or a custom
# positive vector of length p (validated and normalized in `resolve_weight`).
WeightInput = Union[
    Literal['uniform'],
    Literal['equal'],
    Literal['sig'],
    Literal['degree'],
    np.ndarray,
]
16
+
17
+
18
def resolve_weight(
    weight: WeightInput,
    p: int,
) -> tuple[np.ndarray, bool, int]:
    """
    Resolve a weight specification into ``(weight_vector, update_flag, tag)``.

    ``update_flag`` says whether the outer loop refreshes the weights each
    iteration; ``tag`` identifies the mode:
    0 uniform, 1 sig-based, 2 degree-based, 3 custom.
    """
    if isinstance(weight, str):
        # Named modes all start from unit weights; only the flags differ.
        named_modes = {
            'uniform': (False, 0),
            'equal': (False, 0),
            'sig': (True, 1),
            'degree': (True, 2),
        }
        if weight not in named_modes:
            raise ValueError(f'unknown weight mode: {weight}')
        update_flag, tag = named_modes[weight]
        return np.ones(p, dtype=np.float64), update_flag, tag

    vec = np.asarray(weight, dtype=np.float64).ravel()
    if vec.shape[0] != p:
        raise ValueError('custom weight must have length p')
    if np.any(vec <= 0):
        raise ValueError('custom weight must be positive')
    # Normalize a custom vector to mean 1, matching the R convention.
    vec = vec / vec.mean()
    return vec, False, 3
43
+
44
+
45
def rescale_degree_weights(par_cor: np.ndarray) -> np.ndarray:
    """
    Degree-based node weights (R semantics): count entries with |rho| > 1e-6
    per row, add the maximum count to every entry, then normalize the vector
    to mean 1.
    """
    degrees = (np.abs(par_cor) > 1e-6).sum(axis=1).astype(np.float64)
    shifted = degrees + degrees.max()
    return shifted / shifted.sum() * par_cor.shape[0]
@@ -0,0 +1,81 @@
1
+ Metadata-Version: 2.4
2
+ Name: space-graph
3
+ Version: 0.1.0
4
+ Summary: Pure Python SPACE: sparse partial correlation estimation
5
+ License: GPL-3.0-or-later
6
+ Requires-Python: >=3.10
7
+ Description-Content-Type: text/markdown
8
+ License-File: LICENSE
9
+ Requires-Dist: numpy>=1.20
10
+ Provides-Extra: dev
11
+ Requires-Dist: pytest>=7; extra == "dev"
12
+ Provides-Extra: numba
13
+ Requires-Dist: numba>=0.57; extra == "numba"
14
+ Dynamic: license-file
15
+
16
+ # space-graph
17
+
18
+ [![PyPI version](https://img.shields.io/pypi/v/space-graph)](https://pypi.org/project/space-graph/)
19
+ [![Python 3.10+](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/downloads/)
20
+ [![License: GPL-3.0-or-later](https://img.shields.io/badge/license-GPL--3.0--or--later-blue.svg)](LICENSE)
21
+
22
+ Pure Python implementation of **SPACE** (Sparse Partial Correlation Estimation) from Peng et al. (2009), with no R or C dependencies.
23
+
24
+ Paper: [Sparse Partial Correlation Estimation for High-Dimensional Data](https://www.tandfonline.com/doi/abs/10.1198/jasa.2009.0126)
25
+
26
+ ## Install
27
+
28
+ ```bash
29
+ pip install space-graph
30
+ ```
31
+
32
+ Optional Numba (faster inner `jsrm` loop):
33
+
34
+ ```bash
35
+ pip install 'space-graph[numba]'
36
+ ```
37
+
38
+ From GitHub:
39
+
40
+ ```bash
41
+ pip install git+https://github.com/shahrozeabbas/space-graph.git
42
+ ```
43
+
44
+ ## Usage
45
+
46
+ ```python
47
+ import numpy as np
48
+ from space_graph import SPACE
49
+
50
+ X = np.random.randn(20, 5)
51
+ model = SPACE(
52
+ alpha=0.7,
53
+ max_outer_iter=2,
54
+ max_inner_iter=500,
55
+ tol=1e-6,
56
+ weight='uniform',
57
+ )
58
+ model.fit(X)
59
+ print(model.partial_correlation_)
60
+ ```
61
+
62
+ ## Penalty
63
+
64
+ The public parameter `alpha` in `[0, 1]` maps to inner penalties as `lam1 = alpha` and `lam2 = 1 - alpha`, matching the reference elastic-net-style JSRM solver.
65
+
66
+ ## Options
67
+
68
+ - **`tol`** (default `1e-6`): inner coordinate-descent stopping tolerance (and active-set threshold), same scale as the reference C code.
69
+ - **`weight`**: default **`uniform`** (unit weights). Use **`equal`** as an alias. Other modes: **`sig`**, **`degree`**, or a custom positive vector of length `p`.
70
+
71
+ ## Tests
72
+
73
+ ```bash
74
+ pytest
75
+ ```
76
+
77
+ Optional: build `libjsrm_test.so` from `../space/src/JSRM.c` to run the ctypes cross-check in `tests/test_space.py`.
78
+
79
+ ## License
80
+
81
+ GPL-3.0-or-later (same family as the original `space` R package).
@@ -0,0 +1,18 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ space_graph/__init__.py
5
+ space_graph/kernels.py
6
+ space_graph/model.py
7
+ space_graph/penalties.py
8
+ space_graph/solver.py
9
+ space_graph/state.py
10
+ space_graph/utils.py
11
+ space_graph/weights.py
12
+ space_graph.egg-info/PKG-INFO
13
+ space_graph.egg-info/SOURCES.txt
14
+ space_graph.egg-info/dependency_links.txt
15
+ space_graph.egg-info/requires.txt
16
+ space_graph.egg-info/top_level.txt
17
+ tests/test_space.py
18
+ tests/test_utils.py
@@ -0,0 +1,7 @@
1
+ numpy>=1.20
2
+
3
+ [dev]
4
+ pytest>=7
5
+
6
+ [numba]
7
+ numba>=0.57
@@ -0,0 +1 @@
1
+ space_graph
@@ -0,0 +1,125 @@
1
+ """Tests for SPACE / JSRM."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import os
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from space_graph.model import SPACE
11
+ from space_graph.solver import jsrm
12
+
13
+
14
+ def _spd_cov(p: int, rng: np.random.Generator) -> np.ndarray:
15
+ a = rng.standard_normal((p, p))
16
+ return a @ a.T + p * np.eye(p)
17
+
18
+
19
def test_jsrm_matches_c_when_available():
    """Cross-check the pure-Python jsrm against the compiled C JSRM, if built.

    Set the SPACE_JSRM_LIB environment variable to the libjsrm_test.so path;
    by default we look for a sibling checkout's build relative to this file.
    """
    # Previously this was a hard-coded developer-machine absolute path
    # (/Users/...), so the cross-check could never run anywhere else.
    default_lib = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..', '..', 'space', 'src', 'libjsrm_test.so',
    )
    lib = os.environ.get('SPACE_JSRM_LIB', default_lib)
    if not os.path.isfile(lib):
        pytest.skip('compiled JSRM test library not present')
    import ctypes
    from numpy.ctypeslib import ndpointer

    cdll = ctypes.CDLL(lib)
    fun = cdll.JSRM
    fun.restype = None
    fun.argtypes = [
        ctypes.POINTER(ctypes.c_int),
        ctypes.POINTER(ctypes.c_int),
        ctypes.POINTER(ctypes.c_float),
        ctypes.POINTER(ctypes.c_float),
        ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
        ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
        ctypes.POINTER(ctypes.c_int),
        ctypes.POINTER(ctypes.c_int),
        ndpointer(ctypes.c_float, flags='C_CONTIGUOUS'),
    ]

    rng = np.random.default_rng(42)
    n, p = 12, 6
    X = rng.standard_normal((n, p)).astype(np.float32)
    sig = np.ones(p, dtype=np.float32)
    lam1, lam2 = 0.35, 0.0

    n_in = ctypes.c_int(n)
    p_in = ctypes.c_int(p)
    l1 = ctypes.c_float(lam1)
    l2 = ctypes.c_float(lam2)
    sigma_sr = np.sqrt(sig).astype(np.float32)
    n_iter = ctypes.c_int(500)
    iter_out = ctypes.c_int(0)
    beta = np.zeros(p * p, dtype=np.float32)
    y_flat = np.ascontiguousarray(X.ravel(order='C'))
    fun(
        ctypes.byref(n_in),
        ctypes.byref(p_in),
        ctypes.byref(l1),
        ctypes.byref(l2),
        y_flat,
        sigma_sr,
        ctypes.byref(n_iter),
        ctypes.byref(iter_out),
        beta,
    )
    beta_c = beta.reshape(p, p, order='C')

    beta_py = jsrm(
        X.astype(np.float64),
        sigma_sr.astype(np.float64),
        lam1,
        lam2,
        500,
        tol=1e-6,
    )
    # float32 C output vs float64 Python: compare at single precision.
    np.testing.assert_allclose(beta_c, beta_py, atol=1e-4, rtol=1e-4)
78
+
79
+
80
def test_space_fit_symmetric_unit_diagonal():
    """Fitted partial-correlation matrix is symmetric with unit diagonal."""
    rng = np.random.default_rng(0)
    p, n = 8, 25
    cov = _spd_cov(p, rng)
    X = rng.multivariate_normal(np.zeros(p), cov, size=n)

    m = SPACE(alpha=1.0, max_outer_iter=2, max_inner_iter=500)
    m.fit(X)

    r = m.partial_correlation_
    assert r.shape == (p, p)
    assert np.allclose(r, r.T)
    assert np.allclose(np.diag(r), 1.0)
    # Side outputs should be populated by fit() as well.
    assert m.sig_ is not None
    assert m.precision_ is not None
95
+
96
+
97
def test_alpha_mix_penalty_runs():
    """A mixed L1/L2 penalty (0 < alpha < 1) fits without error."""
    rng = np.random.default_rng(1)
    X = rng.standard_normal((30, 5))
    m = SPACE(alpha=0.6, max_outer_iter=2)
    m.fit(X)
    assert m.partial_correlation_.shape == (5, 5)
103
+
104
+
105
def test_weight_uniform_vs_sig():
    """'uniform' and 'sig' weight modes both fit and agree on output shape."""
    rng = np.random.default_rng(2)
    p, n = 6, 40
    X = rng.multivariate_normal(np.zeros(p), _spd_cov(p, rng), size=n)

    a = SPACE(alpha=0.9, weight='uniform', max_outer_iter=2)
    a.fit(X)
    b = SPACE(alpha=0.9, weight='sig', max_outer_iter=2)
    b.fit(X)
    assert a.partial_correlation_.shape == b.partial_correlation_.shape
115
+
116
+
117
def test_weight_equal_alias_matches_uniform():
    """weight='equal' is an exact alias of weight='uniform' (same estimates)."""
    rng = np.random.default_rng(3)
    p, n = 5, 30
    X = rng.multivariate_normal(np.zeros(p), _spd_cov(p, rng), size=n)
    u = SPACE(alpha=0.95, weight='uniform', max_outer_iter=2, tol=1e-6)
    e = SPACE(alpha=0.95, weight='equal', max_outer_iter=2, tol=1e-6)
    u.fit(X)
    e.fit(X)
    np.testing.assert_allclose(u.partial_correlation_, e.partial_correlation_)
@@ -0,0 +1,15 @@
1
+ """Unit tests for utilities."""
2
+
3
+ import numpy as np
4
+
5
+ from space_graph.utils import beta_coef_from_rho_upper
6
+
7
+
8
def test_beta_coef_shape():
    """beta_coef_from_rho_upper returns a square (p, p) matrix."""
    p = 4
    rho = np.eye(p)
    rho[0, 1] = rho[1, 0] = 0.3
    upper_coef = rho[np.triu_indices(p, k=1)]
    beta = beta_coef_from_rho_upper(upper_coef, np.ones(p))
    assert beta.shape == (p, p)