corticalfields 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- corticalfields/__init__.py +75 -0
- corticalfields/features.py +258 -0
- corticalfields/graphs.py +234 -0
- corticalfields/kernels.py +304 -0
- corticalfields/mtle_pipeline.py +856 -0
- corticalfields/normative.py +686 -0
- corticalfields/spectral.py +509 -0
- corticalfields/surface.py +369 -0
- corticalfields/surprise.py +381 -0
- corticalfields/utils.py +227 -0
- corticalfields/viz.py +357 -0
- corticalfields-0.1.1.dist-info/METADATA +194 -0
- corticalfields-0.1.1.dist-info/RECORD +16 -0
- corticalfields-0.1.1.dist-info/WHEEL +5 -0
- corticalfields-0.1.1.dist-info/licenses/LICENSE +21 -0
- corticalfields-0.1.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
"""
|
|
2
|
+
CorticalFields — Geodesic-aware GP normative modeling on cortical surfaces.
|
|
3
|
+
|
|
4
|
+
A library for computing information-theoretic surprise maps on the cortical
|
|
5
|
+
manifold using spectral Matérn Gaussian Processes, Heat Kernel Signatures,
|
|
6
|
+
and Laplace–Beltrami spectral analysis. Designed for structural MRI (T1w)
|
|
7
|
+
data in clinical neuroimaging, with emphasis on epilepsy (MTLE-HS).
|
|
8
|
+
|
|
9
|
+
Core pipeline:
|
|
10
|
+
1. Load FreeSurfer surfaces and morphometric overlays
|
|
11
|
+
2. Compute Laplace–Beltrami eigenpairs on the cortical mesh
|
|
12
|
+
3. Extract spectral shape descriptors (HKS, WKS, GPS)
|
|
13
|
+
4. Build spectral Matérn GP kernels on the manifold
|
|
14
|
+
5. Fit normative models on a reference cohort
|
|
15
|
+
6. Generate vertex-wise surprise / anomaly maps for patients
|
|
16
|
+
|
|
17
|
+
Modules
|
|
18
|
+
-------
|
|
19
|
+
surface : Surface I/O — FreeSurfer, GIfTI, mesh utilities
|
|
20
|
+
spectral : Laplace–Beltrami decomposition, HKS, WKS, GPS
|
|
21
|
+
kernels : Spectral Matérn kernels for GPyTorch
|
|
22
|
+
normative : GP-based normative modeling pipeline
|
|
23
|
+
surprise : Information-theoretic anomaly scoring
|
|
24
|
+
features : Morphometric feature extraction from FreeSurfer
|
|
25
|
+
graphs : Cortical similarity network construction
|
|
26
|
+
viz : Publication-quality surface visualization
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
# Keep in sync with the distribution metadata: the published wheel is
# corticalfields 0.1.1, but this string was left at the stale "0.1.0".
__version__ = "0.1.1"
__author__ = "Velho Mago (rdneuro)"
|
|
31
|
+
|
|
32
|
+
# ── Lazy imports ────────────────────────────────────────────────────────
|
|
33
|
+
# Heavy dependencies (torch, gpytorch) are only loaded when the modules
|
|
34
|
+
# that need them are first accessed. This keeps `import corticalfields`
|
|
35
|
+
# fast and memory-light for submodule-level usage.
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# Lazy-export table: attribute name -> (module path, attribute name).
# Defined once at module level instead of being rebuilt as a dict literal
# on every attribute lookup, which is what the previous implementation did.
_LAZY_ATTRS = {
    # surface.py (lightweight — numpy/nibabel only)
    "CorticalSurface": ("corticalfields.surface", "CorticalSurface"),
    "load_freesurfer_surface": ("corticalfields.surface", "load_freesurfer_surface"),
    # spectral.py (lightweight — numpy/scipy only)
    "LaplaceBeltrami": ("corticalfields.spectral", "LaplaceBeltrami"),
    "heat_kernel_signature": ("corticalfields.spectral", "heat_kernel_signature"),
    "wave_kernel_signature": ("corticalfields.spectral", "wave_kernel_signature"),
    "global_point_signature": ("corticalfields.spectral", "global_point_signature"),
    # kernels.py (heavy — torch + gpytorch)
    "SpectralMaternKernel": ("corticalfields.kernels", "SpectralMaternKernel"),
    # normative.py (heavy — torch + gpytorch)
    "CorticalNormativeModel": ("corticalfields.normative", "CorticalNormativeModel"),
    # surprise.py (lightweight — numpy/scipy only)
    "SurpriseMap": ("corticalfields.surprise", "SurpriseMap"),
    "compute_surprise": ("corticalfields.surprise", "compute_surprise"),
    # features.py (lightweight)
    "MorphometricProfile": ("corticalfields.features", "MorphometricProfile"),
}


def __getattr__(name: str):
    """Lazy attribute loader — imports submodules on first access (PEP 562).

    Heavy dependencies (torch, gpytorch) are imported only when a name
    that needs them is first requested, keeping ``import corticalfields``
    fast and memory-light.

    Raises
    ------
    AttributeError
        If *name* is not one of the lazily-exposed attributes.
    """
    try:
        module_path, attr = _LAZY_ATTRS[name]
    except KeyError:
        # `from None` — the internal KeyError is noise to the caller.
        raise AttributeError(
            f"module 'corticalfields' has no attribute {name!r}"
        ) from None
    import importlib

    mod = importlib.import_module(module_path)
    return getattr(mod, attr)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
# Public API — all of these resolve lazily through __getattr__.
__all__ = [
    # surface
    "CorticalSurface",
    "load_freesurfer_surface",
    # spectral
    "LaplaceBeltrami",
    "heat_kernel_signature",
    "wave_kernel_signature",
    "global_point_signature",
    # kernels
    "SpectralMaternKernel",
    # normative
    "CorticalNormativeModel",
    # surprise
    "SurpriseMap",
    "compute_surprise",
    # features
    "MorphometricProfile",
]
|
|
@@ -0,0 +1,258 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Morphometric feature extraction for CorticalFields.
|
|
3
|
+
|
|
4
|
+
Extracts and manages multi-feature morphometric profiles from FreeSurfer
|
|
5
|
+
outputs. These profiles serve as the observation vectors for GP normative
|
|
6
|
+
modeling: for each vertex, we have a vector of structural features
|
|
7
|
+
(cortical thickness, curvature, sulcal depth, surface area, gyrification,
|
|
8
|
+
etc.) that characterise local brain morphology.
|
|
9
|
+
|
|
10
|
+
The module also handles feature normalisation, missing data, and the
|
|
11
|
+
construction of population-level feature matrices for training.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import logging
|
|
17
|
+
from dataclasses import dataclass, field
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from typing import Dict, List, Optional, Tuple, Union
|
|
20
|
+
|
|
21
|
+
import numpy as np
|
|
22
|
+
|
|
23
|
+
from corticalfields.surface import CorticalSurface, load_freesurfer_surface
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
# Default features to extract (standard FreeSurfer outputs)
|
|
28
|
+
DEFAULT_FEATURES = ["thickness", "curv", "sulc", "area", "volume"]
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
class MorphometricProfile:
    """
    Multi-feature morphometric profile for one hemisphere.

    Stores per-vertex feature values for a set of subjects, enabling
    population-level analyses.

    Attributes
    ----------
    features : dict[str, np.ndarray]
        Feature name → array of shape (N_vertices, N_subjects).
    feature_names : list[str]
        Ordered list of feature names.
    subject_ids : list[str]
        Subject identifiers corresponding to columns.
    n_vertices : int
        Number of mesh vertices.
    hemi : str
        Hemisphere (``'lh'`` or ``'rh'``).
    """

    features: Dict[str, np.ndarray] = field(default_factory=dict)
    feature_names: List[str] = field(default_factory=list)
    subject_ids: List[str] = field(default_factory=list)
    n_vertices: int = 0
    hemi: str = "lh"

    @property
    def n_subjects(self) -> int:
        """Number of subjects (columns of each feature matrix)."""
        return len(self.subject_ids)

    @property
    def n_features(self) -> int:
        """Number of distinct morphometric features."""
        return len(self.feature_names)

    def get_feature_matrix(self, feature_name: str) -> np.ndarray:
        """
        Get the (N_vertices, N_subjects) matrix for one feature.

        Raises
        ------
        KeyError
            If ``feature_name`` is not a stored feature.
        """
        return self.features[feature_name]

    def get_subject_profile(
        self,
        subject_id: str,
    ) -> Dict[str, np.ndarray]:
        """
        Get all features for one subject.

        Returns
        -------
        profile : dict[str, np.ndarray]
            Feature name → (N_vertices,) array.

        Raises
        ------
        ValueError
            If ``subject_id`` is not in ``subject_ids``.
        """
        idx = self.subject_ids.index(subject_id)
        return {
            name: self.features[name][:, idx]
            for name in self.feature_names
        }

    def get_vertex_feature_vector(
        self,
        vertex_idx: int,
        subject_idx: int = 0,
    ) -> np.ndarray:
        """
        Get the multi-feature vector at one vertex for one subject.

        Returns
        -------
        vec : np.ndarray, shape (n_features,)
        """
        return np.array([
            self.features[name][vertex_idx, subject_idx]
            for name in self.feature_names
        ])

    def population_mean(self, feature_name: str) -> np.ndarray:
        """Mean across subjects for one feature. Shape: (N_vertices,)."""
        return np.nanmean(self.features[feature_name], axis=1)

    def population_std(self, feature_name: str) -> np.ndarray:
        """Std across subjects for one feature. Shape: (N_vertices,)."""
        return np.nanstd(self.features[feature_name], axis=1)

    def normalise(
        self,
        method: str = "z_score",
        reference_mean: Optional[Dict[str, np.ndarray]] = None,
        reference_std: Optional[Dict[str, np.ndarray]] = None,
    ) -> "MorphometricProfile":
        """
        Normalise features vertex-wise.

        Parameters
        ----------
        method : ``'z_score'`` or ``'robust'``
            Normalisation method. ``'z_score'`` uses mean/std;
            ``'robust'`` uses median/IQR.
        reference_mean, reference_std : dict or None
            External reference statistics (e.g. from a normative cohort).
            If None, uses internal population statistics. Reference
            arrays are never modified.

        Returns
        -------
        MorphometricProfile
            New profile with normalised features.

        Raises
        ------
        ValueError
            If ``method`` is not recognised.
        """
        # Fail fast: previously an unknown method silently produced an
        # empty `norm_features` dict, surfacing later as a KeyError.
        if method not in ("z_score", "robust"):
            raise ValueError(f"Unknown normalisation method: {method!r}")

        norm_features = {}

        for name in self.feature_names:
            data = self.features[name].copy()

            if method == "z_score":
                mu = (
                    reference_mean[name]
                    if reference_mean
                    else np.nanmean(data, axis=1)
                )
                sigma = (
                    reference_std[name]
                    if reference_std
                    else np.nanstd(data, axis=1)
                )
                # Guard near-zero variance WITHOUT mutating the caller's
                # arrays: the old in-place `sigma[sigma < 1e-8] = 1.0`
                # clobbered reference_std[name].
                sigma = np.where(sigma < 1e-8, 1.0, sigma)
                norm_features[name] = (data - mu[:, None]) / sigma[:, None]

            else:  # method == "robust"
                med = np.nanmedian(data, axis=1)
                q25 = np.nanpercentile(data, 25, axis=1)
                q75 = np.nanpercentile(data, 75, axis=1)
                iqr = q75 - q25
                # Same copy-on-guard treatment as the z-score branch.
                iqr = np.where(iqr < 1e-8, 1.0, iqr)
                norm_features[name] = (data - med[:, None]) / iqr[:, None]

        return MorphometricProfile(
            features=norm_features,
            feature_names=self.feature_names.copy(),
            subject_ids=self.subject_ids.copy(),
            n_vertices=self.n_vertices,
            hemi=self.hemi,
        )
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def extract_cohort_profiles(
    subjects_dir: Union[str, Path],
    subject_ids: List[str],
    hemi: str = "lh",
    surface: str = "pial",
    features: Optional[List[str]] = None,
) -> MorphometricProfile:
    """
    Extract morphometric profiles for an entire cohort.

    Loads the FreeSurfer surface and overlays for each subject and
    assembles them into a population-level :class:`MorphometricProfile`.

    Parameters
    ----------
    subjects_dir : path-like
        FreeSurfer SUBJECTS_DIR.
    subject_ids : list[str]
        Subject folder names.
    hemi : ``'lh'`` or ``'rh'``
    surface : str
        Surface type (``'pial'``, ``'white'``, etc.).
    features : list[str] or None
        Feature names to extract (default: ``DEFAULT_FEATURES``).

    Returns
    -------
    MorphometricProfile
        Population-level feature matrices; missing/failed subjects leave
        NaN columns.

    Notes
    -----
    All subjects must be in the same template space (e.g. fsaverage)
    or registered to a common surface template, so that vertex indices
    correspond across subjects.
    """
    feature_list = DEFAULT_FEATURES.copy() if features is None else features

    profile = MorphometricProfile(
        feature_names=feature_list,
        subject_ids=list(subject_ids),
        hemi=hemi,
    )

    # Probe the first subject to learn the template's vertex count.
    template_surf = load_freesurfer_surface(
        subjects_dir, subject_ids[0], hemi=hemi, surface=surface,
        overlays=feature_list,
    )
    n_verts = template_surf.n_vertices
    n_subj = len(subject_ids)
    profile.n_vertices = n_verts

    # Pre-allocate NaN-filled matrices; anything not loaded stays NaN.
    for name in feature_list:
        profile.features[name] = np.full((n_verts, n_subj), np.nan, dtype=np.float64)

    # Populate one column per subject; a failure skips that subject only.
    for col, sid in enumerate(subject_ids):
        try:
            surf = load_freesurfer_surface(
                subjects_dir, sid, hemi=hemi, surface=surface,
                overlays=feature_list,
            )
            for name in feature_list:
                if name not in surf.overlays:
                    continue
                values = surf.get_overlay(name)
                if values.shape[0] != n_verts:
                    logger.warning(
                        "Subject %s: %s has %d vertices (expected %d), skipping.",
                        sid, name, values.shape[0], n_verts,
                    )
                    continue
                profile.features[name][:, col] = values
        except Exception as e:
            logger.error("Failed to load subject %s: %s", sid, e)

    logger.info(
        "Extracted profiles: %d subjects, %d vertices, %d features.",
        n_subj, n_verts, len(feature_list),
    )

    return profile
|
corticalfields/graphs.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Cortical similarity network construction from morphometric features.
|
|
3
|
+
|
|
4
|
+
This module builds brain graphs from structural MRI data alone (no fMRI
|
|
5
|
+
or dMRI required). Two approaches are provided:
|
|
6
|
+
|
|
7
|
+
1. **Morphometric Similarity Networks (MSN)** — inter-regional
|
|
8
|
+
Pearson/Spearman correlation of morphometric feature profiles
|
|
9
|
+
(Seidlitz et al., Neuron 2018). Each ROI has a vector of
|
|
10
|
+
features (mean thickness, curvature, sulcal depth, etc.); the
|
|
11
|
+
correlation between ROI profiles becomes the edge weight.
|
|
12
|
+
|
|
13
|
+
2. **Spectral Similarity Networks** — inter-regional similarity
|
|
14
|
+
based on spectral shape descriptors (HKS, WKS, GPS). This is
|
|
15
|
+
novel: it captures geometric similarity between regions rather
|
|
16
|
+
than just morphometric covariation.
|
|
17
|
+
|
|
18
|
+
Both approaches integrate with NetworkX for graph-theoretic analysis
|
|
19
|
+
(centrality, modularity, efficiency, small-worldness).
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
from __future__ import annotations
|
|
23
|
+
|
|
24
|
+
import logging
|
|
25
|
+
from typing import Dict, List, Optional, Tuple
|
|
26
|
+
|
|
27
|
+
import numpy as np
|
|
28
|
+
|
|
29
|
+
logger = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def morphometric_similarity_network(
    feature_matrix: np.ndarray,
    labels: np.ndarray,
    method: str = "pearson",
    fisher_z: bool = True,
) -> np.ndarray:
    """
    Construct a Morphometric Similarity Network (MSN).

    For each pair of ROIs (i, j), compute the correlation between
    their average morphometric feature profiles across vertices.

    Parameters
    ----------
    feature_matrix : np.ndarray, shape (N_vertices, N_features)
        Per-vertex feature values (columns: thickness, curv, sulc, …).
    labels : np.ndarray, shape (N_vertices,)
        Integer parcellation labels; values <= 0 (medial wall/unknown)
        are excluded.
    method : ``'pearson'`` or ``'spearman'``
        Correlation method.
    fisher_z : bool
        Apply Fisher z-transform (arctanh, with |r| clipped to 0.9999)
        to correlation values.

    Returns
    -------
    msn : np.ndarray, shape (R, R)
        Symmetric correlation matrix where R is the number of ROIs.
        Diagonal is zero.

    Raises
    ------
    ValueError
        If ``method`` is not recognised.
    """
    from scipy.stats import pearsonr, spearmanr

    # Validate up front and hoist the dispatch out of the O(R^2) loop.
    # Previously a bad `method` was only rejected once a pair was
    # reached, so it passed silently whenever there were < 2 ROIs.
    if method == "pearson":
        corr_fn = pearsonr
    elif method == "spearman":
        corr_fn = spearmanr
    else:
        raise ValueError(f"Unknown method: {method}")

    # Get unique ROI labels (exclude label <= 0 as medial wall/unknown)
    roi_labels = np.sort(np.unique(labels[labels > 0]))
    R = len(roi_labels)

    # Compute mean feature profile per ROI: shape (R, F)
    roi_profiles = np.zeros((R, feature_matrix.shape[1]), dtype=np.float64)
    for i, lab in enumerate(roi_labels):
        roi_profiles[i] = np.nanmean(feature_matrix[labels == lab], axis=0)

    # Pairwise correlation of ROI profiles (symmetric, zero diagonal).
    msn = np.zeros((R, R), dtype=np.float64)
    for i in range(R):
        for j in range(i + 1, R):
            r, _ = corr_fn(roi_profiles[i], roi_profiles[j])
            if fisher_z:
                # Clip so arctanh stays finite at |r| == 1.
                r = np.arctanh(np.clip(r, -0.9999, 0.9999))
            msn[i, j] = r
            msn[j, i] = r

    return msn
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def spectral_similarity_network(
    spectral_features: np.ndarray,
    labels: np.ndarray,
    metric: str = "cosine",
) -> np.ndarray:
    """
    Construct a similarity network from spectral shape descriptors.

    Each ROI's spectral profile is the mean HKS/WKS/GPS vector across
    its vertices. Inter-regional similarity is computed via cosine
    similarity, correlation, or Euclidean distance.

    Parameters
    ----------
    spectral_features : np.ndarray, shape (N_vertices, D)
        Per-vertex spectral descriptors (e.g. from ``spectral_feature_matrix``).
    labels : np.ndarray, shape (N_vertices,)
        Parcellation labels; values <= 0 are excluded.
    metric : ``'cosine'``, ``'correlation'``, ``'euclidean'``
        Similarity metric.

    Returns
    -------
    ssn : np.ndarray, shape (R, R)
        Symmetric similarity matrix with zero diagonal.

    Raises
    ------
    ValueError
        If ``metric`` is not recognised.
    """
    from scipy.spatial.distance import cdist

    roi_labels = np.sort(np.unique(labels[labels > 0]))
    n_rois = len(roi_labels)

    # Average the spectral descriptors within each parcel: (R, D).
    profiles = np.zeros((n_rois, spectral_features.shape[1]), dtype=np.float64)
    for row, lab in enumerate(roi_labels):
        profiles[row] = np.nanmean(spectral_features[labels == lab], axis=0)

    # cdist gives a distance; convert to a similarity per metric.
    if metric in ("cosine", "correlation"):
        # Both metrics: similarity = 1 - distance.
        ssn = 1.0 - cdist(profiles, profiles, metric=metric)
    elif metric == "euclidean":
        dist = cdist(profiles, profiles, metric="euclidean")
        # Gaussian-style conversion: exp(-d / median nonzero distance).
        scale = np.median(dist[dist > 0])
        ssn = np.exp(-dist / max(scale, 1e-8))
    else:
        raise ValueError(f"Unknown metric: {metric}")

    np.fill_diagonal(ssn, 0.0)  # no self-loops
    return ssn
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def graph_metrics(
    adjacency: np.ndarray,
    threshold: Optional[float] = None,
    density: Optional[float] = None,
) -> Dict[str, object]:
    """
    Compute standard graph-theoretic metrics from a similarity matrix.

    Parameters
    ----------
    adjacency : np.ndarray, shape (R, R)
        Symmetric similarity/weight matrix.
    threshold : float or None
        Hard threshold on edge weights.
    density : float or None
        Target graph density (proportion of edges to keep).
        Overrides ``threshold`` if both provided.

    Returns
    -------
    metrics : dict
        Keys include: ``degree``, ``clustering``, ``efficiency``,
        ``betweenness``, ``modularity``, ``strength``, ``assortativity``.
        Metrics that cannot be computed are set to ``np.nan``
        (``communities`` to ``[]``) rather than raising.

    Raises
    ------
    ImportError
        If NetworkX is not installed.
    """
    try:
        import networkx as nx
        from networkx.algorithms.community import greedy_modularity_communities
    except ImportError as err:
        # Chain the cause so the original missing-dependency traceback is
        # preserved (the old bare re-raise discarded it).
        raise ImportError("NetworkX is required for graph metrics.") from err

    W = adjacency.copy()

    # Density thresholding: keep the top `density` fraction of the
    # upper-triangle weights (takes precedence over `threshold`).
    if density is not None:
        flat = np.sort(W[np.triu_indices_from(W, k=1)])[::-1]
        n_edges = int(density * len(flat))
        if n_edges > 0 and n_edges <= len(flat):
            threshold = flat[n_edges - 1]

    if threshold is not None:
        W[W < threshold] = 0.0

    # Build NetworkX graph and drop zero/negative-weight edges left
    # behind by thresholding (from_numpy_array keeps them as edges).
    G = nx.from_numpy_array(W)
    zero_edges = [(u, v) for u, v, d in G.edges(data=True) if d["weight"] <= 0]
    G.remove_edges_from(zero_edges)

    result = {
        "n_nodes": G.number_of_nodes(),
        "n_edges": G.number_of_edges(),
        "density": nx.density(G),
        "strength": dict(G.degree(weight="weight")),
        "degree": dict(G.degree()),
        "clustering": nx.clustering(G, weight="weight"),
    }

    # Global efficiency (best-effort: may fail on degenerate graphs).
    try:
        result["global_efficiency"] = nx.global_efficiency(G)
    except Exception:
        result["global_efficiency"] = np.nan

    # Betweenness centrality (weights interpreted as distances by nx).
    result["betweenness"] = nx.betweenness_centrality(G, weight="weight")

    # Modularity via greedy agglomerative community detection.
    try:
        communities = list(greedy_modularity_communities(G, weight="weight"))
        result["modularity"] = nx.community.modularity(
            G, communities, weight="weight",
        )
        result["communities"] = communities
    except Exception:
        result["modularity"] = np.nan
        result["communities"] = []

    # Degree assortativity (undefined for some graphs — best-effort).
    try:
        result["assortativity"] = nx.degree_assortativity_coefficient(
            G, weight="weight",
        )
    except Exception:
        result["assortativity"] = np.nan

    return result
|