py-neuromodulation 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -34
- py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -106
- py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -119
- py_neuromodulation/__init__.py +80 -13
- py_neuromodulation/{nm_RMAP.py → analysis/RMAP.py} +496 -531
- py_neuromodulation/analysis/__init__.py +4 -0
- py_neuromodulation/{nm_decode.py → analysis/decode.py} +918 -992
- py_neuromodulation/{nm_analysis.py → analysis/feature_reader.py} +994 -1074
- py_neuromodulation/{nm_plots.py → analysis/plots.py} +627 -612
- py_neuromodulation/{nm_stats.py → analysis/stats.py} +458 -480
- py_neuromodulation/data/README +6 -6
- py_neuromodulation/data/dataset_description.json +8 -8
- py_neuromodulation/data/participants.json +32 -32
- py_neuromodulation/data/participants.tsv +2 -2
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -5
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -18
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -35
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -13
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -2
- py_neuromodulation/default_settings.yaml +241 -0
- py_neuromodulation/features/__init__.py +31 -0
- py_neuromodulation/features/bandpower.py +165 -0
- py_neuromodulation/features/bispectra.py +157 -0
- py_neuromodulation/features/bursts.py +297 -0
- py_neuromodulation/features/coherence.py +255 -0
- py_neuromodulation/features/feature_processor.py +121 -0
- py_neuromodulation/features/fooof.py +142 -0
- py_neuromodulation/features/hjorth_raw.py +57 -0
- py_neuromodulation/features/linelength.py +21 -0
- py_neuromodulation/features/mne_connectivity.py +148 -0
- py_neuromodulation/features/nolds.py +94 -0
- py_neuromodulation/features/oscillatory.py +249 -0
- py_neuromodulation/features/sharpwaves.py +432 -0
- py_neuromodulation/filter/__init__.py +3 -0
- py_neuromodulation/filter/kalman_filter.py +67 -0
- py_neuromodulation/filter/kalman_filter_external.py +1890 -0
- py_neuromodulation/filter/mne_filter.py +128 -0
- py_neuromodulation/filter/notch_filter.py +93 -0
- py_neuromodulation/grid_cortex.tsv +40 -40
- py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
- py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
- py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
- py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/processing/__init__.py +10 -0
- py_neuromodulation/{nm_artifacts.py → processing/artifacts.py} +29 -25
- py_neuromodulation/processing/data_preprocessor.py +77 -0
- py_neuromodulation/processing/filter_preprocessing.py +78 -0
- py_neuromodulation/processing/normalization.py +175 -0
- py_neuromodulation/{nm_projection.py → processing/projection.py} +370 -394
- py_neuromodulation/{nm_rereference.py → processing/rereference.py} +97 -95
- py_neuromodulation/{nm_resample.py → processing/resample.py} +56 -50
- py_neuromodulation/stream/__init__.py +3 -0
- py_neuromodulation/stream/data_processor.py +325 -0
- py_neuromodulation/stream/generator.py +53 -0
- py_neuromodulation/stream/mnelsl_player.py +94 -0
- py_neuromodulation/stream/mnelsl_stream.py +120 -0
- py_neuromodulation/stream/settings.py +292 -0
- py_neuromodulation/stream/stream.py +427 -0
- py_neuromodulation/utils/__init__.py +2 -0
- py_neuromodulation/{nm_define_nmchannels.py → utils/channels.py} +305 -302
- py_neuromodulation/utils/database.py +149 -0
- py_neuromodulation/utils/io.py +378 -0
- py_neuromodulation/utils/keyboard.py +52 -0
- py_neuromodulation/utils/logging.py +66 -0
- py_neuromodulation/utils/types.py +251 -0
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/METADATA +28 -33
- py_neuromodulation-0.0.6.dist-info/RECORD +89 -0
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/WHEEL +1 -1
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/licenses/LICENSE +21 -21
- py_neuromodulation/FieldTrip.py +0 -589
- py_neuromodulation/_write_example_dataset_helper.py +0 -65
- py_neuromodulation/nm_EpochStream.py +0 -92
- py_neuromodulation/nm_IO.py +0 -417
- py_neuromodulation/nm_across_patient_decoding.py +0 -927
- py_neuromodulation/nm_bispectra.py +0 -168
- py_neuromodulation/nm_bursts.py +0 -198
- py_neuromodulation/nm_coherence.py +0 -205
- py_neuromodulation/nm_cohortwrapper.py +0 -435
- py_neuromodulation/nm_eval_timing.py +0 -239
- py_neuromodulation/nm_features.py +0 -116
- py_neuromodulation/nm_features_abc.py +0 -39
- py_neuromodulation/nm_filter.py +0 -219
- py_neuromodulation/nm_filter_preprocessing.py +0 -91
- py_neuromodulation/nm_fooof.py +0 -159
- py_neuromodulation/nm_generator.py +0 -37
- py_neuromodulation/nm_hjorth_raw.py +0 -73
- py_neuromodulation/nm_kalmanfilter.py +0 -58
- py_neuromodulation/nm_linelength.py +0 -33
- py_neuromodulation/nm_mne_connectivity.py +0 -112
- py_neuromodulation/nm_nolds.py +0 -93
- py_neuromodulation/nm_normalization.py +0 -214
- py_neuromodulation/nm_oscillatory.py +0 -448
- py_neuromodulation/nm_run_analysis.py +0 -435
- py_neuromodulation/nm_settings.json +0 -338
- py_neuromodulation/nm_settings.py +0 -68
- py_neuromodulation/nm_sharpwaves.py +0 -401
- py_neuromodulation/nm_stream_abc.py +0 -218
- py_neuromodulation/nm_stream_offline.py +0 -359
- py_neuromodulation/utils/_logging.py +0 -24
- py_neuromodulation-0.0.4.dist-info/RECORD +0 -72
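
The renames above fold the old flat `nm_*` modules into sub-packages (`analysis`, `features`, `filter`, `processing`, `stream`, `utils`). As a rough orientation only, the import paths change along the following lines; the class names are taken from the hunks below, and the 0.0.4 imports are shown as comments for comparison:

```python
# 0.0.4: flat modules with an nm_ prefix, e.g.
# from py_neuromodulation import nm_rereference, nm_resample, nm_generator

# 0.0.6: functionality grouped into sub-packages
from py_neuromodulation.processing.rereference import ReReferencer
from py_neuromodulation.processing.resample import Resampler
from py_neuromodulation.stream.data_processor import DataProcessor
from py_neuromodulation.stream.generator import RawDataGenerator
```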

```diff
--- py_neuromodulation/nm_rereference.py
+++ py_neuromodulation/processing/rereference.py
@@ -1,95 +1,97 @@
+"""Re-referencing Module."""
+
+import numpy as np
+import pandas as pd
+
+from py_neuromodulation.utils.types import NMPreprocessor
+
+
+class ReReferencer(NMPreprocessor):
+    def __init__(
+        self,
+        sfreq: float,
+        channels: pd.DataFrame,
+    ) -> None:
+        """Initialize real-time rereference information.
+
+        Parameters
+        ----------
+        sfreq : float
+            Sampling frequency. Is not used, only kept for compatibility.
+        channels : Pandas DataFrame
+            Dataframe containing information about rereferencing, as
+            specified in channels.csv.
+
+
+        Raises:
+            ValueError: rereferencing using undefined channel
+            ValueError: rereferencing to same channel
+        """
+
+        self.ref_matrix: np.ndarray | None
+
+        channels = channels[channels["used"] == 1].reset_index(drop=True)
+        # (channels_used,) = np.where((channels.used == 1))
+
+        ch_names = channels["name"].tolist()
+
+        # no re-referencing is being performed when there is a single channel present only
+        if channels.shape[0] in (0, 1):
+            self.ref_matrix = None
+            return
+
+        ch_types = channels["type"]
+        refs = channels["rereference"]
+
+        type_map = {}
+        for ch_type in ch_types.unique():
+            type_map[ch_type] = np.where(
+                (ch_types == ch_type) & (channels["status"] == "good")
+            )[0]
+
+        ref_matrix = np.zeros((len(channels), len(channels)))
+        for ind in range(len(channels)):
+            ref_matrix[ind, ind] = 1
+            # if ind not in channels_used:
+            #     continue
+            ref = refs[ind]
+            if ref.lower() == "none" or pd.isnull(ref):
+                ref_idx = None
+                continue
+            if ref.lower() == "average":
+                ch_type = ch_types[ind]
+                ref_idx = type_map[ch_type][type_map[ch_type] != ind]
+            else:
+                ref_idx = []
+                ref_channels = ref.split("&")
+                for ref_chan in ref_channels:
+                    if ref_chan not in ch_names:
+                        raise ValueError(
+                            "One or more of the reference channels are not"
+                            " part of the recording channels. First missing"
+                            f" channel: {ref_chan}."
+                        )
+                    if ref_chan == ch_names[ind]:
+                        raise ValueError(
+                            "You cannot rereference to the same channel."
+                            f" Channel: {ref_chan}."
+                        )
+                    ref_idx.append(ch_names.index(ref_chan))
+            ref_matrix[ind, ref_idx] = -1 / len(ref_idx)
+        self.ref_matrix = ref_matrix
+
+    def process(self, data: np.ndarray) -> np.ndarray:
+        """Rereference data according to the initialized ReReferencer class.
+
+        Args:
+            data (numpy ndarray) :
+                shape(n_channels, n_samples) - data to be rereferenced.
+
+        Returns:
+            reref_data (numpy ndarray):
+                shape(n_channels, n_samples) - rereferenced data
+        """
+        if self.ref_matrix is not None:
+            return self.ref_matrix @ data
+        else:
+            return data
```
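
For orientation, a minimal usage sketch of the new `ReReferencer` based only on the constructor and `process` signatures above; the channel table is made up for illustration and contains just the columns the class reads (`name`, `used`, `type`, `status`, `rereference`):

```python
import numpy as np
import pandas as pd

from py_neuromodulation.processing.rereference import ReReferencer

# Illustrative channel table: two ECoG channels re-referenced to their common
# average, two LFP channels re-referenced against each other.
channels = pd.DataFrame(
    {
        "name": ["ECOG_L_1", "ECOG_L_2", "LFP_L_1", "LFP_L_2"],
        "used": [1, 1, 1, 1],
        "type": ["ECOG", "ECOG", "LFP", "LFP"],
        "status": ["good", "good", "good", "good"],
        "rereference": ["average", "average", "LFP_L_2", "LFP_L_1"],
    }
)

rereferencer = ReReferencer(sfreq=1000, channels=channels)

data = np.random.randn(4, 1000)          # shape (n_channels, n_samples)
reref_data = rereferencer.process(data)  # applies ref_matrix @ data
```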

```diff
--- py_neuromodulation/nm_resample.py
+++ py_neuromodulation/processing/resample.py
@@ -1,50 +1,56 @@
+"""Module for resampling."""
+
+import numpy as np
+from py_neuromodulation.utils.types import NMBaseModel, Field, NMPreprocessor
+
+
+class ResamplerSettings(NMBaseModel):
+    resample_freq_hz: float = Field(default=1000, gt=0)
+
+
+class Resampler(NMPreprocessor):
+    """Resample data.
+
+    Parameters
+    ----------
+    sfreq : float
+        Original sampling frequency.
+
+    Attributes
+    ----------
+    up: float
+        Factor to upsample by.
+    """
+
+    def __init__(
+        self,
+        sfreq: float,
+        resample_freq_hz: float,
+    ) -> None:
+        self.settings = ResamplerSettings(resample_freq_hz=resample_freq_hz)
+
+        ratio = float(resample_freq_hz / sfreq)
+        if ratio == 1.0:
+            self.up = 0.0
+        else:
+            self.up = ratio
+
+    def process(self, data: np.ndarray) -> np.ndarray:
+        """Resample raw data using mne.filter.resample.
+
+        Parameters
+        ----------
+        data : np.ndarray
+            Data to resample
+
+        Returns
+        -------
+        np.ndarray
+            Resampled data
+        """
+        if not self.up:
+            return data
+
+        from mne.filter import resample
+
+        return resample(data.astype(np.float64), up=self.up, down=1.0)
```
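
Likewise, a small sketch of the `Resampler` above; the frequencies are arbitrary, and `process` requires `mne` since it imports `mne.filter.resample` lazily:

```python
import numpy as np

from py_neuromodulation.processing.resample import Resampler

# Downsample 4000 Hz raw data to 1000 Hz: ratio = 1000 / 4000 = 0.25,
# which is forwarded to mne.filter.resample as the `up` factor.
resampler = Resampler(sfreq=4000, resample_freq_hz=1000)

data = np.random.randn(2, 4000)      # 1 s of data, shape (n_channels, n_samples)
resampled = resampler.process(data)  # -> shape (2, 1000)

# With matching frequencies the data is passed through unchanged (up == 0.0).
unchanged = Resampler(sfreq=1000, resample_freq_hz=1000).process(data)
```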

```diff
--- /dev/null
+++ py_neuromodulation/stream/data_processor.py
@@ -0,0 +1,325 @@
+"""This module contains the class to process a given batch of data."""
+
+from time import time
+from typing import TYPE_CHECKING
+import numpy as np
+
+from py_neuromodulation import logger
+from py_neuromodulation.utils.types import _PathLike
+from py_neuromodulation.features import FeatureProcessors
+from py_neuromodulation.utils import io
+from py_neuromodulation.stream.settings import NMSettings
+
+if TYPE_CHECKING:
+    from py_neuromodulation.processing.projection import Projection
+    import pandas as pd
+
+
+class DataProcessor:
+    def __init__(
+        self,
+        sfreq: float,
+        settings: NMSettings | _PathLike,
+        channels: "pd.DataFrame | _PathLike",
+        coord_names: list | None = None,
+        coord_list: list | None = None,
+        line_noise: float | None = None,
+        path_grids: _PathLike | None = None,
+        verbose: bool = True,
+    ) -> None:
+        from py_neuromodulation.processing import DataPreprocessor
+
+        """Initialize run class.
+
+        Parameters
+        ----------
+        settings : settings.NMSettings object
+        channels : pd.DataFrame | _PathLike
+            Initialized pd.DataFrame with channel specific information.
+            The path to a channels.csv can be also passed.
+        coord_names : list | None
+            list of coordinate names
+        coord_list : list | None
+            list of list of 3D coordinates
+        path_grids : _PathLike | None
+            List to grid_cortex.tsv and grid_subcortex.tsv for grid point projection
+        verbose : boolean
+            if True, log signal processed and computation time
+        """
+
+        self.settings = NMSettings.load(settings)
+        self.channels = io.load_channels(channels)
+
+        self.sfreq_features: float = self.settings.sampling_rate_features_hz
+        self._sfreq_raw_orig: float = sfreq
+        self.sfreq_raw: float = sfreq // 1
+        self.line_noise: float | None = line_noise
+        self.path_grids: _PathLike | None = path_grids
+        self.verbose: bool = verbose
+
+        self.features_previous = None
+
+        (self.ch_names_used, _, self.feature_idx, _) = self._get_ch_info()
+
+        self.preprocessors = DataPreprocessor(
+            settings=self.settings,
+            channels=self.channels,
+            sfreq=self.sfreq_raw,
+            line_noise=self.line_noise,
+        )
+
+        if self.settings.postprocessing.feature_normalization:
+            from py_neuromodulation.processing.normalization import FeatureNormalizer
+
+            self.feature_normalizer = FeatureNormalizer(self.settings)
+
+        self.features = FeatureProcessors(
+            settings=self.settings,
+            ch_names=self.ch_names_used,
+            sfreq=self.sfreq_raw,
+        )
+
+        if coord_list is not None and coord_names is not None:
+            self.coords = self._set_coords(
+                coord_names=coord_names, coord_list=coord_list
+            )
+
+        self.projection = self._get_projection(self.settings, self.channels)
+
+        self.cnt_samples = 0
+
+    @staticmethod
+    def _add_coordinates(coord_names: list[str], coord_list: list) -> dict:
+        """Write cortical and subcortical coordinate information in joint dictionary
+
+        Parameters
+        ----------
+        coord_names : list[str]
+            list of coordinate names
+        coord_list : list
+            list of list of 3D coordinates
+
+        Returns
+        -------
+        dict with (sub)cortex_left and (sub)cortex_right ch_names and positions
+        """
+
+        def is_left_coord(val: float, coord_region: str) -> bool:
+            if coord_region.split("_")[1] == "left":
+                return val < 0
+            return val > 0
+
+        coords: dict[str, dict[str, list | np.ndarray]] = {}
+
+        for coord_region in [
+            coord_loc + "_" + lat
+            for coord_loc in ["cortex", "subcortex"]
+            for lat in ["left", "right"]
+        ]:
+            coords[coord_region] = {}
+
+            ch_type = "ECOG" if "cortex" == coord_region.split("_")[0] else "LFP"
+
+            coords[coord_region]["ch_names"] = [
+                coord_name
+                for coord_name, ch in zip(coord_names, coord_list)
+                if is_left_coord(ch[0], coord_region) and (ch_type in coord_name)
+            ]
+
+            # multiply by 1000 to get m instead of mm
+            positions = []
+            for coord, coord_name in zip(coord_list, coord_names):
+                if is_left_coord(coord[0], coord_region) and (ch_type in coord_name):
+                    positions.append(coord)
+            coords[coord_region]["positions"] = (
+                np.array(positions, dtype=np.float64) * 1000
+            )
+
+        return coords
+
+    def _get_ch_info(
+        self,
+    ) -> tuple[list[str], list[str], list[int], np.ndarray]:
+        """Get used feature and label info from channels"""
+        channels = self.channels
+        ch_names_used = channels[channels["used"] == 1]["new_name"].tolist()
+        ch_types_used = channels[channels["used"] == 1]["type"].tolist()
+
+        # used channels for feature estimation
+        feature_idx = np.where(channels["used"] & ~channels["target"])[0].tolist()
+
+        # If multiple targets exist, select only the first
+        label_idx = np.where(channels["target"] == 1)[0]
+
+        return ch_names_used, ch_types_used, feature_idx, label_idx
+
+    @staticmethod
+    def _get_grids(
+        settings: "NMSettings",
+        path_grids: _PathLike | None,
+    ) -> "tuple[pd.DataFrame | None, pd.DataFrame | None]":
+        """Read settings specified grids
+
+        Parameters
+        ----------
+        settings : settings.NMSettings object
+        path_grids : _PathLike | str
+
+        Returns
+        -------
+        Tuple
+            grid_cortex, grid_subcortex,
+            might be None if not specified in settings
+        """
+        if settings.postprocessing.project_cortex:
+            grid_cortex = io.read_grid(path_grids, "cortex")
+        else:
+            grid_cortex = None
+        if settings.postprocessing.project_subcortex:
+            grid_subcortex = io.read_grid(path_grids, "subcortex")
+        else:
+            grid_subcortex = None
+        return grid_cortex, grid_subcortex
+
+    def _get_projection(
+        self, settings: "NMSettings", channels: "pd.DataFrame"
+    ) -> "Projection | None":
+        from py_neuromodulation.processing.projection import Projection
+
+        """Return projection of used coordinated and grids"""
+
+        if not any(
+            (
+                settings.postprocessing.project_cortex,
+                settings.postprocessing.project_subcortex,
+            )
+        ):
+            return None
+
+        grid_cortex, grid_subcortex = self._get_grids(self.settings, self.path_grids)
+        projection = Projection(
+            settings=settings,
+            grid_cortex=grid_cortex,
+            grid_subcortex=grid_subcortex,
+            coords=self.coords,
+            channels=channels,
+            plot_projection=False,
+        )
+        return projection
+
+    def _set_coords(
+        self, coord_names: list[str] | None, coord_list: list | None
+    ) -> dict:
+        if not any(
+            (
+                self.settings.postprocessing.project_cortex,
+                self.settings.postprocessing.project_subcortex,
+            )
+        ):
+            return {}
+
+        if any((coord_list is None, coord_names is None)):
+            raise ValueError(
+                "No coordinates could be loaded. Please provide coord_list and"
+                f" coord_names. Got: {coord_list=}, {coord_names=}."
+            )
+
+        return self._add_coordinates(
+            coord_names=coord_names,
+            coord_list=coord_list,  # type: ignore # None case handled above
+        )
+
+    def process(self, data: np.ndarray) -> dict[str, float]:
+        """Given a new data batch, calculate and return features.
+
+        Parameters
+        ----------
+        data : np.ndarray
+            Current batch of raw data
+
+        Returns
+        -------
+        pandas Series
+            Features calculated from current data
+        """
+        start_time = time()
+
+        nan_channels = np.isnan(data).any(axis=1)
+
+        data = np.nan_to_num(data)[self.feature_idx, :]
+
+        data = self.preprocessors.process_data(data)
+
+        # calculate features
+        features_dict = self.features.estimate_features(data)
+
+        # normalize features
+        if self.settings.postprocessing.feature_normalization:
+            normed_features = self.feature_normalizer.process(
+                np.fromiter(features_dict.values(), dtype=np.float64)
+            )
+            features_dict = {
+                key: normed_features[idx]
+                for idx, key in enumerate(features_dict.keys())
+            }
+
+        # project features to grid
+        if self.projection:
+            self.projection.project_features(features_dict)
+
+        # check for all features, where the channel had a NaN, that the feature is also put to NaN
+        if nan_channels.sum() > 0:
+            # TONI: no need to do this if we store both old and new names for the channels
+            new_nan_channels = []
+            for ch in list(np.array(self.ch_names_used)[nan_channels]):
+                for key in features_dict.keys():
+                    if ch in key:
+                        new_nan_channels.append(key)
+
+            for ch in new_nan_channels:
+                features_dict[ch] = np.nan
+
+        if self.verbose:
+            logger.info("Last batch took: %.3f seconds to process", time() - start_time)
+
+        return features_dict
+
+    def save_sidecar(
+        self,
+        out_dir: _PathLike,
+        prefix: str = "",
+        additional_args: dict | None = None,
+    ) -> None:
+        """Save sidecar incuding fs, coords, sess_right to out_dir."""
+
+        sidecar: dict = {
+            "original_fs": self._sfreq_raw_orig,
+            "final_fs": self.sfreq_raw,
+            "sfreq": self.sfreq_features,
+        }
+        if self.projection:
+            sidecar["coords"] = self.projection.coords
+            if self.settings.postprocessing.project_cortex:
+                sidecar["grid_cortex"] = self.projection.grid_cortex
+                sidecar["proj_matrix_cortex"] = self.projection.proj_matrix_cortex
+            if self.settings.postprocessing.project_subcortex:
+                sidecar["grid_subcortex"] = self.projection.grid_subcortex
+                sidecar["proj_matrix_subcortex"] = self.projection.proj_matrix_subcortex
+        if additional_args is not None:
+            sidecar = sidecar | additional_args
+
+        io.save_sidecar(sidecar, out_dir, prefix)
+
+    def save_settings(self, out_dir: _PathLike, prefix: str = "") -> None:
+        self.settings.save(out_dir, prefix)
+
+    def save_channels(self, out_dir: _PathLike, prefix: str) -> None:
+        io.save_channels(self.channels, out_dir, prefix)
+
+    def save_features(
+        self,
+        feature_arr: "pd.DataFrame",
+        out_dir: _PathLike = "",
+        prefix: str = "",
+    ) -> None:
+        io.save_features(feature_arr, out_dir, prefix)
```
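
A rough sketch of driving `DataProcessor` offline; the settings and channels paths are hypothetical placeholders (the wheel ships a `default_settings.yaml`, and `channels` can also be an already constructed DataFrame):

```python
import numpy as np

from py_neuromodulation.stream.data_processor import DataProcessor

# Hypothetical inputs: a settings YAML and a channels table with the columns
# referenced above (name, new_name, used, target, type, status, rereference).
processor = DataProcessor(
    sfreq=1000,
    settings="default_settings.yaml",
    channels="channels.csv",
    line_noise=50,
)

batch = np.random.randn(8, 1000)     # one raw batch, shape (n_channels, n_samples)
features = processor.process(batch)  # dict of {feature_name: value}

processor.save_settings(out_dir=".")  # persist the resolved settings
```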

```diff
--- /dev/null
+++ py_neuromodulation/stream/generator.py
@@ -0,0 +1,53 @@
+import numpy as np
+
+
+class RawDataGenerator:
+    """
+    This generator function mimics online data acquisition.
+    The data are iteratively sampled with settings.sampling_rate_features_hz
+    """
+
+    def __init__(
+        self,
+        data: np.ndarray,
+        sfreq: float,
+        sampling_rate_features_hz: float,
+        segment_length_features_ms: float,
+    ) -> None:
+        """
+        Arguments
+        ---------
+        data (np array): shape (channels, time)
+        settings (settings.NMSettings): settings object
+        sfreq (float): sampling frequency of the data
+
+        Returns
+        -------
+        np.array: 1D array of time stamps
+        np.array: new batch for run function of full segment length shape
+        """
+        self.batch_counter: int = 0  # counter for the batches
+
+        self.data = data
+        self.sfreq = sfreq
+        # Width, in data points, of the moving window used to calculate features
+        self.segment_length = segment_length_features_ms / 1000 * sfreq
+        # Ratio of the sampling frequency of the input data to the sampling frequency
+        self.stride = sfreq / sampling_rate_features_hz
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        start = self.stride * self.batch_counter
+        end = start + self.segment_length
+
+        self.batch_counter += 1
+
+        start_idx = int(start)
+        end_idx = int(end)
+
+        if end_idx > self.data.shape[1]:
+            raise StopIteration
+
+        return np.arange(start, end) / self.sfreq, self.data[:, start_idx:end_idx]
```
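
Finally, a short sketch of iterating `RawDataGenerator`; with the values below each batch spans a 1000-sample window and consecutive batches advance by `sfreq / sampling_rate_features_hz = 100` samples:

```python
import numpy as np

from py_neuromodulation.stream.generator import RawDataGenerator

data = np.random.randn(4, 10_000)    # shape (channels, time): 10 s at 1000 Hz

gen = RawDataGenerator(
    data=data,
    sfreq=1000,
    sampling_rate_features_hz=10,     # stride of 100 samples between batches
    segment_length_features_ms=1000,  # 1000-sample window per batch
)

for timestamps, batch in gen:
    # timestamps is in seconds; batch has shape (4, 1000). Iteration stops
    # once the next window would run past the end of the recording.
    pass
```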