py-neuromodulation 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -34
- py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -106
- py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -119
- py_neuromodulation/__init__.py +80 -13
- py_neuromodulation/{nm_RMAP.py → analysis/RMAP.py} +496 -531
- py_neuromodulation/analysis/__init__.py +4 -0
- py_neuromodulation/{nm_decode.py → analysis/decode.py} +918 -992
- py_neuromodulation/{nm_analysis.py → analysis/feature_reader.py} +994 -1074
- py_neuromodulation/{nm_plots.py → analysis/plots.py} +627 -612
- py_neuromodulation/{nm_stats.py → analysis/stats.py} +458 -480
- py_neuromodulation/data/README +6 -6
- py_neuromodulation/data/dataset_description.json +8 -8
- py_neuromodulation/data/participants.json +32 -32
- py_neuromodulation/data/participants.tsv +2 -2
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -5
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -18
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -35
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -13
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -2
- py_neuromodulation/default_settings.yaml +241 -0
- py_neuromodulation/features/__init__.py +31 -0
- py_neuromodulation/features/bandpower.py +165 -0
- py_neuromodulation/features/bispectra.py +157 -0
- py_neuromodulation/features/bursts.py +297 -0
- py_neuromodulation/features/coherence.py +255 -0
- py_neuromodulation/features/feature_processor.py +121 -0
- py_neuromodulation/features/fooof.py +142 -0
- py_neuromodulation/features/hjorth_raw.py +57 -0
- py_neuromodulation/features/linelength.py +21 -0
- py_neuromodulation/features/mne_connectivity.py +148 -0
- py_neuromodulation/features/nolds.py +94 -0
- py_neuromodulation/features/oscillatory.py +249 -0
- py_neuromodulation/features/sharpwaves.py +432 -0
- py_neuromodulation/filter/__init__.py +3 -0
- py_neuromodulation/filter/kalman_filter.py +67 -0
- py_neuromodulation/filter/kalman_filter_external.py +1890 -0
- py_neuromodulation/filter/mne_filter.py +128 -0
- py_neuromodulation/filter/notch_filter.py +93 -0
- py_neuromodulation/grid_cortex.tsv +40 -40
- py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
- py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
- py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
- py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/processing/__init__.py +10 -0
- py_neuromodulation/{nm_artifacts.py → processing/artifacts.py} +29 -25
- py_neuromodulation/processing/data_preprocessor.py +77 -0
- py_neuromodulation/processing/filter_preprocessing.py +78 -0
- py_neuromodulation/processing/normalization.py +175 -0
- py_neuromodulation/{nm_projection.py → processing/projection.py} +370 -394
- py_neuromodulation/{nm_rereference.py → processing/rereference.py} +97 -95
- py_neuromodulation/{nm_resample.py → processing/resample.py} +56 -50
- py_neuromodulation/stream/__init__.py +3 -0
- py_neuromodulation/stream/data_processor.py +325 -0
- py_neuromodulation/stream/generator.py +53 -0
- py_neuromodulation/stream/mnelsl_player.py +94 -0
- py_neuromodulation/stream/mnelsl_stream.py +120 -0
- py_neuromodulation/stream/settings.py +292 -0
- py_neuromodulation/stream/stream.py +427 -0
- py_neuromodulation/utils/__init__.py +2 -0
- py_neuromodulation/{nm_define_nmchannels.py → utils/channels.py} +305 -302
- py_neuromodulation/utils/database.py +149 -0
- py_neuromodulation/utils/io.py +378 -0
- py_neuromodulation/utils/keyboard.py +52 -0
- py_neuromodulation/utils/logging.py +66 -0
- py_neuromodulation/utils/types.py +251 -0
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/METADATA +28 -33
- py_neuromodulation-0.0.6.dist-info/RECORD +89 -0
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/WHEEL +1 -1
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/licenses/LICENSE +21 -21
- py_neuromodulation/FieldTrip.py +0 -589
- py_neuromodulation/_write_example_dataset_helper.py +0 -65
- py_neuromodulation/nm_EpochStream.py +0 -92
- py_neuromodulation/nm_IO.py +0 -417
- py_neuromodulation/nm_across_patient_decoding.py +0 -927
- py_neuromodulation/nm_bispectra.py +0 -168
- py_neuromodulation/nm_bursts.py +0 -198
- py_neuromodulation/nm_coherence.py +0 -205
- py_neuromodulation/nm_cohortwrapper.py +0 -435
- py_neuromodulation/nm_eval_timing.py +0 -239
- py_neuromodulation/nm_features.py +0 -116
- py_neuromodulation/nm_features_abc.py +0 -39
- py_neuromodulation/nm_filter.py +0 -219
- py_neuromodulation/nm_filter_preprocessing.py +0 -91
- py_neuromodulation/nm_fooof.py +0 -159
- py_neuromodulation/nm_generator.py +0 -37
- py_neuromodulation/nm_hjorth_raw.py +0 -73
- py_neuromodulation/nm_kalmanfilter.py +0 -58
- py_neuromodulation/nm_linelength.py +0 -33
- py_neuromodulation/nm_mne_connectivity.py +0 -112
- py_neuromodulation/nm_nolds.py +0 -93
- py_neuromodulation/nm_normalization.py +0 -214
- py_neuromodulation/nm_oscillatory.py +0 -448
- py_neuromodulation/nm_run_analysis.py +0 -435
- py_neuromodulation/nm_settings.json +0 -338
- py_neuromodulation/nm_settings.py +0 -68
- py_neuromodulation/nm_sharpwaves.py +0 -401
- py_neuromodulation/nm_stream_abc.py +0 -218
- py_neuromodulation/nm_stream_offline.py +0 -359
- py_neuromodulation/utils/_logging.py +0 -24
- py_neuromodulation-0.0.4.dist-info/RECORD +0 -72
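The headline change in 0.0.6 is a restructuring of the flat nm_* modules into analysis, features, filter, processing, stream, and utils subpackages, as the rename entries above show. A minimal sketch of how import paths map across the rename (only the module paths are confirmed by this diff; whether 0.0.4-style imports keep working through re-exports in the expanded __init__.py is not):

# 0.0.4 — flat modules:
#   from py_neuromodulation import nm_analysis, nm_decode, nm_stats
# 0.0.6 — namespaced subpackages (module paths taken from the rename list above):
from py_neuromodulation.analysis import feature_reader, decode, stats
from py_neuromodulation.processing import rereference, resample
from py_neuromodulation.utils import channels, io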
py_neuromodulation/nm_EpochStream.py
DELETED

@@ -1,92 +0,0 @@
-import os
-import pickle
-
-import numpy as np
-import pandas as pd
-
-import py_neuromodulation as nm
-from py_neuromodulation import nm_generator
-
-
-class EpochStream(nm.nm_stream.PNStream):
-    def __init__(self) -> None:
-        super().__init__()
-
-    def read_epoch_data(self, path_epoch) -> None:
-        """Read npy array of epochs. Shape is assumed to be (samples, channels, time)
-
-        Parameters
-        ----------
-        path_epoch : str
-        """
-        self.data = np.load(path_epoch)
-
-    def get_data(
-        self,
-    ) -> np.array:
-        """This data generator returns one epoch at a time.
-        Data will thus be analyzed in steps of the epoch size
-
-        Returns
-        -------
-        np.array
-            _description_
-
-        Yields
-        ------
-        Iterator[np.array]
-            _description_
-        """
-        for n_batch in range(self.data.shape[0]):
-            yield self.data[n_batch, :, :]
-
-    def run(
-        self,
-    ):
-        self._set_run()
-        # shape is n, channels=7, 800 Hz
-
-        self.feature_arr = pd.DataFrame()
-        self.feature_arr_list = []
-        epoch_gen = self.get_data()
-        idx_epoch = 0
-
-        while True:
-            data = next(
-                epoch_gen, None
-            )  # None will be returned if generator ran through
-            if data is None:
-                break
-            gen = nm_generator.raw_data_generator(
-                data, self.settings, self.sfreq
-            )
-
-            def get_data_within_epoch() -> np.array:
-                return next(gen, None)
-
-            idx_within_epoch = 0
-            while True:
-                data_within_epoch = get_data_within_epoch()
-                if data_within_epoch is None:
-                    break
-
-                feature_series = self.run_analysis.process_data(
-                    data_within_epoch
-                )
-                if idx_within_epoch == 0:
-                    self.feature_arr = pd.DataFrame([feature_series])
-                    idx_within_epoch += 1
-                else:
-                    self.feature_arr = self.feature_arr.append(
-                        feature_series, ignore_index=True
-                    )
-            self.feature_arr_list.append(self.feature_arr)
-
-    def _add_timestamp(
-        self, feature_series: pd.Series, idx: int = None
-    ) -> pd.Series:
-        # in case of epochs no timestamp is necessary
-        return feature_series
-
-    def _add_coordinates(self) -> None:
-        pass
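Note that the deleted EpochStream.run grew its feature table with pd.DataFrame.append, which was deprecated in pandas 1.4 and removed in pandas 2.0, so this class had already stopped working on current pandas. A minimal sketch of the modern pattern for that accumulation loop (the features_per_window list is a hypothetical stand-in for the per-window feature Series produced by the processing loop; it is not part of 0.0.6):

import pandas as pd

# Hypothetical stand-in for the feature Series produced per processed window.
features_per_window = [pd.Series({"f1": 0.1, "f2": 0.2}) for _ in range(3)]

# Collect rows in a plain list and build the DataFrame once at the end,
# instead of growing it row by row with the removed DataFrame.append().
feature_arr = pd.DataFrame(features_per_window)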
py_neuromodulation/nm_IO.py
DELETED

@@ -1,417 +0,0 @@
-import json
-import os
-import sys
-from pathlib import Path
-import logging
-
-logger = logging.getLogger("PynmLogger")
-
-
-import mne
-import mne_bids
-import numpy as np
-import pandas as pd
-from scipy import io
-
-import pyarrow
-from pyarrow import csv
-
-import py_neuromodulation
-
-_PathLike = str | os.PathLike
-
-
-def load_nm_channels(
-    nm_channels: pd.DataFrame | _PathLike,
-) -> pd.DataFrame:
-    """Read nm_channels from path or specify via BIDS arguments.
-    Nexessary parameters are then
-    ch_names (list),
-    ch_types (list),
-    bads (list)
-    used_types (list)
-    target_keywords (list)
-    reference Union[list, str]
-    """
-
-    if isinstance(nm_channels, pd.DataFrame):
-        nm_ch_return = nm_channels
-    elif nm_channels:
-        if not os.path.isfile(nm_channels):
-            raise ValueError(
-                "PATH_NM_CHANNELS is not a valid file. Got: " f"{nm_channels}"
-            )
-        nm_ch_return = pd.read_csv(nm_channels)
-
-    return nm_ch_return
-
-
-def read_BIDS_data(
-    PATH_RUN: _PathLike | mne_bids.BIDSPath,
-    BIDS_PATH: _PathLike | None = None,
-    datatype: str = "ieeg",
-    line_noise: int = 50,
-) -> tuple[mne.io.Raw, np.ndarray, int | float, int, list | None, list | None]:
-    """Given a run path and bids data path, read the respective data
-
-    Parameters
-    ----------
-    PATH_RUN : string
-    BIDS_PATH : string
-    datatype : string
-
-    Returns
-    -------
-    raw_arr : mne.io.RawArray
-    raw_arr_data : np.ndarray
-    fs : int
-    line_noise : int
-    """
-    if isinstance(PATH_RUN, mne_bids.BIDSPath):
-        bids_path = PATH_RUN
-    else:
-        bids_path = mne_bids.get_bids_path_from_fname(PATH_RUN)
-
-    raw_arr = mne_bids.read_raw_bids(bids_path)
-    coord_list, coord_names = get_coord_list(raw_arr)
-    if raw_arr.info["line_freq"] is not None:
-        line_noise = int(raw_arr.info["line_freq"])
-    else:
-        logger.info(
-            f"Line noise is not available in the data, using value of {line_noise} Hz."
-        )
-    return (
-        raw_arr,
-        raw_arr.get_data(),
-        raw_arr.info["sfreq"],
-        line_noise,
-        coord_list,
-        coord_names,
-    )
-
-
-def get_coord_list(
-    raw: mne.io.BaseRaw,
-) -> tuple[list, list] | tuple[None, None]:
-    montage = raw.get_montage()
-    if montage is not None:
-        coord_list = np.array(
-            list(dict(montage.get_positions()["ch_pos"]).values())
-        ).tolist()
-        coord_names = np.array(
-            list(dict(montage.get_positions()["ch_pos"]).keys())
-        ).tolist()
-    else:
-        coord_list = None
-        coord_names = None
-
-    return coord_list, coord_names
-
-
-def read_grid(PATH_GRIDS: _PathLike | None, grid_str: str) -> pd.DataFrame:
-    if PATH_GRIDS is None:
-        grid = pd.read_csv(
-            Path(__file__).parent / ("grid_" + grid_str.lower() + ".tsv"),
-            sep="\t",
-        )
-    else:
-        grid = pd.read_csv(
-            Path(PATH_GRIDS) / ("grid_" + grid_str.lower() + ".tsv"),
-            sep="\t",
-        )
-    return grid
-
-
-def get_annotations(
-    PATH_ANNOTATIONS: str, PATH_RUN: str, raw_arr: mne.io.RawArray
-):
-    try:
-        annot = mne.read_annotations(
-            Path(PATH_ANNOTATIONS) / (os.path.basename(PATH_RUN)[:-5] + ".txt")
-        )
-        raw_arr.set_annotations(annot)
-
-        # annotations starting with "BAD" are omitted with reject_by_annotations 'omit' param
-        annot_data = raw_arr.get_data(reject_by_annotation="omit")
-    except FileNotFoundError:
-        logger.critical(
-            "Annotations file could not be found"
-            + "expected location: "
-            + str(
-                Path(PATH_ANNOTATIONS)
-                / (os.path.basename(PATH_RUN)[:-5] + ".txt")
-            )
-        )
-    return annot, annot_data, raw_arr
-
-
-def read_plot_modules(
-    PATH_PLOT: _PathLike = Path(__file__).absolute().parent / "plots",
-):
-    """Read required .mat files for plotting
-
-    Parameters
-    ----------
-    PATH_PLOT : regexp, optional
-        path to plotting files, by default
-    """
-
-    faces = io.loadmat(os.path.join(PATH_PLOT, "faces.mat"))
-    vertices = io.loadmat(os.path.join(PATH_PLOT, "Vertices.mat"))
-    grid = io.loadmat(os.path.join(PATH_PLOT, "grid.mat"))["grid"]
-    stn_surf = io.loadmat(os.path.join(PATH_PLOT, "STN_surf.mat"))
-    x_ver = stn_surf["vertices"][::2, 0]
-    y_ver = stn_surf["vertices"][::2, 1]
-    x_ecog = vertices["Vertices"][::1, 0]
-    y_ecog = vertices["Vertices"][::1, 1]
-    z_ecog = vertices["Vertices"][::1, 2]
-    x_stn = stn_surf["vertices"][::1, 0]
-    y_stn = stn_surf["vertices"][::1, 1]
-    z_stn = stn_surf["vertices"][::1, 2]
-
-    return (
-        faces,
-        vertices,
-        grid,
-        stn_surf,
-        x_ver,
-        y_ver,
-        x_ecog,
-        y_ecog,
-        z_ecog,
-        x_stn,
-        y_stn,
-        z_stn,
-    )
-
-
-def save_features_and_settings(
-    df_features,
-    run_analysis,
-    folder_name,
-    out_path,
-    settings,
-    nm_channels,
-    coords,
-    fs,
-    line_noise,
-) -> None:
-    """save settings.json, nm_channels.csv and features.csv
-
-    Parameters
-    ----------
-    df_ : pd.Dataframe
-        feature dataframe
-    run_analysis_ : run_analysis.py object
-        This includes all (optionally projected) run_analysis estimated data
-        inluding added the resampled labels in features_arr
-    folder_name : string
-        output path
-    settings_wrapper : settings.py object
-    """
-
-    # create out folder if doesn't exist
-    if not os.path.exists(os.path.join(out_path, folder_name)):
-        logger.Info(f"Creating output folder: {folder_name}")
-        os.makedirs(os.path.join(out_path, folder_name))
-
-    dict_sidecar = {"fs": fs, "coords": coords, "line_noise": line_noise}
-
-    save_sidecar(dict_sidecar, out_path, folder_name)
-    save_features(df_features, out_path, folder_name)
-    save_settings(settings, out_path, folder_name)
-    save_nm_channels(nm_channels, out_path, folder_name)
-
-
-def write_csv(df, path_out):
-    """
-    Function to save Pandas dataframes to disk as CSV using
-    PyArrow (almost 10x faster than Pandas)
-    Difference with pandas.df.to_csv() is that it does not
-    write an index column by default
-    """
-    csv.write_csv(pyarrow.Table.from_pandas(df), path_out)
-
-
-def save_settings(
-    settings: dict, path_out: _PathLike, folder_name: str | None = None
-) -> None:
-    path_out = _pathlike_to_str(path_out)
-    if folder_name is not None:
-        path_out = os.path.join(
-            path_out, folder_name, folder_name + "_SETTINGS.json"
-        )
-
-    with open(path_out, "w") as f:
-        json.dump(settings, f, indent=4)
-    logger.info(f"settings.json saved to {path_out}")
-
-
-def save_nm_channels(
-    nmchannels: pd.DataFrame,
-    path_out: _PathLike,
-    folder_name: str | None = None,
-) -> None:
-    path_out = _pathlike_to_str(path_out)
-    if folder_name is not None:
-        path_out = os.path.join(
-            path_out, folder_name, folder_name + "_nm_channels.csv"
-        )
-    write_csv(nmchannels, path_out)
-    logger.info(f"nm_channels.csv saved to {path_out}")
-
-
-def save_features(
-    df_features: pd.DataFrame,
-    path_out: _PathLike,
-    folder_name: str | None = None,
-) -> None:
-    path_out = _pathlike_to_str(path_out)
-    if folder_name is not None:
-        path_out = os.path.join(
-            path_out, folder_name, folder_name + "_FEATURES.csv"
-        )
-    write_csv(df_features, path_out)
-    logger.info(f"FEATURES.csv saved to {str(path_out)}")
-
-
-def save_sidecar(
-    sidecar: dict, path_out: _PathLike, folder_name: str | None = None
-) -> None:
-    path_out = _pathlike_to_str(path_out)
-    save_general_dict(sidecar, path_out, "_SIDECAR.json", folder_name)
-
-
-def save_general_dict(
-    dict_: dict,
-    path_out: _PathLike,
-    str_add: str,
-    folder_name: str | None = None,
-) -> None:
-    if folder_name is not None:
-        path_out = os.path.join(path_out, folder_name, folder_name + str_add)
-
-    with open(path_out, "w") as f:
-        json.dump(
-            dict_,
-            f,
-            default=default_json_convert,
-            indent=4,
-            separators=(",", ": "),
-        )
-    logger.info(f"{str_add} saved to " + str(path_out))
-
-
-def default_json_convert(obj) -> list | int | float:
-    if isinstance(obj, np.ndarray):
-        return obj.tolist()
-    if isinstance(obj, pd.DataFrame):
-        return obj.to_numpy().tolist()
-    if isinstance(obj, np.integer):
-        return int(obj)
-    if isinstance(obj, np.floating):
-        return float(obj)
-    raise TypeError("Not serializable")
-
-
-def read_sidecar(PATH: str) -> dict:
-    with open(PATH + "_SIDECAR.json") as f:
-        return json.load(f)
-
-
-def read_settings(PATH: str) -> dict:
-    with open(PATH if ".json" in PATH else PATH + "_SETTINGS.json") as f:
-        return json.load(f)
-
-
-def read_features(PATH: str) -> pd.DataFrame:
-    return pd.read_csv(PATH + "_FEATURES.csv", engine="pyarrow")
-
-
-def read_nm_channels(PATH: str) -> pd.DataFrame:
-    return pd.read_csv(PATH + "_nm_channels.csv")
-
-
-def get_run_list_indir(PATH: str) -> list:
-    f_files = []
-    for dirpath, _, files in os.walk(PATH):
-        for x in files:
-            if "FEATURES" in x:
-                f_files.append(os.path.basename(dirpath))
-    return f_files
-
-
-def loadmat(filename) -> dict:
-    """
-    this function should be called instead of direct spio.loadmat
-    as it cures the problem of not properly recovering python dictionaries
-    from mat files. It calls the function check keys to cure all entries
-    which are still mat-objects
-    """
-    data = io.loadmat(filename, struct_as_record=False, squeeze_me=True)
-    return _check_keys(data)
-
-
-def get_paths_example_data():
-    """
-    This function should provide RUN_NAME, PATH_RUN, PATH_BIDS, PATH_OUT and datatype for the example
-    dataset used in most examples.
-    """
-
-    SCRIPT_DIR = Path(py_neuromodulation.__file__).parent.absolute()
-
-    sub = "testsub"
-    ses = "EphysMedOff"
-    task = "gripforce"
-    run = 0
-    datatype = "ieeg"
-
-    # Define run name and access paths in the BIDS format.
-    RUN_NAME = f"sub-{sub}_ses-{ses}_task-{task}_run-{run}"
-
-    PATH_BIDS = Path(SCRIPT_DIR) / "data"
-
-    PATH_RUN = (
-        Path(SCRIPT_DIR)
-        / "data"
-        / f"sub-{sub}"
-        / f"ses-{ses}"
-        / datatype
-        / RUN_NAME
-    )
-
-    # Provide a path for the output data.
-    PATH_OUT = PATH_BIDS / "derivatives"
-
-    return RUN_NAME, PATH_RUN, PATH_BIDS, PATH_OUT, datatype
-
-
-def _check_keys(dict):
-    """
-    checks if entries in dictionary are mat-objects. If yes
-    todict is called to change them to nested dictionaries
-    """
-    for key in dict:
-        if isinstance(dict[key], io.matlab.mio5_params.mat_struct):
-            dict[key] = _todict(dict[key])
-    return dict
-
-
-def _todict(matobj) -> dict:
-    """
-    A recursive function which constructs from matobjects nested dictionaries
-    """
-    dict = {}
-    for strg in matobj._fieldnames:
-        elem = matobj.__dict__[strg]
-        if isinstance(elem, io.matlab.mio5_params.mat_struct):
-            dict[strg] = _todict(elem)
-        else:
-            dict[strg] = elem
-    return dict
-
-
-def _pathlike_to_str(path: _PathLike) -> str:
-    if isinstance(path, str):
-        return path
-    return str(path)
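Per the file list above, this IO functionality is superseded by py_neuromodulation/utils/io.py in 0.0.6. For reference, a usage sketch of the removed 0.0.4 helper, following the signature shown in the hunk (the .vhdr path is a hypothetical placeholder pointing at the bundled example recording):

from py_neuromodulation import nm_IO  # 0.0.4 module, deleted in 0.0.6

# Returns (raw, data, sfreq, line_noise, coord_list, coord_names); falls back
# to the line_noise argument (default 50 Hz) when the recording omits it.
raw, data, sfreq, line_noise, coords, coord_names = nm_IO.read_BIDS_data(
    PATH_RUN="data/sub-testsub/ses-EphysMedOff/ieeg/"
    "sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr",
)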