py-neuromodulation 0.0.4__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -34
- py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -106
- py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -119
- py_neuromodulation/FieldTrip.py +589 -589
- py_neuromodulation/__init__.py +74 -13
- py_neuromodulation/_write_example_dataset_helper.py +83 -65
- py_neuromodulation/data/README +6 -6
- py_neuromodulation/data/dataset_description.json +8 -8
- py_neuromodulation/data/participants.json +32 -32
- py_neuromodulation/data/participants.tsv +2 -2
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -5
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -11
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -18
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -35
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -13
- py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -2
- py_neuromodulation/grid_cortex.tsv +40 -40
- py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
- py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
- py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
- py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
- py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
- py_neuromodulation/nm_IO.py +413 -417
- py_neuromodulation/nm_RMAP.py +496 -531
- py_neuromodulation/nm_analysis.py +993 -1074
- py_neuromodulation/nm_artifacts.py +30 -25
- py_neuromodulation/nm_bispectra.py +154 -168
- py_neuromodulation/nm_bursts.py +292 -198
- py_neuromodulation/nm_coherence.py +251 -205
- py_neuromodulation/nm_database.py +149 -0
- py_neuromodulation/nm_decode.py +918 -992
- py_neuromodulation/nm_define_nmchannels.py +300 -302
- py_neuromodulation/nm_features.py +144 -116
- py_neuromodulation/nm_filter.py +219 -219
- py_neuromodulation/nm_filter_preprocessing.py +79 -91
- py_neuromodulation/nm_fooof.py +139 -159
- py_neuromodulation/nm_generator.py +45 -37
- py_neuromodulation/nm_hjorth_raw.py +52 -73
- py_neuromodulation/nm_kalmanfilter.py +71 -58
- py_neuromodulation/nm_linelength.py +21 -33
- py_neuromodulation/nm_logger.py +66 -0
- py_neuromodulation/nm_mne_connectivity.py +149 -112
- py_neuromodulation/nm_mnelsl_generator.py +90 -0
- py_neuromodulation/nm_mnelsl_stream.py +116 -0
- py_neuromodulation/nm_nolds.py +96 -93
- py_neuromodulation/nm_normalization.py +173 -214
- py_neuromodulation/nm_oscillatory.py +423 -448
- py_neuromodulation/nm_plots.py +585 -612
- py_neuromodulation/nm_preprocessing.py +83 -0
- py_neuromodulation/nm_projection.py +370 -394
- py_neuromodulation/nm_rereference.py +97 -95
- py_neuromodulation/nm_resample.py +59 -50
- py_neuromodulation/nm_run_analysis.py +325 -435
- py_neuromodulation/nm_settings.py +289 -68
- py_neuromodulation/nm_settings.yaml +244 -0
- py_neuromodulation/nm_sharpwaves.py +423 -401
- py_neuromodulation/nm_stats.py +464 -480
- py_neuromodulation/nm_stream.py +398 -0
- py_neuromodulation/nm_stream_abc.py +166 -218
- py_neuromodulation/nm_types.py +193 -0
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.5.dist-info}/METADATA +29 -26
- py_neuromodulation-0.0.5.dist-info/RECORD +83 -0
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.5.dist-info}/WHEEL +1 -1
- {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.5.dist-info}/licenses/LICENSE +21 -21
- py_neuromodulation/nm_EpochStream.py +0 -92
- py_neuromodulation/nm_across_patient_decoding.py +0 -927
- py_neuromodulation/nm_cohortwrapper.py +0 -435
- py_neuromodulation/nm_eval_timing.py +0 -239
- py_neuromodulation/nm_features_abc.py +0 -39
- py_neuromodulation/nm_settings.json +0 -338
- py_neuromodulation/nm_stream_offline.py +0 -359
- py_neuromodulation/utils/_logging.py +0 -24
- py_neuromodulation-0.0.4.dist-info/RECORD +0 -72
|
@@ -0,0 +1,398 @@
|
|
|
1
|
+
"""Module for offline data streams."""
|
|
2
|
+
|
|
3
|
+
from typing import TYPE_CHECKING
|
|
4
|
+
import numpy as np
|
|
5
|
+
import pandas as pd
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from contextlib import suppress
|
|
8
|
+
|
|
9
|
+
from py_neuromodulation.nm_stream_abc import NMStream
|
|
10
|
+
from py_neuromodulation.nm_types import _PathLike
|
|
11
|
+
from py_neuromodulation import logger
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from py_neuromodulation.nm_settings import NMSettings
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class _GenericStream(NMStream):
    """_GenericStream base class.
    This class can be inherited for different types of offline streams

    Parameters
    ----------
    nm_stream_abc : nm_stream_abc.NMStream
    """

    def _add_target(self, feature_dict: dict, data: np.ndarray) -> None:
        """Add target channels to feature series.

        Parameters
        ----------
        feature_dict : dict
        data : np.ndarray
            Raw data with shape (n_channels, n_samples).
            Channels not used for feature computation are also included

        Returns
        -------
        None
            `feature_dict` is updated in place with one entry per target channel.
        """

        if self.nm_channels["target"].sum() > 0:
            # Cache target row indexes and names on the first call so the
            # DataFrame lookups are not repeated for every batch.
            if not self.target_idx_initialized:
                self.target_indexes = self.nm_channels[
                    self.nm_channels["target"] == 1
                ].index
                self.target_names = self.nm_channels.loc[
                    self.target_indexes, "name"
                ].to_list()
                self.target_idx_initialized = True

            # The last sample of the batch is used as the target value.
            for target_idx, target_name in zip(self.target_indexes, self.target_names):
                feature_dict[target_name] = data[target_idx, -1]

    def _handle_data(self, data: np.ndarray | pd.DataFrame) -> np.ndarray:
        """Validate input data against `nm_channels` and return it as an array.

        For an ndarray the first dimension must equal the number of channel
        names; for a DataFrame the column names must match the channel names
        (order-insensitive). DataFrames are returned transposed to shape
        (n_channels, n_samples).

        Raises
        ------
        ValueError
            If the channel count / names do not match `nm_channels`.
        """
        names_expected = self.nm_channels["name"].to_list()

        if isinstance(data, np.ndarray):
            if not len(names_expected) == data.shape[0]:
                raise ValueError(
                    "If data is passed as an array, the first dimension must"
                    " match the number of channel names in `nm_channels`.\n"
                    f" Number of data channels (data.shape[0]): {data.shape[0]}\n"
                    f' Length of nm_channels["name"]: {len(names_expected)}.'
                )
            return data

        names_data = data.columns.to_list()
        if not (
            len(names_expected) == len(names_data)
            and sorted(names_expected) == sorted(names_data)
        ):
            raise ValueError(
                "If data is passed as a DataFrame, the"
                "column names must match the channel names in `nm_channels`.\n"
                f"Input dataframe column names: {names_data}\n"
                f'Expected (from nm_channels["name"]): : {names_expected}.'
            )
        return data.to_numpy().transpose()

    def _run(
        self,
        data: np.ndarray | pd.DataFrame | None = None,
        out_path_root: _PathLike = "",
        folder_name: str = "sub",
        is_stream_lsl: bool = True,
        stream_lsl_name: str | None = None,
        plot_lsl: bool = False,
        save_csv: bool = False,
        save_interval: int = 10,
        return_df: bool = True,
    ) -> pd.DataFrame:
        """Core processing loop: pull batches, compute features, persist them.

        Consumes batches either from an LSL stream (``is_stream_lsl=True``) or
        from a raw-data generator over `data`, computes features per batch via
        ``self.data_processor.process`` and writes each feature row into an
        sqlite-backed NMDatabase, committing every `save_interval` batches.

        Parameters
        ----------
        data : np.ndarray | pd.DataFrame | None
            Raw data; only used when `is_stream_lsl` is False.
        out_path_root : _PathLike
            Output root directory; defaults to the current working directory
            when empty.
        folder_name : str
            Output sub-folder (also used as the experiment/database name).
        is_stream_lsl : bool
            Read batches from an LSL stream instead of `data`.
        stream_lsl_name : str | None
            Name of the LSL stream to connect/plot.
        plot_lsl : bool
            Launch the mne_lsl StreamViewer for the stream.
        save_csv : bool
            Fetch all rows (for later csv saving) instead of only the head.
        save_interval : int
            Commit the database every this-many batches.
        return_df : bool
            Fetch all rows into the returned DataFrame.

        Returns
        -------
        pd.DataFrame
            All computed feature rows (or only the first row if neither
            `save_csv` nor `return_df` is set).
        """
        from py_neuromodulation.nm_database import NMDatabase

        out_path_root = Path.cwd() if not out_path_root else Path(out_path_root)

        self.PATH_OUT = out_path_root
        self.PATH_OUT_folder_name = folder_name

        out_dir = out_path_root / folder_name
        out_dir.mkdir(parents=True, exist_ok=True)

        # TONI: Need better default experiment name
        experiment_name = folder_name if folder_name else "sub"

        db = NMDatabase(experiment_name, out_dir)  # Create output database

        self.batch_count: int = 0  # Keep track of the number of batches processed

        if not is_stream_lsl:
            from py_neuromodulation.nm_generator import raw_data_generator

            # Offline mode: iterate over the passed array/DataFrame in batches.
            generator = raw_data_generator(
                data=data,
                settings=self.settings,
                sfreq=self.sfreq,
            )
        else:
            from py_neuromodulation.nm_mnelsl_stream import LSLStream

            self.lsl_stream = LSLStream(
                settings=self.settings, stream_name=stream_lsl_name
            )

            if plot_lsl:
                from mne_lsl.stream_viewer import StreamViewer

                viewer = StreamViewer(stream_name=stream_lsl_name)
                viewer.start()

            # On a mismatch the stream's sampling rate wins over the settings.
            if self.sfreq != self.lsl_stream.stream.sinfo.sfreq:
                error_msg = (
                    f"Sampling frequency of the lsl-stream ({self.lsl_stream.stream.sinfo.sfreq}) "
                    f"does not match the settings ({self.sfreq})."
                    "The sampling frequency read from the stream will be used"
                )
                logger.warning(error_msg)
                self.sfreq = self.lsl_stream.stream.sinfo.sfreq

            generator = self.lsl_stream.get_next_batch()

        prev_batch_end = 0

        while True:
            # Generator exhaustion (None) or an empty batch both end the loop.
            next_item = next(generator, None)

            if next_item is not None:
                timestamps, data_batch = next_item
            else:
                break

            if data_batch is None:
                break

            feature_dict = self.data_processor.process(data_batch)

            this_batch_end = timestamps[-1]
            batch_length = this_batch_end - prev_batch_end
            logger.debug(
                f"{batch_length:.3f} seconds of new data processed",
            )

            # NOTE(review): units differ by mode — LSL stores the batch length
            # (seconds, per the debug message above), offline stores an
            # absolute millisecond timestamp; the log below always divides by
            # 1000 — confirm intended.
            feature_dict["time"] = (
                batch_length if is_stream_lsl else np.ceil(this_batch_end * 1000 + 1)
            )

            prev_batch_end = this_batch_end

            if self.verbose:
                logger.info("Time: %.2f", feature_dict["time"] / 1000)

            self._add_target(feature_dict, data_batch)

            # We should ensure that feature output is always either float64 or None and remove this
            with suppress(TypeError):  # Need this because some features output None
                for key, value in feature_dict.items():
                    feature_dict[key] = np.float64(value)

            db.insert_data(feature_dict)

            self.batch_count += 1
            if self.batch_count % save_interval == 0:
                db.commit()

        db.commit()  # Save last batches

        # If save_csv is False, still save the first row to get the column names
        feature_df: pd.DataFrame = (
            db.fetch_all() if (save_csv or return_df) else db.head()
        )

        db.close()  # Close the database connection

        self.save_after_stream(
            out_dir=out_dir, prefix=experiment_name, feature_arr=feature_df
        )

        return feature_df  # TONI: Not sure if this makes sense anymore

    def plot_raw_signal(
        self,
        sfreq: float | None = None,
        data: np.ndarray | None = None,
        lowpass: float | None = None,
        highpass: float | None = None,
        picks: list | None = None,
        plot_time: bool = True,
        plot_psd: bool = False,
    ) -> None:
        """Use MNE-RawArray Plot to investigate PSD or raw_signal plot.

        Parameters
        ----------
        sfreq : float
            sampling frequency [Hz]
        data : np.ndarray, optional
            data (n_channels, n_times), by default None
        lowpass: float, optional
            cutoff lowpass filter frequency
        highpass: float, optional
            cutoff highpass filter frequency
        picks: list, optional
            list of channels to plot
        plot_time : bool, optional
            mne.io.RawArray.plot(), by default True
        plot_psd : bool, optional
            mne.io.RawArray.plot(), by default False

        Raises
        ------
        ValueError
            raise Exception when no data is passed
        """
        if self.data is None and data is None:
            raise ValueError("No data passed to plot_raw_signal function.")

        # Fall back to the stream's stored data / sampling rate when the
        # caller does not supply them explicitly.
        if data is None and self.data is not None:
            data = self.data

        if sfreq is None:
            sfreq = self.sfreq

        if self.nm_channels is not None:
            ch_names = self.nm_channels["name"].to_list()
            ch_types = self.nm_channels["type"].to_list()
        else:
            # No channel table available: synthesize generic channel info.
            ch_names = [f"ch_{i}" for i in range(data.shape[0])]
            ch_types = ["ecog" for i in range(data.shape[0])]

        from mne import create_info
        from mne.io import RawArray

        info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
        raw = RawArray(data, info)

        if picks is not None:
            raw = raw.pick(picks)
        self.raw = raw
        if plot_time:
            raw.plot(highpass=highpass, lowpass=lowpass)
        if plot_psd:
            raw.compute_psd().plot()
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
class Stream(_GenericStream):
    """User-facing stream class for offline (array/DataFrame) or LSL input."""

    def __init__(
        self,
        sfreq: float,
        data: np.ndarray | pd.DataFrame | None = None,
        nm_channels: pd.DataFrame | _PathLike | None = None,
        settings: "NMSettings | _PathLike | None" = None,
        sampling_rate_features_hz: float | None = None,
        line_noise: float | None = 50,
        path_grids: _PathLike | None = None,
        coord_names: list | None = None,
        coord_list: list | None = None,
        verbose: bool = True,
    ) -> None:
        """Stream initialization

        Parameters
        ----------
        sfreq : float
            sampling frequency of data in Hertz
        data : np.ndarray | pd.DataFrame | None, optional
            data to be streamed with shape (n_channels, n_time), by default None
        nm_channels : pd.DataFrame | _PathLike
            parametrization of channels (see nm_define_channels.py for initialization)
        settings : NMSettings | _PathLike | None, optional
            Initialized nm_settings.NMSettings object, by default the py_neuromodulation/nm_settings.yaml are read
            and passed into a settings object
        line_noise : float | None, optional
            line noise, by default 50
        sampling_rate_features_hz : float | None, optional
            feature sampling rate, by default None
        path_grids : _PathLike | None, optional
            path to grid_cortex.tsv and/or grid_subcortex.tsv, by default None
        coord_names : list | None, optional
            coordinate name in the form [coord_1_name, coord_2_name, etc], by default None
        coord_list : list | None, optional
            coordinates in the form [[coord_1_x, coord_1_y, coord_1_z], [coord_2_x, coord_2_y, coord_2_z],], by default None
        verbose : bool, optional
            log stream computation time information, by default True

        Raises
        ------
        ValueError
            if neither `nm_channels` nor `data` is passed
        """

        # Derive a default channel table from the data when none is given.
        if nm_channels is None and data is not None:
            from py_neuromodulation.nm_define_nmchannels import (
                get_default_channels_from_data,
            )

            nm_channels = get_default_channels_from_data(data)

        if nm_channels is None and data is None:
            raise ValueError(
                "Either `nm_channels` or `data` must be passed to `Stream`."
            )

        super().__init__(
            sfreq=sfreq,
            nm_channels=nm_channels,
            settings=settings,
            line_noise=line_noise,
            sampling_rate_features_hz=sampling_rate_features_hz,
            path_grids=path_grids,
            coord_names=coord_names,
            coord_list=coord_list,
            verbose=verbose,
        )

        # Stored so run() can be called later without re-passing the data.
        self.data = data

        # Lazily initialized by _GenericStream._add_target on the first batch.
        self.target_idx_initialized: bool = False

    def run(
        self,
        data: np.ndarray | pd.DataFrame | None = None,
        # NOTE(review): Path.cwd() is evaluated once at import time, not per
        # call — confirm this default is intended.
        out_path_root: _PathLike = Path.cwd(),
        folder_name: str = "sub",
        stream_lsl: bool = False,
        stream_lsl_name: str | None = None,
        save_csv: bool = False,
        plot_lsl: bool = False,
        save_interval: int = 10,
    ) -> pd.DataFrame:
        """Call run function for offline stream.

        Parameters
        ----------
        data : np.ndarray | pd.DataFrame
            shape (n_channels, n_time)
        out_path_root : _PathLike | None, optional
            Full path to store estimated features, by default None
            If None, data is simply returned and not saved
        folder_name : str, optional
            folder output name, commonly subject or run name, by default "sub"
        stream_lsl : bool, optional
            stream data from LSL, by default False
        stream_lsl_name : str, optional
            stream name, by default None
        plot_lsl : bool, optional
            plot data with mne_lsl stream_viewer
        save_csv : bool, optional
            save csv file, by default False
        save_interval : int, optional
            save interval in number of batches, by default 10

        Returns
        -------
        pd.DataFrame
            feature DataFrame
        """

        super().run()  # reinitialize the stream

        self.stream_lsl = stream_lsl
        self.stream_lsl_name = stream_lsl_name

        # Validate/convert input data; explicit `data` takes precedence over
        # the data stored at construction time. With LSL streaming enabled,
        # no data is required.
        if data is not None:
            data = self._handle_data(data)
        elif self.data is not None:
            data = self._handle_data(self.data)
        elif self.data is None and data is None and self.stream_lsl is False:
            raise ValueError("No data passed to run function.")

        out_path = Path(out_path_root, folder_name)
        out_path.mkdir(parents=True, exist_ok=True)
        logger.log_to_file(out_path)

        return self._run(
            data,
            out_path_root,
            folder_name,
            is_stream_lsl=stream_lsl,
            stream_lsl_name=stream_lsl_name,
            save_csv=save_csv,
            plot_lsl=plot_lsl,
            save_interval=save_interval,
        )
|