py-neuromodulation 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -34
  2. py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -106
  3. py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -119
  4. py_neuromodulation/__init__.py +80 -13
  5. py_neuromodulation/{nm_RMAP.py → analysis/RMAP.py} +496 -531
  6. py_neuromodulation/analysis/__init__.py +4 -0
  7. py_neuromodulation/{nm_decode.py → analysis/decode.py} +918 -992
  8. py_neuromodulation/{nm_analysis.py → analysis/feature_reader.py} +994 -1074
  9. py_neuromodulation/{nm_plots.py → analysis/plots.py} +627 -612
  10. py_neuromodulation/{nm_stats.py → analysis/stats.py} +458 -480
  11. py_neuromodulation/data/README +6 -6
  12. py_neuromodulation/data/dataset_description.json +8 -8
  13. py_neuromodulation/data/participants.json +32 -32
  14. py_neuromodulation/data/participants.tsv +2 -2
  15. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -5
  16. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -11
  17. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -11
  18. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -18
  19. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -35
  20. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -13
  21. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -2
  22. py_neuromodulation/default_settings.yaml +241 -0
  23. py_neuromodulation/features/__init__.py +31 -0
  24. py_neuromodulation/features/bandpower.py +165 -0
  25. py_neuromodulation/features/bispectra.py +157 -0
  26. py_neuromodulation/features/bursts.py +297 -0
  27. py_neuromodulation/features/coherence.py +255 -0
  28. py_neuromodulation/features/feature_processor.py +121 -0
  29. py_neuromodulation/features/fooof.py +142 -0
  30. py_neuromodulation/features/hjorth_raw.py +57 -0
  31. py_neuromodulation/features/linelength.py +21 -0
  32. py_neuromodulation/features/mne_connectivity.py +148 -0
  33. py_neuromodulation/features/nolds.py +94 -0
  34. py_neuromodulation/features/oscillatory.py +249 -0
  35. py_neuromodulation/features/sharpwaves.py +432 -0
  36. py_neuromodulation/filter/__init__.py +3 -0
  37. py_neuromodulation/filter/kalman_filter.py +67 -0
  38. py_neuromodulation/filter/kalman_filter_external.py +1890 -0
  39. py_neuromodulation/filter/mne_filter.py +128 -0
  40. py_neuromodulation/filter/notch_filter.py +93 -0
  41. py_neuromodulation/grid_cortex.tsv +40 -40
  42. py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
  43. py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
  44. py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
  45. py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
  46. py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
  47. py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
  48. py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
  49. py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
  50. py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
  51. py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
  52. py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
  53. py_neuromodulation/processing/__init__.py +10 -0
  54. py_neuromodulation/{nm_artifacts.py → processing/artifacts.py} +29 -25
  55. py_neuromodulation/processing/data_preprocessor.py +77 -0
  56. py_neuromodulation/processing/filter_preprocessing.py +78 -0
  57. py_neuromodulation/processing/normalization.py +175 -0
  58. py_neuromodulation/{nm_projection.py → processing/projection.py} +370 -394
  59. py_neuromodulation/{nm_rereference.py → processing/rereference.py} +97 -95
  60. py_neuromodulation/{nm_resample.py → processing/resample.py} +56 -50
  61. py_neuromodulation/stream/__init__.py +3 -0
  62. py_neuromodulation/stream/data_processor.py +325 -0
  63. py_neuromodulation/stream/generator.py +53 -0
  64. py_neuromodulation/stream/mnelsl_player.py +94 -0
  65. py_neuromodulation/stream/mnelsl_stream.py +120 -0
  66. py_neuromodulation/stream/settings.py +292 -0
  67. py_neuromodulation/stream/stream.py +427 -0
  68. py_neuromodulation/utils/__init__.py +2 -0
  69. py_neuromodulation/{nm_define_nmchannels.py → utils/channels.py} +305 -302
  70. py_neuromodulation/utils/database.py +149 -0
  71. py_neuromodulation/utils/io.py +378 -0
  72. py_neuromodulation/utils/keyboard.py +52 -0
  73. py_neuromodulation/utils/logging.py +66 -0
  74. py_neuromodulation/utils/types.py +251 -0
  75. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/METADATA +28 -33
  76. py_neuromodulation-0.0.6.dist-info/RECORD +89 -0
  77. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/WHEEL +1 -1
  78. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/licenses/LICENSE +21 -21
  79. py_neuromodulation/FieldTrip.py +0 -589
  80. py_neuromodulation/_write_example_dataset_helper.py +0 -65
  81. py_neuromodulation/nm_EpochStream.py +0 -92
  82. py_neuromodulation/nm_IO.py +0 -417
  83. py_neuromodulation/nm_across_patient_decoding.py +0 -927
  84. py_neuromodulation/nm_bispectra.py +0 -168
  85. py_neuromodulation/nm_bursts.py +0 -198
  86. py_neuromodulation/nm_coherence.py +0 -205
  87. py_neuromodulation/nm_cohortwrapper.py +0 -435
  88. py_neuromodulation/nm_eval_timing.py +0 -239
  89. py_neuromodulation/nm_features.py +0 -116
  90. py_neuromodulation/nm_features_abc.py +0 -39
  91. py_neuromodulation/nm_filter.py +0 -219
  92. py_neuromodulation/nm_filter_preprocessing.py +0 -91
  93. py_neuromodulation/nm_fooof.py +0 -159
  94. py_neuromodulation/nm_generator.py +0 -37
  95. py_neuromodulation/nm_hjorth_raw.py +0 -73
  96. py_neuromodulation/nm_kalmanfilter.py +0 -58
  97. py_neuromodulation/nm_linelength.py +0 -33
  98. py_neuromodulation/nm_mne_connectivity.py +0 -112
  99. py_neuromodulation/nm_nolds.py +0 -93
  100. py_neuromodulation/nm_normalization.py +0 -214
  101. py_neuromodulation/nm_oscillatory.py +0 -448
  102. py_neuromodulation/nm_run_analysis.py +0 -435
  103. py_neuromodulation/nm_settings.json +0 -338
  104. py_neuromodulation/nm_settings.py +0 -68
  105. py_neuromodulation/nm_sharpwaves.py +0 -401
  106. py_neuromodulation/nm_stream_abc.py +0 -218
  107. py_neuromodulation/nm_stream_offline.py +0 -359
  108. py_neuromodulation/utils/_logging.py +0 -24
  109. py_neuromodulation-0.0.4.dist-info/RECORD +0 -72
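The listing above reflects a package-wide restructuring between 0.0.4 and 0.0.6: the flat nm_*.py modules were split into analysis, features, filter, processing, stream, and utils subpackages, and nm_settings.json was replaced by default_settings.yaml. As a rough orientation, the sketch below contrasts old and new import paths; the module locations are taken from the file moves above, while the exact symbols re-exported by each new subpackage in 0.0.6 are an assumption, not verified here.

# Hedged sketch of the import-path changes implied by the file moves listed above.
# 0.0.4 exposed flat modules such as:
#   from py_neuromodulation import nm_analysis, nm_decode, nm_plots, nm_stream_offline
# 0.0.6 groups the same functionality into subpackages:
from py_neuromodulation.stream.stream import Stream        # replaces nm_stream_offline.py
from py_neuromodulation.stream.settings import NMSettings  # replaces nm_settings.py / nm_settings.json
from py_neuromodulation.analysis import decode, plots      # replace nm_decode.py and nm_plots.py
from py_neuromodulation.utils import io                    # replaces nm_IO.py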
py_neuromodulation/stream/stream.py
@@ -0,0 +1,427 @@
+ """Module for generic and offline data streams."""
+
+ from typing import TYPE_CHECKING
+ from collections.abc import Iterator
+ import numpy as np
+ from pathlib import Path
+
+ import py_neuromodulation as nm
+ from contextlib import suppress
+
+ from py_neuromodulation.stream.data_processor import DataProcessor
+ from py_neuromodulation.utils.types import _PathLike, FeatureName
+ from py_neuromodulation.stream.settings import NMSettings
+
+ if TYPE_CHECKING:
+     import pandas as pd
+
+
+ class Stream:
+     """Stream base class.
+     This class can be inherited for different types of offline streams.
+
+     Parameters
+     ----------
+     nm_stream_abc : stream_abc.NMStream
+     """
+
+     def __init__(
+         self,
+         sfreq: float,
+         channels: "pd.DataFrame | _PathLike | None" = None,
+         data: "np.ndarray | pd.DataFrame | None" = None,
+         settings: NMSettings | _PathLike | None = None,
+         line_noise: float | None = 50,
+         sampling_rate_features_hz: float | None = None,
+         path_grids: _PathLike | None = None,
+         coord_names: list | None = None,
+         stream_name: str
+         | None = "example_stream",  # Timon: do we need those in the nmstream_abc?
+         is_stream_lsl: bool = False,
+         coord_list: list | None = None,
+         verbose: bool = True,
+     ) -> None:
+         """Stream initialization
+
+         Parameters
+         ----------
+         sfreq : float
+             sampling frequency of data in Hertz
+         channels : pd.DataFrame | _PathLike
+             parametrization of channels (see utils/channels.py for initialization)
+         data : np.ndarray | pd.DataFrame | None, optional
+             data to be streamed with shape (n_channels, n_time), by default None
+         settings : NMSettings | _PathLike | None, optional
+             initialized settings.NMSettings object; by default py_neuromodulation/default_settings.yaml is read
+             and passed into a settings object
+         line_noise : float | None, optional
+             line noise, by default 50
+         sampling_rate_features_hz : float | None, optional
+             feature sampling rate, by default None
+         path_grids : _PathLike | None, optional
+             path to grid_cortex.tsv and/or grid_subcortex.tsv, by default None
+         coord_names : list | None, optional
+             coordinate names in the form [coord_1_name, coord_2_name, ...], by default None
+         coord_list : list | None, optional
+             coordinates in the form [[coord_1_x, coord_1_y, coord_1_z], [coord_2_x, coord_2_y, coord_2_z], ...], by default None
+         verbose : bool, optional
+             print out stream computation time information, by default True
+         """
+         self.settings: NMSettings = NMSettings.load(settings)
+
+         if channels is None and data is not None:
+             channels = nm.utils.channels.get_default_channels_from_data(data)
+
+         if channels is not None:
+             self.channels = nm.io.load_channels(channels)
+
+             if self.channels.query("used == 1 and target == 0").shape[0] == 0:
+                 raise ValueError(
+                     "No channels selected for analysis that have column 'used' = 1 and 'target' = 0. Please check your channels"
+                 )
+
+         if channels is None and data is None:
+             raise ValueError("Either `channels` or `data` must be passed to `Stream`.")
+
+         # If features that use frequency ranges are on, test them against nyquist frequency
+         use_freq_ranges: list[FeatureName] = [
+             "bandpass_filter",
+             "stft",
+             "fft",
+             "welch",
+             "bursts",
+             "coherence",
+             "nolds",
+             "bispectrum",
+         ]
+
+         need_nyquist_check = any(
+             (f in use_freq_ranges for f in self.settings.features.get_enabled())
+         )
+
+         if need_nyquist_check:
+             assert all(
+                 fb.frequency_high_hz < sfreq / 2
+                 for fb in self.settings.frequency_ranges_hz.values()
+             ), (
+                 "If a feature that uses frequency ranges is selected, "
+                 "the frequency band ranges need to be smaller than the nyquist frequency.\n"
+                 f"Got sfreq = {sfreq} and fband ranges:\n {self.settings.frequency_ranges_hz}"
+             )
+
+         if sampling_rate_features_hz is not None:
+             self.settings.sampling_rate_features_hz = sampling_rate_features_hz
+
+         if path_grids is None:
+             path_grids = nm.PYNM_DIR
+
+         self.path_grids = path_grids
+         self.verbose = verbose
+         self.sfreq = sfreq
+         self.line_noise = line_noise
+         self.coord_names = coord_names
+         self.coord_list = coord_list
+         self.sess_right = None
+         self.projection = None
+         self.model = None
+
+         # TODO(toni): is it necessary to initialize the DataProcessor on stream init?
+         self.data_processor = DataProcessor(
+             sfreq=self.sfreq,
+             settings=self.settings,
+             channels=self.channels,
+             path_grids=self.path_grids,
+             coord_names=coord_names,
+             coord_list=coord_list,
+             line_noise=line_noise,
+             verbose=self.verbose,
+         )
+
+         self.data = data
+
+         self.target_idx_initialized: bool = False
+
+     def _add_target(self, feature_dict: dict, data: np.ndarray) -> None:
+         """Add target channels to feature series.
+
+         Parameters
+         ----------
+         feature_dict : dict
+         data : np.ndarray
+             Raw data with shape (n_channels, n_samples).
+             Channels not used for feature computation are also included.
+
+         Notes
+         -----
+         feature_dict is updated in place with one entry per target channel.
+         """
+
+         if self.channels["target"].sum() > 0:
+             if not self.target_idx_initialized:
+                 self.target_indexes = self.channels[self.channels["target"] == 1].index
+                 self.target_names = self.channels.loc[
+                     self.target_indexes, "name"
+                 ].to_list()
+                 self.target_idx_initialized = True
+
+             for target_idx, target_name in zip(self.target_indexes, self.target_names):
+                 feature_dict[target_name] = data[target_idx, -1]
+
+     def _handle_data(self, data: "np.ndarray | pd.DataFrame") -> np.ndarray:
+         names_expected = self.channels["name"].to_list()
+
+         if isinstance(data, np.ndarray):
+             if not len(names_expected) == data.shape[0]:
+                 raise ValueError(
+                     "If data is passed as an array, the first dimension must"
+                     " match the number of channel names in `channels`.\n"
+                     f" Number of data channels (data.shape[0]): {data.shape[0]}\n"
+                     f' Length of channels["name"]: {len(names_expected)}.'
+                 )
+             return data
+
+         names_data = data.columns.to_list()
+         if not (
+             len(names_expected) == len(names_data)
+             and sorted(names_expected) == sorted(names_data)
+         ):
+             raise ValueError(
+                 "If data is passed as a DataFrame, the"
+                 " column names must match the channel names in `channels`.\n"
+                 f"Input dataframe column names: {names_data}\n"
+                 f'Expected (from channels["name"]): {names_expected}.'
+             )
+         return data.to_numpy().transpose()
+
+     def run(
+         self,
+         data: "np.ndarray | pd.DataFrame | None" = None,
+         out_dir: _PathLike = "",
+         experiment_name: str = "sub",
+         is_stream_lsl: bool = False,
+         stream_lsl_name: str | None = None,
+         plot_lsl: bool = False,
+         save_csv: bool = False,
+         save_interval: int = 10,
+         return_df: bool = True,
+     ) -> "pd.DataFrame":
+         self.is_stream_lsl = is_stream_lsl
+         self.stream_lsl_name = stream_lsl_name
+
+         # Validate input data
+         if data is not None:
+             data = self._handle_data(data)
+         elif self.data is not None:
+             data = self._handle_data(self.data)
+         elif self.data is None and data is None and self.is_stream_lsl is False:
+             raise ValueError("No data passed to run function.")
+
+         # Generate output dirs
+         self.out_dir_root = Path.cwd() if not out_dir else Path(out_dir)
+         self.out_dir = self.out_dir_root / experiment_name
+         # TONI: Need better default experiment name
+         self.experiment_name = experiment_name if experiment_name else "sub"
+
+         self.out_dir.mkdir(parents=True, exist_ok=True)
+
+         # Open database connection
+         # TONI: we should give the user control over the save format
+         from py_neuromodulation.utils.database import NMDatabase
+
+         db = NMDatabase(experiment_name, out_dir)  # Create output database
+
+         self.batch_count: int = 0  # Keep track of the number of batches processed
+
+         # Reinitialize the data processor in case the nm_channels or nm_settings changed between runs of the same Stream
+         # TONI: then I think we can just not initialize the data processor in the init function
+         self.data_processor = DataProcessor(
+             sfreq=self.sfreq,
+             settings=self.settings,
+             channels=self.channels,
+             path_grids=self.path_grids,
+             coord_names=self.coord_names,
+             coord_list=self.coord_list,
+             line_noise=self.line_noise,
+             verbose=self.verbose,
+         )
+
+         nm.logger.log_to_file(out_dir)
+
+         # Initialize generator
+         self.generator: Iterator
+         if not is_stream_lsl:
+             from py_neuromodulation.stream.generator import RawDataGenerator
+
+             self.generator = RawDataGenerator(
+                 data,
+                 self.sfreq,
+                 self.settings.sampling_rate_features_hz,
+                 self.settings.segment_length_features_ms,
+             )
+         else:
+             from py_neuromodulation.stream.mnelsl_stream import LSLStream
+
+             self.lsl_stream = LSLStream(
+                 settings=self.settings, stream_name=stream_lsl_name
+             )
+
+             if plot_lsl:
+                 from mne_lsl.stream_viewer import StreamViewer
+
+                 viewer = StreamViewer(stream_name=stream_lsl_name)
+                 viewer.start()
+
+             if self.sfreq != self.lsl_stream.stream.sinfo.sfreq:
+                 error_msg = (
+                     f"Sampling frequency of the lsl-stream ({self.lsl_stream.stream.sinfo.sfreq}) "
+                     f"does not match the settings ({self.sfreq}). "
+                     "The sampling frequency read from the stream will be used."
+                 )
+                 nm.logger.warning(error_msg)
+                 self.sfreq = self.lsl_stream.stream.sinfo.sfreq
+
+             self.generator = self.lsl_stream.get_next_batch()
+
+         prev_batch_end = 0
+         for timestamps, data_batch in self.generator:
+             if data_batch is None:
+                 break
+
+             feature_dict = self.data_processor.process(data_batch)
+
+             this_batch_end = timestamps[-1]
+             batch_length = this_batch_end - prev_batch_end
+             nm.logger.debug(
+                 f"{batch_length:.3f} seconds of new data processed",
+             )
+
+             feature_dict["time"] = (
+                 batch_length if is_stream_lsl else np.ceil(this_batch_end * 1000 + 1)
+             )
+
+             prev_batch_end = this_batch_end
+
+             if self.verbose:
+                 nm.logger.info("Time: %.2f", feature_dict["time"] / 1000)
+
+             self._add_target(feature_dict, data_batch)
+
+             # We should ensure that feature output is always either float64 or None and remove this
+             with suppress(TypeError):  # Need this because some features output None
+                 for key, value in feature_dict.items():
+                     feature_dict[key] = np.float64(value)
+
+             db.insert_data(feature_dict)
+
+             self.batch_count += 1
+             if self.batch_count % save_interval == 0:
+                 db.commit()
+
+         db.commit()  # Save last batches
+
+         # If save_csv is False, still save the first row to get the column names
+         feature_df: "pd.DataFrame" = (
+             db.fetch_all() if (save_csv or return_df) else db.head()
+         )
+
+         db.close()  # Close the database connection
+
+         self._save_after_stream(feature_arr=feature_df)
+
+         return feature_df  # TONI: Not sure if this makes sense anymore
+
+     def plot_raw_signal(
+         self,
+         sfreq: float | None = None,
+         data: np.ndarray | None = None,
+         lowpass: float | None = None,
+         highpass: float | None = None,
+         picks: list | None = None,
+         plot_time: bool = True,
+         plot_psd: bool = False,
+     ) -> None:
+         """Use MNE-RawArray Plot to investigate PSD or raw_signal plot.
+
+         Parameters
+         ----------
+         sfreq : float
+             sampling frequency [Hz]
+         data : np.ndarray, optional
+             data (n_channels, n_times), by default None
+         lowpass : float, optional
+             cutoff lowpass filter frequency
+         highpass : float, optional
+             cutoff highpass filter frequency
+         picks : list, optional
+             list of channels to plot
+         plot_time : bool, optional
+             plot the time series via mne.io.RawArray.plot(), by default True
+         plot_psd : bool, optional
+             plot the power spectral density via mne.io.RawArray.compute_psd().plot(), by default False
+
+         Raises
+         ------
+         ValueError
+             raised when no data is passed
+         """
+         if self.data is None and data is None:
+             raise ValueError("No data passed to plot_raw_signal function.")
+
+         if data is None and self.data is not None:
+             data = self.data
+
+         if sfreq is None:
+             sfreq = self.sfreq
+
+         if self.channels is not None:
+             ch_names = self.channels["name"].to_list()
+             ch_types = self.channels["type"].to_list()
+         else:
+             ch_names = [f"ch_{i}" for i in range(data.shape[0])]
+             ch_types = ["ecog" for i in range(data.shape[0])]
+
+         from mne import create_info
+         from mne.io import RawArray
+
+         info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
+         raw = RawArray(data, info)
+
+         if picks is not None:
+             raw = raw.pick(picks)
+         self.raw = raw
+         if plot_time:
+             raw.plot(highpass=highpass, lowpass=lowpass)
+         if plot_psd:
+             raw.compute_psd().plot()
+
+     def _save_after_stream(
+         self,
+         feature_arr: "pd.DataFrame | None" = None,
+     ) -> None:
+         """Save features, settings, nm_channels and sidecar after run"""
+         self._save_sidecar()
+         if feature_arr is not None:
+             self._save_features(feature_arr)
+         self._save_settings()
+         self._save_channels()
+
+     def _save_features(
+         self,
+         feature_arr: "pd.DataFrame",
+     ) -> None:
+         nm.io.save_features(feature_arr, self.out_dir, self.experiment_name)
+
+     def _save_channels(self) -> None:
+         self.data_processor.save_channels(self.out_dir, self.experiment_name)
+
+     def _save_settings(self) -> None:
+         self.data_processor.save_settings(self.out_dir, self.experiment_name)
+
+     def _save_sidecar(self) -> None:
+         """Save sidecar including fs, coords, sess_right to
+         out_path_root and subfolder 'folder_name'"""
+         additional_args = {"sess_right": self.sess_right}
+         self.data_processor.save_sidecar(
+             self.out_dir, self.experiment_name, additional_args
+         )
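Taken together, the new stream.py above suggests an offline workflow along the following lines. This is a minimal sketch: the random data, channel defaults, and output locations are illustrative assumptions; only the Stream(...) and run(...) signatures come from the diff above.

import numpy as np
from py_neuromodulation.stream.stream import Stream

sfreq = 1000.0                              # Hz, illustrative
data = np.random.randn(8, 10 * int(sfreq))  # 8 channels x 10 s of noise, illustrative

# channels may be omitted when data is given: per __init__ above, defaults are
# derived via nm.utils.channels.get_default_channels_from_data(data).
stream = Stream(sfreq=sfreq, data=data, line_noise=50)

# run() writes features into a database under <out_dir>/<experiment_name>
# and returns them as a DataFrame when return_df=True (the default).
features = stream.run(out_dir="./results", experiment_name="example_run", save_csv=True)
print(features.head())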
py_neuromodulation/utils/__init__.py
@@ -0,0 +1,2 @@
+ from .channels import *
+ from . import types
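Because utils/__init__.py star-imports channels and exposes types as a submodule, public helpers defined in utils/channels.py should also be reachable directly from py_neuromodulation.utils. A speculative example, assuming get_default_channels_from_data (referenced in Stream.__init__ above) is a public name in channels.py and is not filtered out by an __all__ definition:

import numpy as np
from py_neuromodulation import utils

data = np.random.randn(4, 1000)  # illustrative 4-channel array
# Assumed to be re-exported by `from .channels import *`; otherwise use
# py_neuromodulation.utils.channels.get_default_channels_from_data directly.
channels = utils.get_default_channels_from_data(data)
print(channels)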