py-neuromodulation 0.0.5__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. py_neuromodulation/__init__.py +16 -10
  2. py_neuromodulation/{nm_RMAP.py → analysis/RMAP.py} +2 -2
  3. py_neuromodulation/analysis/__init__.py +4 -0
  4. py_neuromodulation/{nm_decode.py → analysis/decode.py} +4 -4
  5. py_neuromodulation/{nm_analysis.py → analysis/feature_reader.py} +21 -20
  6. py_neuromodulation/{nm_plots.py → analysis/plots.py} +54 -12
  7. py_neuromodulation/{nm_stats.py → analysis/stats.py} +2 -8
  8. py_neuromodulation/{nm_settings.yaml → default_settings.yaml} +7 -9
  9. py_neuromodulation/features/__init__.py +31 -0
  10. py_neuromodulation/features/bandpower.py +165 -0
  11. py_neuromodulation/{nm_bispectra.py → features/bispectra.py} +11 -12
  12. py_neuromodulation/{nm_bursts.py → features/bursts.py} +14 -9
  13. py_neuromodulation/{nm_coherence.py → features/coherence.py} +28 -19
  14. py_neuromodulation/{nm_features.py → features/feature_processor.py} +30 -53
  15. py_neuromodulation/{nm_fooof.py → features/fooof.py} +11 -8
  16. py_neuromodulation/{nm_hjorth_raw.py → features/hjorth_raw.py} +10 -5
  17. py_neuromodulation/{nm_linelength.py → features/linelength.py} +1 -1
  18. py_neuromodulation/{nm_mne_connectivity.py → features/mne_connectivity.py} +5 -6
  19. py_neuromodulation/{nm_nolds.py → features/nolds.py} +5 -7
  20. py_neuromodulation/{nm_oscillatory.py → features/oscillatory.py} +7 -181
  21. py_neuromodulation/{nm_sharpwaves.py → features/sharpwaves.py} +13 -4
  22. py_neuromodulation/filter/__init__.py +3 -0
  23. py_neuromodulation/{nm_kalmanfilter.py → filter/kalman_filter.py} +67 -71
  24. py_neuromodulation/filter/kalman_filter_external.py +1890 -0
  25. py_neuromodulation/{nm_filter.py → filter/mne_filter.py} +128 -219
  26. py_neuromodulation/filter/notch_filter.py +93 -0
  27. py_neuromodulation/processing/__init__.py +10 -0
  28. py_neuromodulation/{nm_artifacts.py → processing/artifacts.py} +2 -3
  29. py_neuromodulation/{nm_preprocessing.py → processing/data_preprocessor.py} +19 -25
  30. py_neuromodulation/{nm_filter_preprocessing.py → processing/filter_preprocessing.py} +3 -4
  31. py_neuromodulation/{nm_normalization.py → processing/normalization.py} +9 -7
  32. py_neuromodulation/{nm_projection.py → processing/projection.py} +14 -14
  33. py_neuromodulation/{nm_rereference.py → processing/rereference.py} +13 -13
  34. py_neuromodulation/{nm_resample.py → processing/resample.py} +1 -4
  35. py_neuromodulation/stream/__init__.py +3 -0
  36. py_neuromodulation/{nm_run_analysis.py → stream/data_processor.py} +42 -42
  37. py_neuromodulation/stream/generator.py +53 -0
  38. py_neuromodulation/{nm_mnelsl_generator.py → stream/mnelsl_player.py} +10 -6
  39. py_neuromodulation/{nm_mnelsl_stream.py → stream/mnelsl_stream.py} +13 -9
  40. py_neuromodulation/{nm_settings.py → stream/settings.py} +27 -24
  41. py_neuromodulation/{nm_stream.py → stream/stream.py} +217 -188
  42. py_neuromodulation/utils/__init__.py +2 -0
  43. py_neuromodulation/{nm_define_nmchannels.py → utils/channels.py} +14 -9
  44. py_neuromodulation/{nm_database.py → utils/database.py} +2 -2
  45. py_neuromodulation/{nm_IO.py → utils/io.py} +42 -77
  46. py_neuromodulation/utils/keyboard.py +52 -0
  47. py_neuromodulation/{nm_logger.py → utils/logging.py} +3 -3
  48. py_neuromodulation/{nm_types.py → utils/types.py} +72 -14
  49. {py_neuromodulation-0.0.5.dist-info → py_neuromodulation-0.0.7.dist-info}/METADATA +12 -29
  50. py_neuromodulation-0.0.7.dist-info/RECORD +89 -0
  51. py_neuromodulation/FieldTrip.py +0 -589
  52. py_neuromodulation/_write_example_dataset_helper.py +0 -83
  53. py_neuromodulation/nm_generator.py +0 -45
  54. py_neuromodulation/nm_stream_abc.py +0 -166
  55. py_neuromodulation-0.0.5.dist-info/RECORD +0 -83
  56. {py_neuromodulation-0.0.5.dist-info → py_neuromodulation-0.0.7.dist-info}/WHEEL +0 -0
  57. {py_neuromodulation-0.0.5.dist-info → py_neuromodulation-0.0.7.dist-info}/licenses/LICENSE +0 -0
@@ -1,28 +1,146 @@
1
- """Module for offline data streams."""
1
+ """Module for generic and offline data streams."""
2
2
 
3
3
  from typing import TYPE_CHECKING
4
+ from collections.abc import Iterator
4
5
  import numpy as np
5
- import pandas as pd
6
6
  from pathlib import Path
7
+
8
+ import py_neuromodulation as nm
7
9
  from contextlib import suppress
8
10
 
9
- from py_neuromodulation.nm_stream_abc import NMStream
10
- from py_neuromodulation.nm_types import _PathLike
11
- from py_neuromodulation import logger
11
+ from py_neuromodulation.stream.data_processor import DataProcessor
12
+ from py_neuromodulation.utils.types import _PathLike, FeatureName
13
+ from py_neuromodulation.stream.settings import NMSettings
12
14
 
13
15
  if TYPE_CHECKING:
14
- from py_neuromodulation.nm_settings import NMSettings
16
+ import pandas as pd
15
17
 
16
18
 
17
- class _GenericStream(NMStream):
19
+ class Stream:
18
20
  """_GenericStream base class.
19
21
  This class can be inherited for different types of offline streams
20
22
 
21
23
  Parameters
22
24
  ----------
23
- nm_stream_abc : nm_stream_abc.NMStream
25
+ nm_stream_abc : stream_abc.NMStream
24
26
  """
25
27
 
28
+ def __init__(
29
+ self,
30
+ sfreq: float,
31
+ channels: "pd.DataFrame | _PathLike | None" = None,
32
+ data: "np.ndarray | pd.DataFrame | None" = None,
33
+ settings: NMSettings | _PathLike | None = None,
34
+ line_noise: float | None = 50,
35
+ sampling_rate_features_hz: float | None = None,
36
+ path_grids: _PathLike | None = None,
37
+ coord_names: list | None = None,
38
+ stream_name: str
39
+ | None = "example_stream", # Timon: do we need those in the nmstream_abc?
40
+ is_stream_lsl: bool = False,
41
+ coord_list: list | None = None,
42
+ verbose: bool = True,
43
+ ) -> None:
44
+ """Stream initialization
45
+
46
+ Parameters
47
+ ----------
48
+ sfreq : float
49
+ sampling frequency of data in Hertz
50
+ channels : pd.DataFrame | _PathLike
51
+ parametrization of channels (see define_channels.py for initialization)
52
+ data : np.ndarray | pd.DataFrame | None, optional
53
+ data to be streamed with shape (n_channels, n_time), by default None
54
+ settings : NMSettings | _PathLike | None, optional
55
+ Initialized settings.NMSettings object, by default the py_neuromodulation/settings.yaml are read
56
+ and passed into a settings object
57
+ line_noise : float | None, optional
58
+ line noise, by default 50
59
+ sampling_rate_features_hz : float | None, optional
60
+ feature sampling rate, by default None
61
+ path_grids : _PathLike | None, optional
62
+ path to grid_cortex.tsv and/or grid_subcortex.tsv, by default None
63
+ coord_names : list | None, optional
64
+ coordinate name in the form [coord_1_name, coord_2_name, etc], by default None
65
+ coord_list : list | None, optional
66
+ coordinates in the form [[coord_1_x, coord_1_y, coord_1_z], [coord_2_x, coord_2_y, coord_2_z],], by default None
67
+ verbose : bool, optional
68
+ print out stream computation time information, by default True
69
+ """
70
+ self.settings: NMSettings = NMSettings.load(settings)
71
+
72
+ if channels is None and data is not None:
73
+ channels = nm.utils.channels.get_default_channels_from_data(data)
74
+
75
+ if channels is not None:
76
+ self.channels = nm.io.load_channels(channels)
77
+
78
+ if self.channels.query("used == 1 and target == 0").shape[0] == 0:
79
+ raise ValueError(
80
+ "No channels selected for analysis that have column 'used' = 1 and 'target' = 0. Please check your channels"
81
+ )
82
+
83
+ if channels is None and data is None:
84
+ raise ValueError("Either `channels` or `data` must be passed to `Stream`.")
85
+
86
+ # If features that use frequency ranges are on, test them against nyquist frequency
87
+ use_freq_ranges: list[FeatureName] = [
88
+ "bandpass_filter",
89
+ "stft",
90
+ "fft",
91
+ "welch",
92
+ "bursts",
93
+ "coherence",
94
+ "nolds",
95
+ "bispectrum",
96
+ ]
97
+
98
+ need_nyquist_check = any(
99
+ (f in use_freq_ranges for f in self.settings.features.get_enabled())
100
+ )
101
+
102
+ if need_nyquist_check:
103
+ assert all(
104
+ fb.frequency_high_hz < sfreq / 2
105
+ for fb in self.settings.frequency_ranges_hz.values()
106
+ ), (
107
+ "If a feature that uses frequency ranges is selected, "
108
+ "the frequency band ranges need to be smaller than the nyquist frequency.\n"
109
+ f"Got sfreq = {sfreq} and fband ranges:\n {self.settings.frequency_ranges_hz}"
110
+ )
111
+
112
+ if sampling_rate_features_hz is not None:
113
+ self.settings.sampling_rate_features_hz = sampling_rate_features_hz
114
+
115
+ if path_grids is None:
116
+ path_grids = nm.PYNM_DIR
117
+
118
+ self.path_grids = path_grids
119
+ self.verbose = verbose
120
+ self.sfreq = sfreq
121
+ self.line_noise = line_noise
122
+ self.coord_names = coord_names
123
+ self.coord_list = coord_list
124
+ self.sess_right = None
125
+ self.projection = None
126
+ self.model = None
127
+
128
+ # TODO(toni): is it necessary to initialize the DataProcessor on stream init?
129
+ self.data_processor = DataProcessor(
130
+ sfreq=self.sfreq,
131
+ settings=self.settings,
132
+ channels=self.channels,
133
+ path_grids=self.path_grids,
134
+ coord_names=coord_names,
135
+ coord_list=coord_list,
136
+ line_noise=line_noise,
137
+ verbose=self.verbose,
138
+ )
139
+
140
+ self.data = data
141
+
142
+ self.target_idx_initialized: bool = False
143
+
26
144
  def _add_target(self, feature_dict: dict, data: np.ndarray) -> None:
27
145
  """Add target channels to feature series.
28
146
 
@@ -39,12 +157,10 @@ class _GenericStream(NMStream):
39
157
  feature dict with target channels added
40
158
  """
41
159
 
42
- if self.nm_channels["target"].sum() > 0:
160
+ if self.channels["target"].sum() > 0:
43
161
  if not self.target_idx_initialized:
44
- self.target_indexes = self.nm_channels[
45
- self.nm_channels["target"] == 1
46
- ].index
47
- self.target_names = self.nm_channels.loc[
162
+ self.target_indexes = self.channels[self.channels["target"] == 1].index
163
+ self.target_names = self.channels.loc[
48
164
  self.target_indexes, "name"
49
165
  ].to_list()
50
166
  self.target_idx_initialized = True
@@ -52,16 +168,16 @@ class _GenericStream(NMStream):
52
168
  for target_idx, target_name in zip(self.target_indexes, self.target_names):
53
169
  feature_dict[target_name] = data[target_idx, -1]
54
170
 
55
- def _handle_data(self, data: np.ndarray | pd.DataFrame) -> np.ndarray:
56
- names_expected = self.nm_channels["name"].to_list()
171
+ def _handle_data(self, data: "np.ndarray | pd.DataFrame") -> np.ndarray:
172
+ names_expected = self.channels["name"].to_list()
57
173
 
58
174
  if isinstance(data, np.ndarray):
59
175
  if not len(names_expected) == data.shape[0]:
60
176
  raise ValueError(
61
177
  "If data is passed as an array, the first dimension must"
62
- " match the number of channel names in `nm_channels`.\n"
178
+ " match the number of channel names in `channels`.\n"
63
179
  f" Number of data channels (data.shape[0]): {data.shape[0]}\n"
64
- f' Length of nm_channels["name"]: {len(names_expected)}.'
180
+ f' Length of channels["name"]: {len(names_expected)}.'
65
181
  )
66
182
  return data
67
183
 
@@ -72,51 +188,79 @@ class _GenericStream(NMStream):
72
188
  ):
73
189
  raise ValueError(
74
190
  "If data is passed as a DataFrame, the"
75
- "column names must match the channel names in `nm_channels`.\n"
191
+ "column names must match the channel names in `channels`.\n"
76
192
  f"Input dataframe column names: {names_data}\n"
77
- f'Expected (from nm_channels["name"]): : {names_expected}.'
193
+ f'Expected (from channels["name"]): : {names_expected}.'
78
194
  )
79
195
  return data.to_numpy().transpose()
80
196
 
81
- def _run(
197
+ def run(
82
198
  self,
83
- data: np.ndarray | pd.DataFrame | None = None,
84
- out_path_root: _PathLike = "",
85
- folder_name: str = "sub",
86
- is_stream_lsl: bool = True,
87
- stream_lsl_name: str = None,
199
+ data: "np.ndarray | pd.DataFrame | None" = None,
200
+ out_dir: _PathLike = "",
201
+ experiment_name: str = "sub",
202
+ is_stream_lsl: bool = False,
203
+ stream_lsl_name: str | None = None,
88
204
  plot_lsl: bool = False,
89
205
  save_csv: bool = False,
90
206
  save_interval: int = 10,
91
207
  return_df: bool = True,
92
- ) -> pd.DataFrame:
93
- from py_neuromodulation.nm_database import NMDatabase
208
+ ) -> "pd.DataFrame":
209
+ self.is_stream_lsl = is_stream_lsl
210
+ self.stream_lsl_name = stream_lsl_name
94
211
 
95
- out_path_root = Path.cwd() if not out_path_root else Path(out_path_root)
212
+ # Validate input data
213
+ if data is not None:
214
+ data = self._handle_data(data)
215
+ elif self.data is not None:
216
+ data = self._handle_data(self.data)
217
+ elif self.data is None and data is None and self.is_stream_lsl is False:
218
+ raise ValueError("No data passed to run function.")
96
219
 
97
- self.PATH_OUT = out_path_root
98
- self.PATH_OUT_folder_name = folder_name
220
+ # Generate output dirs
221
+ self.out_dir_root = Path.cwd() if not out_dir else Path(out_dir)
222
+ self.out_dir = self.out_dir_root / experiment_name
223
+ # TONI: Need better default experiment name
224
+ self.experiment_name = experiment_name if experiment_name else "sub"
99
225
 
100
- out_dir = out_path_root / folder_name
101
- out_dir.mkdir(parents=True, exist_ok=True)
226
+ self.out_dir.mkdir(parents=True, exist_ok=True)
102
227
 
103
- # TONI: Need better default experiment name
104
- experiment_name = folder_name if folder_name else "sub"
228
+ # Open database connection
229
+ # TONI: we should give the user control over the save format
230
+ from py_neuromodulation.utils.database import NMDatabase
105
231
 
106
232
  db = NMDatabase(experiment_name, out_dir) # Create output database
107
233
 
108
234
  self.batch_count: int = 0 # Keep track of the number of batches processed
109
235
 
236
+ # Reinitialize the data processor in case the nm_channels or nm_settings changed between runs of the same Stream
237
+ # TONI: then I think we can just not initialize the data processor in the init function
238
+ self.data_processor = DataProcessor(
239
+ sfreq=self.sfreq,
240
+ settings=self.settings,
241
+ channels=self.channels,
242
+ path_grids=self.path_grids,
243
+ coord_names=self.coord_names,
244
+ coord_list=self.coord_list,
245
+ line_noise=self.line_noise,
246
+ verbose=self.verbose,
247
+ )
248
+
249
+ nm.logger.log_to_file(out_dir)
250
+
251
+ # Initialize generator
252
+ self.generator: Iterator
110
253
  if not is_stream_lsl:
111
- from py_neuromodulation.nm_generator import raw_data_generator
254
+ from py_neuromodulation.stream.generator import RawDataGenerator
112
255
 
113
- generator = raw_data_generator(
114
- data=data,
115
- settings=self.settings,
116
- sfreq=self.sfreq,
256
+ self.generator = RawDataGenerator(
257
+ data,
258
+ self.sfreq,
259
+ self.settings.sampling_rate_features_hz,
260
+ self.settings.segment_length_features_ms,
117
261
  )
118
262
  else:
119
- from py_neuromodulation.nm_mnelsl_stream import LSLStream
263
+ from py_neuromodulation.stream.mnelsl_stream import LSLStream
120
264
 
121
265
  self.lsl_stream = LSLStream(
122
266
  settings=self.settings, stream_name=stream_lsl_name
@@ -134,21 +278,13 @@ class _GenericStream(NMStream):
134
278
  f"does not match the settings ({self.sfreq})."
135
279
  "The sampling frequency read from the stream will be used"
136
280
  )
137
- logger.warning(error_msg)
281
+ nm.logger.warning(error_msg)
138
282
  self.sfreq = self.lsl_stream.stream.sinfo.sfreq
139
283
 
140
- generator = self.lsl_stream.get_next_batch()
284
+ self.generator = self.lsl_stream.get_next_batch()
141
285
 
142
286
  prev_batch_end = 0
143
-
144
- while True:
145
- next_item = next(generator, None)
146
-
147
- if next_item is not None:
148
- timestamps, data_batch = next_item
149
- else:
150
- break
151
-
287
+ for timestamps, data_batch in self.generator:
152
288
  if data_batch is None:
153
289
  break
154
290
 
@@ -156,7 +292,7 @@ class _GenericStream(NMStream):
156
292
 
157
293
  this_batch_end = timestamps[-1]
158
294
  batch_length = this_batch_end - prev_batch_end
159
- logger.debug(
295
+ nm.logger.debug(
160
296
  f"{batch_length:.3f} seconds of new data processed",
161
297
  )
162
298
 
@@ -167,7 +303,7 @@ class _GenericStream(NMStream):
167
303
  prev_batch_end = this_batch_end
168
304
 
169
305
  if self.verbose:
170
- logger.info("Time: %.2f", feature_dict["time"] / 1000)
306
+ nm.logger.info("Time: %.2f", feature_dict["time"] / 1000)
171
307
 
172
308
  self._add_target(feature_dict, data_batch)
173
309
 
@@ -185,15 +321,13 @@ class _GenericStream(NMStream):
185
321
  db.commit() # Save last batches
186
322
 
187
323
  # If save_csv is False, still save the first row to get the column names
188
- feature_df: pd.DataFrame = (
324
+ feature_df: "pd.DataFrame" = (
189
325
  db.fetch_all() if (save_csv or return_df) else db.head()
190
326
  )
191
327
 
192
328
  db.close() # Close the database connection
193
329
 
194
- self.save_after_stream(
195
- out_dir=out_dir, prefix=experiment_name, feature_arr=feature_df
196
- )
330
+ self._save_after_stream(feature_arr=feature_df)
197
331
 
198
332
  return feature_df # TONI: Not sure if this makes sense anymore
199
333
 
@@ -240,9 +374,9 @@ class _GenericStream(NMStream):
240
374
  if sfreq is None:
241
375
  sfreq = self.sfreq
242
376
 
243
- if self.nm_channels is not None:
244
- ch_names = self.nm_channels["name"].to_list()
245
- ch_types = self.nm_channels["type"].to_list()
377
+ if self.channels is not None:
378
+ ch_names = self.channels["name"].to_list()
379
+ ch_types = self.channels["type"].to_list()
246
380
  else:
247
381
  ch_names = [f"ch_{i}" for i in range(data.shape[0])]
248
382
  ch_types = ["ecog" for i in range(data.shape[0])]
@@ -261,138 +395,33 @@ class _GenericStream(NMStream):
261
395
  if plot_psd:
262
396
  raw.compute_psd().plot()
263
397
 
264
-
265
- class Stream(_GenericStream):
266
- def __init__(
398
+ def _save_after_stream(
267
399
  self,
268
- sfreq: float,
269
- data: np.ndarray | pd.DataFrame | None = None,
270
- nm_channels: pd.DataFrame | _PathLike | None = None,
271
- settings: "NMSettings | _PathLike | None" = None,
272
- sampling_rate_features_hz: float | None = None,
273
- line_noise: float | None = 50,
274
- path_grids: _PathLike | None = None,
275
- coord_names: list | None = None,
276
- coord_list: list | None = None,
277
- verbose: bool = True,
400
+ feature_arr: "pd.DataFrame | None" = None,
278
401
  ) -> None:
279
- """Stream initialization
280
-
281
- Parameters
282
- ----------
283
- sfreq : float
284
- sampling frequency of data in Hertz
285
- data : np.ndarray | pd.DataFrame | None, optional
286
- data to be streamed with shape (n_channels, n_time), by default None
287
- nm_channels : pd.DataFrame | _PathLike
288
- parametrization of channels (see nm_define_channels.py for initialization)
289
- settings : NMSettings | _PathLike | None, optional
290
- Initialized nm_settings.NMSettings object, by default the py_neuromodulation/nm_settings.yaml are read
291
- and passed into a settings object
292
- line_noise : float | None, optional
293
- line noise, by default 50
294
- sampling_rate_features_hz : float | None, optional
295
- feature sampling rate, by default None
296
- path_grids : _PathLike | None, optional
297
- path to grid_cortex.tsv and/or gird_subcortex.tsv, by default Non
298
- coord_names : list | None, optional
299
- coordinate name in the form [coord_1_name, coord_2_name, etc], by default None
300
- coord_list : list | None, optional
301
- coordinates in the form [[coord_1_x, coord_1_y, coord_1_z], [coord_2_x, coord_2_y, coord_2_z],], by default None
302
- verbose : bool, optional
303
- log stream computation time information, by default True
304
- """
305
-
306
- if nm_channels is None and data is not None:
307
- from py_neuromodulation.nm_define_nmchannels import (
308
- get_default_channels_from_data,
309
- )
310
-
311
- nm_channels = get_default_channels_from_data(data)
312
-
313
- if nm_channels is None and data is None:
314
- raise ValueError(
315
- "Either `nm_channels` or `data` must be passed to `Stream`."
316
- )
317
-
318
- super().__init__(
319
- sfreq=sfreq,
320
- nm_channels=nm_channels,
321
- settings=settings,
322
- line_noise=line_noise,
323
- sampling_rate_features_hz=sampling_rate_features_hz,
324
- path_grids=path_grids,
325
- coord_names=coord_names,
326
- coord_list=coord_list,
327
- verbose=verbose,
328
- )
329
-
330
- self.data = data
331
-
332
- self.target_idx_initialized: bool = False
333
-
334
- def run(
402
+ """Save features, settings, nm_channels and sidecar after run"""
403
+ self._save_sidecar()
404
+ if feature_arr is not None:
405
+ self._save_features(feature_arr)
406
+ self._save_settings()
407
+ self._save_channels()
408
+
409
+ def _save_features(
335
410
  self,
336
- data: np.ndarray | pd.DataFrame | None = None,
337
- out_path_root: _PathLike = Path.cwd(),
338
- folder_name: str = "sub",
339
- stream_lsl: bool = False,
340
- stream_lsl_name: str = None,
341
- save_csv: bool = False,
342
- plot_lsl: bool = False,
343
- save_interval: float = 10,
344
- ) -> pd.DataFrame:
345
- """Call run function for offline stream.
346
-
347
- Parameters
348
- ----------
349
- data : np.ndarray | pd.DataFrame
350
- shape (n_channels, n_time)
351
- out_path_root : _PathLike | None, optional
352
- Full path to store estimated features, by default None
353
- If None, data is simply returned and not saved
354
- folder_name : str, optional
355
- folder output name, commonly subject or run name, by default "sub"
356
- stream_lsl : bool, optional
357
- stream data from LSL, by default False
358
- stream_lsl_name : str, optional
359
- stream name, by default None
360
- plot_lsl : bool, optional
361
- plot data with mne_lsl stream_viewer
362
- save_csv : bool, optional
363
- save csv file, by default False
364
- save_interval : int, optional
365
- save interval in number of samples, by default 10
366
-
367
- Returns
368
- -------
369
- pd.DataFrame
370
- feature DataFrame
371
- """
372
-
373
- super().run() # reinitialize the stream
411
+ feature_arr: "pd.DataFrame",
412
+ ) -> None:
413
+ nm.io.save_features(feature_arr, self.out_dir, self.experiment_name)
374
414
 
375
- self.stream_lsl = stream_lsl
376
- self.stream_lsl_name = stream_lsl_name
415
+ def _save_channels(self) -> None:
416
+ self.data_processor.save_channels(self.out_dir, self.experiment_name)
377
417
 
378
- if data is not None:
379
- data = self._handle_data(data)
380
- elif self.data is not None:
381
- data = self._handle_data(self.data)
382
- elif self.data is None and data is None and self.stream_lsl is False:
383
- raise ValueError("No data passed to run function.")
418
+ def _save_settings(self) -> None:
419
+ self.data_processor.save_settings(self.out_dir, self.experiment_name)
384
420
 
385
- out_path = Path(out_path_root, folder_name)
386
- out_path.mkdir(parents=True, exist_ok=True)
387
- logger.log_to_file(out_path)
388
-
389
- return self._run(
390
- data,
391
- out_path_root,
392
- folder_name,
393
- is_stream_lsl=stream_lsl,
394
- stream_lsl_name=stream_lsl_name,
395
- save_csv=save_csv,
396
- plot_lsl=plot_lsl,
397
- save_interval=save_interval,
421
+ def _save_sidecar(self) -> None:
422
+ """Save sidecar including fs, coords, sess_right to
423
+ out_path_root and subfolder 'folder_name'"""
424
+ additional_args = {"sess_right": self.sess_right}
425
+ self.data_processor.save_sidecar(
426
+ self.out_dir, self.experiment_name, additional_args
398
427
  )
@@ -0,0 +1,2 @@
1
+ from .channels import *
2
+ from . import types
@@ -1,9 +1,11 @@
1
- """Module for handling nm_channels."""
1
+ """Module for handling channels."""
2
2
 
3
3
  from collections.abc import Iterable
4
- import pandas as pd
4
+ from typing import TYPE_CHECKING
5
5
  import numpy as np
6
6
 
7
+ if TYPE_CHECKING:
8
+ import pandas as pd
7
9
 
8
10
  _LFP_TYPES = ["seeg", "dbs", "lfp"] # must be lower-case
9
11
 
@@ -17,10 +19,10 @@ def set_channels(
17
19
  ecog_only: bool = False,
18
20
  used_types: Iterable[str] | None = ("ecog", "dbs", "seeg"),
19
21
  target_keywords: Iterable[str] | None = ("mov", "squared", "label"),
20
- ) -> pd.DataFrame:
21
- """Return dataframe with channel-specific settings in nm_channels format.
22
+ ) -> "pd.DataFrame":
23
+ """Return dataframe with channel-specific settings in channels format.
22
24
 
23
- Return an nm_channels dataframe with the columns: "name", "rereference",
25
+ Return a channels dataframe with the columns: "name", "rereference",
24
26
  "used", "target", "type", "status", "new_name"]. "name" is set to ch_names,
25
27
  "rereference" can be specified individually. "used" is set to 1 for all
26
28
  channel types specified in `used_types`, else to 0. "target" is set to 1
@@ -63,8 +65,10 @@ def set_channels(
63
65
 
64
66
  Returns
65
67
  -------
66
- df: DataFrame in nm_channels format
68
+ df: DataFrame in channels format
67
69
  """
70
+ import pandas as pd
71
+
68
72
  if not (len(ch_names) == len(ch_types)):
69
73
  raise ValueError(
70
74
  "Number of `ch_names` and `ch_types` must match."
@@ -196,8 +200,8 @@ def set_channels(
196
200
 
197
201
 
198
202
  def _get_default_references(
199
- df: pd.DataFrame, ch_names: list[str], ch_types: list[str]
200
- ) -> pd.DataFrame:
203
+ df: "pd.DataFrame", ch_names: list[str], ch_types: list[str]
204
+ ) -> "pd.DataFrame":
201
205
  """Add references with default settings (ECOG CAR, LFP bipolar)."""
202
206
  ecog_chs = []
203
207
  lfp_chs = []
@@ -250,7 +254,7 @@ def get_default_channels_from_data(
250
254
  data: np.ndarray,
251
255
  car_rereferencing: bool = True,
252
256
  ):
253
- """Return default nm_channels dataframe with
257
+ """Return default channels dataframe with
254
258
  ecog datatype, no bad channels, no targets, common average rereferencing
255
259
 
256
260
  Parameters
@@ -272,6 +276,7 @@ def get_default_channels_from_data(
272
276
  - status
273
277
  - new_name
274
278
  """
279
+ import pandas as pd
275
280
 
276
281
  ch_name = [f"ch{idx}" for idx in range(data.shape[0])]
277
282
  status = ["good" for _ in range(data.shape[0])]
@@ -1,8 +1,8 @@
1
1
  import sqlite3
2
2
  from pathlib import Path
3
3
  import pandas as pd
4
- from py_neuromodulation.nm_types import _PathLike
5
- from py_neuromodulation.nm_IO import generate_unique_filename
4
+ from py_neuromodulation.utils.types import _PathLike
5
+ from py_neuromodulation.utils.io import generate_unique_filename
6
6
 
7
7
 
8
8
  class NMDatabase: