py-neuromodulation 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. py_neuromodulation/ConnectivityDecoding/_get_grid_hull.m +34 -34
  2. py_neuromodulation/ConnectivityDecoding/_get_grid_whole_brain.py +95 -106
  3. py_neuromodulation/ConnectivityDecoding/_helper_write_connectome.py +107 -119
  4. py_neuromodulation/__init__.py +80 -13
  5. py_neuromodulation/{nm_RMAP.py → analysis/RMAP.py} +496 -531
  6. py_neuromodulation/analysis/__init__.py +4 -0
  7. py_neuromodulation/{nm_decode.py → analysis/decode.py} +918 -992
  8. py_neuromodulation/{nm_analysis.py → analysis/feature_reader.py} +994 -1074
  9. py_neuromodulation/{nm_plots.py → analysis/plots.py} +627 -612
  10. py_neuromodulation/{nm_stats.py → analysis/stats.py} +458 -480
  11. py_neuromodulation/data/README +6 -6
  12. py_neuromodulation/data/dataset_description.json +8 -8
  13. py_neuromodulation/data/participants.json +32 -32
  14. py_neuromodulation/data/participants.tsv +2 -2
  15. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_coordsystem.json +5 -5
  16. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_space-mni_electrodes.tsv +11 -11
  17. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_channels.tsv +11 -11
  18. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.json +18 -18
  19. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vhdr +35 -35
  20. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/ieeg/sub-testsub_ses-EphysMedOff_task-gripforce_run-0_ieeg.vmrk +13 -13
  21. py_neuromodulation/data/sub-testsub/ses-EphysMedOff/sub-testsub_ses-EphysMedOff_scans.tsv +2 -2
  22. py_neuromodulation/default_settings.yaml +241 -0
  23. py_neuromodulation/features/__init__.py +31 -0
  24. py_neuromodulation/features/bandpower.py +165 -0
  25. py_neuromodulation/features/bispectra.py +157 -0
  26. py_neuromodulation/features/bursts.py +297 -0
  27. py_neuromodulation/features/coherence.py +255 -0
  28. py_neuromodulation/features/feature_processor.py +121 -0
  29. py_neuromodulation/features/fooof.py +142 -0
  30. py_neuromodulation/features/hjorth_raw.py +57 -0
  31. py_neuromodulation/features/linelength.py +21 -0
  32. py_neuromodulation/features/mne_connectivity.py +148 -0
  33. py_neuromodulation/features/nolds.py +94 -0
  34. py_neuromodulation/features/oscillatory.py +249 -0
  35. py_neuromodulation/features/sharpwaves.py +432 -0
  36. py_neuromodulation/filter/__init__.py +3 -0
  37. py_neuromodulation/filter/kalman_filter.py +67 -0
  38. py_neuromodulation/filter/kalman_filter_external.py +1890 -0
  39. py_neuromodulation/filter/mne_filter.py +128 -0
  40. py_neuromodulation/filter/notch_filter.py +93 -0
  41. py_neuromodulation/grid_cortex.tsv +40 -40
  42. py_neuromodulation/liblsl/libpugixml.so.1.12 +0 -0
  43. py_neuromodulation/liblsl/linux/bionic_amd64/liblsl.1.16.2.so +0 -0
  44. py_neuromodulation/liblsl/linux/bookworm_amd64/liblsl.1.16.2.so +0 -0
  45. py_neuromodulation/liblsl/linux/focal_amd46/liblsl.1.16.2.so +0 -0
  46. py_neuromodulation/liblsl/linux/jammy_amd64/liblsl.1.16.2.so +0 -0
  47. py_neuromodulation/liblsl/linux/jammy_x86/liblsl.1.16.2.so +0 -0
  48. py_neuromodulation/liblsl/linux/noble_amd64/liblsl.1.16.2.so +0 -0
  49. py_neuromodulation/liblsl/macos/amd64/liblsl.1.16.2.dylib +0 -0
  50. py_neuromodulation/liblsl/macos/arm64/liblsl.1.16.0.dylib +0 -0
  51. py_neuromodulation/liblsl/windows/amd64/liblsl.1.16.2.dll +0 -0
  52. py_neuromodulation/liblsl/windows/x86/liblsl.1.16.2.dll +0 -0
  53. py_neuromodulation/processing/__init__.py +10 -0
  54. py_neuromodulation/{nm_artifacts.py → processing/artifacts.py} +29 -25
  55. py_neuromodulation/processing/data_preprocessor.py +77 -0
  56. py_neuromodulation/processing/filter_preprocessing.py +78 -0
  57. py_neuromodulation/processing/normalization.py +175 -0
  58. py_neuromodulation/{nm_projection.py → processing/projection.py} +370 -394
  59. py_neuromodulation/{nm_rereference.py → processing/rereference.py} +97 -95
  60. py_neuromodulation/{nm_resample.py → processing/resample.py} +56 -50
  61. py_neuromodulation/stream/__init__.py +3 -0
  62. py_neuromodulation/stream/data_processor.py +325 -0
  63. py_neuromodulation/stream/generator.py +53 -0
  64. py_neuromodulation/stream/mnelsl_player.py +94 -0
  65. py_neuromodulation/stream/mnelsl_stream.py +120 -0
  66. py_neuromodulation/stream/settings.py +292 -0
  67. py_neuromodulation/stream/stream.py +427 -0
  68. py_neuromodulation/utils/__init__.py +2 -0
  69. py_neuromodulation/{nm_define_nmchannels.py → utils/channels.py} +305 -302
  70. py_neuromodulation/utils/database.py +149 -0
  71. py_neuromodulation/utils/io.py +378 -0
  72. py_neuromodulation/utils/keyboard.py +52 -0
  73. py_neuromodulation/utils/logging.py +66 -0
  74. py_neuromodulation/utils/types.py +251 -0
  75. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/METADATA +28 -33
  76. py_neuromodulation-0.0.6.dist-info/RECORD +89 -0
  77. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/WHEEL +1 -1
  78. {py_neuromodulation-0.0.4.dist-info → py_neuromodulation-0.0.6.dist-info}/licenses/LICENSE +21 -21
  79. py_neuromodulation/FieldTrip.py +0 -589
  80. py_neuromodulation/_write_example_dataset_helper.py +0 -65
  81. py_neuromodulation/nm_EpochStream.py +0 -92
  82. py_neuromodulation/nm_IO.py +0 -417
  83. py_neuromodulation/nm_across_patient_decoding.py +0 -927
  84. py_neuromodulation/nm_bispectra.py +0 -168
  85. py_neuromodulation/nm_bursts.py +0 -198
  86. py_neuromodulation/nm_coherence.py +0 -205
  87. py_neuromodulation/nm_cohortwrapper.py +0 -435
  88. py_neuromodulation/nm_eval_timing.py +0 -239
  89. py_neuromodulation/nm_features.py +0 -116
  90. py_neuromodulation/nm_features_abc.py +0 -39
  91. py_neuromodulation/nm_filter.py +0 -219
  92. py_neuromodulation/nm_filter_preprocessing.py +0 -91
  93. py_neuromodulation/nm_fooof.py +0 -159
  94. py_neuromodulation/nm_generator.py +0 -37
  95. py_neuromodulation/nm_hjorth_raw.py +0 -73
  96. py_neuromodulation/nm_kalmanfilter.py +0 -58
  97. py_neuromodulation/nm_linelength.py +0 -33
  98. py_neuromodulation/nm_mne_connectivity.py +0 -112
  99. py_neuromodulation/nm_nolds.py +0 -93
  100. py_neuromodulation/nm_normalization.py +0 -214
  101. py_neuromodulation/nm_oscillatory.py +0 -448
  102. py_neuromodulation/nm_run_analysis.py +0 -435
  103. py_neuromodulation/nm_settings.json +0 -338
  104. py_neuromodulation/nm_settings.py +0 -68
  105. py_neuromodulation/nm_sharpwaves.py +0 -401
  106. py_neuromodulation/nm_stream_abc.py +0 -218
  107. py_neuromodulation/nm_stream_offline.py +0 -359
  108. py_neuromodulation/utils/_logging.py +0 -24
  109. py_neuromodulation-0.0.4.dist-info/RECORD +0 -72
py_neuromodulation/utils/database.py
@@ -0,0 +1,149 @@
+ import sqlite3
+ from pathlib import Path
+ import pandas as pd
+ from py_neuromodulation.utils.types import _PathLike
+ from py_neuromodulation.utils.io import generate_unique_filename
+
+
+ class NMDatabase:
+     """
+     Class to create a database and insert data into it.
+
+     Parameters
+     ----------
+     name : str
+         Name of the database; used for the database file and the data table.
+     out_dir : _PathLike
+         The directory to save the database.
+     csv_path : _PathLike, optional
+         The path to save the csv file. If not provided, it will be saved
+         in the same folder as the database.
+     """
+
+     def __init__(
+         self,
+         name: str,
+         out_dir: _PathLike,
+         csv_path: _PathLike | None = None,
+     ):
+         # Make sure out_dir exists
+         Path(out_dir).mkdir(parents=True, exist_ok=True)
+
+         self.db_path = Path(out_dir, f"{name}.db")
+
+         self.table_name = f"{name}_data"  # change to param?
+         self.table_created = False
+
+         if self.db_path.exists():
+             self.db_path = generate_unique_filename(self.db_path)
+             name = self.db_path.stem
+
+         if csv_path is None:
+             self.csv_path = Path(out_dir, f"{name}.csv")
+         else:
+             self.csv_path = Path(csv_path)
+
+         self.csv_path.parent.mkdir(parents=True, exist_ok=True)
+
+         self.conn = sqlite3.connect(self.db_path)
+         self.cursor = self.conn.cursor()
+
+         # Database config and optimization, prioritize data integrity
+         self.cursor.execute("PRAGMA journal_mode=WAL")  # Write-Ahead Logging mode
+         self.cursor.execute("PRAGMA synchronous=FULL")  # Sync on every commit
+         self.cursor.execute("PRAGMA temp_store=MEMORY")  # Store temp tables in memory
+         self.cursor.execute(
+             "PRAGMA wal_autocheckpoint = 1000"
+         )  # WAL checkpoint every 1000 pages (default, 4MB, might change)
+         self.cursor.execute(
+             f"PRAGMA mmap_size = {2 * 1024 * 1024 * 1024}"
+         )  # 2GB of memory mapped
+
+     def infer_type(self, value):
+         """Infer the SQLite column type for a value when creating the table schema.
+
+         Parameters
+         ----------
+         value : int, float, str
+             The value whose type to infer.
+         """
+         if isinstance(value, (int, float)):
+             return "REAL"
+         elif isinstance(value, str):
+             return "TEXT"
+         else:
+             return "BLOB"
+
+     def create_table(self, feature_dict: dict):
+         """
+         Create a table in the database.
+
+         Parameters
+         ----------
+         feature_dict : dict
+             The dictionary with the feature names and values.
+         """
+         columns_schema = ", ".join(
+             [
+                 f'"{column}" {self.infer_type(value)}'
+                 for column, value in feature_dict.items()
+             ]
+         )
+
+         self.cursor.execute(
+             f'CREATE TABLE IF NOT EXISTS "{self.table_name}" ({columns_schema})'
+         )
+
+         # Create column names and placeholders for insert statement
+         self.columns: str = ", ".join([f'"{column}"' for column in feature_dict.keys()])
+         # Use named placeholders for more resiliency against unexpected change in column order
+         self.placeholders = ", ".join([f":{key}" for key in feature_dict.keys()])
+
+     def insert_data(self, feature_dict: dict):
+         """
+         Insert data into the database.
+
+         Parameters
+         ----------
+         feature_dict : dict
+             The dictionary with the feature names and values.
+         """
+         if not self.table_created:
+             self.create_table(feature_dict)
+             self.table_created = True
+
+         insert_sql = f'INSERT INTO "{self.table_name}" ({self.columns}) VALUES ({self.placeholders})'
+
+         self.cursor.execute(insert_sql, feature_dict)
+
+     def commit(self):
+         self.conn.commit()
+
+     def fetch_all(self):
+         """
+         Fetch all the data from the database.
+
+         Returns
+         -------
+         pd.DataFrame
+             The data in a pandas DataFrame.
+         """
+         return pd.read_sql_query(f'SELECT * FROM "{self.table_name}"', self.conn)
+
+     def head(self, n: int = 5):
+         """
+         Return the first n rows of the database.
+
+         Parameters
+         ----------
+         n : int, optional
+             The number of rows to fetch, by default 5
+
+         Returns
+         -------
+         pd.DataFrame
+             The data in a pandas DataFrame.
+         """
+         return pd.read_sql_query(
+             f'SELECT * FROM "{self.table_name}" LIMIT {n}', self.conn
+         )
+
+     def save_as_csv(self):
+         df = self.fetch_all()
+         df.to_csv(self.csv_path, index=False)
+
+     def close(self):
+         # Optimizing before closing is recommended:
+         # https://www.sqlite.org/pragma.html#pragma_optimize
+         self.cursor.execute("PRAGMA optimize")
+         self.conn.close()
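
For reference, a minimal usage sketch of the new NMDatabase class (the feature names and output directory below are hypothetical; the table schema is inferred from the first inserted dict):

import py_neuromodulation  # hypothetical example, adjust import paths as needed
from py_neuromodulation.utils.database import NMDatabase

db = NMDatabase(name="example_run", out_dir="derivatives")
db.insert_data({"time": 0.0, "ch1_beta_power": 1.23})  # creates the table on first insert
db.insert_data({"time": 0.1, "ch1_beta_power": 1.31})
db.commit()
print(db.head(2))   # first rows as a pandas DataFrame
db.save_as_csv()    # writes derivatives/example_run.csv
db.close()          # runs PRAGMA optimize before closing
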
py_neuromodulation/utils/io.py
@@ -0,0 +1,378 @@
+ import json
+ from pathlib import PurePath, Path
+ from typing import TYPE_CHECKING
+
+ import numpy as np
+
+ from py_neuromodulation.utils.types import _PathLike
+ from py_neuromodulation import logger, PYNM_DIR
+
+ if TYPE_CHECKING:
+     from mne_bids import BIDSPath
+     from mne import io as mne_io
+     import pandas as pd
+
+
+ def load_channels(
+     channels: "pd.DataFrame | _PathLike",
+ ) -> "pd.DataFrame":
+     """Read channels from a DataFrame or load them from a file path.
+     The channels table is expected to provide ch_names (list), ch_types (list),
+     bads (list), used_types (list), target_keywords (list) and a reference
+     (list or str).
+     """
+     import pandas as pd
+
+     if isinstance(channels, pd.DataFrame):
+         return channels
+
+     if not Path(channels).is_file():
+         raise ValueError(f"PATH_CHANNELS is not a valid file. Got: {channels}")
+
+     return pd.read_csv(channels)
+
+
+ def read_BIDS_data(
+     PATH_RUN: "_PathLike | BIDSPath",
+     line_noise: int = 50,
+ ) -> tuple["mne_io.Raw", np.ndarray, float, int, list | None, list | None]:
+     """Given a run path and BIDS data path, read the respective data.
+
+     Parameters
+     ----------
+     PATH_RUN : path to BIDS run file
+         supported formats: https://bids-specification.readthedocs.io/en/v1.2.1/04-modality-specific-files/04-intracranial-electroencephalography.html#ieeg-recording-data
+     line_noise : int, optional
+         by default 50
+
+     Returns
+     -------
+     raw_arr : mne.io.Raw
+     raw_arr_data : np.ndarray
+     sfreq : float
+     line_noise : int
+     coord_list : list | None
+     coord_names : list | None
+     """
+     from mne_bids import read_raw_bids, get_bids_path_from_fname
+
+     bids_path = get_bids_path_from_fname(PATH_RUN)
+
+     raw_arr = read_raw_bids(bids_path)
+     coord_list, coord_names = get_coord_list(raw_arr)
+     if raw_arr.info["line_freq"] is not None:
+         line_noise = int(raw_arr.info["line_freq"])
+     else:
+         logger.info(
+             f"Line noise is not available in the data, using value of {line_noise} Hz."
+         )
+     return (
+         raw_arr,
+         raw_arr.get_data(),
+         raw_arr.info["sfreq"],
+         line_noise,
+         coord_list,
+         coord_names,
+     )
+
+
+ def read_mne_data(
+     PATH_RUN: "_PathLike | BIDSPath",
+     line_noise: int = 50,
+ ):
+     """Read data in any mne.io.read_raw supported format.
+
+     Parameters
+     ----------
+     PATH_RUN : _PathLike | BIDSPath
+         Path to one of the mne.io.read_raw supported types: https://mne.tools/stable/generated/mne.io.read_raw.html
+     line_noise : int, optional
+         line noise, by default 50
+
+     Returns
+     -------
+     data : np.ndarray
+     sfreq : float
+     ch_names : list[str]
+     ch_types : list[str]
+     bads : list[str]
+     """
+     from mne import io as mne_io
+
+     raw_arr = mne_io.read_raw(PATH_RUN)
+     sfreq = raw_arr.info["sfreq"]
+     ch_names = raw_arr.info["ch_names"]
+     ch_types = raw_arr.get_channel_types()
+     logger.info(
+         "Channel data is read using the mne.io.read_raw function. Channel types might"
+         " not be correct and are set to 'eeg' by default."
+     )
+     bads = raw_arr.info["bads"]
+
+     if raw_arr.info["line_freq"] is not None:
+         line_noise = int(raw_arr.info["line_freq"])
+     else:
+         logger.info(
+             f"Line noise is not available in the data, using value of {line_noise} Hz."
+         )
+
+     return raw_arr.get_data(), sfreq, ch_names, ch_types, bads
+
+
+ def get_coord_list(
+     raw: "mne_io.BaseRaw",
+ ) -> tuple[list, list] | tuple[None, None]:
+     """Return the coordinate list and channel names from an mne Raw object.
+
+     Parameters
+     ----------
+     raw : mne_io.BaseRaw
+
+     Returns
+     -------
+     coord_list : list | None
+     coord_names : list | None
+     """
+     montage = raw.get_montage()
+     if montage is not None:
+         coord_list = np.array(
+             list(dict(montage.get_positions()["ch_pos"]).values())
+         ).tolist()
+         coord_names = np.array(
+             list(dict(montage.get_positions()["ch_pos"]).keys())
+         ).tolist()
+     else:
+         coord_list = None
+         coord_names = None
+
+     return coord_list, coord_names
+
+
+ def read_grid(PATH_GRIDS: _PathLike | None, grid_str: str) -> "pd.DataFrame":
+     """Read grid file from path or PYNM_DIR.
+
+     Parameters
+     ----------
+     PATH_GRIDS : _PathLike | None
+         path to grid file, by default None
+     grid_str : str
+         grid name
+
+     Returns
+     -------
+     pd.DataFrame
+         pd.DataFrame including mni x,y,z coordinates for each grid point
+     """
+     import pandas as pd
+
+     if PATH_GRIDS is None:
+         grid = pd.read_csv(PYNM_DIR / ("grid_" + grid_str.lower() + ".tsv"), sep="\t")
+     else:
+         grid = pd.read_csv(
+             PurePath(PATH_GRIDS, "grid_" + grid_str.lower() + ".tsv"), sep="\t"
+         )
+     return grid
+
+
177
+ def get_annotations(PATH_ANNOTATIONS: str, PATH_RUN: str, raw_arr: "mne_io.RawArray"):
178
+ filepath = PurePath(PATH_ANNOTATIONS, PurePath(PATH_RUN).name[:-5] + ".txt")
179
+ from mne import read_annotations
180
+
181
+ try:
182
+ annot = read_annotations(filepath)
183
+ raw_arr.set_annotations(annot)
184
+
185
+ # annotations starting with "BAD" are omitted with reject_by_annotations 'omit' param
186
+ annot_data = raw_arr.get_data(reject_by_annotation="omit")
187
+ except FileNotFoundError:
188
+ logger.critical(f"Annotations file could not be found: {filepath}")
189
+
190
+ return annot, annot_data, raw_arr
191
+
192
+
+ def write_csv(df, path_out):
+     """
+     Save a pandas DataFrame to disk as CSV using PyArrow
+     (almost 10x faster than pandas).
+     Unlike pandas' df.to_csv(), it does not write an index column by default.
+     """
+     from pyarrow import csv, Table
+
+     csv.write_csv(Table.from_pandas(df), path_out)
+
+
+ def save_channels(
+     nmchannels: "pd.DataFrame",
+     out_dir: _PathLike = "",
+     prefix: str = "",
+ ) -> None:
+     out_dir = Path.cwd() if not out_dir else Path(out_dir)
+     filename = "channels.csv" if not prefix else prefix + "_channels.csv"
+     write_csv(nmchannels, out_dir / filename)
+     logger.info(f"{filename} saved to {out_dir}")
+
+
+ def save_features(
+     df_features: "pd.DataFrame",
+     out_dir: _PathLike = "",
+     prefix: str = "",
+ ) -> None:
+     out_dir = Path.cwd() if not out_dir else Path(out_dir)
+     filename = f"{prefix}_FEATURES.csv" if prefix else "_FEATURES.csv"
+     write_csv(df_features, out_dir / filename)
+     logger.info(f"{filename} saved to {str(out_dir)}")
+
+
+ def save_sidecar(
+     sidecar: dict,
+     out_dir: _PathLike = "",
+     prefix: str = "",
+ ) -> None:
+     save_general_dict(sidecar, out_dir, prefix, "_SIDECAR.json")
+
+
+ def save_general_dict(
+     dict_: dict,
+     out_dir: _PathLike = "",
+     prefix: str = "",
+     str_add: str = "",
+ ) -> None:
+     out_dir = Path.cwd() if not out_dir else Path(out_dir)
+     filename = f"{prefix}{str_add}"
+
+     with open(out_dir / filename, "w") as f:
+         json.dump(
+             dict_,
+             f,
+             default=default_json_convert,
+             indent=4,
+             separators=(",", ": "),
+         )
+     logger.info(f"{filename} saved to {out_dir}")
+
+
+ def default_json_convert(obj) -> list | float:
+     import pandas as pd
+
+     if isinstance(obj, np.ndarray):
+         return obj.tolist()
+     if isinstance(obj, pd.DataFrame):
+         return obj.to_numpy().tolist()
+     if isinstance(obj, np.integer):
+         return int(obj)
+     if isinstance(obj, np.floating):
+         return float(obj)
+     raise TypeError("Not serializable")
+
+
+ def read_sidecar(PATH: _PathLike) -> dict:
+     with open(PurePath(str(PATH) + "_SIDECAR.json")) as f:
+         return json.load(f)
+
+
+ def read_features(PATH: _PathLike) -> "pd.DataFrame":
+     import pandas as pd
+
+     return pd.read_csv(str(PATH) + "_FEATURES.csv", engine="pyarrow")
+
+
+ def read_channels(PATH: _PathLike) -> "pd.DataFrame":
+     import pandas as pd
+
+     return pd.read_csv(str(PATH) + "_channels.csv")
+
+
+ def get_run_list_indir(PATH: _PathLike) -> list:
+     from os import walk
+
+     f_files = []
+     # for dirpath, _, files in Path(PATH).walk():  # Only works in Python >= 3.12
+     for dirpath, _, files in walk(PATH):
+         for x in files:
+             if "FEATURES" in x:
+                 f_files.append(PurePath(dirpath).name)
+     return f_files
+
+
+ def loadmat(filename) -> dict:
+     """
+     This function should be called instead of scipy.io.loadmat directly,
+     as it cures the problem of not properly recovering Python dictionaries
+     from .mat files. It calls _check_keys to convert all entries
+     that are still mat-objects.
+     """
+     from scipy.io import loadmat as sio_loadmat
+
+     data = sio_loadmat(filename, struct_as_record=False, squeeze_me=True)
+     return _check_keys(data)
+
+
+ def get_paths_example_data():
+     """
+     This function should provide RUN_NAME, PATH_RUN, PATH_BIDS, PATH_OUT and datatype for the example
+     dataset used in most examples.
+     """
+
+     sub = "testsub"
+     ses = "EphysMedOff"
+     task = "gripforce"
+     run = 0
+     datatype = "ieeg"
+
+     # Define run name and access paths in the BIDS format.
+     RUN_NAME = f"sub-{sub}_ses-{ses}_task-{task}_run-{run}"
+
+     PATH_BIDS = PYNM_DIR / "data"
+
+     PATH_RUN = PYNM_DIR / "data" / f"sub-{sub}" / f"ses-{ses}" / datatype / RUN_NAME
+
+     # Provide a path for the output data.
+     PATH_OUT = PATH_BIDS / "derivatives"
+
+     return RUN_NAME, PATH_RUN, PATH_BIDS, PATH_OUT, datatype
+
+
+ def _check_keys(data: dict) -> dict:
+     """
+     Check if entries in the dictionary are mat-objects. If yes,
+     _todict is called to change them to nested dictionaries.
+     """
+     from scipy.io.matlab import mat_struct
+
+     for key in data:
+         if isinstance(data[key], mat_struct):
+             data[key] = _todict(data[key])
+     return data
+
+
+ def _todict(matobj) -> dict:
+     """
+     A recursive function which constructs nested dictionaries from matobjects.
+     """
+     from scipy.io.matlab import mat_struct
+
+     result = {}
+     for strg in matobj._fieldnames:
+         elem = matobj.__dict__[strg]
+         if isinstance(elem, mat_struct):
+             result[strg] = _todict(elem)
+         else:
+             result[strg] = elem
+     return result
+
+
+ def generate_unique_filename(path: _PathLike):
+     path = Path(path)
+
+     directory = path.parent
+     filename = path.stem
+     extension = path.suffix
+
+     counter = 1
+     while True:
+         new_filename = f"{filename}_{counter}{extension}"
+         new_file_path = directory / new_filename
+         if not new_file_path.exists():
+             return Path(new_file_path)
+         counter += 1
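
A hedged sketch of the new io helpers in use (the recording file name and output directory are hypothetical; read_mne_data accepts any mne.io.read_raw-supported format):

import pandas as pd
from py_neuromodulation.utils import io

data, sfreq, ch_names, ch_types, bads = io.read_mne_data("recording.vhdr")

features = pd.DataFrame({"time": [0.0, 0.1], "ch1_beta": [1.2, 1.3]})
io.save_features(features, out_dir="derivatives", prefix="run-0")       # writes run-0_FEATURES.csv
df = io.read_features("derivatives/run-0")                              # reads it back
unique = io.generate_unique_filename("derivatives/run-0_FEATURES.csv")  # next free run-0_FEATURES_<n>.csv
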
py_neuromodulation/utils/keyboard.py
@@ -0,0 +1,52 @@
+ import asyncio
+ import sys
+ from typing import Callable
+
+ if sys.platform.startswith("win"):
+     import msvcrt
+ else:
+     import termios
+     import tty
+
+
+ class KeyboardListener:
+     def __init__(self, event_callback: tuple[str, Callable] | None = None):
+         self.callbacks = {}
+         self.running = False
+
+         if event_callback is not None:
+             self.on_press(*event_callback)
+
+     def on_press(self, key, callback):
+         self.callbacks[key] = callback
+
+     async def _windows_listener(self):
+         while self.running:
+             if msvcrt.kbhit():
+                 key = msvcrt.getch().decode("utf-8").lower()
+                 if key in self.callbacks:
+                     await self.callbacks[key]()
+             await asyncio.sleep(0.01)
+
+     async def _unix_listener(self):
+         fd = sys.stdin.fileno()
+         old_settings = termios.tcgetattr(fd)
+         try:
+             tty.setraw(fd)
+             while self.running:
+                 key = sys.stdin.read(1).lower()
+                 if key in self.callbacks:
+                     await self.callbacks[key]()
+                 await asyncio.sleep(0.01)
+         finally:
+             termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+
+     async def start(self):
+         self.running = True
+         if sys.platform.startswith("win"):
+             await self._windows_listener()
+         else:
+             await self._unix_listener()
+
+     def stop(self):
+         self.running = False
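
A minimal sketch of KeyboardListener in use; registered callbacks are awaited, so they must be async functions (the "q" binding below is an arbitrary example):

import asyncio
from py_neuromodulation.utils.keyboard import KeyboardListener

async def main():
    listener = KeyboardListener()

    async def quit_callback():
        print("q pressed, stopping listener")
        listener.stop()  # ends the polling loop inside start()

    listener.on_press("q", quit_callback)
    await listener.start()  # returns once stop() is called

asyncio.run(main())
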
py_neuromodulation/utils/logging.py
@@ -0,0 +1,66 @@
+ from pathlib import Path
+ from py_neuromodulation.utils.types import _PathLike
+ import logging
+
+ INFOFORMAT = "%(name)s:\t%(message)s"
+ DEBUGFORMAT = "%(asctime)s:%(levelname)s:%(name)s:%(filename)s:%(funcName)s:%(lineno)d:\t%(message)s"
+
+ LOG_LEVELS = {
+     "DEBUG": (logging.DEBUG, DEBUGFORMAT),
+     "INFO": (logging.INFO, INFOFORMAT),
+     "WARNING": (logging.WARNING, DEBUGFORMAT),
+     "ERROR": (logging.ERROR, DEBUGFORMAT),
+ }
+
+
+ class NMLogger(logging.Logger):
+     """
+     Subclass of logging.Logger with some extra functionality
+     """
+
+     def __init__(self, name: str, level: str = "INFO") -> None:
+         super().__init__(name, LOG_LEVELS[level][0])
+
+         self.setLevel(level)
+
+         self._console_handler = logging.StreamHandler()
+         self._console_handler.setLevel(level)
+         self._console_handler.setFormatter(logging.Formatter(LOG_LEVELS[level][1]))
+
+         self.addHandler(self._console_handler)
+
+     def set_level(self, level: str):
+         """
+         Set console logging level
+         """
+         self.setLevel(level)
+         self._console_handler.setLevel(level)
+         self._console_handler.setFormatter(logging.Formatter(LOG_LEVELS[level][1]))
+
+     def log_to_file(self, path: _PathLike, mode: str = "w"):
+         """
+         Add file handlers to the logger
+
+         Parameters
+         ----------
+         path : _PathLike
+             directory where to save logfiles
+         mode : str, ('w', 'a')
+             w: overwrite files
+             a: append to files
+         """
+         path = Path(path)
+         path.mkdir(parents=True, exist_ok=True)
+
+         # Pass mode here too, so the debug log also honors overwrite/append
+         self.debug_file_handler = logging.FileHandler(
+             path / "logfile_pydebug.log", mode=mode
+         )
+         self.debug_file_handler.setLevel(logging.DEBUG)
+         self.debug_file_handler.setFormatter(logging.Formatter(DEBUGFORMAT))
+
+         self.info_file_handler = logging.FileHandler(
+             path / "logfile_pyinfo.log", mode=mode
+         )
+         self.info_file_handler.setLevel(logging.INFO)
+         self.info_file_handler.setFormatter(logging.Formatter(INFOFORMAT))
+
+         self.addHandler(self.info_file_handler)
+         self.addHandler(self.debug_file_handler)
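
A short usage sketch of the NMLogger class (logger name and log directory below are hypothetical):

from py_neuromodulation.utils.logging import NMLogger

logger = NMLogger("my_analysis", level="INFO")
logger.log_to_file("derivatives/logs", mode="a")  # writes logfile_pyinfo.log and logfile_pydebug.log
logger.info("feature computation started")
logger.set_level("DEBUG")  # increase console verbosity
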