braindecode 0.8__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (102)
  1. braindecode/__init__.py +1 -2
  2. braindecode/augmentation/__init__.py +50 -0
  3. braindecode/augmentation/base.py +222 -0
  4. braindecode/augmentation/functional.py +1096 -0
  5. braindecode/augmentation/transforms.py +1274 -0
  6. braindecode/classifier.py +26 -24
  7. braindecode/datasets/__init__.py +34 -0
  8. braindecode/datasets/base.py +840 -0
  9. braindecode/datasets/bbci.py +694 -0
  10. braindecode/datasets/bcicomp.py +194 -0
  11. braindecode/datasets/bids.py +245 -0
  12. braindecode/datasets/mne.py +172 -0
  13. braindecode/datasets/moabb.py +209 -0
  14. braindecode/datasets/nmt.py +311 -0
  15. braindecode/datasets/sleep_physio_challe_18.py +412 -0
  16. braindecode/datasets/sleep_physionet.py +125 -0
  17. braindecode/datasets/tuh.py +588 -0
  18. braindecode/datasets/xy.py +95 -0
  19. braindecode/datautil/__init__.py +49 -0
  20. braindecode/datautil/serialization.py +342 -0
  21. braindecode/datautil/util.py +41 -0
  22. braindecode/eegneuralnet.py +63 -47
  23. braindecode/functional/__init__.py +10 -0
  24. braindecode/functional/functions.py +251 -0
  25. braindecode/functional/initialization.py +47 -0
  26. braindecode/models/__init__.py +52 -0
  27. braindecode/models/atcnet.py +652 -0
  28. braindecode/models/attentionbasenet.py +550 -0
  29. braindecode/models/base.py +296 -0
  30. braindecode/models/biot.py +483 -0
  31. braindecode/models/contrawr.py +296 -0
  32. braindecode/models/ctnet.py +450 -0
  33. braindecode/models/deep4.py +322 -0
  34. braindecode/models/deepsleepnet.py +295 -0
  35. braindecode/models/eegconformer.py +372 -0
  36. braindecode/models/eeginception_erp.py +304 -0
  37. braindecode/models/eeginception_mi.py +371 -0
  38. braindecode/models/eegitnet.py +301 -0
  39. braindecode/models/eegminer.py +255 -0
  40. braindecode/models/eegnet.py +473 -0
  41. braindecode/models/eegnex.py +247 -0
  42. braindecode/models/eegresnet.py +362 -0
  43. braindecode/models/eegsimpleconv.py +199 -0
  44. braindecode/models/eegtcnet.py +335 -0
  45. braindecode/models/fbcnet.py +221 -0
  46. braindecode/models/fblightconvnet.py +313 -0
  47. braindecode/models/fbmsnet.py +325 -0
  48. braindecode/models/hybrid.py +126 -0
  49. braindecode/models/ifnet.py +441 -0
  50. braindecode/models/labram.py +1166 -0
  51. braindecode/models/msvtnet.py +375 -0
  52. braindecode/models/sccnet.py +182 -0
  53. braindecode/models/shallow_fbcsp.py +208 -0
  54. braindecode/models/signal_jepa.py +1012 -0
  55. braindecode/models/sinc_shallow.py +337 -0
  56. braindecode/models/sleep_stager_blanco_2020.py +167 -0
  57. braindecode/models/sleep_stager_chambon_2018.py +157 -0
  58. braindecode/models/sleep_stager_eldele_2021.py +536 -0
  59. braindecode/models/sparcnet.py +378 -0
  60. braindecode/models/summary.csv +41 -0
  61. braindecode/models/syncnet.py +232 -0
  62. braindecode/models/tcn.py +273 -0
  63. braindecode/models/tidnet.py +395 -0
  64. braindecode/models/tsinception.py +258 -0
  65. braindecode/models/usleep.py +340 -0
  66. braindecode/models/util.py +133 -0
  67. braindecode/modules/__init__.py +38 -0
  68. braindecode/modules/activation.py +60 -0
  69. braindecode/modules/attention.py +757 -0
  70. braindecode/modules/blocks.py +108 -0
  71. braindecode/modules/convolution.py +274 -0
  72. braindecode/modules/filter.py +632 -0
  73. braindecode/modules/layers.py +133 -0
  74. braindecode/modules/linear.py +50 -0
  75. braindecode/modules/parametrization.py +38 -0
  76. braindecode/modules/stats.py +77 -0
  77. braindecode/modules/util.py +77 -0
  78. braindecode/modules/wrapper.py +75 -0
  79. braindecode/preprocessing/__init__.py +37 -0
  80. braindecode/preprocessing/mne_preprocess.py +77 -0
  81. braindecode/preprocessing/preprocess.py +478 -0
  82. braindecode/preprocessing/windowers.py +1031 -0
  83. braindecode/regressor.py +23 -12
  84. braindecode/samplers/__init__.py +18 -0
  85. braindecode/samplers/base.py +401 -0
  86. braindecode/samplers/ssl.py +263 -0
  87. braindecode/training/__init__.py +23 -0
  88. braindecode/training/callbacks.py +23 -0
  89. braindecode/training/losses.py +105 -0
  90. braindecode/training/scoring.py +483 -0
  91. braindecode/util.py +55 -59
  92. braindecode/version.py +1 -1
  93. braindecode/visualization/__init__.py +8 -0
  94. braindecode/visualization/confusion_matrices.py +289 -0
  95. braindecode/visualization/gradients.py +57 -0
  96. {braindecode-0.8.dist-info → braindecode-1.0.0.dist-info}/METADATA +39 -55
  97. braindecode-1.0.0.dist-info/RECORD +101 -0
  98. {braindecode-0.8.dist-info → braindecode-1.0.0.dist-info}/WHEEL +1 -1
  99. {braindecode-0.8.dist-info → braindecode-1.0.0.dist-info/licenses}/LICENSE.txt +1 -1
  100. braindecode-1.0.0.dist-info/licenses/NOTICE.txt +20 -0
  101. braindecode-0.8.dist-info/RECORD +0 -11
  102. {braindecode-0.8.dist-info → braindecode-1.0.0.dist-info}/top_level.txt +0 -0
braindecode/datasets/moabb.py
@@ -0,0 +1,209 @@
+ """Dataset objects for some public datasets."""
+
+ # Authors: Hubert Banville <hubert.jbanville@gmail.com>
+ #          Lukas Gemein <l.gemein@gmail.com>
+ #          Simon Brandt <simonbrandt@protonmail.com>
+ #          David Sabbagh <dav.sabbagh@gmail.com>
+ #          Pierre Guetschel <pierre.guetschel@gmail.com>
+ #
+ # License: BSD (3-clause)
+
+ from __future__ import annotations
+
+ import warnings
+ from typing import Any
+
+ import mne
+ import pandas as pd
+
+ from braindecode.util import _update_moabb_docstring
+
+ from .base import BaseConcatDataset, BaseDataset
+
+
+ def _find_dataset_in_moabb(dataset_name, dataset_kwargs=None):
+     # soft dependency on moabb
+     from moabb.datasets.utils import dataset_list
+
+     for dataset in dataset_list:
+         if dataset_name == dataset.__name__:
+             # return an instance of the found dataset class
+             if dataset_kwargs is None:
+                 return dataset()
+             else:
+                 return dataset(**dataset_kwargs)
+     raise ValueError(f"{dataset_name} not found in moabb datasets")
+
+
+ def _fetch_and_unpack_moabb_data(dataset, subject_ids=None, dataset_load_kwargs=None):
+     if dataset_load_kwargs is None:
+         data = dataset.get_data(subject_ids)
+     else:
+         data = dataset.get_data(subjects=subject_ids, **dataset_load_kwargs)
+
+     raws, subject_ids, session_ids, run_ids = [], [], [], []
+     for subj_id, subj_data in data.items():
+         for sess_id, sess_data in subj_data.items():
+             for run_id, raw in sess_data.items():
+                 annots = _annotations_from_moabb_stim_channel(raw, dataset)
+                 raw.set_annotations(annots)
+                 raws.append(raw)
+                 subject_ids.append(subj_id)
+                 session_ids.append(sess_id)
+                 run_ids.append(run_id)
+     description = pd.DataFrame(
+         {"subject": subject_ids, "session": session_ids, "run": run_ids}
+     )
+     return raws, description
+
+
+ def _annotations_from_moabb_stim_channel(raw, dataset):
+     # find events from the stim channel
+     stim_channels = mne.utils._get_stim_channel(None, raw.info, raise_error=False)
+     if len(stim_channels) > 0:
+         # returns an empty array if none found
+         events = mne.find_events(raw, shortest_event=0, verbose=False)
+         event_id = dataset.event_id
+     else:
+         events, event_id = mne.events_from_annotations(raw, verbose=False)
+
+     # get annotations from events
+     event_desc = {k: v for v, k in event_id.items()}
+     annots = mne.annotations_from_events(events, raw.info["sfreq"], event_desc)
+
+     # set trial onset and offset given by moabb
+     onset, offset = dataset.interval
+     annots.onset += onset
+     annots.duration += offset - onset
+     return annots
+
+
+ def fetch_data_with_moabb(
+     dataset_name: str,
+     subject_ids: list[int] | int | None = None,
+     dataset_kwargs: dict[str, Any] | None = None,
+     dataset_load_kwargs: dict[str, Any] | None = None,
+ ) -> tuple[list[mne.io.Raw], pd.DataFrame]:
+     # ToDo: update path to where moabb downloads / looks for the data
+     """Fetch data using moabb.
+
+     Parameters
+     ----------
+     dataset_name: str | moabb.datasets.base.BaseDataset
+         The name of a dataset included in moabb, or an instantiated
+         moabb dataset.
+     subject_ids: list(int) | int
+         (list of) int of subject(s) to be fetched.
+     dataset_kwargs: dict, optional
+         Optional dictionary of keyword arguments to pass to the moabb
+         dataset when instantiating it.
+     dataset_load_kwargs: dict, optional
+         Optional dictionary of keyword arguments to pass to the moabb
+         dataset's load_data method. Allows using moabb options such as
+         cache_config and process_pipeline.
+
+     Returns
+     -------
+     raws: list of mne.io.Raw
+     description: pandas.DataFrame
+     """
+     if isinstance(dataset_name, str):
+         dataset = _find_dataset_in_moabb(dataset_name, dataset_kwargs)
+     else:
+         from moabb.datasets.base import BaseDataset
+
+         if isinstance(dataset_name, BaseDataset):
+             dataset = dataset_name
+         else:
+             raise ValueError(
+                 "dataset_name must be a str or a moabb BaseDataset instance"
+             )
+
+     subject_id = [subject_ids] if isinstance(subject_ids, int) else subject_ids
+     return _fetch_and_unpack_moabb_data(
+         dataset, subject_id, dataset_load_kwargs=dataset_load_kwargs
+     )
+
+
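The fetcher above resolves a moabb dataset by name, converts its stim-channel or annotation events into MNE annotations, and flattens the subject/session/run hierarchy. A minimal sketch of calling it (dataset name and subject choice are illustrative; moabb must be installed):

    from braindecode.datasets.moabb import fetch_data_with_moabb

    # One mne.io.Raw per subject/session/run, plus a flat description table
    raws, description = fetch_data_with_moabb("BNCI2014001", subject_ids=[1])
    print(len(raws), description.columns.tolist())  # columns: subject, session, run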
+ class MOABBDataset(BaseConcatDataset):
+     """A class for moabb datasets.
+
+     Parameters
+     ----------
+     dataset_name: str
+         Name of dataset included in moabb to be fetched.
+     subject_ids: list(int) | int | None
+         (list of) int of subject(s) to be fetched. If None, data of all
+         subjects is fetched.
+     dataset_kwargs: dict, optional
+         Optional dictionary of keyword arguments to pass to the moabb
+         dataset when instantiating it.
+     dataset_load_kwargs: dict, optional
+         Optional dictionary of keyword arguments to pass to the moabb
+         dataset's load_data method. Allows using moabb options such as
+         cache_config and process_pipeline.
+     """
+
+     def __init__(
+         self,
+         dataset_name: str,
+         subject_ids: list[int] | int | None = None,
+         dataset_kwargs: dict[str, Any] | None = None,
+         dataset_load_kwargs: dict[str, Any] | None = None,
+     ):
+         # soft dependency on moabb
+         from moabb import __version__ as moabb_version  # type: ignore
+
+         if moabb_version == "1.0.0":
+             warnings.warn(
+                 "moabb version 1.0.0 generates incorrect annotations. "
+                 "Please update to a different version, e.g. 0.5 or 1.1.0."
+             )
+
+         raws, description = fetch_data_with_moabb(
+             dataset_name,
+             subject_ids,
+             dataset_kwargs,
+             dataset_load_kwargs=dataset_load_kwargs,
+         )
+         all_base_ds = [
+             BaseDataset(raw, row) for raw, (_, row) in zip(raws, description.iterrows())
+         ]
+         super().__init__(all_base_ds)
+
+
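A usage sketch for the wrapper class (name and ids illustrative). Each recording becomes a BaseDataset whose description row carries subject/session/run, so the concatenated dataset can be split on those columns:

    from braindecode.datasets import MOABBDataset

    ds = MOABBDataset("BNCI2014001", subject_ids=[1, 2])
    by_subject = ds.split("subject")  # dict of BaseConcatDataset keyed by subject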
+ class BNCI2014001(MOABBDataset):
+     doc = """See moabb.datasets.bnci.BNCI2014001
+
+     Parameters
+     ----------
+     subject_ids: list(int) | int | None
+         (list of) int of subject(s) to be fetched. If None, data of all
+         subjects is fetched.
+     """
+     try:
+         from moabb.datasets import BNCI2014001
+
+         __doc__ = _update_moabb_docstring(BNCI2014001, doc)
+     except ModuleNotFoundError:
+         pass  # keep moabb a soft dependency; otherwise datasets/__init__.py would crash on import
+
+     def __init__(self, subject_ids):
+         super().__init__("BNCI2014001", subject_ids=subject_ids)
+
+
+ class HGD(MOABBDataset):
+     doc = """See moabb.datasets.schirrmeister2017.Schirrmeister2017
+
+     Parameters
+     ----------
+     subject_ids: list(int) | int | None
+         (list of) int of subject(s) to be fetched. If None, data of all
+         subjects is fetched.
+     """
+     try:
+         from moabb.datasets import Schirrmeister2017
+
+         __doc__ = _update_moabb_docstring(Schirrmeister2017, doc)
+     except ModuleNotFoundError:
+         pass  # keep moabb a soft dependency; otherwise datasets/__init__.py would crash on import
+
+     def __init__(self, subject_ids):
+         super().__init__("Schirrmeister2017", subject_ids=subject_ids)
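These wrappers only pin the moabb dataset name; a sketch of equivalent calls (subject choice illustrative, assuming both classes are re-exported from braindecode.datasets as in earlier releases):

    from braindecode.datasets import BNCI2014001, HGD

    bnci = BNCI2014001(subject_ids=[1])  # same as MOABBDataset("BNCI2014001", subject_ids=[1])
    hgd = HGD(subject_ids=[1])           # same as MOABBDataset("Schirrmeister2017", subject_ids=[1])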
braindecode/datasets/nmt.py
@@ -0,0 +1,311 @@
+ """
+ Dataset classes for the NMT EEG Corpus dataset.
+
+ The NMT Scalp EEG Dataset is an open-source annotated dataset of healthy and
+ pathological EEG recordings for predictive modeling. This dataset contains
+ 2,417 recordings from unique participants spanning almost 625 h.
+
+ Note:
+ - The signal unit may not be uV; further examination is required.
+ - The spectrum suggests the signal may have been band-pass filtered to
+   about 2-33 Hz; this still needs to be verified.
+ """
+
+ # Authors: Mohammad Bayazi <mj.darvishi92@gmail.com>
+ #          Bruno Aristimunha <b.aristimunha@gmail.com>
+ #
+ # License: BSD (3-clause)
+
+ from __future__ import annotations
+
+ import glob
+ import os
+ import warnings
+ from pathlib import Path
+ from unittest import mock
+
+ import mne
+ import numpy as np
+ import pandas as pd
+ from joblib import Parallel, delayed
+ from mne.datasets import fetch_dataset
+
+ from braindecode.datasets.base import BaseConcatDataset, BaseDataset
+
+ NMT_URL = "https://zenodo.org/record/10909103/files/NMT.zip"
+ NMT_archive_name = "NMT.zip"
+ NMT_folder_name = "MNE-NMT-eeg-dataset"
+ NMT_dataset_name = "NMT-EEG-Corpus"
+
+ NMT_dataset_params = {
+     "dataset_name": NMT_dataset_name,
+     "url": NMT_URL,
+     "archive_name": NMT_archive_name,
+     "folder_name": NMT_folder_name,
+     "hash": "77b3ce12bcaf6c6cce4e6690ea89cb22bed55af10c525077b430f6e1d2e3c6bf",
+     "config_key": NMT_dataset_name,
+ }
+
+
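NMT_dataset_params is consumed by MNE's generic fetcher; a sketch of fetching the archive directly with it (first call downloads and unzips from Zenodo, later calls reuse the cached copy in MNE's default data directory):

    from mne.datasets import fetch_dataset

    local_path = fetch_dataset(
        dataset_params=NMT_dataset_params,  # dict defined above
        processor="unzip",
        force_update=False,
    )
    print(local_path)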
+ class NMT(BaseConcatDataset):
+     """The NMT Scalp EEG Dataset.
+
+     An Open-Source Annotated Dataset of Healthy and Pathological EEG
+     Recordings for Predictive Modeling.
+
+     This dataset contains 2,417 recordings from unique participants spanning
+     almost 625 h.
+
+     Here, the dataset can be used for three tasks: brain-age prediction,
+     gender prediction, and abnormality detection.
+
+     The dataset is described in [Khan2022]_.
+
+     .. versionadded:: 0.9
+
+     Parameters
+     ----------
+     path: str
+         Parent directory of the dataset.
+     recording_ids: list(int) | int
+         A (list of) int of recording id(s) to be read. Order matters and will
+         overwrite the default chronological order: e.g. if recording_ids=[1, 0],
+         the first recording returned by this class will be chronologically
+         later than the second. Provide recording_ids in ascending order to
+         preserve chronological order.
+     target_name: str
+         Can be "pathological", "gender", or "age".
+     preload: bool
+         If True, preload the data of the Raw objects.
+
+     References
+     ----------
+     .. [Khan2022] Khan, H.A., Ul Ain, R., Kamboh, A.M., Butt, H.T.,
+        Shafait, S., Alamgir, W., Stricker, D. and Shafait, F., 2022. The NMT
+        scalp EEG dataset: an open-source annotated dataset of healthy and
+        pathological EEG recordings for predictive modeling. Frontiers in
+        Neuroscience, 15, p.755817.
+     """
+
+     def __init__(
+         self,
+         path=None,
+         target_name="pathological",
+         recording_ids=None,
+         preload=False,
+         n_jobs=1,
+     ):
+         # correct the path if needed
+         if path is not None:
+             list_csv = glob.glob(f"{path}/**/Labels.csv", recursive=True)
+             if isinstance(list_csv, list) and len(list_csv) > 0:
+                 path = Path(list_csv[0]).parent
+
+         if path is None or len(list_csv) == 0:
+             path = fetch_dataset(
+                 dataset_params=NMT_dataset_params,
+                 path=Path(path) if path is not None else None,
+                 processor="unzip",
+                 force_update=False,
+             )
+             # The first time we fetch the dataset, we need to move the files
+             # to the correct directory.
+             path = _correct_path(path)
+
+         # Get all file paths
+         file_paths = glob.glob(
+             os.path.join(path, "**" + os.sep + "*.edf"), recursive=True
+         )
+
+         # keep only .edf files
+         file_paths = [
+             file_path
+             for file_path in file_paths
+             if os.path.splitext(file_path)[1] == ".edf"
+         ]
+
+         # sort by subject id
+         file_paths = sorted(
+             file_paths, key=lambda p: int(os.path.splitext(p)[0].split(os.sep)[-1])
+         )
+         if recording_ids is not None:
+             file_paths = [file_paths[rec_id] for rec_id in recording_ids]
+
+         # read labels and rearrange them to match the TUH Abnormal EEG Corpus
+         description = pd.read_csv(
+             os.path.join(path, "Labels.csv"), index_col="recordname"
+         )
+         if recording_ids is not None:
+             description = description.iloc[recording_ids]
+         description.replace(
+             {
+                 "not specified": "X",
+                 "female": "F",
+                 "male": "M",
+                 "abnormal": True,
+                 "normal": False,
+             },
+             inplace=True,
+         )
+         description.rename(columns={"label": "pathological"}, inplace=True)
+         description.reset_index(drop=True, inplace=True)
+         description["path"] = file_paths
+         description = description[["path", "pathological", "age", "gender"]]
+
+         if n_jobs == 1:
+             base_datasets = [
+                 self._create_dataset(d, target_name, preload)
+                 for recording_id, d in description.iterrows()
+             ]
+         else:
+             base_datasets = Parallel(n_jobs)(
+                 delayed(self._create_dataset)(d, target_name, preload)
+                 for recording_id, d in description.iterrows()
+             )
+
+         super().__init__(base_datasets)
+
+     @staticmethod
+     def _create_dataset(d, target_name, preload):
+         raw = mne.io.read_raw_edf(d.path, preload=preload)
+         d["n_samples"] = raw.n_times
+         d["sfreq"] = raw.info["sfreq"]
+         d["train"] = "train" in d.path.split(os.sep)
+         base_dataset = BaseDataset(raw, d, target_name)
+         return base_dataset
+
+
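A usage sketch for the NMT class (recording selection illustrative; with no path it downloads via fetch_dataset into MNE's default data directory):

    from braindecode.datasets import NMT

    ds = NMT(target_name="pathological", recording_ids=[0, 1])
    # one row per recording: path, pathological, age, gender
    print(ds.description.head())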
+ def _correct_path(path: str):
+     """
+     Check if the path is correct and rename the file if needed.
+
+     Parameters
+     ----------
+     path: str
+         Path to the file.
+
+     Returns
+     -------
+     path: str
+         Corrected path.
+     """
+     if not Path(path).exists():
+         unzip_file_name = f"{NMT_archive_name}.unzip"
+         if (Path(path).parent / unzip_file_name).exists():
+             try:
+                 os.rename(
+                     src=Path(path).parent / unzip_file_name,
+                     dst=Path(path),
+                 )
+             except PermissionError:
+                 raise PermissionError(
+                     f"Please rename {Path(path).parent / unzip_file_name} "
+                     f"manually to {path} and try again."
+                 )
+     path = os.path.join(path, "nmt_scalp_eeg_dataset")
+
+     return path
+
+
+ def _get_header(*args):
+     all_paths = {**_NMT_PATHS}
+     return all_paths[args[0]]
+
+
+ def _fake_pd_read_csv(*args, **kwargs):
+     # Create a list of lists to hold the data
+     data = [
+         ["0000001.edf", "normal", 35, "male", "train"],
+         ["0000002.edf", "abnormal", 28, "female", "test"],
+         ["0000003.edf", "normal", 62, "male", "train"],
+         ["0000004.edf", "abnormal", 41, "female", "test"],
+         ["0000005.edf", "normal", 19, "male", "train"],
+         ["0000006.edf", "abnormal", 55, "female", "test"],
+         ["0000007.edf", "normal", 71, "male", "train"],
+     ]
+
+     # Create the DataFrame, specifying column names
+     df = pd.DataFrame(data, columns=["recordname", "label", "age", "gender", "loc"])
+
+     return df
+
+
+ def _fake_raw(*args, **kwargs):
+     sfreq = 10
+     ch_names = [
+         "EEG A1-REF",
+         "EEG A2-REF",
+         "EEG FP1-REF",
+         "EEG FP2-REF",
+         "EEG F3-REF",
+         "EEG F4-REF",
+         "EEG C3-REF",
+         "EEG C4-REF",
+         "EEG P3-REF",
+         "EEG P4-REF",
+         "EEG O1-REF",
+         "EEG O2-REF",
+         "EEG F7-REF",
+         "EEG F8-REF",
+         "EEG T3-REF",
+         "EEG T4-REF",
+         "EEG T5-REF",
+         "EEG T6-REF",
+         "EEG FZ-REF",
+         "EEG CZ-REF",
+         "EEG PZ-REF",
+     ]
+     duration_min = 6
+     data = np.random.randn(len(ch_names), duration_min * sfreq * 60)
+     info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types="eeg")
+     raw = mne.io.RawArray(data=data, info=info)
+     return raw
+
+
+ _NMT_PATHS = {
+     # these are actual file paths and edf headers from the NMT EEG Corpus
+     "nmt_scalp_eeg_dataset/abnormal/train/0000036.edf": b"0 0000036 M 13-May-1951 0000036 Age:32 ",  # noqa E501
+     "nmt_scalp_eeg_dataset/abnormal/eval/0000037.edf": b"0 0000037 M 13-May-1951 0000037 Age:32 ",  # noqa E501
+     "nmt_scalp_eeg_dataset/abnormal/eval/0000038.edf": b"0 0000038 M 13-May-1951 0000038 Age:32 ",  # noqa E501
+     "nmt_scalp_eeg_dataset/normal/train/0000039.edf": b"0 0000039 M 13-May-1951 0000039 Age:32 ",  # noqa E501
+     "nmt_scalp_eeg_dataset/normal/eval/0000040.edf": b"0 0000040 M 13-May-1951 0000040 Age:32 ",  # noqa E501
+     "nmt_scalp_eeg_dataset/normal/eval/0000041.edf": b"0 0000041 M 13-May-1951 0000041 Age:32 ",  # noqa E501
+     "nmt_scalp_eeg_dataset/abnormal/train/0000042.edf": b"0 0000042 M 13-May-1951 0000042 Age:32 ",  # noqa E501
+     "Labels.csv": b"0 recordname,label,age,gender,loc 1 0000001.edf,normal,22,not specified,train ",  # noqa E501
+ }
+
+
+ class _NMTMock(NMT):
+     """Mocked class for testing and examples."""
+
+     @mock.patch("glob.glob", return_value=_NMT_PATHS.keys())
+     @mock.patch("mne.io.read_raw_edf", new=_fake_raw)
+     @mock.patch("pandas.read_csv", new=_fake_pd_read_csv)
+     def __init__(
+         self,
+         mock_glob,
+         path,
+         recording_ids=None,
+         target_name="pathological",
+         preload=False,
+         n_jobs=1,
+     ):
+         with warnings.catch_warnings():
+             warnings.filterwarnings("ignore", message="Cannot save date file")
+             super().__init__(
+                 path=path,
+                 recording_ids=recording_ids,
+                 target_name=target_name,
+                 preload=preload,
+                 n_jobs=n_jobs,
+             )
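The decorators patch glob.glob, mne.io.read_raw_edf, and pandas.read_csv, so the class exercises the full NMT loading path without downloading anything. A sketch of test usage (the path value is arbitrary since all I/O is mocked; mock.patch injects the mock_glob argument automatically, so callers pass only the remaining parameters):

    # Hypothetical test: two fake recordings selected from the mocked corpus
    ds = _NMTMock(path="/tmp/nmt", recording_ids=[0, 1])
    assert len(ds.datasets) == 2  # one BaseDataset per selected fake recording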