braindecode 1.2.0.dev184328194__py3-none-any.whl → 1.3.0.dev171178473__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- braindecode/augmentation/base.py +1 -1
- braindecode/augmentation/functional.py +154 -54
- braindecode/augmentation/transforms.py +2 -2
- braindecode/datasets/__init__.py +10 -2
- braindecode/datasets/base.py +116 -152
- braindecode/datasets/bcicomp.py +4 -4
- braindecode/datasets/bids.py +3 -3
- braindecode/datasets/experimental.py +218 -0
- braindecode/datasets/mne.py +3 -5
- braindecode/datasets/moabb.py +2 -2
- braindecode/datasets/nmt.py +2 -2
- braindecode/datasets/sleep_physio_challe_18.py +4 -3
- braindecode/datasets/sleep_physionet.py +2 -2
- braindecode/datasets/tuh.py +2 -2
- braindecode/datasets/xy.py +2 -2
- braindecode/datautil/serialization.py +18 -13
- braindecode/eegneuralnet.py +2 -0
- braindecode/functional/functions.py +6 -2
- braindecode/functional/initialization.py +2 -3
- braindecode/models/__init__.py +12 -8
- braindecode/models/atcnet.py +156 -17
- braindecode/models/attentionbasenet.py +148 -16
- braindecode/models/{sleep_stager_eldele_2021.py → attn_sleep.py} +12 -2
- braindecode/models/base.py +280 -2
- braindecode/models/bendr.py +469 -0
- braindecode/models/biot.py +3 -1
- braindecode/models/ctnet.py +7 -4
- braindecode/models/deep4.py +6 -2
- braindecode/models/deepsleepnet.py +127 -5
- braindecode/models/eegconformer.py +114 -15
- braindecode/models/eeginception_erp.py +82 -7
- braindecode/models/eeginception_mi.py +2 -0
- braindecode/models/eegnet.py +64 -177
- braindecode/models/eegnex.py +113 -6
- braindecode/models/eegsimpleconv.py +2 -0
- braindecode/models/eegtcnet.py +1 -1
- braindecode/models/labram.py +188 -84
- braindecode/models/patchedtransformer.py +640 -0
- braindecode/models/sccnet.py +81 -8
- braindecode/models/shallow_fbcsp.py +2 -0
- braindecode/models/signal_jepa.py +109 -27
- braindecode/models/sinc_shallow.py +10 -9
- braindecode/models/sleep_stager_blanco_2020.py +2 -0
- braindecode/models/sleep_stager_chambon_2018.py +2 -0
- braindecode/models/sparcnet.py +2 -0
- braindecode/models/sstdpn.py +869 -0
- braindecode/models/summary.csv +42 -41
- braindecode/models/tidnet.py +2 -0
- braindecode/models/tsinception.py +15 -3
- braindecode/models/usleep.py +108 -9
- braindecode/models/util.py +8 -5
- braindecode/modules/attention.py +10 -10
- braindecode/modules/blocks.py +3 -3
- braindecode/modules/filter.py +2 -3
- braindecode/modules/layers.py +18 -17
- braindecode/preprocessing/__init__.py +24 -0
- braindecode/preprocessing/eegprep_preprocess.py +1202 -0
- braindecode/preprocessing/preprocess.py +42 -39
- braindecode/preprocessing/util.py +166 -0
- braindecode/preprocessing/windowers.py +24 -19
- braindecode/samplers/base.py +8 -8
- braindecode/version.py +1 -1
- {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171178473.dist-info}/METADATA +12 -3
- braindecode-1.3.0.dev171178473.dist-info/RECORD +106 -0
- braindecode/models/eegresnet.py +0 -362
- braindecode-1.2.0.dev184328194.dist-info/RECORD +0 -101
- {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171178473.dist-info}/WHEEL +0 -0
- {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171178473.dist-info}/licenses/LICENSE.txt +0 -0
- {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171178473.dist-info}/licenses/NOTICE.txt +0 -0
- {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171178473.dist-info}/top_level.txt +0 -0
braindecode/preprocessing/preprocess.py
CHANGED
@@ -30,8 +30,8 @@ from numpy.typing import NDArray
 
 from braindecode.datasets.base import (
     BaseConcatDataset,
-    BaseDataset,
     EEGWindowsDataset,
+    RawDataset,
     WindowsDataset,
 )
 from braindecode.datautil.serialization import (
@@ -55,15 +55,15 @@ class Preprocessor(object):
 
     Parameters
     ----------
-    fn: str or callable
+    fn : str or callable
         If str, the Raw/Epochs object must have a method with that name.
         If callable, directly apply the callable to the object.
     apply_on_array : bool
-        Ignored if …
-        and Epochs …
-        …
-    kwargs:
-        Keyword arguments …
+        Ignored if ``fn`` is not a callable. If True, the ``apply_function`` of Raw
+        and Epochs will be used to run ``fn`` on the underlying arrays directly.
+        If False, ``fn`` must directly modify the Raw or Epochs object.
+    **kwargs : dict
+        Keyword arguments forwarded to the MNE function or callable.
     """
 
     def __init__(self, fn: Callable | str, *, apply_on_array: bool = True, **kwargs):
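The revised docstring documents the str/callable duality of ``fn``. A minimal usage sketch, not part of the diff itself (the method name and scaling callable are illustrative):

    from braindecode.preprocessing import Preprocessor

    # String form: each recording's Raw/Epochs method of that name is called,
    # e.g. raw.pick(picks="eeg"); apply_on_array is ignored in this form.
    pick_eeg = Preprocessor("pick", picks="eeg")

    # Callable form with apply_on_array=True (the default): the callable is run
    # on the underlying numpy array via Raw.apply_function.
    scale = Preprocessor(lambda data: data * 1e6)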
@@ -112,39 +112,38 @@ def preprocess(
     n_jobs: int | None = None,
     offset: int = 0,
     copy_data: bool | None = None,
+    parallel_kwargs: dict | None = None,
 ):
     """Apply preprocessors to a concat dataset.
 
     Parameters
     ----------
-    concat_ds: BaseConcatDataset
-        A concat of …
-    preprocessors: list
-        …
+    concat_ds : BaseConcatDataset
+        A concat of ``RecordDataset`` to be preprocessed.
+    preprocessors : list of Preprocessor
+        Preprocessor objects to apply to each dataset.
     save_dir : str | None
-        If …
-        …
-        `preload=False`.
+        If provided, save preprocessed data under this directory and reload
+        datasets in ``concat_ds`` with ``preload=False``.
     overwrite : bool
-        When …
-        subdirectories that will be written to under …
-        the corresponding subdirectories already exist, a ``FileExistsError``
-        will be raised.
+        When ``save_dir`` is provided, controls whether to delete the old
+        subdirectories that will be written to under ``save_dir``. If False and
+        the corresponding subdirectories already exist, a ``FileExistsError`` is raised.
     n_jobs : int | None
-        Number of jobs for parallel execution. See …
-        a more detailed explanation.
+        Number of jobs for parallel execution. See ``joblib.Parallel`` for details.
     offset : int
-        …
-        …
-        one dataset has to be processed and saved at a time to account for
-        its original position.
+        Integer added to the dataset id in the concat. Useful when processing
+        and saving very large datasets in chunks to preserve original positions.
     copy_data : bool | None
-        Whether the data passed to …
-        …
+        Whether the data passed to parallel jobs should be copied or passed by reference.
+    parallel_kwargs : dict | None
+        Additional keyword arguments forwarded to ``joblib.Parallel``.
+        Defaults to None (equivalent to ``{}``).
+        See https://joblib.readthedocs.io/en/stable/generated/joblib.Parallel.html for details.
 
     Returns
     -------
-    BaseConcatDataset
+    BaseConcatDataset
         Preprocessed dataset.
     """
     # In case of serialization, make sure directory is available before
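With the new ``parallel_kwargs`` argument, callers can tune joblib directly. A hedged sketch of the documented call (``concat_ds`` is assumed to be an existing ``BaseConcatDataset``; the resample step is illustrative):

    from braindecode.preprocessing import Preprocessor, preprocess

    preprocessors = [Preprocessor("resample", sfreq=100)]
    preprocess(
        concat_ds,
        preprocessors,
        n_jobs=2,
        parallel_kwargs={"verbose": 5},  # forwarded verbatim to joblib.Parallel
    )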
@@ -159,8 +158,12 @@ def preprocess(
 
     parallel_processing = (n_jobs is not None) and (n_jobs != 1)
 
-    …
-    …
+    parallel_params = {} if parallel_kwargs is None else dict(parallel_kwargs)
+    parallel_params.setdefault(
+        "prefer", "threads" if platform.system() == "Windows" else None
+    )
+
+    list_of_ds = Parallel(n_jobs=n_jobs, **parallel_params)(
         delayed(_preprocess)(
             ds,
             i + offset,
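The ``setdefault`` call means the Windows-specific ``prefer="threads"`` default never clobbers a caller's own choice. Roughly:

    import platform

    parallel_kwargs = {"prefer": "processes"}  # caller's explicit choice
    parallel_params = dict(parallel_kwargs)
    parallel_params.setdefault(
        "prefer", "threads" if platform.system() == "Windows" else None
    )
    # setdefault only fills the key when absent, so "processes" survives
    assert parallel_params["prefer"] == "processes"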
@@ -226,15 +229,15 @@ def _preprocess(
 
     Parameters
     ----------
-    ds: …
+    ds: RecordDataset
         Dataset object to preprocess.
     ds_index : int
-        Index of the …
+        Index of the ``RecordDataset`` in its ``BaseConcatDataset``. Ignored if save_dir
         is None.
     preprocessors: list(Preprocessor)
         List of preprocessors to apply to the dataset.
     save_dir : str | None
-        If provided, save the preprocessed …
+        If provided, save the preprocessed RecordDataset in the
         specified directory.
     overwrite : bool
         If True, overwrite existing file with the same name.
@@ -260,8 +263,8 @@ def _preprocess(
         _preprocess_raw_or_epochs(ds.windows, preprocessors)
     else:
         raise ValueError(
-            "Can only preprocess concatenation of …
-            "…
+            "Can only preprocess concatenation of RecordDataset, "
+            "with either a `raw` or `windows` attribute."
         )
 
     # Store preprocessing keyword arguments in the dataset
@@ -294,11 +297,11 @@ def _get_preproc_kwargs(preprocessors):
 
 
 def _set_preproc_kwargs(ds, preprocessors):
-    """Record preprocessing keyword arguments in …
+    """Record preprocessing keyword arguments in RecordDataset.
 
     Parameters
     ----------
-    ds : …
+    ds : RecordDataset
         Dataset in which to record preprocessing keyword arguments.
     preprocessors : list
         List of preprocessors.
@@ -306,12 +309,12 @@ def _set_preproc_kwargs(ds, preprocessors):
     preproc_kwargs = _get_preproc_kwargs(preprocessors)
     if isinstance(ds, WindowsDataset):
         kind = "window"
-    …
+    elif isinstance(ds, EEGWindowsDataset):
         kind = "raw"
-    elif isinstance(ds, …
+    elif isinstance(ds, RawDataset):
         kind = "raw"
     else:
-        raise TypeError(f"ds must be a …
+        raise TypeError(f"ds must be a RecordDataset, got {type(ds)}")
     setattr(ds, kind + "_preproc_kwargs", preproc_kwargs)
 
 
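After ``_set_preproc_kwargs`` runs, the applied steps are readable back as plain attributes. A sketch of inspecting them (the exact stored format, a list of ``(fn, kwargs)`` pairs, is an assumption about ``_get_preproc_kwargs``, which this diff does not show):

    preprocess(concat_ds, [Preprocessor("pick", picks="eeg")])
    ds = concat_ds.datasets[0]
    # raw-backed datasets expose `raw_preproc_kwargs`; windowed ones `window_preproc_kwargs`
    print(getattr(ds, "raw_preproc_kwargs", None))  # e.g. [("pick", {"picks": "eeg"})]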
braindecode/preprocessing/util.py
ADDED
@@ -0,0 +1,166 @@
+"""Utilities for preprocessing functionality in Braindecode."""
+
+# Authors: Christian Kothe <christian.kothe@intheon.io>
+#
+# License: BSD-3
+
+import base64
+import json
+import re
+from typing import Any
+
+import numpy as np
+from mne.io.base import BaseRaw
+
+__all__ = ["mne_store_metadata", "mne_load_metadata"]
+
+
+# Use a unique marker for embedding structured data in info['description']
+_MARKER_PATTERN = re.compile(r"<!-- braindecode-meta:\s*(\S+)\s*-->", re.DOTALL)
+_MARKER_START = "<!-- braindecode-meta:"
+_MARKER_END = "-->"
+
+# Marker key for numpy arrays
+_NP_ARRAY_TAG = "__numpy_array__"
+
+
+def _numpy_decoder(dct):
+    """Internal JSON decoder hook to handle numpy arrays."""
+    if dct.get(_NP_ARRAY_TAG):
+        arr = np.array(dct["data"], dtype=dct["dtype"])
+        return arr.reshape(dct["shape"])
+    return dct
+
+
+class NumpyEncoder(json.JSONEncoder):
+    """Custom JSON encoder hook to handle numpy arrays."""
+
+    def default(self, obj):
+        if isinstance(obj, np.ndarray):
+            # Reject complex-valued dtypes as they're not JSON serializable
+            if np.issubdtype(obj.dtype, np.complexfloating):
+                raise TypeError(
+                    f"Cannot serialize numpy array with complex dtype {obj.dtype}. "
+                    "Complex dtypes are not supported."
+                )
+            return {
+                _NP_ARRAY_TAG: True,
+                "dtype": obj.dtype.str,
+                "shape": obj.shape,
+                "data": obj.flatten().tolist(),
+            }
+        return super().default(obj)
+
+
+def _encode_payload(data: dict) -> str:
+    """Serializes, encodes, and formats data into a marker string."""
+    json_str = json.dumps(data, cls=NumpyEncoder)
+    encoded = base64.b64encode(json_str.encode("utf-8")).decode("ascii")
+    return f"{_MARKER_START} {encoded} {_MARKER_END}"
+
+
+def mne_store_metadata(
+    raw: BaseRaw, payload: Any, *, key: str, no_overwrite: bool = False
+) -> None:
+    """Embed a JSON-serializable metadata payload in an MNE BaseRaw dataset
+    under a specified key.
+
+    This will encode the payload as a base64-encoded JSON string and store it
+    in the `info['description']` field of the Raw object while preserving any
+    existing content. Note this is not particularly efficient and should not
+    be used for very large payloads.
+
+    Parameters
+    ----------
+    raw : BaseRaw
+        The MNE Raw object to store data in.
+    payload : Any
+        The JSON-serializable data to store.
+    key : str
+        The key under which to store the payload.
+    no_overwrite : bool
+        If True, will not overwrite an existing entry with the same key.
+
+    """
+    # the description is apparently the only viable place where custom metadata may be
+    # stored in MNE Raw objects that persists through saving/loading
+    description = raw.info.get("description") or ""
+
+    # Try to find existing eegprep data
+    if match := _MARKER_PATTERN.search(description):
+        # Parse existing data
+        try:
+            decoded = base64.b64decode(match.group(1)).decode("utf-8")
+            existing_data = json.loads(decoded, object_hook=_numpy_decoder)
+        except (ValueError, json.JSONDecodeError):
+            existing_data = {}
+        # Check no_overwrite condition
+        if no_overwrite and key in existing_data:
+            return
+        # Update data
+        existing_data[key] = payload
+        new_marker = _encode_payload(existing_data)
+        # Replace the old marker with updated one
+        new_description = _MARKER_PATTERN.sub(new_marker, description, count=1)
+    else:
+        # No existing data, append new marker
+        data = {key: payload}
+        new_marker = _encode_payload(data)
+        # Append with spacing if description exists
+        if description.strip():
+            new_description = f"{description.rstrip()}\n{new_marker}"
+        else:
+            new_description = new_marker
+
+    raw.info["description"] = new_description
+
+
+def mne_load_metadata(raw: BaseRaw, *, key: str, delete: bool = False) -> Any | None:
+    """Retrieves data that was previously stored using mne_store_metadata from an MNE
+    BaseRaw dataset.
+
+    This function can retrieve data from an MNE Raw object that was stored
+    using `mne_store_metadata`. It decodes the base64-encoded JSON string from the
+    `info['description']` field and extracts the payload associated with the
+    specified key.
+
+    Parameters
+    ----------
+    raw : BaseRaw
+        The MNE Raw object to retrieve data from.
+    key : str
+        The key under which the payload was stored.
+    delete : bool
+        If True, removes the key from the stored data after retrieval.
+
+    Returns
+    -------
+    Any | None
+        The retrieved payload, or None if not found.
+    """
+    description = raw.info.get("description") or ""
+    match = _MARKER_PATTERN.search(description)
+    if not match:
+        return None
+
+    try:
+        decoded = base64.b64decode(match.group(1)).decode("utf-8")
+        data = json.loads(decoded, object_hook=_numpy_decoder)
+    except (ValueError, json.JSONDecodeError):
+        return None
+
+    result = data.get(key)
+
+    if delete and key in data:
+        # Remove the key from data
+        del data[key]
+        if data:
+            # Still have other keys, update the marker
+            new_marker = _encode_payload(data)
+            new_description = _MARKER_PATTERN.sub(new_marker, description, count=1)
+        else:
+            # No more keys, remove the entire marker
+            new_description = _MARKER_PATTERN.sub("", description, count=1).rstrip()
+        raw.info["description"] = new_description
+
+    return result
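The two public helpers round-trip JSON-serializable payloads, numpy arrays included, through ``info['description']``. A short sketch (``raw`` is assumed to be any existing ``mne.io.BaseRaw``; the key and payload are illustrative):

    import numpy as np
    from braindecode.preprocessing.util import mne_load_metadata, mne_store_metadata

    mne_store_metadata(raw, {"mixing": np.eye(4)}, key="eegprep")
    payload = mne_load_metadata(raw, key="eegprep", delete=True)  # also removes the marker
    assert np.allclose(payload["mixing"], np.eye(4))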
braindecode/preprocessing/windowers.py
CHANGED
@@ -25,7 +25,12 @@ import pandas as pd
 from joblib import Parallel, delayed
 from numpy.typing import ArrayLike
 
-from ..datasets.base import …
+from ..datasets.base import (
+    BaseConcatDataset,
+    EEGWindowsDataset,
+    RawDataset,
+    WindowsDataset,
+)
 
 
 class _LazyDataFrame:
@@ -189,7 +194,7 @@ def _get_use_mne_epochs(use_mne_epochs, reject, picks, flat, drop_bad_windows):
 
 # XXX it's called concat_ds...
 def create_windows_from_events(
-    concat_ds: BaseConcatDataset,
+    concat_ds: BaseConcatDataset[RawDataset],
     trial_start_offset_samples: int = 0,
     trial_stop_offset_samples: int = 0,
     window_size_samples: int | None = None,
@@ -206,7 +211,7 @@ def create_windows_from_events(
     use_mne_epochs: bool | None = None,
     n_jobs: int = 1,
     verbose: bool | str | int | None = "error",
-):
+) -> BaseConcatDataset[WindowsDataset | EEGWindowsDataset]:
     """Create windows based on events in mne.Raw.
 
     This function extracts windows of size window_size_samples in the interval
@@ -228,7 +233,7 @@ def create_windows_from_events(
 
     Parameters
     ----------
-    concat_ds: BaseConcatDataset
+    concat_ds: BaseConcatDataset[RawDataset]
        A concat of base datasets each holding raw and description.
     trial_start_offset_samples: int
         Start offset from original trial onsets, in samples. Defaults to zero.
@@ -268,7 +273,7 @@ def create_windows_from_events(
         rejection based on flatness is done. See mne.Epochs.
     on_missing: str
         What to do if one or several event ids are not found in the recording.
-        Valid keys are 'error…
+        Valid keys are 'error' | 'warning' | 'ignore'. See mne.Epochs.
     accepted_bads_ratio: float, optional
         Acceptable proportion of trials with inconsistent length in a raw. If
         the number of trials whose length is exceeded by the window size is
@@ -286,7 +291,7 @@ def create_windows_from_events(
 
     Returns
     -------
-    windows_datasets: BaseConcatDataset
+    windows_datasets: BaseConcatDataset[WindowsDataset | EEGWindowsDataset]
         Concatenated datasets of WindowsDataset containing the extracted windows.
     """
     _check_windowing_arguments(
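The new annotations only narrow the accepted and returned types; the call itself is unchanged. A minimal sketch (``concat_ds`` is assumed to be a loaded ``BaseConcatDataset`` of raw recordings):

    from braindecode.preprocessing import create_windows_from_events

    windows_ds = create_windows_from_events(
        concat_ds,
        trial_start_offset_samples=0,
        trial_stop_offset_samples=0,
        preload=True,
    )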
@@ -341,7 +346,7 @@ def create_windows_from_events(
 
 
 def create_fixed_length_windows(
-    concat_ds: BaseConcatDataset,
+    concat_ds: BaseConcatDataset[RawDataset],
     start_offset_samples: int = 0,
     stop_offset_samples: int | None = None,
     window_size_samples: int | None = None,
@@ -358,12 +363,12 @@ def create_fixed_length_windows(
     on_missing: str = "error",
     n_jobs: int = 1,
     verbose: bool | str | int | None = "error",
-):
+) -> BaseConcatDataset[EEGWindowsDataset]:
     """Windower that creates sliding windows.
 
     Parameters
     ----------
-    concat_ds: ConcatDataset
+    concat_ds: ConcatDataset[RawDataset]
         A concat of base datasets each holding raw and description.
     start_offset_samples: int
         Start offset from beginning of recording in samples.
@@ -398,7 +403,7 @@ def create_fixed_length_windows(
         by using the _LazyDataFrame (experimental).
     on_missing: str
         What to do if one or several event ids are not found in the recording.
-        Valid keys are 'error…
+        Valid keys are 'error' | 'warning' | 'ignore'. See mne.Epochs.
     n_jobs: int
         Number of jobs to use to parallelize the windowing.
     verbose: bool | str | int | None
@@ -406,7 +411,7 @@ def create_fixed_length_windows(
 
     Returns
     -------
-    windows_datasets: BaseConcatDataset
+    windows_datasets: BaseConcatDataset[EEGWindowsDataset]
         Concatenated datasets of WindowsDataset containing the extracted windows.
     """
     stop_offset_samples, drop_last_window = (
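Likewise for the sliding windower; a sketch with illustrative window sizes (``window_stride_samples`` is assumed from braindecode's public API, not shown in this hunk):

    from braindecode.preprocessing import create_fixed_length_windows

    sliding_ds = create_fixed_length_windows(
        concat_ds,
        window_size_samples=500,
        window_stride_samples=500,
        drop_last_window=False,
    )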
@@ -473,11 +478,11 @@ def _create_windows_from_events(
     verbose="error",
     use_mne_epochs=False,
 ):
-    """Create WindowsDataset from …
+    """Create WindowsDataset from RawDataset based on events.
 
     Parameters
     ----------
-    ds : …
+    ds : RawDataset
         Dataset containing continuous data and description.
     infer_mapping : bool
         If True, extract all events from all datasets and map them to
@@ -648,11 +653,11 @@ def _create_fixed_length_windows(
     on_missing="error",
     verbose="error",
 ):
-    """Create WindowsDataset from …
+    """Create WindowsDataset from RawDataset with sliding windows.
 
     Parameters
     ----------
-    ds : …
+    ds : RawDataset
         Dataset containing continuous data and description.
 
     See `create_fixed_length_windows` for description of other parameters.
@@ -750,7 +755,7 @@ def _create_fixed_length_windows(
 
 
 def create_windows_from_target_channels(
-    concat_ds,
+    concat_ds: BaseConcatDataset[RawDataset],
     window_size_samples=None,
     preload=False,
     picks=None,
@@ -759,7 +764,7 @@ def create_windows_from_target_channels(
     n_jobs=1,
     last_target_only=True,
     verbose="error",
-):
+) -> BaseConcatDataset[EEGWindowsDataset]:
     list_of_windows_ds = Parallel(n_jobs=n_jobs)(
         delayed(_create_windows_from_target_channels)(
             ds,
@@ -788,11 +793,11 @@ def _create_windows_from_target_channels(
     on_missing="error",
     verbose="error",
 ):
-    """Create WindowsDataset from …
+    """Create WindowsDataset from RawDataset using targets `misc` channels from mne.Raw.
 
     Parameters
     ----------
-    ds : …
+    ds : RawDataset
         Dataset containing continuous data and description.
 
     See `create_fixed_length_windows` for description of other parameters.
braindecode/samplers/base.py
CHANGED
@@ -122,14 +122,14 @@ class DistributedRecordingSampler(DistributedSampler):
         DataFrame with at least one of {subject, session, run} columns for each
         window in the BaseConcatDataset to sample examples from. Normally
         obtained with `BaseConcatDataset.get_metadata()`. For instance,
-        `metadata.head()` might look like this …
-        …
-        …
-        …
-        …
-        …
-        …
-        …
+        `metadata.head()` might look like this::
+
+               i_window_in_trial  i_start_in_trial  i_stop_in_trial  target  subject    session    run
+            0                  0                 0              500      -1        4  session_T  run_0
+            1                  1               500             1000      -1        4  session_T  run_0
+            2                  2              1000             1500      -1        4  session_T  run_0
+            3                  3              1500             2000      -1        4  session_T  run_0
+            4                  4              2000             2500      -1        4  session_T  run_0
 
     random_state : np.RandomState | int | None
         Random state.
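Per the docstring, the reconstructed table is ordinary ``DataFrame.head()`` output obtained from the concat dataset:

    metadata = windows_ds.get_metadata()  # windows_ds: a windowed BaseConcatDataset (assumed)
    print(metadata.head())                # columns as in the table above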
braindecode/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "1.2.0.dev184328194"
+__version__ = "1.3.0.dev171178473"
{braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171178473.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: braindecode
-Version: 1.2.0.dev184328194
+Version: 1.3.0.dev171178473
 Summary: Deep learning software to decode EEG, ECG or MEG signals
 Author-email: Robin Tibor Schirrmeister <robintibor@gmail.com>
 Maintainer-email: Alexandre Gramfort <agramfort@meta.com>, Bruno Aristimunha Pinto <b.aristimunha@gmail.com>, Robin Tibor Schirrmeister <robintibor@gmail.com>
@@ -17,7 +17,7 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Python: …
+Requires-Python: >=3.10
 Description-Content-Type: text/x-rst
 License-File: LICENSE.txt
 License-File: NOTICE.txt
@@ -40,6 +40,10 @@ Requires-Dist: linear_attention_transformer
 Requires-Dist: docstring_inheritance
 Provides-Extra: moabb
 Requires-Dist: moabb>=1.2.0; extra == "moabb"
+Provides-Extra: eegprep
+Requires-Dist: eegprep[eeglabio]>=0.1.1; extra == "eegprep"
+Provides-Extra: hug
+Requires-Dist: huggingface_hub[torch]>=0.20.0; extra == "hug"
 Provides-Extra: tests
 Requires-Dist: pytest; extra == "tests"
 Requires-Dist: pytest-cov; extra == "tests"
@@ -49,6 +53,10 @@ Requires-Dist: mypy; extra == "tests"
 Provides-Extra: docs
 Requires-Dist: sphinx_gallery; extra == "docs"
 Requires-Dist: sphinx_rtd_theme; extra == "docs"
+Requires-Dist: sphinx-autodoc-typehints; extra == "docs"
+Requires-Dist: sphinx-autobuild; extra == "docs"
+Requires-Dist: sphinxcontrib-bibtex; extra == "docs"
+Requires-Dist: sphinx_sitemap; extra == "docs"
 Requires-Dist: pydata_sphinx_theme; extra == "docs"
 Requires-Dist: numpydoc; extra == "docs"
 Requires-Dist: memory_profiler; extra == "docs"
@@ -59,8 +67,9 @@ Requires-Dist: lightning; extra == "docs"
 Requires-Dist: seaborn; extra == "docs"
 Requires-Dist: pre-commit; extra == "docs"
 Requires-Dist: openneuro-py; extra == "docs"
+Requires-Dist: plotly; extra == "docs"
 Provides-Extra: all
-Requires-Dist: braindecode[docs,moabb,tests]; extra == "all"
+Requires-Dist: braindecode[docs,eegprep,hug,moabb,tests]; extra == "all"
 Dynamic: license-file
 
 .. image:: https://badges.gitter.im/braindecodechat/community.svg
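For reference, the two extras declared above install with standard pip extras syntax:

    pip install "braindecode[eegprep]"   # pulls in eegprep[eeglabio]>=0.1.1
    pip install "braindecode[hug]"       # pulls in huggingface_hub[torch]>=0.20.0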