braindecode 1.3.0.dev181065563__py3-none-any.whl → 1.3.0.dev181594385__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of braindecode might be problematic.

Files changed (68)
  1. braindecode/augmentation/base.py +1 -1
  2. braindecode/augmentation/functional.py +154 -54
  3. braindecode/augmentation/transforms.py +2 -2
  4. braindecode/datasets/__init__.py +10 -2
  5. braindecode/datasets/base.py +116 -152
  6. braindecode/datasets/bcicomp.py +4 -4
  7. braindecode/datasets/bids.py +3 -3
  8. braindecode/datasets/experimental.py +2 -2
  9. braindecode/datasets/mne.py +3 -5
  10. braindecode/datasets/moabb.py +2 -2
  11. braindecode/datasets/nmt.py +2 -2
  12. braindecode/datasets/sleep_physio_challe_18.py +4 -3
  13. braindecode/datasets/sleep_physionet.py +2 -2
  14. braindecode/datasets/tuh.py +2 -2
  15. braindecode/datasets/xy.py +2 -2
  16. braindecode/datautil/serialization.py +18 -13
  17. braindecode/eegneuralnet.py +2 -0
  18. braindecode/functional/functions.py +6 -2
  19. braindecode/functional/initialization.py +2 -3
  20. braindecode/models/__init__.py +6 -0
  21. braindecode/models/atcnet.py +33 -34
  22. braindecode/models/attentionbasenet.py +39 -32
  23. braindecode/models/attn_sleep.py +2 -0
  24. braindecode/models/base.py +280 -2
  25. braindecode/models/bendr.py +469 -0
  26. braindecode/models/biot.py +3 -1
  27. braindecode/models/contrawr.py +2 -0
  28. braindecode/models/ctnet.py +8 -3
  29. braindecode/models/deepsleepnet.py +28 -19
  30. braindecode/models/eegconformer.py +2 -2
  31. braindecode/models/eeginception_erp.py +31 -25
  32. braindecode/models/eegitnet.py +2 -0
  33. braindecode/models/eegminer.py +2 -0
  34. braindecode/models/eegnet.py +1 -1
  35. braindecode/models/eegtcnet.py +2 -0
  36. braindecode/models/fbcnet.py +2 -0
  37. braindecode/models/fblightconvnet.py +2 -0
  38. braindecode/models/fbmsnet.py +2 -0
  39. braindecode/models/ifnet.py +2 -0
  40. braindecode/models/labram.py +193 -87
  41. braindecode/models/msvtnet.py +2 -0
  42. braindecode/models/patchedtransformer.py +640 -0
  43. braindecode/models/signal_jepa.py +111 -27
  44. braindecode/models/sinc_shallow.py +12 -9
  45. braindecode/models/sstdpn.py +869 -0
  46. braindecode/models/summary.csv +9 -6
  47. braindecode/models/syncnet.py +2 -0
  48. braindecode/models/tcn.py +2 -0
  49. braindecode/models/usleep.py +26 -21
  50. braindecode/models/util.py +3 -0
  51. braindecode/modules/attention.py +10 -10
  52. braindecode/modules/blocks.py +3 -3
  53. braindecode/modules/filter.py +2 -3
  54. braindecode/modules/layers.py +18 -17
  55. braindecode/preprocessing/__init__.py +24 -0
  56. braindecode/preprocessing/eegprep_preprocess.py +1202 -0
  57. braindecode/preprocessing/preprocess.py +23 -14
  58. braindecode/preprocessing/util.py +166 -0
  59. braindecode/preprocessing/windowers.py +24 -19
  60. braindecode/samplers/base.py +8 -8
  61. braindecode/version.py +1 -1
  62. {braindecode-1.3.0.dev181065563.dist-info → braindecode-1.3.0.dev181594385.dist-info}/METADATA +6 -2
  63. braindecode-1.3.0.dev181594385.dist-info/RECORD +106 -0
  64. braindecode-1.3.0.dev181065563.dist-info/RECORD +0 -101
  65. {braindecode-1.3.0.dev181065563.dist-info → braindecode-1.3.0.dev181594385.dist-info}/WHEEL +0 -0
  66. {braindecode-1.3.0.dev181065563.dist-info → braindecode-1.3.0.dev181594385.dist-info}/licenses/LICENSE.txt +0 -0
  67. {braindecode-1.3.0.dev181065563.dist-info → braindecode-1.3.0.dev181594385.dist-info}/licenses/NOTICE.txt +0 -0
  68. {braindecode-1.3.0.dev181065563.dist-info → braindecode-1.3.0.dev181594385.dist-info}/top_level.txt +0 -0
braindecode/preprocessing/preprocess.py CHANGED
@@ -30,8 +30,8 @@ from numpy.typing import NDArray
 
 from braindecode.datasets.base import (
     BaseConcatDataset,
-    BaseDataset,
     EEGWindowsDataset,
+    RawDataset,
     WindowsDataset,
 )
 from braindecode.datautil.serialization import (
@@ -112,13 +112,14 @@ def preprocess(
     n_jobs: int | None = None,
     offset: int = 0,
     copy_data: bool | None = None,
+    parallel_kwargs: dict | None = None,
 ):
     """Apply preprocessors to a concat dataset.
 
     Parameters
     ----------
     concat_ds : BaseConcatDataset
-        A concat of ``BaseDataset`` or ``WindowsDataset`` to be preprocessed.
+        A concat of ``RecordDataset`` to be preprocessed.
     preprocessors : list of Preprocessor
         Preprocessor objects to apply to each dataset.
     save_dir : str | None
@@ -135,6 +136,10 @@ def preprocess(
         and saving very large datasets in chunks to preserve original positions.
     copy_data : bool | None
         Whether the data passed to parallel jobs should be copied or passed by reference.
+    parallel_kwargs : dict | None
+        Additional keyword arguments forwarded to ``joblib.Parallel``.
+        Defaults to None (equivalent to ``{}``).
+        See https://joblib.readthedocs.io/en/stable/generated/joblib.Parallel.html for details.
 
     Returns
     -------
@@ -153,8 +158,12 @@ def preprocess(
 
     parallel_processing = (n_jobs is not None) and (n_jobs != 1)
 
-    job_prefer = "threads" if platform.system() == "Windows" else None
-    list_of_ds = Parallel(n_jobs=n_jobs, prefer=job_prefer)(
+    parallel_params = {} if parallel_kwargs is None else dict(parallel_kwargs)
+    parallel_params.setdefault(
+        "prefer", "threads" if platform.system() == "Windows" else None
+    )
+
+    list_of_ds = Parallel(n_jobs=n_jobs, **parallel_params)(
         delayed(_preprocess)(
             ds,
             i + offset,
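
Editor's note: the new ``parallel_kwargs`` argument simply forwards options to ``joblib.Parallel``. A minimal usage sketch (the resampling preprocessor and the already-loaded ``concat_ds`` below are illustrative assumptions, not part of this diff):

    from braindecode.preprocessing import Preprocessor, preprocess

    preprocessors = [Preprocessor("resample", sfreq=100)]  # hypothetical example preprocessor
    preprocess(
        concat_ds,              # an existing BaseConcatDataset (assumed)
        preprocessors,
        n_jobs=4,
        parallel_kwargs={"backend": "loky", "verbose": 5},  # forwarded to joblib.Parallel
    )

Unless overridden, the "prefer" option still defaults to "threads" on Windows, as the change above shows.
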
@@ -220,15 +229,15 @@ def _preprocess(
 
     Parameters
     ----------
-    ds: BaseDataset | WindowsDataset
+    ds: RecordDataset
         Dataset object to preprocess.
     ds_index : int
-        Index of the BaseDataset in its BaseConcatDataset. Ignored if save_dir
+        Index of the ``RecordDataset`` in its ``BaseConcatDataset``. Ignored if save_dir
         is None.
     preprocessors: list(Preprocessor)
         List of preprocessors to apply to the dataset.
     save_dir : str | None
-        If provided, save the preprocessed BaseDataset in the
+        If provided, save the preprocessed RecordDataset in the
         specified directory.
     overwrite : bool
         If True, overwrite existing file with the same name.
@@ -254,8 +263,8 @@ def _preprocess(
         _preprocess_raw_or_epochs(ds.windows, preprocessors)
     else:
         raise ValueError(
-            "Can only preprocess concatenation of BaseDataset or "
-            "WindowsDataset, with either a `raw` or `windows` attribute."
+            "Can only preprocess concatenation of RecordDataset, "
+            "with either a `raw` or `windows` attribute."
         )
 
     # Store preprocessing keyword arguments in the dataset
@@ -288,11 +297,11 @@ def _get_preproc_kwargs(preprocessors):
 
 
 def _set_preproc_kwargs(ds, preprocessors):
-    """Record preprocessing keyword arguments in BaseDataset or WindowsDataset.
+    """Record preprocessing keyword arguments in RecordDataset.
 
     Parameters
     ----------
-    ds : BaseDataset | WindowsDataset
+    ds : RecordDataset
         Dataset in which to record preprocessing keyword arguments.
     preprocessors : list
        List of preprocessors.
@@ -300,12 +309,12 @@ def _set_preproc_kwargs(ds, preprocessors):
     preproc_kwargs = _get_preproc_kwargs(preprocessors)
     if isinstance(ds, WindowsDataset):
         kind = "window"
-    if isinstance(ds, EEGWindowsDataset):
+    elif isinstance(ds, EEGWindowsDataset):
         kind = "raw"
-    elif isinstance(ds, BaseDataset):
+    elif isinstance(ds, RawDataset):
         kind = "raw"
     else:
-        raise TypeError(f"ds must be a BaseDataset or a WindowsDataset, got {type(ds)}")
+        raise TypeError(f"ds must be a RecordDataset, got {type(ds)}")
     setattr(ds, kind + "_preproc_kwargs", preproc_kwargs)
 
 
braindecode/preprocessing/util.py ADDED
@@ -0,0 +1,166 @@
+"""Utilities for preprocessing functionality in Braindecode."""
+
+# Authors: Christian Kothe <christian.kothe@intheon.io>
+#
+# License: BSD-3
+
+import base64
+import json
+import re
+from typing import Any
+
+import numpy as np
+from mne.io.base import BaseRaw
+
+__all__ = ["mne_store_metadata", "mne_load_metadata"]
+
+
+# Use a unique marker for embedding structured data in info['description']
+_MARKER_PATTERN = re.compile(r"<!-- braindecode-meta:\s*(\S+)\s*-->", re.DOTALL)
+_MARKER_START = "<!-- braindecode-meta:"
+_MARKER_END = "-->"
+
+# Marker key for numpy arrays
+_NP_ARRAY_TAG = "__numpy_array__"
+
+
+def _numpy_decoder(dct):
+    """Internal JSON decoder hook to handle numpy arrays."""
+    if dct.get(_NP_ARRAY_TAG):
+        arr = np.array(dct["data"], dtype=dct["dtype"])
+        return arr.reshape(dct["shape"])
+    return dct
+
+
+class NumpyEncoder(json.JSONEncoder):
+    """Custom JSON encoder hook to handle numpy arrays."""
+
+    def default(self, obj):
+        if isinstance(obj, np.ndarray):
+            # Reject complex-valued dtypes as they're not JSON serializable
+            if np.issubdtype(obj.dtype, np.complexfloating):
+                raise TypeError(
+                    f"Cannot serialize numpy array with complex dtype {obj.dtype}. "
+                    "Complex dtypes are not supported."
+                )
+            return {
+                _NP_ARRAY_TAG: True,
+                "dtype": obj.dtype.str,
+                "shape": obj.shape,
+                "data": obj.flatten().tolist(),
+            }
+        return super().default(obj)
+
+
+def _encode_payload(data: dict) -> str:
+    """Serializes, encodes, and formats data into a marker string."""
+    json_str = json.dumps(data, cls=NumpyEncoder)
+    encoded = base64.b64encode(json_str.encode("utf-8")).decode("ascii")
+    return f"{_MARKER_START} {encoded} {_MARKER_END}"
+
+
+def mne_store_metadata(
+    raw: BaseRaw, payload: Any, *, key: str, no_overwrite: bool = False
+) -> None:
+    """Embed a JSON-serializable metadata payload in an MNE BaseRaw dataset
+    under a specified key.
+
+    This will encode the payload as a base64-encoded JSON string and store it
+    in the `info['description']` field of the Raw object while preserving any
+    existing content. Note this is not particularly efficient and should not
+    be used for very large payloads.
+
+    Parameters
+    ----------
+    raw : BaseRaw
+        The MNE Raw object to store data in.
+    payload : Any
+        The JSON-serializable data to store.
+    key : str
+        The key under which to store the payload.
+    no_overwrite : bool
+        If True, will not overwrite an existing entry with the same key.
+
+    """
+    # the description is apparently the only viable place where custom metadata may be
+    # stored in MNE Raw objects that persists through saving/loading
+    description = raw.info.get("description") or ""
+
+    # Try to find existing eegprep data
+    if match := _MARKER_PATTERN.search(description):
+        # Parse existing data
+        try:
+            decoded = base64.b64decode(match.group(1)).decode("utf-8")
+            existing_data = json.loads(decoded, object_hook=_numpy_decoder)
+        except (ValueError, json.JSONDecodeError):
+            existing_data = {}
+        # Check no_overwrite condition
+        if no_overwrite and key in existing_data:
+            return
+        # Update data
+        existing_data[key] = payload
+        new_marker = _encode_payload(existing_data)
+        # Replace the old marker with updated one
+        new_description = _MARKER_PATTERN.sub(new_marker, description, count=1)
+    else:
+        # No existing data, append new marker
+        data = {key: payload}
+        new_marker = _encode_payload(data)
+        # Append with spacing if description exists
+        if description.strip():
+            new_description = f"{description.rstrip()}\n{new_marker}"
+        else:
+            new_description = new_marker
+
+    raw.info["description"] = new_description
+
+
+def mne_load_metadata(raw: BaseRaw, *, key: str, delete: bool = False) -> Any | None:
+    """Retrieves data that was previously stored using mne_store_metadata from an MNE
+    BaseRaw dataset.
+
+    This function can retrieve data from an MNE Raw object that was stored
+    using `mne_store_metadata`. It decodes the base64-encoded JSON string from the
+    `info['description']` field and extracts the payload associated with the
+    specified key.
+
+    Parameters
+    ----------
+    raw : BaseRaw
+        The MNE Raw object to retrieve data from.
+    key : str
+        The key under which the payload was stored.
+    delete : bool
+        If True, removes the key from the stored data after retrieval.
+
+    Returns
+    -------
+    Any | None
+        The retrieved payload, or None if not found.
+    """
+    description = raw.info.get("description") or ""
+    match = _MARKER_PATTERN.search(description)
+    if not match:
+        return None
+
+    try:
+        decoded = base64.b64decode(match.group(1)).decode("utf-8")
+        data = json.loads(decoded, object_hook=_numpy_decoder)
+    except (ValueError, json.JSONDecodeError):
+        return None
+
+    result = data.get(key)
+
+    if delete and key in data:
+        # Remove the key from data
+        del data[key]
+        if data:
+            # Still have other keys, update the marker
+            new_marker = _encode_payload(data)
+            new_description = _MARKER_PATTERN.sub(new_marker, description, count=1)
+        else:
+            # No more keys, remove the entire marker
+            new_description = _MARKER_PATTERN.sub("", description, count=1).rstrip()
+        raw.info["description"] = new_description
+
+    return result
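
Editor's note: a minimal round-trip sketch of the two helpers added above (the channel names and payload are illustrative only):

    import numpy as np
    import mne
    from braindecode.preprocessing.util import mne_load_metadata, mne_store_metadata

    # Build a small in-memory Raw to demonstrate the round trip.
    info = mne.create_info(ch_names=["C3", "C4"], sfreq=100.0, ch_types="eeg")
    raw = mne.io.RawArray(np.zeros((2, 1000)), info)

    mne_store_metadata(raw, {"weights": np.eye(2)}, key="eegprep")
    payload = mne_load_metadata(raw, key="eegprep")
    print(payload["weights"].shape)  # (2, 2): numpy arrays survive the JSON round trip

Because everything ends up base64-encoded inside raw.info["description"], the payload persists through MNE save/load, which is the motivation stated in the module's comments.
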
braindecode/preprocessing/windowers.py CHANGED
@@ -25,7 +25,12 @@ import pandas as pd
 from joblib import Parallel, delayed
 from numpy.typing import ArrayLike
 
-from ..datasets.base import BaseConcatDataset, EEGWindowsDataset, WindowsDataset
+from ..datasets.base import (
+    BaseConcatDataset,
+    EEGWindowsDataset,
+    RawDataset,
+    WindowsDataset,
+)
 
 
 class _LazyDataFrame:
@@ -189,7 +194,7 @@ def _get_use_mne_epochs(use_mne_epochs, reject, picks, flat, drop_bad_windows):
 
 # XXX it's called concat_ds...
 def create_windows_from_events(
-    concat_ds: BaseConcatDataset,
+    concat_ds: BaseConcatDataset[RawDataset],
     trial_start_offset_samples: int = 0,
     trial_stop_offset_samples: int = 0,
     window_size_samples: int | None = None,
@@ -206,7 +211,7 @@
     use_mne_epochs: bool | None = None,
     n_jobs: int = 1,
     verbose: bool | str | int | None = "error",
-):
+) -> BaseConcatDataset[WindowsDataset | EEGWindowsDataset]:
     """Create windows based on events in mne.Raw.
 
     This function extracts windows of size window_size_samples in the interval
@@ -228,7 +233,7 @@
 
     Parameters
     ----------
-    concat_ds: BaseConcatDataset
+    concat_ds: BaseConcatDataset[RawDataset]
        A concat of base datasets each holding raw and description.
    trial_start_offset_samples: int
        Start offset from original trial onsets, in samples. Defaults to zero.
@@ -268,7 +273,7 @@
        rejection based on flatness is done. See mne.Epochs.
    on_missing: str
        What to do if one or several event ids are not found in the recording.
-        Valid keys are ‘error | ‘warning | ‘ignore’. See mne.Epochs.
+        Valid keys are ‘error' | ‘warning' | ‘ignore'. See mne.Epochs.
    accepted_bads_ratio: float, optional
        Acceptable proportion of trials with inconsistent length in a raw. If
        the number of trials whose length is exceeded by the window size is
@@ -286,7 +291,7 @@
 
    Returns
    -------
-    windows_datasets: BaseConcatDataset
+    windows_datasets: BaseConcatDataset[WindowsDataset | EEGWindowsDataset]
        Concatenated datasets of WindowsDataset containing the extracted windows.
    """
    _check_windowing_arguments(
@@ -341,7 +346,7 @@
 
 
 def create_fixed_length_windows(
-    concat_ds: BaseConcatDataset,
+    concat_ds: BaseConcatDataset[RawDataset],
    start_offset_samples: int = 0,
    stop_offset_samples: int | None = None,
    window_size_samples: int | None = None,
@@ -358,12 +363,12 @@
    on_missing: str = "error",
    n_jobs: int = 1,
    verbose: bool | str | int | None = "error",
-):
+) -> BaseConcatDataset[EEGWindowsDataset]:
    """Windower that creates sliding windows.
 
    Parameters
    ----------
-    concat_ds: ConcatDataset
+    concat_ds: ConcatDataset[RawDataset]
        A concat of base datasets each holding raw and description.
    start_offset_samples: int
        Start offset from beginning of recording in samples.
@@ -398,7 +403,7 @@
        by using the _LazyDataFrame (experimental).
    on_missing: str
        What to do if one or several event ids are not found in the recording.
-        Valid keys are ‘error | ‘warning | ‘ignore’. See mne.Epochs.
+        Valid keys are ‘error' | ‘warning' | ‘ignore'. See mne.Epochs.
    n_jobs: int
        Number of jobs to use to parallelize the windowing.
    verbose: bool | str | int | None
@@ -406,7 +411,7 @@
 
    Returns
    -------
-    windows_datasets: BaseConcatDataset
+    windows_datasets: BaseConcatDataset[EEGWindowsDataset]
        Concatenated datasets of WindowsDataset containing the extracted windows.
    """
    stop_offset_samples, drop_last_window = (
@@ -473,11 +478,11 @@ def _create_windows_from_events(
    verbose="error",
    use_mne_epochs=False,
 ):
-    """Create WindowsDataset from BaseDataset based on events.
+    """Create WindowsDataset from RawDataset based on events.
 
    Parameters
    ----------
-    ds : BaseDataset
+    ds : RawDataset
        Dataset containing continuous data and description.
    infer_mapping : bool
        If True, extract all events from all datasets and map them to
@@ -648,11 +653,11 @@ def _create_fixed_length_windows(
    on_missing="error",
    verbose="error",
 ):
-    """Create WindowsDataset from BaseDataset with sliding windows.
+    """Create WindowsDataset from RawDataset with sliding windows.
 
    Parameters
    ----------
-    ds : BaseDataset
+    ds : RawDataset
        Dataset containing continuous data and description.
 
    See `create_fixed_length_windows` for description of other parameters.
@@ -750,7 +755,7 @@ def _create_fixed_length_windows(
 
 
 def create_windows_from_target_channels(
-    concat_ds,
+    concat_ds: BaseConcatDataset[RawDataset],
    window_size_samples=None,
    preload=False,
    picks=None,
@@ -759,7 +764,7 @@ def create_windows_from_target_channels(
    n_jobs=1,
    last_target_only=True,
    verbose="error",
-):
+) -> BaseConcatDataset[EEGWindowsDataset]:
    list_of_windows_ds = Parallel(n_jobs=n_jobs)(
        delayed(_create_windows_from_target_channels)(
            ds,
@@ -788,11 +793,11 @@ def _create_windows_from_target_channels(
    on_missing="error",
    verbose="error",
 ):
-    """Create WindowsDataset from BaseDataset using targets `misc` channels from mne.Raw.
+    """Create WindowsDataset from RawDataset using targets `misc` channels from mne.Raw.
 
    Parameters
    ----------
-    ds : BaseDataset
+    ds : RawDataset
        Dataset containing continuous data and description.
 
    See `create_fixed_length_windows` for description of other parameters.
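
Editor's note: for orientation, a minimal sketch of calling the newly annotated windower (loading the data via MOABBDataset and the dataset/subject names are illustrative assumptions, not part of this diff):

    from braindecode.datasets import MOABBDataset
    from braindecode.preprocessing import create_windows_from_events

    concat_ds = MOABBDataset(dataset_name="BNCI2014_001", subject_ids=[1])  # assumed example source
    windows_ds = create_windows_from_events(
        concat_ds,                        # BaseConcatDataset[RawDataset] per the new annotation
        trial_start_offset_samples=0,
        trial_stop_offset_samples=0,
        preload=True,
    )
    # windows_ds is now annotated as BaseConcatDataset[WindowsDataset | EEGWindowsDataset]
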
braindecode/samplers/base.py CHANGED
@@ -122,14 +122,14 @@ class DistributedRecordingSampler(DistributedSampler):
         DataFrame with at least one of {subject, session, run} columns for each
         window in the BaseConcatDataset to sample examples from. Normally
         obtained with `BaseConcatDataset.get_metadata()`. For instance,
-        `metadata.head()` might look like this:
-
-        i_window_in_trial  i_start_in_trial  i_stop_in_trial  target  subject    session    run
-        0                  0                 0              500      -1        4  session_T  run_0
-        1                  1               500             1000      -1        4  session_T  run_0
-        2                  2              1000             1500      -1        4  session_T  run_0
-        3                  3              1500             2000      -1        4  session_T  run_0
-        4                  4              2000             2500      -1        4  session_T  run_0
+        `metadata.head()` might look like this::
+
+            i_window_in_trial  i_start_in_trial  i_stop_in_trial  target  subject    session    run
+        0                   0                 0              500      -1        4  session_T  run_0
+        1                   1               500             1000      -1        4  session_T  run_0
+        2                   2              1000             1500      -1        4  session_T  run_0
+        3                   3              1500             2000      -1        4  session_T  run_0
+        4                   4              2000             2500      -1        4  session_T  run_0
 
     random_state : np.RandomState | int | None
         Random state.
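
Editor's note: a short sketch of producing the metadata frame described in this docstring (the windowed dataset is assumed to exist already, the import path and forwarding of DistributedSampler keyword arguments such as num_replicas/rank are assumptions):

    from braindecode.samplers import DistributedRecordingSampler

    metadata = windows_ds.get_metadata()  # windows_ds: a windowed BaseConcatDataset (assumed)
    sampler = DistributedRecordingSampler(metadata, random_state=87, num_replicas=2, rank=0)
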
braindecode/version.py CHANGED
@@ -1 +1 @@
-__version__ = "1.3.0.dev181065563"
+__version__ = "1.3.0.dev181594385"
{braindecode-1.3.0.dev181065563.dist-info → braindecode-1.3.0.dev181594385.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: braindecode
-Version: 1.3.0.dev181065563
+Version: 1.3.0.dev181594385
 Summary: Deep learning software to decode EEG, ECG or MEG signals
 Author-email: Robin Tibor Schirrmeister <robintibor@gmail.com>
 Maintainer-email: Alexandre Gramfort <agramfort@meta.com>, Bruno Aristimunha Pinto <b.aristimunha@gmail.com>, Robin Tibor Schirrmeister <robintibor@gmail.com>
@@ -40,6 +40,10 @@ Requires-Dist: linear_attention_transformer
 Requires-Dist: docstring_inheritance
 Provides-Extra: moabb
 Requires-Dist: moabb>=1.2.0; extra == "moabb"
+Provides-Extra: eegprep
+Requires-Dist: eegprep[eeglabio]>=0.1.1; extra == "eegprep"
+Provides-Extra: hug
+Requires-Dist: huggingface_hub[torch]>=0.20.0; extra == "hug"
 Provides-Extra: tests
 Requires-Dist: pytest; extra == "tests"
 Requires-Dist: pytest-cov; extra == "tests"
@@ -65,7 +69,7 @@ Requires-Dist: pre-commit; extra == "docs"
 Requires-Dist: openneuro-py; extra == "docs"
 Requires-Dist: plotly; extra == "docs"
 Provides-Extra: all
-Requires-Dist: braindecode[docs,moabb,tests]; extra == "all"
+Requires-Dist: braindecode[docs,eegprep,hug,moabb,tests]; extra == "all"
 Dynamic: license-file
 
 .. image:: https://badges.gitter.im/braindecodechat/community.svg
braindecode-1.3.0.dev181594385.dist-info/RECORD ADDED
@@ -0,0 +1,106 @@
+braindecode/__init__.py,sha256=Ac3LEEyIHWFY_fFh3eAY1GZUqXcUxVSJwOSUCwGEDvQ,182
+braindecode/classifier.py,sha256=k9vSCtfQbld0YVleDi5rrrmk6k_k5JYEPPBYcNxYjZ8,9807
+braindecode/eegneuralnet.py,sha256=U6kRdT2u8A2Ca0axMTR8IAESBsvgjLMusAbYappKAOk,15368
+braindecode/regressor.py,sha256=VLfrpiXklwI4onkwue3QmzlBWcvspu0tlrLo9RT1Oiw,9375
+braindecode/util.py,sha256=J-tBcDJNlMTIFW2mfOy6Ko0nsgdP4obRoEVDeg2rFH0,12686
+braindecode/version.py,sha256=d5lApX7FOz1FJ-TkCgKVIn5cv_lxN0NfB10c8ebK2ls,35
+braindecode/augmentation/__init__.py,sha256=LG7ONqCufYAF9NZt8POIp10lYXb8iSueYkF-CWGK2Ls,1001
+braindecode/augmentation/base.py,sha256=nK90HWzNwroDCC61e3bZfIsCdEHmGstJliS-TB6wrK0,7327
+braindecode/augmentation/functional.py,sha256=lPhGpZcVtgfQ3oV6p6IQLBCWM_Psa60TwxH3Wj1WyOQ,41133
+braindecode/augmentation/transforms.py,sha256=Ur05yLdROm5pfKTsS2opCWI--X6JwWjP7YMa2KTTZTw,44243
+braindecode/datasets/__init__.py,sha256=OVv9sQf9eeHyHo9BuLWupFAKAMO9Gz4XXpe0dsLvBfQ,994
+braindecode/datasets/base.py,sha256=BYpG2VofbSQN2UCn97pKg_zhPnL9E9mq7ibliXMUx8Y,30637
+braindecode/datasets/bbci.py,sha256=BC9o1thEyYBREAo930O7zZz3xZB-l4Odt5j8E_1huXI,19277
+braindecode/datasets/bcicomp.py,sha256=WwSFfP9whc7Vw5-Jt9vQAHKnRUB5TUe7w945748uGjU,7551
+braindecode/datasets/bids.py,sha256=BxiyGnZuhC56ITUQgJuFv62lgw0LimsSIMRVySgje1A,8843
+braindecode/datasets/experimental.py,sha256=ij7tM_7bXFlvAh3sVVRhoh1CsF9OhMxQC6s4NZyinK8,8515
+braindecode/datasets/mne.py,sha256=5l25ZSnYUX5-jnl4ArwKhVqqm2R852J2T9QdWiJ4npE,6110
+braindecode/datasets/moabb.py,sha256=XsAHcPJYL5unLF4JBSO-8Ka-zs1cX9kew7Gr635jjak,7089
+braindecode/datasets/nmt.py,sha256=vzmO1Ks8oMO7uwXhbysCelyuczSJAFz9QdeXyASshVw,10428
+braindecode/datasets/sleep_physio_challe_18.py,sha256=rpgBtxIsq3qnk7L4ePtCd5_q5Bwr_ZCW3x9izsraoXk,15425
+braindecode/datasets/sleep_physionet.py,sha256=N2KxENTuJb9L1sukPAliAeUD7qxgRrQpQNs6PyaMU6M,4085
+braindecode/datasets/tuh.py,sha256=gdYWzY4F1wKsSSzUaKSRk9q6PPELb0byrgelCt0W9_A,22865
+braindecode/datasets/xy.py,sha256=o6VFUalpgPHp6_ZwwbfV6H7AyCoQST7ugMOYltXqvmI,2984
+braindecode/datautil/__init__.py,sha256=GB9xOudUhJGDyG08PBrnotw6HnWoWIXAHfRNFO-pxSk,1797
+braindecode/datautil/serialization.py,sha256=ewnAOn-QRo7TIudX60pPfR3meWGhYzyhaCqjqLO8gAw,13150
+braindecode/datautil/util.py,sha256=ZfDoxLieKsgI8xcWQqebV-vJ5pJYRvRRHkEwhwpgoKU,674
+braindecode/functional/__init__.py,sha256=JPUDFeKtfogEzfrwPaZRBmxexPjBw7AglYMlImaAnWc,413
+braindecode/functional/functions.py,sha256=x3_UGovZ9HPnSAL2DtMwHsGm6MdNm0CdHd3-pzHzEto,8649
+braindecode/functional/initialization.py,sha256=f-4jIS9QY-YD-3R7N77UbBJh8GcuDvVUzn6Ad6Gx8LE,1382
+braindecode/models/__init__.py,sha256=ovF_WX8ZkXEkleRwYsMMS7ldLPh8_2NzTeYGVqH9ilg,2581
+braindecode/models/atcnet.py,sha256=DtAGN9GV_lM7syXhb3pZlwgOWzo8dpF-j_yuXlL4TIk,32243
+braindecode/models/attentionbasenet.py,sha256=bgc6_7jDT_fnfyCtPhI4i6H7Zornxe46-bMoINLl6YE,30416
+braindecode/models/attn_sleep.py,sha256=5mzYfnpaF1-C8WSV3BOq_HafyJxH69KkOF-KPbenqas,17882
+braindecode/models/base.py,sha256=iufKlZf_Oe7wPkkOvfNPOn387E_np6B9YLeVLHTlRHk,20191
+braindecode/models/bendr.py,sha256=MZQdYFERVeBJnynEXDlCLdn_I0mJtgzzFuMhCXkbMkg,21591
+braindecode/models/biot.py,sha256=LpJ8tXqQL2Zh_vcQnpUHEpAGQrPHtn2cBSTUPFCW8jQ,17546
+braindecode/models/contrawr.py,sha256=wXEr1HULWH-qdVXyt2lhyYajxS_AKv9kGZboJbHSsxo,10076
+braindecode/models/ctnet.py,sha256=T03YsDkALys6LnncYrpsrNZABFQxTyFS8c8vfSJ-4_I,17384
+braindecode/models/deep4.py,sha256=-s-R3H7so2xlSiPsU226eSwscv1X9xJMYLm3LhZ3mSU,14645
+braindecode/models/deepsleepnet.py,sha256=oc1Df7e5cWsxYlLGcI467ZpyIVWhuVoAiSBrJjYVGHo,15268
+braindecode/models/eegconformer.py,sha256=z8oSuo1Dv-MKGyxCFQVxQa3sbeku8v8u66c3Qjig38c,17429
+braindecode/models/eeginception_erp.py,sha256=aAjpweNixFgOSL47r-IjHFZujJje8a7TWudtbYdY98M,16410
+braindecode/models/eeginception_mi.py,sha256=VoWtsaWj1xQ4FlrvCbnPvo8eosufYUmTrL4uvFtqKcg,12456
+braindecode/models/eegitnet.py,sha256=AmgxYmzHd4aIz14Oh4tr9h_OFpwZxZs2CBz_VbDyETk,9886
+braindecode/models/eegminer.py,sha256=JZINTEIbEk6F48REdO8qGghaw57wa7GzZZ1o1mantyk,9911
+braindecode/models/eegnet.py,sha256=qmxQZa-owqEuha7iwOAdPQU29DoLpEyNjH-oouddWLc,13684
+braindecode/models/eegnex.py,sha256=eahHolFl15LwNWeC5qjQqUGqURibQZIV425rI1p-dG8,13604
+braindecode/models/eegsimpleconv.py,sha256=6V5ZQNWijmd3-2wv7lJB_HGBS3wHWWVrKoNIeWTXu-w,7300
+braindecode/models/eegtcnet.py,sha256=09CuM6nYaMZgBpw6-LvYljJXxJrC09Cz1PzVIRqxzYA,10896
+braindecode/models/fbcnet.py,sha256=6NNV86Bm8hNAqsySx_ONJfLqDCMkovQklpX5dSuWk6c,7544
+braindecode/models/fblightconvnet.py,sha256=CB_IrJPnrUQOB64dS1SpgmwsiSBrHY6edVeBbSUbWec,11099
+braindecode/models/fbmsnet.py,sha256=b0oeP6WugW5vBBQHGYM4mWLLq_wxB_RL9QhCW2Jn7RA,11717
+braindecode/models/hybrid.py,sha256=hA8jwD3_3LL71BxUjRM1dkhqlHU9E9hjuDokh-jBq-4,4024
+braindecode/models/ifnet.py,sha256=FBga7U4S8VXviu58-vs8my-7TyCq8dAHnmLKzXmAmFs,15195
+braindecode/models/labram.py,sha256=CNQhKvFS-Ab1yzEiqK4JFjb2XG8bQLjh1XrGK5nhT7c,46715
+braindecode/models/msvtnet.py,sha256=-GEBeeGhAitletjGim-79IfuN2aSg6uc-30SgUS0fq0,12729
+braindecode/models/patchedtransformer.py,sha256=G-a4uxbbv8z7OvdqX51J--jLGBpyTClrTWEzthChiLs,23443
+braindecode/models/sccnet.py,sha256=C7vdwIR5cI6wJCl5f8TnGQG6qinq21y4HG6l-D5AwbY,11971
+braindecode/models/shallow_fbcsp.py,sha256=7U07DJBrm2JHV8v5ja-xuE5-IH5tfmryhJtrfO1n4jk,7531
+braindecode/models/signal_jepa.py,sha256=eYYzpRIUaJZ-dCV9ag20gNZxmduVey1EfhP-a4_jUe8,41369
+braindecode/models/sinc_shallow.py,sha256=RqcvnVgk9bo5WF27XW07-IhNI03rBkhAEco8txQO1Z4,11944
+braindecode/models/sleep_stager_blanco_2020.py,sha256=vXulnDYutEFLM0UPXyAI0YIj5QImUMVEmYZb78j34H8,6034
+braindecode/models/sleep_stager_chambon_2018.py,sha256=8w8IR2PsfG0jSc3o0YVopgHpOvCHNIuMi7-QRJOYEW4,5245
+braindecode/models/sparcnet.py,sha256=MG1OB91guI7ssKRk8GvWlzUvaxo_otaYnbEGzNUZVyg,13973
+braindecode/models/sstdpn.py,sha256=wJv-UYP1q8cMGp2wU1efzIZiigRmkJ8uY22rNB2D7Wc,35077
+braindecode/models/summary.csv,sha256=vFmhpCGFZlxC9Zm8KLBaGRHvZZfdRY85NAGj1Wyv1yU,7209
+braindecode/models/syncnet.py,sha256=yepqfW_fx4Vi72jee-WTBZVwMFRMIhwDAfrDR7Da9iw,8399
+braindecode/models/tcn.py,sha256=QuiLF_2oGuD9oXWFQtAaI-SjdEMlCUQeNI6DTIMuJ70,8217
+braindecode/models/tidnet.py,sha256=HSUL1al6gaRbJ-BRYAAs4KDvLuKEvh0NnBfAsPeWMpM,11837
+braindecode/models/tsinception.py,sha256=nnQxzpqRy9FPuN5xgh9fNQ386VbreQ_nZBSFNkSfal0,10356
+braindecode/models/usleep.py,sha256=oZv2Z78d2jfyyh-LbRBSgGfWjP8YugcXEHvQAENM_Q8,17296
+braindecode/models/util.py,sha256=nrYBdd0FTCoYxgg21oz1UlW-PACx-0-_EyvMQua0QI8,5414
+braindecode/modules/__init__.py,sha256=PD2LpeSHWW_MgEef7-G8ief5gheGObzsIoacchxWuyA,1756
+braindecode/modules/activation.py,sha256=lTO2IjZWBDeXZ4ZVDgLmTDmxHdqyAny3Fsy07HY9tmQ,1466
+braindecode/modules/attention.py,sha256=N-GYLyDV5crKFg08x-lkosMjaOTJv8lk_2p1Jkh_PdU,24142
+braindecode/modules/blocks.py,sha256=M_jWtr9kNOP-hZVVzb9hj-jsSV1mvv-eX1qtV5MacEU,3617
+braindecode/modules/convolution.py,sha256=gZMMOa-2gy1nfduA_j2ezgdIdq5Bi2PtonNomWA4D8k,8481
+braindecode/modules/filter.py,sha256=8Li7AYQeN5D2A0Q14m2LDlQBZJbVZoiH50A2EkGgqZc,25228
+braindecode/modules/layers.py,sha256=LqkXuSaSPKD9qWBy7jYLJ9lBSHObYsmwfgGEFFZ6xq0,3659
+braindecode/modules/linear.py,sha256=pNhSUU0u-IGEUCjAfEDq_TJWnIJMWuOk7Y5L-7I8Meg,1702
+braindecode/modules/parametrization.py,sha256=sTvV21-sdpqpiY2PzwDebi7SeEvkFw8yDgA6OqJDo34,1310
+braindecode/modules/stats.py,sha256=ETqZH6PPyYCss2PKBDNrO4uUeijR4bxvjCQCXjNJkH4,2398
+braindecode/modules/util.py,sha256=tVXEhzeTsYrr_wZ5CiXaq3VYGtC5TmGEEW2hMYjTQAE,2609
+braindecode/modules/wrapper.py,sha256=Z-aZ4wxA0psYefMOfj03r7D1XjD4az6GpZpaQoDPJv0,2421
+braindecode/preprocessing/__init__.py,sha256=Odxj0HsNW-PTT_LSE87hVBz_8isrAWbGgAeZCaNRAUw,1336
+braindecode/preprocessing/eegprep_preprocess.py,sha256=F3zJ76ww150czRZ5m70jj-6xytA3fjlivfUwHpLoYLY,54829
+braindecode/preprocessing/mne_preprocess.py,sha256=_Jczaitqbx16utsUOhnonEcoExf6jPsWNwVOVvoKFfU,2210
+braindecode/preprocessing/preprocess.py,sha256=KV8CXOyv7Ns2dAQJfz6p3h5Ird-kCb9-ySA_nP7urb0,17811
+braindecode/preprocessing/util.py,sha256=vshClUYXas0m6ZS-o1ld28g38ZN_6xraCz-Gn9LxHjo,5649
+braindecode/preprocessing/windowers.py,sha256=AABYuMNNkU5Qf8EC-atSuAvoIwlMGzM1AkXLOwH0TA0,36837
+braindecode/samplers/__init__.py,sha256=TLuO6gXv2WioJdX671MI_CHVSsOfbjnly1Xv9K3_WdA,452
+braindecode/samplers/base.py,sha256=PTa4gGAKXH1Tnx4vBXBAb43x7wQKVvqK1mlM_zE3yY4,15133
+braindecode/samplers/ssl.py,sha256=C-FKopnbncN_-spQPCrgljY5Qds4fgTLr2TG3s_-QqU,9146
+braindecode/training/__init__.py,sha256=sxtfI6MgxX3aP03EFc0wJYA37uULoL9SQyUao1Oxyn0,523
+braindecode/training/callbacks.py,sha256=LqXqzJd6s3w0pvAKy9TEVTxWwVRyWNEu2uyWVsvb9RQ,839
+braindecode/training/losses.py,sha256=EyVVZE_028G6WwrAtzLbrRfDLgsoKwLLhqIkOYBXNL4,3551
+braindecode/training/scoring.py,sha256=WRkwqbitA3m_dzRnGp2ZIZPge5Nhx9gAEQhIHzeH4eU,18716
+braindecode/visualization/__init__.py,sha256=4EER_xHqZIDzEvmgUEm7K1bgNKpyZAIClR9ZCkMuY4M,240
+braindecode/visualization/confusion_matrices.py,sha256=qIWMLEHow5CJ7PhGggD8mnD55Le6xhma9HSzt4R33fc,9509
+braindecode/visualization/gradients.py,sha256=KZo-GA0uwiwty2_94j2IjmCR2SKcfPb1Bi3sQq7vpTk,2170
+braindecode-1.3.0.dev181594385.dist-info/licenses/LICENSE.txt,sha256=7rg7k6hyj8m9whQ7dpKbqnCssoOEx_Mbtqb4uSOjljE,1525
+braindecode-1.3.0.dev181594385.dist-info/licenses/NOTICE.txt,sha256=sOxuTbalPxTM8H6VqtvGbXCt_BoOF7JevEYG_knqbm4,620
+braindecode-1.3.0.dev181594385.dist-info/METADATA,sha256=lhNQq48EMn3lg4atgfEgbDawkhjE8mK5mW7kOxTeQUU,7307
+braindecode-1.3.0.dev181594385.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+braindecode-1.3.0.dev181594385.dist-info/top_level.txt,sha256=pHsWQmSy0uhIez62-HA9j0iaXKvSbUL39ifFRkFnChA,12
+braindecode-1.3.0.dev181594385.dist-info/RECORD,,