braindecode 1.3.0.dev168011974__py3-none-any.whl → 1.3.0.dev168496007__py3-none-any.whl



Files changed (66)
  1. braindecode/augmentation/base.py +1 -1
  2. braindecode/datasets/__init__.py +10 -2
  3. braindecode/datasets/base.py +116 -152
  4. braindecode/datasets/bcicomp.py +4 -4
  5. braindecode/datasets/bids.py +3 -3
  6. braindecode/datasets/experimental.py +2 -2
  7. braindecode/datasets/mne.py +3 -5
  8. braindecode/datasets/moabb.py +2 -2
  9. braindecode/datasets/nmt.py +2 -2
  10. braindecode/datasets/sleep_physio_challe_18.py +2 -2
  11. braindecode/datasets/sleep_physionet.py +2 -2
  12. braindecode/datasets/tuh.py +2 -2
  13. braindecode/datasets/xy.py +2 -2
  14. braindecode/datautil/serialization.py +7 -7
  15. braindecode/eegneuralnet.py +2 -0
  16. braindecode/functional/functions.py +6 -2
  17. braindecode/functional/initialization.py +2 -3
  18. braindecode/models/__init__.py +6 -0
  19. braindecode/models/atcnet.py +27 -28
  20. braindecode/models/attentionbasenet.py +39 -32
  21. braindecode/models/attn_sleep.py +2 -0
  22. braindecode/models/base.py +280 -2
  23. braindecode/models/bendr.py +469 -0
  24. braindecode/models/biot.py +2 -0
  25. braindecode/models/contrawr.py +2 -0
  26. braindecode/models/ctnet.py +8 -3
  27. braindecode/models/deepsleepnet.py +28 -19
  28. braindecode/models/eegconformer.py +2 -2
  29. braindecode/models/eeginception_erp.py +31 -25
  30. braindecode/models/eegitnet.py +2 -0
  31. braindecode/models/eegminer.py +2 -0
  32. braindecode/models/eegnet.py +1 -1
  33. braindecode/models/eegtcnet.py +2 -0
  34. braindecode/models/fbcnet.py +5 -1
  35. braindecode/models/fblightconvnet.py +2 -0
  36. braindecode/models/fbmsnet.py +20 -6
  37. braindecode/models/ifnet.py +2 -0
  38. braindecode/models/labram.py +193 -87
  39. braindecode/models/msvtnet.py +2 -0
  40. braindecode/models/patchedtransformer.py +640 -0
  41. braindecode/models/signal_jepa.py +111 -27
  42. braindecode/models/sinc_shallow.py +12 -9
  43. braindecode/models/sstdpn.py +869 -0
  44. braindecode/models/summary.csv +3 -0
  45. braindecode/models/syncnet.py +2 -0
  46. braindecode/models/tcn.py +2 -0
  47. braindecode/models/usleep.py +26 -21
  48. braindecode/models/util.py +3 -0
  49. braindecode/modules/attention.py +10 -10
  50. braindecode/modules/blocks.py +3 -3
  51. braindecode/modules/filter.py +2 -9
  52. braindecode/modules/layers.py +18 -17
  53. braindecode/preprocessing/__init__.py +24 -0
  54. braindecode/preprocessing/eegprep_preprocess.py +1202 -0
  55. braindecode/preprocessing/preprocess.py +12 -12
  56. braindecode/preprocessing/util.py +166 -0
  57. braindecode/preprocessing/windowers.py +26 -20
  58. braindecode/samplers/base.py +8 -8
  59. braindecode/version.py +1 -1
  60. {braindecode-1.3.0.dev168011974.dist-info → braindecode-1.3.0.dev168496007.dist-info}/METADATA +6 -2
  61. braindecode-1.3.0.dev168496007.dist-info/RECORD +106 -0
  62. braindecode-1.3.0.dev168011974.dist-info/RECORD +0 -101
  63. {braindecode-1.3.0.dev168011974.dist-info → braindecode-1.3.0.dev168496007.dist-info}/WHEEL +0 -0
  64. {braindecode-1.3.0.dev168011974.dist-info → braindecode-1.3.0.dev168496007.dist-info}/licenses/LICENSE.txt +0 -0
  65. {braindecode-1.3.0.dev168011974.dist-info → braindecode-1.3.0.dev168496007.dist-info}/licenses/NOTICE.txt +0 -0
  66. {braindecode-1.3.0.dev168011974.dist-info → braindecode-1.3.0.dev168496007.dist-info}/top_level.txt +0 -0
@@ -35,51 +35,57 @@ class EEGInceptionERP(EEGModuleMixin, nn.Sequential):
   - :class:`_InceptionModule1` **(multi-scale temporal + spatial mixing)**

   - *Operations.*
- - `EEGInceptionERP.c1`: :class:`torch.nn.Conv2d` ``k=(64,1)``, stride ``(1,1)``, *same* pad on input reshaped to ``(B,1,128,8)`` → BN → activation → dropout.
- - `EEGInceptionERP.d1`: :class:`torch.nn.Conv2d` (depthwise) ``k=(1,8)``, *valid* pad over channels → BN → activation → dropout.
- - `EEGInceptionERP.c2`: :class:`torch.nn.Conv2d` ``k=(32,1)`` → BN → activation → dropout; then `EEGInceptionERP.d2` depthwise ``k=(1,8)`` → BN → activation → dropout.
- - `EEGInceptionERP.c3`: :class:`torch.nn.Conv2d` ``k=(16,1)`` → BN → activation → dropout; then `EEGInceptionERP.d3` depthwise ``k=(1,8)`` → BN → activation → dropout.
- - `EEGInceptionERP.n1`: :class:`torch.nn.Concat` over branch features.
- - `EEGInceptionERP.a1`: :class:`torch.nn.AvgPool2d` ``pool=(4,1)``, stride ``(4,1)`` for temporal downsampling.
+
+ - `EEGInceptionERP.c1`: :class:`torch.nn.Conv2d` ``k=(64,1)``, stride ``(1,1)``, *same* pad on input reshaped to ``(B,1,128,8)`` → BN → activation → dropout.
+ - `EEGInceptionERP.d1`: :class:`torch.nn.Conv2d` (depthwise) ``k=(1,8)``, *valid* pad over channels → BN → activation → dropout.
+ - `EEGInceptionERP.c2`: :class:`torch.nn.Conv2d` ``k=(32,1)`` → BN → activation → dropout; then `EEGInceptionERP.d2` depthwise ``k=(1,8)`` → BN → activation → dropout.
+ - `EEGInceptionERP.c3`: :class:`torch.nn.Conv2d` ``k=(16,1)`` BN → activation → dropout; then `EEGInceptionERP.d3` depthwise ``k=(1,8)`` → BN → activation → dropout.
+ - `EEGInceptionERP.n1`: :class:`torch.nn.Concat` over branch features.
+ - `EEGInceptionERP.a1`: :class:`torch.nn.AvgPool2d` ``pool=(4,1)``, stride ``(4,1)`` for temporal downsampling.

   *Interpretability/robustness.* Depthwise `1 x n_chans` layers act as learnable montage-wide spatial filters per temporal scale; pooling stabilizes against jitter.

   - :class:`_InceptionModule2` **(refinement at coarser timebase)**

   - *Operations.*
- - `EEGInceptionERP.c4`: :class:`torch.nn.Conv2d` ``k=(16,1)`` → BN → activation → dropout.
- - `EEGInceptionERP.c5`: :class:`torch.nn.Conv2d` ``k=(8,1)`` → BN → activation → dropout.
- - `EEGInceptionERP.c6`: :class:`torch.nn.Conv2d` ``k=(4,1)`` → BN → activation → dropout.
- - `EEGInceptionERP.n2`: :class:`torch.nn.Concat` (merge C4-C6 outputs).
- - `EEGInceptionERP.a2`: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``, stride ``(2,1)``.
- - `EEGInceptionERP.c7`: :class:`torch.nn.Conv2d` ``k=(8,1)`` BN → activation → dropout; then `EEGInceptionERP.a3`: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``.
- - `EEGInceptionERP.c8`: :class:`torch.nn.Conv2d` ``k=(4,1)`` → BN → activation → dropout; then `EEGInceptionERP.a4`: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``.
+
+ - `EEGInceptionERP.c4`: :class:`torch.nn.Conv2d` ``k=(16,1)`` → BN → activation → dropout.
+ - `EEGInceptionERP.c5`: :class:`torch.nn.Conv2d` ``k=(8,1)`` → BN → activation → dropout.
+ - `EEGInceptionERP.c6`: :class:`torch.nn.Conv2d` ``k=(4,1)`` BN → activation → dropout.
+ - `EEGInceptionERP.n2`: :class:`torch.nn.Concat` (merge C4-C6 outputs).
+ - `EEGInceptionERP.a2`: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``, stride ``(2,1)``.
+ - `EEGInceptionERP.c7`: :class:`torch.nn.Conv2d` ``k=(8,1)`` → BN → activation → dropout; then `EEGInceptionERP.a3`: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``.
+ - `EEGInceptionERP.c8`: :class:`torch.nn.Conv2d` ``k=(4,1)`` → BN → activation → dropout; then `EEGInceptionERP.a4`: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``.

   *Role.* Adds higher-level, shorter-window evidence while progressively compressing temporal dimension.

   - :class:`_OutputModule` **(aggregation + readout)**

   - *Operations.*
- - :class:`torch.nn.Flatten`
- - :class:`torch.nn.Linear` ``(features → 2)``
+
+ - :class:`torch.nn.Flatten`
+ - :class:`torch.nn.Linear` ``(features → 2)``

   .. rubric:: Convolutional Details

   - **Temporal (where time-domain patterns are learned).**
- First module uses 1D temporal kernels along the 128-sample axis: ``64``, ``32``, ``16``
- (≈500, 250, 125 ms at 128 Hz). After ``pool=(4,1)``, the second module applies ``16``,
- ``8``, ``4`` (≈125, 62.5, 31.25 ms at the pooled rate). All strides are ``1`` in convs;
- temporal resolution changes only via average pooling.
+
+ First module uses 1D temporal kernels along the 128-sample axis: ``64``, ``32``, ``16``
+ (≈500, 250, 125 ms at 128 Hz). After ``pool=(4,1)``, the second module applies ``16``,
+ ``8``, ``4`` (≈125, 62.5, 31.25 ms at the pooled rate). All strides are ``1`` in convs;
+ temporal resolution changes only via average pooling.

   - **Spatial (how electrodes are processed).**
- Depthwise convs with ``k=(1,8)`` span all channels and are applied **per temporal branch**,
- yielding scale-specific channel projections (no cross-branch mixing until concatenation).
- There is no full 2D mixing kernel; spatial mixing is factorized and lightweight.
+
+ Depthwise convs with ``k=(1,8)`` span all channels and are applied **per temporal branch**,
+ yielding scale-specific channel projections (no cross-branch mixing until concatenation).
+ There is no full 2D mixing kernel; spatial mixing is factorized and lightweight.

   - **Spectral (how frequency information is captured).**
- No explicit transform; multiple temporal kernels form a *learned filter bank* over
- ERP-relevant bands. Successive pooling acts as low-pass integration to emphasize sustained
- post-stimulus components.
+
+ No explicit transform; multiple temporal kernels form a *learned filter bank* over
+ ERP-relevant bands. Successive pooling acts as low-pass integration to emphasize sustained
+ post-stimulus components.

   .. rubric:: Additional Mechanisms

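For orientation (not part of the diff), a minimal usage sketch of the model whose docstring changes above. The constructor arguments follow braindecode's shared ``EEGModuleMixin`` interface; the shape values (8 channels, 128 samples at 128 Hz) mirror the ``(B,1,128,8)`` reshape described in the docstring and are illustrative, not prescribed by this hunk::

    import torch
    from braindecode.models import EEGInceptionERP

    # 8 electrodes, 1 s at 128 Hz -> matches the (B, 1, 128, 8) reshape above
    model = EEGInceptionERP(n_chans=8, n_outputs=2, n_times=128, sfreq=128)
    x = torch.randn(4, 8, 128)  # (batch, n_chans, n_times)
    print(model(x).shape)       # expected: torch.Size([4, 2])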
@@ -11,6 +11,8 @@ from braindecode.modules import DepthwiseConv2d, Ensure4d, InceptionBlock
  class EEGITNet(EEGModuleMixin, nn.Sequential):
      """EEG-ITNet from Salami, et al (2022) [Salami2022]_

+     :bdg-success:`Convolution` :bdg-secondary:`Recurrent`
+
      .. figure:: https://braindecode.org/dev/_static/model/eegitnet.jpg
         :align: center
         :alt: EEG-ITNet Architecture
@@ -21,6 +21,8 @@ _eeg_miner_methods = ["mag", "corr", "plv"]
  class EEGMiner(EEGModuleMixin, nn.Module):
      """EEGMiner from Ludwig et al (2024) [eegminer]_.

+     :bdg-success:`Convolution` :bdg-warning:`Interpretability`
+
      .. figure:: https://content.cld.iop.org/journals/1741-2552/21/3/036010/revision2/jnead44d7f1_hr.jpg
         :align: center
         :alt: EEGMiner Architecture
@@ -57,7 +57,7 @@ class EEGNet(EEGModuleMixin, nn.Sequential):

   - **Temporal.** The initial temporal convs serve as a *learned filter bank*:
     long 1-D kernels (implemented as 2-D with singleton spatial extent) emphasize oscillatory bands and transients.
-    Because this stage is linear prior to BN/ELU, kernels can be analyzed as FIR filters to reveal each features spectrum [Lawhern2018]_.
+    Because this stage is linear prior to BN/ELU, kernels can be analyzed as FIR filters to reveal each feature's spectrum [Lawhern2018]_.

   - **Spatial.** The depthwise spatial conv spans the full channel axis (kernel height = #electrodes; temporal size = 1).
     With ``groups = F1``, each temporal filter learns its own set of ``D`` spatial projections—akin to CSP, learned end-to-end and
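The FIR reading in the corrected line above can be made concrete. A hedged sketch, where the random array stands in for one trained temporal kernel (in practice you would read it out of the first conv layer's weights)::

    import numpy as np
    from scipy.signal import freqz

    sfreq = 128.0
    kernel = np.random.randn(64)              # stand-in for a learned temporal kernel
    w, h = freqz(kernel, worN=512, fs=sfreq)   # treat the kernel as an FIR filter
    print(f"peak response near {w[np.argmax(np.abs(h))]:.1f} Hz")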
@@ -15,6 +15,8 @@ from braindecode.modules import Chomp1d, MaxNormLinear
  class EEGTCNet(EEGModuleMixin, nn.Module):
      """EEGTCNet model from Ingolfsson et al. (2020) [ingolfsson2020]_.

+     :bdg-success:`Convolution` :bdg-secondary:`Recurrent`
+
      .. figure:: https://braindecode.org/dev/_static/model/eegtcnet.jpg
         :align: center
         :alt: EEGTCNet Architecture
@@ -31,6 +31,8 @@ _valid_layers = {
  class FBCNet(EEGModuleMixin, nn.Module):
      """FBCNet from Mane, R et al (2021) [fbcnet2021]_.

+     :bdg-success:`Convolution` :bdg-primary:`Filterbank`
+
      .. figure:: https://raw.githubusercontent.com/ravikiran-mane/FBCNet/refs/heads/master/FBCNet-V2.png
         :align: center
         :alt: FBCNet Architecture
@@ -67,7 +69,9 @@ class FBCNet(EEGModuleMixin, nn.Module):
      linear_max_norm : float, default=0.5
          Maximum norm for the final linear layer.
      filter_parameters: dict, default None
-         Parameters for the FilterBankLayer
+         Dictionary of parameters to use for the FilterBankLayer.
+         If None, a default Chebyshev Type II filter with transition bandwidth of
+         2 Hz and stop-band ripple of 30 dB will be used.

      References
      ----------
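The new default described above (Chebyshev Type II, 2 Hz transition bandwidth, 30 dB stop-band ripple) corresponds to a standard scipy design. A sketch with an illustrative 4–8 Hz pass band (the band edges and sampling rate are assumptions, not from the diff)::

    from scipy.signal import cheb2ord, cheby2

    sfreq, nyq = 250.0, 125.0
    low, high = 4.0, 8.0       # illustrative pass band (Hz)
    trans_bw, rs = 2.0, 30.0   # transition bandwidth (Hz), stop-band ripple (dB)
    wp = [low / nyq, high / nyq]
    ws = [(low - trans_bw) / nyq, (high + trans_bw) / nyq]
    order, wn = cheb2ord(wp, ws, gpass=3, gstop=rs)   # minimal order meeting the spec
    b, a = cheby2(order, rs, wn, btype="bandpass")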
@@ -18,6 +18,8 @@ from braindecode.modules import (
  class FBLightConvNet(EEGModuleMixin, nn.Module):
      """LightConvNet from Ma, X et al (2023) [lightconvnet]_.

+     :bdg-success:`Convolution` :bdg-primary:`Filterbank`
+
      .. figure:: https://raw.githubusercontent.com/Ma-Xinzhi/LightConvNet/refs/heads/main/network_architecture.png
         :align: center
         :alt: LightConvNet Neural Network
@@ -1,6 +1,6 @@
  from __future__ import annotations

- from typing import Optional, Sequence
+ from typing import Any, Sequence

  import torch
  from einops.layers.torch import Rearrange
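The import swap above supports the PEP 604 union style used later in this file (``X | None`` instead of ``Optional[X]``), which ``from __future__ import annotations`` makes safe on older Python versions. A minimal sketch of the pattern (the function name is hypothetical)::

    from __future__ import annotations

    from typing import Any

    def build_filter_bank(filter_parameters: dict[Any, Any] | None = None) -> dict[Any, Any]:
        # None selects the library's default filter settings
        return {} if filter_parameters is None else dict(filter_parameters)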
@@ -19,6 +19,8 @@ from braindecode.modules import (
  class FBMSNet(EEGModuleMixin, nn.Module):
      """FBMSNet from Liu et al (2022) [fbmsnet]_.

+     :bdg-success:`Convolution` :bdg-primary:`Filterbank`
+
      .. figure:: https://raw.githubusercontent.com/Want2Vanish/FBMSNet/refs/heads/main/FBMSNet.png
         :align: center
         :alt: FBMSNet Architecture
@@ -55,16 +57,28 @@ class FBMSNet(EEGModuleMixin, nn.Module):
      ----------
      n_bands : int, default=9
          Number of input channels (e.g., number of frequency bands).
-     stride_factor : int, default=4
-         Stride factor for temporal segmentation.
-     temporal_layer : str, default='LogVarLayer'
-         Temporal aggregation layer to use.
      n_filters_spat : int, default=36
          Number of output channels from the MixedConv2d layer.
+     temporal_layer : str, default='LogVarLayer'
+         Temporal aggregation layer to use.
+     n_dim: int, default=3
+         Dimension of the temporal reduction layer.
+     stride_factor : int, default=4
+         Stride factor for temporal segmentation.
      dilatability : int, default=8
          Expansion factor for the spatial convolution block.
      activation : nn.Module, default=nn.SiLU
          Activation function class to apply.
+     kernels_weights : Sequence[int], default=(15, 31, 63, 125)
+         Kernel sizes for the MixedConv2d layer.
+     cnn_max_norm : float, default=2
+         Maximum norm constraint for the convolutional layers.
+     linear_max_norm : float, default=0.5
+         Maximum norm constraint for the linear layers.
+     filter_parameters : dict, default=None
+         Dictionary of parameters to use for the FilterBankLayer.
+         If None, a default Chebyshev Type II filter with transition bandwidth of
+         2 Hz and stop-band ripple of 30 dB will be used.
      verbose: bool, default False
          Verbose parameter to create the filter using mne.

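A hedged construction sketch tying the documented parameters together. The ``n_chans``/``n_outputs``/``n_times``/``sfreq`` values are illustrative and come from the shared ``EEGModuleMixin`` interface rather than this hunk; the remaining values mirror the defaults listed above::

    from braindecode.models import FBMSNet

    model = FBMSNet(
        n_chans=22,                         # electrodes (illustrative)
        n_outputs=4,                        # classes (illustrative)
        n_times=1000,                       # samples per window (illustrative)
        sfreq=250,                          # needed to build the filter bank
        n_bands=9,
        stride_factor=4,                    # 1000 samples -> 4 variance segments
        kernels_weights=(15, 31, 63, 125),  # documented MixedConv2d defaults
    )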
@@ -101,7 +115,7 @@ class FBMSNet(EEGModuleMixin, nn.Module):
          cnn_max_norm: float = 2,
          linear_max_norm: float = 0.5,
          verbose: bool = False,
-         filter_parameters: Optional[dict] = None,
+         filter_parameters: dict[Any, Any] | None = None,
      ):
          super().__init__(
              n_chans=n_chans,
@@ -31,6 +31,8 @@ from braindecode.modules import (
  class IFNet(EEGModuleMixin, nn.Module):
      """IFNetV2 from Wang J et al (2023) [ifnet]_.

+     :bdg-success:`Convolution` :bdg-primary:`Filterbank`
+
      .. figure:: https://raw.githubusercontent.com/Jiaheng-Wang/IFNet/main/IFNet.png
         :align: center
         :alt: IFNetV2 Architecture