braindecode 1.2.0.dev184328194__py3-none-any.whl → 1.3.0.dev171478045__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (47)
  1. braindecode/augmentation/functional.py +154 -54
  2. braindecode/augmentation/transforms.py +2 -2
  3. braindecode/datasets/base.py +1 -1
  4. braindecode/datasets/experimental.py +218 -0
  5. braindecode/datasets/sleep_physio_challe_18.py +2 -1
  6. braindecode/datautil/serialization.py +11 -6
  7. braindecode/eegneuralnet.py +2 -0
  8. braindecode/models/__init__.py +12 -8
  9. braindecode/models/atcnet.py +157 -17
  10. braindecode/models/attentionbasenet.py +153 -26
  11. braindecode/models/{sleep_stager_eldele_2021.py → attn_sleep.py} +12 -2
  12. braindecode/models/base.py +280 -2
  13. braindecode/models/bendr.py +469 -0
  14. braindecode/models/biot.py +3 -1
  15. braindecode/models/ctnet.py +1 -1
  16. braindecode/models/deep4.py +6 -2
  17. braindecode/models/deepsleepnet.py +118 -5
  18. braindecode/models/eegconformer.py +114 -15
  19. braindecode/models/eeginception_erp.py +76 -7
  20. braindecode/models/eeginception_mi.py +2 -0
  21. braindecode/models/eegnet.py +64 -177
  22. braindecode/models/eegnex.py +113 -6
  23. braindecode/models/eegsimpleconv.py +2 -0
  24. braindecode/models/eegtcnet.py +1 -1
  25. braindecode/models/labram.py +170 -69
  26. braindecode/models/patchedtransformer.py +640 -0
  27. braindecode/models/sccnet.py +81 -8
  28. braindecode/models/shallow_fbcsp.py +2 -0
  29. braindecode/models/signal_jepa.py +109 -27
  30. braindecode/models/sleep_stager_blanco_2020.py +2 -0
  31. braindecode/models/sleep_stager_chambon_2018.py +2 -0
  32. braindecode/models/sparcnet.py +2 -0
  33. braindecode/models/sstdpn.py +869 -0
  34. braindecode/models/summary.csv +42 -41
  35. braindecode/models/tidnet.py +2 -0
  36. braindecode/models/tsinception.py +15 -3
  37. braindecode/models/usleep.py +103 -9
  38. braindecode/models/util.py +8 -5
  39. braindecode/preprocessing/preprocess.py +31 -28
  40. braindecode/version.py +1 -1
  41. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171478045.dist-info}/METADATA +10 -3
  42. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171478045.dist-info}/RECORD +46 -43
  43. braindecode/models/eegresnet.py +0 -362
  44. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171478045.dist-info}/WHEEL +0 -0
  45. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171478045.dist-info}/licenses/LICENSE.txt +0 -0
  46. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171478045.dist-info}/licenses/NOTICE.txt +0 -0
  47. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev171478045.dist-info}/top_level.txt +0 -0
@@ -12,33 +12,129 @@ from braindecode.modules import FeedForwardBlock, MultiHeadAttention
 
 
  class EEGConformer(EEGModuleMixin, nn.Module):
- """EEG Conformer from Song et al. (2022) from [song2022]_.
+ """EEG Conformer from Song et al. (2022) [song2022]_.
 
- .. figure:: https://raw.githubusercontent.com/eeyhsong/EEG-Conformer/refs/heads/main/visualization/Fig1.png
+ :bdg-success:`Convolution` :bdg-info:`Small Attention`
+
+ .. figure:: https://raw.githubusercontent.com/eeyhsong/EEG-Conformer/refs/heads/main/visualization/Fig1.png
     :align: center
     :alt: EEGConformer Architecture
+    :width: 600px
+
+ .. rubric:: Architectural Overview
+
+ EEG-Conformer is a *convolution-first* model augmented with a *lightweight transformer
+ encoder*. The end-to-end flow is:
+
+ - (i) :class:`_PatchEmbedding` converts the continuous EEG into a compact sequence of tokens
+   via a :class:`ShallowFBCSPNet`-style temporal–spatial conv stem and temporal pooling;
+ - (ii) :class:`_TransformerEncoder` applies small multi-head self-attention to integrate
+   longer-range temporal context across tokens;
+ - (iii) :class:`_ClassificationHead` aggregates the sequence and performs a linear readout.
+
+ This preserves the strong inductive biases of shallow CNN filter banks while adding
+ just enough attention to capture dependencies beyond the pooling horizon [song2022]_.
+
+ .. rubric:: Macro Components
+
+ - :class:`_PatchEmbedding` **(shallow conv stem → tokens)**
+
+   *Operations.*
+
+   - A temporal convolution (:class:`torch.nn.Conv2d`) with kernel ``(1, L_t)`` forms a data-driven "filter bank".
+   - A spatial convolution (:class:`torch.nn.Conv2d`) with kernel ``(n_chans, 1)`` projects across electrodes,
+     collapsing the channel axis into a virtual channel.
+   - **Normalization**: :class:`torch.nn.BatchNorm2d`.
+   - **Activation**: :class:`torch.nn.ELU`.
+   - **Average pooling**: :class:`torch.nn.AvgPool2d` along time (kernel ``(1, P)`` with stride ``(1, S)``).
+   - A final ``1x1`` projection (:class:`torch.nn.Linear`).
+
+   The result is rearranged to a token sequence ``(B, S_tokens, D)``, where ``D = n_filters_time``.
+
+   *Interpretability/robustness.* Temporal kernels can be inspected as FIR filters;
+   the spatial conv yields channel projections analogous to :class:`ShallowFBCSPNet`'s learned
+   spatial filters. Temporal pooling stabilizes statistics and reduces sequence length.
+
+ - :class:`_TransformerEncoder` **(context over temporal tokens)**
+
+   *Operations.* A stack of ``att_depth`` encoder blocks (:class:`_TransformerEncoderBlock`); each block applies:
+
+   - LayerNorm (:class:`torch.nn.LayerNorm`);
+   - multi-head self-attention over ``att_heads`` heads (:class:`MultiHeadAttention`) with dropout
+     (:class:`torch.nn.Dropout`) and a residual connection;
+   - LayerNorm (:class:`torch.nn.LayerNorm`);
+   - a 2-layer feed-forward block (≈4x expansion, :class:`torch.nn.GELU`) with dropout and a residual connection.
+
+   Shapes remain ``(B, S_tokens, D)`` throughout.
 
- Convolutional Transformer for EEG decoding.
+   *Role.* Small attention focuses on interactions among *temporal patches* (not channels),
+   extending effective receptive fields at modest cost.
 
- The paper and original code with more details about the methodological
- choices are available at the [song2022]_ and [ConformerCode]_.
+ - :class:`_ClassificationHead` **(aggregation + readout)**
 
- This neural network architecture receives a traditional braindecode input.
- The input shape should be three-dimensional matrix representing the EEG
- signals.
+   *Operations.*
+
+   - :class:`torch.nn.Flatten` reshapes the sequence to ``(B, S_tokens * D)``;
+   - an MLP (:class:`torch.nn.Linear` → activation (default :class:`torch.nn.ELU`) → :class:`torch.nn.Dropout` → :class:`torch.nn.Linear`);
+   - a final Linear layer mapping to the classes.
 
- `(batch_size, n_channels, n_timesteps)`.
+   With ``return_features=True``, the features before the last Linear layer can be exported for
+   linear probing or downstream tasks.
 
- The EEG Conformer architecture is composed of three modules:
- - PatchEmbedding
- - TransformerEncoder
- - ClassificationHead
+ .. rubric:: Convolutional Details
+
+ - **Temporal (where time-domain patterns are learned).**
+   The initial ``(1, L_t)`` conv per channel acts as a *learned filter bank* for oscillatory
+   bands and transients. The subsequent **AvgPool** along time performs local integration,
+   converting activations into "patches" (tokens). Pool length/stride control the
+   token rate and set the lower bound on temporal context within each token.
+
+ - **Spatial (how electrodes are processed).**
+   A single conv with kernel ``(n_chans, 1)`` spans the full montage to learn spatial
+   projections for each temporal feature map, collapsing the channel axis into a
+   virtual channel before tokenization. This mirrors the shallow spatial step in
+   :class:`ShallowFBCSPNet` (temporal filters → spatial projection → temporal condensation).
+
+ - **Spectral (how frequency content is captured).**
+   No explicit Fourier/wavelet stage is used. Spectral selectivity emerges implicitly
+   from the learned temporal kernels; pooling further smooths high-frequency noise.
+   The effective spectral resolution is thus governed by ``L_t`` and the pooling
+   configuration.
+
+ .. rubric:: Attention / Sequential Modules
+
+ - **Type.** Standard multi-head self-attention (MHA) with ``att_heads`` heads over the token sequence.
+ - **Shapes.** Input/output: ``(B, S_tokens, D)``; attention operates along the ``S_tokens`` axis.
+ - **Role.** Re-weights and integrates evidence across pooled windows, capturing dependencies
+   longer than any single token while leaving channel relationships to the convolutional stem.
+   The design is intentionally *small*: attention refines rather than replaces convolutional feature extraction.
+
+ .. rubric:: Additional Mechanisms
+
+ - **Parallel with ShallowFBCSPNet.** Both begin with a learned temporal filter bank,
+   a spatial projection across electrodes, and early temporal condensation.
+   :class:`ShallowFBCSPNet` then computes band power (via squaring/log-variance), whereas
+   EEG-Conformer applies BN/ELU and **continues with attention** over tokens to
+   refine temporal context before classification.
+
+ - **Tokenization knob.** ``pool_time_length`` and especially ``pool_time_stride`` set
+   the number of tokens ``S_tokens``. Smaller strides → more tokens and higher attention
+   capacity (but higher compute); larger strides → fewer tokens and a stronger inductive bias.
+
+ - **Embedding dimension = filters.** ``n_filters_time`` serves double duty as both the
+   number of temporal filters in the stem and the transformer's embedding size ``D``,
+   simplifying dimensional alignment.
+
+ .. rubric:: Usage and Configuration
+
+ - **Instantiation.** Choose ``n_filters_time`` (embedding size ``D``) and
+   ``filter_time_length`` to match the rhythms of interest. Tune
+   ``pool_time_length``/``pool_time_stride`` to trade temporal resolution for sequence length.
+   Keep ``att_depth`` modest (e.g., 4–6) and set ``att_heads`` to divide ``D``.
+   ``final_fc_length="auto"`` infers the flattened size from :class:`_PatchEmbedding`.
 
  Notes
  -----
  The authors recommend using data augmentation before using Conformer,
  e.g. segmentation and recombination.
- Please refer to the original paper and code for more details.
+ Please refer to the original paper and code for more details [ConformerCode]_.
 
  The model was initially tuned on 4 seconds of 250 Hz data.
  Please adjust the scale of the temporal convolutional layer,
@@ -47,7 +143,10 @@ class EEGConformer(EEGModuleMixin, nn.Module):
  .. versionadded:: 0.8
 
  We aggregate the parameters based on the parts of the models, or
- when the parameters were used first, e.g. n_filters_time.
+ when the parameters were used first, e.g. ``n_filters_time``.
+
+ .. versionadded:: 1.1
 
  Parameters
  ----------
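A minimal usage sketch based on the parameter names cited in this docstring (values are illustrative and defaults may differ between releases; the exact signature lives in the class reference):

    import torch
    from braindecode.models import EEGConformer

    # 22-channel motor imagery, 4 s at 250 Hz -- the regime the model was tuned on.
    model = EEGConformer(
        n_outputs=4,
        n_chans=22,
        n_times=1000,
        sfreq=250,
        att_depth=6,             # modest encoder stack, per the usage notes
        att_heads=10,            # must divide D = n_filters_time (40 by default)
        final_fc_length="auto",  # infer the flattened size from the patch embedding
    )
    x = torch.randn(8, 22, 1000)  # (batch_size, n_channels, n_timesteps)
    logits = model(x)             # -> (8, 4)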
@@ -15,12 +15,84 @@ from braindecode.modules import DepthwiseConv2d, Ensure4d, InceptionBlock
  class EEGInceptionERP(EEGModuleMixin, nn.Sequential):
  """EEG Inception for ERP-based classification from Santamaria-Vazquez et al. (2020) [santamaria2020]_.
 
+ :bdg-success:`Convolution`
+
  .. figure:: https://braindecode.org/dev/_static/model/eeginceptionerp.jpg
     :align: center
     :alt: EEGInceptionERP Architecture
 
- The code for the paper and this model is also available at [santamaria2020]_
- and an adaptation for PyTorch [2]_.
+ Figure: Overview of the EEG-Inception architecture. 2D convolution blocks and depthwise
+ 2D convolution blocks include batch normalization, activation, and dropout regularization.
+ The kernel size is displayed for convolutional and average-pooling layers.
+
+ .. rubric:: Architectural Overview
+
+ A two-stage, multi-scale CNN tailored to ERP detection from short (0-1000 ms) single-trial
+ epochs. Signals are mapped through
+
+ * (i) :class:`_InceptionModule1`: multi-scale temporal feature extraction plus per-branch spatial mixing;
+ * (ii) :class:`_InceptionModule2`: deeper multi-scale refinement at a reduced temporal resolution; and
+ * (iii) :class:`_OutputModule`: compact aggregation and linear readout.
+
+ .. rubric:: Macro Components
+
+ - :class:`_InceptionModule1` **(multi-scale temporal + spatial mixing)**
+
+   *Operations.*
+
+   - ``EEGInceptionERP.c1``: :class:`torch.nn.Conv2d` ``k=(64,1)``, stride ``(1,1)``, *same* padding on input reshaped to ``(B,1,128,8)`` → BN → activation → dropout.
+   - ``EEGInceptionERP.d1``: depthwise :class:`torch.nn.Conv2d` ``k=(1,8)``, *valid* padding over channels → BN → activation → dropout.
+   - ``EEGInceptionERP.c2``: :class:`torch.nn.Conv2d` ``k=(32,1)`` → BN → activation → dropout; then ``EEGInceptionERP.d2``: depthwise ``k=(1,8)`` → BN → activation → dropout.
+   - ``EEGInceptionERP.c3``: :class:`torch.nn.Conv2d` ``k=(16,1)`` → BN → activation → dropout; then ``EEGInceptionERP.d3``: depthwise ``k=(1,8)`` → BN → activation → dropout.
+   - ``EEGInceptionERP.n1``: concatenation (:func:`torch.cat`) over branch features.
+   - ``EEGInceptionERP.a1``: :class:`torch.nn.AvgPool2d` ``pool=(4,1)``, stride ``(4,1)`` for temporal downsampling.
+
+   *Interpretability/robustness.* Depthwise ``(1, n_chans)`` layers act as learnable montage-wide spatial filters per temporal scale; pooling stabilizes against jitter.
+
+ - :class:`_InceptionModule2` **(refinement at a coarser timebase)**
+
+   *Operations.*
+
+   - ``EEGInceptionERP.c4``: :class:`torch.nn.Conv2d` ``k=(16,1)`` → BN → activation → dropout.
+   - ``EEGInceptionERP.c5``: :class:`torch.nn.Conv2d` ``k=(8,1)`` → BN → activation → dropout.
+   - ``EEGInceptionERP.c6``: :class:`torch.nn.Conv2d` ``k=(4,1)`` → BN → activation → dropout.
+   - ``EEGInceptionERP.n2``: concatenation (:func:`torch.cat`) merging the C4-C6 outputs.
+   - ``EEGInceptionERP.a2``: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``, stride ``(2,1)``.
+   - ``EEGInceptionERP.c7``: :class:`torch.nn.Conv2d` ``k=(8,1)`` → BN → activation → dropout; then ``EEGInceptionERP.a3``: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``.
+   - ``EEGInceptionERP.c8``: :class:`torch.nn.Conv2d` ``k=(4,1)`` → BN → activation → dropout; then ``EEGInceptionERP.a4``: :class:`torch.nn.AvgPool2d` ``pool=(2,1)``.
+
+   *Role.* Adds higher-level, shorter-window evidence while progressively compressing the temporal dimension.
+
+ - :class:`_OutputModule` **(aggregation + readout)**
+
+   *Operations.*
+
+   - :class:`torch.nn.Flatten`
+   - :class:`torch.nn.Linear` ``(features → 2)``
+
+ .. rubric:: Convolutional Details
+
+ - **Temporal (where time-domain patterns are learned).**
+   The first module uses 1D temporal kernels along the 128-sample axis: ``64``, ``32``, ``16``
+   (≈500, 250, 125 ms at 128 Hz). After ``pool=(4,1)``, the second module applies ``16``,
+   ``8``, ``4`` (≈125, 62.5, 31.25 ms at the pooled rate). All conv strides are ``1``;
+   temporal resolution changes only via average pooling.
+
+ - **Spatial (how electrodes are processed).**
+   Depthwise convs with ``k=(1,8)`` span all channels and are applied **per temporal branch**,
+   yielding scale-specific channel projections (no cross-branch mixing until concatenation).
+   There is no full 2D mixing kernel; spatial mixing is factorized and lightweight.
+
+ - **Spectral (how frequency information is captured).**
+   No explicit transform; the multiple temporal kernels form a *learned filter bank* over
+   ERP-relevant bands. Successive pooling acts as low-pass integration to emphasize sustained
+   post-stimulus components.
+
+ .. rubric:: Additional Mechanisms
+
+ - Every conv/depthwise block includes **BatchNorm**, a nonlinearity (the paper grid-searched the activation), and **dropout**.
+ - Two Inception stages followed by short convs and pooling keep the parameter count small (≈15k reported) while preserving multi-scale evidence.
+ - Expected input: epochs of shape ``(B,1,128,8)`` (time x channels as a 2D map) or reshaped from ``(B,8,128)`` with an added singleton feature dimension.
+
+ .. rubric:: Usage and Configuration
+
+ - **Key knobs.** Number of filters per branch; kernel lengths in both Inception modules; depthwise kernel over channels (typically ``n_chans``); pooling lengths/strides; dropout rate; choice of activation.
+ - **Training tips.** Use 0-1000 ms windows at 128 Hz with common average reference (CAR); tune the activation and dropout rate (they strongly affect performance); early-stop on validation loss when overfitting emerges.
+
+ .. rubric:: Implementation Details
 
  The model is strongly based on the original InceptionNet for images. The main goal is
  to extract features in parallel at different scales. The authors extracted three scales
@@ -33,12 +105,9 @@ class EEGInceptionERP(EEGModuleMixin, nn.Sequential):
  The winners of the BEETL Competition/NeurIPS 2021 used parts of the
  model [beetl]_.
 
- The model is fully described in [santamaria2020]_.
+ The code for the paper and this model is also available at [santamaria2020]_
+ and an adaptation for PyTorch [2]_.
 
- Notes
- -----
- This implementation is not guaranteed to be correct, has not been checked
- by original authors, only reimplemented from the paper based on [2]_.
 
  Parameters
  ----------
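A minimal shape-check sketch matching the setup documented above (8 channels, 1 s epochs at 128 Hz); braindecode models take ``(batch, n_chans, n_times)`` and add the 2D map axis internally, so treat the exact keyword names as illustrative:

    import torch
    from braindecode.models import EEGInceptionERP

    # Binary target/non-target ERP setup from the docstring: 8 channels, 128 samples at 128 Hz.
    model = EEGInceptionERP(n_chans=8, n_outputs=2, n_times=128, sfreq=128)
    x = torch.randn(16, 8, 128)  # (batch, n_chans, n_times)
    scores = model(x)            # -> (16, 2)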
@@ -13,6 +13,8 @@ from braindecode.modules import Ensure4d
  class EEGInceptionMI(EEGModuleMixin, nn.Module):
  """EEG Inception for Motor Imagery, as proposed in Zhang et al. (2021) [1]_
 
+ :bdg-success:`Convolution`
+
  .. figure:: https://content.cld.iop.org/journals/1741-2552/18/4/046014/revision3/jneabed81f1_hr.jpg
     :align: center
     :alt: EEGInceptionMI Architecture
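A minimal instantiation sketch for the motor-imagery variant, using only the shared EEGModuleMixin keywords (the window length and class count are hypothetical; see the class reference for the full signature):

    import torch
    from braindecode.models import EEGInceptionMI

    # Motor-imagery example: 22 channels, 3 s windows at 250 Hz, 4 classes.
    model = EEGInceptionMI(n_chans=22, n_outputs=4, n_times=750, sfreq=250)
    y = model(torch.randn(8, 22, 750))  # -> (8, 4)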
@@ -6,7 +6,7 @@ from __future__ import annotations
  from typing import Dict, Optional
 
  from einops.layers.torch import Rearrange
- from mne.utils import warn
+ from mne.utils import deprecated, warn
  from torch import nn
 
  from braindecode.functional import glorot_weight_zero_bias
@@ -19,14 +19,62 @@ from braindecode.modules import (
  )
 
 
- class EEGNetv4(EEGModuleMixin, nn.Sequential):
- """EEGNet v4 model from Lawhern et al. (2018) [EEGNet4]_.
+ class EEGNet(EEGModuleMixin, nn.Sequential):
+ """EEGNet model from Lawhern et al. (2018) [Lawhern2018]_.
+
+ :bdg-success:`Convolution`
 
  .. figure:: https://content.cld.iop.org/journals/1741-2552/15/5/056013/revision2/jneaace8cf01_hr.jpg
-    :align: center
-    :alt: EEGNet4 Architecture
+    :align: center
+    :alt: EEGNet Architecture
+    :width: 600px
+
+ .. rubric:: Architectural Overview
+
+ EEGNet is a compact convolutional network designed for EEG decoding with a pipeline that mirrors classical EEG processing:
+
+ - (i) learn temporal frequency-selective filters,
+ - (ii) learn spatial filters for those frequencies, and
+ - (iii) condense features with depthwise-separable convolutions before a lightweight classifier.
+
+ The architecture is deliberately compact, keeping its temporal and spatial filters interpretable [Lawhern2018]_.
+
+ .. rubric:: Macro Components
+
+ - **Temporal Convolution.**
+   A temporal convolution applied per channel; it learns ``F1`` kernels that act as data-driven band-pass filters.
+ - **Depthwise Spatial Filtering.**
+   A depthwise convolution spanning the channel dimension with ``groups = F1``,
+   yielding ``D`` spatial filters for each temporal filter (no cross-filter mixing).
+ - **Norm-Nonlinearity-Pooling (+ dropout).**
+   Batch normalization → ELU → temporal pooling, with dropout.
+ - **Depthwise-Separable Convolution Block.**
+   (a) a depthwise temporal conv to refine temporal structure;
+   (b) a pointwise 1x1 conv to mix feature maps into ``F2`` combinations.
+ - **Classifier Head.**
+   A lightweight 1x1 conv or dense layer (often with a max-norm constraint).
+
+ .. rubric:: Convolutional Details
+
+ - **Temporal.** The initial temporal convs serve as a *learned filter bank*:
+   long 1-D kernels (implemented as 2-D with singleton spatial extent) emphasize oscillatory bands and transients.
+   Because this stage is linear prior to BN/ELU, kernels can be analyzed as FIR filters to reveal each feature's spectrum [Lawhern2018]_.
+
+ - **Spatial.** The depthwise spatial conv spans the full channel axis (kernel height = number of electrodes; temporal size = 1).
+   With ``groups = F1``, each temporal filter learns its own set of ``D`` spatial projections, akin to CSP, learned end-to-end and
+   typically regularized with max-norm.
+
+ - **Spectral.** No explicit Fourier/wavelet transform is used. Frequency structure
+   is captured implicitly by the temporal filter bank; later depthwise temporal kernels act as short-time integrators/refiners.
+
+ .. rubric:: Additional Comments
+
+ - **Filter-bank structure:** Parallel temporal kernels (``F1``) emulate classical filter banks; pairing them with frequency-specific spatial filters
+   yields features mappable to rhythms and topographies.
+ - **Depthwise & separable convs:** The parameter-efficient decomposition (depthwise + pointwise) retains expressive power while limiting overfitting
+   [Chollet2017]_ and keeps the temporal vs. mixing steps interpretable.
+ - **Regularization:** Batch norm, dropout, pooling, and an optional max-norm constraint on spatial kernels aid stability on small EEG datasets.
+ - The "v4" in the former class name refers to version 4 of the arXiv paper [Lawhern2018]_.
 
- See details in [EEGNet4]_.
 
  Parameters
  ----------
@@ -68,10 +116,13 @@ class EEGNetv4(EEGModuleMixin, nn.Sequential):
 
  References
  ----------
- .. [EEGNet4] Lawhern, V. J., Solon, A. J., Waytowich, N. R., Gordon, S. M.,
+ .. [Lawhern2018] Lawhern, V. J., Solon, A. J., Waytowich, N. R., Gordon, S. M.,
     Hung, C. P., & Lance, B. J. (2018). EEGNet: a compact convolutional
     neural network for EEG-based brain–computer interfaces. Journal of
     neural engineering, 15(5), 056013.
+ .. [Chollet2017] Chollet, F., *Xception: Deep Learning with Depthwise Separable
+    Convolutions*, CVPR, 2017.
+
  """
 
  def __init__(
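A minimal sketch of the canonical EEGNet-8,2 configuration described above (the ``F1``/``D``/``F2`` keyword names follow the docstring and the prior EEGNetv4 signature; treat the exact defaults as release-dependent):

    import torch
    from braindecode.models import EEGNet

    # EEGNet-8,2: F1=8 temporal filters, D=2 spatial filters each, F2 = F1 * D mixed maps.
    model = EEGNet(n_chans=22, n_outputs=4, n_times=1000, F1=8, D=2, F2=16)
    x = torch.randn(32, 22, 1000)  # (batch, n_chans, n_times)
    logits = model(x)              # -> (32, 4)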
@@ -299,174 +350,10 @@ class EEGNetv4(EEGModuleMixin, nn.Sequential):
          glorot_weight_zero_bias(self)
 
 
- class EEGNetv1(EEGModuleMixin, nn.Sequential):
-     """EEGNet model from Lawhern et al. 2016 from [EEGNet]_.
-
-     See details in [EEGNet]_.
-
-     Parameters
-     ----------
-     in_chans :
-         Alias for n_chans.
-     n_classes:
-         Alias for n_outputs.
-     input_window_samples :
-         Alias for n_times.
-     activation: nn.Module, default=nn.ELU
-         Activation function class to apply. Should be a PyTorch activation
-         module class like ``nn.ReLU`` or ``nn.ELU``. Default is ``nn.ELU``.
-
-     Notes
-     -----
-     This implementation is not guaranteed to be correct, has not been checked
-     by original authors, only reimplemented from the paper description.
-
-     References
-     ----------
-     .. [EEGNet] Lawhern, V. J., Solon, A. J., Waytowich, N. R., Gordon,
-        S. M., Hung, C. P., & Lance, B. J. (2016).
-        EEGNet: A Compact Convolutional Network for EEG-based
-        Brain-Computer Interfaces.
-        arXiv preprint arXiv:1611.08024.
-     """
-
-     def __init__(
-         self,
-         n_chans=None,
-         n_outputs=None,
-         n_times=None,
-         final_conv_length="auto",
-         pool_mode="max",
-         second_kernel_size=(2, 32),
-         third_kernel_size=(8, 4),
-         drop_prob=0.25,
-         activation: nn.Module = nn.ELU,
-         chs_info=None,
-         input_window_seconds=None,
-         sfreq=None,
-     ):
-         super().__init__(
-             n_outputs=n_outputs,
-             n_chans=n_chans,
-             chs_info=chs_info,
-             n_times=n_times,
-             input_window_seconds=input_window_seconds,
-             sfreq=sfreq,
-         )
-         del n_outputs, n_chans, chs_info, n_times, input_window_seconds, sfreq
-         warn(
-             "The class EEGNetv1 is deprecated and will be removed in the "
-             "release 1.0 of braindecode. Please use "
-             "braindecode.models.EEGNetv4 instead in the future.",
-             DeprecationWarning,
-         )
-         if final_conv_length == "auto":
-             assert self.n_times is not None
-         self.final_conv_length = final_conv_length
-         self.pool_mode = pool_mode
-         self.second_kernel_size = second_kernel_size
-         self.third_kernel_size = third_kernel_size
-         self.drop_prob = drop_prob
-         # For the load_state_dict
-         # When padronize all layers,
-         # add the old's parameters here
-         self.mapping = {
-             "conv_classifier.weight": "final_layer.conv_classifier.weight",
-             "conv_classifier.bias": "final_layer.conv_classifier.bias",
-         }
-
-         pool_class = dict(max=nn.MaxPool2d, mean=nn.AvgPool2d)[self.pool_mode]
-         self.add_module("ensuredims", Ensure4d())
-         n_filters_1 = 16
-         self.add_module(
-             "conv_1",
-             nn.Conv2d(self.n_chans, n_filters_1, (1, 1), stride=1, bias=True),
-         )
-         self.add_module(
-             "bnorm_1",
-             nn.BatchNorm2d(n_filters_1, momentum=0.01, affine=True, eps=1e-3),
-         )
-         self.add_module("elu_1", activation())
-         # transpose to examples x 1 x (virtual, not EEG) channels x time
-         self.add_module("permute_1", Rearrange("batch x y z -> batch z x y"))
-
-         self.add_module("drop_1", nn.Dropout(p=self.drop_prob))
-
-         n_filters_2 = 4
-         # keras pads unequal padding more in front, so padding
-         # too large should be ok.
-         # Not padding in time so that cropped training makes sense
-         # https://stackoverflow.com/questions/43994604/padding-with-even-kernel-size-in-a-convolutional-layer-in-keras-theano
-
-         self.add_module(
-             "conv_2",
-             nn.Conv2d(
-                 1,
-                 n_filters_2,
-                 self.second_kernel_size,
-                 stride=1,
-                 padding=(self.second_kernel_size[0] // 2, 0),
-                 bias=True,
-             ),
-         )
-         self.add_module(
-             "bnorm_2",
-             nn.BatchNorm2d(n_filters_2, momentum=0.01, affine=True, eps=1e-3),
-         )
-         self.add_module("elu_2", activation())
-         self.add_module("pool_2", pool_class(kernel_size=(2, 4), stride=(2, 4)))
-         self.add_module("drop_2", nn.Dropout(p=self.drop_prob))
-
-         n_filters_3 = 4
-         self.add_module(
-             "conv_3",
-             nn.Conv2d(
-                 n_filters_2,
-                 n_filters_3,
-                 self.third_kernel_size,
-                 stride=1,
-                 padding=(self.third_kernel_size[0] // 2, 0),
-                 bias=True,
-             ),
-         )
-         self.add_module(
-             "bnorm_3",
-             nn.BatchNorm2d(n_filters_3, momentum=0.01, affine=True, eps=1e-3),
-         )
-         self.add_module("elu_3", activation())
-         self.add_module("pool_3", pool_class(kernel_size=(2, 4), stride=(2, 4)))
-         self.add_module("drop_3", nn.Dropout(p=self.drop_prob))
-
-         output_shape = self.get_output_shape()
-         n_out_virtual_chans = output_shape[2]
-
-         if self.final_conv_length == "auto":
-             n_out_time = output_shape[3]
-             self.final_conv_length = n_out_time
-
-         # Incorporating classification module and subsequent ones in one final layer
-         module = nn.Sequential()
-
-         module.add_module(
-             "conv_classifier",
-             nn.Conv2d(
-                 n_filters_3,
-                 self.n_outputs,
-                 (n_out_virtual_chans, self.final_conv_length),
-                 bias=True,
-             ),
-         )
-
-         # Transpose back to the logic of braindecode,
-         # so time in third dimension (axis=2)
-         module.add_module(
-             "permute_2",
-             Rearrange("batch x y z -> batch x z y"),
-         )
-
-         module.add_module("squeeze", SqueezeFinalOutput())
-
-         self.add_module("final_layer", module)
+ @deprecated(
+     "`EEGNetv4` was renamed to `EEGNet` in v1.12; this alias will be removed in v1.14."
+ )
+ class EEGNetv4(EEGNet):
+     """Deprecated alias for EEGNet."""
 
-         glorot_weight_zero_bias(self)
+     pass
@@ -16,9 +16,122 @@ from braindecode.modules import Conv2dWithConstraint, LinearWithConstraint
  class EEGNeX(EEGModuleMixin, nn.Module):
  """EEGNeX model from Chen et al. (2024) [eegnex]_.
 
+ :bdg-success:`Convolution`
+
  .. figure:: https://braindecode.org/dev/_static/model/eegnex.jpg
     :align: center
     :alt: EEGNeX Architecture
+    :width: 620px
+
+ .. rubric:: Architectural Overview
+
+ EEGNeX is a **purely convolutional** architecture that refines the EEGNet-style stem
+ and deepens the temporal stack with **dilated temporal convolutions**. The end-to-end
+ flow is:
+
+ - (i) **Block-1/2**: two temporal convolutions ``(1 x L)`` with BN refine a
+   learned FIR-like *temporal filter bank* (no pooling yet);
+ - (ii) **Block-3**: a depthwise **spatial** convolution across electrodes
+   ``(n_chans x 1)`` with a max-norm constraint, followed by ELU → AvgPool (time) → Dropout;
+ - (iii) **Block-4/5**: two additional **temporal** convolutions with increasing **dilation**
+   to expand the receptive field; the last block applies ELU → AvgPool → Dropout → Flatten;
+ - (iv) **Classifier**: a max-norm-constrained linear layer.
+
+ The published work positions EEGNeX as a compact, conv-only alternative that consistently
+ outperforms prior baselines across MOABB-style benchmarks, with the popular
+ “EEGNeX-8,32” shorthand denoting *8 temporal filters* and *kernel length 32*.
+
+ .. rubric:: Macro Components
+
+ - **Block-1 / Block-2 (learned temporal filter bank).**
+
+   *Operations.*
+
+   - :class:`torch.nn.Conv2d` with kernels ``(1, L)``
+   - :class:`torch.nn.BatchNorm2d` (no nonlinearity until Block-3, mirroring a linear FIR analysis stage)
+
+   These layers set up frequency-selective detectors before spatial mixing.
+
+   *Interpretability.* Kernels can be inspected as FIR filters; two stacked temporal
+   convs allow longer effective kernels without parameter blow-up.
+
+ - **Block-3 (spatial projection + condensation).**
+
+   *Operations.*
+
+   - :class:`braindecode.modules.Conv2dWithConstraint` with kernel ``(n_chans, 1)``
+     and ``groups = filter_2`` (depthwise across filters)
+   - :class:`torch.nn.BatchNorm2d`
+   - :class:`torch.nn.ELU`
+   - :class:`torch.nn.AvgPool2d` (time)
+   - :class:`torch.nn.Dropout`
+
+   *Role.* Learns per-filter spatial patterns over the **full montage** while temporal
+   pooling stabilizes and compresses features; max-norm encourages well-behaved spatial
+   weights, similar to EEGNet practice.
+
+ - **Block-4 / Block-5 (dilated temporal integration).**
+
+   *Operations.*
+
+   - :class:`torch.nn.Conv2d` with kernels ``(1, k)`` and increasing **dilations** (e.g., 2 then 4)
+   - :class:`torch.nn.BatchNorm2d`
+   - :class:`torch.nn.ELU`
+   - :class:`torch.nn.AvgPool2d` (time)
+   - :class:`torch.nn.Dropout`
+   - :class:`torch.nn.Flatten`
+
+   *Role.* Expands the temporal receptive field efficiently to capture rhythms and
+   long-range context after condensation.
+
+ - **Final classifier (max-norm linear).**
+
+   *Operations.*
+
+   - :class:`braindecode.modules.LinearWithConstraint` maps the flattened
+     vector to the target classes; the max-norm constraint regularizes the readout.
+
+ .. rubric:: Convolutional Details
+
+ - **Temporal (where time-domain patterns are learned).**
+   Blocks 1-2 learn the primary filter bank (oscillations/transients), while Blocks 4-5
+   use **dilation** to integrate over longer horizons without extra pooling. The final
+   AvgPool in Block-5 sets the output token rate and helps suppress noise.
+
+ - **Spatial (how electrodes are processed).**
+   A *single* depthwise spatial conv (Block-3) spans the entire electrode set
+   (kernel ``(n_chans, 1)``), producing per-temporal-filter topographies; no cross-filter
+   mixing occurs at this stage, aiding interpretability.
+
+ - **Spectral (how frequency content is captured).**
+   Frequency selectivity emerges from the learned temporal kernels; dilation broadens effective
+   bandwidth coverage by composing multiple scales.
+
+ .. rubric:: Additional Mechanisms
+
+ - **EEGNeX-8,32 naming.** “8,32” indicates *8 temporal filters* and *kernel length 32*,
+   reflecting the paper's ablation path from EEGNet-8,2 toward thicker temporal kernels
+   and a deeper conv stack.
+ - **Max-norm constraints.** The spatial (Block-3) and final linear layers use max-norm
+   regularization (standard in EEG CNNs) to reduce overfitting and encourage stable spatial
+   patterns.
+
+ .. rubric:: Usage and Configuration
+
+ - **Kernel schedule.** Start with the canonical **EEGNeX-8,32** (``filter_1=8``,
+   ``kernel_block_1_2=32``) and keep the **Block-3** depth multiplier modest (e.g., 2) to match
+   the paper's “pure conv” profile.
+ - **Pooling vs. dilation.** Use pooling in Blocks 3 and 5 to control compute and variance;
+   increase the dilations (Blocks 4-5) to widen temporal context when windows are short.
+ - **Regularization.** Combine dropout (Blocks 3 & 5) with max-norm on the spatial and
+   classifier layers; prefer ELU activations for stable training on small EEG datasets.
+ - **Implementation.** The braindecode implementation follows the paper's conv-only design
+   with five blocks and reproduces the depthwise spatial step and dilated temporal stack.
+   See the class reference for exact kernel sizes, dilations, and pooling defaults; the
+   original implementation is available at [EEGNexCode]_.
+
+ .. versionadded:: 1.1
 
  Parameters
  ----------
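A minimal sketch of the canonical EEGNeX-8,32 configuration described above (the ``filter_1`` and ``kernel_block_1_2`` names are taken from this docstring's usage notes; verify them against the class reference before relying on them):

    import torch
    from braindecode.models import EEGNeX

    # EEGNeX-8,32: 8 temporal filters with kernel length 32 in Blocks 1-2.
    model = EEGNeX(n_chans=22, n_outputs=4, n_times=1000, filter_1=8, kernel_block_1_2=32)
    x = torch.randn(8, 22, 1000)  # (batch, n_chans, n_times)
    logits = model(x)             # -> (8, 4)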
@@ -45,12 +158,6 @@ class EEGNeX(EEGModuleMixin, nn.Module):
  avg_pool_block5 : tuple[int, int], optional
      Pooling size for block 5. Default is (1, 8).
 
- Notes
- -----
- This implementation is not guaranteed to be correct, has not been checked
- by original authors, only reimplemented from the paper description and
- source code in tensorflow [EEGNexCode]_.
-
  References
  ----------
  .. [eegnex] Chen, X., Teng, X., Chen, H., Pan, Y., & Geyer, P. (2024).