braindecode 1.2.0.dev184328194__py3-none-any.whl → 1.3.0.dev168496007__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of braindecode might be problematic. Click here for more details.

Files changed (80) hide show
  1. braindecode/augmentation/base.py +1 -1
  2. braindecode/augmentation/functional.py +154 -54
  3. braindecode/augmentation/transforms.py +2 -2
  4. braindecode/datasets/__init__.py +10 -2
  5. braindecode/datasets/base.py +116 -152
  6. braindecode/datasets/bcicomp.py +4 -4
  7. braindecode/datasets/bids.py +3 -3
  8. braindecode/datasets/experimental.py +218 -0
  9. braindecode/datasets/mne.py +3 -5
  10. braindecode/datasets/moabb.py +2 -2
  11. braindecode/datasets/nmt.py +2 -2
  12. braindecode/datasets/sleep_physio_challe_18.py +4 -3
  13. braindecode/datasets/sleep_physionet.py +2 -2
  14. braindecode/datasets/tuh.py +2 -2
  15. braindecode/datasets/xy.py +2 -2
  16. braindecode/datautil/serialization.py +18 -13
  17. braindecode/eegneuralnet.py +2 -0
  18. braindecode/functional/functions.py +6 -2
  19. braindecode/functional/initialization.py +2 -3
  20. braindecode/models/__init__.py +12 -8
  21. braindecode/models/atcnet.py +156 -17
  22. braindecode/models/attentionbasenet.py +148 -16
  23. braindecode/models/{sleep_stager_eldele_2021.py → attn_sleep.py} +14 -2
  24. braindecode/models/base.py +280 -2
  25. braindecode/models/bendr.py +469 -0
  26. braindecode/models/biot.py +3 -1
  27. braindecode/models/contrawr.py +2 -0
  28. braindecode/models/ctnet.py +9 -4
  29. braindecode/models/deep4.py +6 -2
  30. braindecode/models/deepsleepnet.py +127 -5
  31. braindecode/models/eegconformer.py +114 -15
  32. braindecode/models/eeginception_erp.py +82 -7
  33. braindecode/models/eeginception_mi.py +2 -0
  34. braindecode/models/eegitnet.py +2 -0
  35. braindecode/models/eegminer.py +2 -0
  36. braindecode/models/eegnet.py +64 -177
  37. braindecode/models/eegnex.py +113 -6
  38. braindecode/models/eegsimpleconv.py +2 -0
  39. braindecode/models/eegtcnet.py +3 -1
  40. braindecode/models/fbcnet.py +5 -1
  41. braindecode/models/fblightconvnet.py +2 -0
  42. braindecode/models/fbmsnet.py +20 -6
  43. braindecode/models/ifnet.py +2 -0
  44. braindecode/models/labram.py +193 -87
  45. braindecode/models/msvtnet.py +2 -0
  46. braindecode/models/patchedtransformer.py +640 -0
  47. braindecode/models/sccnet.py +81 -8
  48. braindecode/models/shallow_fbcsp.py +2 -0
  49. braindecode/models/signal_jepa.py +111 -27
  50. braindecode/models/sinc_shallow.py +12 -9
  51. braindecode/models/sleep_stager_blanco_2020.py +2 -0
  52. braindecode/models/sleep_stager_chambon_2018.py +2 -0
  53. braindecode/models/sparcnet.py +2 -0
  54. braindecode/models/sstdpn.py +869 -0
  55. braindecode/models/summary.csv +42 -41
  56. braindecode/models/syncnet.py +2 -0
  57. braindecode/models/tcn.py +2 -0
  58. braindecode/models/tidnet.py +2 -0
  59. braindecode/models/tsinception.py +15 -3
  60. braindecode/models/usleep.py +108 -9
  61. braindecode/models/util.py +8 -5
  62. braindecode/modules/attention.py +10 -10
  63. braindecode/modules/blocks.py +3 -3
  64. braindecode/modules/filter.py +2 -9
  65. braindecode/modules/layers.py +18 -17
  66. braindecode/preprocessing/__init__.py +24 -0
  67. braindecode/preprocessing/eegprep_preprocess.py +1202 -0
  68. braindecode/preprocessing/preprocess.py +42 -39
  69. braindecode/preprocessing/util.py +166 -0
  70. braindecode/preprocessing/windowers.py +26 -20
  71. braindecode/samplers/base.py +8 -8
  72. braindecode/version.py +1 -1
  73. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev168496007.dist-info}/METADATA +12 -3
  74. braindecode-1.3.0.dev168496007.dist-info/RECORD +106 -0
  75. braindecode/models/eegresnet.py +0 -362
  76. braindecode-1.2.0.dev184328194.dist-info/RECORD +0 -101
  77. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev168496007.dist-info}/WHEEL +0 -0
  78. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev168496007.dist-info}/licenses/LICENSE.txt +0 -0
  79. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev168496007.dist-info}/licenses/NOTICE.txt +0 -0
  80. {braindecode-1.2.0.dev184328194.dist-info → braindecode-1.3.0.dev168496007.dist-info}/top_level.txt +0 -0
@@ -17,13 +17,21 @@ from braindecode.modules import LogActivation
17
17
  class SCCNet(EEGModuleMixin, nn.Module):
18
18
  """SCCNet from Wei, C S (2019) [sccnet]_.
19
19
 
20
+ :bdg-success:`Convolution`
21
+
20
22
  Spatial component-wise convolutional network (SCCNet) for motor-imagery EEG
21
23
  classification.
22
24
 
23
25
  .. figure:: https://dt5vp8kor0orz.cloudfront.net/6e3ec5d729cd51fe8acc5a978db27d02a5df9e05/2-Figure1-1.png
24
26
  :align: center
25
27
  :alt: Spatial component-wise convolutional network
28
+ :width: 680px
29
+
30
+ .. rubric:: Architectural Overview
26
31
 
32
+ SCCNet is a spatial-first convolutional network that fixes temporal kernels in seconds
33
+ to make its filters correspond to neurophysiologically aligned windows. The model
34
+ comprises four stages:
27
35
 
28
36
  1. **Spatial Component Analysis**: Performs convolution spatial filtering
29
37
  across all EEG channels to extract spatial components, effectively
@@ -35,11 +43,83 @@ class SCCNet(EEGModuleMixin, nn.Module):
35
43
  4. **Classification**: Flattens the features and applies a fully connected
36
44
  layer.
37
45
 
46
+ .. rubric:: Macro Components
47
+
48
+ - `SCCNet.spatial_conv` **(spatial component analysis)**
49
+
50
+ - *Operations.*
51
+ - :class:`~torch.nn.Conv2d` with kernel `(n_chans, N_t)` and stride `(1, 1)` on an input reshaped to `(B, 1, n_chans, T)`; typical choice `N_t=1` yields a pure across-channel projection (montage-wide linear spatial filter).
52
+ - Zero padding to preserve time, :class:`~torch.nn.BatchNorm2d`; output has `N_u` component signals shaped `(B, 1, N_u, T)` after a permute step.
53
+
54
+ - *Interpretability/robustness.* Mimics CSP-like spatial filtering: each learned filter is a channel-weighted component, easing inspection and reducing channel noise.
55
+
56
+ - `SCCNet.spatial_filt_conv` **(spatio-temporal filtering)**
57
+
58
+ - *Operations.*
59
+ - :class:`~torch.nn.Conv2d` with kernel `(N_u, 12)` over components and time (12 samples ~ 0.1 s at 125 Hz),
60
+ - :class:`~torch.nn.BatchNorm2d`;
61
+ - Nonlinearity is **power-like**: the original paper uses the **square**, as in :class:`~braindecode.models.ShallowFBCSPNet`, while this implementation uses :class:`~braindecode.modules.LogActivation` as the default.
62
+ - :class:`~torch.nn.Dropout` with rate `p=0.5`.
63
+
64
+ - *Role.* Learns frequency-selective energy features and inter-component interactions within a 0.1 s context (beta/alpha cycle scale).
65
+
66
+ - `SCCNet.temporal_smoothing` **(aggregation + readout)**
67
+
68
+ - *Operations.*
69
+ - :class:`~torch.nn.AvgPool2d` with size `(1, 62)` (~ 0.5 s) for temporal smoothing and downsampling
70
+ - :class:`~torch.nn.Flatten`
71
+ - :class:`~torch.nn.Linear` to `n_outputs`.
72
+
73
+
74
+ .. rubric:: Convolutional Details
75
+
76
+ * **Temporal (where time-domain patterns are learned).**
77
+ The second block's kernel length is fixed to 12 samples (≈ 100 ms) and slides with
78
+ stride 1; average pooling `(1, 62)` (≈ 500 ms) integrates power over longer spans.
79
+ These choices bake in short-cycle detection followed by half-second trend smoothing.
80
+
81
+ * **Spatial (how electrodes are processed).**
82
+ The first block's kernel spans **all electrodes** `(n_chans, N_t)`. With `N_t=1`,
83
+ it reduces to a montage-wide linear projection, mapping channels → `N_u` components.
84
+ The second block mixes **across components** via kernel height `N_u`.
85
+
86
+ * **Spectral (how frequency information is captured).**
87
+ No explicit transform is used; learned **temporal kernels** serve as bandpass-like
88
+ filters, and the **square/log power** nonlinearity plus 0.5 s averaging approximate
89
+ band-power estimation (ERD/ERS-style features).
90
+
91
+ .. rubric:: Attention / Sequential Modules
92
+
93
+ This model contains **no attention** and **no recurrent units**.
94
+
95
+ .. rubric:: Additional Mechanisms
96
+
97
+ - :class:`~torch.nn.BatchNorm2d` and zero-padding are applied to both convolutions;
98
+ L2 weight decay was used in the original paper; dropout `p=0.5` combats overfitting.
99
+ - Contrasting with other compact neural networks: EEGNet performs a temporal depthwise conv
100
+ followed by a **depthwise spatial** conv (separable), learning temporal filters first.
101
+ SCCNet inverts this order: it performs a **full spatial projection first** (CSP-like),
102
+ then a short **spatio-temporal** conv with an explicit 0.1 s kernel, followed by
103
+ **power-like** nonlinearity and longer temporal averaging. EEGNet's ELU and
104
+ separable design favor parameter efficiency; SCCNet's second-scale kernels and
105
+ square/log emphasize interpretable **band-power** features.
106
+
107
+ - Reference implementation: see [sccnetcode]_.
108
+
109
+ .. rubric:: Usage and Configuration
110
+
111
+ * **Training from the original authors.**
112
+
113
+ * Match window length so that `T` is comfortably larger than pooling length
114
+ (e.g., > 1.5-2 s for MI).
115
+ * Start with standard MI augmentations (channel dropout/shuffle, time reverse)
116
+ and tune `n_spatial_filters` before deeper changes.
38
117
 
39
118
  Parameters
40
119
  ----------
41
120
  n_spatial_filters : int, optional
42
- Number of spatial filters in the first convolutional layer. Default is 22.
121
+ Number of spatial filters in the first convolutional layer, variable `N_u` from the
122
+ original paper. Default is 22.
43
123
  n_spatial_filters_smooth : int, optional
44
124
  Number of spatial filters used as filter in the second convolutional
45
125
  layer. Default is 20.
@@ -49,13 +129,6 @@ class SCCNet(EEGModuleMixin, nn.Module):
49
129
  Activation function after the second convolutional layer. Default is
50
130
  logarithm activation.
51
131
 
52
- Notes
53
- -----
54
- This implementation is not guaranteed to be correct, has not been checked
55
- by original authors, only reimplemented from the paper description and
56
- the source that have not been tested [sccnetcode]_.
57
-
58
-
59
132
  References
60
133
  ----------
61
134
  .. [sccnet] Wei, C. S., Koike-Akino, T., & Wang, Y. (2019, March). Spatial
@@ -20,6 +20,8 @@ from braindecode.modules import (
20
20
  class ShallowFBCSPNet(EEGModuleMixin, nn.Sequential):
21
21
  """Shallow ConvNet model from Schirrmeister et al (2017) [Schirrmeister2017]_.
22
22
 
23
+ :bdg-success:`Convolution`
24
+
23
25
  .. figure:: https://onlinelibrary.wiley.com/cms/asset/221ea375-6701-40d3-ab3f-e411aad62d9e/hbm23730-fig-0002-m.jpg
24
26
  :align: center
25
27
  :alt: ShallowNet Architecture
@@ -5,7 +5,8 @@ from __future__ import annotations
5
5
 
6
6
  import math
7
7
  from copy import deepcopy
8
- from typing import Any, Sequence
8
+ from pathlib import Path
9
+ from typing import Any, Optional, Sequence
9
10
 
10
11
  import torch
11
12
  from einops.layers.torch import Rearrange
@@ -145,6 +146,8 @@ class _BaseSignalJEPA(EEGModuleMixin, nn.Module):
145
146
  class SignalJEPA(_BaseSignalJEPA):
146
147
  """Architecture introduced in signal-JEPA for self-supervised pre-training, Guetschel, P et al (2024) [1]_
147
148
 
149
+ :bdg-success:`Convolution` :bdg-dark-line:`Channel` :bdg-danger:`Large Brain Model`
150
+
148
151
  This model is not meant for classification but for SSL pre-training.
149
152
  Its output shape depends on the input shape.
150
153
  For classification purposes, three variants of this model are available:
@@ -231,6 +234,8 @@ class SignalJEPA(_BaseSignalJEPA):
231
234
  class SignalJEPA_Contextual(_BaseSignalJEPA):
232
235
  """Contextual downstream architecture introduced in signal-JEPA Guetschel, P et al (2024) [1]_.
233
236
 
237
+ :bdg-success:`Convolution` :bdg-dark-line:`Channel` :bdg-danger:`Large Brain Model`
238
+
234
239
  This architecture is one of the variants of :class:`SignalJEPA`
235
240
  that can be used for classification purposes.
236
241
 
@@ -319,25 +324,50 @@ class SignalJEPA_Contextual(_BaseSignalJEPA):
319
324
  @classmethod
320
325
  def from_pretrained(
321
326
  cls,
322
- model: SignalJEPA,
323
- n_outputs: int,
327
+ model: Optional[SignalJEPA | str | Path] = None, # type: ignore
328
+ n_outputs: Optional[int] = None, # type: ignore
324
329
  n_spat_filters: int = 4,
325
- chs_info: list[dict[str, Any]] | None = None,
330
+ chs_info: Optional[list[dict[str, Any]]] = None, # type: ignore
331
+ **kwargs,
326
332
  ):
327
- """Instantiate a new model from a pre-trained :class:`SignalJEPA` model.
333
+ """Instantiate a new model from a pre-trained :class:`SignalJEPA` model or from Hub.
328
334
 
329
335
  Parameters
330
336
  ----------
331
- model: SignalJEPA
332
- Pre-trained model.
333
- n_outputs: int
334
- Number of classes for the new model.
337
+ model: SignalJEPA, str, Path, or None
338
+ Either a pre-trained :class:`SignalJEPA` model, a string/Path to a local directory
339
+ (for Hub-style loading), or None (for Hub loading via kwargs).
340
+ n_outputs: int or None
341
+ Number of classes for the new model. Required when loading from a SignalJEPA model,
342
+ optional when loading from Hub (will be read from config).
335
343
  n_spat_filters: int
336
344
  Number of spatial filters.
337
345
  chs_info: list of dict | None
338
346
  Information about each individual EEG channel. This should be filled with
339
347
  ``info["chs"]``. Refer to :class:`mne.Info` for more details.
348
+ **kwargs
349
+ Additional keyword arguments passed to the parent class for Hub loading.
340
350
  """
351
+ # Check if this is a Hub-style load (from a directory path)
352
+ if isinstance(model, (str, Path)) or (model is None and kwargs):
353
+ # This is a Hub load, delegate to parent class
354
+ if isinstance(model, (str, Path)):
355
+ # model is actually the repo_id or directory path
356
+ return super().from_pretrained(model, **kwargs)
357
+ else:
358
+ # model is None, treat as hub-style load
359
+ return super().from_pretrained(**kwargs)
360
+
361
+ # This is the original SignalJEPA transfer learning case
362
+ if not isinstance(model, SignalJEPA):
363
+ raise TypeError(
364
+ f"model must be a SignalJEPA instance, a path string, or Path object, got {type(model)}"
365
+ )
366
+ if n_outputs is None:
367
+ raise ValueError(
368
+ "n_outputs must be provided when loading from a SignalJEPA model"
369
+ )
370
+
341
371
  feature_encoder = model.feature_encoder
342
372
  pos_encoder = model.pos_encoder
343
373
  transformer = model.transformer
@@ -377,6 +407,8 @@ class SignalJEPA_Contextual(_BaseSignalJEPA):
377
407
  class SignalJEPA_PostLocal(_BaseSignalJEPA):
378
408
  """Post-local downstream architecture introduced in signal-JEPA Guetschel, P et al (2024) [1]_.
379
409
 
410
+ :bdg-success:`Convolution` :bdg-dark-line:`Channel` :bdg-danger:`Large Brain Model`
411
+
380
412
  This architecture is one of the variants of :class:`SignalJEPA`
381
413
  that can be used for classification purposes.
382
414
 
@@ -463,22 +495,47 @@ class SignalJEPA_PostLocal(_BaseSignalJEPA):
463
495
 
464
496
  @classmethod
465
497
  def from_pretrained(
466
- cls, model: SignalJEPA, n_outputs: int, n_spat_filters: int = 4
498
+ cls,
499
+ model: SignalJEPA | str | Path = None, # type: ignore
500
+ n_outputs: int = None, # type: ignore
501
+ n_spat_filters: int = 4,
502
+ **kwargs,
467
503
  ):
468
- """Instantiate a new model from a pre-trained :class:`SignalJEPA` model.
504
+ """Instantiate a new model from a pre-trained :class:`SignalJEPA` model or from Hub.
469
505
 
470
506
  Parameters
471
507
  ----------
472
- model: SignalJEPA
473
- Pre-trained model.
474
- n_outputs: int
475
- Number of classes for the new model.
508
+ model: SignalJEPA, str, Path, or None
509
+ Either a pre-trained :class:`SignalJEPA` model, a string/Path to a local directory
510
+ (for Hub-style loading), or None (for Hub loading via kwargs).
511
+ n_outputs: int or None
512
+ Number of classes for the new model. Required when loading from a SignalJEPA model,
513
+ optional when loading from Hub (will be read from config).
476
514
  n_spat_filters: int
477
515
  Number of spatial filters.
478
- chs_info: list of dict | None
479
- Information about each individual EEG channel. This should be filled with
480
- ``info["chs"]``. Refer to :class:`mne.Info` for more details.
516
+ **kwargs
517
+ Additional keyword arguments passed to the parent class for Hub loading.
481
518
  """
519
+ # Check if this is a Hub-style load (from a directory path)
520
+ if isinstance(model, (str, Path)) or (model is None and kwargs):
521
+ # This is a Hub load, delegate to parent class
522
+ if isinstance(model, (str, Path)):
523
+ # model is actually the repo_id or directory path
524
+ return super().from_pretrained(model, **kwargs)
525
+ else:
526
+ # model is None, treat as hub-style load
527
+ return super().from_pretrained(**kwargs)
528
+
529
+ # This is the original SignalJEPA transfer learning case
530
+ if not isinstance(model, SignalJEPA):
531
+ raise TypeError(
532
+ f"model must be a SignalJEPA instance, a path string, or Path object, got {type(model)}"
533
+ )
534
+ if n_outputs is None:
535
+ raise ValueError(
536
+ "n_outputs must be provided when loading from a SignalJEPA model"
537
+ )
538
+
482
539
  feature_encoder = model.feature_encoder
483
540
  assert feature_encoder is not None
484
541
  new_model = cls(
@@ -501,6 +558,8 @@ class SignalJEPA_PostLocal(_BaseSignalJEPA):
501
558
  class SignalJEPA_PreLocal(_BaseSignalJEPA):
502
559
  """Pre-local downstream architecture introduced in signal-JEPA Guetschel, P et al (2024) [1]_.
503
560
 
561
+ :bdg-success:`Convolution` :bdg-dark-line:`Channel` :bdg-danger:`Large Brain Model`
562
+
504
563
  This architecture is one of the variants of :class:`SignalJEPA`
505
564
  that can be used for classification purposes.
506
565
 
@@ -597,22 +656,47 @@ class SignalJEPA_PreLocal(_BaseSignalJEPA):
597
656
 
598
657
  @classmethod
599
658
  def from_pretrained(
600
- cls, model: SignalJEPA, n_outputs: int, n_spat_filters: int = 4
659
+ cls,
660
+ model: SignalJEPA | str | Path = None, # type: ignore
661
+ n_outputs: int = None, # type: ignore
662
+ n_spat_filters: int = 4,
663
+ **kwargs,
601
664
  ):
602
- """Instantiate a new model from a pre-trained :class:`SignalJEPA` model.
665
+ """Instantiate a new model from a pre-trained :class:`SignalJEPA` model or from Hub.
603
666
 
604
667
  Parameters
605
668
  ----------
606
- model: SignalJEPA
607
- Pre-trained model.
608
- n_outputs: int
609
- Number of classes for the new model.
669
+ model: SignalJEPA, str, Path, or None
670
+ Either a pre-trained :class:`SignalJEPA` model, a string/Path to a local directory
671
+ (for Hub-style loading), or None (for Hub loading via kwargs).
672
+ n_outputs: int or None
673
+ Number of classes for the new model. Required when loading from a SignalJEPA model,
674
+ optional when loading from Hub (will be read from config).
610
675
  n_spat_filters: int
611
676
  Number of spatial filters.
612
- chs_info: list of dict | None
613
- Information about each individual EEG channel. This should be filled with
614
- ``info["chs"]``. Refer to :class:`mne.Info` for more details.
677
+ **kwargs
678
+ Additional keyword arguments passed to the parent class for Hub loading.
615
679
  """
680
+ # Check if this is a Hub-style load (from a directory path)
681
+ if isinstance(model, (str, Path)) or (model is None and kwargs):
682
+ # This is a Hub load, delegate to parent class
683
+ if isinstance(model, (str, Path)):
684
+ # model is actually the repo_id or directory path
685
+ return super().from_pretrained(model, **kwargs)
686
+ else:
687
+ # model is None, treat as hub-style load
688
+ return super().from_pretrained(**kwargs)
689
+
690
+ # This is the original SignalJEPA transfer learning case
691
+ if not isinstance(model, SignalJEPA):
692
+ raise TypeError(
693
+ f"model must be a SignalJEPA instance, a path string, or Path object, got {type(model)}"
694
+ )
695
+ if n_outputs is None:
696
+ raise ValueError(
697
+ "n_outputs must be provided when loading from a SignalJEPA model"
698
+ )
699
+
616
700
  feature_encoder = model.feature_encoder
617
701
  assert feature_encoder is not None
618
702
  new_model = cls(
@@ -12,6 +12,8 @@ from braindecode.models.base import EEGModuleMixin
12
12
  class SincShallowNet(EEGModuleMixin, nn.Module):
13
13
  """Sinc-ShallowNet from Borra, D et al (2020) [borra2020]_.
14
14
 
15
+ :bdg-success:`Convolution` :bdg-warning:`Interpretability`
16
+
15
17
  .. figure:: https://ars.els-cdn.com/content/image/1-s2.0-S0893608020302021-gr2_lrg.jpg
16
18
  :align: center
17
19
  :alt: SincShallowNet Architecture
@@ -19,23 +21,24 @@ class SincShallowNet(EEGModuleMixin, nn.Module):
19
21
  The Sinc-ShallowNet architecture has these fundamental blocks:
20
22
 
21
23
  1. **Block 1: Spectral and Spatial Feature Extraction**
22
- - *Temporal Sinc-Convolutional Layer*:
23
- Uses parametrized sinc functions to learn band-pass filters,
24
- significantly reducing the number of trainable parameters by only
25
- learning the lower and upper cutoff frequencies for each filter.
26
- - *Spatial Depthwise Convolutional Layer*:
27
- Applies depthwise convolutions to learn spatial filters for
28
- each temporal feature map independently, further reducing
29
- parameters and enhancing interpretability.
30
- - *Batch Normalization*
24
+
25
+ - *Temporal Sinc-Convolutional Layer*: Uses parametrized sinc functions to learn band-pass filters,
26
+ significantly reducing the number of trainable parameters by only
27
+ learning the lower and upper cutoff frequencies for each filter.
28
+ - *Spatial Depthwise Convolutional Layer*: Applies depthwise convolutions to learn spatial filters for
29
+ each temporal feature map independently, further reducing
30
+ parameters and enhancing interpretability.
31
+ - *Batch Normalization*
31
32
 
32
33
  2. **Block 2: Temporal Aggregation**
34
+
33
35
  - *Activation Function*: ELU
34
36
  - *Average Pooling Layer*: Aggregation by averaging spatial dim
35
37
  - *Dropout Layer*
36
38
  - *Flatten Layer*
37
39
 
38
40
  3. **Block 3: Classification**
41
+
39
42
  - *Fully Connected Layer*: Maps the feature vector to n_outputs.
40
43
 
41
44
  **Implementation Notes:**
@@ -11,6 +11,8 @@ from braindecode.models.base import EEGModuleMixin
11
11
  class SleepStagerBlanco2020(EEGModuleMixin, nn.Module):
12
12
  """Sleep staging architecture from Blanco et al. (2020) from [Blanco2020]_
13
13
 
14
+ :bdg-success:`Convolution`
15
+
14
16
  .. figure:: https://media.springernature.com/full/springer-static/image/art%3A10.1007%2Fs00500-019-04174-1/MediaObjects/500_2019_4174_Fig2_HTML.png
15
17
  :align: center
16
18
  :alt: SleepStagerBlanco2020 Architecture
@@ -13,6 +13,8 @@ from braindecode.models.base import EEGModuleMixin
13
13
  class SleepStagerChambon2018(EEGModuleMixin, nn.Module):
14
14
  """Sleep staging architecture from Chambon et al. (2018) [Chambon2018]_.
15
15
 
16
+ :bdg-success:`Convolution`
17
+
16
18
  .. figure:: https://braindecode.org/dev/_static/model/SleepStagerChambon2018.jpg
17
19
  :align: center
18
20
  :alt: SleepStagerChambon2018 Architecture
@@ -13,6 +13,8 @@ from braindecode.models.base import EEGModuleMixin
13
13
  class SPARCNet(EEGModuleMixin, nn.Module):
14
14
  """Seizures, Periodic and Rhythmic pattern Continuum Neural Network (SPaRCNet) from Jing et al. (2023) [jing2023]_.
15
15
 
16
+ :bdg-success:`Convolution`
17
+
16
18
  This is a temporal CNN model for biosignal classification based on the DenseNet
17
19
  architecture.
18
20