braindecode 1.3.0.dev180329405__py3-none-any.whl → 1.3.0.dev182330353__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (70)
  1. braindecode/augmentation/base.py +1 -1
  2. braindecode/datasets/__init__.py +12 -4
  3. braindecode/datasets/base.py +115 -151
  4. braindecode/datasets/bcicomp.py +4 -4
  5. braindecode/datasets/bids.py +3 -3
  6. braindecode/datasets/experimental.py +2 -2
  7. braindecode/datasets/mne.py +3 -5
  8. braindecode/datasets/moabb.py +17 -7
  9. braindecode/datasets/nmt.py +2 -2
  10. braindecode/datasets/sleep_physio_challe_18.py +2 -2
  11. braindecode/datasets/sleep_physionet.py +2 -2
  12. braindecode/datasets/tuh.py +2 -2
  13. braindecode/datasets/xy.py +2 -2
  14. braindecode/datautil/__init__.py +11 -1
  15. braindecode/datautil/channel_utils.py +114 -0
  16. braindecode/datautil/serialization.py +7 -7
  17. braindecode/functional/functions.py +6 -2
  18. braindecode/functional/initialization.py +2 -3
  19. braindecode/models/__init__.py +6 -0
  20. braindecode/models/atcnet.py +26 -27
  21. braindecode/models/attentionbasenet.py +37 -32
  22. braindecode/models/attn_sleep.py +2 -0
  23. braindecode/models/base.py +280 -2
  24. braindecode/models/bendr.py +469 -0
  25. braindecode/models/biot.py +2 -0
  26. braindecode/models/contrawr.py +2 -0
  27. braindecode/models/ctnet.py +8 -3
  28. braindecode/models/deepsleepnet.py +28 -19
  29. braindecode/models/eegconformer.py +2 -2
  30. braindecode/models/eeginception_erp.py +31 -25
  31. braindecode/models/eegitnet.py +2 -0
  32. braindecode/models/eegminer.py +2 -0
  33. braindecode/models/eegnet.py +1 -1
  34. braindecode/models/eegsym.py +917 -0
  35. braindecode/models/eegtcnet.py +2 -0
  36. braindecode/models/fbcnet.py +5 -1
  37. braindecode/models/fblightconvnet.py +2 -0
  38. braindecode/models/fbmsnet.py +20 -6
  39. braindecode/models/ifnet.py +2 -0
  40. braindecode/models/labram.py +33 -26
  41. braindecode/models/medformer.py +758 -0
  42. braindecode/models/msvtnet.py +2 -0
  43. braindecode/models/patchedtransformer.py +1 -1
  44. braindecode/models/signal_jepa.py +111 -27
  45. braindecode/models/sinc_shallow.py +12 -9
  46. braindecode/models/sstdpn.py +11 -11
  47. braindecode/models/summary.csv +3 -0
  48. braindecode/models/syncnet.py +2 -0
  49. braindecode/models/tcn.py +2 -0
  50. braindecode/models/usleep.py +26 -21
  51. braindecode/models/util.py +3 -0
  52. braindecode/modules/attention.py +10 -10
  53. braindecode/modules/blocks.py +3 -3
  54. braindecode/modules/filter.py +2 -9
  55. braindecode/modules/layers.py +18 -17
  56. braindecode/preprocessing/__init__.py +232 -3
  57. braindecode/preprocessing/eegprep_preprocess.py +1202 -0
  58. braindecode/preprocessing/mne_preprocess.py +142 -10
  59. braindecode/preprocessing/preprocess.py +28 -18
  60. braindecode/preprocessing/util.py +166 -0
  61. braindecode/preprocessing/windowers.py +26 -20
  62. braindecode/samplers/base.py +8 -8
  63. braindecode/version.py +1 -1
  64. {braindecode-1.3.0.dev180329405.dist-info → braindecode-1.3.0.dev182330353.dist-info}/METADATA +6 -2
  65. braindecode-1.3.0.dev182330353.dist-info/RECORD +109 -0
  66. braindecode-1.3.0.dev180329405.dist-info/RECORD +0 -103
  67. {braindecode-1.3.0.dev180329405.dist-info → braindecode-1.3.0.dev182330353.dist-info}/WHEEL +0 -0
  68. {braindecode-1.3.0.dev180329405.dist-info → braindecode-1.3.0.dev182330353.dist-info}/licenses/LICENSE.txt +0 -0
  69. {braindecode-1.3.0.dev180329405.dist-info → braindecode-1.3.0.dev182330353.dist-info}/licenses/NOTICE.txt +0 -0
  70. {braindecode-1.3.0.dev180329405.dist-info → braindecode-1.3.0.dev182330353.dist-info}/top_level.txt +0 -0
braindecode/models/eegtcnet.py
@@ -15,6 +15,8 @@ from braindecode.modules import Chomp1d, MaxNormLinear
 class EEGTCNet(EEGModuleMixin, nn.Module):
     """EEGTCNet model from Ingolfsson et al. (2020) [ingolfsson2020]_.
 
+    :bdg-success:`Convolution` :bdg-secondary:`Recurrent`
+
     .. figure:: https://braindecode.org/dev/_static/model/eegtcnet.jpg
        :align: center
        :alt: EEGTCNet Architecture
braindecode/models/fbcnet.py
@@ -31,6 +31,8 @@ _valid_layers = {
 class FBCNet(EEGModuleMixin, nn.Module):
     """FBCNet from Mane, R et al (2021) [fbcnet2021]_.
 
+    :bdg-success:`Convolution` :bdg-primary:`Filterbank`
+
     .. figure:: https://raw.githubusercontent.com/ravikiran-mane/FBCNet/refs/heads/master/FBCNet-V2.png
        :align: center
        :alt: FBCNet Architecture
@@ -67,7 +69,9 @@ class FBCNet(EEGModuleMixin, nn.Module):
     linear_max_norm : float, default=0.5
         Maximum norm for the final linear layer.
     filter_parameters: dict, default None
-        Parameters for the FilterBankLayer
+        Dictionary of parameters to use for the FilterBankLayer.
+        If None, a default Chebyshev Type II filter with transition bandwidth of
+        2 Hz and stop-band ripple of 30 dB will be used.
 
     References
     ----------
braindecode/models/fblightconvnet.py
@@ -18,6 +18,8 @@ from braindecode.modules import (
 class FBLightConvNet(EEGModuleMixin, nn.Module):
     """LightConvNet from Ma, X et al (2023) [lightconvnet]_.
 
+    :bdg-success:`Convolution` :bdg-primary:`Filterbank`
+
     .. figure:: https://raw.githubusercontent.com/Ma-Xinzhi/LightConvNet/refs/heads/main/network_architecture.png
        :align: center
        :alt: LightConvNet Neural Network
braindecode/models/fbmsnet.py
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Optional, Sequence
+from typing import Any, Sequence
 
 import torch
 from einops.layers.torch import Rearrange
@@ -19,6 +19,8 @@ from braindecode.modules import (
 class FBMSNet(EEGModuleMixin, nn.Module):
     """FBMSNet from Liu et al (2022) [fbmsnet]_.
 
+    :bdg-success:`Convolution` :bdg-primary:`Filterbank`
+
     .. figure:: https://raw.githubusercontent.com/Want2Vanish/FBMSNet/refs/heads/main/FBMSNet.png
        :align: center
        :alt: FBMSNet Architecture
@@ -55,16 +57,28 @@ class FBMSNet(EEGModuleMixin, nn.Module):
     ----------
     n_bands : int, default=9
         Number of input channels (e.g., number of frequency bands).
-    stride_factor : int, default=4
-        Stride factor for temporal segmentation.
-    temporal_layer : str, default='LogVarLayer'
-        Temporal aggregation layer to use.
     n_filters_spat : int, default=36
         Number of output channels from the MixedConv2d layer.
+    temporal_layer : str, default='LogVarLayer'
+        Temporal aggregation layer to use.
+    n_dim: int, default=3
+        Dimension of the temporal reduction layer.
+    stride_factor : int, default=4
+        Stride factor for temporal segmentation.
     dilatability : int, default=8
         Expansion factor for the spatial convolution block.
     activation : nn.Module, default=nn.SiLU
         Activation function class to apply.
+    kernels_weights : Sequence[int], default=(15, 31, 63, 125)
+        Kernel sizes for the MixedConv2d layer.
+    cnn_max_norm : float, default=2
+        Maximum norm constraint for the convolutional layers.
+    linear_max_norm : float, default=0.5
+        Maximum norm constraint for the linear layers.
+    filter_parameters : dict, default=None
+        Dictionary of parameters to use for the FilterBankLayer.
+        If None, a default Chebyshev Type II filter with transition bandwidth of
+        2 Hz and stop-band ripple of 30 dB will be used.
     verbose: bool, default False
         Verbose parameter to create the filter using mne.
 
@@ -101,7 +115,7 @@ class FBMSNet(EEGModuleMixin, nn.Module):
         cnn_max_norm: float = 2,
         linear_max_norm: float = 0.5,
         verbose: bool = False,
-        filter_parameters: Optional[dict] = None,
+        filter_parameters: dict[Any, Any] | None = None,
     ):
         super().__init__(
             n_chans=n_chans,
braindecode/models/ifnet.py
@@ -31,6 +31,8 @@ from braindecode.modules import (
 class IFNet(EEGModuleMixin, nn.Module):
     """IFNetV2 from Wang J et al (2023) [ifnet]_.
 
+    :bdg-success:`Convolution` :bdg-primary:`Filterbank`
+
     .. figure:: https://raw.githubusercontent.com/Jiaheng-Wang/IFNet/main/IFNet.png
        :align: center
        :alt: IFNetV2 Architecture
braindecode/models/labram.py
@@ -2,6 +2,7 @@
 Labram module.
 Authors: Wei-Bang Jiang
          Bruno Aristimunha <b.aristimunha@gmail.com>
+         Matthew Chen <matt.chen4260@gmail.com>
 License: BSD 3 clause
 """
 
@@ -22,12 +23,14 @@ from braindecode.modules import MLP, DropPath
 class Labram(EEGModuleMixin, nn.Module):
     """Labram from Jiang, W B et al (2024) [Jiang2024]_.
 
+    :bdg-success:`Convolution` :bdg-danger:`Large Brain Model`
+
     .. figure:: https://arxiv.org/html/2405.18765v1/x1.png
        :align: center
        :alt: Labram Architecture.
 
     Large Brain Model for Learning Generic Representations with Tremendous
-    EEG Data in BCI from [Jiang2024]_
+    EEG Data in BCI from [Jiang2024]_.
 
     This is an **adaptation** of the code [Code2024]_ from the Labram model.
 
@@ -35,7 +38,8 @@ class Labram(EEGModuleMixin, nn.Module):
     BEiTv2 [BeiTv2]_.
 
     The models can be used in two modes:
-    - Neural Tokenizor: Design to get an embedding layers (e.g. classification).
+
+    - Neural Tokenizer: Design to get an embedding layers (e.g. classification).
     - Neural Decoder: To extract the ampliture and phase outputs with a VQSNP.
 
     The braindecode's modification is to allow the model to be used in
@@ -43,33 +47,36 @@ class Labram(EEGModuleMixin, nn.Module):
     equals True. The original implementation uses (batch, n_chans, n_patches,
     patch_size) as input with static segmentation of the input data.
 
-    The models have the following sequence of steps:
-    if neural tokenizer:
-        - SegmentPatch: Segment the input data in patches;
-        - TemporalConv: Apply a temporal convolution to the segmented data;
-        - Residual adding cls, temporal and position embeddings (optional);
-        - WindowsAttentionBlock: Apply a windows attention block to the data;
-        - LayerNorm: Apply layer normalization to the data;
-        - Linear: An head linear layer to transformer the data into classes.
-
-    else:
-        - PatchEmbed: Apply a patch embedding to the input data;
-        - Residual adding cls, temporal and position embeddings (optional);
-        - WindowsAttentionBlock: Apply a windows attention block to the data;
-        - LayerNorm: Apply layer normalization to the data;
-        - Linear: An head linear layer to transformer the data into classes.
+    The models have the following sequence of steps::
+
+    if neural tokenizer:
+        - SegmentPatch: Segment the input data in patches;
+        - TemporalConv: Apply a temporal convolution to the segmented data;
+        - Residual adding cls, temporal and position embeddings (optional);
+        - WindowsAttentionBlock: Apply a windows attention block to the data;
+        - LayerNorm: Apply layer normalization to the data;
+        - Linear: An head linear layer to transformer the data into classes.
+
+    else:
+        - PatchEmbed: Apply a patch embedding to the input data;
+        - Residual adding cls, temporal and position embeddings (optional);
+        - WindowsAttentionBlock: Apply a windows attention block to the data;
+        - LayerNorm: Apply layer normalization to the data;
+        - Linear: An head linear layer to transformer the data into classes.
 
     .. versionadded:: 0.9
 
 
-    Examples on how to load pre-trained weights:
-    --------------------------------------------
-    >>> import torch
-    >>> from braindecode.models import Labram
-    >>> model = Labram(n_times=1600, n_chans=64, n_outputs=4)
-    >>> url = 'https://huggingface.co/braindecode/Labram-Braindecode/blob/main/braindecode_labram_base.pt'
-    >>> state = torch.hub.load_state_dict_from_url(url, progress=True)
-    >>> model.load_state_dict(state)
+    Examples
+    --------
+    Load pre-trained weights::
+
+    >>> import torch
+    >>> from braindecode.models import Labram
+    >>> model = Labram(n_times=1600, n_chans=64, n_outputs=4)
+    >>> url = "https://huggingface.co/braindecode/Labram-Braindecode/blob/main/braindecode_labram_base.pt"
+    >>> state = torch.hub.load_state_dict_from_url(url, progress=True)
+    >>> model.load_state_dict(state)
 
 
     Parameters
@@ -116,7 +123,7 @@ class Labram(EEGModuleMixin, nn.Module):
     init_scale : float (default=0.001)
         The initial scale to be used in the parameters of the model.
     neural_tokenizer : bool (default=True)
-        The model can be used in two modes: Neural Tokenizor or Neural Decoder.
+        The model can be used in two modes: Neural Tokenizer or Neural Decoder.
     attn_head_dim : bool (default=None)
         The head dimension to be used in the attention layer, to be used only
         during pre-training.
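Tying the renamed modes together, a minimal usage sketch: the tokenizer-mode input layout and the constructor arguments are taken from the docstring example in this diff, while the decoder-mode details beyond what the docstring states are assumptions:

    # Sketch of the two Labram modes named above; shapes follow the
    # docstring in this diff and are otherwise assumptions.
    import torch

    from braindecode.models import Labram

    # Neural Tokenizer (default): raw EEG in, class logits out.
    tokenizer = Labram(n_times=1600, n_chans=64, n_outputs=4)
    logits = tokenizer(torch.randn(2, 64, 1600))  # -> (2, 4)

    # Neural Decoder: works on pre-segmented (batch, n_chans, n_patches,
    # patch_size) input, used to extract amplitude/phase during pre-training.
    decoder = Labram(
        n_times=1600, n_chans=64, n_outputs=4, neural_tokenizer=False
    )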