broccoli-ml 5.1.1 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- broccoli_ml-5.1.1/LICENSE +21 -0
- broccoli_ml-5.1.1/PKG-INFO +43 -0
- broccoli_ml-5.1.1/README.md +25 -0
- broccoli_ml-5.1.1/broccoli/__init__.py +6 -0
- broccoli_ml-5.1.1/broccoli/activation.py +121 -0
- broccoli_ml-5.1.1/broccoli/cnn.py +157 -0
- broccoli_ml-5.1.1/broccoli/linear.py +138 -0
- broccoli_ml-5.1.1/broccoli/rope.py +407 -0
- broccoli_ml-5.1.1/broccoli/tensor.py +128 -0
- broccoli_ml-5.1.1/broccoli/transformer.py +722 -0
- broccoli_ml-5.1.1/broccoli/utils.py +15 -0
- broccoli_ml-5.1.1/broccoli/vit.py +562 -0
- broccoli_ml-5.1.1/pyproject.toml +40 -0

broccoli_ml-5.1.1/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 nicholasbailey87
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

broccoli_ml-5.1.1/PKG-INFO
@@ -0,0 +1,43 @@
+Metadata-Version: 2.3
+Name: broccoli-ml
+Version: 5.1.1
+Summary: Some useful Pytorch models, circa 2025
+License: MIT
+Author: Nicholas Bailey
+Requires-Python: >=3.8
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Dist: einops (>=0.8.1,<0.9.0)
+Description-Content-Type: text/markdown
+
+# broccoli
+
+Some useful PyTorch models, circa 2025.
+
+![Broccoli, as painted by Giuseppe Arcimboldo](https://i.imgur.com/tSkCSvQ.jpeg)
+
+# Getting started
+
+You can install broccoli with
+
+```
+pip install broccoli-ml
+```
+
+PyTorch is a peer dependency of `broccoli`, which means
+* You will need to make sure you have PyTorch installed in order to use `broccoli`
+* PyTorch will **not** be installed automatically when you install `broccoli`
+
+We take this approach because PyTorch versioning is environment-specific and we don't know where you will want to use `broccoli`. If we automatically install PyTorch for you, there's a good chance we would get it wrong!
+
+Therefore, please also make sure you install PyTorch.
+
+# Usage examples
+
+...

broccoli_ml-5.1.1/README.md
@@ -0,0 +1,25 @@
+# broccoli
+
+Some useful PyTorch models, circa 2025.
+
+![Broccoli, as painted by Giuseppe Arcimboldo](https://i.imgur.com/tSkCSvQ.jpeg)
+
+# Getting started
+
+You can install broccoli with
+
+```
+pip install broccoli-ml
+```
+
+PyTorch is a peer dependency of `broccoli`, which means
+* You will need to make sure you have PyTorch installed in order to use `broccoli`
+* PyTorch will **not** be installed automatically when you install `broccoli`
+
+We take this approach because PyTorch versioning is environment-specific and we don't know where you will want to use `broccoli`. If we automatically install PyTorch for you, there's a good chance we would get it wrong!
+
+Therefore, please also make sure you install PyTorch.
+
+# Usage examples
+
+...
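
The "Usage examples" section of the README is left as a stub in this release. As a rough, hypothetical sketch only (the import paths follow the `broccoli/` module layout shown in this diff and the constructor signatures come from the source below; none of this is documented package API), a small block built from these layers might look like:

```python
import torch
from torch import nn

# Assumed import paths, based on the broccoli/ module layout in this package.
from broccoli.activation import SwiGLU
from broccoli.linear import SpectralNormLinear

# SwiGLU halves the last dimension, so the preceding layer projects up by 2x.
block = nn.Sequential(
    SpectralNormLinear(64, 256),   # 64 -> 256
    SwiGLU(),                      # 256 -> 128 (gated)
    SpectralNormLinear(128, 64),   # 128 -> 64
)

x = torch.randn(8, 64)
print(block(x).shape)  # torch.Size([8, 64])
```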

broccoli_ml-5.1.1/broccoli/activation.py
@@ -0,0 +1,121 @@
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+
+class ReLU(nn.Module):
+    """
+    A ReLU activation function with optional clamp and leakiness.
+    """
+
+    def __init__(
+        self, clamp=True, leaky=True, negative_slope=0.01, clamp_max=6.0
+    ) -> None:
+        super().__init__()
+        self.clamp = clamp
+        self.leaky = leaky
+        self.negative_slope = negative_slope
+        self.clamp_max = clamp_max
+
+    def forward(self, x):
+        if self.leaky:
+            relu = F.leaky_relu(x, negative_slope=self.negative_slope)
+        else:
+            relu = F.relu(x)
+        if self.clamp:
+            relu = torch.clamp(relu, max=self.clamp_max)
+        return relu
+
+
+class GELU(nn.Module):
+    """
+    A GELU activation function with optional clamp.
+    """
+
+    def __init__(self, clamp=True) -> None:
+        super().__init__()
+        self.clamp = clamp
+        self.gelu = nn.GELU()
+
+    def forward(self, x):
+        gelu = self.gelu(x)
+        if self.clamp:
+            gelu = torch.clamp(gelu, max=6)
+        return gelu
+
+
+class Swish(nn.Module):
+    """
+    Swish activation with a learnable beta, i.e. x * sigmoid(beta * x): the gating
+    nonlinearity behind SwiGLU, introduced in "GLU Variants Improve Transformer"
+    (https://arxiv.org/abs/2002.05202v1) and used to great effect in LLaMA 2.
+    """
+
+    def __init__(self) -> None:
+        super().__init__()
+        # Learnable parameter is called "swish_beta" so that it is easy to find
+        # and exclude from weight decay
+        self.swish_beta = nn.Parameter(torch.tensor([1.0]))
+
+    def forward(self, x):
+        return x * F.sigmoid(self.swish_beta * x)
+
+
+class SquaredReLU(nn.Module):
+    """
+    Squared ReLU, shown in "ReLU^2 Wins" (https://arxiv.org/abs/2402.03804) to
+    be as effective as SwiGLU for training LLMs, possibly because it allows a
+    network to learn multiplication, as noted by
+    https://azizbelaweid.substack.com/p/what-is-swiglu-how-to-implement-it
+    """
+
+    def __init__(
+        self, clamp=True, leaky=True, negative_slope: float = 0.01, clamp_max=6
+    ) -> None:
+        super().__init__()
+        self.clamp = clamp
+        self.leaky = leaky
+        self.negative_slope = negative_slope
+        self.clamp_max = clamp_max
+
+    def forward(self, x):
+        if self.leaky:
+            relu = F.leaky_relu(x, negative_slope=self.negative_slope)
+        else:
+            relu = F.relu(x)
+        relu_squared = relu**2
+        if self.clamp:
+            relu_squared = torch.clamp(relu_squared, max=self.clamp_max)
+        return relu_squared
+
+
+class XGLU(nn.Module):
+    """
+    Generic Gated Linear Unit: splits the input in two along the last dimension
+    and gates one half with an activation of the other, halving the feature count.
+    """
+
+    def __init__(self, activation_module: nn.Module) -> None:
+        super().__init__()
+        self.activation = activation_module
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        gate, value = x.chunk(2, dim=-1)
+        return self.activation(gate) * value
+
+
+def SquaredReGLU(clamp=True, leaky=True, negative_slope=0.01, clamp_max=6.0) -> XGLU:
+    """
+    Factory function that creates a GLU with a SquaredReLU activation.
+    """
+    activation_module = SquaredReLU(
+        clamp=clamp, leaky=leaky, negative_slope=negative_slope, clamp_max=clamp_max
+    )
+    return XGLU(activation_module)
+
+
+def SwiGLU() -> XGLU:
+    """
+    Factory function that creates a GLU with a Swish activation.
+    """
+    return XGLU(Swish())
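
For illustration (a hypothetical sketch, not part of the package; the import path is assumed from the file layout above), the GLU factories halve the final feature dimension and expose the learnable gate parameter under the name `swish_beta`:

```python
import torch
from broccoli.activation import SwiGLU, SquaredReGLU  # assumed import path

x = torch.randn(4, 10, 512)  # (..., features)

swiglu = SwiGLU()                            # XGLU wrapping Swish
squared_reglu = SquaredReGLU(clamp_max=6.0)  # XGLU wrapping SquaredReLU

print(swiglu(x).shape)         # torch.Size([4, 10, 256]) -- features halved by gating
print(squared_reglu(x).shape)  # torch.Size([4, 10, 256])

# The learnable gate temperature is exposed as `swish_beta`, so it can be
# excluded from weight decay by name when building optimizer parameter groups.
no_decay = [n for n, _ in swiglu.named_parameters() if "swish_beta" in n]
print(no_decay)  # ['activation.swish_beta']
```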

broccoli_ml-5.1.1/broccoli/cnn.py
@@ -0,0 +1,157 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import math
+from typing import Union
+
+from einops.layers.torch import Rearrange
+
+
+def spatial_tuple(size: Union[int, tuple], spatial_dimensions):
+    """
+    Converts an integer x to `tuple([x] * spatial_dimensions)`.
+    Performs no operation (i.e. the identity operation) on tuples of length `spatial_dimensions`.
+    Otherwise raises a ValueError.
+    """
+    if isinstance(size, int):
+        return tuple([size] * spatial_dimensions)
+    elif isinstance(size, tuple) and (len(size) == spatial_dimensions):
+        return size
+    else:
+        raise ValueError(
+            f"For {spatial_dimensions} spatial dimensions, `size` must be "
+            f"an integer or a tuple of length {spatial_dimensions}."
+        )
+
+
+def padding_tensor(padding: tuple):
+    """
+    Converts a tuple of ints (x, y, z) into a tuple of 2-tuples,
+    like ((x, x), (y, y), (z, z)).
+
+    Performs no operation (i.e. the identity operation) on a tuple of 2-tuples.
+
+    Otherwise raises an error.
+    """
+    if all(isinstance(x, int) for x in padding):
+        return tuple([tuple([p] * 2) for p in padding])
+    elif (
+        all(isinstance(p, tuple) for p in padding)
+        and all(len(p) == 2 for p in padding)
+        and all(all(isinstance(x, int) for x in p) for p in padding)
+    ):
+        return padding
+    else:
+        raise ValueError(
+            "Padding must be a tuple of ints or a tuple of 2-tuples of ints. "
+            f"It was {padding}."
+        )
+
+
+def kd_unfold(t: torch.Tensor, kernel_size=1, stride=1, padding=0, k=2):
+    """
+    Unfold operation with k spatial dimensions.
+    Does not support dilation.
+    Only supports equal padding at top and bottom.
+    """
+    if len(t.size()[2:]) != k:
+        raise ValueError(
+            f"Input tensor size should be (N, channels, spatial dims...), so "
+            f"for k = {k}, t.size() should be a tuple of length {k + 2}."
+        )
+
+    N, C = t.size(0), t.size(1)
+
+    kernel_size = spatial_tuple(kernel_size, k)
+    stride = spatial_tuple(stride, k)
+    padding = padding_tensor(spatial_tuple(padding, k))
+
+    output = t
+    output = F.pad(output, sum(reversed(padding), ()))  # F.pad pads the last dimension first
+
+    for i, _ in enumerate(kernel_size):
+        output = output.unfold(i + 2, kernel_size[i], stride[i])
+
+    permutation = [0, 1] + [i + k + 2 for i in range(k)] + [i + 2 for i in range(k)]
+
+    return output.permute(*permutation).reshape(N, math.prod(kernel_size) * C, -1)
+
+
+def calculate_output_spatial_size(
+    input_spatial_size, kernel_size=1, stride=1, padding=0, dilation=0
+):
+    """
+    Calculate the output size for the spatial dimensions of a convolutional operation
+    """
+    stride = spatial_tuple(stride, len(input_spatial_size))
+
+    # Handle padding keywords that are sometimes used
+    if padding == "same":
+        output_size = ()
+        for i, in_length in enumerate(input_spatial_size):
+            output_size += (math.ceil(in_length / stride[i]),)
+        return output_size
+    elif padding == "valid":
+        padding = 0
+
+    kernel_size = spatial_tuple(kernel_size, len(input_spatial_size))
+    padding = spatial_tuple(padding, len(input_spatial_size))
+    dilation = spatial_tuple(dilation, len(input_spatial_size))
+
+    output_size = ()
+
+    for i, in_length in enumerate(input_spatial_size):
+        output_size += (
+            math.floor(
+                (in_length + 2 * padding[i] - dilation[i] * (kernel_size[i] - 1) - 1)
+                / stride[i]
+                + 1
+            ),
+        )
+    return output_size
+
+
+class SpaceToDepth(nn.Module):
+    """
+    An operation that extracts patches from an image-like tensor and stacks
+    them channel-wise.
+    """
+
+    def __init__(self, kernel_size, stride=1, padding=0, spatial_dimensions=2):
+        """
+        Input shape should be in order (channels, spatial dims...),
+        e.g. (channels, height, width)
+        """
+
+        super().__init__()
+
+        self.kernel_size = kernel_size
+        self.stride = stride
+        self.padding = padding
+        self.spatial_dimensions = spatial_dimensions
+
+    def forward(self, x):
+
+        N, C, *input_spatial_size = x.size()
+
+        patches = kd_unfold(
+            x,
+            kernel_size=self.kernel_size,
+            stride=self.stride,
+            padding=self.padding,
+            k=self.spatial_dimensions,
+        )
+
+        output_spatial_size = calculate_output_spatial_size(
+            input_spatial_size=input_spatial_size,
+            kernel_size=self.kernel_size,
+            stride=self.stride,
+            padding=self.padding,
+            dilation=1,  # kd_unfold doesn't support dilation
+        )
+
+        output_channels = C * math.prod(
+            spatial_tuple(self.kernel_size, self.spatial_dimensions)
+        )
+
+        return patches.view(N, output_channels, *output_spatial_size)
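
As a quick shape check (a hypothetical sketch; the import path is assumed from the file layout above), `SpaceToDepth` with a 2x2 kernel and stride 2 behaves like non-overlapping patch extraction: channels go up by a factor of 4 while height and width halve.

```python
import torch
from broccoli.cnn import SpaceToDepth  # assumed import path

# Non-overlapping 2x2 patches stacked channel-wise.
s2d = SpaceToDepth(kernel_size=2, stride=2, padding=0, spatial_dimensions=2)

x = torch.randn(1, 3, 32, 32)
print(s2d(x).shape)  # torch.Size([1, 12, 16, 16])
```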

broccoli_ml-5.1.1/broccoli/linear.py
@@ -0,0 +1,138 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from .tensor import SigmaReparamTensor, AnchoredReparamTensor, NormReparamTensor
+
+
+class SpectralNormLinear(nn.Module):
+    """
+    Inspired by Apple's Spectral Normed Linear Layers
+    (https://github.com/apple/ml-sigma-reparam)
+    """
+
+    def __init__(self, in_features: int, out_features: int, bias: bool = True):
+        super().__init__()
+        self.in_features = in_features
+        self.out_features = out_features
+        self.use_bias = bias
+
+        self.weights = None
+
+        # Define the bias vector as a learnable parameter if required.
+        if self.use_bias:
+            self.bias = nn.Parameter(torch.empty(out_features))
+        else:
+            # If no bias, register it as None.
+            # This is important so that PyTorch doesn't complain when saving/loading the model.
+            self.register_parameter("bias", None)
+
+        self.reset_parameters()
+
+    def reset_parameters(self) -> None:
+        weights = torch.empty(self.out_features, self.in_features)
+        stdv = 1.0 / math.sqrt(self.in_features)
+        nn.init.uniform_(weights, a=-stdv, b=stdv)
+        if self.use_bias:
+            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weights)
+            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
+            nn.init.uniform_(self.bias, -bound, bound)
+        self.weights = SigmaReparamTensor(weights)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return F.linear(x, self.weights(), self.bias)
+
+    def __repr__(self) -> str:
+        # Optional: A nice representation for printing the module.
+        return (
+            f"SpectralNormLinear(in_features={self.in_features}, "
+            f"out_features={self.out_features}, bias={self.use_bias})"
+        )
+
+
+class AnchoredLinear(nn.Module):
+    """
+    ...
+    """
+
+    def __init__(self, in_features: int, out_features: int, bias: bool = True):
+        super().__init__()
+        self.in_features = in_features
+        self.out_features = out_features
+        self.use_bias = bias
+
+        self.weights = None
+
+        # Define the bias vector as a learnable parameter if required.
+        if self.use_bias:
+            self.bias = nn.Parameter(torch.empty(out_features))
+        else:
+            # If no bias, register it as None.
+            # This is important so that PyTorch doesn't complain when saving/loading the model.
+            self.register_parameter("bias", None)
+
+        self.reset_parameters()
+
+    def reset_parameters(self) -> None:
+        weights = torch.empty(self.out_features, self.in_features)
+        stdv = 1.0 / math.sqrt(self.in_features)
+        nn.init.uniform_(weights, a=-stdv, b=stdv)
+        if self.use_bias:
+            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weights)
+            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
+            nn.init.uniform_(self.bias, -bound, bound)
+        self.weights = AnchoredReparamTensor(weights)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return F.linear(x, self.weights(), self.bias)
+
+    def __repr__(self) -> str:
+        # Optional: A nice representation for printing the module.
+        return (
+            f"AnchoredLinear(in_features={self.in_features}, "
+            f"out_features={self.out_features}, bias={self.use_bias})"
+        )
+
+
+class WeightNormedLinear(nn.Module):
+    """
+    ...
+    """
+
+    def __init__(self, in_features: int, out_features: int, bias: bool = True):
+        super().__init__()
+        self.in_features = in_features
+        self.out_features = out_features
+        self.use_bias = bias
+
+        self.weights = None
+
+        # Define the bias vector as a learnable parameter if required.
+        if self.use_bias:
+            self.bias = nn.Parameter(torch.empty(out_features))
+        else:
+            # If no bias, register it as None.
+            # This is important so that PyTorch doesn't complain when saving/loading the model.
+            self.register_parameter("bias", None)
+
+        self.reset_parameters()
+
+    def reset_parameters(self) -> None:
+        weights = torch.empty(self.out_features, self.in_features)
+        stdv = 1.0 / math.sqrt(self.in_features)
+        nn.init.uniform_(weights, a=-stdv, b=stdv)
+        if self.use_bias:
+            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weights)
+            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
+            nn.init.uniform_(self.bias, -bound, bound)
+        self.weights = NormReparamTensor(weights)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return F.linear(x, self.weights(), self.bias)
+
+    def __repr__(self) -> str:
+        return (
+            f"WeightNormedLinear(in_features={self.in_features}, "
+            f"out_features={self.out_features}, bias={self.use_bias})"
+        )
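
All three layers share `nn.Linear`'s `(in_features, out_features, bias)` signature, so they can be dropped into existing models. A hypothetical sketch (import path assumed from the file layout; it also assumes the reparameterised tensors from `broccoli.tensor` return the effective weight matrix when called, which is how `forward` uses them):

```python
import torch
from torch import nn
from broccoli.linear import SpectralNormLinear, WeightNormedLinear  # assumed import path

# Swap the reparameterised layers in where nn.Linear would normally go.
mlp = nn.Sequential(
    SpectralNormLinear(128, 512),
    nn.GELU(),
    WeightNormedLinear(512, 128, bias=False),
)

x = torch.randn(16, 128)
print(mlp(x).shape)  # torch.Size([16, 128])
```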