broccoli_ml-9.5.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
broccoli/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from . import activation
+ from . import cnn
+ from . import tensor
+ from . import transformer
+ from . import vit
+ from . import rope
broccoli/activation.py ADDED
@@ -0,0 +1,118 @@
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+
+ class ReLU(nn.Module):
+     """
+     A ReLU activation function with optional clamp and leakiness.
+     """
+
+     def __init__(
+         self, clamp=True, leaky=True, negative_slope=0.01, clamp_max=6.0
+     ) -> None:
+         super().__init__()
+         self.clamp = clamp
+         self.leaky = leaky
+         self.negative_slope = negative_slope
+         self.clamp_max = clamp_max
+
+     def forward(self, x):
+         if self.leaky:
+             relu = F.leaky_relu(x, negative_slope=self.negative_slope)
+         else:
+             relu = F.relu(x)
+         if self.clamp:
+             relu = torch.clamp(relu, max=self.clamp_max)
+         return relu
+
+
+ class GELU(nn.Module):
+     """
+     A GELU activation function with optional clamp.
+     """
+
+     def __init__(self, clamp=True) -> None:
+         super().__init__()
+         self.clamp = clamp
+         self.gelu = nn.GELU()
+
+     def forward(self, x):
+         gelu = self.gelu(x)
+         if self.clamp:
+             gelu = torch.clamp(gelu, max=6)
+         return gelu
+
+
+ class Swish(nn.Module):
+     """
+     Implementation of (beta) Swish
+     """
+
+     def __init__(self) -> None:
+         super().__init__()
+         # Learnable parameter is called "swish_beta" so that it is easy to find
+         # and exclude from weight decay
+         self.swish_beta = nn.Parameter(torch.tensor([1.0]))
+
+     def forward(self, x):
+         return x * F.sigmoid(self.swish_beta * x)
+
+
+ class SquaredReLU(nn.Module):
+     """
+     Squared ReLU, shown in "ReLU^2 Wins" (https://arxiv.org/abs/2402.03804) to
+     be as effective as SwiGLU for training LLMs, possibly because it can allow a
+     NN to learn multiplication, as noted by
+     https://azizbelaweid.substack.com/p/what-is-swiglu-how-to-implement-it
+     """
+
+     def __init__(
+         self, clamp=True, leaky=True, negative_slope: float = 0.01, clamp_max=6
+     ) -> None:
+         super().__init__()
+         self.clamp = clamp
+         self.leaky = leaky
+         self.negative_slope = negative_slope
+         self.clamp_max = clamp_max
+
+     def forward(self, x):
+         if self.leaky:
+             relu = F.leaky_relu(x, negative_slope=self.negative_slope)
+         else:
+             relu = F.relu(x)
+         relu_squared = relu**2
+         if self.clamp:
+             relu_squared = torch.clamp(relu_squared, max=self.clamp_max)
+         return relu_squared
+
+
+ class XGLU(nn.Module):
+     """
+     Generic Gated Linear Unit
+     """
+
+     def __init__(self, activation_module: nn.Module) -> None:
+         super().__init__()
+         self.activation = activation_module
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         gate, value = x.chunk(2, dim=-1)
+         return self.activation(gate) * value
+
+
+ def SquaredReGLU(clamp=True, leaky=True, negative_slope=0.01, clamp_max=6.0) -> XGLU:
+     """
+     Factory function that creates a GLU with a SquaredReLU activation.
+     """
+     activation_module = SquaredReLU(
+         clamp=clamp, leaky=leaky, negative_slope=negative_slope, clamp_max=clamp_max
+     )
+     return XGLU(activation_module)
+
+
+ def SwiGLU() -> XGLU:
+     """
+     Factory function that creates a GLU with a Swish activation.
+     """
+     return XGLU(Swish())
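For orientation (not part of the package diff): a minimal usage sketch of the gated activations above. The layer sizes and tensor shapes are illustrative assumptions; the key point is that XGLU halves the last dimension, so a feed-forward block built on SquaredReGLU or SwiGLU projects up to twice the hidden width before the gate.

import torch
from broccoli.activation import SquaredReGLU, SwiGLU

ff_in, ff_hidden = 64, 128                  # illustrative sizes
up = torch.nn.Linear(ff_in, 2 * ff_hidden)  # produces the gate and value halves
glu = SquaredReGLU(clamp_max=6.0)           # or SwiGLU()
down = torch.nn.Linear(ff_hidden, ff_in)

x = torch.randn(8, ff_in)
y = down(glu(up(x)))                        # XGLU chunks the last dim: 2*ff_hidden -> ff_hidden
print(y.shape)                              # torch.Size([8, 64])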
broccoli/cnn.py ADDED
@@ -0,0 +1,157 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import math
+ from typing import Union
+
+ from einops.layers.torch import Rearrange
+
+
+ def spatial_tuple(size: Union[int, tuple], spatial_dimensions):
+     """
+     Converts an integer x to `tuple([x] * spatial_dimensions)`.
+     Performs no operation (i.e. the identity operation) on tuples of length `spatial_dimensions`.
+     Otherwise raises a ValueError.
+     """
+     if isinstance(size, int):
+         return tuple([size] * spatial_dimensions)
+     elif isinstance(size, tuple) and (len(size) == spatial_dimensions):
+         return size
+     else:
+         raise ValueError(
+             f"For {spatial_dimensions} spatial dimensions, `size` must be "
+             f"an integer or a tuple of length {spatial_dimensions}."
+         )
+
+
+ def padding_tensor(padding: tuple):
+     """
+     Converts a tuple of ints (x, y, z) into a tuple of 2-tuples,
+     like ((x, x), (y, y), (z, z)).
+
+     Performs no operation (i.e. the identity operation) on a tuple of 2-tuples.
+
+     Otherwise raises an error.
+     """
+     if all(isinstance(x, int) for x in padding):
+         return tuple([tuple([p] * 2) for p in padding])
+     elif (
+         all(isinstance(p, tuple) for p in padding)
+         and all(len(p) == 2 for p in padding)
+         and all(all(isinstance(x, int) for x in p) for p in padding)
+     ):
+         return padding
+     else:
+         raise ValueError(
+             "Padding must be a tuple of ints or a tuple of 2-tuples of ints. "
+             f"It was {padding}."
+         )
+
+
+ def kd_unfold(t: torch.Tensor, kernel_size=1, stride=1, padding=0, k=2):
+     """
+     Unfold operation with k spatial dimensions.
+     Does not support dilation.
+     Only supports equal padding at top and bottom.
+     """
+     if len(t.size()[2:]) != k:
+         raise ValueError(
+             f"Input tensor size should be (N, channels, spatial dims...), so "
+             f"for k = {k}, t.size() should be a tuple of length {k + 2}."
+         )
+
+     N, C = t.size(0), t.size(1)
+
+     kernel_size = spatial_tuple(kernel_size, k)
+     stride = spatial_tuple(stride, k)
+     padding = padding_tensor(spatial_tuple(padding, k))
+
+     output = t
+     output = F.pad(output, sum(reversed(padding), ()))  # flatten pairs, last dim first, as F.pad expects
+
+     for i, _ in enumerate(kernel_size):
+         output = output.unfold(i + 2, kernel_size[i], stride[i])
+
+     permutation = [0, 1] + [i + k + 2 for i in range(k)] + [i + 2 for i in range(k)]
+
+     return output.permute(*permutation).reshape(N, math.prod(kernel_size) * C, -1)
+
+
+ def calculate_output_spatial_size(
+     input_spatial_size, kernel_size=1, stride=1, padding=0, dilation=1
+ ):
+     """
+     Calculate the output size for the spatial dimensions of a convolutional operation
+     """
+     stride = spatial_tuple(stride, len(input_spatial_size))
+
+     # Handle padding keywords that are sometimes used
+     if padding == "same":
+         output_size = ()
+         for i, in_length in enumerate(input_spatial_size):
+             output_size += (math.ceil(in_length / stride[i]),)
+         return output_size
+     elif padding == "valid":
+         padding = 0
+
+     kernel_size = spatial_tuple(kernel_size, len(input_spatial_size))
+     padding = spatial_tuple(padding, len(input_spatial_size))
+     dilation = spatial_tuple(dilation, len(input_spatial_size))
+
+     output_size = ()
+
+     for i, in_length in enumerate(input_spatial_size):
+         output_size += (
+             math.floor(
+                 (in_length + 2 * padding[i] - dilation[i] * (kernel_size[i] - 1) - 1)
+                 / stride[i]
+                 + 1
+             ),
+         )
+     return output_size
+
+
+ class SpaceToDepth(nn.Module):
+     """
+     An operation that extracts patches from an image-like tensor and stacks
+     them channel-wise.
+     """
+
+     def __init__(self, kernel_size, stride=1, padding=0, spatial_dimensions=2):
+         """
+         Input shape should be in order (channels, spatial dims...),
+         e.g. (channels, height, width)
+         """
+
+         super().__init__()
+
+         self.kernel_size = kernel_size
+         self.stride = stride
+         self.padding = padding
+         self.spatial_dimensions = spatial_dimensions
+
+     def forward(self, x):
+
+         N, C, *input_spatial_size = x.size()
+
+         patches = kd_unfold(
+             x,
+             kernel_size=self.kernel_size,
+             stride=self.stride,
+             padding=self.padding,
+             k=self.spatial_dimensions,
+         )
+
+         output_spatial_size = calculate_output_spatial_size(
+             input_spatial_size=input_spatial_size,
+             kernel_size=self.kernel_size,
+             stride=self.stride,
+             padding=self.padding,
+             dilation=1,  # kd_unfold doesn't support dilation
+         )
+
+         output_channels = C * math.prod(
+             spatial_tuple(self.kernel_size, self.spatial_dimensions)
+         )
+
+         return patches.view(N, output_channels, *output_spatial_size)
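A quick sanity-check sketch for the unfolding utilities above (not part of the package diff; shapes are illustrative assumptions). For two spatial dimensions, kd_unfold is expected to agree with torch.nn.functional.unfold, and SpaceToDepth stacks each patch channel-wise.

import torch
import torch.nn.functional as F
from broccoli.cnn import kd_unfold, SpaceToDepth

x = torch.randn(2, 3, 32, 32)  # (N, C, H, W), illustrative

# 2-D case: kd_unfold should match F.unfold (no dilation, symmetric padding)
a = kd_unfold(x, kernel_size=4, stride=4, padding=0, k=2)
b = F.unfold(x, kernel_size=4, stride=4, padding=0)
print(a.shape, torch.allclose(a, b))  # torch.Size([2, 48, 64]) True (expected)

# SpaceToDepth: each 4x4 patch is stacked into the channel dimension
s2d = SpaceToDepth(kernel_size=4, stride=4, spatial_dimensions=2)
print(s2d(x).shape)  # torch.Size([2, 48, 8, 8])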
broccoli/linear.py ADDED
@@ -0,0 +1,352 @@
+ import math
+ import random
+ import warnings
+ from typing import Union, List, Iterable
+
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+ from .tensor import SigmaReparamTensor, AnchoredReparamTensor, NormReparamTensor
+
+
+ class SpectralNormLinear(nn.Module):
+     """
+     Inspired by Apple's Spectral Normed Linear Layers
+     (https://github.com/apple/ml-sigma-reparam)
+     """
+
+     def __init__(self, in_features: int, out_features: int, bias: bool = True):
+         super().__init__()
+         self.in_features = in_features
+         self.out_features = out_features
+         self.use_bias = bias
+
+         self.weights = None
+
+         # Define the bias vector as a learnable parameter if required.
+         if self.use_bias:
+             self.bias = nn.Parameter(torch.empty(out_features))
+         else:
+             # If no bias, register it as None.
+             # This is important so that PyTorch doesn't complain when saving/loading the model.
+             self.register_parameter("bias", None)
+
+         self.reset_parameters()
+
+     def reset_parameters(self) -> None:
+         weights = torch.empty(self.out_features, self.in_features)
+         stdv = 1.0 / math.sqrt(self.in_features)
+         nn.init.uniform_(weights, a=-stdv, b=stdv)
+         if self.use_bias:
+             fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weights)
+             bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
+             nn.init.uniform_(self.bias, -bound, bound)
+         self.weights = SigmaReparamTensor(weights)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return F.linear(x, self.weights(), self.bias)
+
+     def __repr__(self) -> str:
+         # Optional: A nice representation for printing the module.
+         return (
+             f"SpectralNormLinear(in_features={self.in_features},"
+             f"out_features={self.out_features}, bias={self.use_bias})"
+         )
+
+
+ class AnchoredLinear(nn.Module):
+     """
+     A linear layer whose weight matrix is reparameterised with
+     AnchoredReparamTensor (see broccoli.tensor).
+     """
+
+     def __init__(self, in_features: int, out_features: int, bias: bool = True):
+         super().__init__()
+         self.in_features = in_features
+         self.out_features = out_features
+         self.use_bias = bias
+
+         self.weights = None
+
+         # Define the bias vector as a learnable parameter if required.
+         if self.use_bias:
+             self.bias = nn.Parameter(torch.empty(out_features))
+         else:
+             # If no bias, register it as None.
+             # This is important so that PyTorch doesn't complain when saving/loading the model.
+             self.register_parameter("bias", None)
+
+         self.reset_parameters()
+
+     def reset_parameters(self) -> None:
+         weights = torch.empty(self.out_features, self.in_features)
+         stdv = 1.0 / math.sqrt(self.in_features)
+         nn.init.uniform_(weights, a=-stdv, b=stdv)
+         if self.use_bias:
+             fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weights)
+             bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
+             nn.init.uniform_(self.bias, -bound, bound)
+         self.weights = AnchoredReparamTensor(weights)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return F.linear(x, self.weights(), self.bias)
+
+     def __repr__(self) -> str:
+         # Optional: A nice representation for printing the module.
+         return (
+             f"AnchoredLinear(in_features={self.in_features},"
+             f"out_features={self.out_features}, bias={self.use_bias})"
+         )
+
+
+ class WeightNormedLinear(nn.Module):
+     """
+     A linear layer whose weight matrix is reparameterised with
+     NormReparamTensor (see broccoli.tensor).
+     """
+
+     def __init__(self, in_features: int, out_features: int, bias: bool = True):
+         super().__init__()
+         self.in_features = in_features
+         self.out_features = out_features
+         self.use_bias = bias
+
+         self.weights = None
+
+         # Define the bias vector as a learnable parameter if required.
+         if self.use_bias:
+             self.bias = nn.Parameter(torch.empty(out_features))
+         else:
+             # If no bias, register it as None.
+             # This is important so that PyTorch doesn't complain when saving/loading the model.
+             self.register_parameter("bias", None)
+
+         self.reset_parameters()
+
+     def reset_parameters(self) -> None:
+         weights = torch.empty(self.out_features, self.in_features)
+         stdv = 1.0 / math.sqrt(self.in_features)
+         nn.init.uniform_(weights, a=-stdv, b=stdv)
+         if self.use_bias:
+             fan_in, _ = nn.init._calculate_fan_in_and_fan_out(weights)
+             bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
+             nn.init.uniform_(self.bias, -bound, bound)
+         self.weights = NormReparamTensor(weights)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return F.linear(x, self.weights(), self.bias)
+
+     def __repr__(self) -> str:
+         return (
+             f"WeightNormedLinear(in_features={self.in_features},"
+             f"out_features={self.out_features}, bias={self.use_bias})"
+         )
+
+
+ class RecyclingLinear(nn.Module):
+     def __init__(
+         self,
+         in_features: int,
+         out_features: int,
+         bias: bool = True,
+         row_recycling_rate: float = 0.0,
+         column_recycling_rate: float = 0.0,
+         adaptive=False,
+         xglu=False,
+     ):
+         super().__init__()
+         self.in_features = in_features
+         self.out_features = out_features
+         self.bias = bias
+         self.xglu = xglu
+         self.linear = nn.Linear(in_features, out_features, bias=bias)
+         self.row_recycling_rate = row_recycling_rate
+         self.column_recycling_rate = column_recycling_rate
+         self.adaptive = adaptive
+         self.optimisers = []
+         self.initial_learning_rates = []
+         self._warned_about_registration = False
+
+     def register_optimiser(self, optimiser: torch.optim.Optimizer):
+         self.optimisers.append(optimiser)
+         self.initial_learning_rates.append(self._get_learning_rate(optimiser))
+         if self.initial_learning_rates[-1] == 0.0:
+             warnings.warn(
+                 "Learning rate of registered optimiser was 0.0 - make sure "
+                 "you haven't initialised a scheduler before registering the "
+                 "optimiser",
+                 stacklevel=2,
+             )
+
+     def _get_learning_rate(self, optimiser: torch.optim.Optimizer):
+         for group in optimiser.param_groups:
+             for param in group["params"]:
+                 if param is self.linear.weight:
+                     return group["lr"]
+
+     def _get_multiplier(self):
+         if not self.adaptive or not self.optimisers:
+             return 1.0
+         else:
+             init = self.initial_learning_rates
+             current = [self._get_learning_rate(o) for o in self.optimisers]
+             pairs = zip(current, init, strict=True)
+             multipliers = [a / b for a, b in pairs if b != 0.0]
+             return min(multipliers) if multipliers else 0.0
+
+     def reset_rows(self, indices):
+         if not torch.is_tensor(indices):
+             idx_tensor = torch.as_tensor(
+                 list(indices), dtype=torch.long, device=self.linear.weight.device
+             )
+         else:
+             idx_tensor = indices
+
+         if idx_tensor.size(0):
+             value_indices = idx_tensor
+             centred_value_weights = self._mean_value_weights()
+             centred_value_weights = centred_value_weights.expand(idx_tensor.size(0), -1)
+             if self.xglu:
+                 gate_indices = idx_tensor
+                 value_indices = idx_tensor + (self.linear.out_features // 2)
+                 centred_gate_weights = self._mean_gate_weights()
+                 centred_gate_weights = centred_gate_weights.expand(idx_tensor.size(0), -1)
+                 self._update_weights(
+                     gate_indices, 0, centred_gate_weights, self.optimisers  # dim=0 (rows)
+                 )
+             self._update_weights(
+                 value_indices, 0, centred_value_weights, self.optimisers
+             )
+         else:
+             return
+
+     def reset_columns(self, indices):
+         if not torch.is_tensor(indices):
+             idx_tensor = torch.as_tensor(
+                 list(indices), dtype=torch.long, device=self.linear.weight.device
+             )
+         else:
+             idx_tensor = indices
+
+         if idx_tensor.size(0):
+             random_weights = self._random_weights(
+                 self.linear.weight.size(0), idx_tensor.size(0)
+             )
+             # Make random col weights quiet so they don't introduce loud noise...
+             # ...but not so quiet that FP16 zeros them and ruins symmetry breaking!
+             random_weights *= 0.1
+             self._update_weights(idx_tensor, 1, random_weights, self.optimisers)  # dim=1 (columns)
+         else:
+             return
+
+     def forward(self, x):
+         if self.training and self.optimisers:
+             self.reset_rows(self.get_reset_indices(0))
+             self.reset_columns(self.get_reset_indices(1))
+         elif self.training and not self._warned_about_registration:
+             warnings.warn(
+                 "RecyclingLinear: No optimiser registered. Recycling disabled.",
+                 stacklevel=2,
+             )
+             self._warned_about_registration = True
+
+         return self.linear(x)
+
+     def get_reset_indices(self, dim):
+         base_rate = self.row_recycling_rate if dim == 0 else self.column_recycling_rate
+         p = base_rate * self._get_multiplier()
+         if dim == 0:
+             if self.xglu:
+                 sample_space = self.linear.out_features // 2
+             else:
+                 sample_space = self.linear.out_features
+         elif dim == 1:
+             sample_space = self.linear.in_features
+         else:
+             raise ValueError("`dim` must be 0 or 1")
+
+         # Sample the indices
+         probs = torch.rand(sample_space, device=self.linear.weight.device)
+         mask = probs < p
+         if mask.any():
+             return torch.nonzero(mask).squeeze(-1)
+         else:
+             return torch.tensor([], dtype=torch.long, device=self.linear.weight.device)
+
+     def _random_weights(self, rows, columns):
+         device = self.linear.weight.device
+         weights = self.linear.weight.data
+         stdv = 1.0 / math.sqrt(weights.size(1))
+         random_weights = torch.rand(rows, columns, device=device)
+         random_weights -= 0.5  # Range [-0.5, +0.5]
+         random_weights *= 2.0 * stdv  # Range [-stdv, +stdv]
+         return random_weights
+
+     def _mean_value_weights(self):
+         """
+         Only used when self.xglu
+         """
+         weights = self.linear.weight.data
+         rows = weights.size(0)
+         if self.xglu:
+             return self.linear.weight[int(rows / 2) :].data.mean(dim=0, keepdim=True)
+         else:
+             return self.linear.weight.data.mean(dim=0, keepdim=True)
+
+     def _mean_gate_weights(self):
+         """
+         Only used when self.xglu
+         """
+         weights = self.linear.weight.data
+         rows = weights.size(0)
+         return self.linear.weight[: int(rows / 2)].data.mean(dim=0, keepdim=True)
+
+     def _update_weights(
+         self,
+         indices: Iterable[int],
+         dim: int,
+         data: torch.Tensor,
+         optimisers: Union[
+             List[torch.optim.Optimizer], torch.optim.Optimizer, None
+         ] = None,
+     ):
+         if optimisers is None:
+             optimisers = []
+         if not isinstance(optimisers, list):
+             optimisers = [optimisers]
+
+         if not torch.is_tensor(indices):
+             idx_tensor = torch.as_tensor(
+                 list(indices), dtype=torch.long, device=self.linear.weight.device
+             )
+         else:
+             idx_tensor = indices
+
+         if idx_tensor.numel() == 0:
+             return
+
+         with torch.no_grad():
+             if dim == 0:
+                 self.linear.weight.data[idx_tensor] = data
+             elif dim == 1:
+                 self.linear.weight.data[:, idx_tensor] = data
+             else:
+                 raise ValueError("`dim` must be 0 or 1")
+
+         self._reset_optim_state(self.linear.weight, idx_tensor, optimisers, dim=dim)
+
+     def _reset_optim_state(self, param, idx_tensor, optimisers, dim):
+         """
+         Zeroes out the optimizer state for the given indices in a single operation.
+         """
+         for optimiser in optimisers:
+             if param not in optimiser.state:
+                 continue
+             state = optimiser.state[param]
+
+             for _, buffer in state.items():
+                 if torch.is_tensor(buffer) and buffer.shape == param.shape:
+                     # Vectorized zeroing
+                     if dim == 0:
+                         buffer[idx_tensor] = 0.0
+                     else:
+                         buffer[:, idx_tensor] = 0.0
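A minimal training-loop sketch for RecyclingLinear (not part of the package diff; the layer size, data, and hyperparameters are illustrative assumptions). The optimiser is registered after construction and before any scheduler has altered its learning rate, since with adaptive=True the recycling probability is scaled by the ratio of the current to the initial learning rate.

import torch
from broccoli.linear import RecyclingLinear

layer = RecyclingLinear(
    32, 64, row_recycling_rate=1e-3, column_recycling_rate=1e-3, adaptive=True
)
optimiser = torch.optim.AdamW(layer.parameters(), lr=3e-4)
layer.register_optimiser(optimiser)  # without this, recycling is skipped and a warning is emitted

for _ in range(10):  # illustrative loop
    x = torch.randn(16, 32)
    loss = layer(x).pow(2).mean()
    optimiser.zero_grad()
    loss.backward()
    optimiser.step()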