broccoli-ml 13.0.5__tar.gz → 14.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/PKG-INFO +1 -1
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/transformer.py +21 -20
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/pyproject.toml +1 -1
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/LICENSE +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/README.md +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/__init__.py +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/activation.py +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/cnn.py +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/linear.py +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/rope.py +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/tensor.py +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/utils.py +0 -0
- {broccoli_ml-13.0.5 → broccoli_ml-14.0.0}/broccoli/vit.py +0 -0
broccoli/transformer.py

```diff
@@ -200,26 +200,26 @@ class MHAttention(nn.Module):
                 "`source_size` must be a tuple of 1, 2 or 3 integers"
             )
 
-        q = rearrange(q, "b t (h d) -> b t h d", h=self.n_heads)
-        k = rearrange(k, "b t (h d) -> b t h d", h=self.n_heads)
+        q = rearrange(q, "b t (h d) -> b h t d", h=self.n_heads)
+        k = rearrange(k, "b t (h d) -> b h t d", h=self.n_heads)
 
         q_util, q_img = (
-            q[:, : self.utility_tokens, :, :],
-            q[:, self.utility_tokens :, :, :],
+            q[:, :, : self.utility_tokens, :],
+            q[:, :, self.utility_tokens :, :],
        )
         k_util, k_img = (
-            k[:, : self.utility_tokens, :, :],
-            k[:, self.utility_tokens :, :, :],
+            k[:, :, : self.utility_tokens, :],
+            k[:, :, self.utility_tokens :, :],
        )
 
         q_img = rearrange(
             q_img,
-            f"b ({spatial_dimension_names}) h d -> b {spatial_dimension_names} h d",
+            f"b h ({spatial_dimension_names}) d -> b h {spatial_dimension_names} d",
             **spatial_dimension_values,
         )
         k_img = rearrange(
             k_img,
-            f"b ({spatial_dimension_names}) h d -> b {spatial_dimension_names} h d",
+            f"b h ({spatial_dimension_names}) d -> b h {spatial_dimension_names} d",
             **spatial_dimension_values,
         )
 
```
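This hunk moves the head axis in front of the token axis before the utility tokens are split off, so all subsequent slicing happens on dim 2 instead of dim 1. A minimal sketch of the new layout, with assumed sizes and `n_utility` standing in for `self.utility_tokens`:

```python
import torch
from einops import rearrange

b, t, h, d = 2, 6, 4, 8      # batch, tokens, heads, head dim (assumed sizes)
n_utility = 2                # stand-in for self.utility_tokens

q = torch.randn(b, t, h * d)

# 14.0.0 layout: the head axis now precedes the token axis
q = rearrange(q, "b t (h d) -> b h t d", h=h)

# so the utility tokens are sliced on dim 2 rather than dim 1
q_util, q_img = q[:, :, :n_utility, :], q[:, :, n_utility:, :]
print(q_util.shape, q_img.shape)  # torch.Size([2, 4, 2, 8]) torch.Size([2, 4, 4, 8])
```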
```diff
@@ -230,19 +230,19 @@ class MHAttention(nn.Module):
 
         q_img = rearrange(
             q_img,
-            f"b {spatial_dimension_names} h d -> b ({spatial_dimension_names}) h d",
+            f"b h {spatial_dimension_names} d -> b h ({spatial_dimension_names}) d",
         )
         k_img = rearrange(
             k_img,
-            f"b {spatial_dimension_names} h d -> b ({spatial_dimension_names}) h d",
+            f"b h {spatial_dimension_names} d -> b h ({spatial_dimension_names}) d",
         )
 
         # Re-combine the utility tokens and the RoPE-enhanced sequence tokens
-        q = torch.cat([q_util, q_img], dim=1)
-        k = torch.cat([k_util, k_img], dim=1)
+        q = torch.cat([q_util, q_img], dim=2)
+        k = torch.cat([k_util, k_img], dim=2)
 
-        q = rearrange(q, "b t h d -> b t (h d)")
-        k = rearrange(k, "b t h d -> b t (h d)")
+        q = rearrange(q, "b h t d -> b t (h d)")
+        k = rearrange(k, "b h t d -> b t (h d)")
 
         return q, k
 
```
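The second hunk is the mirror image: flatten the spatial axes back, re-attach the utility tokens on the token axis (now dim 2), and merge the heads again only at the end. A self-contained sketch of that round trip, assuming a 2x2 token grid and skipping the actual rotary step:

```python
import torch
from einops import rearrange

b, h, d, n_utility = 2, 4, 8, 2              # assumed sizes, as above
q_util = torch.randn(b, h, n_utility, d)     # utility tokens in "b h t d"
q_img = torch.randn(b, h, 4, d)              # a flattened 2x2 grid of image tokens

x = rearrange(q_img, "b h (x y) d -> b h x y d", x=2, y=2)  # unflatten for RoPE
# ... rotary position embeddings would be applied here ...
x = rearrange(x, "b h x y d -> b h (x y) d")                # flatten again

q = torch.cat([q_util, x], dim=2)            # token axis is dim 2 in "b h t d"
q = rearrange(q, "b h t d -> b t (h d)")     # heads merged back only at the end
print(q.shape)  # torch.Size([2, 6, 32])
```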
```diff
@@ -621,6 +621,7 @@ class EncoderBlock(nn.Module):
 
         if self.post_norm:
             x = self.post_attention_norm(x)
+            process_x = x
         elif self.pre_norm:
             process_x = self.pre_mlp_norm(x)
         else:
```
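The single added line binds `process_x` on the `post_norm` path; judging from the surrounding context lines (`process_x = self.pre_mlp_norm(x)`), it is the tensor handed on to the MLP sub-block, and the other branches already bind it. A hedged sketch of the branch structure, with names mirroring the diff and the rest assumed:

```python
import torch
import torch.nn as nn

def mlp_input(x, *, post_norm, pre_norm, post_attention_norm, pre_mlp_norm):
    if post_norm:
        x = post_attention_norm(x)
        process_x = x                  # the line added in 14.0.0
    elif pre_norm:
        process_x = pre_mlp_norm(x)
    else:
        process_x = x
    # without the added line, this sketch's post_norm path would leave
    # process_x unbound and raise UnboundLocalError here
    return x, process_x

norm = nn.LayerNorm(8)
x = torch.randn(2, 8)
_, px = mlp_input(x, post_norm=True, pre_norm=False,
                  post_attention_norm=norm, pre_mlp_norm=norm)
print(px.shape)  # torch.Size([2, 8])
```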
```diff
@@ -638,15 +639,15 @@ class EncoderBlock(nn.Module):
     def attention_logits(self, x):
         """
         Give back the attention scores used in this layer.
+        Needs to match what the model actually sees during forward()
+        by applying the correct normalisations.
         """
-        # Fix: Use the correct attribute name 'pre_attention_norm'
         if self.pre_norm:
-            # We must normalize the input before measuring attention logits
-            # to match what the model actually sees during forward()
             x = self.pre_attention_norm(x)
-
-
-
+        elif self.post_norm:
+            x = self.input_norm(x)
+
+        return self.attn.attention_logits(x, x, x)
 
     def reset_parameters(self):
         if self.pre_norm:
```
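The rewritten `attention_logits` now normalises its input the same way `forward()` would, for either norm placement, before querying the attention module. A sketch of the idea, using `nn.MultiheadAttention`'s averaged attention weights as a stand-in for the package's `attn.attention_logits` (which this sketch does not reproduce exactly):

```python
import torch
import torch.nn as nn

class Block(nn.Module):
    def __init__(self, dim, pre_norm=True, post_norm=False):
        super().__init__()
        self.pre_norm, self.post_norm = pre_norm, post_norm
        self.pre_attention_norm = nn.LayerNorm(dim)
        self.input_norm = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads=2, batch_first=True)

    def attention_logits(self, x):
        # mirror forward(): apply whichever normalisation the attention
        # layer's input would actually receive
        if self.pre_norm:
            x = self.pre_attention_norm(x)
        elif self.post_norm:
            x = self.input_norm(x)
        _, weights = self.attn(x, x, x, need_weights=True)
        return weights

blk = Block(8)
print(blk.attention_logits(torch.randn(2, 5, 8)).shape)  # torch.Size([2, 5, 5])
```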