broccoli-ml 3.3.1__tar.gz → 4.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/PKG-INFO +1 -1
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/transformer.py +56 -27
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/pyproject.toml +1 -1
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/LICENSE +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/README.md +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/__init__.py +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/activation.py +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/cnn.py +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/linear.py +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/rope.py +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/tensor.py +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/utils.py +0 -0
- {broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/vit.py +0 -0
{broccoli_ml-3.3.1 → broccoli_ml-4.0.0}/broccoli/transformer.py

@@ -4,11 +4,20 @@ from typing import Optional
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from torch.utils.checkpoint import checkpoint

 from einops import rearrange

 from .rope import RotaryEmbedding, apply_rotary_emb

+try:
+    from flash_attn import flash_attn_func
+
+    FLASH_ATTN = True
+except ImportError:
+    pass
+    FLASH_ATTN = False
+

 def drop_path(
     x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True
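The new imports make flash-attn an optional accelerator: the try/except records whether the fused kernel can be imported in a module-level FLASH_ATTN flag that MHAttention.forward branches on later in this diff (the `pass` before the fallback assignment is redundant but harmless). Below is a minimal, self-contained sketch of the same optional-dependency guard; it is illustrative, not package code, and `has_flash_attention` is a hypothetical helper.

```python
# Sketch of the optional-import guard pattern (illustrative, not package code).
try:
    from flash_attn import flash_attn_func  # needs the flash-attn wheel and a supported GPU

    FLASH_ATTN = True
except ImportError:
    FLASH_ATTN = False  # fall back to the pure-PyTorch attention path


def has_flash_attention() -> bool:
    """Report whether the fused flash-attention kernel is importable."""
    return FLASH_ATTN


if __name__ == "__main__":
    print("flash-attn available:", has_flash_attention())
```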
@@ -206,32 +215,53 @@ class MHAttention(nn.Module):
         q = torch.cat([q_bos, q_img], dim=1)
         k = torch.cat([k_bos, k_img], dim=1)

-        # Divide Q/K/V into heads
-        q = rearrange(q, "b t (h d) -> b h t d", h=self.n_heads)
-        k = rearrange(k, "b t (h d) -> b h t d", h=self.n_heads)
-        v = rearrange(v, "b t (h d) -> b h t d", h=self.n_heads)
-
-        qk_scores = q @ k.transpose(-1, -2)
-
         if self.scaling == "sqrtd":
-
+            scaling_factor = 1 / math.sqrt(self.head_dim)
         elif self.scaling == "d":
             # for backwards compatibility, per https://github.com/microsoft/mup
-
+            scaling_factor = 8 / self.head_dim
         else:
             raise ValueError('`scaling` argument to MHAttention must be "d" or "sqrtd"')

-
-
-
+        if FLASH_ATTN:
+            # Divide Q/K/V into heads
+            q = rearrange(q, "b t (h d) -> b t h d", h=self.n_heads)
+            k = rearrange(k, "b t (h d) -> b t h d", h=self.n_heads)
+            v = rearrange(v, "b t (h d) -> b t h d", h=self.n_heads)
+
+            output_with_heads = flash_attn_func(
+                q,
+                k,
+                v,
+                dropout_p=self.dropout if self.training else 0.0,
+                softmax_scale=scaling_factor,
+                causal=self.causal,
+            )
+
+            output_without_heads = rearrange(output_with_heads, "b t h d -> b t (h d)")
+
+            return self.out_proj(output_without_heads)
+        else:
+            # Divide Q/K/V into heads
+            q = rearrange(q, "b t (h d) -> b h t d", h=self.n_heads)
+            k = rearrange(k, "b t (h d) -> b h t d", h=self.n_heads)
+            v = rearrange(v, "b t (h d) -> b h t d", h=self.n_heads)

-
+            qk_scores = q @ k.transpose(-1, -2)

-
+            qk_scores *= scaling_factor

-
+            # Apply mask if causal (must come before softmax)
+            if self.causal:
+                qk_scores.masked_fill_(self.mask, float("-inf"))

-
+            qk_scores = F.softmax(qk_scores, dim=-1)
+
+            output_with_heads = qk_scores @ v
+
+            output_without_heads = rearrange(output_with_heads, "b h t d -> b t (h d)")
+
+            return self.out_proj(output_without_heads)


 class FeedforwardBlock(nn.Module):
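Both branches of the rewritten MHAttention.forward compute the same scaled dot-product attention: flash_attn_func consumes (batch, seqlen, heads, head_dim) tensors and takes the scale, dropout and causal flag as arguments, while the fallback keeps the (batch, heads, seqlen, head_dim) layout and applies the scale, causal mask and softmax by hand. The sketch below (illustrative shapes and names, not package code) reproduces the fallback math and checks it against PyTorch's built-in scaled_dot_product_attention; the flash-attn kernel itself is not exercised, since it requires a supported GPU.

```python
# Check that the manual fallback attention matches torch's fused SDPA (CPU-only sketch).
import math

import torch
import torch.nn.functional as F
from einops import rearrange

b, t, n_heads, head_dim = 2, 16, 4, 32
q = k = v = torch.randn(b, t, n_heads * head_dim)

# Fallback layout: "b t (h d) -> b h t d", as in the else-branch above.
qh = rearrange(q, "b t (h d) -> b h t d", h=n_heads)
kh = rearrange(k, "b t (h d) -> b h t d", h=n_heads)
vh = rearrange(v, "b t (h d) -> b h t d", h=n_heads)

scaling_factor = 1 / math.sqrt(head_dim)  # the "sqrtd" setting
scores = (qh @ kh.transpose(-1, -2)) * scaling_factor

# Causal mask: disallow attending to future positions (applied before softmax).
causal_mask = torch.triu(torch.ones(t, t, dtype=torch.bool), diagonal=1)
scores = scores.masked_fill(causal_mask, float("-inf"))

out_manual = F.softmax(scores, dim=-1) @ vh
out_sdpa = F.scaled_dot_product_attention(qh, kh, vh, is_causal=True)

print(torch.allclose(out_manual, out_sdpa, atol=1e-5))  # True
```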
@@ -410,21 +440,20 @@ class TransformerBlock(nn.Module):
     def forward(self, x):

         if self.pre_norm:
-
-            x = x + self.drop_path(self.attn(
-
-            x = x + self.drop_path(self.ff
-
+            x = self.layer_norm_1(x)
+            x = x + self.drop_path(self.attn(x, x, x))
+            x = self.layer_norm_2(x)
+            x = x + self.drop_path(checkpoint(self.ff, x, use_reentrant=False))
+            if self.post_norm:  # i.e. in addition! Pre and post.
+                x = self.layer_norm_3(x)
+        elif self.post_norm:  # i.e. only, not prenorm, just post
             x = x + self.drop_path(self.attn(x, x, x))
             x = self.layer_norm_1(x)
-            x = x + self.drop_path(self.ff
+            x = x + self.drop_path(checkpoint(self.ff, x, use_reentrant=False))
             x = self.layer_norm_2(x)
-        else:
+        else:  # Not pre or post norm. Stand well back.
             x = x + self.drop_path(self.attn(x, x, x))
-            x = x + self.drop_path(self.ff
-
-            if self.pre_norm and self.post_norm:
-                x = self.layer_norm_3(x)
+            x = x + self.drop_path(checkpoint(self.ff, x, use_reentrant=False))

         return x

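The TransformerBlock.forward rewrite also routes the feed-forward sub-block through torch.utils.checkpoint.checkpoint with use_reentrant=False, so the feed-forward activations are recomputed during the backward pass instead of being kept in memory. Below is a minimal sketch of that trade-off, using a stand-in feed-forward module rather than the package's FeedforwardBlock:

```python
# Gradient checkpointing sketch: same outputs and gradients, lower activation memory.
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
x = torch.randn(8, 64, requires_grad=True)

y_plain = ff(x)                                  # stores intermediate activations
y_ckpt = checkpoint(ff, x, use_reentrant=False)  # recomputes them during backward

print(torch.allclose(y_plain, y_ckpt))  # True: forward results are identical

y_ckpt.sum().backward()    # backward re-runs ff's forward to rebuild the graph
print(x.grad is not None)  # True: gradients flow as usual
```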