x-transformers 1.41.0__py3-none-any.whl → 1.41.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- x_transformers/x_transformers.py +4 -19
- {x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/METADATA +1 -1
- {x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/RECORD +6 -6
- {x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/LICENSE +0 -0
- {x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/WHEEL +0 -0
- {x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/top_level.txt +0 -0
x_transformers/x_transformers.py
CHANGED
@@ -101,12 +101,6 @@ def log(t, eps = 1e-20):
 def max_neg_value(tensor):
     return -torch.finfo(tensor.dtype).max
 
-def reverse_cumsum(t, dim = -1):
-    t = t.flip(dims = (dim,))
-    t = t.cumsum(dim = dim)
-    t = t.flip(dims = (dim,))
-    return t
-
 def l2norm(t, groups = 1):
     t = rearrange(t, '... (g d) -> ... g d', g = groups)
     t = F.normalize(t, p = 2, dim = -1)
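Note: the deleted reverse_cumsum helper appears to have no remaining callers after this release, since its only visible use in DataDependentAlibi.forward is removed in the hunk further down. For reference, a minimal standalone sketch (plain PyTorch, not part of the package) of what the removed helper computed, a right-to-left (suffix) cumulative sum:

import torch

def reverse_cumsum(t, dim = -1):
    # flip along dim, take an ordinary cumulative sum, flip back:
    # out[i] = t[i] + t[i + 1] + ... + t[-1]
    return t.flip(dims = (dim,)).cumsum(dim = dim).flip(dims = (dim,))

t = torch.tensor([1., 2., 3.])
assert torch.allclose(reverse_cumsum(t), torch.tensor([6., 5., 3.]))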
@@ -514,7 +508,7 @@ class DataDependentAlibi(Module):
         self.to_forget_gates = nn.Sequential(
             linear,
             Rearrange('b n h -> b h n'),
-            nn.
+            nn.LogSigmoid()
         )
 
         nn.init.constant_(linear.bias, 5.)
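Note: nn.LogSigmoid() keeps the per-token forget gates in log space (values in (-inf, 0]), so the cumulative sum applied in the forward pass below behaves like a cumulative product of (0, 1) sigmoid gates. A small sanity check of that identity in plain PyTorch (independent of the package):

import torch
import torch.nn as nn

x = torch.randn(6)
log_gates = nn.LogSigmoid()(x)

# log-sigmoid is the log of the sigmoid, so summing log gates
# corresponds to multiplying the underlying (0, 1) gates
assert torch.allclose(log_gates, torch.log(torch.sigmoid(x)))
assert torch.allclose(log_gates.cumsum(dim = -1).exp(), torch.sigmoid(x).cumprod(dim = -1))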
@@ -522,18 +516,9 @@ class DataDependentAlibi(Module):
     def forward(self, x):
         seq = x.shape[-2]
 
-        forget_gates = self.to_forget_gates(x)
-        forget_gates =
-
-        # causal mask out, including diagonal (so token to itself attention is never masked out)
-
-        causal_mask = torch.ones((seq, seq), dtype = torch.bool, device = x.device).triu()
-
-        forget_gates = forget_gates.masked_fill(causal_mask, 0.)
-
-        # reverse cumulative sum in log space (equivalent to cumprod)
-
-        forget_gates = reverse_cumsum(forget_gates)
+        forget_gates = self.to_forget_gates(x)
+        forget_gates = forget_gates.cumsum(dim = -1)
+        forget_gates = einx.subtract('b h i, b h j -> b h i j', forget_gates, forget_gates)
 
         return forget_gates
 
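Note: the rewritten forward builds the bias directly as a pairwise difference of cumulative log forget gates, bias[b, h, i, j] = cumsum[i] - cumsum[j], which for j < i is the sum of log gates over the intervening positions. A hedged equivalence sketch using plain PyTorch broadcasting in place of einx.subtract (shapes and names here are illustrative, not the package's API):

import torch

b, h, n = 1, 2, 5
log_gates = torch.randn(b, h, n).clamp(max = 0.)  # stand-in for log-sigmoid forget gates

cum = log_gates.cumsum(dim = -1)

# einx.subtract('b h i, b h j -> b h i j', cum, cum) expands both operands and
# subtracts them; the same pairwise difference expressed with broadcasting:
bias = cum[..., :, None] - cum[..., None, :]

assert bias.shape == (b, h, n, n)
# the diagonal is zero, so a token is never biased against attending to itself
assert torch.allclose(bias.diagonal(dim1 = -2, dim2 = -1), torch.zeros(b, h, n))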
{x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/RECORD
CHANGED
@@ -5,11 +5,11 @@ x_transformers/continuous.py,sha256=cIVEdhfei258__ziV7kQBrJMxCel54bExBTDrO9rfCI,
 x_transformers/dpo.py,sha256=xt4OuOWhU8pN3OKN2LZAaC2NC8iiEnchqqcrPWVqf0o,3521
 x_transformers/multi_input.py,sha256=tCh-fTJDj2ib4SMGtsa-AM8MxKzJAQSwqAXOu3HU2mg,9252
 x_transformers/nonautoregressive_wrapper.py,sha256=2NU58hYMgn-4Jzg3mie-mXb0XH_dCN7fjlzd3K1rLUY,10510
-x_transformers/x_transformers.py,sha256=
+x_transformers/x_transformers.py,sha256=n8W19Pnhbz-JxbC7QATApWrhI_yC4oqTHGQ1NLuindY,89814
 x_transformers/xl_autoregressive_wrapper.py,sha256=CvZMJ6A6PA-Y_bQAhnORwjJBSl6Vjq2IdW5KTdk8NI8,4195
 x_transformers/xval.py,sha256=7S00kCuab4tWQa-vf-z-XfzADjVj48MoFIr7VSIvttg,8575
-x_transformers-1.41.
-x_transformers-1.41.
-x_transformers-1.41.
-x_transformers-1.41.
-x_transformers-1.41.
+x_transformers-1.41.1.dist-info/LICENSE,sha256=As9u198X-U-vph5noInuUfqsAG2zX_oXPHDmdjwlPPY,1066
+x_transformers-1.41.1.dist-info/METADATA,sha256=UIPYbEVBLrWDGuezlnyh2tFKPlM_Mdj-pYTGxse_NMI,689
+x_transformers-1.41.1.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+x_transformers-1.41.1.dist-info/top_level.txt,sha256=hO6KGpFuGucRNEtRfme4A_rGcM53AKwGP7RVlRIxS5Q,15
+x_transformers-1.41.1.dist-info/RECORD,,
{x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/LICENSE
File without changes
{x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/WHEEL
File without changes
{x_transformers-1.41.0.dist-info → x_transformers-1.41.1.dist-info}/top_level.txt
File without changes