x-transformers 1.30.23__tar.gz → 1.31.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {x_transformers-1.30.23/x_transformers.egg-info → x_transformers-1.31.0}/PKG-INFO +1 -1
- {x_transformers-1.30.23 → x_transformers-1.31.0}/README.md +18 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/setup.py +1 -1
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/x_transformers.py +55 -13
- {x_transformers-1.30.23 → x_transformers-1.31.0/x_transformers.egg-info}/PKG-INFO +1 -1
- {x_transformers-1.30.23 → x_transformers-1.31.0}/LICENSE +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/setup.cfg +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/tests/test_x_transformers.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/__init__.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/attend.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/autoregressive_wrapper.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/continuous.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/dpo.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/nonautoregressive_wrapper.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/xl_autoregressive_wrapper.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/xval.py +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers.egg-info/SOURCES.txt +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers.egg-info/dependency_links.txt +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers.egg-info/requires.txt +0 -0
- {x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers.egg-info/top_level.txt +0 -0
{x_transformers-1.30.23 → x_transformers-1.31.0}/README.md

````diff
@@ -2169,4 +2169,22 @@ ids_out, num_out, is_number_mask = model.generate(start_ids, start_nums, 17)
 }
 ```
 
+```bibtex
+@misc{Guttenberg2023,
+    author = {Ohad Rubin},
+    url = {https://medium.com/@ohadrubin/exploring-weight-decay-in-layer-normalization-challenges-and-a-reparameterization-solution-ad4d12c24950}
+}
+```
+
+```bibtex
+@article{Mesnard2024GemmaOM,
+    title = {Gemma: Open Models Based on Gemini Research and Technology},
+    author = {Gemma Team Thomas Mesnard and Cassidy Hardin and Robert Dadashi and Surya Bhupatiraju and Shreya Pathak and L. Sifre and Morgane Riviere and Mihir Kale and J Christopher Love and Pouya Dehghani Tafti and L'eonard Hussenot and Aakanksha Chowdhery and Adam Roberts and Aditya Barua and Alex Botev and Alex Castro-Ros and Ambrose Slone and Am'elie H'eliou and Andrea Tacchetti and Anna Bulanova and Antonia Paterson and Beth Tsai and Bobak Shahriari and Charline Le Lan and Christopher A. Choquette-Choo and Cl'ement Crepy and Daniel Cer and Daphne Ippolito and David Reid and Elena Buchatskaya and Eric Ni and Eric Noland and Geng Yan and George Tucker and George-Christian Muraru and Grigory Rozhdestvenskiy and Henryk Michalewski and Ian Tenney and Ivan Grishchenko and Jacob Austin and James Keeling and Jane Labanowski and Jean-Baptiste Lespiau and Jeff Stanway and Jenny Brennan and Jeremy Chen and Johan Ferret and Justin Chiu and Justin Mao-Jones and Katherine Lee and Kathy Yu and Katie Millican and Lars Lowe Sjoesund and Lisa Lee and Lucas Dixon and Machel Reid and Maciej Mikula and Mateo Wirth and Michael Sharman and Nikolai Chinaev and Nithum Thain and Olivier Bachem and Oscar Chang and Oscar Wahltinez and Paige Bailey and Paul Michel and Petko Yotov and Pier Giuseppe Sessa and Rahma Chaabouni and Ramona Comanescu and Reena Jana and Rohan Anil and Ross McIlroy and Ruibo Liu and Ryan Mullins and Samuel L Smith and Sebastian Borgeaud and Sertan Girgin and Sholto Douglas and Shree Pandya and Siamak Shakeri and Soham De and Ted Klimenko and Tom Hennigan and Vladimir Feinberg and Wojciech Stokowiec and Yu-hui Chen and Zafarali Ahmed and Zhitao Gong and Tris Brian Warkentin and Ludovic Peran and Minh Giang and Cl'ement Farabet and Oriol Vinyals and Jeffrey Dean and Koray Kavukcuoglu and Demis Hassabis and Zoubin Ghahramani and Douglas Eck and Joelle Barral and Fernando Pereira and Eli Collins and Armand Joulin and Noah Fiedel and Evan Senter and Alek Andreev and Kathleen Kenealy},
+    journal = {ArXiv},
+    year = {2024},
+    volume = {abs/2403.08295},
+    url = {https://api.semanticscholar.org/CorpusID:268379206}
+}
+```
+
 *solve intelligence... then use that to solve everything else.* - Demis Hassabis
````
{x_transformers-1.30.23 → x_transformers-1.31.0}/x_transformers/x_transformers.py

```diff
@@ -93,6 +93,9 @@ def l2norm(t, groups = 1):
     t = F.normalize(t, p = 2, dim = -1)
     return rearrange(t, '... g d -> ... (g d)')
 
+def softclamp(t, value):
+    return (t / value).tanh() * value
+
 def pad_at_dim(t, pad: Tuple[int, int], dim = -1, value = 0.):
     if pad == (0, 0):
         return t
```
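The new `softclamp` helper is a smooth, differentiable alternative to a hard clamp: it is approximately the identity for inputs that are small relative to `value` and saturates toward `±value` for large inputs. A standalone sketch of the same formula (the tensor and the clamp value below are illustrative only):

```python
import torch

def softclamp(t, value):
    # tanh-based soft clamp: ~identity near zero, asymptotically bounded by ±value
    return (t / value).tanh() * value

x = torch.tensor([0.5, 10.0, 100.0, -1000.0])
print(softclamp(x, 50.0))
# approximately [0.50, 9.87, 48.20, -50.00]: small inputs pass through, large ones saturate
```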
```diff
@@ -560,22 +563,29 @@ class Scale(Module):
         return (scale_fn(out[0]), *out[1:])
 
 class LayerNorm(Module):
-    def __init__(self, dim):
+    def __init__(
+        self,
+        dim,
+        unit_offset = 0.
+    ):
         """
         bias-less layernorm has been shown to be more stable. most newer models have moved towards rmsnorm, also bias-less
         """
         super().__init__()
+        self.unit_offset = unit_offset
         self.gamma = nn.Parameter(torch.ones(dim))
-        self.register_buffer('beta', torch.zeros(dim))
+        self.register_buffer('beta', torch.zeros(dim), persistent = False)
 
     def forward(self, x):
-        return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
-
-if version.parse(torch.__version__) >= version.parse('2.1.0'):
-    LayerNorm = partial(nn.LayerNorm, bias = False)
+        gamma = self.gamma + self.unit_offset
+        return F.layer_norm(x, x.shape[-1:], gamma, self.beta)
 
 class AdaptiveLayerNorm(Module):
-    def __init__(self, dim, dim_condition = None):
+    def __init__(
+        self,
+        dim,
+        dim_condition = None
+    ):
         super().__init__()
         dim_condition = default(dim_condition, dim)
 
```
```diff
@@ -590,25 +600,39 @@ class AdaptiveLayerNorm(Module):
         return normed * (gamma + 1.)
 
 class ScaleNorm(Module):
-    def __init__(self, dim):
+    def __init__(
+        self,
+        dim,
+        unit_offset = 0.
+    ):
         super().__init__()
+        self.unit_offset = unit_offset
         self.scale = dim ** 0.5
         self.g = nn.Parameter(torch.ones(1))
 
     def forward(self, x):
-        return F.normalize(x, dim = -1) * self.scale * self.g
+        return F.normalize(x, dim = -1) * self.scale * (self.g + self.unit_offset)
 
 class RMSNorm(Module):
-    def __init__(self, dim):
+    def __init__(
+        self,
+        dim,
+        unit_offset = 0.
+    ):
         super().__init__()
+        self.unit_offset = unit_offset
         self.scale = dim ** 0.5
         self.g = nn.Parameter(torch.ones(dim))
 
     def forward(self, x):
-        return F.normalize(x, dim = -1) * self.scale * self.g
+        return F.normalize(x, dim = -1) * self.scale * (self.g + self.unit_offset)
 
 class AdaptiveRMSNorm(Module):
-    def __init__(self, dim, dim_condition = None):
+    def __init__(
+        self,
+        dim,
+        dim_condition = None
+    ):
         super().__init__()
         self.scale = dim ** 0.5
         dim_condition = default(dim_condition, dim)
```
```diff
@@ -622,7 +646,11 @@ class AdaptiveRMSNorm(Module):
         return normed * self.scale * (gamma + 1.)
 
 class SimpleRMSNorm(Module):
-    def __init__(self, dim):
+    def __init__(
+        self,
+        dim,
+        **kwargs
+    ):
         super().__init__()
         self.scale = dim ** 0.5
 
```
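`LayerNorm`, `ScaleNorm`, and `RMSNorm` gain a `unit_offset` argument (with `SimpleRMSNorm` accepting and ignoring it via `**kwargs`), following the reparameterization from the Ohad Rubin blog post cited in the README: the learned gain is stored as an offset from one, so weight decay pulls the effective gain toward 1 rather than toward 0. A minimal illustrative sketch of the idea, not the library's exact class (the zero initialization here is an assumption made for clarity):

```python
import torch
from torch import nn
import torch.nn.functional as F

class UnitOffsetRMSNorm(nn.Module):
    """Illustrative RMSNorm whose gain is parameterized as an offset from 1."""
    def __init__(self, dim):
        super().__init__()
        self.scale = dim ** 0.5
        # g starts at zero, so the effective gain (g + 1) starts at one;
        # weight decay on g now regresses the effective gain toward 1, not toward 0
        self.g = nn.Parameter(torch.zeros(dim))

    def forward(self, x):
        return F.normalize(x, dim = -1) * self.scale * (self.g + 1.)
```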
```diff
@@ -1182,6 +1210,7 @@ class AttentionLayers(Module):
         use_adaptive_layernorm = False,
         use_adaptive_rmsnorm = False,
         use_adaptive_layerscale = False, # paired with use_adaptive_layernorm for ada-ln-zero from DiT paper
+        norm_add_unit_offset = False,
         dim_condition = None,
         adaptive_condition_mlp = False,
         adaptive_condition_mlp_expansion = 4,
```
```diff
@@ -1215,6 +1244,7 @@ class AttentionLayers(Module):
         scale_residual_constant = 1.,
         shift_tokens = 0,
         sandwich_norm = False,
+        softclamp_output_value: float | None = None,
         resi_dual = False,
         resi_dual_scale = 1.,
         zero_init_branch_output = False,
```
```diff
@@ -1315,6 +1345,10 @@ class AttentionLayers(Module):
 
         norm_fn = partial(norm_class, dim)
 
+        if not norm_need_condition and norm_add_unit_offset:
+            # research Ohad Rubin shares in a blog post by adding an offset to gammas and betas, they can be subjected to weight decay safely
+            norm_fn = partial(norm_fn, unit_offset = 1.)
+
         self.norm_need_condition = norm_need_condition
         self.dim_condition = dim_condition
 
```
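On the `AttentionLayers` side, this is exposed through the new `norm_add_unit_offset` flag, which (when the norm is not condition-dependent) partially applies `unit_offset = 1.` to whichever norm class the stack uses. A hedged usage sketch; the wrapper dimensions below are placeholders, not recommended settings:

```python
import torch
from x_transformers import TransformerWrapper, Decoder

model = TransformerWrapper(
    num_tokens = 20000,
    max_seq_len = 1024,
    attn_layers = Decoder(
        dim = 512,
        depth = 6,
        heads = 8,
        norm_add_unit_offset = True  # new in 1.31.0: norm gains carry a unit offset
    )
)

x = torch.randint(0, 20000, (1, 1024))
logits = model(x)  # (1, 1024, 20000)
```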
```diff
@@ -1421,6 +1455,11 @@ class AttentionLayers(Module):
 
         shift_tokens = cast_tuple(shift_tokens, len(layer_types))
 
+        # optional soft clamping just before the final norm
+        # used in gemma 2
+
+        self.softclamp_output_value = softclamp_output_value
+
         # whether it has post norm
 
         self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity()
```
```diff
@@ -1652,6 +1691,9 @@ class AttentionLayers(Module):
         if return_hiddens:
             layer_hiddens.append(x)
 
+        if exists(self.softclamp_output_value):
+            x = softclamp(x, self.softclamp_output_value)
+
         final_norm = self.final_norm
 
         if self.need_condition:
```
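Taken together, the `softclamp_output_value` changes store the clamp value at construction and apply `softclamp` to the hidden states just before the final norm, as the in-code comment notes Gemma 2 does. A hedged usage sketch; the value `30.` is an illustrative choice, not a documented default:

```python
import torch
from x_transformers import TransformerWrapper, Decoder

model = TransformerWrapper(
    num_tokens = 20000,
    max_seq_len = 1024,
    attn_layers = Decoder(
        dim = 512,
        depth = 6,
        heads = 8,
        softclamp_output_value = 30.  # soft clamp hidden states to roughly ±30 before the final norm
    )
)

x = torch.randint(0, 20000, (1, 1024))
logits = model(x)  # (1, 1024, 20000)
```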