x-transformers 1.42.14.tar.gz → 1.42.16.tar.gz

Files changed (22)
  1. {x_transformers-1.42.14/x_transformers.egg-info → x_transformers-1.42.16}/PKG-INFO +1 -1
  2. {x_transformers-1.42.14 → x_transformers-1.42.16}/setup.py +1 -1
  3. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/x_transformers.py +3 -3
  4. {x_transformers-1.42.14 → x_transformers-1.42.16/x_transformers.egg-info}/PKG-INFO +1 -1
  5. {x_transformers-1.42.14 → x_transformers-1.42.16}/LICENSE +0 -0
  6. {x_transformers-1.42.14 → x_transformers-1.42.16}/README.md +0 -0
  7. {x_transformers-1.42.14 → x_transformers-1.42.16}/setup.cfg +0 -0
  8. {x_transformers-1.42.14 → x_transformers-1.42.16}/tests/test_x_transformers.py +0 -0
  9. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/__init__.py +0 -0
  10. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/attend.py +0 -0
  11. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/autoregressive_wrapper.py +0 -0
  12. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/continuous.py +0 -0
  13. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/dpo.py +0 -0
  14. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/multi_input.py +0 -0
  15. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/neo_mlp.py +0 -0
  16. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/nonautoregressive_wrapper.py +0 -0
  17. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/xl_autoregressive_wrapper.py +0 -0
  18. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/xval.py +0 -0
  19. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers.egg-info/SOURCES.txt +0 -0
  20. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers.egg-info/dependency_links.txt +0 -0
  21. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers.egg-info/requires.txt +0 -0
  22. {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers.egg-info/top_level.txt +0 -0
{x_transformers-1.42.14/x_transformers.egg-info → x_transformers-1.42.16}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: x-transformers
-Version: 1.42.14
+Version: 1.42.16
 Summary: X-Transformers - Pytorch
 Home-page: https://github.com/lucidrains/x-transformers
 Author: Phil Wang
{x_transformers-1.42.14 → x_transformers-1.42.16}/setup.py
@@ -3,7 +3,7 @@ from setuptools import setup, find_packages
 setup(
   name = 'x-transformers',
   packages = find_packages(exclude=['examples']),
-  version = '1.42.14',
+  version = '1.42.16',
   license='MIT',
   description = 'X-Transformers - Pytorch',
   author = 'Phil Wang',
{x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/x_transformers.py
@@ -1235,9 +1235,9 @@ class Attention(Module):
         # maybe learned value residual mixer per token
 
         self.to_value_residual_mix = nn.Sequential(
-            nn.Linear(dim, 1),
+            nn.Linear(dim, heads),
             nn.Sigmoid(),
-            Rearrange('b n 1 -> b 1 n 1')
+            Rearrange('b n h -> b h n 1')
         ) if learned_value_residual_mix else always(0.5)
 
         # attention on attention
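The hunk above widens the learned value-residual gate from a single scalar per token to one gate per attention head. A minimal sketch of the shapes involved is below; the dimensions and the convex-combination mixing convention are assumptions for illustration, not copied from the library.

import torch
from torch import nn
from einops.layers.torch import Rearrange

# hypothetical dimensions for illustration
dim, heads, dim_head = 512, 8, 64
b, n = 2, 16

# the module as it reads after the diff: one sigmoid gate per head per token
to_value_residual_mix = nn.Sequential(
    nn.Linear(dim, heads),
    nn.Sigmoid(),
    Rearrange('b n h -> b h n 1')
)

x = torch.randn(b, n, dim)                          # token embeddings entering the layer
values = torch.randn(b, heads, n, dim_head)         # values computed by this layer
first_values = torch.randn(b, heads, n, dim_head)   # value residual carried from an earlier layer

mix = to_value_residual_mix(x)                      # (b, heads, n, 1), broadcasts over dim_head
# assumed mixing convention: convex combination of current values and the residual
mixed = values * mix + first_values * (1. - mix)
print(mixed.shape)                                  # torch.Size([2, 8, 16, 64])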
@@ -1821,7 +1821,7 @@ class AttentionLayers(Module):
                 is_first_self_attn = False
             elif layer_type == 'c':
                 cross_attn_learned_value_residual = learned_value_residual_mix and not is_first_cross_attn
-                layer = Attention(dim, heads = heads, learned_value_residual_mix = learned_value_residual_mix and not is_first_cross_attn, **{**attn_kwargs, **cross_attn_kwargs})
+                layer = Attention(dim, heads = heads, learned_value_residual_mix = cross_attn_learned_value_residual, **{**attn_kwargs, **cross_attn_kwargs})
                 is_first_cross_attn = False
             elif layer_type == 'f':
                 layer = FeedForward(dim, **ff_kwargs)
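For context, a hedged usage sketch: the learned_value_residual_mix flag consumed in the hunk above appears to be an AttentionLayers-level setting, so enabling it from the usual wrapper would look roughly like the following. The kwarg name is taken from the diff; whether Decoder forwards it unchanged is an assumption.

import torch
from x_transformers import TransformerWrapper, Decoder

model = TransformerWrapper(
    num_tokens = 256,
    max_seq_len = 128,
    attn_layers = Decoder(
        dim = 512,
        depth = 6,
        heads = 8,
        learned_value_residual_mix = True   # assumption: kwarg name as read from the diff above
    )
)

x = torch.randint(0, 256, (1, 128))
logits = model(x)   # (1, 128, 256)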
{x_transformers-1.42.14 → x_transformers-1.42.16/x_transformers.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: x-transformers
-Version: 1.42.14
+Version: 1.42.16
 Summary: X-Transformers - Pytorch
 Home-page: https://github.com/lucidrains/x-transformers
 Author: Phil Wang