x-transformers 1.42.14__tar.gz → 1.42.16__tar.gz
- {x_transformers-1.42.14/x_transformers.egg-info → x_transformers-1.42.16}/PKG-INFO +1 -1
- {x_transformers-1.42.14 → x_transformers-1.42.16}/setup.py +1 -1
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/x_transformers.py +3 -3
- {x_transformers-1.42.14 → x_transformers-1.42.16/x_transformers.egg-info}/PKG-INFO +1 -1
- {x_transformers-1.42.14 → x_transformers-1.42.16}/LICENSE +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/README.md +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/setup.cfg +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/tests/test_x_transformers.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/__init__.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/attend.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/autoregressive_wrapper.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/continuous.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/dpo.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/multi_input.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/neo_mlp.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/nonautoregressive_wrapper.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/xl_autoregressive_wrapper.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers/xval.py +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers.egg-info/SOURCES.txt +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers.egg-info/dependency_links.txt +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers.egg-info/requires.txt +0 -0
- {x_transformers-1.42.14 → x_transformers-1.42.16}/x_transformers.egg-info/top_level.txt +0 -0
@@ -1235,9 +1235,9 @@ class Attention(Module):
         # maybe learned value residual mixer per token

         self.to_value_residual_mix = nn.Sequential(
-            nn.Linear(dim,
+            nn.Linear(dim, heads),
             nn.Sigmoid(),
-            Rearrange('b n
+            Rearrange('b n h -> b h n 1')
         ) if learned_value_residual_mix else always(0.5)

         # attention on attention
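For readers skimming the diff: the hunk above turns the learned value residual mix into a per-head gate. A minimal, hedged sketch of the resulting shape behavior (the dims below are illustrative, not the package defaults):

    import torch
    from torch import nn
    from einops.layers.torch import Rearrange

    dim, heads = 512, 8      # illustrative hyperparameters
    batch, seq_len = 2, 16

    # mirrors the new lines in the hunk above
    to_value_residual_mix = nn.Sequential(
        nn.Linear(dim, heads),         # one mixing logit per head, per token
        nn.Sigmoid(),                  # gate in (0, 1)
        Rearrange('b n h -> b h n 1')  # broadcastable against per-head values of shape (b, h, n, d)
    )

    x = torch.randn(batch, seq_len, dim)
    print(to_value_residual_mix(x).shape)  # torch.Size([2, 8, 16, 1])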
@@ -1821,7 +1821,7 @@ class AttentionLayers(Module):
                 is_first_self_attn = False
             elif layer_type == 'c':
                 cross_attn_learned_value_residual = learned_value_residual_mix and not is_first_cross_attn
-                layer = Attention(dim, heads = heads, learned_value_residual_mix =
+                layer = Attention(dim, heads = heads, learned_value_residual_mix = cross_attn_learned_value_residual, **{**attn_kwargs, **cross_attn_kwargs})
                 is_first_cross_attn = False
             elif layer_type == 'f':
                 layer = FeedForward(dim, **ff_kwargs)
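The second hunk passes the same per-layer flag through to cross attention. To make the role of the gate concrete, here is a hedged sketch of how such a value residual is typically blended with the current values (the function name and exact formula are assumptions for illustration, not the package's verbatim code):

    import torch

    def mix_value_residual(values, first_layer_values, mix):
        # values, first_layer_values: (batch, heads, seq, dim_head)
        # mix: (batch, heads, seq, 1) per-head gate, or the 0.5 fallback from always(0.5)
        return values * mix + first_layer_values * (1. - mix)

    v       = torch.randn(2, 8, 16, 64)
    v_first = torch.randn(2, 8, 16, 64)
    gate    = torch.rand(2, 8, 16, 1)
    print(mix_value_residual(v, v_first, gate).shape)  # torch.Size([2, 8, 16, 64])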