x-transformers 1.42.22__py3-none-any.whl → 1.42.23__py3-none-any.whl
- x_transformers/x_transformers.py +4 -4
- {x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/METADATA +1 -1
- {x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/RECORD +6 -6
- {x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/LICENSE +0 -0
- {x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/WHEEL +0 -0
- {x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/top_level.txt +0 -0
x_transformers/x_transformers.py
CHANGED
@@ -1079,6 +1079,7 @@ class Attention(Module):
         neutreno_alpha = 0.4,
         learned_value_residual_mix = False,
         laser = False, # https://arxiv.org/abs/2411.03493v1
+        laser_softclamp_value = 15.,
         onnxable = False,
         attend_sdp_kwargs: dict = dict(
             enable_flash = True,
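The new laser_softclamp_value knob bounds the values before they are exponentiated (see the forward-pass hunks below). For context, a minimal sketch of a tanh-based softclamp of the kind this parameter feeds; treat the exact helper definition as an assumption, the real one lives elsewhere in x_transformers.py:

import torch

def softclamp(t: torch.Tensor, value: float) -> torch.Tensor:
    # smoothly squashes t into (-value, value); approximately identity when |t| is much smaller than value
    return (t / value).tanh() * value

v = torch.randn(2, 8, 128, 64) * 40   # deliberately large (batch, heads, seq, dim_head) values
clamped = softclamp(v, 15.)            # default laser_softclamp_value
assert clamped.abs().max() < 15.       # exp(clamped) can no longer overflow

With the default of 15., exp(v) is capped near e^15 ≈ 3.3e6, comfortably inside float32 range.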
@@ -1119,9 +1120,9 @@ class Attention(Module):
         self.to_v = LinearNoBias(dim_kv, v_dim) if not shared_kv else None

         # enhancing gradients to attention through exponentiated values
-        # todo - compare it to `attn = attn * large_value + attn.detach() * (1. - large_value)`

         self.laser = laser
+        self.laser_softclamp_value = laser_softclamp_value

         # relations projection from tp-attention
@@ -1449,8 +1450,7 @@ class Attention(Module):
             attn_bias = pad_at_dim(attn_bias, (num_mem_kv, 0))

         if self.laser:
-
-            v = v - values_max
+            v = softclamp(v, self.laser_softclamp_value)
             v = v.exp()

         # attention is all we need
@@ -1465,7 +1465,7 @@ class Attention(Module):
         # laser

         if self.laser:
-            out = log(out)
+            out = log(out)

         # store the values for resformer or Neutreno
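Taken together, the forward-pass hunks replace the old subtract-the-max trick with a softclamp before exponentiating the values, while still undoing the exponentiation with a log after attention. A schematic sketch of that path, under the same assumed softclamp definition and with torch's scaled_dot_product_attention standing in for the library's own attend step (illustrative, not the actual Attention.forward):

import torch
import torch.nn.functional as F

def laser_attention(q, k, v, softclamp_value = 15.):
    # LASER: attend over exponentiated values, then return to log space
    v = (v / softclamp_value).tanh() * softclamp_value  # softclamp(v, self.laser_softclamp_value)
    v = v.exp()                                          # strictly positive values
    out = F.scaled_dot_product_attention(q, k, v)        # convex combination keeps out > 0
    return out.log()                                     # so the final log is always finite

q = k = torch.randn(1, 8, 16, 64)
v = torch.randn(1, 8, 16, 64)
print(laser_attention(q, k, v).shape)  # torch.Size([1, 8, 16, 64])

Because the attention output is a convex combination of strictly positive exp(v), softclamping removes the need to compute and subtract a values_max before exponentiating.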
{x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/RECORD
CHANGED
@@ -6,11 +6,11 @@ x_transformers/dpo.py,sha256=xt4OuOWhU8pN3OKN2LZAaC2NC8iiEnchqqcrPWVqf0o,3521
 x_transformers/multi_input.py,sha256=tCh-fTJDj2ib4SMGtsa-AM8MxKzJAQSwqAXOu3HU2mg,9252
 x_transformers/neo_mlp.py,sha256=XCNnnop9WLarcxap1kGuYc1x8GHvwkZiDRnXOxSl3Po,3452
 x_transformers/nonautoregressive_wrapper.py,sha256=2NU58hYMgn-4Jzg3mie-mXb0XH_dCN7fjlzd3K1rLUY,10510
-x_transformers/x_transformers.py,sha256=
+x_transformers/x_transformers.py,sha256=a5zP8ORv3x-cdkZkgwDDLBZ9wm3LmHDrqgqK2tCx-mU,95992
 x_transformers/xl_autoregressive_wrapper.py,sha256=CvZMJ6A6PA-Y_bQAhnORwjJBSl6Vjq2IdW5KTdk8NI8,4195
 x_transformers/xval.py,sha256=7S00kCuab4tWQa-vf-z-XfzADjVj48MoFIr7VSIvttg,8575
-x_transformers-1.42.
-x_transformers-1.42.
-x_transformers-1.42.
-x_transformers-1.42.
-x_transformers-1.42.
+x_transformers-1.42.23.dist-info/LICENSE,sha256=As9u198X-U-vph5noInuUfqsAG2zX_oXPHDmdjwlPPY,1066
+x_transformers-1.42.23.dist-info/METADATA,sha256=e6FRIaLa5dolzIpUoq4L3T-h1bqQa8I65bHeWuY9Uck,739
+x_transformers-1.42.23.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+x_transformers-1.42.23.dist-info/top_level.txt,sha256=hO6KGpFuGucRNEtRfme4A_rGcM53AKwGP7RVlRIxS5Q,15
+x_transformers-1.42.23.dist-info/RECORD,,
{x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/LICENSE
File without changes
{x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/WHEEL
File without changes
{x_transformers-1.42.22.dist-info → x_transformers-1.42.23.dist-info}/top_level.txt
File without changes