x-transformers 2.7.3.tar.gz → 2.7.4.tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- {x_transformers-2.7.3 → x_transformers-2.7.4}/PKG-INFO +1 -1
- {x_transformers-2.7.3 → x_transformers-2.7.4}/pyproject.toml +1 -1
- {x_transformers-2.7.3 → x_transformers-2.7.4}/tests/test_x_transformers.py +11 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/x_transformers.py +5 -3
- {x_transformers-2.7.3 → x_transformers-2.7.4}/.github/FUNDING.yml +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/.github/workflows/python-publish.yml +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/.github/workflows/python-test.yaml +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/.gitignore +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/LICENSE +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/README.md +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/data/README.md +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/data/enwik8.gz +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/all-attention.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/attention-on-attention.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/cosine-sim-attention.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/deepnorm.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/dynamic-pos-bias-linear.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/dynamic-pos-bias-log.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/dynamic-pos-bias-sinusoidal.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/dynamic-pos-bias.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/enhanced-recurrence.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/fcm.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/ffglu.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/flash-attention.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/gate_values.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/gating.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/length-extrapolation-scale.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/macaron-1.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/macaron-2.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/memory-transformer.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/normformer.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/pia.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/qknorm-analysis.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/resi_dual.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/residual_attn.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/rezero.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/rotary.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/sandwich-2.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/sandwich.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/sandwich_norm.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/scalenorm.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/talking-heads.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/topk-attention.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/images/xval.png +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/train_belief_state.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/train_copy.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/train_entropy_tokenizer.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/train_enwik8.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/train_length_extrapolate.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/train_parity.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/__init__.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/attend.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/autoregressive_wrapper.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/belief_state_wrapper.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/continuous.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/dpo.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/entropy_based_tokenizer.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/multi_input.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/neo_mlp.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/nonautoregressive_wrapper.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/up_wrapper.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/xl_autoregressive_wrapper.py +0 -0
- {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/xval.py +0 -0
tests/test_x_transformers.py

```diff
@@ -1314,3 +1314,14 @@ def test_simple_mdlm(
 
     loss = nar(seq)
     loss.loss.backward()
+
+def test_qk_clip():
+    from x_transformers import Attention
+
+    x = torch.randn(1, 1024, 512)
+
+    attn = Attention(dim = 512, dim_out = 384)
+
+    out, intermediates = attn(x, return_intermediates = True)
+
+    attn.qk_clip_(intermediates, tau = 100)
```
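The new test drives `qk_clip_` end to end: run the attention module once with `return_intermediates = True`, then pass the intermediates (which record the statistics `qk_clip_` needs from the forward pass) back into the module. For context, QK-Clip rescales the query and key projection weights of any head whose maximum attention logit exceeds a threshold `tau`; since the logit is bilinear in the two weight matrices, each side takes the square root of the clip factor. A minimal self-contained sketch of that idea, assuming per-head maximum logits are already in hand (`qk_clip_sketch` and its argument names are illustrative, not the library's internals):

```python
import torch

@torch.no_grad()
def qk_clip_sketch(q_weight, k_weight, max_attn_logits, tau = 100.):
    # per-head clip factor tau / max_logit, capped at 1 so heads whose
    # logits already stay under tau are left untouched
    gamma = (tau / max_attn_logits).clamp(max = 1.)

    # the attention logit q · k is bilinear in W_q and W_k, so each
    # weight matrix takes the square root of the factor
    scale = gamma.sqrt()                          # shape: (heads,)

    heads = scale.numel()
    rows_per_head = q_weight.shape[0] // heads    # projection rows per head

    # expand to one scale entry per output row, then scale rows in place
    scale = scale.repeat_interleave(rows_per_head)
    q_weight.mul_(scale.unsqueeze(-1))
    k_weight.mul_(scale.unsqueeze(-1))
```

In the library itself this is packaged as the single call `attn.qk_clip_(intermediates, tau = 100)` shown in the test.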
x_transformers/x_transformers.py

```diff
@@ -1637,10 +1637,12 @@ class Attention(Module):
         q_weight = self.to_q.weight
         k_weight = self.to_k.weight
 
-
+        qk_dim, heads = q_weight.shape[0], qk_weight_scale.numel()
 
-
-
+        qk_weight_scale = repeat(qk_weight_scale, 'h -> (h expand)', expand = qk_dim // heads)
+
+        q_weight.mul_(qk_weight_scale)
+        k_weight.mul_(qk_weight_scale)
 
     def forward(
         self,
```
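The substance of the fix: the per-head clip factors arrive as a vector of length `heads`, while `to_q.weight` and `to_k.weight` have `qk_dim` output rows (several rows per head), so the vector must be expanded to one entry per row before the in-place `mul_`. The `'h -> (h expand)'` pattern repeats each head's scalar contiguously, matching the row layout of a fused multi-head projection. A small standalone demonstration of that einops pattern (the shapes here are illustrative):

```python
import torch
from einops import repeat

heads, dim_head = 8, 64
qk_dim = heads * dim_head                   # output rows of the fused q/k projection

qk_weight_scale = torch.rand(heads)         # one clip factor per head

# each head's scalar is repeated qk_dim // heads times, contiguously
expanded = repeat(qk_weight_scale, 'h -> (h expand)', expand = qk_dim // heads)

assert expanded.shape == (qk_dim,)
# the first head's block of rows all share the first head's factor
assert (expanded[:dim_head] == qk_weight_scale[0]).all()
```

This is presumably why the new test configures `Attention(dim = 512, dim_out = 384)`: the expansion is now derived from the actual weight shape (`q_weight.shape[0]`) rather than from assumed module dimensions.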