x-transformers 2.7.3.tar.gz → 2.7.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. {x_transformers-2.7.3 → x_transformers-2.7.4}/PKG-INFO +1 -1
  2. {x_transformers-2.7.3 → x_transformers-2.7.4}/pyproject.toml +1 -1
  3. {x_transformers-2.7.3 → x_transformers-2.7.4}/tests/test_x_transformers.py +11 -0
  4. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/x_transformers.py +5 -3
  5. {x_transformers-2.7.3 → x_transformers-2.7.4}/.github/FUNDING.yml +0 -0
  6. {x_transformers-2.7.3 → x_transformers-2.7.4}/.github/workflows/python-publish.yml +0 -0
  7. {x_transformers-2.7.3 → x_transformers-2.7.4}/.github/workflows/python-test.yaml +0 -0
  8. {x_transformers-2.7.3 → x_transformers-2.7.4}/.gitignore +0 -0
  9. {x_transformers-2.7.3 → x_transformers-2.7.4}/LICENSE +0 -0
  10. {x_transformers-2.7.3 → x_transformers-2.7.4}/README.md +0 -0
  11. {x_transformers-2.7.3 → x_transformers-2.7.4}/data/README.md +0 -0
  12. {x_transformers-2.7.3 → x_transformers-2.7.4}/data/enwik8.gz +0 -0
  13. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/all-attention.png +0 -0
  14. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/attention-on-attention.png +0 -0
  15. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/cosine-sim-attention.png +0 -0
  16. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/deepnorm.png +0 -0
  17. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/dynamic-pos-bias-linear.png +0 -0
  18. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/dynamic-pos-bias-log.png +0 -0
  19. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/dynamic-pos-bias-sinusoidal.png +0 -0
  20. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/dynamic-pos-bias.png +0 -0
  21. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/enhanced-recurrence.png +0 -0
  22. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/fcm.png +0 -0
  23. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/ffglu.png +0 -0
  24. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/flash-attention.png +0 -0
  25. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/gate_values.png +0 -0
  26. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/gating.png +0 -0
  27. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/length-extrapolation-scale.png +0 -0
  28. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/macaron-1.png +0 -0
  29. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/macaron-2.png +0 -0
  30. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/memory-transformer.png +0 -0
  31. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/normformer.png +0 -0
  32. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/pia.png +0 -0
  33. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/qknorm-analysis.png +0 -0
  34. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/resi_dual.png +0 -0
  35. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/residual_attn.png +0 -0
  36. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/rezero.png +0 -0
  37. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/rotary.png +0 -0
  38. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/sandwich-2.png +0 -0
  39. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/sandwich.png +0 -0
  40. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/sandwich_norm.png +0 -0
  41. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/scalenorm.png +0 -0
  42. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/talking-heads.png +0 -0
  43. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/topk-attention.png +0 -0
  44. {x_transformers-2.7.3 → x_transformers-2.7.4}/images/xval.png +0 -0
  45. {x_transformers-2.7.3 → x_transformers-2.7.4}/train_belief_state.py +0 -0
  46. {x_transformers-2.7.3 → x_transformers-2.7.4}/train_copy.py +0 -0
  47. {x_transformers-2.7.3 → x_transformers-2.7.4}/train_entropy_tokenizer.py +0 -0
  48. {x_transformers-2.7.3 → x_transformers-2.7.4}/train_enwik8.py +0 -0
  49. {x_transformers-2.7.3 → x_transformers-2.7.4}/train_length_extrapolate.py +0 -0
  50. {x_transformers-2.7.3 → x_transformers-2.7.4}/train_parity.py +0 -0
  51. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/__init__.py +0 -0
  52. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/attend.py +0 -0
  53. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/autoregressive_wrapper.py +0 -0
  54. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/belief_state_wrapper.py +0 -0
  55. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/continuous.py +0 -0
  56. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/dpo.py +0 -0
  57. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/entropy_based_tokenizer.py +0 -0
  58. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/multi_input.py +0 -0
  59. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/neo_mlp.py +0 -0
  60. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/nonautoregressive_wrapper.py +0 -0
  61. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/up_wrapper.py +0 -0
  62. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/xl_autoregressive_wrapper.py +0 -0
  63. {x_transformers-2.7.3 → x_transformers-2.7.4}/x_transformers/xval.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: x-transformers
-Version: 2.7.3
+Version: 2.7.4
 Summary: X-Transformers
 Project-URL: Homepage, https://pypi.org/project/x-transformers/
 Project-URL: Repository, https://github.com/lucidrains/x-transformers
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "x-transformers"
-version = "2.7.3"
+version = "2.7.4"
 description = "X-Transformers"
 authors = [
     { name = "Phil Wang", email = "lucidrains@gmail.com" }
tests/test_x_transformers.py
@@ -1314,3 +1314,14 @@ def test_simple_mdlm(
 
     loss = nar(seq)
     loss.loss.backward()
+
+def test_qk_clip():
+    from x_transformers import Attention
+
+    x = torch.randn(1, 1024, 512)
+
+    attn = Attention(dim = 512, dim_out = 384)
+
+    out, intermediates = attn(x, return_intermediates = True)
+
+    attn.qk_clip_(intermediates, tau = 100)
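
For context on what the new test exercises: qk_clip_ rescales the query and key projection weights per attention head so that the maximum pre-softmax attention logit is capped at a threshold tau, in the spirit of the QK-Clip technique described in the Kimi K2 technical report. The scale computation itself is not visible in this diff, so the sketch below is an assumption: the name qk_clip_sketch, the max_logits_per_head input, and the (tau / max_logit) ** 0.5 formula are illustrative, not the library's actual internals.

import torch

def qk_clip_sketch(q_weight, k_weight, max_logits_per_head, tau = 100.):
    # max_logits_per_head: (heads,) tensor holding the largest pre-softmax
    # attention logit observed for each head during the forward pass
    heads = max_logits_per_head.numel()
    dim_head = q_weight.shape[0] // heads

    # heads whose max logit stayed at or below tau get scale 1; heads that
    # exceeded it are scaled down, with the correction split evenly between
    # the query and key projections via the square root
    scale = (tau / max_logits_per_head.clamp(min = tau)) ** 0.5

    # expand the per-head scale across each head's rows of the weight
    # matrices - the same head-major expansion the x_transformers.py hunk
    # below performs with einops repeat
    scale = scale.repeat_interleave(dim_head)

    q_weight.mul_(scale[:, None])
    k_weight.mul_(scale[:, None])

# usage with stand-in weights: the heads whose max logits exceed 100
# (here 120, 300 and 110) are rescaled, the others are left untouched
q_w, k_w = torch.randn(512, 512), torch.randn(512, 512)
max_logits = torch.tensor([50., 120., 80., 300., 90., 60., 110., 70.])
qk_clip_sketch(q_w, k_w, max_logits, tau = 100.)
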
x_transformers/x_transformers.py
@@ -1637,10 +1637,12 @@ class Attention(Module):
         q_weight = self.to_q.weight
         k_weight = self.to_k.weight
 
-        q_dim, k_dim, heads = q_weight.shape[0], k_weight.shape[0], qk_weight_scale.numel()
+        qk_dim, heads = q_weight.shape[0], qk_weight_scale.numel()
 
-        q_weight.mul_(repeat(qk_weight_scale, 'h -> (h expand)', expand = q_dim // heads))
-        k_weight.mul_(repeat(qk_weight_scale, 'h -> (h expand)', expand = k_dim // heads))
+        qk_weight_scale = repeat(qk_weight_scale, 'h -> (h expand)', expand = qk_dim // heads)
+
+        q_weight.mul_(qk_weight_scale)
+        k_weight.mul_(qk_weight_scale)
 
     def forward(
         self,
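
The refactor is behavior-preserving: since to_q and to_k project to the same width, the old code tracked q_dim and k_dim separately and expanded the per-head scale twice, while the new code expands it once and applies it to both weight matrices. A small standalone check of what that einops repeat produces (the head and dim_head values are arbitrary, chosen only for illustration):

import torch
from einops import repeat

heads, dim_head = 8, 64
qk_dim = heads * dim_head

per_head_scale = torch.rand(heads)  # stand-in per-head scales

# 'h -> (h expand)' lays the copies out head-major: all rows of head 0
# first, then head 1, and so on - matching the row layout of to_q.weight
expanded = repeat(per_head_scale, 'h -> (h expand)', expand = qk_dim // heads)

assert expanded.shape == (qk_dim,)
assert torch.equal(expanded[:dim_head], per_head_scale[0].expand(dim_head))
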