x-transformers 2.9.1.tar.gz → 2.9.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. {x_transformers-2.9.1 → x_transformers-2.9.2}/PKG-INFO +1 -1
  2. {x_transformers-2.9.1 → x_transformers-2.9.2}/pyproject.toml +1 -1
  3. {x_transformers-2.9.1 → x_transformers-2.9.2}/tests/test_x_transformers.py +5 -1
  4. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/attend.py +9 -0
  5. {x_transformers-2.9.1 → x_transformers-2.9.2}/.github/FUNDING.yml +0 -0
  6. {x_transformers-2.9.1 → x_transformers-2.9.2}/.github/workflows/python-publish.yml +0 -0
  7. {x_transformers-2.9.1 → x_transformers-2.9.2}/.github/workflows/python-test.yaml +0 -0
  8. {x_transformers-2.9.1 → x_transformers-2.9.2}/.gitignore +0 -0
  9. {x_transformers-2.9.1 → x_transformers-2.9.2}/LICENSE +0 -0
  10. {x_transformers-2.9.1 → x_transformers-2.9.2}/README.md +0 -0
  11. {x_transformers-2.9.1 → x_transformers-2.9.2}/data/README.md +0 -0
  12. {x_transformers-2.9.1 → x_transformers-2.9.2}/data/enwik8.gz +0 -0
  13. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/all-attention.png +0 -0
  14. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/attention-on-attention.png +0 -0
  15. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/cosine-sim-attention.png +0 -0
  16. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/deepnorm.png +0 -0
  17. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/dynamic-pos-bias-linear.png +0 -0
  18. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/dynamic-pos-bias-log.png +0 -0
  19. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/dynamic-pos-bias-sinusoidal.png +0 -0
  20. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/dynamic-pos-bias.png +0 -0
  21. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/enhanced-recurrence.png +0 -0
  22. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/fcm.png +0 -0
  23. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/ffglu.png +0 -0
  24. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/flash-attention.png +0 -0
  25. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/gate_values.png +0 -0
  26. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/gating.png +0 -0
  27. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/length-extrapolation-scale.png +0 -0
  28. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/macaron-1.png +0 -0
  29. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/macaron-2.png +0 -0
  30. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/memory-transformer.png +0 -0
  31. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/normformer.png +0 -0
  32. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/pia.png +0 -0
  33. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/qknorm-analysis.png +0 -0
  34. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/resi_dual.png +0 -0
  35. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/residual_attn.png +0 -0
  36. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/rezero.png +0 -0
  37. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/rotary.png +0 -0
  38. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/sandwich-2.png +0 -0
  39. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/sandwich.png +0 -0
  40. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/sandwich_norm.png +0 -0
  41. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/scalenorm.png +0 -0
  42. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/talking-heads.png +0 -0
  43. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/topk-attention.png +0 -0
  44. {x_transformers-2.9.1 → x_transformers-2.9.2}/images/xval.png +0 -0
  45. {x_transformers-2.9.1 → x_transformers-2.9.2}/train_belief_state.py +0 -0
  46. {x_transformers-2.9.1 → x_transformers-2.9.2}/train_copy.py +0 -0
  47. {x_transformers-2.9.1 → x_transformers-2.9.2}/train_entropy_tokenizer.py +0 -0
  48. {x_transformers-2.9.1 → x_transformers-2.9.2}/train_enwik8.py +0 -0
  49. {x_transformers-2.9.1 → x_transformers-2.9.2}/train_gpt_vae.py +0 -0
  50. {x_transformers-2.9.1 → x_transformers-2.9.2}/train_length_extrapolate.py +0 -0
  51. {x_transformers-2.9.1 → x_transformers-2.9.2}/train_parity.py +0 -0
  52. {x_transformers-2.9.1 → x_transformers-2.9.2}/train_with_muon.py +0 -0
  53. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/__init__.py +0 -0
  54. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/autoregressive_wrapper.py +0 -0
  55. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/belief_state_wrapper.py +0 -0
  56. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/continuous.py +0 -0
  57. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/dpo.py +0 -0
  58. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/entropy_based_tokenizer.py +0 -0
  59. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/gpt_vae.py +0 -0
  60. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/multi_input.py +0 -0
  61. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/neo_mlp.py +0 -0
  62. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/nonautoregressive_wrapper.py +0 -0
  63. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/up_wrapper.py +0 -0
  64. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/x_transformers.py +0 -0
  65. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/xl_autoregressive_wrapper.py +0 -0
  66. {x_transformers-2.9.1 → x_transformers-2.9.2}/x_transformers/xval.py +0 -0
--- x_transformers-2.9.1/PKG-INFO
+++ x_transformers-2.9.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: x-transformers
-Version: 2.9.1
+Version: 2.9.2
 Summary: X-Transformers
 Project-URL: Homepage, https://pypi.org/project/x-transformers/
 Project-URL: Repository, https://github.com/lucidrains/x-transformers
--- x_transformers-2.9.1/pyproject.toml
+++ x_transformers-2.9.2/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "x-transformers"
-version = "2.9.1"
+version = "2.9.2"
 description = "X-Transformers"
 authors = [
     { name = "Phil Wang", email = "lucidrains@gmail.com" }
--- x_transformers-2.9.1/tests/test_x_transformers.py
+++ x_transformers-2.9.2/tests/test_x_transformers.py
@@ -1378,6 +1378,10 @@ def test_stochastic_attn():
     from x_transformers import Attention
 
     attn = Attention(dim = 512, gumbel_softmax = True)
-    out = attn(torch.randn(1, 1024, 512))
+    out, intermediate = attn(torch.randn(1, 1024, 512), return_intermediates = True)
 
     assert out.shape == (1, 1024, 512)
+
+    from x_transformers.attend import log_prob_from_hard_attend
+    log_probs = log_prob_from_hard_attend(intermediate)
+    assert log_probs.shape == (1, 8, 1024)
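The expected shape (1, 8, 1024) in the new assertion is (batch, heads, query length): Attention defaults to 8 heads, so the helper added below in attend.py returns one log-probability per head per query position.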
--- x_transformers-2.9.1/x_transformers/attend.py
+++ x_transformers-2.9.2/x_transformers/attend.py
@@ -67,6 +67,15 @@ def once(fn):
 
 print_once = once(print)
 
+# gumbel softmax attention related
+
+def log_prob_from_hard_attend(intermeds: Intermediates):
+    log_probs = intermeds.pre_softmax_attn.log_softmax(dim = -1)
+
+    one_hot = intermeds.post_softmax_attn.argmax(dim = -1, keepdim = True)
+    log_prob = log_probs.gather(-1, one_hot)
+    return rearrange(log_prob, 'b h i 1 -> b h i')
+
 # selective attention
 # https://arxiv.org/abs/2410.02703 - section 3.3
 # it is a technique to allow each token to prevent itself from being attended to by future tokens
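For context, a minimal usage sketch of the new helper follows. It is not part of the diff, and the REINFORCE framing in the final comment is an assumption about intended use, not something the release states. With gumbel_softmax = True the attention pattern is sampled (hard attention), and log_prob_from_hard_attend recovers, from the saved pre-softmax logits, the log-probability of the key column each query ended up selecting. Assumes x-transformers >= 2.9.2 with torch and einops installed.

    import torch
    from x_transformers import Attention
    from x_transformers.attend import log_prob_from_hard_attend

    # hard, sampled attention, as exercised by the updated test
    attn = Attention(dim = 512, gumbel_softmax = True)

    x = torch.randn(1, 1024, 512)

    # return_intermediates exposes the pre- and post-softmax attention matrices
    out, intermediates = attn(x, return_intermediates = True)

    # one log-probability per (batch, head, query position);
    # (1, 8, 1024) here, since Attention defaults to 8 heads
    log_probs = log_prob_from_hard_attend(intermediates)
    assert log_probs.shape == (1, 8, 1024)

    # hypothetical downstream use: score term of a REINFORCE-style
    # estimator over the discrete attention choices, e.g.
    # loss = -(reward.detach() * log_probs.sum(dim = -1)).mean()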