x-transformers 2.2.9__tar.gz → 2.2.11__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. {x_transformers-2.2.9 → x_transformers-2.2.11}/PKG-INFO +1 -1
  2. {x_transformers-2.2.9 → x_transformers-2.2.11}/pyproject.toml +1 -1
  3. {x_transformers-2.2.9 → x_transformers-2.2.11}/tests/test_x_transformers.py +27 -0
  4. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/entropy_based_tokenizer.py +4 -0
  5. {x_transformers-2.2.9 → x_transformers-2.2.11}/.github/FUNDING.yml +0 -0
  6. {x_transformers-2.2.9 → x_transformers-2.2.11}/.github/workflows/python-publish.yml +0 -0
  7. {x_transformers-2.2.9 → x_transformers-2.2.11}/.github/workflows/python-test.yaml +0 -0
  8. {x_transformers-2.2.9 → x_transformers-2.2.11}/.gitignore +0 -0
  9. {x_transformers-2.2.9 → x_transformers-2.2.11}/LICENSE +0 -0
  10. {x_transformers-2.2.9 → x_transformers-2.2.11}/README.md +0 -0
  11. {x_transformers-2.2.9 → x_transformers-2.2.11}/data/README.md +0 -0
  12. {x_transformers-2.2.9 → x_transformers-2.2.11}/data/enwik8.gz +0 -0
  13. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/all-attention.png +0 -0
  14. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/attention-on-attention.png +0 -0
  15. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/cosine-sim-attention.png +0 -0
  16. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/deepnorm.png +0 -0
  17. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/dynamic-pos-bias-linear.png +0 -0
  18. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/dynamic-pos-bias-log.png +0 -0
  19. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/dynamic-pos-bias-sinusoidal.png +0 -0
  20. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/dynamic-pos-bias.png +0 -0
  21. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/enhanced-recurrence.png +0 -0
  22. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/fcm.png +0 -0
  23. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/ffglu.png +0 -0
  24. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/flash-attention.png +0 -0
  25. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/gate_values.png +0 -0
  26. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/gating.png +0 -0
  27. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/length-extrapolation-scale.png +0 -0
  28. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/macaron-1.png +0 -0
  29. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/macaron-2.png +0 -0
  30. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/memory-transformer.png +0 -0
  31. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/normformer.png +0 -0
  32. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/pia.png +0 -0
  33. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/qknorm-analysis.png +0 -0
  34. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/resi_dual.png +0 -0
  35. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/residual_attn.png +0 -0
  36. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/rezero.png +0 -0
  37. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/rotary.png +0 -0
  38. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/sandwich-2.png +0 -0
  39. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/sandwich.png +0 -0
  40. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/sandwich_norm.png +0 -0
  41. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/scalenorm.png +0 -0
  42. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/talking-heads.png +0 -0
  43. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/topk-attention.png +0 -0
  44. {x_transformers-2.2.9 → x_transformers-2.2.11}/images/xval.png +0 -0
  45. {x_transformers-2.2.9 → x_transformers-2.2.11}/train_belief_state.py +0 -0
  46. {x_transformers-2.2.9 → x_transformers-2.2.11}/train_copy.py +0 -0
  47. {x_transformers-2.2.9 → x_transformers-2.2.11}/train_entropy_tokenizer.py +0 -0
  48. {x_transformers-2.2.9 → x_transformers-2.2.11}/train_enwik8.py +0 -0
  49. {x_transformers-2.2.9 → x_transformers-2.2.11}/train_length_extrapolate.py +0 -0
  50. {x_transformers-2.2.9 → x_transformers-2.2.11}/train_parity.py +0 -0
  51. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/__init__.py +0 -0
  52. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/attend.py +0 -0
  53. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/autoregressive_wrapper.py +0 -0
  54. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/belief_state_wrapper.py +0 -0
  55. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/continuous.py +0 -0
  56. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/dpo.py +0 -0
  57. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/multi_input.py +0 -0
  58. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/neo_mlp.py +0 -0
  59. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/nonautoregressive_wrapper.py +0 -0
  60. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/x_transformers.py +0 -0
  61. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/xl_autoregressive_wrapper.py +0 -0
  62. {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/xval.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: x-transformers
- Version: 2.2.9
+ Version: 2.2.11
  Summary: X-Transformers
  Project-URL: Homepage, https://pypi.org/project/x-transformers/
  Project-URL: Repository, https://github.com/lucidrains/x-transformers
pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "x-transformers"
- version = "2.2.9"
+ version = "2.2.11"
  description = "X-Transformers"
  authors = [
      { name = "Phil Wang", email = "lucidrains@gmail.com" }
tests/test_x_transformers.py
@@ -800,6 +800,33 @@ def test_entropy_based_tokenizer(
  
      tokenizer(seq[0]) # able to handle without batch dim
  
+ def test_entropy_based_tokenizer_max_token_len():
+     from x_transformers.entropy_based_tokenizer import EntropyBasedTokenizer
+ 
+     model = TransformerWrapper(
+         num_tokens = 20000,
+         max_seq_len = 1024,
+         attn_layers = Decoder(
+             dim = 128,
+             depth = 6,
+             heads = 8,
+             attn_dim_head = 64,
+         )
+     )
+ 
+     tokenizer = EntropyBasedTokenizer(
+         model,
+         entropy_threshold = 100,
+         max_token_size = 4
+     )
+ 
+     seq = torch.randint(0, 20000, (1, 16,))
+     lens = torch.tensor([14])
+ 
+     token_lengths = tokenizer(seq, lens = lens)
+ 
+     assert token_lengths.amax().item() <= 4
+ 
  def test_custom_ff_activation():
  
      model = TransformerWrapper(
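
The new test above exercises the `max_token_size` argument added to `EntropyBasedTokenizer`: entropy-derived segments are not allowed to exceed the given cap, so the returned `token_lengths` never go above 4. The snippet below is a minimal, self-contained sketch of that capping idea in plain PyTorch; it is not the package's implementation, and the helper name `cap_segment_lengths` plus the example lengths are hypothetical.

# a minimal sketch of capping segment lengths, independent of the package's own code;
# `cap_segment_lengths` and the example values are made up for illustration
import torch

def cap_segment_lengths(lengths, max_size):
    # split any segment longer than max_size into chunks of at most max_size
    capped = []
    for length in lengths.tolist():
        while length > max_size:
            capped.append(max_size)
            length -= max_size
        if length > 0:
            capped.append(length)
    return torch.tensor(capped)

lengths = torch.tensor([3, 9, 2])       # hypothetical entropy-derived token lengths
print(cap_segment_lengths(lengths, 4))  # tensor([3, 4, 4, 1]) - every segment now <= 4
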
x_transformers/entropy_based_tokenizer.py
@@ -1,3 +1,4 @@
+ from __future__ import annotations
  from itertools import zip_longest
  
  import torch
@@ -115,6 +116,9 @@ class EntropyBasedTokenizer(Module):
  
          boundaries = boundaries | sub_seq_boundaries
  
+         if exists(mask):
+             boundaries = boundaries & mask
+ 
          # number of tokens
  
          num_tokens = boundaries.sum(dim = -1)
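
The mask change above gates the boundary flags with the padding mask, so when `lens` is supplied, boundaries falling in padded positions no longer contribute to the token count. A small illustrative sketch of that behaviour, assuming boolean tensors shaped like the `boundaries` and `mask` in the diff (the concrete values are made up):

# illustrative only - boolean boundary flags gated by a padding mask
import torch

boundaries = torch.tensor([[False, True, False, True, False, True]])   # hypothetical boundary flags
mask       = torch.tensor([[True,  True, True,  True,  False, False]]) # True = real token, False = padding

boundaries = boundaries & mask        # boundaries inside padding are dropped
num_tokens = boundaries.sum(dim = -1) # token count now ignores padded positions
print(num_tokens)                     # tensor([2])
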