x-transformers 2.2.9.tar.gz → 2.2.11.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {x_transformers-2.2.9 → x_transformers-2.2.11}/PKG-INFO +1 -1
- {x_transformers-2.2.9 → x_transformers-2.2.11}/pyproject.toml +1 -1
- {x_transformers-2.2.9 → x_transformers-2.2.11}/tests/test_x_transformers.py +27 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/entropy_based_tokenizer.py +4 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/.github/FUNDING.yml +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/.github/workflows/python-publish.yml +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/.github/workflows/python-test.yaml +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/.gitignore +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/LICENSE +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/README.md +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/data/README.md +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/data/enwik8.gz +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/all-attention.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/attention-on-attention.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/cosine-sim-attention.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/deepnorm.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/dynamic-pos-bias-linear.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/dynamic-pos-bias-log.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/dynamic-pos-bias-sinusoidal.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/dynamic-pos-bias.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/enhanced-recurrence.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/fcm.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/ffglu.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/flash-attention.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/gate_values.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/gating.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/length-extrapolation-scale.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/macaron-1.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/macaron-2.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/memory-transformer.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/normformer.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/pia.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/qknorm-analysis.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/resi_dual.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/residual_attn.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/rezero.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/rotary.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/sandwich-2.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/sandwich.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/sandwich_norm.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/scalenorm.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/talking-heads.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/topk-attention.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/images/xval.png +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/train_belief_state.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/train_copy.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/train_entropy_tokenizer.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/train_enwik8.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/train_length_extrapolate.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/train_parity.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/__init__.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/attend.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/autoregressive_wrapper.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/belief_state_wrapper.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/continuous.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/dpo.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/multi_input.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/neo_mlp.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/nonautoregressive_wrapper.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/x_transformers.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/xl_autoregressive_wrapper.py +0 -0
- {x_transformers-2.2.9 → x_transformers-2.2.11}/x_transformers/xval.py +0 -0
tests/test_x_transformers.py

@@ -800,6 +800,33 @@ def test_entropy_based_tokenizer(
 
     tokenizer(seq[0]) # able to handle without batch dim
 
+def test_entropy_based_tokenizer_max_token_len():
+    from x_transformers.entropy_based_tokenizer import EntropyBasedTokenizer
+
+    model = TransformerWrapper(
+        num_tokens = 20000,
+        max_seq_len = 1024,
+        attn_layers = Decoder(
+            dim = 128,
+            depth = 6,
+            heads = 8,
+            attn_dim_head = 64,
+        )
+    )
+
+    tokenizer = EntropyBasedTokenizer(
+        model,
+        entropy_threshold = 100,
+        max_token_size = 4
+    )
+
+    seq = torch.randint(0, 20000, (1, 16,))
+    lens = torch.tensor([14])
+
+    token_lengths = tokenizer(seq, lens = lens)
+
+    assert token_lengths.amax().item() <= 4
+
 def test_custom_ff_activation():
 
     model = TransformerWrapper(
x_transformers/entropy_based_tokenizer.py

@@ -1,3 +1,4 @@
+from __future__ import annotations
 from itertools import zip_longest
 
 import torch

@@ -115,6 +116,9 @@ class EntropyBasedTokenizer(Module):
 
         boundaries = boundaries | sub_seq_boundaries
 
+        if exists(mask):
+            boundaries = boundaries & mask
+
         # number of tokens
 
         num_tokens = boundaries.sum(dim = -1)