titans-pytorch 0.2.18__tar.gz → 0.2.20__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/PKG-INFO +2 -2
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/README.md +1 -1
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/pyproject.toml +1 -1
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/tests/test_titans.py +2 -1
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/titans_pytorch/mac_transformer.py +1 -1
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/titans_pytorch/neural_memory.py +11 -6
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/train_mac.py +1 -1
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/.github/workflows/python-publish.yml +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/.github/workflows/test.yaml +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/.gitignore +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/LICENSE +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/data/README.md +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/data/enwik8.gz +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/fig1.png +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/fig2.png +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/titans_pytorch/__init__.py +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/titans_pytorch/associative_scan.py +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/titans_pytorch/memory_models.py +0 -0
{titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: titans-pytorch
-Version: 0.2.18
+Version: 0.2.20
 Summary: Titans
 Project-URL: Homepage, https://pypi.org/project/titans-pytorch/
 Project-URL: Repository, https://github.com/lucidrains/titans-pytorch
{titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/PKG-INFO (the same citation fix appears in README.md)

@@ -204,6 +204,6 @@ $ python train_mac.py
     eprint = {2501.12352},
     archivePrefix = {arXiv},
     primaryClass = {cs.LG},
-    url
+    url = {https://arxiv.org/abs/2501.12352},
 }
 ```
{titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/titans_pytorch/mac_transformer.py

@@ -560,7 +560,7 @@ class MemoryAsContextTransformer(Module):
                 chunk_size = self.neural_memory_segment_len,
                 batch_size = neural_memory_batch_size,
                 model = deepcopy(neural_memory_model),
-
+                accept_weight_residual = neural_mem_weight_residual and not is_first_neural_mem,
                 **neural_memory_kwargs
             )
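The new `accept_weight_residual` argument is switched on only from the second neural memory layer onward, since the first such layer has no previous weights to draw from. A minimal, self-contained sketch of that gating logic; only the names `neural_mem_weight_residual`, `is_first_neural_mem` and `accept_weight_residual` come from the diff, while the loop and `neural_memory_layers` are assumptions for illustration:

```python
neural_mem_weight_residual = True   # global toggle, as set in train_mac.py
neural_memory_layers = (2, 4)       # hypothetical: which layers carry a NeuralMemory

is_first_neural_mem = True

for layer in range(1, 5):
    if layer not in neural_memory_layers:
        continue

    # the first neural memory layer never accepts a weight residual
    accept_weight_residual = neural_mem_weight_residual and not is_first_neural_mem
    print(layer, accept_weight_residual)   # 2 -> False, 4 -> True

    is_first_neural_mem = False
```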
{titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/titans_pytorch/neural_memory.py

@@ -66,6 +66,12 @@ def xnor(x, y):
 def divisible_by(num, den):
     return (num % den) == 0

+def tuple_index_set(t: tuple, index, value):
+    klass = type(t)
+    t = list(t)
+    t[index] = value
+    return klass(*t)
+
 def safe_cat(inputs, dim = -2):
     inputs = tuple(filter(exists, inputs))
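A quick sanity check of the new helper. Note that `klass(*t)` re-invokes the constructor with the fields as positional arguments, which is what lets it functionally update the NamedTuple state it is applied to later in this diff (a plain `tuple` would raise, since `tuple()` takes a single iterable). The `State` fields below are invented for illustration and are not the actual `NeuralMemState` fields:

```python
from collections import namedtuple

def tuple_index_set(t: tuple, index, value):
    klass = type(t)
    t = list(t)
    t[index] = value
    return klass(*t)

# hypothetical stand-in for the package's NamedTuple state
State = namedtuple('State', ['seq_index', 'weights', 'updates'])

s = State(seq_index = 0, weights = 'w0', updates = 'u0')
s = tuple_index_set(s, 1, 'w1')    # functional update of field 1
s = tuple_index_set(s, -1, 'u1')   # negative indices work as well

assert s == State(0, 'w1', 'u1')
```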
titans_pytorch/neural_memory.py

@@ -296,7 +302,7 @@ class NeuralMemory(Module):
         init_adaptive_step_bias = None,
         init_momentum_bias = None,
         init_decay_bias = None,
-
+        accept_weight_residual = False,
         default_model_kwargs: dict = dict(
             depth = 2
         )
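For standalone use the flag is just another constructor argument, off by default. A hedged sketch along the lines of the README's basic example (the dimension, chunk size, and the returned `(retrieved, mem_state)` pair follow that example; when the flag is enabled, it is the MAC transformer that supplies the previous layer's weights):

```python
import torch
from titans_pytorch import NeuralMemory

mem = NeuralMemory(
    dim = 384,
    chunk_size = 64,
    accept_weight_residual = False   # new in this release, defaults to False
)

seq = torch.randn(2, 1024, 384)
retrieved, mem_state = mem(seq)
```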
titans_pytorch/neural_memory.py

@@ -445,7 +451,7 @@ class NeuralMemory(Module):
             nn.Linear(dim, heads),
             Rearrange('b n h -> b h n'),
             nn.Sigmoid()
-        ) if
+        ) if accept_weight_residual else None

         # allow for softclamp the gradient norms for storing memories

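The gate itself is small: a linear head projecting each chunk's representation down to one sigmoid value per memory head. A sketch of the shapes involved (the batch and chunk counts are made up, and the `(b, n, d)` layout of the input is inferred from the rearrange pattern):

```python
import torch
from torch import nn
from einops.layers.torch import Rearrange

dim, heads = 384, 4

to_learned_weight_residual_mix = nn.Sequential(
    nn.Linear(dim, heads),
    Rearrange('b n h -> b h n'),
    nn.Sigmoid()
)

chunked_seq = torch.randn(2, 8, dim)                # (batch, chunks, dim)
mix = to_learned_weight_residual_mix(chunked_seq)
print(mix.shape)                                     # (2, 4, 8) = (b, h, n), values in (0, 1)
```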
titans_pytorch/neural_memory.py

@@ -584,7 +590,6 @@ class NeuralMemory(Module):
         if exists(self.to_learned_weight_residual_mix):
             mix = self.to_learned_weight_residual_mix(chunked_seq)
             mix = rearrange(mix, 'b h n -> (b h) n')
-
             prev_weights = prev_weights.apply(lambda t: einx.multiply('bh n, bh n ... -> bh n ...', mix, t))

         weights_for_surprise = weights_for_surprise + prev_weights
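The `einx.multiply` pattern broadcasts the per-chunk gate over every trailing dimension of each weight tensor held in `prev_weights`. Reproducing just that broadcast in isolation (the trailing `(32, 32)` weight shape is invented):

```python
import torch
import einx

bh, n = 8, 16                    # (batch * heads), number of chunks
mix = torch.rand(bh, n)          # sigmoid gates in (0, 1)
t = torch.randn(bh, n, 32, 32)   # one per-chunk weight tensor out of prev_weights

out = einx.multiply('bh n, bh n ... -> bh n ...', mix, t)
assert out.shape == t.shape
```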
titans_pytorch/neural_memory.py

@@ -854,9 +859,9 @@ class NeuralMemory(Module):

         weights = last_update

-        next_neural_mem_state =
-
-
+        next_neural_mem_state = tuple_index_set(next_neural_mem_state, 1, last_update)
+
+        next_neural_mem_state = tuple_index_set(next_neural_mem_state, -1, updates)

         # retrieve

{titans_pytorch-0.2.18 → titans_pytorch-0.2.20}/train_mac.py

@@ -38,10 +38,10 @@ NEURAL_MEM_MAX_LR = 1e-1
 WINDOW_SIZE = 32
 NEURAL_MEM_SEGMENT_LEN = 4 # set smaller for more granularity for learning rate / momentum etc
 NEURAL_MEM_BATCH_SIZE = 128 # set smaller to update the neural memory weights more often as it traverses the sequence
-NEURAL_MEM_WEIGHT_RESIDUAL = False
 SLIDING_WINDOWS = True
 STORE_ATTN_POOL_CHUNKS = True # whether to use attention pooling for chunk derived momentum, per-layer lr mod, decay
 MEMORY_MODEL_PER_LAYER_LEARNED_LR = True
+NEURAL_MEM_WEIGHT_RESIDUAL = True # learning to accept contributions from the weights of the previous neural mem layer brings about significant improvements. this was improvised and not in the paper, but inspired by the value residual learning free lunch paper

 # experiment related

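At the transformer level the training script presumably forwards this flag as a constructor argument. A sketch following the README's MAC example (the kwargs surrounding `neural_mem_weight_residual` are taken from that example rather than from this diff, so treat the exact set as an assumption):

```python
import torch
from titans_pytorch import MemoryAsContextTransformer

transformer = MemoryAsContextTransformer(
    num_tokens = 256,
    dim = 256,
    depth = 2,
    segment_len = 128,
    num_persist_mem_tokens = 4,
    num_longterm_mem_tokens = 16,
    neural_mem_weight_residual = True,   # wired through to per-layer accept_weight_residual
)

ids = torch.randint(0, 256, (1, 1023))
logits = transformer(ids)
```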
All remaining files listed above are unchanged between 0.2.18 and 0.2.20.