titans-pytorch 0.2.18__tar.gz → 0.2.19__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/PKG-INFO +1 -1
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/pyproject.toml +1 -1
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/tests/test_titans.py +2 -1
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/titans_pytorch/neural_memory.py +11 -6
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/.github/workflows/python-publish.yml +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/.github/workflows/test.yaml +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/.gitignore +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/LICENSE +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/README.md +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/data/README.md +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/data/enwik8.gz +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/fig1.png +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/fig2.png +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/titans_pytorch/__init__.py +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/titans_pytorch/associative_scan.py +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/titans_pytorch/mac_transformer.py +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/titans_pytorch/memory_models.py +0 -0
- {titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/train_mac.py +0 -0
{titans_pytorch-0.2.18 → titans_pytorch-0.2.19}/titans_pytorch/neural_memory.py

@@ -66,6 +66,12 @@ def xnor(x, y):
 def divisible_by(num, den):
     return (num % den) == 0
 
+def tuple_index_set(t: tuple, index, value):
+    klass = type(t)
+    t = list(t)
+    t[index] = value
+    return klass(*t)
+
 def safe_cat(inputs, dim = -2):
     inputs = tuple(filter(exists, inputs))
 
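
For readers skimming the diff: `tuple_index_set` exists because the memory state is passed around as an immutable tuple (apparently a NamedTuple), so replacing one field means copying the fields, swapping one, and rebuilding the original class. Note that `klass(*t)` assumes the class accepts its fields positionally, which holds for NamedTuples but not for a plain `tuple`. A minimal sketch, with a hypothetical NamedTuple standing in for the package's real state container:

from collections import namedtuple

# hypothetical stand-in for the library's memory-state tuple
State = namedtuple('State', ['seq_index', 'weights', 'updates'])

def tuple_index_set(t: tuple, index, value):
    klass = type(t)      # remember the concrete class (e.g. the NamedTuple subclass)
    t = list(t)          # tuples are immutable, so edit a mutable copy
    t[index] = value
    return klass(*t)     # rebuild the same class from the edited fields

state = State(seq_index = 0, weights = 'w0', updates = None)
state = tuple_index_set(state, 1, 'w1')

assert isinstance(state, State) and state.weights == 'w1'
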
@@ -296,7 +302,7 @@ class NeuralMemory(Module):
         init_adaptive_step_bias = None,
         init_momentum_bias = None,
         init_decay_bias = None,
-
+        accept_weight_residual = False,
         default_model_kwargs: dict = dict(
             depth = 2
         )
@@ -445,7 +451,7 @@ class NeuralMemory(Module):
             nn.Linear(dim, heads),
             Rearrange('b n h -> b h n'),
             nn.Sigmoid()
-        ) if
+        ) if accept_weight_residual else None
 
         # allow for softclamp the gradient norms for storing memories
 
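
For orientation, the new `accept_weight_residual` flag only controls whether the `to_learned_weight_residual_mix` head (the Linear → Rearrange → Sigmoid stack above) is built at all; with the default of `False` the attribute is `None` and the mixing shown in the next hunk is skipped. A hedged construction sketch, using illustrative README-style hyperparameters that are not part of this diff:

from titans_pytorch import NeuralMemory

# dim / chunk_size are illustrative values, not taken from this diff
mem = NeuralMemory(
    dim = 384,
    chunk_size = 64,
    accept_weight_residual = True   # new in 0.2.19; builds the learned weight-residual mix
)

# with the flag left off (the default), the mixing head is simply absent
mem_plain = NeuralMemory(dim = 384, chunk_size = 64)

assert mem.to_learned_weight_residual_mix is not None
assert mem_plain.to_learned_weight_residual_mix is None
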
@@ -584,7 +590,6 @@ class NeuralMemory(Module):
         if exists(self.to_learned_weight_residual_mix):
             mix = self.to_learned_weight_residual_mix(chunked_seq)
             mix = rearrange(mix, 'b h n -> (b h) n')
-
             prev_weights = prev_weights.apply(lambda t: einx.multiply('bh n, bh n ... -> bh n ...', mix, t))
 
         weights_for_surprise = weights_for_surprise + prev_weights
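
The `einx.multiply` call above broadcasts a per-(batch·head, chunk) sigmoid gate over every trailing parameter dimension of the previous weights before they are added as a residual. A small shape-only sketch of that broadcasting pattern (the tensor shapes are made up for illustration; the einx pattern string is taken verbatim from the diff):

import torch
import einx

mix = torch.rand(4, 16)          # (batch * heads, num_chunks) gate in [0, 1]
w = torch.randn(4, 16, 64, 64)   # (batch * heads, num_chunks, *weight_shape)

# multiply the gate into every trailing dimension of the weight tensor
gated = einx.multiply('bh n, bh n ... -> bh n ...', mix, w)

assert gated.shape == w.shape
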
@@ -854,9 +859,9 @@ class NeuralMemory(Module):
 
         weights = last_update
 
-        next_neural_mem_state =
-
-
+        next_neural_mem_state = tuple_index_set(next_neural_mem_state, 1, last_update)
+
+        next_neural_mem_state = tuple_index_set(next_neural_mem_state, -1, updates)
 
         # retrieve
 