titans-pytorch 0.3.5__tar.gz → 0.3.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/PKG-INFO +1 -1
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/pyproject.toml +1 -1
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/titans_pytorch/memory_models.py +0 -6
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/.github/workflows/python-publish.yml +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/.github/workflows/test.yaml +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/.gitignore +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/LICENSE +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/README.md +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/data/README.md +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/data/enwik8.gz +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/fig1.png +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/fig2.png +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/tests/test_titans.py +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/titans_pytorch/__init__.py +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/titans_pytorch/associative_scan.py +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/titans_pytorch/mac_transformer.py +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/titans_pytorch/neural_memory.py +0 -0
- {titans_pytorch-0.3.5 → titans_pytorch-0.3.6}/train_mac.py +0 -0
titans_pytorch/memory_models.py
@@ -103,8 +103,6 @@ class GatedResidualMemoryMLP(Module):
 
         self.final_proj = Parameter(torch.randn(dim, dim))
 
-        self.ln = LayerNorm(dim)
-
         for param in self.parameters():
             nn.init.xavier_uniform_(param)
 
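The only source change in this release is in titans_pytorch/memory_models.py: each of the three hunks deletes a self.ln = LayerNorm(dim) attribute. Since six lines are removed and none are added, forward() never referenced these norms; they were dead attributes whose only effect was registering extra parameters. A minimal sketch of the pattern (the constructor signature and surrounding code are simplified, not the package's full class):

    import torch
    from torch import nn
    from torch.nn import Module, Parameter

    class GatedResidualMemoryMLP(Module):
        # Sketch of the pattern only -- constructor signature and weights
        # are simplified relative to the package's actual class.
        def __init__(self, dim):
            super().__init__()
            self.final_proj = Parameter(torch.randn(dim, dim))

            # 0.3.5 also registered a LayerNorm here that forward() never used:
            #   self.ln = LayerNorm(dim)
            # 0.3.6 removes it, so ln.weight / ln.bias no longer show up in
            # state_dict() or parameters().

            for param in self.parameters():
                nn.init.xavier_uniform_(param)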
@@ -145,8 +143,6 @@ class FactorizedMemoryMLP(Module):
             ]) for _ in range(depth)
         ])
 
-        self.ln = LayerNorm(dim)
-
         for weight1, weight2 in self.weights:
             nn.init.xavier_uniform_(weight1)
             nn.init.xavier_uniform_(weight2)
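For context on the loop in this hunk: the weight1, weight2 pairs are the low-rank factors that give FactorizedMemoryMLP its name. A sketch of the layout they are drawn from, assuming a rank hyperparameter (the inner dimensions are my assumption; the diff only shows the tail of the definition):

    import torch
    from torch import nn
    from torch.nn import Parameter, ParameterList

    dim, rank, depth = 512, 64, 2  # illustrative sizes

    # Each layer stores its weight as a (dim x rank), (rank x dim) pair,
    # matching the `for weight1, weight2 in self.weights` loop in the hunk.
    weights = ParameterList([
        ParameterList([
            Parameter(torch.randn(dim, rank)),
            Parameter(torch.randn(rank, dim)),
        ]) for _ in range(depth)
    ])

    for weight1, weight2 in weights:
        nn.init.xavier_uniform_(weight1)
        nn.init.xavier_uniform_(weight2)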
@@ -187,8 +183,6 @@ class MemoryAttention(Module):
             nn.Parameter(torch.randn(dim_ff_hidden, dim)), # ff w2
         ])
 
-        self.ln = LayerNorm(dim)
-
         for weight in self.weights:
             nn.init.xavier_uniform_(weight)
 
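One practical consequence worth flagging (my inference, not stated in the diff): LayerNorm(dim) registers weight and bias parameters, so deleting self.ln changes each class's state_dict keys, and a checkpoint saved under 0.3.5 will carry stale ln.* entries. A sketch of loading such a checkpoint under 0.3.6 (the path and constructor arguments are hypothetical):

    import torch
    from titans_pytorch.memory_models import GatedResidualMemoryMLP

    # Hypothetical checkpoint path and constructor arguments.
    model = GatedResidualMemoryMLP(dim = 512, depth = 2)
    state = torch.load('titans_0_3_5_checkpoint.pt')

    # strict = False tolerates the ln.weight / ln.bias keys that the
    # 0.3.6 module no longer has.
    result = model.load_state_dict(state, strict = False)
    print(result.unexpected_keys)  # expect entries like 'ln.weight', 'ln.bias'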