hippoformer 0.0.12__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
1
+ # This workflow will upload a Python Package using Twine when a release is created
2
+ # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3
+
4
+ # This workflow uses actions that are not certified by GitHub.
5
+ # They are provided by a third party and are governed by
6
+ # separate terms of service, privacy policy, and support
7
+ # documentation.
8
+
9
+ name: Upload Python Package
10
+
11
+ on:
12
+ release:
13
+ types: [published]
14
+
15
+ jobs:
16
+ deploy:
17
+
18
+ runs-on: ubuntu-latest
19
+
20
+ steps:
21
+ - uses: actions/checkout@v2
22
+ - name: Set up Python
23
+ uses: actions/setup-python@v2
24
+ with:
25
+ python-version: '3.x'
26
+ - name: Install dependencies
27
+ run: |
28
+ python -m pip install --upgrade pip
29
+ pip install build
30
+ - name: Build package
31
+ run: python -m build
32
+ - name: Publish package
33
+ uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
34
+ with:
35
+ user: __token__
36
+ password: ${{ secrets.PYPI_API_TOKEN }}
@@ -0,0 +1,21 @@
1
+ name: Pytest
2
+ on: [push, pull_request]
3
+
4
+ jobs:
5
+ build:
6
+
7
+ runs-on: ubuntu-latest
8
+
9
+ steps:
10
+ - uses: actions/checkout@v4
11
+ - name: Set up Python 3.10
12
+ uses: actions/setup-python@v5
13
+ with:
14
+ python-version: "3.10"
15
+ - name: Install dependencies
16
+ run: |
17
+ python -m pip install --upgrade pip
18
+ python -m pip install -e .[test]
19
+ - name: Test with pytest
20
+ run: |
21
+ python -m pytest tests/
@@ -0,0 +1,207 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[codz]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py.cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # UV
98
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ #uv.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+ #poetry.toml
110
+
111
+ # pdm
112
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
113
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
114
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
115
+ #pdm.lock
116
+ #pdm.toml
117
+ .pdm-python
118
+ .pdm-build/
119
+
120
+ # pixi
121
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
122
+ #pixi.lock
123
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
124
+ # in the .venv directory. It is recommended not to include this directory in version control.
125
+ .pixi
126
+
127
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
128
+ __pypackages__/
129
+
130
+ # Celery stuff
131
+ celerybeat-schedule
132
+ celerybeat.pid
133
+
134
+ # SageMath parsed files
135
+ *.sage.py
136
+
137
+ # Environments
138
+ .env
139
+ .envrc
140
+ .venv
141
+ env/
142
+ venv/
143
+ ENV/
144
+ env.bak/
145
+ venv.bak/
146
+
147
+ # Spyder project settings
148
+ .spyderproject
149
+ .spyproject
150
+
151
+ # Rope project settings
152
+ .ropeproject
153
+
154
+ # mkdocs documentation
155
+ /site
156
+
157
+ # mypy
158
+ .mypy_cache/
159
+ .dmypy.json
160
+ dmypy.json
161
+
162
+ # Pyre type checker
163
+ .pyre/
164
+
165
+ # pytype static type analyzer
166
+ .pytype/
167
+
168
+ # Cython debug symbols
169
+ cython_debug/
170
+
171
+ # PyCharm
172
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
173
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
174
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
175
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
176
+ #.idea/
177
+
178
+ # Abstra
179
+ # Abstra is an AI-powered process automation framework.
180
+ # Ignore directories containing user credentials, local state, and settings.
181
+ # Learn more at https://abstra.io/docs
182
+ .abstra/
183
+
184
+ # Visual Studio Code
185
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
186
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
187
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
188
+ # you could uncomment the following to ignore the entire vscode folder
189
+ # .vscode/
190
+
191
+ # Ruff stuff:
192
+ .ruff_cache/
193
+
194
+ # PyPI configuration file
195
+ .pypirc
196
+
197
+ # Cursor
198
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
199
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
200
+ # refer to https://docs.cursor.com/context/ignore-files
201
+ .cursorignore
202
+ .cursorindexingignore
203
+
204
+ # Marimo
205
+ marimo/_static/
206
+ marimo/_lsp/
207
+ __marimo__/
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Phil Wang
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,65 @@
1
+ Metadata-Version: 2.4
2
+ Name: hippoformer
3
+ Version: 0.0.12
4
+ Summary: hippoformer
5
+ Project-URL: Homepage, https://pypi.org/project/hippoformer/
6
+ Project-URL: Repository, https://github.com/lucidrains/hippoformer
7
+ Author-email: Phil Wang <lucidrains@gmail.com>
8
+ License: MIT License
9
+
10
+ Copyright (c) 2025 Phil Wang
11
+
12
+ Permission is hereby granted, free of charge, to any person obtaining a copy
13
+ of this software and associated documentation files (the "Software"), to deal
14
+ in the Software without restriction, including without limitation the rights
15
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16
+ copies of the Software, and to permit persons to whom the Software is
17
+ furnished to do so, subject to the following conditions:
18
+
19
+ The above copyright notice and this permission notice shall be included in all
20
+ copies or substantial portions of the Software.
21
+
22
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
28
+ SOFTWARE.
29
+ License-File: LICENSE
30
+ Keywords: artificial intelligence,deep learning,hippocampus,memory
31
+ Classifier: Development Status :: 4 - Beta
32
+ Classifier: Intended Audience :: Developers
33
+ Classifier: License :: OSI Approved :: MIT License
34
+ Classifier: Programming Language :: Python :: 3.9
35
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
36
+ Requires-Python: >=3.9
37
+ Requires-Dist: assoc-scan
38
+ Requires-Dist: beartype
39
+ Requires-Dist: einops>=0.8.1
40
+ Requires-Dist: einx>=0.3.0
41
+ Requires-Dist: torch>=2.4
42
+ Requires-Dist: x-mlps-pytorch
43
+ Provides-Extra: examples
44
+ Provides-Extra: test
45
+ Requires-Dist: pytest; extra == 'test'
46
+ Description-Content-Type: text/markdown
47
+
48
+ <img src="./hippoformer-fig6.png" width="400px"></img>
49
+
50
+ ## Hippoformer (wip)
51
+
52
+ Implementation of [Hippoformer](https://openreview.net/forum?id=hxwV5EubAw), Integrating Hippocampus-inspired Spatial Memory with Transformers
53
+
54
+ ## Citations
55
+
56
+ ```bibtex
57
+ @inproceedings{anonymous2025hippoformer,
58
+ title = {Hippoformer: Integrating Hippocampus-inspired Spatial Memory with Transformers},
59
+ author = {Anonymous},
60
+ booktitle = {Submitted to The Fourteenth International Conference on Learning Representations},
61
+ year = {2025},
62
+ url = {https://openreview.net/forum?id=hxwV5EubAw},
63
+ note = {under review}
64
+ }
65
+ ```
@@ -0,0 +1,18 @@
1
+ <img src="./hippoformer-fig6.png" width="400px"></img>
2
+
3
+ ## Hippoformer (wip)
4
+
5
+ Implementation of [Hippoformer](https://openreview.net/forum?id=hxwV5EubAw), Integrating Hippocampus-inspired Spatial Memory with Transformers
6
+
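+ ## Usage
+
+ A minimal usage sketch of `mmTEM`, adapted from the test suite included in this package. The dimensions and the naive `nn.Linear` encoder / decoder pair below are illustrative placeholders, not a prescribed configuration:
+
+ ```python
+ import torch
+ from torch.nn import Linear
+ from hippoformer import mmTEM
+
+ # a naive sensory encoder / decoder pair, as used in the tests
+ enc_dec = (
+     Linear(11, 32),
+     Linear(32, 11)
+ )
+
+ model = mmTEM(
+     dim = 32,
+     sensory_encoder_decoder = enc_dec,
+     dim_sensory = 11,
+     dim_action = 7,
+     dim_structure = 32,
+     dim_encoded_sensory = 32
+ )
+
+ sensory = torch.randn(2, 16, 11)   # (batch, time, sensory dim)
+ actions = torch.randn(2, 16, 7)    # (batch, time, action dim)
+
+ loss = model(sensory, actions)
+ loss.backward()
+
+ # the meta memory mlp parameters can be returned and carried across segments
+ memory_params = model(sensory, actions, return_memory_mlp_params = True)
+ loss = model(sensory, actions, memory_mlp_params = memory_params)
+ ```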
7
+ ## Citations
8
+
9
+ ```bibtex
10
+ @inproceedings{anonymous2025hippoformer,
11
+ title = {Hippoformer: Integrating Hippocampus-inspired Spatial Memory with Transformers},
12
+ author = {Anonymous},
13
+ booktitle = {Submitted to The Fourteenth International Conference on Learning Representations},
14
+ year = {2025},
15
+ url = {https://openreview.net/forum?id=hxwV5EubAw},
16
+ note = {under review}
17
+ }
18
+ ```
@@ -0,0 +1,4 @@
1
+ from hippoformer.hippoformer import (
2
+ PathIntegration,
3
+ mmTEM
4
+ )
@@ -0,0 +1,627 @@
1
+ from __future__ import annotations
2
+
3
+ import torch
4
+ from torch import nn, Tensor, cat, stack, arange, zeros_like, einsum, tensor
5
+ import torch.nn.functional as F
6
+ from torch.nn import Module
7
+ from torch.jit import ScriptModule, script_method
8
+ from torch.func import vmap, grad, functional_call
9
+
10
+ from beartype import beartype
11
+
12
+ from einx import multiply
13
+ from einops import repeat, rearrange, pack, unpack
14
+ from einops.layers.torch import Rearrange
15
+
16
+ from x_mlps_pytorch import create_mlp
17
+
18
+ from assoc_scan import AssocScan
19
+
20
+ # helpers
21
+
22
+ def exists(v):
23
+ return v is not None
24
+
25
+ def default(v, d):
26
+ return v if exists(v) else d
27
+
28
+ def pack_with_inverse(t, pattern):
29
+ packed, packed_shape = pack([t], pattern)
30
+
31
+ def inverse(out, inv_pattern = None):
32
+ inv_pattern = default(inv_pattern, pattern)
33
+ unpacked, = unpack(out, packed_shape, inv_pattern)
34
+ return unpacked
35
+
36
+ return packed, inverse
37
+
38
+ def l2norm(t):
39
+ return F.normalize(t, dim = -1)
40
+
41
+ # Muon - Jordan et al., from the OSS community - applied as in the latest version of Titans
42
+
43
+ def newtonschulz5(
44
+ t,
45
+ steps = 5,
46
+ eps = 1e-7,
47
+ coefs = (3.4445, -4.7750, 2.0315)
48
+ ):
49
+ not_weights = t.ndim <= 3
50
+
51
+ if not_weights:
52
+ return t
53
+
54
+ shape = t.shape
55
+ should_transpose = shape[-2] > shape[-1]
56
+
57
+ if should_transpose:
58
+ t = t.transpose(-1, -2)
59
+
60
+ t, inv_pack = pack_with_inverse(t, '* i j')
61
+ t = t / t.norm(dim = (-1, -2), keepdim = True).clamp(min = eps)
62
+
63
+ a, b, c = coefs
64
+
65
+ for _ in range(steps):
66
+ A = t @ t.transpose(-1, -2)
67
+ B = b * A + c * A @ A
68
+ t = a * t + B @ t
69
+
70
+ if should_transpose:
71
+ t = t.transpose(-1, -2)
72
+
73
+ return inv_pack(t)
74
+
75
+ # sensory encoder decoder for 2d
76
+
77
+ grid_sensory_enc_dec = (
78
+ create_mlp(
79
+ dim = 32 * 2,
80
+ dim_in = 9,
81
+ dim_out = 32,
82
+ depth = 3,
83
+ ),
84
+ create_mlp(
85
+ dim = 32 * 2,
86
+ dim_in = 32,
87
+ dim_out = 9,
88
+ depth = 3,
89
+ ),
90
+ )
91
+
92
+ # sensory encoder decoder for 3d maze
93
+
94
+ class EncoderPackTime(Module):
95
+ def __init__(self, fn: Module):
96
+ super().__init__()
97
+ self.fn = fn
98
+
99
+ def forward(self, x):
100
+ x = rearrange(x, 'b c t h w -> b t c h w')
101
+ x, packed_shape = pack([x], '* c h w')
102
+
103
+ x = self.fn(x)
104
+
105
+ x, = unpack(x, packed_shape, '* d')
106
107
+ return x
108
+
109
+ class DecoderPackTime(Module):
110
+ def __init__(self, fn: Module):
111
+ super().__init__()
112
+ self.fn = fn
113
+
114
+ def forward(self, x):
115
+ x, packed_shape = pack([x], '* d')
116
+
117
+ x = self.fn(x)
118
+
119
+ x, = unpack(x, packed_shape, '* c h w')
120
+ x = rearrange(x, 'b t c h w -> b c t h w')
121
+ return x
122
+
123
+ maze_sensory_enc_dec = (
124
+ EncoderPackTime(nn.Sequential(
125
+ nn.Conv2d(3, 16, 7, 2, padding = 3),
126
+ nn.ReLU(),
127
+ nn.Conv2d(16, 32, 3, 2, 1),
128
+ nn.ReLU(),
129
+ nn.Conv2d(32, 64, 3, 2, 1),
130
+ nn.ReLU(),
131
+ nn.Conv2d(64, 128, 3, 2, 1),
132
+ nn.ReLU(),
133
+ Rearrange('b ... -> b (...)'),
134
+ nn.Linear(2048, 32)
135
+ )),
136
+ DecoderPackTime(nn.Sequential(
137
+ nn.Linear(32, 2048),
138
+ Rearrange('b (c h w) -> b c h w', c = 128, h = 4),
139
+ nn.ConvTranspose2d(128, 64, 3, 2, 1, output_padding = (1, 1)),
140
+ nn.ReLU(),
141
+ nn.ConvTranspose2d(64, 32, 3, 2, 1, output_padding = (1, 1)),
142
+ nn.ReLU(),
143
+ nn.ConvTranspose2d(32, 16, 3, 2, 1, output_padding = (1, 1)),
144
+ nn.ReLU(),
145
+ nn.ConvTranspose2d(16, 3, 3, 2, 1, output_padding = (1, 1))
146
+ ))
147
+ )
148
+
149
+ # path integration
150
+
151
+ class RNN(ScriptModule):
152
+ def __init__(
153
+ self,
154
+ dim,
155
+ ):
156
+ super().__init__()
157
+ self.init_hidden = nn.Parameter(torch.randn(1, dim) * 1e-2)
158
+
159
+ @script_method
160
+ def forward(
161
+ self,
162
+ transitions: Tensor,
163
+ hidden: Tensor | None = None
164
+ ) -> Tensor:
165
+
166
+ batch, seq_len = transitions.shape[:2]
167
+
168
+ if hidden is None:
169
+ hidden = l2norm(self.init_hidden)
170
+ hidden = hidden.expand(batch, -1)
171
+
172
+ hiddens: list[Tensor] = []
173
+
174
+ for i in range(seq_len):
175
+ transition = transitions[:, i]
176
+
177
+ hidden = einsum('b i, b i j -> b j', hidden, transition)
178
+ hidden = F.relu(hidden)
179
+ hidden = l2norm(hidden)
180
+
181
+ hiddens.append(hidden)
182
+
183
+ return stack(hiddens, dim = 1)
184
+
185
+ class PathIntegration(Module):
186
+ def __init__(
187
+ self,
188
+ dim_action,
189
+ dim_structure,
190
+ mlp_hidden_dim = None,
191
+ mlp_depth = 2
192
+ ):
193
+ # they use the same approach as in Ruiqi Gao's 2021 paper
194
+ super().__init__()
195
+
196
+ self.init_structure = nn.Parameter(torch.randn(dim_structure))
197
+
198
+ self.to_transitions = create_mlp(
199
+ default(mlp_hidden_dim, dim_action * 4),
200
+ dim_in = dim_action,
201
+ dim_out = dim_structure * dim_structure,
202
+ depth = mlp_depth
203
+ )
204
+
205
+ self.mlp_out_to_weights = Rearrange('... (i j) -> ... i j', j = dim_structure)
206
+
207
+ self.rnn = RNN(dim_structure)
208
+
209
+ def forward(
210
+ self,
211
+ actions, # (b n d)
212
+ prev_structural = None # (b n d) | (b d)
213
+ ):
214
+ batch = actions.shape[0]
215
+
216
+ transitions = self.to_transitions(actions)
217
+ transitions = self.mlp_out_to_weights(transitions)
218
+
219
+ if exists(prev_structural) and prev_structural.ndim == 3:
220
+ prev_structural = prev_structural[:, -1]
221
+
222
+ return self.rnn(transitions, prev_structural)
223
+
224
+ # custom transformer proposed by James Whittington that bridges to hippocampal models with a few twists
225
+
226
+ # the mmTEM can be seen as a linear attention / TTT variant of what he proposed
227
+ # needed for the baseline, as well as for the parallel block that bolsters local time prediction
228
+
229
+ # https://arxiv.org/abs/2112.04035
230
+
231
+ def FeedForward(dim, mult = 4.):
232
+ dim_inner = int(dim * mult)
233
+ return nn.Sequential(
234
+ nn.Linear(dim, dim_inner),
235
+ nn.GELU(),
236
+ nn.Linear(dim_inner, dim)
237
+ )
238
+
239
+ class Attention(Module):
240
+ def __init__(
241
+ self,
242
+ dim_q,
243
+ dim_kv,
244
+ window_size,
245
+ dim_head = 64,
246
+ heads = 8,
247
+ ):
248
+ super().__init__()
249
+ dim_inner = dim_head * heads
250
+ self.scale = dim_head ** -0.5
251
+
252
+ self.to_queries = nn.Linear(dim_q, dim_inner, bias = False)
253
+ self.to_key_values = nn.Linear(dim_kv, dim_inner * 2, bias = False)
254
+
255
+ self.split_heads = Rearrange('b n (h d) -> b h n d', h = heads)
256
+ self.merge_heads = Rearrange('b h n d -> b n (h d)')
257
+
258
+ self.window_size = window_size
259
+
260
+ self.to_out = nn.Linear(dim_inner, dim_q, bias = False)
261
+ self.attn_head_sink = nn.Parameter(torch.randn(heads) * 1e-2) # needed as the diagonal is masked out, and for attention sink
262
+
263
+ def forward(
264
+ self,
265
+ queries_input,
266
+ key_values_input,
267
+ kv_cache = None
268
+ ):
269
+ batch, seq_len, device = *queries_input.shape[:2], queries_input.device
270
+
271
+ q = self.to_queries(queries_input)
272
+
273
+ k, v = self.to_key_values(key_values_input).chunk(2, dim = -1)
274
+
275
+ q, k, v = tuple(self.split_heads(t) for t in (q, k, v))
276
+
277
+ if exists(kv_cache):
278
+ ck, cv = kv_cache
279
+ k = cat((ck, k), dim = -2)
280
+ v = cat((cv, v), dim = -2)
281
+
282
+ q = q * self.scale
283
+
284
+ sim = einsum('b h i d, b h j d -> b h i j', q, k)
285
+
286
+ # the diagonal is masked out
287
+
288
+ i, j = sim.shape[-2:]
289
+
290
+ j_seq = arange(j, device = device)[None, :]
291
+ i_seq = arange(i, device = device)[:, None] + (j - i)
292
+
293
+ windowed_causal_mask_without_diagonal = ~((j_seq < i_seq) & ((i_seq - j_seq) <= self.window_size)) # mask everything except strictly past positions within the window
294
+
295
+ sim = sim.masked_fill(windowed_causal_mask_without_diagonal, -torch.finfo(sim.dtype).max)
296
+
297
+ # attention sink - a learned per-head logit that gives tokens something to attend to and absorbs excess attention - from gpt-oss
298
+
299
+ attn_sink = repeat(self.attn_head_sink, 'h -> b h i 1', b = batch, i = seq_len)
300
+
301
+ sim = cat((attn_sink, sim), dim = -1)
302
+
303
+ attn = sim.softmax(dim = -1)
304
+
305
+ attn = attn[..., 1:] # remove sink
306
+
307
+ # aggregate
308
+
309
+ out = einsum('b h i j, b h j d -> b h i d', attn, v)
310
+
311
+ out = self.merge_heads(out)
312
+
313
+ return self.to_out(out), stack((k, v))
314
+
315
+ class TEMTransformerBlock(Module):
316
+ def __init__(
317
+ self,
318
+ dim_structure,
319
+ dim_encoded_sensory,
320
+ dim_head = 64,
321
+ heads = 8,
322
+ ff_expansion_factor = 4.,
323
+ window_size = 64
324
+ ):
325
+ super().__init__()
326
+
327
+ self.attn = Attention(dim_structure, dim_structure + dim_encoded_sensory, window_size, dim_head = dim_head, heads = heads)
328
+ self.ff = FeedForward(dim_structure, ff_expansion_factor)
329
+
330
+ self.window_size = window_size
331
+
332
+ def forward(
333
+ self,
334
+ structural_codes,
335
+ encoded_sensory,
336
+ kv_cache = None
337
+ ):
338
+ structure_and_sensory = cat((structural_codes, encoded_sensory), dim = -1)
339
+
340
+ retrieved, next_kv_cache = self.attn(structural_codes, structure_and_sensory, kv_cache = kv_cache)
341
+
342
+ x = retrieved + structural_codes
343
+
344
+ x = self.ff(x) + x
345
+
346
+ next_kv_cache = next_kv_cache[..., -self.window_size:, :] # truncate the stacked (k, v) cache to the last window_size timesteps
347
+
348
+ return x, next_kv_cache
349
+
350
+ # proposed mmTEM
351
+
352
+ class mmTEM(Module):
353
+ @beartype
354
+ def __init__(
355
+ self,
356
+ dim,
357
+ *,
358
+ sensory_encoder_decoder: tuple[Module, Module],
359
+ dim_sensory,
360
+ dim_action,
361
+ dim_encoded_sensory,
362
+ dim_structure,
363
+ meta_mlp_depth = 2,
364
+ decoder_mlp_depth = 2,
365
+ structure_variance_pred_mlp_depth = 2,
366
+ path_integrate_kwargs: dict = dict(),
367
+ loss_weight_generative = 1.,
368
+ loss_weight_inference = 1.,
369
+ loss_weight_consistency = 1.,
370
+ loss_weight_relational = 1.,
371
+ integration_ratio_learned = True,
372
+ muon_update = False,
373
+ assoc_scan_kwargs: dict = dict()
374
+ ):
375
+ super().__init__()
376
+
377
+ # sensory
378
+
379
+ sensory_encoder, sensory_decoder = sensory_encoder_decoder
380
+
381
+ self.sensory_encoder = sensory_encoder
382
+ self.sensory_decoder = sensory_decoder
383
+
384
+ dim_joint_rep = dim_encoded_sensory + dim_structure
385
+
386
+ self.dim_encoded_sensory = dim_encoded_sensory
387
+ self.dim_structure = dim_structure
388
+ self.joint_dims = (dim_structure, dim_encoded_sensory)
389
+
390
+ # path integrator
391
+
392
+ self.path_integrator = PathIntegration(
393
+ dim_action = dim_action,
394
+ dim_structure = dim_structure,
395
+ **path_integrate_kwargs
396
+ )
397
+
398
+ # meta mlp related
399
+
400
+ self.to_queries = nn.Linear(dim_joint_rep, dim, bias = False)
401
+ self.to_keys = nn.Linear(dim_joint_rep, dim, bias = False)
402
+ self.to_values = nn.Linear(dim_joint_rep, dim, bias = False)
403
+
404
+ self.to_learned_optim_hparams = nn.Linear(dim_joint_rep, 3, bias = False) # for learning rate, forget gate, and momentum
405
+ self.assoc_scan = AssocScan(**assoc_scan_kwargs)
406
+
407
+ self.meta_memory_mlp = create_mlp(
408
+ dim = dim * 2,
409
+ depth = meta_mlp_depth,
410
+ dim_in = dim,
411
+ dim_out = dim,
412
+ activation = nn.ReLU()
413
+ )
414
+
415
+ def forward_with_mse_loss(params, keys, values):
416
+ pred = functional_call(self.meta_memory_mlp, params, keys)
417
+ return F.mse_loss(pred, values)
418
+
419
+ grad_fn = grad(forward_with_mse_loss)
420
+
421
+ self.per_sample_grad_fn = vmap(vmap(grad_fn, in_dims = (None, 0, 0)), in_dims = (0, 0, 0))
422
+
423
+ # mlp decoder (from meta mlp output to joint)
424
+
425
+ self.memory_output_decoder = create_mlp(
426
+ dim = dim * 2,
427
+ dim_in = dim,
428
+ dim_out = dim_joint_rep,
429
+ depth = decoder_mlp_depth,
430
+ activation = nn.ReLU()
431
+ )
432
+
433
+ # the mlp that predicts the variance for the structural code
434
+ # for correcting the generated structural code, modeling the feedback from HC to MEC
435
+
436
+ self.structure_variance_pred_mlp = create_mlp(
437
+ dim = dim_structure * 2,
438
+ dim_in = dim_structure * 2 + 1,
439
+ dim_out = dim_structure,
440
+ depth = structure_variance_pred_mlp_depth
441
+ )
442
+
443
+ # loss related
444
+
445
+ self.loss_weight_generative = loss_weight_generative
446
+ self.loss_weight_inference = loss_weight_inference
447
+ self.loss_weight_relational = loss_weight_relational
448
+ self.loss_weight_consistency = loss_weight_consistency
449
+ self.register_buffer('zero', tensor(0.), persistent = False)
450
+
451
+ # update with muon
452
+
453
+ self.muon_update = muon_update
454
+
455
+ # there is an integration ratio for error correction, but unclear what value this is fixed to or whether it is learned
456
+
457
+ self.integration_ratio = nn.Parameter(tensor(0.), requires_grad = integration_ratio_learned)
458
+
459
+ def init_params_and_momentum(
460
+ self,
461
+ batch_size
462
+ ):
463
+
464
+ params_dict = dict(self.meta_memory_mlp.named_parameters())
465
+
466
+ params = {name: repeat(param, '... -> b ...', b = batch_size) for name, param in params_dict.items()}
467
+
468
+ momentums = {name: zeros_like(param) for name, param in params.items()}
469
+
470
+ return params, momentums
471
+
472
+ def retrieve(
473
+ self,
474
+ structural_codes,
475
+ encoded_sensory
476
+ ):
477
+ joint = cat((structural_codes, encoded_sensory), dim = -1)
478
+
479
+ queries = self.to_queries(joint)
480
+
481
+ retrieved = self.meta_memory_mlp(queries)
482
+
483
+ return self.memory_output_decoder(retrieved).split(self.joint_dims, dim = -1)
484
+
485
+ def forward(
486
+ self,
487
+ sensory,
488
+ actions,
489
+ memory_mlp_params = None,
490
+ return_losses = False,
491
+ return_memory_mlp_params = False
492
+ ):
493
+ batch = actions.shape[0]
494
+
495
+ structural_codes = self.path_integrator(actions)
496
+
497
+ encoded_sensory = self.sensory_encoder(sensory)
498
+
499
+ # 1. first have the structure code be able to fetch from the meta memory mlp
500
+
501
+ decoded_gen_structure, decoded_encoded_sensory = self.retrieve(structural_codes, zeros_like(encoded_sensory))
502
+
503
+ decoded_sensory = self.sensory_decoder(decoded_encoded_sensory)
504
+
505
+ generative_pred_loss = F.mse_loss(sensory, decoded_sensory)
506
+
507
+ # 2. relational
508
+
509
+ # 2a. structure from content
510
+
511
+ decoded_structure, decoded_encoded_sensory = self.retrieve(zeros_like(structural_codes), encoded_sensory)
512
+
513
+ structure_from_content_loss = F.mse_loss(decoded_structure, structural_codes)
514
+
515
+ # 2b. structure from structure
516
+
517
+ decoded_structure, decoded_encoded_sensory = self.retrieve(zeros_like(structural_codes), encoded_sensory)
518
+
519
+ structure_from_structure_loss = F.mse_loss(decoded_structure, structural_codes)
520
+
521
+ relational_loss = structure_from_content_loss + structure_from_structure_loss
522
+
523
+ # 3. consistency - modeling a feedback system from hippocampus to path integration
524
+
525
+ corrected_structural_code, corrected_encoded_sensory = self.retrieve(decoded_gen_structure, encoded_sensory)
526
+
527
+ sensory_sse = (corrected_encoded_sensory - encoded_sensory).norm(dim = -1, keepdim = True).pow(2)
528
+
529
+ pred_variance = self.structure_variance_pred_mlp(cat((corrected_structural_code, decoded_gen_structure, sensory_sse), dim = -1))
530
+
531
+ inf_structural_code = decoded_gen_structure + (corrected_structural_code - decoded_gen_structure) * self.integration_ratio.sigmoid() * pred_variance
532
+
533
+ consistency_loss = F.mse_loss(decoded_gen_structure, inf_structural_code)
534
+
535
+ # 4. final inference loss
536
+
537
+ final_structural_code, inf_encoded_sensory = self.retrieve(inf_structural_code, zeros_like(encoded_sensory))
538
+
539
+ decoded_inf_sensory = self.sensory_decoder(inf_encoded_sensory)
540
+
541
+ inference_pred_loss = F.mse_loss(sensory, decoded_inf_sensory)
542
+
543
+ # 5. store the final structural code from step 4 + encoded sensory
544
+
545
+ joint_code_to_store = cat((final_structural_code, encoded_sensory), dim = -1)
546
+
547
+ keys = self.to_keys(joint_code_to_store)
548
+ values = self.to_values(joint_code_to_store)
549
+
550
+ lr, forget, beta = self.to_learned_optim_hparams(joint_code_to_store).unbind(dim = -1)
551
+
552
+ if exists(memory_mlp_params):
553
+ params, momentums = memory_mlp_params
554
+ else:
555
+ params, momentums = self.init_params_and_momentum(batch)
556
+
557
+ # store by getting gradients of mse loss of keys and values
558
+
559
+ grads = self.per_sample_grad_fn(params, keys, values)
560
+
561
+ # update the meta mlp parameters and momentums
562
+
563
+ next_params = dict()
564
+ next_momentum = dict()
565
+
566
+ for (
567
+ (key, param),
568
+ (_, grad),
569
+ (_, momentum)
570
+ ) in zip(
571
+ params.items(),
572
+ grads.items(),
573
+ momentums.items()
574
+ ):
575
+
576
+ grad, inverse_pack = pack_with_inverse(grad, 'b t *')
577
+
578
+ grad = multiply('b t ..., b t', grad, lr)
579
+
580
+ expanded_beta = repeat(beta, 'b t -> b t w', w = grad.shape[-1])
581
+
582
+ update = self.assoc_scan(grad, expanded_beta.sigmoid(), momentum)
583
+
584
+ # store next momentum
585
+
586
+ next_momentum[key] = update[:, -1]
587
+
588
+ # maybe muon
589
+
590
+ if self.muon_update:
591
+ update = newtonschulz5(update)
592
+
593
+ # with forget gating
594
+
595
+ expanded_forget = repeat(forget, 'b t -> b t w', w = grad.shape[-1])
596
+
597
+ acc_update = self.assoc_scan(-update, expanded_forget.sigmoid(), param)
598
+
599
+ acc_update = inverse_pack(acc_update)
600
+
601
+ # set the next params and momentum, which can be passed back in
602
+
603
+ next_params[key] = acc_update[:, -1]
604
+
605
+ # losses
606
+
607
+ total_loss = (
608
+ generative_pred_loss * self.loss_weight_generative +
609
+ relational_loss * self.loss_weight_relational +
610
+ consistency_loss * self.loss_weight_consistency +
611
+ inference_pred_loss * self.loss_weight_inference
612
+ )
613
+
614
+ losses = (
615
+ generative_pred_loss,
616
+ relational_loss,
617
+ consistency_loss,
618
+ inference_pred_loss
619
+ )
620
+
621
+ if return_memory_mlp_params:
622
+ return next_params, next_momentum
623
+
624
+ if not return_losses:
625
+ return total_loss
626
+
627
+ return total_loss, losses
Binary file
@@ -0,0 +1,62 @@
1
+ [project]
2
+ name = "hippoformer"
3
+ version = "0.0.12"
4
+ description = "hippoformer"
5
+ authors = [
6
+ { name = "Phil Wang", email = "lucidrains@gmail.com" }
7
+ ]
8
+ readme = "README.md"
9
+ requires-python = ">= 3.9"
10
+ license = { file = "LICENSE" }
11
+ keywords = [
12
+ 'artificial intelligence',
13
+ 'deep learning',
14
+ 'memory',
15
+ 'hippocampus'
16
+ ]
17
+
18
+ classifiers = [
19
+ 'Development Status :: 4 - Beta',
20
+ 'Intended Audience :: Developers',
21
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
22
+ 'License :: OSI Approved :: MIT License',
23
+ 'Programming Language :: Python :: 3.9',
24
+ ]
25
+
26
+ dependencies = [
27
+ "assoc-scan",
28
+ "beartype",
29
+ "einx>=0.3.0",
30
+ "einops>=0.8.1",
31
+ "torch>=2.4",
32
+ "x-mlps-pytorch",
33
+ ]
34
+
35
+ [project.urls]
36
+ Homepage = "https://pypi.org/project/hippoformer/"
37
+ Repository = "https://github.com/lucidrains/hippoformer"
38
+
39
+ [project.optional-dependencies]
40
+ examples = []
41
+ test = [
42
+ "pytest"
43
+ ]
44
+
45
+ [tool.pytest.ini_options]
46
+ pythonpath = [
47
+ "."
48
+ ]
49
+
50
+ [build-system]
51
+ requires = ["hatchling"]
52
+ build-backend = "hatchling.build"
53
+
54
+ [tool.rye]
55
+ managed = true
56
+ dev-dependencies = []
57
+
58
+ [tool.hatch.metadata]
59
+ allow-direct-references = true
60
+
61
+ [tool.hatch.build.targets.wheel]
62
+ packages = ["hippoformer"]
@@ -0,0 +1,79 @@
1
+ import pytest
2
+ param = pytest.mark.parametrize
3
+
4
+ import torch
5
+
6
+ def test_path_integrate():
7
+ from hippoformer.hippoformer import PathIntegration
8
+
9
+ path_integrator = PathIntegration(32, 64)
10
+
11
+ actions = torch.randn(2, 16, 32)
12
+
13
+ structure_codes = path_integrator(actions)
14
+ structure_codes = path_integrator(actions, structure_codes) # pass in previous structure codes; the last one is automatically used as the initial hidden state for the RNN
15
+
16
+ assert structure_codes.shape == (2, 16, 64)
17
+
18
+ @param('sensory_type', ('naive', '2d', '3d'))
19
+ @param('muon_update', (True, False))
20
+ def test_mm_tem(
21
+ sensory_type,
22
+ muon_update
23
+ ):
24
+ import torch
25
+ from hippoformer.hippoformer import mmTEM
26
+
27
+ from torch.nn import Linear
28
+
29
+ if sensory_type == 'naive':
30
+ enc_dec = (
31
+ Linear(11, 32),
32
+ Linear(32, 11)
33
+ )
34
+ sensory = torch.randn(2, 16, 11)
35
+
36
+ elif sensory_type == '2d':
37
+
38
+ from hippoformer.hippoformer import grid_sensory_enc_dec
39
+
40
+ enc_dec = grid_sensory_enc_dec
41
+ sensory = torch.randn(2, 16, 9)
42
+
43
+ elif sensory_type == '3d':
44
+
45
+ from hippoformer.hippoformer import maze_sensory_enc_dec
46
+
47
+ enc_dec = maze_sensory_enc_dec
48
+
49
+ sensory = torch.randn(2, 3, 16, 64, 64)
50
+
51
+ model = mmTEM(
52
+ dim = 32,
53
+ sensory_encoder_decoder = enc_dec,
54
+ dim_sensory = 11,
55
+ dim_action = 7,
56
+ dim_structure = 32,
57
+ dim_encoded_sensory = 32,
58
+ muon_update = muon_update
59
+ )
60
+
61
+ actions = torch.randn(2, 16, 7)
62
+
63
+ next_params = model(sensory, actions, return_memory_mlp_params = True)
64
+ next_params = model(sensory, actions, memory_mlp_params = next_params, return_memory_mlp_params = True)
65
+
66
+ loss = model(sensory, actions, memory_mlp_params = next_params)
67
+ loss.backward()
68
+
69
+ def test_tem_t():
70
+ from hippoformer.hippoformer import TEMTransformerBlock
71
+
72
+ block = TEMTransformerBlock(32, 16, window_size = 3)
73
+
74
+ structural_codes = torch.randn(1, 7, 32)
75
+ encoded_sensory = torch.randn(1, 7, 16)
76
+
77
+ pred, kv_cache = block(structural_codes, encoded_sensory)
78
+
79
+ assert pred.shape == (1, 7, 32)