tiny-recursive-model 0.0.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
+ # This workflow will upload a Python Package using Twine when a release is created
+ # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
+
+ # This workflow uses actions that are not certified by GitHub.
+ # They are provided by a third-party and are governed by
+ # separate terms of service, privacy policy, and support
+ # documentation.
+
+ name: Upload Python Package
+
+ on:
+   release:
+     types: [published]
+
+ jobs:
+   deploy:
+
+     runs-on: ubuntu-latest
+
+     steps:
+     - uses: actions/checkout@v2
+     - name: Set up Python
+       uses: actions/setup-python@v2
+       with:
+         python-version: '3.x'
+     - name: Install dependencies
+       run: |
+         python -m pip install --upgrade pip
+         pip install build
+     - name: Build package
+       run: python -m build
+     - name: Publish package
+       uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29
+       with:
+         user: __token__
+         password: ${{ secrets.PYPI_API_TOKEN }}
@@ -0,0 +1,21 @@
+ name: Pytest
+ on: [push, pull_request]
+
+ jobs:
+   build:
+
+     runs-on: ubuntu-latest
+
+     steps:
+     - uses: actions/checkout@v4
+     - name: Set up Python 3.10
+       uses: actions/setup-python@v5
+       with:
+         python-version: "3.10"
+     - name: Install dependencies
+       run: |
+         python -m pip install --upgrade pip
+         python -m pip install -e .[test]
+     - name: Test with pytest
+       run: |
+         python -m pytest tests/
@@ -0,0 +1,207 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Phil Wang
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,70 @@
+ Metadata-Version: 2.4
+ Name: tiny-recursive-model
+ Version: 0.0.1
+ Summary: Tiny Recursive Model
+ Project-URL: Homepage, https://pypi.org/project/tiny-recursive-model/
+ Project-URL: Repository, https://github.com/lucidrains/tiny-recursive-model
+ Author-email: Phil Wang <lucidrains@gmail.com>
+ License: MIT License
+
+ Copyright (c) 2025 Phil Wang
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ License-File: LICENSE
+ Keywords: artificial intelligence,deep learning,reasoning
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.9
+ Requires-Dist: accelerate
+ Requires-Dist: einops>=0.8.1
+ Requires-Dist: ema-pytorch
+ Requires-Dist: torch>=2.4
+ Requires-Dist: x-transformers
+ Provides-Extra: examples
+ Provides-Extra: test
+ Requires-Dist: pytest; extra == 'test'
+ Description-Content-Type: text/markdown
+
+
+ <img width="300" alt="trm-fig1" src="https://github.com/user-attachments/assets/950db79e-5f9c-4fec-a4e4-7b9355b39ce8" />
+
+ ## Tiny Recursive Model (TRM) (wip)
+
+ Implementation of [Tiny Recursive Model](https://arxiv.org/abs/2510.04871) (TRM) by [Alexia Jolicoeur-Martineau](https://ajolicoeur.wordpress.com/about/), an improvement to [HRM](https://github.com/lucidrains/hrm) from Sapient AI
+
+ The official repository is [here](https://github.com/SamsungSAILMontreal/TinyRecursiveModels)
+
+ <img width="300" alt="trm-fig3" src="https://github.com/user-attachments/assets/bfe3dd2a-e859-492a-84d5-faf37339f534" />
+
+ ## Citations
+
+ ```bibtex
+ @misc{jolicoeurmartineau2025morerecursivereasoningtiny,
+     title = {Less is More: Recursive Reasoning with Tiny Networks},
+     author = {Alexia Jolicoeur-Martineau},
+     year = {2025},
+     eprint = {2510.04871},
+     archivePrefix = {arXiv},
+     primaryClass = {cs.LG},
+     url = {https://arxiv.org/abs/2510.04871},
+ }
+ ```
@@ -0,0 +1,24 @@
+
+ <img width="300" alt="trm-fig1" src="https://github.com/user-attachments/assets/950db79e-5f9c-4fec-a4e4-7b9355b39ce8" />
+
+ ## Tiny Recursive Model (TRM) (wip)
+
+ Implementation of [Tiny Recursive Model](https://arxiv.org/abs/2510.04871) (TRM) by [Alexia Jolicoeur-Martineau](https://ajolicoeur.wordpress.com/about/), an improvement to [HRM](https://github.com/lucidrains/hrm) from Sapient AI
+
+ The official repository is [here](https://github.com/SamsungSAILMontreal/TinyRecursiveModels)
+
+ <img width="300" alt="trm-fig3" src="https://github.com/user-attachments/assets/bfe3dd2a-e859-492a-84d5-faf37339f534" />
+
+ ## Citations
+
+ ```bibtex
+ @misc{jolicoeurmartineau2025morerecursivereasoningtiny,
+     title = {Less is More: Recursive Reasoning with Tiny Networks},
+     author = {Alexia Jolicoeur-Martineau},
+     year = {2025},
+     eprint = {2510.04871},
+     archivePrefix = {arXiv},
+     primaryClass = {cs.LG},
+     url = {https://arxiv.org/abs/2510.04871},
+ }
+ ```
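The README above ships no usage example, so here is a minimal sketch assembled from this release's test suite (model size, vocab, and sequence length are taken from the tests; this is not official documentation):

```python
# minimal usage sketch based on tests/ in this release - not official docs
import torch
from tiny_recursive_model import TinyRecursiveModel
from tiny_recursive_model.mlp_mixer_1d import MLPMixer1D

trm = TinyRecursiveModel(
    dim = 512,
    num_tokens = 256,
    network = MLPMixer1D(dim = 512, depth = 2, seq_len = 1024)
)

seq    = torch.randint(0, 256, (2, 1024)) # input token ids
answer = torch.randint(0, 256, (2, 1024)) # target token ids

outputs, latents = trm.get_initial() # learned initial output / latent embeddings

# one deep supervision step - outputs and latents come back detached,
# ready to be fed into the next step
loss, losses, outputs, latents, pred, halt = trm(seq, outputs, latents, labels = answer)
loss.backward()
```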
@@ -0,0 +1,60 @@
+ [project]
+ name = "tiny-recursive-model"
+ version = "0.0.1"
+ description = "Tiny Recursive Model"
+ authors = [
+     { name = "Phil Wang", email = "lucidrains@gmail.com" }
+ ]
+ readme = "README.md"
+ requires-python = ">= 3.9"
+ license = { file = "LICENSE" }
+ keywords = [
+     'artificial intelligence',
+     'deep learning',
+     'reasoning',
+ ]
+
+ classifiers = [
+     'Development Status :: 4 - Beta',
+     'Intended Audience :: Developers',
+     'Topic :: Scientific/Engineering :: Artificial Intelligence',
+     'License :: OSI Approved :: MIT License',
+     'Programming Language :: Python :: 3.9',
+ ]
+
+ dependencies = [
+     "accelerate",
+     "einops>=0.8.1",
+     "ema-pytorch",
+     "torch>=2.4",
+     "x-transformers",
+ ]
+
+ [project.urls]
+ Homepage = "https://pypi.org/project/tiny-recursive-model/"
+ Repository = "https://github.com/lucidrains/tiny-recursive-model"
+
+ [project.optional-dependencies]
+ examples = []
+ test = [
+     "pytest"
+ ]
+
+ [tool.pytest.ini_options]
+ pythonpath = [
+     "."
+ ]
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.rye]
+ managed = true
+ dev-dependencies = []
+
+ [tool.hatch.metadata]
+ allow-direct-references = true
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["tiny_recursive_model"]
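Given the metadata above, `pip install tiny-recursive-model` should pull in the runtime dependencies (`accelerate`, `einops>=0.8.1`, `ema-pytorch`, `torch>=2.4`, `x-transformers`); the Pytest workflow instead installs a checkout with `python -m pip install -e .[test]` to add the `pytest` extra.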
@@ -0,0 +1,70 @@
+ import pytest
+ param = pytest.mark.parametrize
+
+ import torch
+ from tiny_recursive_model.trm import TinyRecursiveModel, Trainer
+
+ @param('use_self_attn', (False, True))
+ def test_trm(
+     use_self_attn
+ ):
+     from torch.optim import AdamW
+
+     if use_self_attn:
+         from x_transformers import Encoder
+         network = Encoder(dim = 512, depth = 2)
+     else:
+         from tiny_recursive_model.mlp_mixer_1d import MLPMixer1D
+         network = MLPMixer1D(dim = 512, depth = 2, seq_len = 1024)
+
+     trm = TinyRecursiveModel(
+         dim = 512,
+         num_tokens = 256,
+         network = network
+     )
+
+     optim = AdamW(trm.parameters(), lr = 1e-4)
+
+     seq = torch.randint(0, 256, (2, 1024))
+     answer = torch.randint(0, 256, (2, 1024))
+
+     outputs, latents = trm.get_initial()
+
+     for _ in range(3):
+         loss, losses, outputs, latents, pred, halt = trm(seq, outputs, latents, labels = answer)
+
+         loss.backward()
+         optim.step()
+         optim.zero_grad()
+
+ def test_trainer():
+     from torch.utils.data import Dataset
+     from tiny_recursive_model.mlp_mixer_1d import MLPMixer1D
+
+     trm = TinyRecursiveModel(
+         dim = 16,
+         num_tokens = 256,
+         network = MLPMixer1D(
+             dim = 16,
+             depth = 2,
+             seq_len = 256
+         ),
+     )
+
+     class MockDataset(Dataset):
+         def __len__(self):
+             return 16
+
+         def __getitem__(self, idx):
+             inp = torch.randint(0, 256, (256,))
+             out = torch.randint(0, 256, (256,))
+             return inp, out
+
+     trainer = Trainer(
+         trm,
+         MockDataset(),
+         epochs = 1,
+         batch_size = 16
+     )
+
+     trainer()
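Both tests exercise the same pattern: `get_initial()` supplies the learned initial output and latent embeddings, and each call to the model is one supervision step whose detached `outputs` and `latents` are fed back in. `test_trm` drives three such steps by hand, with either a self-attention `Encoder` or `MLPMixer1D` as the shared network; `test_trainer` delegates the loop, halting included, to `Trainer`.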
@@ -0,0 +1,4 @@
+ from tiny_recursive_model.trm import (
+     TinyRecursiveModel,
+     Trainer
+ )
@@ -0,0 +1,46 @@
+ from functools import partial
+
+ from torch import nn
+ from torch.nn import Module, LayerNorm
+ from einops.layers.torch import Rearrange, Reduce
+
+ pair = lambda x: x if isinstance(x, tuple) else (x, x)
+
+ class PreNormResidual(Module):
+     def __init__(self, dim, fn):
+         super().__init__()
+         self.fn = fn
+         self.norm = LayerNorm(dim, bias = False)
+
+     def forward(self, x):
+         return self.fn(self.norm(x)) + x
+
+ def FeedForward(dim, dim_hidden, dropout = 0., dense = nn.Linear):
+     return nn.Sequential(
+         dense(dim, dim_hidden),
+         nn.GELU(),
+         nn.Dropout(dropout),
+         dense(dim_hidden, dim),
+         nn.Dropout(dropout)
+     )
+
+ def MLPMixer1D(*, dim, depth, seq_len, expansion_factor = 4, expansion_factor_token = 0.5, dropout = 0.):
+     chan_first, chan_last = partial(nn.Conv1d, kernel_size = 1), nn.Linear
+
+     return nn.Sequential(
+         *[nn.Sequential(
+             PreNormResidual(dim, FeedForward(seq_len, int(expansion_factor * dim), dropout, chan_first)),
+             PreNormResidual(dim, FeedForward(dim, int(expansion_factor_token * dim), dropout, chan_last))
+         ) for _ in range(depth)],
+         LayerNorm(dim, bias = False)
+     )
+
+ # quick test
+
+ if __name__ == '__main__':
+
+     import torch
+     tokens = torch.randn(1, 1024, 512)
+     mixer = MLPMixer1D(dim = 512, depth = 4, seq_len = 1024)
+
+     assert mixer(tokens).shape == tokens.shape
@@ -0,0 +1,232 @@
+ from __future__ import annotations
+ from contextlib import nullcontext
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ from torch.nn import Module, ModuleList
+ from torch.optim import AdamW
+ from torch.utils.data import Dataset, DataLoader
+
+ from einops import rearrange, repeat
+ from einops.layers.torch import Reduce, Rearrange
+
+ # network related
+
+ from x_transformers import Encoder
+ from tiny_recursive_model.mlp_mixer_1d import MLPMixer1D
+
+ # ema - apparently greatly helped with results
+
+ from ema_pytorch import EMA
+
+ # helpers
+
+ def exists(v):
+     return v is not None
+
+ def default(v, d):
+     return v if exists(v) else d
+
+ def range_from_one(n):
+     return range(1, n + 1)
+
+ def is_empty(t):
+     return t.numel() == 0
+
+ # classes
+
+ class TinyRecursiveModel(Module):
+     def __init__(
+         self,
+         *,
+         dim,
+         num_tokens,
+         network: Module,
+         num_refinement_blocks = 3,  # T in paper
+         num_latent_refinements = 6, # n in paper - 1 output refinement per n latent refinements
+         halt_loss_weight = 1.
+     ):
+         super().__init__()
+         assert num_refinement_blocks > 1
+
+         self.input_embed = nn.Embedding(num_tokens, dim)
+         self.output_init_embed = nn.Parameter(torch.randn(dim) * 1e-2)
+         self.latent_init_embed = nn.Parameter(torch.randn(dim) * 1e-2)
+
+         self.network = network
+
+         self.num_latent_refinements = num_latent_refinements
+         self.num_refinement_blocks = num_refinement_blocks
+
+         # prediction heads
+
+         self.to_pred = nn.Linear(dim, num_tokens, bias = False)
+
+         self.to_halt_pred = nn.Sequential(
+             Reduce('b n d -> b d', 'mean'),
+             nn.Linear(dim, 1, bias = False),
+             nn.Sigmoid(),
+             Rearrange('... 1 -> ...')
+         )
+
+         self.halt_loss_weight = halt_loss_weight
+
+     def refine_latent_then_output_once(
+         self,
+         inputs,  # (b n d)
+         outputs, # (b n d)
+         latents, # (b n d)
+     ):
+
+         # so it seems for this work, they use only one network
+         # the network learns to refine the latents if input is passed in, otherwise it refines the output
+
+         for _ in range(self.num_latent_refinements):
+
+             latents = self.network(outputs + latents + inputs)
+
+         outputs = self.network(outputs + latents)
+
+         return outputs, latents
+
+     def get_initial(self):
+         outputs = self.output_init_embed
+         latents = self.latent_init_embed
+
+         return outputs, latents
+
+     def deep_refinement(
+         self,
+         inputs,  # (b n d)
+         outputs, # (b n d)
+         latents, # (b n d)
+     ):
+
+         for i in range(self.num_refinement_blocks):
+
+             # only last round of refinement receives gradients
+
+             is_last = i == (self.num_refinement_blocks - 1)
+             context = torch.no_grad if not is_last else nullcontext
+
+             with context():
+                 outputs, latents = self.refine_latent_then_output_once(inputs, outputs, latents)
+
+         return outputs, latents
+
+     def forward(
+         self,
+         seq,
+         outputs,
+         latents,
+         labels = None
+     ):
+         inputs = self.input_embed(seq)
+
+         outputs, latents = self.deep_refinement(inputs, outputs, latents)
+
+         pred = self.to_pred(outputs)
+
+         should_halt = self.to_halt_pred(outputs)
+
+         outputs, latents = outputs.detach(), latents.detach()
+
+         return_package = (outputs, latents, pred, should_halt)
+
+         if not exists(labels):
+             return return_package
+
+         # calculate loss if labels passed in
+
+         loss = F.cross_entropy(rearrange(pred, 'b n l -> b l n'), labels)
+
+         is_all_correct = (pred.argmax(dim = -1) == labels).all(dim = -1)
+
+         halt_loss = F.binary_cross_entropy(should_halt, is_all_correct.float())
+
+         # total loss and loss breakdown
+
+         total_loss = loss + halt_loss * self.halt_loss_weight
+         losses = (loss, halt_loss)
+
+         return (total_loss, losses, *return_package)
+
+ # trainer
+
+ class Trainer(Module):
+     def __init__(
+         self,
+         model: TinyRecursiveModel | Module,
+         dataset: Dataset,
+         optim_klass = AdamW,
+         learning_rate = 1e-4,
+         weight_decay = 1.,
+         batch_size = 16,
+         epochs = 2,
+         halt_prob_thres = 0.5,
+         max_recurrent_steps = 12,
+         ema_decay_rate = 0.999,
+         ema_update_model_with_ema_every = 10000
+     ):
+         super().__init__()
+
+         self.batch_size = batch_size
+         self.epochs = epochs
+
+         self.dataset = dataset
+         self.dataloader = dataloader = DataLoader(self.dataset, batch_size = self.batch_size, shuffle = True)
+
+         self.optim = optim_klass(
+             model.parameters(),
+             lr = learning_rate,
+             weight_decay = weight_decay
+         )
+
+         self.model = model
+
+         self.ema_model = EMA(
+             model,
+             beta = ema_decay_rate,
+             update_model_with_ema_every = ema_update_model_with_ema_every
+         )
+
+         self.halt_prob_thres = halt_prob_thres
+
+         self.max_recurrent_steps = max_recurrent_steps
+
+     def forward(self):
+
+         for epoch in range_from_one(self.epochs):
+
+             for dataset_input, dataset_output in self.dataloader:
+
+                 outputs, latents = self.model.get_initial()
+
+                 for recurrent_step in range_from_one(self.max_recurrent_steps):
+
+                     loss, (main_loss, halt_loss), outputs, latents, pred, halt = self.model(dataset_input, outputs, latents, labels = dataset_output)
+
+                     print(f'[{epoch} ({recurrent_step} / {self.max_recurrent_steps})] loss: {main_loss.item():.3f} | halt loss: {halt_loss.item():.3f}')
+
+                     loss.backward()
+
+                     self.optim.step()
+                     self.optim.zero_grad()
+
+                     self.ema_model.update()
+
+                     # handle halting
+
+                     halt_mask = halt >= self.halt_prob_thres
+
+                     if not halt_mask.any():
+                         continue
+
+                     outputs = outputs[~halt_mask]
+                     latents = latents[~halt_mask]
+                     dataset_input = dataset_input[~halt_mask]
+                     dataset_output = dataset_output[~halt_mask]
+
+                     if is_empty(outputs):
+                         break
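This release ships no inference helper. Below is a sketch of how prediction could look, mirroring `Trainer`'s halting logic; the step cap, threshold, and greedy argmax decode are assumptions, not part of the package:

```python
# inference sketch - assumed, not shipped with this release
import torch

@torch.no_grad()
def predict(trm, seq, max_steps = 12, halt_thres = 0.5):
    outputs, latents = trm.get_initial()

    for _ in range(max_steps):
        # without labels, forward returns (outputs, latents, pred, halt)
        outputs, latents, pred, halt = trm(seq, outputs, latents)

        # stop once the halt head clears the threshold for the whole batch
        if (halt >= halt_thres).all():
            break

    return pred.argmax(dim = -1) # greedy decode - an assumption
```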