tiny-recursive-model 0.0.1 (tiny_recursive_model-0.0.1-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- /dev/null
+++ b/tiny_recursive_model/__init__.py
@@ -0,0 +1,4 @@
+ from tiny_recursive_model.trm import (
+     TinyRecursiveModel,
+     Trainer
+ )
--- /dev/null
+++ b/tiny_recursive_model/mlp_mixer_1d.py
@@ -0,0 +1,46 @@
+ from functools import partial
+
+ from torch import nn
+ from torch.nn import Module, LayerNorm
+ from einops.layers.torch import Rearrange, Reduce
+
+ pair = lambda x: x if isinstance(x, tuple) else (x, x)
+
+ class PreNormResidual(Module):
+     def __init__(self, dim, fn):
+         super().__init__()
+         self.fn = fn
+         self.norm = LayerNorm(dim, bias = False)
+
+     def forward(self, x):
+         return self.fn(self.norm(x)) + x
+
+ def FeedForward(dim, dim_hidden, dropout = 0., dense = nn.Linear):
+     return nn.Sequential(
+         dense(dim, dim_hidden),
+         nn.GELU(),
+         nn.Dropout(dropout),
+         dense(dim_hidden, dim),
+         nn.Dropout(dropout)
+     )
+
+ def MLPMixer1D(*, dim, depth, seq_len, expansion_factor = 4, expansion_factor_token = 0.5, dropout = 0.):
+     chan_first, chan_last = partial(nn.Conv1d, kernel_size = 1), nn.Linear
+
+     return nn.Sequential(
+         *[nn.Sequential(
+             PreNormResidual(dim, FeedForward(seq_len, int(expansion_factor * dim), dropout, chan_first)),
+             PreNormResidual(dim, FeedForward(dim, int(expansion_factor_token * dim), dropout, chan_last))
+         ) for _ in range(depth)],
+         LayerNorm(dim, bias = False)
+     )
+
+ # quick test
+
+ if __name__ == '__main__':
+
+     import torch
+     tokens = torch.randn(1, 1024, 512)
+     mixer = MLPMixer1D(dim = 512, depth = 4, seq_len = 1024)
+
+     assert mixer(tokens).shape == tokens.shape
--- /dev/null
+++ b/tiny_recursive_model/trm.py
@@ -0,0 +1,232 @@
+ from __future__ import annotations
+ from contextlib import nullcontext
+
+ import torch
+ from torch import nn
+ import torch.nn.functional as F
+ from torch.nn import Module, ModuleList
+ from torch.optim import AdamW
+ from torch.utils.data import Dataset, DataLoader
+
+ from einops import rearrange, repeat
+ from einops.layers.torch import Reduce, Rearrange
+
+ # network related
+
+ from x_transformers import Encoder
+ from tiny_recursive_model.mlp_mixer_1d import MLPMixer1D
+
+ # ema - apparently greatly helped with results
+
+ from ema_pytorch import EMA
+
+ # helpers
+
+ def exists(v):
+     return v is not None
+
+ def default(v, d):
+     return v if exists(v) else d
+
+ def range_from_one(n):
+     return range(1, n + 1)
+
+ def is_empty(t):
+     return t.numel() == 0
+
+ # classes
+
+ class TinyRecursiveModel(Module):
+     def __init__(
+         self,
+         *,
+         dim,
+         num_tokens,
+         network: Module,
+         num_refinement_blocks = 3,  # T in paper
+         num_latent_refinements = 6, # n in paper - 1 output refinement per N latent refinements
+         halt_loss_weight = 1.
+     ):
+         super().__init__()
+         assert num_refinement_blocks > 1
+
+         self.input_embed = nn.Embedding(num_tokens, dim)
+         self.output_init_embed = nn.Parameter(torch.randn(dim) * 1e-2)
+         self.latent_init_embed = nn.Parameter(torch.randn(dim) * 1e-2)
+
+         self.network = network
+
+         self.num_latent_refinements = num_latent_refinements
+         self.num_refinement_blocks = num_refinement_blocks
+
+         # prediction heads
+
+         self.to_pred = nn.Linear(dim, num_tokens, bias = False)
+
+         self.to_halt_pred = nn.Sequential(
+             Reduce('b n d -> b d', 'mean'),
+             nn.Linear(dim, 1, bias = False),
+             nn.Sigmoid(),
+             Rearrange('... 1 -> ...')
+         )
+
+         self.halt_loss_weight = halt_loss_weight
+
+     def refine_latent_then_output_once(
+         self,
+         inputs,  # (b n d)
+         outputs, # (b n d)
+         latents, # (b n d)
+     ):
+
+         # so it seems for this work, they use only one network
+         # the network learns to refine the latents if input is passed in, otherwise it refines the output
+
+         for _ in range(self.num_latent_refinements):
+
+             latents = self.network(outputs + latents + inputs)
+
+         outputs = self.network(outputs + latents)
+
+         return outputs, latents
+
+     def get_initial(self):
+         outputs = self.output_init_embed
+         latents = self.latent_init_embed
+
+         return outputs, latents
+
+     def deep_refinement(
+         self,
+         inputs,  # (b n d)
+         outputs, # (b n d)
+         latents, # (b n d)
+     ):
+
+         for i in range(self.num_refinement_blocks):
+
+             # only last round of refinement receives gradients
+
+             is_last = i == (self.num_refinement_blocks - 1)
+             context = torch.no_grad if not is_last else nullcontext
+
+             with context():
+                 outputs, latents = self.refine_latent_then_output_once(inputs, outputs, latents)
+
+         return outputs, latents
+
+     def forward(
+         self,
+         seq,
+         outputs,
+         latents,
+         labels = None
+     ):
+         inputs = self.input_embed(seq)
+
+         outputs, latents = self.deep_refinement(inputs, outputs, latents)
+
+         pred = self.to_pred(outputs)
+
+         should_halt = self.to_halt_pred(outputs)
+
+         outputs, latents = outputs.detach(), latents.detach()
+
+         return_package = (outputs, latents, pred, should_halt)
+
+         if not exists(labels):
+             return return_package
+
+         # calculate loss if labels passed in
+
+         loss = F.cross_entropy(rearrange(pred, 'b n l -> b l n'), labels)
+
+         is_all_correct = (pred.argmax(dim = -1) == labels).all(dim = -1)
+
+         halt_loss = F.binary_cross_entropy(should_halt, is_all_correct.float())
+
+         # total loss and loss breakdown
+
+         total_loss = loss + halt_loss * self.halt_loss_weight
+         losses = (loss, halt_loss)
+
+         return (total_loss, losses, *return_package)
+
+ # trainer
+
+ class Trainer(Module):
+     def __init__(
+         self,
+         model: TinyRecursiveModel | Module,
+         dataset: Dataset,
+         optim_klass = AdamW,
+         learning_rate = 1e-4,
+         weight_decay = 1.,
+         batch_size = 16,
+         epochs = 2,
+         halt_prob_thres = 0.5,
+         max_recurrent_steps = 12,
+         ema_decay_rate = 0.999,
+         ema_update_model_with_ema_every = 10000
+     ):
+         super().__init__()
+
+         self.batch_size = batch_size
+         self.epochs = epochs
+
+         self.dataset = dataset
+         self.dataloader = dataloader = DataLoader(self.dataset, batch_size = self.batch_size, shuffle = True)
+
+         self.optim = optim_klass(
+             model.parameters(),
+             lr = learning_rate,
+             weight_decay = weight_decay
+         )
+
+         self.model = model
+
+         self.ema_model = EMA(
+             model,
+             beta = ema_decay_rate,
+             update_model_with_ema_every = ema_update_model_with_ema_every
+         )
+
+         self.halt_prob_thres = halt_prob_thres
+
+         self.max_recurrent_steps = max_recurrent_steps
+
+     def forward(self):
+
+         for epoch in range_from_one(self.epochs):
+
+             for dataset_input, dataset_output in self.dataloader:
+
+                 outputs, latents = self.model.get_initial()
+
+                 for recurrent_step in range_from_one(self.max_recurrent_steps):
+
+                     loss, (main_loss, halt_loss), outputs, latents, pred, halt = self.model(dataset_input, outputs, latents, labels = dataset_output)
+
+                     print(f'[{epoch} ({recurrent_step} / {self.max_recurrent_steps})] loss: {main_loss.item():.3f} | halt loss: {halt_loss.item():.3f}')
+
+                     loss.backward()
+
+                     self.optim.step()
+                     self.optim.zero_grad()
+
+                     self.ema_model.update()
+
+                     # handle halting
+
+                     halt_mask = halt >= self.halt_prob_thres
+
+                     if not halt_mask.any():
+                         continue
+
+                     outputs = outputs[~halt_mask]
+                     latents = latents[~halt_mask]
+                     dataset_input = dataset_input[~halt_mask]
+                     dataset_output = dataset_output[~halt_mask]
+
+                     if is_empty(outputs):
+                         break
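Taken together, the two modules above compose as follows. This is a minimal training sketch and not part of the released wheel: the toy copy dataset, its vocabulary size, and every hyperparameter value are invented for illustration, while the constructor arguments and call convention mirror the `TinyRecursiveModel`, `Trainer`, and `MLPMixer1D` signatures in the hunks above.

```python
# minimal usage sketch (not in the wheel) - wires MLPMixer1D in as the refinement network
import torch
from torch.utils.data import Dataset

from tiny_recursive_model import TinyRecursiveModel, Trainer
from tiny_recursive_model.mlp_mixer_1d import MLPMixer1D

class ToyCopyDataset(Dataset):
    # hypothetical dataset: each item is an (input tokens, target tokens) pair of equal length
    def __init__(self, num_samples = 256, seq_len = 16, num_tokens = 10):
        self.data = torch.randint(0, num_tokens, (num_samples, seq_len))

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, idx):
        seq = self.data[idx]
        return seq, seq  # learn the identity mapping, just to exercise the training loop

model = TinyRecursiveModel(
    dim = 64,
    num_tokens = 10,
    network = MLPMixer1D(dim = 64, depth = 2, seq_len = 16),  # seq_len must match the dataset
    num_refinement_blocks = 3,   # T in the paper
    num_latent_refinements = 6   # n in the paper
)

trainer = Trainer(
    model,
    dataset = ToyCopyDataset(),
    batch_size = 16,
    epochs = 1
)

trainer()  # Trainer is a Module, so calling it runs the training loop in forward()
```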
--- /dev/null
+++ b/tiny_recursive_model-0.0.1.dist-info/METADATA
@@ -0,0 +1,70 @@
+ Metadata-Version: 2.4
+ Name: tiny-recursive-model
+ Version: 0.0.1
+ Summary: Tiny Recursive Model
+ Project-URL: Homepage, https://pypi.org/project/tiny-recursive-model/
+ Project-URL: Repository, https://github.com/lucidrains/tiny-recursive-model
+ Author-email: Phil Wang <lucidrains@gmail.com>
+ License: MIT License
+
+ Copyright (c) 2025 Phil Wang
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ License-File: LICENSE
+ Keywords: artificial intelligence,deep learning,reasoning
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.9
+ Requires-Dist: accelerate
+ Requires-Dist: einops>=0.8.1
+ Requires-Dist: ema-pytorch
+ Requires-Dist: torch>=2.4
+ Requires-Dist: x-transformers
+ Provides-Extra: examples
+ Provides-Extra: test
+ Requires-Dist: pytest; extra == 'test'
+ Description-Content-Type: text/markdown
+
+
+ <img width="300" alt="trm-fig1" src="https://github.com/user-attachments/assets/950db79e-5f9c-4fec-a4e4-7b9355b39ce8" />
+
+ ## Tiny Recursive Model (TRM) wip
+
+ Implementation of [Tiny Recursive Model](https://arxiv.org/abs/2510.04871) (TRM) by [Alexia Jolicoeur-Martineau](https://ajolicoeur.wordpress.com/about/), an improvement on [HRM](https://github.com/lucidrains/hrm) from Sapient AI
+
+ The official repository is [here](https://github.com/SamsungSAILMontreal/TinyRecursiveModels)
+
+ <img width="300" alt="trm-fig3" src="https://github.com/user-attachments/assets/bfe3dd2a-e859-492a-84d5-faf37339f534" />
+
+ ## Citations
+
+ ```bibtex
+ @misc{jolicoeurmartineau2025morerecursivereasoningtiny,
+     title = {Less is More: Recursive Reasoning with Tiny Networks},
+     author = {Alexia Jolicoeur-Martineau},
+     year = {2025},
+     eprint = {2510.04871},
+     archivePrefix = {arXiv},
+     primaryClass = {cs.LG},
+     url = {https://arxiv.org/abs/2510.04871},
+ }
+ ```
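The README above is marked wip and does not yet show usage. As a hedged illustration only, inference with the classes from `trm.py` earlier in this diff could look like the sketch below: the recurrent loop reuses the detached outputs and latents and stops once the model's own halt head crosses a threshold. The helper name, step budget, and threshold are made-up values, not part of the package.

```python
# hedged inference sketch (not part of the released package)
import torch
from tiny_recursive_model import TinyRecursiveModel

@torch.no_grad()
def predict(model: TinyRecursiveModel, seq: torch.Tensor, max_steps = 12, halt_thres = 0.5):
    # `model` is assumed to be a trained TinyRecursiveModel, `seq` a (batch, seq_len) LongTensor
    outputs, latents = model.get_initial()

    for _ in range(max_steps):
        # without labels, forward returns (outputs, latents, pred, should_halt)
        outputs, latents, pred, should_halt = model(seq, outputs, latents)

        if (should_halt >= halt_thres).all():
            break

    return pred.argmax(dim = -1)  # (batch, seq_len) predicted tokens
```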
--- /dev/null
+++ b/tiny_recursive_model-0.0.1.dist-info/RECORD
@@ -0,0 +1,7 @@
+ tiny_recursive_model/__init__.py,sha256=UufV6--ilPn4quRWyhvaFRMKRfHvfLsAmF9RU-L31rM,77
+ tiny_recursive_model/mlp_mixer_1d.py,sha256=6ivDK9dgHdVl1axg2ayifJ7H5QI3hXptHnb6lfNrno0,1398
+ tiny_recursive_model/trm.py,sha256=YwzTod4CeeXlbAiM-TBB7rEEHWsxnPxavaGiVCTPMEM,6350
+ tiny_recursive_model-0.0.1.dist-info/METADATA,sha256=G-cM7okuLAiOxhofXoRh2Ih-bwYifcA3AAhmYmKo-v4,3107
+ tiny_recursive_model-0.0.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ tiny_recursive_model-0.0.1.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
+ tiny_recursive_model-0.0.1.dist-info/RECORD,,
--- /dev/null
+++ b/tiny_recursive_model-0.0.1.dist-info/WHEEL
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
--- /dev/null
+++ b/tiny_recursive_model-0.0.1.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Phil Wang
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.