titans-pytorch 0.3.3__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- titans_pytorch/associative_scan.py +87 -0
- titans_pytorch/neural_memory.py +41 -82
- {titans_pytorch-0.3.3.dist-info → titans_pytorch-0.3.4.dist-info}/METADATA +1 -1
- titans_pytorch-0.3.4.dist-info/RECORD +9 -0
- titans_pytorch-0.3.3.dist-info/RECORD +0 -9
- {titans_pytorch-0.3.3.dist-info → titans_pytorch-0.3.4.dist-info}/WHEEL +0 -0
- {titans_pytorch-0.3.3.dist-info → titans_pytorch-0.3.4.dist-info}/licenses/LICENSE +0 -0
titans_pytorch/associative_scan.py
CHANGED

```diff
@@ -3,18 +3,39 @@ from typing import Callable
 
 import torch
 from torch import Tensor
+from torch.nn import Module
 import torch.nn.functional as F
 
+from einops import rearrange, repeat, reduce, pack, unpack
+
 # taken from S5-pytorch repository
 # https://github.com/i404788/s5-pytorch/blob/74e2fdae00b915a62c914bf3615c0b8a4279eb84/s5/jax_compat.py#L51-L134
 
 # helper functions
 
+def exists(v):
+    return v is not None
+
+def default(*args):
+    for arg in args:
+        if exists(arg):
+            return arg
+    return None
+
 def pad_at_dim(t, pad, dim = -1, value = 0.):
     dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
     zeros = ((0, 0) * dims_from_right)
     return F.pad(t, (*zeros, *pad), value = value)
 
+def pack_one_with_inverse(t, pattern):
+    packed, packed_shape = pack([t], pattern)
+
+    def inverse(out, inv_pattern = None):
+        inv_pattern = default(inv_pattern, pattern)
+        return unpack(out, packed_shape, inv_pattern)[0]
+
+    return packed, inverse
+
 # the operator that is needed
 
 @torch.jit.script
```
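The new `pack_one_with_inverse` helper pairs einops' `pack` with a closure that undoes it. A minimal sketch of how it behaves (the shapes here are illustrative, not from the library):

```python
import torch
from einops import pack, unpack

def pack_one_with_inverse(t, pattern):
    # flatten the axes matched by '*' in the pattern, remembering their shape
    packed, packed_shape = pack([t], pattern)

    def inverse(out, inv_pattern = None):
        # unflatten, optionally into a different pattern
        inv_pattern = inv_pattern if inv_pattern is not None else pattern
        return unpack(out, packed_shape, inv_pattern)[0]

    return packed, inverse

t = torch.randn(2, 8, 4, 16)                       # (batch, seq, heads, dim)
flat, inverse = pack_one_with_inverse(t, 'b n *')  # -> (2, 8, 64)
assert inverse(flat).shape == (2, 8, 4, 16)        # packing undone
```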
```diff
@@ -88,3 +109,69 @@ def _interleave(a, b):
     interleaved = torch.flatten(stacked, start_dim=1, end_dim=2)
 
     return interleaved[:, :output_axis_len]
+
+# associative scan wrapper around naive and accelerated version
+
+class AssocScan(Module):
+    def __init__(
+        self,
+        use_accelerated = False
+    ):
+        super().__init__()
+        self.use_accelerated = use_accelerated
+
+    def forward(
+        self,
+        gates,
+        inputs,
+        prev = None,
+        remove_prev = None
+    ):
+        remove_prev = default(remove_prev, exists(prev))
+
+        inputs, inverse_pack_weight_shape = pack_one_with_inverse(inputs, 'b n *')
+        gates, _ = pack_one_with_inverse(gates, 'b n *')
+
+        if exists(prev):
+            prev, _ = pack_one_with_inverse(prev, 'b *')
+
+        if exists(prev):
+            inputs, _ = pack([prev, inputs], 'b * d')
+            gates = pad_at_dim(gates, (1, 0), value = 1., dim = -2)
+
+        if not self.use_accelerated:
+            _, out = associative_scan(binary_operator, (gates, inputs))
+
+            if remove_prev:
+                out = out[:, 1:]
+
+            return inverse_pack_weight_shape(out)
+
+        from accelerated_scan.triton import scan as triton_scan
+        from accelerated_scan.warp import scan as warp_scan
+
+        scan = triton_scan if gates.is_cuda else warp_scan
+
+        def accelerate_scan_fn(gates, inputs):
+            gates = gates.expand_as(inputs)
+            gates, inputs = tuple(rearrange(t, 'b n d -> b d n') for t in (gates, inputs))
+
+            seq_len = gates.shape[-1]
+            next_power_two_seq_len = 2 ** max(5, int(math.ceil(math.log2(seq_len))))
+
+            gates = F.pad(gates, (0, next_power_two_seq_len - seq_len))
+            inputs = F.pad(inputs, (0, next_power_two_seq_len - seq_len))
+
+            outputs = scan(gates.contiguous(), inputs.contiguous())
+
+            outputs = outputs[..., :seq_len]
+            outputs = rearrange(outputs, 'b d n -> b n d')
+
+            return outputs
+
+        out = accelerate_scan_fn(gates, inputs)
+
+        if remove_prev:
+            out = out[:, 1:]
+
+        return inverse_pack_weight_shape(out)
```
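`AssocScan` now lives here (it is removed from neural_memory.py below) and wraps both the naive `associative_scan` above and the optional accelerated-scan kernels. A hypothetical usage sketch, treating `gates` as per-step decays so the scan realizes the recurrence h_t = gate_t * h_{t-1} + input_t:

```python
import torch
from titans_pytorch.associative_scan import AssocScan

scan = AssocScan(use_accelerated = False)  # naive associative_scan path

gates  = torch.rand(2, 16, 64)   # per-step decay factors in (0, 1)
inputs = torch.randn(2, 16, 64)

out = scan(gates, inputs)        # running decayed sum, shape (2, 16, 64)

# seed the recurrence with a carried state, e.g. across chunks; prev is
# prepended internally with a gate of 1 and stripped from the output,
# since remove_prev defaults to exists(prev)
prev = torch.randn(2, 64)
out = scan(gates, inputs, prev = prev)  # still (2, 16, 64)
```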
titans_pytorch/neural_memory.py
CHANGED
```diff
@@ -8,16 +8,12 @@ from collections import namedtuple
 import torch
 from torch import nn, cat, tensor, Tensor
 import torch.nn.functional as F
-from torch.nn import Linear, Module, Parameter, ParameterList
+from torch.nn import Linear, Module, Parameter, ParameterList, ParameterDict
 from torch.func import functional_call, vmap, grad
 
 from tensordict import TensorDict
 
-from titans_pytorch.associative_scan import (
-    associative_scan,
-    binary_operator,
-    pad_at_dim
-)
+from titans_pytorch.associative_scan import AssocScan
 
 from titans_pytorch.memory_models import(
     MemoryMLP
```
```diff
@@ -79,8 +75,8 @@ def safe_cat(inputs, dim = -2):
 def is_empty_tensor(t):
     return t.numel() == 0
 
-def
-return
+def dict_get_value_shapes(td):
+    return [v.shape for k, v in td.items()]
 
 def rearrange_dict_values(td, pattern, **kwargs):
     return td.apply(lambda t: rearrange(t, pattern, **kwargs))
```
```diff
@@ -97,6 +93,11 @@ def round_down_multiple(seq, mult):
 def round_up_multiple(seq, mult):
     return math.ceil(seq / mult) * mult
 
+def pad_at_dim(t, pad, dim = -1, value = 0.):
+    dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
+    zeros = ((0, 0) * dims_from_right)
+    return F.pad(t, (*zeros, *pad), value = value)
+
 def pack_one_with_inverse(t, pattern):
     packed, packed_shape = pack([t], pattern)
 
```
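`pad_at_dim` (re-added here as a local helper) builds the flat pad tuple `F.pad` expects: one `(0, 0)` pair per dimension to the right of `dim`, then the requested `(left, right)` pair for `dim` itself. A quick worked example with illustrative shapes:

```python
import torch
import torch.nn.functional as F

def pad_at_dim(t, pad, dim = -1, value = 0.):
    dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
    zeros = ((0, 0) * dims_from_right)           # no-op pads for trailing dims
    return F.pad(t, (*zeros, *pad), value = value)

t = torch.randn(2, 16, 64)
padded = pad_at_dim(t, (1, 0), dim = -2, value = 1.)  # one step at the front
assert padded.shape == (2, 17, 64)  # how AssocScan prepends a gate of 1 for prev
```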
```diff
@@ -197,72 +198,6 @@ class AttentionPool(Module):
 
         return reduce(x * attn, 'b n c d -> b n d', 'sum')
 
-# associative scan wrapper
-
-class AssocScan(Module):
-    def __init__(
-        self,
-        use_accelerated = False
-    ):
-        super().__init__()
-        self.use_accelerated = use_accelerated
-
-    def forward(
-        self,
-        gates,
-        inputs,
-        prev = None,
-        remove_prev = None
-    ):
-        remove_prev = default(remove_prev, exists(prev))
-
-        inputs, inverse_pack_weight_shape = pack_one_with_inverse(inputs, 'b n *')
-        gates, _ = pack_one_with_inverse(gates, 'b n *')
-
-        if exists(prev):
-            prev, _ = pack_one_with_inverse(prev, 'b *')
-
-        if exists(prev):
-            inputs, _ = pack([prev, inputs], 'b * d')
-            gates = pad_at_dim(gates, (1, 0), value = 1., dim = -2)
-
-        if not self.use_accelerated:
-            _, out = associative_scan(binary_operator, (gates, inputs))
-
-            if remove_prev:
-                out = out[:, 1:]
-
-            return inverse_pack_weight_shape(out)
-
-        from accelerated_scan.triton import scan as triton_scan
-        from accelerated_scan.warp import scan as warp_scan
-
-        scan = triton_scan if gates.is_cuda else warp_scan
-
-        def accelerate_scan_fn(gates, inputs):
-            gates = gates.expand_as(inputs)
-            gates, inputs = tuple(rearrange(t, 'b n d -> b d n') for t in (gates, inputs))
-
-            seq_len = gates.shape[-1]
-            next_power_two_seq_len = 2 ** max(5, int(math.ceil(math.log2(seq_len))))
-
-            gates = F.pad(gates, (0, next_power_two_seq_len - seq_len))
-            inputs = F.pad(inputs, (0, next_power_two_seq_len - seq_len))
-
-            outputs = scan(gates.contiguous(), inputs.contiguous())
-
-            outputs = outputs[..., :seq_len]
-            outputs = rearrange(outputs, 'b d n -> b n d')
-
-            return outputs
-
-        out = accelerate_scan_fn(gates, inputs)
-
-        if remove_prev:
-            out = out[:, 1:]
-
-        return inverse_pack_weight_shape(out)
-
 # main neural memory
 
 def default_adaptive_step_transform(adaptive_step, max_lr = 1e-2):
```
```diff
@@ -285,6 +220,7 @@ class NeuralMemory(Module):
         default_step_transform_max_lr = 1.,
         per_parameter_lr_modulation = False, # allow outer network to control learning rate per weight matrix of memory network
         max_mem_layer_modulation = 1., # max of 10.
+        per_head_learned_parameters = True,
         attn_pool_chunks = False,
         momentum = True,
         pre_rmsnorm = True,
```
```diff
@@ -370,9 +306,21 @@ class NeuralMemory(Module):
 
         self.memory_model = model
 
-
+        mem_model_params = dict(model.named_parameters())
+
+        self.num_memory_parameter_tensors = len(mem_model_params)
 
-        self.
+        self.memory_model_parameter_names = [*mem_model_params.keys()]
+
+        memory_model_parameters = [*mem_model_params.values()]
+
+        if per_head_learned_parameters:
+            memory_model_parameters = [repeat(p, '... -> h ...', h = heads) for p in memory_model_parameters]
+
+        self.init_weight_shape = [p.shape for p in memory_model_parameters]
+
+        self.memory_model_parameters = ParameterList(memory_model_parameters)
+        self.per_head_learned_parameters = per_head_learned_parameters
 
         # the chunk size within the paper where adaptive step, momentum, weight decay are shared
 
```
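The new `per_head_learned_parameters` flag (default `True`) gives every head its own learned copy of the memory model's initial weights by prefixing a head axis. A sketch of the repeat, with illustrative shapes:

```python
import torch
from einops import repeat

heads = 4
weight = torch.randn(64, 64)  # one weight matrix of the memory MLP

# '... -> h ...' prefixes a head axis; each head starts from the same values
# but becomes a separately learnable tensor once stored in the ParameterList
per_head = repeat(weight, '... -> h ...', h = heads)
assert per_head.shape == (4, 64, 64)  # (heads, dim, dim)
```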
```diff
@@ -488,21 +436,32 @@ class NeuralMemory(Module):
 
         self.register_buffer('zero', torch.tensor(0.), persistent = False)
 
+    @property
+    def memory_model_parameter_dict(self):
+        return TensorDict(dict(zip(self.memory_model_parameter_names, self.memory_model_parameters)))
+
     def init_weights(
         self,
         batch,
     ):
-
-
+        if self.per_head_learned_parameters:
+            weights = repeat_dict_values(self.memory_model_parameter_dict, 'h ... -> (b h) ...', b = batch)
+        else:
+            weights = repeat_dict_values(self.memory_model_parameter_dict, '... -> bh ...', bh = batch * self.heads)
+
         return weights
 
     def init_momentum(
         self,
         batch,
     ):
-
-
-
+        zeros = self.memory_model_parameter_dict.clone().zero_()
+
+        if self.per_head_learned_parameters:
+            zeros = repeat_dict_values(zeros, 'h ... -> (b h) ...', b = batch)
+        else:
+            zeros = repeat_dict_values(zeros, '... -> bh ...', bh = batch * self.heads)
+
         return zeros
 
     def store_memories(
```
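`init_weights` and `init_momentum` now expand the learned parameter dict per batch element. A sketch of what they produce, assuming `repeat_dict_values` applies `einops.repeat` over a `TensorDict`, mirroring the `rearrange_dict_values` helper shown earlier in this file:

```python
import torch
from einops import repeat
from tensordict import TensorDict

def repeat_dict_values(td, pattern, **kwargs):
    # same shape as rearrange_dict_values, but with einops.repeat
    return td.apply(lambda t: repeat(t, pattern, **kwargs))

heads, batch = 4, 2
params = TensorDict({'w0': torch.randn(heads, 64, 64)})  # per-head parameters

# fold the batch into the head axis -> one copy per (batch, head) pair
expanded = repeat_dict_values(params, 'h ... -> (b h) ...', b = batch)
assert expanded['w0'].shape == (8, 64, 64)
```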
```diff
@@ -694,7 +653,7 @@ class NeuralMemory(Module):
     ):
         chunk_size = self.retrieve_chunk_size
 
-        weights_have_expanded_shape =
+        weights_have_expanded_shape = dict_get_value_shapes(weights) != self.init_weight_shape
 
         batch, seq_len = seq.shape[:2]
 
```
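The retrieval path flags batched ("expanded") weights by comparing the current value shapes against the shapes recorded at init. In miniature, with illustrative names and shapes:

```python
import torch

def dict_get_value_shapes(td):
    return [v.shape for k, v in td.items()]

init_weight_shape = [torch.Size([4, 64, 64])]  # recorded in __init__
weights = {'w0': torch.randn(8, 64, 64)}       # after init_weights(batch = 2)

# any extra leading batch axis makes the shape lists differ
weights_have_expanded_shape = dict_get_value_shapes(weights) != init_weight_shape
assert weights_have_expanded_shape
```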
titans_pytorch-0.3.4.dist-info/RECORD
ADDED

```diff
@@ -0,0 +1,9 @@
+titans_pytorch/__init__.py,sha256=Y3m_ZlpEqYwp-Md1ARhNGJxq8bQp8ty1o039nZOOJo0,276
+titans_pytorch/associative_scan.py,sha256=CEPXaZ2fEPWF8ZBe5wihCqPSGi8PNyL0uVSgvY7eV-s,5147
+titans_pytorch/mac_transformer.py,sha256=5rO4GQxSyFWWEc3pc3xNyG0sK5EXE7MmxKI-_kEMl2M,24941
+titans_pytorch/memory_models.py,sha256=0KLHZN-y_7lwrhWSnFRaYJ3GiUV3tzVjxS9CxIx_eI8,4843
+titans_pytorch/neural_memory.py,sha256=9eyeEvYsP5OFlwLDRyVut99uVYGvXAElFPabVoZnGJw,27063
+titans_pytorch-0.3.4.dist-info/METADATA,sha256=2ZD_DovSYkVejsTWHq7_IOTN-Je0of1f-HOiojaQBhQ,6815
+titans_pytorch-0.3.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+titans_pytorch-0.3.4.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
+titans_pytorch-0.3.4.dist-info/RECORD,,
```
titans_pytorch-0.3.3.dist-info/RECORD
DELETED

```diff
@@ -1,9 +0,0 @@
-titans_pytorch/__init__.py,sha256=Y3m_ZlpEqYwp-Md1ARhNGJxq8bQp8ty1o039nZOOJo0,276
-titans_pytorch/associative_scan.py,sha256=Y-iYqmFuG-NoCKu6kgql1mhowXTeJfyawi3eUIXamp0,2650
-titans_pytorch/mac_transformer.py,sha256=5rO4GQxSyFWWEc3pc3xNyG0sK5EXE7MmxKI-_kEMl2M,24941
-titans_pytorch/memory_models.py,sha256=0KLHZN-y_7lwrhWSnFRaYJ3GiUV3tzVjxS9CxIx_eI8,4843
-titans_pytorch/neural_memory.py,sha256=Ff-IBv-CCQAP7IYIpokPDoGtsvpzotAJsHB1d_-xd98,27934
-titans_pytorch-0.3.3.dist-info/METADATA,sha256=CutjohW8xSNycd5W-uyXC4827ubmIpAJCs9xoMbfZzo,6815
-titans_pytorch-0.3.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-titans_pytorch-0.3.3.dist-info/licenses/LICENSE,sha256=1yCiA9b5nhslTavxPjsQAO-wpOnwJR9-l8LTVi7GJuk,1066
-titans_pytorch-0.3.3.dist-info/RECORD,,
```
{titans_pytorch-0.3.3.dist-info → titans_pytorch-0.3.4.dist-info}/WHEEL
File without changes

{titans_pytorch-0.3.3.dist-info → titans_pytorch-0.3.4.dist-info}/licenses/LICENSE
File without changes