x-transformers 1.42.7__py3-none-any.whl → 1.42.9__py3-none-any.whl
- x_transformers/x_transformers.py +39 -18
- {x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/METADATA +1 -1
- {x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/RECORD +6 -6
- {x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/LICENSE +0 -0
- {x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/WHEEL +0 -0
- {x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/top_level.txt +0 -0
x_transformers/x_transformers.py
CHANGED
@@ -238,6 +238,13 @@ class TokenEmbedding(Module):
         token_emb = self.emb(x.long())
         return l2norm(token_emb) if self.l2norm_embed else token_emb
 
+    def init_(self):
+        if self.l2norm_embed:
+            nn.init.normal_(self.emb.weight, std=1e-5)
+            return
+        nn.init.kaiming_normal_(self.emb.weight)
+
+
 # positional embeddings
 
 class AbsolutePositionalEmbedding(Module):
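The new TokenEmbedding.init_ method moves the embedding-weight initialization that TransformerWrapper.init_ previously performed directly on token_emb.emb.weight into the embedding module itself. A minimal sketch of calling it in isolation, assuming TokenEmbedding's (dim, num_tokens, l2norm_embed) constructor; the sizes are illustrative:

import torch
from x_transformers.x_transformers import TokenEmbedding

# with l2norm_embed = True the weights start near zero (std 1e-5); otherwise Kaiming-normal
emb = TokenEmbedding(dim = 512, num_tokens = 20000, l2norm_embed = True)
emb.init_()

tokens = torch.randint(0, 20000, (1, 8))
out = emb(tokens)  # (1, 8, 512), l2-normalized since l2norm_embed = True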
@@ -445,13 +452,20 @@ class DynamicPositionBias(Module):
         return bias
 
 class AlibiPositionalBias(Module):
-    def __init__(
+    def __init__(
+        self,
+        heads,
+        total_heads = None,
+        slopes: list[int] | None = None,
+        **kwargs
+    ):
         super().__init__()
         self.heads = heads
         self.total_heads = default(total_heads, heads)
 
-        slopes = Tensor(self._get_slopes(heads))
+        slopes = Tensor(default(slopes, self._get_slopes(heads)))
         slopes = rearrange(slopes, 'h -> h 1 1')
+
         self.register_buffer('slopes', slopes, persistent = False)
         self.register_buffer('bias', None, persistent = False)
 
@@ -480,7 +494,10 @@ class AlibiPositionalBias(Module):
         h, device = self.total_heads, self.device
 
         pos_j = default(pos_j, pos_i)
-        bias = -einx.subtract('... j, ... i -> ...
+        bias = -einx.subtract('... j, ... i -> ... i j', pos_j, pos_i).abs()
+
+        if bias.ndim == 3:
+            bias = rearrange(bias, 'b i j -> b 1 i j')
 
         bias = bias * self.slopes
         num_heads_unalibied = h - bias.shape[-3]
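AlibiPositionalBias now accepts an explicit slopes list instead of always deriving a geometric sequence from the head count, and the stored buffer keeps the 'h 1 1' layout shown above. A minimal sketch; the slope values are illustrative:

import torch
from x_transformers.x_transformers import AlibiPositionalBias

# supply per-head slopes directly rather than letting _get_slopes(heads) pick them
alibi = AlibiPositionalBias(heads = 4, total_heads = 8, slopes = [1, 2, 4, 8])
print(alibi.slopes.shape)      # torch.Size([4, 1, 1])
print(alibi.slopes.flatten())  # tensor([1., 2., 4., 8.])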
@@ -1524,8 +1541,9 @@ class AttentionLayers(Module):
         use_layerscale = False,
         layerscale_init_value = 0.,
         unet_skips = False,
-        reinject_input = False,
-        add_value_residual = False,
+        reinject_input = False,  # seen first in DEQ paper https://arxiv.org/abs/1909.01377, but later used in a number of papers trying to achieve depthwise generalization https://arxiv.org/abs/2410.03020v1
+        add_value_residual = False,  # resformer from Zhou et al - https://arxiv.org/abs/2410.17897v1
+        rel_pos_kwargs: dict = dict(),
         **kwargs
     ):
         super().__init__()
@@ -1566,14 +1584,14 @@
 
         if rel_pos_bias:
             assert not flash_attn, 'flash attention not compatible with t5 relative positional bias'
-            self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance)
+            self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance, **rel_pos_kwargs)
         elif dynamic_pos_bias:
             assert not flash_attn, 'flash attention not compatible with dynamic positional bias'
-            self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm)
+            self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm, **rel_pos_kwargs)
         elif alibi_pos_bias:
             alibi_num_heads = default(alibi_num_heads, heads)
             assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads'
-            self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads)
+            self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads, **rel_pos_kwargs)
 
         assert at_most_one_of(sandwich_norm, resi_dual), 'either sandwich norm or resiDual is selected, but not both'
         assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm'
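The rel_pos_kwargs dict introduced in the AttentionLayers constructor is splatted into whichever relative-position module gets built (RelativePositionBias, DynamicPositionBias, or AlibiPositionalBias), so options such as the new ALiBi slopes can be set from the usual Decoder/Encoder interface. A hedged sketch, assuming the standard Decoder keyword arguments:

import torch
from x_transformers import Decoder

# rel_pos_kwargs is forwarded to AlibiPositionalBias(...), here overriding its slopes
decoder = Decoder(
    dim = 512,
    depth = 6,
    heads = 8,
    alibi_pos_bias = True,
    rel_pos_kwargs = dict(slopes = [1, 2, 4, 8, 16, 32, 64, 128]),  # one slope per ALiBi head
)

x = torch.randn(1, 128, 512)
out = decoder(x)  # (1, 128, 512)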
@@ -2261,7 +2279,8 @@ class TransformerWrapper(Module):
         token_emb: TokenEmbedding | None = None,
         mixture_of_softmax = False,
         mixture_of_softmax_k = 4,
-        sigsoftmax_logits = False
+        sigsoftmax_logits = False,
+        to_logits: Module | None = None,
     ):
         super().__init__()
 
@@ -2363,11 +2382,12 @@
         if return_only_embed:
             self.to_logits = None
         elif tie_embedding:
+            assert isinstance(token_emb, TokenEmbedding), 'can only tie embedding if using `TokenEmbedding`'
             self.to_logits = lambda t: t @ self.token_emb.emb.weight.t()
         elif num_output_heads > 1:
             self.to_logits = ModuleList([LinearNoBias(dim, logits_dim) for _ in range(num_output_heads)])
         else:
-            self.to_logits = LinearNoBias(dim, logits_dim)
+            self.to_logits = LinearNoBias(dim, logits_dim) if not exists(to_logits) else to_logits
 
         # memory tokens (like [cls]) from Memory Transformers paper
 
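TransformerWrapper can now be handed a custom to_logits module; when one is supplied it replaces the default bias-free linear projection. A minimal sketch with a hypothetical head, assuming the usual TransformerWrapper/Decoder constructor arguments:

import torch
from torch import nn
from x_transformers import TransformerWrapper, Decoder

# hypothetical custom head: anything mapping (b, n, dim) -> (b, n, num_tokens)
custom_head = nn.Sequential(
    nn.LayerNorm(512),
    nn.Linear(512, 20000, bias = False)
)

model = TransformerWrapper(
    num_tokens = 20000,
    max_seq_len = 1024,
    attn_layers = Decoder(dim = 512, depth = 6, heads = 8),
    to_logits = custom_head
)

logits = model(torch.randint(0, 20000, (1, 256)))  # (1, 256, 20000)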
@@ -2388,13 +2408,12 @@
         self.can_cache_kv_outside_max_seq_len = no_abs_pos_emb
 
     def init_(self):
+        if hasattr(self.token_emb, 'init_'):
+            self.token_emb.init_()
+
         if self.l2norm_embed:
-            nn.init.normal_(self.token_emb.emb.weight, std = 1e-5)
             if not isinstance(self.pos_emb, always):
                 nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5)
-            return
-
-        nn.init.kaiming_normal_(self.token_emb.emb.weight)
 
     def forward(
         self,
@@ -2417,7 +2436,9 @@
         attn_z_loss_weight = 1e-4,
         seq_start_pos = None,
         cache: LayerIntermediates | None = None,
-
+        token_emb_kwargs = dict(),
+        to_logits_kwargs = dict(),
+        **kwargs,
     ):
         b, n, device, num_mems, has_memory_tokens, emb_frac_gradient, orig_mask = x.shape[0], x.shape[1], x.device, self.num_memory_tokens, self.num_memory_tokens > 0, self.emb_frac_gradient, mask
 
@@ -2428,7 +2449,7 @@
 
         external_pos_emb = exists(pos) and pos.dtype != torch.long
         pos_emb = self.pos_emb(x, pos = pos, seq_start_pos = seq_start_pos) if not external_pos_emb else pos
-        x = self.token_emb(x) + pos_emb
+        x = self.token_emb(x, **token_emb_kwargs) + pos_emb
 
         # add additional embeddings
 
@@ -2583,9 +2604,9 @@
 
         if not return_embeddings:
             if self.has_multiple_heads:
-                logits = tuple(fn(x) for fn in self.to_logits)
+                logits = tuple(fn(x, **to_logits_kwargs) for fn in self.to_logits)
             else:
-                logits = self.to_logits(x)
+                logits = self.to_logits(x, **to_logits_kwargs)
 
         # maybe sig softmax
 
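At call time the new token_emb_kwargs and to_logits_kwargs are splatted into the token embedding and the output projection respectively; both default to empty dicts, so existing calls are unchanged, and they only matter when those modules accept extra keywords. A hedged sketch with hypothetical keyword names:

import torch
from x_transformers import TransformerWrapper, Decoder

model = TransformerWrapper(
    num_tokens = 20000,
    max_seq_len = 1024,
    attn_layers = Decoder(dim = 512, depth = 6, heads = 8)
)

x = torch.randint(0, 20000, (1, 256))
logits = model(x)  # unchanged default behavior

# only useful when token_emb / to_logits are custom modules accepting these (hypothetical) keywords:
# logits = model(x, token_emb_kwargs = dict(domain_id = 2), to_logits_kwargs = dict(temperature = 0.7))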
{x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/RECORD
CHANGED
@@ -6,11 +6,11 @@ x_transformers/dpo.py,sha256=xt4OuOWhU8pN3OKN2LZAaC2NC8iiEnchqqcrPWVqf0o,3521
 x_transformers/multi_input.py,sha256=tCh-fTJDj2ib4SMGtsa-AM8MxKzJAQSwqAXOu3HU2mg,9252
 x_transformers/neo_mlp.py,sha256=XCNnnop9WLarcxap1kGuYc1x8GHvwkZiDRnXOxSl3Po,3452
 x_transformers/nonautoregressive_wrapper.py,sha256=2NU58hYMgn-4Jzg3mie-mXb0XH_dCN7fjlzd3K1rLUY,10510
-x_transformers/x_transformers.py,sha256=
+x_transformers/x_transformers.py,sha256=VxdA44EYQhVH1Rp7wreJ83I2e0Ea7VN_bFRE-iDXOI8,93833
 x_transformers/xl_autoregressive_wrapper.py,sha256=CvZMJ6A6PA-Y_bQAhnORwjJBSl6Vjq2IdW5KTdk8NI8,4195
 x_transformers/xval.py,sha256=7S00kCuab4tWQa-vf-z-XfzADjVj48MoFIr7VSIvttg,8575
-x_transformers-1.42.
-x_transformers-1.42.
-x_transformers-1.42.
-x_transformers-1.42.
-x_transformers-1.42.
+x_transformers-1.42.9.dist-info/LICENSE,sha256=As9u198X-U-vph5noInuUfqsAG2zX_oXPHDmdjwlPPY,1066
+x_transformers-1.42.9.dist-info/METADATA,sha256=k9r-D0b0xnf8gwE-SwwgybnfQpoRwiY0wthOn66xc6Y,689
+x_transformers-1.42.9.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+x_transformers-1.42.9.dist-info/top_level.txt,sha256=hO6KGpFuGucRNEtRfme4A_rGcM53AKwGP7RVlRIxS5Q,15
+x_transformers-1.42.9.dist-info/RECORD,,
{x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/LICENSE
File without changes
{x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/WHEEL
File without changes
{x_transformers-1.42.7.dist-info → x_transformers-1.42.9.dist-info}/top_level.txt
File without changes