lt-tensor 0.0.1a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,628 @@
+ __all__ = [
+     "log_tensor",
+     "set_seed",
+     "count_parameters",
+     "freeze_all_except",
+     "freeze_selected_weights",
+     "unfreeze_all_except",
+     "unfreeze_selected_weights",
+     "clip_gradients",
+     "detach_hidden",
+     "tensor_summary",
+     "one_hot",
+     "safe_divide",
+     "batch_pad",
+     "sample_tensor",
+     "TorchCacheUtils",
+     "clear_cache",
+     "default_device",
+     "Packing",
+     "Padding",
+     "MaskUtils",
+     "masked_cross_entropy",
+     "NoiseScheduler",
+ ]
+
+ import gc
+ import random
+ import numpy as np
+ from lt_utils.type_utils import is_str
+ from ._torch_commons import *
+ from lt_utils.misc_utils import log_traceback, cache_wrapper
+ from lt_utils.file_ops import load_json, load_yaml, save_json, save_yaml
+ import math
+
+
+ def log_tensor(
+     item: Union[Tensor, np.ndarray],
+     title: Optional[str] = None,
+     print_details: bool = True,
+     print_tensor: bool = False,
+     dim: Optional[int] = None,
+ ):
+     assert isinstance(item, (Tensor, np.ndarray))
+     has_title = is_str(title)
+
+     if has_title:
+         print("========[" + title.title() + "]========")
+         _b = 20 + len(title.strip())
+     print(f"shape: {item.shape}")
+     print(f"dtype: {item.dtype}")
+     if print_details:
+         print(f"ndim: {item.ndim}")
+         if isinstance(item, Tensor):
+             print(f"device: {item.device}")
+         print(f"min: {item.min():.4f}")
+         print(f"max: {item.max():.4f}")
+         try:
+             print(f"std: {item.std(dim=dim):.4f}")
+         except Exception:
+             pass
+         try:
+             print(f"mean: {item.mean(dim=dim):.4f}")
+         except Exception:
+             pass
+     if print_tensor:
+         print(item)
+     if has_title:
+         print("".join(["-"] * _b), "\n")
+
+
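+ # Usage sketch (illustrative only; shapes and titles are made up, not from the
+ # packaged module):
+ #
+ #   log_tensor(torch.randn(2, 80, 100), title="mel batch")
+ #   log_tensor(np.zeros((4, 4)), title="numpy input", print_details=False)
+
+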
+ def set_seed(seed: int):
+     """Set random seed for reproducibility."""
+     torch.manual_seed(seed)
+     np.random.seed(seed)
+     random.seed(seed)
+
+     if torch.cuda.is_available():
+         torch.cuda.manual_seed_all(seed)
+     if torch.mps.is_available():
+         torch.mps.manual_seed(seed)
+     if torch.xpu.is_available():
+         torch.xpu.manual_seed_all(seed)
+
+
+ def count_parameters(model: Module) -> int:
+     """Returns total number of trainable parameters."""
+     return sum(p.numel() for p in model.parameters() if p.requires_grad)
+
+
+ def freeze_all_except(model: Module, except_layers: Optional[list[str]] = None):
+     """Freezes all model parameters except the specified layers."""
+     no_exceptions = not except_layers
+     for name, param in model.named_parameters():
+         if no_exceptions:
+             param.requires_grad_(False)
+         elif not any(layer in name for layer in except_layers):
+             param.requires_grad_(False)
+
+
+ def freeze_selected_weights(model: Module, target_layers: list[str]):
+     """Freezes only parameters on specified layers."""
+     for name, param in model.named_parameters():
+         if any(layer in name for layer in target_layers):
+             param.requires_grad_(False)
+
+
+ def unfreeze_all_except(model: Module, except_layers: Optional[list[str]] = None):
+     """Unfreezes all model parameters except specified layers."""
+     no_exceptions = not except_layers
+     for name, param in model.named_parameters():
+         if no_exceptions:
+             param.requires_grad_(True)
+         elif not any(layer in name for layer in except_layers):
+             param.requires_grad_(True)
+
+
+ def unfreeze_selected_weights(model: Module, target_layers: list[str]):
+     """Unfreezes only parameters on specified layers."""
+     for name, param in model.named_parameters():
+         if any(layer in name for layer in target_layers):
+             param.requires_grad_(True)
+
+
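+ # Usage sketch (illustrative; "model" and the layer names are made-up examples):
+ # freeze a backbone while keeping a head trainable, then count what remains.
+ #
+ #   freeze_all_except(model, except_layers=["classifier"])
+ #   print(count_parameters(model))   # trainable parameters only
+ #   unfreeze_selected_weights(model, target_layers=["classifier"])
+
+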
+ def clip_gradients(model: Module, max_norm: float = 1.0):
+     """Applies gradient clipping."""
+     return nn.utils.clip_grad_norm_(model.parameters(), max_norm)
+
+
+ def detach_hidden(hidden):
+     """Detaches hidden states (for RNNs)."""
+     if isinstance(hidden, torch.Tensor):
+         return hidden.detach()
+     else:
+         return tuple(detach_hidden(h) for h in hidden)
+
+
+ def tensor_summary(tensor: torch.Tensor) -> str:
+     """Returns a one-line shape/dtype/min/max/mean/std summary of a tensor for debugging."""
+     return f"Shape: {tuple(tensor.shape)}, dtype: {tensor.dtype}, min: {tensor.min():.4f}, max: {tensor.max():.4f}, mean: {tensor.mean():.4f}, std: {tensor.std():.4f}"
+
+
+ def one_hot(labels: torch.Tensor, num_classes: int) -> torch.Tensor:
+     """One-hot encodes a tensor of labels."""
+     return F.one_hot(labels, num_classes).float()
+
+
+ def safe_divide(a: torch.Tensor, b: torch.Tensor, eps: float = 1e-8):
+     """Safe division for tensors (prevents divide-by-zero)."""
+     return a / (b + eps)
+
+
+ def batch_pad(tensors: list[torch.Tensor], padding_value: float = 0.0) -> torch.Tensor:
+     """Pads a list of tensors to the same shape (assumes 2D+ tensors)."""
+     max_shape = [
+         max(s[i] for s in [t.shape for t in tensors]) for i in range(tensors[0].dim())
+     ]
+     padded = []
+     for t in tensors:
+         pad_dims = [(0, m - s) for s, m in zip(t.shape, max_shape)]
+         pad_flat = [p for pair in reversed(pad_dims) for p in pair]  # reverse for F.pad
+         padded.append(F.pad(t, pad_flat, value=padding_value))
+     return torch.stack(padded)
+
+
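+ # Usage sketch (illustrative, shapes made up): pad two differently sized
+ # matrices to a common shape and stack them into one batch.
+ #
+ #   a, b = torch.ones(2, 3), torch.zeros(4, 1)
+ #   batch_pad([a, b]).shape   # -> torch.Size([2, 4, 3])
+
+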
+ def sample_tensor(tensor: torch.Tensor, num_samples: int = 5):
+     """Randomly samples values from tensor for preview."""
+     flat = tensor.flatten()
+     idx = torch.randperm(len(flat))[:num_samples]
+     return flat[idx]
+
+
+ class TorchCacheUtils:
+     cached_shortcuts: dict[str, Callable[[], None]] = {}
+
+     has_cuda: bool = torch.cuda.is_available()
+     has_xpu: bool = torch.xpu.is_available()
+     has_mps: bool = torch.mps.is_available()
+
+     _ignore: list[str] = []
+
+     def __init__(self):
+         pass
+
+     def _apply_clear(self, device: str):
+         if device in self._ignore:
+             gc.collect()
+             return
+         try:
+             clear_fn = self.cached_shortcuts.get(
+                 device, getattr(torch, device).empty_cache
+             )
+             if device not in self.cached_shortcuts:
+                 self.cached_shortcuts.update({device: clear_fn})
+             clear_fn()
+         except Exception as e:
+             print(e)
+             self._ignore.append(device)
+
+     def clear(self):
+         gc.collect()
+         if self.has_xpu:
+             self._apply_clear("xpu")
+         if self.has_cuda:
+             self._apply_clear("cuda")
+         if self.has_mps:
+             self._apply_clear("mps")
+         gc.collect()
+
+
+ _clear_cache_cls = TorchCacheUtils()
+
+
+ def clear_cache():
+     _clear_cache_cls.clear()
+
+
+ @cache_wrapper
+ def default_device(idx: Optional[int] = None):
+     try:
+         if torch.cuda.is_available():
+             return torch.device("cuda", idx)
+         if torch.xpu.is_available():
+             return torch.device("xpu", idx)
+         if torch.mps.is_available():
+             return torch.device("mps", idx)
+         if hasattr(torch, "is_vulkan_available") and torch.is_vulkan_available():
+             return torch.device("vulkan", idx)
+     except Exception:
+         pass
+     # Fallback: the device newly created tensors land on by default.
+     return torch.device(torch.zeros(1).device)
+
+
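+ # Usage sketch (illustrative; backend availability depends on the machine):
+ # pick an accelerator once, move work onto it, and free cached memory between runs.
+ #
+ #   device = default_device(0)         # e.g. cuda:0 / xpu:0 / mps:0, else the fallback device
+ #   model = nn.Linear(8, 8).to(device)
+ #   clear_cache()                      # gc.collect() plus empty_cache() on available backends
+
+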
+ class Packing:
+     """
+     Example:
+
+     ```
+     x_lengths = torch.tensor([5, 3, 6])
+     x_padded = torch.randn(3, 6, 256)  # padded input [B, T, C]
+
+     # 1. RNN expects packed input
+     x_packed = Packing.pack_sequence(x_padded, x_lengths)
+     output_packed, _ = rnn(x_packed)
+
+     # 2. Recover padded for loss
+     output = Packing.unpack_sequence(output_packed, total_length=x_padded.size(1))
+
+     # 3. Mask for loss
+     mask = torch.arange(x_padded.size(1))[None, :] < x_lengths[:, None]
+     loss = (F.mse_loss(output, target, reduction="none") * mask.unsqueeze(-1)).sum() / mask.sum()
+     ```
+     """
+
+     @staticmethod
+     def pack_sequence(x: Tensor, lengths: Tensor):
+         """
+         Pack padded sequence for RNN/LSTM.
+         Args:
+             x (Tensor): Padded input [B, T, C]
+             lengths (Tensor): Actual lengths [B]
+         Returns:
+             PackedSequence
+         """
+         return nn.utils.rnn.pack_padded_sequence(
+             x,
+             lengths.cpu().numpy(),
+             batch_first=True,
+             enforce_sorted=False,
+         )
+
+     @staticmethod
+     def unpack_sequence(packed, total_length: int) -> Tensor:
+         """Unpacks RNN PackedSequence to padded [B, T, C]."""
+         output, _ = nn.utils.rnn.pad_packed_sequence(
+             packed,
+             batch_first=True,
+             total_length=total_length,
+         )
+         return output
+
+
+ class Padding:
+
+     @staticmethod
+     def pad_to(x: Tensor, target_length: int, pad_value: float = 0.0) -> Tensor:
+         """
+         Pad input tensor along time axis (dim=1) to target length.
+         Args:
+             x (Tensor): Input tensor [B, T, C]
+             target_length (int): Target time length
+             pad_value (float): Fill value
+         Returns:
+             Padded tensor [B, target_length, C]
+         """
+         B, T, C = x.size()
+         if T >= target_length:
+             return x
+         pad = x.new_full((B, target_length - T, C), pad_value)
+         return torch.cat([x, pad], dim=1)
+
+     @staticmethod
+     def pad_sequence(
+         inputs: Tensor,
+         size: int,
+         direction: Literal["left", "right"] = "left",
+         pad_id: Union[int, float] = 0,
+     ) -> Tensor:
+         """
+         Pads a single tensor to the specified size in 1D.
+         Args:
+             inputs (Tensor): Tensor of shape [T] or [B, T]
+             size (int): Desired size along the last dimension
+             direction (str): 'left' or 'right'
+             pad_id (int): Value to pad with
+         Returns:
+             Padded tensor
+         """
+         total = size - inputs.shape[-1]
+         if total < 1:
+             return inputs
+         pad_config = (total, 0) if direction == "left" else (0, total)
+         return F.pad(inputs, pad_config, value=pad_id)
+
+     @staticmethod
+     def pad_batch_1d(
+         batch: List[Tensor],
+         pad_value: float = 0.0,
+         pad_to_multiple: Optional[int] = None,
+         direction: Literal["left", "right"] = "right",
+     ) -> Tuple[Tensor, Tensor]:
+         """
+         Pad list of 1D tensors to same length with optional multiple alignment.
+         Returns:
+             Padded tensor [B, T], Lengths [B]
+         """
+         lengths = torch.tensor([t.size(0) for t in batch])
+         max_len = lengths.max().item()
+
+         if pad_to_multiple:
+             max_len = (
+                 (max_len + pad_to_multiple - 1) // pad_to_multiple
+             ) * pad_to_multiple
+
+         padded = []
+         for t in batch:
+             padded.append(Padding.pad_sequence(t, max_len, direction, pad_value))
+         return torch.stack(padded), lengths
+
+     @staticmethod
+     def pad_batch_2d(
+         batch: List[Tensor],
+         pad_value: float = 0.0,
+         pad_to_multiple: Optional[int] = None,
+         direction: Literal["left", "right"] = "right",
+     ) -> Tuple[Tensor, Tensor]:
+         """
+         Pad list of 2D tensors (e.g. [T, D]) to same T.
+         Returns:
+             Padded tensor [B, T, D], Lengths [B]
+         """
+         lengths = torch.tensor([t.size(0) for t in batch])
+         feat_dim = batch[0].size(1)
+         max_len = lengths.max().item()
+
+         if pad_to_multiple:
+             max_len = (
+                 (max_len + pad_to_multiple - 1) // pad_to_multiple
+             ) * pad_to_multiple
+
+         padded = []
+         for t in batch:
+             pad_len = max_len - t.size(0)
+             pad_tensor = t.new_full((pad_len, feat_dim), pad_value)
+             if direction == "left":
+                 padded.append(torch.cat([pad_tensor, t], dim=0))
+             else:
+                 padded.append(torch.cat([t, pad_tensor], dim=0))
+         return torch.stack(padded), lengths
+
+     @staticmethod
+     def pad_batch_nd(
+         batch: List[Tensor],
+         pad_value: float = 0.0,
+         dim: int = 0,
+         pad_to_multiple: Optional[int] = None,
+     ) -> Tuple[Tensor, Tensor]:
+         """
+         General N-D padding along time axis (dim=0, usually).
+         Handles shapes like:
+             [T, C] → [B, T, C]
+             [T, H, W] → [B, T, H, W]
+         """
+         lengths = torch.tensor([t.size(dim) for t in batch])
+         max_len = lengths.max().item()
+         if pad_to_multiple:
+             max_len = (
+                 (max_len + pad_to_multiple - 1) // pad_to_multiple
+             ) * pad_to_multiple
+
+         padded = []
+         for t in batch:
+             pad_len = max_len - t.size(dim)
+             pad_shape = list(t.shape)
+             pad_shape[dim] = pad_len
+             pad_tensor = t.new_full(pad_shape, pad_value)
+             padded_tensor = torch.cat([t, pad_tensor], dim=dim)
+             padded.append(padded_tensor)
+
+         return torch.stack(padded), lengths
+
+
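+ # Usage sketch (illustrative, shapes made up): collate variable-length
+ # sequences and feature frames into fixed-size batches.
+ #
+ #   tokens, tok_lens = Padding.pad_batch_1d(
+ #       [torch.tensor([1, 2, 3]), torch.tensor([4, 5])], pad_to_multiple=4
+ #   )                                                 # -> [2, 4], tensor([3, 2])
+ #   mels, mel_lens = Padding.pad_batch_2d(
+ #       [torch.randn(5, 80), torch.randn(3, 80)]
+ #   )                                                 # -> [2, 5, 80], tensor([5, 3])
+
+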
+ class MaskUtils:
+
+     @staticmethod
+     def apply_mask(x: Tensor, mask: Tensor, fill_value: Number = 0) -> Tensor:
+         """
+         Apply a mask to a tensor, setting masked positions to `fill_value`.
+         Args:
+             x (Tensor): Input tensor of shape [..., T, D].
+             mask (Tensor): Mask of shape [..., T] where True = masked.
+             fill_value (Number): Value to fill masked positions with.
+         Returns:
+             Tensor: Masked tensor.
+         """
+         return x.masked_fill(mask.unsqueeze(-1), fill_value)
+
+     @staticmethod
+     def get_padding_mask(
+         lengths: Optional[Tensor] = None,
+         tokens: Optional[Tensor] = None,
+         padding_id: int = 0,
+     ) -> Tensor:
+         """
+         Generate a padding mask: True for real tokens, False for padding.
+         Args:
+             lengths (Tensor): Tensor of shape [B] with sequence lengths.
+             tokens (Tensor): Tensor of shape [B, T] with token ids.
+             padding_id (int): Padding token id (default=0).
+         Returns:
+             Tensor: Boolean mask of shape [B, T].
+         """
+         assert (
+             tokens is not None or lengths is not None
+         ), "Either tokens or lengths must be provided."
+
+         if tokens is not None:
+             return tokens != padding_id
+
+         B = lengths.size(0)
+         max_len = lengths.max().item()
+         arange = torch.arange(max_len, device=lengths.device).unsqueeze(0).expand(B, -1)
+         return arange < lengths.unsqueeze(1)
+
+     @staticmethod
+     def get_padding_mask_fps(lengths: Tensor) -> Tensor:
+         """
+         Legacy-style padding mask using 1-based comparison; True marks padding positions.
+         """
+         mask = (
+             torch.arange(lengths.max(), device=lengths.device)
+             .unsqueeze(0)
+             .expand(lengths.shape[0], -1)
+         )
+         return (mask + 1) > lengths.unsqueeze(1)
+
+     @staticmethod
+     def get_causal_mask(
+         size: Union[int, tuple[int, ...]],
+         device: Optional[Union[str, torch.device]] = None,
+     ) -> Tensor:
+         """
+         Generate a causal mask for self-attention.
+         Args:
+             size (int or tuple): Size (T) or (1, T, T)
+             device (str | torch.device, optional): Device to create the mask on.
+         Returns:
+             Tensor: [1, T, T] boolean causal mask
+         """
+         if isinstance(size, int):
+             size = (1, size, size)
+         return torch.tril(torch.ones(size, dtype=torch.bool, device=device))
+
+     @staticmethod
+     def combine_masks(pad_mask: Tensor, causal_mask: Tensor) -> Tensor:
+         """
+         Combine padding and causal masks.
+         Args:
+             pad_mask (Tensor): [B, T] padding mask
+             causal_mask (Tensor): [1, T, T] causal mask
+         Returns:
+             Tensor: [B, T, T] combined mask
+         """
+         return (
+             causal_mask & pad_mask.unsqueeze(1).expand(-1, pad_mask.size(1), -1).bool()
+         )
+
+
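+ # Usage sketch (illustrative, lengths made up): build an attention mask for a
+ # padded batch.
+ #
+ #   lengths = torch.tensor([4, 2])
+ #   pad_mask = MaskUtils.get_padding_mask(lengths=lengths)   # [B, T], True = real token
+ #   causal = MaskUtils.get_causal_mask(pad_mask.size(1))     # [1, T, T]
+ #   attn_mask = MaskUtils.combine_masks(pad_mask, causal)    # [B, T, T]
+
+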
+ def masked_cross_entropy(
+     logits: torch.Tensor,  # [B, T, V]
+     targets: torch.Tensor,  # [B, T]
+     lengths: torch.Tensor,  # [B]
+     reduction: str = "mean",
+ ) -> torch.Tensor:
+     """
+     CrossEntropyLoss with masking for variable-length sequences.
+     - logits: unnormalized scores [B, T, V]
+     - targets: ground truth indices [B, T]
+     - lengths: actual sequence lengths [B]
+     """
+     B, T, V = logits.size()
+     logits = logits.view(-1, V)
+     targets = targets.view(-1)
+
+     # Create mask
+     mask = torch.arange(T, device=lengths.device).expand(B, T) < lengths.unsqueeze(1)
+     mask = mask.reshape(-1)
+
+     # Apply CE only where mask == True, honoring the requested reduction
+     return F.cross_entropy(logits[mask], targets[mask], reduction=reduction)
+
+
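+ # Usage sketch (illustrative, shapes made up): loss over a padded batch.
+ #
+ #   logits = torch.randn(2, 4, 10)            # [B, T, V]
+ #   targets = torch.randint(0, 10, (2, 4))    # [B, T]
+ #   lengths = torch.tensor([4, 2])            # [B]
+ #   loss = masked_cross_entropy(logits, targets, lengths)
+
+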
+ class NoiseScheduler(Module):
+     def __init__(self, timesteps: int = 512):
+         super().__init__()
+
+         betas = torch.linspace(1e-4, 0.02, timesteps)
+         alphas = 1.0 - betas
+         alpha_cumprod = torch.cumprod(alphas, dim=0)
+
+         self.register_buffer("sqrt_alpha_cumprod", torch.sqrt(alpha_cumprod))
+         self.register_buffer(
+             "sqrt_one_minus_alpha_cumprod", torch.sqrt(1.0 - alpha_cumprod)
+         )
+
+         self.timesteps = timesteps
+         self.default_noise = math.sqrt(1.25)
+
+     def get_random_noise(
+         self, min_max: Tuple[float, float] = (-3, 3), seed: int = 0
+     ) -> float:
+         if seed > 0:
+             random.seed(seed)
+         return random.uniform(*min_max)
+
+     def set_noise(
+         self,
+         seed: int = 0,
+         min_max: Tuple[float, float] = (-3, 3),
+         default: bool = False,
+     ):
+         self.default_noise = (
+             math.sqrt(1.25) if default else self.get_random_noise(min_max, seed)
+         )
+
+     def forward(
+         self, x_0: Tensor, t: int, noise: Optional[Union[Tensor, float]] = None
+     ) -> Tensor:
+         if t < 0 or t >= self.timesteps:
+             raise ValueError(
+                 f"Time step t={t} is out of bounds for scheduler with {self.timesteps} steps."
+             )
+
+         if noise is None:
+             noise = self.default_noise
+
+         if isinstance(noise, (float, int)):
+             noise = torch.randn_like(x_0) * noise
+
+         alpha_term = self.sqrt_alpha_cumprod[t] * x_0
+         noise_term = self.sqrt_one_minus_alpha_cumprod[t] * noise
+         return alpha_term + noise_term
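+
+
+ # Usage sketch (illustrative, shapes made up): forward-diffuse a clean sample.
+ #
+ #   scheduler = NoiseScheduler(timesteps=512)
+ #   x_0 = torch.randn(1, 80, 100)
+ #   x_t = scheduler(x_0, t=250)                          # noise drawn at the default scale
+ #   x_t = scheduler(x_0, t=250, noise=torch.randn_like(x_0))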
@@ -0,0 +1,9 @@
+ __all__ = [
+     "bsc",  # basic
+     "rsd",  # residual
+     "tfr",  # transformer
+     "pos",  # positional encoders
+     "fsn",  # fusion
+     "dfs",  # diffusion
+ ]
+ from . import bsc, dfs, fsn, pos, rsd, tfr