tensorneko 0.3.4__tar.gz → 0.3.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. {tensorneko-0.3.4/src/tensorneko.egg-info → tensorneko-0.3.6}/PKG-INFO +2 -2
  2. {tensorneko-0.3.4 → tensorneko-0.3.6}/README.md +1 -1
  3. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/arch/__init__.py +3 -1
  4. tensorneko-0.3.6/src/tensorneko/arch/binary_classifier.py +59 -0
  5. tensorneko-0.3.6/src/tensorneko/callback/gpu_stats_logger.py +68 -0
  6. tensorneko-0.3.6/src/tensorneko/callback/system_stats_logger.py +44 -0
  7. tensorneko-0.3.6/src/tensorneko/dataset/__init__.py +11 -0
  8. tensorneko-0.3.6/src/tensorneko/dataset/list_dataset.py +19 -0
  9. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/dataset/round_robin_dataset.py +1 -1
  10. tensorneko-0.3.6/src/tensorneko/dataset/sampler/__init__.py +5 -0
  11. tensorneko-0.3.6/src/tensorneko/dataset/sampler/sequential_iter_sampler.py +26 -0
  12. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/__init__.py +2 -0
  13. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/fid.py +39 -3
  14. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/iou.py +1 -1
  15. tensorneko-0.3.6/src/tensorneko/evaluation/secs.py +58 -0
  16. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/neko_trainer.py +7 -3
  17. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/__init__.py +2 -1
  18. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/configuration.py +5 -3
  19. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/misc.py +2 -1
  20. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/type.py +0 -1
  21. tensorneko-0.3.6/src/tensorneko/version.txt +1 -0
  22. {tensorneko-0.3.4 → tensorneko-0.3.6/src/tensorneko.egg-info}/PKG-INFO +2 -2
  23. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko.egg-info/SOURCES.txt +5 -0
  24. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko.egg-info/requires.txt +2 -2
  25. tensorneko-0.3.4/src/tensorneko/callback/gpu_stats_logger.py +0 -31
  26. tensorneko-0.3.4/src/tensorneko/callback/system_stats_logger.py +0 -22
  27. tensorneko-0.3.4/src/tensorneko/dataset/__init__.py +0 -5
  28. tensorneko-0.3.4/src/tensorneko/version.txt +0 -1
  29. {tensorneko-0.3.4 → tensorneko-0.3.6}/LICENSE +0 -0
  30. {tensorneko-0.3.4 → tensorneko-0.3.6}/setup.cfg +0 -0
  31. {tensorneko-0.3.4 → tensorneko-0.3.6}/setup.py +0 -0
  32. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/__init__.py +0 -0
  33. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/arch/auto_encoder.py +0 -0
  34. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/arch/gan.py +0 -0
  35. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/arch/vqvae.py +0 -0
  36. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/arch/wgan.py +0 -0
  37. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/backend/__init__.py +0 -0
  38. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/callback/__init__.py +0 -0
  39. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/callback/display_metrics_callback.py +0 -0
  40. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/callback/earlystop_lr.py +0 -0
  41. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/callback/epoch_num_logger.py +0 -0
  42. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/callback/epoch_time_logger.py +0 -0
  43. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/callback/lr_logger.py +0 -0
  44. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/callback/nil_callback.py +0 -0
  45. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/dataset/nested_dataset.py +0 -0
  46. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/debug/__init__.py +0 -0
  47. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/enum.py +0 -0
  48. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/psnr.py +0 -0
  49. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/ssim.py +0 -0
  50. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/io/__init__.py +0 -0
  51. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/io/mesh/__init__.py +0 -0
  52. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/io/mesh/mesh_reader.py +0 -0
  53. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/io/mesh/mesh_writer.py +0 -0
  54. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/io/reader.py +0 -0
  55. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/io/writer.py +0 -0
  56. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/__init__.py +0 -0
  57. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/aggregation.py +0 -0
  58. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/attention.py +0 -0
  59. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/concatenate.py +0 -0
  60. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/conv.py +0 -0
  61. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/linear.py +0 -0
  62. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/log.py +0 -0
  63. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/masked_conv2d.py +0 -0
  64. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/noise.py +0 -0
  65. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/patching.py +0 -0
  66. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/positional_embedding.py +0 -0
  67. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/reshape.py +0 -0
  68. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/stack.py +0 -0
  69. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/layer/vector_quantizer.py +0 -0
  70. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/module/__init__.py +0 -0
  71. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/module/dense.py +0 -0
  72. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/module/gated_conv.py +0 -0
  73. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/module/inception.py +0 -0
  74. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/module/mlp.py +0 -0
  75. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/module/residual.py +0 -0
  76. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/module/transformer.py +0 -0
  77. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/neko_model.py +0 -0
  78. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/neko_module.py +0 -0
  79. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/notebook/__init__.py +0 -0
  80. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/optim/__init__.py +0 -0
  81. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/optim/lr_scheduler/__init__.py +0 -0
  82. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/preprocess/__init__.py +0 -0
  83. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/preprocess/crop.py +0 -0
  84. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/preprocess/enum.py +0 -0
  85. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/preprocess/face_detector/__init__.py +0 -0
  86. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/preprocess/pad.py +0 -0
  87. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/preprocess/resize.py +0 -0
  88. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/dispatched_misc.py +0 -0
  89. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/reproducibility.py +0 -0
  90. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/string_getter.py +0 -0
  91. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/visualization/__init__.py +0 -0
  92. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/visualization/image_browser/__init__.py +0 -0
  93. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/visualization/log_graph.py +0 -0
  94. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/visualization/matplotlib.py +0 -0
  95. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/visualization/watcher/__init__.py +0 -0
  96. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko.egg-info/dependency_links.txt +0 -0
  97. {tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko.egg-info/top_level.txt +0 -0
  98. {tensorneko-0.3.4 → tensorneko-0.3.6}/test/test_library_info.py +0 -0
  99. {tensorneko-0.3.4 → tensorneko-0.3.6}/test/test_version.py +0 -0

{tensorneko-0.3.4/src/tensorneko.egg-info → tensorneko-0.3.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tensorneko
-Version: 0.3.4
+Version: 0.3.6
 Summary: Tensor Neural Engine Kompanion. An util library based on PyTorch and PyTorch Lightning.
 Home-page: https://github.com/ControlNet/tensorneko
 Author: ControlNet
@@ -38,7 +38,7 @@ License-File: LICENSE
 <div align="center">
 <a href="https://www.python.org/"><img src="https://img.shields.io/pypi/pyversions/tensorneko?style=flat-square"></a>
 <a href="https://pytorch.org/"><img src="https://img.shields.io/badge/PyTorch-%3E%3D1.9.0-EE4C2C?style=flat-square&logo=pytorch"></a>
-<a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*-792EE5?style=flat-square&logo=lightning"></a>
+<a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*%20|%202.1.*-792EE5?style=flat-square&logo=lightning"></a>
 </div>

 <div align="center">

{tensorneko-0.3.4 → tensorneko-0.3.6}/README.md
@@ -12,7 +12,7 @@
 <div align="center">
 <a href="https://www.python.org/"><img src="https://img.shields.io/pypi/pyversions/tensorneko?style=flat-square"></a>
 <a href="https://pytorch.org/"><img src="https://img.shields.io/badge/PyTorch-%3E%3D1.9.0-EE4C2C?style=flat-square&logo=pytorch"></a>
-<a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*-792EE5?style=flat-square&logo=lightning"></a>
+<a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*%20|%202.1.*-792EE5?style=flat-square&logo=lightning"></a>
 </div>

 <div align="center">

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/arch/__init__.py
@@ -2,10 +2,12 @@ from .gan import GAN
 from .vqvae import VQVAE
 from .wgan import WGAN
 from .auto_encoder import AutoEncoder
+from .binary_classifier import BinaryClassifier

 __all__ = [
     "GAN",
     "VQVAE",
     "WGAN",
-    "AutoEncoder"
+    "AutoEncoder",
+    "BinaryClassifier",
 ]

tensorneko-0.3.6/src/tensorneko/arch/binary_classifier.py
@@ -0,0 +1,59 @@
+from typing import Optional, Union, Sequence, Dict, Any
+
+from torch import Tensor
+from torch.nn import BCEWithLogitsLoss, Module
+from torch.optim import Adam
+from torchmetrics import Accuracy, F1Score, AUROC
+
+from ..neko_model import NekoModel
+
+
+class BinaryClassifier(NekoModel):
+
+    def __init__(self, name, model: Module, learning_rate: float = 1e-4, distributed: bool = False):
+        super().__init__(name)
+        self.save_hyperparameters()
+        self.model = model
+        self.learning_rate = learning_rate
+        self.distributed = distributed
+        self.loss_fn = BCEWithLogitsLoss()
+        self.acc_fn = Accuracy(task="binary")
+        self.f1_fn = F1Score(task="binary")
+        self.auc_fn = AUROC(task="binary")
+
+    @classmethod
+    def from_module(cls, model: Module, learning_rate: float = 1e-4, name: str = "binary_classifier",
+        distributed: bool = False
+    ):
+        return cls(name, model, learning_rate, distributed)
+
+    def forward(self, x):
+        return self.model(x)
+
+    def step(self, batch: Optional[Union[Tensor, Sequence[Tensor]]]) -> Dict[str, Tensor]:
+        x, y = batch
+        y_hat = self(x).squeeze(1)
+        loss = self.loss_fn(y_hat, y)
+        prob = y_hat.sigmoid()
+        acc = self.acc_fn(prob, y)
+        f1 = self.f1_fn(prob, y)
+        auc = self.auc_fn(prob, y)
+        return {"loss": loss, "acc": acc, "f1": f1, "auc": auc}
+
+    def training_step(self, batch: Optional[Union[Tensor, Sequence[Tensor]]] = None, batch_idx: Optional[int] = None,
+        optimizer_idx: Optional[int] = None, hiddens: Optional[Tensor] = None
+    ) -> Dict[str, Tensor]:
+        return self.step(batch)
+
+    def validation_step(self, batch: Optional[Union[Tensor, Sequence[Tensor]]] = None, batch_idx: Optional[int] = None,
+        dataloader_idx: Optional[int] = None
+    ) -> Dict[str, Tensor]:
+        return self.step(batch)
+
+    def predict_step(self, batch: Tensor, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
+        x, y = batch
+        return self(x)
+
+    def configure_optimizers(self):
+        optimizer = Adam(self.parameters(), lr=self.learning_rate)
+        return [optimizer]
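
Usage sketch for the new BinaryClassifier (a hedged illustration: the wrapped MLP and data below are made up; from_module, step and the metric keys come from the file above):

    import torch
    from torch.nn import Sequential, Linear, ReLU
    from tensorneko.arch import BinaryClassifier

    # Wrap any Module that outputs one logit per sample (shape [B, 1]).
    backbone = Sequential(Linear(16, 32), ReLU(), Linear(32, 1))
    clf = BinaryClassifier.from_module(backbone, learning_rate=1e-3)

    x = torch.randn(8, 16)
    y = torch.tensor([0., 1., 0., 1., 0., 1., 0., 1.])  # float targets, as BCEWithLogitsLoss requires
    metrics = clf.step((x, y))  # {"loss": ..., "acc": ..., "f1": ..., "auc": ...}

Note the squeeze(1) in step(): the wrapped model must keep a trailing singleton logit dimension.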

tensorneko-0.3.6/src/tensorneko/callback/gpu_stats_logger.py
@@ -0,0 +1,68 @@
+from typing import Any
+
+from lightning.pytorch import Callback, Trainer, LightningModule
+from lightning.pytorch.utilities.types import STEP_OUTPUT
+
+
+class GpuStatsLogger(Callback):
+    """Log GPU stats for each training epoch"""
+
+    def __init__(self, delay: float = 0.5, on_epoch: bool = True, on_step: bool = False):
+        try:
+            from gpumonitor.monitor import GPUStatMonitor
+        except ImportError:
+            raise ImportError("gpumonitor is required to use GPUStatsLogger")
+
+        self.monitor_epoch = GPUStatMonitor(delay=delay) if on_epoch else None
+        self.monitor_step = GPUStatMonitor(delay=delay) if on_step else None
+        self.on_epoch = on_epoch
+        self.on_step = on_step
+        assert self.on_epoch or self.on_step, "on_epoch and on_step cannot be both False"
+
+    def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
+        if not self.on_epoch:
+            return
+        self.monitor_epoch.reset()
+
+    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
+        if not self.on_epoch:
+            return
+        for gpu in self.monitor_epoch.average_stats.gpus:
+            logged_info = {
+                f"gpu{gpu.index}_memory_used_epoch": gpu.memory_used / 1024,
+                f"gpu{gpu.index}_memory_total_epoch": gpu.memory_total / 1024,
+                f"gpu{gpu.index}_memory_util_epoch": gpu.memory_used / gpu.memory_total,
+                f"gpu{gpu.index}_temperature_epoch": float(gpu.temperature),
+                f"gpu{gpu.index}_utilization_epoch": gpu.utilization / 100,
+                f"gpu{gpu.index}_power_draw_epoch": float(gpu.power_draw),
+                f"gpu{gpu.index}_power_percentage_epoch": gpu.power_draw / gpu.power_limit,
+                f"gpu{gpu.index}_fan_speed_epoch": float(gpu.fan_speed) if gpu.fan_speed is not None else 0.,
+            }
+            pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
+            pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)
+
+    def on_train_batch_start(
+        self, trainer: Trainer, pl_module: LightningModule, batch: Any, batch_idx: int
+    ) -> None:
+        if not self.on_step:
+            return
+        self.monitor_step.reset()
+
+    def on_train_batch_end(
+        self, trainer: Trainer, pl_module: LightningModule, outputs: STEP_OUTPUT, batch: Any, batch_idx: int
+    ) -> None:
+        if not self.on_step:
+            return
+        for gpu in self.monitor_step.average_stats.gpus:
+            logged_info = {
+                f"gpu{gpu.index}_memory_used_step": gpu.memory_used / 1024,
+                f"gpu{gpu.index}_memory_total_step": gpu.memory_total / 1024,
+                f"gpu{gpu.index}_memory_util_step": gpu.memory_used / gpu.memory_total,
+                f"gpu{gpu.index}_temperature_step": float(gpu.temperature),
+                f"gpu{gpu.index}_utilization_step": gpu.utilization / 100,
+                f"gpu{gpu.index}_power_draw_step": float(gpu.power_draw),
+                f"gpu{gpu.index}_power_percentage_step": gpu.power_draw / gpu.power_limit,
+                f"gpu{gpu.index}_fan_speed_step": float(gpu.fan_speed) if gpu.fan_speed is not None else 0.,
+            }
+            pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
+            pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)

tensorneko-0.3.6/src/tensorneko/callback/system_stats_logger.py
@@ -0,0 +1,44 @@
+from typing import Any
+
+from lightning.pytorch import Callback, Trainer, LightningModule
+from lightning.pytorch.utilities.types import STEP_OUTPUT
+
+
+class SystemStatsLogger(Callback):
+    """Log system stats for each training epoch"""
+
+    def __init__(self, on_epoch: bool = True, on_step: bool = False):
+        try:
+            import psutil
+        except ImportError:
+            raise ImportError("psutil is required to use SystemStatsLogger")
+        self.psutil = psutil
+        self.on_epoch = on_epoch
+        self.on_step = on_step
+        assert self.on_epoch or self.on_step, "on_epoch and on_step cannot be both False"
+
+    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
+        if not self.on_epoch:
+            return
+        cpu_usage = self.psutil.cpu_percent()
+        memory_usage = self.psutil.virtual_memory().percent
+        logged_info = {
+            "cpu_usage_epoch": cpu_usage,
+            "memory_usage_epoch": memory_usage
+        }
+        pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
+        pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)
+
+    def on_train_batch_end(
+        self, trainer: Trainer, pl_module: LightningModule, outputs: STEP_OUTPUT, batch: Any, batch_idx: int
+    ) -> None:
+        if not self.on_step:
+            return
+        cpu_usage = self.psutil.cpu_percent()
+        memory_usage = self.psutil.virtual_memory().percent
+        logged_info = {
+            "cpu_usage_step": cpu_usage,
+            "memory_usage_step": memory_usage
+        }
+        pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
+        pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)
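
Both callbacks can now log at epoch end (the default) and/or per step. A minimal wiring sketch (the model and datamodule are placeholders, not from the diff; note that both callbacks read pl_module.distributed, so the module is expected to provide that attribute, as tensorneko's NekoModel does):

    from lightning.pytorch import Trainer
    from tensorneko.callback import GpuStatsLogger, SystemStatsLogger

    callbacks = [
        GpuStatsLogger(delay=0.5, on_epoch=True, on_step=False),  # requires gpumonitor
        SystemStatsLogger(on_epoch=True, on_step=True),           # requires psutil
    ]
    trainer = Trainer(max_epochs=1, callbacks=callbacks)
    # trainer.fit(model, datamodule=dm)  # model/dm are placeholders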

tensorneko-0.3.6/src/tensorneko/dataset/__init__.py
@@ -0,0 +1,11 @@
+from .round_robin_dataset import RoundRobinDataset
+from .nested_dataset import NestedDataset
+from .list_dataset import ListDataset
+from . import sampler
+
+__all__ = [
+    "RoundRobinDataset",
+    "NestedDataset",
+    "ListDataset",
+    "sampler"
+]

tensorneko-0.3.6/src/tensorneko/dataset/list_dataset.py
@@ -0,0 +1,19 @@
+from typing import List
+
+from torch.utils.data.dataset import Dataset, T_co
+
+
+class ListDataset(Dataset[T_co]):
+    """
+    A dataset wrapping a list of data.
+    """
+
+    def __init__(self, data: List[T_co]):
+        super().__init__()
+        self.data = data
+
+    def __getitem__(self, index: int) -> T_co:
+        return self.data[index]
+
+    def __len__(self):
+        return len(self.data)
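
ListDataset is a plain map-style wrapper, so it drops straight into a DataLoader; a minimal sketch:

    from torch.utils.data import DataLoader
    from tensorneko.dataset import ListDataset

    ds = ListDataset([(i, i * i) for i in range(10)])
    loader = DataLoader(ds, batch_size=4)
    for xs, ys in loader:  # default collate stacks the tuple elements into tensors
        print(xs, ys)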

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/dataset/round_robin_dataset.py
@@ -4,7 +4,7 @@ from typing import List, Optional
 from torch.utils.data import Dataset
 from torch.utils.data.dataset import T_co

-from tensorneko.util import circular_pad
+from ..util import circular_pad


 class RoundRobinDataset(Dataset[T_co]):

tensorneko-0.3.6/src/tensorneko/dataset/sampler/__init__.py
@@ -0,0 +1,5 @@
+from .sequential_iter_sampler import SequentialIterSampler
+
+__all__ = [
+    "SequentialIterSampler"
+]

tensorneko-0.3.6/src/tensorneko/dataset/sampler/sequential_iter_sampler.py
@@ -0,0 +1,26 @@
+from typing import Sized
+
+from torch.utils.data.sampler import Sampler, T_co
+
+
+class SequentialIterSampler(Sampler[T_co]):
+    """
+    Use to split the large scale data into small subsets for each epochs
+    For example, if the dataset size is 1M, and the num_samples = 1000, then each epoch will only use 1000 samples, and
+    the next epoch will use the next 1000 samples.
+    """
+
+    def __init__(self, data_source: Sized, num_samples: int):
+        super().__init__(data_source)
+        self.data_source = data_source
+        self.num_samples = num_samples
+        self.total_size = len(data_source)
+        self.current_position = 0
+
+    def __iter__(self):
+        yield from map(lambda x: x % self.total_size,
+                       range(self.current_position, self.current_position + self.num_samples))
+        self.current_position = (self.current_position + self.num_samples) % self.total_size
+
+    def __len__(self):
+        return self.num_samples
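
The sampler keeps a cursor across epochs and wraps around the dataset; a sketch of the resulting schedule (assuming a 10-item dataset):

    from torch.utils.data import DataLoader
    from tensorneko.dataset import ListDataset
    from tensorneko.dataset.sampler import SequentialIterSampler

    ds = ListDataset(list(range(10)))  # stand-in for a huge dataset
    loader = DataLoader(ds, sampler=SequentialIterSampler(ds, num_samples=4), batch_size=4)

    for epoch in range(3):
        for batch in loader:
            print(epoch, batch.tolist())
    # epoch 0 -> [0, 1, 2, 3]; epoch 1 -> [4, 5, 6, 7]; epoch 2 -> [8, 9, 0, 1]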

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/__init__.py
@@ -1,6 +1,7 @@
 from .iou import iou_1d, iou_2d
 from .psnr import psnr_video, psnr_image
 from .ssim import ssim_video, ssim_image
+from .secs import secs
 from .fid import FID

 __all__ = [
@@ -10,5 +11,6 @@ __all__ = [
     "psnr_image",
     "ssim_video",
     "ssim_image",
+    "secs",
     "FID",
 ]

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/fid.py
@@ -22,6 +22,34 @@ except ImportError:


 class FID:
+    """
+    Calculate Fréchet inception distance based on torchmetrics. Require library "torch-fidelity".
+
+    Args:
+        device (``str`` | :class:`~torch.device`, optional): Device to run the metric. Default: ``"cpu"``.
+
+    Example::
+
+        from tensorneko.evaluation import FID
+        fid = FID("cuda")
+
+        # add predicted and real images
+        fid.add_pred_image("path/to/pred/image1.png")
+        fid.add_pred_image("path/to/pred/image2.png")
+        fid.add_true_image("path/to/true/image1.png")
+        fid.add_true_image("path/to/true/image2.png")
+
+        # add predicted and real videos
+        fid.add_pred_video("path/to/pred/video1.mp4")
+        fid.add_pred_video("path/to/pred/video2.mp4")
+        fid.add_true_video("path/to/true/video1.mp4")
+        fid.add_true_video("path/to/true/video2.mp4")
+
+        # compute FID
+        fid_score = fid.compute(batch_size=128, num_workers=8, progress_bar=True)
+        print(fid_score)
+
+    """

    def __init__(self, device: Union[str, Device] = "cpu"):
        self.device = torch.device(device)
@@ -56,14 +84,14 @@ class FID:
     def cuda(self) -> FID:
         return self.to("cuda")

-    def compute(self, batch_size=128, num_workers=8, progress_bar: bool = True) -> float:
+    def compute(self, batch_size=128, num_workers=0, progress_bar: bool = False) -> float:
         pred = torch.utils.data.DataLoader(self.pred_data, batch_size=batch_size, num_workers=num_workers)
         true = torch.utils.data.DataLoader(self.true_data, batch_size=batch_size, num_workers=num_workers)

         if progress_bar:
             tqdm = import_tqdm_auto().tqdm
-            pred = tqdm(pred, desc="Forward predicted features")
-            true = tqdm(true, desc="Forward ground truth features")
+            pred = tqdm(pred, total=len(pred), desc="Forward predicted features")
+            true = tqdm(true, total=len(true), desc="Forward ground truth features")

         for batch in pred:
             self.fid.update(batch.to(self.device), real=False)
@@ -72,6 +100,11 @@ class FID:

         return self.fid.compute().item()

+    def reset(self):
+        self.pred_data = _FIDDataset()
+        self.true_data = _FIDDataset()
+        self.fid.reset()
+

 @dataclass
 class _FIDEntry:
@@ -104,6 +137,7 @@ class _FIDDataset(IterableDataset):
             raise RuntimeError("Cannot open video file.")
         n_frames = int(cap.get(self.cv2.CAP_PROP_FRAME_COUNT))
         self.length += n_frames
+        cap.release()

     @staticmethod
     def _preprocess_image(image: Tensor) -> Tensor:
@@ -130,6 +164,8 @@ class _FIDDataset(IterableDataset):
                 frame = self._preprocess_image(frame)
                 yield frame

+        cap.release()
+
     def __iter__(self):
         for entry in self.content:
             if entry.type == "image":
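
Together these changes give compute() conservative defaults (num_workers=0, no progress bar), release the OpenCV capture handles, and add reset() so one FID instance can be reused across evaluations. A sketch (paths are placeholders):

    from tensorneko.evaluation import FID

    fid = FID("cuda")
    fid.add_pred_video("pred_a.mp4")
    fid.add_true_video("true_a.mp4")
    score_a = fid.compute()

    fid.reset()  # clears the pending images/videos and the torchmetrics state
    fid.add_pred_video("pred_b.mp4")
    fid.add_true_video("true_b.mp4")
    score_b = fid.compute(progress_bar=True)  # opt back in to tqdm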

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/evaluation/iou.py
@@ -70,7 +70,7 @@ def iou_2d(proposal: Union[Tensor, ndarray], target: Union[Tensor, ndarray]) ->

    inner_x1 = torch.maximum(proposal_x1, target_x1)
    inner_y1 = torch.maximum(proposal_y1, target_y1)
-   inner_x2 = torch.minimum(proposal_x2, target_y2)
+   inner_x2 = torch.minimum(proposal_x2, target_x2)
    inner_y2 = torch.minimum(proposal_y2, target_y2)

    area_proposal = (proposal_x2 - proposal_x1) * (proposal_y2 - proposal_y1)
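
This one-character change fixes a real bug: the intersection's right edge was clamped against the target's y2 instead of its x2, so iou_2d returned wrong overlaps whenever those two coordinates differed. A quick sanity check (assuming boxes are given as [x1, y1, x2, y2] rows):

    import torch
    from tensorneko.evaluation import iou_2d

    proposal = torch.tensor([[0., 0., 2., 1.]])  # x2=2 but y2=1
    target = torch.tensor([[1., 0., 3., 1.]])
    print(iou_2d(proposal, target))  # 1x1 overlap / union 3 -> 1/3; the old code returned 0 here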

tensorneko-0.3.6/src/tensorneko/evaluation/secs.py
@@ -0,0 +1,58 @@
+from numpy import ndarray
+from torch import Tensor
+
+from tensorneko_util.util import dispatch, Eval
+
+from tensorneko_util.io import read
+
+
+@Eval.later
+def _secs_encoder():
+    from resemblyzer import VoiceEncoder
+    return VoiceEncoder()
+
+
+@dispatch
+def secs(pred: str, real: str) -> float:
+    from resemblyzer import VoiceEncoder, preprocess_wav
+    pred_audio = preprocess_wav(read.audio(pred).audio[0].numpy())
+    real_audio = preprocess_wav(read.audio(real).audio[0].numpy())
+    return _secs_compute(pred_audio, real_audio)
+
+
+@dispatch
+def secs(pred: Tensor, real: Tensor) -> float:
+    return secs(pred.numpy(), real.numpy())
+
+
+@dispatch
+def secs(pred: ndarray, real: ndarray) -> float:
+    from resemblyzer import VoiceEncoder, preprocess_wav
+    if len(pred.shape) == 2:
+        if pred.shape[0] == 1:
+            pred = pred.squeeze(0)
+        elif pred.shape[1] == 1:
+            pred = pred.squeeze(1)
+        else:
+            raise ValueError("The input audio must be mono.")
+
+    if len(real.shape) == 2:
+        if real.shape[0] == 1:
+            real = real.squeeze(0)
+        elif real.shape[1] == 1:
+            real = real.squeeze(1)
+        else:
+            raise ValueError("The input audio must be mono.")
+
+    pred_audio = preprocess_wav(pred)
+    real_audio = preprocess_wav(real)
+
+    return _secs_compute(pred_audio, real_audio)
+
+
+def _secs_compute(pred_audio: ndarray, real_audio: ndarray) -> float:
+    encoder = _secs_encoder.value
+    real_embed = encoder.embed_utterance(real_audio)
+    pred_embed = encoder.embed_utterance(pred_audio)
+
+    return float((real_embed * pred_embed).sum())
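
secs here is Speaker Encoder Cosine Similarity: both clips are embedded with resemblyzer's VoiceEncoder (built lazily via Eval.later) and the dot product of the utterance embeddings, which resemblyzer L2-normalizes, is returned. A sketch with placeholder paths (resemblyzer must be installed):

    from tensorneko.evaluation import secs

    score = secs("pred_voice.wav", "real_voice.wav")  # also accepts mono Tensor or ndarray pairs
    print(score)  # closer to 1.0 means the clips sound like the same speaker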

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/neko_trainer.py
@@ -3,16 +3,20 @@ from datetime import timedelta
 from time import time
 from typing import Optional, Union, List, Dict

+from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT
+from lightning.fabric.utilities.types import _PATH
 from lightning.pytorch import Trainer, Callback
 from lightning.pytorch.accelerators import Accelerator
 from lightning.pytorch.callbacks import ModelCheckpoint, Checkpoint
 from lightning.pytorch.loggers import Logger, TensorBoardLogger
-from lightning.pytorch.plugins import PLUGIN_INPUT
 from lightning.pytorch.profilers import Profiler
 from lightning.pytorch.strategies import Strategy
 from lightning.pytorch.trainer.connectors.accelerator_connector import _LITERAL_WARN
-from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT
-from lightning.fabric.utilities.types import _PATH
+
+try:
+    from lightning.pytorch.plugins import PLUGIN_INPUT
+except ImportError:
+    from lightning.pytorch.plugins import _PLUGIN_INPUT as PLUGIN_INPUT

 from .callback import NilCallback, LrLogger, EpochNumLogger, EpochTimeLogger, GpuStatsLogger, SystemStatsLogger


{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/__init__.py
@@ -9,7 +9,7 @@ from . import type
 from .configuration import Configuration
 from .misc import reduce_dict_by, summarize_dict_by, with_printed_shape, is_bad_num, count_parameters, compose, \
     generate_inf_seq, listdir, with_printed, ifelse, dict_add, as_list, identity, list_to_dict, circular_pad, \
-    load_py, try_until_success
+    load_py, try_until_success, sample_indexes
 from .misc import get_tensorneko_path
 from .dispatched_misc import sparse2binary, binary2sparse
 from .reproducibility import Seed
@@ -71,6 +71,7 @@ __all__ = [
     "circular_pad",
     "load_py",
     "try_until_success",
+    "sample_indexes",
     "download_file",
     "WindowMerger",
 ]

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/configuration.py
@@ -1,10 +1,12 @@
 from __future__ import annotations

 from abc import ABC, abstractmethod
-from typing import Any
+from typing import Generic

+from tensorneko_util.util.type import T

-class Configuration(ABC):
+
+class Configuration(ABC, Generic[T]):
     """
     Configuration base abstract class.

@@ -55,7 +57,7 @@ class Configuration(ABC):
         return iter((*self.args, *self.kwargs.values()))

     @abstractmethod
-    def build(self) -> Any:
+    def build(self) -> T:
         """
         A method to build an object.

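Configuration is now generic in its build() result, so subclasses can expose a typed build. A hypothetical subclass (the Linear example and the constructor call are illustrative assumptions, not from the diff):

    from torch.nn import Linear
    from tensorneko.util import Configuration

    class LinearConfig(Configuration[Linear]):
        def build(self) -> Linear:
            # self.args / self.kwargs are held by the Configuration base class
            return Linear(*self.args, **self.kwargs)

    layer = LinearConfig(16, 32, bias=True).build()  # assuming the base __init__ stores *args/**kwargs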

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/misc.py
@@ -9,7 +9,7 @@ from torch import Tensor
 from torch.nn import Module

 from tensorneko_util.util.misc import generate_inf_seq, listdir, with_printed, ifelse, dict_add, as_list, \
-    identity, list_to_dict, compose, circular_pad, load_py, try_until_success
+    identity, list_to_dict, compose, circular_pad, load_py, try_until_success, sample_indexes
 from .type import T, A

@@ -165,3 +165,4 @@ list_to_dict = list_to_dict
 circular_pad = circular_pad
 load_py = load_py
 try_until_success = try_until_success
+sample_indexes = sample_indexes

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko/util/type.py
@@ -1,4 +1,3 @@
-from enum import Enum
 from typing import Callable, Union, List, Tuple, TypeVar

 import numpy as np

tensorneko-0.3.6/src/tensorneko/version.txt
@@ -0,0 +1 @@
+0.3.6

{tensorneko-0.3.4 → tensorneko-0.3.6/src/tensorneko.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tensorneko
-Version: 0.3.4
+Version: 0.3.6
 Summary: Tensor Neural Engine Kompanion. An util library based on PyTorch and PyTorch Lightning.
 Home-page: https://github.com/ControlNet/tensorneko
 Author: ControlNet
@@ -38,7 +38,7 @@ License-File: LICENSE
 <div align="center">
 <a href="https://www.python.org/"><img src="https://img.shields.io/pypi/pyversions/tensorneko?style=flat-square"></a>
 <a href="https://pytorch.org/"><img src="https://img.shields.io/badge/PyTorch-%3E%3D1.9.0-EE4C2C?style=flat-square&logo=pytorch"></a>
-<a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*-792EE5?style=flat-square&logo=lightning"></a>
+<a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*%20|%202.1.*-792EE5?style=flat-square&logo=lightning"></a>
 </div>

 <div align="center">

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko.egg-info/SOURCES.txt
@@ -13,6 +13,7 @@ src/tensorneko.egg-info/requires.txt
 src/tensorneko.egg-info/top_level.txt
 src/tensorneko/arch/__init__.py
 src/tensorneko/arch/auto_encoder.py
+src/tensorneko/arch/binary_classifier.py
 src/tensorneko/arch/gan.py
 src/tensorneko/arch/vqvae.py
 src/tensorneko/arch/wgan.py
@@ -27,14 +28,18 @@ src/tensorneko/callback/lr_logger.py
 src/tensorneko/callback/nil_callback.py
 src/tensorneko/callback/system_stats_logger.py
 src/tensorneko/dataset/__init__.py
+src/tensorneko/dataset/list_dataset.py
 src/tensorneko/dataset/nested_dataset.py
 src/tensorneko/dataset/round_robin_dataset.py
+src/tensorneko/dataset/sampler/__init__.py
+src/tensorneko/dataset/sampler/sequential_iter_sampler.py
 src/tensorneko/debug/__init__.py
 src/tensorneko/evaluation/__init__.py
 src/tensorneko/evaluation/enum.py
 src/tensorneko/evaluation/fid.py
 src/tensorneko/evaluation/iou.py
 src/tensorneko/evaluation/psnr.py
+src/tensorneko/evaluation/secs.py
 src/tensorneko/evaluation/ssim.py
 src/tensorneko/io/__init__.py
 src/tensorneko/io/reader.py

{tensorneko-0.3.4 → tensorneko-0.3.6}/src/tensorneko.egg-info/requires.txt
@@ -3,12 +3,12 @@ torchaudio>=0.9.0
 torchvision>=0.10.0
 torchmetrics>=0.7.3
 tensorboard>=2.0.0
-lightning==2.0.*
+lightning<2.2,>=2.0
 pillow>=8.1
 av>=8.0.3
 numpy>=1.20.1
 einops>=0.3.0
-tensorneko_util==0.3.4
+tensorneko_util==0.3.6

 [:platform_system == "Windows"]
 pysoundfile>=0.9.0

tensorneko-0.3.4/src/tensorneko/callback/gpu_stats_logger.py
@@ -1,31 +0,0 @@
-from lightning.pytorch import Callback, Trainer, LightningModule
-
-
-class GpuStatsLogger(Callback):
-    """Log GPU stats for each training epoch"""
-
-    def __init__(self, delay: float = 0.5):
-        try:
-            from gpumonitor.monitor import GPUStatMonitor
-        except ImportError:
-            raise ImportError("gpumonitor is required to use GPUStatsLogger")
-
-        self.monitor = GPUStatMonitor(delay=delay)
-
-    def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
-        self.monitor.reset()
-
-    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
-        for gpu in self.monitor.average_stats.gpus:
-            logged_info = {
-                f"gpu{gpu.index}_memory_used": gpu.memory_used / 1024,
-                f"gpu{gpu.index}_memory_total": gpu.memory_total / 1024,
-                f"gpu{gpu.index}_memory_util": gpu.memory_used / gpu.memory_total,
-                f"gpu{gpu.index}_temperature": float(gpu.temperature),
-                f"gpu{gpu.index}_utilization": gpu.utilization / 100,
-                f"gpu{gpu.index}_power_draw": float(gpu.power_draw),
-                f"gpu{gpu.index}_power_percentage": gpu.power_draw / gpu.power_limit,
-                f"gpu{gpu.index}_fan_speed": float(gpu.fan_speed) if gpu.fan_speed is not None else 0.,
-            }
-            pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
-            pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)

tensorneko-0.3.4/src/tensorneko/callback/system_stats_logger.py
@@ -1,22 +0,0 @@
-from lightning.pytorch import Callback, Trainer, LightningModule
-
-
-class SystemStatsLogger(Callback):
-    """Log system stats for each training epoch"""
-
-    def __init__(self):
-        try:
-            import psutil
-        except ImportError:
-            raise ImportError("psutil is required to use SystemStatsLogger")
-        self.psutil = psutil
-
-    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
-        cpu_usage = self.psutil.cpu_percent()
-        memory_usage = self.psutil.virtual_memory().percent
-        logged_info = {
-            "cpu_usage": cpu_usage,
-            "memory_usage": memory_usage
-        }
-        pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
-        pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)

tensorneko-0.3.4/src/tensorneko/dataset/__init__.py
@@ -1,5 +0,0 @@
-from .round_robin_dataset import RoundRobinDataset
-
-__all__ = [
-    "RoundRobinDataset",
-]

tensorneko-0.3.4/src/tensorneko/version.txt
@@ -1 +0,0 @@
-0.3.4