tensorneko-0.3.5-py3-none-any.whl → tensorneko-0.3.6-py3-none-any.whl

This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
tensorneko/arch/binary_classifier.py CHANGED
@@ -1,18 +1,17 @@
-from abc import ABC
-from typing import Optional, Union, Sequence, Dict
+from typing import Optional, Union, Sequence, Dict, Any
 
 from torch import Tensor
-from torch.nn import BCEWithLogitsLoss
+from torch.nn import BCEWithLogitsLoss, Module
 from torch.optim import Adam
 from torchmetrics import Accuracy, F1Score, AUROC
 
 from ..neko_model import NekoModel
 
 
-class BinaryClassifier(NekoModel, ABC):
+class BinaryClassifier(NekoModel):
 
-    def __init__(self, model=None, learning_rate: float = 1e-4, distributed: bool = False):
-        super().__init__()
+    def __init__(self, name, model: Module, learning_rate: float = 1e-4, distributed: bool = False):
+        super().__init__(name)
         self.save_hyperparameters()
         self.model = model
         self.learning_rate = learning_rate
@@ -23,8 +22,10 @@ class BinaryClassifier(NekoModel, ABC):
         self.auc_fn = AUROC(task="binary")
 
     @classmethod
-    def from_module(cls, model, learning_rate: float = 1e-4, distributed=False):
-        return cls(model, learning_rate, distributed)
+    def from_module(cls, model: Module, learning_rate: float = 1e-4, name: str = "binary_classifier",
+        distributed: bool = False
+    ):
+        return cls(name, model, learning_rate, distributed)
 
     def forward(self, x):
         return self.model(x)
@@ -49,6 +50,10 @@ class BinaryClassifier(NekoModel, ABC):
     ) -> Dict[str, Tensor]:
         return self.step(batch)
 
+    def predict_step(self, batch: Tensor, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:
+        x, y = batch
+        return self(x)
+
     def configure_optimizers(self):
         optimizer = Adam(self.parameters(), lr=self.learning_rate)
         return [optimizer]
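A minimal usage sketch for the updated constructor and classmethod, assuming BinaryClassifier is exposed from tensorneko.arch; the Sequential backbone below is purely illustrative:

    import torch.nn as nn
    from tensorneko.arch import BinaryClassifier

    backbone = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 1))  # hypothetical single-logit backbone
    # the constructor now takes a model name first and requires a concrete Module
    clf = BinaryClassifier("binary_classifier", backbone, learning_rate=1e-4)
    # from_module keeps the model as the first argument and accepts the name as a keyword
    clf = BinaryClassifier.from_module(backbone, learning_rate=1e-4, name="binary_classifier")

The added predict_step unpacks each predict batch as an (x, y) pair and returns the raw model output for x, so Trainer.predict works directly on labelled dataloaders.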
tensorneko/callback/gpu_stats_logger.py CHANGED
@@ -1,31 +1,68 @@
+from typing import Any
+
 from lightning.pytorch import Callback, Trainer, LightningModule
+from lightning.pytorch.utilities.types import STEP_OUTPUT
 
 
 class GpuStatsLogger(Callback):
     """Log GPU stats for each training epoch"""
 
-    def __init__(self, delay: float = 0.5):
+    def __init__(self, delay: float = 0.5, on_epoch: bool = True, on_step: bool = False):
         try:
             from gpumonitor.monitor import GPUStatMonitor
         except ImportError:
             raise ImportError("gpumonitor is required to use GPUStatsLogger")
 
-        self.monitor = GPUStatMonitor(delay=delay)
+        self.monitor_epoch = GPUStatMonitor(delay=delay) if on_epoch else None
+        self.monitor_step = GPUStatMonitor(delay=delay) if on_step else None
+        self.on_epoch = on_epoch
+        self.on_step = on_step
+        assert self.on_epoch or self.on_step, "on_epoch and on_step cannot be both False"
 
     def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
-        self.monitor.reset()
+        if not self.on_epoch:
+            return
+        self.monitor_epoch.reset()
 
     def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
-        for gpu in self.monitor.average_stats.gpus:
+        if not self.on_epoch:
+            return
+        for gpu in self.monitor_epoch.average_stats.gpus:
+            logged_info = {
+                f"gpu{gpu.index}_memory_used_epoch": gpu.memory_used / 1024,
+                f"gpu{gpu.index}_memory_total_epoch": gpu.memory_total / 1024,
+                f"gpu{gpu.index}_memory_util_epoch": gpu.memory_used / gpu.memory_total,
+                f"gpu{gpu.index}_temperature_epoch": float(gpu.temperature),
+                f"gpu{gpu.index}_utilization_epoch": gpu.utilization / 100,
+                f"gpu{gpu.index}_power_draw_epoch": float(gpu.power_draw),
+                f"gpu{gpu.index}_power_percentage_epoch": gpu.power_draw / gpu.power_limit,
+                f"gpu{gpu.index}_fan_speed_epoch": float(gpu.fan_speed) if gpu.fan_speed is not None else 0.,
+            }
+            pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
+            pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)
+
+    def on_train_batch_start(
+        self, trainer: Trainer, pl_module: LightningModule, batch: Any, batch_idx: int
+    ) -> None:
+        if not self.on_step:
+            return
+        self.monitor_step.reset()
+
+    def on_train_batch_end(
+        self, trainer: Trainer, pl_module: LightningModule, outputs: STEP_OUTPUT, batch: Any, batch_idx: int
+    ) -> None:
+        if not self.on_step:
+            return
+        for gpu in self.monitor_step.average_stats.gpus:
             logged_info = {
-                f"gpu{gpu.index}_memory_used": gpu.memory_used / 1024,
-                f"gpu{gpu.index}_memory_total": gpu.memory_total / 1024,
-                f"gpu{gpu.index}_memory_util": gpu.memory_used / gpu.memory_total,
-                f"gpu{gpu.index}_temperature": float(gpu.temperature),
-                f"gpu{gpu.index}_utilization": gpu.utilization / 100,
-                f"gpu{gpu.index}_power_draw": float(gpu.power_draw),
-                f"gpu{gpu.index}_power_percentage": gpu.power_draw / gpu.power_limit,
-                f"gpu{gpu.index}_fan_speed": float(gpu.fan_speed) if gpu.fan_speed is not None else 0.,
+                f"gpu{gpu.index}_memory_used_step": gpu.memory_used / 1024,
+                f"gpu{gpu.index}_memory_total_step": gpu.memory_total / 1024,
+                f"gpu{gpu.index}_memory_util_step": gpu.memory_used / gpu.memory_total,
+                f"gpu{gpu.index}_temperature_step": float(gpu.temperature),
+                f"gpu{gpu.index}_utilization_step": gpu.utilization / 100,
+                f"gpu{gpu.index}_power_draw_step": float(gpu.power_draw),
+                f"gpu{gpu.index}_power_percentage_step": gpu.power_draw / gpu.power_limit,
+                f"gpu{gpu.index}_fan_speed_step": float(gpu.fan_speed) if gpu.fan_speed is not None else 0.,
             }
             pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
             pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)
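A usage sketch for the extended logger, assuming gpumonitor is installed; GpuStatsLogger is re-exported from tensorneko.callback, as the neko_trainer.py import further below shows:

    from lightning.pytorch import Trainer
    from tensorneko.callback import GpuStatsLogger

    # average GPU stats per epoch and per training batch; metric keys get _epoch / _step suffixes
    gpu_logger = GpuStatsLogger(delay=0.5, on_epoch=True, on_step=True)
    trainer = Trainer(max_epochs=1, callbacks=[gpu_logger])

Both code paths log through pl_module.log_dict(..., sync_dist=pl_module.distributed), so the attached LightningModule is expected to expose a distributed attribute, as the NekoModel-based classes in this package do.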
tensorneko/callback/system_stats_logger.py CHANGED
@@ -1,22 +1,44 @@
+from typing import Any
+
 from lightning.pytorch import Callback, Trainer, LightningModule
+from lightning.pytorch.utilities.types import STEP_OUTPUT
 
 
 class SystemStatsLogger(Callback):
     """Log system stats for each training epoch"""
 
-    def __init__(self):
+    def __init__(self, on_epoch: bool = True, on_step: bool = False):
         try:
             import psutil
         except ImportError:
             raise ImportError("psutil is required to use SystemStatsLogger")
         self.psutil = psutil
+        self.on_epoch = on_epoch
+        self.on_step = on_step
+        assert self.on_epoch or self.on_step, "on_epoch and on_step cannot be both False"
 
     def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
+        if not self.on_epoch:
+            return
+        cpu_usage = self.psutil.cpu_percent()
+        memory_usage = self.psutil.virtual_memory().percent
+        logged_info = {
+            "cpu_usage_epoch": cpu_usage,
+            "memory_usage_epoch": memory_usage
+        }
+        pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
+        pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)
+
+    def on_train_batch_end(
+        self, trainer: Trainer, pl_module: LightningModule, outputs: STEP_OUTPUT, batch: Any, batch_idx: int
+    ) -> None:
+        if not self.on_step:
+            return
         cpu_usage = self.psutil.cpu_percent()
         memory_usage = self.psutil.virtual_memory().percent
         logged_info = {
-            "cpu_usage": cpu_usage,
-            "memory_usage": memory_usage
+            "cpu_usage_step": cpu_usage,
+            "memory_usage_step": memory_usage
         }
         pl_module.logger.log_metrics(logged_info, step=trainer.global_step)
         pl_module.log_dict(logged_info, logger=False, sync_dist=pl_module.distributed)
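SystemStatsLogger follows the same pattern; a short sketch assuming psutil is installed:

    from lightning.pytorch import Trainer
    from tensorneko.callback import SystemStatsLogger

    # CPU and memory usage, logged per epoch and per training batch with _epoch / _step suffixes
    sys_logger = SystemStatsLogger(on_epoch=True, on_step=True)
    trainer = Trainer(max_epochs=1, callbacks=[sys_logger])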
tensorneko/evaluation/fid.py CHANGED
@@ -90,8 +90,8 @@ class FID:
 
         if progress_bar:
             tqdm = import_tqdm_auto().tqdm
-            pred = tqdm(total=len(pred), desc="Forward predicted features")
-            true = tqdm(total=len(true), desc="Forward ground truth features")
+            pred = tqdm(pred, total=len(pred), desc="Forward predicted features")
+            true = tqdm(true, total=len(true), desc="Forward ground truth features")
 
         for batch in pred:
             self.fid.update(batch.to(self.device), real=False)
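This change fixes the progress-bar wrapping: tqdm(total=...) alone creates a bare manual bar and drops the underlying batches, so the following for batch in pred loop had no data to iterate; passing the iterable as the first argument keeps the loop working while the bar advances. A generic illustration of the two call styles:

    from tqdm.auto import tqdm

    batches = [1, 2, 3]
    bar = tqdm(total=len(batches))               # manual bar only, not tied to the data
    bar.close()
    wrapped = tqdm(batches, total=len(batches))  # wraps the data and advances as it is consumed
    for batch in wrapped:
        pass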
tensorneko/neko_trainer.py CHANGED
@@ -3,16 +3,20 @@ from datetime import timedelta
 from time import time
 from typing import Optional, Union, List, Dict
 
+from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT
+from lightning.fabric.utilities.types import _PATH
 from lightning.pytorch import Trainer, Callback
 from lightning.pytorch.accelerators import Accelerator
 from lightning.pytorch.callbacks import ModelCheckpoint, Checkpoint
 from lightning.pytorch.loggers import Logger, TensorBoardLogger
-from lightning.pytorch.plugins import PLUGIN_INPUT
 from lightning.pytorch.profilers import Profiler
 from lightning.pytorch.strategies import Strategy
 from lightning.pytorch.trainer.connectors.accelerator_connector import _LITERAL_WARN
-from lightning.fabric.plugins.precision.precision import _PRECISION_INPUT
-from lightning.fabric.utilities.types import _PATH
+
+try:
+    from lightning.pytorch.plugins import PLUGIN_INPUT
+except ImportError:
+    from lightning.pytorch.plugins import _PLUGIN_INPUT as PLUGIN_INPUT
 
 from .callback import NilCallback, LrLogger, EpochNumLogger, EpochTimeLogger, GpuStatsLogger, SystemStatsLogger
 
tensorneko/util/__init__.py CHANGED
@@ -9,7 +9,7 @@ from . import type
 from .configuration import Configuration
 from .misc import reduce_dict_by, summarize_dict_by, with_printed_shape, is_bad_num, count_parameters, compose, \
     generate_inf_seq, listdir, with_printed, ifelse, dict_add, as_list, identity, list_to_dict, circular_pad, \
-    load_py, try_until_success
+    load_py, try_until_success, sample_indexes
 from .misc import get_tensorneko_path
 from .dispatched_misc import sparse2binary, binary2sparse
 from .reproducibility import Seed
@@ -71,6 +71,7 @@ __all__ = [
     "circular_pad",
     "load_py",
     "try_until_success",
+    "sample_indexes",
     "download_file",
     "WindowMerger",
 ]
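The newly exported helper is importable from the package-level util module; a minimal sketch (its implementation lives in tensorneko_util and is unchanged here):

    from tensorneko.util import sample_indexes            # package-level re-export
    from tensorneko_util.util.misc import sample_indexes  # original definition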
tensorneko/util/configuration.py CHANGED
@@ -1,10 +1,12 @@
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
-from typing import Any
+from typing import Generic
 
+from tensorneko_util.util.type import T
 
-class Configuration(ABC):
+
+class Configuration(ABC, Generic[T]):
     """
     Configuration base abstract class.
 
@@ -55,7 +57,7 @@ class Configuration(ABC):
         return iter((*self.args, *self.kwargs.values()))
 
     @abstractmethod
-    def build(self) -> Any:
+    def build(self) -> T:
        """
        A method to build an object.
 
tensorneko/util/misc.py CHANGED
@@ -9,7 +9,7 @@ from torch import Tensor
 from torch.nn import Module
 
 from tensorneko_util.util.misc import generate_inf_seq, listdir, with_printed, ifelse, dict_add, as_list, \
-    identity, list_to_dict, compose, circular_pad, load_py, try_until_success
+    identity, list_to_dict, compose, circular_pad, load_py, try_until_success, sample_indexes
 from .type import T, A
 
 
@@ -165,3 +165,4 @@ list_to_dict = list_to_dict
 circular_pad = circular_pad
 load_py = load_py
 try_until_success = try_until_success
+sample_indexes = sample_indexes
tensorneko/util/type.py CHANGED
@@ -1,4 +1,3 @@
-from enum import Enum
 from typing import Callable, Union, List, Tuple, TypeVar
 
 import numpy as np
tensorneko/version.txt CHANGED
@@ -1 +1 @@
-0.3.5
+0.3.6
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: tensorneko
-Version: 0.3.5
+Version: 0.3.6
 Summary: Tensor Neural Engine Kompanion. An util library based on PyTorch and PyTorch Lightning.
 Home-page: https://github.com/ControlNet/tensorneko
 Author: ControlNet
@@ -33,7 +33,7 @@ Requires-Dist: pillow >=8.1
 Requires-Dist: av >=8.0.3
 Requires-Dist: numpy >=1.20.1
 Requires-Dist: einops >=0.3.0
-Requires-Dist: tensorneko-util ==0.3.5
+Requires-Dist: tensorneko-util ==0.3.6
 Requires-Dist: pysoundfile >=0.9.0 ; platform_system == "Windows"
 
 <h1 style="text-align: center">TensorNeko</h1>
@@ -50,7 +50,7 @@ Requires-Dist: pysoundfile >=0.9.0 ; platform_system == "Windows"
 <div align="center">
     <a href="https://www.python.org/"><img src="https://img.shields.io/pypi/pyversions/tensorneko?style=flat-square"></a>
     <a href="https://pytorch.org/"><img src="https://img.shields.io/badge/PyTorch-%3E%3D1.9.0-EE4C2C?style=flat-square&logo=pytorch"></a>
-    <a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*/2.1.*-792EE5?style=flat-square&logo=lightning"></a>
+    <a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*%20|%202.1.*-792EE5?style=flat-square&logo=lightning"></a>
 </div>
 
 <div align="center">
@@ -1,11 +1,11 @@
 tensorneko/__init__.py,sha256=VPPK00Kduwi84QHnZKBZm8kBRdnPAji6f7J-adYAp_Y,770
 tensorneko/neko_model.py,sha256=5ZE4Kh8pFBcdrq0uERZTawE3PDP9jokD2QapC2s8-gc,10145
 tensorneko/neko_module.py,sha256=qELXvguSjWo_NvcRQibiFl0Qauzd9JWLSnT4dbGNS3Y,1473
-tensorneko/neko_trainer.py,sha256=GAKaXXvh3Lpej8JxjAm0zzZ8eOsyto2FFxv955dxGt4,9972
-tensorneko/version.txt,sha256=chs29ukegsplmSIlFfRBz4srybz6fhCYDk5F04KGouQ,5
+tensorneko/neko_trainer.py,sha256=JC8qoKSZ5ngz3grf3S0SjvIFVktDIP_GExth5aFfbGA,10074
+tensorneko/version.txt,sha256=c0yfowy-iqAhxsKbKQbn6GFeVjHOyTqrhcrsDPFq_Ok,5
 tensorneko/arch/__init__.py,sha256=w4lTUeyBIZelrnSjlBFWUF0erzOmBFl9FqeWQuSOyKs,248
 tensorneko/arch/auto_encoder.py,sha256=j6PWWyaNYaYNtw_zZ9ikzhCASqe9viXR3JGBIXSK92Y,2137
-tensorneko/arch/binary_classifier.py,sha256=CjBpxMHmubFHjdYhlt6XwoEsSNEMQ8p4s5BXl9ybxQo,1941
+tensorneko/arch/binary_classifier.py,sha256=x3fQxQ0igyQ48mPB6vjPcDI38Q9WsbBJ--eOpldzgeI,2159
 tensorneko/arch/gan.py,sha256=ZAw6bNBXuTWmmC5rKpa7jgMisfpX-ti7gzyYkkh0_ls,7205
 tensorneko/arch/vqvae.py,sha256=02bHKJljBg6DTUfghxS3k-T5nOgYknhDU1Em0nirsj0,3730
 tensorneko/arch/wgan.py,sha256=k88x3ZtqqqKc0pIv6hiVqhpq-SitrVxrl7q8Etyqmpo,4712
@@ -15,10 +15,10 @@ tensorneko/callback/display_metrics_callback.py,sha256=qzhHcb68B7o9byfD1ZqEitSVk
 tensorneko/callback/earlystop_lr.py,sha256=mL27eghHXigohA3FZgcy7vObxXiaqHHoYkzw7dsgLf8,1181
 tensorneko/callback/epoch_num_logger.py,sha256=54BzG_Ez7hZHgx9Xvc4c42lMs9oppZivaEZIzIzaiYA,456
 tensorneko/callback/epoch_time_logger.py,sha256=PwOFvlkYk1mkGzpBF__FMzNMGPyKaZRyCQN2DeG0kMQ,645
-tensorneko/callback/gpu_stats_logger.py,sha256=Qe0sDvV4KhS1IeXx2ushkpu1mlYJByXq0lydMvJTyZk,1522
+tensorneko/callback/gpu_stats_logger.py,sha256=vhX0uElDEDeoNrTl-hYdKhfMkE7pw4rdhmw6d8DEfeo,3420
 tensorneko/callback/lr_logger.py,sha256=4nC_teyCX3wmlELrJPq3TGrt2KssRpmgDRyep0h2J2c,605
 tensorneko/callback/nil_callback.py,sha256=-vKhOG3Ysv_ZToOdyYEkcZ8h0so9rBRY10f1OIoHeZs,131
-tensorneko/callback/system_stats_logger.py,sha256=ZrfO67E2FclPpKAcgIz5cPqzQ1tAU7_rfzsvZLcIats,820
+tensorneko/callback/system_stats_logger.py,sha256=RGiVz24N9P0I_-M9gmouGNjwvLwmFZd_ifSCsw-yZvc,1754
 tensorneko/dataset/__init__.py,sha256=6980ci9Ce57HSyhzrKMJfDz31PCQxifVz1aSf63JEsA,247
 tensorneko/dataset/list_dataset.py,sha256=oo_cbGJHRlNG-6HyDsc-fqcexpSyRJLZNQb5Hs5Tfjc,396
 tensorneko/dataset/nested_dataset.py,sha256=qUwyEmEcvSoCWkGfg_9m8liaHPVcFzX50mCq64iUsRo,942
@@ -28,7 +28,7 @@ tensorneko/dataset/sampler/sequential_iter_sampler.py,sha256=cx76cZjnV2Hk80Urc6L
 tensorneko/debug/__init__.py,sha256=ZMfU3qquhMhl6EgPzM7Yuvvv0PWy3cR39UjPrrSmQcs,163
 tensorneko/evaluation/__init__.py,sha256=jW8dh1JRMpx3npjTp7wJLzz-IxFZTBh7F-Ztfoep9xs,296
 tensorneko/evaluation/enum.py,sha256=s3P8XAobku-as4in5vh6BanvVW5Ccwnff0t124lVFFg,137
-tensorneko/evaluation/fid.py,sha256=5Tuk1WtxVHWvsNhJGg03_6dQg4pJ6FGx0HuBpxc113E,5538
+tensorneko/evaluation/fid.py,sha256=mDsgh7Ge7K8KrOLeWnSEVzzKfdCK0cI9TAWJJd5eqcQ,5550
 tensorneko/evaluation/iou.py,sha256=phEmOWQ3cnWW377WeSHCoB8mGkHLHMHCl8_LL0IX3JA,2914
 tensorneko/evaluation/psnr.py,sha256=DeKxvY_xxawWMXHY0z3Nvbsi4dR57OUV4hjtUoCINXc,3757
 tensorneko/evaluation/secs.py,sha256=D710GgcSxQgbGyPcWlC5ffF5n1GselLrUr5aA5Vq7oE,1622
@@ -69,20 +69,20 @@ tensorneko/preprocess/enum.py,sha256=Wp5qFaUjea5XU4o3N0WxUd-qfzI-m5vr4ZWSqWjELb4
 tensorneko/preprocess/pad.py,sha256=b4IbbhGNRotZ7weZcKA7hfDqSixPo5KjM6khnqzaeUA,3238
 tensorneko/preprocess/resize.py,sha256=hitMlzVnN6n_8nEJwxy4C4ErZrTwpM86QGnYewsrmf8,3469
 tensorneko/preprocess/face_detector/__init__.py,sha256=_ktIfUZqGTX0hk7RBgKf-zHwG2n9KRH4RS7rjuOI8Bo,262
-tensorneko/util/__init__.py,sha256=-9IcgZG6JLmFVoTMj3oIjGLkKILUbpCSFeWhjZStzIY,2006
-tensorneko/util/configuration.py,sha256=uLwcx88_AinzTD5DOrEqD-fLihM1Bf2vy_MJlf1rdAM,2455
+tensorneko/util/__init__.py,sha256=OviNLx2sXtQh6-mqjFEoph40UVhG_dnqAfGVcmrq7K4,2044
+tensorneko/util/configuration.py,sha256=xXeAjDh1FCNTmSPwDdkL-uH-ULfzFF6Fg0LT7gsZ6nQ,2510
 tensorneko/util/dispatched_misc.py,sha256=_0Go7XezdYB7bpMnCs1MDD_6mPNoWP5qt8DoKuPxynI,997
-tensorneko/util/misc.py,sha256=6nNfDR19zgL6MJPNj3FFQiGLWB4_RSLLgK0wG8t28Bw,4653
+tensorneko/util/misc.py,sha256=LEvACtGDOX43iK86A8-Cek0S9rbXFR0AtTP1edE3XDI,4701
 tensorneko/util/reproducibility.py,sha256=sw1vVi7VOnmzQYUocI5x9yKeZoHHiA4A5ja136XolrI,2102
 tensorneko/util/string_getter.py,sha256=Cq2mDYr3q758xJ9OBTwLDf-b6EMSYwlnNB0-kfsElfs,2491
-tensorneko/util/type.py,sha256=7egC8KDOIYYeCmeySjBwImUi-jEFiE7-5QS5Bp5Lqmc,738
+tensorneko/util/type.py,sha256=IaLpRQ5l8Ci6FZaGRohIb1ygrnJ3NTalomxDbhz68VM,716
 tensorneko/visualization/__init__.py,sha256=PuNMhLz3oosY39AmKUr0biIgjfc_G_rQzp960me08Fg,626
 tensorneko/visualization/log_graph.py,sha256=NvOwWVc_petXWYdgaHosPFLa43sHBeacbYcfNtdRQg4,1511
 tensorneko/visualization/matplotlib.py,sha256=xs9Ssc44ojZX65QU8-fftA7Ug_pBuZ3TBtM8vETNq9w,1568
 tensorneko/visualization/image_browser/__init__.py,sha256=AtykhAE3bXQS6SOWbeYFeeUE9ts9XOFMvrL31z0LoMg,63
 tensorneko/visualization/watcher/__init__.py,sha256=Nq752qIYvfRUZ8VctKQRSqhxh5KmFbWcqPfZlijVx6s,379
-tensorneko-0.3.5.dist-info/LICENSE,sha256=Vd75kwgJpVuMnCRBWasQzceMlXt4YQL13ikBLy8G5h0,1067
-tensorneko-0.3.5.dist-info/METADATA,sha256=Hdr3EdQz3wBl6qnUU_KiNUDJe-GZrnwm4MDsQZ8HlMw,18886
-tensorneko-0.3.5.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
-tensorneko-0.3.5.dist-info/top_level.txt,sha256=sZHwlP0iyk7_zHuhRHzSBkdY9yEgyC48f6UVuZ6CvqE,11
-tensorneko-0.3.5.dist-info/RECORD,,
+tensorneko-0.3.6.dist-info/LICENSE,sha256=Vd75kwgJpVuMnCRBWasQzceMlXt4YQL13ikBLy8G5h0,1067
+tensorneko-0.3.6.dist-info/METADATA,sha256=hjmVydZW60cgUcxWrx07q9ohUPwcfl7Yl-aZmCuiI3o,18892
+tensorneko-0.3.6.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+tensorneko-0.3.6.dist-info/top_level.txt,sha256=sZHwlP0iyk7_zHuhRHzSBkdY9yEgyC48f6UVuZ6CvqE,11
+tensorneko-0.3.6.dist-info/RECORD,,
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: bdist_wheel (0.41.3)
+Generator: bdist_wheel (0.42.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 