tensorneko 0.3.3__py3-none-any.whl → 0.3.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,10 +2,12 @@ from .gan import GAN
2
2
  from .vqvae import VQVAE
3
3
  from .wgan import WGAN
4
4
  from .auto_encoder import AutoEncoder
5
+ from .binary_classifier import BinaryClassifier
5
6
 
6
7
  __all__ = [
7
8
  "GAN",
8
9
  "VQVAE",
9
10
  "WGAN",
10
- "AutoEncoder"
11
+ "AutoEncoder",
12
+ "BinaryClassifier",
11
13
  ]
@@ -0,0 +1,54 @@
1
+ from abc import ABC
2
+ from typing import Optional, Union, Sequence, Dict
3
+
4
+ from torch import Tensor
5
+ from torch.nn import BCEWithLogitsLoss
6
+ from torch.optim import Adam
7
+ from torchmetrics import Accuracy, F1Score, AUROC
8
+
9
+ from ..neko_model import NekoModel
10
+
11
+
12
+ class BinaryClassifier(NekoModel, ABC):
13
+
14
+ def __init__(self, model=None, learning_rate: float = 1e-4, distributed: bool = False):
15
+ super().__init__()
16
+ self.save_hyperparameters()
17
+ self.model = model
18
+ self.learning_rate = learning_rate
19
+ self.distributed = distributed
20
+ self.loss_fn = BCEWithLogitsLoss()
21
+ self.acc_fn = Accuracy(task="binary")
22
+ self.f1_fn = F1Score(task="binary")
23
+ self.auc_fn = AUROC(task="binary")
24
+
25
+ @classmethod
26
+ def from_module(cls, model, learning_rate: float = 1e-4, distributed=False):
27
+ return cls(model, learning_rate, distributed)
28
+
29
+ def forward(self, x):
30
+ return self.model(x)
31
+
32
+ def step(self, batch: Optional[Union[Tensor, Sequence[Tensor]]]) -> Dict[str, Tensor]:
33
+ x, y = batch
34
+ y_hat = self(x).squeeze(1)
35
+ loss = self.loss_fn(y_hat, y)
36
+ prob = y_hat.sigmoid()
37
+ acc = self.acc_fn(prob, y)
38
+ f1 = self.f1_fn(prob, y)
39
+ auc = self.auc_fn(prob, y)
40
+ return {"loss": loss, "acc": acc, "f1": f1, "auc": auc}
41
+
42
+ def training_step(self, batch: Optional[Union[Tensor, Sequence[Tensor]]] = None, batch_idx: Optional[int] = None,
43
+ optimizer_idx: Optional[int] = None, hiddens: Optional[Tensor] = None
44
+ ) -> Dict[str, Tensor]:
45
+ return self.step(batch)
46
+
47
+ def validation_step(self, batch: Optional[Union[Tensor, Sequence[Tensor]]] = None, batch_idx: Optional[int] = None,
48
+ dataloader_idx: Optional[int] = None
49
+ ) -> Dict[str, Tensor]:
50
+ return self.step(batch)
51
+
52
+ def configure_optimizers(self):
53
+ optimizer = Adam(self.parameters(), lr=self.learning_rate)
54
+ return [optimizer]
@@ -1,5 +1,11 @@
1
1
  from .round_robin_dataset import RoundRobinDataset
2
+ from .nested_dataset import NestedDataset
3
+ from .list_dataset import ListDataset
4
+ from . import sampler
2
5
 
3
6
  __all__ = [
4
7
  "RoundRobinDataset",
8
+ "NestedDataset",
9
+ "ListDataset",
10
+ "sampler"
5
11
  ]
@@ -0,0 +1,19 @@
1
+ from typing import List
2
+
3
+ from torch.utils.data.dataset import Dataset, T_co
4
+
5
+
6
+ class ListDataset(Dataset[T_co]):
7
+ """
8
+ A dataset wrapping a list of data.
9
+ """
10
+
11
+ def __init__(self, data: List[T_co]):
12
+ super().__init__()
13
+ self.data = data
14
+
15
+ def __getitem__(self, index: int) -> T_co:
16
+ return self.data[index]
17
+
18
+ def __len__(self):
19
+ return len(self.data)
@@ -4,7 +4,7 @@ from typing import List, Optional
4
4
  from torch.utils.data import Dataset
5
5
  from torch.utils.data.dataset import T_co
6
6
 
7
- from tensorneko.util import circular_pad
7
+ from ..util import circular_pad
8
8
 
9
9
 
10
10
  class RoundRobinDataset(Dataset[T_co]):
@@ -0,0 +1,5 @@
1
+ from .sequential_iter_sampler import SequentialIterSampler
2
+
3
+ __all__ = [
4
+ "SequentialIterSampler"
5
+ ]
@@ -0,0 +1,26 @@
1
+ from typing import Sized
2
+
3
+ from torch.utils.data.sampler import Sampler, T_co
4
+
5
+
6
+ class SequentialIterSampler(Sampler[T_co]):
7
+ """
8
+ Used to split large-scale data into small subsets for each epoch.
9
+ For example, if the dataset size is 1M, and the num_samples = 1000, then each epoch will only use 1000 samples, and
10
+ the next epoch will use the next 1000 samples.
11
+ """
12
+
13
+ def __init__(self, data_source: Sized, num_samples: int):
14
+ super().__init__(data_source)
15
+ self.data_source = data_source
16
+ self.num_samples = num_samples
17
+ self.total_size = len(data_source)
18
+ self.current_position = 0
19
+
20
+ def __iter__(self):
21
+ yield from map(lambda x: x % self.total_size,
22
+ range(self.current_position, self.current_position + self.num_samples))
23
+ self.current_position = (self.current_position + self.num_samples) % self.total_size
24
+
25
+ def __len__(self):
26
+ return self.num_samples
@@ -1,6 +1,7 @@
1
- from tensorneko_util.debug import get_parser_default_args, Arguments
1
+ from tensorneko_util.debug import get_parser_default_args, Arguments, DummyLogger
2
2
 
3
3
  __all__ = [
4
4
  "get_parser_default_args",
5
5
  "Arguments",
6
+ "DummyLogger"
6
7
  ]
@@ -1,6 +1,7 @@
1
1
  from .iou import iou_1d, iou_2d
2
2
  from .psnr import psnr_video, psnr_image
3
3
  from .ssim import ssim_video, ssim_image
4
+ from .secs import secs
4
5
  from .fid import FID
5
6
 
6
7
  __all__ = [
@@ -10,5 +11,6 @@ __all__ = [
10
11
  "psnr_image",
11
12
  "ssim_video",
12
13
  "ssim_image",
14
+ "secs",
13
15
  "FID",
14
16
  ]
@@ -22,6 +22,34 @@ except ImportError:
22
22
 
23
23
 
24
24
  class FID:
25
+ """
26
+ Calculate Fréchet inception distance based on torchmetrics. Requires the library "torch-fidelity".
27
+
28
+ Args:
29
+ device (``str`` | :class:`~torch.device`, optional): Device to run the metric. Default: ``"cpu"``.
30
+
31
+ Example::
32
+
33
+ from tensorneko.evaluation import FID
34
+ fid = FID("cuda")
35
+
36
+ # add predicted and real images
37
+ fid.add_pred_image("path/to/pred/image1.png")
38
+ fid.add_pred_image("path/to/pred/image2.png")
39
+ fid.add_true_image("path/to/true/image1.png")
40
+ fid.add_true_image("path/to/true/image2.png")
41
+
42
+ # add predicted and real videos
43
+ fid.add_pred_video("path/to/pred/video1.mp4")
44
+ fid.add_pred_video("path/to/pred/video2.mp4")
45
+ fid.add_true_video("path/to/true/video1.mp4")
46
+ fid.add_true_video("path/to/true/video2.mp4")
47
+
48
+ # compute FID
49
+ fid_score = fid.compute(batch_size=128, num_workers=8, progress_bar=True)
50
+ print(fid_score)
51
+
52
+ """
25
53
 
26
54
  def __init__(self, device: Union[str, Device] = "cpu"):
27
55
  self.device = torch.device(device)
@@ -56,14 +84,14 @@ class FID:
56
84
  def cuda(self) -> FID:
57
85
  return self.to("cuda")
58
86
 
59
- def compute(self, batch_size=128, num_workers=8, progress_bar: bool = True) -> float:
87
+ def compute(self, batch_size=128, num_workers=0, progress_bar: bool = False) -> float:
60
88
  pred = torch.utils.data.DataLoader(self.pred_data, batch_size=batch_size, num_workers=num_workers)
61
89
  true = torch.utils.data.DataLoader(self.true_data, batch_size=batch_size, num_workers=num_workers)
62
90
 
63
91
  if progress_bar:
64
92
  tqdm = import_tqdm_auto().tqdm
65
- pred = tqdm(pred, desc="Forward predicted features")
66
- true = tqdm(true, desc="Forward ground truth features")
93
+ pred = tqdm(total=len(pred), desc="Forward predicted features")
94
+ true = tqdm(total=len(true), desc="Forward ground truth features")
67
95
 
68
96
  for batch in pred:
69
97
  self.fid.update(batch.to(self.device), real=False)
@@ -72,6 +100,11 @@ class FID:
72
100
 
73
101
  return self.fid.compute().item()
74
102
 
103
+ def reset(self):
104
+ self.pred_data = _FIDDataset()
105
+ self.true_data = _FIDDataset()
106
+ self.fid.reset()
107
+
75
108
 
76
109
  @dataclass
77
110
  class _FIDEntry:
@@ -104,6 +137,7 @@ class _FIDDataset(IterableDataset):
104
137
  raise RuntimeError("Cannot open video file.")
105
138
  n_frames = int(cap.get(self.cv2.CAP_PROP_FRAME_COUNT))
106
139
  self.length += n_frames
140
+ cap.release()
107
141
 
108
142
  @staticmethod
109
143
  def _preprocess_image(image: Tensor) -> Tensor:
@@ -130,6 +164,8 @@ class _FIDDataset(IterableDataset):
130
164
  frame = self._preprocess_image(frame)
131
165
  yield frame
132
166
 
167
+ cap.release()
168
+
133
169
  def __iter__(self):
134
170
  for entry in self.content:
135
171
  if entry.type == "image":
@@ -70,7 +70,7 @@ def iou_2d(proposal: Union[Tensor, ndarray], target: Union[Tensor, ndarray]) ->
70
70
 
71
71
  inner_x1 = torch.maximum(proposal_x1, target_x1)
72
72
  inner_y1 = torch.maximum(proposal_y1, target_y1)
73
- inner_x2 = torch.minimum(proposal_x2, target_y2)
73
+ inner_x2 = torch.minimum(proposal_x2, target_x2)
74
74
  inner_y2 = torch.minimum(proposal_y2, target_y2)
75
75
 
76
76
  area_proposal = (proposal_x2 - proposal_x1) * (proposal_y2 - proposal_y1)
@@ -0,0 +1,58 @@
1
+ from numpy import ndarray
2
+ from torch import Tensor
3
+
4
+ from tensorneko_util.util import dispatch, Eval
5
+
6
+ from tensorneko_util.io import read
7
+
8
+
9
+ @Eval.later
10
+ def _secs_encoder():
11
+ from resemblyzer import VoiceEncoder
12
+ return VoiceEncoder()
13
+
14
+
15
+ @dispatch
16
+ def secs(pred: str, real: str) -> float:
17
+ from resemblyzer import VoiceEncoder, preprocess_wav
18
+ pred_audio = preprocess_wav(read.audio(pred).audio[0].numpy())
19
+ real_audio = preprocess_wav(read.audio(real).audio[0].numpy())
20
+ return _secs_compute(pred_audio, real_audio)
21
+
22
+
23
+ @dispatch
24
+ def secs(pred: Tensor, real: Tensor) -> float:
25
+ return secs(pred.numpy(), real.numpy())
26
+
27
+
28
+ @dispatch
29
+ def secs(pred: ndarray, real: ndarray) -> float:
30
+ from resemblyzer import VoiceEncoder, preprocess_wav
31
+ if len(pred.shape) == 2:
32
+ if pred.shape[0] == 1:
33
+ pred = pred.squeeze(0)
34
+ elif pred.shape[1] == 1:
35
+ pred = pred.squeeze(1)
36
+ else:
37
+ raise ValueError("The input audio must be mono.")
38
+
39
+ if len(real.shape) == 2:
40
+ if real.shape[0] == 1:
41
+ real = real.squeeze(0)
42
+ elif real.shape[1] == 1:
43
+ real = real.squeeze(1)
44
+ else:
45
+ raise ValueError("The input audio must be mono.")
46
+
47
+ pred_audio = preprocess_wav(pred)
48
+ real_audio = preprocess_wav(real)
49
+
50
+ return _secs_compute(pred_audio, real_audio)
51
+
52
+
53
+ def _secs_compute(pred_audio: ndarray, real_audio: ndarray) -> float:
54
+ encoder = _secs_encoder.value
55
+ real_embed = encoder.embed_utterance(real_audio)
56
+ pred_embed = encoder.embed_utterance(pred_audio)
57
+
58
+ return float((real_embed * pred_embed).sum())
@@ -9,7 +9,7 @@ from . import type
9
9
  from .configuration import Configuration
10
10
  from .misc import reduce_dict_by, summarize_dict_by, with_printed_shape, is_bad_num, count_parameters, compose, \
11
11
  generate_inf_seq, listdir, with_printed, ifelse, dict_add, as_list, identity, list_to_dict, circular_pad, \
12
- load_py
12
+ load_py, try_until_success
13
13
  from .misc import get_tensorneko_path
14
14
  from .dispatched_misc import sparse2binary, binary2sparse
15
15
  from .reproducibility import Seed
@@ -70,6 +70,7 @@ __all__ = [
70
70
  "Singleton",
71
71
  "circular_pad",
72
72
  "load_py",
73
+ "try_until_success",
73
74
  "download_file",
74
75
  "WindowMerger",
75
76
  ]
tensorneko/util/misc.py CHANGED
@@ -9,7 +9,7 @@ from torch import Tensor
9
9
  from torch.nn import Module
10
10
 
11
11
  from tensorneko_util.util.misc import generate_inf_seq, listdir, with_printed, ifelse, dict_add, as_list, \
12
- identity, list_to_dict, compose, circular_pad, load_py
12
+ identity, list_to_dict, compose, circular_pad, load_py, try_until_success
13
13
  from .type import T, A
14
14
 
15
15
 
@@ -39,9 +39,11 @@ def reduce_dict_by(key: str, op: Callable[[T, T], T]) -> Callable[[List[Dict[str
39
39
  tensor([350.])
40
40
 
41
41
  """
42
+
42
43
  def wrapper(x: List[Dict[str, T]]) -> T:
43
44
  values = [d[key] for d in x]
44
45
  return reduce(op, values)
46
+
45
47
  return wrapper
46
48
 
47
49
 
@@ -81,6 +83,7 @@ def summarize_dict_by(key: str, op: Callable[[Union[Sequence[T], T]], T]
81
83
  array([3.])
82
84
 
83
85
  """
86
+
84
87
  def wrapper(x: List[Dict[str, T]]) -> T:
85
88
  values = [d[key] for d in x]
86
89
  if type(values[0]) is Tensor:
@@ -88,6 +91,7 @@ def summarize_dict_by(key: str, op: Callable[[Union[Sequence[T], T]], T]
88
91
  elif type(values[0]) is np.ndarray:
89
92
  values = np.vstack(values)
90
93
  return op(values)
94
+
91
95
  return wrapper
92
96
 
93
97
 
@@ -160,3 +164,4 @@ identity = identity
160
164
  list_to_dict = list_to_dict
161
165
  circular_pad = circular_pad
162
166
  load_py = load_py
167
+ try_until_success = try_until_success
tensorneko/version.txt CHANGED
@@ -1 +1 @@
1
- 0.3.3
1
+ 0.3.5
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: tensorneko
3
- Version: 0.3.3
3
+ Version: 0.3.5
4
4
  Summary: Tensor Neural Engine Kompanion. An util library based on PyTorch and PyTorch Lightning.
5
5
  Home-page: https://github.com/ControlNet/tensorneko
6
6
  Author: ControlNet
@@ -28,12 +28,12 @@ Requires-Dist: torchaudio >=0.9.0
28
28
  Requires-Dist: torchvision >=0.10.0
29
29
  Requires-Dist: torchmetrics >=0.7.3
30
30
  Requires-Dist: tensorboard >=2.0.0
31
- Requires-Dist: lightning ==2.0.*
31
+ Requires-Dist: lightning <2.2,>=2.0
32
32
  Requires-Dist: pillow >=8.1
33
33
  Requires-Dist: av >=8.0.3
34
34
  Requires-Dist: numpy >=1.20.1
35
35
  Requires-Dist: einops >=0.3.0
36
- Requires-Dist: tensorneko-util ==0.3.3
36
+ Requires-Dist: tensorneko-util ==0.3.5
37
37
  Requires-Dist: pysoundfile >=0.9.0 ; platform_system == "Windows"
38
38
 
39
39
  <h1 style="text-align: center">TensorNeko</h1>
@@ -50,7 +50,7 @@ Requires-Dist: pysoundfile >=0.9.0 ; platform_system == "Windows"
50
50
  <div align="center">
51
51
  <a href="https://www.python.org/"><img src="https://img.shields.io/pypi/pyversions/tensorneko?style=flat-square"></a>
52
52
  <a href="https://pytorch.org/"><img src="https://img.shields.io/badge/PyTorch-%3E%3D1.9.0-EE4C2C?style=flat-square&logo=pytorch"></a>
53
- <a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*-792EE5?style=flat-square&logo=lightning"></a>
53
+ <a href="https://www.pytorchlightning.ai/"><img src="https://img.shields.io/badge/Lightning-2.0.*/2.1.*-792EE5?style=flat-square&logo=lightning"></a>
54
54
  </div>
55
55
 
56
56
  <div align="center">
@@ -2,9 +2,10 @@ tensorneko/__init__.py,sha256=VPPK00Kduwi84QHnZKBZm8kBRdnPAji6f7J-adYAp_Y,770
2
2
  tensorneko/neko_model.py,sha256=5ZE4Kh8pFBcdrq0uERZTawE3PDP9jokD2QapC2s8-gc,10145
3
3
  tensorneko/neko_module.py,sha256=qELXvguSjWo_NvcRQibiFl0Qauzd9JWLSnT4dbGNS3Y,1473
4
4
  tensorneko/neko_trainer.py,sha256=GAKaXXvh3Lpej8JxjAm0zzZ8eOsyto2FFxv955dxGt4,9972
5
- tensorneko/version.txt,sha256=fDhQso1X93KSttjp_gCZja9YI1I-QOl6UceSeQVc8KI,5
6
- tensorneko/arch/__init__.py,sha256=nUWoSy6bjPn5-9Cvw5ABi99vm8hiczcFh17h8RyqPMk,175
5
+ tensorneko/version.txt,sha256=chs29ukegsplmSIlFfRBz4srybz6fhCYDk5F04KGouQ,5
6
+ tensorneko/arch/__init__.py,sha256=w4lTUeyBIZelrnSjlBFWUF0erzOmBFl9FqeWQuSOyKs,248
7
7
  tensorneko/arch/auto_encoder.py,sha256=j6PWWyaNYaYNtw_zZ9ikzhCASqe9viXR3JGBIXSK92Y,2137
8
+ tensorneko/arch/binary_classifier.py,sha256=CjBpxMHmubFHjdYhlt6XwoEsSNEMQ8p4s5BXl9ybxQo,1941
8
9
  tensorneko/arch/gan.py,sha256=ZAw6bNBXuTWmmC5rKpa7jgMisfpX-ti7gzyYkkh0_ls,7205
9
10
  tensorneko/arch/vqvae.py,sha256=02bHKJljBg6DTUfghxS3k-T5nOgYknhDU1Em0nirsj0,3730
10
11
  tensorneko/arch/wgan.py,sha256=k88x3ZtqqqKc0pIv6hiVqhpq-SitrVxrl7q8Etyqmpo,4712
@@ -18,15 +19,19 @@ tensorneko/callback/gpu_stats_logger.py,sha256=Qe0sDvV4KhS1IeXx2ushkpu1mlYJByXq0
18
19
  tensorneko/callback/lr_logger.py,sha256=4nC_teyCX3wmlELrJPq3TGrt2KssRpmgDRyep0h2J2c,605
19
20
  tensorneko/callback/nil_callback.py,sha256=-vKhOG3Ysv_ZToOdyYEkcZ8h0so9rBRY10f1OIoHeZs,131
20
21
  tensorneko/callback/system_stats_logger.py,sha256=ZrfO67E2FclPpKAcgIz5cPqzQ1tAU7_rfzsvZLcIats,820
21
- tensorneko/dataset/__init__.py,sha256=gWVBZBQzBFlIiQizRKOgOO3UMI3dmEWrGewOrlsfyJc,91
22
+ tensorneko/dataset/__init__.py,sha256=6980ci9Ce57HSyhzrKMJfDz31PCQxifVz1aSf63JEsA,247
23
+ tensorneko/dataset/list_dataset.py,sha256=oo_cbGJHRlNG-6HyDsc-fqcexpSyRJLZNQb5Hs5Tfjc,396
22
24
  tensorneko/dataset/nested_dataset.py,sha256=qUwyEmEcvSoCWkGfg_9m8liaHPVcFzX50mCq64iUsRo,942
23
- tensorneko/dataset/round_robin_dataset.py,sha256=KpvxTaLzQFOtRZB5RhBUgWGY4GDKTvR0Rs1MJWbyQ-k,2467
24
- tensorneko/debug/__init__.py,sha256=HLFQnPOWTtZ1KRVHvqyAeq9e9fW6AwgpsnLTbAC-DXE,132
25
- tensorneko/evaluation/__init__.py,sha256=Pk4DBoM225a_KaDhSNtXHMWGdyNojKBiRDLRHfjE8go,261
25
+ tensorneko/dataset/round_robin_dataset.py,sha256=pxSGcrQ2lFXdIaBz5b7i8ok0ihw3_VG1dLY9D68YzNc,2458
26
+ tensorneko/dataset/sampler/__init__.py,sha256=inj-7M5IjafU5yzSpU2BY9FWAiRp0u7RqkgAcIZj2Qk,102
27
+ tensorneko/dataset/sampler/sequential_iter_sampler.py,sha256=cx76cZjnV2Hk80Urc6LThd4FS8sxPh16OqsaP73MNlQ,951
28
+ tensorneko/debug/__init__.py,sha256=ZMfU3qquhMhl6EgPzM7Yuvvv0PWy3cR39UjPrrSmQcs,163
29
+ tensorneko/evaluation/__init__.py,sha256=jW8dh1JRMpx3npjTp7wJLzz-IxFZTBh7F-Ztfoep9xs,296
26
30
  tensorneko/evaluation/enum.py,sha256=s3P8XAobku-as4in5vh6BanvVW5Ccwnff0t124lVFFg,137
27
- tensorneko/evaluation/fid.py,sha256=fw8YUdC4O4hW953JJpH_Ii9XGognVLIkQD2vD79EDA8,4371
28
- tensorneko/evaluation/iou.py,sha256=uWKxmSyvNwPHPvxxte7HtsGYZLp1riV20ZsxxBfXR80,2914
31
+ tensorneko/evaluation/fid.py,sha256=5Tuk1WtxVHWvsNhJGg03_6dQg4pJ6FGx0HuBpxc113E,5538
32
+ tensorneko/evaluation/iou.py,sha256=phEmOWQ3cnWW377WeSHCoB8mGkHLHMHCl8_LL0IX3JA,2914
29
33
  tensorneko/evaluation/psnr.py,sha256=DeKxvY_xxawWMXHY0z3Nvbsi4dR57OUV4hjtUoCINXc,3757
34
+ tensorneko/evaluation/secs.py,sha256=D710GgcSxQgbGyPcWlC5ffF5n1GselLrUr5aA5Vq7oE,1622
30
35
  tensorneko/evaluation/ssim.py,sha256=6vPS4VQqoKxHOG49lChH51KxwNo07B4XHdhLub5DEPU,3758
31
36
  tensorneko/io/__init__.py,sha256=QEyA0mOC-BlKKskYYbDYttYWWRjCeh73lX-yKAUGNik,213
32
37
  tensorneko/io/reader.py,sha256=KB4xpdHKaqtEQXj2EOVB21Ev3ODPiQZFjNadZOipCMU,705
@@ -64,10 +69,10 @@ tensorneko/preprocess/enum.py,sha256=Wp5qFaUjea5XU4o3N0WxUd-qfzI-m5vr4ZWSqWjELb4
64
69
  tensorneko/preprocess/pad.py,sha256=b4IbbhGNRotZ7weZcKA7hfDqSixPo5KjM6khnqzaeUA,3238
65
70
  tensorneko/preprocess/resize.py,sha256=hitMlzVnN6n_8nEJwxy4C4ErZrTwpM86QGnYewsrmf8,3469
66
71
  tensorneko/preprocess/face_detector/__init__.py,sha256=_ktIfUZqGTX0hk7RBgKf-zHwG2n9KRH4RS7rjuOI8Bo,262
67
- tensorneko/util/__init__.py,sha256=ifzn9iITol005ulY-XRJwNR4rEHDTftH5WzwfBmLHMk,1962
72
+ tensorneko/util/__init__.py,sha256=-9IcgZG6JLmFVoTMj3oIjGLkKILUbpCSFeWhjZStzIY,2006
68
73
  tensorneko/util/configuration.py,sha256=uLwcx88_AinzTD5DOrEqD-fLihM1Bf2vy_MJlf1rdAM,2455
69
74
  tensorneko/util/dispatched_misc.py,sha256=_0Go7XezdYB7bpMnCs1MDD_6mPNoWP5qt8DoKuPxynI,997
70
- tensorneko/util/misc.py,sha256=9G3q2CuHbtieStV0QcshuyMwcejSX5e0Jg25temgIs8,4592
75
+ tensorneko/util/misc.py,sha256=6nNfDR19zgL6MJPNj3FFQiGLWB4_RSLLgK0wG8t28Bw,4653
71
76
  tensorneko/util/reproducibility.py,sha256=sw1vVi7VOnmzQYUocI5x9yKeZoHHiA4A5ja136XolrI,2102
72
77
  tensorneko/util/string_getter.py,sha256=Cq2mDYr3q758xJ9OBTwLDf-b6EMSYwlnNB0-kfsElfs,2491
73
78
  tensorneko/util/type.py,sha256=7egC8KDOIYYeCmeySjBwImUi-jEFiE7-5QS5Bp5Lqmc,738
@@ -76,8 +81,8 @@ tensorneko/visualization/log_graph.py,sha256=NvOwWVc_petXWYdgaHosPFLa43sHBeacbYc
76
81
  tensorneko/visualization/matplotlib.py,sha256=xs9Ssc44ojZX65QU8-fftA7Ug_pBuZ3TBtM8vETNq9w,1568
77
82
  tensorneko/visualization/image_browser/__init__.py,sha256=AtykhAE3bXQS6SOWbeYFeeUE9ts9XOFMvrL31z0LoMg,63
78
83
  tensorneko/visualization/watcher/__init__.py,sha256=Nq752qIYvfRUZ8VctKQRSqhxh5KmFbWcqPfZlijVx6s,379
79
- tensorneko-0.3.3.dist-info/LICENSE,sha256=Vd75kwgJpVuMnCRBWasQzceMlXt4YQL13ikBLy8G5h0,1067
80
- tensorneko-0.3.3.dist-info/METADATA,sha256=x7luu6Srwy2I-YoFdMByAlx1-an_QNSN42MHs5EpMxI,18877
81
- tensorneko-0.3.3.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
82
- tensorneko-0.3.3.dist-info/top_level.txt,sha256=sZHwlP0iyk7_zHuhRHzSBkdY9yEgyC48f6UVuZ6CvqE,11
83
- tensorneko-0.3.3.dist-info/RECORD,,
84
+ tensorneko-0.3.5.dist-info/LICENSE,sha256=Vd75kwgJpVuMnCRBWasQzceMlXt4YQL13ikBLy8G5h0,1067
85
+ tensorneko-0.3.5.dist-info/METADATA,sha256=Hdr3EdQz3wBl6qnUU_KiNUDJe-GZrnwm4MDsQZ8HlMw,18886
86
+ tensorneko-0.3.5.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
87
+ tensorneko-0.3.5.dist-info/top_level.txt,sha256=sZHwlP0iyk7_zHuhRHzSBkdY9yEgyC48f6UVuZ6CvqE,11
88
+ tensorneko-0.3.5.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: bdist_wheel (0.41.2)
2
+ Generator: bdist_wheel (0.41.3)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5