dataeval 0.71.0__py3-none-any.whl → 0.71.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dataeval/__init__.py CHANGED
@@ -1,8 +1,9 @@
-__version__ = "0.71.0"
+__version__ = "0.71.1"
 
 from importlib.util import find_spec
 
 _IS_TORCH_AVAILABLE = find_spec("torch") is not None
+_IS_TORCHVISION_AVAILABLE = find_spec("torchvision") is not None
 _IS_TENSORFLOW_AVAILABLE = find_spec("tensorflow") is not None and find_spec("tensorflow_probability") is not None
 
 del find_spec
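
The new _IS_TORCHVISION_AVAILABLE flag follows the existing pattern: importlib.util.find_spec probes the import machinery for an installed distribution without importing it, so the flag costs almost nothing at module import time. A minimal sketch of the pattern (the helper name is illustrative, not dataeval API):

    from importlib.util import find_spec

    def is_installed(package: str) -> bool:
        # find_spec only consults the import system's finders;
        # the package is never actually imported or executed.
        return find_spec(package) is not None

    if is_installed("torchvision"):
        import torchvision  # safe: the distribution is present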

dataeval/_internal/detectors/ood/ae.py CHANGED
@@ -10,9 +10,9 @@ from __future__ import annotations
 
 from typing import Callable
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike
 
 from dataeval._internal.detectors.ood.base import OODBase, OODScoreOutput
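
The keras import is swapped for tf_keras throughout this release because TensorFlow 2.16 made Keras 3 the default keras package, while this code targets the legacy Keras 2 API, which now ships separately as the tf-keras distribution (imported as tf_keras). This matches the dependency changes in METADATA below (tensorflow >=2.16 plus a new tf-keras requirement). A hedged compatibility shim, not taken from dataeval, illustrating the relationship:

    try:
        # TF >= 2.16: the Keras 2 API ships separately as tf-keras
        import tf_keras as keras
    except ImportError:
        # Older TF still bundles the Keras 2 API as plain keras
        import keras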

dataeval/_internal/detectors/ood/aegmm.py CHANGED
@@ -10,8 +10,8 @@ from __future__ import annotations
 
 from typing import Callable
 
-import keras
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike
 
 from dataeval._internal.detectors.ood.base import OODGMMBase, OODScoreOutput

dataeval/_internal/detectors/ood/base.py CHANGED
@@ -12,9 +12,9 @@ from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from typing import Callable, Literal, cast
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike, NDArray
 
 from dataeval._internal.interop import to_numpy

dataeval/_internal/detectors/ood/llr.py CHANGED
@@ -11,12 +11,12 @@ from __future__ import annotations
 from functools import partial
 from typing import Callable
 
-import keras
 import numpy as np
 import tensorflow as tf
-from keras.layers import Input
-from keras.models import Model
+import tf_keras as keras
 from numpy.typing import ArrayLike, NDArray
+from tf_keras.layers import Input
+from tf_keras.models import Model
 
 from dataeval._internal.detectors.ood.base import OODBase, OODScoreOutput
 from dataeval._internal.interop import to_numpy

dataeval/_internal/detectors/ood/vae.py CHANGED
@@ -10,9 +10,9 @@ from __future__ import annotations
 
 from typing import Callable
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike
 
 from dataeval._internal.detectors.ood.base import OODBase, OODScoreOutput

dataeval/_internal/detectors/ood/vaegmm.py CHANGED
@@ -10,9 +10,9 @@ from __future__ import annotations
 
 from typing import Callable
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike
 
 from dataeval._internal.detectors.ood.base import OODGMMBase, OODScoreOutput

dataeval/_internal/detectors/outliers.py CHANGED
@@ -147,7 +147,7 @@ class Outliers:
             mask = _get_outlier_mask(values.astype(np.float64), self.outlier_method, self.outlier_threshold)
             indices = np.flatnonzero(mask)
             for i, value in zip(indices, values[mask]):
-                flagged_images.setdefault(i, {}).update({stat: value})
+                flagged_images.setdefault(int(i), {}).update({stat: value})
 
         return dict(sorted(flagged_images.items()))
 
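Casting i to int matters because np.flatnonzero returns numpy integer scalars, which would otherwise become the dictionary keys. Plain-int keys serialize and compare more predictably; for example, json cannot encode numpy scalar keys. A small demonstration of the distinction:

    import json

    import numpy as np

    indices = np.flatnonzero(np.array([0, 3, 0, 7]))  # -> array([1, 3])
    json.dumps({int(indices[0]): "ok"})   # fine: key is a plain int
    # json.dumps({indices[0]: "ok"})      # TypeError: keys must be str, int, float, bool or None
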
dataeval/_internal/metrics/stats/datasetstats.py CHANGED
@@ -162,10 +162,10 @@ def channelstats(
 
     >>> stats = channelstats(images)
     >>> print(stats.visualstats.darkness)
-    [0.02124 0.1213 0.2212 0.1013 0.1076 0.11383 0.2013 0.2076 0.2139
-     0.3013 0.3076 0.3137 0.4014 0.4075 0.4138 0.5015 0.508 0.5137
-     0.6016 0.6074 0.614 0.701 0.7075 0.714 0.8013 0.8076 0.814
-     0.9014 0.9077 0.914 ]
+    [0.07495 0.1748 0.275 0.1047 0.11096 0.1172 0.2047 0.2109 0.2172
+     0.3047 0.311 0.3171 0.4048 0.411 0.4172 0.505 0.5107 0.517
+     0.6045 0.611 0.617 0.7046 0.711 0.7173 0.8047 0.811 0.8174
+     0.905 0.911 0.917 ]
     """
     outputs = run_stats(images, bboxes, True, [PixelStatsProcessor, VisualStatsProcessor])
     return ChannelStatsOutput(*outputs)  # type: ignore

dataeval/_internal/metrics/stats/dimensionstats.py CHANGED
@@ -57,16 +57,16 @@ class DimensionStatsProcessor(StatsProcessor[DimensionStatsOutput]):
     image_function_map = {
         "left": lambda x: x.box[0],
         "top": lambda x: x.box[1],
-        "width": lambda x: x.shape[-1],
-        "height": lambda x: x.shape[-2],
+        "width": lambda x: x.box[2] - x.box[0],
+        "height": lambda x: x.box[3] - x.box[1],
         "channels": lambda x: x.shape[-3],
-        "size": lambda x: np.prod(x.shape[-2:]),
-        "aspect_ratio": lambda x: x.shape[-1] / x.shape[-2],
+        "size": lambda x: (x.box[2] - x.box[0]) * (x.box[3] - x.box[1]),
+        "aspect_ratio": lambda x: (x.box[2] - x.box[0]) / (x.box[3] - x.box[1]),
         "depth": lambda x: get_bitdepth(x.image).depth,
         "center": lambda x: np.asarray([(x.box[0] + x.box[2]) / 2, (x.box[1] + x.box[3]) / 2]),
         "distance": lambda x: np.sqrt(
-            np.square(((x.box[0] + x.box[2]) / 2) - (x.width / 2))
-            + np.square(((x.box[1] + x.box[3]) / 2) - (x.height / 2))
+            np.square(((x.box[0] + x.box[2]) / 2) - (x.shape[-1] / 2))
+            + np.square(((x.box[1] + x.box[3]) / 2) - (x.shape[-2] / 2))
         ),
     }
 
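The corrected entries derive width, height, size, and aspect_ratio from the bounding-box corners instead of the image array's shape, so per-box statistics now describe the box itself; distance, conversely, now measures the box center against the image dimensions taken from x.shape. A worked example under the (x0, y0, x1, y1) corner convention implied by the center and distance entries:

    box = (10, 20, 110, 70)         # (x0, y0, x1, y1)
    width = box[2] - box[0]         # 100
    height = box[3] - box[1]        # 50
    size = width * height           # 5000 pixels
    aspect_ratio = width / height   # 2.0
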
dataeval/_internal/metrics/stats/visualstats.py CHANGED
@@ -22,14 +22,14 @@ class VisualStatsOutput(BaseStatsOutput):
     ----------
     brightness : NDArray[np.float16]
         Brightness of the images
-    sharpness : NDArray[np.float16]
-        Blurriness of the images
     contrast : NDArray[np.float16]
         Image contrast ratio
     darkness : NDArray[np.float16]
         Darkness of the images
     missing : NDArray[np.float16]
         Percentage of the images with missing pixels
+    sharpness : NDArray[np.float16]
+        Sharpness of the images
     zeros : NDArray[np.float16]
         Percentage of the images with zero value pixels
     percentiles : NDArray[np.float16]
@@ -37,10 +37,10 @@ class VisualStatsOutput(BaseStatsOutput):
     """
 
     brightness: NDArray[np.float16]
-    sharpness: NDArray[np.float16]
    contrast: NDArray[np.float16]
     darkness: NDArray[np.float16]
     missing: NDArray[np.float16]
+    sharpness: NDArray[np.float16]
     zeros: NDArray[np.float16]
     percentiles: NDArray[np.float16]
 
@@ -49,25 +49,25 @@ class VisualStatsProcessor(StatsProcessor[VisualStatsOutput]):
     output_class = VisualStatsOutput
     cache_keys = ["percentiles"]
     image_function_map = {
-        "brightness": lambda x: x.get("percentiles")[-2],
-        "sharpness": lambda x: np.std(edge_filter(np.mean(x.image, axis=0))),
+        "brightness": lambda x: x.get("percentiles")[1],
         "contrast": lambda x: np.nan_to_num(
             (np.max(x.get("percentiles")) - np.min(x.get("percentiles"))) / np.mean(x.get("percentiles"))
         ),
-        "darkness": lambda x: x.get("percentiles")[1],
+        "darkness": lambda x: x.get("percentiles")[-2],
         "missing": lambda x: np.count_nonzero(np.isnan(np.sum(x.image, axis=0))) / np.prod(x.shape[-2:]),
+        "sharpness": lambda x: np.std(edge_filter(np.mean(x.image, axis=0))),
         "zeros": lambda x: np.count_nonzero(np.sum(x.image, axis=0) == 0) / np.prod(x.shape[-2:]),
         "percentiles": lambda x: np.nanpercentile(x.scaled, q=QUARTILES),
     }
     channel_function_map = {
-        "brightness": lambda x: x.get("percentiles")[:, -2],
-        "sharpness": lambda x: np.std(np.vectorize(edge_filter, signature="(m,n)->(m,n)")(x.image), axis=(1, 2)),
+        "brightness": lambda x: x.get("percentiles")[:, 1],
         "contrast": lambda x: np.nan_to_num(
             (np.max(x.get("percentiles"), axis=1) - np.min(x.get("percentiles"), axis=1))
             / np.mean(x.get("percentiles"), axis=1)
         ),
-        "darkness": lambda x: x.get("percentiles")[:, 1],
+        "darkness": lambda x: x.get("percentiles")[:, -2],
         "missing": lambda x: np.count_nonzero(np.isnan(x.image), axis=(1, 2)) / np.prod(x.shape[-2:]),
+        "sharpness": lambda x: np.std(np.vectorize(edge_filter, signature="(m,n)->(m,n)")(x.image), axis=(1, 2)),
         "zeros": lambda x: np.count_nonzero(x.image == 0, axis=(1, 2)) / np.prod(x.shape[-2:]),
         "percentiles": lambda x: np.nanpercentile(x.scaled, q=QUARTILES, axis=1).T,
     }
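
brightness and darkness trade percentile indices in this change: brightness now reads index 1 of the cached percentile vector and darkness reads index -2, while the sharpness entries move to keep the maps ordered alongside the output fields. A sketch of the indexing, assuming QUARTILES is the five-point grid (0, 25, 50, 75, 100), which is not shown in this diff:

    import numpy as np

    QUARTILES = (0, 25, 50, 75, 100)  # assumption; defined elsewhere in the module
    scaled = np.linspace(0.0, 1.0, 10_000)
    percentiles = np.nanpercentile(scaled, q=QUARTILES)
    brightness = percentiles[1]   # 25th percentile after this change
    darkness = percentiles[-2]    # 75th percentile after this change
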
@@ -113,9 +113,10 @@ def visualstats(
 
     >>> results = visualstats(images)
     >>> print(results.brightness)
-    [0.0737 0.607 0.0713 0.1046 0.138 0.1713 0.2046 0.2379 0.2712 0.3047
-     0.338 0.3713 0.4045 0.438 0.4712 0.5044 0.538 0.5713 0.6045 0.638
-     0.6714 0.7046 0.738 0.7715 0.8047 0.838 0.871 0.905 0.938 0.971 ]
+    [0.02246 0.5557 0.06805 0.1014 0.1348 0.1681 0.2014 0.2347 0.268
+     0.3015 0.3347 0.3682 0.4014 0.4348 0.468 0.5015 0.5347 0.568
+     0.6016 0.635 0.668 0.701 0.735 0.768 0.8013 0.835 0.868
+     0.9014 0.9346 0.9683 ]
     >>> print(results.contrast)
     [2.041 1.332 1.293 1.279 1.272 1.268 1.265 1.263 1.261 1.26 1.259 1.258
      1.258 1.257 1.257 1.256 1.256 1.255 1.255 1.255 1.255 1.254 1.254 1.254

dataeval/_internal/models/tensorflow/autoencoder.py CHANGED
@@ -12,9 +12,9 @@ from __future__ import annotations
 
 from typing import Callable, cast
 
-import keras
 import tensorflow as tf
-from keras.layers import (
+import tf_keras as keras
+from tf_keras.layers import (
     Dense,
     Flatten,
     Layer,

dataeval/_internal/models/tensorflow/losses.py CHANGED
@@ -11,11 +11,11 @@ from __future__ import annotations
 from typing import Literal, cast
 
 import tensorflow as tf
-from keras.layers import Flatten
 from numpy.typing import NDArray
 from tensorflow_probability.python.distributions.mvn_diag import MultivariateNormalDiag
 from tensorflow_probability.python.distributions.mvn_tril import MultivariateNormalTriL
 from tensorflow_probability.python.stats import covariance
+from tf_keras.layers import Flatten
 
 from dataeval._internal.models.tensorflow.gmm import gmm_energy, gmm_params
 
dataeval/_internal/models/tensorflow/pixelcnn.py CHANGED
@@ -13,9 +13,9 @@ from __future__ import annotations
 import functools
 import warnings
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from tensorflow_probability.python.bijectors import bijector
 from tensorflow_probability.python.distributions import (
     categorical,

dataeval/_internal/models/tensorflow/trainer.py CHANGED
@@ -10,9 +10,9 @@ from __future__ import annotations
 
 from typing import Callable, Iterable, cast
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import NDArray
 
 
dataeval/_internal/models/tensorflow/utils.py CHANGED
@@ -11,11 +11,13 @@ from __future__ import annotations
 import math
 from typing import Callable, Union, cast
 
-import keras as keras
 import numpy as np
 import tensorflow as tf
-from keras import Sequential
-from keras.layers import (
+import tf_keras as keras
+from numpy.typing import NDArray
+from tensorflow._api.v2.nn import relu, softmax, tanh
+from tf_keras import Sequential
+from tf_keras.layers import (
     Conv2D,
     Conv2DTranspose,
     Dense,
@@ -23,8 +25,6 @@ from keras.layers import (
     InputLayer,
     Reshape,
 )
-from numpy.typing import NDArray
-from tensorflow._api.v2.nn import relu, softmax, tanh
 
 from dataeval._internal.models.tensorflow.autoencoder import AE, AEGMM, VAE, VAEGMM
 from dataeval._internal.models.tensorflow.pixelcnn import PixelCNN

dataeval/utils/tensorflow/__init__.py CHANGED
@@ -6,6 +6,12 @@ as well as constructors which allow for customization of the encoder, decoder and
 layers used by the model.
 """
 
+from dataeval import _IS_TENSORFLOW_AVAILABLE
+
 from . import loss, models, recon
 
-__all__ = ["loss", "models", "recon"]
+__all__ = []
+
+
+if _IS_TENSORFLOW_AVAILABLE:
+    __all__ = ["loss", "models", "recon"]

dataeval/utils/tensorflow/loss/__init__.py CHANGED
@@ -1,3 +1,7 @@
+from dataeval import _IS_TENSORFLOW_AVAILABLE
 from dataeval._internal.models.tensorflow.losses import Elbo, LossGMM
 
-__all__ = ["Elbo", "LossGMM"]
+__all__ = []
+
+if _IS_TENSORFLOW_AVAILABLE:
+    __all__ += ["Elbo", "LossGMM"]

dataeval/utils/tensorflow/models/__init__.py CHANGED
@@ -1,5 +1,9 @@
+from dataeval import _IS_TENSORFLOW_AVAILABLE
 from dataeval._internal.models.tensorflow.autoencoder import AE, AEGMM, VAE, VAEGMM
 from dataeval._internal.models.tensorflow.pixelcnn import PixelCNN
 from dataeval._internal.models.tensorflow.utils import create_model
 
-__all__ = ["create_model", "AE", "AEGMM", "PixelCNN", "VAE", "VAEGMM"]
+__all__ = []
+
+if _IS_TENSORFLOW_AVAILABLE:
+    __all__ += ["create_model", "AE", "AEGMM", "PixelCNN", "VAE", "VAEGMM"]

dataeval/utils/torch/__init__.py CHANGED
@@ -5,8 +5,17 @@ While these metrics can take in custom models, DataEval provides utility classes
 to create a seamless integration between custom models and DataEval's metrics.
 """
 
+from dataeval import _IS_TORCH_AVAILABLE, _IS_TORCHVISION_AVAILABLE
 from dataeval._internal.utils import read_dataset
 
-from . import models, trainer
+__all__ = []
 
-__all__ = ["read_dataset", "models", "trainer"]
+if _IS_TORCH_AVAILABLE:
+    from . import models, trainer
+
+    __all__ += ["read_dataset", "models", "trainer"]
+
+if _IS_TORCHVISION_AVAILABLE:
+    from . import datasets
+
+    __all__ += ["datasets"]

dataeval/utils/torch/datasets/__init__.py CHANGED
@@ -2,6 +2,11 @@
 Provide access to common Torch datasets used for computer vision
 """
 
-from dataeval._internal.datasets import CIFAR10, MNIST, VOCDetection
+from dataeval import _IS_TORCHVISION_AVAILABLE
 
-__all__ = ["CIFAR10", "MNIST", "VOCDetection"]
+__all__ = []
+
+if _IS_TORCHVISION_AVAILABLE:
+    from dataeval._internal.datasets import CIFAR10, MNIST, VOCDetection
+
+    __all__ += ["CIFAR10", "MNIST", "VOCDetection"]

dataeval/utils/torch/models/__init__.py CHANGED
@@ -1,7 +1,11 @@
+from dataeval import _IS_TORCH_AVAILABLE
 from dataeval._internal.models.pytorch.autoencoder import (
     AriaAutoencoder,
     Decoder,
     Encoder,
 )
 
-__all__ = ["AriaAutoencoder", "Decoder", "Encoder"]
+__all__ = []
+
+if _IS_TORCH_AVAILABLE:
+    __all__ += ["AriaAutoencoder", "Decoder", "Encoder"]

dataeval/utils/torch/trainer/__init__.py CHANGED
@@ -1,3 +1,7 @@
+from dataeval import _IS_TORCH_AVAILABLE
 from dataeval._internal.models.pytorch.autoencoder import AETrainer
 
-__all__ = ["AETrainer"]
+__all__ = []
+
+if _IS_TORCH_AVAILABLE:
+    __all__ += ["AETrainer"]

dataeval-0.71.0.dist-info/METADATA → dataeval-0.71.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dataeval
-Version: 0.71.0
+Version: 0.71.1
 Summary: DataEval provides a simple interface to characterize image data and its impact on model performance across classification and object-detection tasks
 Home-page: https://dataeval.ai/
 License: MIT
@@ -29,8 +29,9 @@ Requires-Dist: nvidia-cudnn-cu11 (>=8.6.0.163) ; extra == "tensorflow" or extra
 Requires-Dist: pillow (>=10.3.0)
 Requires-Dist: scikit-learn (>=1.5.0)
 Requires-Dist: scipy (>=1.10)
-Requires-Dist: tensorflow (>=2.14.1,<2.16) ; extra == "tensorflow" or extra == "all"
-Requires-Dist: tensorflow_probability (>=0.22.1,<0.24) ; extra == "tensorflow" or extra == "all"
+Requires-Dist: tensorflow (>=2.16) ; extra == "tensorflow" or extra == "all"
+Requires-Dist: tensorflow_probability (>=0.24) ; extra == "tensorflow" or extra == "all"
+Requires-Dist: tf-keras (>2.16) ; extra == "tensorflow" or extra == "all"
 Requires-Dist: torch (>=2.2.0) ; extra == "torch" or extra == "all"
 Requires-Dist: torchvision (>=0.17.0) ; extra == "torch" or extra == "all"
 Requires-Dist: tqdm
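
The TensorFlow extra now floors at the versions where the Keras split happened: tensorflow >=2.16 (which ships Keras 3) together with the new tf-keras requirement (the Keras 2 API) and tensorflow_probability >=0.24. A quick environment check along these lines (a sketch; it assumes the third-party packaging module is installed):

    from importlib.metadata import version

    from packaging.version import Version

    assert Version(version("tensorflow")) >= Version("2.16")
    assert Version(version("tensorflow_probability")) >= Version("0.24")
    assert Version(version("tf-keras")) > Version("2.16")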

dataeval-0.71.0.dist-info/RECORD → dataeval-0.71.1.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
-dataeval/__init__.py,sha256=3y4e1-DHeOSYpqdQcYoQC185-eYkOURrhDoBgUIquAg,555
+dataeval/__init__.py,sha256=Qm1rDTX_NyCAtZl2ilQ49v0j_zqnWhhVwIhe0cvrKjk,620
 dataeval/_internal/datasets.py,sha256=KbXSR-vOAzFamfXHRnI9mhhqUzEPyGpK47fZsirQn1I,14638
 dataeval/_internal/detectors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dataeval/_internal/detectors/clusterer.py,sha256=srqTHzh9kIy7Ty4VYaptwuQlBh8emFeiEAeS_mYFKro,20750
@@ -12,13 +12,13 @@ dataeval/_internal/detectors/drift/uncertainty.py,sha256=Ot8L42AnFbkij4J3Tis7VzX
 dataeval/_internal/detectors/duplicates.py,sha256=wggaIl3uFxihNBQhPv5JcreZbhVaFKoMAJMv_9-aaHU,5324
 dataeval/_internal/detectors/merged_stats.py,sha256=okXGrqAgsqfANMxfIjiUQlZWlaIh5TVvIB9UPsOJZ7k,1351
 dataeval/_internal/detectors/ood/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/detectors/ood/ae.py,sha256=AIffh11UKZX-3oocDDp8-t-uDUm8aQKvrV0nIE3BLQo,2140
-dataeval/_internal/detectors/ood/aegmm.py,sha256=q2kRXZM5X0PoA10mRsi8Gh-W5FdFzEsnM1yDq3GFzn0,2518
-dataeval/_internal/detectors/ood/base.py,sha256=qYSmcN74x5-qL0_I7uNo4eQ8X8pr7M4cwjs2qvkJt5g,8561
-dataeval/_internal/detectors/ood/llr.py,sha256=VgZtMrMgI8zDVUzsqm2l4tqsULFIhdQeDd4lzdo_G7U,10217
-dataeval/_internal/detectors/ood/vae.py,sha256=iXEltu5pATWr42-28hZ3ARZavJrptLwUM5P9c8omA_s,3030
-dataeval/_internal/detectors/ood/vaegmm.py,sha256=ujp6UN0wpZcmPDPkVfTHZxgka5kuTOSzgXMmbKdmK2U,2947
-dataeval/_internal/detectors/outliers.py,sha256=VSHxfLUPj8VZTcPgQCqVLtpL88hZCCni_1JUfFamOrA,10201
+dataeval/_internal/detectors/ood/ae.py,sha256=OTcfvoiCdSFGaAAkejBKwwiHaHKB6sa01aW5fVBI1Bk,2152
+dataeval/_internal/detectors/ood/aegmm.py,sha256=7fRcTXfyUgYfcZOaa9GpGNNxAAp4sQ9zYowfs4s4420,2530
+dataeval/_internal/detectors/ood/base.py,sha256=jMMObJgPUZc2Vbql_UYNXvQAFO305TRhdVxk0YqfzJo,8573
+dataeval/_internal/detectors/ood/llr.py,sha256=wzWOeyqbD0WdXHRa1Qf-_3TbJYEfT6OuTQfcepPsbTM,10235
+dataeval/_internal/detectors/ood/vae.py,sha256=y_HP3tk7Clo3YG9hl-gke9_tJ4XW8x8sQlrC9ZtbVLw,3042
+dataeval/_internal/detectors/ood/vaegmm.py,sha256=SvdUKC8cVyEWfEGcczRmyA4SGJhbol0eDSDry1mZxII,2959
+dataeval/_internal/detectors/outliers.py,sha256=C7Iu66ze5KCCRQNc1TsqkFVKDFGfP4qjGMUv6RUpk-E,10206
 dataeval/_internal/interop.py,sha256=FLXJY-5hwJcKCtruyvaarqynXCMfcLbQSFvGnrWQDPo,1338
 dataeval/_internal/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dataeval/_internal/metrics/balance.py,sha256=8KwuR5HvytJtS1YW9KkNrCu2dPn_gP4FSbXrxQ-9kK8,6182
@@ -29,12 +29,12 @@ dataeval/_internal/metrics/diversity.py,sha256=ZSlq1KBvkMRVAvlznils2QmlPC73TTpHs
 dataeval/_internal/metrics/parity.py,sha256=eTjltNBJOTFH6T_t9V9-1EFr_U0vqlU642o3x2RWgz0,16527
 dataeval/_internal/metrics/stats/base.py,sha256=DRTaaFVtbH1M-wLO2NrtuAAXx699vlEjP9d2no72pM4,11066
 dataeval/_internal/metrics/stats/boxratiostats.py,sha256=Ac6nB41q43xHCJRDEXHNgsJF80VE8MpH8_kySxA84BE,6342
-dataeval/_internal/metrics/stats/datasetstats.py,sha256=vwJvb5nLvL17hKqO4ES-dEp6LELOT1w2P8dRWGyEjZI,6201
-dataeval/_internal/metrics/stats/dimensionstats.py,sha256=s9jM4MhIQPpLEhQw3mXOEijsmhmV7mLE0HEnWqqWLbQ,3848
+dataeval/_internal/metrics/stats/datasetstats.py,sha256=1H8Njtr27oNO8Hn3pwizAlOFkVe3QpbJb-RYk4dLKkY,6201
+dataeval/_internal/metrics/stats/dimensionstats.py,sha256=EIXrRia7OyB147WgAW2tqEwPMcCNWmSQidx5uQukSqQ,3915
 dataeval/_internal/metrics/stats/hashstats.py,sha256=I-aX-R0Rlvjwo7A5bjq3Bqs7-utTapnXB87z9TyC12w,2088
 dataeval/_internal/metrics/stats/labelstats.py,sha256=BNxI2flvKhSps2o4-TPbN9nf52ctatI2SuDZ07hah5E,4058
 dataeval/_internal/metrics/stats/pixelstats.py,sha256=_b0TdjHZwe2yj5Cdmz2IhbQP4LTnHI1qFlDgPV8fuCs,4420
-dataeval/_internal/metrics/stats/visualstats.py,sha256=2kwhvwBVDtRdF3hrM-Hd_SkRIbSB2fK80pFesJULXkI,4759
+dataeval/_internal/metrics/stats/visualstats.py,sha256=TdPwiehv0dY5HJmOOQk4_omfMd725NqOPG21A-q_t0I,4788
 dataeval/_internal/metrics/uap.py,sha256=RumSQey6vNoz9CtOG2_Inb-TurKJrAHqwhkyWBirxhk,2128
 dataeval/_internal/metrics/utils.py,sha256=vW3mQHjF0AvYlml27X5dZgd0YBk3zyBvvztLEfdRkvI,13475
 dataeval/_internal/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -43,12 +43,12 @@ dataeval/_internal/models/pytorch/autoencoder.py,sha256=nPyLjLZrPNla-lMnym3fUW-O
 dataeval/_internal/models/pytorch/blocks.py,sha256=pm2xwsDZjZJYXrhhiz8husvh2vHmrkFMSYEn-EDUD5Q,1354
 dataeval/_internal/models/pytorch/utils.py,sha256=Qgwym1PxGuwxbXCKUT-8r6Iyrxqm7x94oj45Vf5_CjE,1675
 dataeval/_internal/models/tensorflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/models/tensorflow/autoencoder.py,sha256=Ryn11jDbpZJOM5De-kMGPdbJBQEdwip6B20ajS8HqpE,10354
+dataeval/_internal/models/tensorflow/autoencoder.py,sha256=l-3utb7Rwq6_KiqOPBHnYxR7ngYhpIDFvbvNZc5UvNI,10369
 dataeval/_internal/models/tensorflow/gmm.py,sha256=QoEgbeax1GETqRmUF7A2ih9uFOZfFAjGzgH2ljExlAc,3669
-dataeval/_internal/models/tensorflow/losses.py,sha256=pZH5RnlM9R0RrBde9Lgq32muwAp7_PWc56Mu4u8RVvo,3976
-dataeval/_internal/models/tensorflow/pixelcnn.py,sha256=keI1gTNjBk18YD91Cp4exfuGYWU9lt-wapvhSazhcVs,48319
-dataeval/_internal/models/tensorflow/trainer.py,sha256=xNY0Iw7Qa1TnCuy9N1b77_VduFoW_BhbZjfQCxOVby4,4082
-dataeval/_internal/models/tensorflow/utils.py,sha256=l6jXKMWyQAEI4LpAONq95Xwr7CPgrs408ypf9TuNxkY,8732
+dataeval/_internal/models/tensorflow/losses.py,sha256=LavFmi9AWfE_HO4YxQ54kV8LZG5-UeCOhOlcPpxo-ic,3979
+dataeval/_internal/models/tensorflow/pixelcnn.py,sha256=ru4KF8CZHKbOpp-ZgDxuRdbcv_nTCs1i1H2lTMamL7Y,48331
+dataeval/_internal/models/tensorflow/trainer.py,sha256=LJ3t6Ud95cofKN-cgb5o5nDrYSFse7LSDOYIBkMgDJk,4094
+dataeval/_internal/models/tensorflow/utils.py,sha256=Uq6eUTEeUHGopL1_VBH656-Ue18v6WgiEUjmk8SMsc8,8741
 dataeval/_internal/output.py,sha256=qVbOi41dvfQICQ4uxysHPWBRKo1XR61kXHPL_vKOPm0,2545
 dataeval/_internal/utils.py,sha256=jo6bGJZAgyuZqRpAAC4gwhAHYE12316na19ZuFwMqes,1504
 dataeval/_internal/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -65,16 +65,16 @@ dataeval/metrics/estimators/__init__.py,sha256=4VFMKLPsJdaWiflf84bXGQ2k8ertFQ4WE
 dataeval/metrics/stats/__init__.py,sha256=AKlNelORMOM2OA9XIvwZ9nOn6dK6k-r-69ldEAuqgLA,1156
 dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dataeval/utils/__init__.py,sha256=cW_5-DIZG2OFRs3FVLOz0uCv4JWdaoVO7C9rOlR7ZEA,526
-dataeval/utils/tensorflow/__init__.py,sha256=XgjqrMtI00ERPPpdokbO1lDyc_H3CZ1TTqUXIj0B6PI,435
-dataeval/utils/tensorflow/loss/__init__.py,sha256=E9eB87LNh0o5nUCqssB027EXBsOfEayNHPcNW0QGFdA,101
-dataeval/utils/tensorflow/models/__init__.py,sha256=OVpmHF8itDcgOlfw6N9jr7IphZPbMJoiu7OdqYhU9fs,291
+dataeval/utils/tensorflow/__init__.py,sha256=sKRG3b_MLQUrAftkRZ17JyNZt6gjEguvTK83hO_IwRQ,530
+dataeval/utils/tensorflow/loss/__init__.py,sha256=s7tD_5dYWcNDmntGiEHhG7bVDsMAY1UO8FpQFe9cUns,195
+dataeval/utils/tensorflow/models/__init__.py,sha256=1R9Oi5DOYwT0W3JSEfoMsPOvhYFaKqKilwkrUifNnig,385
 dataeval/utils/tensorflow/recon/__init__.py,sha256=xe6gAQqK9tyAoDQTtaJAxIPK1humt5QzsG_9NPsqx58,116
-dataeval/utils/torch/__init__.py,sha256=bYUm-nNlNVU3bqDz7dQHFmaRWgLy3lLrD4cSDumDlxQ,373
-dataeval/utils/torch/datasets/__init__.py,sha256=S6C4OaxEjJJaIpHSZcZfkl4U5iS5YtZ9N5GYHqvbzvM,191
-dataeval/utils/torch/models/__init__.py,sha256=YnDnePYpRIKHyYn3F5qR1OObMSb-g0FGvI8X-uTB09E,162
-dataeval/utils/torch/trainer/__init__.py,sha256=Te-qElt8h-Zv8NN0r-VJOEdCPHTQ2yO3rd2MhRiZGZs,93
+dataeval/utils/torch/__init__.py,sha256=430fNKbqLByuGSeNhnoIJy3g9Z94ckZsAKWUZ15MVP4,575
+dataeval/utils/torch/datasets/__init__.py,sha256=94k7fMQdxYlQXDYouAHUgrQJ2oBwnvq4koFJpyhlUVA,292
+dataeval/utils/torch/models/__init__.py,sha256=q1BzoLHWA0uBXzT2glWJDrxVA1BN7xnkT2r_d-7Dlyw,246
+dataeval/utils/torch/trainer/__init__.py,sha256=hpcrlCCXPzb8b7FOzEAKqFy6Z7Zl4V_cx3yA7n3L1L4,177
 dataeval/workflows/__init__.py,sha256=VFeJyMhZxvj8WnU5Un32mwO8lNfBQOBjD9IdOqexnAE,320
-dataeval-0.71.0.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
-dataeval-0.71.0.dist-info/METADATA,sha256=b1faVcCXttUUf9VQ1-TBXTRg6Kv_OEj1nbMNbpo4B5g,4522
-dataeval-0.71.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-dataeval-0.71.0.dist-info/RECORD,,
+dataeval-0.71.1.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
+dataeval-0.71.1.dist-info/METADATA,sha256=j1HnzvkOSLR-D6debnFFLlp2vgue8ueFQvjYlrX75cw,4580
+dataeval-0.71.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+dataeval-0.71.1.dist-info/RECORD,,