dataeval 0.71.0__py3-none-any.whl → 0.72.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dataeval/__init__.py +2 -1
- dataeval/_internal/detectors/ood/ae.py +1 -1
- dataeval/_internal/detectors/ood/aegmm.py +1 -1
- dataeval/_internal/detectors/ood/base.py +1 -1
- dataeval/_internal/detectors/ood/llr.py +3 -3
- dataeval/_internal/detectors/ood/vae.py +1 -1
- dataeval/_internal/detectors/ood/vaegmm.py +1 -1
- dataeval/_internal/detectors/outliers.py +1 -1
- dataeval/_internal/metrics/stats/datasetstats.py +4 -4
- dataeval/_internal/metrics/stats/dimensionstats.py +6 -6
- dataeval/_internal/metrics/stats/visualstats.py +13 -12
- dataeval/_internal/models/tensorflow/autoencoder.py +2 -2
- dataeval/_internal/models/tensorflow/losses.py +1 -1
- dataeval/_internal/models/tensorflow/pixelcnn.py +1 -1
- dataeval/_internal/models/tensorflow/trainer.py +1 -1
- dataeval/_internal/models/tensorflow/utils.py +5 -5
- dataeval/utils/tensorflow/__init__.py +7 -1
- dataeval/utils/tensorflow/loss/__init__.py +5 -1
- dataeval/utils/tensorflow/models/__init__.py +5 -1
- dataeval/utils/torch/__init__.py +11 -2
- dataeval/utils/torch/datasets/__init__.py +7 -2
- dataeval/utils/torch/models/__init__.py +5 -1
- dataeval/utils/torch/trainer/__init__.py +5 -1
- {dataeval-0.71.0.dist-info → dataeval-0.72.0.dist-info}/METADATA +4 -3
- {dataeval-0.71.0.dist-info → dataeval-0.72.0.dist-info}/RECORD +27 -27
- {dataeval-0.71.0.dist-info → dataeval-0.72.0.dist-info}/LICENSE.txt +0 -0
- {dataeval-0.71.0.dist-info → dataeval-0.72.0.dist-info}/WHEEL +0 -0
dataeval/__init__.py
CHANGED
@@ -1,8 +1,9 @@
-__version__ = "0.71.0"
+__version__ = "0.72.0"
 
 from importlib.util import find_spec
 
 _IS_TORCH_AVAILABLE = find_spec("torch") is not None
+_IS_TORCHVISION_AVAILABLE = find_spec("torchvision") is not None
 _IS_TENSORFLOW_AVAILABLE = find_spec("tensorflow") is not None and find_spec("tensorflow_probability") is not None
 
 del find_spec
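The new `_IS_TORCHVISION_AVAILABLE` flag follows the same `find_spec` pattern as the existing flags: probe for an installed package without importing it, then gate optional functionality on the result. A minimal standalone sketch of the pattern (not dataeval's exact code):

    from importlib.util import find_spec

    # find_spec returns None when the package is not installed,
    # so this checks availability without paying the import cost.
    _IS_TORCHVISION_AVAILABLE = find_spec("torchvision") is not None

    if _IS_TORCHVISION_AVAILABLE:
        import torchvision  # safe: the module is known to be present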
dataeval/_internal/detectors/ood/ae.py
CHANGED
@@ -10,9 +10,9 @@ from __future__ import annotations
 
 from typing import Callable
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike
 
 from dataeval._internal.detectors.ood.base import OODBase, OODScoreOutput
dataeval/_internal/detectors/ood/aegmm.py
CHANGED
@@ -10,8 +10,8 @@ from __future__ import annotations
 
 from typing import Callable
 
-import keras
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike
 
 from dataeval._internal.detectors.ood.base import OODGMMBase, OODScoreOutput
dataeval/_internal/detectors/ood/base.py
CHANGED
@@ -12,9 +12,9 @@ from abc import ABC, abstractmethod
 from dataclasses import dataclass
 from typing import Callable, Literal, cast
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike, NDArray
 
 from dataeval._internal.interop import to_numpy
dataeval/_internal/detectors/ood/llr.py
CHANGED
@@ -11,12 +11,12 @@ from __future__ import annotations
 from functools import partial
 from typing import Callable
 
-import keras
 import numpy as np
 import tensorflow as tf
-
-from keras.models import Model
+import tf_keras as keras
 from numpy.typing import ArrayLike, NDArray
+from tf_keras.layers import Input
+from tf_keras.models import Model
 
 from dataeval._internal.detectors.ood.base import OODBase, OODScoreOutput
 from dataeval._internal.interop import to_numpy
dataeval/_internal/detectors/ood/vae.py
CHANGED
@@ -10,9 +10,9 @@ from __future__ import annotations
 
 from typing import Callable
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike
 
 from dataeval._internal.detectors.ood.base import OODBase, OODScoreOutput
dataeval/_internal/detectors/ood/vaegmm.py
CHANGED
@@ -10,9 +10,9 @@ from __future__ import annotations
 
 from typing import Callable
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from numpy.typing import ArrayLike
 
 from dataeval._internal.detectors.ood.base import OODGMMBase, OODScoreOutput
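All six OOD detector modules above make the same one-line change: `import keras` becomes `import tf_keras as keras`. `tf-keras` is the PyPI package that keeps the Keras 2 API available after TensorFlow 2.16 switched the bare `keras` name over to Keras 3. As a hedged sketch, downstream code that wants to work across both layouts could use a fallback like the following (this diff itself does a hard swap, not a fallback):

    try:
        # TensorFlow >= 2.16: the Keras 2 API lives in the separate tf-keras package
        import tf_keras as keras
    except ImportError:
        # Older environments where `keras` still resolves to Keras 2
        import keras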
dataeval/_internal/detectors/outliers.py
CHANGED
@@ -147,7 +147,7 @@ class Outliers:
         mask = _get_outlier_mask(values.astype(np.float64), self.outlier_method, self.outlier_threshold)
         indices = np.flatnonzero(mask)
         for i, value in zip(indices, values[mask]):
-            flagged_images.setdefault(i, {}).update({stat: value})
+            flagged_images.setdefault(int(i), {}).update({stat: value})
 
         return dict(sorted(flagged_images.items()))
 
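The `int(i)` cast converts the NumPy integer indices returned by `np.flatnonzero` into plain Python ints before they become dictionary keys. One plausible motivation, shown as a standalone sketch: NumPy scalar keys are rejected by `json.dumps`, while plain int keys serialize cleanly.

    import json
    import numpy as np

    indices = np.flatnonzero(np.array([False, True, True]))  # array([1, 2])
    flagged = {int(i): {"stat": 0.5} for i in indices}

    # Plain-int keys serialize cleanly; np.int64 keys would raise
    # "TypeError: keys must be str, int, float, bool or None".
    print(json.dumps(flagged))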
dataeval/_internal/metrics/stats/datasetstats.py
CHANGED
@@ -162,10 +162,10 @@ def channelstats(
 
     >>> stats = channelstats(images)
     >>> print(stats.visualstats.darkness)
-    [0.
-    0.
-    0.
-    0.
+    [0.07495 0.1748 0.275 0.1047 0.11096 0.1172 0.2047 0.2109 0.2172
+     0.3047 0.311 0.3171 0.4048 0.411 0.4172 0.505 0.5107 0.517
+     0.6045 0.611 0.617 0.7046 0.711 0.7173 0.8047 0.811 0.8174
+     0.905 0.911 0.917 ]
     """
     outputs = run_stats(images, bboxes, True, [PixelStatsProcessor, VisualStatsProcessor])
    return ChannelStatsOutput(*outputs)  # type: ignore
dataeval/_internal/metrics/stats/dimensionstats.py
CHANGED
@@ -57,16 +57,16 @@ class DimensionStatsProcessor(StatsProcessor[DimensionStatsOutput]):
     image_function_map = {
         "left": lambda x: x.box[0],
         "top": lambda x: x.box[1],
-        "width": lambda x: x.shape[-1],
-        "height": lambda x: x.shape[-2],
+        "width": lambda x: x.box[2] - x.box[0],
+        "height": lambda x: x.box[3] - x.box[1],
         "channels": lambda x: x.shape[-3],
-        "size": lambda x: np.prod(x.shape[-2:]),
-        "aspect_ratio": lambda x: x.shape[-1] / x.shape[-2],
+        "size": lambda x: (x.box[2] - x.box[0]) * (x.box[3] - x.box[1]),
+        "aspect_ratio": lambda x: (x.box[2] - x.box[0]) / (x.box[3] - x.box[1]),
         "depth": lambda x: get_bitdepth(x.image).depth,
         "center": lambda x: np.asarray([(x.box[0] + x.box[2]) / 2, (x.box[1] + x.box[3]) / 2]),
         "distance": lambda x: np.sqrt(
-            np.square(((x.box[0] + x.box[2]) / 2) - (x.shape[-2] / 2))
-            + np.square(((x.box[1] + x.box[3]) / 2) - (x.shape[-1] / 2))
+            np.square(((x.box[0] + x.box[2]) / 2) - (x.shape[-1] / 2))
+            + np.square(((x.box[1] + x.box[3]) / 2) - (x.shape[-2] / 2))
         ),
     }
 
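Dimension stats are now derived from the bounding box rather than the image array's shape. A quick worked example of the new formulas, assuming the `(x0, y0, x1, y1)` box layout implied by the `left`/`top`/`center` entries:

    box = (10, 20, 110, 70)  # (x0, y0, x1, y1)

    width = box[2] - box[0]                                   # 100
    height = box[3] - box[1]                                  # 50
    size = width * height                                     # 5000
    aspect_ratio = width / height                             # 2.0
    center = ((box[0] + box[2]) / 2, (box[1] + box[3]) / 2)   # (60.0, 45.0)
    print(width, height, size, aspect_ratio, center)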
dataeval/_internal/metrics/stats/visualstats.py
CHANGED
@@ -22,14 +22,14 @@ class VisualStatsOutput(BaseStatsOutput):
     ----------
     brightness : NDArray[np.float16]
         Brightness of the images
-    sharpness : NDArray[np.float16]
-        Blurriness of the images
     contrast : NDArray[np.float16]
         Image contrast ratio
     darkness : NDArray[np.float16]
         Darkness of the images
     missing : NDArray[np.float16]
         Percentage of the images with missing pixels
+    sharpness : NDArray[np.float16]
+        Sharpness of the images
     zeros : NDArray[np.float16]
         Percentage of the images with zero value pixels
     percentiles : NDArray[np.float16]
@@ -37,10 +37,10 @@ class VisualStatsOutput(BaseStatsOutput):
     """
 
     brightness: NDArray[np.float16]
-    sharpness: NDArray[np.float16]
     contrast: NDArray[np.float16]
     darkness: NDArray[np.float16]
     missing: NDArray[np.float16]
+    sharpness: NDArray[np.float16]
     zeros: NDArray[np.float16]
     percentiles: NDArray[np.float16]
 
@@ -49,25 +49,25 @@ class VisualStatsProcessor(StatsProcessor[VisualStatsOutput]):
     output_class = VisualStatsOutput
     cache_keys = ["percentiles"]
     image_function_map = {
-        "brightness": lambda x: x.get("percentiles")[-2],
-        "sharpness": lambda x: np.std(edge_filter(np.mean(x.image, axis=0))),
+        "brightness": lambda x: x.get("percentiles")[1],
         "contrast": lambda x: np.nan_to_num(
             (np.max(x.get("percentiles")) - np.min(x.get("percentiles"))) / np.mean(x.get("percentiles"))
         ),
-        "darkness": lambda x: x.get("percentiles")[1],
+        "darkness": lambda x: x.get("percentiles")[-2],
         "missing": lambda x: np.count_nonzero(np.isnan(np.sum(x.image, axis=0))) / np.prod(x.shape[-2:]),
+        "sharpness": lambda x: np.std(edge_filter(np.mean(x.image, axis=0))),
         "zeros": lambda x: np.count_nonzero(np.sum(x.image, axis=0) == 0) / np.prod(x.shape[-2:]),
         "percentiles": lambda x: np.nanpercentile(x.scaled, q=QUARTILES),
     }
     channel_function_map = {
-        "brightness": lambda x: x.get("percentiles")[:, -2],
-        "sharpness": lambda x: np.std(np.vectorize(edge_filter, signature="(m,n)->(m,n)")(x.image), axis=(1, 2)),
+        "brightness": lambda x: x.get("percentiles")[:, 1],
         "contrast": lambda x: np.nan_to_num(
             (np.max(x.get("percentiles"), axis=1) - np.min(x.get("percentiles"), axis=1))
             / np.mean(x.get("percentiles"), axis=1)
         ),
-        "darkness": lambda x: x.get("percentiles")[:, 1],
+        "darkness": lambda x: x.get("percentiles")[:, -2],
         "missing": lambda x: np.count_nonzero(np.isnan(x.image), axis=(1, 2)) / np.prod(x.shape[-2:]),
+        "sharpness": lambda x: np.std(np.vectorize(edge_filter, signature="(m,n)->(m,n)")(x.image), axis=(1, 2)),
         "zeros": lambda x: np.count_nonzero(x.image == 0, axis=(1, 2)) / np.prod(x.shape[-2:]),
         "percentiles": lambda x: np.nanpercentile(x.scaled, q=QUARTILES, axis=1).T,
     }
@@ -113,9 +113,10 @@ def visualstats(
 
     >>> results = visualstats(images)
     >>> print(results.brightness)
-    [0.
-    0.
-    0.
+    [0.02246 0.5557 0.06805 0.1014 0.1348 0.1681 0.2014 0.2347 0.268
+     0.3015 0.3347 0.3682 0.4014 0.4348 0.468 0.5015 0.5347 0.568
+     0.6016 0.635 0.668 0.701 0.735 0.768 0.8013 0.835 0.868
+     0.9014 0.9346 0.9683 ]
     >>> print(results.contrast)
     [2.041 1.332 1.293 1.279 1.272 1.268 1.265 1.263 1.261 1.26 1.259 1.258
     1.258 1.257 1.257 1.256 1.256 1.255 1.255 1.255 1.255 1.254 1.254 1.254
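`brightness` and `darkness` now index the cached quartile vector from opposite ends: `percentiles[1]` and `percentiles[-2]`. Assuming `QUARTILES = (0, 25, 50, 75, 100)` (the constant itself is not shown in this diff), a standalone sketch of what those indices select:

    import numpy as np

    QUARTILES = (0, 25, 50, 75, 100)      # assumed definition; not part of this diff
    scaled = np.linspace(0.0, 1.0, 101)   # stand-in for a scaled image

    percentiles = np.nanpercentile(scaled, q=QUARTILES)
    brightness = percentiles[1]    # 25th percentile -> 0.25
    darkness = percentiles[-2]     # 75th percentile -> 0.75
    print(brightness, darkness)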
dataeval/_internal/models/tensorflow/losses.py
CHANGED
@@ -11,11 +11,11 @@ from __future__ import annotations
 from typing import Literal, cast
 
 import tensorflow as tf
-from keras.layers import Flatten
 from numpy.typing import NDArray
 from tensorflow_probability.python.distributions.mvn_diag import MultivariateNormalDiag
 from tensorflow_probability.python.distributions.mvn_tril import MultivariateNormalTriL
 from tensorflow_probability.python.stats import covariance
+from tf_keras.layers import Flatten
 
 from dataeval._internal.models.tensorflow.gmm import gmm_energy, gmm_params
 
dataeval/_internal/models/tensorflow/pixelcnn.py
CHANGED
@@ -13,9 +13,9 @@ from __future__ import annotations
 import functools
 import warnings
 
-import keras
 import numpy as np
 import tensorflow as tf
+import tf_keras as keras
 from tensorflow_probability.python.bijectors import bijector
 from tensorflow_probability.python.distributions import (
     categorical,
dataeval/_internal/models/tensorflow/utils.py
CHANGED
@@ -11,11 +11,13 @@ from __future__ import annotations
 import math
 from typing import Callable, Union, cast
 
-import keras as keras
 import numpy as np
 import tensorflow as tf
-
-from keras.layers import (
+import tf_keras as keras
+from numpy.typing import NDArray
+from tensorflow._api.v2.nn import relu, softmax, tanh
+from tf_keras import Sequential
+from tf_keras.layers import (
     Conv2D,
     Conv2DTranspose,
     Dense,
@@ -23,8 +25,6 @@ from keras.layers import (
     InputLayer,
     Reshape,
 )
-from numpy.typing import NDArray
-from tensorflow._api.v2.nn import relu, softmax, tanh
 
 from dataeval._internal.models.tensorflow.autoencoder import AE, AEGMM, VAE, VAEGMM
 from dataeval._internal.models.tensorflow.pixelcnn import PixelCNN
dataeval/utils/tensorflow/__init__.py
CHANGED
@@ -6,6 +6,12 @@ as well as constructors which allow for customization of the encoder, decoder and
 layers used by the model.
 """
 
+from dataeval import _IS_TENSORFLOW_AVAILABLE
+
 from . import loss, models, recon
 
-__all__ = ["loss", "models", "recon"]
+__all__ = []
+
+
+if _IS_TENSORFLOW_AVAILABLE:
+    __all__ = ["loss", "models", "recon"]
dataeval/utils/tensorflow/models/__init__.py
CHANGED
@@ -1,5 +1,9 @@
+from dataeval import _IS_TENSORFLOW_AVAILABLE
 from dataeval._internal.models.tensorflow.autoencoder import AE, AEGMM, VAE, VAEGMM
 from dataeval._internal.models.tensorflow.pixelcnn import PixelCNN
 from dataeval._internal.models.tensorflow.utils import create_model
 
-__all__ = ["create_model", "AE", "AEGMM", "PixelCNN", "VAE", "VAEGMM"]
+__all__ = []
+
+if _IS_TENSORFLOW_AVAILABLE:
+    __all__ += ["create_model", "AE", "AEGMM", "PixelCNN", "VAE", "VAEGMM"]
dataeval/utils/torch/__init__.py
CHANGED
@@ -5,8 +5,17 @@ While these metrics can take in custom models, DataEval provides utility classes
 to create a seamless integration between custom models and DataEval's metrics.
 """
 
+from dataeval import _IS_TORCH_AVAILABLE, _IS_TORCHVISION_AVAILABLE
 from dataeval._internal.utils import read_dataset
 
-
+__all__ = []
 
-
+if _IS_TORCH_AVAILABLE:
+    from . import models, trainer
+
+    __all__ += ["read_dataset", "models", "trainer"]
+
+if _IS_TORCHVISION_AVAILABLE:
+    from . import datasets
+
+    __all__ += ["datasets"]
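With the guarded imports above, `dataeval.utils.torch` degrades gracefully instead of raising at import time when an optional backend is missing. A sketch of the observable behavior (output depends on which packages are installed):

    import dataeval.utils.torch as torch_utils

    # Empty when torch is absent; grows as optional backends are found:
    # e.g. ["read_dataset", "models", "trainer"] with torch installed,
    # plus "datasets" when torchvision is also present.
    print(torch_utils.__all__)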
dataeval/utils/torch/datasets/__init__.py
CHANGED
@@ -2,6 +2,11 @@
 Provide access to common Torch datasets used for computer vision
 """
 
-from dataeval._internal.datasets import CIFAR10, MNIST, VOCDetection
+from dataeval import _IS_TORCHVISION_AVAILABLE
 
-__all__ = ["CIFAR10", "MNIST", "VOCDetection"]
+__all__ = []
+
+if _IS_TORCHVISION_AVAILABLE:
+    from dataeval._internal.datasets import CIFAR10, MNIST, VOCDetection
+
+    __all__ += ["CIFAR10", "MNIST", "VOCDetection"]
dataeval/utils/torch/models/__init__.py
CHANGED
@@ -1,7 +1,11 @@
+from dataeval import _IS_TORCH_AVAILABLE
 from dataeval._internal.models.pytorch.autoencoder import (
     AriaAutoencoder,
     Decoder,
     Encoder,
 )
 
-__all__ = ["AriaAutoencoder", "Decoder", "Encoder"]
+__all__ = []
+
+if _IS_TORCH_AVAILABLE:
+    __all__ += ["AriaAutoencoder", "Decoder", "Encoder"]
{dataeval-0.71.0.dist-info → dataeval-0.72.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dataeval
-Version: 0.71.0
+Version: 0.72.0
 Summary: DataEval provides a simple interface to characterize image data and its impact on model performance across classification and object-detection tasks
 Home-page: https://dataeval.ai/
 License: MIT
@@ -29,8 +29,9 @@ Requires-Dist: nvidia-cudnn-cu11 (>=8.6.0.163) ; extra == "tensorflow" or extra == "all"
 Requires-Dist: pillow (>=10.3.0)
 Requires-Dist: scikit-learn (>=1.5.0)
 Requires-Dist: scipy (>=1.10)
-Requires-Dist: tensorflow (>=2.
-Requires-Dist: tensorflow_probability (>=0.
+Requires-Dist: tensorflow (>=2.16) ; extra == "tensorflow" or extra == "all"
+Requires-Dist: tensorflow_probability (>=0.24) ; extra == "tensorflow" or extra == "all"
+Requires-Dist: tf-keras (>=2.16) ; extra == "tensorflow" or extra == "all"
 Requires-Dist: torch (>=2.2.0) ; extra == "torch" or extra == "all"
 Requires-Dist: torchvision (>=0.17.0) ; extra == "torch" or extra == "all"
 Requires-Dist: tqdm
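The new `tf-keras` requirement sits behind the same `tensorflow`/`all` extras as the other TensorFlow dependencies, so `pip install "dataeval[tensorflow]"` pulls in all three. A small sanity check along the lines of dataeval's own `find_spec` probes (the module names are the import names of the pinned packages):

    from importlib.util import find_spec

    # tf-keras installs as the tf_keras module; the others keep their names.
    for module in ("tensorflow", "tensorflow_probability", "tf_keras"):
        print(module, "available" if find_spec(module) else "missing")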
{dataeval-0.71.0.dist-info → dataeval-0.72.0.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-dataeval/__init__.py,sha256=
+dataeval/__init__.py,sha256=Fb4afUULYZpdITrrAVPPzCAfyktPAQFyY4vPmmPSwx0,620
 dataeval/_internal/datasets.py,sha256=KbXSR-vOAzFamfXHRnI9mhhqUzEPyGpK47fZsirQn1I,14638
 dataeval/_internal/detectors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dataeval/_internal/detectors/clusterer.py,sha256=srqTHzh9kIy7Ty4VYaptwuQlBh8emFeiEAeS_mYFKro,20750
@@ -12,13 +12,13 @@ dataeval/_internal/detectors/drift/uncertainty.py,sha256=Ot8L42AnFbkij4J3Tis7VzX
 dataeval/_internal/detectors/duplicates.py,sha256=wggaIl3uFxihNBQhPv5JcreZbhVaFKoMAJMv_9-aaHU,5324
 dataeval/_internal/detectors/merged_stats.py,sha256=okXGrqAgsqfANMxfIjiUQlZWlaIh5TVvIB9UPsOJZ7k,1351
 dataeval/_internal/detectors/ood/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/detectors/ood/ae.py,sha256=
-dataeval/_internal/detectors/ood/aegmm.py,sha256=
-dataeval/_internal/detectors/ood/base.py,sha256=
-dataeval/_internal/detectors/ood/llr.py,sha256=
-dataeval/_internal/detectors/ood/vae.py,sha256=
-dataeval/_internal/detectors/ood/vaegmm.py,sha256=
-dataeval/_internal/detectors/outliers.py,sha256=
+dataeval/_internal/detectors/ood/ae.py,sha256=OTcfvoiCdSFGaAAkejBKwwiHaHKB6sa01aW5fVBI1Bk,2152
+dataeval/_internal/detectors/ood/aegmm.py,sha256=7fRcTXfyUgYfcZOaa9GpGNNxAAp4sQ9zYowfs4s4420,2530
+dataeval/_internal/detectors/ood/base.py,sha256=jMMObJgPUZc2Vbql_UYNXvQAFO305TRhdVxk0YqfzJo,8573
+dataeval/_internal/detectors/ood/llr.py,sha256=wzWOeyqbD0WdXHRa1Qf-_3TbJYEfT6OuTQfcepPsbTM,10235
+dataeval/_internal/detectors/ood/vae.py,sha256=y_HP3tk7Clo3YG9hl-gke9_tJ4XW8x8sQlrC9ZtbVLw,3042
+dataeval/_internal/detectors/ood/vaegmm.py,sha256=SvdUKC8cVyEWfEGcczRmyA4SGJhbol0eDSDry1mZxII,2959
+dataeval/_internal/detectors/outliers.py,sha256=C7Iu66ze5KCCRQNc1TsqkFVKDFGfP4qjGMUv6RUpk-E,10206
 dataeval/_internal/interop.py,sha256=FLXJY-5hwJcKCtruyvaarqynXCMfcLbQSFvGnrWQDPo,1338
 dataeval/_internal/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dataeval/_internal/metrics/balance.py,sha256=8KwuR5HvytJtS1YW9KkNrCu2dPn_gP4FSbXrxQ-9kK8,6182
@@ -29,12 +29,12 @@ dataeval/_internal/metrics/diversity.py,sha256=ZSlq1KBvkMRVAvlznils2QmlPC73TTpHs
 dataeval/_internal/metrics/parity.py,sha256=eTjltNBJOTFH6T_t9V9-1EFr_U0vqlU642o3x2RWgz0,16527
 dataeval/_internal/metrics/stats/base.py,sha256=DRTaaFVtbH1M-wLO2NrtuAAXx699vlEjP9d2no72pM4,11066
 dataeval/_internal/metrics/stats/boxratiostats.py,sha256=Ac6nB41q43xHCJRDEXHNgsJF80VE8MpH8_kySxA84BE,6342
-dataeval/_internal/metrics/stats/datasetstats.py,sha256=
-dataeval/_internal/metrics/stats/dimensionstats.py,sha256=
+dataeval/_internal/metrics/stats/datasetstats.py,sha256=1H8Njtr27oNO8Hn3pwizAlOFkVe3QpbJb-RYk4dLKkY,6201
+dataeval/_internal/metrics/stats/dimensionstats.py,sha256=EIXrRia7OyB147WgAW2tqEwPMcCNWmSQidx5uQukSqQ,3915
 dataeval/_internal/metrics/stats/hashstats.py,sha256=I-aX-R0Rlvjwo7A5bjq3Bqs7-utTapnXB87z9TyC12w,2088
 dataeval/_internal/metrics/stats/labelstats.py,sha256=BNxI2flvKhSps2o4-TPbN9nf52ctatI2SuDZ07hah5E,4058
 dataeval/_internal/metrics/stats/pixelstats.py,sha256=_b0TdjHZwe2yj5Cdmz2IhbQP4LTnHI1qFlDgPV8fuCs,4420
-dataeval/_internal/metrics/stats/visualstats.py,sha256=
+dataeval/_internal/metrics/stats/visualstats.py,sha256=TdPwiehv0dY5HJmOOQk4_omfMd725NqOPG21A-q_t0I,4788
 dataeval/_internal/metrics/uap.py,sha256=RumSQey6vNoz9CtOG2_Inb-TurKJrAHqwhkyWBirxhk,2128
 dataeval/_internal/metrics/utils.py,sha256=vW3mQHjF0AvYlml27X5dZgd0YBk3zyBvvztLEfdRkvI,13475
 dataeval/_internal/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -43,12 +43,12 @@ dataeval/_internal/models/pytorch/autoencoder.py,sha256=nPyLjLZrPNla-lMnym3fUW-O
 dataeval/_internal/models/pytorch/blocks.py,sha256=pm2xwsDZjZJYXrhhiz8husvh2vHmrkFMSYEn-EDUD5Q,1354
 dataeval/_internal/models/pytorch/utils.py,sha256=Qgwym1PxGuwxbXCKUT-8r6Iyrxqm7x94oj45Vf5_CjE,1675
 dataeval/_internal/models/tensorflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/models/tensorflow/autoencoder.py,sha256=
+dataeval/_internal/models/tensorflow/autoencoder.py,sha256=l-3utb7Rwq6_KiqOPBHnYxR7ngYhpIDFvbvNZc5UvNI,10369
 dataeval/_internal/models/tensorflow/gmm.py,sha256=QoEgbeax1GETqRmUF7A2ih9uFOZfFAjGzgH2ljExlAc,3669
-dataeval/_internal/models/tensorflow/losses.py,sha256=
-dataeval/_internal/models/tensorflow/pixelcnn.py,sha256=
-dataeval/_internal/models/tensorflow/trainer.py,sha256=
-dataeval/_internal/models/tensorflow/utils.py,sha256=
+dataeval/_internal/models/tensorflow/losses.py,sha256=LavFmi9AWfE_HO4YxQ54kV8LZG5-UeCOhOlcPpxo-ic,3979
+dataeval/_internal/models/tensorflow/pixelcnn.py,sha256=ru4KF8CZHKbOpp-ZgDxuRdbcv_nTCs1i1H2lTMamL7Y,48331
+dataeval/_internal/models/tensorflow/trainer.py,sha256=LJ3t6Ud95cofKN-cgb5o5nDrYSFse7LSDOYIBkMgDJk,4094
+dataeval/_internal/models/tensorflow/utils.py,sha256=Uq6eUTEeUHGopL1_VBH656-Ue18v6WgiEUjmk8SMsc8,8741
 dataeval/_internal/output.py,sha256=qVbOi41dvfQICQ4uxysHPWBRKo1XR61kXHPL_vKOPm0,2545
 dataeval/_internal/utils.py,sha256=jo6bGJZAgyuZqRpAAC4gwhAHYE12316na19ZuFwMqes,1504
 dataeval/_internal/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -65,16 +65,16 @@ dataeval/metrics/estimators/__init__.py,sha256=4VFMKLPsJdaWiflf84bXGQ2k8ertFQ4WE
 dataeval/metrics/stats/__init__.py,sha256=AKlNelORMOM2OA9XIvwZ9nOn6dK6k-r-69ldEAuqgLA,1156
 dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dataeval/utils/__init__.py,sha256=cW_5-DIZG2OFRs3FVLOz0uCv4JWdaoVO7C9rOlR7ZEA,526
-dataeval/utils/tensorflow/__init__.py,sha256=
-dataeval/utils/tensorflow/loss/__init__.py,sha256=
-dataeval/utils/tensorflow/models/__init__.py,sha256=
+dataeval/utils/tensorflow/__init__.py,sha256=sKRG3b_MLQUrAftkRZ17JyNZt6gjEguvTK83hO_IwRQ,530
+dataeval/utils/tensorflow/loss/__init__.py,sha256=s7tD_5dYWcNDmntGiEHhG7bVDsMAY1UO8FpQFe9cUns,195
+dataeval/utils/tensorflow/models/__init__.py,sha256=1R9Oi5DOYwT0W3JSEfoMsPOvhYFaKqKilwkrUifNnig,385
 dataeval/utils/tensorflow/recon/__init__.py,sha256=xe6gAQqK9tyAoDQTtaJAxIPK1humt5QzsG_9NPsqx58,116
-dataeval/utils/torch/__init__.py,sha256=
-dataeval/utils/torch/datasets/__init__.py,sha256=
-dataeval/utils/torch/models/__init__.py,sha256=
-dataeval/utils/torch/trainer/__init__.py,sha256=
+dataeval/utils/torch/__init__.py,sha256=430fNKbqLByuGSeNhnoIJy3g9Z94ckZsAKWUZ15MVP4,575
+dataeval/utils/torch/datasets/__init__.py,sha256=94k7fMQdxYlQXDYouAHUgrQJ2oBwnvq4koFJpyhlUVA,292
+dataeval/utils/torch/models/__init__.py,sha256=q1BzoLHWA0uBXzT2glWJDrxVA1BN7xnkT2r_d-7Dlyw,246
+dataeval/utils/torch/trainer/__init__.py,sha256=hpcrlCCXPzb8b7FOzEAKqFy6Z7Zl4V_cx3yA7n3L1L4,177
 dataeval/workflows/__init__.py,sha256=VFeJyMhZxvj8WnU5Un32mwO8lNfBQOBjD9IdOqexnAE,320
-dataeval-0.71.0.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
-dataeval-0.71.0.dist-info/METADATA,sha256=
-dataeval-0.71.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-dataeval-0.71.0.dist-info/RECORD,,
+dataeval-0.72.0.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
+dataeval-0.72.0.dist-info/METADATA,sha256=zII-d_cbyUn94mYuftonuHljqd8HzDgY2hDQNt1xbcg,4581
+dataeval-0.72.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+dataeval-0.72.0.dist-info/RECORD,,
{dataeval-0.71.0.dist-info → dataeval-0.72.0.dist-info}/LICENSE.txt
File without changes

{dataeval-0.71.0.dist-info → dataeval-0.72.0.dist-info}/WHEEL
File without changes