dataeval 0.69.4__py3-none-any.whl → 0.70.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dataeval/__init__.py +8 -8
- dataeval/_internal/datasets.py +235 -131
- dataeval/_internal/detectors/clusterer.py +2 -0
- dataeval/_internal/detectors/drift/base.py +7 -8
- dataeval/_internal/detectors/drift/mmd.py +4 -4
- dataeval/_internal/detectors/duplicates.py +64 -45
- dataeval/_internal/detectors/merged_stats.py +23 -54
- dataeval/_internal/detectors/ood/ae.py +8 -6
- dataeval/_internal/detectors/ood/aegmm.py +6 -4
- dataeval/_internal/detectors/ood/base.py +12 -7
- dataeval/_internal/detectors/ood/llr.py +6 -4
- dataeval/_internal/detectors/ood/vae.py +5 -3
- dataeval/_internal/detectors/ood/vaegmm.py +6 -4
- dataeval/_internal/detectors/outliers.py +137 -63
- dataeval/_internal/interop.py +11 -7
- dataeval/_internal/metrics/balance.py +13 -11
- dataeval/_internal/metrics/ber.py +5 -3
- dataeval/_internal/metrics/coverage.py +4 -0
- dataeval/_internal/metrics/divergence.py +9 -5
- dataeval/_internal/metrics/diversity.py +14 -12
- dataeval/_internal/metrics/parity.py +32 -22
- dataeval/_internal/metrics/stats/base.py +231 -0
- dataeval/_internal/metrics/stats/boxratiostats.py +159 -0
- dataeval/_internal/metrics/stats/datasetstats.py +99 -0
- dataeval/_internal/metrics/stats/dimensionstats.py +113 -0
- dataeval/_internal/metrics/stats/hashstats.py +75 -0
- dataeval/_internal/metrics/stats/labelstats.py +125 -0
- dataeval/_internal/metrics/stats/pixelstats.py +119 -0
- dataeval/_internal/metrics/stats/visualstats.py +124 -0
- dataeval/_internal/metrics/uap.py +8 -4
- dataeval/_internal/metrics/utils.py +30 -15
- dataeval/_internal/models/pytorch/autoencoder.py +5 -5
- dataeval/_internal/models/tensorflow/pixelcnn.py +1 -4
- dataeval/_internal/output.py +3 -18
- dataeval/_internal/utils.py +11 -16
- dataeval/_internal/workflows/sufficiency.py +152 -151
- dataeval/detectors/__init__.py +4 -0
- dataeval/detectors/drift/__init__.py +8 -3
- dataeval/detectors/drift/kernels/__init__.py +4 -0
- dataeval/detectors/drift/updates/__init__.py +4 -0
- dataeval/detectors/linters/__init__.py +15 -4
- dataeval/detectors/ood/__init__.py +14 -2
- dataeval/metrics/__init__.py +5 -0
- dataeval/metrics/bias/__init__.py +13 -4
- dataeval/metrics/estimators/__init__.py +8 -8
- dataeval/metrics/stats/__init__.py +25 -3
- dataeval/utils/__init__.py +16 -3
- dataeval/utils/tensorflow/__init__.py +11 -0
- dataeval/utils/torch/__init__.py +12 -0
- dataeval/utils/torch/datasets/__init__.py +7 -0
- dataeval/workflows/__init__.py +6 -2
- {dataeval-0.69.4.dist-info → dataeval-0.70.1.dist-info}/METADATA +12 -4
- dataeval-0.70.1.dist-info/RECORD +80 -0
- {dataeval-0.69.4.dist-info → dataeval-0.70.1.dist-info}/WHEEL +1 -1
- dataeval/_internal/flags.py +0 -77
- dataeval/_internal/metrics/stats.py +0 -397
- dataeval/flags/__init__.py +0 -3
- dataeval/tensorflow/__init__.py +0 -3
- dataeval/torch/__init__.py +0 -3
- dataeval-0.69.4.dist-info/RECORD +0 -74
- /dataeval/{tensorflow → utils/tensorflow}/loss/__init__.py +0 -0
- /dataeval/{tensorflow → utils/tensorflow}/models/__init__.py +0 -0
- /dataeval/{tensorflow → utils/tensorflow}/recon/__init__.py +0 -0
- /dataeval/{torch → utils/torch}/models/__init__.py +0 -0
- /dataeval/{torch → utils/torch}/trainer/__init__.py +0 -0
- {dataeval-0.69.4.dist-info → dataeval-0.70.1.dist-info}/LICENSE.txt +0 -0
dataeval/metrics/bias/__init__.py
CHANGED
@@ -1,7 +1,12 @@
-
-
-
-
+"""
+Bias metrics check for skewed or imbalanced datasets and incomplete feature
+representation which may impact model performance.
+"""
+
+from dataeval._internal.metrics.balance import BalanceOutput, balance
+from dataeval._internal.metrics.coverage import CoverageOutput, coverage
+from dataeval._internal.metrics.diversity import DiversityOutput, diversity
+from dataeval._internal.metrics.parity import ParityOutput, label_parity, parity
 
 __all__ = [
     "balance",
@@ -9,4 +14,8 @@ __all__ = [
     "diversity",
     "label_parity",
     "parity",
+    "BalanceOutput",
+    "CoverageOutput",
+    "DiversityOutput",
+    "ParityOutput",
 ]
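With this change the bias output classes become part of the public surface, so downstream code can reference them without importing from `dataeval._internal`. A minimal sketch of the new import path; only the names listed in the `__all__` above are assumed, and the functions' signatures and the output fields are outside this diff:

```python
# Sketch only: assumes dataeval 0.70.1 is installed. The names below come
# straight from the __all__ shown in the hunk above; nothing else is assumed.
from dataeval.metrics.bias import (
    BalanceOutput,
    CoverageOutput,
    DiversityOutput,
    ParityOutput,
    balance,
    coverage,
    diversity,
    label_parity,
    parity,
)


def describe(result: BalanceOutput) -> str:
    # Output objects can now appear in annotations without touching
    # dataeval._internal; their field layout is not described by this diff.
    return f"{type(result).__name__}: {result!r}"
```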
dataeval/metrics/estimators/__init__.py
CHANGED
@@ -1,9 +1,9 @@
-
-
-
+"""
+Estimators calculate performance bounds and the statistical distance between datasets.
+"""
 
-
-
-
-
-]
+from dataeval._internal.metrics.ber import BEROutput, ber
+from dataeval._internal.metrics.divergence import DivergenceOutput, divergence
+from dataeval._internal.metrics.uap import UAPOutput, uap
+
+__all__ = ["ber", "divergence", "uap", "BEROutput", "DivergenceOutput", "UAPOutput"]
dataeval/metrics/stats/__init__.py
CHANGED
@@ -1,6 +1,28 @@
-
+"""
+Statistics metrics calculate a variety of image properties and pixel statistics
+and label statistics against the images and labels of a dataset.
+"""
+
+from dataeval._internal.metrics.stats.boxratiostats import boxratiostats
+from dataeval._internal.metrics.stats.datasetstats import DatasetStatsOutput, datasetstats
+from dataeval._internal.metrics.stats.dimensionstats import DimensionStatsOutput, dimensionstats
+from dataeval._internal.metrics.stats.hashstats import HashStatsOutput, hashstats
+from dataeval._internal.metrics.stats.labelstats import LabelStatsOutput, labelstats
+from dataeval._internal.metrics.stats.pixelstats import PixelStatsOutput, pixelstats
+from dataeval._internal.metrics.stats.visualstats import VisualStatsOutput, visualstats
 
 __all__ = [
-    "
-    "
+    "boxratiostats",
+    "datasetstats",
+    "dimensionstats",
+    "hashstats",
+    "labelstats",
+    "pixelstats",
+    "visualstats",
+    "DatasetStatsOutput",
+    "DimensionStatsOutput",
+    "HashStatsOutput",
+    "LabelStatsOutput",
+    "PixelStatsOutput",
+    "VisualStatsOutput",
+]
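Together with the deletion of `dataeval/_internal/flags.py` and the old monolithic `dataeval/_internal/metrics/stats.py` (see the file list and the deleted hunk at the end of this diff), this appears to replace the flag-driven stats interface with one function and one output class per statistic category. A minimal sketch of the new consumer-facing import surface; only the names in the `__all__` above are assumed, and call signatures are not shown because they are not part of this diff:

```python
# Sketch only: assumes dataeval 0.70.1 is installed. Each stats category now
# has a dedicated function plus a matching *StatsOutput class exported from
# dataeval.metrics.stats rather than being selected via flag combinations.
from dataeval.metrics.stats import (
    DatasetStatsOutput,
    DimensionStatsOutput,
    HashStatsOutput,
    LabelStatsOutput,
    PixelStatsOutput,
    VisualStatsOutput,
    boxratiostats,
    datasetstats,
    dimensionstats,
    hashstats,
    labelstats,
    pixelstats,
    visualstats,
)
```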
dataeval/utils/__init__.py
CHANGED
@@ -1,6 +1,19 @@
-
+"""
+The utility classes and functions are provided by DataEval to assist users
+in setting up architectures that are guaranteed to work with applicable DataEval
+metrics. Currently DataEval supports both Tensorflow and PyTorch backends.
+"""
+
+from dataeval import _IS_TENSORFLOW_AVAILABLE, _IS_TORCH_AVAILABLE
+
+__all__ = []
 
 if _IS_TORCH_AVAILABLE:  # pragma: no cover
-    from
+    from . import torch
+
+    __all__ += ["torch"]
+
+if _IS_TENSORFLOW_AVAILABLE:  # pragma: no cover
+    from . import tensorflow
 
-    __all__
+    __all__ += ["tensorflow"]
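Because the `torch` and `tensorflow` submodules are only re-exported when the corresponding backend is installed, backend-agnostic code can feature-detect through `__all__` instead of importing them unconditionally. A small sketch of that pattern, assuming nothing beyond the guard logic shown above:

```python
# Sketch: feature-detect the optional backends rather than importing them blindly.
import dataeval.utils

HAS_TORCH_UTILS = "torch" in dataeval.utils.__all__
HAS_TF_UTILS = "tensorflow" in dataeval.utils.__all__

if HAS_TORCH_UTILS:
    from dataeval.utils import torch as torch_utils  # exports models, trainer, read_dataset
if HAS_TF_UTILS:
    from dataeval.utils import tensorflow as tf_utils  # exports loss, models, recon
```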
dataeval/utils/tensorflow/__init__.py
ADDED
@@ -0,0 +1,11 @@
+"""
+Tensorflow models are used in out-of-distribution detectors in the :mod:`dataeval.detectors.ood` module.
+
+DataEval provides both basic default models through the utility :func:`dataeval.utils.tensorflow.models.create_model`
+as well as constructors which allow for customization of the encoder, decoder and any other applicable
+layers used by the model.
+"""
+
+from . import loss, models, recon
+
+__all__ = ["loss", "models", "recon"]
dataeval/utils/torch/__init__.py
ADDED
@@ -0,0 +1,12 @@
+"""
+PyTorch is the primary backend for metrics that require neural networks.
+
+While these metrics can take in custom models, DataEval provides utility classes
+to create a seamless integration between custom models and DataEval's metrics.
+"""
+
+from dataeval._internal.utils import read_dataset
+
+from . import models, trainer
+
+__all__ = ["read_dataset", "models", "trainer"]
dataeval/workflows/__init__.py
CHANGED
@@ -1,6 +1,10 @@
+"""
+Workflows perform a sequence of actions to analyze the dataset and make predictions.
+"""
+
 from dataeval import _IS_TORCH_AVAILABLE
 
 if _IS_TORCH_AVAILABLE:  # pragma: no cover
-    from dataeval._internal.workflows.sufficiency import Sufficiency
+    from dataeval._internal.workflows.sufficiency import Sufficiency, SufficiencyOutput
 
-    __all__ = ["Sufficiency"]
+    __all__ = ["Sufficiency", "SufficiencyOutput"]
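Exporting `SufficiencyOutput` alongside `Sufficiency` lets downstream code reference the result type without reaching into `dataeval._internal`. Both names exist only when the torch extra is installed, so a guarded import keeps modules importable either way. A minimal sketch, assuming only the guard shown above:

```python
# Sketch: these names are only defined when _IS_TORCH_AVAILABLE is true,
# so guard the import instead of assuming the torch extra is installed.
try:
    from dataeval.workflows import Sufficiency, SufficiencyOutput
except ImportError:  # torch extra not installed
    Sufficiency = None
    SufficiencyOutput = None
```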
{dataeval-0.69.4.dist-info → dataeval-0.70.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dataeval
-Version: 0.69.4
+Version: 0.70.1
 Summary: DataEval provides a simple interface to characterize image data and its impact on model performance across classification and object-detection tasks
 Home-page: https://dataeval.ai/
 License: MIT
@@ -30,10 +30,9 @@ Requires-Dist: pillow (>=10.3.0)
 Requires-Dist: scikit-learn (>=1.5.0)
 Requires-Dist: scipy (>=1.10)
 Requires-Dist: tensorflow (>=2.14.1,<2.16) ; extra == "tensorflow" or extra == "all"
-Requires-Dist: tensorflow-io-gcs-filesystem (>=0.35.0,<0.37) ; extra == "tensorflow" or extra == "all"
 Requires-Dist: tensorflow_probability (>=0.22.1,<0.24) ; extra == "tensorflow" or extra == "all"
-Requires-Dist: torch (>=2.
-Requires-Dist: torchvision (>=0.
+Requires-Dist: torch (>=2.2.0) ; extra == "torch" or extra == "all"
+Requires-Dist: torchvision (>=0.17.0) ; extra == "torch" or extra == "all"
 Requires-Dist: xxhash (>=3.3)
 Project-URL: Documentation, https://dataeval.readthedocs.io/
 Project-URL: Repository, https://github.com/aria-ml/dataeval/
@@ -75,6 +74,15 @@ You can install DataEval directly from pypi.org using the following command. Th
 pip install dataeval[all]
 ```
 
+### Installing DataEval in Conda/Mamba
+
+DataEval can be installed in a Conda/Mamba environment using the provided `environment.yaml` file. As some dependencies
+are installed from the `pytorch` channel, the channel is specified in the below example.
+
+```
+micromamba create -f environment\environment.yaml -c pytorch
+```
+
 ### Installing DataEval from GitHub
 
 To install DataEval from source locally on Ubuntu, you will need `git-lfs` to download larger, binary source files and `poetry` for project dependency management.
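The dependency changes above (dropping `tensorflow-io-gcs-filesystem`, updating the `torch` and `torchvision` pins, and keeping both backends behind the `torch`, `tensorflow`, and `all` extras) can be checked against an installed copy with the standard library alone. A small sketch using `importlib.metadata`; it assumes the 0.70.1 wheel is installed in the current environment:

```python
# Sketch: confirm the installed version and list the optional backend pins
# recorded in the wheel METADATA shown above.
from importlib.metadata import requires, version

print(version("dataeval"))  # expected to print 0.70.1 for this wheel

for req in requires("dataeval") or []:
    # Requirement strings carry the same environment markers as Requires-Dist,
    # e.g. 'torch (>=2.2.0) ; extra == "torch" or extra == "all"'.
    if "extra ==" in req:
        print(req)
```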
dataeval-0.70.1.dist-info/RECORD
ADDED
@@ -0,0 +1,80 @@
+dataeval/__init__.py,sha256=AIHxRS7PYlqg4s7fZJTPKuTtyWFWoVROw4knVoSBH6E,555
+dataeval/_internal/datasets.py,sha256=KbXSR-vOAzFamfXHRnI9mhhqUzEPyGpK47fZsirQn1I,14638
+dataeval/_internal/detectors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/detectors/clusterer.py,sha256=srqTHzh9kIy7Ty4VYaptwuQlBh8emFeiEAeS_mYFKro,20750
+dataeval/_internal/detectors/drift/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/detectors/drift/base.py,sha256=9EIb4eHVnZ8j6ms_kxRs6fOWbHkYKeHPPHWVGD4ikZo,15984
+dataeval/_internal/detectors/drift/cvm.py,sha256=xiyZlf0rAQGG8Z6ZBLPVri805aPRkERrUySwRN8cTZQ,4010
+dataeval/_internal/detectors/drift/ks.py,sha256=aoDx7ps-5vrSI8Q9ii6cwmKnAyaD8tjG69wI-7R3MVQ,4098
+dataeval/_internal/detectors/drift/mmd.py,sha256=ztQSdSlpD66z9xFKqvNo3QHR1vEvf6X-m0LvxNckQgc,7517
+dataeval/_internal/detectors/drift/torch.py,sha256=YhIN85MbUV3C4IJcRvqYdXSWLj5lUeEOb05T5DgB3xo,11552
+dataeval/_internal/detectors/drift/uncertainty.py,sha256=Ot8L42AnFbkij4J3Tis7VzXLv3hfBxoOWBP4UoCEnVs,5125
+dataeval/_internal/detectors/duplicates.py,sha256=wggaIl3uFxihNBQhPv5JcreZbhVaFKoMAJMv_9-aaHU,5324
+dataeval/_internal/detectors/merged_stats.py,sha256=okXGrqAgsqfANMxfIjiUQlZWlaIh5TVvIB9UPsOJZ7k,1351
+dataeval/_internal/detectors/ood/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/detectors/ood/ae.py,sha256=AIffh11UKZX-3oocDDp8-t-uDUm8aQKvrV0nIE3BLQo,2140
+dataeval/_internal/detectors/ood/aegmm.py,sha256=q2kRXZM5X0PoA10mRsi8Gh-W5FdFzEsnM1yDq3GFzn0,2518
+dataeval/_internal/detectors/ood/base.py,sha256=qYSmcN74x5-qL0_I7uNo4eQ8X8pr7M4cwjs2qvkJt5g,8561
+dataeval/_internal/detectors/ood/llr.py,sha256=VgZtMrMgI8zDVUzsqm2l4tqsULFIhdQeDd4lzdo_G7U,10217
+dataeval/_internal/detectors/ood/vae.py,sha256=iXEltu5pATWr42-28hZ3ARZavJrptLwUM5P9c8omA_s,3030
+dataeval/_internal/detectors/ood/vaegmm.py,sha256=ujp6UN0wpZcmPDPkVfTHZxgka5kuTOSzgXMmbKdmK2U,2947
+dataeval/_internal/detectors/outliers.py,sha256=JmAXoMO0Od7tc9RVFGJsDyOnByciPFG5FdS54Iu0BII,10396
+dataeval/_internal/interop.py,sha256=FLXJY-5hwJcKCtruyvaarqynXCMfcLbQSFvGnrWQDPo,1338
+dataeval/_internal/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/metrics/balance.py,sha256=8KwuR5HvytJtS1YW9KkNrCu2dPn_gP4FSbXrxQ-9kK8,6182
+dataeval/_internal/metrics/ber.py,sha256=RLRC3ouKYCLYJsA0sqS1gLjE_WFLq7dHElRfVVabvWA,4719
+dataeval/_internal/metrics/coverage.py,sha256=jxvzWpVQrfmN3S9rpXvyb35vpRn1ovV2IdXdB6aU2-w,3560
+dataeval/_internal/metrics/divergence.py,sha256=gKQt9rxmhW8RnODCoLgFSPnCUWEMjmNIPlCV2w6E6oU,4211
+dataeval/_internal/metrics/diversity.py,sha256=ZSlq1KBvkMRVAvlznils2QmlPC73TTpHs1ux7PoFrio,7664
+dataeval/_internal/metrics/parity.py,sha256=eTjltNBJOTFH6T_t9V9-1EFr_U0vqlU642o3x2RWgz0,16527
+dataeval/_internal/metrics/stats/base.py,sha256=HyjgHTQZqgkkCWDzOF-aNZBr88IAjnao8VSbHC5ZtbI,8554
+dataeval/_internal/metrics/stats/boxratiostats.py,sha256=Ac6nB41q43xHCJRDEXHNgsJF80VE8MpH8_kySxA84BE,6342
+dataeval/_internal/metrics/stats/datasetstats.py,sha256=6DFl3TE7t2ggDD8WBVgPH7F2bRvae7NR2PVoEWL92dw,3759
+dataeval/_internal/metrics/stats/dimensionstats.py,sha256=MUQJgrWmRoQFap7gPf8vTFXJ_z7G7bAQpZ7kCPRtNkA,3847
+dataeval/_internal/metrics/stats/hashstats.py,sha256=xH0k_wOeGO5UC7-0fhAIg4WV2fO8fnF0Jdn18gYhW88,2087
+dataeval/_internal/metrics/stats/labelstats.py,sha256=BNxI2flvKhSps2o4-TPbN9nf52ctatI2SuDZ07hah5E,4058
+dataeval/_internal/metrics/stats/pixelstats.py,sha256=LxoDQ6afsNuzB0WnOgmzkEUV7s534MrAYkzS6Be7PPQ,4419
+dataeval/_internal/metrics/stats/visualstats.py,sha256=3uET0N3WgV5dcxst8Xb9DhcATiNfAXsx1OKbPz2mU4Q,4712
+dataeval/_internal/metrics/uap.py,sha256=RumSQey6vNoz9CtOG2_Inb-TurKJrAHqwhkyWBirxhk,2128
+dataeval/_internal/metrics/utils.py,sha256=vW3mQHjF0AvYlml27X5dZgd0YBk3zyBvvztLEfdRkvI,13475
+dataeval/_internal/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/models/pytorch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/models/pytorch/autoencoder.py,sha256=nPyLjLZrPNla-lMnym3fUW-O1F00JbIrVUrUFU4C4UQ,8364
+dataeval/_internal/models/pytorch/blocks.py,sha256=pm2xwsDZjZJYXrhhiz8husvh2vHmrkFMSYEn-EDUD5Q,1354
+dataeval/_internal/models/pytorch/utils.py,sha256=Qgwym1PxGuwxbXCKUT-8r6Iyrxqm7x94oj45Vf5_CjE,1675
+dataeval/_internal/models/tensorflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/models/tensorflow/autoencoder.py,sha256=Ryn11jDbpZJOM5De-kMGPdbJBQEdwip6B20ajS8HqpE,10354
+dataeval/_internal/models/tensorflow/gmm.py,sha256=QoEgbeax1GETqRmUF7A2ih9uFOZfFAjGzgH2ljExlAc,3669
+dataeval/_internal/models/tensorflow/losses.py,sha256=pZH5RnlM9R0RrBde9Lgq32muwAp7_PWc56Mu4u8RVvo,3976
+dataeval/_internal/models/tensorflow/pixelcnn.py,sha256=keI1gTNjBk18YD91Cp4exfuGYWU9lt-wapvhSazhcVs,48319
+dataeval/_internal/models/tensorflow/trainer.py,sha256=xNY0Iw7Qa1TnCuy9N1b77_VduFoW_BhbZjfQCxOVby4,4082
+dataeval/_internal/models/tensorflow/utils.py,sha256=l6jXKMWyQAEI4LpAONq95Xwr7CPgrs408ypf9TuNxkY,8732
+dataeval/_internal/output.py,sha256=qVbOi41dvfQICQ4uxysHPWBRKo1XR61kXHPL_vKOPm0,2545
+dataeval/_internal/utils.py,sha256=jo6bGJZAgyuZqRpAAC4gwhAHYE12316na19ZuFwMqes,1504
+dataeval/_internal/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/workflows/sufficiency.py,sha256=5N07nV5Oi4kUCm_3rJoj0SeIy1iRC3LciEgrR7E9m7Q,18240
+dataeval/detectors/__init__.py,sha256=8nJk2U99_SQD7RtEKjyS4WJct8gX1HgjEm4oHTnRhTI,320
+dataeval/detectors/drift/__init__.py,sha256=9TbJok7fH3mcXcM7c2vT3WZnZr2wanY_8TUwu-8JX58,800
+dataeval/detectors/drift/kernels/__init__.py,sha256=djIbmvYoHWpWxfdYtiouEC2KqzvgmtEqlg1i5p-UCgM,266
+dataeval/detectors/drift/updates/__init__.py,sha256=tiYSA1-AsTiFgC3LuxM8iYFsWUX0Fr8hElzWvU8ovig,267
+dataeval/detectors/linters/__init__.py,sha256=m5F5JgGBcqGb3J_qXQ3PBkKyePjOklrYbM9dGUsgxFA,489
+dataeval/detectors/ood/__init__.py,sha256=K5QrSJg2QePs6Pa3Cg80ZwXu7BELLrSlbEpTdxuL3Ys,777
+dataeval/metrics/__init__.py,sha256=U0sRw5eiqeeDLbLPxT_rznZsvtNwONVxKVwfC0qVOgo,223
+dataeval/metrics/bias/__init__.py,sha256=Wn1Ui_g-9cR4c4IS7RFKJ6UH5DLXKjEBoXTuEYPXSBc,619
+dataeval/metrics/estimators/__init__.py,sha256=4VFMKLPsJdaWiflf84bXGQ2k8ertFQ4WEPhyWqjFFvE,377
+dataeval/metrics/stats/__init__.py,sha256=UcD41gFwFhcQMtqwWkPQlg6cFA2_gdj6yGRCDrKYXM8,1055
+dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/utils/__init__.py,sha256=cW_5-DIZG2OFRs3FVLOz0uCv4JWdaoVO7C9rOlR7ZEA,526
+dataeval/utils/tensorflow/__init__.py,sha256=XgjqrMtI00ERPPpdokbO1lDyc_H3CZ1TTqUXIj0B6PI,435
+dataeval/utils/tensorflow/loss/__init__.py,sha256=E9eB87LNh0o5nUCqssB027EXBsOfEayNHPcNW0QGFdA,101
+dataeval/utils/tensorflow/models/__init__.py,sha256=OVpmHF8itDcgOlfw6N9jr7IphZPbMJoiu7OdqYhU9fs,291
+dataeval/utils/tensorflow/recon/__init__.py,sha256=xe6gAQqK9tyAoDQTtaJAxIPK1humt5QzsG_9NPsqx58,116
+dataeval/utils/torch/__init__.py,sha256=bYUm-nNlNVU3bqDz7dQHFmaRWgLy3lLrD4cSDumDlxQ,373
+dataeval/utils/torch/datasets/__init__.py,sha256=S6C4OaxEjJJaIpHSZcZfkl4U5iS5YtZ9N5GYHqvbzvM,191
+dataeval/utils/torch/models/__init__.py,sha256=YnDnePYpRIKHyYn3F5qR1OObMSb-g0FGvI8X-uTB09E,162
+dataeval/utils/torch/trainer/__init__.py,sha256=Te-qElt8h-Zv8NN0r-VJOEdCPHTQ2yO3rd2MhRiZGZs,93
+dataeval/workflows/__init__.py,sha256=VFeJyMhZxvj8WnU5Un32mwO8lNfBQOBjD9IdOqexnAE,320
+dataeval-0.70.1.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
+dataeval-0.70.1.dist-info/METADATA,sha256=B2slR1eY_xRR4QcUTpV8EJh5Z_plWmHFqTT5j4r2Vvk,4502
+dataeval-0.70.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+dataeval-0.70.1.dist-info/RECORD,,
dataeval/_internal/flags.py
DELETED
@@ -1,77 +0,0 @@
-from __future__ import annotations
-
-from enum import IntFlag, auto
-from functools import reduce
-from typing import Iterable, TypeVar, cast
-
-TFlag = TypeVar("TFlag", bound=IntFlag)
-
-
-class ImageStat(IntFlag):
-    """
-    Flags for calculating image and channel statistics
-    """
-
-    # HASHES
-    XXHASH = auto()
-    PCHASH = auto()
-
-    # PROPERTIES
-    WIDTH = auto()
-    HEIGHT = auto()
-    SIZE = auto()
-    ASPECT_RATIO = auto()
-    CHANNELS = auto()
-    DEPTH = auto()
-
-    # VISUALS
-    BRIGHTNESS = auto()
-    BLURRINESS = auto()
-    CONTRAST = auto()
-    DARKNESS = auto()
-    MISSING = auto()
-    ZEROS = auto()
-
-    # PIXEL STATS
-    MEAN = auto()
-    STD = auto()
-    VAR = auto()
-    SKEW = auto()
-    KURTOSIS = auto()
-    ENTROPY = auto()
-    PERCENTILES = auto()
-    HISTOGRAM = auto()
-
-    # JOINT FLAGS
-    ALL_HASHES = XXHASH | PCHASH
-    ALL_PROPERTIES = WIDTH | HEIGHT | SIZE | ASPECT_RATIO | CHANNELS | DEPTH
-    ALL_VISUALS = BRIGHTNESS | BLURRINESS | CONTRAST | DARKNESS | MISSING | ZEROS
-    ALL_PIXELSTATS = MEAN | STD | VAR | SKEW | KURTOSIS | ENTROPY | PERCENTILES | HISTOGRAM
-    ALL_CHANNEL_STATS = BRIGHTNESS | CONTRAST | DARKNESS | ZEROS | ALL_PIXELSTATS
-    ALL_STATS = ALL_PROPERTIES | ALL_VISUALS | ALL_PIXELSTATS
-    ALL = ALL_HASHES | ALL_STATS
-
-
-def is_distinct(flag: IntFlag) -> bool:
-    return (flag & (flag - 1) == 0) and flag != 0
-
-
-def to_distinct(flag: TFlag) -> dict[TFlag, str]:
-    """
-    Returns a distinct set of all flags set on the input flag and their names
-
-    NOTE: this is supported natively in Python 3.11, but for earlier versions we need
-    to use a combination of list comprehension and bit fiddling to determine distinct
-    flag values from joint aliases.
-    """
-    if isinstance(flag, Iterable):  # >= py311
-        return {f: f.name.lower() for f in flag if f.name}
-    else:  # < py311
-        return {f: f.name.lower() for f in list(flag.__class__) if f & flag and is_distinct(f) and f.name}
-
-
-def verify_supported(flag: TFlag, flags: TFlag | Iterable[TFlag]):
-    supported = flags if isinstance(flags, flag.__class__) else cast(TFlag, reduce(lambda a, b: a | b, flags))  # type: ignore
-    unsupported = flag & ~supported
-    if unsupported:
-        raise ValueError(f"Unsupported flags {unsupported} called. Only {supported} flags are supported.")
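The deleted `is_distinct` helper relies on a standard bit trick: a value with exactly one bit set satisfies `flag & (flag - 1) == 0`, which is how `to_distinct` separated real flags from joint aliases such as `ALL_PIXELSTATS` on Python versions before 3.11. A standalone illustration of that logic; the `Color` enum here is a made-up stand-in, not part of DataEval:

```python
from enum import IntFlag, auto


class Color(IntFlag):
    RED = auto()    # 0b001
    GREEN = auto()  # 0b010
    BLUE = auto()   # 0b100
    ALL = RED | GREEN | BLUE  # 0b111, a joint alias rather than a distinct flag


def is_distinct(flag: IntFlag) -> bool:
    # Clearing the lowest set bit (flag & (flag - 1)) leaves zero only when
    # exactly one bit was set; excluding zero rules out the empty flag.
    return (flag & (flag - 1) == 0) and flag != 0


assert is_distinct(Color.GREEN)
assert not is_distinct(Color.ALL)
assert not is_distinct(Color.RED | Color.BLUE)  # two bits set
print([f.name for f in Color if is_distinct(f)])  # ['RED', 'GREEN', 'BLUE']
```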