dataeval 0.61.0__py3-none-any.whl → 0.63.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dataeval/__init__.py +1 -1
- dataeval/_internal/detectors/clusterer.py +44 -16
- dataeval/_internal/detectors/drift/base.py +14 -12
- dataeval/_internal/detectors/drift/cvm.py +11 -8
- dataeval/_internal/detectors/drift/ks.py +6 -3
- dataeval/_internal/detectors/drift/mmd.py +14 -12
- dataeval/_internal/detectors/drift/uncertainty.py +7 -5
- dataeval/_internal/detectors/duplicates.py +35 -12
- dataeval/_internal/detectors/linter.py +85 -16
- dataeval/_internal/detectors/ood/ae.py +6 -5
- dataeval/_internal/detectors/ood/aegmm.py +5 -5
- dataeval/_internal/detectors/ood/base.py +14 -13
- dataeval/_internal/detectors/ood/llr.py +6 -4
- dataeval/_internal/detectors/ood/vae.py +5 -4
- dataeval/_internal/detectors/ood/vaegmm.py +5 -4
- dataeval/_internal/functional/__init__.py +0 -0
- dataeval/_internal/functional/ber.py +63 -0
- dataeval/_internal/functional/coverage.py +75 -0
- dataeval/_internal/functional/divergence.py +16 -0
- dataeval/_internal/{metrics → functional}/hash.py +1 -1
- dataeval/_internal/functional/metadata.py +136 -0
- dataeval/_internal/functional/metadataparity.py +190 -0
- dataeval/_internal/functional/uap.py +6 -0
- dataeval/_internal/interop.py +52 -0
- dataeval/_internal/maite/__init__.py +0 -0
- dataeval/_internal/maite/utils.py +30 -0
- dataeval/_internal/metrics/base.py +2 -2
- dataeval/_internal/metrics/ber.py +16 -66
- dataeval/_internal/metrics/coverage.py +51 -35
- dataeval/_internal/metrics/divergence.py +50 -42
- dataeval/_internal/metrics/metadata.py +610 -0
- dataeval/_internal/metrics/metadataparity.py +67 -0
- dataeval/_internal/metrics/parity.py +40 -56
- dataeval/_internal/metrics/stats.py +46 -35
- dataeval/_internal/metrics/uap.py +14 -17
- dataeval/_internal/workflows/__init__.py +0 -0
- dataeval/metrics/__init__.py +2 -1
- {dataeval-0.61.0.dist-info → dataeval-0.63.0.dist-info}/METADATA +1 -2
- dataeval-0.63.0.dist-info/RECORD +68 -0
- dataeval-0.61.0.dist-info/RECORD +0 -55
- /dataeval/_internal/{metrics → functional}/utils.py +0 -0
- {dataeval-0.61.0.dist-info → dataeval-0.63.0.dist-info}/LICENSE.txt +0 -0
- {dataeval-0.61.0.dist-info → dataeval-0.63.0.dist-info}/WHEEL +0 -0
dataeval/_internal/metrics/parity.py
CHANGED
@@ -10,60 +10,8 @@ class Parity:
     Class for evaluating statistics of observed and expected class labels, including:
 
     - Chi Squared test for statistical independence between expected and observed labels
-
-    Parameters
-    ----------
-    expected_labels : np.ndarray
-        List of class labels in the expected dataset
-    observed_labels : np.ndarray
-        List of class labels in the observed dataset
-    num_classes : Optional[int]
-        The number of unique classes in the datasets. If this is not specified, it will
-        be inferred from the set of unique labels in expected_labels and observed_labels
     """
 
-    def __init__(self, expected_labels: np.ndarray, observed_labels: np.ndarray, num_classes: Optional[int] = None):
-        self.set_labels(expected_labels, observed_labels, num_classes)
-
-    def set_labels(self, expected_labels: np.ndarray, observed_labels: np.ndarray, num_classes: Optional[int] = None):
-        """
-        Calculates the label distributions for expected and observed labels
-        and performs validation on the results.
-
-        Parameters
-        ----------
-        expected_labels : np.ndarray
-            List of class labels in the expected dataset
-        observed_labels : np.ndarray
-            List of class labels in the observed dataset
-        num_classes : Optional[int]
-            The number of unique classes in the datasets. If this is not specified, it will
-            be inferred from the set of unique labels in expected_labels and observed_labels
-
-        Raises
-        ------
-        ValueError
-            If x is empty
-        """
-        self.num_classes = num_classes
-
-        # Calculate
-        observed_dist = self._calculate_label_dist(observed_labels)
-        expected_dist = self._calculate_label_dist(expected_labels)
-
-        # Validate
-        self._validate_dist(observed_dist, "observed")
-
-        # Normalize
-        expected_dist = self._normalize_expected_dist(expected_dist, observed_dist)
-
-        # Validate normalized expected distribution
-        self._validate_dist(expected_dist, f"expected for {np.sum(observed_dist)} observations")
-        self._validate_class_balance(expected_dist, observed_dist)
-
-        self._observed_dist = observed_dist
-        self._expected_dist = expected_dist
-
     def _normalize_expected_dist(self, expected_dist: np.ndarray, observed_dist: np.ndarray) -> np.ndarray:
         exp_sum = np.sum(expected_dist)
         obs_sum = np.sum(observed_dist)
@@ -81,7 +29,7 @@ class Parity:
 
         return expected_dist
 
-    def _calculate_label_dist(self, labels: np.ndarray) -> np.ndarray:
+    def _calculate_label_dist(self, labels: np.ndarray, num_classes: int) -> np.ndarray:
         """
         Calculate the class frequencies associated with a dataset
 
@@ -89,13 +37,15 @@ class Parity:
         ----------
         labels : np.ndarray
             List of class labels in a dataset
+        num_classes: int
+            The number of unique classes in the datasets
 
         Returns
        -------
         label_dist : np.ndarray
             Array representing label distributions
         """
-        label_dist = np.bincount(labels, minlength=
+        label_dist = np.bincount(labels, minlength=num_classes)
         return label_dist
 
     def _validate_class_balance(self, expected_dist: np.ndarray, observed_dist: np.ndarray):
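The reworked _calculate_label_dist forwards the caller-supplied class count to NumPy as minlength. A quick plain-NumPy illustration (not DataEval code) of why that padding matters when a class is absent from one of the label sets:

    import numpy as np

    labels = np.array([0, 2, 2, 1])
    # Without padding the counts stop at the largest label that was seen ...
    np.bincount(labels)               # -> array([1, 1, 2])
    # ... with minlength both distributions come back with the same fixed length
    np.bincount(labels, minlength=5)  # -> array([1, 1, 2, 0, 0])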
@@ -157,7 +107,9 @@ class Parity:
                 " to invalid chi-squared evaluation."
             )
 
-    def evaluate(
+    def evaluate(
+        self, expected_labels: np.ndarray, observed_labels: np.ndarray, num_classes: Optional[int] = None
+    ) -> Tuple[np.float64, np.float64]:
         """
         Perform a one-way chi-squared test between observation frequencies and expected frequencies that
         tests the null hypothesis that the observed data has the expected frequencies.
@@ -166,14 +118,46 @@ class Parity:
         https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html
         https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html
 
+        Parameters
+        ----------
+        expected_labels : np.ndarray
+            List of class labels in the expected dataset
+        observed_labels : np.ndarray
+            List of class labels in the observed dataset
+        num_classes : Optional[int]
+            The number of unique classes in the datasets. If this is not specified, it will
+            be inferred from the set of unique labels in expected_labels and observed_labels
+
         Returns
         -------
         np.float64
             chi-squared value of the test
         np.float64
             p-value of the test
+
+        Raises
+        ------
+        ValueError
+            If x is empty
         """
-
+        # Calculate
+        if not num_classes:
+            num_classes = 0
+
+        observed_dist = self._calculate_label_dist(observed_labels, num_classes)
+        expected_dist = self._calculate_label_dist(expected_labels, num_classes)
+
+        # Validate
+        self._validate_dist(observed_dist, "observed")
+
+        # Normalize
+        expected_dist = self._normalize_expected_dist(expected_dist, observed_dist)
+
+        # Validate normalized expected distribution
+        self._validate_dist(expected_dist, f"expected for {np.sum(observed_dist)} observations")
+        self._validate_class_balance(expected_dist, observed_dist)
+
+        cs_result = scipy.stats.chisquare(f_obs=observed_dist, f_exp=expected_dist)
 
         chisquared = cs_result.statistic
         p_value = cs_result.pvalue
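Taken together, these hunks move Parity from a stateful, constructor-driven API to a stateless evaluate call. A minimal usage sketch under that reading (assuming the 0.63.0 constructor now takes no arguments; the label values are made up for illustration):

    import numpy as np
    from dataeval.metrics import Parity

    expected = np.array([0, 0, 1, 1, 2, 2])   # labels from the reference split
    observed = np.array([0, 1, 1, 2, 2, 2])   # labels from the split under test

    # num_classes is optional; when omitted it is inferred from the labels
    chisquared, p_value = Parity().evaluate(expected, observed)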
dataeval/_internal/metrics/stats.py
CHANGED
@@ -5,22 +5,23 @@ import numpy as np
 from scipy.stats import entropy, kurtosis, skew
 
 from dataeval._internal.flags import ImageHash, ImageProperty, ImageStatistics, ImageStatsFlags, ImageVisuals
-from dataeval._internal.
-from dataeval._internal.
-from dataeval._internal.
+from dataeval._internal.functional.hash import pchash, xxhash
+from dataeval._internal.functional.utils import edge_filter, get_bitdepth, normalize_image_shape, rescale
+from dataeval._internal.interop import ArrayLike, to_numpy_iter
+from dataeval._internal.metrics.base import EvaluateMixin, MetricMixin
 
 QUARTILES = (0, 25, 50, 75, 100)
 
-TBatch = TypeVar("TBatch", bound=Sequence)
+TBatch = TypeVar("TBatch", bound=Sequence[ArrayLike])
 TFlag = TypeVar("TFlag", bound=Flag)
 
 
-class BaseStatsMetric(MetricMixin, Generic[TBatch, TFlag]):
+class BaseStatsMetric(EvaluateMixin, MetricMixin, Generic[TBatch, TFlag]):
     def __init__(self, flags: TFlag):
         self.flags = flags
         self.results = []
 
-    def update(self,
+    def update(self, images: TBatch) -> None:
         """
         Updates internal metric cache for later calculation
 
@@ -66,6 +67,16 @@ class BaseStatsMetric(MetricMixin, Generic[TBatch, TFlag]):
         )
         return [flag.name.lower() for flag in flags if flag.name is not None]
 
+    def evaluate(self, images: TBatch) -> Dict[str, Any]:
+        """Calculate metric results given a single batch of images"""
+        if self.results:
+            raise RuntimeError("Call reset before calling evaluate")
+
+        self.update(images)
+        results = self.compute()
+        self.reset()
+        return results
+
 
 class ImageHashMetric(BaseStatsMetric):
     """
@@ -80,12 +91,12 @@ class ImageHashMetric(BaseStatsMetric):
     def __init__(self, flags: ImageHash = ImageHash.ALL):
         super().__init__(flags)
 
-    def update(self,
-        for
+    def update(self, images: Iterable[ArrayLike]) -> None:
+        for image in to_numpy_iter(images):
             results = self._map(
                 {
-                    ImageHash.XXHASH: lambda: xxhash(
-                    ImageHash.PCHASH: lambda: pchash(
+                    ImageHash.XXHASH: lambda: xxhash(image),
+                    ImageHash.PCHASH: lambda: pchash(image),
                 }
             )
             self.results.append(results)
@@ -104,16 +115,16 @@ class ImagePropertyMetric(BaseStatsMetric):
     def __init__(self, flags: ImageProperty = ImageProperty.ALL):
         super().__init__(flags)
 
-    def update(self,
-        for
+    def update(self, images: Iterable[ArrayLike]) -> None:
+        for image in to_numpy_iter(images):
             results = self._map(
                 {
-                    ImageProperty.WIDTH: lambda: np.int32(
-                    ImageProperty.HEIGHT: lambda: np.int32(
-                    ImageProperty.SIZE: lambda: np.int32(
-                    ImageProperty.ASPECT_RATIO: lambda:
-                    ImageProperty.CHANNELS: lambda:
-                    ImageProperty.DEPTH: lambda: get_bitdepth(
+                    ImageProperty.WIDTH: lambda: np.int32(image.shape[-1]),
+                    ImageProperty.HEIGHT: lambda: np.int32(image.shape[-2]),
+                    ImageProperty.SIZE: lambda: np.int32(image.shape[-1] * image.shape[-2]),
+                    ImageProperty.ASPECT_RATIO: lambda: image.shape[-1] / np.int32(image.shape[-2]),
+                    ImageProperty.CHANNELS: lambda: image.shape[-3],
+                    ImageProperty.DEPTH: lambda: get_bitdepth(image).depth,
                 }
             )
             self.results.append(results)
@@ -132,14 +143,14 @@ class ImageVisualsMetric(BaseStatsMetric):
     def __init__(self, flags: ImageVisuals = ImageVisuals.ALL):
         super().__init__(flags)
 
-    def update(self,
-        for
+    def update(self, images: Iterable[ArrayLike]) -> None:
+        for image in to_numpy_iter(images):
             results = self._map(
                 {
-                    ImageVisuals.BRIGHTNESS: lambda: np.mean(rescale(
-                    ImageVisuals.BLURRINESS: lambda: np.std(edge_filter(np.mean(
-                    ImageVisuals.MISSING: lambda: np.sum(np.isnan(
-                    ImageVisuals.ZERO: lambda: np.int32(np.count_nonzero(
+                    ImageVisuals.BRIGHTNESS: lambda: np.mean(rescale(image)),
+                    ImageVisuals.BLURRINESS: lambda: np.std(edge_filter(np.mean(image, axis=0))),
+                    ImageVisuals.MISSING: lambda: np.sum(np.isnan(image)),
+                    ImageVisuals.ZERO: lambda: np.int32(np.count_nonzero(image == 0)),
                 }
             )
             self.results.append(results)
@@ -158,9 +169,9 @@ class ImageStatisticsMetric(BaseStatsMetric):
     def __init__(self, flags: ImageStatistics = ImageStatistics.ALL):
         super().__init__(flags)
 
-    def update(self,
-        for
-            scaled = rescale(
+    def update(self, images: Iterable[ArrayLike]) -> None:
+        for image in to_numpy_iter(images):
+            scaled = rescale(image)
             if (ImageStatistics.HISTOGRAM | ImageStatistics.ENTROPY) & self.flags:
                 hist = np.histogram(scaled, bins=256, range=(0, 1))[0]
 
@@ -192,10 +203,10 @@ class ChannelStatisticsMetric(BaseStatsMetric):
     def __init__(self, flags: ImageStatistics = ImageStatistics.ALL):
         super().__init__(flags)
 
-    def update(self,
-        for
-            scaled = rescale(
-            flattened = scaled.reshape(
+    def update(self, images: Iterable[ArrayLike]) -> None:
+        for image in to_numpy_iter(images):
+            scaled = rescale(image)
+            flattened = scaled.reshape(image.shape[0], -1)
 
             if (ImageStatistics.HISTOGRAM | ImageStatistics.ENTROPY) & self.flags:
                 hist = np.apply_along_axis(lambda x: np.histogram(x, bins=256, range=(0, 1))[0], 1, flattened)
@@ -253,8 +264,8 @@ class ImageStats(BaseAggregateMetric):
         super().__init__(flags)
         self._length = 0
 
-    def update(self,
-        for image in
+    def update(self, images: Iterable[ArrayLike]) -> None:
+        for image in to_numpy_iter(images):
             self._length += 1
             img = normalize_image_shape(image)
             for metric in self._metrics_dict:
@@ -295,8 +306,8 @@ class ChannelStats(BaseAggregateMetric):
     def __init__(self, flags: Optional[ImageStatistics] = None) -> None:
         super().__init__(flags)
 
-    def update(self,
-        for image in
+    def update(self, images: Iterable[ArrayLike]) -> None:
+        for image in to_numpy_iter(images):
             img = normalize_image_shape(image)
             for metric in self._metrics_dict:
                 metric.update([img])
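The per-metric classes now accept any ArrayLike batch via to_numpy_iter, and BaseStatsMetric gains a one-shot evaluate that chains update, compute, and reset. A rough sketch of that entry point using ImageHashMetric from this module (the random images are illustrative only):

    import numpy as np
    from dataeval._internal.metrics.stats import ImageHashMetric

    images = [np.random.randint(0, 256, (3, 32, 32), dtype=np.uint8) for _ in range(4)]

    # evaluate() runs update() -> compute() -> reset() for a single batch
    hashes = ImageHashMetric().evaluate(images)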
dataeval/_internal/metrics/uap.py
CHANGED
@@ -6,9 +6,8 @@ average precision using empirical mean precision
 
 from typing import Dict
 
-
-from
-
+from dataeval._internal.functional.uap import uap
+from dataeval._internal.interop import ArrayLike, to_numpy
 from dataeval._internal.metrics.base import EvaluateMixin
 
 
@@ -16,21 +15,19 @@ class UAP(EvaluateMixin):
     """
     FR Test Statistic based estimate of the empirical mean precision
 
-    Parameters
-    ----------
-    labels : np.ndarray
-        A numpy array of n_samples of class labels with M unique classes.
-
-    scores : np.ndarray
-        A 2D array of class probabilities per image
     """
 
-    def
-        self.labels = labels
-        self.scores = scores
-
-    def evaluate(self) -> Dict[str, float]:
+    def evaluate(self, labels: ArrayLike, scores: ArrayLike) -> Dict[str, float]:
         """
+        Estimates the upperbound average precision
+
+        Parameters
+        ----------
+        labels : ArrayLike
+            A numpy array of n_samples of class labels with M unique classes.
+        scores : ArrayLike
+            A 2D array of class probabilities per image
+
         Returns
         -------
         Dict[str, float]
@@ -41,5 +38,5 @@ class UAP(EvaluateMixin):
         ValueError
             If unique classes M < 2
         """
-
-        return {"uap": uap}
+
+        return {"uap": uap(to_numpy(labels), to_numpy(scores))}
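As with Parity, UAP no longer stores its inputs; labels and scores go straight into evaluate, which returns the estimate in a dict. A hedged sketch (assuming the constructor now takes no arguments; shapes and values are illustrative):

    import numpy as np
    from dataeval.metrics import UAP

    labels = np.array([0, 1, 1, 0])              # n_samples class labels
    scores = np.array([[0.9, 0.1],
                       [0.2, 0.8],
                       [0.4, 0.6],
                       [0.7, 0.3]])              # per-class probabilities per image

    result = UAP().evaluate(labels, scores)      # -> {"uap": <float>}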
dataeval/_internal/workflows/__init__.py
File without changes
dataeval/metrics/__init__.py
CHANGED
@@ -1,7 +1,8 @@
 from dataeval._internal.metrics.ber import BER
+from dataeval._internal.metrics.coverage import Coverage
 from dataeval._internal.metrics.divergence import Divergence
 from dataeval._internal.metrics.parity import Parity
 from dataeval._internal.metrics.stats import ChannelStats, ImageStats
 from dataeval._internal.metrics.uap import UAP
 
-__all__ = ["BER", "
+__all__ = ["BER", "Coverage", "Divergence", "Parity", "ChannelStats", "ImageStats", "UAP"]
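With this change the public namespace also exposes Coverage; per the new __all__, the full set of re-exports can be imported as:

    from dataeval.metrics import BER, ChannelStats, Coverage, Divergence, ImageStats, Parity, UAP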
{dataeval-0.61.0.dist-info → dataeval-0.63.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dataeval
-Version: 0.
+Version: 0.63.0
 Summary: DataEval provides a simple interface to characterize image data and its impact on model performance across classification and object-detection tasks
 Home-page: https://dataeval.ai/
 License: MIT
@@ -23,7 +23,6 @@ Provides-Extra: all
 Provides-Extra: tensorflow
 Provides-Extra: torch
 Requires-Dist: hdbscan (>=0.8.36)
-Requires-Dist: maite
 Requires-Dist: matplotlib ; extra == "torch" or extra == "all"
 Requires-Dist: numpy (>1.24.3)
 Requires-Dist: nvidia-cudnn-cu11 (>=8.6.0.163) ; extra == "tensorflow" or extra == "torch" or extra == "all"
dataeval-0.63.0.dist-info/RECORD
ADDED
@@ -0,0 +1,68 @@
+dataeval/__init__.py,sha256=kUzF3A_ow1IG-GGqko1M2MRd6wNqNTDSD32Nj-b7tXI,408
+dataeval/_internal/detectors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/detectors/clusterer.py,sha256=AAIhZQ1Tm5yOh2vLDJ9wlYmz0EuqXiy1tnrkJj119u0,20304
+dataeval/_internal/detectors/drift/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/detectors/drift/base.py,sha256=MTAuNBx8MQLkGJPt_F2M9hCWfJgMpy1N_fVnUniPul0,9103
+dataeval/_internal/detectors/drift/cvm.py,sha256=GU2RzqCeu5d-rR-tgpzEo_lqo7Gu7Fzkfxajc5W42ok,4001
+dataeval/_internal/detectors/drift/ks.py,sha256=MIYFjTSNs3H0pUk6HthYoGDBR6hB7YmS-ie8HOG2K2o,4004
+dataeval/_internal/detectors/drift/mmd.py,sha256=Q954j5znZNxP4JsEfceY1VAPrMyEsG0jFt5JRvMGbLg,7054
+dataeval/_internal/detectors/drift/torch.py,sha256=NsQYfDVRcCGmU8k6oBG_aVzmML1zre-xUKBVK1W680o,10872
+dataeval/_internal/detectors/drift/uncertainty.py,sha256=MnhEpTIkQ1zA3SiaKuNFsMi1Jge2a8uZRm4p2LX46iA,5338
+dataeval/_internal/detectors/duplicates.py,sha256=IqsX22N7wD8o0bYPc7N1S451w6lzjWLpJZs_RkxOgEU,2168
+dataeval/_internal/detectors/linter.py,sha256=CJlUviSUl9iVZix1tnZa6ldeYYB-vylA5tgobxq2apk,5292
+dataeval/_internal/detectors/ood/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/detectors/ood/ae.py,sha256=ASsN2XztlFdpupVgBazh_gInnToSAT8QjvJtA6EiVuY,2657
+dataeval/_internal/detectors/ood/aegmm.py,sha256=6ExAvYD0pGADmH9igk3r6t8cLPF4MgmrpGPrBybCSBA,2392
+dataeval/_internal/detectors/ood/base.py,sha256=IQQ0ZprNKpz09J7LKKSgoVvcKYIqaHw2p_tMb0OMwpI,6949
+dataeval/_internal/detectors/ood/llr.py,sha256=wW8d0pxBXvERCG7m8iBxzSC7hzN19O08IlI3FMvoLBw,10132
+dataeval/_internal/detectors/ood/vae.py,sha256=mWSbNY3-utuzeVSUkaqD_alLsgcfSMqdy2Aj1G7NUM4,2963
+dataeval/_internal/detectors/ood/vaegmm.py,sha256=FTHhuAF62hl3PQw2fQUZlPLgsi2ul0xxoUrRwr_Jd-o,2837
+dataeval/_internal/flags.py,sha256=dRApeFkdSXFbYHSmvzgUP78zH8jUGtfzKFfLQtX0Q18,883
+dataeval/_internal/functional/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/functional/ber.py,sha256=hJnFswWXnMjoYY62A7x8F3nyctMw3f3UlC8DdUX2Clg,2150
+dataeval/_internal/functional/coverage.py,sha256=Z56oE9LLosM1228DL0tfs12-MPrg_2ef4KzYhoTPPYg,2520
+dataeval/_internal/functional/divergence.py,sha256=fSZm2vIqChfj9sbgSuw9P8ZB8ih0nOPryRvnVQHsjzI,517
+dataeval/_internal/functional/hash.py,sha256=fhcWclSNmp4sJbFSWSqWB2nVxhEFNNje1ifVAX4qqls,2785
+dataeval/_internal/functional/metadata.py,sha256=6QCxnDzXAF0x6Xx-eUK_c4yOX-_RyTaYCqzDPzDYpLM,4228
+dataeval/_internal/functional/metadataparity.py,sha256=V6fCtSDx_tf49BnaGgatHNH8lgZoYtPggIuRa13AiHo,7771
+dataeval/_internal/functional/uap.py,sha256=PIeRAJcA1GBMJK_gBKg0U_bih73h-FKW2ruxJtGiiMc,200
+dataeval/_internal/functional/utils.py,sha256=u1kkGtS0irnx9dZTo9MahA-_4_uIorPDttQkBe8iU7U,4120
+dataeval/_internal/interop.py,sha256=tmcp666A_uhndsDLv2PFcZiPH2udz4Y7UVdjqs3vkqc,1280
+dataeval/_internal/maite/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/maite/utils.py,sha256=XJ6eCTZA_So_bJ2BoEZvMAY8GyC67p8Q044LGSAZepg,864
+dataeval/_internal/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/metrics/base.py,sha256=CO2deM6T-q1RAbu-C8k58pULt1KJEJM-kYjLMaHYzms,2641
+dataeval/_internal/metrics/ber.py,sha256=eQaTMjytVASkZz8bp8CqzpKBPzIYDp2qXmz1MBYNoj4,2261
+dataeval/_internal/metrics/coverage.py,sha256=Bhsvz0ZG8GsgsuYfUQ7i4xB5ebMBJYLk2QqXVfkjgJM,3421
+dataeval/_internal/metrics/divergence.py,sha256=QOWDfjlE84S2UvpX7x3pxVAPKdeqrdQ2cXzZkiaOyF8,3247
+dataeval/_internal/metrics/metadata.py,sha256=U2E2bG8BzRoHS6bx3a5KYEqE96ik9Sz1AW3MMMqQmLU,21472
+dataeval/_internal/metrics/metadataparity.py,sha256=zLo8WPcPM6OVWf7dK4LT8d3o9FlUHji1SJrDinztrv4,2972
+dataeval/_internal/metrics/parity.py,sha256=KAp_dznww1GXHwvIqKb3ZJVTQU2w8loyk8JrHBciEm4,6070
+dataeval/_internal/metrics/stats.py,sha256=6zvGdoYEpVpMMzduxzMkMfsaaKEyC4UVn4SWPcAO5Cg,12581
+dataeval/_internal/metrics/uap.py,sha256=pgfJY8kM5EFYYNbMzDcodOOzvPlzxrpsSXoL7dCrWn8,1113
+dataeval/_internal/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/models/pytorch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/models/pytorch/autoencoder.py,sha256=iK3Z9claesU_pJkRaiFJIZ9zKZg-Qj8ugzVYTTokDbE,6123
+dataeval/_internal/models/pytorch/blocks.py,sha256=pm2xwsDZjZJYXrhhiz8husvh2vHmrkFMSYEn-EDUD5Q,1354
+dataeval/_internal/models/pytorch/utils.py,sha256=Qgwym1PxGuwxbXCKUT-8r6Iyrxqm7x94oj45Vf5_CjE,1675
+dataeval/_internal/models/tensorflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/models/tensorflow/autoencoder.py,sha256=rErnOfDFTd7e4brSGQ2Lr1x1kNjSEHdbOREOtUfIhIM,9975
+dataeval/_internal/models/tensorflow/gmm.py,sha256=wnqQKm3fURuvBROUd2fitCqzKViDo-g0-Djr3TBHZ3U,3640
+dataeval/_internal/models/tensorflow/losses.py,sha256=3y6tHm7PTQ7hmasJDwTXjdARjCUWycoXqSyXJ1uT2mM,3766
+dataeval/_internal/models/tensorflow/pixelcnn.py,sha256=B5cwB2IGPw-7b8klt82j_60g_IvqSiDELxvbiBYJtAo,48068
+dataeval/_internal/models/tensorflow/trainer.py,sha256=2KHtMRniVselCaDXeb8QEfX-wMRsPfT1xiG2gUQgelg,4090
+dataeval/_internal/models/tensorflow/utils.py,sha256=uK_fQ1JXUSVi0kgnhd9eRArlr36OzXUEdL4inJZCs-8,8579
+dataeval/_internal/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/_internal/workflows/sufficiency.py,sha256=QZQdhERVr3EmPA2sAFFudSDL4KLM0eAvYgV38jhjzaI,18374
+dataeval/detectors/__init__.py,sha256=I2e7YWb55RRlKQll85Z6KdN5wdBa53smn-_fcZIsCwA,1507
+dataeval/flags/__init__.py,sha256=1-HmwmtfPkHWwqXUjDwWko396qAKBeaSvqVsQZLrzD0,170
+dataeval/metrics/__init__.py,sha256=GmuGCzF7Sk6TtAH9amB494gNtDjgVsSemPWJChrz2eo,427
+dataeval/models/__init__.py,sha256=onevPb5wznCggowBnVT0OUa8uBJXZCbrkFuek1UFvOs,293
+dataeval/models/tensorflow/__init__.py,sha256=A1XRxVGHefuvh_WpaKE1x95pRD1FecuFp66iuNPA_5U,424
+dataeval/models/torch/__init__.py,sha256=su7P9DF9LChlVCNHWG6d7s_yeIfWQbhCYWIkzJe0Qig,190
+dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/workflows/__init__.py,sha256=ObgS1cVYFRzFZWbNzGs2OcU02IVkJkAMHNnlnSNTMCE,208
+dataeval-0.63.0.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
+dataeval-0.63.0.dist-info/METADATA,sha256=6YmwpTrzkKyvPZ0IY3nwlTzeNsl-EaGJe8m_x2vwiaE,4217
+dataeval-0.63.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+dataeval-0.63.0.dist-info/RECORD,,
dataeval-0.61.0.dist-info/RECORD
DELETED
@@ -1,55 +0,0 @@
-dataeval/__init__.py,sha256=hoMLXpAqvfNQ6BtdLD49hOXWMUp5_4zBg_IPeuGPyZA,408
-dataeval/_internal/detectors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/detectors/clusterer.py,sha256=zuWW7qfbQWQ777TqBOsDp2_fEbFmuUzNqOvTWv8xijo,19193
-dataeval/_internal/detectors/drift/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/detectors/drift/base.py,sha256=b6_kUHGPtfDnN6S9gNkldpoaGwdRKw6ohGI4wGega24,8981
-dataeval/_internal/detectors/drift/cvm.py,sha256=3YePeStvRSKVFqHEWshngXhJTCh3cn9-9B-Ou7FQHgM,3897
-dataeval/_internal/detectors/drift/ks.py,sha256=PW4qB4XbDUhVxpKJeA39-0GYVgeAxWggipBh2bhKDKg,3924
-dataeval/_internal/detectors/drift/mmd.py,sha256=ZJu28o6Brs0-pt3PVY2ysMKVfLYXaEz754I-lSLn1hM,6965
-dataeval/_internal/detectors/drift/torch.py,sha256=NsQYfDVRcCGmU8k6oBG_aVzmML1zre-xUKBVK1W680o,10872
-dataeval/_internal/detectors/drift/uncertainty.py,sha256=WeTQWZDL00-cs50neoJzs_9xBIdo-xxcj6bx0DApCQY,5263
-dataeval/_internal/detectors/duplicates.py,sha256=6MVsopmgA4NccWtriHHvz_3y3tgWWu_vzFcxpXgS5DI,1469
-dataeval/_internal/detectors/linter.py,sha256=Y8XIAbzMUe1UuYQLsZ1UyF1yVle2ExuzUmDQD2TF0z0,2792
-dataeval/_internal/detectors/ood/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/detectors/ood/ae.py,sha256=nM-uI-7h548yEc25iKlPYXTdG6RqDPA7Mup5IqPW7cg,2576
-dataeval/_internal/detectors/ood/aegmm.py,sha256=05yOyP1RDOqVWW30weW_YAgciwBYsUJy35nPSuKQD10,2340
-dataeval/_internal/detectors/ood/base.py,sha256=IxDhU4T8vlURvKkqakrwgUnWhl5a_u9_kXvrKkzWrog,6771
-dataeval/_internal/detectors/ood/llr.py,sha256=EGgj1LV0qq-fE9RoWwL_6WXPaSTfhbnXkq25VtTiB3s,10029
-dataeval/_internal/detectors/ood/vae.py,sha256=uh0QJ3b1_SQllHhb7BSt7IV369dmHjlU5PBwrOMesQg,2892
-dataeval/_internal/detectors/ood/vaegmm.py,sha256=Yr5dKWSEcfP0xqqHJIM98QhS62oS8aYMj6edl26bKHI,2766
-dataeval/_internal/flags.py,sha256=dRApeFkdSXFbYHSmvzgUP78zH8jUGtfzKFfLQtX0Q18,883
-dataeval/_internal/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/metrics/base.py,sha256=hUxJO-k0L7_nfach6xMW96YuMuu--A8Ypz7plYKGQA8,2623
-dataeval/_internal/metrics/ber.py,sha256=HI2Cn-YWkqPz-mVZ8JgwubgjKYFyquq7L3yuKo-s8vc,3970
-dataeval/_internal/metrics/coverage.py,sha256=eSOiCyqTN6t9OnhgFcPOp6HIReQvNL3tw6PzenrJ8aw,2700
-dataeval/_internal/metrics/divergence.py,sha256=aihP84c8SUQ7IMUmBebFKtJxbH9VDfV_R-QtWEdUkvA,2823
-dataeval/_internal/metrics/hash.py,sha256=5hdxMU2mFA9GxjF1MfhE_ztjJh7Ku1XpP82B_N54dwc,2782
-dataeval/_internal/metrics/parity.py,sha256=9JunlLgrdbB2EPDKEtUDQnkBrsNiFJZD58dg3lSiYgE,6823
-dataeval/_internal/metrics/stats.py,sha256=WnuO0tNnpz7mYi5_rmxdk8wZoEZFczo1DrV828lgcVk,12118
-dataeval/_internal/metrics/uap.py,sha256=7QmJb-wM2B6c9ORa9kAar657KhDFScjNOGhpbuTv8YA,1149
-dataeval/_internal/metrics/utils.py,sha256=u1kkGtS0irnx9dZTo9MahA-_4_uIorPDttQkBe8iU7U,4120
-dataeval/_internal/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/models/pytorch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/models/pytorch/autoencoder.py,sha256=iK3Z9claesU_pJkRaiFJIZ9zKZg-Qj8ugzVYTTokDbE,6123
-dataeval/_internal/models/pytorch/blocks.py,sha256=pm2xwsDZjZJYXrhhiz8husvh2vHmrkFMSYEn-EDUD5Q,1354
-dataeval/_internal/models/pytorch/utils.py,sha256=Qgwym1PxGuwxbXCKUT-8r6Iyrxqm7x94oj45Vf5_CjE,1675
-dataeval/_internal/models/tensorflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/_internal/models/tensorflow/autoencoder.py,sha256=rErnOfDFTd7e4brSGQ2Lr1x1kNjSEHdbOREOtUfIhIM,9975
-dataeval/_internal/models/tensorflow/gmm.py,sha256=wnqQKm3fURuvBROUd2fitCqzKViDo-g0-Djr3TBHZ3U,3640
-dataeval/_internal/models/tensorflow/losses.py,sha256=3y6tHm7PTQ7hmasJDwTXjdARjCUWycoXqSyXJ1uT2mM,3766
-dataeval/_internal/models/tensorflow/pixelcnn.py,sha256=B5cwB2IGPw-7b8klt82j_60g_IvqSiDELxvbiBYJtAo,48068
-dataeval/_internal/models/tensorflow/trainer.py,sha256=2KHtMRniVselCaDXeb8QEfX-wMRsPfT1xiG2gUQgelg,4090
-dataeval/_internal/models/tensorflow/utils.py,sha256=uK_fQ1JXUSVi0kgnhd9eRArlr36OzXUEdL4inJZCs-8,8579
-dataeval/_internal/workflows/sufficiency.py,sha256=QZQdhERVr3EmPA2sAFFudSDL4KLM0eAvYgV38jhjzaI,18374
-dataeval/detectors/__init__.py,sha256=I2e7YWb55RRlKQll85Z6KdN5wdBa53smn-_fcZIsCwA,1507
-dataeval/flags/__init__.py,sha256=1-HmwmtfPkHWwqXUjDwWko396qAKBeaSvqVsQZLrzD0,170
-dataeval/metrics/__init__.py,sha256=uVz0GtRvCsh_r08qaBM-jc4bjcfAb1yzlIyx6zfPc9Y,358
-dataeval/models/__init__.py,sha256=onevPb5wznCggowBnVT0OUa8uBJXZCbrkFuek1UFvOs,293
-dataeval/models/tensorflow/__init__.py,sha256=A1XRxVGHefuvh_WpaKE1x95pRD1FecuFp66iuNPA_5U,424
-dataeval/models/torch/__init__.py,sha256=su7P9DF9LChlVCNHWG6d7s_yeIfWQbhCYWIkzJe0Qig,190
-dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/workflows/__init__.py,sha256=ObgS1cVYFRzFZWbNzGs2OcU02IVkJkAMHNnlnSNTMCE,208
-dataeval-0.61.0.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
-dataeval-0.61.0.dist-info/METADATA,sha256=-d5akTEAHSqWXZ1JeCTkIvxDc-HdECfNxcLWQlFqhhs,4238
-dataeval-0.61.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-dataeval-0.61.0.dist-info/RECORD,,
/dataeval/_internal/{metrics → functional}/utils.py
File without changes
{dataeval-0.61.0.dist-info → dataeval-0.63.0.dist-info}/LICENSE.txt
File without changes
{dataeval-0.61.0.dist-info → dataeval-0.63.0.dist-info}/WHEEL
File without changes