dataeval 0.69.4__py3-none-any.whl → 0.70.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their respective public registries. It is provided for informational purposes only and reflects only the differences between those released versions.
Files changed (66)
  1. dataeval/__init__.py +8 -8
  2. dataeval/_internal/datasets.py +235 -131
  3. dataeval/_internal/detectors/clusterer.py +2 -0
  4. dataeval/_internal/detectors/drift/base.py +7 -8
  5. dataeval/_internal/detectors/drift/mmd.py +4 -4
  6. dataeval/_internal/detectors/duplicates.py +64 -45
  7. dataeval/_internal/detectors/merged_stats.py +23 -54
  8. dataeval/_internal/detectors/ood/ae.py +8 -6
  9. dataeval/_internal/detectors/ood/aegmm.py +6 -4
  10. dataeval/_internal/detectors/ood/base.py +12 -7
  11. dataeval/_internal/detectors/ood/llr.py +6 -4
  12. dataeval/_internal/detectors/ood/vae.py +5 -3
  13. dataeval/_internal/detectors/ood/vaegmm.py +6 -4
  14. dataeval/_internal/detectors/outliers.py +137 -63
  15. dataeval/_internal/interop.py +11 -7
  16. dataeval/_internal/metrics/balance.py +13 -11
  17. dataeval/_internal/metrics/ber.py +5 -3
  18. dataeval/_internal/metrics/coverage.py +4 -0
  19. dataeval/_internal/metrics/divergence.py +9 -5
  20. dataeval/_internal/metrics/diversity.py +14 -12
  21. dataeval/_internal/metrics/parity.py +32 -22
  22. dataeval/_internal/metrics/stats/base.py +231 -0
  23. dataeval/_internal/metrics/stats/boxratiostats.py +159 -0
  24. dataeval/_internal/metrics/stats/datasetstats.py +99 -0
  25. dataeval/_internal/metrics/stats/dimensionstats.py +113 -0
  26. dataeval/_internal/metrics/stats/hashstats.py +75 -0
  27. dataeval/_internal/metrics/stats/labelstats.py +125 -0
  28. dataeval/_internal/metrics/stats/pixelstats.py +119 -0
  29. dataeval/_internal/metrics/stats/visualstats.py +124 -0
  30. dataeval/_internal/metrics/uap.py +8 -4
  31. dataeval/_internal/metrics/utils.py +30 -15
  32. dataeval/_internal/models/pytorch/autoencoder.py +5 -5
  33. dataeval/_internal/models/tensorflow/pixelcnn.py +1 -4
  34. dataeval/_internal/output.py +3 -18
  35. dataeval/_internal/utils.py +11 -16
  36. dataeval/_internal/workflows/sufficiency.py +152 -151
  37. dataeval/detectors/__init__.py +4 -0
  38. dataeval/detectors/drift/__init__.py +8 -3
  39. dataeval/detectors/drift/kernels/__init__.py +4 -0
  40. dataeval/detectors/drift/updates/__init__.py +4 -0
  41. dataeval/detectors/linters/__init__.py +15 -4
  42. dataeval/detectors/ood/__init__.py +14 -2
  43. dataeval/metrics/__init__.py +5 -0
  44. dataeval/metrics/bias/__init__.py +13 -4
  45. dataeval/metrics/estimators/__init__.py +8 -8
  46. dataeval/metrics/stats/__init__.py +25 -3
  47. dataeval/utils/__init__.py +16 -3
  48. dataeval/utils/tensorflow/__init__.py +11 -0
  49. dataeval/utils/torch/__init__.py +12 -0
  50. dataeval/utils/torch/datasets/__init__.py +7 -0
  51. dataeval/workflows/__init__.py +6 -2
  52. {dataeval-0.69.4.dist-info → dataeval-0.70.1.dist-info}/METADATA +12 -4
  53. dataeval-0.70.1.dist-info/RECORD +80 -0
  54. {dataeval-0.69.4.dist-info → dataeval-0.70.1.dist-info}/WHEEL +1 -1
  55. dataeval/_internal/flags.py +0 -77
  56. dataeval/_internal/metrics/stats.py +0 -397
  57. dataeval/flags/__init__.py +0 -3
  58. dataeval/tensorflow/__init__.py +0 -3
  59. dataeval/torch/__init__.py +0 -3
  60. dataeval-0.69.4.dist-info/RECORD +0 -74
  61. /dataeval/{tensorflow → utils/tensorflow}/loss/__init__.py +0 -0
  62. /dataeval/{tensorflow → utils/tensorflow}/models/__init__.py +0 -0
  63. /dataeval/{tensorflow → utils/tensorflow}/recon/__init__.py +0 -0
  64. /dataeval/{torch → utils/torch}/models/__init__.py +0 -0
  65. /dataeval/{torch → utils/torch}/trainer/__init__.py +0 -0
  66. {dataeval-0.69.4.dist-info → dataeval-0.70.1.dist-info}/LICENSE.txt +0 -0
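The headline change in this release is a restructuring of the statistics API: the monolithic dataeval/_internal/metrics/stats.py (397 lines, removed below) is split into per-category modules under dataeval/_internal/metrics/stats/, and the top-level dataeval.flags, dataeval.tensorflow, and dataeval.torch packages are dropped in favor of dataeval.utils.*. As a rough orientation only, the sketch below maps the old flag-driven call onto the presumed new per-category functions; each half targets its respective installed version, and the 0.70.1 names are assumptions inferred from the new module file names (dimensionstats.py, pixelstats.py, visualstats.py, hashstats.py), not verified against the released wheel.

    import numpy as np

    images = np.random.random((16, 3, 28, 28))  # toy (N, C, H, W) batch

    # dataeval 0.69.4: a single entry point, metrics selected with bitwise ImageStat flags
    # (the public re-export path for imagestats is assumed from dataeval/metrics/stats/__init__.py)
    from dataeval.flags import ImageStat
    from dataeval.metrics.stats import imagestats

    stats = imagestats(images, flags=ImageStat.ALL_STATS)

    # dataeval 0.70.1 (assumed): one function per statistic category, matching the new module names
    from dataeval.metrics.stats import dimensionstats, hashstats, pixelstats, visualstats

    dims = dimensionstats(images)    # width, height, channels, size, aspect ratio, depth
    pixels = pixelstats(images)      # mean, std, var, skew, kurtosis, histogram, entropy
    visuals = visualstats(images)    # brightness, contrast, darkness, zeros, missing
    hashes = hashstats(images)       # xxhash and perceptual hash per image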
dataeval/_internal/metrics/stats.py (removed in 0.70.1)
@@ -1,397 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from dataclasses import dataclass
4
- from typing import Any, Callable, Iterable
5
-
6
- import numpy as np
7
- from numpy.typing import ArrayLike, NDArray
8
- from scipy.stats import entropy, kurtosis, skew
9
-
10
- from dataeval._internal.flags import ImageStat, to_distinct, verify_supported
11
- from dataeval._internal.interop import to_numpy_iter
12
- from dataeval._internal.metrics.utils import edge_filter, get_bitdepth, normalize_image_shape, pchash, rescale, xxhash
13
- from dataeval._internal.output import OutputMetadata, populate_defaults, set_metadata
14
-
15
- CH_IDX_MAP = "ch_idx_map"
16
-
17
-
18
- @dataclass(frozen=True)
19
- class StatsOutput(OutputMetadata):
20
- """
21
- Attributes
22
- ----------
23
- xxhash : List[str]
24
- xxHash hash of the images as a hex string
25
- pchash : List[str]
26
- Perception hash of the images as a hex string
27
- width : NDArray[np.uint16]
28
- Width of the images in pixels
29
- height : NDArray[np.uint16]
30
- Height of the images in pixels
31
- channels : NDArray[np.uint8]
32
- Channel count of the images in pixels
33
- size : NDArray[np.uint32]
34
- Size of the images in pixels
35
- aspect_ratio : NDArray[np.float16]
36
- Aspect ratio of the images (width/height)
37
- depth : NDArray[np.uint8]
38
- Color depth of the images in bits
39
- brightness : NDArray[np.float16]
40
- Brightness of the images
41
- blurriness : NDArray[np.float16]
42
- Blurriness of the images
43
- contrast : NDArray[np.float16]
44
- Image contrast ratio
45
- darkness : NDArray[np.float16]
46
- Darkness of the images
47
- missing : NDArray[np.float16]
48
- Percentage of the images with missing pixels
49
- zeros : NDArray[np.float16]
50
- Percentage of the images with zero value pixels
51
- mean : NDArray[np.float16]
52
- Mean of the pixel values of the images
53
- std : NDArray[np.float16]
54
- Standard deviation of the pixel values of the images
55
- var : NDArray[np.float16]
56
- Variance of the pixel values of the images
57
- skew : NDArray[np.float16]
58
- Skew of the pixel values of the images
59
- kurtosis : NDArray[np.float16]
60
- Kurtosis of the pixel values of the images
61
- percentiles : NDArray[np.float16]
62
- Percentiles of the pixel values of the images with quartiles of (0, 25, 50, 75, 100)
63
- histogram : NDArray[np.uint32]
64
- Histogram of the pixel values of the images across 256 bins scaled between 0 and 1
65
- entropy : NDArray[np.float16]
66
- Entropy of the pixel values of the images
67
- ch_idx_map : Dict[int, List[int]]
68
- Per-channel mapping of indices for each metric
69
- """
70
-
71
- xxhash: list[str]
72
- pchash: list[str]
73
- width: NDArray[np.uint16]
74
- height: NDArray[np.uint16]
75
- channels: NDArray[np.uint8]
76
- size: NDArray[np.uint32]
77
- aspect_ratio: NDArray[np.float16]
78
- depth: NDArray[np.uint8]
79
- brightness: NDArray[np.float16]
80
- blurriness: NDArray[np.float16]
81
- contrast: NDArray[np.float16]
82
- darkness: NDArray[np.float16]
83
- missing: NDArray[np.float16]
84
- zeros: NDArray[np.float16]
85
- mean: NDArray[np.float16]
86
- std: NDArray[np.float16]
87
- var: NDArray[np.float16]
88
- skew: NDArray[np.float16]
89
- kurtosis: NDArray[np.float16]
90
- percentiles: NDArray[np.float16]
91
- histogram: NDArray[np.uint32]
92
- entropy: NDArray[np.float16]
93
- ch_idx_map: dict[int, list[int]]
94
-
95
- def dict(self):
96
- return {k: v for k, v in self.__dict__.items() if not k.startswith("_") and len(v) > 0}
97
-
98
- def __len__(self) -> int:
99
- if self.ch_idx_map:
100
- return sum([len(idxs) for idxs in self.ch_idx_map.values()])
101
- else:
102
- for a in self.__annotations__:
103
- attr = getattr(self, a, None)
104
- if attr is not None and hasattr(a, "__len__") and len(attr) > 0:
105
- return len(attr)
106
- return 0
107
-
108
-
109
- QUARTILES = (0, 25, 50, 75, 100)
110
-
111
- IMAGESTATS_FN_MAP: dict[ImageStat, Callable[[NDArray], Any]] = {
112
- ImageStat.XXHASH: lambda x: xxhash(x),
113
- ImageStat.PCHASH: lambda x: pchash(x),
114
- ImageStat.WIDTH: lambda x: np.uint16(x.shape[-1]),
115
- ImageStat.HEIGHT: lambda x: np.uint16(x.shape[-2]),
116
- ImageStat.CHANNELS: lambda x: np.uint8(x.shape[-3]),
117
- ImageStat.SIZE: lambda x: np.uint32(np.prod(x.shape[-2:])),
118
- ImageStat.ASPECT_RATIO: lambda x: np.float16(x.shape[-1] / x.shape[-2]),
119
- ImageStat.DEPTH: lambda x: np.uint8(get_bitdepth(x).depth),
120
- ImageStat.BRIGHTNESS: lambda x: x[-2],
121
- ImageStat.BLURRINESS: lambda x: np.float16(np.std(edge_filter(np.mean(x, axis=0)))),
122
- ImageStat.CONTRAST: lambda x: np.float16((np.max(x) - np.min(x)) / np.mean(x)),
123
- ImageStat.DARKNESS: lambda x: x[1],
124
- ImageStat.MISSING: lambda x: np.float16(np.sum(np.isnan(x)) / np.prod(x.shape[-2:])),
125
- ImageStat.ZEROS: lambda x: np.float16(np.count_nonzero(x == 0) / np.prod(x.shape[-2:])),
126
- ImageStat.MEAN: lambda x: np.float16(np.mean(x)),
127
- ImageStat.STD: lambda x: np.float16(np.std(x)),
128
- ImageStat.VAR: lambda x: np.float16(np.var(x)),
129
- ImageStat.SKEW: lambda x: np.float16(skew(x.ravel())),
130
- ImageStat.KURTOSIS: lambda x: np.float16(kurtosis(x.ravel())),
131
- ImageStat.PERCENTILES: lambda x: np.float16(np.nanpercentile(x, q=QUARTILES)),
132
- ImageStat.HISTOGRAM: lambda x: np.uint32(np.histogram(x, 256, (0, 1))[0]),
133
- ImageStat.ENTROPY: lambda x: np.float16(entropy(x)),
134
- }
135
-
136
- CHANNELSTATS_FN_MAP: dict[ImageStat, Callable[[NDArray], Any]] = {
137
- ImageStat.BRIGHTNESS: lambda x: np.float16((np.max(x, axis=1) - np.mean(x, axis=1)) / np.var(x, axis=1)),
138
- ImageStat.CONTRAST: lambda x: np.float16((np.max(x, axis=1) - np.min(x, axis=1)) / np.mean(x, axis=1)),
139
- ImageStat.DARKNESS: lambda x: np.float16((np.mean(x, axis=1) - np.min(x, axis=1)) / np.var(x, axis=1)),
140
- ImageStat.ZEROS: lambda x: np.float16(np.count_nonzero(x == 0, axis=(1, 2)) / np.prod(x.shape[-2:])),
141
- ImageStat.MEAN: lambda x: np.float16(np.mean(x, axis=1)),
142
- ImageStat.STD: lambda x: np.float16(np.std(x, axis=1)),
143
- ImageStat.VAR: lambda x: np.float16(np.var(x, axis=1)),
144
- ImageStat.SKEW: lambda x: np.float16(skew(x, axis=1)),
145
- ImageStat.KURTOSIS: lambda x: np.float16(kurtosis(x, axis=1)),
146
- ImageStat.PERCENTILES: lambda x: np.float16(np.nanpercentile(x, q=QUARTILES, axis=1).T),
147
- ImageStat.HISTOGRAM: lambda x: np.uint32(np.apply_along_axis(lambda y: np.histogram(y, 256, (0, 1))[0], 1, x)),
148
- ImageStat.ENTROPY: lambda x: np.float16(entropy(x, axis=1)),
149
- }
150
-
151
-
152
- def run_stats(
153
- images: Iterable[ArrayLike],
154
- flags: ImageStat,
155
- fn_map: dict[ImageStat, Callable[[NDArray], Any]],
156
- flatten: bool,
157
- ):
158
- """
159
- Compute specified statistics on a set of images.
160
-
161
- This function applies a set of statistical operations to each image in the input iterable,
162
- based on the specified flags. The function dynamically determines which statistics to apply
163
- using a flag system and a corresponding function map. It also supports optional image
164
- flattening for pixel-wise calculations.
165
-
166
- Parameters
167
- ----------
168
- images : ArrayLike
169
- An iterable of images (e.g., list of arrays), where each image is represented as an
170
- array-like structure (e.g., NumPy arrays).
171
- flags : ImageStat
172
- A bitwise flag or set of flags specifying the statistics to compute for each image.
173
- These flags determine which functions in `fn_map` to apply.
174
- fn_map : dict[ImageStat, Callable]
175
- A dictionary mapping `ImageStat` flags to functions that compute the corresponding statistics.
176
- Each function accepts a NumPy array (representing an image or rescaled pixel data) and returns a result.
177
- flatten : bool
178
- If True, the image is flattened into a 2D array for pixel-wise operations. Otherwise, the
179
- original image dimensions are preserved.
180
-
181
- Returns
182
- -------
183
- list[dict[str, NDArray]]
184
- A list of dictionaries, where each dictionary contains the computed statistics for an image.
185
- The dictionary keys correspond to the names of the statistics, and the values are NumPy arrays
186
- with the results of the computations.
187
-
188
- Raises
189
- ------
190
- ValueError
191
- If unsupported flags are provided that are not present in `fn_map`.
192
-
193
- Notes
194
- -----
195
- - The function performs image normalization (rescaling the image values)
196
- before applying some of the statistics.
197
- - Pixel-level statistics (e.g., brightness, entropy) are computed after
198
- rescaling and, optionally, flattening the images.
199
- - For statistics like histograms and entropy, intermediate results may
200
- be reused to avoid redundant computation.
201
- """
202
- verify_supported(flags, fn_map)
203
- flag_dict = to_distinct(flags)
204
-
205
- results_list: list[dict[str, NDArray]] = []
206
- for image in to_numpy_iter(images):
207
- normalized = normalize_image_shape(image)
208
- scaled = None
209
- hist = None
210
- percentiles = None
211
- output: dict[str, NDArray] = {}
212
- for flag, stat in flag_dict.items():
213
- if flag & (ImageStat.ALL_PIXELSTATS | ImageStat.BRIGHTNESS | ImageStat.CONTRAST | ImageStat.DARKNESS):
214
- if scaled is None:
215
- scaled = rescale(normalized).reshape(image.shape[0], -1) if flatten else rescale(normalized)
216
- if flag & (ImageStat.HISTOGRAM | ImageStat.ENTROPY):
217
- if hist is None:
218
- hist = fn_map[ImageStat.HISTOGRAM](scaled)
219
- output[stat] = hist if flag & ImageStat.HISTOGRAM else fn_map[flag](hist)
220
- elif flag & (ImageStat.BRIGHTNESS | ImageStat.DARKNESS | ImageStat.PERCENTILES):
221
- if percentiles is None:
222
- percentiles = fn_map[ImageStat.PERCENTILES](scaled)
223
- output[stat] = percentiles if flag & ImageStat.PERCENTILES else fn_map[flag](percentiles)
224
- else:
225
- output[stat] = fn_map[flag](scaled)
226
- else:
227
- output[stat] = fn_map[flag](normalized)
228
- results_list.append(output)
229
- return results_list
230
-
231
-
232
- @set_metadata("dataeval.metrics")
233
- def imagestats(images: Iterable[ArrayLike], flags: ImageStat = ImageStat.ALL_STATS) -> StatsOutput:
234
- """
235
- Calculates image and pixel statistics for each image
236
-
237
- This function computes various statistical metrics (e.g., mean, standard deviation, entropy)
238
- on the images as a whole, based on the specified flags. It supports multiple types of statistics
239
- that can be selected using the `flags` argument.
240
-
241
- Parameters
242
- ----------
243
- images : ArrayLike
244
- Images to run statistical tests on
245
- flags : ImageStat, default ImageStat.ALL_STATS
246
- Metric(s) to calculate for each image. The default flag ``ImageStat.ALL_STATS``
247
- computes all available statistics.
248
-
249
- Returns
250
- -------
251
- StatsOutput
252
- A dictionary-like object containing the computed statistics for each image. The keys correspond
253
- to the names of the statistics (e.g., 'mean', 'std'), and the values are lists of results for
254
- each image or numpy arrays when the results are multi-dimensional.
255
-
256
- See Also
257
- --------
258
- ImageStat, channelstats, Outliers, Duplicates
259
-
260
- Notes
261
- -----
262
- - All metrics in the ImageStat.ALL_PIXELSTATS flag are scaled based on the perceived bit depth
263
- (which is derived from the largest pixel value) to allow for better comparison
264
- between images stored in different formats and different resolutions.
265
- - ImageStat.ZERO and ImageStat.MISSING are presented as a percentage of total pixel counts
266
-
267
- Examples
268
- --------
269
- Calculating the statistics on the images, whose shape is (C, H, W)
270
-
271
- >>> results = imagestats(images, flags=ImageStat.MEAN | ImageStat.ALL_VISUALS)
272
- >>> print(results.mean)
273
- [0.16650391 0.52050781 0.05471802 0.07702637 0.09875488 0.12188721
274
- 0.14440918 0.16711426 0.18859863 0.21264648 0.2355957 0.25854492
275
- 0.27978516 0.3046875 0.32788086 0.35131836 0.37255859 0.39819336
276
- 0.42163086 0.4453125 0.46630859 0.49267578 0.51660156 0.54052734
277
- 0.56152344 0.58837891 0.61230469 0.63671875 0.65771484 0.68505859
278
- 0.70947266 0.73388672 0.75488281 0.78271484 0.80712891 0.83203125
279
- 0.85302734 0.88134766 0.90625 0.93115234]
280
- >>> print(results.zeros)
281
- [0.12561035 0. 0. 0. 0.11730957 0.
282
- 0. 0. 0.10986328 0. 0. 0.
283
- 0.10266113 0. 0. 0. 0.09570312 0.
284
- 0. 0. 0.08898926 0. 0. 0.
285
- 0.08251953 0. 0. 0. 0.07629395 0.
286
- 0. 0. 0.0703125 0. 0. 0.
287
- 0.0645752 0. 0. 0. ]
288
- """
289
- stats = run_stats(images, flags, IMAGESTATS_FN_MAP, False)
290
- output = {}
291
- length = len(stats)
292
- for i, results in enumerate(stats):
293
- for stat, result in results.items():
294
- if not isinstance(result, (np.ndarray, np.generic)):
295
- output.setdefault(stat, []).append(result)
296
- else:
297
- shape = () if np.isscalar(result) else result.shape
298
- output.setdefault(stat, np.empty((length,) + shape))[i] = result
299
- return StatsOutput(**populate_defaults(output, StatsOutput))
300
-
301
-
302
- @set_metadata("dataeval.metrics")
303
- def channelstats(images: Iterable[ArrayLike], flags=ImageStat.ALL_CHANNEL_STATS) -> StatsOutput:
304
- """
305
- Calculates pixel statistics for each image per channel
306
-
307
- This function computes pixel-level statistics (e.g., mean, variance, etc.) on a per-channel basis
308
- for each image. The statistics can be selected using the `flags` argument, and the results will
309
- be grouped by the number of channels (e.g., RGB channels) in each image.
310
-
311
- Parameters
312
- ----------
313
- images : ArrayLike
314
- Images to run statistical tests on
315
- flags: ImageStat, default ImageStat.ALL_CHANNEL_STATS
316
- Metric(s) to calculate for each image per channel.
317
- Only flags within the ``ImageStat.ALL_CHANNEL_STATS`` category are supported.
318
-
319
- Returns
320
- -------
321
- StatsOutput
322
- A dictionary-like object containing the computed statistics for each image per channel. The keys
323
- correspond to the names of the statistics (e.g., 'mean', 'variance'), and the values are numpy arrays
324
- with results for each channel of each image.
325
-
326
- See Also
327
- --------
328
- ImageStat, imagestats, Outliers, Duplicates
329
-
330
- Notes
331
- -----
332
- - All metrics in the ImageStat.ALL_PIXELSTATS flag along with ImageStat.Brightness,
333
- ImageStat.Contrast and ImageStat.Darkness are scaled based on the perceived bit depth
334
- (which is derived from the largest pixel value) to allow for better comparison
335
- between images stored in different formats and different resolutions.
336
-
337
- Examples
338
- --------
339
- Calculating the statistics on a per channel basis for images, whose shape is (N, C, H, W)
340
-
341
- >>> results = channelstats(images, flags=ImageStat.MEAN | ImageStat.VAR)
342
- >>> print(results.mean)
343
- {3: array([[0.01617, 0.5303 , 0.06525, 0.09735, 0.1295 , 0.1616 , 0.1937 ,
344
- 0.2258 , 0.2578 , 0.29 , 0.322 , 0.3542 , 0.3865 , 0.4185 ,
345
- 0.4507 , 0.4827 , 0.5146 , 0.547 , 0.579 , 0.6113 , 0.643 ,
346
- 0.6753 , 0.7075 , 0.7397 , 0.7715 , 0.8037 , 0.836 , 0.868 ,
347
- 0.9004 , 0.932 ],
348
- [0.04828, 0.562 , 0.06726, 0.09937, 0.1315 , 0.1636 , 0.1957 ,
349
- 0.2278 , 0.26 , 0.292 , 0.3242 , 0.3562 , 0.3884 , 0.4204 ,
350
- 0.4526 , 0.4846 , 0.5166 , 0.549 , 0.581 , 0.6133 , 0.6455 ,
351
- 0.6772 , 0.7095 , 0.7417 , 0.774 , 0.8057 , 0.838 , 0.87 ,
352
- 0.9023 , 0.934 ],
353
- [0.0804 , 0.594 , 0.0693 , 0.1014 , 0.1334 , 0.1656 , 0.1978 ,
354
- 0.2299 , 0.262 , 0.294 , 0.3262 , 0.3584 , 0.3904 , 0.4226 ,
355
- 0.4546 , 0.4868 , 0.519 , 0.551 , 0.583 , 0.615 , 0.6475 ,
356
- 0.679 , 0.7114 , 0.7437 , 0.776 , 0.808 , 0.84 , 0.872 ,
357
- 0.9043 , 0.9365 ]], dtype=float16)}
358
- >>> print(results.var)
359
- {3: array([[0.00010103, 0.01077 , 0.0001621 , 0.0003605 , 0.0006375 ,
360
- 0.000993 , 0.001427 , 0.001939 , 0.00253 , 0.003199 ,
361
- 0.003944 , 0.004772 , 0.005676 , 0.006657 , 0.007717 ,
362
- 0.00886 , 0.01008 , 0.01137 , 0.01275 , 0.0142 ,
363
- 0.01573 , 0.01733 , 0.01903 , 0.0208 , 0.02264 ,
364
- 0.02457 , 0.02657 , 0.02864 , 0.0308 , 0.03305 ],
365
- [0.0001798 , 0.0121 , 0.0001721 , 0.0003753 , 0.0006566 ,
366
- 0.001017 , 0.001455 , 0.001972 , 0.002565 , 0.003239 ,
367
- 0.00399 , 0.00482 , 0.00573 , 0.006714 , 0.007782 ,
368
- 0.00893 , 0.01015 , 0.011444 , 0.012825 , 0.01428 ,
369
- 0.01581 , 0.01743 , 0.01912 , 0.02089 , 0.02274 ,
370
- 0.02466 , 0.02667 , 0.02875 , 0.03091 , 0.03314 ],
371
- [0.000337 , 0.0135 , 0.0001824 , 0.0003903 , 0.0006766 ,
372
- 0.00104 , 0.001484 , 0.002005 , 0.002604 , 0.00328 ,
373
- 0.004036 , 0.00487 , 0.005783 , 0.006775 , 0.00784 ,
374
- 0.00899 , 0.010216 , 0.01152 , 0.0129 , 0.01436 ,
375
- 0.0159 , 0.01752 , 0.01921 , 0.02098 , 0.02283 ,
376
- 0.02477 , 0.02676 , 0.02885 , 0.03102 , 0.03326 ]],
377
- dtype=float16)}
378
- """
379
- stats = run_stats(images, flags, CHANNELSTATS_FN_MAP, True)
380
- output = {}
381
- for i, results in enumerate(stats):
382
- for stat, result in results.items():
383
- channels = result.shape[0]
384
- output.setdefault(stat, {}).setdefault(channels, []).append(result)
385
- output.setdefault(CH_IDX_MAP, {}).setdefault(channels, {})[i] = None
386
-
387
- # Concatenate list of channel statistics numpy
388
- for stat in output:
389
- if stat == CH_IDX_MAP:
390
- continue
391
- for channel in output[stat]:
392
- output[stat][channel] = np.array(output[stat][channel]).T
393
-
394
- for channel in output[CH_IDX_MAP]:
395
- output[CH_IDX_MAP][channel] = list(output[CH_IDX_MAP][channel].keys())
396
-
397
- return StatsOutput(**populate_defaults(output, StatsOutput))
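For reference, the removed module's docstrings document the 0.69.4 usage pattern directly. Below is a minimal, self-contained reproduction of that documented usage, with a synthetic image batch standing in for the doctest fixtures and the public re-export path assumed from dataeval/metrics/stats/__init__.py.

    import numpy as np

    from dataeval.flags import ImageStat
    from dataeval.metrics.stats import channelstats, imagestats

    # synthetic stand-in for the doctest images: (N, C, H, W), values in [0, 1]
    images = np.random.default_rng(0).random((40, 3, 32, 32)).astype(np.float32)

    # whole-image statistics, selected with bitwise flags
    results = imagestats(images, flags=ImageStat.MEAN | ImageStat.ALL_VISUALS)
    print(results.mean)   # one value per image
    print(results.zeros)  # fraction of zero-valued pixels per image

    # per-channel statistics, grouped by channel count in the output
    ch_results = channelstats(images, flags=ImageStat.MEAN | ImageStat.VAR)
    print(ch_results.mean)  # dict keyed by channel count, e.g. {3: array of shape (3, 40)}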
dataeval/flags/__init__.py (removed in 0.70.1)
@@ -1,3 +0,0 @@
1
- from dataeval._internal.flags import ImageStat
2
-
3
- __all__ = ["ImageStat"]
dataeval/tensorflow/__init__.py (removed in 0.70.1)
@@ -1,3 +0,0 @@
1
- from . import loss, models, recon
2
-
3
- __all__ = ["loss", "models", "recon"]
dataeval/torch/__init__.py (removed in 0.70.1)
@@ -1,3 +0,0 @@
1
- from . import models, trainer
2
-
3
- __all__ = ["models", "trainer"]
dataeval-0.69.4.dist-info/RECORD (removed in 0.70.1)
@@ -1,74 +0,0 @@
1
- dataeval/__init__.py,sha256=KOZnb9SovSSuD2UrqV-NS_b5vpfWdQlsweB55fned58,590
2
- dataeval/_internal/datasets.py,sha256=MwN6xgZW1cA5yIxXZ05qBBz4aO3bjKzIEbZZfa1HkQo,9790
3
- dataeval/_internal/detectors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- dataeval/_internal/detectors/clusterer.py,sha256=hJwELUeAdZZ3OVLIfwalw2P7Zz13q2ZqrV6gx90s44E,20695
5
- dataeval/_internal/detectors/drift/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
- dataeval/_internal/detectors/drift/base.py,sha256=XSX1VVUxvFFKVFQVsc2WWeaRRmIxuYaIgD_c5H4OraA,15930
7
- dataeval/_internal/detectors/drift/cvm.py,sha256=xiyZlf0rAQGG8Z6ZBLPVri805aPRkERrUySwRN8cTZQ,4010
8
- dataeval/_internal/detectors/drift/ks.py,sha256=aoDx7ps-5vrSI8Q9ii6cwmKnAyaD8tjG69wI-7R3MVQ,4098
9
- dataeval/_internal/detectors/drift/mmd.py,sha256=j85bwzCiFLNS27WlUFlgpHDMD9yga41ILt-yAr-LABc,7493
10
- dataeval/_internal/detectors/drift/torch.py,sha256=YhIN85MbUV3C4IJcRvqYdXSWLj5lUeEOb05T5DgB3xo,11552
11
- dataeval/_internal/detectors/drift/uncertainty.py,sha256=Ot8L42AnFbkij4J3Tis7VzXLv3hfBxoOWBP4UoCEnVs,5125
12
- dataeval/_internal/detectors/duplicates.py,sha256=qkzbdWuJuUozFLqpnD6CYAGXQb7-aWw2mHr_cxXAfPo,4922
13
- dataeval/_internal/detectors/merged_stats.py,sha256=WVPxz7n5fUkFKW3kobD_TkKkof51YjfIz4M_4CHh-1s,2517
14
- dataeval/_internal/detectors/ood/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
15
- dataeval/_internal/detectors/ood/ae.py,sha256=k8pZP7oPwVyQlv6YcoacNMzpmQZy7W222yYrdXGTYZI,2031
16
- dataeval/_internal/detectors/ood/aegmm.py,sha256=pffThqXRoLx3GuZXEQBd-xEy5DjAZHV7WSeP2HgM_TI,2403
17
- dataeval/_internal/detectors/ood/base.py,sha256=Pw34uFEWOJZiG4ciM0ArUkqhiM8WCGl2rc0BwFPu3xM,8240
18
- dataeval/_internal/detectors/ood/llr.py,sha256=tCo8G7V8VaVuIZ09rg0ZXZmdE0N_zGm7vCfFUnGbGvo,10102
19
- dataeval/_internal/detectors/ood/vae.py,sha256=WbQugS-bBUTTqQ9PRLHBmSUtk7O2_PN4PBLJE9ieMjw,2921
20
- dataeval/_internal/detectors/ood/vaegmm.py,sha256=pVUSlVF2jo8uokyks2QzfBJnNtcFWmcF8EQl-azs2Bg,2832
21
- dataeval/_internal/detectors/outliers.py,sha256=oS8lsCPIM6WtLzUjpMZDfiopZA2fJhsHakmSzZUhqHU,7614
22
- dataeval/_internal/flags.py,sha256=5hZ5AHXjXRKbWtFC45-J7M9NvJHsT4LKRsPzPMksgfQ,2323
23
- dataeval/_internal/interop.py,sha256=x4qj4EiBt5NthSxe8prSLrPDAEcipAdyyLwbNyCBaFk,1059
24
- dataeval/_internal/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
25
- dataeval/_internal/metrics/balance.py,sha256=eAHvgjiGCH893XSQLqh9j9wgvAECoNPVT8k0u_9Ijzg,6097
26
- dataeval/_internal/metrics/ber.py,sha256=Onsi47AbT9rMvng-Pbu8LIrYRfLpI13En1FxkFoMKQs,4668
27
- dataeval/_internal/metrics/coverage.py,sha256=EZVES1rbZW2j_CtQv1VFfSO-UmWcrt5nmqxDErtrG14,3473
28
- dataeval/_internal/metrics/divergence.py,sha256=nmMUfr9FGnH798eb6xzEiMj4C42rQVthh5HeexiY6EE,4119
29
- dataeval/_internal/metrics/diversity.py,sha256=_oT0FHsgfLOoe_TLD2Aax4r4jmH6WnOPVIkcl_YjaoY,7582
30
- dataeval/_internal/metrics/parity.py,sha256=VszQNbHWjct2bCqrIXUZC_qFi4ZIq2Lm-vs-DiarBFo,16244
31
- dataeval/_internal/metrics/stats.py,sha256=ILKteVMGjrp1s2CECPL_hbLsijIKR2d6II2-8w9oxW8,18105
32
- dataeval/_internal/metrics/uap.py,sha256=w-wvXXnX16kUq-weaZD2SrJi22LJ8EjOFbOhPxeGejI,2043
33
- dataeval/_internal/metrics/utils.py,sha256=mSYa-3cHGcsQwPr7zbdpzrnK_8jIXCiAcu2HCcvrtaY,13007
34
- dataeval/_internal/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
35
- dataeval/_internal/models/pytorch/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
36
- dataeval/_internal/models/pytorch/autoencoder.py,sha256=gmnAHUzzn-fXTUU63SR4ZBjGBLEALWPxmZ_wPzvF_dg,8365
37
- dataeval/_internal/models/pytorch/blocks.py,sha256=pm2xwsDZjZJYXrhhiz8husvh2vHmrkFMSYEn-EDUD5Q,1354
38
- dataeval/_internal/models/pytorch/utils.py,sha256=Qgwym1PxGuwxbXCKUT-8r6Iyrxqm7x94oj45Vf5_CjE,1675
39
- dataeval/_internal/models/tensorflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
40
- dataeval/_internal/models/tensorflow/autoencoder.py,sha256=Ryn11jDbpZJOM5De-kMGPdbJBQEdwip6B20ajS8HqpE,10354
41
- dataeval/_internal/models/tensorflow/gmm.py,sha256=QoEgbeax1GETqRmUF7A2ih9uFOZfFAjGzgH2ljExlAc,3669
42
- dataeval/_internal/models/tensorflow/losses.py,sha256=pZH5RnlM9R0RrBde9Lgq32muwAp7_PWc56Mu4u8RVvo,3976
43
- dataeval/_internal/models/tensorflow/pixelcnn.py,sha256=lRpRNebMgkCJUnEk1xouVaTfS_YGMQgQhI01wNKAjeM,48420
44
- dataeval/_internal/models/tensorflow/trainer.py,sha256=xNY0Iw7Qa1TnCuy9N1b77_VduFoW_BhbZjfQCxOVby4,4082
45
- dataeval/_internal/models/tensorflow/utils.py,sha256=l6jXKMWyQAEI4LpAONq95Xwr7CPgrs408ypf9TuNxkY,8732
46
- dataeval/_internal/output.py,sha256=bFC2qJxXUc_daQwJHHa9KfFNLuxZANGb7Dpget_TXYs,3049
47
- dataeval/_internal/utils.py,sha256=gK0z4buuQoUYblkrCiRV9pIESzyikcY-3a08XsQkD7E,1585
48
- dataeval/_internal/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
49
- dataeval/_internal/workflows/sufficiency.py,sha256=0k7Dbk3QmEGkZp2IW4OcZBcrxb4zAp9hC9nXGN1v1cY,18199
50
- dataeval/detectors/__init__.py,sha256=WVlwapZtKXVvrW41Sq30sFd8j2phS8JMsCaLeXfbQ7k,204
51
- dataeval/detectors/drift/__init__.py,sha256=XtSjoTy6P_lwRzC9Klmd9BYZ3v4qZrATJ-p7gvvHPGk,598
52
- dataeval/detectors/drift/kernels/__init__.py,sha256=qV_r740iRPw39_kHOttmk3VNikDFKCvF9i1IGbgjf3A,186
53
- dataeval/detectors/drift/updates/__init__.py,sha256=uwkRV-4WVg0XFX_9futvQ0ggGOEvduDedgCno_eIi4U,149
54
- dataeval/detectors/linters/__init__.py,sha256=1yxsJw8CFpHsZwn_YUlWpb-4YBet5U6uB--MeRgB6io,234
55
- dataeval/detectors/ood/__init__.py,sha256=ybWhwbMmWygIwE1A-nYihDfugrj3j0GiuABmVvD7264,583
56
- dataeval/flags/__init__.py,sha256=qo06_Tk0ul4lOhKSEs0HE2G6WBFvMwNJq77vRX1ynww,72
57
- dataeval/metrics/__init__.py,sha256=42szGyZrLekNU-T-rwJu-pUoDBdOoStuScB-mnGzjw4,81
58
- dataeval/metrics/bias/__init__.py,sha256=xqpxCttgzz-hMZQI7_IlaNn4OGZaGVz3KKRd26GbSKE,335
59
- dataeval/metrics/estimators/__init__.py,sha256=fWQZUIxu88u5POYXN1yoFc-Hxx5B1fveEiiSXmK5kPk,210
60
- dataeval/metrics/stats/__init__.py,sha256=N5UvO7reDkYX1xFdAQjwALyJwcC2FAbruzd7ZYYW_4I,123
61
- dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
62
- dataeval/tensorflow/__init__.py,sha256=IH_ELFP9CwKPk_br8_dKi6HeAlwmmV2vgsWdD8IFKXU,72
63
- dataeval/tensorflow/loss/__init__.py,sha256=E9eB87LNh0o5nUCqssB027EXBsOfEayNHPcNW0QGFdA,101
64
- dataeval/tensorflow/models/__init__.py,sha256=OVpmHF8itDcgOlfw6N9jr7IphZPbMJoiu7OdqYhU9fs,291
65
- dataeval/tensorflow/recon/__init__.py,sha256=xe6gAQqK9tyAoDQTtaJAxIPK1humt5QzsG_9NPsqx58,116
66
- dataeval/torch/__init__.py,sha256=ZNGSJJmatdGzbrazw86yNveEXm8smmW63xD-ReA8Nfg,63
67
- dataeval/torch/models/__init__.py,sha256=YnDnePYpRIKHyYn3F5qR1OObMSb-g0FGvI8X-uTB09E,162
68
- dataeval/torch/trainer/__init__.py,sha256=Te-qElt8h-Zv8NN0r-VJOEdCPHTQ2yO3rd2MhRiZGZs,93
69
- dataeval/utils/__init__.py,sha256=ExQ1xj62MjcM9uIu1-g1P2fW0EPJpcIofnvxjQ908c4,172
70
- dataeval/workflows/__init__.py,sha256=gkU2B6yUiefexcYrBwqfZKNl8BvX8abUjfeNvVBXF4E,186
71
- dataeval-0.69.4.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
72
- dataeval-0.69.4.dist-info/METADATA,sha256=R_YlthIsAkOizGWkgXiOCEsD_6F5wJm8qjU4hjhL_c8,4292
73
- dataeval-0.69.4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
74
- dataeval-0.69.4.dist-info/RECORD,,
The renamed module __init__.py files and LICENSE.txt (entries 61-66 in the file list) moved without content changes.