dataeval 0.86.0__py3-none-any.whl → 0.86.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- dataeval/__init__.py +1 -1
- dataeval/_log.py +1 -1
- dataeval/config.py +21 -4
- dataeval/data/_embeddings.py +2 -2
- dataeval/data/_images.py +2 -3
- dataeval/data/_metadata.py +48 -37
- dataeval/data/_selection.py +1 -2
- dataeval/data/_split.py +2 -3
- dataeval/data/_targets.py +17 -13
- dataeval/data/selections/_classfilter.py +2 -5
- dataeval/data/selections/_prioritize.py +6 -9
- dataeval/data/selections/_shuffle.py +3 -1
- dataeval/detectors/drift/_base.py +4 -5
- dataeval/detectors/drift/_mmd.py +3 -6
- dataeval/detectors/drift/_nml/_base.py +4 -2
- dataeval/detectors/drift/_nml/_chunk.py +11 -19
- dataeval/detectors/drift/_nml/_domainclassifier.py +8 -19
- dataeval/detectors/drift/_nml/_result.py +8 -9
- dataeval/detectors/drift/_nml/_thresholds.py +66 -77
- dataeval/detectors/linters/outliers.py +7 -7
- dataeval/metrics/bias/_parity.py +10 -13
- dataeval/metrics/estimators/_divergence.py +2 -4
- dataeval/metrics/stats/_base.py +103 -42
- dataeval/metrics/stats/_boxratiostats.py +21 -19
- dataeval/metrics/stats/_dimensionstats.py +14 -10
- dataeval/metrics/stats/_hashstats.py +1 -1
- dataeval/metrics/stats/_pixelstats.py +6 -6
- dataeval/metrics/stats/_visualstats.py +3 -3
- dataeval/outputs/_base.py +22 -7
- dataeval/outputs/_bias.py +26 -28
- dataeval/outputs/_drift.py +1 -9
- dataeval/outputs/_linters.py +11 -11
- dataeval/outputs/_stats.py +82 -23
- dataeval/outputs/_workflows.py +2 -2
- dataeval/utils/_array.py +6 -9
- dataeval/utils/_bin.py +1 -2
- dataeval/utils/_clusterer.py +7 -4
- dataeval/utils/_fast_mst.py +27 -13
- dataeval/utils/_image.py +65 -11
- dataeval/utils/_mst.py +1 -3
- dataeval/utils/_plot.py +15 -10
- dataeval/utils/data/_dataset.py +32 -20
- dataeval/utils/data/metadata.py +104 -82
- dataeval/utils/datasets/__init__.py +2 -0
- dataeval/utils/datasets/_antiuav.py +189 -0
- dataeval/utils/datasets/_base.py +11 -8
- dataeval/utils/datasets/_cifar10.py +104 -45
- dataeval/utils/datasets/_fileio.py +21 -47
- dataeval/utils/datasets/_milco.py +19 -11
- dataeval/utils/datasets/_mixin.py +2 -4
- dataeval/utils/datasets/_mnist.py +3 -4
- dataeval/utils/datasets/_ships.py +14 -7
- dataeval/utils/datasets/_voc.py +229 -42
- dataeval/utils/torch/models.py +5 -10
- dataeval/utils/torch/trainer.py +3 -3
- dataeval/workflows/sufficiency.py +2 -2
- {dataeval-0.86.0.dist-info → dataeval-0.86.1.dist-info}/METADATA +1 -1
- dataeval-0.86.1.dist-info/RECORD +114 -0
- dataeval/detectors/ood/vae.py +0 -74
- dataeval-0.86.0.dist-info/RECORD +0 -114
- {dataeval-0.86.0.dist-info → dataeval-0.86.1.dist-info}/LICENSE.txt +0 -0
- {dataeval-0.86.0.dist-info → dataeval-0.86.1.dist-info}/WHEEL +0 -0
dataeval/utils/datasets/_voc.py
CHANGED
@@ -2,6 +2,8 @@ from __future__ import annotations
 
 __all__ = []
 
+import os
+import shutil
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Literal, Sequence, TypeVar
 
@@ -14,6 +16,7 @@ from dataeval.utils.datasets._base import (
     BaseODDataset,
     BaseSegDataset,
     DataLocation,
+    _ensure_exists,
     _TArray,
     _TTarget,
 )
@@ -51,48 +54,56 @@ TVOCClassMap = TypeVar("TVOCClassMap", VOCClassStringMap, int, list[VOCClassStringMap])
 class BaseVOCDataset(BaseDataset[_TArray, _TTarget, list[str]]):
     _resources = [
         DataLocation(
-            url="
+            url="https://data.brainchip.com/dataset-mirror/voc/VOCtrainval_11-May-2012.tar",
             filename="VOCtrainval_11-May-2012.tar",
-            md5=
-            checksum="
+            md5=False,
+            checksum="e14f763270cf193d0b5f74b169f44157a4b0c6efa708f4dd0ff78ee691763bcb",
         ),
         DataLocation(
             url="http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar",
             filename="VOCtrainval_25-May-2011.tar",
-            md5=
-            checksum="
+            md5=False,
+            checksum="0a7f5f5d154f7290ec65ec3f78b72ef72c6d93ff6d79acd40dc222a9ee5248ba",
         ),
         DataLocation(
             url="http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar",
             filename="VOCtrainval_03-May-2010.tar",
-            md5=
-            checksum="
+            md5=False,
+            checksum="1af4189cbe44323ab212bff7afbc7d0f55a267cc191eb3aac911037887e5c7d4",
         ),
         DataLocation(
             url="http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar",
             filename="VOCtrainval_11-May-2009.tar",
-            md5=
-            checksum="
+            md5=False,
+            checksum="11cbe1741fb5bdadbbca3c08e9ec62cd95c14884845527d50847bc2cf57e7fd6",
         ),
         DataLocation(
             url="http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar",
             filename="VOCtrainval_14-Jul-2008.tar",
-            md5=
-            checksum="
+            md5=False,
+            checksum="7f0ca53c1b5a838fbe946965fc106c6e86832183240af5c88e3f6c306318d42e",
         ),
         DataLocation(
-            url="
+            url="https://data.brainchip.com/dataset-mirror/voc/VOCtrainval_06-Nov-2007.tar",
             filename="VOCtrainval_06-Nov-2007.tar",
-            md5=
-            checksum="
+            md5=False,
+            checksum="7d8cd951101b0957ddfd7a530bdc8a94f06121cfc1e511bb5937e973020c7508",
         ),
         DataLocation(
-            url="
+            url="https://data.brainchip.com/dataset-mirror/voc/VOC2012test.tar",
+            filename="VOC2012test.tar",
+            md5=False,
+            checksum="f08582b1935816c5eab3bbb1eb6d06201a789eaa173cdf1cf400c26f0cac2fb3",
+        ),
+        DataLocation(
+            url="https://data.brainchip.com/dataset-mirror/voc/VOCtest_06-Nov-2007.tar",
             filename="VOCtest_06-Nov-2007.tar",
-            md5=
-            checksum="
+            md5=False,
+            checksum="6836888e2e01dca84577a849d339fa4f73e1e4f135d312430c4856b5609b4892",
         ),
     ]
+    _base2007: tuple[int, int] = (5, 7)
+    _base2012: tuple[int, int] = (0, 6)
 
     index2label: dict[int, str] = {
         0: "aeroplane",
@@ -137,26 +148,183 @@ class BaseVOCDataset(BaseDataset[_TArray, _TTarget, list[str]]):
         )
 
     def _get_dataset_dir(self) -> Path:
-        """
-
-
-
-
+        """Overrides the base function to determine correct dataset directory for VOC class"""
+        return self._find_main_VOC_dir(self._root)
+
+    def _find_main_VOC_dir(self, base: Path) -> Path:
+        """
+        Determine the correct dataset directory for VOC detection and segmentation classes.
+        Handles various directory structure possibilities and validates existence.
+        """
+
+        # VOCdataset directory possibilities
+        dataset_dir = base if base.stem.lower() == "vocdataset" else base / "vocdataset"
+
+        # Define possible directory structures based on patterns
+        # 1. Root is already the specific VOC year directory
+        # 2. Root is the VOCdevkit directory
+        # 3. Standard structure
+        # 4. Special case for year 2011
+        # 5. Within VOCdataset directory
+        # 6. Special case for year 2011 within VOCdataset
+        possible_paths = [
+            base if base.stem == f"VOC{self.year}" else None,
+            base / f"VOC{self.year}" if base.stem == "VOCdevkit" else None,
+            base / "VOCdevkit" / f"VOC{self.year}",
+            base / "TrainVal" / "VOCdevkit" / f"VOC{self.year}" if self.year == "2011" else None,
+            dataset_dir / "VOCdevkit" / f"VOC{self.year}",
+            dataset_dir / "TrainVal" / "VOCdevkit" / f"VOC{self.year}" if self.year == "2011" else None,
+        ]
+
+        # Filter out None values and check each path
+        for path in filter(None, possible_paths):
+            if path.exists():
+                return path
+
+        # If no existing path is found, create and return the dataset directory
         if not dataset_dir.exists():
             dataset_dir.mkdir(parents=True, exist_ok=True)
+
         return dataset_dir
 
-    def _get_year_image_set_index(self, year, image_set) -> int:
+    def _get_year_image_set_index(self, year: str, image_set: str) -> int:
         """Function to ensure that the correct resource file is accessed"""
         if year == "2007" and image_set == "test":
             return -1
-
+        if year == "2012" and image_set == "test":
+            return -2
+        if year != "2007" and image_set == "test":
             raise ValueError(
-                f"The only test
-                "Either select the year 2007 or use a different image_set."
+                f"The only test sets available are for the years 2007 and 2012, not {year}. "
+                "Either select the year 2007 or 2012, or use a different image_set."
             )
+        return 2012 - int(year)
+
+    def _update_path(self) -> None:
+        """Update the path to the new folder structure"""
+        if self.year == "2011" and self.path.stem.lower() == "vocdataset":
+            self.path: Path = self.path / "TrainVal" / "VOCdevkit" / f"VOC{self.year}"
+        elif self.path.stem.lower() == "vocdataset":
+            self.path: Path = self.path / "VOCdevkit" / f"VOC{self.year}"
+
+    def _load_data_exception(self) -> tuple[list[str], list[str], dict[str, Any]]:
+        """Adjust how the directory is created for the 2007 and 2012 test set"""
+        filepaths: list[str] = []
+        targets: list[str] = []
+        datum_metadata: dict[str, list[Any]] = {}
+        tmp_path: Path = self._root / "tmp_directory_for_download"
+        tmp_path.mkdir(exist_ok=True)
+        resource_idx = self._base2007 if self.year == "2007" else self._base2012
+
+        # Determine if text files exist
+        train_file = self.path / "ImageSets" / "Main" / "trainval.txt"
+        test_file = self.path / "ImageSets" / "Main" / "test.txt"
+        train_exists = train_file.exists()
+        test_exists = test_file.exists()
+
+        if self.image_set == "base":
+            if not train_exists and not test_exists:
+                _ensure_exists(*self._resources[resource_idx[0]], self.path, self._root, self._download, self._verbose)
+                self._update_path()
+                _ensure_exists(*self._resources[resource_idx[1]], tmp_path, self._root, self._download, self._verbose)
+                self._merge_voc_directories(tmp_path)
+
+            elif train_exists and not test_exists:
+                _ensure_exists(*self._resources[resource_idx[1]], tmp_path, self._root, self._download, self._verbose)
+                self._merge_voc_directories(tmp_path)
+
+            elif not train_exists and test_exists:
+                _ensure_exists(*self._resources[resource_idx[0]], tmp_path, self._root, self._download, self._verbose)
+                self._merge_voc_directories(tmp_path)
+
+            # Code to determine what is needed in each category
+            metadata_list: list[dict[str, Any]] = []
+
+            for img_set in ["test", "base"]:
+                self.image_set = img_set
+                resource_filepaths, resource_targets, resource_metadata = self._load_data_inner()
+                filepaths.extend(resource_filepaths)
+                targets.extend(resource_targets)
+                metadata_list.append(resource_metadata)
+
+            # Combine metadata from all resources
+            for data_dict in metadata_list:
+                for key, val in data_dict.items():
+                    str_key = str(key)  # Ensure key is string
+                    if str_key not in datum_metadata:
+                        datum_metadata[str_key] = []
+                    datum_metadata[str_key].extend(val)
+
         else:
-
+            self._resource = self._resources[resource_idx[1]]
+
+            if train_exists and not test_exists:
+                _ensure_exists(*self._resource, tmp_path, self._root, self._download, self._verbose)
+                self._merge_voc_directories(tmp_path)
+
+            resource_filepaths, resource_targets, resource_metadata = self._load_try_and_update()
+            filepaths.extend(resource_filepaths)
+            targets.extend(resource_targets)
+            datum_metadata.update(resource_metadata)
+
+        return filepaths, targets, datum_metadata
+
+    def _merge_voc_directories(self, source_dir: Path) -> None:
+        """Merge two VOC directories, handling file conflicts intelligently."""
+        base: Path = self._find_main_VOC_dir(source_dir)
+        # Create all subdirectories in target if they don't exist
+        for dirpath, dirnames, filenames in os.walk(base):
+            # Convert to Path objects
+            source_path = Path(dirpath)
+
+            # Get the relative path from source_dir
+            rel_path = source_path.relative_to(base)
+
+            # Create the corresponding target path
+            target_path = self.path / rel_path
+            target_path.mkdir(parents=True, exist_ok=True)
+
+            # Copy all files
+            for filename in filenames:
+                source_file = source_path / filename
+                target_file = target_path / filename
+
+                # File doesn't exist in target, just move it
+                if not target_file.exists():
+                    shutil.move(source_file, target_file)
+                else:
+                    # File exists in both assume they're identical and skip
+                    pass
+
+        shutil.rmtree(source_dir)
+
+    def _load_try_and_update(self) -> tuple[list[str], list[str], dict[str, Any]]:
+        """Test if data needs to be downloaded and update path if it does"""
+        if self._verbose:
+            print(f"Determining if {self._resource.filename} needs to be downloaded.")
+
+        try:
+            result = self._load_data_inner()
+            if self._verbose:
+                print("No download needed, loaded data successfully.")
+        except FileNotFoundError:
+            _ensure_exists(*self._resource, self.path, self._root, self._download, self._verbose)
+            self._update_path()
+            result = self._load_data_inner()
+        return result
+
+    def _load_data(self) -> tuple[list[str], list[str], dict[str, Any]]:
+        """
+        Function to determine if data can be accessed or if it needs to be downloaded and/or extracted.
+        """
+        # Exception - test sets
+        year_set_bool = (self.image_set == "test" or self.image_set == "base") and (
+            self.year == "2012" or self.year == "2007"
+        )
+        if year_set_bool:
+            return self._load_data_exception()
+
+        return self._load_try_and_update()
 
     def _get_image_sets(self) -> dict[str, list[str]]:
         """Function to create the list of images in each image set"""
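A note on the index arithmetic above (an inference from the resource list, not stated explicitly in the diff): _resources is ordered trainval 2012 through 2007 at indices 0-5, followed by the new 2012 test archive at index 6 and the 2007 test archive at index 7. That is why _get_year_image_set_index can return 2012 - int(year) for trainval years, -1 (the last entry) for the 2007 test set, and -2 for the 2012 test set, and why _base2007 = (5, 7) and _base2012 = (0, 6) pair each year's trainval index with its matching test index.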
@@ -206,20 +374,21 @@ class BaseVOCDataset(BaseDataset[_TArray, _TTarget, list[str]]):
     def _read_annotations(self, annotation: str) -> tuple[list[list[float]], list[int], dict[str, Any]]:
         boxes: list[list[float]] = []
         label_str = []
+        if not Path(annotation).exists():
+            return boxes, label_str, {}
         root = parse(annotation).getroot()
         if root is None:
             raise ValueError(f"Unable to parse {annotation}")
-        num_objects = len(root.findall("object"))
         additional_meta: dict[str, Any] = {
-            "folder":
-            "filename":
-            "database":
-            "annotation_source":
-            "image_source":
-            "image_width":
-            "image_height":
-            "image_depth":
-            "segmented":
+            "folder": root.findtext("folder", default=""),
+            "filename": root.findtext("filename", default=""),
+            "database": root.findtext("source/database", default=""),
+            "annotation_source": root.findtext("source/annotation", default=""),
+            "image_source": root.findtext("source/image", default=""),
+            "image_width": int(root.findtext("size/width", default="-1")),
+            "image_height": int(root.findtext("size/height", default="-1")),
+            "image_depth": int(root.findtext("size/depth", default="-1")),
+            "segmented": int(root.findtext("segmented", default="-1")),
             "pose": [],
             "truncated": [],
             "difficult": [],
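The replacement metadata block relies on xml.etree.ElementTree's findtext, which returns the given default when a tag is absent; that is what makes the parse robust to sparse annotation files and pairs with the new early return for missing files (important now that the unannotated 2012 test set is supported). A minimal sketch with a hypothetical annotation snippet:

    from xml.etree.ElementTree import fromstring

    root = fromstring("<annotation><size><width>500</width></size></annotation>")
    print(root.findtext("size/width", default="-1"))        # "500"
    print(int(root.findtext("size/height", default="-1")))  # -1: missing tag falls back to the default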
@@ -252,9 +421,14 @@ class VOCDetection(
     Parameters
     ----------
     root : str or pathlib.Path
-
+        Because of the structure of the PASCAL VOC datasets, the root needs to be one of 4 folders.
+        1) Directory containing the year of the **already downloaded** dataset (i.e. .../VOCdevkit/VOC2012 <-)
+        2) Directory to the VOCdevkit folder of the **already downloaded** dataset (i.e. .../VOCdevkit <- /VOC2012)
+        3) Directory to the folder one level up from the VOCdevkit folder,
+        data **may** or **may not** be already downloaded (i.e. ... <- /VOCdevkit/VOC2012)
+        4) Directory to where you would like the dataset to be downloaded
     image_set : "train", "val", "test", or "base", default "train"
-        If "test", then dataset year must be "2007".
+        If "test", then dataset year must be "2007" or "2012". Note that the 2012 test set does not contain annotations.
         If "base", then the combined dataset of "train" and "val" is returned.
     year : "2007", "2008", "2009", "2010", "2011" or "2012", default "2012"
         The dataset year.
@@ -302,9 +476,14 @@ class VOCDetectionTorch(
     Parameters
     ----------
     root : str or pathlib.Path
-
+        Because of the structure of the PASCAL VOC datasets, the root needs to be one of 4 folders.
+        1) Directory containing the year of the **already downloaded** dataset (i.e. .../VOCdevkit/VOC2012 <-)
+        2) Directory to the VOCdevkit folder of the **already downloaded** dataset (i.e. .../VOCdevkit <- /VOC2012)
+        3) Directory to the folder one level up from the VOCdevkit folder,
+        data **may** or **may not** be already downloaded (i.e. ... <- /VOCdevkit/VOC2012)
+        4) Directory to where you would like the dataset to be downloaded
     image_set : "train", "val", "test", or "base", default "train"
-        If "test", then dataset year must be "2007".
+        If "test", then dataset year must be "2007" or "2012". Note that the 2012 test set does not contain annotations.
         If "base", then the combined dataset of "train" and "val" is returned.
     year : "2007", "2008", "2009", "2010", "2011" or "2012", default "2012"
         The dataset year.
@@ -391,6 +570,14 @@ class VOCSegmentation(
     """
 
     def _load_data(self) -> tuple[list[str], list[str], dict[str, list[Any]]]:
-
+        """Overload base load data to split out masks for segmentation."""
+        # Exception - test sets
+        year_set_bool = (self.image_set == "test" or self.image_set == "base") and (
+            self.year == "2012" or self.year == "2007"
+        )
+        if year_set_bool:
+            filepaths, targets, datum_metadata = self._load_data_exception()
+        else:
+            filepaths, targets, datum_metadata = self._load_try_and_update()
         self._masks = datum_metadata.pop("mask_path")
         return filepaths, targets, datum_metadata
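As a companion to the updated root docstring above, here is a minimal usage sketch. It is not part of the diff: it assumes VOCDetection is exported from dataeval.utils.datasets in this release, the local paths are hypothetical, and the download keyword is inferred from the private _download attribute visible in the hunks.

    from dataeval.utils.datasets import VOCDetection

    # 1) root is the already-downloaded year directory (hypothetical path)
    train_ds = VOCDetection("/data/VOCdevkit/VOC2012", image_set="train")

    # 2) root is the VOCdevkit directory
    val_ds = VOCDetection("/data/VOCdevkit", image_set="val", year="2012")

    # 3/4) root is one level above VOCdevkit, or an empty target directory;
    # with download=True the archives are fetched and, for "base", merged
    base_ds = VOCDetection("/data", image_set="base", year="2007", download=True)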
dataeval/utils/torch/models.py
CHANGED
@@ -47,8 +47,7 @@ class Autoencoder(nn.Module):
         The reconstructed output tensor.
         """
         x = self.encoder(x)
-
-        return x
+        return self.decoder(x)
 
     def encode(self, x: Any) -> Any:
         """
@@ -188,8 +187,7 @@ class AE(nn.Module):
         The reconstructed output tensor.
         """
         x = self.encoder(x)
-
-        return x
+        return self.decoder(x)
 
     def encode(self, x: torch.Tensor) -> torch.Tensor:
         """
@@ -278,9 +276,7 @@ class Encoder_AE(nn.Module):
         """
         x = self.encoding_ops(x)
 
-
-
-        return x
+        return self.flatten(x)
 
 
 class Decoder_AE(nn.Module):
@@ -334,8 +330,7 @@ class Decoder_AE(nn.Module):
         x = self.input(x)
         x = x.reshape((-1, *self.post_op_shape))
         x = self.decoder(x)
-
-        return x
+        return x.reshape((-1, *self.input_shape))
 
 
 class ResNet18(nn.Module):
@@ -355,7 +350,7 @@ class ResNet18(nn.Module):
     Please use with caution if deploying this class or subclasses.
     """
 
-    def __init__(self, embedding_size: int = 128):
+    def __init__(self, embedding_size: int = 128) -> None:
         super().__init__()
         self.model: nn.Module = resnet18(weights=ResNet18_Weights.DEFAULT, progress=False)
         self.model.fc = nn.Linear(self.model.fc.in_features, embedding_size)
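The four forward-pass hunks above share one fix: forward previously returned the intermediate tensor instead of the final decoded, flattened, or reshaped result. A standalone sketch of the corrected round trip (illustrative only, not the library's classes):

    import torch
    import torch.nn as nn

    class TinyAutoencoder(nn.Module):
        # Hypothetical stand-in for the patched Autoencoder/AE classes
        def __init__(self) -> None:
            super().__init__()
            self.encoder = nn.Linear(16, 4)  # compress input to a 4-d latent
            self.decoder = nn.Linear(4, 16)  # map latent back to input size

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            x = self.encoder(x)
            return self.decoder(x)  # the 0.86.1 fix: decode before returning

    x = torch.randn(2, 16)
    assert TinyAutoencoder()(x).shape == x.shape  # reconstruction matches input shape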
dataeval/utils/torch/trainer.py
CHANGED
@@ -2,8 +2,6 @@
 
 from __future__ import annotations
 
-from dataeval.config import DeviceLike, get_device
-
 __all__ = ["AETrainer"]
 
 from typing import Any
@@ -13,6 +11,8 @@ import torch.nn as nn
 from torch.optim import Adam
 from torch.utils.data import DataLoader, Dataset
 
+from dataeval.config import DeviceLike, get_device
+
 
 def get_images_from_batch(batch: Any) -> Any:
     """Extracts images from a batch of collated data by DataLoader"""
@@ -39,7 +39,7 @@ class AETrainer:
         model: nn.Module,
         device: DeviceLike | None = None,
         batch_size: int = 8,
-    ):
+    ) -> None:
         self.device: torch.device = get_device(device)
         self.model: nn.Module = model.to(self.device)
         self.batch_size = batch_size
dataeval/workflows/sufficiency.py
CHANGED
@@ -21,7 +21,7 @@ def reset_parameters(model: nn.Module) -> nn.Module:
     """
 
     @torch.no_grad()
-    def weight_reset(m: nn.Module):
+    def weight_reset(m: nn.Module) -> None:
         # Check if the current module has reset_parameters
         reset_parameters = getattr(m, "reset_parameters", None)
         if callable(reset_parameters):
@@ -86,7 +86,7 @@ class Sufficiency(Generic[T]):
         substeps: int = 5,
         train_kwargs: Mapping[str, Any] | None = None,
         eval_kwargs: Mapping[str, Any] | None = None,
-    ):
+    ) -> None:
         self.model = model
         self.train_ds = train_ds
         self.test_ds = test_ds
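The weight_reset hunk above only adds a return annotation; for context, the surrounding pattern, recursively re-initializing every submodule that defines reset_parameters, reads roughly as follows. This is a sketch assembled from the visible context lines, not the verbatim source:

    import torch
    import torch.nn as nn

    def reset_parameters(model: nn.Module) -> nn.Module:
        @torch.no_grad()
        def weight_reset(m: nn.Module) -> None:
            # Check if the current module has reset_parameters
            reset_fn = getattr(m, "reset_parameters", None)
            if callable(reset_fn):
                reset_fn()
        # apply() walks the module tree, calling weight_reset on each submodule
        return model.apply(weight_reset)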
{dataeval-0.86.0.dist-info → dataeval-0.86.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dataeval
-Version: 0.86.0
+Version: 0.86.1
 Summary: DataEval provides a simple interface to characterize image data and its impact on model performance across classification and object-detection tasks
 Home-page: https://dataeval.ai/
 License: MIT
dataeval-0.86.1.dist-info/RECORD
ADDED
@@ -0,0 +1,114 @@
+dataeval/__init__.py,sha256=oC55_G8B7aR_QRKVy5fQtolW71aKDzMSixWge3cHn3M,1636
+dataeval/_log.py,sha256=C7AGkIRzymvYJ0LQXtnShiy3i5Xrp8T58JzIHHguk_Q,365
+dataeval/config.py,sha256=hjad0TK1UmaKQlUuxqxt64_OAUqZkHjicBf06cvTyrQ,4082
+dataeval/data/__init__.py,sha256=qNnRRiVP_sLthkkHpUrMgI_r8dQK-cC-xoGrrjQeRKc,544
+dataeval/data/_embeddings.py,sha256=PFjpdV9bfusCB4taTIYSzx1hP8nJb_KCkZTN8kMw-Hs,12885
+dataeval/data/_images.py,sha256=3d4Cv-xg5z6_LVtw1eL_QdFwzbDI1cwvPNQblkrMEMk,2622
+dataeval/data/_metadata.py,sha256=va5coOR1rRVzFB9SGzmuTj-Oaexs9LobGMA7u2An_eY,15420
+dataeval/data/_selection.py,sha256=r06xeiyK8nTWPLyItkoPQRWZI1i6LATSue_cuEbCdc4,4463
+dataeval/data/_split.py,sha256=pSyeJVW2sDoTU9wyi0d7UWqDuPhYvDyEgA0BUldS9Vg,16743
+dataeval/data/_targets.py,sha256=pXrHBwT4Pi8DauaOxDVnIMwowWWlXuvSb07ShW7O2zk,3119
+dataeval/data/selections/__init__.py,sha256=2m8ZB53wXzqLcqmc6p5atO6graB6ZyiRSNJFxf11X_g,613
+dataeval/data/selections/_classbalance.py,sha256=7v8ApoL3X8eCZ6fGDNTehE_bZ1loaP3TlhsJLaICVWg,1458
+dataeval/data/selections/_classfilter.py,sha256=KQOmcTIcV3ZPWuiwqOmwX0SB5I2qlbxLSlwINUZWOjU,4339
+dataeval/data/selections/_indices.py,sha256=RFsR9z10aM3N0gJSfKrukFpi-LkiQGXoOwXhmOQ5cpg,630
+dataeval/data/selections/_limit.py,sha256=JG4GmEiNKt3sk4PbOUbBnGGzNlyz72H-kQrt8COMm4Y,512
+dataeval/data/selections/_prioritize.py,sha256=4dGUvgR7m6NGzzPU0N_bw0Xhujo8b72Wo8L4PGHbvBo,11233
+dataeval/data/selections/_reverse.py,sha256=b67kNC43A5KpQOic5gifjo9HpJ7FMh4LFCrfovPiJ-M,368
+dataeval/data/selections/_shuffle.py,sha256=TSCIZBgLAn09iMI_WIw0aqwSU4NZLAhHG7t8H_CuDUY,1195
+dataeval/detectors/__init__.py,sha256=3Sg-XWlwr75zEEH3hZKA4nWMtGvaRlnfzTWvZG_Ak6U,189
+dataeval/detectors/drift/__init__.py,sha256=Jqv98oOVeC2tvHlNGxQ8RJ6De2q4SyS5lTpaYlb4ocM,756
+dataeval/detectors/drift/_base.py,sha256=6aNF1LzG3w1sNUrmSBbsvuN5IkQnoRikRacqobYge84,7592
+dataeval/detectors/drift/_cvm.py,sha256=cS33zWJmFY1fft1XcANcP2jSD5ou7TxvIU2AldhTynM,3004
+dataeval/detectors/drift/_ks.py,sha256=uMc5-NA-lSV1IODrY8uJe87ll3uRJT_oXLJFXy95M1w,3186
+dataeval/detectors/drift/_mmd.py,sha256=uw8axM6dWxTBrCaXwkbldIDcdhe4hmim9yrsbuOwA-0,11523
+dataeval/detectors/drift/_mvdc.py,sha256=ABxGut6KzxF_oM-Hs87WARCR0692dhPVdZNoGGwJaa4,3058
+dataeval/detectors/drift/_nml/__init__.py,sha256=MNyKyZlfTjr5uQql2uBBfRkUdsuduie_WJdn09GYmqg,137
+dataeval/detectors/drift/_nml/_base.py,sha256=o34LcCsD9p1A6u8UdQn-dxIVwC2CMr6uCpC0vq16JX0,2663
+dataeval/detectors/drift/_nml/_chunk.py,sha256=t12eouanRNiu5DJXOaYDZXUvFMqfcp1BETLOufdV79M,13567
+dataeval/detectors/drift/_nml/_domainclassifier.py,sha256=n7Ttq5Ej7sAY9Jn2iagaGj4IIWiG8gmA3wwFizlBqes,7292
+dataeval/detectors/drift/_nml/_result.py,sha256=Nz_qTRu_EcJ1OcywSTVXFm9fx3UyuX66ZWACrffG5dI,3255
+dataeval/detectors/drift/_nml/_thresholds.py,sha256=WGdkLei9w_EvvsRHQzWdDyFVoZHIwM78k_aB3eoh31Q,12060
+dataeval/detectors/drift/_uncertainty.py,sha256=BHlykJ-r7TGLJxdPfoazXnoAJ1qVDzbk5HjAMdsnHz8,5847
+dataeval/detectors/drift/updates.py,sha256=L1PnrPlIE1x6ujCc5mCwjcAZwadVTn-Zjb6MnTDvzJQ,2251
+dataeval/detectors/linters/__init__.py,sha256=xn2zPwUcmsuf-Jd9uw6AVI11C9z1b1Y9fYtuFnXenZ0,404
+dataeval/detectors/linters/duplicates.py,sha256=X5WSEvI_BHkLoXjkaHK6wTnSkx4IjpO_exMRjSlhc70,4963
+dataeval/detectors/linters/outliers.py,sha256=R3-p8kzia77Q3k2grXeRXnRiv7nMhosoPY1sDLQVKrs,9049
+dataeval/detectors/ood/__init__.py,sha256=juCYBDs7CQEAtMhnEpPqF6uTrOIH9kTBSuQ_GRw6a8o,283
+dataeval/detectors/ood/ae.py,sha256=fTrUfFxv6xUqzKpwMC8rW3JrizA16M_bgzqLuBKMrS0,2944
+dataeval/detectors/ood/base.py,sha256=9b-Ljznf0lB1SXF4F_Aj3eJ4Y3ijGEDPMjucUsWOGJM,3051
+dataeval/detectors/ood/mixin.py,sha256=0_o-1HPvgf3-Lf1MSOIfjj5UB8LTLEBGYtJJfyCCzwc,5431
+dataeval/metadata/__init__.py,sha256=XDDmJbOZBNM6pL0r6Nbu6oMRoyAh22IDkPYGndNlkZU,316
+dataeval/metadata/_distance.py,sha256=T1Umju_QwBiLmn1iUbxZagzBS2VnHaDIdp6j-NpaZuk,4076
+dataeval/metadata/_ood.py,sha256=lnKtKModArnUrAhH_XswEtUAhUkh1U_oNsLt1UmNP44,12748
+dataeval/metadata/_utils.py,sha256=r8qBJT83RblobD5W5zyTVi6vYi51Dwkqswizdbzss-M,1169
+dataeval/metrics/__init__.py,sha256=8VC8q3HuJN3o_WN51Ae2_wXznl3RMXIvA5GYVcy7vr8,225
+dataeval/metrics/bias/__init__.py,sha256=329S1_3WnWqeU4-qVcbe0fMy4lDrj9uKslWHIQf93yg,839
+dataeval/metrics/bias/_balance.py,sha256=l1hTVkVwD85bP20MTthA-I5BkvbytylQkJu3Q6iTuPA,6152
+dataeval/metrics/bias/_completeness.py,sha256=BysXU2Jpw33n5dl3acJFEqF3mFGiJLsfG4n5Q2fkTaY,4608
+dataeval/metrics/bias/_coverage.py,sha256=PeUoOiaghUEdn6Ov8z2-am7-fnBVIPcFbJK7Ty5JObA,3647
+dataeval/metrics/bias/_diversity.py,sha256=B_qWVDMZfh818U0qVm8yidquB0H0XvW8N75OWVWXy2g,5814
+dataeval/metrics/bias/_parity.py,sha256=PkU3wa77Iyif3McjA510fifTBaph7eJ8iAlI2jQngEM,11374
+dataeval/metrics/estimators/__init__.py,sha256=Pnds8uIyAovt2fKqZjiHCIP_kVoBWlVllekYuK5UmmU,568
+dataeval/metrics/estimators/_ber.py,sha256=C30E5LiGGTAfo31zWFYDptDg0R7CTJGJ-a60YgzSkYY,5382
+dataeval/metrics/estimators/_clusterer.py,sha256=1HrpihGTJ63IkNSOy4Ibw633Gllkm1RxKmoKT5MOgt0,1434
+dataeval/metrics/estimators/_divergence.py,sha256=-np4nWNtRrHnvo4xdWuTzkyJJmobyjDnVDBOMjtBS1Y,4003
+dataeval/metrics/estimators/_uap.py,sha256=BULEBbJ9BQ1IcTeZf0x7iI60QHAWCccBOM97FIu9VXA,1928
+dataeval/metrics/stats/__init__.py,sha256=6tA_9nbbM5ObJ6cds8Y1VBtTQiTOxrpGQSFLu_lWGGA,1098
+dataeval/metrics/stats/_base.py,sha256=-TIDSHT-zwNXqWMTM6Nu3BQ11cWP4TFYFaUF40vIChs,12534
+dataeval/metrics/stats/_boxratiostats.py,sha256=ROZrlqgbowkGfCR5PJ5TL7Og40iMOdUqJnsCtaz_Xek,6450
+dataeval/metrics/stats/_dimensionstats.py,sha256=EVO-BlxrZl8qrP09lwPbyWdrG1ZeDtgj4LiswDwEZ1I,2896
+dataeval/metrics/stats/_hashstats.py,sha256=qa1CYRgOebkxqkALfffaPM-kJ074ZbyfpWbfOfuObSs,4758
+dataeval/metrics/stats/_imagestats.py,sha256=gUPNgN5Zwzdr7WnSwbve1NXNsyxd5dy3cSnlR_7guCg,3007
+dataeval/metrics/stats/_labelstats.py,sha256=lz8I6eSd8tFkmQqy5cOG8hn9yxs0mP-Ic9ratFHiuoU,2813
+dataeval/metrics/stats/_pixelstats.py,sha256=5RCQh0OQkHiCkn3DgCPVxKoFfifX_FOtwsnotADSZ0I,3265
+dataeval/metrics/stats/_visualstats.py,sha256=0k6bvAL_d66nQMfG7bydCOFJb7B0dhgG7fqCjVTp1sg,3707
+dataeval/outputs/__init__.py,sha256=geHB5M3QOiFFaQGV4ZwDTTKpqZPvPePbqG7lzaPhaXQ,1741
+dataeval/outputs/_base.py,sha256=7KRWFIEw0UHdhb1em92bPE1YqbMYumAW1QD0QfPwVLc,5900
+dataeval/outputs/_bias.py,sha256=EjJ6jrxDEJYgUj11EyUhdQvdCUSNeefMe5uD3E73GIo,12261
+dataeval/outputs/_drift.py,sha256=rKn5vqMR6XNujgSqfHsH76oFkoGsUusquZL2Qy4Ae6Y,4581
+dataeval/outputs/_estimators.py,sha256=a2oAIxxEDZ9WLGfMWH8KD-BVUS_SnULRPR-iI9hFPoQ,3047
+dataeval/outputs/_linters.py,sha256=3vI8zsSF-JecQut500A629sICidQLWqhEZcj7o7_cfs,6554
+dataeval/outputs/_metadata.py,sha256=ffZgpX8KWURPHXpOWjbvJ2KRqWQkS2nWuIjKUzoHhMI,1710
+dataeval/outputs/_ood.py,sha256=suLKVXULGtXH0rq9eXHI1d3d2jhGmItJtz4QiQd47A4,1718
+dataeval/outputs/_stats.py,sha256=YDdVQmFcOvb4_NYc_d2a2JCA0Zkuh1o6_qupQkc_X1w,15142
+dataeval/outputs/_utils.py,sha256=HHlGC7sk416m_3Bgn075Qdblz_aPup_UOafJpB0RuXY,893
+dataeval/outputs/_workflows.py,sha256=0xSwPxBATa29tvwJtpovjYrq4la9fkbamHM_qsw-Llc,10799
+dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dataeval/typing.py,sha256=GDMuef-oFFukNtsiKFmsExHdNvYR_j-tQcsCwZ9reow,7198
+dataeval/utils/__init__.py,sha256=hRvyUK7b3d6JBEV5u47rFcOHEcmDYqAvZQw_T5pDAWw,264
+dataeval/utils/_array.py,sha256=ftX8S6HKAIUOuc1xd30VC3Pz5yUzRglDpCLisWY_tHs,5888
+dataeval/utils/_bin.py,sha256=w3eJ2Szw5eapqQ0cGv731rhNgLFGW0cCz2pXo9I6CuY,7296
+dataeval/utils/_clusterer.py,sha256=XmyW2j_JLMYLds8QYgV0nAfdqxWfNR0ZI-6rnZsyHwU,5630
+dataeval/utils/_fast_mst.py,sha256=pv42flr1Uf5RBa9qDG0YLDXWH7Mr7a9zpauO1HqZXaY,8061
+dataeval/utils/_image.py,sha256=4uxTIOYZZlRJOfNmdA3ek3no3FrLWCK5un48kStMDt8,3578
+dataeval/utils/_method.py,sha256=9B9JQbgqWJBRhQJb7glajUtWaQzUTIUuvrZ9_bisxsM,394
+dataeval/utils/_mst.py,sha256=bLmJmu_1Dtj3hC5gQp3oAiJ_7TKtEjahTqusVRRU4eI,2168
+dataeval/utils/_plot.py,sha256=zP0bEvtrLdws7r1Jte8Camq-q5K5F6T8iuv3bStnEJc,7116
+dataeval/utils/data/__init__.py,sha256=xGzrjrOxOP2DP1tU84AWMKPnSxFvSjM81CTlDg4rNM8,331
+dataeval/utils/data/_dataset.py,sha256=5Yt7PzNeeUgm3qy71B_IOW7mKyCfvv8AIqs7Xzv7B9Q,8853
+dataeval/utils/data/collate.py,sha256=5egEEKhNNCGeNLChO1p6dZ4Wg6x51VEaMNHz7hEZUxI,3936
+dataeval/utils/data/metadata.py,sha256=L1c2bCiMj0aR0QCoKkjwBujIftJDEMgW_3ZbgeS8WHo,14703
+dataeval/utils/datasets/__init__.py,sha256=pAXqHX76yAoBI8XB3m6zGuW-u3s3PCoIXG5GDzxH7Zs,572
+dataeval/utils/datasets/_antiuav.py,sha256=kA_ia1fYNcJiz9SpCvh-Z8iSc7iJrdogjBI3soyaa7A,8304
+dataeval/utils/datasets/_base.py,sha256=pyfpJda3ku469M3TFRsJn9S2oAiQODOGTlLcdcoEW9U,9031
+dataeval/utils/datasets/_cifar10.py,sha256=hZc_A30yKYBbv2kvVdEkZ9egyEe6XBUnmksoIAoJ-5Y,8265
+dataeval/utils/datasets/_fileio.py,sha256=OASFA9uX3KgfyPb5vza12BlZyAi9Y8Al9lUR_IYPcsM,5449
+dataeval/utils/datasets/_milco.py,sha256=O4w4Z97tdGU-_us09lPrMNpcPLsXXbKkyPYAWzzvPc4,7870
+dataeval/utils/datasets/_mixin.py,sha256=S8iii-SoYUsFFYNXjw2thlZkpBvRLnZ4XI8wTqOKXgU,1729
+dataeval/utils/datasets/_mnist.py,sha256=uz46sE1Go3TgGjG6x2cXckSVQ0mSg2mhgk8BUvLWjb0,8149
+dataeval/utils/datasets/_ships.py,sha256=6U04HAoM3jgLl1qv-NnxjZeSsBipcqWJBMhBMn5iIUY,5115
+dataeval/utils/datasets/_types.py,sha256=iSKyHXRlGuomXs0FHK6md8lXLQrQQ4fxgVOwr4o81bo,1089
+dataeval/utils/datasets/_voc.py,sha256=pafY112O80isYkrdy7Quie9SBm_TmYhREuyl8SxtsR0,24586
+dataeval/utils/torch/__init__.py,sha256=dn5mjCrFp0b1aL_UEURhONU0Ag0cmXoTOBSGagpkTiA,325
+dataeval/utils/torch/_blocks.py,sha256=HVhBTMMD5NA4qheMUgyol1KWiKZDIuc8k5j4RcMKmhk,1466
+dataeval/utils/torch/_gmm.py,sha256=XM68GNEP97EjaB1U49-ZXRb81d0CEFnPS910alrcB3g,3740
+dataeval/utils/torch/_internal.py,sha256=vHy-DzPhmvE8h3wmWc3aciBJ8nDGzQ1z1jTZgGjmDyM,4154
+dataeval/utils/torch/models.py,sha256=1idpXyjrYcCBSsbxxRUOto8xr4MJNjDEqQHiIXVU5Zc,9700
+dataeval/utils/torch/trainer.py,sha256=Oc2lK13uPGhmLYbmAqlPWyKxgG4YJFlnSXCqFHUZbdA,5528
+dataeval/workflows/__init__.py,sha256=ou8y0KO-d6W5lgmcyLjKlf-J_ckP3vilW7wHkgiDlZ4,255
+dataeval/workflows/sufficiency.py,sha256=j-R8dg4XE6a66p_oTXG2GNzgg3vGk85CTblxhFXaxog,8513
+dataeval-0.86.1.dist-info/LICENSE.txt,sha256=uAooygKWvX6NbU9Ran9oG2msttoG8aeTeHSTe5JeCnY,1061
+dataeval-0.86.1.dist-info/METADATA,sha256=k9tNiWEDBXit4KU6le2vb1CrArZNxssiW5LHXtVXo0A,5321
+dataeval-0.86.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+dataeval-0.86.1.dist-info/RECORD,,
|