dataeval 0.86.7__py3-none-any.whl → 0.86.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -51,7 +51,7 @@ VOCClassStringMap = Literal[
51
51
  TVOCClassMap = TypeVar("TVOCClassMap", VOCClassStringMap, int, list[VOCClassStringMap], list[int])
52
52
 
53
53
 
54
- class BaseVOCDataset(BaseDataset[_TArray, _TTarget, list[str]]):
54
+ class BaseVOCDataset(BaseDataset[_TArray, _TTarget, list[str], str]):
55
55
  _resources = [
56
56
  DataLocation(
57
57
  url="https://data.brainchip.com/dataset-mirror/voc/VOCtrainval_11-May-2012.tar",
@@ -412,7 +412,7 @@ class BaseVOCDataset(BaseDataset[_TArray, _TTarget, list[str]]):
412
412
 
413
413
  class VOCDetection(
414
414
  BaseVOCDataset[NDArray[Any], ObjectDetectionTarget[NDArray[Any]]],
415
- BaseODDataset[NDArray[Any]],
415
+ BaseODDataset[NDArray[Any], list[str], str],
416
416
  BaseDatasetNumpyMixin,
417
417
  ):
418
418
  """
@@ -467,7 +467,7 @@ class VOCDetection(
467
467
 
468
468
  class VOCDetectionTorch(
469
469
  BaseVOCDataset[torch.Tensor, ObjectDetectionTarget[torch.Tensor]],
470
- BaseODDataset[torch.Tensor],
470
+ BaseODDataset[torch.Tensor, list[str], str],
471
471
  BaseDatasetTorchMixin,
472
472
  ):
473
473
  """
@@ -65,7 +65,7 @@ def trainer(
65
65
  model: torch.nn.Module,
66
66
  x_train: NDArray[Any],
67
67
  y_train: NDArray[Any] | None,
68
- loss_fn: Callable[..., torch.Tensor | torch.nn.Module] | None,
68
+ loss_fn: Callable[..., torch.Tensor] | None,
69
69
  optimizer: torch.optim.Optimizer | None,
70
70
  preprocess_fn: Callable[[torch.Tensor], torch.Tensor] | None,
71
71
  epochs: int,
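The `loss_fn` annotation is narrowed here from `Callable[..., torch.Tensor | torch.nn.Module]` to `Callable[..., torch.Tensor]`: as a later hunk shows, the trainer invokes it as `loss_fn(y, y_hat)` (or `loss_fn(y, *y_hat)` when the model returns a tuple) and calls `.backward()` on the result, so the callable must return a tensor. A minimal sketch of a conforming loss function; the name `mse_loss_fn` and its body are illustrative, not part of the package:

```python
import torch


# Sketch only: any callable returning a torch.Tensor satisfies the
# narrowed Callable[..., torch.Tensor] annotation. The trainer calls it
# as loss_fn(y, y_hat), or loss_fn(y, *y_hat) for tuple outputs.
def mse_loss_fn(y: torch.Tensor, y_hat: torch.Tensor) -> torch.Tensor:
    # Return a scalar tensor so the trainer can call loss.backward().
    return torch.mean((y_hat - y) ** 2)
```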
@@ -117,7 +117,7 @@ def trainer(
117
117
  model = model.to(device)
118
118
 
119
119
  # iterate over epochs
120
- loss = torch.nan
120
+ loss = torch.scalar_tensor(torch.nan)
121
121
  disable_tqdm = not verbose
122
122
  for epoch in (pbar := tqdm(range(epochs), disable=disable_tqdm)):
123
123
  epoch_loss = loss
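The initial `loss` value switches from the plain float `torch.nan` to a zero-dimensional tensor, so `loss` (and the `epoch_loss` derived from it) holds a `torch.Tensor` even before the first batch is processed. A quick illustration of the difference:

```python
import torch

# torch.nan is a Python float; torch.scalar_tensor(torch.nan) wraps it
# in a 0-dim tensor, giving `loss` a consistent Tensor type up front.
loss_float = torch.nan
loss_tensor = torch.scalar_tensor(torch.nan)
print(type(loss_float))   # <class 'float'>
print(type(loss_tensor))  # <class 'torch.Tensor'>
```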
@@ -133,7 +133,7 @@ def trainer(
133
133
  y_hat = model(x)
134
134
  y = x if y is None else y
135
135
 
136
- loss = loss_fn(y, *y_hat) if isinstance(y_hat, tuple) else loss_fn(y, y_hat) # type: ignore
136
+ loss = loss_fn(y, *y_hat) if isinstance(y_hat, tuple) else loss_fn(y, y_hat)
137
137
 
138
138
  optimizer.zero_grad()
139
139
  loss.backward()
@@ -172,7 +172,7 @@ class AETrainer:
172
172
  for batch in dl:
173
173
  imgs = get_images_from_batch(batch)
174
174
  imgs = imgs.to(self.device)
175
- embeddings = encode_func(imgs).to("cpu")
175
+ embeddings = encode_func(imgs).to("cpu") # type: ignore
176
176
  encodings = torch.vstack((encodings, embeddings)) if len(encodings) else embeddings
177
177
 
178
178
  return encodings
@@ -57,22 +57,29 @@ class Sufficiency(Generic[T]):
57
57
  test_ds : torch.Dataset
58
58
  Data that will be used for every run's evaluation
59
59
  train_fn : Callable[[nn.Module, Dataset, Sequence[int]], None]
60
- Function which takes a model (torch.nn.Module), a dataset
61
- (torch.utils.data.Dataset), indices to train on and executes model
60
+ Function which takes a model, a dataset, and indices to train on and then executes model
62
61
  training against the data.
63
62
  eval_fn : Callable[[nn.Module, Dataset], Mapping[str, float | ArrayLike]]
64
- Function which takes a model (torch.nn.Module), a dataset
65
- (torch.utils.data.Dataset) and returns a dictionary of metric
66
- values (Mapping[str, float]) which is used to assess model performance
63
 + Function which takes a model and a dataset, and returns a dictionary of metric
64
+ values which is used to assess model performance
67
65
  given the model and data.
68
66
  runs : int, default 1
69
- Number of models to run over all subsets
67
+ Number of models to train over the entire dataset.
70
68
  substeps : int, default 5
71
- Total number of dataset partitions that each model will train on
69
+ The number of steps that each model will be trained and evaluated on.
72
70
  train_kwargs : Mapping | None, default None
73
71
  Additional arguments required for custom training function
74
72
  eval_kwargs : Mapping | None, default None
75
73
  Additional arguments required for custom evaluation function
74
+
75
+ Warning
76
+ -------
77
+ Since each run is trained sequentially, increasing the parameter `runs` can significantly increase runtime.
78
+
79
+ Note
80
+ ----
81
 + `substeps` is overridden by the parameter `eval_at` in :meth:`.Sufficiency.evaluate`
82
+
76
83
  """
77
84
 
78
85
  def __init__(
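The docstring above specifies `train_fn : Callable[[nn.Module, Dataset, Sequence[int]], None]` and `eval_fn : Callable[[nn.Module, Dataset], Mapping[str, float | ArrayLike]]`. A minimal sketch of user-supplied callables matching those signatures; the classification training loop and the `"accuracy"` metric name are illustrative assumptions, not DataEval code:

```python
from typing import Mapping, Sequence

import torch
from torch.utils.data import DataLoader, Dataset, Subset


def train_fn(model: torch.nn.Module, dataset: Dataset, indices: Sequence[int]) -> None:
    # Train only on the subset of indices that Sufficiency hands in.
    loader = DataLoader(Subset(dataset, list(indices)), batch_size=16, shuffle=True)
    optimizer = torch.optim.Adam(model.parameters())
    criterion = torch.nn.CrossEntropyLoss()
    model.train()
    for x, y in loader:
        optimizer.zero_grad()
        criterion(model(x), y).backward()
        optimizer.step()


def eval_fn(model: torch.nn.Module, dataset: Dataset) -> Mapping[str, float]:
    # Return a dictionary of metrics; Sufficiency stores and averages these.
    loader = DataLoader(dataset, batch_size=16)
    model.eval()
    correct = total = 0
    with torch.no_grad():
        for x, y in loader:
            correct += (model(x).argmax(dim=1) == y).sum().item()
            total += y.numel()
    return {"accuracy": correct / total}
```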
@@ -159,13 +166,22 @@ class Sufficiency(Generic[T]):
159
166
  @set_metadata(state=["runs", "substeps"])
160
167
  def evaluate(self, eval_at: int | Iterable[int] | None = None) -> SufficiencyOutput:
161
168
  """
162
- Creates data indices, trains models, and returns plotting data
169
+ Train and evaluate a model over multiple substeps
170
+
171
+ This function trains a model up to each step calculated from substeps. The model is then evaluated
172
+ at that step and trained from 0 to the next step. This repeats for all substeps. Once a model has been
173
+ trained and evaluated at all substeps, if runs is greater than one, the model weights are reset and
174
+ the process is repeated.
175
+
176
+ During each evaluation, the metrics returned as a dictionary by the given evaluation function are stored
177
 + and then averaged across runs once all runs are complete.
163
178
 
164
179
  Parameters
165
180
  ----------
166
181
  eval_at : int | Iterable[int] | None, default None
167
- Specify this to collect accuracies over a specific set of dataset lengths, rather
168
- than letting :term:`sufficiency<Sufficiency>` internally create the lengths to evaluate at.
182
+ Specify this to collect metrics over a specific set of dataset lengths.
183
 + If `None`, the steps to evaluate at are calculated by
184
 + `np.geomspace` over the length of the dataset for `self.substeps` steps.
169
185
 
170
186
  Returns
171
187
  -------
@@ -179,6 +195,8 @@ class Sufficiency(Generic[T]):
179
195
 
180
196
  Examples
181
197
  --------
198
+ Default runs and substeps
199
+
182
200
  >>> suff = Sufficiency(
183
201
  ... model=model,
184
202
  ... train_ds=train_ds,
@@ -190,6 +208,31 @@ class Sufficiency(Generic[T]):
190
208
  ... )
191
209
  >>> suff.evaluate()
192
210
  SufficiencyOutput(steps=array([ 1, 3, 10, 31, 100], dtype=uint32), measures={'test': array([1., 1., 1., 1., 1.])}, n_iter=1000)
211
+
212
+ Evaluate at a single value
213
+
214
+ >>> suff = Sufficiency(
215
+ ... model=model,
216
+ ... train_ds=train_ds,
217
+ ... test_ds=test_ds,
218
+ ... train_fn=train_fn,
219
+ ... eval_fn=eval_fn,
220
+ ... )
221
+ >>> suff.evaluate(eval_at=50)
222
+ SufficiencyOutput(steps=array([50]), measures={'test': array([1.])}, n_iter=1000)
223
+
224
 + Evaluate at linear steps from 0 to 100 inclusive
225
+
226
+ >>> suff = Sufficiency(
227
+ ... model=model,
228
+ ... train_ds=train_ds,
229
+ ... test_ds=test_ds,
230
+ ... train_fn=train_fn,
231
+ ... eval_fn=eval_fn,
232
+ ... )
233
+ >>> suff.evaluate(eval_at=np.arange(0, 101, 20))
234
+ SufficiencyOutput(steps=array([ 0, 20, 40, 60, 80, 100]), measures={'test': array([1., 1., 1., 1., 1., 1.])}, n_iter=1000)
235
+
193
236
  """ # noqa: E501
194
237
  if eval_at is not None:
195
238
  ranges = np.asarray(list(eval_at) if isinstance(eval_at, Iterable) else [eval_at])
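The new `eval_at` description says the default steps come from `np.geomspace` over the dataset length, and the example output above (`steps=array([ 1, 3, 10, 31, 100], dtype=uint32)`) is consistent with that. A rough sketch of how such steps could be derived, where the start value of 1, the dataset length of 100, and the `uint32` cast are assumptions inferred from the example rather than DataEval's actual implementation:

```python
import numpy as np

dataset_length = 100  # hypothetical length of train_ds
substeps = 5          # the documented default

# Geometrically spaced dataset sizes from 1 up to the full dataset.
steps = np.geomspace(1, dataset_length, substeps).astype(np.uint32)
print(steps)  # [  1   3  10  31 100]
```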
@@ -1,45 +1,52 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.4
2
2
  Name: dataeval
3
- Version: 0.86.7
3
+ Version: 0.86.9
4
4
  Summary: DataEval provides a simple interface to characterize image data and its impact on model performance across classification and object-detection tasks
5
- Home-page: https://dataeval.ai/
6
- License: MIT
7
- Author: Andrew Weng
8
- Author-email: andrew.weng@ariacoustics.com
9
- Maintainer: ARiA
10
- Maintainer-email: dataeval@ariacoustics.com
11
- Requires-Python: >=3.9,<3.13
5
+ Project-URL: Homepage, https://dataeval.ai/
6
+ Project-URL: Repository, https://github.com/aria-ml/dataeval/
7
+ Project-URL: Documentation, https://dataeval.readthedocs.io/
8
+ Author-email: Andrew Weng <andrew.weng@ariacoustics.com>, Bill Peria <bill.peria@ariacoustics.com>, Jon Botts <jonathan.botts@ariacoustics.com>, Jonathan Christian <jonathan.christian@ariacoustics.com>, Justin McMillan <justin.mcmillan@ariacoustics.com>, Ryan Wood <ryan.wood@ariacoustics.com>, Scott Swan <scott.swan@ariacoustics.com>, Shaun Jullens <shaun.jullens@ariacoustics.com>
9
+ Maintainer-email: ARiA <dataeval@ariacoustics.com>
10
+ License-Expression: MIT
11
+ License-File: LICENSE.txt
12
12
  Classifier: Development Status :: 4 - Beta
13
13
  Classifier: Intended Audience :: Science/Research
14
14
  Classifier: License :: OSI Approved :: MIT License
15
15
  Classifier: Operating System :: OS Independent
16
- Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3 :: Only
17
17
  Classifier: Programming Language :: Python :: 3.9
18
18
  Classifier: Programming Language :: Python :: 3.10
19
19
  Classifier: Programming Language :: Python :: 3.11
20
20
  Classifier: Programming Language :: Python :: 3.12
21
- Classifier: Programming Language :: Python :: 3 :: Only
22
21
  Classifier: Topic :: Scientific/Engineering
22
+ Requires-Python: <3.13,>=3.9
23
+ Requires-Dist: defusedxml>=0.7.1
24
+ Requires-Dist: fast-hdbscan==0.2.0
25
+ Requires-Dist: lightgbm>=4
26
+ Requires-Dist: numba>=0.59.1
27
+ Requires-Dist: numpy>=1.24.2
28
+ Requires-Dist: pandas>=2.0
29
+ Requires-Dist: pillow>=10.3.0
30
+ Requires-Dist: polars>=1.0.0
31
+ Requires-Dist: requests>=2.32.3
32
+ Requires-Dist: scikit-learn>=1.5.0
33
+ Requires-Dist: scipy>=1.10
34
+ Requires-Dist: torch>=2.2.0
35
+ Requires-Dist: torchvision>=0.17.0
36
+ Requires-Dist: tqdm>=4.66
37
+ Requires-Dist: typing-extensions>=4.12; python_version ~= '3.9'
38
+ Requires-Dist: xxhash>=3.3
23
39
  Provides-Extra: all
24
- Requires-Dist: defusedxml (>=0.7.1)
25
- Requires-Dist: fast_hdbscan (==0.2.0)
26
- Requires-Dist: lightgbm (>=4)
27
- Requires-Dist: matplotlib (>=3.7.1) ; extra == "all"
28
- Requires-Dist: numba (>=0.59.1)
29
- Requires-Dist: numpy (>=1.24.2)
30
- Requires-Dist: pandas (>=2.0)
31
- Requires-Dist: pillow (>=10.3.0)
32
- Requires-Dist: polars (>=1.0.0)
33
- Requires-Dist: requests
34
- Requires-Dist: scikit-learn (>=1.5.0)
35
- Requires-Dist: scipy (>=1.10)
36
- Requires-Dist: torch (>=2.2.0)
37
- Requires-Dist: torchvision (>=0.17.0)
38
- Requires-Dist: tqdm
39
- Requires-Dist: typing-extensions (>=4.12) ; python_version >= "3.9" and python_version < "4.0"
40
- Requires-Dist: xxhash (>=3.3)
41
- Project-URL: Documentation, https://dataeval.readthedocs.io/
42
- Project-URL: Repository, https://github.com/aria-ml/dataeval/
40
+ Requires-Dist: matplotlib>=3.7.1; extra == 'all'
41
+ Provides-Extra: cpu
42
+ Requires-Dist: torch>=2.2.0; extra == 'cpu'
43
+ Requires-Dist: torchvision>=0.17.0; extra == 'cpu'
44
+ Provides-Extra: cu118
45
+ Requires-Dist: torch>=2.2.0; extra == 'cu118'
46
+ Requires-Dist: torchvision>=0.17.0; extra == 'cu118'
47
+ Provides-Extra: cu124
48
+ Requires-Dist: torch>=2.2.0; extra == 'cu124'
49
+ Requires-Dist: torchvision>=0.17.0; extra == 'cu124'
43
50
  Description-Content-Type: text/markdown
44
51
 
45
52
  # DataEval
@@ -72,26 +79,28 @@ estimation, bias detection, and dataset linting.
72
79
  <!-- end needs -->
73
80
 
74
81
  <!-- start JATIC interop -->
82
+
75
83
  DataEval is easy to install, supports a wide range of Python versions, and is
76
84
  compatible with many of the most popular packages in the scientific and T&E
77
85
  communities.
78
86
 
79
87
  DataEval also has native interoperability between JATIC's suite of tools when
80
88
  using MAITE-compliant datasets and models.
89
+
81
90
  <!-- end JATIC interop -->
82
91
 
83
92
  ## Getting Started
84
93
 
85
94
  **Python versions:** 3.9 - 3.12
86
95
 
87
- **Supported packages**: *NumPy*, *Pandas*, *Sci-kit learn*, *MAITE*, *NRTK*
96
+ **Supported packages**: _NumPy_, _Pandas_, _Sci-kit learn_, _MAITE_, _NRTK_
88
97
 
89
98
  Choose your preferred method of installation below or follow our
90
99
  [installation guide](https://dataeval.readthedocs.io/en/v0.74.2/installation.html).
91
100
 
92
- * [Installing with pip](#installing-with-pip)
93
- * [Installing with conda/mamba](#installing-with-conda)
94
- * [Installing from GitHub](#installing-from-github)
101
+ - [Installing with pip](#installing-with-pip)
102
+ - [Installing with conda/mamba](#installing-with-conda)
103
+ - [Installing from GitHub](#installing-from-github)
95
104
 
96
105
  ### **Installing with pip**
97
106
 
@@ -105,7 +114,7 @@ pip install dataeval[all]
105
114
  ### **Installing with conda**
106
115
 
107
116
  DataEval can be installed in a Conda/Mamba environment using the provided
108
- `environment.yaml` file. As some dependencies are installed from the `pytorch`
117
+ `environment.yaml` file. As some dependencies are installed from the `pytorch`
109
118
  channel, the channel is specified in the below example.
110
119
 
111
120
  ```bash
@@ -115,12 +124,10 @@ micromamba create -f environment\environment.yaml -c pytorch
115
124
  ### **Installing from GitHub**
116
125
 
117
126
  To install DataEval from source locally on Ubuntu, you will need `git-lfs` to
118
- download larger, binary source files and `poetry` for project dependency
119
- management.
127
+ download larger, binary source files.
120
128
 
121
129
  ```bash
122
130
  sudo apt-get install git-lfs
123
- pip install poetry
124
131
  ```
125
132
 
126
133
  Pull the source down and change to the DataEval project directory.
@@ -130,26 +137,40 @@ git clone https://github.com/aria-ml/dataeval.git
130
137
  cd dataeval
131
138
  ```
132
139
 
133
- Install DataEval with optional dependencies for development.
140
+ #### **Using Poetry**
141
+
142
+ Install DataEval with all extras.
134
143
 
135
144
  ```bash
136
- poetry install --all-extras --with dev
145
+ poetry install --extras=all
137
146
  ```
138
147
 
139
- Now that DataEval is installed, you can run commands in the poetry virtual
140
- environment by prefixing shell commands with `poetry run`, or activate the
141
- virtual environment directly in the shell.
148
+ Enable Poetry's virtual environment.
142
149
 
143
150
  ```bash
144
- poetry shell
151
+ poetry env activate
152
+ ```
153
+
154
+ #### **Using uv**
155
+
156
+ Install DataEval with all extras and dependencies for development.
157
+
158
+ ```bash
159
+ uv sync --extra=all
160
+ ```
161
+
162
+ Enable uv's virtual environment.
163
+
164
+ ```bash
165
+ source .venv/bin/activate
145
166
  ```
146
167
 
147
168
  ## Contact Us
148
169
 
149
170
  If you have any questions, feel free to reach out to the people below:
150
171
 
151
- * **POC**: Scott Swan @scott.swan
152
- * **DPOC**: Andrew Weng @aweng
172
+ - **POC**: Scott Swan @scott.swan
173
+ - **DPOC**: Andrew Weng @aweng
153
174
 
154
175
  ## Acknowledgement
155
176
 
@@ -164,4 +185,3 @@ interpreted as necessarily representing the official policies or endorsements,
164
185
  either expressed or implied, of the U.S. Government.
165
186
 
166
187
  <!-- end acknowledgement -->
167
-
@@ -1,6 +1,9 @@
1
- dataeval/__init__.py,sha256=P6WvVjHlE2nH57bXBR4A9ez6R32OQGm9bshYrxRKwFw,1636
1
+ dataeval/__init__.py,sha256=dEDltdHOnbk4-XAbQwJLOZtCbRLZsDMnptWRwbF2r54,1773
2
2
  dataeval/_log.py,sha256=C7AGkIRzymvYJ0LQXtnShiy3i5Xrp8T58JzIHHguk_Q,365
3
- dataeval/config.py,sha256=bHa8np4FCtLLv8_xlfdDC4lb1InJ_kT0vXDO5P42rvk,4082
3
+ dataeval/_version.py,sha256=NKlNIBKyuGsE6TJjC6ieMwWJh-T6f3KPhk_0sXgjByQ,513
4
+ dataeval/config.py,sha256=g3Np0Q3J5Rzij6Gsz7tJh7eOxgwNPf6NsFYmAR8Atfs,4219
5
+ dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ dataeval/typing.py,sha256=W8rqFFkAqE5a5ar3MmB-O5gcMJqvoDKXC8Y0ggBqAKo,7216
4
7
  dataeval/data/__init__.py,sha256=wzQ6uUFLNB3VJR0a2QnRBYwEmwXT93q0WpHu7FmFW1E,486
5
8
  dataeval/data/_embeddings.py,sha256=PFjpdV9bfusCB4taTIYSzx1hP8nJb_KCkZTN8kMw-Hs,12885
6
9
  dataeval/data/_images.py,sha256=Rc_59CuU4zfN7Xm7an1XUx8ZghQg6a56VJWMZD9edRw,2654
@@ -21,21 +24,21 @@ dataeval/detectors/drift/_base.py,sha256=6aNF1LzG3w1sNUrmSBbsvuN5IkQnoRikRacqobY
21
24
  dataeval/detectors/drift/_cvm.py,sha256=cS33zWJmFY1fft1XcANcP2jSD5ou7TxvIU2AldhTynM,3004
22
25
  dataeval/detectors/drift/_ks.py,sha256=uMc5-NA-lSV1IODrY8uJe87ll3uRJT_oXLJFXy95M1w,3186
23
26
  dataeval/detectors/drift/_mmd.py,sha256=uw8axM6dWxTBrCaXwkbldIDcdhe4hmim9yrsbuOwA-0,11523
24
- dataeval/detectors/drift/_mvdc.py,sha256=ABxGut6KzxF_oM-Hs87WARCR0692dhPVdZNoGGwJaa4,3058
27
+ dataeval/detectors/drift/_mvdc.py,sha256=WMN6aDOWCh1q1MtdRXFIZlFcfnVi4XgBHsS0A6L5UuY,2942
28
+ dataeval/detectors/drift/_uncertainty.py,sha256=BHlykJ-r7TGLJxdPfoazXnoAJ1qVDzbk5HjAMdsnHz8,5847
29
+ dataeval/detectors/drift/updates.py,sha256=L1PnrPlIE1x6ujCc5mCwjcAZwadVTn-Zjb6MnTDvzJQ,2251
25
30
  dataeval/detectors/drift/_nml/__init__.py,sha256=MNyKyZlfTjr5uQql2uBBfRkUdsuduie_WJdn09GYmqg,137
26
31
  dataeval/detectors/drift/_nml/_base.py,sha256=o34LcCsD9p1A6u8UdQn-dxIVwC2CMr6uCpC0vq16JX0,2663
27
- dataeval/detectors/drift/_nml/_chunk.py,sha256=t12eouanRNiu5DJXOaYDZXUvFMqfcp1BETLOufdV79M,13567
32
+ dataeval/detectors/drift/_nml/_chunk.py,sha256=xF3U-CAobzoKX-20yjWjGVD14IOcAV6rPaIrqCMwGdQ,13564
28
33
  dataeval/detectors/drift/_nml/_domainclassifier.py,sha256=n7Ttq5Ej7sAY9Jn2iagaGj4IIWiG8gmA3wwFizlBqes,7292
29
34
  dataeval/detectors/drift/_nml/_result.py,sha256=TMK17bnlgSdL0MCRHtQZJO8YoWWe4C2kh_akESrlP1g,3269
30
35
  dataeval/detectors/drift/_nml/_thresholds.py,sha256=WGdkLei9w_EvvsRHQzWdDyFVoZHIwM78k_aB3eoh31Q,12060
31
- dataeval/detectors/drift/_uncertainty.py,sha256=BHlykJ-r7TGLJxdPfoazXnoAJ1qVDzbk5HjAMdsnHz8,5847
32
- dataeval/detectors/drift/updates.py,sha256=L1PnrPlIE1x6ujCc5mCwjcAZwadVTn-Zjb6MnTDvzJQ,2251
33
36
  dataeval/detectors/linters/__init__.py,sha256=xn2zPwUcmsuf-Jd9uw6AVI11C9z1b1Y9fYtuFnXenZ0,404
34
37
  dataeval/detectors/linters/duplicates.py,sha256=X5WSEvI_BHkLoXjkaHK6wTnSkx4IjpO_exMRjSlhc70,4963
35
38
  dataeval/detectors/linters/outliers.py,sha256=GaM9n8yPgBPzVOL_bxJCj0eCwobEEP4JHKHD9liRdlw,10130
36
39
  dataeval/detectors/ood/__init__.py,sha256=juCYBDs7CQEAtMhnEpPqF6uTrOIH9kTBSuQ_GRw6a8o,283
37
- dataeval/detectors/ood/ae.py,sha256=fTrUfFxv6xUqzKpwMC8rW3JrizA16M_bgzqLuBKMrS0,2944
38
- dataeval/detectors/ood/base.py,sha256=9b-Ljznf0lB1SXF4F_Aj3eJ4Y3ijGEDPMjucUsWOGJM,3051
40
+ dataeval/detectors/ood/ae.py,sha256=cJ7nq4iwTvW8uihHCUhGfTlKsAlthJ2tOhgSsB27cOY,2941
41
+ dataeval/detectors/ood/base.py,sha256=hx-TPJnUTZ7KcBkm8SbN1RGhtJyQN0XLajDyNqiZrJo,3042
39
42
  dataeval/detectors/ood/mixin.py,sha256=0_o-1HPvgf3-Lf1MSOIfjj5UB8LTLEBGYtJJfyCCzwc,5431
40
43
  dataeval/metadata/__init__.py,sha256=XDDmJbOZBNM6pL0r6Nbu6oMRoyAh22IDkPYGndNlkZU,316
41
44
  dataeval/metadata/_distance.py,sha256=MbXM9idsooNWnGLaTKg8j4ZqavUeJUjuW7EPW3-UQyg,4234
@@ -44,14 +47,14 @@ dataeval/metadata/_utils.py,sha256=BcGoYVfA4AkAWpInY5txOc3QBpsGf6cnnUAsHOQTJAE,1
44
47
  dataeval/metrics/__init__.py,sha256=8VC8q3HuJN3o_WN51Ae2_wXznl3RMXIvA5GYVcy7vr8,225
45
48
  dataeval/metrics/bias/__init__.py,sha256=329S1_3WnWqeU4-qVcbe0fMy4lDrj9uKslWHIQf93yg,839
46
49
  dataeval/metrics/bias/_balance.py,sha256=fREtoMLUZPOf_ivqNKwij6oPiKMTk02ECO5rWURf3KY,5541
47
- dataeval/metrics/bias/_completeness.py,sha256=BysXU2Jpw33n5dl3acJFEqF3mFGiJLsfG4n5Q2fkTaY,4608
48
- dataeval/metrics/bias/_coverage.py,sha256=PeUoOiaghUEdn6Ov8z2-am7-fnBVIPcFbJK7Ty5JObA,3647
50
+ dataeval/metrics/bias/_completeness.py,sha256=2cvOXe7fhtxZGH_4QBuiCafIeamxFBarMiUBuEP7QGI,4596
51
+ dataeval/metrics/bias/_coverage.py,sha256=v2x2hbOf2za9jFcSVSJUAoJ2BJfzzlCzt0mFIGtBL0A,3639
49
52
  dataeval/metrics/bias/_diversity.py,sha256=25udDKmel9IjeVT5nM4dOa1apda66QdRxBc922yuUvI,5830
50
- dataeval/metrics/bias/_parity.py,sha256=Kmzr9-NXxGzGtj6A-qUa88FTGaRyJU2xQj7tsplXJH4,11427
53
+ dataeval/metrics/bias/_parity.py,sha256=MKpqL4aoqEHkRl0vtGvVq9V3KBOtDFTtAo5I2GfIG4A,11443
51
54
  dataeval/metrics/estimators/__init__.py,sha256=Pnds8uIyAovt2fKqZjiHCIP_kVoBWlVllekYuK5UmmU,568
52
- dataeval/metrics/estimators/_ber.py,sha256=C30E5LiGGTAfo31zWFYDptDg0R7CTJGJ-a60YgzSkYY,5382
55
+ dataeval/metrics/estimators/_ber.py,sha256=7noeRyOJJYqrJ_jt90nRHtR2t2u5MIvTCmWt0_rd4EU,5370
53
56
  dataeval/metrics/estimators/_clusterer.py,sha256=1HrpihGTJ63IkNSOy4Ibw633Gllkm1RxKmoKT5MOgt0,1434
54
- dataeval/metrics/estimators/_divergence.py,sha256=-np4nWNtRrHnvo4xdWuTzkyJJmobyjDnVDBOMjtBS1Y,4003
57
+ dataeval/metrics/estimators/_divergence.py,sha256=t-Z_7Bq4V4FunxKlq7G4ThtgLany8n4iEU0n0afr7F8,3991
55
58
  dataeval/metrics/estimators/_uap.py,sha256=BULEBbJ9BQ1IcTeZf0x7iI60QHAWCccBOM97FIu9VXA,1928
56
59
  dataeval/metrics/stats/__init__.py,sha256=6tA_9nbbM5ObJ6cds8Y1VBtTQiTOxrpGQSFLu_lWGGA,1098
57
60
  dataeval/metrics/stats/_base.py,sha256=R-hxoEPLreZcxYxBfyjbKfdoGMMTPiqJ5g2zSO-1UYM,12541
@@ -66,19 +69,17 @@ dataeval/outputs/__init__.py,sha256=geHB5M3QOiFFaQGV4ZwDTTKpqZPvPePbqG7lzaPhaXQ,
66
69
  dataeval/outputs/_base.py,sha256=-Wa0gFcBVLbfWPMZyCql7x4vGsnkLP4pecsQIeUZ2_Y,5904
67
70
  dataeval/outputs/_bias.py,sha256=1OZpKncYTryjPLRHb4d6NlhE27uPT57gCob_5jtjKDI,10456
68
71
  dataeval/outputs/_drift.py,sha256=hXILED_soY8ppIQZgftQvmumtwDrTnABbYl-flIGEU4,4588
69
- dataeval/outputs/_estimators.py,sha256=mh-R08CgYtmq9ffANDMYR-V4vrZnSjOjEyOMiMDZ2Ic,3091
72
+ dataeval/outputs/_estimators.py,sha256=IQgSbOPHYzzxn1X64XF2XxQhDlWy6jwy6RNyoyvsipE,3111
70
73
  dataeval/outputs/_linters.py,sha256=k8lkd8EZ23q0m-HOD-FgqMcLQFy1UH7vws2ucLPyn08,6697
71
74
  dataeval/outputs/_metadata.py,sha256=ffZgpX8KWURPHXpOWjbvJ2KRqWQkS2nWuIjKUzoHhMI,1710
72
75
  dataeval/outputs/_ood.py,sha256=suLKVXULGtXH0rq9eXHI1d3d2jhGmItJtz4QiQd47A4,1718
73
76
  dataeval/outputs/_stats.py,sha256=_ItGjs9YaMHqjivkR1YBcSErD5ICfa_-iV9nq0l8bTM,17451
74
77
  dataeval/outputs/_utils.py,sha256=NfhYaGT2PZlhIs8ICKUsPWHZXjhWYDkEJqBDdqMeaOM,929
75
78
  dataeval/outputs/_workflows.py,sha256=K786mOgegxVi81diUA-qpbwGEkwa8YA7Fk4ttgjJeaY,10831
76
- dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
77
- dataeval/typing.py,sha256=W8rqFFkAqE5a5ar3MmB-O5gcMJqvoDKXC8Y0ggBqAKo,7216
78
79
  dataeval/utils/__init__.py,sha256=hRvyUK7b3d6JBEV5u47rFcOHEcmDYqAvZQw_T5pDAWw,264
79
- dataeval/utils/_array.py,sha256=ftX8S6HKAIUOuc1xd30VC3Pz5yUzRglDpCLisWY_tHs,5888
80
+ dataeval/utils/_array.py,sha256=bIDbnv15_hNzFn2Uc4WV1qRyFzubQj2nNYsFUDIdwT0,6335
80
81
  dataeval/utils/_bin.py,sha256=w3eJ2Szw5eapqQ0cGv731rhNgLFGW0cCz2pXo9I6CuY,7296
81
- dataeval/utils/_clusterer.py,sha256=XmyW2j_JLMYLds8QYgV0nAfdqxWfNR0ZI-6rnZsyHwU,5630
82
+ dataeval/utils/_clusterer.py,sha256=rUvEdyMwp95lffmt6xKMEwsjRXNoBS0n5mAS_HNOnck,5656
82
83
  dataeval/utils/_fast_mst.py,sha256=pv42flr1Uf5RBa9qDG0YLDXWH7Mr7a9zpauO1HqZXaY,8061
83
84
  dataeval/utils/_image.py,sha256=4uxTIOYZZlRJOfNmdA3ek3no3FrLWCK5un48kStMDt8,3578
84
85
  dataeval/utils/_method.py,sha256=9B9JQbgqWJBRhQJb7glajUtWaQzUTIUuvrZ9_bisxsM,394
@@ -88,26 +89,27 @@ dataeval/utils/data/__init__.py,sha256=xGzrjrOxOP2DP1tU84AWMKPnSxFvSjM81CTlDg4rN
88
89
  dataeval/utils/data/_dataset.py,sha256=tC_vqgWnmojAoAANo5BUVfEUYXl7GzOBSeYjR9olbDk,9506
89
90
  dataeval/utils/data/collate.py,sha256=5egEEKhNNCGeNLChO1p6dZ4Wg6x51VEaMNHz7hEZUxI,3936
90
91
  dataeval/utils/data/metadata.py,sha256=L1c2bCiMj0aR0QCoKkjwBujIftJDEMgW_3ZbgeS8WHo,14703
91
- dataeval/utils/datasets/__init__.py,sha256=pAXqHX76yAoBI8XB3m6zGuW-u3s3PCoIXG5GDzxH7Zs,572
92
- dataeval/utils/datasets/_antiuav.py,sha256=kA_ia1fYNcJiz9SpCvh-Z8iSc7iJrdogjBI3soyaa7A,8304
93
- dataeval/utils/datasets/_base.py,sha256=pyfpJda3ku469M3TFRsJn9S2oAiQODOGTlLcdcoEW9U,9031
92
+ dataeval/utils/datasets/__init__.py,sha256=8sEQwOixx9OMkwaU0u9Hl2Cdcb5095tJzz5dgqgdNKc,643
93
+ dataeval/utils/datasets/_antiuav.py,sha256=CvqFIBEu8X1MmKzyUBTw1uzog2DWppiJ6ZynVNp8mv0,8320
94
+ dataeval/utils/datasets/_base.py,sha256=hMVza1lN9yhLVLSR7ucw1cQKn3s8UGdq74NV8MN6ZYo,9285
94
95
  dataeval/utils/datasets/_cifar10.py,sha256=hZc_A30yKYBbv2kvVdEkZ9egyEe6XBUnmksoIAoJ-5Y,8265
95
- dataeval/utils/datasets/_fileio.py,sha256=LEoFVNdryRdi7mKpWw-9D8lA6XMa-Jaszd85bv93POo,5454
96
- dataeval/utils/datasets/_milco.py,sha256=iXf4C1I3Eg_3gHKUe4XPi21yFMBO51zxTIqAkGf9bYg,7869
96
+ dataeval/utils/datasets/_fileio.py,sha256=hMxGm-OnsDMj8_xbbHgv9YSxpqm_8NXWQrj53rDg-nQ,5451
97
+ dataeval/utils/datasets/_milco.py,sha256=yWAqH-Dxe2ZYWG8dW89j4SQHh8O_Ys90LurGKFYfSU4,7885
97
98
  dataeval/utils/datasets/_mixin.py,sha256=S8iii-SoYUsFFYNXjw2thlZkpBvRLnZ4XI8wTqOKXgU,1729
98
99
  dataeval/utils/datasets/_mnist.py,sha256=uz46sE1Go3TgGjG6x2cXckSVQ0mSg2mhgk8BUvLWjb0,8149
100
+ dataeval/utils/datasets/_seadrone.py,sha256=daRjeRNaa5CPhwr1nelbTXaJrF5H6nUbz4scH3gCl8g,270979
99
101
  dataeval/utils/datasets/_ships.py,sha256=6U04HAoM3jgLl1qv-NnxjZeSsBipcqWJBMhBMn5iIUY,5115
100
102
  dataeval/utils/datasets/_types.py,sha256=iSKyHXRlGuomXs0FHK6md8lXLQrQQ4fxgVOwr4o81bo,1089
101
- dataeval/utils/datasets/_voc.py,sha256=pafY112O80isYkrdy7Quie9SBm_TmYhREuyl8SxtsR0,24586
103
+ dataeval/utils/datasets/_voc.py,sha256=P11jLIMo87_f8xBLWCMAX1-lA5OGCowmfWpVxpQJFEc,24623
102
104
  dataeval/utils/torch/__init__.py,sha256=dn5mjCrFp0b1aL_UEURhONU0Ag0cmXoTOBSGagpkTiA,325
103
105
  dataeval/utils/torch/_blocks.py,sha256=HVhBTMMD5NA4qheMUgyol1KWiKZDIuc8k5j4RcMKmhk,1466
104
106
  dataeval/utils/torch/_gmm.py,sha256=XM68GNEP97EjaB1U49-ZXRb81d0CEFnPS910alrcB3g,3740
105
- dataeval/utils/torch/_internal.py,sha256=HuyBB7NWFI9sUrRbOCZFxOfZjRGPdqr5iF7_DT2S0wo,4159
107
+ dataeval/utils/torch/_internal.py,sha256=9rzlMeM8i3p-ctulh9WDQATMXtlp-Jk2pBX7NGC8l2I,4146
106
108
  dataeval/utils/torch/models.py,sha256=1idpXyjrYcCBSsbxxRUOto8xr4MJNjDEqQHiIXVU5Zc,9700
107
- dataeval/utils/torch/trainer.py,sha256=Oc2lK13uPGhmLYbmAqlPWyKxgG4YJFlnSXCqFHUZbdA,5528
109
+ dataeval/utils/torch/trainer.py,sha256=DRyPScGdE4o5Xo3BmD9p2PGOApzi1E-QfsBRNZ5IXW8,5544
108
110
  dataeval/workflows/__init__.py,sha256=ou8y0KO-d6W5lgmcyLjKlf-J_ckP3vilW7wHkgiDlZ4,255
109
- dataeval/workflows/sufficiency.py,sha256=j-R8dg4XE6a66p_oTXG2GNzgg3vGk85CTblxhFXaxog,8513
110
- dataeval-0.86.7.dist-info/LICENSE.txt,sha256=uAooygKWvX6NbU9Ran9oG2msttoG8aeTeHSTe5JeCnY,1061
111
- dataeval-0.86.7.dist-info/METADATA,sha256=7FTgPB4Yj2zF7z2B6IIRe9WFc9VCBqrcFEIf5ByVHdw,5353
112
- dataeval-0.86.7.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
113
- dataeval-0.86.7.dist-info/RECORD,,
111
+ dataeval/workflows/sufficiency.py,sha256=UAPjowFrmM6IJJaOk9GkH3nfQTyDy2_zOY55o2g3G1M,10072
112
+ dataeval-0.86.9.dist-info/METADATA,sha256=qUho4Ureh4Pfo91py79pNNUK5yu0x0c6K8R29Al5yQ4,5925
113
+ dataeval-0.86.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
114
+ dataeval-0.86.9.dist-info/licenses/LICENSE.txt,sha256=uAooygKWvX6NbU9Ran9oG2msttoG8aeTeHSTe5JeCnY,1061
115
+ dataeval-0.86.9.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 1.9.1
2
+ Generator: hatchling 1.27.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any