dataeval 0.72.2__py3-none-any.whl → 0.73.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. dataeval/__init__.py +3 -3
  2. dataeval/detectors/__init__.py +1 -1
  3. dataeval/detectors/drift/__init__.py +1 -1
  4. dataeval/detectors/drift/base.py +2 -2
  5. dataeval/detectors/linters/clusterer.py +1 -1
  6. dataeval/detectors/ood/__init__.py +1 -1
  7. dataeval/detectors/ood/ae.py +14 -6
  8. dataeval/detectors/ood/aegmm.py +14 -6
  9. dataeval/detectors/ood/base.py +9 -3
  10. dataeval/detectors/ood/llr.py +22 -16
  11. dataeval/detectors/ood/vae.py +14 -6
  12. dataeval/detectors/ood/vaegmm.py +14 -6
  13. dataeval/interop.py +9 -7
  14. dataeval/metrics/bias/balance.py +50 -44
  15. dataeval/metrics/bias/coverage.py +38 -6
  16. dataeval/metrics/bias/diversity.py +117 -65
  17. dataeval/metrics/bias/metadata.py +225 -60
  18. dataeval/metrics/bias/parity.py +68 -54
  19. dataeval/utils/__init__.py +4 -3
  20. dataeval/utils/lazy.py +26 -0
  21. dataeval/utils/metadata.py +258 -0
  22. dataeval/utils/shared.py +1 -1
  23. dataeval/utils/split_dataset.py +12 -6
  24. dataeval/utils/tensorflow/_internal/gmm.py +8 -2
  25. dataeval/utils/tensorflow/_internal/loss.py +20 -11
  26. dataeval/utils/tensorflow/_internal/{pixelcnn.py → models.py} +371 -77
  27. dataeval/utils/tensorflow/_internal/trainer.py +12 -5
  28. dataeval/utils/tensorflow/_internal/utils.py +70 -71
  29. dataeval/utils/torch/datasets.py +2 -2
  30. dataeval/workflows/__init__.py +1 -1
  31. {dataeval-0.72.2.dist-info → dataeval-0.73.1.dist-info}/METADATA +3 -3
  32. {dataeval-0.72.2.dist-info → dataeval-0.73.1.dist-info}/RECORD +34 -33
  33. dataeval/utils/tensorflow/_internal/autoencoder.py +0 -316
  34. {dataeval-0.72.2.dist-info → dataeval-0.73.1.dist-info}/LICENSE.txt +0 -0
  35. {dataeval-0.72.2.dist-info → dataeval-0.73.1.dist-info}/WHEEL +0 -0
dataeval/utils/tensorflow/_internal/trainer.py
@@ -8,20 +8,27 @@ Licensed under Apache Software License (Apache 2.0)

 from __future__ import annotations

-from typing import Callable, Iterable, cast
+from typing import TYPE_CHECKING, Callable, Iterable, cast

 import numpy as np
-import tensorflow as tf
-import tf_keras as keras
 from numpy.typing import NDArray

+from dataeval.utils.lazy import lazyload
+
+if TYPE_CHECKING:
+    import tensorflow as tf
+    import tf_keras as keras
+else:
+    tf = lazyload("tensorflow")
+    keras = lazyload("tf_keras")
+

 def trainer(
     model: keras.Model,
     x_train: NDArray,
     y_train: NDArray | None = None,
     loss_fn: Callable[..., tf.Tensor] | None = None,
-    optimizer: keras.optimizers.Optimizer = keras.optimizers.Adam,
+    optimizer: keras.optimizers.Optimizer | None = None,
     preprocess_fn: Callable[[tf.Tensor], tf.Tensor] | None = None,
     epochs: int = 20,
     reg_loss_fn: Callable[[keras.Model], tf.Tensor] = (lambda _: cast(tf.Tensor, tf.Variable(0, dtype=tf.float32))),
@@ -58,7 +65,7 @@ def trainer(
         Whether to print training progress.
     """
     loss_fn = loss_fn() if isinstance(loss_fn, type) else loss_fn
-    optimizer = optimizer() if isinstance(optimizer, type) else optimizer
+    optimizer = keras.optimizers.Adam() if optimizer is None else optimizer

     train_data = (
         x_train.astype(np.float32) if y_train is None else (x_train.astype(np.float32), y_train.astype(np.float32))
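The TYPE_CHECKING split above is enabled by the new dataeval/utils/lazy.py (+26 lines, added in this release but not shown in this diff): type checkers still resolve the real tensorflow and tf_keras modules, while at runtime the import is deferred until an attribute is first touched. A minimal sketch of such a helper, assuming an importlib-based module proxy (the shipped implementation may differ):

    from importlib import import_module
    from types import ModuleType
    from typing import Any


    class _LazyModule(ModuleType):
        """Module proxy that performs the real import on first attribute access."""

        def __getattr__(self, attr: str) -> Any:
            # Import the real module, cache its namespace on the proxy so
            # later lookups bypass __getattr__, then delegate this lookup.
            module = import_module(self.__name__)
            self.__dict__.update(module.__dict__)
            return getattr(module, attr)


    def lazyload(name: str) -> ModuleType:
        return _LazyModule(name)

The optimizer change in the same file follows from this: the old default of keras.optimizers.Adam was evaluated when the function signature was built, which would force the TensorFlow import at module load; deferring to None and instantiating Adam inside the body keeps the import lazy.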
dataeval/utils/tensorflow/_internal/utils.py
@@ -9,25 +9,24 @@ Licensed under Apache Software License (Apache 2.0)
 from __future__ import annotations

 import math
-from typing import Any, Callable, Literal, Union, cast
+from typing import TYPE_CHECKING, Any, Callable, Literal, Union, cast

 import numpy as np
-import tensorflow as tf
-import tf_keras as keras
 from numpy.typing import NDArray
-from tensorflow._api.v2.nn import relu, softmax, tanh
-from tf_keras import Sequential
-from tf_keras.layers import (
-    Conv2D,
-    Conv2DTranspose,
-    Dense,
-    Flatten,
-    InputLayer,
-    Reshape,
-)

-from dataeval.utils.tensorflow._internal.autoencoder import AE, AEGMM, VAE, VAEGMM
-from dataeval.utils.tensorflow._internal.pixelcnn import PixelCNN
+from dataeval.utils.lazy import lazyload
+
+if TYPE_CHECKING:
+    import tensorflow as tf
+    import tensorflow._api.v2.nn as nn
+    import tf_keras as keras
+
+    import dataeval.utils.tensorflow._internal.models as tf_models
+else:
+    tf = lazyload("tensorflow")
+    nn = lazyload("tensorflow._api.v2.nn")
+    keras = lazyload("tf_keras")
+    tf_models = lazyload("dataeval.utils.tensorflow._internal.models")


 def predict_batch(
@@ -96,29 +95,29 @@ def predict_batch(


 def get_default_encoder_net(input_shape: tuple[int, int, int], encoding_dim: int):
-    return Sequential(
+    return keras.Sequential(
         [
-            InputLayer(input_shape=input_shape),
-            Conv2D(64, 4, strides=2, padding="same", activation=relu),
-            Conv2D(128, 4, strides=2, padding="same", activation=relu),
-            Conv2D(512, 4, strides=2, padding="same", activation=relu),
-            Flatten(),
-            Dense(encoding_dim),
+            keras.layers.InputLayer(input_shape=input_shape),
+            keras.layers.Conv2D(64, 4, strides=2, padding="same", activation=nn.relu),
+            keras.layers.Conv2D(128, 4, strides=2, padding="same", activation=nn.relu),
+            keras.layers.Conv2D(512, 4, strides=2, padding="same", activation=nn.relu),
+            keras.layers.Flatten(),
+            keras.layers.Dense(encoding_dim),
         ]
     )


 def get_default_decoder_net(input_shape: tuple[int, int, int], encoding_dim: int):
-    return Sequential(
+    return keras.Sequential(
         [
-            InputLayer(input_shape=(encoding_dim,)),
-            Dense(4 * 4 * 128),
-            Reshape(target_shape=(4, 4, 128)),
-            Conv2DTranspose(256, 4, strides=2, padding="same", activation=relu),
-            Conv2DTranspose(64, 4, strides=2, padding="same", activation=relu),
-            Flatten(),
-            Dense(math.prod(input_shape)),
-            Reshape(target_shape=input_shape),
+            keras.layers.InputLayer(input_shape=(encoding_dim,)),
+            keras.layers.Dense(4 * 4 * 128),
+            keras.layers.Reshape(target_shape=(4, 4, 128)),
+            keras.layers.Conv2DTranspose(256, 4, strides=2, padding="same", activation=nn.relu),
+            keras.layers.Conv2DTranspose(64, 4, strides=2, padding="same", activation=nn.relu),
+            keras.layers.Flatten(),
+            keras.layers.Dense(math.prod(input_shape)),
+            keras.layers.Reshape(target_shape=input_shape),
         ]
     )

@@ -149,13 +148,13 @@ def create_model(
     input_dim = math.prod(input_shape)
     encoding_dim = int(math.pow(2, int(input_dim.bit_length() * 0.8)) if encoding_dim is None else encoding_dim)
     if model_type == "AE":
-        return AE(
+        return tf_models.AE(
             get_default_encoder_net(input_shape, encoding_dim),
             get_default_decoder_net(input_shape, encoding_dim),
         )

     if model_type == "VAE":
-        return VAE(
+        return tf_models.VAE(
             get_default_encoder_net(input_shape, encoding_dim),
             get_default_decoder_net(input_shape, encoding_dim),
             encoding_dim,
@@ -165,36 +164,36 @@ def create_model(
         n_gmm = 2 if n_gmm is None else n_gmm
         gmm_latent_dim = 1 if gmm_latent_dim is None else gmm_latent_dim
         # The outlier detector is an encoder/decoder architecture
-        encoder_net = Sequential(
+        encoder_net = keras.Sequential(
             [
-                Flatten(),
-                InputLayer(input_shape=(input_dim,)),
-                Dense(60, activation=tanh),
-                Dense(30, activation=tanh),
-                Dense(10, activation=tanh),
-                Dense(gmm_latent_dim, activation=None),
+                keras.layers.Flatten(),
+                keras.layers.InputLayer(input_shape=(input_dim,)),
+                keras.layers.Dense(60, activation=nn.tanh),
+                keras.layers.Dense(30, activation=nn.tanh),
+                keras.layers.Dense(10, activation=nn.tanh),
+                keras.layers.Dense(gmm_latent_dim, activation=None),
             ]
         )
         # Here we define the decoder
-        decoder_net = Sequential(
+        decoder_net = keras.Sequential(
             [
-                InputLayer(input_shape=(gmm_latent_dim,)),
-                Dense(10, activation=tanh),
-                Dense(30, activation=tanh),
-                Dense(60, activation=tanh),
-                Dense(input_dim, activation=None),
-                Reshape(target_shape=input_shape),
+                keras.layers.InputLayer(input_shape=(gmm_latent_dim,)),
+                keras.layers.Dense(10, activation=nn.tanh),
+                keras.layers.Dense(30, activation=nn.tanh),
+                keras.layers.Dense(60, activation=nn.tanh),
+                keras.layers.Dense(input_dim, activation=None),
+                keras.layers.Reshape(target_shape=input_shape),
             ]
         )
         # GMM autoencoders have a density network too
-        gmm_density_net = Sequential(
+        gmm_density_net = keras.Sequential(
             [
-                InputLayer(input_shape=(gmm_latent_dim + 2,)),
-                Dense(10, activation=tanh),
-                Dense(n_gmm, activation=softmax),
+                keras.layers.InputLayer(input_shape=(gmm_latent_dim + 2,)),
+                keras.layers.Dense(10, activation=nn.tanh),
+                keras.layers.Dense(n_gmm, activation=nn.softmax),
             ]
         )
-        return AEGMM(
+        return tf_models.AEGMM(
             encoder_net=encoder_net,
             decoder_net=decoder_net,
             gmm_density_net=gmm_density_net,
@@ -206,35 +205,35 @@ def create_model(
         gmm_latent_dim = 2 if gmm_latent_dim is None else gmm_latent_dim
         # The outlier detector is an encoder/decoder architecture
         # Here we define the encoder
-        encoder_net = Sequential(
+        encoder_net = keras.Sequential(
             [
-                Flatten(),
-                InputLayer(input_shape=(input_dim,)),
-                Dense(20, activation=relu),
-                Dense(15, activation=relu),
-                Dense(7, activation=relu),
+                keras.layers.Flatten(),
+                keras.layers.InputLayer(input_shape=(input_dim,)),
+                keras.layers.Dense(20, activation=nn.relu),
+                keras.layers.Dense(15, activation=nn.relu),
+                keras.layers.Dense(7, activation=nn.relu),
             ]
         )
         # Here we define the decoder
-        decoder_net = Sequential(
+        decoder_net = keras.Sequential(
             [
-                InputLayer(input_shape=(gmm_latent_dim,)),
-                Dense(7, activation=relu),
-                Dense(15, activation=relu),
-                Dense(20, activation=relu),
-                Dense(input_dim, activation=None),
-                Reshape(target_shape=input_shape),
+                keras.layers.InputLayer(input_shape=(gmm_latent_dim,)),
+                keras.layers.Dense(7, activation=nn.relu),
+                keras.layers.Dense(15, activation=nn.relu),
+                keras.layers.Dense(20, activation=nn.relu),
+                keras.layers.Dense(input_dim, activation=None),
+                keras.layers.Reshape(target_shape=input_shape),
             ]
         )
         # GMM autoencoders have a density network too
-        gmm_density_net = Sequential(
+        gmm_density_net = keras.Sequential(
             [
-                InputLayer(input_shape=(gmm_latent_dim + 2,)),
-                Dense(10, activation=relu),
-                Dense(n_gmm, activation=softmax),
+                keras.layers.InputLayer(input_shape=(gmm_latent_dim + 2,)),
+                keras.layers.Dense(10, activation=nn.relu),
+                keras.layers.Dense(n_gmm, activation=nn.softmax),
             ]
         )
-        return VAEGMM(
+        return tf_models.VAEGMM(
             encoder_net=encoder_net,
             decoder_net=decoder_net,
             gmm_density_net=gmm_density_net,
@@ -243,7 +242,7 @@ def create_model(
         )

     if model_type == "PixelCNN":
-        return PixelCNN(
+        return tf_models.PixelCNN(
             image_shape=input_shape,
             num_resnet=5,
             num_hierarchies=2,
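Taken together, the create_model hunks above swap bare names (Sequential, Dense, relu, AE, PixelCNN) for attribute access on lazily loaded modules (keras.Sequential, keras.layers.Dense, nn.relu, tf_models.AE), so no TensorFlow symbol is resolved until the factory actually runs. A hedged usage sketch; the parameter names come from these hunks, but the full create_model signature is outside this diff and the keyword layout is an assumption:

    # Hypothetical usage of the factory rewritten above; requires the
    # tensorflow extra, since calling it triggers the deferred imports.
    from dataeval.utils.tensorflow._internal.utils import create_model

    ae = create_model(model_type="AE", input_shape=(32, 32, 3))
    vae = create_model(model_type="VAE", input_shape=(32, 32, 3), encoding_dim=128)
    aegmm = create_model(model_type="AEGMM", input_shape=(32, 32, 3), n_gmm=2, gmm_latent_dim=1)
    pixelcnn = create_model(model_type="PixelCNN", input_shape=(32, 32, 3))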
dataeval/utils/torch/datasets.py
@@ -206,7 +206,7 @@ class MNIST(Dataset[tuple[NDArray[np.float64], int]]):
        Option to select specific classes from dataset.
    balance : bool, default True
        If True, returns equal number of samples for each class.
-    randomize : bool, default False
+    randomize : bool, default True
        If True, shuffles the data prior to selection - uses a set seed for reproducibility.
    slice_back : bool, default False
        If True and size has a value greater than 0, then grabs selection starting at the last image.
@@ -251,7 +251,7 @@ class MNIST(Dataset[tuple[NDArray[np.float64], int]]):
         corruption: CorruptionStringMap | None = None,
         classes: TClassMap | None = None,
         balance: bool = True,
-        randomize: bool = False,
+        randomize: bool = True,
         slice_back: bool = False,
         verbose: bool = True,
     ) -> None:
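The only behavioral change here is the randomize default flipping from False to True, so MNIST now shuffles (with a set seed) before selection by default. Callers that relied on the old unshuffled ordering must opt out explicitly; a hedged sketch, where every constructor argument except those visible in this hunk (corruption, classes, balance, randomize, slice_back, verbose) is an assumption:

    from dataeval.utils.torch.datasets import MNIST

    # `root` is assumed; only the keyword arguments shown in the diff are certain.
    train = MNIST(root="./data", randomize=False)  # restore the pre-0.73 ordering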
dataeval/workflows/__init__.py
@@ -4,7 +4,7 @@ Workflows perform a sequence of actions to analyze the dataset and make predicti

 from dataeval import _IS_TORCH_AVAILABLE

-if _IS_TORCH_AVAILABLE:  # pragma: no cover
+if _IS_TORCH_AVAILABLE:
     from dataeval.workflows.sufficiency import Sufficiency, SufficiencyOutput

 __all__ = ["Sufficiency", "SufficiencyOutput"]
{dataeval-0.72.2.dist-info → dataeval-0.73.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: dataeval
-Version: 0.72.2
+Version: 0.73.1
 Summary: DataEval provides a simple interface to characterize image data and its impact on model performance across classification and object-detection tasks
 Home-page: https://dataeval.ai/
 License: MIT
@@ -31,8 +31,8 @@ Requires-Dist: pillow (>=10.3.0)
 Requires-Dist: scikit-learn (>=1.5.0)
 Requires-Dist: scipy (>=1.10)
 Requires-Dist: tensorflow (>=2.16,<2.18) ; extra == "tensorflow" or extra == "all"
-Requires-Dist: tensorflow_probability (>=0.24) ; extra == "tensorflow" or extra == "all"
-Requires-Dist: tf-keras (>=2.16) ; extra == "tensorflow" or extra == "all"
+Requires-Dist: tensorflow_probability (>=0.24,<0.25) ; extra == "tensorflow" or extra == "all"
+Requires-Dist: tf-keras (>=2.16,<2.18) ; extra == "tensorflow" or extra == "all"
 Requires-Dist: torch (>=2.2.0) ; extra == "torch" or extra == "all"
 Requires-Dist: torchvision (>=0.17.0) ; extra == "torch" or extra == "all"
 Requires-Dist: tqdm
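The tightened pins keep the tensorflow extra self-consistent: tf-keras now shares the <2.18 cap with tensorflow, and tensorflow_probability is held below 0.25. A small stdlib check of what an installed environment actually resolved to:

    # Uses only importlib.metadata: print the installed dataeval version
    # and the TensorFlow-related requirement pins recorded in the wheel.
    from importlib.metadata import requires, version

    print(version("dataeval"))  # expected: 0.73.1
    for req in requires("dataeval") or []:
        if any(name in req for name in ("tensorflow", "tf-keras")):
            print(req)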
{dataeval-0.72.2.dist-info → dataeval-0.73.1.dist-info}/RECORD
@@ -1,7 +1,7 @@
-dataeval/__init__.py,sha256=UYhkwned7TR5hiU_c8I_qUaKogO1EODTBgT-9_t0ofI,641
-dataeval/detectors/__init__.py,sha256=xdp8LYOFjV5tVbAwu0Y03KU9EajHkSFy_M3raqbxpDc,383
-dataeval/detectors/drift/__init__.py,sha256=MRPWFOaoVoqAHW36nA5F3wk7QXJU4oecND2RbtgG9oY,757
-dataeval/detectors/drift/base.py,sha256=0S-0MFpIFaJ4_8IGreFKSmyna2L50FBn7DVaoNWmw8E,14509
+dataeval/__init__.py,sha256=SdXxst_wmjSoQkYzGdR-JXSV-iJmKynWsiwkpmGDDPE,601
+dataeval/detectors/__init__.py,sha256=mwAyY54Hvp6N4D57cde3_besOinK8jVF43k0Mw4XZi8,363
+dataeval/detectors/drift/__init__.py,sha256=BSXm21y7cAawHep-ZldCJ5HOvzYjPzYGKGrmoEs3i0E,737
+dataeval/detectors/drift/base.py,sha256=xwI6C-PEH0ZjpSqP6No6WDZp42DnE16OHi_mXe2JSvI,14499
 dataeval/detectors/drift/cvm.py,sha256=kc59w2_wtxFGNnLcaJRvX5v_38gPXiebSGNiFVdunEQ,4142
 dataeval/detectors/drift/ks.py,sha256=gcpe1WIQeNeZdLYkdMZCFLXUp1bHMQUxwJE6-RLVOXs,4229
 dataeval/detectors/drift/mmd.py,sha256=TqGOnUNYKwpS0GQPV3dSl-_qRa0g2flmoQ-dxzW_JfY,7586
@@ -9,28 +9,28 @@ dataeval/detectors/drift/torch.py,sha256=D46J72OPW8-PpP3w9ODMBfcDSdailIgVjgHVFpb
 dataeval/detectors/drift/uncertainty.py,sha256=Xz2yzJjtJfw1vLag234jwRvaa_HK36nMajGx8bQaNRs,5322
 dataeval/detectors/drift/updates.py,sha256=UJ0z5hlunRi7twnkLABfdJG3tT2EqX4y9IGx8_USYvo,1780
 dataeval/detectors/linters/__init__.py,sha256=BvpaB1RUpkEhhXk3Mqi5NYoOcJKZRFSBOJCmQOIfYRU,483
-dataeval/detectors/linters/clusterer.py,sha256=OtBE5rglAGdTTQRmKUHP6J-uWmnh2E3lZxeqJCnc87U,21014
+dataeval/detectors/linters/clusterer.py,sha256=sau5A9YcQ6VDjbZGOIaCaRHW_63opaA31pqHo5Rm-hQ,21018
 dataeval/detectors/linters/duplicates.py,sha256=tOD43rJkvheIA3mznbUqHhft2yD3xRZQdCt61daIca4,5665
 dataeval/detectors/linters/merged_stats.py,sha256=X-bDTwjyR8RuVmzxLaHZmQ5nI3oOWvsqVlitdSncapk,1355
 dataeval/detectors/linters/outliers.py,sha256=BUVvtbKHo04KnRmrgb84MBr0l1gtcY3-xNCHjetFrEQ,10117
-dataeval/detectors/ood/__init__.py,sha256=FVyVuaxVKAOgSTaaBf-j2OXXDarSBFcJ7CTlMV6w88s,661
-dataeval/detectors/ood/ae.py,sha256=cdwrgCpQkueK_HQoQbeXw7s0oTE-6FKVtXe9vETDe5M,2117
-dataeval/detectors/ood/aegmm.py,sha256=jK5aN1UjwwZaSLB3BpzH25eLp5wBqzlgylsfphaoZaE,1814
-dataeval/detectors/ood/base.py,sha256=S9jl4xH2zB_-ixalysQJZEvRCGOqMQSruacvfd4Dnfc,8687
-dataeval/detectors/ood/llr.py,sha256=HUNsro-cV7RR5Mht6pJ4NWCRR7aWeVdjwkBNurs5LbM,10378
+dataeval/detectors/ood/__init__.py,sha256=yzvCszJ0KrX9Eu4S_ykC_jwC0uYGPjxY3Vyx9fU3zQk,641
+dataeval/detectors/ood/ae.py,sha256=XQ_rCsf0VWg_2YXt33XGe6ZgxEud1PfIl7TmBVP1GkM,2347
+dataeval/detectors/ood/aegmm.py,sha256=6UKv0uJYWAzu1F-cITFGly4w9y_t7wqg3OmVyCN365o,2041
+dataeval/detectors/ood/base.py,sha256=a_d52pJMWVmduSt8OvUWYwHE8mpCaI6pIAE4_ib_GOs,8841
+dataeval/detectors/ood/llr.py,sha256=TwUk1RsZhnM5tUssGVMBhWggCW2izs_Asy9QPHkTJaU,10615
 dataeval/detectors/ood/metadata_ks_compare.py,sha256=jH7uDwyyBIIcTrRhQEdnLAdrwf7LfNczKBw0CpJyF5c,4282
 dataeval/detectors/ood/metadata_least_likely.py,sha256=nxMCXUOjOfWHDTGT2SLE7OYBCydRq8zHLd8t17k7hMM,5193
 dataeval/detectors/ood/metadata_ood_mi.py,sha256=KLay2BmgHrStBV92VpIs_B1yEfQKllsMTgzOQEng01I,4065
-dataeval/detectors/ood/vae.py,sha256=O1jpGkpavtJAqn4WrmocPRMtkX4iSdkpiCDUPBF1Ano,2925
-dataeval/detectors/ood/vaegmm.py,sha256=37epPiQKeicy6SZD0D7O7hCFQSajZ-8wvga1pmJiq2s,2183
-dataeval/interop.py,sha256=CFtGyVTwTqkJFkNfhHYhnBRVwxKIQ9f-9Zuuz_uQDqo,1589
+dataeval/detectors/ood/vae.py,sha256=UKrQNFdHcnxAY0fAFbLrXasY8Z6qg138BXxqwc1hlts,3154
+dataeval/detectors/ood/vaegmm.py,sha256=_wwmT37URs0MyhbORk91XJExClv-4e15LH_Bj60Pw1w,2409
+dataeval/interop.py,sha256=TZCkZo844DvzHoxuRo-YsBhT6GvKmyQTHtUEQZPly1M,1728
 dataeval/metrics/__init__.py,sha256=fPBNLd-T6mCErZBBJrxWmXIL0jCk7fNUYIcNEBkMa80,238
 dataeval/metrics/bias/__init__.py,sha256=puf645-hAO5hFHNHlZ239TPopqWIoN-uLGXFB8-hA_o,599
-dataeval/metrics/bias/balance.py,sha256=pgxaIqFvRcygYlAUbM_BKrbi45WU7fRV08HBrI7Z5q4,8569
-dataeval/metrics/bias/coverage.py,sha256=Ku9l-qvc6YrRiQ0PRzkpfjInyOhkAKKSO_bf_LnOwNg,3623
-dataeval/metrics/bias/diversity.py,sha256=-cmh-vyAUrn4rbn6-ZXvLuaO43Ncj28GKyeTmhWRzfE,8973
-dataeval/metrics/bias/metadata.py,sha256=nUZRwhcKaJM0GVwXn5k11Fa1s56_OtOBF7tmXjMDpsM,8919
-dataeval/metrics/bias/parity.py,sha256=uJ3p8m6id5mZpDNnS1NmxCThb5V6v75lJv_0TGAhCRA,16668
+dataeval/metrics/bias/balance.py,sha256=n4SM2Z46dzps_SPgHV8Q69msZ507AP9neebsQ45cNxc,9170
+dataeval/metrics/bias/coverage.py,sha256=7nDufCmQwZ8QG3Me5UiY0N5YoTByjcwK2zOYuMOHkJ0,4540
+dataeval/metrics/bias/diversity.py,sha256=BKGpyJ1K3S5RS_VxXN5DusB2gfRidOksL7r0L3SFa0Y,11018
+dataeval/metrics/bias/metadata.py,sha256=tPvyfFkfqWBFMX6v8i1ZLAA3DZfF6M4O7qXDdKzhQ6g,15040
+dataeval/metrics/bias/parity.py,sha256=_-WdKRWPlKHLNbjq-4mIhVdR1MI3NEabbMWblAmmVRM,17145
 dataeval/metrics/estimators/__init__.py,sha256=O6ocxJq8XDkfJWwXeJnnnzbOyRnFPKF4kTIVTTZYOA8,380
 dataeval/metrics/estimators/ber.py,sha256=SVT-BIC_GLs0l2l2NhWu4OpRbgn96w-OwTSoPHTnQbE,5037
 dataeval/metrics/estimators/divergence.py,sha256=pImaa216-YYTgGWDCSTcpJrC-dfl7150yVrPfW_TyGc,4293
@@ -46,27 +46,28 @@ dataeval/metrics/stats/pixelstats.py,sha256=x90O10IqVjEORtYwueFLvJnVYTxhPBOOx5HM
 dataeval/metrics/stats/visualstats.py,sha256=y0xIvst7epcajk8vz2jngiAiz0T7DZC-M97Rs1-vV9I,4950
 dataeval/output.py,sha256=jWXXNxFNBEaY1rN7Z-6LZl6bQT-I7z_wqr91Rhrdt_0,3061
 dataeval/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dataeval/utils/__init__.py,sha256=zTgPsmloPy0qZMzb4xipNNdIWpaHtseGph68pIAD-hQ,684
+dataeval/utils/__init__.py,sha256=FZLWDA7nMbHOcdg3701cVJpQmUp1Wxxk8h_qIrUQQjY,713
 dataeval/utils/image.py,sha256=KgC_1nW__nGN5q6bVZNvG4U_qIBdjcPATz9qe8f2XuA,1928
-dataeval/utils/shared.py,sha256=BvEeYPMNQTmx4LSaImGeC0VkvcbEY3Byqtxa-jQ3xgc,3623
-dataeval/utils/split_dataset.py,sha256=IopyxwC3FaZwgVriW4OXze-mDMpOlvRr83OADA5Jydk,19454
+dataeval/utils/lazy.py,sha256=M0iBHuJh4UPrSJPHZ0jhFwRSZhyjHJQx_KEf1OCkHD8,588
+dataeval/utils/metadata.py,sha256=A6VN7KbdiOA6rUQvUGKwDcvtOyjBer8bRW_wFxNhmW0,8556
+dataeval/utils/shared.py,sha256=xvF3VLfyheVwJtdtDrneOobkKf7t-JTmf_w91FWXmqo,3616
+dataeval/utils/split_dataset.py,sha256=Ot1ZJhbIhVfcShYXF9MkWXak5odBXyuBdRh-noXh-MI,19555
 dataeval/utils/tensorflow/__init__.py,sha256=l4OjIA75JJXeNWDCkST1xtDMVYsw97lZ-9JXFBlyuYg,539
-dataeval/utils/tensorflow/_internal/autoencoder.py,sha256=-pm4VqMEjHcrgre-K8uhMvaEVHyeqZsZbejrnlM6OtY,10430
-dataeval/utils/tensorflow/_internal/gmm.py,sha256=QoEgbeax1GETqRmUF7A2ih9uFOZfFAjGzgH2ljExlAc,3669
-dataeval/utils/tensorflow/_internal/loss.py,sha256=IXW_kxovLaTLd6UkMOIQLPEAGrOMILHDKagvRYgj-DE,4065
-dataeval/utils/tensorflow/_internal/pixelcnn.py,sha256=Aa7koa7YxqhHmFequpsfMw2-61KO03evWWcvvFTuaco,48518
-dataeval/utils/tensorflow/_internal/trainer.py,sha256=ld7pisl4ZXjEA6nxBStRNDEuNJme0IPo08oWqal6bYc,4167
-dataeval/utils/tensorflow/_internal/utils.py,sha256=k1mjy44oE63SIkckvU8BTlqtWsCnGynJF4eYyw1pebQ,8799
+dataeval/utils/tensorflow/_internal/gmm.py,sha256=RIFx8asEpi2kMf8JVzq9M3aAvNe9fjpJPf3BzWE-aeE,3787
+dataeval/utils/tensorflow/_internal/loss.py,sha256=TFhoNPgqeJtdpIHYobZPyzMpeWjzlFqzu5LCtthEUi4,4463
+dataeval/utils/tensorflow/_internal/models.py,sha256=TzQYRrFe5XomhnPw05v-HBODQdFIqWg21WH1xS0XBlg,59868
+dataeval/utils/tensorflow/_internal/trainer.py,sha256=uBFTnAy9o2T_FoT3RSX-AA7T-2FScyOdYEg9_7Dpd28,4314
+dataeval/utils/tensorflow/_internal/utils.py,sha256=lr5hKkAPbjMCUNIzMUIqbEddwbWQfMdL6hcazTHU3Uc,9541
 dataeval/utils/tensorflow/loss/__init__.py,sha256=Q-66vt91Oe1ByYfo28tW32zXDq2MqQ2gngWgmIVmof8,227
 dataeval/utils/torch/__init__.py,sha256=lpkqfgyARUxgrV94cZESQv8PIP2p-UnwItZ_wIr0XzQ,675
 dataeval/utils/torch/blocks.py,sha256=HVhBTMMD5NA4qheMUgyol1KWiKZDIuc8k5j4RcMKmhk,1466
-dataeval/utils/torch/datasets.py,sha256=9YV9-Uhq6NCMuu1hPhMnQXjmeI-Ld8ve1z_haxre88o,15023
+dataeval/utils/torch/datasets.py,sha256=10elNgLuH_FDX_CHE3y2Z215JN4-PQovQm5brcIJOeM,15021
 dataeval/utils/torch/models.py,sha256=0BsXmLK8W1OZ8nnEGb1f9LzIeCgtevQC37dvKS1v1vA,3236
 dataeval/utils/torch/trainer.py,sha256=EraOKiXxiMNiycStZNMR5yRz3ehgp87d9ewR9a9dV4w,5559
 dataeval/utils/torch/utils.py,sha256=FI4LJ6DvXFQJVff8fxSCP7LRkp8H9BIUgYX0kk7_Cuo,1537
-dataeval/workflows/__init__.py,sha256=x2JnOoKmLUCZOsB6RNPqMdVvxEb6Hpda5GPJnD_k0v0,310
+dataeval/workflows/__init__.py,sha256=ef1MiVL5IuhlDXXbwsiAfafhnr7tD3TXF9GRusy9_O8,290
 dataeval/workflows/sufficiency.py,sha256=1jSYhH9i4oesmJYs5PZvWS1LGXf8ekOgNhpFtMPLPXk,18552
-dataeval-0.72.2.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
-dataeval-0.72.2.dist-info/METADATA,sha256=ddOmTZA6nX7VceQhOmyQ-cQ1aBv2VU9Za32vnmjP-VE,4702
-dataeval-0.72.2.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-dataeval-0.72.2.dist-info/RECORD,,
+dataeval-0.73.1.dist-info/LICENSE.txt,sha256=Kpzcfobf1HlqafF-EX6dQLw9TlJiaJzfgvLQFukyXYw,1060
+dataeval-0.73.1.dist-info/METADATA,sha256=C7xThIWgHNoZEdSiGEZr3VgDLRSzeT3TkFbn4nQgrK0,4714
+dataeval-0.73.1.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+dataeval-0.73.1.dist-info/RECORD,,