dataeval 0.73.1__py3-none-any.whl → 0.74.1__py3-none-any.whl
This diff compares the contents of two publicly released package versions as published to a supported registry, and is provided for informational purposes only.
- dataeval/__init__.py +3 -9
- dataeval/detectors/__init__.py +2 -10
- dataeval/detectors/drift/base.py +3 -3
- dataeval/detectors/drift/mmd.py +1 -1
- dataeval/detectors/drift/torch.py +1 -101
- dataeval/detectors/linters/clusterer.py +3 -3
- dataeval/detectors/linters/duplicates.py +4 -4
- dataeval/detectors/linters/outliers.py +4 -4
- dataeval/detectors/ood/__init__.py +9 -9
- dataeval/detectors/ood/{ae.py → ae_torch.py} +22 -27
- dataeval/detectors/ood/base.py +63 -113
- dataeval/detectors/ood/base_torch.py +109 -0
- dataeval/detectors/ood/metadata_ks_compare.py +52 -14
- dataeval/interop.py +1 -1
- dataeval/metrics/bias/__init__.py +3 -0
- dataeval/metrics/bias/balance.py +73 -70
- dataeval/metrics/bias/coverage.py +4 -4
- dataeval/metrics/bias/diversity.py +67 -136
- dataeval/metrics/bias/metadata_preprocessing.py +285 -0
- dataeval/metrics/bias/metadata_utils.py +229 -0
- dataeval/metrics/bias/parity.py +51 -161
- dataeval/metrics/estimators/ber.py +3 -3
- dataeval/metrics/estimators/divergence.py +3 -3
- dataeval/metrics/estimators/uap.py +3 -3
- dataeval/metrics/stats/base.py +2 -2
- dataeval/metrics/stats/boxratiostats.py +1 -1
- dataeval/metrics/stats/datasetstats.py +6 -6
- dataeval/metrics/stats/dimensionstats.py +1 -1
- dataeval/metrics/stats/hashstats.py +1 -1
- dataeval/metrics/stats/labelstats.py +3 -3
- dataeval/metrics/stats/pixelstats.py +1 -1
- dataeval/metrics/stats/visualstats.py +1 -1
- dataeval/output.py +77 -53
- dataeval/utils/__init__.py +1 -7
- dataeval/utils/gmm.py +26 -0
- dataeval/utils/metadata.py +29 -9
- dataeval/utils/torch/gmm.py +98 -0
- dataeval/utils/torch/models.py +192 -0
- dataeval/utils/torch/trainer.py +84 -5
- dataeval/utils/torch/utils.py +107 -1
- dataeval/workflows/sufficiency.py +4 -4
- {dataeval-0.73.1.dist-info → dataeval-0.74.1.dist-info}/METADATA +3 -9
- dataeval-0.74.1.dist-info/RECORD +65 -0
- dataeval/detectors/ood/aegmm.py +0 -66
- dataeval/detectors/ood/llr.py +0 -302
- dataeval/detectors/ood/vae.py +0 -97
- dataeval/detectors/ood/vaegmm.py +0 -75
- dataeval/metrics/bias/metadata.py +0 -440
- dataeval/utils/lazy.py +0 -26
- dataeval/utils/tensorflow/__init__.py +0 -19
- dataeval/utils/tensorflow/_internal/gmm.py +0 -123
- dataeval/utils/tensorflow/_internal/loss.py +0 -121
- dataeval/utils/tensorflow/_internal/models.py +0 -1394
- dataeval/utils/tensorflow/_internal/trainer.py +0 -114
- dataeval/utils/tensorflow/_internal/utils.py +0 -256
- dataeval/utils/tensorflow/loss/__init__.py +0 -11
- dataeval-0.73.1.dist-info/RECORD +0 -73
- {dataeval-0.73.1.dist-info → dataeval-0.74.1.dist-info}/LICENSE.txt +0 -0
- {dataeval-0.73.1.dist-info → dataeval-0.74.1.dist-info}/WHEEL +0 -0
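
Taken together, the file list shows the theme of this release: the TensorFlow/`tf_keras` backend (`dataeval/utils/tensorflow/*`, `utils/lazy.py`) and the TF-based OOD detectors (`aegmm.py`, `llr.py`, `vae.py`, `vaegmm.py`) are removed, while PyTorch equivalents are added (`detectors/ood/ae_torch.py`, `base_torch.py`, and `utils/torch/{gmm,models,trainer}.py`). A rough migration sketch follows; the 0.74.1 symbol names in it are assumptions inferred from the new file names, not verified against the released API.

```python
import numpy as np

# Hypothetical migration sketch -- `OOD_AE` and `AE` are assumed exports
# inferred from `ae_torch.py` and `utils/torch/models.py`; verify against
# the actual 0.74.1 API before use.
from dataeval.detectors.ood import OOD_AE    # assumed torch-backed detector
from dataeval.utils.torch.models import AE   # assumed autoencoder model

images = np.random.rand(128, 3, 25, 25).astype(np.float32)  # stand-in dataset

# 0.73.1 equivalent (removed): OOD_VAE(create_model("VAE", images[0].shape))
detector = OOD_AE(AE(input_shape=images[0].shape))    # assumed constructor signature
detector.fit(images, threshold_perc=99.0, epochs=10)  # fit/predict interface as in the 0.73.x detectors
result = detector.predict(images)
```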
dataeval/detectors/ood/llr.py
DELETED
```diff
@@ -1,302 +0,0 @@
-"""
-Source code derived from Alibi-Detect 0.11.4
-https://github.com/SeldonIO/alibi-detect/tree/v0.11.4
-
-Original code Copyright (c) 2023 Seldon Technologies Ltd
-Licensed under Apache Software License (Apache 2.0)
-"""
-
-from __future__ import annotations
-
-__all__ = ["OOD_LLR"]
-
-from functools import partial
-from typing import TYPE_CHECKING, Callable
-
-import numpy as np
-from numpy.typing import ArrayLike, NDArray
-
-from dataeval.detectors.ood.base import OODBase, OODScoreOutput
-from dataeval.interop import to_numpy
-from dataeval.utils.lazy import lazyload
-from dataeval.utils.tensorflow._internal.trainer import trainer
-from dataeval.utils.tensorflow._internal.utils import predict_batch
-
-if TYPE_CHECKING:
-    import tensorflow as tf
-    import tf_keras as keras
-
-    import dataeval.utils.tensorflow._internal.models as tf_models
-else:
-    tf = lazyload("tensorflow")
-    keras = lazyload("tf_keras")
-    tf_models = lazyload("dataeval.utils.tensorflow._internal.models")
-
-
-def _build_model(
-    dist: tf_models.PixelCNN, input_shape: tuple | None = None, filepath: str | None = None
-) -> tuple[keras.Model, tf_models.PixelCNN]:
-    """
-    Create keras.Model from TF distribution.
-
-    Parameters
-    ----------
-    dist
-        :term:`TensorFlow` distribution.
-    input_shape
-        Input shape of the model.
-    filepath
-        File to load model weights from.
-
-    Returns
-    -------
-    TensorFlow model.
-    """
-    x_in = keras.layers.Input(shape=input_shape)
-    log_prob = dist.log_prob(x_in)
-    model = keras.models.Model(inputs=x_in, outputs=log_prob)
-    model.add_loss(-tf.reduce_mean(log_prob))
-    if isinstance(filepath, str):
-        model.load_weights(filepath)
-    return model, dist
-
-
-def _mutate_categorical(
-    X: NDArray,
-    rate: float,
-    seed: int = 0,
-    feature_range: tuple[int, int] = (0, 255),
-) -> tf.Tensor:
-    """
-    Randomly change integer feature values to values within a set range
-    with a specified permutation rate.
-
-    Parameters
-    ----------
-    X
-        Batch of data to be perturbed.
-    rate
-        Permutation rate (between 0 and 1).
-    seed
-        Random seed.
-    feature_range
-        Min and max range for perturbed features.
-
-    Returns
-    -------
-    Array with perturbed data.
-    """
-    frange = (feature_range[0] + 1, feature_range[1] + 1)
-    shape = X.shape
-    n_samples = np.prod(shape)
-    mask = tf.random.categorical(tf.math.log([[1.0 - rate, rate]]), n_samples, seed=seed, dtype=tf.int32)
-    mask = tf.reshape(mask, shape)
-    possible_mutations = tf.random.uniform(shape, minval=frange[0], maxval=frange[1], dtype=tf.int32, seed=seed + 1)
-    X = tf.math.floormod(tf.cast(X, tf.int32) + mask * possible_mutations, frange[1])  # type: ignore py38
-    return tf.cast(X, tf.float32)  # type: ignore
-
-
-class OOD_LLR(OODBase):
-    """
-    Likelihood Ratios based outlier detector.
-
-    Parameters
-    ----------
-    model : PixelCNN
-        Generative distribution model.
-    model_background : Optional[PixelCNN], default None
-        Optional model for the background. Only needed if it is different from `model`.
-    log_prob : Optional[Callable], default None
-        Function used to evaluate log probabilities under the model
-        if the model does not have a `log_prob` function.
-    sequential : bool, default False
-        Whether the data is sequential. Used to create targets during training.
-    """
-
-    def __init__(
-        self,
-        model: tf_models.PixelCNN,
-        model_background: tf_models.PixelCNN | None = None,
-        log_prob: Callable | None = None,
-        sequential: bool = False,
-    ) -> None:
-        self.dist_s: tf_models.PixelCNN = model
-        self.dist_b: tf_models.PixelCNN = (
-            model.copy()
-            if hasattr(model, "copy")
-            else keras.models.clone_model(model)
-            if model_background is None
-            else model_background
-        )
-        self.has_log_prob: bool = hasattr(model, "log_prob")
-        self.sequential: bool = sequential
-        self.log_prob: Callable | None = log_prob
-
-        self._ref_score: OODScoreOutput
-        self._threshold_perc: float
-        self._data_info: tuple[tuple, type] | None = None
-
-    def fit(
-        self,
-        x_ref: ArrayLike,
-        threshold_perc: float = 100.0,
-        loss_fn: Callable | None = None,
-        optimizer: keras.optimizers.Optimizer | None = None,
-        epochs: int = 20,
-        batch_size: int = 64,
-        verbose: bool = True,
-        mutate_fn: Callable = _mutate_categorical,
-        mutate_fn_kwargs: dict[str, float | int | tuple[int, int]] = {
-            "rate": 0.2,
-            "seed": 0,
-            "feature_range": (0, 255),
-        },
-        mutate_batch_size: int = int(1e10),
-    ) -> None:
-        """
-        Train semantic and background generative models.
-
-        Parameters
-        ----------
-        x_ref : ArrayLike
-            Training data.
-        threshold_perc : float, default 100.0
-            Percentage of reference data that is normal.
-        loss_fn : Callable | None, default None
-            Loss function used for training.
-        optimizer : keras.optimizers.Optimizer, default keras.optimizers.Adam
-            Optimizer used for training.
-        epochs : int, default 20
-            Number of training epochs.
-        batch_size : int, default 64
-            Batch size used for training.
-        verbose : bool, default True
-            Whether to print training progress.
-        mutate_fn : Callable, default mutate_categorical
-            Mutation function used to generate the background dataset.
-        mutate_fn_kwargs : dict, default {"rate": 0.2, "seed": 0, "feature_range": (0, 255)}
-            Kwargs for the mutation function used to generate the background dataset.
-            Default values set for an image dataset.
-        mutate_batch_size: int, default int(1e10)
-            Batch size used to generate the mutations for the background dataset.
-        """
-        x_ref = to_numpy(x_ref)
-        input_shape = x_ref.shape[1:]
-        optimizer = keras.optimizers.Adam() if optimizer is None else optimizer
-        # Separate into two separate optimizers, one for semantic model and one for background model
-        optimizer_s = optimizer
-        optimizer_b = optimizer.__class__.from_config(optimizer.get_config())
-
-        # training arguments
-        kwargs = {
-            "epochs": epochs,
-            "batch_size": batch_size,
-            "verbose": verbose,
-        }
-
-        # create background data
-        mutate_fn = partial(mutate_fn, **mutate_fn_kwargs)
-        X_back = predict_batch(x_ref, mutate_fn, batch_size=mutate_batch_size, dtype=x_ref.dtype)  # type: ignore
-
-        # prepare sequential data
-        if self.sequential and not self.has_log_prob:
-            y, y_back = x_ref[:, 1:], X_back[:, 1:]  # type: ignore
-            X, X_back = x_ref[:, :-1], X_back[:, :-1]  # type: ignore
-        else:
-            X = x_ref
-            y, y_back = None, None
-
-        # check if model needs to be built
-        use_build = self.has_log_prob and not isinstance(self.dist_s, keras.Model)
-
-        if use_build:
-            # build and train semantic model
-            self.model_s: keras.Model = _build_model(self.dist_s, input_shape)[0]
-            self.model_s.compile(optimizer=optimizer_s)
-            self.model_s.fit(X, **kwargs)
-            # build and train background model
-            self.model_b: keras.Model = _build_model(self.dist_b, input_shape)[0]
-            self.model_b.compile(optimizer=optimizer_b)
-            self.model_b.fit(X_back, **kwargs)
-        else:
-            # train semantic model
-            args = [self.dist_s, X]
-            kwargs.update({"y_train": y, "loss_fn": loss_fn, "optimizer": optimizer_s})
-            trainer(*args, **kwargs)
-
-            # train background model
-            args = [self.dist_b, X_back]
-            kwargs.update({"y_train": y_back, "loss_fn": loss_fn, "optimizer": optimizer_b})
-            trainer(*args, **kwargs)
-
-        self._datainfo = self._get_data_info(x_ref)
-        self._ref_score = self.score(x_ref, batch_size=batch_size)
-        self._threshold_perc = threshold_perc
-
-    def _logp(
-        self,
-        dist,
-        X: NDArray,
-        return_per_feature: bool = False,
-        batch_size: int = int(1e10),
-    ) -> NDArray:
-        """
-        Compute log probability of a batch of instances under the :term:`generative model<Generative Model>`.
-        """
-        logp_fn = partial(dist.log_prob, return_per_feature=return_per_feature)
-        # TODO: TBD: can this be any of the other types from predict_batch? i.e. tf.Tensor or tuple
-        return predict_batch(X, logp_fn, batch_size=batch_size)  # type: ignore[return-value]
-
-    def _logp_alt(
-        self,
-        model: keras.Model,
-        X: NDArray,
-        return_per_feature: bool = False,
-        batch_size: int = int(1e10),
-    ) -> NDArray:
-        """
-        Compute log probability of a batch of instances with the user defined log_prob function.
-        """
-        if self.sequential:
-            y, X = X[:, 1:], X[:, :-1]
-        else:
-            y = X.copy()
-        y_preds = predict_batch(X, model, batch_size=batch_size)
-        logp = self.log_prob(y, y_preds).numpy()  # type: ignore
-        if return_per_feature:
-            return logp
-        else:
-            axis = tuple(np.arange(len(logp.shape))[1:])
-            return np.mean(logp, axis=axis)
-
-    def _llr(self, X: NDArray, return_per_feature: bool, batch_size: int = int(1e10)) -> NDArray:
-        """
-        Compute likelihood ratios.
-
-        Parameters
-        ----------
-        X
-            Batch of instances.
-        return_per_feature
-            Return likelihood ratio per feature.
-        batch_size
-            Batch size for the :term:`generative model<Generative Model>` evaluations.
-
-        Returns
-        -------
-        Likelihood ratios.
-        """
-        logp_fn = self._logp if not isinstance(self.log_prob, Callable) else self._logp_alt  # type: ignore
-        logp_s = logp_fn(self.dist_s, X, return_per_feature=return_per_feature, batch_size=batch_size)
-        logp_b = logp_fn(self.dist_b, X, return_per_feature=return_per_feature, batch_size=batch_size)
-        return logp_s - logp_b
-
-    def _score(
-        self,
-        X: ArrayLike,
-        batch_size: int = int(1e10),
-    ) -> OODScoreOutput:
-        self._validate(X := to_numpy(X))
-        fscore = -self._llr(X, True, batch_size=batch_size)
-        iscore = -self._llr(X, False, batch_size=batch_size)
-        return OODScoreOutput(iscore, fscore)
```
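
The removed `OOD_LLR` implements the likelihood-ratio method: a semantic model is trained on the reference data, a background model on a mutated copy of it, and an instance is scored by the negated difference of their log-likelihoods. A minimal numpy sketch of that scoring step, with the trained generative models stubbed as plain callables:

```python
import numpy as np
from numpy.typing import NDArray


def llr_score(x: NDArray, logp_semantic, logp_background) -> NDArray:
    """Mirror of OOD_LLR._score: higher score means more out-of-distribution.

    Both arguments are callables mapping a batch to per-instance log-likelihoods.
    """
    return -(logp_semantic(x) - logp_background(x))


# Toy stand-ins for the trained models: two Gaussian log-densities (up to a constant).
logp_s = lambda x: -0.5 * np.sum((x - 0.0) ** 2, axis=1)  # semantic model, centered at 0
logp_b = lambda x: -0.5 * np.sum((x - 3.0) ** 2, axis=1)  # background model, centered at 3

x = np.vstack([np.zeros((4, 2)), np.full((4, 2), 3.0)])
scores = llr_score(x, logp_s, logp_b)  # low for the first 4 rows, high for the last 4
```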
dataeval/detectors/ood/vae.py
DELETED
```diff
@@ -1,97 +0,0 @@
-"""
-Source code derived from Alibi-Detect 0.11.4
-https://github.com/SeldonIO/alibi-detect/tree/v0.11.4
-
-Original code Copyright (c) 2023 Seldon Technologies Ltd
-Licensed under Apache Software License (Apache 2.0)
-"""
-
-from __future__ import annotations
-
-__all__ = ["OOD_VAE"]
-
-from typing import TYPE_CHECKING, Callable
-
-import numpy as np
-from numpy.typing import ArrayLike
-
-from dataeval.detectors.ood.base import OODBase, OODScoreOutput
-from dataeval.interop import to_numpy
-from dataeval.utils.lazy import lazyload
-from dataeval.utils.tensorflow._internal.loss import Elbo
-from dataeval.utils.tensorflow._internal.utils import predict_batch
-
-if TYPE_CHECKING:
-    import tensorflow as tf
-    import tf_keras as keras
-
-    import dataeval.utils.tensorflow._internal.models as tf_models
-else:
-    tf = lazyload("tensorflow")
-    keras = lazyload("tf_keras")
-    tf_models = lazyload("dataeval.utils.tensorflow._internal.models")
-
-
-class OOD_VAE(OODBase):
-    """
-    VAE based outlier detector.
-
-    Parameters
-    ----------
-    model : VAE
-        A VAE model.
-    samples : int, default 10
-        Number of samples sampled to evaluate each instance.
-
-    Examples
-    --------
-    Instantiate an OOD detector metric with a generic dataset - batch of images with shape (3,25,25)
-
-    >>> metric = OOD_VAE(create_model("VAE", dataset[0].shape))
-
-    Adjusting fit parameters,
-    including setting the fit threshold at 85% for a training set with about 15% out-of-distribution
-
-    >>> metric.fit(dataset, threshold_perc=85, batch_size=128, verbose=False)
-
-    Detect :term:`out of distribution<Out-of-Distribution (OOD)>` samples at the 'feature' level
-
-    >>> result = metric.predict(dataset, ood_type="feature")
-    """
-
-    def __init__(self, model: tf_models.VAE, samples: int = 10) -> None:
-        super().__init__(model)
-        self.samples = samples
-
-    def fit(
-        self,
-        x_ref: ArrayLike,
-        threshold_perc: float = 100.0,
-        loss_fn: Callable[..., tf.Tensor] = Elbo(0.05),
-        optimizer: keras.optimizers.Optimizer | None = None,
-        epochs: int = 20,
-        batch_size: int = 64,
-        verbose: bool = True,
-    ) -> None:
-        super().fit(x_ref, threshold_perc, loss_fn, optimizer, epochs, batch_size, verbose)
-
-    def _score(self, X: ArrayLike, batch_size: int = int(1e10)) -> OODScoreOutput:
-        self._validate(X := to_numpy(X))
-
-        # sample reconstructed instances
-        X_samples = np.repeat(X, self.samples, axis=0)
-        X_recon = predict_batch(X_samples, model=self.model, batch_size=batch_size)
-
-        # compute feature scores
-        fscore = np.power(X_samples - X_recon, 2)
-        fscore = fscore.reshape((-1, self.samples) + X_samples.shape[1:])
-        fscore = np.mean(fscore, axis=1)
-
-        # compute instance scores
-        fscore_flat = fscore.reshape(fscore.shape[0], -1).copy()
-        n_score_features = int(np.ceil(fscore_flat.shape[1]))
-        sorted_fscore = np.sort(fscore_flat, axis=1)
-        sorted_fscore_perc = sorted_fscore[:, -n_score_features:]
-        iscore = np.mean(sorted_fscore_perc, axis=1)
-
-        return OODScoreOutput(iscore, fscore)
```
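
The scoring logic of the removed `OOD_VAE` is backend-independent: each instance is reconstructed `samples` times through the VAE, the per-feature squared reconstruction error (averaged over samples) is the feature score, and its mean over features is the instance score. A self-contained numpy sketch with the VAE stubbed out as a `reconstruct` callable:

```python
import numpy as np
from numpy.typing import NDArray


def vae_scores(x: NDArray, reconstruct, samples: int = 10) -> tuple[NDArray, NDArray]:
    """Sketch of OOD_VAE._score with the model replaced by a callable."""
    x_rep = np.repeat(x, samples, axis=0)    # (n * samples, ...)
    x_recon = reconstruct(x_rep)             # stochastic reconstructions
    fscore = (x_rep - x_recon) ** 2          # per-feature squared error
    fscore = fscore.reshape((-1, samples) + x.shape[1:]).mean(axis=1)
    iscore = fscore.reshape(len(x), -1).mean(axis=1)
    return iscore, fscore


# Toy "VAE": identity plus noise, so the score reflects only the noise level.
rng = np.random.default_rng(0)
reconstruct = lambda x: x + 0.1 * rng.standard_normal(x.shape)
iscore, fscore = vae_scores(rng.random((8, 3, 25, 25)), reconstruct)
```

(The top-k step above reduces to a plain mean, since `n_score_features` is the ceiling of the full feature count.)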
dataeval/detectors/ood/vaegmm.py
DELETED
```diff
@@ -1,75 +0,0 @@
-"""
-Source code derived from Alibi-Detect 0.11.4
-https://github.com/SeldonIO/alibi-detect/tree/v0.11.4
-
-Original code Copyright (c) 2023 Seldon Technologies Ltd
-Licensed under Apache Software License (Apache 2.0)
-"""
-
-from __future__ import annotations
-
-__all__ = ["OOD_VAEGMM"]
-
-from typing import TYPE_CHECKING, Callable
-
-import numpy as np
-from numpy.typing import ArrayLike
-
-from dataeval.detectors.ood.base import OODGMMBase, OODScoreOutput
-from dataeval.interop import to_numpy
-from dataeval.utils.lazy import lazyload
-from dataeval.utils.tensorflow._internal.gmm import gmm_energy
-from dataeval.utils.tensorflow._internal.loss import Elbo, LossGMM
-from dataeval.utils.tensorflow._internal.utils import predict_batch
-
-if TYPE_CHECKING:
-    import tensorflow as tf
-    import tf_keras as keras
-
-    import dataeval.utils.tensorflow._internal.models as tf_models
-else:
-    tf = lazyload("tensorflow")
-    keras = lazyload("tf_keras")
-    tf_models = lazyload("dataeval.utils.tensorflow._internal.models")
-
-
-class OOD_VAEGMM(OODGMMBase):
-    """
-    VAE with Gaussian Mixture Model based outlier detector.
-
-    Parameters
-    ----------
-    model : VAEGMM
-        A VAEGMM model.
-    samples
-        Number of samples sampled to evaluate each instance.
-    """
-
-    def __init__(self, model: tf_models.VAEGMM, samples: int = 10) -> None:
-        super().__init__(model)
-        self.samples = samples
-
-    def fit(
-        self,
-        x_ref: ArrayLike,
-        threshold_perc: float = 100.0,
-        loss_fn: Callable[..., tf.Tensor] = LossGMM(elbo=Elbo(0.05)),
-        optimizer: keras.optimizers.Optimizer | None = None,
-        epochs: int = 20,
-        batch_size: int = 64,
-        verbose: bool = True,
-    ) -> None:
-        super().fit(x_ref, threshold_perc, loss_fn, optimizer, epochs, batch_size, verbose)
-
-    def _score(self, X: ArrayLike, batch_size: int = int(1e10)) -> OODScoreOutput:
-        self._validate(X := to_numpy(X))
-
-        # draw samples from latent space
-        X_samples = np.repeat(X, self.samples, axis=0)
-        _, z, _ = predict_batch(X_samples, self.model, batch_size=batch_size)
-
-        # compute average energy for samples
-        energy, _ = gmm_energy(z, self.gmm_params, return_mean=False)
-        energy_samples = energy.numpy().reshape((-1, self.samples))  # type: ignore
-        iscore = np.mean(energy_samples, axis=-1)
-        return OODScoreOutput(iscore)
```
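
The removed `OOD_VAEGMM` scores an instance by the average GMM energy of its latent samples, where the energy is the negative log-likelihood under the mixture fitted during training. A minimal numpy version of that energy computation (the encoder and the fitted mixture parameters here are stand-ins):

```python
import numpy as np
from numpy.typing import NDArray


def gmm_energy(z: NDArray, phi: NDArray, mu: NDArray, cov: NDArray) -> NDArray:
    """E(z) = -log sum_k phi_k * N(z; mu_k, cov_k); higher energy = more OOD."""
    d = z.shape[1]
    log_comp = []
    for k in range(len(phi)):
        diff = z - mu[k]                                   # (n, d)
        prec = np.linalg.inv(cov[k])
        maha = np.einsum("ni,ij,nj->n", diff, prec, diff)  # squared Mahalanobis distance
        _, logdet = np.linalg.slogdet(cov[k])
        log_comp.append(np.log(phi[k]) - 0.5 * (maha + logdet + d * np.log(2 * np.pi)))
    stacked = np.stack(log_comp)                           # (K, n)
    m = stacked.max(axis=0)                                # log-sum-exp for stability
    return -(m + np.log(np.exp(stacked - m).sum(axis=0)))


# Stand-in fitted mixture; 8 instances x 10 latent samples, averaged as in _score.
phi = np.array([0.5, 0.5])
mu = np.array([[0.0, 0.0], [4.0, 4.0]])
cov = np.stack([np.eye(2), np.eye(2)])
z = np.random.default_rng(1).standard_normal((8 * 10, 2))
iscore = gmm_energy(z, phi, mu, cov).reshape(8, 10).mean(axis=1)
```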