quadra 2.3.0a3__py3-none-any.whl → 2.3.2a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quadra/__init__.py +1 -1
- quadra/callbacks/anomalib.py +3 -2
- quadra/callbacks/lightning.py +3 -1
- quadra/datamodules/base.py +5 -5
- quadra/datamodules/classification.py +2 -2
- quadra/datamodules/segmentation.py +6 -6
- quadra/datasets/anomaly.py +2 -2
- quadra/datasets/classification.py +7 -7
- quadra/datasets/patch.py +1 -1
- quadra/datasets/ssl.py +3 -3
- quadra/metrics/segmentation.py +1 -1
- quadra/models/base.py +1 -1
- quadra/models/evaluation.py +1 -1
- quadra/modules/base.py +3 -2
- quadra/modules/ssl/byol.py +1 -0
- quadra/tasks/anomaly.py +7 -4
- quadra/tasks/base.py +8 -4
- quadra/tasks/classification.py +6 -2
- quadra/tasks/patch.py +1 -1
- quadra/tasks/segmentation.py +7 -5
- quadra/tasks/ssl.py +2 -3
- quadra/utils/classification.py +8 -10
- quadra/utils/evaluation.py +12 -3
- quadra/utils/export.py +4 -4
- quadra/utils/mlflow.py +2 -0
- quadra/utils/models.py +5 -7
- quadra/utils/patch/dataset.py +7 -6
- quadra/utils/patch/metrics.py +9 -6
- quadra/utils/patch/visualization.py +2 -2
- quadra/utils/tests/fixtures/dataset/imagenette.py +1 -1
- quadra/utils/utils.py +1 -1
- quadra/utils/validator.py +1 -3
- quadra/utils/visualization.py +8 -5
- quadra/utils/vit_explainability.py +1 -1
- {quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/METADATA +2 -2
- {quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/RECORD +39 -39
- {quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/LICENSE +0 -0
- {quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/WHEEL +0 -0
- {quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/entry_points.txt +0 -0
quadra/__init__.py
CHANGED
quadra/callbacks/anomalib.py
CHANGED
@@ -64,7 +64,7 @@ class Visualizer:
         self.figure.subplots_adjust(right=0.9)

         axes = self.axis if len(self.images) > 1 else [self.axis]
-        for axis, image_dict in zip(axes, self.images):
+        for axis, image_dict in zip(axes, self.images, strict=False):
             axis.axes.xaxis.set_visible(False)
             axis.axes.yaxis.set_visible(False)
             axis.imshow(image_dict["image"], image_dict["color_map"], vmin=0, vmax=255)
@@ -201,6 +201,7 @@ class VisualizerCallback(Callback):
                 outputs["label"],
                 outputs["pred_labels"],
                 outputs["pred_scores"],
+                strict=False,
             )
         ):
             denormalized_image = Denormalize()(image.cpu())
@@ -256,7 +257,7 @@ class VisualizerCallback(Callback):
             visualizer.close()

             if self.plot_raw_outputs:
-                for raw_output, raw_name in zip([heatmap, vis_img], ["heatmap", "segmentation"]):
+                for raw_output, raw_name in zip([heatmap, vis_img], ["heatmap", "segmentation"], strict=False):
                     current_raw_output = raw_output
                     if raw_name == "segmentation":
                         current_raw_output = (raw_output * 255).astype(np.uint8)
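Note: nearly every hunk in this release adds an explicit strict=False to zip() calls. This pins down zip's historical truncating behaviour now that Python 3.10 (PEP 618) accepts a strict flag, and satisfies linters that require the flag to be spelled out. A minimal sketch of the difference, on toy data rather than quadra code:

    a = [1, 2, 3]
    b = ["x", "y"]

    # Default behaviour (what strict=False makes explicit): truncate to the shortest input.
    print(list(zip(a, b, strict=False)))  # [(1, 'x'), (2, 'y')]

    # strict=True raises on a length mismatch instead of silently dropping items.
    try:
        list(zip(a, b, strict=True))
    except ValueError as err:
        print(err)  # zip() argument 2 is shorter than argument 1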
quadra/callbacks/lightning.py
CHANGED
@@ -46,7 +46,7 @@ def _scale_batch_size(
             however in practise a few are needed
         init_val: initial batch size to start the search with
         max_trials: max number of increases in batch size done before
-
+            algorithm is terminated
         batch_arg_name: name of the attribute that stores the batch size.
             It is expected that the user has provided a model or datamodule that has a hyperparameter
             with that name. We will look for this attribute name in the following places
@@ -79,6 +79,8 @@ def _scale_batch_size(
         new_size = _run_power_scaling(trainer, init_val, batch_arg_name, max_trials, params)
     elif mode == "binsearch":
         new_size = _run_binary_scaling(trainer, init_val, batch_arg_name, max_trials, params)
+    else:
+        raise ValueError(f"Unknown mode {mode}")

     garbage_collection_cuda()
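Note: the new else branch makes _scale_batch_size fail fast on an unrecognized mode instead of continuing with new_size unbound. A standalone sketch of the pattern (the function name and sizes are illustrative, not quadra code):

    def pick_size(mode: str) -> int:
        if mode == "power":
            new_size = 64
        elif mode == "binsearch":
            new_size = 48
        else:
            # Without this branch a typo such as mode="binary" would surface later
            # as an UnboundLocalError on new_size rather than a clear error here.
            raise ValueError(f"Unknown mode {mode}")
        return new_size

    pick_size("power")   # 64
    pick_size("binary")  # ValueError: Unknown mode binary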
quadra/datamodules/base.py
CHANGED
@@ -7,7 +7,7 @@ import pickle as pkl
 import typing
 from collections.abc import Callable, Iterable, Sequence
 from functools import wraps
-from typing import Any, Literal,
+from typing import Any, Literal, cast

 import albumentations
 import numpy as np
@@ -20,8 +20,8 @@ from tqdm import tqdm
 from quadra.utils import utils

 log = utils.get_logger(__name__)
-TrainDataset =
-ValDataset =
+TrainDataset = torch.utils.data.Dataset | Sequence[torch.utils.data.Dataset]
+ValDataset = torch.utils.data.Dataset | Sequence[torch.utils.data.Dataset]
 TestDataset = torch.utils.data.Dataset


@@ -260,7 +260,7 @@ class BaseDataModule(LightningDataModule, metaclass=DecorateParentMethod):
             return

         # TODO: We need to find a way to annotate the columns of data.
-        paths_and_hash_length = zip(self.data["samples"], [self.hash_size] * len(self.data))
+        paths_and_hash_length = zip(self.data["samples"], [self.hash_size] * len(self.data), strict=False)

         with mp.Pool(min(8, mp.cpu_count() - 1)) as pool:
             self.data["hash"] = list(
@@ -355,7 +355,7 @@ class BaseDataModule(LightningDataModule, metaclass=DecorateParentMethod):
             raise ValueError("`n_aug_to_take` is not set. Cannot load augmented samples.")
         aug_samples = []
         aug_labels = []
-        for sample, label in zip(samples, targets):
+        for sample, label in zip(samples, targets, strict=False):
             aug_samples.append(sample)
             aug_labels.append(label)
             final_sample = sample
quadra/datamodules/classification.py
CHANGED
@@ -243,7 +243,7 @@ class ClassificationDataModule(BaseDataModule):
             samples_test, targets_test = self._read_split(self.test_split_file)
             if not self.train_split_file:
                 samples_train, targets_train = [], []
-                for sample, target in zip(all_samples, all_targets):
+                for sample, target in zip(all_samples, all_targets, strict=False):
                     if sample not in samples_test:
                         samples_train.append(sample)
                         targets_train.append(target)
@@ -251,7 +251,7 @@ class ClassificationDataModule(BaseDataModule):
             samples_train, targets_train = self._read_split(self.train_split_file)
             if not self.test_split_file:
                 samples_test, targets_test = [], []
-                for sample, target in zip(all_samples, all_targets):
+                for sample, target in zip(all_samples, all_targets, strict=False):
                     if sample not in samples_train:
                         samples_test.append(sample)
                         targets_test.append(target)
quadra/datamodules/segmentation.py
CHANGED
@@ -187,7 +187,7 @@ class SegmentationDataModule(BaseDataModule):
             samples_test, targets_test, masks_test = self._read_split(self.test_split_file)
             if not self.train_split_file:
                 samples_train, targets_train, masks_train = [], [], []
-                for sample, target, mask in zip(all_samples, all_targets, all_masks):
+                for sample, target, mask in zip(all_samples, all_targets, all_masks, strict=False):
                     if sample not in samples_test:
                         samples_train.append(sample)
                         targets_train.append(target)
@@ -197,7 +197,7 @@ class SegmentationDataModule(BaseDataModule):
             samples_train, targets_train, masks_train = self._read_split(self.train_split_file)
             if not self.test_split_file:
                 samples_test, targets_test, masks_test = [], [], []
-                for sample, target, mask in zip(all_samples, all_targets, all_masks):
+                for sample, target, mask in zip(all_samples, all_targets, all_masks, strict=False):
                     if sample not in samples_train:
                         samples_test.append(sample)
                         targets_test.append(target)
@@ -549,7 +549,7 @@ class SegmentationMulticlassDataModule(BaseDataModule):
                 samples_and_masks_test,
                 targets_test,
             ) = iterative_train_test_split(
-                np.expand_dims(np.array(list(zip(all_samples, all_masks))), 1),
+                np.expand_dims(np.array(list(zip(all_samples, all_masks, strict=False))), 1),
                 np.array(all_targets),
                 test_size=self.test_size,
             )
@@ -561,7 +561,7 @@ class SegmentationMulticlassDataModule(BaseDataModule):
             samples_test, targets_test, masks_test = self._read_split(self.test_split_file)
             if not self.train_split_file:
                 samples_train, targets_train, masks_train = [], [], []
-                for sample, target, mask in zip(all_samples, all_targets, all_masks):
+                for sample, target, mask in zip(all_samples, all_targets, all_masks, strict=False):
                     if sample not in samples_test:
                         samples_train.append(sample)
                         targets_train.append(target)
@@ -571,7 +571,7 @@ class SegmentationMulticlassDataModule(BaseDataModule):
             samples_train, targets_train, masks_train = self._read_split(self.train_split_file)
             if not self.test_split_file:
                 samples_test, targets_test, masks_test = [], [], []
-                for sample, target, mask in zip(all_samples, all_targets, all_masks):
+                for sample, target, mask in zip(all_samples, all_targets, all_masks, strict=False):
                     if sample not in samples_train:
                         samples_test.append(sample)
                         targets_test.append(target)
@@ -583,7 +583,7 @@ class SegmentationMulticlassDataModule(BaseDataModule):
             raise ValueError("Validation split file is specified but no train or test split file is specified.")
         else:
             samples_and_masks_train, targets_train, samples_and_masks_val, targets_val = iterative_train_test_split(
-                np.expand_dims(np.array(list(zip(samples_train, masks_train))), 1),
+                np.expand_dims(np.array(list(zip(samples_train, masks_train, strict=False))), 1),
                 np.array(targets_train),
                 test_size=self.val_size,
             )
quadra/datasets/anomaly.py
CHANGED
@@ -220,7 +220,7 @@ class AnomalyDataset(Dataset):
             if not os.path.exists(valid_area_mask):
                 raise RuntimeError(f"Valid area mask {valid_area_mask} does not exist.")

-            self.valid_area_mask = cv2.imread(valid_area_mask, 0) > 0
+            self.valid_area_mask = cv2.imread(valid_area_mask, 0) > 0

     def __len__(self) -> int:
         """Get length of the dataset."""
@@ -265,7 +265,7 @@ class AnomalyDataset(Dataset):
         if label_index == 0:
             mask = np.zeros(shape=original_image_shape[:2])
         elif os.path.isfile(mask_path):
-            mask = cv2.imread(mask_path, flags=0) / 255.0
+            mask = cv2.imread(mask_path, flags=0) / 255.0
         else:
             # We need ones in the mask to compute correctly at least image level f1 score
             mask = np.ones(shape=original_image_shape[:2])
quadra/datasets/classification.py
CHANGED
@@ -50,9 +50,9 @@ class ImageClassificationListDataset(Dataset):
         allow_missing_label: bool | None = False,
     ):
         super().__init__()
-        assert len(samples) == len(
-            targets
-        )
+        assert len(samples) == len(targets), (
+            f"Samples ({len(samples)}) and targets ({len(targets)}) must have the same length"
+        )
         # Setting the ROI
         self.roi = roi

@@ -201,9 +201,9 @@ class MultilabelClassificationDataset(torch.utils.data.Dataset):
         rgb: bool = True,
     ):
         super().__init__()
-        assert len(samples) == len(
-            targets
-        )
+        assert len(samples) == len(targets), (
+            f"Samples ({len(samples)}) and targets ({len(targets)}) must have the same length"
+        )

         # Data
         self.x = samples
@@ -215,7 +215,7 @@ class MultilabelClassificationDataset(torch.utils.data.Dataset):
         class_to_idx = {c: i for i, c in enumerate(range(unique_targets))}
         self.class_to_idx = class_to_idx
         self.idx_to_class = {v: k for k, v in class_to_idx.items()}
-        self.samples = list(zip(self.x, self.y))
+        self.samples = list(zip(self.x, self.y, strict=False))
         self.rgb = rgb
         self.transform = transform

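Note: both reflowed asserts above attach a diagnostic message as the second operand of assert, with the message parenthesized so formatters keep it on its own line. A quick sketch of the resulting failure, on made-up sample data:

    samples = ["a.png", "b.png"]
    targets = [0]
    assert len(samples) == len(targets), (
        f"Samples ({len(samples)}) and targets ({len(targets)}) must have the same length"
    )
    # AssertionError: Samples (2) and targets (1) must have the same length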
quadra/datasets/patch.py
CHANGED
@@ -58,7 +58,7 @@ class PatchSklearnClassificationTrainDataset(Dataset):

         cls, counts = np.unique(targets_array, return_counts=True)
         max_count = np.max(counts)
-        for cl, count in zip(cls, counts):
+        for cl, count in zip(cls, counts, strict=False):
             idx_to_pick = list(np.where(targets_array == cl)[0])

             if count < max_count:
quadra/datasets/ssl.py
CHANGED
@@ -75,9 +75,9 @@ class TwoSetAugmentationDataset(Dataset):
         return the original image.

     Example:
-        >>> images[0] = global_transform[0](original_image)
-        >>> images[1] = global_transform[1](original_image)
-        >>> images[2:] = local_transform(s)(original_image)
+        >>> `images[0] = global_transform[0](original_image)`
+        >>> `images[1] = global_transform[1](original_image)`
+        >>> `images[2:] = local_transform(s)(original_image)`
     """

     def __init__(
quadra/metrics/segmentation.py
CHANGED
@@ -171,7 +171,7 @@ def segmentation_props(
     # Add dummy Dices so LSA is unique and i can compute FP and FN
     dice_mat = _pad_to_shape(dice_mat, (max_dim, max_dim), 1)
     lsa = linear_sum_assignment(dice_mat, maximize=False)
-    for row, col in zip(lsa[0], lsa[1]):
+    for row, col in zip(lsa[0], lsa[1], strict=False):
         # More preds than GTs --> False Positive
         if row < n_labels_pred and col >= n_labels_mask:
             min_row = pred_bbox[row][0]
quadra/models/base.py
CHANGED
@@ -76,7 +76,7 @@ class ModelSignatureWrapper(nn.Module):

         if isinstance(self.instance.forward, torch.ScriptMethod):
             # Handle torchscript backbones
-            for i, argument in enumerate(self.instance.forward.schema.arguments):
+            for i, argument in enumerate(self.instance.forward.schema.arguments):  # type: ignore[attr-defined]
                 if i < (len(args) + 1):  # +1 for self
                     continue

quadra/models/evaluation.py
CHANGED
@@ -209,7 +209,7 @@ class ONNXEvaluationModel(BaseEvaluationModel):

         onnx_inputs: dict[str, np.ndarray | torch.Tensor] = {}

-        for onnx_input, current_input in zip(self.model.get_inputs(), inputs):
+        for onnx_input, current_input in zip(self.model.get_inputs(), inputs, strict=False):
             if isinstance(current_input, torch.Tensor):
                 onnx_inputs[onnx_input.name] = current_input
                 use_pytorch = True
quadra/modules/base.py
CHANGED
@@ -7,6 +7,7 @@ import pytorch_lightning as pl
 import sklearn
 import torch
 import torchmetrics
+from pytorch_lightning.utilities.types import OptimizerLRScheduler
 from sklearn.linear_model import LogisticRegression
 from torch import nn
 from torch.optim import Optimizer
@@ -48,7 +49,7 @@ class BaseLightningModule(pl.LightningModule):
         """
         return self.model(x)

-    def configure_optimizers(self) ->
+    def configure_optimizers(self) -> OptimizerLRScheduler:
         """Get default optimizer if not passed a value.

         Returns:
@@ -68,7 +69,7 @@ class BaseLightningModule(pl.LightningModule):
             "monitor": "val_loss",
             "strict": False,
         }
-        return [self.optimizer], [lr_scheduler_conf]
+        return [self.optimizer], [lr_scheduler_conf]  # type: ignore[return-value]

     # pylint: disable=unused-argument
     def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx: int = 0):
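Note: configure_optimizers is now annotated with Lightning's OptimizerLRScheduler alias (imported in the hunk above), which covers the return shapes Lightning accepts. A minimal sketch under the same convention — the module and hyperparameters are placeholders, not quadra code:

    import pytorch_lightning as pl
    import torch
    from pytorch_lightning.utilities.types import OptimizerLRScheduler


    class TinyModule(pl.LightningModule):
        def __init__(self) -> None:
            super().__init__()
            self.layer = torch.nn.Linear(8, 1)

        def configure_optimizers(self) -> OptimizerLRScheduler:
            optimizer = torch.optim.SGD(self.parameters(), lr=0.1)
            lr_scheduler_conf = {
                "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer),
                "monitor": "val_loss",
                "strict": False,
            }
            # Same shape as the quadra hunk: a list of optimizers and a list of scheduler configs.
            return [optimizer], [lr_scheduler_conf]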
quadra/modules/ssl/byol.py
CHANGED
@@ -110,6 +110,7 @@ class BYOL(SSLModule):
         for student_ps, teacher_ps in zip(
             list(self.model.parameters()) + list(self.student_projection_mlp.parameters()),
             list(self.teacher.parameters()) + list(self.teacher_projection_mlp.parameters()),
+            strict=False,
         ):
             teacher_ps.data = teacher_ps.data * teacher_momentum + (1 - teacher_momentum) * student_ps.data

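Note: the loop above is BYOL's teacher EMA update; student and teacher parameter lists are expected to match one-to-one, and strict=False simply keeps zip's permissive default. A standalone sketch of the same update rule on toy tensors (not quadra code):

    import torch

    teacher_momentum = 0.99
    student = [torch.ones(2), torch.zeros(2)]
    teacher = [torch.zeros(2), torch.ones(2)]

    for student_ps, teacher_ps in zip(student, teacher, strict=False):
        # Exponential moving average: keep 99% of the teacher, blend in 1% of the student.
        teacher_ps.data = teacher_ps.data * teacher_momentum + (1 - teacher_momentum) * student_ps.data

    print(teacher[0])  # tensor([0.0100, 0.0100])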
quadra/tasks/anomaly.py
CHANGED
@@ -161,7 +161,7 @@ class AnomalibDetection(Generic[AnomalyDataModuleT], LightningTask[AnomalyDataMo
         all_output_flatten: dict[str, torch.Tensor | list] = {}

         for key in all_output[0]:
-            if
+            if isinstance(all_output[0][key], torch.Tensor):
                 tensor_gatherer = torch.cat([x[key] for x in all_output])
                 all_output_flatten[key] = tensor_gatherer
             else:
@@ -205,13 +205,15 @@ class AnomalibDetection(Generic[AnomalyDataModuleT], LightningTask[AnomalyDataMo
             class_to_idx.pop("false_defect")

         anomaly_scores = all_output_flatten["pred_scores"]
+
+        exportable_anomaly_scores: list[Any] | np.ndarray
         if isinstance(anomaly_scores, torch.Tensor):
             exportable_anomaly_scores = anomaly_scores.cpu().numpy()
         else:
             exportable_anomaly_scores = anomaly_scores

         # Zip the lists together to create rows for the CSV file
-        rows = zip(image_paths, pred_labels, gt_labels, exportable_anomaly_scores)
+        rows = zip(image_paths, pred_labels, gt_labels, exportable_anomaly_scores, strict=False)
         # Specify the CSV file name
         csv_file = "test_predictions.csv"
         # Write the data to the CSV file
@@ -483,7 +485,7 @@ class AnomalibEvaluation(Evaluation[AnomalyDataModule]):

         if hasattr(self.datamodule, "valid_area_mask") and self.datamodule.valid_area_mask is not None:
             mask_area = cv2.imread(self.datamodule.valid_area_mask, 0)
-            mask_area = (mask_area > 0).astype(np.uint8)
+            mask_area = (mask_area > 0).astype(np.uint8)

         if hasattr(self.datamodule, "crop_area") and self.datamodule.crop_area is not None:
             crop_area = self.datamodule.crop_area
@@ -499,12 +501,13 @@ class AnomalibEvaluation(Evaluation[AnomalyDataModule]):
             self.metadata["image_labels"],
             anomaly_scores,
             anomaly_maps,
+            strict=False,
         ),
         total=len(self.metadata["image_paths"]),
     ):
         img = cv2.imread(img_path, 0)
         if mask_area is not None:
-            img = img * mask_area
+            img = img * mask_area

         if crop_area is not None:
             img = img[crop_area[1] : crop_area[3], crop_area[0] : crop_area[2]]
quadra/tasks/base.py
CHANGED
@@ -382,15 +382,19 @@ class Evaluation(Generic[DataModuleT], Task[DataModuleT]):
         # We assume that each input size has the same height and width
         if input_size[1] != self.config.transforms.input_height:
             log.warning(
-
-                +
+                "Input height of the model (%s) is different from the one specified "
+                + "in the config (%s). Fixing the config.",
+                input_size[1],
+                self.config.transforms.input_height,
             )
             self.config.transforms.input_height = input_size[1]

         if input_size[2] != self.config.transforms.input_width:
             log.warning(
-
-                +
+                "Input width of the model (%s) is different from the one specified "
+                + "in the config (%s). Fixing the config.",
+                input_size[2],
+                self.config.transforms.input_width,
             )
             self.config.transforms.input_width = input_size[2]

quadra/tasks/classification.py
CHANGED
@@ -623,7 +623,9 @@ class SklearnClassification(Generic[SklearnClassificationDataModuleT], Task[Skle
         all_labels = all_labels[sorted_indices]

         # cycle over all train/test split
-        for train_dataloader, test_dataloader in zip(
+        for train_dataloader, test_dataloader in zip(
+            self.train_dataloader_list, self.test_dataloader_list, strict=False
+        ):
             # Reinit classifier
             self.model = self.config.model
             self.trainer.change_classifier(self.model)
@@ -685,7 +687,7 @@ class SklearnClassification(Generic[SklearnClassificationDataModuleT], Task[Skle
             dl: PyTorch dataloader
             feature_extractor: PyTorch backbone
         """
-        if isinstance(feature_extractor,
+        if isinstance(feature_extractor, TorchEvaluationModel | TorchscriptEvaluationModel):
             # TODO: I'm not sure torchinfo supports torchscript models
             # If we are working with torch based evaluation models we need to extract the model
             feature_extractor = feature_extractor.model
@@ -1202,6 +1204,8 @@ class ClassificationEvaluation(Evaluation[ClassificationDataModuleT]):
         probabilities = [max(item) for sublist in probabilities for item in sublist]
         if self.datamodule.class_to_idx is not None:
             idx_to_class = {v: k for k, v in self.datamodule.class_to_idx.items()}
+        else:
+            idx_to_class = None

         _, pd_cm, test_accuracy = get_results(
             test_labels=image_labels,
quadra/tasks/patch.py
CHANGED
@@ -301,7 +301,7 @@ class PatchSklearnTestClassification(Evaluation[PatchSklearnClassificationDataMo
             "test_results": None,
             "test_labels": None,
         }
-        self.class_to_skip: list[str] = []
+        self.class_to_skip: list[str] | None = []
         self.reconstruction_results: dict[str, Any]
         self.return_polygon: bool = True

quadra/tasks/segmentation.py
CHANGED
@@ -92,8 +92,10 @@ class Segmentation(Generic[SegmentationDataModuleT], LightningTask[SegmentationD
             len(self.datamodule.idx_to_class) + 1
         ):
             log.warning(
-
-                +
+                "Number of classes in the model (%s) does not match the number of "
+                + "classes in the datamodule (%d). Updating the model...",
+                module_config.model.num_classes,
+                len(self.datamodule.idx_to_class),
             )
             module_config.model.num_classes = len(self.datamodule.idx_to_class) + 1

@@ -341,7 +343,7 @@ class SegmentationAnalysisEvaluation(SegmentationEvaluation):
         if self.datamodule.test_dataset_available:
             stages.append("test")
             dataloaders.append(self.datamodule.test_dataloader())
-        for stage, dataloader in zip(stages, dataloaders):
+        for stage, dataloader in zip(stages, dataloaders, strict=False):
             log.info("Running inference on %s set with batch size: %d", stage, dataloader.batch_size)
             image_list, mask_list, mask_pred_list, label_list = [], [], [], []
             for batch in dataloader:
@@ -369,10 +371,10 @@ class SegmentationAnalysisEvaluation(SegmentationEvaluation):

         for stage, output in self.test_output.items():
             image_mean = OmegaConf.to_container(self.config.transforms.mean)
-            if not isinstance(image_mean, list) or any(not isinstance(x,
+            if not isinstance(image_mean, list) or any(not isinstance(x, int | float) for x in image_mean):
                 raise ValueError("Image mean is not a list of float or integer values, please check your config")
             image_std = OmegaConf.to_container(self.config.transforms.std)
-            if not isinstance(image_std, list) or any(not isinstance(x,
+            if not isinstance(image_std, list) or any(not isinstance(x, int | float) for x in image_std):
                 raise ValueError("Image std is not a list of float or integer values, please check your config")
             reports = create_mask_report(
                 stage=stage,
quadra/tasks/ssl.py
CHANGED
@@ -468,8 +468,7 @@ class EmbeddingVisualization(Task):
         self.report_folder = report_folder
         if self.model_path is None:
             raise ValueError(
-                "Model path cannot be found!, please specify it in the config or pass it as an argument for"
-                " evaluation"
+                "Model path cannot be found!, please specify it in the config or pass it as an argument for evaluation"
             )
         self.embeddings_path = os.path.join(self.model_path, self.report_folder)
         if not os.path.exists(self.embeddings_path):
@@ -547,7 +546,7 @@ class EmbeddingVisualization(Task):
                 im = interpolate(im, self.embedding_image_size)

                 images.append(im.cpu())
-                metadata.extend(zip(targets, class_names, file_paths))
+                metadata.extend(zip(targets, class_names, file_paths, strict=False))
                 counter += len(im)
         images = torch.cat(images, dim=0)
         embeddings = torch.cat(embeddings, dim=0)
quadra/utils/classification.py
CHANGED
@@ -46,12 +46,10 @@ def get_file_condition(
     if any(fil in root for fil in exclude_filter):
         return False

-
-
-
-
-
-    return True
+    return not (
+        include_filter is not None
+        and (not any(fil in file_name for fil in include_filter) and not any(fil in root for fil in include_filter))
+    )


 def natural_key(string_):
@@ -130,7 +128,7 @@ def find_images_and_targets(
     sorted_labels = sorted(unique_labels, key=natural_key)
     class_to_idx = {str(c): idx for idx, c in enumerate(sorted_labels)}

-    images_and_targets = [(f, l) for f, l in zip(filenames, labels) if l in class_to_idx]
+    images_and_targets = [(f, l) for f, l in zip(filenames, labels, strict=False) if l in class_to_idx]

     if sort:
         images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
@@ -210,7 +208,7 @@ def find_test_image(
             file_samples.append(sample_path)

     test_split = [os.path.join(folder, sample.strip()) for sample in file_samples]
-    labels = [t for s, t in zip(filenames, labels) if s in file_samples]
+    labels = [t for s, t in zip(filenames, labels, strict=False) if s in file_samples]
     filenames = [s for s in filenames if s in file_samples]
     log.info("Selected %d images using test_split_file for the test", len(filenames))
     if len(filenames) != len(file_samples):
@@ -353,7 +351,7 @@ def get_split(

     cl, counts = np.unique(targets, return_counts=True)

-    for num, _cl in zip(counts, cl):
+    for num, _cl in zip(counts, cl, strict=False):
         if num == 1:
             to_remove = np.where(np.array(targets) == _cl)[0][0]
             samples = np.delete(np.array(samples), to_remove)
@@ -378,7 +376,7 @@ def get_split(
             file_samples.append(sample_path)

     train_split = [os.path.join(image_dir, sample.strip()) for sample in file_samples]
-    targets = np.array([t for s, t in zip(samples, targets) if s in file_samples])
+    targets = np.array([t for s, t in zip(samples, targets, strict=False) if s in file_samples])
     samples = np.array([s for s in samples if s in file_samples])

     if limit_training_data is not None:
quadra/utils/evaluation.py
CHANGED
@@ -4,6 +4,7 @@ import os
 from ast import literal_eval
 from collections.abc import Callable
 from functools import wraps
+from typing import Any

 import matplotlib.pyplot as plt
 import numpy as np
@@ -123,7 +124,7 @@ def calculate_mask_based_metrics(
     th_thresh_preds = (th_preds > threshold).float().cpu()
     thresh_preds = th_thresh_preds.squeeze(0).numpy()
     dice_scores = metric(th_thresh_preds, th_masks, reduction=None).numpy()
-    result = {}
+    result: dict[str, Any] = {}
     if multilabel:
         if n_classes is None:
             raise ValueError("n_classes arg shouldn't be None when multilabel is True")
@@ -167,7 +168,7 @@ def calculate_mask_based_metrics(
         "Accuracy": [],
     }
     for idx, (image, pred, mask, thresh_pred, dice_score) in enumerate(
-        zip(images, preds, masks, thresh_preds, dice_scores)
+        zip(images, preds, masks, thresh_preds, dice_scores, strict=False)
     ):
         if np.sum(mask) == 0:
             good_dice.append(dice_score)
@@ -261,6 +262,7 @@ def create_mask_report(
     th_labels = output["label"]
     n_classes = th_preds.shape[1]
     # TODO: Apply sigmoid is a wrong name now
+    # TODO: Apply sigmoid false is untested
     if apply_sigmoid:
         if n_classes == 1:
             th_preds = torch.nn.Sigmoid()(th_preds)
@@ -271,6 +273,13 @@ def create_mask_report(
             # Compute labels from the given masks since by default they are all 0
             th_labels = th_masks.max(dim=2)[0].max(dim=2)[0].squeeze(dim=1)
             show_orj_predictions = False
+    elif n_classes == 1:
+        th_thresh_preds = (th_preds > threshold).float()
+    else:
+        th_thresh_preds = torch.argmax(th_preds, dim=1).float().unsqueeze(1)
+        # Compute labels from the given masks since by default they are all 0
+        th_labels = th_masks.max(dim=2)[0].max(dim=2)[0].squeeze(dim=1)
+        show_orj_predictions = False

     mean = np.asarray(mean)
     std = np.asarray(std)
@@ -303,7 +312,7 @@ def create_mask_report(
     non_zero_score_idx = sorted_idx[~binary_labels]
     zero_score_idx = sorted_idx[binary_labels]
     file_paths = []
-    for name, current_score_idx in zip(["good", "bad"], [zero_score_idx, non_zero_score_idx]):
+    for name, current_score_idx in zip(["good", "bad"], [zero_score_idx, non_zero_score_idx], strict=False):
         if len(current_score_idx) == 0:
             continue

quadra/utils/export.py
CHANGED
@@ -45,11 +45,11 @@ def generate_torch_inputs(
     """
     inp = None

-    if isinstance(input_shapes,
+    if isinstance(input_shapes, ListConfig | DictConfig):
         input_shapes = OmegaConf.to_container(input_shapes)

     if isinstance(input_shapes, list):
-        if any(isinstance(inp,
+        if any(isinstance(inp, Sequence | dict) for inp in input_shapes):
             return [generate_torch_inputs(inp, device, half_precision, dtype) for inp in input_shapes]

     # Base case
@@ -59,7 +59,7 @@ def generate_torch_inputs(
         return {k: generate_torch_inputs(v, device, half_precision, dtype) for k, v in input_shapes.items()}

     if isinstance(input_shapes, tuple):
-        if any(isinstance(inp,
+        if any(isinstance(inp, Sequence | dict) for inp in input_shapes):
             # The tuple contains a list, tuple or dict
             return tuple(generate_torch_inputs(inp, device, half_precision, dtype) for inp in input_shapes)

@@ -324,7 +324,7 @@ def _safe_export_half_precision_onnx(
     onnx_config: DictConfig,
     input_shapes: list[Any],
     input_names: list[str],
-):
+) -> bool:
     """Check that the exported half precision ONNX model does not contain NaN values. If it does, attempt to export
     the model with a more stable export and overwrite the original model.

quadra/utils/mlflow.py
CHANGED
@@ -11,6 +11,7 @@ except ImportError:
 from collections.abc import Sequence
 from typing import Any

+import numpy as np
 import torch
 from pytorch_lightning import Trainer
 from pytorch_lightning.loggers import MLFlowLogger
@@ -45,6 +46,7 @@ def infer_signature_input(input_tensor: Any) -> Any:
     Raises:
         ValueError: If the input type is not supported or when nested dicts or sequences are encountered.
     """
+    signature: dict[str, Any] | np.ndarray
     if isinstance(input_tensor, Sequence):
         # Mlflow currently does not support sequence outputs, so we use a dict instead
         signature = {}
quadra/utils/models.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import math
 import warnings
 from collections.abc import Callable
-from typing import
+from typing import cast

 import numpy as np
 import timm
@@ -114,7 +114,7 @@ def get_feature(
         labels: input_labels
         grayscale_cams: Gradcam output maps, None if gradcam arg is False
     """
-    if isinstance(feature_extractor,
+    if isinstance(feature_extractor, TorchEvaluationModel | TorchscriptEvaluationModel):
         # If we are working with torch based evaluation models we need to extract the model
         feature_extractor = feature_extractor.model
     elif isinstance(feature_extractor, ONNXEvaluationModel):
@@ -160,9 +160,7 @@ def get_feature(
         x1 = x1.to(feature_extractor.device).to(feature_extractor.model_dtype)

     if gradcam:
-        y_hat = cast(
-            Union[list[torch.Tensor], tuple[torch.Tensor], torch.Tensor], feature_extractor(x1).detach()
-        )
+        y_hat = cast(list[torch.Tensor] | tuple[torch.Tensor] | torch.Tensor, feature_extractor(x1).detach())
         # mypy can't detect that gradcam is true only if we have a features_extractor
         if is_vision_transformer(feature_extractor.features_extractor):  # type: ignore[union-attr]
             grayscale_cam_low_res = grad_rollout(
@@ -177,10 +175,10 @@ def get_feature(
         feature_extractor.zero_grad(set_to_none=True)  # type: ignore[union-attr]
     else:
         with torch.no_grad():
-            y_hat = cast(
+            y_hat = cast(list[torch.Tensor] | tuple[torch.Tensor] | torch.Tensor, feature_extractor(x1))
         grayscale_cams = None

-    if isinstance(y_hat,
+    if isinstance(y_hat, list | tuple):
         y_hat = y_hat[0].cpu()
     else:
         y_hat = y_hat.cpu()
quadra/utils/patch/dataset.py
CHANGED
@@ -566,7 +566,7 @@ def generate_patch_dataset(
         num_workers=num_workers,
     )

-    for phase, split_dict in zip(["val", "test"], [val_data_dictionary, test_data_dictionary]):
+    for phase, split_dict in zip(["val", "test"], [val_data_dictionary, test_data_dictionary], strict=False):
         if len(split_dict) > 0:
             log.info("Generating %s set", phase)
             generate_patch_sliding_window_dataset(
@@ -908,9 +908,9 @@ def extract_patches(
         patches = np.concatenate([patches, extra_patches_h], axis=0)

     # If this is not true there's some strange case I didn't take into account
-    assert (
-        patches.shape
-    )
+    assert patches.shape[0] == patch_num_h and patches.shape[1] == patch_num_w, (
+        f"Patch shape {patches.shape} does not match the expected shape {patch_number}"
+    )

     return patches

@@ -1059,11 +1059,12 @@ def create_h5(
     h = img.shape[0]
     w = img.shape[1]

+    mask: np.ndarray
     if item["mask"] is None:
-        mask = np.zeros([h, w])
+        mask = np.zeros([h, w], dtype=np.uint8)
     else:
         # this works even if item["mask"] is already an absolute path
-        mask = cv2.imread(os.path.join(output_folder, item["mask"]), 0)
+        mask = cv2.imread(os.path.join(output_folder, item["mask"]), 0)

     if patch_size is not None:
         patch_height = patch_size[1]
quadra/utils/patch/metrics.py
CHANGED
@@ -98,9 +98,9 @@ def compute_patch_metrics(
     if (patch_h is not None and patch_w is not None) and (patch_num_h is not None and patch_num_w is not None):
         raise ValueError("Either number of patches or patch size is required for reconstruction")

-    assert (patch_h is not None and patch_w is not None) or (
-
-    )
+    assert (patch_h is not None and patch_w is not None) or (patch_num_h is not None and patch_num_w is not None), (
+        "Either number of patches or patch size is required for reconstruction"
+    )

     if patch_h is not None and patch_w is not None and patch_num_h is not None and patch_num_w is not None:
         warnings.warn(
@@ -191,7 +191,7 @@ def compute_patch_metrics(
     if annotated_good is not None:
         gt_img[np.isin(gt_img, annotated_good)] = 0

-    gt_img_binary = (gt_img > 0).astype(bool)
+    gt_img_binary = (gt_img > 0).astype(bool)
     regions_pred = label(output_mask).astype(np.uint8)

     for k in range(1, regions_pred.max() + 1):
@@ -203,8 +203,11 @@ def compute_patch_metrics(
     output_mask = (output_mask > 0).astype(np.uint8)
     gt_img = label(gt_img)

-
-
+    if gt_img is None:
+        raise RuntimeError("Ground truth mask is None after label and it should not be")
+
+    for i in range(1, gt_img.max() + 1):
+        region = (gt_img == i).astype(bool)
         if np.sum(np.bitwise_and(region, output_mask)) == 0:
             false_region_good += 1
         else:
quadra/utils/patch/visualization.py
CHANGED
@@ -69,13 +69,13 @@ def plot_patch_reconstruction(
             points = [[item["x"], item["y"]] for item in region["points"]]
             c_label = region["label"]

-            out = cv2.drawContours(
+            out = cv2.drawContours(  # type: ignore[call-overload]
                 out,
                 np.array([points], np.int32),
                 -1,
                 class_to_idx[c_label],
                 thickness=cv2.FILLED,
-            )
+            )
     else:
         out = reconstruction["prediction"]

quadra/utils/utils.py
CHANGED
@@ -438,7 +438,7 @@ def flatten_list(input_list: Iterable[Any]) -> Iterator[Any]:
         The iterator over the flattend list
     """
     for v in input_list:
-        if isinstance(v, Iterable) and not isinstance(v,
+        if isinstance(v, Iterable) and not isinstance(v, str | bytes):
             yield from flatten_list(v)
         else:
             yield v
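Note: the truncated isinstance checks throughout this diff were completed with PEP 604 union syntax (X | Y), which Python 3.10+ accepts directly as the classinfo argument of isinstance in place of a tuple. A self-contained sketch mirroring flatten_list above:

    from collections.abc import Iterable

    def flatten(items):
        for v in items:
            # str and bytes are themselves Iterable, so exclude them
            # to avoid recursing endlessly over single characters.
            if isinstance(v, Iterable) and not isinstance(v, str | bytes):
                yield from flatten(v)
            else:
                yield v

    print(list(flatten([1, [2, [3, "ab"]], b"cd"])))  # [1, 2, 3, 'ab', b'cd']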
quadra/utils/validator.py
CHANGED
@@ -72,9 +72,7 @@ def check_all_arguments(callable_variable: str, configuration_arguments: list[st
     """
     for argument in configuration_arguments:
         if argument not in argument_names:
-            error_string = (
-                f"`{argument}` is not a valid argument passed " f"from configuration to `{callable_variable}`."
-            )
+            error_string = f"`{argument}` is not a valid argument passed from configuration to `{callable_variable}`."
             closest_match = difflib.get_close_matches(argument, argument_names, n=1, cutoff=0.5)
             if len(closest_match) > 0:
                 error_string += f" Did you mean `{closest_match[0]}`?"
quadra/utils/visualization.py
CHANGED
@@ -46,7 +46,7 @@ class UnNormalize:
             new_t = tensor.detach().clone()
         else:
             new_t = tensor
-        for t, m, s in zip(new_t, self.mean, self.std):
+        for t, m, s in zip(new_t, self.mean, self.std, strict=False):
             t.mul_(s).add_(m)
             # The normalize code -> t.sub_(m).div_(s)
         return new_t
@@ -82,7 +82,7 @@ def create_grid_figure(
             ax[i][j].get_xaxis().set_ticks([])
             ax[i][j].get_yaxis().set_ticks([])
     if row_names is not None:
-        for ax, name in zip(ax[:, 0], row_names):  # noqa: B020
+        for ax, name in zip(ax[:, 0], row_names, strict=False):  # noqa: B020
             ax.set_ylabel(name, rotation=90)

     plt.tight_layout()
@@ -98,12 +98,12 @@ def create_visualization_dataset(dataset: torch.utils.data.Dataset):
     """Handle different types of transforms."""
     if isinstance(transforms, albumentations.BaseCompose):
         transforms.transforms = convert_transforms(transforms.transforms)
-    if isinstance(transforms,
+    if isinstance(transforms, list | ListConfig | TransformsSeqType):
         transforms = [convert_transforms(t) for t in transforms]
-    if isinstance(transforms,
+    if isinstance(transforms, dict | DictConfig):
         for tname, t in transforms.items():
             transforms[tname] = convert_transforms(t)
-    if isinstance(transforms,
+    if isinstance(transforms, Normalize | ToTensorV2):
         return NoOp(p=1)
     return transforms

@@ -362,6 +362,9 @@ def plot_classification_results(
             test_label = idx_to_class[test_labels[i]]
         except Exception:
             test_label = test_labels[i]
+    else:
+        pred_label = pred_labels[i]
+        test_label = test_labels[i]

     ax.axis("off")
     ax.set_title(f"True: {str(test_label)}\nPred {str(pred_label)}")
quadra/utils/vit_explainability.py
CHANGED
@@ -153,7 +153,7 @@ def grad_rollout(
     """
     result = torch.eye(attentions[0].size(-1))
     with torch.no_grad():
-        for attention, grad in zip(attentions, gradients):
+        for attention, grad in zip(attentions, gradients, strict=False):
             weights = grad
             attention_heads_fused = torch.mean((attention * weights), dim=1)
             attention_heads_fused[attention_heads_fused < 0] = 0
{quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: quadra
-Version: 2.3.0a3
+Version: 2.3.2a0
 Summary: Deep Learning experiment orchestration library
 Home-page: https://orobix.github.io/quadra
 License: Apache-2.0
@@ -35,7 +35,7 @@ Requires-Dist: numpy (<2)
 Requires-Dist: nvitop (>=0.11,<0.12)
 Requires-Dist: onnx (==1.15.0) ; extra == "onnx"
 Requires-Dist: onnxconverter-common (>=1.14.0,<2.0.0) ; extra == "onnx"
-Requires-Dist: onnxruntime_gpu (==1.
+Requires-Dist: onnxruntime_gpu (==1.21.0) ; extra == "onnx"
 Requires-Dist: onnxsim (==0.4.28) ; extra == "onnx"
 Requires-Dist: opencv_python_headless (>=4.7.0,<4.8.0)
 Requires-Dist: pandas (<2.0)
{quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
-quadra/__init__.py,sha256=
+quadra/__init__.py,sha256=ymSCYtvSOBpD8abB7JXFoK4CFM_FY2DMyCntNUqp7os,114
 quadra/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-quadra/callbacks/anomalib.py,sha256=
-quadra/callbacks/lightning.py,sha256=
+quadra/callbacks/anomalib.py,sha256=WLBEGhZA9HoP4Yh9UbbC2GzDOKYTkvU9EY1lkZcV7Fs,11971
+quadra/callbacks/lightning.py,sha256=qvtzDiv8ZUV7K11gKHKWCyo-a9XR_Jm_M-IEicTM1Yo,20242
 quadra/callbacks/mlflow.py,sha256=4LKjrgbRCHP5dOCoDpF7J25gaBgABa0Rof-EA61Iqug,10129
 quadra/callbacks/scheduler.py,sha256=zrglcTUvMO236VchQFtCSlA-XXhc6a3HVWX0uDVQoyc,2656
 quadra/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -187,22 +187,22 @@ quadra/configs/transforms/dino.yaml,sha256=NtEbtJPHYkR9DOBPwXR33uvrEdqv8WNyqRlXr
 quadra/configs/transforms/linear_eval.yaml,sha256=fXmJBEwTWQ-QBMNV0mSG9wcrj31YGIV_czcRDczc1ss,488
 quadra/datamodules/__init__.py,sha256=y00iX2YAy6CJzPstKSBNq8_1YsYTRr_sCvqaL-WI7Z8,636
 quadra/datamodules/anomaly.py,sha256=_3FZNSwdMj-ECXlPQDslswtaMn0F1EgzA0q0UH-UgFY,6670
-quadra/datamodules/base.py,sha256=
-quadra/datamodules/classification.py,sha256=
+quadra/datamodules/base.py,sha256=QGkJ8Lq6hznHvaXjD8mhJhrinrs4ZFlZD3-B5cLU0cQ,14010
+quadra/datamodules/classification.py,sha256=VwQd-zhzJuLgq5Kg1niOY4pnRbO7Sk4B77dWiTFv4do,41622
 quadra/datamodules/generic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quadra/datamodules/generic/imagenette.py,sha256=3hOb-GmvnKx_hqrSqRcAcf22PjtCQ3CY_-5mlaZSTIM,5564
 quadra/datamodules/generic/mnist.py,sha256=j4xWEWQb1utW3yyozgHD1tP0kOAtLpRsgeIBZ1cIiP0,3425
 quadra/datamodules/generic/mvtec.py,sha256=3Ib8JyY1Eg7wbPL2dXw22YCoy_gsitksofFShLQ9Itw,2700
 quadra/datamodules/generic/oxford_pet.py,sha256=tumWy9TBThvVQZ2JOyghosWJEEsYjyXN6pZMJ9C5dBY,6822
 quadra/datamodules/patch.py,sha256=y7leDt1MyVg0LnqKgWCZ0i6cuVln10fiG4X8EFbl-_Q,7789
-quadra/datamodules/segmentation.py,sha256=
+quadra/datamodules/segmentation.py,sha256=hhfOs7QoYslHYfWfnAgZzSusj2tus8k-h7SBqGNVT8E,29004
 quadra/datamodules/ssl.py,sha256=U63FCdcRJjx4K0RZzkKJfvYJhFpvWTnlBBCtXirn_F4,5709
 quadra/datasets/__init__.py,sha256=nVpqp2ffQ6omqCMB3r1ajcUGgUad0eSkDt-kNWDGblU,669
-quadra/datasets/anomaly.py,sha256=
-quadra/datasets/classification.py,sha256=
-quadra/datasets/patch.py,sha256=
+quadra/datasets/anomaly.py,sha256=4rCd2-frgMH3RfQYVFYn5ZXxTKbPOk8GwE-BZIiLwFY,11892
+quadra/datasets/classification.py,sha256=ISKcY2PwD3HNv1JPPbDIJRJWJmu3KR3hlx3HUxlXYpE,7530
+quadra/datasets/patch.py,sha256=imNJONPoREivSZ-6WqYO2zE80PDEr-oCm3rdJuKlWz0,4803
 quadra/datasets/segmentation.py,sha256=cDs45eRh_IBSLB0K5xDos-D4KySRQN64BzaPKGBF7OI,9056
-quadra/datasets/ssl.py,sha256=
+quadra/datasets/ssl.py,sha256=FLL3dYCKnMymtwZfPEi0TzXI6lh6X3HpbqVzaEoGbeU,3931
 quadra/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quadra/losses/classification/__init__.py,sha256=R1rhnZsksSrY0Tntc7ITViszbbW6i_705zyLlMpcjPs,153
 quadra/losses/classification/asl.py,sha256=ywfT_ifkHoA7VpAhOiJty0OqzKwFqe0OU5Ands1cI0I,2844
@@ -219,21 +219,21 @@ quadra/losses/ssl/simsiam.py,sha256=uCCbqU9aYMwNa3re0qkeEK5Iz7Hxi0jAcEc-sCWZ8fc,
 quadra/losses/ssl/vicreg.py,sha256=ANvhYJz6iPv1A-OBXgBSrZrDG-1VmPtK1IZDtyFqNHE,2427
 quadra/main.py,sha256=6ZYKytVvCzQjgP_0QA6-3ICzVppsbRgPjF-csLKv85o,1407
 quadra/metrics/__init__.py,sha256=HsTK1gxsjp8_MYgA5caa4OK8sXLqtK_tt9wYyjtFnOc,79
-quadra/metrics/segmentation.py,sha256=
+quadra/metrics/segmentation.py,sha256=tVRYEyMiwD0RJ7NtoGRoSbwb8sAKoVmvzEhV6-3iQT4,9465
 quadra/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-quadra/models/base.py,sha256=
+quadra/models/base.py,sha256=QqMRQWqIsUuUHeInqMHZv3wv7Xeqz-zKe4cAQeqaN3M,5544
 quadra/models/classification/__init__.py,sha256=c03CGDyvipXgU7ybSosOaeTl1aM1ge6TqMUgMiTpQtA,243
 quadra/models/classification/backbones.py,sha256=haHNPC-XZ8Jj1i47cfUj8JHy_I-rins-nNfccrPBffo,6281
 quadra/models/classification/base.py,sha256=w-mDPQPtIrNclxjqsve5BTmNhNgnWGh7uJfE5HaTFPA,2996
-quadra/models/evaluation.py,sha256=
+quadra/models/evaluation.py,sha256=LQg2K6PDIKK0ZnkP4pHfRNnKO4WeaROoYoNFA3Bctg0,10709
 quadra/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quadra/modules/backbone.py,sha256=xiZBqgzr1S45GX9mydl29TFuahLDaHrU7Fy73LGIyGI,909
-quadra/modules/base.py,sha256=
+quadra/modules/base.py,sha256=y96PSFJeo4gswVj3a6uNnoirg-dMgS0MsYDN51fQQ9A,10382
 quadra/modules/classification/__init__.py,sha256=6keltBhC1yzgbNttBuykNYJAUMyOrY-HDNgGZGfI93I,141
 quadra/modules/classification/base.py,sha256=QdHtHY2tF_qh2wU01Oo0TWjh9CTqa46tyF4VgcLd__M,11937
 quadra/modules/ssl/__init__.py,sha256=oeUoGHrsESZ0595-JxPxURBP124jtNfrITbVovBpANA,302
 quadra/modules/ssl/barlowtwins.py,sha256=iW6f7ADSEkbs7z-88x680204-Ez-iF1Yd2SdQzcLpRY,1884
-quadra/modules/ssl/byol.py,sha256=
+quadra/modules/ssl/byol.py,sha256=3UhUr72kpI2lM9JtVPqrTcpTo60NsAHNu3SIwD5_RrI,7114
 quadra/modules/ssl/common.py,sha256=nQMsYEu4PUueMq0KNe898h3wGS2RVQBN0NCpYnMyRqI,9898
 quadra/modules/ssl/dino.py,sha256=Xs4wRYvvxeLuHtOW5Gf-xaqAvT97cIuOG6PlYduPDm4,7300
 quadra/modules/ssl/hyperspherical.py,sha256=yEY0WvYFLvKCeKKJDAWCEttYwNVjB5ai6N2FxXKqYQ4,6356
@@ -248,31 +248,31 @@ quadra/schedulers/__init__.py,sha256=mQivr18c0j36hpV3Lm8nlyBVKFevWp8TtLuTfvI9kQc
 quadra/schedulers/base.py,sha256=T1EdrLOJ0i9MzWoLCkrNA0uypm7hJ-L6NFhjIXFB6NE,1462
 quadra/schedulers/warmup.py,sha256=chzzrK7OqqlicBCxiF4CqMYNrWu6nflIbRE-C86Jrw0,4962
 quadra/tasks/__init__.py,sha256=tmAfMoH0k3UC7r2pNrgbBa1Pfc3tpLl3IObFF6Z0eRE,820
-quadra/tasks/anomaly.py,sha256=
-quadra/tasks/base.py,sha256=
-quadra/tasks/classification.py,sha256=
-quadra/tasks/patch.py,sha256=
-quadra/tasks/segmentation.py,sha256=
-quadra/tasks/ssl.py,sha256=
+quadra/tasks/anomaly.py,sha256=RHeiM1vZF1zsva37iYdiGx_HLgdAp8lXnmUzXja69YU,24638
+quadra/tasks/base.py,sha256=piYlTFtvqH-4s4oEq4GczdAs_gL29UHAJGsOC5Sd3Bc,14187
+quadra/tasks/classification.py,sha256=05l3QM3dsU2yTWhXxNAcJ8sZM0Vbfgey-e5EV6p1TX8,52816
+quadra/tasks/patch.py,sha256=nzo8o-ei7iF1Iarvd8-c08s0Rs_lPvVPDLAbkFMx-Qw,20251
+quadra/tasks/segmentation.py,sha256=9Qy-V0Wvoofl7IrfotnSMgBIXcZd-WfZZtetyqmB0FY,16260
+quadra/tasks/ssl.py,sha256=XsaC9hbhvTA5UfHeRaaCstx9mTYacLRmgoCF5Tj9R5M,20547
 quadra/trainers/README.md,sha256=XtpbUOxwvPpOUL7E5s2JHjRgwT-CRKTxsBeUSXrg9BU,248
 quadra/trainers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quadra/trainers/classification.py,sha256=YeJ0z7Vk0-dsMTcoKBxSdSA0rxtilEcQTp-Zq9Xi1hw,7042
 quadra/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 quadra/utils/anomaly.py,sha256=49vFvT5-4SxczsEM2Akcut_M1DDwKlOVdGv36oLTgR0,4067
-quadra/utils/classification.py,sha256=
+quadra/utils/classification.py,sha256=dKFuv4RywWhvhstOnEOnaf-6qcViUK0dTgah9m9mw2Q,24917
 quadra/utils/deprecation.py,sha256=zF_S-yqenaZxRBOudhXts0mX763WjEUWCnHd09TZnwY,852
-quadra/utils/evaluation.py,sha256=
-quadra/utils/export.py,sha256=
+quadra/utils/evaluation.py,sha256=oooRJPu1AaHhOwvB1Y6SFjQ645OkgrDzKtUvwWq8oq4,19005
+quadra/utils/export.py,sha256=ghNF8mQw-JjZiVeBJ0y8yIQkx8EG8ssPorn3aaIsgcA,20840
 quadra/utils/imaging.py,sha256=Cz7sGb_axEmnGcwQJP2djFZpIpGCPFIBGT8NWVV-OOE,866
 quadra/utils/logger.py,sha256=tQJ4xpTAFKx1g-UUm5K1x7zgoP6qoXpcUHQyu0rOr1w,556
-quadra/utils/mlflow.py,sha256=
+quadra/utils/mlflow.py,sha256=DVso1lxn126hil8i4tTf5WFUPJ8uJNAzNU8OXbXwOzw,3586
 quadra/utils/model_manager.py,sha256=P5JtY95p6giQ6mb4TUnWsNwUh5ClzHBillnG5SA56QY,12546
-quadra/utils/models.py,sha256=
+quadra/utils/models.py,sha256=49AXecNN7mg8uqO-YW0sLbPxbvWfTI4E4NNpTesW6HE,19699
 quadra/utils/patch/__init__.py,sha256=YenDdsI937kyAJiE0dP3_Xua8gHIoFjheoWMnpx_TGU,509
-quadra/utils/patch/dataset.py,sha256=
-quadra/utils/patch/metrics.py,sha256=
+quadra/utils/patch/dataset.py,sha256=tRwrc01p0sj4nLQ-6b9mvnkTQrjtFSv5qMYiTJRSXKU,61401
+quadra/utils/patch/metrics.py,sha256=r7zxGXC2hU6EiMbfNoUmi6BC0EEUZs9Jy_mtI5Q1x5g,17693
 quadra/utils/patch/model.py,sha256=F-wbMZvM8nS_ZSYewg2SofD7H0I6DH1DBA2ACSr0fCY,5746
-quadra/utils/patch/visualization.py,sha256=
+quadra/utils/patch/visualization.py,sha256=V64SsXcQ2UhBVH2gzzrjF_OaxL58ktEo1Jdzcos3AT8,7044
 quadra/utils/resolver.py,sha256=p8t95b__htcR3hdnF9RtlWNKLTVUWYjADozYNj9lIzQ,1397
 quadra/utils/segmentation.py,sha256=rWOE1qw2RS0dpgJyHqfQURw86K6G2Hst6mpu97PI5Ac,920
 quadra/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -280,7 +280,7 @@ quadra/utils/tests/fixtures/__init__.py,sha256=5KHMpKGK31PRWfdaiM5gmcs3gyTcCQQEv
 quadra/utils/tests/fixtures/dataset/__init__.py,sha256=hfNtCxu3PPAzqKcKw40BnsIsUHNDlGjYhLuQ5n48deA,1298
 quadra/utils/tests/fixtures/dataset/anomaly.py,sha256=x9X_zafn7Kvjya0_ztCwYRXVEoubND_9yoHxYgn9pkY,3899
 quadra/utils/tests/fixtures/dataset/classification.py,sha256=Mh4t1fTS3thd0hdfh-jWD04WKCIBTeU2x1Ds5pJnZmM,15422
-quadra/utils/tests/fixtures/dataset/imagenette.py,sha256=
+quadra/utils/tests/fixtures/dataset/imagenette.py,sha256=ICwgvYWngEfjaU2JP3riaHHf0FXlOOEyeUtpJ1fYEds,1460
 quadra/utils/tests/fixtures/dataset/segmentation.py,sha256=mSe93hJEpjDdFM69yGoYUzGa-M5T2P8XE4_Z1ZiORuM,5855
 quadra/utils/tests/fixtures/models/__init__.py,sha256=5cxfDtbV-_prBlVu9L2tC34C8QGyng9chUbJYGFv0J0,123
 quadra/utils/tests/fixtures/models/anomaly.py,sha256=J5dG95RlWNds5xqArQY4JlpmqUcLgoESxVC7K5O7ez4,2942
@@ -288,13 +288,13 @@ quadra/utils/tests/fixtures/models/classification.py,sha256=5qpyOonqK6W2LCUWEHhm
 quadra/utils/tests/fixtures/models/segmentation.py,sha256=CTNXeEPcFxFq-YcNfQi5DbbytPZwBQaZn5dQq3L41j0,765
 quadra/utils/tests/helpers.py,sha256=9PJlwozUl_lpQW-Ck-tN7sGFcgeieEd3q56aYuwMIlk,2381
 quadra/utils/tests/models.py,sha256=KbAlv_ukxaUYsyVNUO_dM0NyIosx8RpC0EVyF1HvPkM,507
-quadra/utils/utils.py,sha256=
-quadra/utils/validator.py,sha256=
-quadra/utils/visualization.py,sha256=
-quadra/utils/vit_explainability.py,sha256=
+quadra/utils/utils.py,sha256=3tgj_tFFhKsGNJ9jrmULI9rWxFyhuUe53Y5SBJFkwSM,19124
+quadra/utils/validator.py,sha256=wmVXycB90VNyAbKBUVncFCxK4nsYiOWJIY3ISXwxYCY,4632
+quadra/utils/visualization.py,sha256=yYm7lPziUOlybxigZ2qTycNewb67Q80H4hjQGWUh788,16094
+quadra/utils/vit_explainability.py,sha256=Gh6BHaDEzWxOjJp1aqvCxLt9Rb8TXd5uKXOAx7-acUk,13351
 hydra_plugins/quadra_searchpath_plugin.py,sha256=AAn4TzR87zUK7nwSsK-KoqALiPtfQ8FvX3fgZPTGIJ0,1189
-quadra-2.3.
-quadra-2.3.
-quadra-2.3.
-quadra-2.3.
-quadra-2.3.
+quadra-2.3.2a0.dist-info/LICENSE,sha256=8cTbQtcWa02YJoSpMeV_gxj3jpMTkxvl-w3WJ5gV_QE,11342
+quadra-2.3.2a0.dist-info/METADATA,sha256=S24N5clfdfcMdjk0fX_vAJbINMhzQTgagQ9FOoYkz_U,17600
+quadra-2.3.2a0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+quadra-2.3.2a0.dist-info/entry_points.txt,sha256=sRYonBZyx-sAJeWcQNQoVQIU5lm02cnCQt6b15k0WHU,43
+quadra-2.3.2a0.dist-info/RECORD,,
{quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/LICENSE
File without changes
{quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/WHEEL
File without changes
{quadra-2.3.0a3.dist-info → quadra-2.3.2a0.dist-info}/entry_points.txt
File without changes