quadra-2.2.5-py3-none-any.whl → quadra-2.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. quadra/__init__.py +1 -1
  2. quadra/callbacks/anomalib.py +3 -2
  3. quadra/callbacks/lightning.py +2 -0
  4. quadra/configs/callbacks/all.yaml +13 -0
  5. quadra/configs/callbacks/default.yaml +0 -3
  6. quadra/configs/callbacks/default_anomalib.yaml +0 -3
  7. quadra/datamodules/base.py +5 -5
  8. quadra/datamodules/classification.py +2 -2
  9. quadra/datamodules/segmentation.py +6 -6
  10. quadra/datasets/anomaly.py +2 -2
  11. quadra/datasets/classification.py +7 -7
  12. quadra/datasets/patch.py +1 -1
  13. quadra/metrics/segmentation.py +1 -1
  14. quadra/models/base.py +1 -1
  15. quadra/models/evaluation.py +1 -1
  16. quadra/modules/base.py +3 -2
  17. quadra/modules/ssl/byol.py +1 -0
  18. quadra/tasks/anomaly.py +7 -4
  19. quadra/tasks/base.py +8 -4
  20. quadra/tasks/classification.py +6 -2
  21. quadra/tasks/patch.py +1 -1
  22. quadra/tasks/segmentation.py +7 -5
  23. quadra/tasks/ssl.py +2 -3
  24. quadra/utils/classification.py +8 -10
  25. quadra/utils/evaluation.py +12 -3
  26. quadra/utils/export.py +4 -4
  27. quadra/utils/mlflow.py +2 -0
  28. quadra/utils/models.py +5 -7
  29. quadra/utils/patch/dataset.py +7 -6
  30. quadra/utils/patch/metrics.py +9 -6
  31. quadra/utils/patch/visualization.py +2 -2
  32. quadra/utils/utils.py +1 -1
  33. quadra/utils/validator.py +1 -3
  34. quadra/utils/visualization.py +8 -5
  35. quadra/utils/vit_explainability.py +1 -1
  36. {quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/METADATA +9 -9
  37. {quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/RECORD +40 -40
  38. {quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/LICENSE +0 -0
  39. {quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/WHEEL +0 -0
  40. {quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/entry_points.txt +0 -0
quadra/__init__.py CHANGED
@@ -1,4 +1,4 @@
-__version__ = "2.2.5"
+__version__ = "2.3.0"
 
 
 def get_version():
quadra/callbacks/anomalib.py CHANGED
@@ -64,7 +64,7 @@ class Visualizer:
         self.figure.subplots_adjust(right=0.9)
 
         axes = self.axis if len(self.images) > 1 else [self.axis]
-        for axis, image_dict in zip(axes, self.images):
+        for axis, image_dict in zip(axes, self.images, strict=False):
             axis.axes.xaxis.set_visible(False)
             axis.axes.yaxis.set_visible(False)
             axis.imshow(image_dict["image"], image_dict["color_map"], vmin=0, vmax=255)
@@ -201,6 +201,7 @@ class VisualizerCallback(Callback):
                 outputs["label"],
                 outputs["pred_labels"],
                 outputs["pred_scores"],
+                strict=False,
             )
         ):
             denormalized_image = Denormalize()(image.cpu())
@@ -256,7 +257,7 @@ class VisualizerCallback(Callback):
             visualizer.close()
 
         if self.plot_raw_outputs:
-            for raw_output, raw_name in zip([heatmap, vis_img], ["heatmap", "segmentation"]):
+            for raw_output, raw_name in zip([heatmap, vis_img], ["heatmap", "segmentation"], strict=False):
                 current_raw_output = raw_output
                 if raw_name == "segmentation":
                     current_raw_output = (raw_output * 255).astype(np.uint8)
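A recurring change throughout this release is the addition of the `strict` keyword to `zip()` calls. `zip(..., strict=True)` (available since Python 3.10, which this release now requires) raises `ValueError` when the iterables have unequal lengths, while `strict=False` keeps the old silent-truncation behavior; passing it explicitly documents the intent and satisfies linters such as Ruff's B905 rule. A minimal sketch of the difference:

```python
# Python 3.10+: zip() accepts a `strict` keyword (PEP 618).
images = ["a.png", "b.png", "c.png"]
labels = [0, 1]  # one label short

# strict=False (the behavior chosen throughout this diff): silently
# truncates to the shortest iterable, exactly like the bare zip() it replaces.
print(list(zip(images, labels, strict=False)))  # [('a.png', 0), ('b.png', 1)]

# strict=True: raises instead of silently dropping 'c.png'.
try:
    list(zip(images, labels, strict=True))
except ValueError as err:
    print(err)  # zip() argument 2 is shorter than argument 1
```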
quadra/callbacks/lightning.py CHANGED
@@ -79,6 +79,8 @@ def _scale_batch_size(
         new_size = _run_power_scaling(trainer, init_val, batch_arg_name, max_trials, params)
     elif mode == "binsearch":
         new_size = _run_binary_scaling(trainer, init_val, batch_arg_name, max_trials, params)
+    else:
+        raise ValueError(f"Unknown mode {mode}")
 
     garbage_collection_cuda()
 
quadra/configs/callbacks/all.yaml CHANGED
@@ -30,3 +30,16 @@ progress_bar:
 lightning_trainer_setup:
   _target_: quadra.callbacks.lightning.LightningTrainerBaseSetup
   log_every_n_steps: 1
+
+batch_size_finder:
+  _target_: quadra.callbacks.lightning.BatchSizeFinder
+  mode: power
+  steps_per_trial: 3
+  init_val: 2
+  max_trials: 5 # Max 64
+  batch_arg_name: batch_size
+  disable: false
+  find_train_batch_size: true
+  find_validation_batch_size: false
+  find_test_batch_size: false
+  find_predict_batch_size: false
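The new `batch_size_finder` entry wires the config into the `_scale_batch_size` / `_run_power_scaling` code touched in the quadra/callbacks/lightning.py hunk above. In `power` mode, the trial batch size is repeatedly doubled from `init_val` until a trial runs out of memory or `max_trials` is exhausted. A rough sketch of the idea, assuming a hypothetical `try_run` probe (this is not Lightning's actual implementation):

```python
def scale_batch_size_power(init_val: int, max_trials: int, try_run) -> int:
    """Rough sketch of "power" mode batch size scaling.

    `try_run` is a hypothetical probe that runs `steps_per_trial` training
    steps at the given batch size and raises RuntimeError on CUDA OOM.
    """
    new_size = init_val
    for _ in range(max_trials):
        try:
            try_run(new_size)
        except RuntimeError:
            return max(1, new_size // 2)  # back off to the last size that fit
        new_size *= 2  # power scaling: double and try again
    return new_size
```

With `init_val: 2` and `max_trials: 5`, the probe sequence is 2, 4, 8, 16, 32, and five successful doublings end at 64, which is presumably what the `# Max 64` comment in the config refers to.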
quadra/configs/callbacks/default.yaml CHANGED
@@ -9,9 +9,6 @@ model_checkpoint:
   filename: "epoch_{epoch:03d}"
   auto_insert_metric_name: False
 
-log_gradients:
-  _target_: quadra.callbacks.mlflow.LogGradients
-  norm: 2
 lr_monitor:
   _target_: pytorch_lightning.callbacks.LearningRateMonitor
   logging_interval: "epoch"
quadra/configs/callbacks/default_anomalib.yaml CHANGED
@@ -44,9 +44,6 @@ upload_ckpts_as_artifact:
   upload_best_only: true
   delete_after_upload: true
   upload: false
-log_gradients:
-  _target_: quadra.callbacks.mlflow.LogGradients
-  norm: 2
 lr_monitor:
   _target_: pytorch_lightning.callbacks.LearningRateMonitor
   logging_interval: "epoch"
quadra/datamodules/base.py CHANGED
@@ -7,7 +7,7 @@ import pickle as pkl
 import typing
 from collections.abc import Callable, Iterable, Sequence
 from functools import wraps
-from typing import Any, Literal, Union, cast
+from typing import Any, Literal, cast
 
 import albumentations
 import numpy as np
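The dropped `Union` import above, the `X | Y` type aliases in the next hunk, and the many `isinstance(..., (A, B))` → `isinstance(..., A | B)` rewrites later in this diff all use PEP 604 union syntax, which is only valid at runtime on Python 3.10+ and lines up with the tightened `Requires-Python: >=3.10` in the METADATA below. A small illustration:

```python
from collections.abc import Sequence

# PEP 604 (Python 3.10+): X | Y replaces typing.Union[X, Y], so the
# `from typing import Union` import can be dropped entirely.
IntOrStr = int | str                   # previously Union[int, str]
Samples = list[int] | Sequence[float]  # unions of generic aliases work too

# The same syntax is accepted directly by isinstance() and issubclass(),
# which is what rewrites like isinstance(v, (str, bytes)) ->
# isinstance(v, str | bytes) rely on:
assert isinstance(3, int | str)
assert not isinstance(3.0, int | str)
```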
@@ -20,8 +20,8 @@ from tqdm import tqdm
 from quadra.utils import utils
 
 log = utils.get_logger(__name__)
-TrainDataset = Union[torch.utils.data.Dataset, Sequence[torch.utils.data.Dataset]]
-ValDataset = Union[torch.utils.data.Dataset, Sequence[torch.utils.data.Dataset]]
+TrainDataset = torch.utils.data.Dataset | Sequence[torch.utils.data.Dataset]
+ValDataset = torch.utils.data.Dataset | Sequence[torch.utils.data.Dataset]
 TestDataset = torch.utils.data.Dataset
 
 
@@ -260,7 +260,7 @@ class BaseDataModule(LightningDataModule, metaclass=DecorateParentMethod):
             return
 
         # TODO: We need to find a way to annotate the columns of data.
-        paths_and_hash_length = zip(self.data["samples"], [self.hash_size] * len(self.data))
+        paths_and_hash_length = zip(self.data["samples"], [self.hash_size] * len(self.data), strict=False)
 
         with mp.Pool(min(8, mp.cpu_count() - 1)) as pool:
             self.data["hash"] = list(
@@ -355,7 +355,7 @@ class BaseDataModule(LightningDataModule, metaclass=DecorateParentMethod):
             raise ValueError("`n_aug_to_take` is not set. Cannot load augmented samples.")
         aug_samples = []
         aug_labels = []
-        for sample, label in zip(samples, targets):
+        for sample, label in zip(samples, targets, strict=False):
             aug_samples.append(sample)
             aug_labels.append(label)
             final_sample = sample
quadra/datamodules/classification.py CHANGED
@@ -243,7 +243,7 @@ class ClassificationDataModule(BaseDataModule):
             samples_test, targets_test = self._read_split(self.test_split_file)
         if not self.train_split_file:
             samples_train, targets_train = [], []
-            for sample, target in zip(all_samples, all_targets):
+            for sample, target in zip(all_samples, all_targets, strict=False):
                 if sample not in samples_test:
                     samples_train.append(sample)
                     targets_train.append(target)
@@ -251,7 +251,7 @@ class ClassificationDataModule(BaseDataModule):
             samples_train, targets_train = self._read_split(self.train_split_file)
         if not self.test_split_file:
             samples_test, targets_test = [], []
-            for sample, target in zip(all_samples, all_targets):
+            for sample, target in zip(all_samples, all_targets, strict=False):
                 if sample not in samples_train:
                     samples_test.append(sample)
                     targets_test.append(target)
quadra/datamodules/segmentation.py CHANGED
@@ -187,7 +187,7 @@ class SegmentationDataModule(BaseDataModule):
             samples_test, targets_test, masks_test = self._read_split(self.test_split_file)
         if not self.train_split_file:
             samples_train, targets_train, masks_train = [], [], []
-            for sample, target, mask in zip(all_samples, all_targets, all_masks):
+            for sample, target, mask in zip(all_samples, all_targets, all_masks, strict=False):
                 if sample not in samples_test:
                     samples_train.append(sample)
                     targets_train.append(target)
@@ -197,7 +197,7 @@ class SegmentationDataModule(BaseDataModule):
             samples_train, targets_train, masks_train = self._read_split(self.train_split_file)
         if not self.test_split_file:
             samples_test, targets_test, masks_test = [], [], []
-            for sample, target, mask in zip(all_samples, all_targets, all_masks):
+            for sample, target, mask in zip(all_samples, all_targets, all_masks, strict=False):
                 if sample not in samples_train:
                     samples_test.append(sample)
                     targets_test.append(target)
@@ -549,7 +549,7 @@ class SegmentationMulticlassDataModule(BaseDataModule):
             samples_and_masks_test,
             targets_test,
         ) = iterative_train_test_split(
-            np.expand_dims(np.array(list(zip(all_samples, all_masks))), 1),
+            np.expand_dims(np.array(list(zip(all_samples, all_masks, strict=False))), 1),
             np.array(all_targets),
             test_size=self.test_size,
         )
@@ -561,7 +561,7 @@ class SegmentationMulticlassDataModule(BaseDataModule):
             samples_test, targets_test, masks_test = self._read_split(self.test_split_file)
         if not self.train_split_file:
             samples_train, targets_train, masks_train = [], [], []
-            for sample, target, mask in zip(all_samples, all_targets, all_masks):
+            for sample, target, mask in zip(all_samples, all_targets, all_masks, strict=False):
                 if sample not in samples_test:
                     samples_train.append(sample)
                     targets_train.append(target)
@@ -571,7 +571,7 @@ class SegmentationMulticlassDataModule(BaseDataModule):
             samples_train, targets_train, masks_train = self._read_split(self.train_split_file)
         if not self.test_split_file:
             samples_test, targets_test, masks_test = [], [], []
-            for sample, target, mask in zip(all_samples, all_targets, all_masks):
+            for sample, target, mask in zip(all_samples, all_targets, all_masks, strict=False):
                 if sample not in samples_train:
                     samples_test.append(sample)
                     targets_test.append(target)
@@ -583,7 +583,7 @@ class SegmentationMulticlassDataModule(BaseDataModule):
             raise ValueError("Validation split file is specified but no train or test split file is specified.")
         else:
             samples_and_masks_train, targets_train, samples_and_masks_val, targets_val = iterative_train_test_split(
-                np.expand_dims(np.array(list(zip(samples_train, masks_train))), 1),
+                np.expand_dims(np.array(list(zip(samples_train, masks_train, strict=False))), 1),
                 np.array(targets_train),
                 test_size=self.val_size,
             )
quadra/datasets/anomaly.py CHANGED
@@ -220,7 +220,7 @@ class AnomalyDataset(Dataset):
         if not os.path.exists(valid_area_mask):
             raise RuntimeError(f"Valid area mask {valid_area_mask} does not exist.")
 
-        self.valid_area_mask = cv2.imread(valid_area_mask, 0) > 0  # type: ignore[operator]
+        self.valid_area_mask = cv2.imread(valid_area_mask, 0) > 0
 
     def __len__(self) -> int:
         """Get length of the dataset."""
@@ -265,7 +265,7 @@ class AnomalyDataset(Dataset):
         if label_index == 0:
             mask = np.zeros(shape=original_image_shape[:2])
         elif os.path.isfile(mask_path):
-            mask = cv2.imread(mask_path, flags=0) / 255.0  # type: ignore[operator]
+            mask = cv2.imread(mask_path, flags=0) / 255.0
         else:
             # We need ones in the mask to compute correctly at least image level f1 score
             mask = np.ones(shape=original_image_shape[:2])
quadra/datasets/classification.py CHANGED
@@ -50,9 +50,9 @@ class ImageClassificationListDataset(Dataset):
         allow_missing_label: bool | None = False,
     ):
         super().__init__()
-        assert len(samples) == len(
-            targets
-        ), f"Samples ({len(samples)}) and targets ({len(targets)}) must have the same length"
+        assert len(samples) == len(targets), (
+            f"Samples ({len(samples)}) and targets ({len(targets)}) must have the same length"
+        )
         # Setting the ROI
         self.roi = roi
 
@@ -201,9 +201,9 @@ class MultilabelClassificationDataset(torch.utils.data.Dataset):
         rgb: bool = True,
     ):
         super().__init__()
-        assert len(samples) == len(
-            targets
-        ), f"Samples ({len(samples)}) and targets ({len(targets)}) must have the same length"
+        assert len(samples) == len(targets), (
+            f"Samples ({len(samples)}) and targets ({len(targets)}) must have the same length"
+        )
 
         # Data
         self.x = samples
@@ -215,7 +215,7 @@ class MultilabelClassificationDataset(torch.utils.data.Dataset):
         class_to_idx = {c: i for i, c in enumerate(range(unique_targets))}
         self.class_to_idx = class_to_idx
         self.idx_to_class = {v: k for k, v in class_to_idx.items()}
-        self.samples = list(zip(self.x, self.y))
+        self.samples = list(zip(self.x, self.y, strict=False))
         self.rgb = rgb
         self.transform = transform
 
quadra/datasets/patch.py CHANGED
@@ -58,7 +58,7 @@ class PatchSklearnClassificationTrainDataset(Dataset):
 
         cls, counts = np.unique(targets_array, return_counts=True)
         max_count = np.max(counts)
-        for cl, count in zip(cls, counts):
+        for cl, count in zip(cls, counts, strict=False):
            idx_to_pick = list(np.where(targets_array == cl)[0])

            if count < max_count:
quadra/metrics/segmentation.py CHANGED
@@ -171,7 +171,7 @@ def segmentation_props(
     # Add dummy Dices so LSA is unique and i can compute FP and FN
     dice_mat = _pad_to_shape(dice_mat, (max_dim, max_dim), 1)
     lsa = linear_sum_assignment(dice_mat, maximize=False)
-    for row, col in zip(lsa[0], lsa[1]):
+    for row, col in zip(lsa[0], lsa[1], strict=False):
         # More preds than GTs --> False Positive
         if row < n_labels_pred and col >= n_labels_mask:
             min_row = pred_bbox[row][0]
quadra/models/base.py CHANGED
@@ -76,7 +76,7 @@ class ModelSignatureWrapper(nn.Module):
 
         if isinstance(self.instance.forward, torch.ScriptMethod):
             # Handle torchscript backbones
-            for i, argument in enumerate(self.instance.forward.schema.arguments):
+            for i, argument in enumerate(self.instance.forward.schema.arguments):  # type: ignore[attr-defined]
                 if i < (len(args) + 1):  # +1 for self
                     continue
 
quadra/models/evaluation.py CHANGED
@@ -209,7 +209,7 @@ class ONNXEvaluationModel(BaseEvaluationModel):
 
         onnx_inputs: dict[str, np.ndarray | torch.Tensor] = {}
 
-        for onnx_input, current_input in zip(self.model.get_inputs(), inputs):
+        for onnx_input, current_input in zip(self.model.get_inputs(), inputs, strict=False):
             if isinstance(current_input, torch.Tensor):
                 onnx_inputs[onnx_input.name] = current_input
                 use_pytorch = True
quadra/modules/base.py CHANGED
@@ -7,6 +7,7 @@ import pytorch_lightning as pl
 import sklearn
 import torch
 import torchmetrics
+from pytorch_lightning.utilities.types import OptimizerLRScheduler
 from sklearn.linear_model import LogisticRegression
 from torch import nn
 from torch.optim import Optimizer
@@ -48,7 +49,7 @@ class BaseLightningModule(pl.LightningModule):
         """
         return self.model(x)
 
-    def configure_optimizers(self) -> tuple[list[Any], list[dict[str, Any]]]:
+    def configure_optimizers(self) -> OptimizerLRScheduler:
         """Get default optimizer if not passed a value.
 
         Returns:
@@ -68,7 +69,7 @@ class BaseLightningModule(pl.LightningModule):
             "monitor": "val_loss",
             "strict": False,
         }
-        return [self.optimizer], [lr_scheduler_conf]
+        return [self.optimizer], [lr_scheduler_conf]  # type: ignore[return-value]
 
     # pylint: disable=unused-argument
     def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx: int = 0):
quadra/modules/ssl/byol.py CHANGED
@@ -110,6 +110,7 @@ class BYOL(SSLModule):
         for student_ps, teacher_ps in zip(
             list(self.model.parameters()) + list(self.student_projection_mlp.parameters()),
             list(self.teacher.parameters()) + list(self.teacher_projection_mlp.parameters()),
+            strict=False,
         ):
             teacher_ps.data = teacher_ps.data * teacher_momentum + (1 - teacher_momentum) * student_ps.data
 
quadra/tasks/anomaly.py CHANGED
@@ -161,7 +161,7 @@ class AnomalibDetection(Generic[AnomalyDataModuleT], LightningTask[AnomalyDataMo
         all_output_flatten: dict[str, torch.Tensor | list] = {}
 
         for key in all_output[0]:
-            if type(all_output[0][key]) == torch.Tensor:
+            if isinstance(all_output[0][key], torch.Tensor):
                 tensor_gatherer = torch.cat([x[key] for x in all_output])
                 all_output_flatten[key] = tensor_gatherer
             else:
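Replacing `type(x) == torch.Tensor` with `isinstance(x, torch.Tensor)` is more than a style fix (exact-type comparisons are what pycodestyle's E721 flags): `isinstance` also accepts subclasses, whereas the equality check does not. A minimal sketch of the difference:

```python
import torch

t = torch.zeros(3)
p = torch.nn.Parameter(torch.zeros(3))  # a subclass of torch.Tensor

print(type(t) == torch.Tensor)  # True
print(type(p) == torch.Tensor)  # False: Parameter is a subclass, not Tensor itself

print(isinstance(t, torch.Tensor))  # True
print(isinstance(p, torch.Tensor))  # True: subclasses pass isinstance
```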
@@ -205,13 +205,15 @@ class AnomalibDetection(Generic[AnomalyDataModuleT], LightningTask[AnomalyDataMo
             class_to_idx.pop("false_defect")
 
         anomaly_scores = all_output_flatten["pred_scores"]
+
+        exportable_anomaly_scores: list[Any] | np.ndarray
         if isinstance(anomaly_scores, torch.Tensor):
             exportable_anomaly_scores = anomaly_scores.cpu().numpy()
         else:
             exportable_anomaly_scores = anomaly_scores
 
         # Zip the lists together to create rows for the CSV file
-        rows = zip(image_paths, pred_labels, gt_labels, exportable_anomaly_scores)
+        rows = zip(image_paths, pred_labels, gt_labels, exportable_anomaly_scores, strict=False)
         # Specify the CSV file name
         csv_file = "test_predictions.csv"
         # Write the data to the CSV file
@@ -483,7 +485,7 @@ class AnomalibEvaluation(Evaluation[AnomalyDataModule]):
 
         if hasattr(self.datamodule, "valid_area_mask") and self.datamodule.valid_area_mask is not None:
             mask_area = cv2.imread(self.datamodule.valid_area_mask, 0)
-            mask_area = (mask_area > 0).astype(np.uint8)  # type: ignore[operator]
+            mask_area = (mask_area > 0).astype(np.uint8)
 
         if hasattr(self.datamodule, "crop_area") and self.datamodule.crop_area is not None:
             crop_area = self.datamodule.crop_area
@@ -499,12 +501,13 @@ class AnomalibEvaluation(Evaluation[AnomalyDataModule]):
                 self.metadata["image_labels"],
                 anomaly_scores,
                 anomaly_maps,
+                strict=False,
             ),
             total=len(self.metadata["image_paths"]),
         ):
             img = cv2.imread(img_path, 0)
             if mask_area is not None:
-                img = img * mask_area  # type: ignore[operator]
+                img = img * mask_area
 
             if crop_area is not None:
                 img = img[crop_area[1] : crop_area[3], crop_area[0] : crop_area[2]]
quadra/tasks/base.py CHANGED
@@ -382,15 +382,19 @@ class Evaluation(Generic[DataModuleT], Task[DataModuleT]):
         # We assume that each input size has the same height and width
         if input_size[1] != self.config.transforms.input_height:
             log.warning(
-                f"Input height of the model ({input_size[1]}) is different from the one specified "
-                + f"in the config ({self.config.transforms.input_height}). Fixing the config."
+                "Input height of the model (%s) is different from the one specified "
+                + "in the config (%s). Fixing the config.",
+                input_size[1],
+                self.config.transforms.input_height,
             )
             self.config.transforms.input_height = input_size[1]
 
         if input_size[2] != self.config.transforms.input_width:
             log.warning(
-                f"Input width of the model ({input_size[2]}) is different from the one specified "
-                + f"in the config ({self.config.transforms.input_width}). Fixing the config."
+                "Input width of the model (%s) is different from the one specified "
+                + "in the config (%s). Fixing the config.",
+                input_size[2],
+                self.config.transforms.input_width,
             )
             self.config.transforms.input_width = input_size[2]
 
quadra/tasks/classification.py CHANGED
@@ -623,7 +623,9 @@ class SklearnClassification(Generic[SklearnClassificationDataModuleT], Task[Skle
         all_labels = all_labels[sorted_indices]
 
         # cycle over all train/test split
-        for train_dataloader, test_dataloader in zip(self.train_dataloader_list, self.test_dataloader_list):
+        for train_dataloader, test_dataloader in zip(
+            self.train_dataloader_list, self.test_dataloader_list, strict=False
+        ):
             # Reinit classifier
             self.model = self.config.model
             self.trainer.change_classifier(self.model)
@@ -685,7 +687,7 @@ class SklearnClassification(Generic[SklearnClassificationDataModuleT], Task[Skle
             dl: PyTorch dataloader
             feature_extractor: PyTorch backbone
         """
-        if isinstance(feature_extractor, (TorchEvaluationModel, TorchscriptEvaluationModel)):
+        if isinstance(feature_extractor, TorchEvaluationModel | TorchscriptEvaluationModel):
            # TODO: I'm not sure torchinfo supports torchscript models
            # If we are working with torch based evaluation models we need to extract the model
            feature_extractor = feature_extractor.model
@@ -1202,6 +1204,8 @@ class ClassificationEvaluation(Evaluation[ClassificationDataModuleT]):
         probabilities = [max(item) for sublist in probabilities for item in sublist]
         if self.datamodule.class_to_idx is not None:
             idx_to_class = {v: k for k, v in self.datamodule.class_to_idx.items()}
+        else:
+            idx_to_class = None
 
         _, pd_cm, test_accuracy = get_results(
             test_labels=image_labels,
quadra/tasks/patch.py CHANGED
@@ -301,7 +301,7 @@ class PatchSklearnTestClassification(Evaluation[PatchSklearnClassificationDataMo
             "test_results": None,
             "test_labels": None,
         }
-        self.class_to_skip: list[str] = []
+        self.class_to_skip: list[str] | None = []
         self.reconstruction_results: dict[str, Any]
         self.return_polygon: bool = True
 
quadra/tasks/segmentation.py CHANGED
@@ -92,8 +92,10 @@ class Segmentation(Generic[SegmentationDataModuleT], LightningTask[SegmentationD
             len(self.datamodule.idx_to_class) + 1
         ):
             log.warning(
-                f"Number of classes in the model ({module_config.model.num_classes}) does not match the number of "
-                + f"classes in the datamodule ({len(self.datamodule.idx_to_class)}). Updating the model..."
+                "Number of classes in the model (%s) does not match the number of "
+                + "classes in the datamodule (%d). Updating the model...",
+                module_config.model.num_classes,
+                len(self.datamodule.idx_to_class),
             )
             module_config.model.num_classes = len(self.datamodule.idx_to_class) + 1
 
@@ -341,7 +343,7 @@ class SegmentationAnalysisEvaluation(SegmentationEvaluation):
         if self.datamodule.test_dataset_available:
             stages.append("test")
             dataloaders.append(self.datamodule.test_dataloader())
-        for stage, dataloader in zip(stages, dataloaders):
+        for stage, dataloader in zip(stages, dataloaders, strict=False):
             log.info("Running inference on %s set with batch size: %d", stage, dataloader.batch_size)
             image_list, mask_list, mask_pred_list, label_list = [], [], [], []
             for batch in dataloader:
@@ -369,10 +371,10 @@ class SegmentationAnalysisEvaluation(SegmentationEvaluation):
 
         for stage, output in self.test_output.items():
             image_mean = OmegaConf.to_container(self.config.transforms.mean)
-            if not isinstance(image_mean, list) or any(not isinstance(x, (int, float)) for x in image_mean):
+            if not isinstance(image_mean, list) or any(not isinstance(x, int | float) for x in image_mean):
                 raise ValueError("Image mean is not a list of float or integer values, please check your config")
             image_std = OmegaConf.to_container(self.config.transforms.std)
-            if not isinstance(image_std, list) or any(not isinstance(x, (int, float)) for x in image_std):
+            if not isinstance(image_std, list) or any(not isinstance(x, int | float) for x in image_std):
                 raise ValueError("Image std is not a list of float or integer values, please check your config")
             reports = create_mask_report(
                 stage=stage,
quadra/tasks/ssl.py CHANGED
@@ -468,8 +468,7 @@ class EmbeddingVisualization(Task):
         self.report_folder = report_folder
         if self.model_path is None:
             raise ValueError(
-                "Model path cannot be found!, please specify it in the config or pass it as an argument for"
-                " evaluation"
+                "Model path cannot be found!, please specify it in the config or pass it as an argument for evaluation"
             )
         self.embeddings_path = os.path.join(self.model_path, self.report_folder)
         if not os.path.exists(self.embeddings_path):
@@ -547,7 +546,7 @@ class EmbeddingVisualization(Task):
             im = interpolate(im, self.embedding_image_size)
 
             images.append(im.cpu())
-            metadata.extend(zip(targets, class_names, file_paths))
+            metadata.extend(zip(targets, class_names, file_paths, strict=False))
             counter += len(im)
         images = torch.cat(images, dim=0)
         embeddings = torch.cat(embeddings, dim=0)
quadra/utils/classification.py CHANGED
@@ -46,12 +46,10 @@ def get_file_condition(
     if any(fil in root for fil in exclude_filter):
         return False
 
-    if include_filter is not None and (
-        not any(fil in file_name for fil in include_filter) and not any(fil in root for fil in include_filter)
-    ):
-        return False
-
-    return True
+    return not (
+        include_filter is not None
+        and (not any(fil in file_name for fil in include_filter) and not any(fil in root for fil in include_filter))
+    )
 
 
 def natural_key(string_):
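The `get_file_condition` rewrite above is behavior-preserving: an `if cond: return False` followed by `return True` collapses to `return not cond` (the kind of simplification flagged by Ruff's SIM103 rule). A trivial check of the equivalence:

```python
# `if cond: return False` + `return True` is exactly `return not cond`.
def old_style(cond: bool) -> bool:
    if cond:
        return False
    return True


def new_style(cond: bool) -> bool:
    return not cond


assert all(old_style(c) == new_style(c) for c in (True, False))
```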
@@ -130,7 +128,7 @@ def find_images_and_targets(
     sorted_labels = sorted(unique_labels, key=natural_key)
     class_to_idx = {str(c): idx for idx, c in enumerate(sorted_labels)}
 
-    images_and_targets = [(f, l) for f, l in zip(filenames, labels) if l in class_to_idx]
+    images_and_targets = [(f, l) for f, l in zip(filenames, labels, strict=False) if l in class_to_idx]
 
     if sort:
         images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
@@ -210,7 +208,7 @@ def find_test_image(
             file_samples.append(sample_path)
 
     test_split = [os.path.join(folder, sample.strip()) for sample in file_samples]
-    labels = [t for s, t in zip(filenames, labels) if s in file_samples]
+    labels = [t for s, t in zip(filenames, labels, strict=False) if s in file_samples]
     filenames = [s for s in filenames if s in file_samples]
     log.info("Selected %d images using test_split_file for the test", len(filenames))
     if len(filenames) != len(file_samples):
@@ -353,7 +351,7 @@ def get_split(
 
     cl, counts = np.unique(targets, return_counts=True)
 
-    for num, _cl in zip(counts, cl):
+    for num, _cl in zip(counts, cl, strict=False):
         if num == 1:
             to_remove = np.where(np.array(targets) == _cl)[0][0]
             samples = np.delete(np.array(samples), to_remove)
@@ -378,7 +376,7 @@ def get_split(
             file_samples.append(sample_path)
 
     train_split = [os.path.join(image_dir, sample.strip()) for sample in file_samples]
-    targets = np.array([t for s, t in zip(samples, targets) if s in file_samples])
+    targets = np.array([t for s, t in zip(samples, targets, strict=False) if s in file_samples])
     samples = np.array([s for s in samples if s in file_samples])
 
     if limit_training_data is not None:
quadra/utils/evaluation.py CHANGED
@@ -4,6 +4,7 @@ import os
 from ast import literal_eval
 from collections.abc import Callable
 from functools import wraps
+from typing import Any
 
 import matplotlib.pyplot as plt
 import numpy as np
@@ -123,7 +124,7 @@ def calculate_mask_based_metrics(
     th_thresh_preds = (th_preds > threshold).float().cpu()
     thresh_preds = th_thresh_preds.squeeze(0).numpy()
     dice_scores = metric(th_thresh_preds, th_masks, reduction=None).numpy()
-    result = {}
+    result: dict[str, Any] = {}
     if multilabel:
         if n_classes is None:
             raise ValueError("n_classes arg shouldn't be None when multilabel is True")
@@ -167,7 +168,7 @@ def calculate_mask_based_metrics(
         "Accuracy": [],
     }
     for idx, (image, pred, mask, thresh_pred, dice_score) in enumerate(
-        zip(images, preds, masks, thresh_preds, dice_scores)
+        zip(images, preds, masks, thresh_preds, dice_scores, strict=False)
     ):
         if np.sum(mask) == 0:
             good_dice.append(dice_score)
@@ -261,6 +262,7 @@ def create_mask_report(
     th_labels = output["label"]
     n_classes = th_preds.shape[1]
     # TODO: Apply sigmoid is a wrong name now
+    # TODO: Apply sigmoid false is untested
     if apply_sigmoid:
         if n_classes == 1:
             th_preds = torch.nn.Sigmoid()(th_preds)
@@ -271,6 +273,13 @@ def create_mask_report(
             # Compute labels from the given masks since by default they are all 0
             th_labels = th_masks.max(dim=2)[0].max(dim=2)[0].squeeze(dim=1)
             show_orj_predictions = False
+    elif n_classes == 1:
+        th_thresh_preds = (th_preds > threshold).float()
+    else:
+        th_thresh_preds = torch.argmax(th_preds, dim=1).float().unsqueeze(1)
+        # Compute labels from the given masks since by default they are all 0
+        th_labels = th_masks.max(dim=2)[0].max(dim=2)[0].squeeze(dim=1)
+        show_orj_predictions = False
 
     mean = np.asarray(mean)
     std = np.asarray(std)
@@ -303,7 +312,7 @@ def create_mask_report(
     non_zero_score_idx = sorted_idx[~binary_labels]
     zero_score_idx = sorted_idx[binary_labels]
     file_paths = []
-    for name, current_score_idx in zip(["good", "bad"], [zero_score_idx, non_zero_score_idx]):
+    for name, current_score_idx in zip(["good", "bad"], [zero_score_idx, non_zero_score_idx], strict=False):
         if len(current_score_idx) == 0:
             continue
 
quadra/utils/export.py CHANGED
@@ -45,11 +45,11 @@ def generate_torch_inputs(
     """
     inp = None
 
-    if isinstance(input_shapes, (ListConfig, DictConfig)):
+    if isinstance(input_shapes, ListConfig | DictConfig):
         input_shapes = OmegaConf.to_container(input_shapes)
 
     if isinstance(input_shapes, list):
-        if any(isinstance(inp, (Sequence, dict)) for inp in input_shapes):
+        if any(isinstance(inp, Sequence | dict) for inp in input_shapes):
             return [generate_torch_inputs(inp, device, half_precision, dtype) for inp in input_shapes]
 
     # Base case
@@ -59,7 +59,7 @@ def generate_torch_inputs(
         return {k: generate_torch_inputs(v, device, half_precision, dtype) for k, v in input_shapes.items()}
 
     if isinstance(input_shapes, tuple):
-        if any(isinstance(inp, (Sequence, dict)) for inp in input_shapes):
+        if any(isinstance(inp, Sequence | dict) for inp in input_shapes):
             # The tuple contains a list, tuple or dict
             return tuple(generate_torch_inputs(inp, device, half_precision, dtype) for inp in input_shapes)
 
@@ -381,7 +381,7 @@ def _safe_export_half_precision_onnx(
     with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
         # This function prints a lot of information that is not useful for the user
         model_fp16 = auto_convert_mixed_precision(
-            model_fp32, test_data, rtol=0.01, atol=0.001, keep_io_types=False
+            model_fp32, test_data, rtol=0.01, atol=5e-3, keep_io_types=False
         )
         onnx.save(model_fp16, export_model_path)
 
quadra/utils/mlflow.py CHANGED
@@ -11,6 +11,7 @@ except ImportError:
 from collections.abc import Sequence
 from typing import Any
 
+import numpy as np
 import torch
 from pytorch_lightning import Trainer
 from pytorch_lightning.loggers import MLFlowLogger
@@ -45,6 +46,7 @@ def infer_signature_input(input_tensor: Any) -> Any:
     Raises:
         ValueError: If the input type is not supported or when nested dicts or sequences are encountered.
     """
+    signature: dict[str, Any] | np.ndarray
     if isinstance(input_tensor, Sequence):
         # Mlflow currently does not support sequence outputs, so we use a dict instead
         signature = {}
quadra/utils/models.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import math
 import warnings
 from collections.abc import Callable
-from typing import Union, cast
+from typing import cast
 
 import numpy as np
 import timm
@@ -114,7 +114,7 @@ def get_feature(
         labels: input_labels
         grayscale_cams: Gradcam output maps, None if gradcam arg is False
     """
-    if isinstance(feature_extractor, (TorchEvaluationModel, TorchscriptEvaluationModel)):
+    if isinstance(feature_extractor, TorchEvaluationModel | TorchscriptEvaluationModel):
         # If we are working with torch based evaluation models we need to extract the model
         feature_extractor = feature_extractor.model
     elif isinstance(feature_extractor, ONNXEvaluationModel):
@@ -160,9 +160,7 @@ def get_feature(
         x1 = x1.to(feature_extractor.device).to(feature_extractor.model_dtype)
 
         if gradcam:
-            y_hat = cast(
-                Union[list[torch.Tensor], tuple[torch.Tensor], torch.Tensor], feature_extractor(x1).detach()
-            )
+            y_hat = cast(list[torch.Tensor] | tuple[torch.Tensor] | torch.Tensor, feature_extractor(x1).detach())
             # mypy can't detect that gradcam is true only if we have a features_extractor
             if is_vision_transformer(feature_extractor.features_extractor):  # type: ignore[union-attr]
                 grayscale_cam_low_res = grad_rollout(
@@ -177,10 +175,10 @@ def get_feature(
             feature_extractor.zero_grad(set_to_none=True)  # type: ignore[union-attr]
         else:
             with torch.no_grad():
-                y_hat = cast(Union[list[torch.Tensor], tuple[torch.Tensor], torch.Tensor], feature_extractor(x1))
+                y_hat = cast(list[torch.Tensor] | tuple[torch.Tensor] | torch.Tensor, feature_extractor(x1))
             grayscale_cams = None
 
-        if isinstance(y_hat, (list, tuple)):
+        if isinstance(y_hat, list | tuple):
             y_hat = y_hat[0].cpu()
         else:
             y_hat = y_hat.cpu()
quadra/utils/patch/dataset.py CHANGED
@@ -566,7 +566,7 @@ def generate_patch_dataset(
         num_workers=num_workers,
     )
 
-    for phase, split_dict in zip(["val", "test"], [val_data_dictionary, test_data_dictionary]):
+    for phase, split_dict in zip(["val", "test"], [val_data_dictionary, test_data_dictionary], strict=False):
         if len(split_dict) > 0:
             log.info("Generating %s set", phase)
             generate_patch_sliding_window_dataset(
@@ -908,9 +908,9 @@ def extract_patches(
         patches = np.concatenate([patches, extra_patches_h], axis=0)
 
     # If this is not true there's some strange case I didn't take into account
-    assert (
-        patches.shape[0] == patch_num_h and patches.shape[1] == patch_num_w
-    ), f"Patch shape {patches.shape} does not match the expected shape {patch_number}"
+    assert patches.shape[0] == patch_num_h and patches.shape[1] == patch_num_w, (
+        f"Patch shape {patches.shape} does not match the expected shape {patch_number}"
+    )
 
     return patches
 
@@ -1059,11 +1059,12 @@ def create_h5(
         h = img.shape[0]
         w = img.shape[1]
 
+        mask: np.ndarray
         if item["mask"] is None:
-            mask = np.zeros([h, w])
+            mask = np.zeros([h, w], dtype=np.uint8)
         else:
             # this works even if item["mask"] is already an absolute path
-            mask = cv2.imread(os.path.join(output_folder, item["mask"]), 0)  # type: ignore[assignment]
+            mask = cv2.imread(os.path.join(output_folder, item["mask"]), 0)
 
         if patch_size is not None:
             patch_height = patch_size[1]
quadra/utils/patch/metrics.py CHANGED
@@ -98,9 +98,9 @@ def compute_patch_metrics(
     if (patch_h is not None and patch_w is not None) and (patch_num_h is not None and patch_num_w is not None):
         raise ValueError("Either number of patches or patch size is required for reconstruction")
 
-    assert (patch_h is not None and patch_w is not None) or (
-        patch_num_h is not None and patch_num_w is not None
-    ), "Either number of patches or patch size is required for reconstruction"
+    assert (patch_h is not None and patch_w is not None) or (patch_num_h is not None and patch_num_w is not None), (
+        "Either number of patches or patch size is required for reconstruction"
+    )
 
     if patch_h is not None and patch_w is not None and patch_num_h is not None and patch_num_w is not None:
         warnings.warn(
@@ -191,7 +191,7 @@ def compute_patch_metrics(
         if annotated_good is not None:
             gt_img[np.isin(gt_img, annotated_good)] = 0
 
-        gt_img_binary = (gt_img > 0).astype(bool)  # type: ignore[operator]
+        gt_img_binary = (gt_img > 0).astype(bool)
         regions_pred = label(output_mask).astype(np.uint8)
 
         for k in range(1, regions_pred.max() + 1):
@@ -203,8 +203,11 @@ def compute_patch_metrics(
         output_mask = (output_mask > 0).astype(np.uint8)
         gt_img = label(gt_img)
 
-        for i in range(1, gt_img.max() + 1):  # type: ignore[union-attr]
-            region = (gt_img == i).astype(bool)  # type: ignore[union-attr]
+        if gt_img is None:
+            raise RuntimeError("Ground truth mask is None after label and it should not be")
+
+        for i in range(1, gt_img.max() + 1):
+            region = (gt_img == i).astype(bool)
             if np.sum(np.bitwise_and(region, output_mask)) == 0:
                 false_region_good += 1
             else:
quadra/utils/patch/visualization.py CHANGED
@@ -69,13 +69,13 @@ def plot_patch_reconstruction(
             points = [[item["x"], item["y"]] for item in region["points"]]
             c_label = region["label"]
 
-            out = cv2.drawContours(
+            out = cv2.drawContours(  # type: ignore[call-overload]
                 out,
                 np.array([points], np.int32),
                 -1,
                 class_to_idx[c_label],
                 thickness=cv2.FILLED,
-            )  # type: ignore[call-overload]
+            )
     else:
         out = reconstruction["prediction"]
 
quadra/utils/utils.py CHANGED
@@ -438,7 +438,7 @@ def flatten_list(input_list: Iterable[Any]) -> Iterator[Any]:
         The iterator over the flattend list
     """
     for v in input_list:
-        if isinstance(v, Iterable) and not isinstance(v, (str, bytes)):
+        if isinstance(v, Iterable) and not isinstance(v, str | bytes):
             yield from flatten_list(v)
         else:
             yield v
quadra/utils/validator.py CHANGED
@@ -72,9 +72,7 @@ def check_all_arguments(callable_variable: str, configuration_arguments: list[st
     """
     for argument in configuration_arguments:
         if argument not in argument_names:
-            error_string = (
-                f"`{argument}` is not a valid argument passed " f"from configuration to `{callable_variable}`."
-            )
+            error_string = f"`{argument}` is not a valid argument passed from configuration to `{callable_variable}`."
             closest_match = difflib.get_close_matches(argument, argument_names, n=1, cutoff=0.5)
             if len(closest_match) > 0:
                 error_string += f" Did you mean `{closest_match[0]}`?"
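For context, `difflib.get_close_matches` (standard library) is what produces the `Did you mean ...?` suggestion: it returns up to `n` candidates whose similarity ratio to the query is at least `cutoff`. For example:

```python
import difflib

argument_names = ["batch_size", "num_workers", "pin_memory"]

# n=1, cutoff=0.5: at most one suggestion, and only if it is at least
# 50% similar to the misspelled argument.
print(difflib.get_close_matches("batchsize", argument_names, n=1, cutoff=0.5))
# ['batch_size']
print(difflib.get_close_matches("xyz", argument_names, n=1, cutoff=0.5))
# []
```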
quadra/utils/visualization.py CHANGED
@@ -46,7 +46,7 @@ class UnNormalize:
             new_t = tensor.detach().clone()
         else:
             new_t = tensor
-        for t, m, s in zip(new_t, self.mean, self.std):
+        for t, m, s in zip(new_t, self.mean, self.std, strict=False):
             t.mul_(s).add_(m)
             # The normalize code -> t.sub_(m).div_(s)
         return new_t
@@ -82,7 +82,7 @@ def create_grid_figure(
             ax[i][j].get_xaxis().set_ticks([])
             ax[i][j].get_yaxis().set_ticks([])
     if row_names is not None:
-        for ax, name in zip(ax[:, 0], row_names):  # noqa: B020
+        for ax, name in zip(ax[:, 0], row_names, strict=False):  # noqa: B020
            ax.set_ylabel(name, rotation=90)
 
     plt.tight_layout()
@@ -98,12 +98,12 @@ def create_visualization_dataset(dataset: torch.utils.data.Dataset):
         """Handle different types of transforms."""
         if isinstance(transforms, albumentations.BaseCompose):
             transforms.transforms = convert_transforms(transforms.transforms)
-        if isinstance(transforms, (list, ListConfig, TransformsSeqType)):
+        if isinstance(transforms, list | ListConfig | TransformsSeqType):
             transforms = [convert_transforms(t) for t in transforms]
-        if isinstance(transforms, (dict, DictConfig)):
+        if isinstance(transforms, dict | DictConfig):
             for tname, t in transforms.items():
                 transforms[tname] = convert_transforms(t)
-        if isinstance(transforms, (Normalize, ToTensorV2)):
+        if isinstance(transforms, Normalize | ToTensorV2):
             return NoOp(p=1)
         return transforms
 
@@ -362,6 +362,9 @@ def plot_classification_results(
                 test_label = idx_to_class[test_labels[i]]
             except Exception:
                 test_label = test_labels[i]
+        else:
+            pred_label = pred_labels[i]
+            test_label = test_labels[i]
 
         ax.axis("off")
         ax.set_title(f"True: {str(test_label)}\nPred {str(pred_label)}")
quadra/utils/vit_explainability.py CHANGED
@@ -153,7 +153,7 @@ def grad_rollout(
     """
     result = torch.eye(attentions[0].size(-1))
     with torch.no_grad():
-        for attention, grad in zip(attentions, gradients):
+        for attention, grad in zip(attentions, gradients, strict=False):
             weights = grad
             attention_heads_fused = torch.mean((attention * weights), dim=1)
             attention_heads_fused[attention_heads_fused < 0] = 0
{quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/METADATA CHANGED
@@ -1,19 +1,18 @@
 Metadata-Version: 2.1
 Name: quadra
-Version: 2.2.5
+Version: 2.3.0
 Summary: Deep Learning experiment orchestration library
 Home-page: https://orobix.github.io/quadra
 License: Apache-2.0
 Keywords: deep learning,experiment,lightning,hydra-core
 Author: Federico Belotti
 Author-email: federico.belotti@orobix.com
-Requires-Python: >=3.9,<3.11
+Requires-Python: >=3.10,<3.11
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Education
 Classifier: Intended Audience :: Science/Research
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development
@@ -21,7 +20,7 @@ Classifier: Topic :: Software Development :: Libraries
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Provides-Extra: onnx
 Requires-Dist: albumentations (>=1.3,<1.4)
-Requires-Dist: anomalib-orobix (==0.7.0.dev143)
+Requires-Dist: anomalib-orobix (==0.7.0.dev150)
 Requires-Dist: boto3 (>=1.26,<1.27)
 Requires-Dist: grad-cam-orobix (==1.5.3.dev001)
 Requires-Dist: h5py (>=3.8,<3.9)
@@ -32,17 +31,18 @@ Requires-Dist: label_studio_converter (>=0.0,<0.1)
 Requires-Dist: matplotlib (>=3.6,<3.7)
 Requires-Dist: minio (>=7.1,<7.2)
 Requires-Dist: mlflow-skinny (>=2.3.1,<3.0.0)
+Requires-Dist: numpy (<2)
 Requires-Dist: nvitop (>=0.11,<0.12)
 Requires-Dist: onnx (==1.15.0) ; extra == "onnx"
 Requires-Dist: onnxconverter-common (>=1.14.0,<2.0.0) ; extra == "onnx"
-Requires-Dist: onnxruntime_gpu (==1.17.0) ; extra == "onnx"
+Requires-Dist: onnxruntime_gpu (==1.20.0) ; extra == "onnx"
 Requires-Dist: onnxsim (==0.4.28) ; extra == "onnx"
 Requires-Dist: opencv_python_headless (>=4.7.0,<4.8.0)
 Requires-Dist: pandas (<2.0)
-Requires-Dist: pillow (>=9.3,<9.4)
+Requires-Dist: pillow (>=10,<11)
 Requires-Dist: pydantic (==1.10.10)
 Requires-Dist: python_dotenv (>=0.21,<0.22)
-Requires-Dist: pytorch_lightning (>=2.1,<2.2)
+Requires-Dist: pytorch_lightning (>=2.4,<2.5)
 Requires-Dist: rich (>=13.2,<13.3)
 Requires-Dist: scikit_learn (>=1.2,<1.3)
 Requires-Dist: scikit_multilearn (>=0.2,<0.3)
@@ -50,11 +50,11 @@ Requires-Dist: seaborn (>=0.12,<0.13)
 Requires-Dist: segmentation_models_pytorch-orobix (==0.3.3.dev1)
 Requires-Dist: tensorboard (>=2.11,<2.12)
 Requires-Dist: timm (==0.9.12)
-Requires-Dist: torch (==2.1.2)
+Requires-Dist: torch (==2.4.1)
 Requires-Dist: torchinfo (>=1.8,<1.9)
 Requires-Dist: torchmetrics (>=0.10,<0.11)
 Requires-Dist: torchsummary (>=1.5,<1.6)
-Requires-Dist: torchvision (>=0.16,<0.17)
+Requires-Dist: torchvision (>=0.19,<0.20)
 Requires-Dist: tripy (>=1.0,<1.1)
 Requires-Dist: typing_extensions (==4.11.0) ; python_version < "3.10"
 Requires-Dist: xxhash (>=3.2,<3.3)
@@ -1,7 +1,7 @@
1
- quadra/__init__.py,sha256=wi1UBd-2e2ycj46ChXS4E9NfP8aKtbcXQohx6bZlmsg,112
1
+ quadra/__init__.py,sha256=s2xCezYlfSL46r5iADHZuB2zg-0_EgoN-FNv4N4Kw48,112
2
2
  quadra/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- quadra/callbacks/anomalib.py,sha256=Z0Xx3M9UEMYGY0BpSsqemgQSQ6jAr4SrvlbDg_oG9C8,11913
4
- quadra/callbacks/lightning.py,sha256=1OTM6fB7qBVLPWNBAZJOb3B00q0kAxMWkPjTEn9YgF0,20182
3
+ quadra/callbacks/anomalib.py,sha256=WLBEGhZA9HoP4Yh9UbbC2GzDOKYTkvU9EY1lkZcV7Fs,11971
4
+ quadra/callbacks/lightning.py,sha256=mRWvBCrxHry_MfvAHy9H2jRYCcyUCpurXS8B5TpoScs,20241
5
5
  quadra/callbacks/mlflow.py,sha256=4LKjrgbRCHP5dOCoDpF7J25gaBgABa0Rof-EA61Iqug,10129
6
6
  quadra/callbacks/scheduler.py,sha256=zrglcTUvMO236VchQFtCSlA-XXhc6a3HVWX0uDVQoyc,2656
7
7
  quadra/configs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -32,9 +32,9 @@ quadra/configs/backbone/vit16_base.yaml,sha256=LWMAx3uwKQClDds1zUMzdNercHgO3kanV
32
32
  quadra/configs/backbone/vit16_small.yaml,sha256=8VkdiFCDugoSwz3tEbTNYp54P2eHElC8dR9Tea2Qir8,198
33
33
  quadra/configs/backbone/vit16_tiny.yaml,sha256=iHk8eUeieg-MJF4YA7m7p5WiTz5KT_rmNmYA5LLjdaw,197
34
34
  quadra/configs/backbone/xcit_tiny_24_p8_224.yaml,sha256=GnlQpUcahn_kZpj109kCC_5X4HP9-BSlgXRkiQJTlGU,196
35
- quadra/configs/callbacks/all.yaml,sha256=KYEDjqAf7lC5gpU8uUjlfpETIiDaWq5AlnQN-OCOTZU,1113
36
- quadra/configs/callbacks/default.yaml,sha256=sJBXHcrgycjGiz2lfSn54s1c1fbuED2QLKMaJdos078,1254
37
- quadra/configs/callbacks/default_anomalib.yaml,sha256=spzxnmnl5RRWOxzWSIZDmtb3cz9bGAErZEO-PIAFoqE,2330
35
+ quadra/configs/callbacks/all.yaml,sha256=LZx8d0apwv9t0KlKqzFCrYo2NpuNBgcUnHhG_kud5fQ,1437
36
+ quadra/configs/callbacks/default.yaml,sha256=ZFPU1bm36hJsxI-85uiJx7TpX7qWkR8dibBKWtES4Yc,1180
37
+ quadra/configs/callbacks/default_anomalib.yaml,sha256=FjjSj6HgMvH18MV4AKR4Ew0pOerPefj9eUGinUkODLE,2256
38
38
  quadra/configs/config.yaml,sha256=IULhqUF8Z7Cqr5Xx41EGj8dwtPQWSRiZ5jwzNg0Rjwk,686
39
39
  quadra/configs/core/default.yaml,sha256=IfgjKHXuOknq3CKvKKBqMPfiqmZSUOnc80Q2Jkbm7go,239
40
40
  quadra/configs/datamodule/base/anomaly.yaml,sha256=CILLAoQHunrT4BN0ynOzizTX29k-7B9vDVIZUYm-cBU,377
@@ -187,20 +187,20 @@ quadra/configs/transforms/dino.yaml,sha256=NtEbtJPHYkR9DOBPwXR33uvrEdqv8WNyqRlXr
187
187
  quadra/configs/transforms/linear_eval.yaml,sha256=fXmJBEwTWQ-QBMNV0mSG9wcrj31YGIV_czcRDczc1ss,488
188
188
  quadra/datamodules/__init__.py,sha256=y00iX2YAy6CJzPstKSBNq8_1YsYTRr_sCvqaL-WI7Z8,636
189
189
  quadra/datamodules/anomaly.py,sha256=_3FZNSwdMj-ECXlPQDslswtaMn0F1EgzA0q0UH-UgFY,6670
190
- quadra/datamodules/base.py,sha256=vDKyoZMMzKUn9dSU1hNiCW4Tt7G6xFTxuZn6c8CQxIc,14001
191
- quadra/datamodules/classification.py,sha256=UfszA_14TmS-9xQLdVLVJAjUT2PG1KLzfOX2w9mFKzg,41594
190
+ quadra/datamodules/base.py,sha256=QGkJ8Lq6hznHvaXjD8mhJhrinrs4ZFlZD3-B5cLU0cQ,14010
191
+ quadra/datamodules/classification.py,sha256=VwQd-zhzJuLgq5Kg1niOY4pnRbO7Sk4B77dWiTFv4do,41622
192
192
  quadra/datamodules/generic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
193
193
  quadra/datamodules/generic/imagenette.py,sha256=3hOb-GmvnKx_hqrSqRcAcf22PjtCQ3CY_-5mlaZSTIM,5564
194
194
  quadra/datamodules/generic/mnist.py,sha256=j4xWEWQb1utW3yyozgHD1tP0kOAtLpRsgeIBZ1cIiP0,3425
195
195
  quadra/datamodules/generic/mvtec.py,sha256=3Ib8JyY1Eg7wbPL2dXw22YCoy_gsitksofFShLQ9Itw,2700
196
196
  quadra/datamodules/generic/oxford_pet.py,sha256=tumWy9TBThvVQZ2JOyghosWJEEsYjyXN6pZMJ9C5dBY,6822
197
197
  quadra/datamodules/patch.py,sha256=y7leDt1MyVg0LnqKgWCZ0i6cuVln10fiG4X8EFbl-_Q,7789
198
- quadra/datamodules/segmentation.py,sha256=eWS_v7H4ea9h05KW1nMpLMJjCGVZEnzBU-vuKlVAdeY,28920
198
+ quadra/datamodules/segmentation.py,sha256=hhfOs7QoYslHYfWfnAgZzSusj2tus8k-h7SBqGNVT8E,29004
199
199
  quadra/datamodules/ssl.py,sha256=U63FCdcRJjx4K0RZzkKJfvYJhFpvWTnlBBCtXirn_F4,5709
200
200
  quadra/datasets/__init__.py,sha256=nVpqp2ffQ6omqCMB3r1ajcUGgUad0eSkDt-kNWDGblU,669
201
- quadra/datasets/anomaly.py,sha256=Guxb39aZkne5Qp_pEhjPACdzYSkLdLbu5AUXP0Gf1aE,11944
202
- quadra/datasets/classification.py,sha256=e-hNFh76aFnvbtkQLq1ljJ9Q3a6ymOmFziNjNK8VM4U,7512
203
- quadra/datasets/patch.py,sha256=bloUJS_qzzSfVqZaFpQd0uJMhRKRCfEwnSqCJaVtyZQ,4789
201
+ quadra/datasets/anomaly.py,sha256=4rCd2-frgMH3RfQYVFYn5ZXxTKbPOk8GwE-BZIiLwFY,11892
202
+ quadra/datasets/classification.py,sha256=ISKcY2PwD3HNv1JPPbDIJRJWJmu3KR3hlx3HUxlXYpE,7530
203
+ quadra/datasets/patch.py,sha256=imNJONPoREivSZ-6WqYO2zE80PDEr-oCm3rdJuKlWz0,4803
204
204
  quadra/datasets/segmentation.py,sha256=cDs45eRh_IBSLB0K5xDos-D4KySRQN64BzaPKGBF7OI,9056
205
205
  quadra/datasets/ssl.py,sha256=bbGWM-mQvr5xqXtmanr6HKC1Hgq42asu7wx2prb5NVo,3925
206
206
  quadra/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -219,21 +219,21 @@ quadra/losses/ssl/simsiam.py,sha256=uCCbqU9aYMwNa3re0qkeEK5Iz7Hxi0jAcEc-sCWZ8fc,
219
219
  quadra/losses/ssl/vicreg.py,sha256=ANvhYJz6iPv1A-OBXgBSrZrDG-1VmPtK1IZDtyFqNHE,2427
220
220
  quadra/main.py,sha256=6ZYKytVvCzQjgP_0QA6-3ICzVppsbRgPjF-csLKv85o,1407
221
221
  quadra/metrics/__init__.py,sha256=HsTK1gxsjp8_MYgA5caa4OK8sXLqtK_tt9wYyjtFnOc,79
222
- quadra/metrics/segmentation.py,sha256=jkQzCUz0ibkEoF95-uL-4wwxybs5SAs2VHiT5ZGckso,9451
222
+ quadra/metrics/segmentation.py,sha256=tVRYEyMiwD0RJ7NtoGRoSbwb8sAKoVmvzEhV6-3iQT4,9465
223
223
  quadra/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
224
- quadra/models/base.py,sha256=nYbZ1JRmhgcORZhAJoV6d2_L5C5yds_X-PQAFIDoHWk,5514
224
+ quadra/models/base.py,sha256=QqMRQWqIsUuUHeInqMHZv3wv7Xeqz-zKe4cAQeqaN3M,5544
225
225
  quadra/models/classification/__init__.py,sha256=c03CGDyvipXgU7ybSosOaeTl1aM1ge6TqMUgMiTpQtA,243
226
226
  quadra/models/classification/backbones.py,sha256=haHNPC-XZ8Jj1i47cfUj8JHy_I-rins-nNfccrPBffo,6281
227
227
  quadra/models/classification/base.py,sha256=w-mDPQPtIrNclxjqsve5BTmNhNgnWGh7uJfE5HaTFPA,2996
228
- quadra/models/evaluation.py,sha256=dEazagyUvlnnEq16IdM25qAdDXZR-BUhApDF_YUdO-E,10695
228
+ quadra/models/evaluation.py,sha256=LQg2K6PDIKK0ZnkP4pHfRNnKO4WeaROoYoNFA3Bctg0,10709
229
229
  quadra/modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
230
230
  quadra/modules/backbone.py,sha256=xiZBqgzr1S45GX9mydl29TFuahLDaHrU7Fy73LGIyGI,909
231
- quadra/modules/base.py,sha256=fS_kbOAfEihHFfiTcyuyDXTzkmkJr2ivtAMiZqjOUog,10303
231
+ quadra/modules/base.py,sha256=y96PSFJeo4gswVj3a6uNnoirg-dMgS0MsYDN51fQQ9A,10382
232
232
  quadra/modules/classification/__init__.py,sha256=6keltBhC1yzgbNttBuykNYJAUMyOrY-HDNgGZGfI93I,141
233
233
  quadra/modules/classification/base.py,sha256=QdHtHY2tF_qh2wU01Oo0TWjh9CTqa46tyF4VgcLd__M,11937
234
234
  quadra/modules/ssl/__init__.py,sha256=oeUoGHrsESZ0595-JxPxURBP124jtNfrITbVovBpANA,302
235
235
  quadra/modules/ssl/barlowtwins.py,sha256=iW6f7ADSEkbs7z-88x680204-Ez-iF1Yd2SdQzcLpRY,1884
236
- quadra/modules/ssl/byol.py,sha256=8sviU7MS9MXDbS9Ogu9-qJCZrkTI2pPxgEwNvc7EqIo,7084
236
+ quadra/modules/ssl/byol.py,sha256=3UhUr72kpI2lM9JtVPqrTcpTo60NsAHNu3SIwD5_RrI,7114
237
237
  quadra/modules/ssl/common.py,sha256=nQMsYEu4PUueMq0KNe898h3wGS2RVQBN0NCpYnMyRqI,9898
238
238
  quadra/modules/ssl/dino.py,sha256=Xs4wRYvvxeLuHtOW5Gf-xaqAvT97cIuOG6PlYduPDm4,7300
239
239
  quadra/modules/ssl/hyperspherical.py,sha256=yEY0WvYFLvKCeKKJDAWCEttYwNVjB5ai6N2FxXKqYQ4,6356
@@ -248,31 +248,31 @@ quadra/schedulers/__init__.py,sha256=mQivr18c0j36hpV3Lm8nlyBVKFevWp8TtLuTfvI9kQc
248
248
  quadra/schedulers/base.py,sha256=T1EdrLOJ0i9MzWoLCkrNA0uypm7hJ-L6NFhjIXFB6NE,1462
249
249
  quadra/schedulers/warmup.py,sha256=chzzrK7OqqlicBCxiF4CqMYNrWu6nflIbRE-C86Jrw0,4962
250
250
  quadra/tasks/__init__.py,sha256=tmAfMoH0k3UC7r2pNrgbBa1Pfc3tpLl3IObFF6Z0eRE,820
251
- quadra/tasks/anomaly.py,sha256=rl3F39kABt5deSLOeY2s_1t2mR4x1-42VdP0flRrZMs,24583
252
- quadra/tasks/base.py,sha256=5Rsjdothqb4YXMIN_s98HdoPmJBfQKV0ZwnyvUuihYI,14101
253
- quadra/tasks/classification.py,sha256=I4NEsNVn1lOu-HLaD_hqowfnedhD4l-wZ4JPI1eD4pg,52735
254
- quadra/tasks/patch.py,sha256=EJvbtvlebyOtk6m47juW8XMXr1v_bLFTIKqI1KC0HRA,20244
255
- quadra/tasks/segmentation.py,sha256=5GF7CZjm1dQGQ-Q0kAc68GSsNv6FTxBsl_rbEZQwjmU,16213
256
- quadra/tasks/ssl.py,sha256=SVZeAW5xVkfeGTwMqRhGKLagMC2kHrxoOaNEDzbCZ0A,20552
251
+ quadra/tasks/anomaly.py,sha256=RHeiM1vZF1zsva37iYdiGx_HLgdAp8lXnmUzXja69YU,24638
252
+ quadra/tasks/base.py,sha256=piYlTFtvqH-4s4oEq4GczdAs_gL29UHAJGsOC5Sd3Bc,14187
253
+ quadra/tasks/classification.py,sha256=05l3QM3dsU2yTWhXxNAcJ8sZM0Vbfgey-e5EV6p1TX8,52816
254
+ quadra/tasks/patch.py,sha256=nzo8o-ei7iF1Iarvd8-c08s0Rs_lPvVPDLAbkFMx-Qw,20251
255
+ quadra/tasks/segmentation.py,sha256=9Qy-V0Wvoofl7IrfotnSMgBIXcZd-WfZZtetyqmB0FY,16260
256
+ quadra/tasks/ssl.py,sha256=XsaC9hbhvTA5UfHeRaaCstx9mTYacLRmgoCF5Tj9R5M,20547
257
257
  quadra/trainers/README.md,sha256=XtpbUOxwvPpOUL7E5s2JHjRgwT-CRKTxsBeUSXrg9BU,248
258
258
  quadra/trainers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
259
259
  quadra/trainers/classification.py,sha256=YeJ0z7Vk0-dsMTcoKBxSdSA0rxtilEcQTp-Zq9Xi1hw,7042
260
260
  quadra/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
261
261
  quadra/utils/anomaly.py,sha256=49vFvT5-4SxczsEM2Akcut_M1DDwKlOVdGv36oLTgR0,4067
262
- quadra/utils/classification.py,sha256=7Phbywpd1CNzwLM5F7zV9FrzdHGd6EkR6LRTRpkerTc,24882
262
+ quadra/utils/classification.py,sha256=dKFuv4RywWhvhstOnEOnaf-6qcViUK0dTgah9m9mw2Q,24917
263
263
  quadra/utils/deprecation.py,sha256=zF_S-yqenaZxRBOudhXts0mX763WjEUWCnHd09TZnwY,852
264
- quadra/utils/evaluation.py,sha256=5Occ6ONsjnvMkaFFjohtgSvffxEKfjdvMbxqzgtmiHQ,18538
265
- quadra/utils/export.py,sha256=PoKaADeSwGPIW5TEHP3Klw_SUSV1gP1dIN4CN-AgDm0,20836
264
+ quadra/utils/evaluation.py,sha256=oooRJPu1AaHhOwvB1Y6SFjQ645OkgrDzKtUvwWq8oq4,19005
265
+ quadra/utils/export.py,sha256=6g--Bz1Xx8f5j_dSQTZxoiaeT_qtG-8ApfFm-CNUkmQ,20832
266
266
  quadra/utils/imaging.py,sha256=Cz7sGb_axEmnGcwQJP2djFZpIpGCPFIBGT8NWVV-OOE,866
267
267
  quadra/utils/logger.py,sha256=tQJ4xpTAFKx1g-UUm5K1x7zgoP6qoXpcUHQyu0rOr1w,556
268
- quadra/utils/mlflow.py,sha256=7E09JShGm2qO7bLb_8srA_RYdVAudxeDYmX7pMdjoVU,3524
268
+ quadra/utils/mlflow.py,sha256=DVso1lxn126hil8i4tTf5WFUPJ8uJNAzNU8OXbXwOzw,3586
269
269
  quadra/utils/model_manager.py,sha256=P5JtY95p6giQ6mb4TUnWsNwUh5ClzHBillnG5SA56QY,12546
270
- quadra/utils/models.py,sha256=xLOT6Sorpc54dYn9XG8RpqoLupvNrMCiE_QvvIMLwHA,19756
270
+ quadra/utils/models.py,sha256=49AXecNN7mg8uqO-YW0sLbPxbvWfTI4E4NNpTesW6HE,19699
271
271
  quadra/utils/patch/__init__.py,sha256=YenDdsI937kyAJiE0dP3_Xua8gHIoFjheoWMnpx_TGU,509
272
- quadra/utils/patch/dataset.py,sha256=hqM7XyPNDmI9_uJSrAxNYOasdNlEHcg-npP1S9Bb05Y,61374
273
- quadra/utils/patch/metrics.py,sha256=E1PeHFp10pPgkb6484fDvRLn2E9NDy9xIqEmMBeusOw,17644
272
+ quadra/utils/patch/dataset.py,sha256=tRwrc01p0sj4nLQ-6b9mvnkTQrjtFSv5qMYiTJRSXKU,61401
273
+ quadra/utils/patch/metrics.py,sha256=r7zxGXC2hU6EiMbfNoUmi6BC0EEUZs9Jy_mtI5Q1x5g,17693
274
274
  quadra/utils/patch/model.py,sha256=F-wbMZvM8nS_ZSYewg2SofD7H0I6DH1DBA2ACSr0fCY,5746
275
- quadra/utils/patch/visualization.py,sha256=HwO67vRMvhQAKN_FRFv4AAqvYMeOpg0dod3DwGpgTfo,7044
275
+ quadra/utils/patch/visualization.py,sha256=V64SsXcQ2UhBVH2gzzrjF_OaxL58ktEo1Jdzcos3AT8,7044
276
276
  quadra/utils/resolver.py,sha256=p8t95b__htcR3hdnF9RtlWNKLTVUWYjADozYNj9lIzQ,1397
277
277
  quadra/utils/segmentation.py,sha256=rWOE1qw2RS0dpgJyHqfQURw86K6G2Hst6mpu97PI5Ac,920
278
278
  quadra/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -288,13 +288,13 @@ quadra/utils/tests/fixtures/models/classification.py,sha256=5qpyOonqK6W2LCUWEHhm
288
288
  quadra/utils/tests/fixtures/models/segmentation.py,sha256=CTNXeEPcFxFq-YcNfQi5DbbytPZwBQaZn5dQq3L41j0,765
289
289
  quadra/utils/tests/helpers.py,sha256=9PJlwozUl_lpQW-Ck-tN7sGFcgeieEd3q56aYuwMIlk,2381
290
290
  quadra/utils/tests/models.py,sha256=KbAlv_ukxaUYsyVNUO_dM0NyIosx8RpC0EVyF1HvPkM,507
291
- quadra/utils/utils.py,sha256=AjVsJvqACj5Bo8aOF8Itkwqhzws3bwfLnptxosbgzl4,19125
292
- quadra/utils/validator.py,sha256=eFCGr0ss1gYSpsL31JbsCXPZUMJAI9_H-mGodt6UGsU,4668
293
- quadra/utils/visualization.py,sha256=UvGHX0dumfjpT_KX3Yc1W2B5sAXXaZZWIwXQAi4sdoQ,15950
294
- quadra/utils/vit_explainability.py,sha256=hY0awehj6UkyBhnBlW5uWoJTsBfgow5Nll9fAqrzmMo,13337
291
+ quadra/utils/utils.py,sha256=3tgj_tFFhKsGNJ9jrmULI9rWxFyhuUe53Y5SBJFkwSM,19124
292
+ quadra/utils/validator.py,sha256=wmVXycB90VNyAbKBUVncFCxK4nsYiOWJIY3ISXwxYCY,4632
293
+ quadra/utils/visualization.py,sha256=yYm7lPziUOlybxigZ2qTycNewb67Q80H4hjQGWUh788,16094
294
+ quadra/utils/vit_explainability.py,sha256=Gh6BHaDEzWxOjJp1aqvCxLt9Rb8TXd5uKXOAx7-acUk,13351
295
295
  hydra_plugins/quadra_searchpath_plugin.py,sha256=AAn4TzR87zUK7nwSsK-KoqALiPtfQ8FvX3fgZPTGIJ0,1189
296
- quadra-2.2.5.dist-info/LICENSE,sha256=8cTbQtcWa02YJoSpMeV_gxj3jpMTkxvl-w3WJ5gV_QE,11342
297
- quadra-2.2.5.dist-info/METADATA,sha256=rG202HeIVu_pgeoNRHitD6Gs_dQj_fQcmmN1aDEUYz4,17623
298
- quadra-2.2.5.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
299
- quadra-2.2.5.dist-info/entry_points.txt,sha256=sRYonBZyx-sAJeWcQNQoVQIU5lm02cnCQt6b15k0WHU,43
300
- quadra-2.2.5.dist-info/RECORD,,
296
+ quadra-2.3.0.dist-info/LICENSE,sha256=8cTbQtcWa02YJoSpMeV_gxj3jpMTkxvl-w3WJ5gV_QE,11342
297
+ quadra-2.3.0.dist-info/METADATA,sha256=0UHdlekvbPZCjDcDnFGHqKFyT8s8R4ExcaD2bDriDxQ,17598
298
+ quadra-2.3.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
299
+ quadra-2.3.0.dist-info/entry_points.txt,sha256=sRYonBZyx-sAJeWcQNQoVQIU5lm02cnCQt6b15k0WHU,43
300
+ quadra-2.3.0.dist-info/RECORD,,
{quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/LICENSE
File without changes
{quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/WHEEL
File without changes
{quadra-2.2.5.dist-info → quadra-2.3.0.dist-info}/entry_points.txt
File without changes