dragon-ml-toolbox 19.14.0__py3-none-any.whl → 20.0.0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- {dragon_ml_toolbox-19.14.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/METADATA +29 -46
- dragon_ml_toolbox-20.0.0.dist-info/RECORD +178 -0
- ml_tools/{ETL_cleaning.py → ETL_cleaning/__init__.py} +13 -5
- ml_tools/ETL_cleaning/_basic_clean.py +351 -0
- ml_tools/ETL_cleaning/_clean_tools.py +128 -0
- ml_tools/ETL_cleaning/_dragon_cleaner.py +245 -0
- ml_tools/ETL_cleaning/_imprimir.py +13 -0
- ml_tools/{ETL_engineering.py → ETL_engineering/__init__.py} +8 -4
- ml_tools/ETL_engineering/_dragon_engineering.py +261 -0
- ml_tools/ETL_engineering/_imprimir.py +24 -0
- ml_tools/{_core/_ETL_engineering.py → ETL_engineering/_transforms.py} +14 -267
- ml_tools/{_core → GUI_tools}/_GUI_tools.py +37 -40
- ml_tools/{GUI_tools.py → GUI_tools/__init__.py} +7 -5
- ml_tools/GUI_tools/_imprimir.py +12 -0
- ml_tools/IO_tools/_IO_loggers.py +235 -0
- ml_tools/IO_tools/_IO_save_load.py +151 -0
- ml_tools/IO_tools/_IO_utils.py +140 -0
- ml_tools/{IO_tools.py → IO_tools/__init__.py} +13 -5
- ml_tools/IO_tools/_imprimir.py +14 -0
- ml_tools/MICE/_MICE_imputation.py +132 -0
- ml_tools/{MICE_imputation.py → MICE/__init__.py} +6 -7
- ml_tools/{_core/_MICE_imputation.py → MICE/_dragon_mice.py} +243 -322
- ml_tools/MICE/_imprimir.py +11 -0
- ml_tools/{ML_callbacks.py → ML_callbacks/__init__.py} +12 -4
- ml_tools/ML_callbacks/_base.py +101 -0
- ml_tools/ML_callbacks/_checkpoint.py +232 -0
- ml_tools/ML_callbacks/_early_stop.py +208 -0
- ml_tools/ML_callbacks/_imprimir.py +12 -0
- ml_tools/ML_callbacks/_scheduler.py +197 -0
- ml_tools/{ML_chaining_utilities.py → ML_chain/__init__.py} +8 -3
- ml_tools/{_core/_ML_chaining_utilities.py → ML_chain/_chaining_tools.py} +5 -129
- ml_tools/ML_chain/_dragon_chain.py +140 -0
- ml_tools/ML_chain/_imprimir.py +11 -0
- ml_tools/ML_configuration/__init__.py +90 -0
- ml_tools/ML_configuration/_base_model_config.py +69 -0
- ml_tools/ML_configuration/_finalize.py +366 -0
- ml_tools/ML_configuration/_imprimir.py +47 -0
- ml_tools/ML_configuration/_metrics.py +593 -0
- ml_tools/ML_configuration/_models.py +206 -0
- ml_tools/ML_configuration/_training.py +124 -0
- ml_tools/ML_datasetmaster/__init__.py +28 -0
- ml_tools/ML_datasetmaster/_base_datasetmaster.py +337 -0
- ml_tools/{_core/_ML_datasetmaster.py → ML_datasetmaster/_datasetmaster.py} +9 -329
- ml_tools/ML_datasetmaster/_imprimir.py +15 -0
- ml_tools/{_core/_ML_sequence_datasetmaster.py → ML_datasetmaster/_sequence_datasetmaster.py} +13 -15
- ml_tools/{_core/_ML_vision_datasetmaster.py → ML_datasetmaster/_vision_datasetmaster.py} +63 -65
- ml_tools/ML_evaluation/__init__.py +53 -0
- ml_tools/ML_evaluation/_classification.py +629 -0
- ml_tools/ML_evaluation/_feature_importance.py +409 -0
- ml_tools/ML_evaluation/_imprimir.py +25 -0
- ml_tools/ML_evaluation/_loss.py +92 -0
- ml_tools/ML_evaluation/_regression.py +273 -0
- ml_tools/{_core/_ML_sequence_evaluation.py → ML_evaluation/_sequence.py} +8 -11
- ml_tools/{_core/_ML_vision_evaluation.py → ML_evaluation/_vision.py} +12 -17
- ml_tools/{_core → ML_evaluation_captum}/_ML_evaluation_captum.py +11 -38
- ml_tools/{ML_evaluation_captum.py → ML_evaluation_captum/__init__.py} +6 -4
- ml_tools/ML_evaluation_captum/_imprimir.py +10 -0
- ml_tools/{_core → ML_finalize_handler}/_ML_finalize_handler.py +3 -7
- ml_tools/ML_finalize_handler/__init__.py +10 -0
- ml_tools/ML_finalize_handler/_imprimir.py +8 -0
- ml_tools/ML_inference/__init__.py +22 -0
- ml_tools/ML_inference/_base_inference.py +166 -0
- ml_tools/{_core/_ML_chaining_inference.py → ML_inference/_chain_inference.py} +14 -17
- ml_tools/ML_inference/_dragon_inference.py +332 -0
- ml_tools/ML_inference/_imprimir.py +11 -0
- ml_tools/ML_inference/_multi_inference.py +180 -0
- ml_tools/ML_inference_sequence/__init__.py +10 -0
- ml_tools/ML_inference_sequence/_imprimir.py +8 -0
- ml_tools/{_core/_ML_sequence_inference.py → ML_inference_sequence/_sequence_inference.py} +11 -15
- ml_tools/ML_inference_vision/__init__.py +10 -0
- ml_tools/ML_inference_vision/_imprimir.py +8 -0
- ml_tools/{_core/_ML_vision_inference.py → ML_inference_vision/_vision_inference.py} +15 -19
- ml_tools/ML_models/__init__.py +32 -0
- ml_tools/{_core/_ML_models_advanced.py → ML_models/_advanced_models.py} +22 -18
- ml_tools/ML_models/_base_mlp_attention.py +198 -0
- ml_tools/{_core/_models_advanced_base.py → ML_models/_base_save_load.py} +73 -49
- ml_tools/ML_models/_dragon_tabular.py +248 -0
- ml_tools/ML_models/_imprimir.py +18 -0
- ml_tools/ML_models/_mlp_attention.py +134 -0
- ml_tools/{_core → ML_models}/_models_advanced_helpers.py +13 -13
- ml_tools/ML_models_sequence/__init__.py +10 -0
- ml_tools/ML_models_sequence/_imprimir.py +8 -0
- ml_tools/{_core/_ML_sequence_models.py → ML_models_sequence/_sequence_models.py} +5 -8
- ml_tools/ML_models_vision/__init__.py +29 -0
- ml_tools/ML_models_vision/_base_wrapper.py +254 -0
- ml_tools/ML_models_vision/_image_classification.py +182 -0
- ml_tools/ML_models_vision/_image_segmentation.py +108 -0
- ml_tools/ML_models_vision/_imprimir.py +16 -0
- ml_tools/ML_models_vision/_object_detection.py +135 -0
- ml_tools/ML_optimization/__init__.py +21 -0
- ml_tools/ML_optimization/_imprimir.py +13 -0
- ml_tools/{_core/_ML_optimization_pareto.py → ML_optimization/_multi_dragon.py} +18 -24
- ml_tools/ML_optimization/_single_dragon.py +203 -0
- ml_tools/{_core/_ML_optimization.py → ML_optimization/_single_manual.py} +75 -213
- ml_tools/{_core → ML_scaler}/_ML_scaler.py +8 -11
- ml_tools/ML_scaler/__init__.py +10 -0
- ml_tools/ML_scaler/_imprimir.py +8 -0
- ml_tools/ML_trainer/__init__.py +20 -0
- ml_tools/ML_trainer/_base_trainer.py +297 -0
- ml_tools/ML_trainer/_dragon_detection_trainer.py +402 -0
- ml_tools/ML_trainer/_dragon_sequence_trainer.py +540 -0
- ml_tools/ML_trainer/_dragon_trainer.py +1160 -0
- ml_tools/ML_trainer/_imprimir.py +10 -0
- ml_tools/{ML_utilities.py → ML_utilities/__init__.py} +14 -6
- ml_tools/ML_utilities/_artifact_finder.py +382 -0
- ml_tools/ML_utilities/_imprimir.py +16 -0
- ml_tools/ML_utilities/_inspection.py +325 -0
- ml_tools/ML_utilities/_train_tools.py +205 -0
- ml_tools/{ML_vision_transformers.py → ML_vision_transformers/__init__.py} +9 -6
- ml_tools/{_core/_ML_vision_transformers.py → ML_vision_transformers/_core_transforms.py} +11 -155
- ml_tools/ML_vision_transformers/_imprimir.py +14 -0
- ml_tools/ML_vision_transformers/_offline_augmentation.py +159 -0
- ml_tools/{_core/_PSO_optimization.py → PSO_optimization/_PSO.py} +58 -15
- ml_tools/{PSO_optimization.py → PSO_optimization/__init__.py} +5 -3
- ml_tools/PSO_optimization/_imprimir.py +10 -0
- ml_tools/SQL/__init__.py +7 -0
- ml_tools/{_core/_SQL.py → SQL/_dragon_SQL.py} +7 -11
- ml_tools/SQL/_imprimir.py +8 -0
- ml_tools/{_core → VIF}/_VIF_factor.py +5 -8
- ml_tools/{VIF_factor.py → VIF/__init__.py} +4 -2
- ml_tools/VIF/_imprimir.py +10 -0
- ml_tools/_core/__init__.py +7 -1
- ml_tools/_core/_logger.py +8 -18
- ml_tools/_core/_schema_load_ops.py +43 -0
- ml_tools/_core/_script_info.py +2 -2
- ml_tools/{data_exploration.py → data_exploration/__init__.py} +32 -16
- ml_tools/data_exploration/_analysis.py +214 -0
- ml_tools/data_exploration/_cleaning.py +566 -0
- ml_tools/data_exploration/_features.py +583 -0
- ml_tools/data_exploration/_imprimir.py +32 -0
- ml_tools/data_exploration/_plotting.py +487 -0
- ml_tools/data_exploration/_schema_ops.py +176 -0
- ml_tools/{ensemble_evaluation.py → ensemble_evaluation/__init__.py} +6 -4
- ml_tools/{_core → ensemble_evaluation}/_ensemble_evaluation.py +3 -7
- ml_tools/ensemble_evaluation/_imprimir.py +14 -0
- ml_tools/{ensemble_inference.py → ensemble_inference/__init__.py} +5 -3
- ml_tools/{_core → ensemble_inference}/_ensemble_inference.py +15 -18
- ml_tools/ensemble_inference/_imprimir.py +9 -0
- ml_tools/{ensemble_learning.py → ensemble_learning/__init__.py} +4 -6
- ml_tools/{_core → ensemble_learning}/_ensemble_learning.py +7 -10
- ml_tools/ensemble_learning/_imprimir.py +10 -0
- ml_tools/{excel_handler.py → excel_handler/__init__.py} +5 -3
- ml_tools/{_core → excel_handler}/_excel_handler.py +6 -10
- ml_tools/excel_handler/_imprimir.py +13 -0
- ml_tools/{keys.py → keys/__init__.py} +4 -1
- ml_tools/keys/_imprimir.py +11 -0
- ml_tools/{_core → keys}/_keys.py +2 -0
- ml_tools/{math_utilities.py → math_utilities/__init__.py} +5 -2
- ml_tools/math_utilities/_imprimir.py +11 -0
- ml_tools/{_core → math_utilities}/_math_utilities.py +1 -5
- ml_tools/{optimization_tools.py → optimization_tools/__init__.py} +9 -4
- ml_tools/optimization_tools/_imprimir.py +13 -0
- ml_tools/optimization_tools/_optimization_bounds.py +236 -0
- ml_tools/optimization_tools/_optimization_plots.py +218 -0
- ml_tools/{path_manager.py → path_manager/__init__.py} +6 -3
- ml_tools/{_core/_path_manager.py → path_manager/_dragonmanager.py} +11 -347
- ml_tools/path_manager/_imprimir.py +15 -0
- ml_tools/path_manager/_path_tools.py +346 -0
- ml_tools/plot_fonts/__init__.py +8 -0
- ml_tools/plot_fonts/_imprimir.py +8 -0
- ml_tools/{_core → plot_fonts}/_plot_fonts.py +2 -5
- ml_tools/schema/__init__.py +15 -0
- ml_tools/schema/_feature_schema.py +223 -0
- ml_tools/schema/_gui_schema.py +191 -0
- ml_tools/schema/_imprimir.py +10 -0
- ml_tools/{serde.py → serde/__init__.py} +4 -2
- ml_tools/serde/_imprimir.py +10 -0
- ml_tools/{_core → serde}/_serde.py +3 -8
- ml_tools/{utilities.py → utilities/__init__.py} +11 -6
- ml_tools/utilities/_imprimir.py +18 -0
- ml_tools/{_core/_utilities.py → utilities/_utility_save_load.py} +13 -190
- ml_tools/utilities/_utility_tools.py +192 -0
- dragon_ml_toolbox-19.14.0.dist-info/RECORD +0 -111
- ml_tools/ML_chaining_inference.py +0 -8
- ml_tools/ML_configuration.py +0 -86
- ml_tools/ML_configuration_pytab.py +0 -14
- ml_tools/ML_datasetmaster.py +0 -10
- ml_tools/ML_evaluation.py +0 -16
- ml_tools/ML_evaluation_multi.py +0 -12
- ml_tools/ML_finalize_handler.py +0 -8
- ml_tools/ML_inference.py +0 -12
- ml_tools/ML_models.py +0 -14
- ml_tools/ML_models_advanced.py +0 -14
- ml_tools/ML_models_pytab.py +0 -14
- ml_tools/ML_optimization.py +0 -14
- ml_tools/ML_optimization_pareto.py +0 -8
- ml_tools/ML_scaler.py +0 -8
- ml_tools/ML_sequence_datasetmaster.py +0 -8
- ml_tools/ML_sequence_evaluation.py +0 -10
- ml_tools/ML_sequence_inference.py +0 -8
- ml_tools/ML_sequence_models.py +0 -8
- ml_tools/ML_trainer.py +0 -12
- ml_tools/ML_vision_datasetmaster.py +0 -12
- ml_tools/ML_vision_evaluation.py +0 -10
- ml_tools/ML_vision_inference.py +0 -8
- ml_tools/ML_vision_models.py +0 -18
- ml_tools/SQL.py +0 -8
- ml_tools/_core/_ETL_cleaning.py +0 -694
- ml_tools/_core/_IO_tools.py +0 -498
- ml_tools/_core/_ML_callbacks.py +0 -702
- ml_tools/_core/_ML_configuration.py +0 -1332
- ml_tools/_core/_ML_configuration_pytab.py +0 -102
- ml_tools/_core/_ML_evaluation.py +0 -867
- ml_tools/_core/_ML_evaluation_multi.py +0 -544
- ml_tools/_core/_ML_inference.py +0 -646
- ml_tools/_core/_ML_models.py +0 -668
- ml_tools/_core/_ML_models_pytab.py +0 -693
- ml_tools/_core/_ML_trainer.py +0 -2323
- ml_tools/_core/_ML_utilities.py +0 -886
- ml_tools/_core/_ML_vision_models.py +0 -644
- ml_tools/_core/_data_exploration.py +0 -1909
- ml_tools/_core/_optimization_tools.py +0 -493
- ml_tools/_core/_schema.py +0 -359
- ml_tools/plot_fonts.py +0 -8
- ml_tools/schema.py +0 -12
- {dragon_ml_toolbox-19.14.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-19.14.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/licenses/LICENSE +0 -0
- {dragon_ml_toolbox-19.14.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
- {dragon_ml_toolbox-19.14.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/top_level.txt +0 -0
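Reading the file list as a whole: 20.0.0 converts every flat top-level module (e.g. ml_tools/IO_tools.py) into a package (ml_tools/IO_tools/__init__.py) backed by private submodules, and dissolves the old ml_tools/_core monolith into those packages. Assuming each new __init__.py re-exports the same public names (as the ML_evaluation/__init__.py reproduced at the end of this diff does), consumer-facing imports should keep working unchanged; a hedged sketch:

    # Hypothetical consumer code; save_json is assumed to be re-exported by the
    # new package __init__.py, so the import line is identical in both versions.
    from ml_tools.IO_tools import save_json   # 19.14.0: resolves ml_tools/IO_tools.py
                                              # 20.0.0:  resolves ml_tools/IO_tools/__init__.py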
ml_tools/{_core/_ML_vision_datasetmaster.py → ML_datasetmaster/_vision_datasetmaster.py} +63 -65

(Content truncated by the diff viewer is marked with "…".)

@@ -2,7 +2,7 @@ import torch
 from torch.utils.data import Dataset, Subset
 import numpy
 from sklearn.model_selection import train_test_split
-from typing import Union, …
+from typing import Union, Optional, Callable, Any
 from PIL import Image
 from torchvision.datasets import ImageFolder
 from torchvision import transforms

@@ -12,15 +12,15 @@ import random
 import json
 import inspect
 
-from .…
-from …
-from ._logger import get_logger
-from ._script_info import _script_info
-from ._keys import VisionTransformRecipeKeys, ObjectDetectionKeys
-from ._IO_tools import custom_logger
+from ..ML_vision_transformers._core_transforms import TRANSFORM_REGISTRY, _save_recipe
+from ..IO_tools import save_json
 
+from ..path_manager import make_fullpath
+from .._core import get_logger
+from ..keys._keys import VisionTransformRecipeKeys, ObjectDetectionKeys
 
 
+_LOGGER = get_logger("DragonVisionDataset")
 
 
 __all__ = [

@@ -50,7 +50,7 @@ class DragonDatasetVision:
         self._test_dataset = None
         self._val_dataset = None
         self._full_dataset: Optional[ImageFolder] = None
-        self.labels: Optional[…
+        self.labels: Optional[list[int]] = None
         self.class_map: dict[str,int] = dict()
 
         self._is_split = False

@@ -255,10 +255,10 @@ class DragonDatasetVision:
     def configure_transforms(self,
                              resize_size: int = 256,
                              crop_size: Optional[int] = 224,
-                             mean: Optional[…
-                             std: Optional[…
-                             pre_transforms: Optional[…
-                             extra_train_transforms: Optional[…
+                             mean: Optional[list[float]] = [0.485, 0.456, 0.406],
+                             std: Optional[list[float]] = [0.229, 0.224, 0.225],
+                             pre_transforms: Optional[list[Callable]] = None,
+                             extra_train_transforms: Optional[list[Callable]] = None) -> 'DragonDatasetVision':
         """
         Configures and applies the image transformations and augmentations.
 
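The rewritten signature replaces the old typing generics with builtin ones (list[float], Callable) and spells out the default mean/std, which are the standard ImageNet normalization statistics. A minimal usage sketch, assuming a dataset instance already exists (its construction is outside this hunk):

    # Sketch under assumptions: `ds` is an existing DragonDatasetVision.
    ds = ...  # constructed elsewhere; not shown in this diff
    ds.configure_transforms(
        resize_size=256,
        crop_size=224,
        mean=[0.485, 0.456, 0.406],  # ImageNet channel means (the new defaults)
        std=[0.229, 0.224, 0.225],   # ImageNet channel stds (the new defaults)
    )  # annotated to return 'DragonDatasetVision', so calls can be chained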
@@ -368,7 +368,7 @@ class DragonDatasetVision:
         _LOGGER.info("Image transforms configured and applied.")
         return self
 
-    def get_datasets(self) -> …
+    def get_datasets(self) -> tuple[Dataset, ...]:
         """
         Returns the final train, validation, and optional test datasets.
 

@@ -407,7 +407,7 @@ class DragonDatasetVision:
             _LOGGER.error("Transforms are not configured. Call .configure_transforms() first.")
             raise RuntimeError()
 
-        recipe: …
+        recipe: dict[str, Any] = {
             VisionTransformRecipeKeys.TASK: "classification",
             VisionTransformRecipeKeys.PIPELINE: []
         }

@@ -500,11 +500,12 @@ class DragonDatasetVision:
             _LOGGER.error(f"Class to index mapping is empty.")
             raise ValueError()
 
-…
-…
-…
-…
-…
+        save_json(data=self.class_map,
+                  directory=save_dir,
+                  filename="Class_to_Index",
+                  verbose=False)
+
+        _LOGGER.info(f"Class to index mapping saved to {save_dir}.")
 
         return self.class_map
 
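The replaced block swaps the removed _core helper (custom_logger, dropped from the imports above) for the shared IO_tools save_json utility, with exactly the keyword arguments visible in the hunk. The same call in isolation, assuming save_json writes <directory>/<filename>.json:

    from ml_tools.IO_tools import save_json

    class_map = {"cat": 0, "dog": 1}
    save_json(data=class_map,
              directory="artifacts",       # any output directory
              filename="Class_to_Index",   # assumed to produce artifacts/Class_to_Index.json
              verbose=False)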
@@ -606,13 +607,13 @@ class _SegmentationDataset(Dataset):
 
     Loads images as RGB and masks as 'L' (grayscale, 8-bit integer pixels).
     """
-    def __init__(self, image_paths: …
+    def __init__(self, image_paths: list[Path], mask_paths: list[Path], transform: Optional[Callable] = None):
         self.image_paths = image_paths
         self.mask_paths = mask_paths
         self.transform = transform
 
         # --- Propagate 'classes' if they exist for trainer ---
-        self.classes: …
+        self.classes: list[str] = []
 
     def __len__(self):
         return len(self.image_paths)

@@ -639,17 +640,17 @@ class _SegmentationDataset(Dataset):
 # Internal Paired Transform Helpers
 class _PairedCompose:
     """A 'Compose' for paired image/mask transforms."""
-    def __init__(self, transforms: …
+    def __init__(self, transforms: list[Callable]):
         self.transforms = transforms
 
-    def __call__(self, image: Any, mask: Any) -> …
+    def __call__(self, image: Any, mask: Any) -> tuple[Any, Any]:
         for t in self.transforms:
             image, mask = t(image, mask)
         return image, mask
 
 class _PairedToTensor:
     """Converts a PIL Image pair (image, mask) to Tensors."""
-    def __call__(self, image: Image.Image, mask: Image.Image) -> …
+    def __call__(self, image: Image.Image, mask: Image.Image) -> tuple[torch.Tensor, torch.Tensor]:
         # Use new variable names to satisfy the linter
         image_tensor = TF.to_tensor(image)
         # Convert mask to LongTensor, not float.

@@ -659,10 +660,10 @@ class _PairedToTensor:
 
 class _PairedNormalize:
     """Normalizes the image tensor and leaves the mask untouched."""
-    def __init__(self, mean: …
+    def __init__(self, mean: list[float], std: list[float]):
         self.normalize = transforms.Normalize(mean, std)
 
-    def __call__(self, image: torch.Tensor, mask: torch.Tensor) -> …
+    def __call__(self, image: torch.Tensor, mask: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
         image = self.normalize(image)
         return image, mask
 

@@ -671,7 +672,7 @@ class _PairedResize:
     def __init__(self, size: int):
         self.size = [size, size]
 
-    def __call__(self, image: Image.Image, mask: Image.Image) -> …
+    def __call__(self, image: Image.Image, mask: Image.Image) -> tuple[Image.Image, Image.Image]:
         # Use new variable names to avoid linter confusion
         resized_image = TF.resize(image, self.size, interpolation=TF.InterpolationMode.BILINEAR) # type: ignore
         # Use NEAREST for mask to avoid interpolating class IDs (e.g., 1.5)

@@ -683,7 +684,7 @@ class _PairedCenterCrop:
     def __init__(self, size: int):
         self.size = [size, size]
 
-    def __call__(self, image: Image.Image, mask: Image.Image) -> …
+    def __call__(self, image: Image.Image, mask: Image.Image) -> tuple[Image.Image, Image.Image]:
         cropped_image = TF.center_crop(image, self.size) # type: ignore
         cropped_mask = TF.center_crop(mask, self.size) # type: ignore
         return cropped_image, cropped_mask # type: ignore

@@ -693,7 +694,7 @@ class _PairedRandomHorizontalFlip:
     def __init__(self, p: float = 0.5):
         self.p = p
 
-    def __call__(self, image: Image.Image, mask: Image.Image) -> …
+    def __call__(self, image: Image.Image, mask: Image.Image) -> tuple[Image.Image, Image.Image]:
         if random.random() < self.p:
             flipped_image = TF.hflip(image) # type: ignore
             flipped_mask = TF.hflip(mask) # type: ignore

@@ -701,14 +702,14 @@ class _PairedRandomHorizontalFlip:
 
 class _PairedRandomResizedCrop:
     """Applies the same random resized crop to both image and mask."""
-    def __init__(self, size: int, scale: …
+    def __init__(self, size: int, scale: tuple[float, float]=(0.08, 1.0), ratio: tuple[float, float]=(3./4., 4./3.)):
         self.size = [size, size]
         self.scale = scale
         self.ratio = ratio
         self.interpolation = TF.InterpolationMode.BILINEAR
         self.mask_interpolation = TF.InterpolationMode.NEAREST
 
-    def __call__(self, image: Image.Image, mask: Image.Image) -> …
+    def __call__(self, image: Image.Image, mask: Image.Image) -> tuple[Image.Image, Image.Image]:
         # Get parameters for the random crop
         # Convert scale/ratio tuples to lists to satisfy the linter's type hint
         i, j, h, w = transforms.RandomResizedCrop.get_params(image, list(self.scale), list(self.ratio)) # type: ignore
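All of the _Paired* helpers above apply one operation to the image and its mask in lockstep, resizing and cropping the mask with NEAREST interpolation so integer class IDs are never blended. They are private, and DragonDatasetSegmentation.configure_transforms() is assumed to assemble them internally; the sketch below only illustrates how they compose:

    # Illustration only: roughly what configure_transforms() is assumed to build
    # for the validation pipeline, given two PIL.Image inputs.
    val_transform = _PairedCompose([
        _PairedResize(256),        # BILINEAR for the image, NEAREST for the mask
        _PairedCenterCrop(224),
        _PairedToTensor(),         # image -> float tensor, mask -> LongTensor
        _PairedNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    image_t, mask_t = val_transform(pil_image, pil_mask)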
@@ -744,9 +745,9 @@ class DragonDatasetSegmentation:
         self._train_dataset = None
         self._test_dataset = None
         self._val_dataset = None
-        self.image_paths: …
-        self.mask_paths: …
-        self.class_map: …
+        self.image_paths: list[Path] = []
+        self.mask_paths: list[Path] = []
+        self.class_map: dict[str, int] = {}
 
         self._is_split = False
         self._are_transforms_configured = False

@@ -830,12 +831,12 @@ class DragonDatasetSegmentation:
         """
         DragonDatasetVision.inspect_folder(path)
 
-    def set_class_map(self, class_map: …
+    def set_class_map(self, class_map: dict[str, int]) -> 'DragonDatasetSegmentation':
         """
         Sets a map of class_name -> pixel value. This is used by the Trainer for clear evaluation reports.
 
         Args:
-            class_map (…
+            class_map (dict[str, int]): A dictionary mapping the integer pixel
                 value in a mask to its string name.
                 Example: {'background': 0, 'road': 1, 'car': 2}
         """

@@ -844,7 +845,7 @@ class DragonDatasetSegmentation:
         return self
 
     @property
-    def classes(self) -> …
+    def classes(self) -> list[str]:
         """Returns the list of class names, if set."""
         if self.class_map:
             return list(self.class_map.keys())
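Taken together with the classes property, set_class_map gives the trainer readable names for mask pixel values. The docstring's own example, spelled out as a usage sketch:

    ds = ...  # an existing DragonDatasetSegmentation; construction not shown here
    ds.set_class_map({'background': 0, 'road': 1, 'car': 2})  # returns self
    ds.classes  # -> ['background', 'road', 'car']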
@@ -916,8 +917,8 @@ class DragonDatasetSegmentation:
     def configure_transforms(self,
                              resize_size: int = 256,
                              crop_size: int = 224,
-                             mean: Optional[…
-                             std: Optional[…
+                             mean: Optional[list[float]] = [0.485, 0.456, 0.406],
+                             std: Optional[list[float]] = [0.229, 0.224, 0.225]) -> 'DragonDatasetSegmentation':
         """
         Configures and applies the image and mask transformations.
 

@@ -993,7 +994,7 @@ class DragonDatasetSegmentation:
         _LOGGER.info("Paired segmentation transforms configured and applied.")
         return self
 
-    def get_datasets(self) -> …
+    def get_datasets(self) -> tuple[Dataset, ...]:
         """
         Returns the final train, validation, and optional test datasets.
 

@@ -1036,7 +1037,7 @@ class DragonDatasetSegmentation:
         file_path = make_fullpath(filepath, make=True, enforce="file")
 
         # Add standard transforms
-        recipe: …
+        recipe: dict[str, Any] = {
             VisionTransformRecipeKeys.TASK: "segmentation",
             VisionTransformRecipeKeys.PIPELINE: [
                 {VisionTransformRecipeKeys.NAME: "Resize", "kwargs": {"size": components[VisionTransformRecipeKeys.RESIZE_SIZE]}},

@@ -1109,7 +1110,7 @@ class DragonDatasetSegmentation:
 
 
 # Object detection
-def _od_collate_fn(batch: …
+def _od_collate_fn(batch: list[tuple[torch.Tensor, dict[str, torch.Tensor]]]) -> tuple[list[torch.Tensor], list[dict[str, torch.Tensor]]]:
     """
     Custom collate function for object detection.
 
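Object-detection batches are ragged (each image carries a different number of boxes), which is why the new annotation on _od_collate_fn maps a list of (image, target) pairs to parallel lists instead of stacked tensors. A wiring sketch, assuming train_ds yields (image_tensor, target_dict) pairs as _ObjectDetectionDataset below does:

    from torch.utils.data import DataLoader

    loader = DataLoader(train_ds, batch_size=4, shuffle=True,
                        collate_fn=_od_collate_fn)
    images, targets = next(iter(loader))
    # images:  list[torch.Tensor], one per sample
    # targets: list[dict[str, torch.Tensor]], each holding boxes and labels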
@@ -1128,13 +1129,13 @@ class _ObjectDetectionDataset(Dataset):
     Loads an image as 'RGB' and parses its corresponding JSON annotation file
     to create the required target dictionary (boxes, labels).
     """
-    def __init__(self, image_paths: …
+    def __init__(self, image_paths: list[Path], annotation_paths: list[Path], transform: Optional[Callable] = None):
         self.image_paths = image_paths
         self.annotation_paths = annotation_paths
         self.transform = transform
 
         # --- Propagate 'classes' if they exist ---
-        self.classes: …
+        self.classes: list[str] = []
 
     def __len__(self):
         return len(self.image_paths)

@@ -1156,7 +1157,7 @@ class _ObjectDetectionDataset(Dataset):
         labels = ann_data[ObjectDetectionKeys.LABELS] # Expected: [1, 2, 1, ...]
 
         # Convert to tensors
-        target: …
+        target: dict[str, Any] = {}
         target[ObjectDetectionKeys.BOXES] = torch.as_tensor(boxes, dtype=torch.float32)
         target[ObjectDetectionKeys.LABELS] = torch.as_tensor(labels, dtype=torch.int64)
 
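Given the conversion above (boxes to float32, labels to int64), each annotation JSON must hold a box list and a parallel label list. The literal key strings live in ObjectDetectionKeys and are only assumed here; the box layout is typically torchvision's [x1, y1, x2, y2]:

    # Assumed shape of one annotation file, written as the dict json.load returns.
    ann_data = {
        "boxes": [[10.0, 20.0, 110.0, 220.0],   # key name assumed
                  [5.0, 5.0, 50.0, 60.0]],
        "labels": [1, 2],                        # key name assumed
    }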
@@ -1173,25 +1174,25 @@ class _ObjectDetectionDataset(Dataset):
 # Internal Paired Transform Helpers for Object Detection
 class _OD_PairedCompose:
     """A 'Compose' for paired image/target_dict transforms."""
-    def __init__(self, transforms: …
+    def __init__(self, transforms: list[Callable]):
         self.transforms = transforms
 
-    def __call__(self, image: Any, target: Any) -> …
+    def __call__(self, image: Any, target: Any) -> tuple[Any, Any]:
         for t in self.transforms:
             image, target = t(image, target)
         return image, target
 
 class _OD_PairedToTensor:
     """Converts a PIL Image to Tensor, passes targets dict through."""
-    def __call__(self, image: Image.Image, target: …
+    def __call__(self, image: Image.Image, target: dict[str, Any]) -> tuple[torch.Tensor, dict[str, Any]]:
         return TF.to_tensor(image), target
 
 class _OD_PairedNormalize:
     """Normalizes the image tensor and leaves the target dict untouched."""
-    def __init__(self, mean: …
+    def __init__(self, mean: list[float], std: list[float]):
         self.normalize = transforms.Normalize(mean, std)
 
-    def __call__(self, image: torch.Tensor, target: …
+    def __call__(self, image: torch.Tensor, target: dict[str, Any]) -> tuple[torch.Tensor, dict[str, Any]]:
         image_normalized = self.normalize(image)
         return image_normalized, target
 

@@ -1200,7 +1201,7 @@ class _OD_PairedRandomHorizontalFlip:
     def __init__(self, p: float = 0.5):
         self.p = p
 
-    def __call__(self, image: Image.Image, target: …
+    def __call__(self, image: Image.Image, target: dict[str, Any]) -> tuple[Image.Image, dict[str, Any]]:
         if random.random() < self.p:
             w, h = image.size
             # Use new variable names to avoid linter confusion

@@ -1250,15 +1251,15 @@ class DragonDatasetObjectDetection:
         self._train_dataset = None
         self._test_dataset = None
         self._val_dataset = None
-        self.image_paths: …
-        self.annotation_paths: …
-        self.class_map: …
+        self.image_paths: list[Path] = []
+        self.annotation_paths: list[Path] = []
+        self.class_map: dict[str, int] = {}
 
         self._is_split = False
         self._are_transforms_configured = False
         self.train_transform: Optional[Callable] = None
         self.val_transform: Optional[Callable] = None
-        self._val_recipe_components: Optional[…
+        self._val_recipe_components: Optional[dict[str, Any]] = None
         self._has_mean_std: bool = False
 
     @classmethod

@@ -1328,7 +1329,7 @@ class DragonDatasetObjectDetection:
         """
         DragonDatasetVision.inspect_folder(path)
 
-    def set_class_map(self, class_map: …
+    def set_class_map(self, class_map: dict[str, int]) -> 'DragonDatasetObjectDetection':
         """
         Sets a map of class_name -> pixel_value. This is used by the
         trainer for clear evaluation reports.

@@ -1349,7 +1350,7 @@ class DragonDatasetObjectDetection:
         return self
 
     @property
-    def classes(self) -> …
+    def classes(self) -> list[str]:
         """Returns the list of class names, if set."""
         if self.class_map:
             return list(self.class_map.keys())

@@ -1419,8 +1420,8 @@ class DragonDatasetObjectDetection:
         return self
 
     def configure_transforms(self,
-                             mean: Optional[…
-                             std: Optional[…
+                             mean: Optional[list[float]] = [0.485, 0.456, 0.406],
+                             std: Optional[list[float]] = [0.229, 0.224, 0.225]) -> 'DragonDatasetObjectDetection':
         """
         Configures and applies the image and target transformations.
 

@@ -1488,7 +1489,7 @@ class DragonDatasetObjectDetection:
         _LOGGER.info("Paired object detection transforms configured and applied.")
         return self
 
-    def get_datasets(self) -> …
+    def get_datasets(self) -> tuple[Dataset, ...]:
         """
         Returns the final train, validation, and optional test datasets.
 

@@ -1536,7 +1537,7 @@ class DragonDatasetObjectDetection:
         file_path = make_fullpath(filepath, make=True, enforce="file")
 
         # Add standard transforms
-        recipe: …
+        recipe: dict[str, Any] = {
             VisionTransformRecipeKeys.TASK: "object_detection",
             VisionTransformRecipeKeys.PIPELINE: [
                 {VisionTransformRecipeKeys.NAME: "ToTensor", "kwargs": {}},

@@ -1605,6 +1606,3 @@ class DragonDatasetObjectDetection:
 
         return s
 
-
-def info():
-    _script_info(__all__)
ml_tools/ML_evaluation/__init__.py +53 -0 (new file)

@@ -0,0 +1,53 @@
+from ._regression import (
+    regression_metrics,
+    multi_target_regression_metrics
+)
+
+from ._classification import (
+    classification_metrics,
+    multi_label_classification_metrics
+)
+
+from ._loss import (
+    plot_losses,
+)
+
+from ._feature_importance import (
+    shap_summary_plot,
+    multi_target_shap_summary_plot,
+    plot_attention_importance
+)
+
+from ._sequence import (
+    sequence_to_value_metrics,
+    sequence_to_sequence_metrics
+)
+
+from ._vision import (
+    segmentation_metrics,
+    object_detection_metrics
+)
+
+from ._imprimir import info
+
+
+__all__ = [
+    # regression
+    "regression_metrics",
+    "multi_target_regression_metrics",
+    # classification
+    "classification_metrics",
+    "multi_label_classification_metrics",
+    # loss
+    "plot_losses",
+    # feature importance
+    "shap_summary_plot",
+    "multi_target_shap_summary_plot",
+    "plot_attention_importance",
+    # sequence
+    "sequence_to_value_metrics",
+    "sequence_to_sequence_metrics",
+    # vision
+    "segmentation_metrics",
+    "object_detection_metrics",
+]
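With the new package in place, call sites that previously imported from the removed flat modules (ML_evaluation.py, ML_evaluation_multi.py) import the same names from the package instead; info() is assumed to print the package's public API, as the removed _script_info(__all__) pattern did:

    from ml_tools.ML_evaluation import (
        regression_metrics,
        classification_metrics,
        plot_losses,
        info,
    )

    info()  # assumed: lists the names in __all__ above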