dragon-ml-toolbox 13.0.0__py3-none-any.whl → 14.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/METADATA +12 -2
  2. dragon_ml_toolbox-14.7.0.dist-info/RECORD +49 -0
  3. {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +10 -0
  4. ml_tools/MICE_imputation.py +207 -5
  5. ml_tools/ML_configuration.py +108 -0
  6. ml_tools/ML_datasetmaster.py +241 -260
  7. ml_tools/ML_evaluation.py +229 -76
  8. ml_tools/ML_evaluation_multi.py +45 -16
  9. ml_tools/ML_inference.py +0 -1
  10. ml_tools/ML_models.py +135 -55
  11. ml_tools/ML_models_advanced.py +323 -0
  12. ml_tools/ML_optimization.py +49 -36
  13. ml_tools/ML_trainer.py +498 -29
  14. ml_tools/ML_utilities.py +351 -4
  15. ml_tools/ML_vision_datasetmaster.py +1492 -0
  16. ml_tools/ML_vision_evaluation.py +260 -0
  17. ml_tools/ML_vision_inference.py +428 -0
  18. ml_tools/ML_vision_models.py +641 -0
  19. ml_tools/ML_vision_transformers.py +203 -0
  20. ml_tools/PSO_optimization.py +5 -1
  21. ml_tools/_ML_vision_recipe.py +88 -0
  22. ml_tools/__init__.py +1 -0
  23. ml_tools/_schema.py +96 -0
  24. ml_tools/custom_logger.py +37 -14
  25. ml_tools/data_exploration.py +576 -138
  26. ml_tools/ensemble_evaluation.py +53 -10
  27. ml_tools/keys.py +43 -1
  28. ml_tools/math_utilities.py +1 -1
  29. ml_tools/optimization_tools.py +65 -86
  30. ml_tools/serde.py +78 -17
  31. ml_tools/utilities.py +192 -3
  32. dragon_ml_toolbox-13.0.0.dist-info/RECORD +0 -41
  33. ml_tools/ML_simple_optimization.py +0 -413
  34. {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/WHEEL +0 -0
  35. {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/licenses/LICENSE +0 -0
  36. {dragon_ml_toolbox-13.0.0.dist-info → dragon_ml_toolbox-14.7.0.dist-info}/top_level.txt +0 -0
ml_tools/ML_vision_transformers.py ADDED
@@ -0,0 +1,203 @@
+ from typing import Union, Dict, Type, Callable, Optional, Any, List, Literal
+ from PIL import ImageOps, Image
+ from torchvision import transforms
+ from pathlib import Path
+
+ from ._logger import _LOGGER
+ from ._script_info import _script_info
+ from .keys import VisionTransformRecipeKeys
+ from .path_manager import make_fullpath
+
+
+ __all__ = [
+     "TRANSFORM_REGISTRY",
+     "ResizeAspectFill",
+     "create_offline_augmentations"
+ ]
+
+ # --- Custom Vision Transform Class ---
+ class ResizeAspectFill:
+     """
+     Custom transformation to make an image square by padding it to match the
+     longest side, preserving the aspect ratio. The image is finally centered.
+
+     Args:
+         pad_color (Union[str, int]): Color to use for the padding.
+             Defaults to "black".
+     """
+     def __init__(self, pad_color: Union[str, int] = "black") -> None:
+         self.pad_color = pad_color
+         # Store kwargs to allow for re-creation
+         self.__setattr__(VisionTransformRecipeKeys.KWARGS, {"pad_color": pad_color})
+
+     def __call__(self, image: Image.Image) -> Image.Image:
+         if not isinstance(image, Image.Image):
+             _LOGGER.error(f"Expected PIL.Image.Image, got {type(image).__name__}")
+             raise TypeError()
+
+         w, h = image.size
+         if w == h:
+             return image
+
+         # Determine padding to center the image
+         if w > h:
+             top_padding = (w - h) // 2
+             bottom_padding = w - h - top_padding
+             padding = (0, top_padding, 0, bottom_padding)
+         else: # h > w
+             left_padding = (h - w) // 2
+             right_padding = h - w - left_padding
+             padding = (left_padding, 0, right_padding, 0)
+
+         return ImageOps.expand(image, padding, fill=self.pad_color)
+
+
+ #NOTE: Add custom transforms.
+ TRANSFORM_REGISTRY: Dict[str, Type[Callable]] = {
+     "ResizeAspectFill": ResizeAspectFill,
+ }
+
+
+ def _build_transform_from_recipe(recipe: Dict[str, Any]) -> transforms.Compose:
+     """Internal helper to build a transform pipeline from a recipe dict."""
+     pipeline_steps: List[Callable] = []
+
+     if VisionTransformRecipeKeys.PIPELINE not in recipe:
+         _LOGGER.error("Recipe dict is invalid: missing 'pipeline' key.")
+         raise ValueError("Invalid recipe format.")
+
+     for step in recipe[VisionTransformRecipeKeys.PIPELINE]:
+         t_name = step.get(VisionTransformRecipeKeys.NAME)
+         t_kwargs = step.get(VisionTransformRecipeKeys.KWARGS, {})
+
+         if not t_name:
+             _LOGGER.error(f"Invalid transform step, missing 'name': {step}")
+             continue
+
+         transform_class: Any = None
+
+         # 1. Check standard torchvision transforms
+         if hasattr(transforms, t_name):
+             transform_class = getattr(transforms, t_name)
+         # 2. Check custom transforms
+         elif t_name in TRANSFORM_REGISTRY:
+             transform_class = TRANSFORM_REGISTRY[t_name]
+         # 3. Not found
+         else:
+             _LOGGER.error(f"Unknown transform '{t_name}' in recipe. Not found in torchvision.transforms or TRANSFORM_REGISTRY.")
+             raise ValueError(f"Unknown transform name: {t_name}")
+
+         # Instantiate the transform
+         try:
+             pipeline_steps.append(transform_class(**t_kwargs))
+         except Exception as e:
+             _LOGGER.error(f"Failed to instantiate transform '{t_name}' with kwargs {t_kwargs}: {e}")
+             raise
+
+     return transforms.Compose(pipeline_steps)
+
+
+ def create_offline_augmentations(
+     input_directory: Union[str, Path],
+     output_directory: Union[str, Path],
+     results_per_image: int,
+     recipe: Optional[Dict[str, Any]] = None,
+     save_format: Literal["WEBP", "JPEG", "PNG", "BMP", "TIF"] = "WEBP",
+     save_quality: int = 80
+ ) -> None:
+     """
+     Reads all valid images from an input directory, applies augmentations,
+     and saves the new images to an output directory (offline augmentation).
+
+     Skips subdirectories in the input path.
+
+     Args:
+         input_directory (Union[str, Path]): Path to the directory of source images.
+         output_directory (Union[str, Path]): Path to save the augmented images.
+         results_per_image (int): The number of augmented versions to create
+             for each source image.
+         recipe (Optional[Dict[str, Any]]): A transform recipe dictionary. If None,
+             a default set of strong, random
+             augmentations will be used.
+         save_format (str): The format to save images (e.g., "WEBP", "JPEG", "PNG").
+             Defaults to "WEBP" for good compression.
+         save_quality (int): The quality for lossy formats (1-100). Defaults to 80.
+     """
+     VALID_IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.bmp', '.webp', '.tif', '.tiff')
+
+     # --- 1. Validate Paths ---
+     in_path = make_fullpath(input_directory, enforce="directory")
+     out_path = make_fullpath(output_directory, make=True, enforce="directory")
+
+     _LOGGER.info(f"Starting offline augmentation:\n\tInput: {in_path}\n\tOutput: {out_path}")
+
+     # --- 2. Find Images ---
+     image_files = [
+         f for f in in_path.iterdir()
+         if f.is_file() and f.suffix.lower() in VALID_IMG_EXTENSIONS
+     ]
+
+     if not image_files:
+         _LOGGER.warning(f"No valid image files found in {in_path}.")
+         return
+
+     _LOGGER.info(f"Found {len(image_files)} images to process.")
+
+     # --- 3. Define Transform Pipeline ---
+     transform_pipeline: transforms.Compose
+
+     if recipe:
+         _LOGGER.info("Building transformations from provided recipe.")
+         try:
+             transform_pipeline = _build_transform_from_recipe(recipe)
+         except Exception as e:
+             _LOGGER.error(f"Failed to build transform from recipe: {e}")
+             return
+     else:
+         _LOGGER.info("No recipe provided. Using default random augmentation pipeline.")
+         # Default "random" pipeline
+         transform_pipeline = transforms.Compose([
+             transforms.RandomResizedCrop(256, scale=(0.4, 1.0)),
+             transforms.RandomHorizontalFlip(p=0.5),
+             transforms.RandomRotation(degrees=90),
+             transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.15),
+             transforms.RandomPerspective(distortion_scale=0.2, p=0.4),
+             transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),
+             transforms.RandomApply([
+                 transforms.GaussianBlur(kernel_size=3)
+             ], p=0.3)
+         ])
+
+     # --- 4. Process Images ---
+     total_saved = 0
+     format_upper = save_format.upper()
+
+     for img_path in image_files:
+         _LOGGER.debug(f"Processing {img_path.name}...")
+         try:
+             original_image = Image.open(img_path).convert("RGB")
+
+             for i in range(results_per_image):
+                 new_stem = f"{img_path.stem}_aug_{i+1:03d}"
+                 output_path = out_path / f"{new_stem}.{format_upper.lower()}"
+
+                 # Apply transform
+                 transformed_image = transform_pipeline(original_image)
+
+                 # Save
+                 transformed_image.save(
+                     output_path,
+                     format=format_upper,
+                     quality=save_quality,
+                     optimize=True # Add optimize flag
+                 )
+                 total_saved += 1
+
+         except Exception as e:
+             _LOGGER.warning(f"Failed to process or save augmentations for {img_path.name}: {e}")
+
+     _LOGGER.info(f"Offline augmentation complete. Saved {total_saved} new images.")
+
+
+ def info():
+     _script_info(__all__)
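For reference, a minimal usage sketch of the public entry point added above. The recipe keys come from ml_tools.keys.VisionTransformRecipeKeys as imported in the new module; the directory paths and the specific transforms chosen here are illustrative assumptions, not part of the package.

    # Usage sketch: build a recipe and run offline augmentation (paths are hypothetical).
    from ml_tools.ML_vision_transformers import create_offline_augmentations
    from ml_tools.keys import VisionTransformRecipeKeys

    recipe = {
        VisionTransformRecipeKeys.PIPELINE: [
            {VisionTransformRecipeKeys.NAME: "ResizeAspectFill",
             VisionTransformRecipeKeys.KWARGS: {"pad_color": "black"}},
            {VisionTransformRecipeKeys.NAME: "RandomHorizontalFlip",
             VisionTransformRecipeKeys.KWARGS: {"p": 0.5}},
        ]
    }

    create_offline_augmentations(
        input_directory="data/raw_images",   # hypothetical source folder
        output_directory="data/augmented",   # hypothetical output folder
        results_per_image=5,
        recipe=recipe,                       # omit to use the default random pipeline
        save_format="WEBP",
        save_quality=80,
    )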
ml_tools/PSO_optimization.py CHANGED
@@ -17,6 +17,10 @@ from ._script_info import _script_info
  from .SQL import DatabaseManager
  from .optimization_tools import _save_result

+ """
+ DEPRECATED
+ """
+

  __all__ = [
      "ObjectiveFunction",
@@ -46,7 +50,7 @@ class ObjectiveFunction():
          self.binary_features = binary_features
          self.is_hybrid = False if binary_features <= 0 else True
          self.use_noise = add_noise
-         self._artifact = deserialize_object(trained_model_path, verbose=False, raise_on_error=True)
+         self._artifact = deserialize_object(trained_model_path, verbose=False)
          self.model = self._get_from_artifact(EnsembleKeys.MODEL)
          self.feature_names: Optional[list[str]] = self._get_from_artifact(EnsembleKeys.FEATURES) # type: ignore
          self.target_name: Optional[str] = self._get_from_artifact(EnsembleKeys.TARGET) # type: ignore
ml_tools/_ML_vision_recipe.py ADDED
@@ -0,0 +1,88 @@
+ import json
+ import torch
+ from torchvision import transforms
+ from typing import Dict, Any, List, Callable, Union
+ from pathlib import Path
+
+ from .ML_vision_transformers import TRANSFORM_REGISTRY
+ from ._logger import _LOGGER
+ from .keys import VisionTransformRecipeKeys
+ from .path_manager import make_fullpath
+
+
+ def save_recipe(recipe: Dict[str, Any], filepath: Path) -> None:
+     """
+     Saves a transform recipe dictionary to a JSON file.
+
+     Args:
+         recipe (Dict[str, Any]): The recipe dictionary to save.
+         filepath (str): The path to the output .json file.
+     """
+     final_filepath = filepath.with_suffix(".json")
+
+     try:
+         with open(final_filepath, 'w') as f:
+             json.dump(recipe, f, indent=4)
+         _LOGGER.info(f"Transform recipe saved as '{final_filepath.name}'.")
+     except Exception as e:
+         _LOGGER.error(f"Failed to save recipe to '{final_filepath}': {e}")
+         raise
+
+
+ def load_recipe_and_build_transform(filepath: Union[str,Path]) -> transforms.Compose:
+     """
+     Loads a transform recipe from a .json file and reconstructs the
+     torchvision.transforms.Compose pipeline.
+
+     Args:
+         filepath (str): Path to the saved transform recipe .json file.
+
+     Returns:
+         transforms.Compose: The reconstructed transformation pipeline.
+
+     Raises:
+         ValueError: If a transform name in the recipe is not found in
+             torchvision.transforms or the custom TRANSFORM_REGISTRY.
+     """
+     # validate filepath
+     final_filepath = make_fullpath(filepath, enforce="file")
+
+     try:
+         with open(final_filepath, 'r') as f:
+             recipe = json.load(f)
+     except Exception as e:
+         _LOGGER.error(f"Failed to load recipe from '{final_filepath}': {e}")
+         raise
+
+     pipeline_steps: List[Callable] = []
+
+     if VisionTransformRecipeKeys.PIPELINE not in recipe:
+         _LOGGER.error("Recipe file is invalid: missing 'pipeline' key.")
+         raise ValueError("Invalid recipe format.")
+
+     for step in recipe[VisionTransformRecipeKeys.PIPELINE]:
+         t_name = step[VisionTransformRecipeKeys.NAME]
+         t_kwargs = step[VisionTransformRecipeKeys.KWARGS]
+
+         transform_class: Any = None
+
+         # 1. Check standard torchvision transforms
+         if hasattr(transforms, t_name):
+             transform_class = getattr(transforms, t_name)
+         # 2. Check custom transforms
+         elif t_name in TRANSFORM_REGISTRY:
+             transform_class = TRANSFORM_REGISTRY[t_name]
+         # 3. Not found
+         else:
+             _LOGGER.error(f"Unknown transform '{t_name}' in recipe. Not found in torchvision.transforms or TRANSFORM_REGISTRY.")
+             raise ValueError(f"Unknown transform name: {t_name}")
+
+         # Instantiate the transform
+         try:
+             pipeline_steps.append(transform_class(**t_kwargs))
+         except Exception as e:
+             _LOGGER.error(f"Failed to instantiate transform '{t_name}' with kwargs {t_kwargs}: {e}")
+             raise
+
+     _LOGGER.info(f"Successfully loaded and built transform pipeline from '{final_filepath.name}'.")
+     return transforms.Compose(pipeline_steps)
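A small sketch of the intended save/load round trip, assuming the helpers are imported directly from the private module defined above; the file name and pipeline contents are illustrative assumptions.

    # Save a recipe to JSON, then rebuild the torchvision pipeline from it.
    from pathlib import Path
    from ml_tools._ML_vision_recipe import save_recipe, load_recipe_and_build_transform
    from ml_tools.keys import VisionTransformRecipeKeys

    recipe = {
        VisionTransformRecipeKeys.PIPELINE: [
            {VisionTransformRecipeKeys.NAME: "ResizeAspectFill",
             VisionTransformRecipeKeys.KWARGS: {"pad_color": "black"}},
            {VisionTransformRecipeKeys.NAME: "Resize",
             VisionTransformRecipeKeys.KWARGS: {"size": [224, 224]}},
        ]
    }

    save_recipe(recipe, Path("val_transform"))                        # written as val_transform.json
    pipeline = load_recipe_and_build_transform("val_transform.json")  # rebuilt transforms.Compose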
ml_tools/__init__.py CHANGED
@@ -1 +1,2 @@
  from .custom_logger import custom_logger
+ from ._schema import FeatureSchema
ml_tools/_schema.py ADDED
@@ -0,0 +1,96 @@
+ from typing import NamedTuple, Tuple, Optional, Dict, Union
+ from pathlib import Path
+
+ from .custom_logger import save_list_strings
+ from .keys import DatasetKeys
+ from ._logger import _LOGGER
+
+
+ class FeatureSchema(NamedTuple):
+     """Holds the final, definitive schema for the model pipeline."""
+
+     # The final, ordered list of all feature names
+     feature_names: Tuple[str, ...]
+
+     # List of all continuous feature names
+     continuous_feature_names: Tuple[str, ...]
+
+     # List of all categorical feature names
+     categorical_feature_names: Tuple[str, ...]
+
+     # Map of {column_index: cardinality} for categorical features
+     categorical_index_map: Optional[Dict[int, int]]
+
+     # Map string-to-int category values (e.g., {'color': {'red': 0, 'blue': 1}})
+     categorical_mappings: Optional[Dict[str, Dict[str, int]]]
+
+     def _save_helper(self, artifact: Tuple[str, ...], directory: Union[str,Path], filename: str, verbose: bool):
+         to_save = list(artifact)
+
+         # empty check
+         if not to_save:
+             _LOGGER.warning(f"Skipping save for '{filename}': The feature list is empty.")
+             return
+
+         save_list_strings(list_strings=to_save,
+                           directory=directory,
+                           filename=filename,
+                           verbose=verbose)
+
+     def save_all_features(self, directory: Union[str,Path], verbose: bool=True):
+         """
+         Saves all feature names to a text file.
+
+         Args:
+             directory: The directory where the file will be saved.
+             verbose: If True, prints a confirmation message upon saving.
+         """
+         self._save_helper(artifact=self.feature_names,
+                           directory=directory,
+                           filename=DatasetKeys.FEATURE_NAMES,
+                           verbose=verbose)
+
+     def save_continuous_features(self, directory: Union[str,Path], verbose: bool=True):
+         """
+         Saves continuous feature names to a text file.
+
+         Args:
+             directory: The directory where the file will be saved.
+             verbose: If True, prints a confirmation message upon saving.
+         """
+         self._save_helper(artifact=self.continuous_feature_names,
+                           directory=directory,
+                           filename=DatasetKeys.CONTINUOUS_NAMES,
+                           verbose=verbose)
+
+     def save_categorical_features(self, directory: Union[str,Path], verbose: bool=True):
+         """
+         Saves categorical feature names to a text file.
+
+         Args:
+             directory: The directory where the file will be saved.
+             verbose: If True, prints a confirmation message upon saving.
+         """
+         self._save_helper(artifact=self.categorical_feature_names,
+                           directory=directory,
+                           filename=DatasetKeys.CATEGORICAL_NAMES,
+                           verbose=verbose)
+
+     def save_artifacts(self, directory: Union[str,Path]):
+         """
+         Saves feature names, categorical feature names, continuous feature names to separate text files.
+         """
+         self.save_all_features(directory=directory, verbose=True)
+         self.save_continuous_features(directory=directory, verbose=True)
+         self.save_categorical_features(directory=directory, verbose=True)
+
+     def __repr__(self) -> str:
+         """Returns a concise representation of the schema's contents."""
+         total = len(self.feature_names)
+         cont = len(self.continuous_feature_names)
+         cat = len(self.categorical_feature_names)
+         index_map = self.categorical_index_map is not None
+         cat_map = self.categorical_mappings is not None
+         return (
+             f"<FeatureSchema(total={total}, continuous={cont}, categorical={cat}, index_map={index_map}, categorical_map={cat_map})>"
+         )
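A minimal construction sketch for the new FeatureSchema container; the feature names, mappings, and output directory below are hypothetical.

    from ml_tools import FeatureSchema  # re-exported via ml_tools/__init__.py in this release

    schema = FeatureSchema(
        feature_names=("age", "weight", "color"),
        continuous_feature_names=("age", "weight"),
        categorical_feature_names=("color",),
        categorical_index_map={2: 3},  # column 2 holds a categorical feature with 3 categories
        categorical_mappings={"color": {"red": 0, "blue": 1, "green": 2}},
    )

    schema.save_artifacts("schema_artifacts")  # writes three .txt files to the directory
    print(schema)  # <FeatureSchema(total=3, continuous=2, categorical=1, ...)>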
ml_tools/custom_logger.py CHANGED
@@ -1,6 +1,6 @@
  from pathlib import Path
  from datetime import datetime
- from typing import Union, List, Dict, Any
+ from typing import Union, List, Dict, Any, Literal
  import traceback
  import json
  import csv
@@ -29,6 +29,8 @@ def custom_logger(
      ],
      save_directory: Union[str, Path],
      log_name: str,
+     add_timestamp: bool=True,
+     dict_as: Literal['auto', 'json', 'csv'] = 'auto',
  ) -> None:
      """
      Logs various data types to corresponding output formats:
@@ -36,10 +38,10 @@
      - list[Any] → .txt
          Each element is written on a new line.

-     - dict[str, list[Any]] → .csv
+     - dict[str, list[Any]] → .csv (if dict_as='auto' or 'csv')
          Dictionary is treated as tabular data; keys become columns, values become rows.

-     - dict[str, scalar] → .json
+     - dict[str, scalar] → .json (if dict_as='auto' or 'json')
          Dictionary is treated as structured data and serialized as JSON.

      - str → .log
@@ -49,29 +51,50 @@
          Full traceback is logged for debugging purposes.

      Args:
-         data: The data to be logged. Must be one of the supported types.
-         save_directory: Directory where the log will be saved. Created if it does not exist.
-         log_name: Base name for the log file. Timestamp will be appended automatically.
+         data (Any): The data to be logged. Must be one of the supported types.
+         save_directory (str | Path): Directory where the log will be saved. Created if it does not exist.
+         log_name (str): Base name for the log file.
+         add_timestamp (bool): Whether to add a timestamp to the filename.
+         dict_as ('auto'|'json'|'csv'):
+             - 'auto': Guesses format (JSON or CSV) based on dictionary content.
+             - 'json': Forces .json format for any dictionary.
+             - 'csv': Forces .csv format. Will fail if dict values are not all lists.

      Raises:
          ValueError: If the data type is unsupported.
      """
      try:
+         if not isinstance(data, BaseException) and not data:
+             _LOGGER.warning("Empty data received. No log file will be saved.")
+             return
+
          save_path = make_fullpath(save_directory, make=True)

-         timestamp = datetime.now().strftime(r"%Y%m%d_%H%M%S")
-         log_name = sanitize_filename(log_name)
+         sanitized_log_name = sanitize_filename(log_name)

-         base_path = save_path / f"{log_name}_{timestamp}"
-
+         if add_timestamp:
+             timestamp = datetime.now().strftime(r"%Y%m%d_%H%M%S")
+             base_path = save_path / f"{sanitized_log_name}_{timestamp}"
+         else:
+             base_path = save_path / sanitized_log_name
+
+         # Router
          if isinstance(data, list):
              _log_list_to_txt(data, base_path.with_suffix(".txt"))

          elif isinstance(data, dict):
-             if all(isinstance(v, list) for v in data.values()):
-                 _log_dict_to_csv(data, base_path.with_suffix(".csv"))
-             else:
+             if dict_as == 'json':
                  _log_dict_to_json(data, base_path.with_suffix(".json"))
+
+             elif dict_as == 'csv':
+                 # This will raise a ValueError if data is not all lists
+                 _log_dict_to_csv(data, base_path.with_suffix(".csv"))
+
+             else: # 'auto' mode
+                 if all(isinstance(v, list) for v in data.values()):
+                     _log_dict_to_csv(data, base_path.with_suffix(".csv"))
+                 else:
+                     _log_dict_to_json(data, base_path.with_suffix(".json"))

          elif isinstance(data, str):
              _log_string_to_log(data, base_path.with_suffix(".log"))
@@ -83,7 +106,7 @@
          _LOGGER.error("Unsupported data type. Must be list, dict, str, or BaseException.")
          raise ValueError()

-         _LOGGER.info(f"Log saved to: '{base_path}'")
+         _LOGGER.info(f"Log saved as: '{base_path.name}'")

      except Exception:
          _LOGGER.exception(f"Log not saved.")
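A short sketch of the updated custom_logger call with the new add_timestamp and dict_as parameters documented above; the data values and output directory are hypothetical.

    from ml_tools.custom_logger import custom_logger

    # dict of lists -> CSV (keys become columns, values become rows)
    custom_logger(
        data={"epoch": [1, 2, 3], "loss": [0.9, 0.5, 0.3]},
        save_directory="logs",
        log_name="training_history",
        dict_as="csv",
    )

    # dict of scalars -> JSON, with a fixed filename (no timestamp suffix)
    custom_logger(
        data={"learning_rate": 1e-3, "batch_size": 32},
        save_directory="logs",
        log_name="run_config",
        add_timestamp=False,
        dict_as="json",
    )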