dragon-ml-toolbox 13.7.0__py3-none-any.whl → 14.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dragon-ml-toolbox might be problematic.
- {dragon_ml_toolbox-13.7.0.dist-info → dragon_ml_toolbox-14.0.0.dist-info}/METADATA +2 -1
- {dragon_ml_toolbox-13.7.0.dist-info → dragon_ml_toolbox-14.0.0.dist-info}/RECORD +21 -14
- ml_tools/ML_datasetmaster.py +2 -185
- ml_tools/ML_evaluation.py +3 -3
- ml_tools/ML_inference.py +0 -1
- ml_tools/ML_models.py +3 -1
- ml_tools/ML_trainer.py +446 -11
- ml_tools/ML_utilities.py +302 -4
- ml_tools/ML_vision_datasetmaster.py +1315 -0
- ml_tools/ML_vision_evaluation.py +260 -0
- ml_tools/ML_vision_inference.py +428 -0
- ml_tools/ML_vision_models.py +627 -0
- ml_tools/ML_vision_transformers.py +58 -0
- ml_tools/_ML_pytorch_tabular.py +543 -0
- ml_tools/_ML_vision_recipe.py +88 -0
- ml_tools/custom_logger.py +37 -14
- ml_tools/keys.py +38 -0
- {dragon_ml_toolbox-13.7.0.dist-info → dragon_ml_toolbox-14.0.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-13.7.0.dist-info → dragon_ml_toolbox-14.0.0.dist-info}/licenses/LICENSE +0 -0
- {dragon_ml_toolbox-13.7.0.dist-info → dragon_ml_toolbox-14.0.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
- {dragon_ml_toolbox-13.7.0.dist-info → dragon_ml_toolbox-14.0.0.dist-info}/top_level.txt +0 -0
ml_tools/custom_logger.py
CHANGED
@@ -1,6 +1,6 @@
 from pathlib import Path
 from datetime import datetime
-from typing import Union, List, Dict, Any
+from typing import Union, List, Dict, Any, Literal
 import traceback
 import json
 import csv
@@ -29,6 +29,8 @@ def custom_logger(
     ],
     save_directory: Union[str, Path],
     log_name: str,
+    add_timestamp: bool=True,
+    dict_as: Literal['auto', 'json', 'csv'] = 'auto',
 ) -> None:
     """
     Logs various data types to corresponding output formats:
@@ -36,10 +38,10 @@ def custom_logger(
     - list[Any] → .txt
         Each element is written on a new line.
 
-    - dict[str, list[Any]] → .csv
+    - dict[str, list[Any]] → .csv (if dict_as='auto' or 'csv')
         Dictionary is treated as tabular data; keys become columns, values become rows.
 
-    - dict[str, scalar] → .json
+    - dict[str, scalar] → .json (if dict_as='auto' or 'json')
         Dictionary is treated as structured data and serialized as JSON.
 
     - str → .log
@@ -49,29 +51,50 @@ def custom_logger(
         Full traceback is logged for debugging purposes.
 
     Args:
-        data: The data to be logged. Must be one of the supported types.
-        save_directory: Directory where the log will be saved. Created if it does not exist.
-        log_name: Base name for the log file.
+        data (Any): The data to be logged. Must be one of the supported types.
+        save_directory (str | Path): Directory where the log will be saved. Created if it does not exist.
+        log_name (str): Base name for the log file.
+        add_timestamp (bool): Whether to add a timestamp to the filename.
+        dict_as ('auto'|'json'|'csv'):
+            - 'auto': Guesses format (JSON or CSV) based on dictionary content.
+            - 'json': Forces .json format for any dictionary.
+            - 'csv': Forces .csv format. Will fail if dict values are not all lists.
 
     Raises:
         ValueError: If the data type is unsupported.
     """
     try:
+        if not isinstance(data, BaseException) and not data:
+            _LOGGER.warning("Empty data received. No log file will be saved.")
+            return
+
         save_path = make_fullpath(save_directory, make=True)
 
-
-        log_name = sanitize_filename(log_name)
+        sanitized_log_name = sanitize_filename(log_name)
 
-
-
+        if add_timestamp:
+            timestamp = datetime.now().strftime(r"%Y%m%d_%H%M%S")
+            base_path = save_path / f"{sanitized_log_name}_{timestamp}"
+        else:
+            base_path = save_path / sanitized_log_name
+
+        # Router
         if isinstance(data, list):
             _log_list_to_txt(data, base_path.with_suffix(".txt"))
 
         elif isinstance(data, dict):
-            if all(isinstance(v, list) for v in data.values()):
-                _log_dict_to_csv(data, base_path.with_suffix(".csv"))
-            else:
+            if dict_as == 'json':
                 _log_dict_to_json(data, base_path.with_suffix(".json"))
+
+            elif dict_as == 'csv':
+                # This will raise a ValueError if data is not all lists
+                _log_dict_to_csv(data, base_path.with_suffix(".csv"))
+
+            else:  # 'auto' mode
+                if all(isinstance(v, list) for v in data.values()):
+                    _log_dict_to_csv(data, base_path.with_suffix(".csv"))
+                else:
+                    _log_dict_to_json(data, base_path.with_suffix(".json"))
 
         elif isinstance(data, str):
             _log_string_to_log(data, base_path.with_suffix(".log"))
@@ -83,7 +106,7 @@ def custom_logger(
             _LOGGER.error("Unsupported data type. Must be list, dict, str, or BaseException.")
             raise ValueError()
 
-        _LOGGER.info(f"Log saved
+        _LOGGER.info(f"Log saved as: '{base_path.name}'")
 
     except Exception:
         _LOGGER.exception(f"Log not saved.")
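For context, a minimal usage sketch of the updated custom_logger signature, assuming the function is importable as ml_tools.custom_logger.custom_logger and that the helpers behave as the docstring above describes; file names and directory layout below are illustrative, not taken from the package:

# Hypothetical usage of the new add_timestamp / dict_as parameters (not part of the diff).
from ml_tools.custom_logger import custom_logger

# All values are lists, so 'auto' routes this dict to a .csv file
# named like "metrics_<timestamp>.csv" inside ./logs.
metrics = {"epoch": [1, 2, 3], "val_loss": [0.91, 0.74, 0.68]}
custom_logger(metrics, save_directory="logs", log_name="metrics")

# Scalar values plus dict_as="json" and add_timestamp=False
# produce a plain "run_config.json" with no timestamp suffix.
run_config = {"lr": 1e-3, "batch_size": 32}
custom_logger(
    run_config,
    save_directory="logs",
    log_name="run_config",
    add_timestamp=False,
    dict_as="json",
)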
ml_tools/keys.py
CHANGED
@@ -36,6 +36,7 @@ class PyTorchInferenceKeys:
     # For classification tasks
     LABELS = "labels"
     PROBABILITIES = "probabilities"
+    LABEL_NAMES = "label_names"
 
 
 class PytorchModelArchitectureKeys:
@@ -80,6 +81,43 @@ class PyTorchCheckpointKeys:
     BEST_SCORE = "best_score"
 
 
+class UtilityKeys:
+    """Keys used for utility modules"""
+    MODEL_PARAMS_FILE = "model_parameters"
+    TOTAL_PARAMS = "Total Parameters"
+    TRAINABLE_PARAMS = "Trainable Parameters"
+    PTH_FILE = "pth report "
+    MODEL_ARCHITECTURE_FILE = "model_architecture_summary"
+
+
+class VisionKeys:
+    """For vision ML metrics"""
+    SEGMENTATION_REPORT = "segmentation_report"
+    SEGMENTATION_HEATMAP = "segmentation_metrics_heatmap"
+    SEGMENTATION_CONFUSION_MATRIX = "segmentation_confusion_matrix"
+    # Object detection
+    OBJECT_DETECTION_REPORT = "object_detection_report"
+
+
+class VisionTransformRecipeKeys:
+    """Defines the key names for the transform recipe JSON file."""
+    TASK = "task"
+    PIPELINE = "pipeline"
+    NAME = "name"
+    KWARGS = "_kwargs"
+    PRE_TRANSFORMS = "pre_transforms"
+    RESIZE_SIZE = "resize_size"
+    CROP_SIZE = "crop_size"
+    MEAN = "mean"
+    STD = "std"
+
+
+class ObjectDetectionKeys:
+    """Used by the object detection dataset"""
+    BOXES = "boxes"
+    LABELS = "labels"
+
+
 class _OneHotOtherPlaceholder:
     """Used internally by GUI_tools."""
     OTHER_GUI = "OTHER"
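As a rough illustration of what these constants are for (the consuming vision modules are not shown in this diff), they are plain string keys; the dict shapes, values, and import path below are assumptions, not code from the package:

# Illustrative only: how the new key classes might be consumed by the vision modules.
import torch
from ml_tools.keys import ObjectDetectionKeys, VisionTransformRecipeKeys

# An object-detection target dict keyed by the new constants
# (xyxy box layout is an assumed convention).
target = {
    ObjectDetectionKeys.BOXES: torch.tensor([[10.0, 20.0, 110.0, 220.0]]),
    ObjectDetectionKeys.LABELS: torch.tensor([3]),
}

# A transform-recipe dict shaped after VisionTransformRecipeKeys
# (the "classification" value and the pipeline entry layout are assumptions).
recipe = {
    VisionTransformRecipeKeys.TASK: "classification",
    VisionTransformRecipeKeys.RESIZE_SIZE: 256,
    VisionTransformRecipeKeys.CROP_SIZE: 224,
    VisionTransformRecipeKeys.MEAN: [0.485, 0.456, 0.406],
    VisionTransformRecipeKeys.STD: [0.229, 0.224, 0.225],
    VisionTransformRecipeKeys.PIPELINE: [
        {VisionTransformRecipeKeys.NAME: "Resize",
         VisionTransformRecipeKeys.KWARGS: {"size": 256}},
    ],
}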