dragon-ml-toolbox 5.3.1.tar.gz → 6.0.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of dragon-ml-toolbox might be problematic.
- {dragon_ml_toolbox-5.3.1/dragon_ml_toolbox.egg-info → dragon_ml_toolbox-6.0.0}/PKG-INFO +9 -6
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/README.md +8 -5
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0/dragon_ml_toolbox.egg-info}/PKG-INFO +9 -6
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/dragon_ml_toolbox.egg-info/SOURCES.txt +1 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ML_callbacks.py +6 -6
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ML_evaluation.py +147 -94
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ML_trainer.py +13 -13
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/PSO_optimization.py +5 -5
- dragon_ml_toolbox-6.0.0/ml_tools/ensemble_evaluation.py +639 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ensemble_inference.py +10 -10
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ensemble_learning.py +47 -413
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/keys.py +2 -2
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/utilities.py +27 -3
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/pyproject.toml +1 -1
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/LICENSE +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/LICENSE-THIRD-PARTY.md +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/dragon_ml_toolbox.egg-info/dependency_links.txt +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/dragon_ml_toolbox.egg-info/requires.txt +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/dragon_ml_toolbox.egg-info/top_level.txt +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ETL_engineering.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/GUI_tools.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/MICE_imputation.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ML_datasetmaster.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ML_inference.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ML_models.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/ML_optimization.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/RNN_forecast.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/SQL.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/VIF_factor.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/__init__.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/_logger.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/_script_info.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/custom_logger.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/data_exploration.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/handle_excel.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/optimization_tools.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/ml_tools/path_manager.py +0 -0
- {dragon_ml_toolbox-5.3.1 → dragon_ml_toolbox-6.0.0}/setup.cfg +0 -0
````diff
--- dragon_ml_toolbox-5.3.1/dragon_ml_toolbox.egg-info/PKG-INFO
+++ dragon_ml_toolbox-6.0.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dragon-ml-toolbox
-Version: 5.3.1
+Version: 6.0.0
 Summary: A collection of tools for data science and machine learning projects.
 Author-email: Karl Loza <luigiloza@gmail.com>
 License-Expression: MIT
@@ -141,19 +141,22 @@ pip install "dragon-ml-toolbox[pytorch]"
 ```bash
 custom_logger
 data_exploration
-ensemble_learning
+ensemble_evaluation
 ensemble_inference
+ensemble_learning
 ETL_engineering
-ML_datasetmaster
-ML_models
 ML_callbacks
+ML_datasetmaster
 ML_evaluation
-ML_trainer
 ML_inference
+ML_models
+ML_optimization
+ML_trainer
+optimization_tools
 path_manager
 PSO_optimization
-SQL
 RNN_forecast
+SQL
 utilities
 ```
 
````
````diff
--- dragon_ml_toolbox-5.3.1/README.md
+++ dragon_ml_toolbox-6.0.0/README.md
@@ -60,19 +60,22 @@ pip install "dragon-ml-toolbox[pytorch]"
 ```bash
 custom_logger
 data_exploration
-ensemble_learning
+ensemble_evaluation
 ensemble_inference
+ensemble_learning
 ETL_engineering
-ML_datasetmaster
-ML_models
 ML_callbacks
+ML_datasetmaster
 ML_evaluation
-ML_trainer
 ML_inference
+ML_models
+ML_optimization
+ML_trainer
+optimization_tools
 path_manager
 PSO_optimization
-SQL
 RNN_forecast
+SQL
 utilities
 ```
 
````
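The documented module list gains `ensemble_evaluation` (a brand-new 639-line file, while `ensemble_learning.py` loses 413 lines in this release, suggesting its evaluation half moved into the new module) plus the previously unlisted `ML_optimization` and `optimization_tools` entries. A quick post-upgrade smoke test, assuming the sdist's `ml_tools/` directory is the installed top-level package (as the file paths above indicate):

```python
# Sketch: verify the 6.0.0 module layout after `pip install --upgrade dragon-ml-toolbox`.
from ml_tools import ensemble_evaluation            # new in 6.0.0
from ml_tools import ensemble_learning              # still present, now slimmer
from ml_tools import ML_optimization, optimization_tools
```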
````diff
--- dragon_ml_toolbox-5.3.1/PKG-INFO
+++ dragon_ml_toolbox-6.0.0/dragon_ml_toolbox.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dragon-ml-toolbox
-Version: 5.3.1
+Version: 6.0.0
 Summary: A collection of tools for data science and machine learning projects.
 Author-email: Karl Loza <luigiloza@gmail.com>
 License-Expression: MIT
@@ -141,19 +141,22 @@ pip install "dragon-ml-toolbox[pytorch]"
 ```bash
 custom_logger
 data_exploration
-ensemble_learning
+ensemble_evaluation
 ensemble_inference
+ensemble_learning
 ETL_engineering
-ML_datasetmaster
-ML_models
 ML_callbacks
+ML_datasetmaster
 ML_evaluation
-ML_trainer
 ML_inference
+ML_models
+ML_optimization
+ML_trainer
+optimization_tools
 path_manager
 PSO_optimization
-SQL
 RNN_forecast
+SQL
 utilities
 ```
 
````
```diff
--- dragon_ml_toolbox-5.3.1/ml_tools/ML_callbacks.py
+++ dragon_ml_toolbox-6.0.0/ml_tools/ML_callbacks.py
@@ -2,7 +2,7 @@ import numpy as np
 import torch
 from tqdm.auto import tqdm
 from .path_manager import make_fullpath
-from .keys import
+from .keys import PyTorchLogKeys
 from ._logger import _LOGGER
 from typing import Optional
 from ._script_info import _script_info
@@ -96,14 +96,14 @@ class TqdmProgressBar(Callback):
     def on_batch_end(self, batch, logs=None):
         self.batch_bar.update(1) # type: ignore
         if logs:
-            self.batch_bar.set_postfix(loss=f"{logs.get(
+            self.batch_bar.set_postfix(loss=f"{logs.get(PyTorchLogKeys.BATCH_LOSS, 0):.4f}") # type: ignore
 
     def on_epoch_end(self, epoch, logs=None):
         self.batch_bar.close() # type: ignore
         self.epoch_bar.update(1) # type: ignore
         if logs:
-            train_loss_str = f"{logs.get(
-            val_loss_str = f"{logs.get(
+            train_loss_str = f"{logs.get(PyTorchLogKeys.TRAIN_LOSS, 0):.4f}"
+            val_loss_str = f"{logs.get(PyTorchLogKeys.VAL_LOSS, 0):.4f}"
             self.epoch_bar.set_postfix_str(f"Train Loss: {train_loss_str}, Val Loss: {val_loss_str}") # type: ignore
 
     def on_train_end(self, logs=None):
@@ -124,7 +124,7 @@ class EarlyStopping(Callback):
             inferred from the name of the monitored quantity.
         verbose (int): Verbosity mode.
     """
-    def __init__(self, monitor: str=
+    def __init__(self, monitor: str=PyTorchLogKeys.VAL_LOSS, min_delta: float=0.0, patience: int=5, mode: Literal['auto', 'min', 'max']='auto', verbose: int=1):
        super().__init__()
        self.monitor = monitor
        self.patience = patience
@@ -201,7 +201,7 @@ class ModelCheckpoint(Callback):
        mode (str): One of {'auto', 'min', 'max'}.
        verbose (int): Verbosity mode.
    """
-    def __init__(self, save_dir: Union[str,Path], monitor: str =
+    def __init__(self, save_dir: Union[str,Path], monitor: str = PyTorchLogKeys.VAL_LOSS,
                 save_best_only: bool = True, mode: Literal['auto', 'min', 'max']= 'auto', verbose: int = 0):
        super().__init__()
        self.save_dir = make_fullpath(save_dir, make=True, enforce="directory")
```
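The 5.3.1 side of these hunks is truncated in the rendered diff, but the 6.0.0 side shows the pattern: ad-hoc key strings in the callback log dictionaries are replaced by attributes of a `PyTorchLogKeys` container imported from `ml_tools/keys.py`. That file's own change (+2 -2) is not displayed here, so the following is only a hypothetical sketch of such a container; the `'train_loss'`/`'val_loss'` values match the keys `plot_losses` reads from the history dict below, while the batch-level values are pure assumptions:

```python
# Hypothetical sketch of the key container referenced by ML_callbacks/ML_trainer.
# Only the attribute names are visible in this diff; the string values are
# assumptions (TRAIN_LOSS/VAL_LOSS inferred from plot_losses' history keys).
class PyTorchLogKeys:
    TRAIN_LOSS = "train_loss"
    VAL_LOSS = "val_loss"
    BATCH_LOSS = "batch_loss"    # assumed
    BATCH_INDEX = "batch_index"  # assumed
    BATCH_SIZE = "batch_size"    # assumed
```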
```diff
--- dragon_ml_toolbox-5.3.1/ml_tools/ML_evaluation.py
+++ dragon_ml_toolbox-6.0.0/ml_tools/ML_evaluation.py
@@ -1,6 +1,8 @@
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
+import seaborn as sns
+from sklearn.calibration import CalibrationDisplay
 from sklearn.metrics import (
     classification_report,
     ConfusionMatrixDisplay,
@@ -9,7 +11,9 @@ from sklearn.metrics import (
     mean_squared_error,
     mean_absolute_error,
     r2_score,
-    median_absolute_error
+    median_absolute_error,
+    precision_recall_curve,
+    average_precision_score
 )
 import torch
 import shap
@@ -28,13 +32,13 @@ __all__ = [
 ]
 
 
-def plot_losses(history: dict, save_dir:
+def plot_losses(history: dict, save_dir: Union[str, Path]):
     """
     Plots training & validation loss curves from a history object.
 
     Args:
         history (dict): A dictionary containing 'train_loss' and 'val_loss'.
-        save_dir (str | Path
+        save_dir (str | Path): Directory to save the plot image.
     """
     train_loss = history.get('train_loss', [])
     val_loss = history.get('val_loss', [])
@@ -62,86 +66,123 @@ def plot_losses(history: dict, save_dir: Optional[Union[str, Path]] = None):
     ax.grid(True)
     plt.tight_layout()
 
-
-
-
-
-
-    else:
-        plt.show()
+    save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
+    save_path = save_dir_path / "loss_plot.svg"
+    plt.savefig(save_path)
+    _LOGGER.info(f"📉 Loss plot saved as '{save_path.name}'")
+
     plt.close(fig)
 
 
-def classification_metrics(y_true: np.ndarray, y_pred: np.ndarray, y_prob: Optional[np.ndarray] = None,
-                           cmap: str = "Blues"
+def classification_metrics(save_dir: Union[str, Path], y_true: np.ndarray, y_pred: np.ndarray, y_prob: Optional[np.ndarray] = None,
+                           cmap: str = "Blues"):
     """
-
+    Saves classification metrics and plots.
 
     Args:
         y_true (np.ndarray): Ground truth labels.
         y_pred (np.ndarray): Predicted labels.
         y_prob (np.ndarray, optional): Predicted probabilities for ROC curve.
         cmap (str): Colormap for the confusion matrix.
-        save_dir (str | Path
+        save_dir (str | Path): Directory to save plots.
     """
     print("--- Classification Report ---")
-
-
+    # Generate report as both text and dictionary
+    report_text: str = classification_report(y_true, y_pred) # type: ignore
+    report_dict: dict = classification_report(y_true, y_pred, output_dict=True) # type: ignore
+    print(report_text)
 
-
-
-
-
-
-    _LOGGER.info(f"📝 Classification report saved as '{report_path.name}'")
+    save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
+    # Save text report
+    report_path = save_dir_path / "classification_report.txt"
+    report_path.write_text(report_text, encoding="utf-8")
+    _LOGGER.info(f"📝 Classification report saved as '{report_path.name}'")
 
-
-
-
-
-
-    plt.
-
-    plt.
+    # --- Save Classification Report Heatmap ---
+    try:
+        plt.figure(figsize=(8, 6), dpi=100)
+        sns.heatmap(pd.DataFrame(report_dict).iloc[:-1, :].T, annot=True, cmap='viridis', fmt='.2f')
+        plt.title("Classification Report")
+        plt.tight_layout()
+        heatmap_path = save_dir_path / "classification_report_heatmap.svg"
+        plt.savefig(heatmap_path)
+        _LOGGER.info(f"📊 Report heatmap saved as '{heatmap_path.name}'")
+        plt.close()
+    except Exception as e:
+        _LOGGER.error(f"❌ Could not generate classification report heatmap: {e}")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Save Confusion Matrix
+    fig_cm, ax_cm = plt.subplots(figsize=(6, 6), dpi=100)
+    ConfusionMatrixDisplay.from_predictions(y_true, y_pred, cmap=cmap, ax=ax_cm)
+    ax_cm.set_title("Confusion Matrix")
+    cm_path = save_dir_path / "confusion_matrix.svg"
+    plt.savefig(cm_path)
+    _LOGGER.info(f"❇️ Confusion matrix saved as '{cm_path.name}'")
+    plt.close(fig_cm)
+
+    # Plotting logic for ROC and PR Curves
+    if y_prob is not None and y_prob.ndim > 1 and y_prob.shape[1] >= 2:
+        # Use probabilities of the positive class
+        y_score = y_prob[:, 1]
+
+        # --- Save ROC Curve ---
+        fpr, tpr, _ = roc_curve(y_true, y_score)
+        auc = roc_auc_score(y_true, y_score)
+        fig_roc, ax_roc = plt.subplots(figsize=(6, 6), dpi=100)
+        ax_roc.plot(fpr, tpr, label=f'AUC = {auc:.2f}')
+        ax_roc.plot([0, 1], [0, 1], 'k--')
+        ax_roc.set_title('Receiver Operating Characteristic (ROC) Curve')
+        ax_roc.set_xlabel('False Positive Rate')
+        ax_roc.set_ylabel('True Positive Rate')
+        ax_roc.legend(loc='lower right')
+        ax_roc.grid(True)
+        roc_path = save_dir_path / "roc_curve.svg"
+        plt.savefig(roc_path)
+        _LOGGER.info(f"📈 ROC curve saved as '{roc_path.name}'")
+        plt.close(fig_roc)
+
+        # --- Save Precision-Recall Curve ---
+        precision, recall, _ = precision_recall_curve(y_true, y_score)
+        ap_score = average_precision_score(y_true, y_score)
+        fig_pr, ax_pr = plt.subplots(figsize=(6, 6), dpi=100)
+        ax_pr.plot(recall, precision, label=f'AP = {ap_score:.2f}')
+        ax_pr.set_title('Precision-Recall Curve')
+        ax_pr.set_xlabel('Recall')
+        ax_pr.set_ylabel('Precision')
+        ax_pr.legend(loc='lower left')
+        ax_pr.grid(True)
+        pr_path = save_dir_path / "pr_curve.svg"
+        plt.savefig(pr_path)
+        _LOGGER.info(f"📈 PR curve saved as '{pr_path.name}'")
+        plt.close(fig_pr)
+
+        # --- Save Calibration Plot ---
+        if y_prob.ndim > 1 and y_prob.shape[1] >= 2:
+            y_score = y_prob[:, 1] # Use probabilities of the positive class
+
+            fig_cal, ax_cal = plt.subplots(figsize=(8, 8), dpi=100)
+            CalibrationDisplay.from_predictions(y_true, y_score, n_bins=15, ax=ax_cal)
+
+            ax_cal.set_title('Calibration Plot (Reliability Curve)')
+            ax_cal.set_xlabel('Mean Predicted Probability')
+            ax_cal.set_ylabel('Fraction of Positives')
+            ax_cal.grid(True)
+            plt.tight_layout()
+
+            cal_path = save_dir_path / "calibration_plot.svg"
+            plt.savefig(cal_path)
+            _LOGGER.info(f"✅ Calibration plot saved as '{cal_path.name}'")
+            plt.close(fig_cal)
 
 
-def regression_metrics(y_true: np.ndarray, y_pred: np.ndarray, save_dir:
+def regression_metrics(y_true: np.ndarray, y_pred: np.ndarray, save_dir: Union[str, Path]):
     """
-
+    Saves regression metrics and plots.
 
     Args:
         y_true (np.ndarray): Ground truth values.
         y_pred (np.ndarray): Predicted values.
-        save_dir (str |
+        save_dir (str | Path): Directory to save plots and report.
     """
     rmse = np.sqrt(mean_squared_error(y_true, y_pred))
     mae = mean_absolute_error(y_true, y_pred)
@@ -158,41 +199,53 @@ def regression_metrics(y_true: np.ndarray, y_pred: np.ndarray, save_dir: Optiona
     report_string = "\n".join(report_lines)
     print(report_string)
 
-
-
-
-
-
-    _LOGGER.info(f"📝 Regression report saved as '{report_path.name}'")
+    save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
+    # Save text report
+    report_path = save_dir_path / "regression_report.txt"
+    report_path.write_text(report_string)
+    _LOGGER.info(f"📝 Regression report saved as '{report_path.name}'")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Save residual plot
+    residuals = y_true - y_pred
+    fig_res, ax_res = plt.subplots(figsize=(8, 6), dpi=100)
+    ax_res.scatter(y_pred, residuals, alpha=0.6)
+    ax_res.axhline(0, color='red', linestyle='--')
+    ax_res.set_xlabel("Predicted Values")
+    ax_res.set_ylabel("Residuals")
+    ax_res.set_title("Residual Plot")
+    ax_res.grid(True)
+    plt.tight_layout()
+    res_path = save_dir_path / "residual_plot.svg"
+    plt.savefig(res_path)
+    _LOGGER.info(f"📈 Residual plot saved as '{res_path.name}'")
+    plt.close(fig_res)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Save true vs predicted plot
+    fig_tvp, ax_tvp = plt.subplots(figsize=(8, 6), dpi=100)
+    ax_tvp.scatter(y_true, y_pred, alpha=0.6)
+    ax_tvp.plot([y_true.min(), y_true.max()], [y_true.min(), y_true.max()], 'k--', lw=2)
+    ax_tvp.set_xlabel('True Values')
+    ax_tvp.set_ylabel('Predictions')
+    ax_tvp.set_title('True vs. Predicted Values')
+    ax_tvp.grid(True)
+    plt.tight_layout()
+    tvp_path = save_dir_path / "true_vs_predicted_plot.svg"
+    plt.savefig(tvp_path)
+    _LOGGER.info(f"📉 True vs. Predicted plot saved as '{tvp_path.name}'")
+    plt.close(fig_tvp)
+
+    # Save Histogram of Residuals
+    fig_hist, ax_hist = plt.subplots(figsize=(8, 6), dpi=100)
+    sns.histplot(residuals, kde=True, ax=ax_hist)
+    ax_hist.set_xlabel("Residual Value")
+    ax_hist.set_ylabel("Frequency")
+    ax_hist.set_title("Distribution of Residuals")
+    ax_hist.grid(True)
+    plt.tight_layout()
+    hist_path = save_dir_path / "residuals_histogram.svg"
+    plt.savefig(hist_path)
+    _LOGGER.info(f"📊 Residuals histogram saved as '{hist_path.name}'")
+    plt.close(fig_hist)
 
 
 def shap_summary_plot(model, background_data: Union[torch.Tensor,np.ndarray], instances_to_explain: Union[torch.Tensor,np.ndarray],
```
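The evaluation helpers no longer fall back to `plt.show()`: `save_dir` is now mandatory, and everything is written to disk as text reports and SVG plots. Note the argument-order quirk in 6.0.0: `classification_metrics` takes `save_dir` first, while `regression_metrics` and `plot_losses` keep it after the data. A minimal usage sketch against the new signatures (the array values are made up for illustration):

```python
import numpy as np
from ml_tools.ML_evaluation import classification_metrics, regression_metrics

y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])
y_prob = np.array([[0.8, 0.2], [0.1, 0.9], [0.6, 0.4], [0.7, 0.3], [0.2, 0.8]])

# save_dir is the first positional argument; writes classification_report.txt,
# the report heatmap, confusion_matrix.svg, roc_curve.svg, pr_curve.svg,
# and calibration_plot.svg into the directory (created if missing).
classification_metrics("reports/classification", y_true, y_pred, y_prob)

# regression_metrics keeps save_dir last; writes regression_report.txt,
# residual_plot.svg, true_vs_predicted_plot.svg, residuals_histogram.svg.
regression_metrics(y_true.astype(float), y_pred.astype(float), "reports/regression")
```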
```diff
--- dragon_ml_toolbox-5.3.1/ml_tools/ML_trainer.py
+++ dragon_ml_toolbox-6.0.0/ml_tools/ML_trainer.py
@@ -8,16 +8,16 @@ import numpy as np
 from .ML_callbacks import Callback, History, TqdmProgressBar
 from .ML_evaluation import classification_metrics, regression_metrics, plot_losses, shap_summary_plot
 from ._script_info import _script_info
-from .keys import
+from .keys import PyTorchLogKeys
 from ._logger import _LOGGER
 
 
 __all__ = [
-    "MyTrainer"
+    "MLTrainer"
 ]
 
 
-class MyTrainer:
+class MLTrainer:
     def __init__(self, model: nn.Module, train_dataset: Dataset, test_dataset: Dataset,
                  kind: Literal["regression", "classification"],
                  criterion: nn.Module, optimizer: torch.optim.Optimizer,
@@ -161,8 +161,8 @@ class MyTrainer:
         for batch_idx, (features, target) in enumerate(self.train_loader): # type: ignore
             # Create a log dictionary for the batch
             batch_logs = {
-
-
+                PyTorchLogKeys.BATCH_INDEX: batch_idx,
+                PyTorchLogKeys.BATCH_SIZE: features.size(0)
             }
             self.callbacks_hook('on_batch_begin', batch_idx, logs=batch_logs)
 
@@ -180,11 +180,11 @@
             running_loss += batch_loss * features.size(0)
 
             # Add the batch loss to the logs and call the end-of-batch hook
-            batch_logs[
+            batch_logs[PyTorchLogKeys.BATCH_LOSS] = batch_loss
             self.callbacks_hook('on_batch_end', batch_idx, logs=batch_logs)
 
         # Return the average loss for the entire epoch
-        return {
+        return {PyTorchLogKeys.TRAIN_LOSS: running_loss / len(self.train_loader.dataset)} # type: ignore
 
     def _validation_step(self):
         self.model.eval()
@@ -197,7 +197,7 @@
                 output = output.view_as(target)
                 loss = self.criterion(output, target)
                 running_loss += loss.item() * features.size(0)
-        logs = {
+        logs = {PyTorchLogKeys.VAL_LOSS: running_loss / len(self.test_loader.dataset)} # type: ignore
         return logs
 
     def _predict_for_eval(self, dataloader: DataLoader):
@@ -232,14 +232,14 @@
 
             yield y_pred_batch, y_prob_batch, y_true_batch
 
-    def evaluate(self, save_dir:
+    def evaluate(self, save_dir: Union[str,Path], data: Optional[Union[DataLoader, Dataset]] = None):
         """
         Evaluates the model on the given data.
 
         Args:
             data (DataLoader | Dataset | None ): The data to evaluate on.
                 Can be a DataLoader or a Dataset. If None, defaults to the trainer's internal test_dataset.
-            save_dir (str | Path
+            save_dir (str | Path): Directory to save all reports and plots.
         """
         eval_loader = None
         if isinstance(data, DataLoader):
@@ -275,14 +275,14 @@
         y_prob = np.concatenate(all_probs) if self.kind == "classification" else None
 
         if self.kind == "classification":
-            classification_metrics(y_true, y_pred, y_prob
+            classification_metrics(save_dir, y_true, y_pred, y_prob)
         else:
-            regression_metrics(y_true.flatten(), y_pred.flatten(), save_dir
+            regression_metrics(y_true.flatten(), y_pred.flatten(), save_dir)
 
         print("\n--- Training History ---")
         plot_losses(self.history, save_dir=save_dir)
 
-    def explain(self, explain_dataset: Optional[Dataset] = None, n_samples: int =
+    def explain(self, explain_dataset: Optional[Dataset] = None, n_samples: int = 1000,
                 feature_names: Optional[List[str]] = None, save_dir: Optional[Union[str,Path]] = None):
         """
         Explains model predictions using SHAP and saves all artifacts.
```
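With the trainer renamed from `MyTrainer` to `MLTrainer` and `evaluate` now requiring `save_dir`, downstream code needs a small update. A sketch of the 6.0.0 call pattern, assuming an already-constructed trainer (the constructor's full parameter list is cut off in this diff, so construction is not shown):

```python
from pathlib import Path
from ml_tools.ML_trainer import MLTrainer  # renamed from MyTrainer in 6.0.0

def evaluate_and_explain(trainer: MLTrainer, out_dir: str) -> None:
    out = Path(out_dir)
    # evaluate() now requires save_dir; with data=None it falls back to the
    # trainer's internal test_dataset, per the docstring above.
    trainer.evaluate(save_dir=out / "evaluation")
    # explain() keeps save_dir optional; n_samples now defaults to 1000.
    trainer.explain(n_samples=1000, save_dir=out / "shap")
```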
```diff
--- dragon_ml_toolbox-5.3.1/ml_tools/PSO_optimization.py
+++ dragon_ml_toolbox-6.0.0/ml_tools/PSO_optimization.py
@@ -12,7 +12,7 @@ from .path_manager import sanitize_filename, make_fullpath, list_files_by_extens
 import torch
 from tqdm import trange
 from ._logger import _LOGGER
-from .keys import
+from .keys import EnsembleKeys
 from ._script_info import _script_info
 from .SQL import DatabaseManager
 from contextlib import nullcontext
@@ -48,9 +48,9 @@ class ObjectiveFunction():
         self.is_hybrid = False if binary_features <= 0 else True
         self.use_noise = add_noise
         self._artifact = deserialize_object(trained_model_path, verbose=False, raise_on_error=True)
-        self.model = self._get_from_artifact(
-        self.feature_names: Optional[list[str]] = self._get_from_artifact(
-        self.target_name: Optional[str] = self._get_from_artifact(
+        self.model = self._get_from_artifact(EnsembleKeys.MODEL)
+        self.feature_names: Optional[list[str]] = self._get_from_artifact(EnsembleKeys.FEATURES) # type: ignore
+        self.target_name: Optional[str] = self._get_from_artifact(EnsembleKeys.TARGET) # type: ignore
         self.task = task
         self.check_model() # check for classification models and None values
 
@@ -126,7 +126,7 @@
         if self._artifact is None:
             raise TypeError("Load model error")
         val = self._artifact.get(key)
-        if key ==
+        if key == EnsembleKeys.FEATURES:
             result = val if isinstance(val, list) and val else None
         else:
             result = val if val else None
```
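`ObjectiveFunction` now reads the deserialized model artifact through `EnsembleKeys` constants rather than literal strings. As with `PyTorchLogKeys`, the `keys.py` change itself is not shown in this diff, so the following is a hypothetical sketch of the dictionary shape the class expects; every string value here is an assumption:

```python
class EnsembleKeys:
    """Hypothetical sketch; the real string values live in ml_tools/keys.py."""
    MODEL = "model"             # assumed
    FEATURES = "feature_names"  # assumed
    TARGET = "target_name"      # assumed

# Shape of the artifact that deserialize_object() must yield for
# ObjectiveFunction._get_from_artifact() to succeed:
artifact = {
    EnsembleKeys.MODEL: object(),         # placeholder for the fitted model
    EnsembleKeys.FEATURES: ["x1", "x2"],  # must be a non-empty list, else None
    EnsembleKeys.TARGET: "y",             # falsy values are coerced to None
}
```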