dragon-ml-toolbox 19.13.0__py3-none-any.whl → 20.0.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
- {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/METADATA +29 -46
- dragon_ml_toolbox-20.0.0.dist-info/RECORD +178 -0
- ml_tools/{ETL_cleaning.py → ETL_cleaning/__init__.py} +13 -5
- ml_tools/ETL_cleaning/_basic_clean.py +351 -0
- ml_tools/ETL_cleaning/_clean_tools.py +128 -0
- ml_tools/ETL_cleaning/_dragon_cleaner.py +245 -0
- ml_tools/ETL_cleaning/_imprimir.py +13 -0
- ml_tools/{ETL_engineering.py → ETL_engineering/__init__.py} +8 -4
- ml_tools/ETL_engineering/_dragon_engineering.py +261 -0
- ml_tools/ETL_engineering/_imprimir.py +24 -0
- ml_tools/{_core/_ETL_engineering.py → ETL_engineering/_transforms.py} +14 -267
- ml_tools/{_core → GUI_tools}/_GUI_tools.py +37 -40
- ml_tools/{GUI_tools.py → GUI_tools/__init__.py} +7 -5
- ml_tools/GUI_tools/_imprimir.py +12 -0
- ml_tools/IO_tools/_IO_loggers.py +235 -0
- ml_tools/IO_tools/_IO_save_load.py +151 -0
- ml_tools/IO_tools/_IO_utils.py +140 -0
- ml_tools/{IO_tools.py → IO_tools/__init__.py} +13 -5
- ml_tools/IO_tools/_imprimir.py +14 -0
- ml_tools/MICE/_MICE_imputation.py +132 -0
- ml_tools/{MICE_imputation.py → MICE/__init__.py} +6 -7
- ml_tools/{_core/_MICE_imputation.py → MICE/_dragon_mice.py} +243 -322
- ml_tools/MICE/_imprimir.py +11 -0
- ml_tools/{ML_callbacks.py → ML_callbacks/__init__.py} +12 -4
- ml_tools/ML_callbacks/_base.py +101 -0
- ml_tools/ML_callbacks/_checkpoint.py +232 -0
- ml_tools/ML_callbacks/_early_stop.py +208 -0
- ml_tools/ML_callbacks/_imprimir.py +12 -0
- ml_tools/ML_callbacks/_scheduler.py +197 -0
- ml_tools/{ML_chaining_utilities.py → ML_chain/__init__.py} +8 -3
- ml_tools/{_core/_ML_chaining_utilities.py → ML_chain/_chaining_tools.py} +5 -129
- ml_tools/ML_chain/_dragon_chain.py +140 -0
- ml_tools/ML_chain/_imprimir.py +11 -0
- ml_tools/ML_configuration/__init__.py +90 -0
- ml_tools/ML_configuration/_base_model_config.py +69 -0
- ml_tools/ML_configuration/_finalize.py +366 -0
- ml_tools/ML_configuration/_imprimir.py +47 -0
- ml_tools/ML_configuration/_metrics.py +593 -0
- ml_tools/ML_configuration/_models.py +206 -0
- ml_tools/ML_configuration/_training.py +124 -0
- ml_tools/ML_datasetmaster/__init__.py +28 -0
- ml_tools/ML_datasetmaster/_base_datasetmaster.py +337 -0
- ml_tools/{_core/_ML_datasetmaster.py → ML_datasetmaster/_datasetmaster.py} +9 -329
- ml_tools/ML_datasetmaster/_imprimir.py +15 -0
- ml_tools/{_core/_ML_sequence_datasetmaster.py → ML_datasetmaster/_sequence_datasetmaster.py} +13 -15
- ml_tools/{_core/_ML_vision_datasetmaster.py → ML_datasetmaster/_vision_datasetmaster.py} +63 -65
- ml_tools/ML_evaluation/__init__.py +53 -0
- ml_tools/ML_evaluation/_classification.py +629 -0
- ml_tools/ML_evaluation/_feature_importance.py +409 -0
- ml_tools/ML_evaluation/_imprimir.py +25 -0
- ml_tools/ML_evaluation/_loss.py +92 -0
- ml_tools/ML_evaluation/_regression.py +273 -0
- ml_tools/{_core/_ML_sequence_evaluation.py → ML_evaluation/_sequence.py} +8 -11
- ml_tools/{_core/_ML_vision_evaluation.py → ML_evaluation/_vision.py} +12 -17
- ml_tools/{_core → ML_evaluation_captum}/_ML_evaluation_captum.py +11 -38
- ml_tools/{ML_evaluation_captum.py → ML_evaluation_captum/__init__.py} +6 -4
- ml_tools/ML_evaluation_captum/_imprimir.py +10 -0
- ml_tools/{_core → ML_finalize_handler}/_ML_finalize_handler.py +3 -7
- ml_tools/ML_finalize_handler/__init__.py +10 -0
- ml_tools/ML_finalize_handler/_imprimir.py +8 -0
- ml_tools/ML_inference/__init__.py +22 -0
- ml_tools/ML_inference/_base_inference.py +166 -0
- ml_tools/{_core/_ML_chaining_inference.py → ML_inference/_chain_inference.py} +14 -17
- ml_tools/ML_inference/_dragon_inference.py +332 -0
- ml_tools/ML_inference/_imprimir.py +11 -0
- ml_tools/ML_inference/_multi_inference.py +180 -0
- ml_tools/ML_inference_sequence/__init__.py +10 -0
- ml_tools/ML_inference_sequence/_imprimir.py +8 -0
- ml_tools/{_core/_ML_sequence_inference.py → ML_inference_sequence/_sequence_inference.py} +11 -15
- ml_tools/ML_inference_vision/__init__.py +10 -0
- ml_tools/ML_inference_vision/_imprimir.py +8 -0
- ml_tools/{_core/_ML_vision_inference.py → ML_inference_vision/_vision_inference.py} +15 -19
- ml_tools/ML_models/__init__.py +32 -0
- ml_tools/{_core/_ML_models_advanced.py → ML_models/_advanced_models.py} +22 -18
- ml_tools/ML_models/_base_mlp_attention.py +198 -0
- ml_tools/{_core/_models_advanced_base.py → ML_models/_base_save_load.py} +73 -49
- ml_tools/ML_models/_dragon_tabular.py +248 -0
- ml_tools/ML_models/_imprimir.py +18 -0
- ml_tools/ML_models/_mlp_attention.py +134 -0
- ml_tools/{_core → ML_models}/_models_advanced_helpers.py +13 -13
- ml_tools/ML_models_sequence/__init__.py +10 -0
- ml_tools/ML_models_sequence/_imprimir.py +8 -0
- ml_tools/{_core/_ML_sequence_models.py → ML_models_sequence/_sequence_models.py} +5 -8
- ml_tools/ML_models_vision/__init__.py +29 -0
- ml_tools/ML_models_vision/_base_wrapper.py +254 -0
- ml_tools/ML_models_vision/_image_classification.py +182 -0
- ml_tools/ML_models_vision/_image_segmentation.py +108 -0
- ml_tools/ML_models_vision/_imprimir.py +16 -0
- ml_tools/ML_models_vision/_object_detection.py +135 -0
- ml_tools/ML_optimization/__init__.py +21 -0
- ml_tools/ML_optimization/_imprimir.py +13 -0
- ml_tools/{_core/_ML_optimization_pareto.py → ML_optimization/_multi_dragon.py} +18 -24
- ml_tools/ML_optimization/_single_dragon.py +203 -0
- ml_tools/{_core/_ML_optimization.py → ML_optimization/_single_manual.py} +75 -213
- ml_tools/{_core → ML_scaler}/_ML_scaler.py +8 -11
- ml_tools/ML_scaler/__init__.py +10 -0
- ml_tools/ML_scaler/_imprimir.py +8 -0
- ml_tools/ML_trainer/__init__.py +20 -0
- ml_tools/ML_trainer/_base_trainer.py +297 -0
- ml_tools/ML_trainer/_dragon_detection_trainer.py +402 -0
- ml_tools/ML_trainer/_dragon_sequence_trainer.py +540 -0
- ml_tools/ML_trainer/_dragon_trainer.py +1160 -0
- ml_tools/ML_trainer/_imprimir.py +10 -0
- ml_tools/{ML_utilities.py → ML_utilities/__init__.py} +14 -6
- ml_tools/ML_utilities/_artifact_finder.py +382 -0
- ml_tools/ML_utilities/_imprimir.py +16 -0
- ml_tools/ML_utilities/_inspection.py +325 -0
- ml_tools/ML_utilities/_train_tools.py +205 -0
- ml_tools/{ML_vision_transformers.py → ML_vision_transformers/__init__.py} +9 -6
- ml_tools/{_core/_ML_vision_transformers.py → ML_vision_transformers/_core_transforms.py} +11 -155
- ml_tools/ML_vision_transformers/_imprimir.py +14 -0
- ml_tools/ML_vision_transformers/_offline_augmentation.py +159 -0
- ml_tools/{_core/_PSO_optimization.py → PSO_optimization/_PSO.py} +58 -15
- ml_tools/{PSO_optimization.py → PSO_optimization/__init__.py} +5 -3
- ml_tools/PSO_optimization/_imprimir.py +10 -0
- ml_tools/SQL/__init__.py +7 -0
- ml_tools/{_core/_SQL.py → SQL/_dragon_SQL.py} +7 -11
- ml_tools/SQL/_imprimir.py +8 -0
- ml_tools/{_core → VIF}/_VIF_factor.py +5 -8
- ml_tools/{VIF_factor.py → VIF/__init__.py} +4 -2
- ml_tools/VIF/_imprimir.py +10 -0
- ml_tools/_core/__init__.py +7 -1
- ml_tools/_core/_logger.py +8 -18
- ml_tools/_core/_schema_load_ops.py +43 -0
- ml_tools/_core/_script_info.py +2 -2
- ml_tools/{data_exploration.py → data_exploration/__init__.py} +32 -16
- ml_tools/data_exploration/_analysis.py +214 -0
- ml_tools/data_exploration/_cleaning.py +566 -0
- ml_tools/data_exploration/_features.py +583 -0
- ml_tools/data_exploration/_imprimir.py +32 -0
- ml_tools/data_exploration/_plotting.py +487 -0
- ml_tools/data_exploration/_schema_ops.py +176 -0
- ml_tools/{ensemble_evaluation.py → ensemble_evaluation/__init__.py} +6 -4
- ml_tools/{_core → ensemble_evaluation}/_ensemble_evaluation.py +3 -7
- ml_tools/ensemble_evaluation/_imprimir.py +14 -0
- ml_tools/{ensemble_inference.py → ensemble_inference/__init__.py} +5 -3
- ml_tools/{_core → ensemble_inference}/_ensemble_inference.py +15 -18
- ml_tools/ensemble_inference/_imprimir.py +9 -0
- ml_tools/{ensemble_learning.py → ensemble_learning/__init__.py} +4 -6
- ml_tools/{_core → ensemble_learning}/_ensemble_learning.py +7 -10
- ml_tools/ensemble_learning/_imprimir.py +10 -0
- ml_tools/{excel_handler.py → excel_handler/__init__.py} +5 -3
- ml_tools/{_core → excel_handler}/_excel_handler.py +6 -10
- ml_tools/excel_handler/_imprimir.py +13 -0
- ml_tools/{keys.py → keys/__init__.py} +4 -1
- ml_tools/keys/_imprimir.py +11 -0
- ml_tools/{_core → keys}/_keys.py +2 -0
- ml_tools/{math_utilities.py → math_utilities/__init__.py} +5 -2
- ml_tools/math_utilities/_imprimir.py +11 -0
- ml_tools/{_core → math_utilities}/_math_utilities.py +1 -5
- ml_tools/{optimization_tools.py → optimization_tools/__init__.py} +9 -4
- ml_tools/optimization_tools/_imprimir.py +13 -0
- ml_tools/optimization_tools/_optimization_bounds.py +236 -0
- ml_tools/optimization_tools/_optimization_plots.py +218 -0
- ml_tools/{path_manager.py → path_manager/__init__.py} +6 -3
- ml_tools/{_core/_path_manager.py → path_manager/_dragonmanager.py} +11 -347
- ml_tools/path_manager/_imprimir.py +15 -0
- ml_tools/path_manager/_path_tools.py +346 -0
- ml_tools/plot_fonts/__init__.py +8 -0
- ml_tools/plot_fonts/_imprimir.py +8 -0
- ml_tools/{_core → plot_fonts}/_plot_fonts.py +2 -5
- ml_tools/schema/__init__.py +15 -0
- ml_tools/schema/_feature_schema.py +223 -0
- ml_tools/schema/_gui_schema.py +191 -0
- ml_tools/schema/_imprimir.py +10 -0
- ml_tools/{serde.py → serde/__init__.py} +4 -2
- ml_tools/serde/_imprimir.py +10 -0
- ml_tools/{_core → serde}/_serde.py +3 -8
- ml_tools/{utilities.py → utilities/__init__.py} +11 -6
- ml_tools/utilities/_imprimir.py +18 -0
- ml_tools/{_core/_utilities.py → utilities/_utility_save_load.py} +13 -190
- ml_tools/utilities/_utility_tools.py +192 -0
- dragon_ml_toolbox-19.13.0.dist-info/RECORD +0 -111
- ml_tools/ML_chaining_inference.py +0 -8
- ml_tools/ML_configuration.py +0 -86
- ml_tools/ML_configuration_pytab.py +0 -14
- ml_tools/ML_datasetmaster.py +0 -10
- ml_tools/ML_evaluation.py +0 -16
- ml_tools/ML_evaluation_multi.py +0 -12
- ml_tools/ML_finalize_handler.py +0 -8
- ml_tools/ML_inference.py +0 -12
- ml_tools/ML_models.py +0 -14
- ml_tools/ML_models_advanced.py +0 -14
- ml_tools/ML_models_pytab.py +0 -14
- ml_tools/ML_optimization.py +0 -14
- ml_tools/ML_optimization_pareto.py +0 -8
- ml_tools/ML_scaler.py +0 -8
- ml_tools/ML_sequence_datasetmaster.py +0 -8
- ml_tools/ML_sequence_evaluation.py +0 -10
- ml_tools/ML_sequence_inference.py +0 -8
- ml_tools/ML_sequence_models.py +0 -8
- ml_tools/ML_trainer.py +0 -12
- ml_tools/ML_vision_datasetmaster.py +0 -12
- ml_tools/ML_vision_evaluation.py +0 -10
- ml_tools/ML_vision_inference.py +0 -8
- ml_tools/ML_vision_models.py +0 -18
- ml_tools/SQL.py +0 -8
- ml_tools/_core/_ETL_cleaning.py +0 -694
- ml_tools/_core/_IO_tools.py +0 -498
- ml_tools/_core/_ML_callbacks.py +0 -702
- ml_tools/_core/_ML_configuration.py +0 -1332
- ml_tools/_core/_ML_configuration_pytab.py +0 -102
- ml_tools/_core/_ML_evaluation.py +0 -867
- ml_tools/_core/_ML_evaluation_multi.py +0 -544
- ml_tools/_core/_ML_inference.py +0 -646
- ml_tools/_core/_ML_models.py +0 -668
- ml_tools/_core/_ML_models_pytab.py +0 -693
- ml_tools/_core/_ML_trainer.py +0 -2323
- ml_tools/_core/_ML_utilities.py +0 -886
- ml_tools/_core/_ML_vision_models.py +0 -644
- ml_tools/_core/_data_exploration.py +0 -1901
- ml_tools/_core/_optimization_tools.py +0 -493
- ml_tools/_core/_schema.py +0 -359
- ml_tools/plot_fonts.py +0 -8
- ml_tools/schema.py +0 -12
- {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/licenses/LICENSE +0 -0
- {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
- {dragon_ml_toolbox-19.13.0.dist-info → dragon_ml_toolbox-20.0.0.dist-info}/top_level.txt +0 -0
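Every entry above follows the same refactor: a former flat module such as ml_tools/ML_inference.py becomes a subpackage whose __init__.py re-exports the public names from private _*.py implementation modules (plus an _imprimir.py providing info()), while the old ml_tools/_core/_*.py implementations are removed. A minimal sketch of what this means for downstream code, assuming the old flat modules exported the same public names (the hunks below confirm this for ML_inference and ML_evaluation_captum):

# Public import paths are preserved by the new subpackage __init__.py files:
from ml_tools.ML_inference import DragonInferenceHandler   # flat module in 19.13.0, subpackage in 20.0.0

# Only imports that reached into the private implementation layer must change, e.g.:
# from ml_tools._core._ML_inference import ...              # module deleted in 20.0.0
# from ml_tools.ML_inference._dragon_inference import ...   # its 20.0.0 (still private) replacement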
ml_tools/ML_evaluation/_regression.py (new file)

@@ -0,0 +1,273 @@
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+from sklearn.metrics import (
+    mean_squared_error,
+    mean_absolute_error,
+    r2_score,
+    median_absolute_error,
+)
+from pathlib import Path
+from typing import Union, Optional
+
+from ..ML_configuration._metrics import (_BaseRegressionFormat,
+                                         FormatRegressionMetrics,
+                                         FormatMultiTargetRegressionMetrics)
+
+from ..path_manager import make_fullpath, sanitize_filename
+from .._core import get_logger
+from ..keys._keys import _EvaluationConfig
+
+
+_LOGGER = get_logger("Regression Metrics")
+
+
+__all__ = [
+    "regression_metrics",
+    "multi_target_regression_metrics"
+]
+
+
+DPI_value = _EvaluationConfig.DPI
+REGRESSION_PLOT_SIZE = _EvaluationConfig.REGRESSION_PLOT_SIZE
+
+
+def regression_metrics(
+    y_true: np.ndarray,
+    y_pred: np.ndarray,
+    save_dir: Union[str, Path],
+    config: Optional[FormatRegressionMetrics] = None
+):
+    """
+    Saves regression metrics and plots.
+
+    Args:
+        y_true (np.ndarray): Ground truth values.
+        y_pred (np.ndarray): Predicted values.
+        save_dir (str | Path): Directory to save plots and report.
+        config (RegressionMetricsFormat, optional): Formatting configuration object.
+    """
+
+    # --- Parse Config or use defaults ---
+    if config is None:
+        # Create a default config if one wasn't provided
+        format_config = _BaseRegressionFormat()
+    else:
+        format_config = config
+
+    # --- Resolve Font Sizes ---
+    xtick_size = format_config.xtick_size
+    ytick_size = format_config.ytick_size
+    base_font_size = format_config.font_size
+
+    # --- Calculate Metrics ---
+    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
+    mae = mean_absolute_error(y_true, y_pred)
+    r2 = r2_score(y_true, y_pred)
+    medae = median_absolute_error(y_true, y_pred)
+
+    report_lines = [
+        "--- Regression Report ---",
+        f" Root Mean Squared Error (RMSE): {rmse:.4f}",
+        f" Mean Absolute Error (MAE): {mae:.4f}",
+        f" Median Absolute Error (MedAE): {medae:.4f}",
+        f" Coefficient of Determination (R²): {r2:.4f}"
+    ]
+    report_string = "\n".join(report_lines)
+    # print(report_string)
+
+    save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
+    # Save text report
+    report_path = save_dir_path / "regression_report.txt"
+    report_path.write_text(report_string)
+    _LOGGER.info(f"📝 Regression report saved as '{report_path.name}'")
+
+    # --- Save residual plot ---
+    residuals = y_true - y_pred
+    fig_res, ax_res = plt.subplots(figsize=REGRESSION_PLOT_SIZE, dpi=DPI_value)
+    ax_res.scatter(y_pred, residuals,
+                   alpha=format_config.scatter_alpha,
+                   color=format_config.scatter_color)
+    ax_res.axhline(0, color=format_config.residual_line_color, linestyle='--')
+    ax_res.set_xlabel("Predicted Values", labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+    ax_res.set_ylabel("Residuals", labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+    ax_res.set_title("Residual Plot", pad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size + 2)
+
+    # Apply Ticks
+    ax_res.tick_params(axis='x', labelsize=xtick_size)
+    ax_res.tick_params(axis='y', labelsize=ytick_size)
+
+    ax_res.grid(True)
+    plt.tight_layout()
+    res_path = save_dir_path / "residual_plot.svg"
+    plt.savefig(res_path)
+    _LOGGER.info(f"📈 Residual plot saved as '{res_path.name}'")
+    plt.close(fig_res)
+
+    # --- Save true vs predicted plot ---
+    fig_tvp, ax_tvp = plt.subplots(figsize=REGRESSION_PLOT_SIZE, dpi=DPI_value)
+    ax_tvp.scatter(y_true, y_pred,
+                   alpha=format_config.scatter_alpha,
+                   color=format_config.scatter_color)
+    ax_tvp.plot([y_true.min(), y_true.max()], [y_true.min(), y_true.max()],
+                linestyle='--',
+                lw=2,
+                color=format_config.ideal_line_color)
+    ax_tvp.set_xlabel('True Values', labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+    ax_tvp.set_ylabel('Predictions', labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+    ax_tvp.set_title('True vs. Predicted Values', pad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size + 2)
+
+    # Apply Ticks
+    ax_tvp.tick_params(axis='x', labelsize=xtick_size)
+    ax_tvp.tick_params(axis='y', labelsize=ytick_size)
+
+    ax_tvp.grid(True)
+    plt.tight_layout()
+    tvp_path = save_dir_path / "true_vs_predicted_plot.svg"
+    plt.savefig(tvp_path)
+    _LOGGER.info(f"📉 True vs. Predicted plot saved as '{tvp_path.name}'")
+    plt.close(fig_tvp)
+
+    # --- Save Histogram of Residuals ---
+    fig_hist, ax_hist = plt.subplots(figsize=REGRESSION_PLOT_SIZE, dpi=DPI_value)
+    sns.histplot(residuals, kde=True, ax=ax_hist,
+                 bins=format_config.hist_bins,
+                 color=format_config.scatter_color)
+    ax_hist.set_xlabel("Residual Value", labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+    ax_hist.set_ylabel("Frequency", labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+    ax_hist.set_title("Distribution of Residuals", pad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size + 2)
+
+    # Apply Ticks
+    ax_hist.tick_params(axis='x', labelsize=xtick_size)
+    ax_hist.tick_params(axis='y', labelsize=ytick_size)
+
+    ax_hist.grid(True)
+    plt.tight_layout()
+    hist_path = save_dir_path / "residuals_histogram.svg"
+    plt.savefig(hist_path)
+    _LOGGER.info(f"📊 Residuals histogram saved as '{hist_path.name}'")
+    plt.close(fig_hist)
+
+
+def multi_target_regression_metrics(
+    y_true: np.ndarray,
+    y_pred: np.ndarray,
+    target_names: list[str],
+    save_dir: Union[str, Path],
+    config: Optional[FormatMultiTargetRegressionMetrics] = None
+):
+    """
+    Calculates and saves regression metrics for each target individually.
+
+    For each target, this function saves a residual plot and a true vs. predicted plot.
+    It also saves a single CSV file containing the key metrics (RMSE, MAE, R², MedAE)
+    for all targets.
+
+    Args:
+        y_true (np.ndarray): Ground truth values, shape (n_samples, n_targets).
+        y_pred (np.ndarray): Predicted values, shape (n_samples, n_targets).
+        target_names (List[str]): A list of names for the target variables.
+        save_dir (str | Path): Directory to save plots and the report.
+        config (object): Formatting configuration object.
+    """
+    if y_true.ndim != 2 or y_pred.ndim != 2:
+        _LOGGER.error("y_true and y_pred must be 2D arrays for multi-target regression.")
+        raise ValueError()
+    if y_true.shape != y_pred.shape:
+        _LOGGER.error("Shapes of y_true and y_pred must match.")
+        raise ValueError()
+    if y_true.shape[1] != len(target_names):
+        _LOGGER.error("Number of target names must match the number of columns in y_true.")
+        raise ValueError()
+
+    save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
+    metrics_summary = []
+
+    # --- Parse Config or use defaults ---
+    if config is None:
+        # Create a default config if one wasn't provided
+        format_config = _BaseRegressionFormat()
+    else:
+        format_config = config
+
+    # ticks font sizes
+    xtick_size = format_config.xtick_size
+    ytick_size = format_config.ytick_size
+    base_font_size = format_config.font_size
+
+    _LOGGER.debug("--- Multi-Target Regression Evaluation ---")
+
+    for i, name in enumerate(target_names):
+        # print(f" -> Evaluating target: '{name}'")
+        true_i = y_true[:, i]
+        pred_i = y_pred[:, i]
+        sanitized_name = sanitize_filename(name)
+
+        # --- Calculate Metrics ---
+        rmse = np.sqrt(mean_squared_error(true_i, pred_i))
+        mae = mean_absolute_error(true_i, pred_i)
+        r2 = r2_score(true_i, pred_i)
+        medae = median_absolute_error(true_i, pred_i)
+        metrics_summary.append({
+            'Target': name,
+            'RMSE': rmse,
+            'MAE': mae,
+            'MedAE': medae,
+            'R2-score': r2,
+        })
+
+        # --- Save Residual Plot ---
+        residuals = true_i - pred_i
+        fig_res, ax_res = plt.subplots(figsize=REGRESSION_PLOT_SIZE, dpi=DPI_value)
+        ax_res.scatter(pred_i, residuals,
+                       alpha=format_config.scatter_alpha,
+                       edgecolors='k',
+                       s=50,
+                       color=format_config.scatter_color)  # Use config color
+        ax_res.axhline(0, color=format_config.residual_line_color, linestyle='--')  # Use config color
+        ax_res.set_xlabel("Predicted Values", labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+        ax_res.set_ylabel("Residuals", labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+        ax_res.set_title(f"Residual Plot for '{name}'", pad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size + 2)
+
+        # Apply Ticks
+        ax_res.tick_params(axis='x', labelsize=xtick_size)
+        ax_res.tick_params(axis='y', labelsize=ytick_size)
+
+        ax_res.grid(True, linestyle='--', alpha=0.6)
+        plt.tight_layout()
+        res_path = save_dir_path / f"residual_plot_{sanitized_name}.svg"
+        plt.savefig(res_path)
+        plt.close(fig_res)
+
+        # --- Save True vs. Predicted Plot ---
+        fig_tvp, ax_tvp = plt.subplots(figsize=REGRESSION_PLOT_SIZE, dpi=DPI_value)
+        ax_tvp.scatter(true_i, pred_i,
+                       alpha=format_config.scatter_alpha,
+                       edgecolors='k',
+                       s=50,
+                       color=format_config.scatter_color)  # Use config color
+        ax_tvp.plot([true_i.min(), true_i.max()], [true_i.min(), true_i.max()],
+                    linestyle='--',
+                    lw=2,
+                    color=format_config.ideal_line_color)  # Use config color
+        ax_tvp.set_xlabel('True Values', labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+        ax_tvp.set_ylabel('Predicted Values', labelpad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size)
+        ax_tvp.set_title(f"True vs. Predicted for '{name}'", pad=_EvaluationConfig.LABEL_PADDING, fontsize=base_font_size + 2)
+
+        # Apply Ticks
+        ax_tvp.tick_params(axis='x', labelsize=xtick_size)
+        ax_tvp.tick_params(axis='y', labelsize=ytick_size)
+
+        ax_tvp.grid(True, linestyle='--', alpha=0.6)
+        plt.tight_layout()
+        tvp_path = save_dir_path / f"true_vs_predicted_plot_{sanitized_name}.svg"
+        plt.savefig(tvp_path)
+        plt.close(fig_tvp)
+
+    # --- Save Summary Report ---
+    summary_df = pd.DataFrame(metrics_summary)
+    report_path = save_dir_path / "regression_report_multi.csv"
+    summary_df.to_csv(report_path, index=False)
+    _LOGGER.info(f"Full regression report saved to '{report_path.name}'")
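Based on the signatures above, a hedged usage sketch of the two public functions; the re-export through ml_tools.ML_evaluation is assumed from the new ML_evaluation/__init__.py (its contents are not shown here), and the directory and target names are illustrative:

import numpy as np
from ml_tools.ML_evaluation import regression_metrics, multi_target_regression_metrics

rng = np.random.default_rng(0)
y_true = rng.normal(size=200)
y_pred = y_true + rng.normal(scale=0.2, size=200)

# Writes regression_report.txt plus residual, true-vs-predicted, and histogram SVGs.
regression_metrics(y_true, y_pred, save_dir="eval/regression")

# Multi-target: 2D arrays with one column per entry in target_names,
# producing per-target plots and a regression_report_multi.csv summary.
Y_true = rng.normal(size=(200, 2))
Y_pred = Y_true + rng.normal(scale=0.2, size=(200, 2))
multi_target_regression_metrics(Y_true, Y_pred,
                                target_names=["target_a", "target_b"],
                                save_dir="eval/regression_multi")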
ml_tools/{_core/_ML_sequence_evaluation.py → ML_evaluation/_sequence.py}

@@ -11,14 +11,14 @@ from sklearn.metrics import (
 from pathlib import Path
 from typing import Union, Optional
 
-from .
-from ._logger import get_logger
-from ._script_info import _script_info
-from ._ML_configuration import SequenceValueMetricsFormat, SequenceSequenceMetricsFormat, _BaseSequenceValueFormat, _BaseSequenceSequenceFormat
-from ._keys import _EvaluationConfig
+from ..ML_configuration._metrics import FormatSequenceValueMetrics, FormatSequenceSequenceMetrics, _BaseSequenceValueFormat, _BaseSequenceSequenceFormat
 
+from ..keys._keys import _EvaluationConfig
+from ..path_manager import make_fullpath
+from .._core import get_logger
 
-
+
+_LOGGER = get_logger("Sequence Metrics")
 
 
 __all__ = [

@@ -35,7 +35,7 @@ def sequence_to_value_metrics(
     y_true: np.ndarray,
     y_pred: np.ndarray,
     save_dir: Union[str, Path],
-    config: Optional[
+    config: Optional[FormatSequenceValueMetrics] = None
 ):
     """
     Saves regression metrics and plots for sequence-to-value (many-to-one) tasks.

@@ -127,7 +127,7 @@ def sequence_to_sequence_metrics(
     y_true: np.ndarray,
     y_pred: np.ndarray,
     save_dir: Union[str, Path],
-    config: Optional[
+    config: Optional[FormatSequenceSequenceMetrics] = None
 ):
     """
     Saves per-step regression metrics for sequence-to-sequence (many-to-many) tasks.

@@ -220,6 +220,3 @@ def sequence_to_sequence_metrics(
     # --- Restore RC params ---
     plt.rcParams.update(original_rc_params)
 
-
-def info():
-    _script_info(__all__)
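The hunks above rename the sequence formatting classes (SequenceValueMetricsFormat becomes FormatSequenceValueMetrics, SequenceSequenceMetricsFormat becomes FormatSequenceSequenceMetrics) and make the config argument optional. A hedged call-site sketch, assuming ml_tools.ML_evaluation re-exports sequence_to_value_metrics (only the private module is shown in this diff):

import numpy as np
from ml_tools.ML_evaluation import sequence_to_value_metrics

y_true = np.random.rand(64)   # one scalar target per sequence
y_pred = np.random.rand(64)

# config now defaults to None; pass a FormatSequenceValueMetrics
# (formerly SequenceValueMetricsFormat) only to customize the plots.
sequence_to_value_metrics(y_true, y_pred, save_dir="eval/seq_to_value")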
ml_tools/{_core/_ML_vision_evaluation.py → ML_evaluation/_vision.py}

@@ -11,20 +11,18 @@ from sklearn.metrics import (
     ConfusionMatrixDisplay
 )
 from pathlib import Path
-from typing import Union, Optional
+from typing import Union, Optional
 import json
 from torchmetrics.detection import MeanAveragePrecision
 
-from .
-from ._logger import get_logger
-from ._script_info import _script_info
-from ._keys import VisionKeys, _EvaluationConfig
-from ._ML_configuration import (BinarySegmentationMetricsFormat,
-                                MultiClassSegmentationMetricsFormat,
-                                _BaseSegmentationFormat)
+from ..ML_configuration._metrics import FormatBinarySegmentationMetrics, FormatMultiClassSegmentationMetrics, _BaseSegmentationFormat
 
+from ..path_manager import make_fullpath
+from .._core import get_logger
+from ..keys._keys import VisionKeys, _EvaluationConfig
 
-
+
+_LOGGER = get_logger("Vision Metrics")
 
 
 __all__ = [

@@ -40,8 +38,8 @@ def segmentation_metrics(
     y_true: np.ndarray,
     y_pred: np.ndarray,
     save_dir: Union[str, Path],
-    class_names: Optional[
-    config: Optional[Union[
+    class_names: Optional[list[str]] = None,
+    config: Optional[Union[FormatBinarySegmentationMetrics, FormatMultiClassSegmentationMetrics]] = None
 ):
     """
     Calculates and saves pixel-level metrics for segmentation tasks.

@@ -191,10 +189,10 @@ def segmentation_metrics(
 
 
 def object_detection_metrics(
-    preds:
-    targets:
+    preds: list[dict[str, torch.Tensor]],
+    targets: list[dict[str, torch.Tensor]],
     save_dir: Union[str, Path],
-    class_names: Optional[
+    class_names: Optional[list[str]] = None,
     print_output: bool=False
 ):
     """

@@ -283,6 +281,3 @@ def object_detection_metrics(
         _LOGGER.error(f"Failed to compute mAP: {e}")
         raise
 
-
-def info():
-    _script_info(__all__)
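object_detection_metrics now spells out its expected input type, list[dict[str, torch.Tensor]], which matches the torchmetrics MeanAveragePrecision format it computes mAP with. A hedged sketch of a call; the re-export from ml_tools.ML_evaluation and the save directory are assumptions:

import torch
from ml_tools.ML_evaluation import object_detection_metrics

# One dict per image, in torchmetrics MeanAveragePrecision format (xyxy boxes).
preds = [{
    "boxes": torch.tensor([[10.0, 10.0, 50.0, 50.0]]),
    "scores": torch.tensor([0.90]),
    "labels": torch.tensor([1]),
}]
targets = [{
    "boxes": torch.tensor([[12.0, 11.0, 48.0, 52.0]]),
    "labels": torch.tensor([1]),
}]

object_detection_metrics(preds, targets,
                         save_dir="eval/detection",
                         print_output=True)  # class_names is now Optional[list[str]] = None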
ml_tools/{_core → ML_evaluation_captum}/_ML_evaluation_captum.py

@@ -1,29 +1,21 @@
-from typing import Optional,
+from typing import Optional, Union
 from pathlib import Path
 import numpy as np
 import pandas as pd
 import matplotlib.pyplot as plt
 import torch
 from torch import nn
+from captum.attr import IntegratedGradients
+from captum.attr import visualization as viz
 
-from
-from
-from .
-from ._keys import CaptumKeys
+from ..path_manager import make_fullpath, sanitize_filename
+from .._core import get_logger
+from ..keys._keys import CaptumKeys
 
 
 _LOGGER = get_logger("Captum")
 
 
-# --- Dependency Check ---
-try:
-    from captum.attr import IntegratedGradients
-    from captum.attr import visualization as viz
-    CAPTUM_AVAILABLE = True
-except ImportError:
-    CAPTUM_AVAILABLE = False
-
-
 __all__ = [
     "captum_feature_importance",
     "captum_image_heatmap",

@@ -31,16 +23,12 @@ __all__ = [
 ]
 
 
-def _is_captum_available() -> bool:
-    """Returns True if captum is installed and importable."""
-    return CAPTUM_AVAILABLE
-
 
 def captum_feature_importance(model: nn.Module,
                               input_data: torch.Tensor,
-                              feature_names: Optional[
+                              feature_names: Optional[list[str]],
                               save_dir: Union[str, Path],
-                              target_names: Optional[
+                              target_names: Optional[list[str]] = None,
                               n_steps: int = 50,
                               device: Union[str, torch.device] = 'cpu'):
     """

@@ -67,10 +55,6 @@ def captum_feature_importance(model: nn.Module,
     ### NOTE:
     The internal batch size used by Captum will be set to the number of samples. If you run into OOM errors, consider reducing `n_samples`.
     """
-    if not CAPTUM_AVAILABLE:
-        _LOGGER.error("Captum is not installed. Please run `pip install captum` to use this feature.")
-        return
-
     # Ensure output directory exists
     save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
 

@@ -150,7 +134,7 @@ def _process_single_target(ig: 'IntegratedGradients', # type: ignore
                            inputs: torch.Tensor,
                            baseline: torch.Tensor,
                            target_index: Union[int, None],
-                           feature_names: Optional[
+                           feature_names: Optional[list[str]],
                            save_dir: Path,
                            n_steps: int,
                            file_suffix: str):

@@ -250,7 +234,7 @@ def _process_single_target(ig: 'IntegratedGradients', # type: ignore
 def captum_image_heatmap(model: nn.Module,
                          input_data: torch.Tensor,
                          save_dir: Union[str, Path],
-                         target_names: Optional[
+                         target_names: Optional[list[str]] = None,
                          n_steps: int = 50,
                          device: Union[str, torch.device] = 'cpu'):
     """

@@ -270,10 +254,6 @@ def captum_image_heatmap(model: nn.Module,
             Higher values increase accuracy but require more memory/time.
         device (str | torch.device): The device to run the calculation on.
     """
-    if not CAPTUM_AVAILABLE:
-        _LOGGER.error("Captum is not installed. Please run `pip install captum` to use this feature.")
-        return
-
     save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
     device_obj = torch.device(device) if isinstance(device, str) else device
 

@@ -343,7 +323,7 @@ def captum_image_heatmap(model: nn.Module,
 def captum_segmentation_heatmap(model: nn.Module,
                                 input_data: torch.Tensor,
                                 save_dir: Union[str, Path],
-                                target_names: Optional[
+                                target_names: Optional[list[str]],
                                 n_steps: int = 30,
                                 device: Union[str, torch.device] = 'cpu'):
     """

@@ -361,10 +341,6 @@ def captum_segmentation_heatmap(model: nn.Module,
         n_steps (int): Integration steps. Kept lower by default (30) for performance on high-res images.
         device (str | torch.device): Torch device.
     """
-    if not CAPTUM_AVAILABLE:
-        _LOGGER.error("Captum is not installed. Please run `pip install captum` to use this feature.")
-        return
-
     save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
     device_obj = torch.device(device) if isinstance(device, str) else device
     model.eval()

@@ -448,6 +424,3 @@ def captum_segmentation_heatmap(model: nn.Module,
         except Exception as e:
             _LOGGER.error(f"Failed to generate heatmap for Sample {sample_idx}, Class {class_name}: {e}")
 
-
-def info():
-    _script_info(__all__)
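Removing the dependency check makes captum a hard requirement of this module: IntegratedGradients is now imported at the top of the file and the early-return guards are gone, so a missing captum raises ImportError at import time instead of logging a message. A hedged usage sketch of captum_feature_importance based on the signature shown above; the toy model, feature names, and save directory are illustrative:

import torch
from torch import nn
from ml_tools.ML_evaluation_captum import captum_feature_importance  # re-export shown in the __init__.py hunk below

model = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 1))
X = torch.randn(32, 4)  # batch of samples to attribute

# Saves Integrated Gradients feature-importance outputs under save_dir.
captum_feature_importance(model, X,
                          feature_names=["f1", "f2", "f3", "f4"],
                          save_dir="eval/captum",
                          n_steps=50,
                          device="cpu")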
ml_tools/{ML_evaluation_captum.py → ML_evaluation_captum/__init__.py}

@@ -1,10 +1,12 @@
-from .
-    captum_feature_importance,
+from ._ML_evaluation_captum import (
+    captum_feature_importance,
     captum_image_heatmap,
-    captum_segmentation_heatmap
-    info
+    captum_segmentation_heatmap
 )
 
+from ._imprimir import info
+
+
 __all__ = [
     "captum_feature_importance",
     "captum_image_heatmap",
ml_tools/{_core → ML_finalize_handler}/_ML_finalize_handler.py

@@ -4,10 +4,9 @@ import numpy as np
 from typing import Union, Any, Optional
 from pathlib import Path
 
-from
-from
-from .
-from ._keys import PyTorchCheckpointKeys, MagicWords
+from .._core import get_logger
+from ..path_manager import make_fullpath
+from ..keys._keys import PyTorchCheckpointKeys, MagicWords
 
 
 _LOGGER = get_logger("Finalized-File")

@@ -160,6 +159,3 @@ class FinalizedFileHandler:
         self._none_checker(self._sequence_length, PyTorchCheckpointKeys.SEQUENCE_LENGTH)
         return self._sequence_length
 
-
-def info():
-    _script_info(__all__)
ml_tools/ML_inference/__init__.py (new file)

@@ -0,0 +1,22 @@
+from ._dragon_inference import (
+    DragonInferenceHandler
+)
+
+from ._chain_inference import (
+    DragonChainInference
+)
+
+from ._multi_inference import (
+    multi_inference_regression,
+    multi_inference_classification,
+)
+
+from ._imprimir import info
+
+
+__all__ = [
+    "DragonInferenceHandler",
+    "DragonChainInference",
+    "multi_inference_regression",
+    "multi_inference_classification"
+]
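With the flat ml_tools/ML_inference.py removed (see the deleted-files list above), this __init__.py is what keeps the public import path stable. A short sketch; info() is assumed to print the subpackage's public names, as the old _script_info-based helper did:

from ml_tools.ML_inference import (
    DragonInferenceHandler,
    DragonChainInference,
    multi_inference_regression,
    multi_inference_classification,
    info,
)

info()  # lists the names in __all__ (assumed behavior of the new _imprimir.info)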