dragon-ml-toolbox 7.0.0__py3-none-any.whl → 8.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dragon-ml-toolbox might be problematic.

ml_tools/ML_trainer.py CHANGED
@@ -7,6 +7,7 @@ import numpy as np
 
 from .ML_callbacks import Callback, History, TqdmProgressBar
 from .ML_evaluation import classification_metrics, regression_metrics, plot_losses, shap_summary_plot, plot_attention_importance
+from .ML_evaluation_multi import multi_target_regression_metrics, multi_label_classification_metrics, multi_target_shap_summary_plot
 from ._script_info import _script_info
 from .keys import PyTorchLogKeys
 from ._logger import _LOGGER
@@ -19,7 +20,7 @@ __all__ = [
 
 class MLTrainer:
     def __init__(self, model: nn.Module, train_dataset: Dataset, test_dataset: Dataset,
-                 kind: Literal["regression", "classification"],
+                 kind: Literal["regression", "classification", "multi_target_regression", "multi_label_classification"],
                  criterion: nn.Module, optimizer: torch.optim.Optimizer,
                  device: Union[Literal['cuda', 'mps', 'cpu'],str], dataloader_workers: int = 2, callbacks: Optional[List[Callback]] = None):
         """
@@ -31,20 +32,22 @@ class MLTrainer:
             model (nn.Module): The PyTorch model to train.
             train_dataset (Dataset): The training dataset.
             test_dataset (Dataset): The testing/validation dataset.
-            kind (str): The type of task, 'regression' or 'classification'.
+            kind (str): Can be 'regression', 'classification', 'multi_target_regression', or 'multi_label_classification'.
             criterion (nn.Module): The loss function.
             optimizer (torch.optim.Optimizer): The optimizer.
             device (str): The device to run training on ('cpu', 'cuda', 'mps').
-            dataloader_workers (int): Subprocesses for data loading. Defaults to 2.
+            dataloader_workers (int): Subprocesses for data loading.
             callbacks (List[Callback] | None): A list of callbacks to use during training.
 
         Note:
-            For **regression** tasks, suggested criterions include `nn.MSELoss` or `nn.L1Loss`.
-
-            For **classification** tasks, `nn.CrossEntropyLoss` (multi-class) or `nn.BCEWithLogitsLoss` (binary) are common choices.
+            - For **regression** and **multi_target_regression** tasks, suggested criterions include `nn.MSELoss` or `nn.L1Loss`.
+
+            - For **single-label, multi-class classification** tasks, `nn.CrossEntropyLoss` is the standard choice.
+
+            - For **multi-label, binary classification** tasks (where each label is a 0 or 1), `nn.BCEWithLogitsLoss` is the correct choice as it treats each output as an independent binary problem.
         """
-        if kind not in ["regression", "classification"]:
-            raise TypeError("Kind must be 'regression' or 'classification'.")
+        if kind not in ["regression", "classification", "multi_target_regression", "multi_label_classification"]:
+            raise ValueError(f"'{kind}' is not a valid task type.")
 
         self.model = model
         self.train_dataset = train_dataset
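
For reference, a minimal sketch of how the widened constructor might be called for one of the new task types; `my_model`, `train_ds`, and `test_ds` are hypothetical placeholders, and the criterion follows the Note in the docstring above:

    import torch
    from torch import nn
    from ml_tools.ML_trainer import MLTrainer

    trainer = MLTrainer(
        model=my_model,                     # placeholder nn.Module with one logit per label
        train_dataset=train_ds,             # placeholder Dataset
        test_dataset=test_ds,               # placeholder Dataset
        kind="multi_label_classification",  # task type added in this release
        criterion=nn.BCEWithLogitsLoss(),   # each label treated as an independent binary problem
        optimizer=torch.optim.Adam(my_model.parameters(), lr=1e-3),
        device="cuda" if torch.cuda.is_available() else "cpu",
    )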
@@ -157,7 +160,6 @@ class MLTrainer:
     def _train_step(self):
         self.model.train()
         running_loss = 0.0
-        # Enumerate to get batch index
         for batch_idx, (features, target) in enumerate(self.train_loader): # type: ignore
             # Create a log dictionary for the batch
             batch_logs = {
@@ -168,22 +170,26 @@ class MLTrainer:
 
             features, target = features.to(self.device), target.to(self.device)
             self.optimizer.zero_grad()
+
             output = self.model(features)
-            if isinstance(self.criterion, (nn.MSELoss, nn.L1Loss)):
+
+            # Apply shape correction only for single-target regression
+            if self.kind == "regression":
                 output = output.view_as(target)
+
             loss = self.criterion(output, target)
+
             loss.backward()
             self.optimizer.step()
 
             # Calculate batch loss and update running loss for the epoch
             batch_loss = loss.item()
             running_loss += batch_loss * features.size(0)
-
+
             # Add the batch loss to the logs and call the end-of-batch hook
             batch_logs[PyTorchLogKeys.BATCH_LOSS] = batch_loss
             self.callbacks_hook('on_batch_end', batch_idx, logs=batch_logs)
 
-        # Return the average loss for the entire epoch
         return {PyTorchLogKeys.TRAIN_LOSS: running_loss / len(self.train_loader.dataset)} # type: ignore
 
     def _validation_step(self):
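
The shape correction introduced above matters because a single-target regressor typically emits `[batch, 1]` outputs while targets are flat `[batch]` vectors; without the reshape, the loss would silently broadcast. A standalone illustration (not part of the package):

    import torch
    from torch import nn

    output = torch.randn(32, 1)  # model output: one value per sample
    target = torch.randn(32)     # targets: flat vector
    # Without view_as, MSELoss would broadcast to a 32x32 matrix of differences.
    loss = nn.MSELoss()(output.view_as(target), target)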
@@ -192,25 +198,27 @@ class MLTrainer:
         with torch.no_grad():
             for features, target in self.test_loader: # type: ignore
                 features, target = features.to(self.device), target.to(self.device)
+
                 output = self.model(features)
-                if isinstance(self.criterion, (nn.MSELoss, nn.L1Loss)):
+                # Apply shape correction only for single-target regression
+                if self.kind == "regression":
                     output = output.view_as(target)
+
                 loss = self.criterion(output, target)
+
                 running_loss += loss.item() * features.size(0)
+
         logs = {PyTorchLogKeys.VAL_LOSS: running_loss / len(self.test_loader.dataset)} # type: ignore
         return logs
 
-    def _predict_for_eval(self, dataloader: DataLoader):
+    def _predict_for_eval(self, dataloader: DataLoader, classification_threshold: float = 0.5):
         """
         Private method to yield model predictions batch by batch for evaluation.
-        This is used internally by the `evaluate` method.
-
-        Args:
-            dataloader (DataLoader): The dataloader to predict on.
-
+
         Yields:
             tuple: A tuple containing (y_pred_batch, y_prob_batch, y_true_batch).
-            y_prob_batch is None for regression tasks.
+
+            - y_prob_batch is None for regression tasks.
         """
         self.model.eval()
         self.model.to(self.device)
@@ -220,84 +228,135 @@ class MLTrainer:
                 output = self.model(features).cpu()
                 y_true_batch = target.numpy()
 
-                if self.kind == "classification":
-                    probs = nn.functional.softmax(output, dim=1)
+                y_pred_batch = None
+                y_prob_batch = None
+
+                if self.kind in ["regression", "multi_target_regression"]:
+                    y_pred_batch = output.numpy()
+
+                elif self.kind == "classification":
+                    probs = torch.softmax(output, dim=1)
                     preds = torch.argmax(probs, dim=1)
                     y_pred_batch = preds.numpy()
                     y_prob_batch = probs.numpy()
-                # regression
-                else:
-                    y_pred_batch = output.numpy()
-                    y_prob_batch = None
-
+
+                elif self.kind == "multi_label_classification":
+                    probs = torch.sigmoid(output)
+                    preds = (probs >= classification_threshold).int()
+                    y_pred_batch = preds.numpy()
+                    y_prob_batch = probs.numpy()
+
                 yield y_pred_batch, y_prob_batch, y_true_batch
-
-    def evaluate(self, save_dir: Union[str,Path], data: Optional[Union[DataLoader, Dataset]] = None):
+
+    def evaluate(self, save_dir: Union[str, Path], data: Optional[Union[DataLoader, Dataset]] = None, classification_threshold: float = 0.5):
         """
-        Evaluates the model on the given data.
+        Evaluates the model, routing to the correct evaluation function based on task `kind`.
 
         Args:
-            data (DataLoader | Dataset | None ): The data to evaluate on.
-                Can be a DataLoader or a Dataset. If None, defaults to the trainer's internal test_dataset.
             save_dir (str | Path): Directory to save all reports and plots.
+            data (DataLoader | Dataset | None): The data to evaluate on. If None, defaults to the trainer's internal test_dataset.
+            classification_threshold (float): Probability threshold for multi-label tasks.
         """
+        dataset_for_names = None
         eval_loader = None
+
         if isinstance(data, DataLoader):
             eval_loader = data
-        else:
-            # Determine which dataset to use (the one passed in, or the default test_dataset)
-            dataset_to_use = data if data is not None else self.test_dataset
-            if not isinstance(dataset_to_use, Dataset):
-                raise ValueError("Cannot evaluate. No valid DataLoader or Dataset was provided, "
-                                 "and no test_dataset is available in the trainer.")
-
-            # Create a new DataLoader from the dataset
-            eval_loader = DataLoader(
-                dataset=dataset_to_use,
-                batch_size=32, # A sensible default for evaluation
-                shuffle=False,
-                num_workers=0 if self.device.type == 'mps' else self.dataloader_workers,
-                pin_memory=(self.device.type == "cuda")
-            )
-
+            # Try to get the dataset from the loader for fetching target names
+            if hasattr(data, 'dataset'):
+                dataset_for_names = data.dataset
+        elif isinstance(data, Dataset):
+            # Create a new loader from the provided dataset
+            eval_loader = DataLoader(data,
+                                     batch_size=32,
+                                     shuffle=False,
+                                     num_workers=0 if self.device.type == 'mps' else self.dataloader_workers,
+                                     pin_memory=(self.device.type == "cuda"))
+            dataset_for_names = data
+        else: # data is None, use the trainer's default test dataset
+            if self.test_dataset is None:
+                raise ValueError("Cannot evaluate. No data provided and no test_dataset available in the trainer.")
+            # Create a fresh DataLoader from the test_dataset
+            eval_loader = DataLoader(self.test_dataset,
+                                     batch_size=32,
+                                     shuffle=False,
+                                     num_workers=0 if self.device.type == 'mps' else self.dataloader_workers,
+                                     pin_memory=(self.device.type == "cuda"))
+            dataset_for_names = self.test_dataset
+
+        if eval_loader is None:
+            raise ValueError("Cannot evaluate. No valid data was provided or found.")
+
         print("\n--- Model Evaluation ---")
 
-        # Collect results from the predict generator
         all_preds, all_probs, all_true = [], [], []
-        for y_pred_b, y_prob_b, y_true_b in self._predict_for_eval(eval_loader):
-            all_preds.append(y_pred_b)
-            if y_prob_b is not None:
-                all_probs.append(y_prob_b)
-            all_true.append(y_true_b)
+        for y_pred_b, y_prob_b, y_true_b in self._predict_for_eval(eval_loader, classification_threshold):
+            if y_pred_b is not None: all_preds.append(y_pred_b)
+            if y_prob_b is not None: all_probs.append(y_prob_b)
+            if y_true_b is not None: all_true.append(y_true_b)
+
+        if not all_true:
+            _LOGGER.error("❌ Evaluation failed: No data was processed.")
+            return
 
         y_pred = np.concatenate(all_preds)
         y_true = np.concatenate(all_true)
-        y_prob = np.concatenate(all_probs) if self.kind == "classification" else None
+        y_prob = np.concatenate(all_probs) if all_probs else None
 
-        if self.kind == "classification":
-            classification_metrics(save_dir, y_true, y_pred, y_prob)
-        else:
+        # --- Routing Logic ---
+        if self.kind == "regression":
             regression_metrics(y_true.flatten(), y_pred.flatten(), save_dir)
 
+        elif self.kind == "classification":
+            classification_metrics(save_dir, y_true, y_pred, y_prob)
+
+        elif self.kind == "multi_target_regression":
+            try:
+                target_names = dataset_for_names.target_names # type: ignore
+            except AttributeError:
+                num_targets = y_true.shape[1]
+                target_names = [f"target_{i}" for i in range(num_targets)]
+                _LOGGER.warning(f"⚠️ Dataset has no 'target_names' attribute. Using generic names.")
+            multi_target_regression_metrics(y_true, y_pred, target_names, save_dir)
+
+        elif self.kind == "multi_label_classification":
+            try:
+                target_names = dataset_for_names.target_names # type: ignore
+            except AttributeError:
+                num_targets = y_true.shape[1]
+                target_names = [f"label_{i}" for i in range(num_targets)]
+                _LOGGER.warning(f"⚠️ Dataset has no 'target_names' attribute. Using generic names.")
+
+            if y_prob is None:
+                _LOGGER.error("❌ Evaluation for multi_label_classification requires probabilities (y_prob).")
+                return
+            multi_label_classification_metrics(y_true, y_prob, target_names, save_dir, classification_threshold)
+
         print("\n--- Training History ---")
         plot_losses(self.history, save_dir=save_dir)
 
     def explain(self,
-                feature_names: Optional[List[str]],
                 save_dir: Union[str,Path],
                 explain_dataset: Optional[Dataset] = None,
-                n_samples: int = 1000):
+                n_samples: int = 1000,
+                feature_names: Optional[List[str]] = None,
+                target_names: Optional[List[str]] = None):
         """
         Explains model predictions using SHAP and saves all artifacts.
 
         The background data is automatically sampled from the trainer's training dataset.
+
+        This method automatically routes to the appropriate SHAP summary plot
+        function based on the task. If `feature_names` or `target_names` (multi-target) are not provided,
+        it will attempt to extract them from the dataset.
 
         Args:
-            explain_dataset (Dataset, optional): A specific dataset to explain.
+            explain_dataset (Dataset | None): A specific dataset to explain.
                 If None, the trainer's test dataset is used.
             n_samples (int): The number of samples to use for both background and explanation.
-            feature_names (List[str], optional): Names for the features.
-            save_dir (str, optional): Directory to save all SHAP artifacts.
+            feature_names (list[str] | None): Feature names.
+            target_names (list[str] | None): Target names
+            save_dir (str | Path): Directory to save all SHAP artifacts.
         """
         # Internal helper to create a dataloader and get a random sample
         def _get_random_sample(dataset: Dataset, num_samples: int):
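
A sketch of calling the reworked `evaluate()`, assuming the trainer sketched earlier; the save directory is a placeholder:

    trainer.evaluate(
        save_dir="results/eval",       # all reports and plots land here
        data=None,                     # None falls back to the trainer's test_dataset
        classification_threshold=0.6,  # only consulted for multi_label_classification
    )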
@@ -340,16 +399,54 @@ class MLTrainer:
         if instances_to_explain is None:
             _LOGGER.error("❌ Explanation dataset is empty or invalid. Skipping SHAP analysis.")
             return
+
+        # attempt to get feature names
+        if feature_names is None:
+            # _LOGGER.info("`feature_names` not provided. Attempting to extract from dataset...")
+            if hasattr(target_dataset, "feature_names"):
+                feature_names = target_dataset.feature_names # type: ignore
+            else:
+                try:
+                    # Handle PyTorch Subset
+                    feature_names = target_dataset.dataset.feature_names # type: ignore
+                except AttributeError:
+                    _LOGGER.error("❌ Could not extract `feature_names` from the dataset.")
+                    raise ValueError("`feature_names` must be provided if the dataset object does not have a `feature_names` attribute.")
 
         # 3. Call the plotting function
-        shap_summary_plot(
-            model=self.model,
-            background_data=background_data,
-            instances_to_explain=instances_to_explain,
-            feature_names=feature_names,
-            save_dir=save_dir
-        )
-
+        if self.kind in ["regression", "classification"]:
+            shap_summary_plot(
+                model=self.model,
+                background_data=background_data,
+                instances_to_explain=instances_to_explain,
+                feature_names=feature_names,
+                save_dir=save_dir
+            )
+        elif self.kind in ["multi_target_regression", "multi_label_classification"]:
+            # try to get target names
+            if target_names is None:
+                target_names = []
+                if hasattr(target_dataset, 'target_names'):
+                    target_names = target_dataset.target_names # type: ignore
+                else:
+                    # Infer number of targets from the model's output layer
+                    try:
+                        num_targets = self.model.output_layer.out_features # type: ignore
+                        target_names = [f"target_{i}" for i in range(num_targets)] # type: ignore
+                        _LOGGER.warning("Dataset has no 'target_names' attribute. Using generic names.")
+                    except AttributeError:
+                        _LOGGER.error("Cannot determine target names for multi-target SHAP plot. Skipping.")
+                        return
+
+            multi_target_shap_summary_plot(
+                model=self.model,
+                background_data=background_data,
+                instances_to_explain=instances_to_explain,
+                feature_names=feature_names, # type: ignore
+                target_names=target_names, # type: ignore
+                save_dir=save_dir
+            )
+
     def _attention_helper(self, dataloader: DataLoader):
         """
         Private method to yield model attention weights batch by batch for evaluation.
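
And a matching sketch for the reworked `explain()` signature, relying on the automatic name extraction described in its docstring; values are placeholders:

    trainer.explain(
        save_dir="results/shap",
        explain_dataset=None,  # defaults to the trainer's test dataset
        n_samples=500,
        feature_names=None,    # extracted from the dataset when available
        target_names=None,     # only used for the multi-target SHAP plot
    )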
@@ -0,0 +1,231 @@
+import pandas as pd
+import torch
+import numpy as np
+import evotorch
+from evotorch.algorithms import NSGA2
+from evotorch.logging import PandasLogger
+from typing import Literal, Union, Tuple, List, Optional, Any, Callable
+from pathlib import Path
+from tqdm.auto import trange
+from functools import partial
+from contextlib import nullcontext
+import matplotlib.pyplot as plt
+import seaborn as sns
+
+from .path_manager import make_fullpath, sanitize_filename
+from ._logger import _LOGGER
+from ._script_info import _script_info
+from .ML_inference import PyTorchInferenceHandlerMulti # Using the multi-target handler
+from .keys import PyTorchInferenceKeys
+from .utilities import threshold_binary_values, save_dataframe
+from .SQL import DatabaseManager # Added for SQL saving
+
+__all__ = [
+    "create_multi_objective_problem",
+    "run_multi_objective_optimization",
+    "plot_pareto_front"
+]
+
+
+def create_multi_objective_problem(
+    inference_handler: PyTorchInferenceHandlerMulti,
+    bounds: Tuple[List[float], List[float]],
+    binary_features: int,
+    objective_senses: Tuple[Literal["min", "max"], ...],
+    algorithm: Literal["NSGA2"] = "NSGA2",
+    population_size: int = 200,
+    **searcher_kwargs
+) -> Tuple[evotorch.Problem, Callable[[], Any]]:
+    """
+    Creates and configures an EvoTorch Problem and a Searcher for multi-objective optimization.
+
+    This function sets up a problem where the goal is to optimize multiple conflicting
+    objectives simultaneously, using an algorithm like NSGA2 to find the Pareto front.
+
+    Args:
+        inference_handler (PyTorchInferenceHandlerMulti): An initialized handler for the multi-target model.
+        bounds (tuple[list[float], list[float]]): Lower and upper bounds for the solution features.
+        binary_features (int): Number of binary features at the end of the feature vector.
+        objective_senses (Tuple[Literal["min", "max"], ...]): A tuple specifying the optimization
+            goal for each target (e.g., ("max", "min", "max")). The length of this tuple
+            must match the number of outputs from the model.
+        algorithm (str): The multi-objective search algorithm to use. Currently supports "NSGA2".
+        population_size (int): The number of solutions in each generation.
+        **searcher_kwargs: Additional keyword arguments for the search algorithm's constructor.
+
+    Returns:
+        A tuple containing the configured multi-objective Problem and the Searcher factory.
+    """
+    lower_bounds, upper_bounds = list(bounds[0]), list(bounds[1])
+
+    if binary_features > 0:
+        lower_bounds.extend([0.45] * binary_features)
+        upper_bounds.extend([0.55] * binary_features)
+
+    solution_length = len(lower_bounds)
+    device = inference_handler.device
+
+    def fitness_func(solution_tensor: torch.Tensor) -> torch.Tensor:
+        """
+        The fitness function for a multi-objective problem.
+        It returns the entire output tensor from the model. EvoTorch handles the rest.
+        """
+        # The handler returns a tensor of shape [batch_size, num_targets]
+        predictions = inference_handler.predict_batch(solution_tensor)[PyTorchInferenceKeys.PREDICTIONS]
+        return predictions
+
+    if algorithm == "NSGA2":
+        problem = evotorch.Problem(
+            objective_sense=objective_senses,
+            objective_func=fitness_func,
+            solution_length=solution_length,
+            bounds=(lower_bounds, upper_bounds),
+            device=device,
+            vectorized=True,
+            num_actors='max' # Use available CPU cores
+        )
+        SearcherClass = NSGA2
+        if 'popsize' not in searcher_kwargs:
+            searcher_kwargs['popsize'] = population_size
+    else:
+        raise ValueError(f"Unknown multi-objective algorithm '{algorithm}'.")
+
+    searcher_factory = partial(SearcherClass, problem, **searcher_kwargs)
+    return problem, searcher_factory
+
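
A sketch of configuring a problem with this helper for a hypothetical two-output model; `handler` stands in for an initialized `PyTorchInferenceHandlerMulti`, and the bounds are illustrative:

    problem, searcher_factory = create_multi_objective_problem(
        inference_handler=handler,                     # placeholder handler
        bounds=([0.0, 0.0, 10.0], [1.0, 5.0, 100.0]),  # per-feature lower/upper bounds
        binary_features=2,                             # two binary flags appended to the vector
        objective_senses=("max", "min"),               # one sense per model output
        population_size=200,
    )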
+
+def run_multi_objective_optimization(
+    problem: evotorch.Problem,
+    searcher_factory: Callable[[], Any],
+    num_generations: int,
+    run_name: str,
+    binary_features: int,
+    save_dir: Union[str, Path],
+    feature_names: List[str],
+    target_names: List[str],
+    save_format: Literal['csv', 'sqlite', 'both'] = 'csv',
+    verbose: bool = True
+):
+    """
+    Runs the multi-objective evolutionary optimization process to find the Pareto front.
+
+    This function executes a multi-objective algorithm (like NSGA2) and saves the
+    entire set of non-dominated solutions (the Pareto front) to the specified format(s).
+    It also generates and saves a plot of the Pareto front.
+
+    Args:
+        problem (evotorch.Problem): The configured multi-objective problem.
+        searcher_factory (Callable): A factory function to generate a fresh searcher instance.
+        num_generations (int): The number of generations to run the algorithm.
+        run_name (str): A name for this optimization run, used for filenames/table names.
+        binary_features (int): Number of binary features in the solution vector.
+        save_dir (str | Path): The directory where the result files will be saved.
+        feature_names (List[str]): Names of the solution features for labeling columns.
+        target_names (List[str]): Names of the target objectives for labeling columns.
+        save_format (str): The format to save results in ('csv', 'sqlite', or 'both').
+        verbose (bool): If True, attaches a logger and saves the evolution history.
+    """
+    save_path = make_fullpath(save_dir, make=True, enforce="directory")
+    sanitized_run_name = sanitize_filename(run_name)
+
+    if len(target_names) != problem.num_objectives:
+        raise ValueError("The number of `target_names` must match the number of objectives in the problem.")
+
+    searcher = searcher_factory()
+    _LOGGER.info(f"🤖 Starting multi-objective optimization with {searcher.__class__.__name__} for {num_generations} generations...")
+
+    logger = PandasLogger(searcher) if verbose else None
+    searcher.run(num_generations)
+
+    pareto_front = searcher.status["pareto_front"]
+    _LOGGER.info(f"✅ Optimization complete. Found {len(pareto_front)} non-dominated solutions.")
+
+    solutions_np = pareto_front.values.cpu().numpy()
+    objectives_np = pareto_front.evals.cpu().numpy()
+
+    if binary_features > 0:
+        solutions_np = threshold_binary_values(input_array=solutions_np, binary_values=binary_features)
+
+    results_df = pd.DataFrame(solutions_np, columns=feature_names)
+    objective_cols = []
+    for i, name in enumerate(target_names):
+        col_name = f"predicted_{name}"
+        results_df[col_name] = objectives_np[:, i]
+        objective_cols.append(col_name)
+
+    # --- Saving Logic ---
+    if save_format in ['csv', 'both']:
+        csv_path = save_path / f"pareto_front_{sanitized_run_name}.csv"
+        results_df.to_csv(csv_path, index=False)
+        _LOGGER.info(f"📄 Pareto front data saved to '{csv_path.name}'")
+
+    if save_format in ['sqlite', 'both']:
+        db_path = save_path / "Optimization_Multi.db"
+        with DatabaseManager(db_path) as db:
+            db.insert_from_dataframe(
+                table_name=sanitized_run_name,
+                df=results_df,
+                if_exists='replace'
+            )
+        _LOGGER.info(f"🗃️ Pareto front data saved to table '{sanitized_run_name}' in '{db_path.name}'")
+
+    # --- Plotting Logic ---
+    plot_pareto_front(
+        results_df,
+        objective_cols=objective_cols,
+        save_path=save_path / f"pareto_plot_{sanitized_run_name}.svg"
+    )
+
+    if logger:
+        log_df = logger.to_dataframe()
+        save_dataframe(df=log_df, save_dir=save_path / "EvolutionLogs", filename=f"log_{sanitized_run_name}")
+
+
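
Continuing that sketch, a run that writes both CSV and SQLite output; all names and paths are placeholders (three continuous features plus the two binary flags give five feature names):

    run_multi_objective_optimization(
        problem=problem,
        searcher_factory=searcher_factory,
        num_generations=100,
        run_name="demo_run",
        binary_features=2,
        save_dir="results/optimization",
        feature_names=["x1", "x2", "x3", "flag_a", "flag_b"],
        target_names=["strength", "cost"],
        save_format="both",
    )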
+def plot_pareto_front(results_df: pd.DataFrame, objective_cols: List[str], save_path: Path):
+    """
+    Generates and saves a plot of the Pareto front.
+
+    - For 2 objectives, it creates a 2D scatter plot.
+    - For 3 objectives, it creates a 3D scatter plot.
+    - For >3 objectives, it creates a scatter plot matrix (pairs plot).
+
+    Args:
+        results_df (pd.DataFrame): DataFrame containing the optimization results.
+        objective_cols (List[str]): The names of the columns that hold the objective values.
+        save_path (Path): The full path (including filename) to save the SVG plot.
+    """
+    num_objectives = len(objective_cols)
+    _LOGGER.info(f"🎨 Generating Pareto front plot for {num_objectives} objectives...")
+
+    plt.style.use('seaborn-v0_8-whitegrid')
+
+    if num_objectives == 2:
+        fig, ax = plt.subplots(figsize=(8, 6), dpi=120)
+        ax.scatter(results_df[objective_cols[0]], results_df[objective_cols[1]], alpha=0.7, edgecolors='k')
+        ax.set_xlabel(objective_cols[0])
+        ax.set_ylabel(objective_cols[1])
+        ax.set_title("Pareto Front (2D)")
+
+    elif num_objectives == 3:
+        fig = plt.figure(figsize=(9, 7), dpi=120)
+        ax = fig.add_subplot(111, projection='3d')
+        ax.scatter(results_df[objective_cols[0]], results_df[objective_cols[1]], results_df[objective_cols[2]], alpha=0.7, depthshade=True)
+        ax.set_xlabel(objective_cols[0])
+        ax.set_ylabel(objective_cols[1])
+        ax.set_zlabel(objective_cols[2])
+        ax.set_title("Pareto Front (3D)")
+
+    else: # > 3 objectives
+        _LOGGER.info(" -> More than 3 objectives found, generating a scatter plot matrix.")
+        g = sns.pairplot(results_df[objective_cols], diag_kind="kde", plot_kws={'alpha': 0.6})
+        g.fig.suptitle("Pareto Front (Pairs Plot)", y=1.02)
+        plt.savefig(save_path, bbox_inches='tight')
+        plt.close()
+        _LOGGER.info(f"📊 Pareto plot saved to '{save_path.name}'")
+        return
+
+    plt.tight_layout()
+    plt.savefig(save_path)
+    plt.close()
+    _LOGGER.info(f"📊 Pareto plot saved to '{save_path.name}'")
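
Because `plot_pareto_front` is exported in `__all__`, a saved front can also be re-plotted on its own; a sketch using the column names the run above would produce:

    import pandas as pd
    from pathlib import Path

    df = pd.read_csv("results/optimization/pareto_front_demo_run.csv")
    plot_pareto_front(
        df,
        objective_cols=["predicted_strength", "predicted_cost"],
        save_path=Path("results/optimization/pareto_replot.svg"),
    )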