dragon-ml-toolbox 13.3.0__py3-none-any.whl → 16.2.0__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
Files changed (48)
  1. {dragon_ml_toolbox-13.3.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/METADATA +20 -6
  2. dragon_ml_toolbox-16.2.0.dist-info/RECORD +51 -0
  3. {dragon_ml_toolbox-13.3.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +10 -0
  4. ml_tools/ETL_cleaning.py +20 -20
  5. ml_tools/ETL_engineering.py +23 -25
  6. ml_tools/GUI_tools.py +20 -20
  7. ml_tools/MICE_imputation.py +207 -5
  8. ml_tools/ML_callbacks.py +43 -26
  9. ml_tools/ML_configuration.py +788 -0
  10. ml_tools/ML_datasetmaster.py +303 -448
  11. ml_tools/ML_evaluation.py +351 -93
  12. ml_tools/ML_evaluation_multi.py +139 -42
  13. ml_tools/ML_inference.py +290 -209
  14. ml_tools/ML_models.py +33 -106
  15. ml_tools/ML_models_advanced.py +323 -0
  16. ml_tools/ML_optimization.py +12 -12
  17. ml_tools/ML_scaler.py +11 -11
  18. ml_tools/ML_sequence_datasetmaster.py +341 -0
  19. ml_tools/ML_sequence_evaluation.py +219 -0
  20. ml_tools/ML_sequence_inference.py +391 -0
  21. ml_tools/ML_sequence_models.py +139 -0
  22. ml_tools/ML_trainer.py +1604 -179
  23. ml_tools/ML_utilities.py +351 -4
  24. ml_tools/ML_vision_datasetmaster.py +1540 -0
  25. ml_tools/ML_vision_evaluation.py +284 -0
  26. ml_tools/ML_vision_inference.py +405 -0
  27. ml_tools/ML_vision_models.py +641 -0
  28. ml_tools/ML_vision_transformers.py +284 -0
  29. ml_tools/PSO_optimization.py +6 -6
  30. ml_tools/SQL.py +4 -4
  31. ml_tools/_keys.py +171 -0
  32. ml_tools/_schema.py +1 -1
  33. ml_tools/custom_logger.py +37 -14
  34. ml_tools/data_exploration.py +502 -93
  35. ml_tools/ensemble_evaluation.py +54 -11
  36. ml_tools/ensemble_inference.py +7 -33
  37. ml_tools/ensemble_learning.py +1 -1
  38. ml_tools/math_utilities.py +1 -1
  39. ml_tools/optimization_tools.py +2 -2
  40. ml_tools/path_manager.py +5 -5
  41. ml_tools/serde.py +2 -2
  42. ml_tools/utilities.py +192 -4
  43. dragon_ml_toolbox-13.3.0.dist-info/RECORD +0 -41
  44. ml_tools/RNN_forecast.py +0 -56
  45. ml_tools/keys.py +0 -87
  46. {dragon_ml_toolbox-13.3.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/WHEEL +0 -0
  47. {dragon_ml_toolbox-13.3.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/licenses/LICENSE +0 -0
  48. {dragon_ml_toolbox-13.3.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/top_level.txt +0 -0
ml_tools/ML_scaler.py CHANGED
@@ -9,11 +9,11 @@ from .path_manager import make_fullpath
 
 
 __all__ = [
-    "PytorchScaler"
+    "DragonScaler"
 ]
 
 
-class PytorchScaler:
+class DragonScaler:
     """
     Standardizes continuous features in a PyTorch dataset by subtracting the
     mean and dividing by the standard deviation.
@@ -38,7 +38,7 @@ class PytorchScaler:
         self.continuous_feature_indices = continuous_feature_indices
 
     @classmethod
-    def fit(cls, dataset: Dataset, continuous_feature_indices: List[int], batch_size: int = 64) -> 'PytorchScaler':
+    def fit(cls, dataset: Dataset, continuous_feature_indices: List[int], batch_size: int = 64) -> 'DragonScaler':
         """
         Fits the scaler by computing the mean and std dev from a dataset using a
         fast, single-pass, vectorized algorithm.
@@ -50,7 +50,7 @@ class PytorchScaler:
             batch_size (int): The batch size for iterating through the dataset.
 
         Returns:
-            PytorchScaler: A new, fitted instance of the scaler.
+            DragonScaler: A new, fitted instance of the scaler.
         """
         if not continuous_feature_indices:
             _LOGGER.error("No continuous feature indices provided. Scaler will not be fitted.")
@@ -167,10 +167,10 @@ class PytorchScaler:
         }
         torch.save(state, path_obj)
         if verbose:
-            _LOGGER.info(f"PytorchScaler state saved as '{path_obj.name}'.")
+            _LOGGER.info(f"DragonScaler state saved as '{path_obj.name}'.")
 
     @staticmethod
-    def load(filepath: Union[str, Path], verbose: bool=True) -> 'PytorchScaler':
+    def load(filepath: Union[str, Path], verbose: bool=True) -> 'DragonScaler':
         """
         Loads a scaler's state from a .pth file.
 
@@ -178,13 +178,13 @@ class PytorchScaler:
             filepath (str | Path): The path to the saved scaler file.
 
         Returns:
-            PytorchScaler: An instance of the scaler with the loaded state.
+            DragonScaler: An instance of the scaler with the loaded state.
         """
         path_obj = make_fullpath(filepath, enforce="file")
         state = torch.load(path_obj)
         if verbose:
-            _LOGGER.info(f"PytorchScaler state loaded from '{path_obj.name}'.")
-        return PytorchScaler(
+            _LOGGER.info(f"DragonScaler state loaded from '{path_obj.name}'.")
+        return DragonScaler(
             mean=state['mean'],
             std=state['std'],
             continuous_feature_indices=state['continuous_feature_indices']
@@ -194,8 +194,8 @@ class PytorchScaler:
         """Returns the developer-friendly string representation of the scaler."""
         if self.continuous_feature_indices:
             num_features = len(self.continuous_feature_indices)
-            return f"PytorchScaler(fitted for {num_features} features)"
-        return "PytorchScaler(not fitted)"
+            return f"DragonScaler(fitted for {num_features} features)"
+        return "DragonScaler(not fitted)"
 
 
 def info():
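
The rename from PytorchScaler to DragonScaler leaves the fit/transform/save/load API otherwise unchanged. A minimal usage sketch follows; the TensorDataset input is an assumption (the sequence module below fits on the internal _PytorchDataset, and fit only requires a Dataset yielding (features, labels) pairs), while the calls themselves match the signatures visible in this diff and the module below.

import torch
from torch.utils.data import TensorDataset

from ml_tools.ML_scaler import DragonScaler

# Hypothetical data: 100 samples, 3 features; columns 0 and 2 are continuous.
features = torch.randn(100, 3)
labels = torch.zeros(100)
dataset = TensorDataset(features, labels)  # assumption: any (features, labels) Dataset works

# Fit on the dataset, standardizing only the chosen columns.
scaler = DragonScaler.fit(dataset, continuous_feature_indices=[0, 2])

# Standardize a 2D tensor, then map it back to the original scale.
scaled = scaler.transform(features)
restored = scaler.inverse_transform(scaled)

# Persist the fitted state and restore it later.
scaler.save("dragon_scaler_demo.pth")
scaler_reloaded = DragonScaler.load("dragon_scaler_demo.pth")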
ml_tools/ML_sequence_datasetmaster.py ADDED
@@ -0,0 +1,341 @@
+import torch
+from torch.utils.data import Dataset
+import pandas
+import numpy
+from typing import Literal, Union, Tuple
+import matplotlib.pyplot as plt
+from pathlib import Path
+
+from .path_manager import make_fullpath
+from ._logger import _LOGGER
+from ._script_info import _script_info
+from .ML_scaler import DragonScaler
+from .ML_datasetmaster import _PytorchDataset
+from ._keys import DatasetKeys, MLTaskKeys
+
+
+__all__ = [
+    "DragonDatasetSequence"
+]
+
+
+# --- SequenceMaker ---
+class DragonDatasetSequence:
+    """
+    Creates windowed PyTorch datasets from univariate (one feature) sequential data.
+
+    Automatic Pipeline:
+
+    1. Split Data: Separate data into training, validation, and testing portions.
+    2. Normalize Data: Normalize the data. The scaler will be fitted on the training portion.
+    3. Generate Windows: Create the windowed sequences from the normalized splits.
+    """
+    def __init__(self,
+                 prediction_mode: Literal["sequence-to-sequence", "sequence-to-value"],
+                 data: Union[pandas.DataFrame, pandas.Series, numpy.ndarray],
+                 sequence_length: int,
+                 validation_size: float = 0.2,
+                 test_size: float = 0.1):
+        """
+        Initializes the dataset manager and automatically processes the data.
+
+        The constructor runs the full pipeline:
+        1. Splits the data chronologically (train, validation, test).
+        2. Fits a DragonScaler on the training split.
+        3. Normalizes all splits using the fitted scaler.
+        4. Generates windowed datasets for training, validation, and testing.
+
+        Args:
+            prediction_mode: The type of sequence task.
+            data: The input univariate time-series data.
+                - If pandas.DataFrame: The index is used for the time axis
+                  and the *first column* is used as the sequence.
+                - If pandas.Series: The index is used for the time axis.
+                - If numpy.ndarray: A simple integer range is used for the time axis.
+            sequence_length (int): The number of time steps in each input window (X).
+            validation_size (float): The fraction of data to hold out for validation.
+            test_size (float): The fraction of data to hold out for testing.
+        """
+        self._train_dataset = None
+        self._test_dataset = None
+        self._val_dataset = None
+        self.sequence_length = sequence_length
+        self.scaler = None
+
+        if not prediction_mode in [MLTaskKeys.SEQUENCE_SEQUENCE, MLTaskKeys.SEQUENCE_VALUE]:
+            _LOGGER.error(f"Unrecognized prediction mode: '{prediction_mode}'.")
+            raise ValueError()
+        else:
+            self.prediction_mode = prediction_mode
+
+        if isinstance(data, pandas.DataFrame):
+            self.time_axis = data.index.values
+            self.sequence = data.iloc[:, 0].values.astype(numpy.float32)
+        elif isinstance(data, pandas.Series):
+            self.time_axis = data.index.values
+            self.sequence = data.values.astype(numpy.float32)
+        elif isinstance(data, numpy.ndarray):
+            self.time_axis = numpy.arange(len(data))
+            self.sequence = data.astype(numpy.float32)
+        else:
+            _LOGGER.error("Data must be a pandas DataFrame/Series or a numpy array.")
+            raise TypeError()
+
+        self.train_sequence = None
+        self.val_sequence = None
+        self.test_sequence = None
+
+        self.train_time_axis = None
+        self.val_time_axis = None
+        self.test_time_axis = None
+
+        self._is_split = False
+        self._is_normalized = False
+        self._are_windows_generated = False
+
+        # Automation
+        self._split_data(validation_size=validation_size, test_size=test_size)
+        self._normalize_data()
+        self._generate_windows()
+
+    def _split_data(self, validation_size: float = 0.2, test_size: float = 0.1) -> None:
+        """
+        Splits the sequence chronologically into training, validation, and testing portions.
+
+        To prevent windowing errors, the validation and test sets include an overlap of `sequence_length` from the preceding data.
+        """
+        if self._is_split:
+            _LOGGER.warning("Data has already been split.")
+            return
+
+        if (validation_size + test_size) >= 1.0:
+            _LOGGER.error(f"The sum of validation_size ({validation_size}) and test_size ({test_size}) must be less than 1.0.")
+            raise ValueError("validation_size and test_size sum must be < 1.0")
+
+        total_size = len(self.sequence)
+
+        # Calculate split indices
+        test_split_idx = int(total_size * (1 - test_size))
+        val_split_idx = int(total_size * (1 - test_size - validation_size))
+
+        # --- Create sequences ---
+        # Train sequence is from the beginning to the validation index
+        self.train_sequence = self.sequence[:val_split_idx]
+
+        # Validation sequence starts `sequence_length` before its split index for windowing
+        self.val_sequence = self.sequence[val_split_idx - self.sequence_length : test_split_idx]
+
+        # Test sequence starts `sequence_length` before its split index for windowing
+        self.test_sequence = self.sequence[test_split_idx - self.sequence_length:]
+
+        # --- Create time axes ---
+        self.train_time_axis = self.time_axis[:val_split_idx]
+        # The "plottable" validation/test time axes start from their respective split indices
+        self.val_time_axis = self.time_axis[val_split_idx : test_split_idx]
+        self.test_time_axis = self.time_axis[test_split_idx:]
+
+        self._is_split = True
+        _LOGGER.info(f"Sequence split into training ({len(self.train_sequence)}), validation ({len(self.val_sequence)}), and testing ({len(self.test_sequence)}) points.")
+
+    def _normalize_data(self) -> None:
+        """
+        Normalizes the sequence data using DragonScaler. Must be called AFTER splitting to prevent data leakage from the test set.
+        """
+        if not self._is_split:
+            _LOGGER.error("Data must be split BEFORE normalizing.")
+            raise RuntimeError()
+
+        if self.scaler:
+            _LOGGER.warning("Data has already been normalized.")
+            return
+
+        # 1. DragonScaler requires a Dataset to fit. Create a temporary one.
+        #    The scaler expects 2D data [n_samples, n_features].
+        train_features = self.train_sequence.reshape(-1, 1)  # type: ignore
+
+        # _PytorchDataset needs labels, so we create dummy ones.
+        dummy_labels = numpy.zeros(len(train_features))
+        temp_train_ds = _PytorchDataset(train_features, dummy_labels, labels_dtype=torch.float32)
+
+        # 2. Fit the DragonScaler on the temporary training dataset.
+        #    The sequence is a single feature, so its index is [0].
+        _LOGGER.info("Fitting DragonScaler on the training data...")
+        self.scaler = DragonScaler.fit(temp_train_ds, continuous_feature_indices=[0])
+
+        # 3. Transform sequences using the fitted scaler.
+        #    The transform method requires a tensor, so we convert, transform, and convert back.
+        train_tensor = torch.tensor(self.train_sequence.reshape(-1, 1), dtype=torch.float32)  # type: ignore
+        val_tensor = torch.tensor(self.val_sequence.reshape(-1, 1), dtype=torch.float32)  # type: ignore
+        test_tensor = torch.tensor(self.test_sequence.reshape(-1, 1), dtype=torch.float32)  # type: ignore
+
+        self.train_sequence = self.scaler.transform(train_tensor).numpy().flatten()
+        self.val_sequence = self.scaler.transform(val_tensor).numpy().flatten()
+        self.test_sequence = self.scaler.transform(test_tensor).numpy().flatten()
+
+        self._is_normalized = True
+        _LOGGER.info("Sequence data normalized using DragonScaler.")
+
+    def _generate_windows(self) -> None:
+        """
+        Generates overlapping windows for features and labels.
+        """
+        if not self._is_split:
+            _LOGGER.error("Cannot generate windows before splitting data.")
+            raise RuntimeError()
+
+        if not self._is_normalized:
+            _LOGGER.error("Cannot generate windows before normalizing data.")
+            raise RuntimeError()
+
+        if self._are_windows_generated:
+            _LOGGER.warning("Windows have already been generated.")
+            return
+
+        self._train_dataset = self._create_windowed_dataset(self.train_sequence)  # type: ignore
+        self._val_dataset = self._create_windowed_dataset(self.val_sequence)  # type: ignore
+        self._test_dataset = self._create_windowed_dataset(self.test_sequence)  # type: ignore
+
+        self._are_windows_generated = True
+        _LOGGER.info("Feature and label windows generated for train, validation, and test sets.")
+
+    def _create_windowed_dataset(self, data: numpy.ndarray) -> Dataset:
+        """Efficiently creates windowed features and labels using numpy."""
+        if len(data) <= self.sequence_length:
+            # Validation/Test sets of size 0 might be passed
+            _LOGGER.warning(f"Data length ({len(data)}) is not greater than sequence_length ({self.sequence_length}). Cannot create windows. Returning empty dataset.")
+            return _PytorchDataset(numpy.array([]), numpy.array([]), labels_dtype=torch.float32)
+
+        if self.prediction_mode == MLTaskKeys.SEQUENCE_VALUE:
+            # sequence-to-value
+            features = data[:-1]
+            labels = data[self.sequence_length:]
+
+            n_windows = len(features) - self.sequence_length + 1
+            bytes_per_item = features.strides[0]
+            strided_features = numpy.lib.stride_tricks.as_strided(
+                features, shape=(n_windows, self.sequence_length), strides=(bytes_per_item, bytes_per_item)
+            )
+            # Ensure labels align with the end of each feature window
+            aligned_labels = labels[:n_windows]
+            return _PytorchDataset(strided_features, aligned_labels, labels_dtype=torch.float32)
+
+        else:
+            # Sequence-to-sequence
+            x_data = data[:-1]
+            y_data = data[1:]
+
+            n_windows = len(x_data) - self.sequence_length + 1
+            bytes_per_item = x_data.strides[0]
+
+            strided_x = numpy.lib.stride_tricks.as_strided(x_data, shape=(n_windows, self.sequence_length), strides=(bytes_per_item, bytes_per_item))
+            strided_y = numpy.lib.stride_tricks.as_strided(y_data, shape=(n_windows, self.sequence_length), strides=(bytes_per_item, bytes_per_item))
+
+            return _PytorchDataset(strided_x, strided_y, labels_dtype=torch.float32)
+
+    def plot_splits(self, save_dir: Union[str, Path]):
+        """Plots the training, validation and testing data."""
+        if not self._is_split:
+            _LOGGER.error("Cannot plot before splitting data.")
+            raise RuntimeError()
+
+        if self.scaler is None:
+            _LOGGER.error("Cannot plot: data has not been normalized, or scaler is missing.")
+            return
+
+        save_path = make_fullpath(save_dir, make=True, enforce="directory")
+        full_path = save_path / "SequenceSplits.svg"
+
+        plt.figure(figsize=(15, 6))
+        plt.title("Sequential Data")
+        plt.grid(True)
+        plt.xlabel("Sequence")
+        plt.ylabel("Value")
+
+        # Plot denormalized training data
+        plt.plot(self.train_time_axis, self.scaler.inverse_transform(self.train_sequence.reshape(-1, 1)), label='Train Data')  # type: ignore
+
+        # Plot denormalized validation data
+        # We must skip the overlapping 'sequence_length' part for plotting
+        val_plot_data = self.val_sequence[self.sequence_length:]  # type: ignore
+        plt.plot(self.val_time_axis, self.scaler.inverse_transform(val_plot_data.reshape(-1, 1)), label='Validation Data', c='orange')  # type: ignore
+
+        # Plot denormalized test data
+        # We must skip the overlapping 'sequence_length' part for plotting
+        test_plot_data = self.test_sequence[self.sequence_length:]  # type: ignore
+        plt.plot(self.test_time_axis, self.scaler.inverse_transform(test_plot_data.reshape(-1, 1)), label='Test Data', c='green')  # type: ignore
+
+        plt.legend()
+
+        plt.tight_layout()
+        plt.savefig(full_path)
+        _LOGGER.info(f"📈 Sequence data splits saved as '{full_path.name}'.")
+        plt.close()
+
+    def get_datasets(self) -> Tuple[Dataset, Dataset, Dataset]:
+        """Returns the final train, validation, and test datasets."""
+        if not self._are_windows_generated:
+            _LOGGER.error("Windows have not been generated. Call .generate_windows() first.")
+            raise RuntimeError()
+        return self._train_dataset, self._val_dataset, self._test_dataset  # type: ignore
+
+    def save_scaler(self, directory: Union[str, Path], verbose: bool=True) -> None:
+        """
+        Saves the fitted DragonScaler's state to a .pth file.
+
+        Args:
+            directory (str | Path): The directory where the scaler will be saved.
+        """
+        if not self.scaler:
+            _LOGGER.error("No scaler was fitted or provided.")
+            raise RuntimeError()
+
+        save_path = make_fullpath(directory, make=True, enforce="directory")
+
+        filename = f"{DatasetKeys.SCALER_PREFIX}{self.prediction_mode}.pth"
+        filepath = save_path / filename
+        self.scaler.save(filepath, verbose=False)
+        if verbose:
+            _LOGGER.info(f"Scaler saved as '{filepath.name}'.")
+
+    def get_last_training_sequence(self) -> numpy.ndarray:
+        """
+        Returns the final, un-scaled sequence from the training data.
+        """
+        if not self._is_split:
+            _LOGGER.error("Data has not been split. Cannot get last training sequence.")
+            raise RuntimeError()
+
+        # The length of train_time_axis is our validation split index
+        val_split_idx = len(self.train_time_axis)  # type: ignore
+
+        if val_split_idx < self.sequence_length:
+            _LOGGER.error(f"Training data length ({val_split_idx}) is less than sequence_length ({self.sequence_length}).")
+            raise ValueError()
+
+        # Get the slice from the *original* sequence
+        start_idx = val_split_idx - self.sequence_length
+        end_idx = val_split_idx
+
+        return self.sequence[start_idx:end_idx]
+
+    def __repr__(self) -> str:
+        s = f"<{self.__class__.__name__}>:\n"
+        s += f"  Prediction Mode: {self.prediction_mode}\n"
+        s += f"  Sequence Length (Window): {self.sequence_length}\n"
+        s += f"  Total Data Points: {len(self.sequence)}\n"
+        s += "  --- Status ---\n"
+        s += f"  Split: {self._is_split}\n"
+        s += f"  Normalized: {self._is_normalized}\n"
+        s += f"  Windows Generated: {self._are_windows_generated}\n"
+
+        if self._are_windows_generated:
+            train_len = len(self._train_dataset) if self._train_dataset else 0  # type: ignore
+            val_len = len(self._val_dataset) if self._val_dataset else 0  # type: ignore
+            test_len = len(self._test_dataset) if self._test_dataset else 0  # type: ignore
+            s += f"  Datasets (Train | Validation | Test): {train_len} | {val_len} | {test_len} windows\n"
+
+        return s
+
+
+def info():
+    _script_info(__all__)
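
The constructor runs the whole split → normalize → window pipeline, so no further setup calls are needed. A minimal end-to-end sketch follows; the synthetic sine-wave series and the DataLoader settings are assumptions, while the constructor arguments and the get_datasets / save_scaler / plot_splits calls match the signatures above.

import numpy
from torch.utils.data import DataLoader

from ml_tools.ML_sequence_datasetmaster import DragonDatasetSequence

# Assumed synthetic univariate series: a noisy sine wave of 1000 points.
t = numpy.arange(1000)
series = (numpy.sin(t / 20.0) + 0.1 * numpy.random.randn(1000)).astype(numpy.float32)

# Split -> fit DragonScaler on the train split -> normalize -> window, all in __init__.
maker = DragonDatasetSequence(
    prediction_mode="sequence-to-value",
    data=series,
    sequence_length=32,
    validation_size=0.2,
    test_size=0.1,
)

train_ds, val_ds, test_ds = maker.get_datasets()
train_loader = DataLoader(train_ds, batch_size=64, shuffle=False)  # keep chronological order

maker.save_scaler("output/")  # writes "<SCALER_PREFIX>sequence-to-value.pth" into output/
maker.plot_splits("output/")  # writes SequenceSplits.svg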
ml_tools/ML_sequence_evaluation.py ADDED
@@ -0,0 +1,219 @@
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns
+from sklearn.metrics import (
+    mean_squared_error,
+    mean_absolute_error,
+    r2_score,
+    median_absolute_error,
+)
+from pathlib import Path
+from typing import Union, Optional
+
+from .path_manager import make_fullpath
+from ._logger import _LOGGER
+from ._script_info import _script_info
+from .ML_configuration import SequenceValueMetricsFormat, SequenceSequenceMetricsFormat, _BaseSequenceValueFormat, _BaseSequenceSequenceFormat
+
+
+__all__ = [
+    "sequence_to_value_metrics",
+    "sequence_to_sequence_metrics"
+]
+
+DPI_value = 250
+
+
+def sequence_to_value_metrics(
+    y_true: np.ndarray,
+    y_pred: np.ndarray,
+    save_dir: Union[str, Path],
+    config: Optional[SequenceValueMetricsFormat] = None
+):
+    """
+    Saves regression metrics and plots for sequence-to-value (many-to-one) tasks.
+
+    Args:
+        y_true (np.ndarray): Ground truth values (1D array).
+        y_pred (np.ndarray): Predicted values (1D array).
+        save_dir (str | Path): Directory to save plots and report.
+        config (object): Formatting configuration object.
+    """
+
+    # --- Ensure 1D input ---
+    if y_true.ndim > 1: y_true = y_true.flatten()
+    if y_pred.ndim > 1: y_pred = y_pred.flatten()
+
+    # --- Parse Config or use defaults ---
+    if config is None:
+        # Create a default config if one wasn't provided
+        format_config = _BaseSequenceValueFormat()
+    else:
+        format_config = config
+
+    # --- Set Matplotlib font size ---
+    original_rc_params = plt.rcParams.copy()
+    plt.rcParams.update({'font.size': format_config.font_size})
+
+    # --- Calculate Metrics ---
+    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
+    mae = mean_absolute_error(y_true, y_pred)
+    r2 = r2_score(y_true, y_pred)
+    medae = median_absolute_error(y_true, y_pred)
+
+    report_lines = [
+        "--- Sequence-to-Value Regression Report ---",
+        f" Root Mean Squared Error (RMSE): {rmse:.4f}",
+        f" Mean Absolute Error (MAE): {mae:.4f}",
+        f" Median Absolute Error (MedAE): {medae:.4f}",
+        f" Coefficient of Determination (R²): {r2:.4f}"
+    ]
+    report_string = "\n".join(report_lines)
+
+    save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
+    # Save text report
+    report_path = save_dir_path / "sequence_to_value_report.txt"
+    report_path.write_text(report_string)
+    _LOGGER.info(f"📝 Seq-to-Value report saved as '{report_path.name}'")
+
+    # --- Save residual plot ---
+    residuals = y_true - y_pred
+    fig_res, ax_res = plt.subplots(figsize=(8, 6), dpi=DPI_value)
+    ax_res.scatter(y_pred, residuals,
+                   alpha=format_config.scatter_alpha,
+                   color=format_config.scatter_color)
+    ax_res.axhline(0, color=format_config.residual_line_color, linestyle='--')
+    ax_res.set_xlabel("Predicted Values")
+    ax_res.set_ylabel("Residuals")
+    ax_res.set_title("Sequence-to-Value Residual Plot")
+    ax_res.grid(True)
+    plt.tight_layout()
+    res_path = save_dir_path / "sequence_to_value_residual_plot.svg"
+    plt.savefig(res_path)
+    _LOGGER.info(f"📈 Seq-to-Value residual plot saved as '{res_path.name}'")
+    plt.close(fig_res)
+
+    # --- Save true vs predicted plot ---
+    fig_tvp, ax_tvp = plt.subplots(figsize=(8, 6), dpi=DPI_value)
+    ax_tvp.scatter(y_true, y_pred,
+                   alpha=format_config.scatter_alpha,
+                   color=format_config.scatter_color)
+    ax_tvp.plot([y_true.min(), y_true.max()], [y_true.min(), y_true.max()],
+                linestyle='--',
+                lw=2,
+                color=format_config.ideal_line_color)
+    ax_tvp.set_xlabel('True Values')
+    ax_tvp.set_ylabel('Predictions')
+    ax_tvp.set_title('Sequence-to-Value: True vs. Predicted')
+    ax_tvp.grid(True)
+    plt.tight_layout()
+    tvp_path = save_dir_path / "sequence_to_value_true_vs_predicted_plot.svg"
+    plt.savefig(tvp_path)
+    _LOGGER.info(f"📉 Seq-to-Value True vs. Predicted plot saved as '{tvp_path.name}'")
+    plt.close(fig_tvp)
+
+    # --- Restore RC params ---
+    plt.rcParams.update(original_rc_params)
+
+
+def sequence_to_sequence_metrics(
+    y_true: np.ndarray,
+    y_pred: np.ndarray,
+    save_dir: Union[str, Path],
+    config: Optional[SequenceSequenceMetricsFormat] = None
+):
+    """
+    Saves per-step regression metrics for sequence-to-sequence (many-to-many) tasks.
+
+    Args:
+        y_true (np.ndarray): Ground truth sequences (n_samples, sequence_length).
+        y_pred (np.ndarray): Predicted sequences (n_samples, sequence_length).
+        save_dir (str | Path): Directory to save plots and report.
+        config (object): Formatting configuration object.
+    """
+
+    if y_true.ndim != 2 or y_pred.ndim != 2:
+        _LOGGER.error(f"Input arrays must be 2D (n_samples, sequence_length). Got y_true: {y_true.shape}, y_pred: {y_pred.shape}")
+        raise ValueError("Invalid input dimensions for sequence-to-sequence metrics.")
+
+    if y_true.shape != y_pred.shape:
+        _LOGGER.error(f"Input shapes do not match. Got y_true: {y_true.shape}, y_pred: {y_pred.shape}")
+        raise ValueError("Mismatched input shapes.")
+
+    # --- Parse Config or use defaults ---
+    if config is None:
+        format_config = _BaseSequenceSequenceFormat()
+    else:
+        format_config = config
+
+    # --- Set Matplotlib font size ---
+    original_rc_params = plt.rcParams.copy()
+    plt.rcParams.update({'font.size': format_config.font_size})
+
+    sequence_length = y_true.shape[1]
+    steps = list(range(1, sequence_length + 1))
+    per_step_rmse = []
+    per_step_mae = []
+
+    # --- Calculate metrics for each time step ---
+    for i in range(sequence_length):
+        y_true_step = y_true[:, i]
+        y_pred_step = y_pred[:, i]
+
+        rmse = np.sqrt(mean_squared_error(y_true_step, y_pred_step))
+        mae = mean_absolute_error(y_true_step, y_pred_step)
+
+        per_step_rmse.append(rmse)
+        per_step_mae.append(mae)
+
+    # --- Create and save DataFrame ---
+    report_df = pd.DataFrame({
+        "step": steps,
+        "rmse": per_step_rmse,
+        "mae": per_step_mae
+    })
+
+    save_dir_path = make_fullpath(save_dir, make=True, enforce="directory")
+    report_path = save_dir_path / "sequence_to_sequence_report.csv"
+    report_df.to_csv(report_path, index=False)
+    _LOGGER.info(f"📝 Seq-to-Seq per-step report saved as '{report_path.name}'")
+
+    # --- Create and save plot ---
+    fig, ax1 = plt.subplots(figsize=format_config.plot_figsize, dpi=DPI_value)
+
+    # Plot RMSE
+    color_rmse = format_config.rmse_color
+    ax1.set_xlabel('Prediction Step')
+    ax1.set_ylabel('RMSE', color=color_rmse)
+    ax1.plot(steps, per_step_rmse, format_config.rmse_marker, color=color_rmse, label='RMSE')
+    ax1.tick_params(axis='y', labelcolor=color_rmse)
+    ax1.grid(True, linestyle=format_config.grid_style)
+
+    # Create a second y-axis for MAE
+    ax2 = ax1.twinx()
+    color_mae = format_config.mae_color
+    ax2.set_ylabel('MAE', color=color_mae)
+    ax2.plot(steps, per_step_mae, format_config.mae_marker, color=color_mae, label='MAE')
+    ax2.tick_params(axis='y', labelcolor=color_mae)
+
+    fig.suptitle('Sequence-to-Sequence Metrics (Per-Step)')
+
+    # Add a single legend
+    lines, labels = ax1.get_legend_handles_labels()
+    lines2, labels2 = ax2.get_legend_handles_labels()
+    ax2.legend(lines + lines2, labels + labels2, loc='best')
+
+    fig.tight_layout(rect=(0, 0.03, 1, 0.95))  # Adjust for suptitle
+
+    plot_path = save_dir_path / "sequence_to_sequence_metrics_plot.svg"
+    plt.savefig(plot_path)
+    _LOGGER.info(f"📈 Seq-to-Seq per-step metrics plot saved as '{plot_path.name}'")
+    plt.close(fig)
+
+    # --- Restore RC params ---
+    plt.rcParams.update(original_rc_params)
+
+
+def info():
+    _script_info(__all__)
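
Both evaluators write their reports and SVG plots straight to disk. A minimal sketch with synthetic predictions follows; the random arrays and save directories are placeholders, and the argument order and expected shapes come from the docstrings above (config may be omitted, in which case the default format objects are used).

import numpy as np

from ml_tools.ML_sequence_evaluation import (
    sequence_to_value_metrics,
    sequence_to_sequence_metrics,
)

rng = np.random.default_rng(0)

# Many-to-one: 1D arrays of true and predicted values.
y_true_v = rng.normal(size=200)
y_pred_v = y_true_v + rng.normal(scale=0.1, size=200)
sequence_to_value_metrics(y_true_v, y_pred_v, save_dir="eval/seq_to_value")

# Many-to-many: 2D arrays of shape (n_samples, sequence_length).
y_true_s = rng.normal(size=(200, 32))
y_pred_s = y_true_s + rng.normal(scale=0.1, size=(200, 32))
sequence_to_sequence_metrics(y_true_s, y_pred_s, save_dir="eval/seq_to_seq")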