dragon-ml-toolbox 14.7.0__py3-none-any.whl → 16.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dragon_ml_toolbox-14.7.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/METADATA +9 -5
- dragon_ml_toolbox-16.2.0.dist-info/RECORD +51 -0
- ml_tools/ETL_cleaning.py +20 -20
- ml_tools/ETL_engineering.py +23 -25
- ml_tools/GUI_tools.py +20 -20
- ml_tools/MICE_imputation.py +3 -3
- ml_tools/ML_callbacks.py +43 -26
- ml_tools/ML_configuration.py +704 -24
- ml_tools/ML_datasetmaster.py +235 -280
- ml_tools/ML_evaluation.py +144 -39
- ml_tools/ML_evaluation_multi.py +103 -35
- ml_tools/ML_inference.py +290 -208
- ml_tools/ML_models.py +13 -102
- ml_tools/ML_models_advanced.py +1 -1
- ml_tools/ML_optimization.py +12 -12
- ml_tools/ML_scaler.py +11 -11
- ml_tools/ML_sequence_datasetmaster.py +341 -0
- ml_tools/ML_sequence_evaluation.py +219 -0
- ml_tools/ML_sequence_inference.py +391 -0
- ml_tools/ML_sequence_models.py +139 -0
- ml_tools/ML_trainer.py +1342 -386
- ml_tools/ML_utilities.py +1 -1
- ml_tools/ML_vision_datasetmaster.py +120 -72
- ml_tools/ML_vision_evaluation.py +30 -6
- ml_tools/ML_vision_inference.py +129 -152
- ml_tools/ML_vision_models.py +1 -1
- ml_tools/ML_vision_transformers.py +121 -40
- ml_tools/PSO_optimization.py +6 -6
- ml_tools/SQL.py +4 -4
- ml_tools/{keys.py → _keys.py} +45 -0
- ml_tools/_schema.py +1 -1
- ml_tools/ensemble_evaluation.py +1 -1
- ml_tools/ensemble_inference.py +7 -33
- ml_tools/ensemble_learning.py +1 -1
- ml_tools/optimization_tools.py +2 -2
- ml_tools/path_manager.py +5 -5
- ml_tools/utilities.py +1 -2
- dragon_ml_toolbox-14.7.0.dist-info/RECORD +0 -49
- ml_tools/RNN_forecast.py +0 -56
- ml_tools/_ML_vision_recipe.py +0 -88
- {dragon_ml_toolbox-14.7.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/WHEEL +0 -0
- {dragon_ml_toolbox-14.7.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/licenses/LICENSE +0 -0
- {dragon_ml_toolbox-14.7.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/licenses/LICENSE-THIRD-PARTY.md +0 -0
- {dragon_ml_toolbox-14.7.0.dist-info → dragon_ml_toolbox-16.2.0.dist-info}/top_level.txt +0 -0
ml_tools/ML_sequence_models.py

@@ -0,0 +1,139 @@
+import torch
+from torch import nn
+from typing import Literal
+
+from ._logger import _LOGGER
+from ._script_info import _script_info
+from ._keys import MLTaskKeys
+from .ML_models import _ArchitectureHandlerMixin
+
+
+__all__ = [
+    "DragonSequenceLSTM"
+]
+
+
+class DragonSequenceLSTM(nn.Module, _ArchitectureHandlerMixin):
+    """
+    An LSTM-based network for single-feature (univariate) sequence prediction tasks.
+
+    It can be configured for:
+    1. 'sequence-to-sequence': Predicts a full sequence.
+    2. 'sequence-to-value': Predicts a single value from the last time step.
+    """
+    def __init__(self,
+                 prediction_mode: Literal["sequence-to-sequence", "sequence-to-value"],
+                 hidden_size: int = 100,
+                 recurrent_layers: int = 1,
+                 dropout: float = 0.1):
+        """
+        Args:
+            prediction_mode (str): Determines the model's output behavior.
+                - 'sequence-to-sequence': Returns a full sequence.
+                - 'sequence-to-value': Returns a single prediction based on the last time step.
+            hidden_size (int): The number of features in the LSTM's hidden state.
+            recurrent_layers (int): The number of recurrent LSTM layers.
+            dropout (float): The dropout probability for all but the last LSTM layer.
+        """
+        super().__init__()
+
+        # --- Validation ---
+        if prediction_mode not in [MLTaskKeys.SEQUENCE_SEQUENCE, MLTaskKeys.SEQUENCE_VALUE]:
+            _LOGGER.error(f"Unrecognized prediction mode: '{prediction_mode}'.")
+            raise ValueError()
+        else:
+            self.prediction_mode = prediction_mode
+
+        if not isinstance(hidden_size, int) or hidden_size < 1:
+            _LOGGER.error("hidden_size must be a positive integer.")
+            raise ValueError()
+        if not isinstance(recurrent_layers, int) or recurrent_layers < 1:
+            _LOGGER.error("recurrent_layers must be a positive integer.")
+            raise ValueError()
+        if not (0.0 <= dropout < 1.0):
+            _LOGGER.error("dropout must be a float between 0.0 and 1.0.")
+            raise ValueError()
+
+        # --- Save configuration ---
+        self.features = 1  # Univariate
+        self.hidden_size = hidden_size
+        self.recurrent_layers = recurrent_layers
+        self.dropout = dropout
+
+        # Build model
+        self.lstm = nn.LSTM(
+            input_size=self.features,
+            hidden_size=hidden_size,
+            num_layers=recurrent_layers,
+            dropout=dropout,
+            batch_first=True  # This is crucial for (batch, seq, feature) input
+        )
+        self.linear = nn.Linear(in_features=hidden_size, out_features=self.features)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """
+        Defines the forward pass.
+
+        Args:
+            x (torch.Tensor): The input tensor. Can be 2D (batch_size, sequence_length)
+                or 3D (batch_size, sequence_length, features). 2D inputs are handled
+                automatically by assuming a feature size of 1.
+
+        Returns:
+            torch.Tensor: The output tensor.
+                - (batch_size, sequence_length) if 'sequence-to-sequence'
+                - (batch_size,) if 'sequence-to-value'
+        """
+        # --- Handle Input Shape ---
+        if x.ndim == 2:
+            # Check if this 2D input is compatible with the model's expected features
+            if self.features != 1:
+                _LOGGER.error(f"Received 2D input (shape {x.shape}), but model was initialized with features={self.features}.")
+                raise ValueError()
+
+            # Add the feature dimension: (batch_size, seq_len) -> (batch_size, seq_len, 1)
+            x = x.unsqueeze(-1)
+
+        # x is guaranteed to be 3D: (batch_size, seq_len, features)
+        # The LSTM returns the full output sequence and the final hidden/cell states
+        lstm_out, _ = self.lstm(x)
+
+        # --- Handle Output Shape based on mode ---
+        if self.prediction_mode == MLTaskKeys.SEQUENCE_SEQUENCE:
+            # Use the full sequence; output shape: (batch_size, seq_len, 1)
+            predictions = self.linear(lstm_out)
+            # Squeeze to (batch_size, seq_len) to match the target
+            predictions = predictions.squeeze(-1)
+
+        elif self.prediction_mode == MLTaskKeys.SEQUENCE_VALUE:
+            # Isolate only the last time step's output
+            # last_step shape: (batch_size, hidden_size)
+            last_step = lstm_out[:, -1, :]
+            predictions = self.linear(last_step)
+
+            # Squeeze the 'features' dim to match the label shape
+            predictions = predictions.squeeze(-1)
+
+        return predictions
+
+    def get_architecture_config(self) -> dict:
+        """Returns the configuration of the model."""
+        return {
+            'hidden_size': self.hidden_size,
+            'recurrent_layers': self.recurrent_layers,
+            'prediction_mode': self.prediction_mode,
+            'dropout': self.dropout
+        }
+
+    def __repr__(self) -> str:
+        """Returns the developer-friendly string representation of the model."""
+        return (
+            f"DragonSequenceLSTM(features={self.lstm.input_size}, "
+            f"hidden_size={self.lstm.hidden_size}, "
+            f"recurrent_layers={self.lstm.num_layers}, "
+            f"mode='{self.prediction_mode}')")
+
+
+def info():
+    _script_info(__all__)
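
As a quick orientation for the new module, here is a minimal usage sketch (not taken from the package's own docs). It assumes the class is importable as `ml_tools.ML_sequence_models.DragonSequenceLSTM` and that the `MLTaskKeys` constants equal the literal strings in the `Literal` type hint, so the mode strings below pass validation; the shapes in the comments follow the docstring and squeeze logic above.

import torch
from ml_tools.ML_sequence_models import DragonSequenceLSTM  # assumed import path

# A dummy batch of 8 univariate sequences, each 24 time steps long (2D input;
# the model adds the trailing feature dimension itself).
batch = torch.randn(8, 24)

# Sequence-to-sequence: one prediction per time step.
seq_model = DragonSequenceLSTM(prediction_mode="sequence-to-sequence",
                               hidden_size=64, recurrent_layers=2, dropout=0.1)
print(seq_model(batch).shape)   # expected: torch.Size([8, 24])

# Sequence-to-value: a single prediction from the last time step.
# dropout=0.0 avoids PyTorch's warning about dropout with a single LSTM layer.
val_model = DragonSequenceLSTM(prediction_mode="sequence-to-value",
                               hidden_size=64, recurrent_layers=1, dropout=0.0)
print(val_model(batch).shape)   # expected: torch.Size([8])

# The architecture config mirrors the constructor arguments.
print(seq_model.get_architecture_config())
# {'hidden_size': 64, 'recurrent_layers': 2, 'prediction_mode': 'sequence-to-sequence', 'dropout': 0.1}

Because the outputs are squeezed, training targets would be shaped (batch_size, sequence_length) for sequence-to-sequence and (batch_size,) for sequence-to-value to match the predictions.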