pydmoo 0.0.18__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pydmoo/algorithms/base/__init__.py +20 -0
- pydmoo/algorithms/base/core/__init__.py +0 -0
- pydmoo/algorithms/base/core/algorithm.py +416 -0
- pydmoo/algorithms/base/core/genetic.py +129 -0
- pydmoo/algorithms/base/dmoo/__init__.py +0 -0
- pydmoo/algorithms/base/dmoo/dmoead.py +131 -0
- pydmoo/algorithms/base/dmoo/dmoeadde.py +131 -0
- pydmoo/algorithms/base/dmoo/dmopso.py +0 -0
- pydmoo/algorithms/base/dmoo/dnsga2.py +137 -0
- pydmoo/algorithms/base/moo/__init__.py +0 -0
- pydmoo/algorithms/base/moo/moead.py +199 -0
- pydmoo/algorithms/base/moo/moeadde.py +105 -0
- pydmoo/algorithms/base/moo/mopso.py +0 -0
- pydmoo/algorithms/base/moo/nsga2.py +122 -0
- pydmoo/algorithms/modern/__init__.py +94 -0
- pydmoo/algorithms/modern/moead_imkt.py +161 -0
- pydmoo/algorithms/modern/moead_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/moead_imkt_lstm.py +109 -0
- pydmoo/algorithms/modern/moead_imkt_n.py +117 -0
- pydmoo/algorithms/modern/moead_imkt_n_igp.py +56 -0
- pydmoo/algorithms/modern/moead_imkt_n_lstm.py +111 -0
- pydmoo/algorithms/modern/moead_ktmm.py +112 -0
- pydmoo/algorithms/modern/moeadde_imkt.py +161 -0
- pydmoo/algorithms/modern/moeadde_imkt_clstm.py +223 -0
- pydmoo/algorithms/modern/moeadde_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/moeadde_imkt_lstm.py +212 -0
- pydmoo/algorithms/modern/moeadde_imkt_n.py +117 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_clstm.py +146 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_igp.py +56 -0
- pydmoo/algorithms/modern/moeadde_imkt_n_lstm.py +114 -0
- pydmoo/algorithms/modern/moeadde_ktmm.py +112 -0
- pydmoo/algorithms/modern/nsga2_imkt.py +162 -0
- pydmoo/algorithms/modern/nsga2_imkt_clstm.py +223 -0
- pydmoo/algorithms/modern/nsga2_imkt_igp.py +56 -0
- pydmoo/algorithms/modern/nsga2_imkt_lstm.py +248 -0
- pydmoo/algorithms/modern/nsga2_imkt_n.py +117 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_clstm.py +146 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_igp.py +57 -0
- pydmoo/algorithms/modern/nsga2_imkt_n_lstm.py +154 -0
- pydmoo/algorithms/modern/nsga2_ktmm.py +112 -0
- pydmoo/algorithms/utils/__init__.py +0 -0
- pydmoo/algorithms/utils/utils.py +166 -0
- pydmoo/core/__init__.py +0 -0
- pydmoo/{response → core}/ar_model.py +4 -4
- pydmoo/{response → core}/bounds.py +35 -2
- pydmoo/core/distance.py +45 -0
- pydmoo/core/inverse.py +55 -0
- pydmoo/core/lstm/__init__.py +0 -0
- pydmoo/core/lstm/base.py +291 -0
- pydmoo/core/lstm/lstm.py +491 -0
- pydmoo/core/manifold.py +93 -0
- pydmoo/core/predictions.py +50 -0
- pydmoo/core/sample_gaussian.py +56 -0
- pydmoo/core/sample_uniform.py +63 -0
- pydmoo/{response/tca_model.py → core/transfer.py} +3 -3
- pydmoo/problems/__init__.py +53 -49
- pydmoo/problems/dyn.py +94 -13
- pydmoo/problems/dynamic/cec2015.py +10 -5
- pydmoo/problems/dynamic/df.py +6 -3
- pydmoo/problems/dynamic/gts.py +69 -34
- pydmoo/problems/real_world/__init__.py +0 -0
- pydmoo/problems/real_world/dsrp.py +168 -0
- pydmoo/problems/real_world/dwbdp.py +189 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/METADATA +11 -10
- pydmoo-0.1.0.dist-info/RECORD +70 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/WHEEL +1 -1
- pydmoo-0.0.18.dist-info/RECORD +0 -15
- /pydmoo/{response → algorithms}/__init__.py +0 -0
- {pydmoo-0.0.18.dist-info → pydmoo-0.1.0.dist-info}/licenses/LICENSE +0 -0
pydmoo/core/lstm/base.py
ADDED
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import random
|
|
3
|
+
|
|
4
|
+
import numpy as np
|
|
5
|
+
import torch
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class TimeSeriesBase:
    """Shared foundation for time-series forecasting models.

    Provides common plumbing for sequence models: tensor conversion,
    sliding-window training-data preparation, DataLoader construction,
    and iterative multi-step prediction.

    Attributes
    ----------
    sequence_length : int
        Number of historical time steps fed into each prediction.
    device : torch.device
        Device (CPU or GPU) used for model inference.
    model_type : str
        Architecture family, either 'lstm' or 'transformer'.
    """

    def __init__(self, sequence_length: int, device: torch.device, model_type: str = "lstm") -> None:
        """Set up the base configuration for a time-series model.

        Parameters
        ----------
        sequence_length : int
            Number of historical time steps fed into each prediction.
        device : torch.device
            Device (CPU or GPU) used for model inference.
        model_type : str, optional
            Architecture family, by default "lstm".
            Supported values: 'lstm', 'transformer'

        Raises
        ------
        ValueError
            If `model_type` is not one of the supported values.
        """
        supported = ("lstm", "transformer")
        if model_type not in supported:
            raise ValueError(f"Unsupported model_type: {model_type}. Use 'lstm' or 'transformer'.")

        self.sequence_length = sequence_length
        self.device = device
        self.model_type = model_type
|
|
48
|
+
|
|
49
|
+
def _set_random_seed(self, seed: int, deterministic: bool = True) -> None:
    """Seed every RNG source (hash, `random`, numpy, torch) for reproducibility.

    Parameters
    ----------
    seed : int
        Seed applied to all generators.
    deterministic : bool, optional
        When True and running on CUDA, force cuDNN/torch into
        deterministic modes, by default True.

    Notes
    -----
    This method mutates process-wide state (environment variables,
    torch default device/dtype, cuDNN backend flags), not just this
    instance.
    """
    # Python string hashing can only be seeded via the environment.
    os.environ['PYTHONHASHSEED'] = str(seed)

    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    # NOTE(review): `self.device` is typed as torch.device elsewhere in
    # this class; `torch.device("cpu") == "cpu"` compares equal, so this
    # branch fires for both str and torch.device — confirm intended.
    if self.device == "cpu":
        # Hide GPUs from torch and pin defaults to CPU/float32.
        # These are global side effects for the whole process.
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        torch.set_default_device('cpu')
        torch.set_default_dtype(torch.float32)

    elif torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        if deterministic:
            # Trade throughput for run-to-run reproducibility on CUDA.
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
            # Guarded: older torch releases lack this API.
            if hasattr(torch, 'use_deterministic_algorithms'):
                torch.use_deterministic_algorithms(True)
|
|
69
|
+
|
|
70
|
+
def set_seed(self, seed: int) -> None:
    """Record `seed` on the instance and re-seed every RNG source.

    Parameters
    ----------
    seed : int
        Seed value forwarded to `_set_random_seed`.
    """
    # Keep a copy of the seed for later inspection, then apply it.
    self.seed = seed
    self._set_random_seed(seed)
|
|
80
|
+
|
|
81
|
+
def convert_to_tensor(self, time_series_data: list[list[float]]) -> torch.Tensor:
    """Convert input time series data to a PyTorch tensor.

    Parameters
    ----------
    time_series_data : list[list[float]]
        Input time series as list of feature vectors.
        Shape: (n_timesteps, n_features)

    Returns
    -------
    series_data : torch.Tensor
        float32 tensor of shape (n_timesteps, n_features)

    Raises
    ------
    ValueError
        If input data is invalid (empty, non-numeric, or ragged),
        not 2D, or insufficient for the configured sequence length.

    Notes
    -----
    Performs comprehensive validation including:
    - Data type checking
    - Array dimensionality validation
    - Sequence length sufficiency check
    """
    # Reject empty input and non-numeric elements up front so callers
    # always see the documented ValueError, not numpy internals.
    if not time_series_data or not all(isinstance(x, (int, float)) for row in time_series_data for x in row):
        raise ValueError("Invalid time series data")

    # Convert via numpy for a single-pass float32 coercion. Ragged rows
    # make np.array raise; re-raise as the documented ValueError with
    # the original error chained as the cause.
    try:
        np_array = np.array(time_series_data, dtype=np.float32)
    except (ValueError, TypeError) as err:
        raise ValueError("Invalid time series data") from err

    # Validate array shape and dimensions
    if np_array.ndim != 2:
        raise ValueError(f"Expected 2D array, got {np_array.ndim}D array")

    n_timesteps, _ = np_array.shape  # (n_timesteps, n_features)

    # At least sequence_length + 1 timesteps are needed to form one
    # (window, target) training pair downstream.
    if self.sequence_length >= n_timesteps:
        raise ValueError(
            f"Sequence length {self.sequence_length} must be less than number of timesteps {n_timesteps}"
        )

    # torch.from_numpy is the modern zero-copy conversion (the legacy
    # torch.FloatTensor(ndarray) constructor is discouraged). np_array
    # is local, so the shared buffer cannot be mutated elsewhere.
    series_data = torch.from_numpy(np_array)
    return series_data
|
|
129
|
+
|
|
130
|
+
def prepare_training_data(self, series_data: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    """Build (window, next-step) training pairs with a sliding window.

    Parameters
    ----------
    series_data : torch.Tensor
        Input time series tensor of shape (n_timesteps, n_features)

    Returns
    -------
    sequences_tensor : torch.Tensor
        Training sequences of shape (n_samples, sequence_length, n_features)
    targets_tensor : torch.Tensor
        Target values of shape (n_samples, n_features)

    Notes
    -----
    A window starting at index i covers steps [i, i + sequence_length)
    and its target is step i + sequence_length, so the number of pairs
    is n_timesteps - sequence_length. Overlapping windows preserve the
    temporal continuity of the original series.
    """
    window = self.sequence_length
    n_windows = series_data.shape[0] - window

    # Gather all windows at once: starts is a column of window origins,
    # offsets a row of in-window positions; broadcasting yields the full
    # (n_windows, window) index grid used for advanced indexing.
    starts = torch.arange(n_windows).reshape(-1, 1)
    offsets = torch.arange(window)
    sequences_tensor = series_data[starts + offsets]  # (n_windows, window, n_features)

    # Each target is the step immediately after its window.
    targets_tensor = series_data[window:]  # (n_windows, n_features)

    return sequences_tensor, targets_tensor
|
|
164
|
+
|
|
165
|
+
@staticmethod
|
|
166
|
+
def create_training_dataloader(
|
|
167
|
+
sequences_tensor: torch.Tensor, targets_tensor: torch.Tensor, batch_size: int = 32
|
|
168
|
+
) -> torch.utils.data.DataLoader:
|
|
169
|
+
"""Create DataLoader for time series training data.
|
|
170
|
+
|
|
171
|
+
Parameters
|
|
172
|
+
----------
|
|
173
|
+
sequences_tensor : torch.Tensor
|
|
174
|
+
Input sequences of shape (n_samples, sequence_length, n_features)
|
|
175
|
+
targets_tensor : torch.Tensor
|
|
176
|
+
Target values of shape (n_samples, n_features)
|
|
177
|
+
batch_size : int, optional
|
|
178
|
+
Number of samples per batch, by default 32
|
|
179
|
+
|
|
180
|
+
Returns
|
|
181
|
+
-------
|
|
182
|
+
dataloader : torch.utils.data.DataLoader
|
|
183
|
+
Configured DataLoader for training
|
|
184
|
+
|
|
185
|
+
Notes
|
|
186
|
+
-----
|
|
187
|
+
Key configurations:
|
|
188
|
+
- shuffle=False: Maintains temporal order for time series data
|
|
189
|
+
- pin_memory=False: Optimizes GPU data transfer
|
|
190
|
+
- drop_last=False: Uses all available samples
|
|
191
|
+
- num_workers=0: Avoids multiprocessing issues
|
|
192
|
+
|
|
193
|
+
Temporal ordering is critical for time series data to preserve
|
|
194
|
+
the sequential dependencies between data points.
|
|
195
|
+
"""
|
|
196
|
+
dataset = torch.utils.data.TensorDataset(sequences_tensor, targets_tensor)
|
|
197
|
+
|
|
198
|
+
dataloader = torch.utils.data.DataLoader(
|
|
199
|
+
dataset,
|
|
200
|
+
batch_size=batch_size,
|
|
201
|
+
shuffle=False, # Critical for time series data
|
|
202
|
+
num_workers=0,
|
|
203
|
+
pin_memory=False,
|
|
204
|
+
drop_last=False,
|
|
205
|
+
)
|
|
206
|
+
|
|
207
|
+
return dataloader
|
|
208
|
+
|
|
209
|
+
def predict_future(
|
|
210
|
+
self, model, historical_data: torch.Tensor | list[list[float]], n_steps: int = 1
|
|
211
|
+
) -> torch.Tensor:
|
|
212
|
+
"""Generate multiple future predictions using iterative forecasting.
|
|
213
|
+
|
|
214
|
+
Parameters
|
|
215
|
+
----------
|
|
216
|
+
model : torch.nn.Module
|
|
217
|
+
Trained model for time series prediction.
|
|
218
|
+
historical_data : torch.Tensor | list[list[float]]
|
|
219
|
+
Historical time series data of shape (n_timesteps, n_features).
|
|
220
|
+
n_steps : int, optional
|
|
221
|
+
Number of future time steps to predict, by default 1.
|
|
222
|
+
|
|
223
|
+
Returns
|
|
224
|
+
-------
|
|
225
|
+
predictions : torch.Tensor
|
|
226
|
+
Predicted values for future time steps of shape (n_steps, n_features).
|
|
227
|
+
|
|
228
|
+
Raises
|
|
229
|
+
------
|
|
230
|
+
ValueError
|
|
231
|
+
If model has not been trained before prediction or if model_type is unsupported.
|
|
232
|
+
|
|
233
|
+
Notes
|
|
234
|
+
-----
|
|
235
|
+
Uses recursive prediction strategy with model-specific handling:
|
|
236
|
+
|
|
237
|
+
- For LSTM and Transformer models:
|
|
238
|
+
1. Extract the most recent `sequence_length` points as the initial context.
|
|
239
|
+
2. Predict the next time step.
|
|
240
|
+
3. Slide the window: drop the oldest point, append the new prediction.
|
|
241
|
+
4. Repeat for `n_steps`.
|
|
242
|
+
|
|
243
|
+
- Critical design choice for LSTM:
|
|
244
|
+
Hidden state is **reset to `None` at every step** to match the training protocol,
|
|
245
|
+
where each batch sample was processed independently with zero-initialized hidden states.
|
|
246
|
+
This ensures consistency between training and inference, improving stability and reproducibility.
|
|
247
|
+
|
|
248
|
+
- Output predictions are moved to CPU to avoid GPU memory bloat during long continual loops.
|
|
249
|
+
"""
|
|
250
|
+
if not isinstance(historical_data, torch.Tensor):
|
|
251
|
+
historical_data = self.convert_to_tensor(historical_data)
|
|
252
|
+
|
|
253
|
+
if model is None:
|
|
254
|
+
raise ValueError("Model must be trained first")
|
|
255
|
+
|
|
256
|
+
if len(historical_data) < self.sequence_length:
|
|
257
|
+
raise ValueError(
|
|
258
|
+
f"historical_data has only {len(historical_data)} timesteps, "
|
|
259
|
+
f"but sequence_length = {self.sequence_length} is required."
|
|
260
|
+
)
|
|
261
|
+
|
|
262
|
+
# Initialize with historical data (sequence_length, n_features)
|
|
263
|
+
current_sequence = historical_data[-self.sequence_length:].clone().to(self.device)
|
|
264
|
+
predictions = []
|
|
265
|
+
|
|
266
|
+
model.eval()
|
|
267
|
+
|
|
268
|
+
with torch.no_grad():
|
|
269
|
+
for _ in range(n_steps):
|
|
270
|
+
# Prepare input by adding batch dimension
|
|
271
|
+
input_seq = current_sequence.unsqueeze(0) # (1, sequence_length, n_features); already on device
|
|
272
|
+
|
|
273
|
+
# Generate prediction using trained model with model-specific handling
|
|
274
|
+
if self.model_type == "lstm":
|
|
275
|
+
# CRITICAL: Use `hidden=None` to match training protocol.
|
|
276
|
+
# Training used independent sequences with zero-initialized states.
|
|
277
|
+
# Passing a carried-over hidden would create state-input misalignment.
|
|
278
|
+
pred, _ = model(input_seq, hidden=None) # (1, n_features); stateless sliding window
|
|
279
|
+
elif self.model_type == "transformer":
|
|
280
|
+
# Transformer has no recurrent state — naturally stateless.
|
|
281
|
+
pred = model(input_seq) # (1, n_features)
|
|
282
|
+
else:
|
|
283
|
+
raise ValueError(f"Unsupported model_type: {self.model_type}. Use 'lstm' or 'transformer'.")
|
|
284
|
+
|
|
285
|
+
pred = pred.squeeze(0) # (n_features,), still on device
|
|
286
|
+
predictions.append(pred.cpu()) # only detach & move to CPU for storage, leaves original pred unchanged
|
|
287
|
+
|
|
288
|
+
# Update sequence: remove oldest point, append new prediction
|
|
289
|
+
current_sequence = torch.cat([current_sequence[1:], pred.unsqueeze(0)]) # stays on device
|
|
290
|
+
|
|
291
|
+
return torch.stack(predictions) # Stack into (n_steps, n_features)
|