moospread 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- moospread/__init__.py +3 -0
- moospread/core.py +1881 -0
- moospread/problem.py +193 -0
- moospread/tasks/__init__.py +4 -0
- moospread/tasks/dtlz_torch.py +139 -0
- moospread/tasks/mw_torch.py +274 -0
- moospread/tasks/re_torch.py +394 -0
- moospread/tasks/zdt_torch.py +112 -0
- moospread/utils/__init__.py +8 -0
- moospread/utils/constraint_utils/__init__.py +2 -0
- moospread/utils/constraint_utils/gradient.py +72 -0
- moospread/utils/constraint_utils/mgda_core.py +69 -0
- moospread/utils/constraint_utils/pmgda_solver.py +308 -0
- moospread/utils/constraint_utils/prefs.py +64 -0
- moospread/utils/ditmoo.py +127 -0
- moospread/utils/lhs.py +74 -0
- moospread/utils/misc.py +28 -0
- moospread/utils/mobo_utils/__init__.py +11 -0
- moospread/utils/mobo_utils/evolution/__init__.py +0 -0
- moospread/utils/mobo_utils/evolution/dom.py +60 -0
- moospread/utils/mobo_utils/evolution/norm.py +40 -0
- moospread/utils/mobo_utils/evolution/utils.py +97 -0
- moospread/utils/mobo_utils/learning/__init__.py +0 -0
- moospread/utils/mobo_utils/learning/model.py +40 -0
- moospread/utils/mobo_utils/learning/model_init.py +33 -0
- moospread/utils/mobo_utils/learning/model_update.py +51 -0
- moospread/utils/mobo_utils/learning/prediction.py +116 -0
- moospread/utils/mobo_utils/learning/utils.py +143 -0
- moospread/utils/mobo_utils/lhs_for_mobo.py +243 -0
- moospread/utils/mobo_utils/mobo/__init__.py +0 -0
- moospread/utils/mobo_utils/mobo/acquisition.py +209 -0
- moospread/utils/mobo_utils/mobo/algorithms.py +91 -0
- moospread/utils/mobo_utils/mobo/factory.py +86 -0
- moospread/utils/mobo_utils/mobo/mobo.py +132 -0
- moospread/utils/mobo_utils/mobo/selection.py +182 -0
- moospread/utils/mobo_utils/mobo/solver/__init__.py +5 -0
- moospread/utils/mobo_utils/mobo/solver/moead.py +17 -0
- moospread/utils/mobo_utils/mobo/solver/nsga2.py +10 -0
- moospread/utils/mobo_utils/mobo/solver/parego/__init__.py +1 -0
- moospread/utils/mobo_utils/mobo/solver/parego/parego.py +62 -0
- moospread/utils/mobo_utils/mobo/solver/parego/utils.py +34 -0
- moospread/utils/mobo_utils/mobo/solver/pareto_discovery/__init__.py +1 -0
- moospread/utils/mobo_utils/mobo/solver/pareto_discovery/buffer.py +364 -0
- moospread/utils/mobo_utils/mobo/solver/pareto_discovery/pareto_discovery.py +571 -0
- moospread/utils/mobo_utils/mobo/solver/pareto_discovery/utils.py +168 -0
- moospread/utils/mobo_utils/mobo/solver/solver.py +74 -0
- moospread/utils/mobo_utils/mobo/surrogate_model/__init__.py +2 -0
- moospread/utils/mobo_utils/mobo/surrogate_model/base.py +36 -0
- moospread/utils/mobo_utils/mobo/surrogate_model/gaussian_process.py +177 -0
- moospread/utils/mobo_utils/mobo/surrogate_model/thompson_sampling.py +79 -0
- moospread/utils/mobo_utils/mobo/surrogate_problem.py +44 -0
- moospread/utils/mobo_utils/mobo/transformation.py +106 -0
- moospread/utils/mobo_utils/mobo/utils.py +65 -0
- moospread/utils/mobo_utils/spread_mobo_utils.py +854 -0
- moospread/utils/offline_utils/__init__.py +10 -0
- moospread/utils/offline_utils/handle_task.py +203 -0
- moospread/utils/offline_utils/proxies.py +338 -0
- moospread/utils/spread_utils.py +91 -0
- moospread-0.1.0.dist-info/METADATA +75 -0
- moospread-0.1.0.dist-info/RECORD +63 -0
- moospread-0.1.0.dist-info/WHEEL +5 -0
- moospread-0.1.0.dist-info/licenses/LICENSE +10 -0
- moospread-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,394 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn as nn
|
|
3
|
+
from pymoo.problems import get_problem
|
|
4
|
+
from moospread.problem import PymooProblemTorch
|
|
5
|
+
import os
|
|
6
|
+
import numpy as np
|
|
7
|
+
|
|
8
|
+
######## RE Problems ########
|
|
9
|
+
# Note: For the sake of differentiability, we will use the strict bounds:
|
|
10
|
+
# - Lower bound: xl + 1e-6 (instead of xl)
|
|
11
|
+
# - Upper bound: xu - 1e-6 (instead of xu)
|
|
12
|
+
# ref_point: The default reference points are suitable for the "online" mode.
|
|
13
|
+
|
|
14
|
+
class RE21(PymooProblemTorch):
    """RE21 problem (four-bar truss design in the RE suite), differentiable.

    Two objectives: a volume-type objective f0 and a displacement-type
    objective f1. Bounds are shrunk by 1e-6 on each side so gradients stay
    finite at the box boundary (see the module-level note).
    """

    def __init__(self, path=None, ref_point=None, **kwargs):
        # Force-to-stress ratio; equals 1.0 for the canonical F = sigma = 10.
        ratio = 10.0 / 10.0
        lower = torch.tensor([ratio,
                              (2.0**0.5) * ratio,
                              (2.0**0.5) * ratio,
                              ratio]) + 1e-6
        upper = torch.full((4,), 3.0 * ratio) - 1e-6
        super().__init__(n_var=4, n_obj=2, xl=lower, xu=upper,
                         vtype=float, **kwargs)
        # Default reference point is suited to the "online" mode.
        self.ref_point = [3144.44, 0.05] if ref_point is None else ref_point
        self.path = path

    def _calc_pareto_front(self, n_pareto_points: int = 100) -> torch.Tensor:
        # The front is read from a precomputed file; n_pareto_points is unused.
        assert self.path is not None, "Path to Pareto front file not specified."
        return torch.from_numpy(np.loadtxt(self.path)).to(self.device)

    def _evaluate(self, X: torch.Tensor, out: dict, *args, **kwargs) -> None:
        """
        X: (N, 4) with columns [x1, x2, x3, x4]
        Writes out["F"]: (N, 2) with [f0, f1]
        """
        # Accept a single vector by promoting it to a batch of one.
        if X.dim() == 1:
            X = X.unsqueeze(0)
        if X.size(-1) != 4:
            raise ValueError("X must be a (N, 4) tensor.")

        x1, x2, x3, x4 = X.unbind(dim=1)

        # Problem constants: load F, Young's modulus E, bar length L.
        load = 10.0
        modulus = 2.0e5
        length = 200.0
        root2 = torch.sqrt(torch.tensor(2.0, dtype=X.dtype, device=X.device))

        # First objective (vectorized over the batch).
        f0 = length * (2.0 * x1 + root2 * x2 + torch.sqrt(x3) + x4)

        # Second objective; clamp denominators for numerical safety even
        # though the bounds already keep every variable strictly positive.
        tiny = 1e-12
        c1, c2, c3, c4 = (torch.clamp(v, min=tiny) for v in (x1, x2, x3, x4))
        f1 = ((load * length) / modulus) * (
            (2.0 / c1) + (2.0 * root2 / c2) - (2.0 * root2 / c3) + (2.0 / c4)
        )

        out["F"] = torch.stack([f0, f1], dim=1)  # (N, 2)
|
76
|
+
|
|
77
|
+
class RE33(PymooProblemTorch):
    # RE33: 4 design variables, 3 objectives, where the third objective is
    # the aggregated constraint violation (presumably the disc-brake design
    # problem of the RE suite -- TODO confirm against Tanabe & Ishibuchi 2020).
    def __init__(self, path=None, ref_point=None, **kwargs):
        # Bounds are tightened by 1e-6 on each side so the problem stays
        # differentiable at the box boundary (see module-level note).
        super().__init__(n_var=4, n_obj=3,
                         xl=torch.tensor([55.0, 75.0, 1000.0, 11.0])+ 1e-6,
                         xu=torch.tensor([80.0, 110.0, 3000.0, 20.0])- 1e-6,
                         vtype=float, **kwargs)
        if ref_point is None:
            # Default reference point, suitable for the "online" mode.
            self.ref_point = [5.01, 9.84, 4.30]
        else:
            self.ref_point = ref_point

        # Optional path to a precomputed Pareto-front file.
        self.path = path

    def _calc_pareto_front(self, n_pareto_points: int = 100) -> torch.Tensor:
        # The front is loaded from a precomputed file; `n_pareto_points`
        # is accepted for interface compatibility but ignored here.
        assert self.path is not None, "Path to Pareto front file not specified."
        front = np.loadtxt(self.path)
        return torch.from_numpy(front).to(self.device)


    def _evaluate(self, X: torch.Tensor, out: dict, *args, **kwargs) -> None:
        """
        X: (N, 4) with columns [x1, x2, x3, x4]
        Returns: (N, 3) with [f0, f1, f2], where f2 is sum of constraint violations.
        """
        if X.dim() != 2 or X.size(-1) != 4:
            raise ValueError("X must be a (N, 4) tensor.")

        x1, x2, x3, x4 = X.unbind(dim=1)

        # ----- Objectives -----
        # f0 = 4.9e-5 * (x2^2 - x1^2) * (x4 - 1)
        d2 = x2**2 - x1**2
        f0 = 4.9e-5 * d2 * (x4 - 1.0)

        # f1 = (9.82e6) * (x2^2 - x1^2) / (x3 * x4 * (x2^3 - x1^3))
        d3 = x2**3 - x1**3
        eps = torch.tensor(1e-12, dtype=X.dtype, device=X.device)

        def safe_signed(x):
            # Clamp |x| away from zero while preserving sign, so divisions
            # below cannot blow up or flip sign when x crosses zero.
            s = torch.where(x >= 0, 1.0, -1.0)
            return s * torch.clamp(x.abs(), min=eps)

        denom_f1 = (torch.clamp(x3, min=eps) *
                    torch.clamp(x4, min=eps) *
                    safe_signed(d3))
        f1 = (9.82e6) * d2 / denom_f1

        # ----- Constraints (g >= 0 means satisfied) -----
        # g0 = (x2 - x1) - 20
        g0 = (x2 - x1) - 20.0

        # g1 = 0.4 - x3 / (pi * (x2^2 - x1^2))
        denom_g1 = torch.pi * safe_signed(d2)
        g1 = 0.4 - (x3 / denom_g1)

        # g2 = 1 - (2.22e-3 * x3 * (x2^3 - x1^3)) / (x2^2 - x1^2)^2
        denom_g2 = safe_signed(d2)**2
        g2 = 1.0 - (2.22e-3 * x3 * d3) / denom_g2

        # g3 = (2.66e-2 * x3 * x4 * (x2^3 - x1^3)) / (x2^2 - x1^2) - 900
        denom_g3 = safe_signed(d2)
        g3 = (2.66e-2 * x3 * x4 * d3) / denom_g3 - 900.0

        Gs = torch.stack([g0, g1, g2, g3], dim=1)  # (N, 4)
        # violation = -g if g < 0 else 0
        violations = torch.where(Gs < 0, -Gs, torch.zeros_like(Gs))
        f2 = violations.sum(dim=1)

        out["F"] = torch.stack([f0, f1, f2], dim=1)  # (N, 3)
|
+
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
class RE34(PymooProblemTorch):
    # RE34: 5 design variables in [1, 3], 3 regression-based objectives
    # (presumably the vehicle-crashworthiness problem of the RE suite --
    # TODO confirm against Tanabe & Ishibuchi 2020).
    def __init__(self, path = None, ref_point=None, **kwargs):
        # Bounds tightened by 1e-6 on each side for differentiability.
        super().__init__(n_var=5, n_obj=3,
                         xl=torch.full((5,), 1.0) + 1e-6,
                         xu=torch.full((5,), 3.0) - 1e-6,
                         vtype=float, **kwargs)
        if ref_point is None:
            # Default reference point, suitable for the "online" mode.
            self.ref_point = [1.86472022e+03, 1.18199394e+01, 2.90399938e-01]
        else:
            self.ref_point = ref_point

        # Optional path to a precomputed Pareto-front file.
        self.path = path

    def _calc_pareto_front(self, n_pareto_points: int = 100) -> torch.Tensor:
        # The front is loaded from a precomputed file; `n_pareto_points`
        # is accepted for interface compatibility but ignored here.
        assert self.path is not None, "Path to Pareto front file not specified."
        front = np.loadtxt(self.path)
        return torch.from_numpy(front).to(self.device)

    def _evaluate(self, x: torch.Tensor, out: dict, *args, **kwargs) -> None:
        """Evaluate the three polynomial surrogate objectives.

        x: (N, 5) batch (a single (5,) vector is promoted to (1, 5)).
        Writes out["F"]: (N, 3).
        """
        # ensure batch dimension
        if x.dim() == 1:
            x = x.unsqueeze(0)  # [5] → [1,5]

        # unpack variables
        x1 = x[:, 0]
        x2 = x[:, 1]
        x3 = x[:, 2]
        x4 = x[:, 3]
        x5 = x[:, 4]

        # prepare output buffer; filled column-by-column below
        f = torch.zeros((x.shape[0], self.n_obj),
                        dtype=x.dtype, device=x.device)

        # objective 1 (linear response surface)
        f[:, 0] = (
            1640.2823
            + 2.3573285 * x1
            + 2.3220035 * x2
            + 4.5688768 * x3
            + 7.7213633 * x4
            + 4.4559504 * x5
        )

        # objective 2 (quadratic response surface)
        f[:, 1] = (
            6.5856
            + 1.15 * x1
            - 1.0427 * x2
            + 0.9738 * x3
            + 0.8364 * x4
            - 0.3695 * x1 * x4
            + 0.0861 * x1 * x5
            + 0.3628 * x2 * x4
            - 0.1106 * x1 * x1
            - 0.3437 * x3 * x3
            + 0.1764 * x4 * x4
        )

        # objective 3 (quadratic response surface)
        f[:, 2] = (
            -0.0551
            + 0.0181 * x1
            + 0.1024 * x2
            + 0.0421 * x3
            - 0.0073 * x1 * x2
            + 0.0240 * x2 * x3
            - 0.0118 * x2 * x4
            - 0.0204 * x3 * x4
            - 0.0080 * x3 * x5
            - 0.0241 * x2 * x2
            + 0.0109 * x4 * x4
        )

        # NOTE(review): the output always keeps the batch dimension -- a
        # single input vector yields shape (1, 3), not (3,).
        out["F"] = f
|
+
|
|
226
|
+
|
|
227
|
+
class RE37(PymooProblemTorch):
    """Rocket Injector Design (RE37).

    Four normalized design variables in [0, 1] (hydrogen flow angle,
    area ratios, and post tip thickness, per the variable names below);
    three regression-based objectives. Bounds are tightened by 1e-6 on
    each side for differentiability (see module-level note).
    """

    def __init__(self, path=None, ref_point=None, **kwargs):
        super().__init__(n_var=4, n_obj=3,
                         xl=torch.zeros(4) + 1e-6,
                         xu=torch.ones(4) - 1e-6,
                         vtype=float, **kwargs)
        if ref_point is None:
            # Default reference point, suitable for the "online" mode.
            self.ref_point = [1.1022, 1.20726899, 1.20318656]
        else:
            self.ref_point = ref_point

        # Optional path to a precomputed Pareto-front file.
        self.path = path

    def _calc_pareto_front(self, n_pareto_points: int = 100) -> torch.Tensor:
        # The front is loaded from a precomputed file; `n_pareto_points`
        # is accepted for interface compatibility but ignored here.
        assert self.path is not None, "Path to Pareto front file not specified."
        front = np.loadtxt(self.path)
        return torch.from_numpy(front).to(self.device)

    def _evaluate(self, x: torch.Tensor, out: dict, *args, **kwargs) -> None:
        # NOTE(review): unlike RE21/RE34, this expects x to already be 2-D
        # (B, 4); a bare (4,) vector is not promoted here -- confirm callers.
        # Unpack features: each is (B,)
        xAlpha, xHA, xOA, xOPTT = x.unbind(dim=1)

        # Compute objectives vectorized over batch
        f1 = (
            0.692
            + 0.477 * xAlpha
            - 0.687 * xHA
            - 0.080 * xOA
            - 0.0650 * xOPTT
            - 0.167 * xAlpha**2
            - 0.0129 * xHA * xAlpha
            + 0.0796 * xHA**2
            - 0.0634 * xOA * xAlpha
            - 0.0257 * xOA * xHA
            + 0.0877 * xOA**2
            - 0.0521 * xOPTT * xAlpha
            + 0.00156 * xOPTT * xHA
            + 0.00198 * xOPTT * xOA
            + 0.0184 * xOPTT**2
        )

        f2 = (
            0.153
            - 0.322 * xAlpha
            + 0.396 * xHA
            + 0.424 * xOA
            + 0.0226 * xOPTT
            + 0.175 * xAlpha**2
            + 0.0185 * xHA * xAlpha
            - 0.0701 * xHA**2
            - 0.251 * xOA * xAlpha
            + 0.179 * xOA * xHA
            + 0.0150 * xOA**2
            + 0.0134 * xOPTT * xAlpha
            + 0.0296 * xOPTT * xHA
            + 0.0752 * xOPTT * xOA
            + 0.0192 * xOPTT**2
        )

        # Third objective includes cubic cross terms.
        f3 = (
            0.370
            - 0.205 * xAlpha
            + 0.0307 * xHA
            + 0.108 * xOA
            + 1.019 * xOPTT
            - 0.135 * xAlpha**2
            + 0.0141 * xHA * xAlpha
            + 0.0998 * xHA**2
            + 0.208 * xOA * xAlpha
            - 0.0301 * xOA * xHA
            - 0.226 * xOA**2
            + 0.353 * xOPTT * xAlpha
            - 0.0497 * xOPTT * xOA
            - 0.423 * xOPTT**2
            + 0.202 * xHA * xAlpha**2
            - 0.281 * xOA * xAlpha**2
            - 0.342 * xHA**2 * xAlpha
            - 0.245 * xHA**2 * xOA
            + 0.281 * xOA**2 * xHA
            - 0.184 * xOPTT**2 * xAlpha
            - 0.281 * xHA * xAlpha * xOA
        )

        # Stack into (B, 3)
        out["F"] = torch.stack([f1, f2, f3], dim=1)
|
+
|
|
315
|
+
|
|
316
|
+
class RE41(PymooProblemTorch):
    r"""Car side impact problem (RE41).

    Seven design variables, four objectives; the fourth objective is the
    aggregated violation of ten inequality constraints.

    See [Tanabe2020]_ for details.

    NOTE(review): no `_calc_pareto_front` override here (unlike the other
    RE classes), so the base-class behavior applies -- confirm intended.
    """
    def __init__(self, path=None, ref_point=None, **kwargs):
        # Bounds tightened by 1e-6 on each side for differentiability.
        super().__init__(n_var=7, n_obj=4,
                         xl=torch.tensor([0.5, 0.45, 0.5, 0.5, 0.875, 0.4, 0.4], dtype=torch.float) + 1e-6,
                         xu=torch.tensor([1.5, 1.35, 1.5, 1.5, 2.625, 1.2, 1.2], dtype=torch.float) - 1e-6,
                         vtype=float, **kwargs)
        if ref_point is None:
            # Default reference point, suitable for the "online" mode.
            self.ref_point = [47.04480682, 4.86997366, 14.40049127, 10.3941957 ]
        else:
            self.ref_point = ref_point

        # Optional path to a precomputed Pareto-front file.
        self.path = path

        # All seven variables are continuous.
        self.continuous_inds = list(range(7))

    def _evaluate(self, X: torch.Tensor, out: dict, *args, **kwargs) -> None:
        """Evaluate objectives f1-f3 plus constraint violation f4.

        X: (..., 7); torch.split keeps a trailing singleton dim on each
        variable, so all intermediates broadcast as (..., 1).
        """
        X1, X2, X3, X4, X5, X6, X7 = torch.split(X, 1, -1)
        # f1: structural weight (linear model).
        f1 = (
            1.98
            + 4.9 * X1
            + 6.67 * X2
            + 6.98 * X3
            + 4.01 * X4
            + 1.78 * X5
            + 10**-5 * X6
            + 2.73 * X7
        )
        f2 = 4.72 - 0.5 * X4 - 0.19 * X2 * X3
        # Velocity surrogates reused by f3 and constraints g9/g10.
        V_MBP = 10.58 - 0.674 * X1 * X2 - 0.67275 * X2
        V_FD = 16.45 - 0.489 * X3 * X7 - 0.843 * X5 * X6
        f3 = 0.5 * (V_MBP + V_FD)
        # Constraints written so that g >= 0 means satisfied.
        g1 = 1 - 1.16 + 0.3717 * X2 * X4 + 0.0092928 * X3
        g2 = (
            0.32
            - 0.261
            + 0.0159 * X1 * X2
            + 0.06486 * X1
            + 0.019 * X2 * X7
            - 0.0144 * X3 * X5
            - 0.0154464 * X6
        )
        g3 = (
            0.32
            - 0.214
            - 0.00817 * X5
            + 0.045195 * X1
            + 0.0135168 * X1
            - 0.03099 * X2 * X6
            + 0.018 * X2 * X7
            - 0.007176 * X3
            - 0.023232 * X3
            + 0.00364 * X5 * X6
            + 0.018 * X2.pow(2)
        )
        g4 = 0.32 - 0.74 + 0.61 * X2 + 0.031296 * X3 + 0.031872 * X7 - 0.227 * X2.pow(2)
        g5 = 32 - 28.98 - 3.818 * X3 + 4.2 * X1 * X2 - 1.27296 * X6 + 2.68065 * X7
        g6 = (
            32
            - 33.86
            - 2.95 * X3
            + 5.057 * X1 * X2
            + 3.795 * X2
            + 3.4431 * X7
            - 1.45728
        )
        g7 = 32 - 46.36 + 9.9 * X2 + 4.4505 * X1
        g8 = 4 - f2
        g9 = 9.9 - V_MBP
        g10 = 15.7 - V_FD
        g = torch.cat([g1, g2, g3, g4, g5, g6, g7, g8, g9, g10], dim=-1)
        zero = torch.tensor(0.0, dtype=X.dtype, device=X.device)
        # Keep only violations: -g where g < 0, else 0.
        g = torch.where(g < 0, -g, zero)
        # f4: total constraint violation, kept as a (..., 1) column.
        f4 = g.sum(dim=-1, keepdim=True)
        out["F"] = torch.cat([f1, f2, f3, f4], dim=-1)
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
import torch.nn as nn
|
|
3
|
+
from pymoo.problems import get_problem
|
|
4
|
+
from moospread.problem import PymooProblemTorch
|
|
5
|
+
import numpy as np
|
|
6
|
+
|
|
7
|
+
######## ZDT Problems ########
|
|
8
|
+
# Note: For the sake of differentiability, we will use the strict bounds:
|
|
9
|
+
# - Lower bound: 0 + 1e-6 (instead of 0)
|
|
10
|
+
# - Upper bound: 1 - 1e-6 (instead of 1)
|
|
11
|
+
# ref_point: The default reference points are suitable for the "online" mode.
|
|
12
|
+
|
|
13
|
+
class ZDT(PymooProblemTorch):
    """Shared base class for the differentiable ZDT problems.

    Narrows the usual [0, 1] box by 1e-6 on each side so that gradients
    remain finite at the boundary (see the module-level note).
    """

    def __init__(self, n_var=30, **kwargs):
        margin = 1e-6
        lower = torch.zeros(n_var, dtype=torch.float) + margin
        upper = torch.ones(n_var, dtype=torch.float) - margin
        super().__init__(n_var=n_var, n_obj=2, xl=lower, xu=upper,
                         vtype=float, **kwargs)
24
|
+
|
|
25
|
+
class ZDT1(ZDT):
    """ZDT1 test problem in PyTorch, fully differentiable."""

    def __init__(self, n_var=30, ref_point=None, **kwargs):
        super().__init__(n_var, **kwargs)
        # Default reference point suits the "online" mode.
        self.ref_point = [0.9994, 6.0576] if ref_point is None else ref_point

    def _calc_pareto_front(self, n_pareto_points: int = 100) -> torch.Tensor:
        # Analytic front: f2 = 1 - sqrt(f1) on [0, 1].
        f1 = torch.linspace(0.0, 1.0, n_pareto_points, device=self.device)
        f2 = 1.0 - torch.sqrt(f1)
        return torch.stack([f1, f2], dim=1)

    def _evaluate(self, x: torch.Tensor, out: dict, *args, **kwargs) -> None:
        # x: (batch_size, n_var)
        f1 = x[:, 0]
        # Distance function g over the remaining variables.
        g = 1.0 + 9.0 / (self.n_var - 1) * torch.sum(x[:, 1:], dim=1)
        # Clamp protects the sqrt from tiny negative ratios.
        ratio = torch.clamp(f1 / g, min=0.0)
        f2 = g * (1.0 - torch.sqrt(ratio))
        out["F"] = torch.stack([f1, f2], dim=1)
|
+
|
|
52
|
+
class ZDT2(ZDT):
    """ZDT2 test problem in PyTorch, fully differentiable."""

    def __init__(self, n_var=30, ref_point=None, **kwargs):
        super().__init__(n_var, **kwargs)
        # Default reference point suits the "online" mode.
        self.ref_point = [0.9994, 6.8960] if ref_point is None else ref_point

    def _calc_pareto_front(self, n_pareto_points: int = 100) -> torch.Tensor:
        # Analytic front: f2 = 1 - f1^2 on [0, 1].
        f1 = torch.linspace(0.0, 1.0, n_pareto_points, device=self.device)
        return torch.stack([f1, 1.0 - f1.pow(2)], dim=1)

    def _evaluate(self, x: torch.Tensor, out: dict, *args, **kwargs) -> None:
        f1 = x[:, 0]
        tail_sum = torch.sum(x[:, 1:], dim=1)
        # Distance function g over the remaining variables.
        g = 1.0 + 9.0 * tail_sum / (self.n_var - 1)
        # Clamp guards against tiny negative ratios from numerics.
        ratio = torch.clamp(f1 / g, min=0.0)
        f2 = g * (1.0 - ratio.pow(2))
        out["F"] = torch.stack([f1, f2], dim=1)
|
+
|
|
75
|
+
class ZDT3(ZDT):
    """ZDT3 test problem in PyTorch, fully differentiable."""

    def __init__(self, n_var=30, ref_point=None, **kwargs):
        super().__init__(n_var, **kwargs)
        # Default reference point suits the "online" mode.
        self.ref_point = [0.9994, 6.0571] if ref_point is None else ref_point

    def _calc_pareto_front(
        self,
        n_points: int = 100,
        flatten: bool = True
    ) -> torch.Tensor:
        # The ZDT3 front is disconnected; these are its five f1-intervals.
        segments = [
            (0.0, 0.0830015349),
            (0.182228780, 0.2577623634),
            (0.4093136748, 0.4538821041),
            (0.6183967944, 0.6525117038),
            (0.8233317983, 0.8518328654),
        ]
        per_segment = int(n_points / len(segments))
        pieces = []
        for lo, hi in segments:
            f1 = torch.linspace(lo, hi, per_segment, device=self.device)
            f2 = 1.0 - torch.sqrt(f1) - f1 * torch.sin(10.0 * torch.pi * f1)
            pieces.append(torch.stack([f1, f2], dim=1))
        if flatten:
            return torch.cat(pieces, dim=0)
        return torch.stack(pieces, dim=0)

    def _evaluate(self, x: torch.Tensor, out: dict, *args, **kwargs) -> None:
        f1 = x[:, 0]
        tail_sum = torch.sum(x[:, 1:], dim=1)
        # Distance function g over the remaining variables.
        g = 1.0 + 9.0 * tail_sum / (self.n_var - 1)
        # Clamp guards the sqrt against tiny negative ratios.
        ratio = torch.clamp(f1 / g, min=0.0)
        f2 = g * (1.0 - torch.sqrt(ratio) - ratio * torch.sin(10.0 * torch.pi * f1))
        out["F"] = torch.stack([f1, f2], dim=1)
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
from moospread.utils.ditmoo import DiTMOO
|
|
2
|
+
from moospread.utils.lhs import LHS
|
|
3
|
+
from moospread.utils.spread_utils import get_ddpm_dataloader, is_pass_function
|
|
4
|
+
from moospread.utils.mobo_utils import *
|
|
5
|
+
from moospread.utils.offline_utils import *
|
|
6
|
+
from moospread.utils.misc import *
|
|
7
|
+
from moospread.utils.constraint_utils import PMGDASolver
|
|
8
|
+
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def get_moo_Jacobian_batch(x_batch, y_batch, n_obj):
    """Compute per-sample Jacobians of the objectives w.r.t. the inputs.

    Args:
        x_batch: (batch_size, n_var) leaf tensor with gradients enabled.
        y_batch: (batch_size, n_obj) objective values computed from x_batch.
        n_obj:   number of objectives.

    Returns:
        Tensor of shape (batch_size, n_obj, n_var).
    """
    per_sample = []
    for sample_idx in range(len(x_batch)):
        rows = []
        for obj_idx in range(n_obj):
            # Backprop one scalar objective, snapshot the gradient row that
            # belongs to this sample, then reset the buffer for the next pass.
            y_batch[sample_idx][obj_idx].backward(retain_graph=True)
            rows.append(x_batch.grad[sample_idx].clone())
            x_batch.grad.zero_()
        per_sample.append(torch.stack(rows))
    return torch.stack(per_sample)
22
|
+
|
|
23
|
+
def get_moo_Jacobian(x, y, n_obj):
    """Stack the gradient of each objective in ``y`` w.r.t. ``x``.

    x must be a leaf tensor with gradients enabled; returns a tensor of
    shape (n_obj, *x.shape). The graph is retained between objectives.
    """
    rows = []
    for obj_idx in range(n_obj):
        y[obj_idx].backward(retain_graph=True)
        rows.append(x.grad.clone())
        x.grad.zero_()
    return torch.stack(rows)
31
|
+
|
|
32
|
+
def flatten_grads(grads_dict):
    """Concatenate every tensor in ``grads_dict`` into one flat 1-D vector.

    Iteration follows the dict's insertion order; values are flattened
    with view(-1), so they must be contiguous.
    """
    pieces = [tensor.view(-1) for tensor in grads_dict.values()]
    return torch.cat(pieces)
34
|
+
|
|
35
|
+
def calc_gradients_mtl(data, batch, model, objectives):
    """Compute per-objective parameter gradients for a multi-task model.

    For each objective, runs a fresh forward pass of ``model`` on ``data``,
    backpropagates ``objective(logits, **batch)``, and records the gradients
    of all shared (non-private), trainable parameters.

    Args:
        data: input passed to ``model``; the model must return a dict with
            a 'logits' entry.
        batch: extra keyword arguments forwarded to each objective.
        model: module being differentiated. If it defines
            ``private_params()``, parameters whose names contain any of the
            returned substrings are excluded from the result.
        objectives: iterable of callables ``objective(logits, **batch)``
            returning a scalar loss.

    Returns:
        A list with one dict per objective, mapping parameter name to a
        detached clone of its gradient.
    """
    gradients = []
    # Hoisted out of the loop: the private-parameter list does not change
    # between objectives. (Previously the function also accumulated
    # output.item() into an unused list, forcing a sync per objective.)
    private_params = model.private_params() if hasattr(model, 'private_params') else []
    for i, objective in enumerate(objectives):
        model.zero_grad()
        logits = model(data)
        output = objective(logits['logits'], **batch)
        output.backward()
        gradients.append({})
        for name, param in model.named_parameters():
            not_private = all(p not in name for p in private_params)
            if not_private and param.requires_grad and param.grad is not None:
                gradients[i][name] = param.grad.data.detach().clone()
    return gradients
52
|
+
|
|
53
|
+
def get_grads_from_model(loss, model):
    """Return the flattened parameter gradient of each loss term.

    Each term in ``loss`` is backpropagated separately (graph retained),
    and the model's full gradient vector is collected after each pass.
    Returns a tensor of shape (len(loss), n_params).
    """
    flat_grads = []
    for term in loss:
        model.zero_grad()
        term.backward(retain_graph=True)
        flat_grads.append(get_flatten_grad(model))
    return torch.stack(flat_grads)
60
|
+
|
|
61
|
+
def get_flatten_grad(model):
    """Flatten all parameter gradients of ``model`` into one 1-D tensor.

    Parameters whose ``.grad`` is None contribute zeros of matching size,
    so the output length always equals the total parameter count.
    """
    pieces = []
    for param in model.parameters():
        if param.grad is None:
            pieces.append(torch.zeros_like(param.view(-1)))
        else:
            pieces.append(param.grad.view(-1))
    return torch.cat(pieces)
70
|
+
|
|
71
|
+
def numel_params(model):
    """Count the trainable (requires_grad) parameters of ``model``."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import torch
|
|
2
|
+
from numpy import array
|
|
3
|
+
import numpy as np
|
|
4
|
+
from cvxopt import matrix, solvers
|
|
5
|
+
solvers.options['show_progress'] = False
|
|
6
|
+
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def solve_mgda_analy(grad_1, grad_2):
    """Closed-form MGDA weights for the two-objective case.

    grad_1, grad_2: flat gradients of shape (n,); works for both torch
    tensors and numpy arrays.

    Returns a torch.Tensor ``[gamma, 1 - gamma]`` minimizing
    ``||gamma * grad_1 + (1 - gamma) * grad_2||``, with gamma clipped to
    the open interval via the 0.001 / 0.999 guards.
    """
    g11 = grad_1 @ grad_1
    g22 = grad_2 @ grad_2
    g12 = grad_1 @ grad_2
    # Unconstrained minimizer falls outside [0, 1]: clip near the ends.
    if g12 >= g11:
        gamma = 0.999
    elif g12 >= g22:
        gamma = 0.001
    else:
        # Interior solution of the 1-D quadratic.
        gamma = -1.0 * ((g12 - g22) / (g11 + g22 - 2 * g12))
    return torch.Tensor([gamma, 1 - gamma])
30
|
+
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def solve_mgda(Jacobian):
    '''
    Find the MGDA min-norm weights for a stacked gradient matrix.

    Input Jacobian: (m,n).
    Output alpha: (m,)

    For m > 2 this solves the QP
        min_alpha 0.5 * alpha^T (J J^T) alpha
        s.t.      alpha >= 0,  sum(alpha) = 1,
    i.e. the min-norm point in the convex hull of the gradient rows.

    NOTE(review): for m == 2 the result is a torch.Tensor (from
    solve_mgda_analy) while for m > 2 it is a numpy array -- callers
    must handle both.
    '''

    # m : n_obj
    # n : n_var

    m = Jacobian.shape[0]
    if m == 2:
        # Two objectives admit a closed-form solution; skip the QP solver.
        return solve_mgda_analy(Jacobian[0], Jacobian[1])
    else:
        # Gram matrix of the gradients; moved to CPU/NumPy for cvxopt.
        Q = (Jacobian @ Jacobian.T).cpu().detach().numpy()

        Q = matrix(np.float64(Q))
        p = np.zeros(m)       # no linear term in the objective
        A = np.ones(m)        # equality row: sum(alpha) = 1

        A = matrix(A, (1, m))
        b = matrix(1.0)

        G_cvx = -np.eye(m)    # inequality -alpha <= 0, i.e. alpha >= 0
        h = [0.0] * m
        h = matrix(h)

        G_cvx = matrix(G_cvx)
        p = matrix(p)
        sol = solvers.qp(Q, p, G_cvx, h, A, b)

        res = np.array(sol['x']).squeeze()
        # Defensive renormalization: the QP enforces sum(alpha) = 1 only
        # up to the solver's tolerance.
        alpha = res / sum(res)  # important. Does res already satisfy sum=1?
        return alpha
|
|
68
|
+
|
|
69
|
+
|