moospread-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- moospread/__init__.py +3 -0
- moospread/core.py +1881 -0
- moospread/problem.py +193 -0
- moospread/tasks/__init__.py +4 -0
- moospread/tasks/dtlz_torch.py +139 -0
- moospread/tasks/mw_torch.py +274 -0
- moospread/tasks/re_torch.py +394 -0
- moospread/tasks/zdt_torch.py +112 -0
- moospread/utils/__init__.py +8 -0
- moospread/utils/constraint_utils/__init__.py +2 -0
- moospread/utils/constraint_utils/gradient.py +72 -0
- moospread/utils/constraint_utils/mgda_core.py +69 -0
- moospread/utils/constraint_utils/pmgda_solver.py +308 -0
- moospread/utils/constraint_utils/prefs.py +64 -0
- moospread/utils/ditmoo.py +127 -0
- moospread/utils/lhs.py +74 -0
- moospread/utils/misc.py +28 -0
- moospread/utils/mobo_utils/__init__.py +11 -0
- moospread/utils/mobo_utils/evolution/__init__.py +0 -0
- moospread/utils/mobo_utils/evolution/dom.py +60 -0
- moospread/utils/mobo_utils/evolution/norm.py +40 -0
- moospread/utils/mobo_utils/evolution/utils.py +97 -0
- moospread/utils/mobo_utils/learning/__init__.py +0 -0
- moospread/utils/mobo_utils/learning/model.py +40 -0
- moospread/utils/mobo_utils/learning/model_init.py +33 -0
- moospread/utils/mobo_utils/learning/model_update.py +51 -0
- moospread/utils/mobo_utils/learning/prediction.py +116 -0
- moospread/utils/mobo_utils/learning/utils.py +143 -0
- moospread/utils/mobo_utils/lhs_for_mobo.py +243 -0
- moospread/utils/mobo_utils/mobo/__init__.py +0 -0
- moospread/utils/mobo_utils/mobo/acquisition.py +209 -0
- moospread/utils/mobo_utils/mobo/algorithms.py +91 -0
- moospread/utils/mobo_utils/mobo/factory.py +86 -0
- moospread/utils/mobo_utils/mobo/mobo.py +132 -0
- moospread/utils/mobo_utils/mobo/selection.py +182 -0
- moospread/utils/mobo_utils/mobo/solver/__init__.py +5 -0
- moospread/utils/mobo_utils/mobo/solver/moead.py +17 -0
- moospread/utils/mobo_utils/mobo/solver/nsga2.py +10 -0
- moospread/utils/mobo_utils/mobo/solver/parego/__init__.py +1 -0
- moospread/utils/mobo_utils/mobo/solver/parego/parego.py +62 -0
- moospread/utils/mobo_utils/mobo/solver/parego/utils.py +34 -0
- moospread/utils/mobo_utils/mobo/solver/pareto_discovery/__init__.py +1 -0
- moospread/utils/mobo_utils/mobo/solver/pareto_discovery/buffer.py +364 -0
- moospread/utils/mobo_utils/mobo/solver/pareto_discovery/pareto_discovery.py +571 -0
- moospread/utils/mobo_utils/mobo/solver/pareto_discovery/utils.py +168 -0
- moospread/utils/mobo_utils/mobo/solver/solver.py +74 -0
- moospread/utils/mobo_utils/mobo/surrogate_model/__init__.py +2 -0
- moospread/utils/mobo_utils/mobo/surrogate_model/base.py +36 -0
- moospread/utils/mobo_utils/mobo/surrogate_model/gaussian_process.py +177 -0
- moospread/utils/mobo_utils/mobo/surrogate_model/thompson_sampling.py +79 -0
- moospread/utils/mobo_utils/mobo/surrogate_problem.py +44 -0
- moospread/utils/mobo_utils/mobo/transformation.py +106 -0
- moospread/utils/mobo_utils/mobo/utils.py +65 -0
- moospread/utils/mobo_utils/spread_mobo_utils.py +854 -0
- moospread/utils/offline_utils/__init__.py +10 -0
- moospread/utils/offline_utils/handle_task.py +203 -0
- moospread/utils/offline_utils/proxies.py +338 -0
- moospread/utils/spread_utils.py +91 -0
- moospread-0.1.0.dist-info/METADATA +75 -0
- moospread-0.1.0.dist-info/RECORD +63 -0
- moospread-0.1.0.dist-info/WHEEL +5 -0
- moospread-0.1.0.dist-info/licenses/LICENSE +10 -0
- moospread-0.1.0.dist-info/top_level.txt +1 -0
moospread/utils/mobo_utils/mobo/mobo.py
@@ -0,0 +1,132 @@
+import numpy as np
+from moospread.utils.mobo_utils.mobo.surrogate_problem import SurrogateProblem
+from moospread.utils.mobo_utils.mobo.utils import Timer, find_pareto_front, calc_hypervolume
+from moospread.utils.mobo_utils.mobo.factory import init_from_config
+from moospread.utils.mobo_utils.mobo.transformation import StandardTransform
+
+'''
+Main algorithm framework for Multi-Objective Bayesian Optimization
+'''
+
+class MOBO:
+    '''
+    Base class of algorithm framework, inherit this class with different configs to create new algorithm classes
+    '''
+    config = {}
+
+    def __init__(self, problem, n_iter, ref_point, framework_args):
+        '''
+        Input:
+            problem: the original / real optimization problem
+            n_iter: number of iterations to optimize
+            ref_point: reference point for hypervolume calculation
+            framework_args: arguments to initialize each component of the framework
+        '''
+        self.real_problem = problem
+        self.n_var, self.n_obj = problem.n_var, problem.n_obj
+        self.n_iter = n_iter
+        self.ref_point = ref_point
+
+        bounds = np.array([problem.xl, problem.xu])
+        self.transformation = StandardTransform(bounds) # data normalization for surrogate model fitting
+
+        # framework components
+        framework_args['surrogate']['n_var'] = self.n_var # for surrogate fitting
+        framework_args['surrogate']['n_obj'] = self.n_obj # for surrogate fitting
+        framework_args['solver']['n_obj'] = self.n_obj # for MOEA/D-EGO
+        framework = init_from_config(self.config, framework_args)
+
+        self.surrogate_model = framework['surrogate'] # surrogate model
+        self.acquisition = framework['acquisition'] # acquisition function
+        self.solver = framework['solver'] # multi-objective solver for finding the Pareto front
+        self.selection = framework['selection'] # selection method for choosing new (batch of) samples to evaluate on real problem
+
+        # to keep track of data and pareto information (current status of algorithm)
+        self.X = None
+        self.Y = None
+        self.sample_num = 0
+        self.status = {
+            'pset': None,
+            'pfront': None,
+            'hv': None,
+            'ref_point': self.ref_point,
+        }
+
+        # other component-specific information that needs to be stored or exported
+        self.info = None
+
+    def _update_status(self, X, Y):
+        '''
+        Update the status of algorithm from data
+        '''
+        if self.sample_num == 0:
+            self.X = X
+            self.Y = Y
+        else:
+            self.X = np.vstack([self.X, X])
+            self.Y = np.vstack([self.Y, Y])
+        self.sample_num += len(X)
+
+        self.status['pfront'], pfront_idx = find_pareto_front(self.Y, return_index=True)
+        self.status['pset'] = self.X[pfront_idx]
+        self.status['hv'] = calc_hypervolume(self.status['pfront'], self.ref_point)
+
+    def solve(self, X_init, Y_init):
+        '''
+        Solve the real multi-objective problem from initial data (X_init, Y_init)
+        '''
+        # determine reference point from data if not specified by arguments
+        if self.ref_point is None:
+            self.ref_point = np.max(Y_init, axis=0)
+        self.selection.set_ref_point(self.ref_point)
+
+        self._update_status(X_init, Y_init)
+
+        global_timer = Timer()
+
+        for i in range(self.n_iter):
+            print('========== Iteration %d ==========' % i)
+
+            timer = Timer()
+
+            # data normalization
+            self.transformation.fit(self.X, self.Y)
+            X, Y = self.transformation.do(self.X, self.Y)
+
+            # build surrogate models
+            self.surrogate_model.fit(X, Y)
+            timer.log('Surrogate model fitted')
+
+            # define acquisition functions
+            self.acquisition.fit(X, Y)
+
+            # solve surrogate problem
+            surr_problem = SurrogateProblem(self.real_problem, self.surrogate_model, self.acquisition, self.transformation)
+            solution = self.solver.solve(surr_problem, X, Y)
+            timer.log('Surrogate problem solved')
+
+            # batch point selection
+            self.selection.fit(X, Y)
+            X_next, self.info = self.selection.select(solution, self.surrogate_model, self.status, self.transformation)
+            timer.log('Next sample batch selected')
+
+            # update dataset
+            Y_next = self.real_problem.evaluate(X_next)
+            self._update_status(X_next, Y_next)
+            timer.log('New samples evaluated')
+
+            # statistics
+            global_timer.log('Total runtime', reset=False)
+            print('Total evaluations: %d, hypervolume: %.4f\n' % (self.sample_num, self.status['hv']))
+
+            # return new data iteration by iteration
+            yield X_next, Y_next
+
+    def __str__(self):
+        return \
+            '========== Framework Description ==========\n' + \
+            f'# algorithm: {self.__class__.__name__}\n' + \
+            f'# surrogate: {self.surrogate_model.__class__.__name__}\n' + \
+            f'# acquisition: {self.acquisition.__class__.__name__}\n' + \
+            f'# solver: {self.solver.__class__.__name__}\n' + \
+            f'# selection: {self.selection.__class__.__name__}\n'
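For orientation, _update_status above keeps status['pfront'], status['pset'], and status['hv'] current through find_pareto_front and calc_hypervolume, which are imported from mobo.utils and not shown in this diff. The following standalone sketch reproduces that bookkeeping for a two-objective minimization problem in plain NumPy; it illustrates the quantities being tracked and is not the package's own helper implementation.

import numpy as np

def pareto_front_mask(Y):
    # True for rows of Y not dominated by any other row (minimization)
    mask = np.ones(len(Y), dtype=bool)
    for i in range(len(Y)):
        dominated = np.all(Y <= Y[i], axis=1) & np.any(Y < Y[i], axis=1)
        if dominated.any():
            mask[i] = False
    return mask

def hypervolume_2d(pfront, ref_point):
    # exact two-objective hypervolume: sum of rectangles after sorting by f1
    pts = pfront[np.argsort(pfront[:, 0])]
    hv, prev_f2 = 0.0, ref_point[1]
    for f1, f2 in pts:
        hv += max(ref_point[0] - f1, 0.0) * max(prev_f2 - f2, 0.0)
        prev_f2 = min(prev_f2, f2)
    return hv

Y = np.array([[1.0, 4.0], [2.0, 2.0], [3.0, 3.0], [4.0, 1.0]])
mask = pareto_front_mask(Y)                                     # [True, True, False, True]
print(hypervolume_2d(Y[mask], ref_point=np.array([5.0, 5.0])))  # -> 11.0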
moospread/utils/mobo_utils/mobo/selection.py
@@ -0,0 +1,182 @@
+from abc import ABC, abstractmethod
+import numpy as np
+from sklearn.cluster import KMeans
+from pymoo.factory import get_performance_indicator
+from pymoo.algorithms.nsga2 import calc_crowding_distance
+
+'''
+Selection methods for new batch of samples to evaluate on real problem
+'''
+
+class Selection(ABC):
+    '''
+    Base class of selection method
+    '''
+    def __init__(self, batch_size, ref_point=None, **kwargs):
+        self.batch_size = batch_size
+        self.ref_point = ref_point
+
+    def fit(self, X, Y):
+        '''
+        Fit the parameters of selection method from data
+        '''
+        pass
+
+    def set_ref_point(self, ref_point):
+        self.ref_point = ref_point
+
+    @abstractmethod
+    def select(self, solution, surrogate_model, status, transformation):
+        '''
+        Select new samples from solution obtained by solver
+        Input:
+            solution['x']: design variables of solution
+            solution['y']: acquisition values of solution
+            solution['algo']: solver algorithm, having some relevant information from optimization
+            surrogate_model: fitted surrogate model
+            status['pset']: current pareto set found
+            status['pfront']: current pareto front found
+            status['hv']: current hypervolume
+            transformation: data normalization for surrogate model fitting
+            (some inputs may not be necessary for some selection criteria)
+        Output:
+            X_next: next batch of samples selected
+            info: other information that needs to be stored or exported, None if not necessary
+        '''
+        pass
+
+
+class HVI(Selection):
+    '''
+    Hypervolume Improvement
+    '''
+    def select(self, solution, surrogate_model, status, transformation):
+
+        pred_pset = solution['x']
+        val = surrogate_model.evaluate(pred_pset)
+        pred_pfront = val['F']
+        pred_pset, pred_pfront = transformation.undo(pred_pset, pred_pfront)
+
+        curr_pfront = status['pfront'].copy()
+        hv = get_performance_indicator('hv', ref_point=self.ref_point)
+        idx_choices = np.ma.array(np.arange(len(pred_pset)), mask=False) # mask array for index choices
+        next_batch_indices = []
+
+        # greedily select indices that maximize hypervolume contribution
+        for _ in range(self.batch_size):
+            curr_hv = hv.calc(curr_pfront)
+            max_hv_contrib = 0.
+            max_hv_idx = -1
+            for idx in idx_choices.compressed():
+                # calculate hypervolume contribution
+                new_hv = hv.calc(np.vstack([curr_pfront, pred_pfront[idx]]))
+                hv_contrib = new_hv - curr_hv
+                if hv_contrib > max_hv_contrib:
+                    max_hv_contrib = hv_contrib
+                    max_hv_idx = idx
+            if max_hv_idx == -1: # if all candidates have no hypervolume contribution, just randomly select one
+                max_hv_idx = np.random.choice(idx_choices.compressed())
+
+            idx_choices.mask[max_hv_idx] = True # mask as selected
+            curr_pfront = np.vstack([curr_pfront, pred_pfront[max_hv_idx]]) # add to current pareto front
+            next_batch_indices.append(max_hv_idx)
+        next_batch_indices = np.array(next_batch_indices)
+
+        return pred_pset[next_batch_indices], None
+
+
+class Uncertainty(Selection):
+    '''
+    Uncertainty
+    '''
+    def select(self, solution, surrogate_model, status, transformation):
+
+        X = solution['x']
+        val = surrogate_model.evaluate(X, std=True)
+        Y_std = val['S']
+        X = transformation.undo(x=X)
+
+        uncertainty = np.prod(Y_std, axis=1)
+        top_indices = np.argsort(uncertainty)[::-1][:self.batch_size]
+        return X[top_indices], None
+
+
+class Random(Selection):
+    '''
+    Random selection
+    '''
+    def select(self, solution, surrogate_model, status, transformation):
+        X = solution['x']
+        X = transformation.undo(x=X)
+        random_indices = np.random.choice(len(X), size=self.batch_size, replace=False)
+        return X[random_indices], None
+
+
+class DGEMOSelect(Selection):
+    '''
+    Selection method for DGEMO algorithm
+    '''
+    has_family = True
+
+    def select(self, solution, surrogate_model, status, transformation):
+        algo = solution['algo']
+
+        X_next, _, family_lbls_next = algo.propose_next_batch(status['pfront'], self.ref_point, self.batch_size, transformation)
+        family_lbls, approx_pset, approx_pfront = algo.get_sparse_front(transformation)
+
+        info = {
+            'family_lbls_next': family_lbls_next,
+            'family_lbls': family_lbls,
+            'approx_pset': approx_pset,
+            'approx_pfront': approx_pfront,
+        }
+        return X_next, info
+
+
+class MOEADSelect(Selection):
+    '''
+    Selection method for MOEA/D-EGO algorithm
+    '''
+    def select(self, solution, surrogate_model, status, transformation):
+        X, G, algo = solution['x'], solution['y'], solution['algo']
+        ref_dirs = algo.ref_dirs
+
+        G_s = algo._decomposition.do(G, weights=ref_dirs, ideal_point=algo.ideal_point) # scalarized acquisition value
+
+        # build candidate pool Q
+        Q_x, Q_dir, Q_g = [], [], []
+        X_added = status['pset'].copy()
+        for x, ref_dir, g in zip(X, ref_dirs, G_s):
+            if (x != X_added).any(axis=1).all():
+                Q_x.append(x)
+                Q_dir.append(ref_dir)
+                Q_g.append(g)
+                X_added = np.vstack([X_added, x])
+        Q_x, Q_dir, Q_g = np.array(Q_x), np.array(Q_dir), np.array(Q_g)
+
+        batch_size = min(self.batch_size, len(Q_x)) # in case Q is smaller than batch size
+
+        if batch_size == 0:
+            X_next = X[np.random.choice(len(X), self.batch_size, replace=False)]
+            X_next = transformation.undo(x=X_next)
+            return X_next, None
+
+        # k-means clustering on X with weight vectors
+        labels = KMeans(n_clusters=batch_size).fit_predict(np.column_stack([Q_x, Q_dir]))
+
+        # select point in each cluster with lowest scalarized acquisition value
+        X_next = []
+        for i in range(batch_size):
+            indices = np.where(labels == i)[0]
+            top_idx = indices[np.argmin(Q_g[indices])]
+            top_x = transformation.undo(x=Q_x[top_idx])
+            X_next.append(top_x)
+        X_next = np.array(X_next)
+
+        # when Q is smaller than batch size
+        if batch_size < self.batch_size:
+            X_rest = X[np.random.choice(len(X), self.batch_size - batch_size, replace=False)]
+            X_next = np.vstack([X_next, transformation.undo(x=X_rest)])
+
+        return X_next, None
+
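MOEADSelect above spreads the batch across the reference directions by clustering candidate points jointly with their weight vectors and keeping, per cluster, the candidate with the lowest scalarized acquisition value. The sketch below isolates that clustering step on synthetic data with plain NumPy and scikit-learn; the arrays Q_x, Q_dir, and Q_g are stand-ins for the candidate pool built above, not the package's solution dict.

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
n_cand, n_var, n_obj, batch_size = 50, 4, 2, 5

Q_x = rng.random((n_cand, n_var))          # candidate designs (normalized space)
Q_dir = rng.random((n_cand, n_obj))        # weight vector attached to each candidate
Q_dir /= Q_dir.sum(axis=1, keepdims=True)
Q_g = rng.random(n_cand)                   # scalarized acquisition value per candidate

# cluster candidates jointly on design variables and weight vectors
labels = KMeans(n_clusters=batch_size, n_init=10).fit_predict(np.column_stack([Q_x, Q_dir]))

# keep the candidate with the lowest scalarized acquisition value in each cluster
X_next = []
for i in range(batch_size):
    idx = np.where(labels == i)[0]
    X_next.append(Q_x[idx[np.argmin(Q_g[idx])]])
X_next = np.array(X_next)
print(X_next.shape)   # (5, 4)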
moospread/utils/mobo_utils/mobo/solver/__init__.py
@@ -0,0 +1,5 @@
+from moospread.utils.mobo_utils.mobo.solver.solver import Solver
+from moospread.utils.mobo_utils.mobo.solver.nsga2 import NSGA2Solver
+from moospread.utils.mobo_utils.mobo.solver.moead import MOEADSolver
+from moospread.utils.mobo_utils.mobo.solver.pareto_discovery import ParetoDiscoverySolver
+from moospread.utils.mobo_utils.mobo.solver.parego import ParEGOSolver
moospread/utils/mobo_utils/mobo/solver/moead.py
@@ -0,0 +1,17 @@
+from moospread.utils.mobo_utils.mobo.solver.solver import Solver
+import numpy as np
+from pymoo.algorithms.moead import MOEAD
+
+
+class MOEADSolver(Solver):
+    '''
+    Solver based on MOEA/D
+    '''
+    def __init__(self, *args, **kwargs):
+        pop_size, n_obj = kwargs['pop_size'], kwargs['n_obj']
+        # generate direction vectors by random sampling
+        ref_dirs = np.random.random((pop_size, n_obj))
+        ref_dirs /= np.expand_dims(np.sum(ref_dirs, axis=1), 1)
+        kwargs['ref_dirs'] = ref_dirs
+        kwargs['eliminate_duplicates'] = False
+        super().__init__(*args, algo=MOEAD, **kwargs)
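The two ref_dirs lines in MOEADSolver.__init__ draw uniform random vectors and rescale each row to sum to one, i.e. random weight vectors on the probability simplex rather than a structured set of reference directions. A quick standalone check of that normalization:

import numpy as np

np.random.seed(0)
pop_size, n_obj = 4, 3
ref_dirs = np.random.random((pop_size, n_obj))
ref_dirs /= np.expand_dims(np.sum(ref_dirs, axis=1), 1)
print(ref_dirs.sum(axis=1))   # -> [1. 1. 1. 1.]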
moospread/utils/mobo_utils/mobo/solver/nsga2.py
@@ -0,0 +1,10 @@
+from moospread.utils.mobo_utils.mobo.solver.solver import Solver
+from pymoo.algorithms.nsga2 import NSGA2
+
+
+class NSGA2Solver(Solver):
+    '''
+    Solver based on NSGA-II
+    '''
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, algo=NSGA2, **kwargs)
moospread/utils/mobo_utils/mobo/solver/parego/__init__.py
@@ -0,0 +1 @@
+from moospread.utils.mobo_utils.mobo.solver.parego import ParEGOSolver
moospread/utils/mobo_utils/mobo/solver/parego/parego.py
@@ -0,0 +1,62 @@
+import numpy as np
+from moospread.utils.mobo_utils.mobo.solver.solver import Solver
+from pymoo.optimize import minimize
+from pymoo.algorithms.so_cmaes import CMAES
+from pymoo.decomposition.tchebicheff import Tchebicheff
+from moospread.utils.mobo_utils.mobo.solver.parego.utils import ScalarizedEvaluator
+from multiprocessing import Process, Queue
+
+
+def optimization(problem, x, weights, queue):
+    '''
+    Parallel worker for single-objective CMA-ES optimization
+    '''
+    evaluator = ScalarizedEvaluator(decomposition=Tchebicheff(), weights=weights)
+    res = minimize(problem, CMAES(x), evaluator=evaluator)
+    queue.put([res.X[0], res.F[0]])
+
+
+class ParEGOSolver(Solver):
+    '''
+    Solver based on ParEGO
+    '''
+    def __init__(self, *args, **kwargs):
+        self.pop_size = kwargs['pop_size']
+        self.n_process = kwargs.pop('n_process')
+        super().__init__(*args, algo=CMAES, **kwargs)
+
+    def solve(self, problem, X, Y):
+        '''
+        Solve the multi-objective problem by multiple scalarized single-objective solvers
+        '''
+        # initialize population
+        sampling = self._get_sampling(X, Y)
+        if not isinstance(sampling, np.ndarray):
+            sampling = sampling.do(problem, self.pop_size)
+
+        # generate scalarization weights
+        weights = np.random.random((self.pop_size, Y.shape[1]))
+        weights /= np.expand_dims(np.sum(weights, axis=1), 1)
+
+        # optimization
+        xs, ys = [], []
+        queue = Queue()
+        n_active_process = 0
+        for i, x0 in enumerate(sampling):
+            Process(target=optimization, args=(problem, x0, weights[i], queue)).start()
+            n_active_process += 1
+            if n_active_process >= self.n_process:
+                x, y = queue.get()
+                xs.append(x)
+                ys.append(y)
+                n_active_process -= 1
+
+        # gather result
+        for _ in range(n_active_process):
+            x, y = queue.get()
+            xs.append(x)
+            ys.append(y)
+
+        # construct solution
+        self.solution = {'x': np.array(xs), 'y': np.array(ys)}
+        return self.solution
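ParEGOSolver.solve bounds its parallelism with a simple pattern: launch one worker per scalarization, and once n_process workers have been started, block on queue.get() before launching the next, then drain the queue at the end. The sketch below shows the same pattern with a dummy worker in place of the CMA-ES run, so it is runnable without pymoo or the package itself.

from multiprocessing import Process, Queue

def worker(i, queue):
    # stand-in for the per-weight CMA-ES run in `optimization`
    queue.put((i, i * i))

if __name__ == '__main__':
    n_jobs, n_process = 10, 3
    queue, results, n_active = Queue(), [], 0

    for i in range(n_jobs):
        Process(target=worker, args=(i, queue)).start()
        n_active += 1
        if n_active >= n_process:      # block until one result arrives before starting more
            results.append(queue.get())
            n_active -= 1

    for _ in range(n_active):          # drain remaining workers
        results.append(queue.get())

    print(sorted(results))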
moospread/utils/mobo_utils/mobo/solver/parego/utils.py
@@ -0,0 +1,34 @@
+import numpy as np
+from pymoo.model.evaluator import Evaluator
+
+'''
+Evaluate a scalar value of a multi-objective problem by scalarization (decomposition)
+'''
+
+class ScalarizedEvaluator(Evaluator):
+
+    def __init__(self, *args, decomposition, weights, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.decomposition = decomposition
+        self.weights = weights
+        self.ideal_point = None
+
+    def _eval(self, problem, pop, **kwargs):
+
+        out = problem.evaluate(pop.get("X"),
+                               return_values_of=self.evaluate_values_of,
+                               return_as_dictionary=True,
+                               **kwargs)
+
+        for key, val in out.items():
+            if val is None:
+                continue
+            else:
+                if key == 'F':
+                    if self.ideal_point is None:
+                        self.ideal_point = np.min(val, axis=0)
+                    else:
+                        self.ideal_point = np.minimum(self.ideal_point, np.min(val, axis=0))
+                    val = self.decomposition.do(val, self.weights, ideal_point=self.ideal_point)
+                    if len(val) > 1: val = np.expand_dims(val, 1)
+                pop.set(key, val)
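For reference, the Tchebycheff decomposition that ScalarizedEvaluator applies through pymoo's Tchebicheff scalarizes each objective vector roughly as max_i w_i * |f_i - z_i*|, where w are the weights and z* is the ideal point tracked in _eval. A minimal NumPy sketch of that formula (pymoo's implementation may differ in small details such as a utopian-point offset):

import numpy as np

def tchebycheff(F, weights, ideal_point):
    # scalarize each row of F: max_i w_i * |f_i - z_i*|
    return np.max(weights * np.abs(F - ideal_point), axis=1)

F = np.array([[1.0, 4.0],
              [2.0, 2.0],
              [4.0, 1.0]])
weights = np.array([0.5, 0.5])
ideal_point = F.min(axis=0)                  # elementwise minimum, as tracked in _eval above
print(tchebycheff(F, weights, ideal_point))  # -> [1.5 0.5 1.5]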
moospread/utils/mobo_utils/mobo/solver/pareto_discovery/__init__.py
@@ -0,0 +1 @@
+from moospread.utils.mobo_utils.mobo.solver.pareto_discovery import ParetoDiscoverySolver