moospread 0.1.0__py3-none-any.whl

Files changed (63)
  1. moospread/__init__.py +3 -0
  2. moospread/core.py +1881 -0
  3. moospread/problem.py +193 -0
  4. moospread/tasks/__init__.py +4 -0
  5. moospread/tasks/dtlz_torch.py +139 -0
  6. moospread/tasks/mw_torch.py +274 -0
  7. moospread/tasks/re_torch.py +394 -0
  8. moospread/tasks/zdt_torch.py +112 -0
  9. moospread/utils/__init__.py +8 -0
  10. moospread/utils/constraint_utils/__init__.py +2 -0
  11. moospread/utils/constraint_utils/gradient.py +72 -0
  12. moospread/utils/constraint_utils/mgda_core.py +69 -0
  13. moospread/utils/constraint_utils/pmgda_solver.py +308 -0
  14. moospread/utils/constraint_utils/prefs.py +64 -0
  15. moospread/utils/ditmoo.py +127 -0
  16. moospread/utils/lhs.py +74 -0
  17. moospread/utils/misc.py +28 -0
  18. moospread/utils/mobo_utils/__init__.py +11 -0
  19. moospread/utils/mobo_utils/evolution/__init__.py +0 -0
  20. moospread/utils/mobo_utils/evolution/dom.py +60 -0
  21. moospread/utils/mobo_utils/evolution/norm.py +40 -0
  22. moospread/utils/mobo_utils/evolution/utils.py +97 -0
  23. moospread/utils/mobo_utils/learning/__init__.py +0 -0
  24. moospread/utils/mobo_utils/learning/model.py +40 -0
  25. moospread/utils/mobo_utils/learning/model_init.py +33 -0
  26. moospread/utils/mobo_utils/learning/model_update.py +51 -0
  27. moospread/utils/mobo_utils/learning/prediction.py +116 -0
  28. moospread/utils/mobo_utils/learning/utils.py +143 -0
  29. moospread/utils/mobo_utils/lhs_for_mobo.py +243 -0
  30. moospread/utils/mobo_utils/mobo/__init__.py +0 -0
  31. moospread/utils/mobo_utils/mobo/acquisition.py +209 -0
  32. moospread/utils/mobo_utils/mobo/algorithms.py +91 -0
  33. moospread/utils/mobo_utils/mobo/factory.py +86 -0
  34. moospread/utils/mobo_utils/mobo/mobo.py +132 -0
  35. moospread/utils/mobo_utils/mobo/selection.py +182 -0
  36. moospread/utils/mobo_utils/mobo/solver/__init__.py +5 -0
  37. moospread/utils/mobo_utils/mobo/solver/moead.py +17 -0
  38. moospread/utils/mobo_utils/mobo/solver/nsga2.py +10 -0
  39. moospread/utils/mobo_utils/mobo/solver/parego/__init__.py +1 -0
  40. moospread/utils/mobo_utils/mobo/solver/parego/parego.py +62 -0
  41. moospread/utils/mobo_utils/mobo/solver/parego/utils.py +34 -0
  42. moospread/utils/mobo_utils/mobo/solver/pareto_discovery/__init__.py +1 -0
  43. moospread/utils/mobo_utils/mobo/solver/pareto_discovery/buffer.py +364 -0
  44. moospread/utils/mobo_utils/mobo/solver/pareto_discovery/pareto_discovery.py +571 -0
  45. moospread/utils/mobo_utils/mobo/solver/pareto_discovery/utils.py +168 -0
  46. moospread/utils/mobo_utils/mobo/solver/solver.py +74 -0
  47. moospread/utils/mobo_utils/mobo/surrogate_model/__init__.py +2 -0
  48. moospread/utils/mobo_utils/mobo/surrogate_model/base.py +36 -0
  49. moospread/utils/mobo_utils/mobo/surrogate_model/gaussian_process.py +177 -0
  50. moospread/utils/mobo_utils/mobo/surrogate_model/thompson_sampling.py +79 -0
  51. moospread/utils/mobo_utils/mobo/surrogate_problem.py +44 -0
  52. moospread/utils/mobo_utils/mobo/transformation.py +106 -0
  53. moospread/utils/mobo_utils/mobo/utils.py +65 -0
  54. moospread/utils/mobo_utils/spread_mobo_utils.py +854 -0
  55. moospread/utils/offline_utils/__init__.py +10 -0
  56. moospread/utils/offline_utils/handle_task.py +203 -0
  57. moospread/utils/offline_utils/proxies.py +338 -0
  58. moospread/utils/spread_utils.py +91 -0
  59. moospread-0.1.0.dist-info/METADATA +75 -0
  60. moospread-0.1.0.dist-info/RECORD +63 -0
  61. moospread-0.1.0.dist-info/WHEEL +5 -0
  62. moospread-0.1.0.dist-info/licenses/LICENSE +10 -0
  63. moospread-0.1.0.dist-info/top_level.txt +1 -0
moospread/utils/mobo_utils/evolution/norm.py
@@ -0,0 +1,40 @@
+ import numpy as np
+
+ # This file implements the normalization procedures
+
+
+ # Normalize the decision variables, for training neural nets
+ def var_normalization(individuals, low, up, lv=-1, uv=1):
+     if len(np.shape(individuals)) == 1:
+         individuals = [individuals]
+
+     variables = np.array(individuals)
+
+     low = np.array(low)
+     up = np.array(up)
+
+     normalized_vars = ((variables - low) / (up - low)) * (uv - lv) + lv
+     normalized_vars = normalized_vars.tolist()
+
+     for ind, normalized_var in zip(individuals, normalized_vars):
+         ind.normalized_var = normalized_var
+
+
+ # Use estimated objective limits to normalize objectives
+ def obj_normalization(individuals, f_min, f_max):
+     if len(np.shape(individuals)) == 1:
+         individuals = [individuals]
+
+     fvs = np.array([ind.fitness.values for ind in individuals])
+     normalized_fvs = (fvs - f_min) / (f_max - f_min)
+     normalized_fvs = normalized_fvs.tolist()
+
+     for ind, normalized_obj_values in zip(individuals, normalized_fvs):
+         ind.normalized_obj_values = normalized_obj_values
+
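Note: a minimal usage sketch for the two helpers above (not part of the diff). It assumes DEAP-style individuals, i.e. list-like decision vectors carrying a `fitness.values` tuple; the `Individual` class here is a hypothetical stand-in:

    import numpy as np
    from types import SimpleNamespace
    from moospread.utils.mobo_utils.evolution.norm import var_normalization, obj_normalization

    class Individual(list):  # hypothetical stand-in for a DEAP-style individual
        def __init__(self, genes, fvals):
            super().__init__(genes)
            self.fitness = SimpleNamespace(values=fvals)

    pop = [Individual([0.2, 0.8], (1.0, 2.0)), Individual([0.5, 0.1], (0.5, 3.0))]
    var_normalization(pop, low=[0, 0], up=[1, 1])  # attaches .normalized_var in [-1, 1]
    obj_normalization(pop, np.array([0.0, 0.0]), np.array([2.0, 4.0]))  # attaches .normalized_obj_values
    print(pop[0].normalized_var, pop[0].normalized_obj_values)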
moospread/utils/mobo_utils/evolution/utils.py
@@ -0,0 +1,97 @@
+ import numpy as np
+ from moospread.utils.mobo_utils.evolution.norm import obj_normalization
+ from moospread.utils.mobo_utils.evolution.dom import pareto_dominance
+
+ # Several utility functions
+
+
+ # Read estimated objective limits
+ def init_obj_limits(f_min, f_max):
+     if f_min is None:
+         f_min = 0
+     else:
+         f_min = np.array(f_min)
+
+     if f_max is None:
+         f_max = f_min + 1
+     else:
+         f_max = np.array(f_max)
+
+     return f_min, f_max
+
+
+ def init_scalar_rep(pop):
+     rep_individuals = {}
+
+     for ind in pop:
+         update_scalar_rep(rep_individuals, ind)
+
+     return rep_individuals
+
+
+ def update_scalar_rep(rep_individuals, ind):
+     cid = ind.cluster_id
+     rep_ind = rep_individuals.get(cid)
+
+     if rep_ind is None or ind.scalar_dist < rep_ind.scalar_dist:
+         rep_individuals[cid] = ind
+         print(f"Scalar rep in cluster {cid} is updated")
+         return True
+     return False
+
+
+ def get_non_dominated_scalar_rep(rep_individuals):
+     keys = set()
+     for i in rep_individuals:
+         for j in rep_individuals:
+             if j <= i:
+                 continue
+
+             ind1 = rep_individuals[i]
+             ind2 = rep_individuals[j]
+
+             r = pareto_dominance(ind1, ind2)
+             if r == 1:
+                 keys.add(j)
+             elif r == 2:
+                 keys.add(i)
+
+     nd_rep_individuals = {}
+
+     for i in rep_individuals:
+         if i not in keys:
+             nd_rep_individuals[i] = rep_individuals[i]
+
+     return nd_rep_individuals
+
+
+ def get_pareto_rep_ind(s_rep_ind, nd_rep_individuals, ref_points):
+     cid = s_rep_ind.cluster_id
+     rp = ref_points[cid]
+
+     p_rep_ind = None
+     min_dist = float('inf')
+
+     if cid in nd_rep_individuals:
+         return s_rep_ind
+     else:
+         # among non-dominated reps that dominate s_rep_ind, pick the one
+         # whose reference point is closest
+         for i in nd_rep_individuals:
+             ind = nd_rep_individuals.get(i)
+             r = pareto_dominance(ind, s_rep_ind)
+             if r == 1:
+                 dist = np.sum((ref_points[i] - rp) ** 2)
+                 if dist < min_dist:
+                     min_dist = dist
+                     p_rep_ind = ind
+
+     return p_rep_ind
+
+
+ def init_dom_rel_map(size):
+     pareto_rel = np.ones([size, size], dtype=np.int8) * (-1)
+     scalar_rel = np.ones([size, size], dtype=np.int8) * (-1)
+     return pareto_rel, scalar_rel
+
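For orientation (not part of the diff), a quick sketch of the two initialization helpers above; the -1 fill value presumably marks relations that are not yet computed:

    from moospread.utils.mobo_utils.evolution.utils import init_obj_limits, init_dom_rel_map

    f_min, f_max = init_obj_limits(None, None)    # defaults to (0, 1) when no estimates are given
    pareto_rel, scalar_rel = init_dom_rel_map(4)  # two 4x4 int8 matrices filled with -1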
moospread/utils/mobo_utils/learning/__init__.py
File without changes
moospread/utils/mobo_utils/learning/model.py
@@ -0,0 +1,40 @@
+ import torch.nn as nn
+
+
+ class NeuralNet(nn.Module):  # a deep feedforward neural network
+     def __init__(self, input_size, hidden_size=200, num_hidden_layers=2, output_size=3, activation='relu'):
+         super(NeuralNet, self).__init__()
+         act_fun = get_activation(activation)
+         self.input_size = input_size
+         self.first_hidden_layer = nn.Sequential(nn.Linear(input_size, hidden_size), act_fun)
+         self.out_layer = nn.Linear(hidden_size, output_size)
+
+         self.hidden_layers = [self.first_hidden_layer]
+         for _ in range(num_hidden_layers - 1):
+             layer = nn.Sequential(nn.Linear(hidden_size, hidden_size), act_fun)
+             self.hidden_layers.append(layer)
+
+         if num_hidden_layers != 0:
+             self.hidden_layers = nn.ModuleList(self.hidden_layers)
+         else:
+             # with no hidden layers, fall back to a single linear map
+             self.out_layer = nn.Linear(input_size, output_size)
+             self.hidden_layers = []
+
+     def forward(self, x):
+         for layer in self.hidden_layers:
+             x = layer(x)
+
+         x = self.out_layer(x)
+         return x
+
+
+ def get_activation(activation):
+     if activation == "relu":
+         return nn.ReLU()
+     elif activation == "tanh":
+         return nn.Tanh()
+     elif activation == "sigmoid":
+         return nn.Sigmoid()
+     else:
+         raise RuntimeError("activation should be relu/tanh/sigmoid, not %s." % activation)
+
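For orientation (not part of the diff), instantiating the network for pairs of 5-variable solutions:

    import torch
    from moospread.utils.mobo_utils.learning.model import NeuralNet

    net = NeuralNet(input_size=2 * 5, hidden_size=200, num_hidden_layers=2, output_size=3)
    logits = net(torch.randn(8, 10))  # a batch of 8 concatenated solution pairs
    print(logits.shape)               # torch.Size([8, 3]): one score per dominance class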
moospread/utils/mobo_utils/learning/model_init.py
@@ -0,0 +1,33 @@
+ import torch
+ from moospread.utils.mobo_utils.learning.utils import prepare_dom_data, compute_class_weight, load_batched_dom_data, train_nn
+ from moospread.utils.mobo_utils.learning.model import NeuralNet
+ import torch.nn as nn
+
+
+ def init_dom_nn_classifier(x, y, rel_map, dom, n_var, batch_size=32,
+                            activation='relu', lr=0.001, weight_decay=0.00001):
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     input_size = 2 * n_var
+     hidden_size = 200
+     num_hidden_layers = 3
+     epochs = 20
+     data = prepare_dom_data(x, y, rel_map, dom, data_kind='tensor', device=device)
+     weight = compute_class_weight(data[:, -1])
+     # a class is missing from the training data, so no classifier can be trained
+     if weight is None:
+         return None
+
+     net = NeuralNet(input_size, hidden_size, num_hidden_layers,
+                     activation=activation).to(device)
+
+     weight = torch.tensor(weight, device=device).float()
+     criterion = nn.CrossEntropyLoss(weight=weight)
+
+     optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
+     train_nn(data, load_batched_dom_data, net, criterion, optimizer, batch_size, epochs)
+     return net
+
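The training rows this classifier consumes are built by `prepare_dom_data` (see `learning/utils.py` below): each row is a concatenated pair of decision vectors plus a dominance label. A tiny illustration of that encoding (not part of the diff; the label value here is hypothetical):

    import numpy as np

    xi, xj = np.array([0.1, 0.9]), np.array([0.4, 0.2])
    row = xi.tolist() + xj.tolist() + [0]  # [x_i, x_j, label]; length 2 * n_var + 1
    print(row)  # [0.1, 0.9, 0.4, 0.2, 0]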
moospread/utils/mobo_utils/learning/model_update.py
@@ -0,0 +1,51 @@
+ import math
+ import torch
+ import torch.nn as nn
+ from moospread.utils.mobo_utils.learning.utils import *
+ from moospread.utils.mobo_utils.learning.prediction import nn_predict_dom
+
+
+ def update_dom_nn_classifier(net, x, y, rel_map, dom, problem,
+                              max_adjust_epochs=50, batch_size=32, lr=0.001,
+                              acc_thr=0.9, weight_decay=0.00001):
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     max_window_size = 11 * problem.n_var + 24
+     n = len(x)
+     # start = get_start_pos(n, max_window_size)
+
+     # estimate per-class accuracy on the newest data points
+     new_data = prepare_new_dom_data(x, y, rel_map, dom, n - 5, start=0, data_kind='tensor', device=device)
+     labels, _ = nn_predict_dom(new_data[:, :-1], net)
+
+     acc0, acc1, acc2 = get_accuracy(new_data[:, -1], labels)
+     min_acc = min(acc0, acc1, acc2)
+
+     if min_acc >= acc_thr:
+         return
+
+     data = prepare_dom_data(x, y, rel_map, dom, start=0, data_kind='tensor', device=device)
+
+     weight = compute_class_weight(data[:, -1])
+     if weight is None:
+         return
+
+     weight = torch.tensor(weight, device=device).float()
+     criterion = nn.CrossEntropyLoss(weight=weight)
+
+     optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
+
+     # scale the fine-tuning budget with how far accuracy falls below the threshold
+     adjust_epochs = math.ceil(max_adjust_epochs * ((acc_thr - min_acc) / acc_thr))
+     train_nn(data, load_batched_dom_data, net, criterion, optimizer, batch_size, adjust_epochs)
+
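The fine-tuning budget above scales with the accuracy shortfall; in isolation (not part of the diff):

    import math

    acc_thr, min_acc, max_adjust_epochs = 0.9, 0.6, 50
    adjust_epochs = math.ceil(max_adjust_epochs * ((acc_thr - min_acc) / acc_thr))
    print(adjust_epochs)  # 17: a larger shortfall below the threshold buys more epochs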
moospread/utils/mobo_utils/learning/prediction.py
@@ -0,0 +1,116 @@
+ import torch
+ import torch.nn.functional as F
+ import numpy as np
+ from moospread.utils.mobo_utils.evolution.dom import get_inverted_dom_rel
+
+ # This file implements dominance prediction in two different scenarios
+
+
+ def nn_predict_dom_intra(pop, net, device):
+     # predict the dominance relation between any two solutions in pop
+     size = len(pop)
+
+     label_matrix = np.zeros([size, size], dtype=np.int8)
+     conf_matrix = np.ones([size, size])
+
+     data = []
+     indexes = []
+     for i in range(size):
+         for j in range(i + 1, size):
+             ind1 = pop[i]
+             ind2 = pop[j]
+             a = ind1.tolist() + ind2.tolist()
+             data.append(a)
+             indexes.append((i, j))
+
+     data = torch.tensor(data, device=device).float()
+
+     labels, confidences = nn_predict_dom(data, net)
+
+     for k in range(len(indexes)):
+         i, j = indexes[k]
+
+         label_matrix[i, j] = labels[k].item()
+         conf_matrix[i, j] = confidences[k].item()
+
+         # the relation is antisymmetric: derive the (j, i) entry from (i, j)
+         label_matrix[j, i] = get_inverted_dom_rel(label_matrix[i, j])
+         conf_matrix[j, i] = conf_matrix[i, j]
+
+     return label_matrix, conf_matrix
+
+
+ def nn_predict_dom_inter(pop1, pop2, net, device):
+     # predict the dominance relation between any two solutions from pop1 and pop2 respectively
+     size1 = len(pop1)
+     size2 = len(pop2)
+
+     label_matrix = np.zeros([size1, size2], dtype=np.int8)
+     conf_matrix = np.ones([size1, size2])
+     data = []
+     indexes = []
+     for i in range(size1):
+         for j in range(size2):
+             ind1 = pop1[i]
+             ind2 = pop2[j]
+             a = ind1.tolist() + ind2.tolist()
+             data.append(a)
+             indexes.append((i, j))
+
+     data = torch.tensor(data, device=device).float()
+     labels, confidences = nn_predict_dom(data, net)
+     for k in range(len(indexes)):
+         i, j = indexes[k]
+
+         label_matrix[i, j] = labels[k].item()
+         conf_matrix[i, j] = confidences[k].item()
+
+     return label_matrix, conf_matrix
+
+
+ def nn_predict_dom(data, net):
+     n = data.shape[1] // 2
+
+     # the same pairs in swapped order; their predictions should be the inverse relation
+     s_data = torch.cat((data[:, n:], data[:, 0:n]), dim=1)
+
+     net.eval()
+     with torch.no_grad():
+         y = nn_predict(data, net)
+         sy = nn_predict(s_data, net)
+         max_y, max_y_ids = y.max(dim=1)
+         max_sy, max_sy_ids = sy.max(dim=1)
+         # invert the swapped prediction: 0 <-> 2, 1 stays 1
+         max_sy_ids = torch.where(max_sy_ids == 1, max_sy_ids, 2 - max_sy_ids)
+
+         # keep whichever orientation the network is more confident about
+         labels = torch.where(max_y > max_sy, max_y_ids, max_sy_ids)
+         confidences = torch.where(max_y > max_sy, max_y, max_sy)
+
+     return labels, confidences
+
+
+ def nn_predict(data, net, batch_size=1000, max_size=100000):
+     if data.shape[0] < max_size:
+         y = net(data)
+         return F.softmax(y, dim=1)
+     else:
+         # split very large inputs into batches to bound memory use
+         n = data.shape[0]
+         y_list = []
+         i = 0
+         while i < n:
+             j = min(i + batch_size, n)
+
+             bl_data = data[i:j, :]
+
+             y = net(bl_data)
+             y = F.softmax(y, dim=1)
+             y_list.append(y)
+
+             i = j
+
+         return torch.cat(y_list, dim=0)
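The swap-and-invert step in `nn_predict_dom` assumes labels 0 and 2 are mirror relations while 1 is symmetric; in isolation (not part of the diff):

    import torch

    swapped_ids = torch.tensor([0, 1, 2])
    inverted = torch.where(swapped_ids == 1, swapped_ids, 2 - swapped_ids)
    print(inverted)  # tensor([2, 1, 0]): swapping a pair flips 0 <-> 2, leaves 1 unchanged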
moospread/utils/mobo_utils/learning/utils.py
@@ -0,0 +1,143 @@
+ from moospread.utils.mobo_utils.evolution.dom import access_dom_rel
+ import torch
+ import numpy as np
+ import torch.nn as nn
+
+
+ # This file implements some utility functions for training
+
+ def prepare_dom_data(x, y, rel_map, dom, start=0, data_kind='tensor', device='cpu'):
+     n = len(x)
+     data = []
+     for i in range(start, n):
+         for j in range(start, n):
+             if i == j:
+                 continue
+
+             r = access_dom_rel(i, j, x, y, rel_map, dom)
+             d = x[i, :].tolist() + x[j, :].tolist() + [r]
+             data.append(d)
+     return get_packed_data(data, data_kind, device)
+
+
+ def prepare_new_dom_data(x, y, rel_map, dom, split_loc, start=0, data_kind='tensor', device='cpu'):
+     n = len(x)
+
+     data = []
+
+     # pairs across the split: old vs. new points, in both orders
+     for i in range(start, split_loc):
+         for j in range(split_loc, n):
+             r1 = access_dom_rel(i, j, x, y, rel_map, dom)
+             d1 = x[i, :].tolist() + x[j, :].tolist() + [r1]
+
+             r2 = access_dom_rel(j, i, x, y, rel_map, dom)
+             d2 = x[j, :].tolist() + x[i, :].tolist() + [r2]
+
+             data.append(d1)
+             data.append(d2)
+
+     # pairs among the new points themselves
+     for i in range(split_loc, n):
+         for j in range(split_loc, n):
+             if i == j:
+                 continue
+
+             r = access_dom_rel(i, j, x, y, rel_map, dom)
+             d = x[i, :].tolist() + x[j, :].tolist() + [r]
+             data.append(d)
+     return get_packed_data(data, data_kind, device)
+
+
+ def load_batched_dom_data(data, batch_size):
+     batched_data = []
+     n = data.shape[0]
+     # shuffle before batching
+     r = torch.randperm(n)
+     data = data[r, :]
+
+     j = 0
+     while j < n:
+         k = min(j + batch_size, n)
+         x = data[j:k, :-1]
+         y = data[j:k, -1].long()
+         batched_data.append((x, y))
+         j = k
+
+     return batched_data
+
+
+ def compute_class_weight(class_labels):
+     n_examples = len(class_labels)
+
+     class_labels = class_labels.cpu().numpy()
+     n_zero = np.sum(class_labels == 0)
+     n_one = np.sum(class_labels == 1)
+     n_two = n_examples - n_zero - n_one
+
+     # every class must be present, otherwise no meaningful weights exist
+     if n_zero == 0 or n_one == 0 or n_two == 0:
+         return None
+
+     w_zero = n_examples / (3. * n_zero)
+     w_one = n_examples / (3. * n_one)
+     w_two = n_examples / (3. * n_two)
+
+     return w_zero, w_one, w_two
+
+
+ def train_nn(data, data_loader, net, criterion, optimizer, batch_size, epochs):
+     net.train()
+
+     for epoch in range(epochs):  # loop over the dataset multiple times
+         running_loss = 0.0
+         batched_data = data_loader(data, batch_size)
+         for i, d in enumerate(batched_data):
+             # get the inputs; d is a tuple of (inputs, labels)
+             inputs, labels = d
+             # zero the parameter gradients
+             optimizer.zero_grad()
+
+             # forward + backward + optimize
+             outputs = net(inputs)
+
+             loss = criterion(outputs, labels)
+             loss.backward()
+             optimizer.step()
+
+             running_loss += loss.item()
+
+
+ def get_packed_data(data, data_kind, device):
+     if data_kind == 'tensor':
+         return torch.tensor(data, device=device).float()
+     elif data_kind == 'ndarray':
+         return np.array(data)
+     else:
+         raise ValueError(f"{data_kind} is not a supported kind of data")
+
+
+ def get_start_pos(total_size, max_window_size):
+     if max_window_size is not None and total_size > max_window_size:
+         return total_size - max_window_size
+     else:
+         return 0
+
+
+ def reset_parameters(m):
+     if type(m) == nn.Linear:
+         m.reset_parameters()
+
+
+ def get_accuracy(true_labels, pred_labels):
+     acc0 = acc_for_class(true_labels, pred_labels, 0)
+     acc1 = acc_for_class(true_labels, pred_labels, 1)
+     acc2 = acc_for_class(true_labels, pred_labels, 2)
+     return acc0, acc1, acc2
+
+
+ def acc_for_class(true_labels, pred_labels, cls):
+     pls = pred_labels[true_labels == cls]
+
+     # if the class does not occur, report perfect accuracy by convention
+     if len(pls) == 0:
+         return 1
+
+     return (pls == cls).sum().item() / len(pls)
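A quick check of the weighting and accuracy helpers above (not part of the diff):

    import torch
    from moospread.utils.mobo_utils.learning.utils import compute_class_weight, get_accuracy

    labels = torch.tensor([0., 0., 1., 2., 2., 2.])
    print(compute_class_weight(labels))  # approximately (1.0, 2.0, 0.67): rarer classes get larger weights

    true = torch.tensor([0, 0, 1, 2])
    pred = torch.tensor([0, 1, 1, 2])
    print(get_accuracy(true, pred))      # (0.5, 1.0, 1.0): per-class accuracies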