mg-pso-gui 0.1.40__py3-none-any.whl → 0.2.76__py3-none-any.whl
- {mg_pso_gui-0.1.40.dist-info → mg_pso_gui-0.2.76.dist-info}/METADATA +10 -11
- mg_pso_gui-0.2.76.dist-info/RECORD +76 -0
- {mg_pso_gui-0.1.40.dist-info → mg_pso_gui-0.2.76.dist-info}/WHEEL +1 -1
- mgpsogui/gui/General/ParameterView.py +110 -0
- mgpsogui/gui/General/__init__.py +0 -0
- mgpsogui/gui/HomePage.py +234 -238
- mgpsogui/gui/OptionManager.py +333 -145
- mgpsogui/gui/OptionManager_backup.py +443 -0
- mgpsogui/gui/PlatformTab/PlatformTab.py +15 -6
- mgpsogui/gui/RunTab/OptimalParameterView.py +47 -0
- mgpsogui/gui/RunTab/RunTab.py +90 -17
- mgpsogui/gui/SetupTab/BoundsEditorWindow.py +1 -1
- mgpsogui/gui/SetupTab/BoundsList.py +97 -34
- mgpsogui/gui/SetupTab/CustomFunctionEditorWindow.py +74 -0
- mgpsogui/gui/SetupTab/CustomFunctionMetrics.py +156 -0
- mgpsogui/gui/SetupTab/FunctionsList.py +60 -6
- mgpsogui/gui/SetupTab/{StaticParameterView.py → ListEditor.py} +27 -16
- mgpsogui/gui/SetupTab/ListParametersView.py +7 -6
- mgpsogui/gui/SetupTab/{CalibrationParametersView.py → OverrideParameterMetrics.py} +35 -9
- mgpsogui/gui/SetupTab/OverrideParameterWindow.py +40 -0
- mgpsogui/gui/SetupTab/SetupTab.py +31 -11
- mgpsogui/gui/SetupTab/StepView.py +93 -22
- mgpsogui/gui/VisualizeTab/MatrixEditor.py +68 -0
- mgpsogui/gui/VisualizeTab/SideBar.py +358 -61
- mgpsogui/gui/VisualizeTab/VisualizeTab.py +69 -8
- mgpsogui/gui/defaults/__init__.py +0 -0
- mgpsogui/gui/defaults/optimization.json +176 -0
- mgpsogui/gui/defaults/sampling.json +111 -0
- mgpsogui/gui/defaults/sensitivity.json +20 -0
- mgpsogui/gui/images/plus.png +0 -0
- mgpsogui/util/GraphGenerator.py +721 -50
- mgpsogui/util/PSORunner.py +615 -86
- mgpsogui/util/debug.py +559 -0
- mgpsogui/util/helpers.py +95 -0
- mgpsogui/util/recosu/__init__.py +2 -1
- mgpsogui/util/recosu/pso/pso.py +55 -11
- mgpsogui/util/recosu/sampling/__init__.py +16 -0
- mgpsogui/util/recosu/sampling/halton/__init__.py +0 -0
- mgpsogui/util/recosu/sampling/halton/halton.py +45 -0
- mgpsogui/util/recosu/sampling/halton/prime.py +82 -0
- mgpsogui/util/recosu/sampling/random/__init__.py +0 -0
- mgpsogui/util/recosu/sampling/random/random_sampler.py +34 -0
- mgpsogui/util/recosu/sampling/sample_trace_writer.py +47 -0
- mgpsogui/util/recosu/sampling/sampler_task.py +75 -0
- mgpsogui/util/recosu/sampling/sampling.py +99 -0
- mgpsogui/util/sampler_test_driver.py +129 -0
- mg_pso_gui-0.1.40.dist-info/RECORD +0 -52
- mgpsogui/gui/images/IGOW 4 Logo.png +0 -0
- {mg_pso_gui-0.1.40.dist-info → mg_pso_gui-0.2.76.dist-info}/entry_points.txt +0 -0
- {mg_pso_gui-0.1.40.dist-info → mg_pso_gui-0.2.76.dist-info}/top_level.txt +0 -0
mgpsogui/util/recosu/pso/pso.py
CHANGED
@@ -22,7 +22,7 @@ import datetime
 import queue
 import json
 import os
-from multiprocessing import Queue
+from multiprocessing import Queue as MPQueue
 
 
 def eval_cost(x, iteration, step_param_names, step_objfunc, calib_params, req_queue, files, url, param, conf: Dict, rnd,
@@ -82,7 +82,7 @@ def eval_cost(x, iteration, step_param_names, step_objfunc, calib_params, req_qu
 def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters: int, options: Dict,
                 oh_strategy: Dict = None, n_threads: int = 4, rtol: float = 0.001, ftol: float = -np.inf,
                 ftol_iter: int = 1, full_trace: List = None, rtol_iter: int = 1,
-                conf: Dict = None, metainfo: Dict = None, cost_target: float = -np.inf, result_queue: Queue = None) -> Tuple:
+                conf: Dict = None, metainfo: Dict = None, cost_target: float = -np.inf, result_queue: MPQueue = None) -> Tuple:
     """Performs a stepwise particle swarm optimization PSO using a global best approach.
 
     Parameters
@@ -168,7 +168,15 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
     step_trace['min_rounds'] = min_rounds
     step_trace['max_rounds'] = max_rounds
     step_trace['iters'] = iters
-
+
+    # BUG If ftol is -inf set it to a string
+    ftol_value = ftol
+    if ftol == -np.inf:
+        ftol_value = '-inf'
+    elif ftol == np.inf:
+        ftol_value = 'inf'
+
+    step_trace['ftol'] = ftol_value
     step_trace['ftol_iter'] = ftol_iter
     step_trace['rtol'] = rtol
     step_trace['rtol_iter'] = rtol_iter
@@ -176,17 +184,21 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
     step_trace['n_particles'] = n_particles
     step_trace['n_steps'] = len(steps)
     step_trace['steps'] = copy.deepcopy(steps)
-    step_trace['args'] = args
+    step_trace['args'] = str(args) #BUG MUST BE REMOVED
 
     if step_file is not None:
         with open(step_file, "w") as fo:
             json.dump(step_trace, fo)
 
+    print("Wrote step trace")
+
     # best round cost
     best_round_cost = np.inf
 
     # request queue for worker
     req_queue = queue.Queue()
+
+    print("Created queue")
 
     conf = conf or {}
     done = False
@@ -199,6 +211,8 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         thread_pool.append(worker)
         worker.start()
 
+    print("Started worker threads")
+
     r_below = 0
     early_exit = False
     start_time = datetime.datetime.now()
@@ -222,6 +236,8 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
             args['req_queue'] = req_queue
             args['conf'] = conf
 
+            print("Calling global best..")
+
             # create optimizer in the first round.
             if optimizer[s] is None:
                 optimizer[s] = GlobalBestPSO(step.get('n_particles', n_particles),
@@ -232,21 +248,27 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
                                              ftol=step.get('ftol', ftol),
                                              ftol_iter=step.get('ftol_iter', ftol_iter),
                                              cost_target=step.get('cost_target', cost_target))
-
-
+
+            print('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names, args['calib_params']))
 
-            if result_queue is not None:
-                result_queue.put('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names, args['calib_params']))
+            #if result_queue is not None:
+            #    result_queue.put('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names, args['calib_params']))
+
+            print("Filled request queue...")
 
             args['rnd'] = r + 1
             args['step'] = s + 1
 
+            print("Evaluating cost...")
+
             # perform optimization
             cost, pos = optimizer[s].optimize(eval_cost, iters=step.get('iters', iters), **args)
             if cost is None:
                 early_exit = True
                 break
 
+            print("Finished evaluation...")
+
             # capture the best cost
             # if cost < best_cost[s] and np.abs(cost - best_cost[s]) > rtol:
             if cost < best_cost[s]:
@@ -262,7 +284,16 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
             key = "r{}s{}".format(r + 1, s + 1)
             step_trace[key] = {}
             step_trace[key]['time'] = str(datetime.datetime.now())
-
+
+            best_costs_list = best_cost.tolist()
+            # If the cost is inf, set it to a string
+            for i, c in enumerate(best_costs_list):
+                if c == np.inf:
+                    best_costs_list[i] = 'inf'
+                elif c == -np.inf:
+                    best_costs_list[i] = '-inf'
+
+            step_trace[key]['best_costs'] = best_costs_list # BUG
             step_trace[key]['steps'] = copy.deepcopy(steps)
 
             if step_file is not None:
@@ -299,8 +330,17 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         step_trace[key] = {}
         step_trace[key]['time'] = str(datetime.datetime.now())
         step_trace[key]['round_cost'] = round_cost
-
-
+
+        best_costs_list = best_cost.tolist() #BUG
+        # If the cost is inf, set it to a string
+        for i, c in enumerate(best_costs_list):
+            if c == np.inf:
+                best_costs_list[i] = 'inf'
+            elif c == -np.inf:
+                best_costs_list[i] = '-inf'
+
+        step_trace[key]['best_costs'] = best_costs_list
+        step_trace[key]['improvements'] = no_improvement.tolist()
         if step_file is not None:
             with open(step_file, "w") as fo:
                 json.dump(step_trace, fo)
@@ -336,4 +376,8 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
             with open(step_file, "w") as fo:
                 json.dump(step_trace, fo)
 
+    if result_queue is not None:
+        result_queue.put("Step Trace")
+        result_queue.put(step_trace)
+
     return optimizer, step_trace
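The switch to `multiprocessing.Queue` (aliased `MPQueue`) means a parent process such as the GUI can now receive the final `step_trace` through `result_queue`. A minimal consumer sketch, assuming `global_best` runs in a separate worker process with the queue passed as `result_queue` (the worker launch itself is elided here):

```python
# Hypothetical consumer; the payload order follows the diff above:
# a "Step Trace" marker string is put first, then the step_trace dict.
from multiprocessing import Queue

result_queue = Queue()

# ... start a worker process that calls global_best(..., result_queue=result_queue) ...

marker = result_queue.get()          # blocks until "Step Trace" arrives
if marker == "Step Trace":
    step_trace = result_queue.get()  # dict keyed by "r{round}s{step}" plus run metadata
    print(step_trace["n_particles"], step_trace["ftol"])
```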
mgpsogui/util/recosu/sampling/__init__.py
ADDED
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+"""
+LUCA/PSO toolkit
+=========================================
+This is ...
+
+"""
+
+__author__ = """Olaf David"""
+__email__ = "odavid@colostate.edu"
+__version__ = "1.0"
+
+from .sampling import run_sampler
+
+__all__ = ["run_sampler"]
mgpsogui/util/recosu/sampling/halton/__init__.py
File without changes
mgpsogui/util/recosu/sampling/halton/halton.py
ADDED
@@ -0,0 +1,45 @@
+from collections.abc import Iterable
+import math
+from ...sampling.halton.prime import generate_n_primes
+
+
+def halton(index: int, base: int) -> float:
+    fraction: float = 1.0
+    result: float = 0
+
+    while index > 0:
+        fraction = fraction / base
+        result += fraction * (index % base)
+        index = math.floor(index / base)
+
+    return result
+
+
+class HaltonSampleGenerator:
+    index: int
+    maxIndex: int
+    primes: list[int]
+
+    def __init__(self, count: int, offset: int, num_parameters: int):
+        assert (count > 0)
+        assert (offset >= 0)
+        self.index = offset + 1
+        self.maxIndex = offset + count + 1
+        self.primes = generate_n_primes(num_parameters)
+
+    def __iter__(self) -> Iterable[tuple[int, list[float]]]:
+        return self
+
+    def __next__(self) -> tuple[int, list[float]]:
+        if self.index >= self.maxIndex:
+            raise StopIteration
+
+        i = 0
+        values: list[float] = []
+        for base in self.primes:
+            values.append(halton(self.index, base))
+
+        result: tuple[int, list[float]] = (self.index, values)
+        self.index = self.index + 1
+        return result
+
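`halton(index, base)` computes the base-`base` radical inverse of `index`, so consecutive indices fill the unit interval evenly; the generator assigns one prime base per parameter. A small sketch (the import path is taken from the file listing above; the values in the comments are just the expected low-index sequence):

```python
from mgpsogui.util.recosu.sampling.halton.halton import HaltonSampleGenerator, halton

# Base-2 radical inverse: 1 -> 0.5, 2 -> 0.25, 3 -> 0.75, 4 -> 0.125, ...
print([halton(i, 2) for i in range(1, 5)])   # [0.5, 0.25, 0.75, 0.125]

# A two-parameter generator uses the first two primes (2 and 3) as bases and
# yields (index, [u1, u2]) pairs with each u in [0, 1).
for index, values in HaltonSampleGenerator(count=3, offset=0, num_parameters=2):
    print(index, values)   # 1 [0.5, 0.333...], 2 [0.25, 0.666...], 3 [0.75, 0.111...]
```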
mgpsogui/util/recosu/sampling/halton/prime.py
ADDED
@@ -0,0 +1,82 @@
+from typing import List
+
+PRIME_TABLE: List[int] = [
+    2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
+    31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
+    73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
+    127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
+    179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
+    233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
+    283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
+    353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
+    419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
+    467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
+    547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
+    607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
+    661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
+    739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
+    811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
+    877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
+    947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013,
+    1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069,
+    1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151,
+    1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,
+    1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291,
+    1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373,
+    1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451,
+    1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511,
+    1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583,
+    1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657,
+    1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733,
+    1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811,
+    1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889,
+    1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987,
+    1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053,
+    2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129,
+    2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213,
+    2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287,
+    2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357,
+    2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423,
+    2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531,
+    2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617,
+    2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687,
+    2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741,
+    2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819,
+    2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903,
+    2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999,
+    3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079,
+    3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181,
+    3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257,
+    3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331,
+    3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413,
+    3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511,
+    3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571,
+    3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643,
+    3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727,
+    3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821,
+    3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907,
+    3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989,
+    4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057,
+    4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139,
+    4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231,
+    4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297,
+    4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409
+]
+
+
+def generate_n_primes(n: int) -> List[int]:
+    if n < len(PRIME_TABLE):
+        return PRIME_TABLE[0:n]
+
+    primes: List[int] = PRIME_TABLE.copy()
+    num: int = primes[-1] + 2
+    while len(primes) < n:
+        is_prime = True
+        for p in primes:
+            if num % p == 0:
+                is_prime = False
+                break
+        if is_prime:
+            primes.append(num)
+        num += 2
+    return primes
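`generate_n_primes` serves the first `n` primes straight from the lookup table and falls back to trial division against the primes already collected once the table is exhausted. A quick sketch (import path assumed from the file listing):

```python
from mgpsogui.util.recosu.sampling.halton.prime import generate_n_primes

print(generate_n_primes(5))               # [2, 3, 5, 7, 11] straight from PRIME_TABLE
print(len(generate_n_primes(700)))        # 700; the table holds 600 entries, so the
                                          # last 100 are computed by trial division
```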
mgpsogui/util/recosu/sampling/random/__init__.py
File without changes
mgpsogui/util/recosu/sampling/random/random_sampler.py
ADDED
@@ -0,0 +1,34 @@
+from collections.abc import Iterable
+import random
+
+
+class RandomSampleGenerator:
+    index: int
+    count: int
+    num_parameters: int
+    rand: random.Random
+
+    def __init__(self, count: int, num_parameters: int):
+        assert (count > 0)
+        self.index = 1
+        self.count = count + 1
+        self.num_parameters = num_parameters
+        self.rand = random.Random()
+
+    def __iter__(self) -> Iterable[tuple[int, list[float]]]:
+        return self
+
+    def __next__(self) -> tuple[int, list[float]]:
+        if self.index >= self.count:
+            raise StopIteration
+
+        i: int = 0
+        values: list[float] = []
+        while i < self.num_parameters:
+            values.append(self.rand.uniform(0, 1))
+            i = i + 1
+
+        result: tuple[int, list[float]] = (self.index, values)
+        self.index = self.index + 1
+        return result
+
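`RandomSampleGenerator` follows the same iterator protocol as the Halton generator but draws uniform pseudo-random values instead of a low-discrepancy sequence. A brief sketch (import path assumed from the file listing):

```python
from mgpsogui.util.recosu.sampling.random.random_sampler import RandomSampleGenerator

# Yields (sample_id, [u1, u2, u3]) with each u drawn uniformly from [0, 1).
for sample_id, values in RandomSampleGenerator(count=2, num_parameters=3):
    print(sample_id, values)
```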
mgpsogui/util/recosu/sampling/sample_trace_writer.py
ADDED
@@ -0,0 +1,47 @@
+import threading
+
+
+class SampleTraceWriter:
+    trace_file: str
+    parameter_indices: dict[int, str]
+    objective_indices: dict[int, str]
+    write_lock: threading.Lock
+
+    def __init__(self, trace_file: str):
+        assert(trace_file is not None and len(trace_file) > 0)
+        self.trace_file = trace_file
+        self.parameter_indices = {}
+        self.objective_indices = {}
+        self.write_lock = threading.Lock()
+
+    def write_header(self, parameter_names: list[str], objective_names: list[str]) -> None:
+        with self.write_lock:
+            with open(self.trace_file, 'w') as writer:
+                writer.write("id")
+                self.parameter_indices = {}
+                index: int = 0
+                for name in parameter_names:
+                    writer.write(",{}".format(name))
+                    self.parameter_indices[index] = name
+                    index = index + 1
+                self.objective_indices = {}
+                index = 0
+                for name in objective_names:
+                    writer.write(",{}".format(name))
+                    self.objective_indices[index] = name
+                    index = index + 1
+                writer.write("\n")
+
+    def append_sample(self, sample_id: int, parameters: dict[str, any], objectives: dict[str, any]) -> None:
+        with self.write_lock:
+            with open(self.trace_file, 'a') as writer:
+                writer.write("{}".format(sample_id))
+                index: int = 0
+                while index < len(self.parameter_indices):
+                    writer.write(",{}".format(parameters[self.parameter_indices[index]]))
+                    index = index + 1
+                index = 0
+                while index < len(self.objective_indices):
+                    writer.write(",{}".format(objectives[self.objective_indices[index]]))
+                    index = index + 1
+                writer.write("\n")
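`SampleTraceWriter` produces a plain CSV with one `id` column followed by the parameter and objective columns, appending one row per sample under a lock so worker threads can write concurrently. A short sketch of the resulting file (the column names here are placeholders, not values from the source):

```python
from mgpsogui.util.recosu.sampling.sample_trace_writer import SampleTraceWriter

writer = SampleTraceWriter("trace.csv")
writer.write_header(["soilOutLPS", "flowRouteTA"], ["ns", "kge"])
writer.append_sample(1, {"soilOutLPS": 1.2, "flowRouteTA": 3.4}, {"ns": 0.71, "kge": 0.68})
# trace.csv now contains:
# id,soilOutLPS,flowRouteTA,ns,kge
# 1,1.2,3.4,0.71,0.68
```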
mgpsogui/util/recosu/sampling/sampler_task.py
ADDED
@@ -0,0 +1,75 @@
+import os
+from csip import Client
+
+
+class SamplerTask:
+    task_id: int
+    parameters: dict[str, any]
+    objectives: list[dict[str, any]]
+    static_parameters: dict[str, any]
+    url: str
+    files: list[str]
+    metainfo: dict[str, any]
+    conf: dict[str, any]
+    result: dict[str, any]
+
+    def __init__(self, task_id: int, parameters: dict[str, any], objectives: list[dict[str, any]],
+                 static_parameters: dict[str, any], url: str, files: list[str] = None, metainfo: dict[str, any] = None,
+                 conf: dict[str, any] = None):
+        self.task_id = task_id
+        assert (parameters is not None and len(parameters) > 0)
+        self.parameters = parameters
+        assert (objectives is not None and len(objectives) > 0)
+        self.objectives = objectives
+        self.static_parameters = static_parameters if static_parameters is not None else []
+        assert (url is not None and len(url) > 0)
+        self.url = url
+        self.files = files if files is not None else []
+        self.metainfo = metainfo
+        self.conf = conf
+
+    def create_request(self) -> Client:
+        request: Client = Client(metainfo=self.metainfo)
+
+        for key, value in self.static_parameters.items():
+            request.add_data(key, value)
+
+        for key, value in self.parameters.items():
+            request.add_data(key, value)
+
+        for of in self.objectives:
+            request.add_cosu(of['name'], of['of'], of['data'])
+
+        return request
+
+    def run_task(self) -> bool:
+        self.result = {}
+        request: Client = self.create_request()
+        async_call: bool = self.conf.get('async_call', True) if self.conf is not None else True
+        # save response, set it to a folder if responses should be saved.
+        save_resp = self.conf.get('save_response_to', None) if self.conf is not None else None
+        successful: bool = False
+
+        response: Client = None
+        try:
+            if async_call:
+                response = request.execute_async(self.url, files=self.files, conf=self.conf)
+            else:
+                response = request.execute(self.url, files=self.files, conf=self.conf)
+
+            successful = response.is_finished()
+            if not successful:
+                print(response)
+
+            if save_resp:
+                response.save_to(os.path.join(save_resp, 'task_{}.json'.format(self.task_id)))
+
+            objectives: list[dict[str, str]] = response.get_metainfo("cosu")
+            for of in objectives:
+                self.result[of["name"]] = of["value"]
+        except Exception as ex:
+            print(ex)
+            print(response)
+            successful = False
+
+        return successful
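Each `SamplerTask` wraps a single CSIP request: static and sampled parameters go in via `add_data`, and every objective entry is expected to carry `name`, `of`, and `data` keys for `add_cosu`. A construction sketch (the URL, parameter names, and objective entry are placeholders; importing the module and calling `run_task` requires the `csip` package and a reachable service):

```python
from mgpsogui.util.recosu.sampling.sampler_task import SamplerTask

task = SamplerTask(
    task_id=1,
    parameters={"soilOutLPS": 1.2},                  # sampled values for this run
    objectives=[{"name": "ns", "of": "ns",
                 "data": ["obs.csv", "sim.csv"]}],   # shape consumed by add_cosu above
    static_parameters={"startTime": "2000-01-01"},   # constant model inputs
    url="https://example.org/csip-model/d/run",      # placeholder service endpoint
)
# task.run_task() would POST the request and, on success, fill task.result
# with one entry per objective, e.g. {"ns": <value>}.
```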
mgpsogui/util/recosu/sampling/sampling.py
ADDED
@@ -0,0 +1,99 @@
+from collections.abc import Iterable
+import math
+import asyncio
+import concurrent
+import datetime
+from ..utils import utils
+from ..sampling.halton.halton import HaltonSampleGenerator
+from ..sampling.random.random_sampler import RandomSampleGenerator
+from ..sampling.sampler_task import SamplerTask
+from ..sampling.sample_trace_writer import SampleTraceWriter
+
+
+def weighted_value(weight: float, lower: float, upper: float) -> float:
+    return lower + weight * (upper - lower)
+
+
+def get_static_parameters(args: dict[str, any]) -> dict[str, any]:
+    static_parameters: dict[str, any] = {}
+    for param in args["param"]:
+        static_parameters[param["name"]] = param["value"]
+    return static_parameters
+
+
+def get_objective_names(objfunc: dict[str, any]) -> list[str]:
+    objective_names: list[str] = []
+    for of in objfunc:
+        objective_names.append(of["name"])
+    return objective_names
+
+
+def thread_function(task: SamplerTask) -> tuple[bool, SamplerTask]:
+    return task.run_task(), task
+
+
+def create_generator(method: str, count: int, num_parameters: int, **kwargs) -> Iterable[tuple[int, list[float]]]:
+    if method == "halton":
+        offset: int = 0
+        if "offset" in kwargs:
+            offset = kwargs["offset"]
+        return HaltonSampleGenerator(count, offset, num_parameters)
+    elif method == "random":
+        return RandomSampleGenerator(count, num_parameters)
+
+    raise Exception("Sampling method is not recognized")
+
+
+def run_sampler(steps: list[dict[str, any]], args: dict[str, any], count: int, num_threads: int, method: str = "halton",
+                metainfo: dict[str, any] = None, conf: dict[str, any] = None, trace_file: str = "trace.csv",
+                **kwargs) -> dict[int, tuple[dict[str, any], dict[str, any]]]:
+    param_names, bounds, objfunc = utils.get_step_info(steps, 0)
+    generator: Iterable[tuple[int, list[float]]] = create_generator(method, count, len(param_names), **kwargs)
+    objective_names: list[str] = get_objective_names(objfunc)
+    static_parameters: dict[str, any] = get_static_parameters(args)
+    url: str = args["url"]
+    files: list[str] = args["files"]
+
+    trace: dict[int, tuple[dict[str, float], dict[str, float]]] = {}
+    trace_writer: SampleTraceWriter = SampleTraceWriter(trace_file)
+    trace_writer.write_header(param_names, objective_names)
+
+    with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
+        futures = []
+        for sample_id, sample in generator:
+            params: dict[str, float] = {}
+            index: int = 0
+            while index < len(sample):
+                params[param_names[index]] = weighted_value(sample[index], bounds[0][index], bounds[1][index])
+                index += 1
+
+            task: SamplerTask = SamplerTask(sample_id, params, objfunc, static_parameters, url, files, metainfo, conf)
+            futures.append(executor.submit(thread_function, task))
+        # for future in concurrent.futures.as_completed(futures):
+        #     pass
+        num_finished: int = 0
+        percentage: float
+        last_percentage: float = 0
+        for future in concurrent.futures.as_completed(futures):
+            try:
+                successful, task = future.result()
+
+                if successful:
+                    trace[task.task_id] = (task.parameters, task.result)
+                    trace_writer.append_sample(task.task_id, task.parameters, task.result)
+                else:
+                    print("Failed to successfully execute task: {}", task.task_id, flush=True)
+            except asyncio.CancelledError as ce:
+                pass
+            except asyncio.InvalidStateError as ise:
+                pass
+            except Exception as ex:
+                print(ex, flush=True)
+
+            num_finished = num_finished + 1
+            percentage = math.trunc(num_finished / count * 1000) / 10
+            if percentage > last_percentage:
+                last_percentage = percentage
+                print("{}% Done {}".format(percentage, datetime.datetime.now()), flush=True)
+
+    return trace
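`run_sampler` ties the pieces together: it reads the first step's parameter names and bounds, scales each generated sample into those bounds with `weighted_value`, fans the resulting `SamplerTask`s out over a thread pool, streams finished rows to `trace_file`, and returns a dict mapping sample id to `(parameters, objective values)`. A usage sketch, not runnable as-is, assuming `steps` and `args` are already built in the same structure the PSO code uses (step parameter bounds plus `args["param"]`, `args["url"]`, `args["files"]`):

```python
from mgpsogui.util.recosu.sampling.sampling import run_sampler

# steps and args are assumed to be constructed elsewhere, as for global_best;
# only step 0 is sampled.
trace = run_sampler(steps, args, count=100, num_threads=8,
                    method="halton", offset=0, trace_file="trace.csv")

for sample_id, (params, objectives) in trace.items():
    print(sample_id, params, objectives)
```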