mg-pso-gui 0.2.125__py3-none-any.whl → 0.2.126__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/METADATA +1 -1
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/RECORD +10 -8
- mgpsogui/gui/HomePage.py +5 -5
- mgpsogui/gui/SetupTab/ListParametersView.py +1 -1
- mgpsogui/util/recosu/pso/pso modified.py +585 -0
- mgpsogui/util/recosu/pso/pso.py +48 -292
- mgpsogui/util/recosu/pso/pso_new.py +627 -0
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/WHEEL +0 -0
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/entry_points.txt +0 -0
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/top_level.txt +0 -0
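Most of the substantive change below is in mgpsogui/util/recosu/pso/pso.py: the module-level cost2 bookkeeping is removed, the optimizer argument changes from args['step_param'] = step['param'] to args['step_param_names'] = param_names, and eval_cost now enqueues one request tuple per particle on req_queue and then blocks on a per-call res_queue until every particle's cost has been reported. As a reading aid, here is a minimal, self-contained sketch of that request/response round trip; fake_worker is a hypothetical stand-in for the package's csip_worker, and the parameter names and objective label are made up for illustration.

import queue
import threading
import numpy as np

def fake_worker(req_queue, stop):
    # Hypothetical stand-in for csip_worker: pop one request tuple, "evaluate"
    # the particle, and report (particle, cost) on the response queue it carries.
    while not stop():
        try:
            rnd, step, it, particle, x, names, calib, objfunc, res_queue = req_queue.get(timeout=0.1)
        except queue.Empty:
            continue
        res_queue.put((particle, float(np.sum(x[particle, :] ** 2))))  # placeholder objective
        req_queue.task_done()

req_q = queue.Queue()
done = False
threading.Thread(target=fake_worker, args=(req_q, lambda: done), daemon=True).start()

x = np.random.rand(4, 3)              # 4 particles, 3 parameters
res_q = queue.Queue()
for particle in range(len(x[:, 0])):  # same tuple shape eval_cost enqueues below
    req_q.put((1, 1, 0, particle, x, ['p1', 'p2', 'p3'], {}, 'rmse', res_q))

cost = np.full(len(x[:, 0]), np.nan)
for _ in range(len(x[:, 0])):
    particle, p_cost = res_q.get()
    cost[particle] = p_cost
done = True
print(cost)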
mgpsogui/util/recosu/pso/pso.py
CHANGED
@@ -9,7 +9,6 @@
 # OMSLab licenses this file to you under the MIT license.
 # See the LICENSE file in the project root for more information.
 #
-import numpy
 
 from ..utils import utils
 from .csip_access import csip_worker
@@ -25,9 +24,8 @@ import json
 import os
 from multiprocessing import Queue as MPQueue
 
-cost2 = {}
 
-def eval_cost(x, iteration,
+def eval_cost(x, iteration, step_param_names, step_objfunc, calib_params, req_queue, files, url, param, conf: Dict, rnd,
               step):
     particles = len(x[:, 0])
 
@@ -43,13 +41,7 @@ def eval_cost(x, iteration, step_param, step_objfunc, calib_params, req_queue, f
         # submit for processing
         # for i_particle, v in enumerate(x[:, 0]):
         for particle in range(particles):
-            req_queue.put((rnd, step, iteration, particle, x,
-            # print(' rnd: ', rnd)
-            # print(' step: ', step)
-            # print(' interation: ', iteration)
-            # print(' particle: ', particle)
-            # print(' x[particle,:]: ', x[particle,:])
-
+            req_queue.put((rnd, step, iteration, particle, x, step_param_names, calib_params, step_objfunc, res_queue))
         # req_queue.put((i_particle, x[i_particle,:], step_param_names, calib_params, step_objfunc, res_queue))
 
         # wait for the cost value to come back
@@ -57,16 +49,6 @@ def eval_cost(x, iteration, step_param, step_objfunc, calib_params, req_queue, f
         for idx in range(particles):
             (particle, p_cost) = res_queue.get()
             cost[particle] = p_cost
-            cost1 = []
-            cost1.append(iteration)
-            cost1.append(p_cost)
-            cost1.append(str('r{}s{}i{}p{}.json'.format(rnd, step, iteration, particle)))
-
-            if particle not in cost2:
-                cost2[particle] = []
-
-            cost2[particle].append(cost1)
-
             res_queue.task_done()
 
         res_queue.join()
@@ -81,6 +63,7 @@ def eval_cost(x, iteration, step_param, step_objfunc, calib_params, req_queue, f
         # leave the loop if fails acceptable
         if failed_particles <= pfail_count:
             break
+
         print("Re-running particles, since ", failed_particles, ' out of ', particles, ' particles failed.')
         pfail_retry -= 1
 
@@ -93,15 +76,9 @@ def eval_cost(x, iteration, step_param, step_objfunc, calib_params, req_queue, f
     mean = np.nanmean(cost)
     cost[nan_idx[0]] = mean
 
-    for key in cost2:
-        # print(key, ' - ', cost2[key])
-        cost2[key][iteration][1] = cost[key]  # BUG LIST INDEX OUT OF RANGE?
-        # print(key, ' - ', cost2[key])
-
     print(flush=True)
     return cost
 
-
 def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters: int, options: Dict,
                 oh_strategy: Dict = None, n_threads: int = 4, rtol: float = 0.001, ftol: float = -np.inf,
                 ftol_iter: int = 1, full_trace: List = None, rtol_iter: int = 1,
@@ -183,22 +160,6 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
     best_cost = np.ones(len(steps)) * np.inf
    optimizer = np.empty(len(steps), dtype=object)
 
-    # best_pos = {}
-    #
-    # best_pos[0] = numpy.array([1.18792343e+02, 5.43472746e+01, 7.10091373e+01, 1.80144959e+00,
-    #        2.63979951e+00, 4.61775754e+00, 4.84808030e-01, 3.97179059e+00,
-    #        4.12612823e+00, 2.29275033e-01, 5.86661573e+01, 4.33933491e-01,
-    #        3.80515317e-01, 2.32299702e+01, 1.29697400e+01, 3.94149865e+01,
-    #        2.78110081e+01, 1.71484176e+01, 4.59081223e+01, 3.25059995e+01,
-    #        3.03662465e+01, 4.15040920e-01, 4.21613876e-01, 4.07747156e-01,
-    #        4.32604236e-01, 4.19428929e-01, 4.01926017e-01, 4.36295072e-01,
-    #        4.37658392e-01, 4.14423735e-01, 4.39537540e-01, 2.65952198e-01,
-    #        2.63096106e-01, 2.24934845e-01, 1.66953435e-01, 2.32302802e-01,
-    #        2.55939246e-01, 2.42916828e-01, 2.39205412e-01, 2.79600625e-01,
-    #        9.58733328e-02, 8.08481274e-02, 7.34124368e-02, 1.04667432e-01,
-    #        1.26246347e-01, 1.14700200e-01, 1.22694002e-01, 7.86003659e-02,
-    #        1.34393803e-01])
-
     # trace of steps info
     step_trace = {}
 
@@ -207,7 +168,7 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
     step_trace['min_rounds'] = min_rounds
     step_trace['max_rounds'] = max_rounds
     step_trace['iters'] = iters
-
+
     # BUG If ftol is -inf set it to a string
     ftol_value = ftol
     if ftol == -np.inf:
@@ -216,7 +177,6 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         ftol_value = 'inf'
 
     step_trace['ftol'] = ftol_value
-
     step_trace['ftol_iter'] = ftol_iter
     step_trace['rtol'] = rtol
     step_trace['rtol_iter'] = rtol_iter
@@ -224,14 +184,13 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
     step_trace['n_particles'] = n_particles
     step_trace['n_steps'] = len(steps)
     step_trace['steps'] = copy.deepcopy(steps)
-
-    #step_trace['args'] = str(args) BUG MUST BE REMOVED?
-    step_trace['args'] = args
+    step_trace['args'] = str(args) #BUG MUST BE REMOVED
 
     if step_file is not None:
         with open(step_file, "w") as fo:
             json.dump(step_trace, fo)
 
+    print("Wrote step trace")
 
     # best round cost
     best_round_cost = np.inf
@@ -239,6 +198,7 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
     # request queue for worker
     req_queue = queue.Queue()
 
+    print("Created queue")
 
     conf = conf or {}
     done = False
@@ -251,13 +211,13 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         thread_pool.append(worker)
         worker.start()
 
+    print("Started worker threads")
 
     r_below = 0
     early_exit = False
     start_time = datetime.datetime.now()
     for r in range(max_rounds):
         no_improvement = np.full(len(steps), True)
-        best_step_request = None
         for s, step in enumerate(steps):
 
             # check if forced exit.
@@ -268,8 +228,7 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
 
             param_names, bounds, objfunc = utils.get_step_info(steps, s)
             # maybe clone args?
-
-            args['step_param'] = step['param']
+            args['step_param_names'] = param_names
             args['step_objfunc'] = objfunc
             # get calibrated parameter from all other steps
             args['calib_params'] = utils.get_calibrated_params(steps, s)
@@ -278,30 +237,25 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
             args['conf'] = conf
 
             print("Calling global best..")
-            # if r < 1:
-            # best_pos[s] = np.full(len(param_names), True)
-            # best_pos[s] = np.empty(len(param_names), dtype=object)
-            # best_pos[s] = None
 
             # create optimizer in the first round.
+            if optimizer[s] is None:
+                optimizer[s] = GlobalBestPSO(step.get('n_particles', n_particles),
+                                             len(param_names),
+                                             oh_strategy=step.get('oh_strategy', oh_strategy),
+                                             options=step.get('options', options),
+                                             bounds=bounds,
+                                             ftol=step.get('ftol', ftol),
+                                             ftol_iter=step.get('ftol_iter', ftol_iter),
+                                             cost_target=step.get('cost_target', cost_target))
+
+            print('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names, args['calib_params']))
+
             #if result_queue is not None:
             #    result_queue.put('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names, args['calib_params']))
 
             print("Filled request queue...")
 
-            if optimizer[s] is None:
-                # if r <= 1:
-                optimizer[s] = GlobalBestPSO(step.get('n_particles', n_particles),
-                                             len(param_names),
-                                             oh_strategy=step.get('oh_strategy', oh_strategy),
-                                             options=step.get('options', options),
-                                             bounds=bounds,
-                                             ftol=step.get('ftol', ftol),
-                                             ftol_iter=step.get('ftol_iter', ftol_iter),
-                                             cost_target=step.get('cost_target', cost_target),
-                                             init_pos=None)
-                print('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names, args['calib_params']))
-
             args['rnd'] = r + 1
             args['step'] = s + 1
 
@@ -309,30 +263,18 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
 
             # perform optimization
             cost, pos = optimizer[s].optimize(eval_cost, iters=step.get('iters', iters), **args)
-
-            for key in cost2:
-                # print(key, ' - ', cost2[key])
-                inner_arrays = cost2[key]
-                for arrays_part in inner_arrays:
-                    if arrays_part[1] == cost:
-                        print(' best-file ', arrays_part[2])
-
-            cost2.clear()
-            print(' cost: ', cost, ' pos: ', pos)
             if cost is None:
                 early_exit = True
                 break
 
             print("Finished evaluation...")
-            if cost == best_cost[s]:
-                print(' !! equal cost !!!')
 
-            #
-            #
-
-
-
-
+            # capture the best cost
+            # if cost < best_cost[s] and np.abs(cost - best_cost[s]) > rtol:
+            if cost < best_cost[s]:
+                best_cost[s] = cost
+                no_improvement[s] = False
+                utils.annotate_step(best_cost[s], pos, steps, s)
 
             print('\n Step summary, best particle values: {} '.format(pos))
 
@@ -342,20 +284,18 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
             key = "r{}s{}".format(r + 1, s + 1)
             step_trace[key] = {}
             step_trace[key]['time'] = str(datetime.datetime.now())
-
-
-
+
+            best_costs_list = best_cost.tolist()
+            # If the cost is inf, set it to a string
+            for i, c in enumerate(best_costs_list):
+                if c == np.inf:
+                    best_costs_list[i] = 'inf'
+                elif c == -np.inf:
+                    best_costs_list[i] = '-inf'
+
+            step_trace[key]['best_costs'] = best_costs_list # BUG
             step_trace[key]['steps'] = copy.deepcopy(steps)
 
-            # capture the best cost
-            # if cost < best_cost[s] and np.abs(cost - best_cost[s]) > rtol:
-            if cost < best_cost[s]:
-                best_cost[s] = cost
-                no_improvement[s] = False
-                utils.annotate_step(best_cost[s], pos, steps, s)
-                best_step_request = key
-                # best_pos[s] = pos
-
             if step_file is not None:
                 with open(step_file, "w") as fo:
                     json.dump(step_trace, fo)
@@ -385,14 +325,22 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         if result_queue is not None:
             result_queue.put('\n Progress - best_round_cost:{}, rel_round_tol:{}, rtol:{}'
                              .format(best_round_cost, rel_round_tol, rtol))
-        print('\n Progress - best_step_request:{}'.format(best_step_request))
 
         key = "r{}".format(r + 1)
         step_trace[key] = {}
         step_trace[key]['time'] = str(datetime.datetime.now())
         step_trace[key]['round_cost'] = round_cost
-
-
+
+        best_costs_list = best_cost.tolist() #BUG
+        # If the cost is inf, set it to a string
+        for i, c in enumerate(best_costs_list):
+            if c == np.inf:
+                best_costs_list[i] = 'inf'
+            elif c == -np.inf:
+                best_costs_list[i] = '-inf'
+
+        step_trace[key]['best_costs'] = best_costs_list
+        step_trace[key]['improvements'] = no_improvement.tolist()
         if step_file is not None:
             with open(step_file, "w") as fo:
                 json.dump(step_trace, fo)
@@ -433,195 +381,3 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         result_queue.put(step_trace)
 
     return optimizer, step_trace
-
-# def p_global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters: int, options: Dict,
-#                   n_threads: int = 4, rtol: float = 0.001, ftol: float = -np.inf,
-#                   full_trace: List = None, rounds_below: int = 1) -> Tuple:
-#     """Performs a parallel stepwise particle swarm optimization PSO using a global best approach.
-#
-#     Parameters
-#     ----------
-#     steps : Dict
-#         step definitions
-#     rounds : tuple
-#         round definition, (min,max) or max
-#     args : Dict
-#         static service args
-#     n_particles : int
-#         number of particles
-#     iters : int
-#         number of iterations
-#     options : Dict
-#         PSO options (see pyswarms)
-#     n_threads : int
-#         size of thread pool (default: 4)
-#     rtol : float
-#         percentage of change of sum(best_cost) between rounds for
-#         convergence. (Default is 0.001 0.1%)
-#     ftol : float
-#         PSO tolerance (default: -np.inf)
-#     full_trace : List
-#         trace of all runs, list of tuples
-#         first is dictionary of parameter names to parameter values
-#         second is the cost value (default: None)
-#     rounds_below : int
-#         the number of subsequent rounds with sum(best_cost) < rtol
-#         (default: 1)
-#     Returns
-#     -------
-#     Tuple
-#         optimizer: List, step_trace: Dict
-#     """
-#
-#     utils.check_url(args['url'])
-#
-#     min_rounds = 1
-#     if type(rounds) == tuple:
-#         min_rounds = rounds[0]
-#         max_rounds = rounds[1]
-#     else:
-#         max_rounds = rounds
-#
-#     if min_rounds < 1:
-#         raise Exception('min rounds >= 1 expected, was "{}"'.format(min_rounds))
-#
-#     if max_rounds > 20:
-#         raise Exception('max rounds <= 20 expected, was "{}"'.format(max_rounds))
-#
-#     if n_threads < 1:
-#         raise Exception('n_threads >= 1, was "{}"'.format(n_threads))
-#
-#     if rounds_below < 1:
-#         raise Exception('rounds_below >= 1, was "{}"'.format(rounds_below))
-#
-#     if full_trace is not None and not isinstance(full_trace, list):
-#         raise Exception('full_trace must be of type, was "{}"'.format(type(full_trace)))
-#
-#     best_cost = np.ones(len(steps))
-#     cost = np.ones(len(steps))
-#     optimizer = np.empty(len(steps), dtype=object)
-#     pos = np.empty(len(steps), dtype=object)
-#     args_s = np.empty(len(steps), dtype=object)
-#
-#     # trace of steps info
-#     step_trace = {}
-#
-#     # best round cost
-#     best_round_cost = 1000
-#
-#     # request queue for worker
-#     req_queue = queue.Queue()
-#
-#     # Threadpool management
-#     done = False
-#     thread_pool = []
-#     for thread_no in range(n_threads):
-#         worker = Thread(target=csip_worker, args=(req_queue, thread_no, lambda: done,
-#                                                    full_trace, args['url'], args['files'], args['param']))
-#         thread_pool.append(worker)
-#         worker.start()
-#
-#     start_time = datetime.datetime.now()
-#
-#     # setup Step PSOs
-#     for s, step in enumerate(steps):
-#         param_names, bounds, objfunc = utils.get_step_info(steps, s)
-#         optimizer[s] = GlobalBestPSO(n_particles,
-#                                      len(param_names),
-#                                      options=options,
-#                                      bounds=bounds,
-#                                      ftol=ftol)
-#
-#     def step_thread(s, args):
-#         cost[s], pos[s] = optimizer[s].optimize(eval_cost, iters=iters, **args)
-#
-#     # Run PSOs in parallel
-#     r_below = 0
-#     for r in range(max_rounds):
-#         no_improvement = np.full(len(steps), True)
-#         # check if forced exit.
-#         if path.exists("stop"):
-#             print('\n>>>>> stop file found, exit now.')
-#             break
-#
-#         # cross copy calibrated parameter, setup the arguments
-#         for s, step in enumerate(steps):
-#             param_names, bounds, objfunc = utils.get_step_info(steps, s)
-#             # maybe clone args?
-#             args_s[s] = args
-#             args_s[s]['step_param_names'] = param_names
-#             args_s[s]['step_objfunc'] = objfunc
-#             # get calibrated parameter from all other steps
-#             args_s[s]['calib_params'] = utils.get_calibrated_params(steps, s)
-#
-#             args_s[s]['req_queue'] = req_queue
-#
-#             # create optimizer in the first round.
-#             print('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names,
-#                                                                                        args_s[s]['calib_params']))
-#
-#         # perform optimization
-#         s_threads = []
-#         for s, step in enumerate(steps):
-#             s_thread = threading.Thread(target=step_thread, args=(s, args_s[s]))
-#             s_threads.append(s_thread)
-#             s_thread.start()
-#
-#         for t in s_threads:
-#             t.join()
-#
-#         # eval cost
-#         for s, step in enumerate(steps):
-#             # capture the best cost
-#             # if cost < best_cost[s] and np.abs(cost - best_cost[s]) > rtol:
-#             if cost[s] < best_cost[s]:
-#                 best_cost[s] = cost[s]
-#                 no_improvement[s] = False
-#                 utils.annotate_step(best_cost[s], pos[s], steps, s)
-#
-#             print('\n Step {} summary, best particle values: {} '.format(s, pos[s]))
-#
-#             key = "r{}s{}".format(r + 1, s + 1)
-#             step_trace[key] = copy.deepcopy(steps)
-#
-#         # print(json.dumps(steps, sort_keys=False, indent=2))
-#
-#         round_cost = np.sum(best_cost)
-#
-#         # if no improvement in all steps, break out of rounds prematurely
-#         # but start checking only after min_rounds
-#         # if (r + 1 >= min_rounds) and all(no_improvement):
-#         rel_round_tol = 1 - round_cost / best_round_cost
-#
-#         print('\n Round summary - round_cost:{}, step_costs: {}, step improvement:{}'
-#               .format(round_cost, best_cost, np.invert(no_improvement)))
-#         print('\n Progress - best_round_cost:{}, rel_round_tol:{}, rtol:{}'
-#               .format(best_round_cost, rel_round_tol, rtol))
-#
-#         if (r + 1 >= min_rounds) and 0 <= rel_round_tol < rtol:
-#             r_below += 1
-#             if r_below == rounds_below:
-#                 break
-#         else:
-#             # reset
-#             r_below = 0
-#
-#         if round_cost < best_round_cost:
-#             best_round_cost = round_cost
-#
-#     end_time = datetime.datetime.now()
-#     elapsed = str(end_time - start_time)
-#
-#     print('Done in {} after {} out of {} rounds'.format(elapsed, r + 1, max_rounds))
-#
-#     done = True
-#     for worker in thread_pool:
-#         worker.join()
-#
-#     step_trace['rounds'] = r + 1
-#     step_trace['steps'] = len(steps)
-#     step_trace['iters'] = iters
-#     step_trace['particles'] = n_particles
-#     step_trace['time'] = elapsed
-#
-#     return optimizer, step_trace