mg-pso-gui 0.2.125__py3-none-any.whl → 0.2.126__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/METADATA +1 -1
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/RECORD +10 -8
- mgpsogui/gui/HomePage.py +5 -5
- mgpsogui/gui/SetupTab/ListParametersView.py +1 -1
- mgpsogui/util/recosu/pso/pso modified.py +585 -0
- mgpsogui/util/recosu/pso/pso.py +48 -292
- mgpsogui/util/recosu/pso/pso_new.py +627 -0
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/WHEEL +0 -0
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/entry_points.txt +0 -0
- {mg_pso_gui-0.2.125.dist-info → mg_pso_gui-0.2.126.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,627 @@
#
# $Id:$
#
# This file is part of the Cloud Services Integration Platform (CSIP),
# a Model-as-a-Service framework, API, and application suite.
#
# 2012-2020, OMSLab, Colorado State University.
#
# OMSLab licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
#
import numpy

from ..utils import utils
from .csip_access import csip_worker
from pyswarms.single.global_best import GlobalBestPSO
from os import path
from threading import Thread
from typing import Dict, List, Set, Tuple
import numpy as np
import copy
import datetime
import queue
import json
import os
from multiprocessing import Queue as MPQueue

cost2 = {}

def eval_cost(x, iteration, step_param, step_objfunc, calib_params, req_queue, files, url, param, conf: Dict, rnd,
              step):
    particles = len(x[:, 0])

    pfail_count = conf.get('particles_fail', 1)  # Number of particles allowed to fail.
    pfail_retry = conf.get('particles_retry', 3)  # retry number of times if more than allowed fail

    while pfail_retry > 0:
        cost = np.ones(particles)
        res_queue = queue.Queue()

        print(' ', end='', flush=True)

        # submit for processing
        # for i_particle, v in enumerate(x[:, 0]):
        for particle in range(particles):
            req_queue.put((rnd, step, iteration, particle, x, step_param, calib_params, step_objfunc, res_queue))
            # print(' rnd: ', rnd)
            # print(' step: ', step)
            # print(' interation: ', iteration)
            # print(' particle: ', particle)
            # print(' x[particle,:]: ', x[particle,:])

            # req_queue.put((i_particle, x[i_particle,:], step_param_names, calib_params, step_objfunc, res_queue))

        # wait for the cost value to come back
        # for i, v in enumerate(x[:, 0]):
        for idx in range(particles):
            (particle, p_cost) = res_queue.get()
            cost[particle] = p_cost
            cost1 = []
            cost1.append(iteration)
            cost1.append(p_cost)
            cost1.append(str('r{}s{}i{}p{}.json'.format(rnd, step, iteration, particle)))

            if particle not in cost2:
                cost2[particle] = []

            cost2[particle].append(cost1)

            res_queue.task_done()

        res_queue.join()

        # replace the 'nan' cost values (failed/missing runs) with the mean of the
        # rest of the cost values, hence ignore it

        # print("cost ", cost)
        nan_idx = np.where(np.isnan(cost))
        failed_particles = len(nan_idx[0])

        # leave the loop if fails acceptable
        if failed_particles <= pfail_count:
            break
        print("Re-running particles, since ", failed_particles, ' out of ', particles, ' particles failed.')
        pfail_retry -= 1

    if pfail_retry == 0:
        print('Particle evaluation failed ', conf.get('particles_retry', 3), ' times. PSO stopped.')
        return None

    # print("mean ", mean)
    # assign the mean value to all failed runs.
    mean = np.nanmean(cost)
    cost[nan_idx[0]] = mean

    for key in cost2:
        # print(key, ' - ', cost2[key])
        cost2[key][iteration][1] = cost[key] # BUG LIST INDEX OUT OF RANGE?
        # print(key, ' - ', cost2[key])

    print(flush=True)
    return cost


def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters: int, options: Dict,
                oh_strategy: Dict = None, n_threads: int = 4, rtol: float = 0.001, ftol: float = -np.inf,
                ftol_iter: int = 1, full_trace: List = None, rtol_iter: int = 1,
                conf: Dict = None, metainfo: Dict = None, cost_target: float = -np.inf, result_queue: MPQueue = None) -> Tuple:
    """Performs a stepwise particle swarm optimization PSO using a global best approach.

    Parameters
    ----------
    steps : Dict
        step definitions
    rounds : tuple
        round definition, (min,max) or max
    args : Dict
        static service args
    n_particles : int
        number of particles
    iters : int
        number of iterations
    options : Dict
        PSO options (see pyswarms)
    oh_strategy : Dict
        PSO Option handling strategy (see pyswarms)
    n_threads : int
        size of thread pool (default: 4)
    rtol : float
        percentage of change of sum(best_cost) between rounds for
        convergence. (Default is 0.001 0.1%)
    ftol : float
        PSO tolerance (default: -np.inf)
    ftol_iter : float
        number of iterations over which the relative error in
        objective_func is acceptable for convergence. (default: 1)
    full_trace : List
        trace of all runs, list of tuples
        first is dictionary of parameter names to parameter values
        second is the cost value (default: None)
    rtol_iter : int
        the number of subsequent rounds with sum(best_cost) < rtol
        (default: 1)
    conf : Dict
        configuration settings (default: {} )
    metainfo : Dict
        additional metainfo for the csip client (default: {} )
    cost_target: float
        the cost target (default: -np.inf)
    Returns
    -------
    Tuple
        optimizer: List, step_trace: Dict

    """
365
|
+
if early_exit:
|
366
|
+
step_trace['exit'] = '1'
|
367
|
+
break
|
368
|
+
|
369
|
+
round_cost = np.sum(best_cost)
|
370
|
+
|
371
|
+
# if no improvement in all steps, break out of rounds prematurely
|
372
|
+
# but start checking only after min_rounds
|
373
|
+
# if (r + 1 >= min_rounds) and all(no_improvement):
|
374
|
+
rel_round_tol = 1 - round_cost / best_round_cost
|
375
|
+
|
376
|
+
print('\n Round summary - round_cost:{}, step_costs: {}, step improvement:{}'
|
377
|
+
.format(round_cost, best_cost, np.invert(no_improvement)))
|
378
|
+
print('\n Progress - best_round_cost:{}, rel_round_tol:{}, rtol:{}'
|
379
|
+
.format(best_round_cost, rel_round_tol, rtol))
|
380
|
+
|
381
|
+
if result_queue is not None:
|
382
|
+
result_queue.put('\n Round summary - round_cost:{}, step_costs: {}, step improvement:{}'
|
383
|
+
.format(round_cost, best_cost, np.invert(no_improvement)))
|
384
|
+
|
385
|
+
if result_queue is not None:
|
386
|
+
result_queue.put('\n Progress - best_round_cost:{}, rel_round_tol:{}, rtol:{}'
|
387
|
+
.format(best_round_cost, rel_round_tol, rtol))
|
388
|
+
print('\n Progress - best_step_request:{}'.format(best_step_request))
|
389
|
+
|
390
|
+
key = "r{}".format(r + 1)
|
391
|
+
step_trace[key] = {}
|
392
|
+
step_trace[key]['time'] = str(datetime.datetime.now())
|
393
|
+
step_trace[key]['round_cost'] = round_cost
|
394
|
+
step_trace[key]['best_costs'] = best_cost
|
395
|
+
step_trace[key]['improvements'] = no_improvement
|
396
|
+
if step_file is not None:
|
397
|
+
with open(step_file, "w") as fo:
|
398
|
+
json.dump(step_trace, fo)
|
399
|
+
|
400
|
+
if (r + 1 >= min_rounds) and 0 <= rel_round_tol < rtol:
|
401
|
+
r_below += 1
|
402
|
+
if r_below >= rtol_iter:
|
403
|
+
break
|
404
|
+
else:
|
405
|
+
# reset
|
406
|
+
r_below = 0
|
407
|
+
|
408
|
+
if round_cost < best_round_cost:
|
409
|
+
best_round_cost = round_cost
|
410
|
+
|
411
|
+
end_time = datetime.datetime.now()
|
412
|
+
elapsed = str(end_time - start_time)
|
413
|
+
|
414
|
+
print('Done in {} after {} out of {} rounds'.format(elapsed, r + 1, max_rounds))
|
415
|
+
|
416
|
+
if result_queue is not None:
|
417
|
+
result_queue.put('Done in {} after {} out of {} rounds'.format(elapsed, r + 1, max_rounds))
|
418
|
+
|
419
|
+
done = True
|
420
|
+
for worker in thread_pool:
|
421
|
+
worker.join()
|
422
|
+
|
423
|
+
step_trace['rounds'] = r + 1
|
424
|
+
step_trace['end'] = str(datetime.datetime.now())
|
425
|
+
step_trace['time'] = elapsed
|
426
|
+
|
427
|
+
if step_file is not None:
|
428
|
+
with open(step_file, "w") as fo:
|
429
|
+
json.dump(step_trace, fo)
|
430
|
+
|
431
|
+
if result_queue is not None:
|
432
|
+
result_queue.put("Step Trace")
|
433
|
+
result_queue.put(step_trace)
|
434
|
+
|
435
|
+
return optimizer, step_trace

# def p_global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters: int, options: Dict,
#                   n_threads: int = 4, rtol: float = 0.001, ftol: float = -np.inf,
#                   full_trace: List = None, rounds_below: int = 1) -> Tuple:
#     """Performs a parallel stepwise particle swarm optimization PSO using a global best approach.
#
#     Parameters
#     ----------
#     steps : Dict
#         step definitions
#     rounds : tuple
#         round definition, (min,max) or max
#     args : Dict
#         static service args
#     n_particles : int
#         number of particles
#     iters : int
#         number of iterations
#     options : Dict
#         PSO options (see pyswarms)
#     n_threads : int
#         size of thread pool (default: 4)
#     rtol : float
#         percentage of change of sum(best_cost) between rounds for
#         convergence. (Default is 0.001 0.1%)
#     ftol : float
#         PSO tolerance (default: -np.inf)
#     full_trace : List
#         trace of all runs, list of tuples
#         first is dictionary of parameter names to parameter values
#         second is the cost value (default: None)
#     rounds_below : int
#         the number of subsequent rounds with sum(best_cost) < rtol
#         (default: 1)
#     Returns
#     -------
#     Tuple
#         optimizer: List, step_trace: Dict
#     """
#
#     utils.check_url(args['url'])
#
#     min_rounds = 1
#     if type(rounds) == tuple:
#         min_rounds = rounds[0]
#         max_rounds = rounds[1]
#     else:
#         max_rounds = rounds
#
#     if min_rounds < 1:
#         raise Exception('min rounds >= 1 expected, was "{}"'.format(min_rounds))
#
#     if max_rounds > 20:
#         raise Exception('max rounds <= 20 expected, was "{}"'.format(max_rounds))
#
#     if n_threads < 1:
#         raise Exception('n_threads >= 1, was "{}"'.format(n_threads))
#
#     if rounds_below < 1:
#         raise Exception('rounds_below >= 1, was "{}"'.format(rounds_below))
#
#     if full_trace is not None and not isinstance(full_trace, list):
#         raise Exception('full_trace must be of type, was "{}"'.format(type(full_trace)))
#
#     best_cost = np.ones(len(steps))
#     cost = np.ones(len(steps))
#     optimizer = np.empty(len(steps), dtype=object)
#     pos = np.empty(len(steps), dtype=object)
#     args_s = np.empty(len(steps), dtype=object)
#
#     # trace of steps info
#     step_trace = {}
#
#     # best round cost
#     best_round_cost = 1000
#
#     # request queue for worker
#     req_queue = queue.Queue()
#
#     # Threadpool management
#     done = False
#     thread_pool = []
#     for thread_no in range(n_threads):
#         worker = Thread(target=csip_worker, args=(req_queue, thread_no, lambda: done,
#                                                   full_trace, args['url'], args['files'], args['param']))
#         thread_pool.append(worker)
#         worker.start()
#
#     start_time = datetime.datetime.now()
#
#     # setup Step PSOs
#     for s, step in enumerate(steps):
#         param_names, bounds, objfunc = utils.get_step_info(steps, s)
#         optimizer[s] = GlobalBestPSO(n_particles,
#                                      len(param_names),
#                                      options=options,
#                                      bounds=bounds,
#                                      ftol=ftol)
#
#     def step_thread(s, args):
#         cost[s], pos[s] = optimizer[s].optimize(eval_cost, iters=iters, **args)
#
#     # Run PSOs in parallel
#     r_below = 0
#     for r in range(max_rounds):
#         no_improvement = np.full(len(steps), True)
#         # check if forced exit.
#         if path.exists("stop"):
#             print('\n>>>>> stop file found, exit now.')
#             break
#
#         # cross copy calibrated parameter, setup the arguments
#         for s, step in enumerate(steps):
#             param_names, bounds, objfunc = utils.get_step_info(steps, s)
#             # maybe clone args?
#             args_s[s] = args
#             args_s[s]['step_param_names'] = param_names
#             args_s[s]['step_objfunc'] = objfunc
#             # get calibrated parameter from all other steps
#             args_s[s]['calib_params'] = utils.get_calibrated_params(steps, s)
#
#             args_s[s]['req_queue'] = req_queue
#
#             # create optimizer in the first round.
#             print('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names,
#                                                                                        args_s[s]['calib_params']))
#
#         # perform optimization
#         s_threads = []
#         for s, step in enumerate(steps):
#             s_thread = threading.Thread(target=step_thread, args=(s, args_s[s]))
#             s_threads.append(s_thread)
#             s_thread.start()
#
#         for t in s_threads:
#             t.join()
#
#         # eval cost
#         for s, step in enumerate(steps):
#             # capture the best cost
#             # if cost < best_cost[s] and np.abs(cost - best_cost[s]) > rtol:
#             if cost[s] < best_cost[s]:
#                 best_cost[s] = cost[s]
#                 no_improvement[s] = False
#                 utils.annotate_step(best_cost[s], pos[s], steps, s)
#
#         print('\n Step {} summary, best particle values: {} '.format(s, pos[s]))
#
#         key = "r{}s{}".format(r + 1, s + 1)
#         step_trace[key] = copy.deepcopy(steps)
#
#         # print(json.dumps(steps, sort_keys=False, indent=2))
#
#         round_cost = np.sum(best_cost)
#
#         # if no improvement in all steps, break out of rounds prematurely
#         # but start checking only after min_rounds
#         # if (r + 1 >= min_rounds) and all(no_improvement):
#         rel_round_tol = 1 - round_cost / best_round_cost
#
#         print('\n Round summary - round_cost:{}, step_costs: {}, step improvement:{}'
#               .format(round_cost, best_cost, np.invert(no_improvement)))
#         print('\n Progress - best_round_cost:{}, rel_round_tol:{}, rtol:{}'
#               .format(best_round_cost, rel_round_tol, rtol))
#
#         if (r + 1 >= min_rounds) and 0 <= rel_round_tol < rtol:
#             r_below += 1
#             if r_below == rounds_below:
#                 break
#         else:
#             # reset
#             r_below = 0
#
#         if round_cost < best_round_cost:
#             best_round_cost = round_cost
#
#     end_time = datetime.datetime.now()
#     elapsed = str(end_time - start_time)
#
#     print('Done in {} after {} out of {} rounds'.format(elapsed, r + 1, max_rounds))
#
#     done = True
#     for worker in thread_pool:
#         worker.join()
#
#     step_trace['rounds'] = r + 1
#     step_trace['steps'] = len(steps)
#     step_trace['iters'] = iters
#     step_trace['particles'] = n_particles
#     step_trace['time'] = elapsed
#
#     return optimizer, step_trace
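
For orientation, a minimal usage sketch of the `global_best` entry point added in `pso_new.py`. The step and service-args schema shown here (parameter names, bounds, the `objfunc` key, and the CSIP service URL) is illustrative only, inferred from how the code reads `step['param']`, `args['url']`, `args['param']`, `args.get('files')`, and the `conf` keys; it is not part of this diff and may differ from the real package schema.

    # Hypothetical usage sketch -- schema and values are placeholders, not from this release.
    from mgpsogui.util.recosu.pso.pso_new import global_best

    # One calibration step: parameter names/bounds and an objective function
    # (key names here are assumed; consult the package documentation for the real schema).
    steps = [
        {
            'param': [
                {'name': 'p1', 'bounds': (0.0, 5.0)},
                {'name': 'p2', 'bounds': (0.0, 300.0)},
            ],
            'objfunc': [{'name': 'ns', 'of': 'ns'}],
        },
    ]

    # Static CSIP service arguments read by the optimizer and its worker threads.
    args = {
        'url': 'https://csip.example.org/model/service',  # placeholder endpoint
        'param': [],       # static service parameters
        'files': None,     # optional input files
    }

    # Standard pyswarms GlobalBestPSO options.
    options = {'c1': 2.0, 'c2': 2.0, 'w': 0.8}

    optimizer, step_trace = global_best(
        steps, rounds=(1, 3), args=args,
        n_particles=10, iters=20, options=options,
        n_threads=4,
        conf={'step_trace': 'step_trace.json',       # conf keys used by the code
              'particles_fail': 1, 'particles_retry': 3},
    )
    print(step_trace['rounds'], step_trace['time'])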