mg-pso-gui 0.1.13__py3-none-any.whl → 0.2.75__py3-none-any.whl
- {mg_pso_gui-0.1.13.dist-info → mg_pso_gui-0.2.75.dist-info}/METADATA +10 -11
- mg_pso_gui-0.2.75.dist-info/RECORD +76 -0
- {mg_pso_gui-0.1.13.dist-info → mg_pso_gui-0.2.75.dist-info}/WHEEL +1 -1
- mgpsogui/gui/General/ParameterView.py +110 -0
- mgpsogui/gui/General/__init__.py +0 -0
- mgpsogui/gui/HomePage.py +565 -513
- mgpsogui/gui/OptionManager.py +333 -145
- mgpsogui/gui/OptionManager_backup.py +443 -0
- mgpsogui/gui/PlatformTab/PlatformTab.py +15 -6
- mgpsogui/gui/RunTab/OptimalParameterView.py +47 -0
- mgpsogui/gui/RunTab/RunTab.py +89 -35
- mgpsogui/gui/SetupTab/BoundsEditorWindow.py +1 -1
- mgpsogui/gui/SetupTab/BoundsList.py +97 -34
- mgpsogui/gui/SetupTab/CustomFunctionEditorWindow.py +74 -0
- mgpsogui/gui/SetupTab/CustomFunctionMetrics.py +156 -0
- mgpsogui/gui/SetupTab/FunctionsList.py +60 -6
- mgpsogui/gui/SetupTab/{StaticParameterView.py → ListEditor.py} +27 -16
- mgpsogui/gui/SetupTab/ListParametersView.py +7 -6
- mgpsogui/gui/SetupTab/{CalibrationParametersView.py → OverrideParameterMetrics.py} +35 -9
- mgpsogui/gui/SetupTab/OverrideParameterWindow.py +40 -0
- mgpsogui/gui/SetupTab/SetupTab.py +31 -11
- mgpsogui/gui/SetupTab/StepView.py +93 -22
- mgpsogui/gui/VisualizeTab/MatrixEditor.py +68 -0
- mgpsogui/gui/VisualizeTab/SideBar.py +399 -0
- mgpsogui/gui/VisualizeTab/VisualizeTab.py +76 -11
- mgpsogui/gui/defaults/__init__.py +0 -0
- mgpsogui/gui/defaults/optimization.json +176 -0
- mgpsogui/gui/defaults/sampling.json +111 -0
- mgpsogui/gui/defaults/sensitivity.json +20 -0
- mgpsogui/gui/images/plus.png +0 -0
- mgpsogui/gui/images/test.png +0 -0
- mgpsogui/util/GraphGenerator.py +747 -42
- mgpsogui/util/PSORunner.py +608 -116
- mgpsogui/util/debug.py +559 -0
- mgpsogui/util/helpers.py +95 -0
- mgpsogui/util/recosu/__init__.py +2 -1
- mgpsogui/util/recosu/pso/csip_access.py +2 -35
- mgpsogui/util/recosu/pso/pso.py +55 -59
- mgpsogui/util/recosu/sampling/__init__.py +16 -0
- mgpsogui/util/recosu/sampling/halton/__init__.py +0 -0
- mgpsogui/util/recosu/sampling/halton/halton.py +45 -0
- mgpsogui/util/recosu/sampling/halton/prime.py +82 -0
- mgpsogui/util/recosu/sampling/random/__init__.py +0 -0
- mgpsogui/util/recosu/sampling/random/random_sampler.py +34 -0
- mgpsogui/util/recosu/sampling/sample_trace_writer.py +47 -0
- mgpsogui/util/recosu/sampling/sampler_task.py +75 -0
- mgpsogui/util/recosu/sampling/sampling.py +99 -0
- mgpsogui/util/sampler_test_driver.py +129 -0
- mg_pso_gui-0.1.13.dist-info/RECORD +0 -50
- mgpsogui/gui/images/IGOW 4 Logo.png +0 -0
- {mg_pso_gui-0.1.13.dist-info → mg_pso_gui-0.2.75.dist-info}/entry_points.txt +0 -0
- {mg_pso_gui-0.1.13.dist-info → mg_pso_gui-0.2.75.dist-info}/top_level.txt +0 -0
@@ -9,52 +9,34 @@ def csip_worker(reqq: queue.Queue, thread_no: int, stop, full_trace,
     async_call = conf.get('async_call', True)  # default is async
     save_resp = conf.get('save_response_to', None)  # save response, set it to a folder if responses should be saved.
 
-    print("client1")
-
     while not stop():
-        print("client2")
 
         try:
             (rnd, step, iteration, particle, x, step_param_names, calib_params, objfunc, resq) = reqq.get(True, 0.5)
             # print(thread_no, particle)
-
-            print("client3")
 
             c = Client(metainfo=metainfo)
 
-            print("client4")
-
             # static params (from args)
             for param in arg_params:
                 c.add_data(param['name'], param['value'])
 
-            print("client5")
-
             # particle params (generated from steps)
             # for i, value in enumerate(x):
             for idx, value in enumerate(x[particle, :]):
                 c.add_data(step_param_names[idx], value)
 
-
-            print("client6")
-
             # other, previously calibrated params (other steps)
             for name, value in calib_params.items():
                 c.add_data(name, value)
 
-            print("client7")
-
             # objective function info
             for of in objfunc:
                 c.add_cosu(of['name'], of['of'], of['data'])
                 # c.add_data(of['name'], (of['data'][0], of['data'][1]))
 
-            print("client8")
-
             print('.', end='', flush=True)
 
-            print("client9")
-
             try:
                 # print(c)
                 if async_call:
@@ -62,47 +44,32 @@ def csip_worker(reqq: queue.Queue, thread_no: int, stop, full_trace,
                 else:
                     res = c.execute(url, files=files, conf=conf)
 
-                print("client10")
-
                 if res.is_failed():
                     print(res)
 
-                print("client11")
-
                 if save_resp:
                     res.save_to(os.path.join(save_resp, 'r{}s{}i{}p{}.json'.format(rnd, step, iteration, particle)))
-
-                print("client12")
-
+
                 # print(res)
                 print('O', end='', flush=True)
 
-                print("client13")
-
                 cost = utils.calc_cost(res, objfunc)
 
-                print("client14")
-
                 if full_trace is not None:
                     all_params = {}
                     # for i, value in enumerate(x):
                     for idx, value in enumerate(x[particle, :]):
                         all_params[step_param_names[idx]] = value
-
-                    print("client15")
 
                     for name, value in calib_params.items():
                         all_params[name] = value
                     full_trace.append((all_params, cost))
 
-                print("client16")
-
                 resq.put((particle, cost))
             except Exception as e:
                 print(res)
                 print(e)
-
-                print("client17")
+
             reqq.task_done()
         except queue.Empty:
             continue
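The csip_access.py change above only strips the temporary "clientN" debug prints; the worker's queue contract is untouched: it polls the shared request queue for a (rnd, step, iteration, particle, x, ...) tuple, runs the CSIP request, and reports (particle, cost) on the per-evaluation response queue before acknowledging the item. A minimal sketch of that handshake, with a toy cost function standing in for the CSIP call (this is an illustration, not the package's csip_worker):

```python
import queue
import threading

def toy_worker(reqq: queue.Queue, resq: queue.Queue, stop):
    # Same handshake as csip_worker: poll the request queue with a timeout,
    # compute a cost for the particle, report it, then acknowledge the item.
    while not stop():
        try:
            particle, params = reqq.get(True, 0.5)
        except queue.Empty:
            continue
        cost = sum(v * v for v in params)   # stand-in for utils.calc_cost(res, objfunc)
        resq.put((particle, cost))
        reqq.task_done()

reqq, resq = queue.Queue(), queue.Queue()
stopped = False
threading.Thread(target=toy_worker, args=(reqq, resq, lambda: stopped), daemon=True).start()

for particle, params in enumerate([[0.1, 0.2], [0.3, 0.4]]):
    reqq.put((particle, params))

reqq.join()                              # returns once every request has been task_done()'d
print([resq.get() for _ in range(2)])    # e.g. [(0, 0.05...), (1, 0.25)]
stopped = True
```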
mgpsogui/util/recosu/pso/pso.py
CHANGED
@@ -22,51 +22,37 @@ import datetime
 import queue
 import json
 import os
-from multiprocessing import Queue
+from multiprocessing import Queue as MPQueue
 
 
 def eval_cost(x, iteration, step_param_names, step_objfunc, calib_params, req_queue, files, url, param, conf: Dict, rnd,
               step):
     particles = len(x[:, 0])
 
-    print("c1")
-
     pfail_count = conf.get('particles_fail', 1)  # Number of particles allowed to fail.
     pfail_retry = conf.get('particles_retry', 3)  # retry number of times if more than allowed fail
 
-    print("c2")
-
     while pfail_retry > 0:
         cost = np.ones(particles)
         res_queue = queue.Queue()
 
         print(' ', end='', flush=True)
 
-        print("c3")
-
         # submit for processing
         # for i_particle, v in enumerate(x[:, 0]):
         for particle in range(particles):
             req_queue.put((rnd, step, iteration, particle, x, step_param_names, calib_params, step_objfunc, res_queue))
             # req_queue.put((i_particle, x[i_particle,:], step_param_names, calib_params, step_objfunc, res_queue))
 
-        print("c4")
-
         # wait for the cost value to come back
         # for i, v in enumerate(x[:, 0]):
         for idx in range(particles):
-            print("c4.1")
             (particle, p_cost) = res_queue.get()
             cost[particle] = p_cost
-            print("c4.2")
             res_queue.task_done()
 
-        print("c5")
-
         res_queue.join()
 
-        print("c6")
-
         # replace the 'nan' cost values (failed/missing runs) with the mean of the
         # rest of the cost values, hence ignore it
 
@@ -74,24 +60,15 @@ def eval_cost(x, iteration, step_param_names, step_objfunc, calib_params, req_qu
         nan_idx = np.where(np.isnan(cost))
         failed_particles = len(nan_idx[0])
 
-        print("c7")
-
         # leave the loop if fails acceptable
         if failed_particles <= pfail_count:
             break
 
-        print("c8")
-
         print("Re-running particles, since ", failed_particles, ' out of ', particles, ' particles failed.')
         pfail_retry -= 1
 
-        print("c9")
-
-        print("c10")
-
     if pfail_retry == 0:
         print('Particle evaluation failed ', conf.get('particles_retry', 3), ' times. PSO stopped.')
-        print("c11")
         return None
 
     # print("mean ", mean)
@@ -102,22 +79,10 @@ def eval_cost(x, iteration, step_param_names, step_objfunc, calib_params, req_qu
     print(flush=True)
     return cost
 
-# Simulator
-def global_best_sim(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters: int, options: Dict,
-                    oh_strategy: Dict = None, n_threads: int = 4, rtol: float = 0.001, ftol: float = -np.inf,
-                    ftol_iter: int = 1, full_trace: List = None, rtol_iter: int = 1,
-                    conf: Dict = None, metainfo: Dict = None, cost_target: float = -np.inf, result_queue: Queue = None) -> Tuple:
-    import time
-    while True:
-        print("WOW", flush=True)
-        if result_queue is not None:
-            result_queue.put('WOW')
-        time.sleep(1)
-
 def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters: int, options: Dict,
                 oh_strategy: Dict = None, n_threads: int = 4, rtol: float = 0.001, ftol: float = -np.inf,
                 ftol_iter: int = 1, full_trace: List = None, rtol_iter: int = 1,
-                conf: Dict = None, metainfo: Dict = None, cost_target: float = -np.inf, result_queue: Queue = None) -> Tuple:
+                conf: Dict = None, metainfo: Dict = None, cost_target: float = -np.inf, result_queue: MPQueue = None) -> Tuple:
     """Performs a stepwise particle swarm optimization PSO using a global best approach.
 
     Parameters
@@ -203,7 +168,15 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
     step_trace['min_rounds'] = min_rounds
     step_trace['max_rounds'] = max_rounds
     step_trace['iters'] = iters
-
+
+    # BUG If ftol is -inf set it to a string
+    ftol_value = ftol
+    if ftol == -np.inf:
+        ftol_value = '-inf'
+    elif ftol == np.inf:
+        ftol_value = 'inf'
+
+    step_trace['ftol'] = ftol_value
     step_trace['ftol_iter'] = ftol_iter
     step_trace['rtol'] = rtol
     step_trace['rtol_iter'] = rtol_iter
@@ -211,17 +184,21 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
     step_trace['n_particles'] = n_particles
     step_trace['n_steps'] = len(steps)
     step_trace['steps'] = copy.deepcopy(steps)
-    step_trace['args'] = args
+    step_trace['args'] = str(args) #BUG MUST BE REMOVED
 
     if step_file is not None:
         with open(step_file, "w") as fo:
             json.dump(step_trace, fo)
 
+    print("Wrote step trace")
+
     # best round cost
     best_round_cost = np.inf
 
     # request queue for worker
     req_queue = queue.Queue()
+
+    print("Created queue")
 
     conf = conf or {}
     done = False
@@ -234,6 +211,8 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         thread_pool.append(worker)
         worker.start()
 
+    print("Started worker threads")
+
     r_below = 0
     early_exit = False
     start_time = datetime.datetime.now()
@@ -257,6 +236,8 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
             args['req_queue'] = req_queue
             args['conf'] = conf
 
+            print("Calling global best..")
+
             # create optimizer in the first round.
             if optimizer[s] is None:
                 optimizer[s] = GlobalBestPSO(step.get('n_particles', n_particles),
@@ -267,24 +248,26 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
                                              ftol=step.get('ftol', ftol),
                                              ftol_iter=step.get('ftol_iter', ftol_iter),
                                              cost_target=step.get('cost_target', cost_target))
-
-
+
+            print('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names, args['calib_params']))
 
-            if result_queue is not None:
-
+            #if result_queue is not None:
+            #    result_queue.put('\n>>>>> R{}/S{} particle params: {} calibrated params: {}\n'.format(r + 1, s + 1, param_names, args['calib_params']))
+
+            print("Filled request queue...")
 
             args['rnd'] = r + 1
             args['step'] = s + 1
-
-            print("
+
+            print("Evaluating cost...")
 
             # perform optimization
             cost, pos = optimizer[s].optimize(eval_cost, iters=step.get('iters', iters), **args)
             if cost is None:
                 early_exit = True
                 break
-
-            print("
+
+            print("Finished evaluation...")
 
             # capture the best cost
             # if cost < best_cost[s] and np.abs(cost - best_cost[s]) > rtol:
@@ -293,8 +276,6 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
                 no_improvement[s] = False
                 utils.annotate_step(best_cost[s], pos, steps, s)
 
-            print("g3")
-
             print('\n Step summary, best particle values: {} '.format(pos))
 
             if result_queue is not None:
@@ -303,27 +284,29 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
             key = "r{}s{}".format(r + 1, s + 1)
             step_trace[key] = {}
             step_trace[key]['time'] = str(datetime.datetime.now())
-
+
+            best_costs_list = best_cost.tolist()
+            # If the cost is inf, set it to a string
+            for i, c in enumerate(best_costs_list):
+                if c == np.inf:
+                    best_costs_list[i] = 'inf'
+                elif c == -np.inf:
+                    best_costs_list[i] = '-inf'
+
+            step_trace[key]['best_costs'] = best_costs_list # BUG
             step_trace[key]['steps'] = copy.deepcopy(steps)
 
-            print("g4")
-
             if step_file is not None:
                 with open(step_file, "w") as fo:
                     json.dump(step_trace, fo)
 
-            print("g5")
-
             # print(json.dumps(steps, sort_keys=False, indent=2))
 
             if early_exit:
-                print("g6")
                 step_trace['exit'] = '1'
                 break
 
         round_cost = np.sum(best_cost)
-
-        print("g7")
 
         # if no improvement in all steps, break out of rounds prematurely
         # but start checking only after min_rounds
@@ -347,8 +330,17 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         step_trace[key] = {}
         step_trace[key]['time'] = str(datetime.datetime.now())
         step_trace[key]['round_cost'] = round_cost
-
-
+
+        best_costs_list = best_cost.tolist() #BUG
+        # If the cost is inf, set it to a string
+        for i, c in enumerate(best_costs_list):
+            if c == np.inf:
+                best_costs_list[i] = 'inf'
+            elif c == -np.inf:
+                best_costs_list[i] = '-inf'
+
+        step_trace[key]['best_costs'] = best_costs_list
+        step_trace[key]['improvements'] = no_improvement.tolist()
         if step_file is not None:
             with open(step_file, "w") as fo:
                 json.dump(step_trace, fo)
@@ -384,4 +376,8 @@ def global_best(steps: Dict, rounds: Tuple, args: Dict, n_particles: int, iters:
         with open(step_file, "w") as fo:
             json.dump(step_trace, fo)
 
+    if result_queue is not None:
+        result_queue.put("Step Trace")
+        result_queue.put(step_trace)
+
     return optimizer, step_trace
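Several of the pso.py additions above (the ftol_value block and the two best_costs_list loops, all marked # BUG) exist because step_trace is written with json.dump, and infinite floats do not survive that round trip cleanly: Python emits the non-standard tokens Infinity/-Infinity, which strict JSON readers reject. A small illustration of the failure mode and of the same replace-with-string workaround, using made-up trace values:

```python
import json
import math
import numpy as np

trace = {"ftol": -np.inf, "best_costs": [np.inf, 0.42]}

# json.dump/json.dumps happily emit Infinity/-Infinity, but that is not valid JSON.
text = json.dumps(trace)   # '{"ftol": -Infinity, "best_costs": [Infinity, 0.42]}'
# json.loads(text) still works in Python, but e.g. JavaScript's JSON.parse would fail.

def sanitize(value):
    # Mirror the diff's workaround: replace +/-inf with the strings 'inf'/'-inf'.
    if isinstance(value, float) and math.isinf(value):
        return "inf" if value > 0 else "-inf"
    if isinstance(value, list):
        return [sanitize(v) for v in value]
    return value

safe = {k: sanitize(v) for k, v in trace.items()}
print(json.dumps(safe))    # '{"ftol": "-inf", "best_costs": ["inf", 0.42]}'
```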
mgpsogui/util/recosu/sampling/__init__.py
ADDED
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+
+"""
+LUCA/PSO toolkit
+=========================================
+This is ...
+
+"""
+
+__author__ = """Olaf David"""
+__email__ = "odavid@colostate.edu"
+__version__ = "1.0"
+
+from .sampling import run_sampler
+
+__all__ = ["run_sampler"]
mgpsogui/util/recosu/sampling/halton/__init__.py
File without changes
mgpsogui/util/recosu/sampling/halton/halton.py
ADDED
@@ -0,0 +1,45 @@
+from collections.abc import Iterable
+import math
+from ...sampling.halton.prime import generate_n_primes
+
+
+def halton(index: int, base: int) -> float:
+    fraction: float = 1.0
+    result: float = 0
+
+    while index > 0:
+        fraction = fraction / base
+        result += fraction * (index % base)
+        index = math.floor(index / base)
+
+    return result
+
+
+class HaltonSampleGenerator:
+    index: int
+    maxIndex: int
+    primes: list[int]
+
+    def __init__(self, count: int, offset: int, num_parameters: int):
+        assert (count > 0)
+        assert (offset >= 0)
+        self.index = offset + 1
+        self.maxIndex = offset + count + 1
+        self.primes = generate_n_primes(num_parameters)
+
+    def __iter__(self) -> Iterable[tuple[int, list[float]]]:
+        return self
+
+    def __next__(self) -> tuple[int, list[float]]:
+        if self.index >= self.maxIndex:
+            raise StopIteration
+
+        i = 0
+        values: list[float] = []
+        for base in self.primes:
+            values.append(halton(self.index, base))
+
+        result: tuple[int, list[float]] = (self.index, values)
+        self.index = self.index + 1
+        return result
+
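halton.py is a plain radical-inverse Halton sequence: halton(index, base) is the van der Corput value of index in the given base, and HaltonSampleGenerator pairs one prime base per parameter (via generate_n_primes) to yield quasi-random points in the unit hypercube. A short usage sketch against the API shown above; the import path is taken from the file listing and assumes the package is installed:

```python
# Import path assumed from the file listing above.
from mgpsogui.util.recosu.sampling.halton.halton import HaltonSampleGenerator, halton

print(halton(1, 2), halton(2, 2), halton(3, 2))   # 0.5 0.25 0.75 (base-2 van der Corput values)

# 5 samples of a 3-parameter space starting at offset 0; each item is (sample_id, [u1, u2, u3]).
for sample_id, unit_values in HaltonSampleGenerator(count=5, offset=0, num_parameters=3):
    # unit_values lie in [0, 1); scale them to real parameter bounds as needed, e.g.:
    bounds = [(0.0, 10.0), (0.1, 0.9), (-1.0, 1.0)]          # hypothetical parameter ranges
    scaled = [lo + u * (hi - lo) for u, (lo, hi) in zip(unit_values, bounds)]
    print(sample_id, scaled)
```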
mgpsogui/util/recosu/sampling/halton/prime.py
ADDED
@@ -0,0 +1,82 @@
+from typing import List
+
+PRIME_TABLE: List[int] = [
+    2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
+    31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
+    73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
+    127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
+    179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
+    233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
+    283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
+    353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
+    419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
+    467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
+    547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
+    607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
+    661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
+    739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
+    811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
+    877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
+    947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013,
+    1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069,
+    1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151,
+    1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,
+    1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291,
+    1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373,
+    1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451,
+    1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511,
+    1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583,
+    1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657,
+    1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733,
+    1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811,
+    1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889,
+    1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987,
+    1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053,
+    2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129,
+    2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213,
+    2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287,
+    2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357,
+    2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423,
+    2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531,
+    2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617,
+    2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687,
+    2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741,
+    2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819,
+    2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903,
+    2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999,
+    3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079,
+    3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181,
+    3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257,
+    3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331,
+    3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413,
+    3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511,
+    3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571,
+    3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643,
+    3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727,
+    3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821,
+    3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907,
+    3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989,
+    4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057,
+    4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139,
+    4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231,
+    4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297,
+    4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409
+]
+
+
+def generate_n_primes(n: int) -> List[int]:
+    if n < len(PRIME_TABLE):
+        return PRIME_TABLE[0:n]
+
+    primes: List[int] = PRIME_TABLE.copy()
+    num: int = primes[-1] + 2
+    while len(primes) < n:
+        is_prime = True
+        for p in primes:
+            if num % p == 0:
+                is_prime = False
+                break
+        if is_prime:
+            primes.append(num)
+        num += 2
+    return primes
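prime.py only exists to hand the Halton generator its per-parameter bases: requests up to the size of the lookup table are served directly from it, and larger requests extend the list by trial division against the primes already known. A quick usage check (import path assumed as above):

```python
# Import path assumed from the file listing above.
from mgpsogui.util.recosu.sampling.halton.prime import generate_n_primes

print(generate_n_primes(5))       # [2, 3, 5, 7, 11] -> one Halton base per sampled parameter

# Requests beyond the 600-entry table fall back to trial division and keep growing the list.
extended = generate_n_primes(650)
print(len(extended), extended[-1])
```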
mgpsogui/util/recosu/sampling/random/__init__.py
File without changes
mgpsogui/util/recosu/sampling/random/random_sampler.py
ADDED
@@ -0,0 +1,34 @@
+from collections.abc import Iterable
+import random
+
+
+class RandomSampleGenerator:
+    index: int
+    count: int
+    num_parameters: int
+    rand: random.Random
+
+    def __init__(self, count: int, num_parameters: int):
+        assert (count > 0)
+        self.index = 1
+        self.count = count + 1
+        self.num_parameters = num_parameters
+        self.rand = random.Random()
+
+    def __iter__(self) -> Iterable[tuple[int, list[float]]]:
+        return self
+
+    def __next__(self) -> tuple[int, list[float]]:
+        if self.index >= self.count:
+            raise StopIteration
+
+        i: int = 0
+        values: list[float] = []
+        while i < self.num_parameters:
+            values.append(self.rand.uniform(0, 1))
+            i = i + 1
+
+        result: tuple[int, list[float]] = (self.index, values)
+        self.index = self.index + 1
+        return result
+
mgpsogui/util/recosu/sampling/sample_trace_writer.py
ADDED
@@ -0,0 +1,47 @@
+import threading
+
+
+class SampleTraceWriter:
+    trace_file: str
+    parameter_indices: dict[int, str]
+    objective_indices: dict[int, str]
+    write_lock: threading.Lock
+
+    def __init__(self, trace_file: str):
+        assert(trace_file is not None and len(trace_file) > 0)
+        self.trace_file = trace_file
+        self.parameter_indices = {}
+        self.objective_indices = {}
+        self.write_lock = threading.Lock()
+
+    def write_header(self, parameter_names: list[str], objective_names: list[str]) -> None:
+        with self.write_lock:
+            with open(self.trace_file, 'w') as writer:
+                writer.write("id")
+                self.parameter_indices = {}
+                index: int = 0
+                for name in parameter_names:
+                    writer.write(",{}".format(name))
+                    self.parameter_indices[index] = name
+                    index = index + 1
+                self.objective_indices = {}
+                index = 0
+                for name in objective_names:
+                    writer.write(",{}".format(name))
+                    self.objective_indices[index] = name
+                    index = index + 1
+                writer.write("\n")
+
+    def append_sample(self, sample_id: int, parameters: dict[str, any], objectives: dict[str, any]) -> None:
+        with self.write_lock:
+            with open(self.trace_file, 'a') as writer:
+                writer.write("{}".format(sample_id))
+                index: int = 0
+                while index < len(self.parameter_indices):
+                    writer.write(",{}".format(parameters[self.parameter_indices[index]]))
+                    index = index + 1
+                index = 0
+                while index < len(self.objective_indices):
+                    writer.write(",{}".format(objectives[self.objective_indices[index]]))
+                    index = index + 1
+                writer.write("\n")
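SampleTraceWriter is a small thread-safe CSV appender: write_header fixes and remembers the column order (id, then parameters, then objectives), and append_sample writes one row per evaluated sample in that order, serialized by an internal lock. A usage sketch with made-up column names (import path assumed from the file listing):

```python
from mgpsogui.util.recosu.sampling.sample_trace_writer import SampleTraceWriter  # path assumed

writer = SampleTraceWriter("trace.csv")
writer.write_header(["soilFactor", "etFactor"], ["ns", "pbias"])   # header: id,soilFactor,etFactor,ns,pbias

# Typically called from several sampler threads; the internal lock serializes the file appends.
writer.append_sample(1, {"soilFactor": 0.8, "etFactor": 1.2}, {"ns": 0.71, "pbias": -4.2})
writer.append_sample(2, {"etFactor": 0.9, "soilFactor": 1.1}, {"pbias": 2.0, "ns": 0.65})
# Rows come out in header order regardless of dict ordering:
# 1,0.8,1.2,0.71,-4.2
# 2,1.1,0.9,0.65,2.0
```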
mgpsogui/util/recosu/sampling/sampler_task.py
ADDED
@@ -0,0 +1,75 @@
+import os
+from csip import Client
+
+
+class SamplerTask:
+    task_id: int
+    parameters: dict[str, any]
+    objectives: list[dict[str, any]]
+    static_parameters: dict[str, any]
+    url: str
+    files: list[str]
+    metainfo: dict[str, any]
+    conf: dict[str, any]
+    result: dict[str, any]
+
+    def __init__(self, task_id: int, parameters: dict[str, any], objectives: list[dict[str, any]],
+                 static_parameters: dict[str, any], url: str, files: list[str] = None, metainfo: dict[str, any] = None,
+                 conf: dict[str, any] = None):
+        self.task_id = task_id
+        assert (parameters is not None and len(parameters) > 0)
+        self.parameters = parameters
+        assert (objectives is not None and len(objectives) > 0)
+        self.objectives = objectives
+        self.static_parameters = static_parameters if static_parameters is not None else []
+        assert (url is not None and len(url) > 0)
+        self.url = url
+        self.files = files if files is not None else []
+        self.metainfo = metainfo
+        self.conf = conf
+
+    def create_request(self) -> Client:
+        request: Client = Client(metainfo=self.metainfo)
+
+        for key, value in self.static_parameters.items():
+            request.add_data(key, value)
+
+        for key, value in self.parameters.items():
+            request.add_data(key, value)
+
+        for of in self.objectives:
+            request.add_cosu(of['name'], of['of'], of['data'])
+
+        return request
+
+    def run_task(self) -> bool:
+        self.result = {}
+        request: Client = self.create_request()
+        async_call: bool = self.conf.get('async_call', True) if self.conf is not None else True
+        # save response, set it to a folder if responses should be saved.
+        save_resp = self.conf.get('save_response_to', None) if self.conf is not None else None
+        successful: bool = False
+
+        response: Client = None
+        try:
+            if async_call:
+                response = request.execute_async(self.url, files=self.files, conf=self.conf)
+            else:
+                response = request.execute(self.url, files=self.files, conf=self.conf)
+
+            successful = response.is_finished()
+            if not successful:
+                print(response)
+
+            if save_resp:
+                response.save_to(os.path.join(save_resp, 'task_{}.json'.format(self.task_id)))
+
+            objectives: list[dict[str, str]] = response.get_metainfo("cosu")
+            for of in objectives:
+                self.result[of["name"]] = of["value"]
+        except Exception as ex:
+            print(ex)
+            print(response)
+            successful = False
+
+        return successful