mg-pso-gui 0.1.40__py3-none-any.whl → 0.2.75__py3-none-any.whl

Files changed (50)
  1. {mg_pso_gui-0.1.40.dist-info → mg_pso_gui-0.2.75.dist-info}/METADATA +10 -11
  2. mg_pso_gui-0.2.75.dist-info/RECORD +76 -0
  3. {mg_pso_gui-0.1.40.dist-info → mg_pso_gui-0.2.75.dist-info}/WHEEL +1 -1
  4. mgpsogui/gui/General/ParameterView.py +110 -0
  5. mgpsogui/gui/General/__init__.py +0 -0
  6. mgpsogui/gui/HomePage.py +234 -238
  7. mgpsogui/gui/OptionManager.py +333 -145
  8. mgpsogui/gui/OptionManager_backup.py +443 -0
  9. mgpsogui/gui/PlatformTab/PlatformTab.py +15 -6
  10. mgpsogui/gui/RunTab/OptimalParameterView.py +47 -0
  11. mgpsogui/gui/RunTab/RunTab.py +90 -17
  12. mgpsogui/gui/SetupTab/BoundsEditorWindow.py +1 -1
  13. mgpsogui/gui/SetupTab/BoundsList.py +97 -34
  14. mgpsogui/gui/SetupTab/CustomFunctionEditorWindow.py +74 -0
  15. mgpsogui/gui/SetupTab/CustomFunctionMetrics.py +156 -0
  16. mgpsogui/gui/SetupTab/FunctionsList.py +60 -6
  17. mgpsogui/gui/SetupTab/{StaticParameterView.py → ListEditor.py} +27 -16
  18. mgpsogui/gui/SetupTab/ListParametersView.py +7 -6
  19. mgpsogui/gui/SetupTab/{CalibrationParametersView.py → OverrideParameterMetrics.py} +35 -9
  20. mgpsogui/gui/SetupTab/OverrideParameterWindow.py +40 -0
  21. mgpsogui/gui/SetupTab/SetupTab.py +31 -11
  22. mgpsogui/gui/SetupTab/StepView.py +93 -22
  23. mgpsogui/gui/VisualizeTab/MatrixEditor.py +68 -0
  24. mgpsogui/gui/VisualizeTab/SideBar.py +316 -25
  25. mgpsogui/gui/VisualizeTab/VisualizeTab.py +69 -8
  26. mgpsogui/gui/defaults/__init__.py +0 -0
  27. mgpsogui/gui/defaults/optimization.json +176 -0
  28. mgpsogui/gui/defaults/sampling.json +111 -0
  29. mgpsogui/gui/defaults/sensitivity.json +20 -0
  30. mgpsogui/gui/images/plus.png +0 -0
  31. mgpsogui/util/GraphGenerator.py +721 -50
  32. mgpsogui/util/PSORunner.py +615 -86
  33. mgpsogui/util/debug.py +559 -0
  34. mgpsogui/util/helpers.py +95 -0
  35. mgpsogui/util/recosu/__init__.py +2 -1
  36. mgpsogui/util/recosu/pso/pso.py +55 -11
  37. mgpsogui/util/recosu/sampling/__init__.py +16 -0
  38. mgpsogui/util/recosu/sampling/halton/__init__.py +0 -0
  39. mgpsogui/util/recosu/sampling/halton/halton.py +45 -0
  40. mgpsogui/util/recosu/sampling/halton/prime.py +82 -0
  41. mgpsogui/util/recosu/sampling/random/__init__.py +0 -0
  42. mgpsogui/util/recosu/sampling/random/random_sampler.py +34 -0
  43. mgpsogui/util/recosu/sampling/sample_trace_writer.py +47 -0
  44. mgpsogui/util/recosu/sampling/sampler_task.py +75 -0
  45. mgpsogui/util/recosu/sampling/sampling.py +99 -0
  46. mgpsogui/util/sampler_test_driver.py +129 -0
  47. mg_pso_gui-0.1.40.dist-info/RECORD +0 -52
  48. mgpsogui/gui/images/IGOW 4 Logo.png +0 -0
  49. {mg_pso_gui-0.1.40.dist-info → mg_pso_gui-0.2.75.dist-info}/entry_points.txt +0 -0
  50. {mg_pso_gui-0.1.40.dist-info → mg_pso_gui-0.2.75.dist-info}/top_level.txt +0 -0
mgpsogui/util/PSORunner.py
@@ -1,26 +1,480 @@
-import csip
-from csip import Client
-import cosu
+
 import sys
 from multiprocessing import Process, Queue
 from queue import Empty
 import threading
 import time
 import os
-
-#from cosu.pso import global_best
+from .recosu.sampling.sampling import run_sampler
 from .recosu.pso import global_best
+from csip import Client
+import traceback
+import urllib
+import shutil
+import json
+import numpy as np
 
 def enqueue_output(out, queue):
     for line in iter(out.readline, b''):
         queue.put(line)
     out.close()
 
-def run_process(stdout_queue, stderr_queue, results_queue, cosu_queue, data, folder):
+def run_process(stdout_queue, stderr_queue, results_queue, data, folder, mode):
+    """_summary_
+
+    Args:
+        stdout_queue (_type_): _description_
+        stderr_queue (_type_): _description_
+        results_queue (_type_): _description_
+        data (_type_): _description_
+        folder (_type_): _description_
+        mode (_type_): _description_
+    """
+    try:
+        # Setup folders
+        if not os.path.exists(folder):
+            os.makedirs(folder)
+
+        if not os.path.exists(os.path.join(folder, "results")):
+            os.makedirs(os.path.join(folder, "results"))
+
+        if (os.path.exists(os.path.join(folder, 'output.txt'))):
+            os.remove(os.path.join(folder, 'output.txt'))
+
+        if (os.path.exists(os.path.join(folder, 'error.txt'))):
+            os.remove(os.path.join(folder, 'error.txt'))
+
+        # Redirect stdout and stderr to files
+        old_stdout = sys.stdout
+        old_stderr = sys.stderr
+
+        read_stdout, write_stdout = os.pipe()
+        read_stderr, write_stderr = os.pipe()
+
+        sys.stdout = os.fdopen(write_stdout, 'w')
+        sys.stderr = os.fdopen(write_stderr, 'w')
+
+        stdout_thread = threading.Thread(target=enqueue_output, args=(os.fdopen(read_stdout, 'r'), stdout_queue))
+        stderr_thread = threading.Thread(target=enqueue_output, args=(os.fdopen(read_stderr, 'r'), stderr_queue))
+        stdout_thread.daemon = True
+        stderr_thread.daemon = True
+        stdout_thread.start()
+        stderr_thread.start()
+
+        if mode == "Sampling: Halton":
+            run_sampling(data, "halton", folder, results_queue)
+        elif mode == "Sampling: Random":
+            run_sampling(data, "random", folder, results_queue)
+        elif mode == "Sensitivity Analysis":
+            run_sensitivity_analysis(data, folder, results_queue)
+        elif mode == "Optimization":
+            run_optimization(data, folder, results_queue)
+        else:
+            print("Invalid mode")
+
+        stdout_thread.join()
+        stderr_thread.join()
+
+        sys.stdout = old_stdout
+        sys.stderr = old_stderr
+
+    except Exception as e:
+        print("An exception occurred: ", flush=True)
+        print(str(e))
+        # Print stack trace
+        import traceback
+        traceback.print_exc()
+
+        # Write all of this information to a crash file
+        with open(os.path.join(folder, 'crash.txt'), 'w') as f:
+            f.write(str(e))
+            f.write("\n")
+            traceback.print_exc(file=f)
+    finally:
+        stdout_thread.join()
+        stderr_thread.join()
+
+        sys.stdout = old_stdout
+        sys.stderr = old_stderr
+
+def process_list(data, parameter_map, args, options, oh_strategy, config, metainfo, list_name):
+    """_summary_
+
+    Args:
+        data (_type_): _description_
+        parameter_map (_type_): _description_
+        args (_type_): _description_
+        options (_type_): _description_
+        oh_strategy (_type_): _description_
+        config (_type_): _description_
+        metainfo (_type_): _description_
+        list_name (_type_): _description_
+    """
+    for obj in data[list_name]:
+        name = obj['name']
+        type = obj['type']
+        destination = obj['destination']
+        original_value = obj['value']
+        converted_value = original_value
+        if type == "integer":
+            converted_value = int(converted_value)
+        elif type == "float":
+            converted_value = float(converted_value)
+        elif type == "boolean":
+            converted_value = True if converted_value == "True" else False
+
+        if destination == "args":
+            args['param'].append({"name": name, "value": converted_value})
+        elif destination == "kwargs":
+            parameter_map[name] = original_value
+        elif destination == "conf":
+            config[name] = converted_value
+        elif destination == "metainfo":
+            metainfo[name] = converted_value
+        elif destination == "options":
+            option_name = name.replace("options_", "")
+            options[option_name] = converted_value
+        elif destination == "oh_strategy":
+            strategy_name = name.replace("strategy_", "")
+            oh_strategy[strategy_name] = converted_value
+
+def process_steps(data):
+    """_summary_
+
+    Args:
+        data (_type_): _description_
+
+    Returns:
+        _type_: _description_
+    """
+
+    steps = data['steps']
+    output_steps = []
+    for step in steps:
+        output_step = {}
+        output_step['param'] = []
+        output_step['objfunc'] = []
+        for parameter in step['parameter_objects']:
+            parameter_object = {}
+            type = parameter['type']
+            if type != "list":
+                parameter_object['name'] = parameter['name']
+                parameter_object['bounds'] = (float(parameter['min_bound']), float(parameter['max_bound']))
+                output_step['param'].append(parameter_object)
+            else:
+                parameter_object['name'] = parameter['name']
+                parameter_object['bounds'] = (float(parameter['min_bound']), float(parameter['max_bound']))
+                parameter_object['type'] = "list"
+                parameter_object['calibration_strategy'] = parameter['calibration_strategy']
+                parameter_object['default_value'] = [float(x) for x in parameter['default_value'].replace("[", "").replace("]", "").split(",")]
+                output_step['param'].append(parameter_object)
+
+        for function in step['objective_functions']:
+            out_object = {}
+            out_object['name'] = function['name']
+            out_object['of'] = function['objective_function']
+            out_object['weight'] = float(function['weight'])
+            out_object['data'] = [
+                function["data_observed"],
+                function["data_simulated"]
+            ]
+            output_step['objfunc'].append(out_object)
+        output_steps.append(output_step)
+    return output_steps
+
+def pp(parameter, parameter_map, default=None):
+    """_summary_
+
+    Args:
+        parameter (_type_): _description_
+        parameter_map (_type_): _description_
+        default (_type_, optional): _description_. Defaults to None.
+
+    Returns:
+        _type_: _description_
+    """
+    if parameter in parameter_map.keys():
+        if parameter_map[parameter] != "" \
+            and parameter_map[parameter] != "None" \
+            and parameter_map[parameter] != "null" \
+            and parameter_map[parameter] != "NULL":
+            return parameter_map[parameter]
+        else:
+            return default
+    return default
+
+def run_sampling(data, mode, folder, results_queue):
+    """_summary_
+
+    Args:
+        data (_type_): _description_
+        mode (_type_): _description_
+        folder (_type_): _description_
+        results_queue (_type_): _description_
+    """
+
+    parameter_map = {}
+    args = {
+        "param": [],
+        "url": data["url"],
+        "files": {}
+    }
+    options = {}
+    oh_strategy = {}
+    config = {}
+    metainfo = {}
+
+    process_list(data, parameter_map, args, options, oh_strategy, config, metainfo, "model_parameters")
+    process_list(data, parameter_map, args, options, oh_strategy, config, metainfo, "hyperparameters")
+    process_list(data, parameter_map, args, options, oh_strategy, config, metainfo, "service_parameters")
+
+    output_steps = process_steps(data)
+
+    trace_file = os.path.join(folder, 'results', mode + '_trace.csv')
+    file_output_mode = data["sampling_output_mode"]
+    if file_output_mode == "Append":
+        # Backup trace file if it exists
+        if os.path.exists(trace_file):
+            shutil.copyfile(trace_file, trace_file + ".bak")
+
+    #config['step_trace'] = os.path.join(folder, 'pso_step_trace.json') # Do we need this?
+
+    print("Parsing Parameters...\n", flush=True)
+    print("steps: ", flush=True)
+    print(json.dumps(output_steps, indent=4))
+    print("args: ", flush=True)
+    print(json.dumps(args, indent=4))
+    print("options: ", flush=True)
+    print(json.dumps(options, indent=4))
+    print("oh_strategy: ", flush=True)
+    print(json.dumps(oh_strategy, indent=4))
+    print("config: ", flush=True)
+    print(json.dumps(config, indent=4))
+    print("metainfo: ", flush=True)
+    print(json.dumps(metainfo, indent=4))
+    print("kwargs: ", flush=True)
+    print(json.dumps(parameter_map, indent=4))
+
+    print("Running Sampling..\n", flush=True)
+    trace = run_sampler(output_steps,
+                        args,
+                        int(pp('count', parameter_map)),
+                        int(pp('num_threads', parameter_map)),
+                        mode,
+                        conf=config,
+                        metainfo=metainfo if len(metainfo) > 0 else None,
+                        trace_file=trace_file,
+                        offset=int(pp('offset', parameter_map)))
+    results_queue.put(trace)
+    print(trace, flush=True)
+    print("\n", flush=True)
+
+    if file_output_mode == "Append" and os.path.exists(trace_file + ".bak"):
+        # Read the backup file
+        with open(trace_file + ".bak", 'r') as f2:
+            backup_lines = f2.readlines()
+
+        # Read the trace file
+        with open(trace_file, 'r') as f:
+            trace_lines = f.readlines()
+
+        # Extract headers
+        backup_header = backup_lines[0]
+        trace_header = trace_lines[0]
+
+        # Combine data ensuring headers are not duplicated
+        with open(trace_file, 'w') as f:
+            f.write(backup_header)
+            f.writelines(backup_lines[1:])
+            f.writelines(trace_lines[1:] if trace_header == backup_header else trace_lines)
+
+        # Remove the backup file
+        os.remove(trace_file + ".bak")
+
+def run_optimization(data, folder, results_queue):
+    """_summary_
+
+    Args:
+        data (_type_): _description_
+        folder (_type_): _description_
+        results_queue (_type_): _description_
+    """
+    parameter_map = {}
+    args = {
+        "param": [],
+        "url": data["url"],
+        "files": {}
+    }
+    options = {}
+    oh_strategy = {}
+    config = {}
+    metainfo = {}
+
+    process_list(data, parameter_map, args, options, oh_strategy, config, metainfo, "model_parameters")
+    process_list(data, parameter_map, args, options, oh_strategy, config, metainfo, "hyperparameters")
+    process_list(data, parameter_map, args, options, oh_strategy, config, metainfo, "service_parameters")
+
+    output_steps = process_steps(data)
+
+    config['step_trace'] = os.path.join(folder, 'pso_step_trace.json')
+
+    print("Parsing Parameters...\n", flush=True)
+    print("steps: ", flush=True)
+    print(json.dumps(output_steps, indent=4))
+    print("args: ", flush=True)
+    print(json.dumps(args, indent=4))
+    print("options: ", flush=True)
+    print(json.dumps(options, indent=4))
+    print("oh_strategy: ", flush=True)
+    print(json.dumps(oh_strategy, indent=4))
+    print("config: ", flush=True)
+    print(json.dumps(config, indent=4))
+    print("metainfo: ", flush=True)
+    print(json.dumps(metainfo, indent=4))
+    print("kwargs: ", flush=True)
+    print(json.dumps(parameter_map, indent=4))
+
+    print("Running MG-PSO Optimization...\n", flush=True)
+    optimizer, trace = global_best(output_steps,
+                                   rounds=(int(pp('min_rounds', parameter_map)), int(pp('max_rounds', parameter_map))),
+                                   args=args,
+                                   n_particles=int(pp('n_particles', parameter_map, 10)),
+                                   iters=int(pp('iters', parameter_map, 1)),
+                                   n_threads=int(pp('n_threads', parameter_map, 4)),
+                                   rtol=float(pp('rtol', parameter_map, 0.001)),
+                                   ftol=float(pp('ftol', parameter_map, -np.inf)),
+                                   ftol_iter=int(pp('ftol_iter', parameter_map, 1)),
+                                   rtol_iter=int(pp('rtol_iter', parameter_map, 1)),
+                                   options=options,
+                                   oh_strategy=oh_strategy,
+                                   metainfo=metainfo if len(metainfo) > 0 else None,
+                                   cost_target=float(pp('cost_target', parameter_map, -np.inf)),
+                                   conf=config
+                                   )
+
+    results_queue.put(trace)
+    print(trace, flush=True)
+    pass
+
+
+
+def run_sensitivity_analysis(data, folder, results_queue):
+    """_summary_
+
+    Args:
+        data (_type_): _description_
+        folder (_type_): _description_
+        results_queue (_type_): _description_
+    """
+    print("Running Sensitivity Analysis", flush=True)
+
+    shutil.copyfile(data["sensitivity_analysis_path"], os.path.join(folder, 'results', 'trace.csv'))
+    trace_path = os.path.join(folder, 'results', 'trace.csv')
+
+    output_steps = process_steps(data)
+
+    # Get list of parameters from steps
+    parameters = []
+    for param in output_steps[0]['param']:
+        parameters.append(param['name'])
+
+    request_json = {
+        "metainfo": {
+            "service_url": None,
+            "description": "",
+            "name": "",
+            "mode": "async"
+        },
+        "parameter": [
+            {
+                "name": "parameters",
+                "value": parameters
+            },
+            {
+                "name": "positiveBestMetrics",
+                "value": ["ns","kge","mns","kge09","nslog2"]
+            },
+            {
+                "name": "zeroBestMetrics",
+                "value": ["pbias","rmse"]
+            }
+        ]
+    }
+
+    with open(os.path.join(folder, 'results', 'request.json'), 'w') as json_file:
+        json.dump(request_json, json_file, indent=4)
+
+    request_path = os.path.join(folder, 'results', 'request.json')
+
+    output_directory = os.path.join(folder, 'results')
+
+    print("Starting ", data['url'], request_path, trace_path, output_directory, flush=True)
+
+    sensitivity_analysis(data['url'], request_path, trace_path, output_directory)
+
+    print("Finished Sensitivity Analysis", flush=True)
+
+
+
+
+
+
+
+
+def create_request(request_file: str) -> Client:
+    request: Client = Client.from_file(request_file)
+    return request
+
+def download_output(response: Client, target_directory) -> None:
+    data_names: list[str] = response.get_data_names()
+    for name in data_names:
+        url = response.get_data_value(name)
+        file_path = os.path.join(target_directory, name)
+        urllib.request.urlretrieve(url, file_path)
+
+def sensitivity_analysis(url, request_file, trace_file, output_directory):
+    request: Client = create_request(request_file)
+    files: list[str] = [trace_file] if os.path.isfile(trace_file) else []
+    conf = {
+        'service_timeout': 60.0, # (sec)
+    }
+    result: Client = Client()
+    try:
+        result = request.execute(url, files=files, sync=True, conf=conf)
+    except Exception as ex:
+        traceback.print_exc()
+        exit(1)
+
+    if result.is_finished():
+        download_output(result, output_directory)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ """
467
+ def run_process_old(stdout_queue, stderr_queue, results_queue, data, folder):
20
468
  steps = data['steps']
21
469
  args = data['arguments']
22
470
  calib = data['calibration_parameters']
23
471
 
472
+ my_mode = args["mode"]
473
+
474
+ # If "mode" in args remove it
475
+ if "mode" in args:
476
+ del args["mode"]
477
+
24
478
  calibration_map = {}
25
479
  for param in calib:
26
480
  param_name = param['name']
@@ -30,6 +484,9 @@ def run_process(stdout_queue, stderr_queue, results_queue, cosu_queue, data, fol
     if not os.path.exists(folder):
         os.makedirs(folder)
 
+    if not os.path.exists(os.path.join(folder, "results")):
+        os.makedirs(os.path.join(folder, "results"))
+
     if (os.path.exists(os.path.join(folder, 'output.txt'))):
         os.remove(os.path.join(folder, 'output.txt'))
 
@@ -45,98 +502,170 @@ def run_process(stdout_queue, stderr_queue, results_queue, cosu_queue, data, fol
     sys.stdout = os.fdopen(write_stdout, 'w')
     sys.stderr = os.fdopen(write_stderr, 'w')
 
-    stdour_thread = threading.Thread(target=enqueue_output, args=(os.fdopen(read_stdout, 'r'), stdout_queue))
+    stdout_thread = threading.Thread(target=enqueue_output, args=(os.fdopen(read_stdout, 'r'), stdout_queue))
     stderr_thread = threading.Thread(target=enqueue_output, args=(os.fdopen(read_stderr, 'r'), stderr_queue))
-    stdour_thread.daemon = True
+    stdout_thread.daemon = True
     stderr_thread.daemon = True
-    stdour_thread.start()
+    stdout_thread.start()
     stderr_thread.start()
 
-    options = {}
-    oh_strategy = {}
-
-    for key in calibration_map.keys():
-        if "options_" in key:
-            options[key.replace("options_", "")] = float(calibration_map[key])
-        if "strategy_" in key:
-            oh_strategy[key.replace("strategy_", "")] = calibration_map[key]
-
-    print("\n")
-    print(calibration_map)
-    print("\n")
-    print(options)
-    print("\n")
-    print(oh_strategy)
-    print("\n")
-
-    print("Running global_best...\n")
+    try:
+
+        options = {}
+        oh_strategy = {}
+
+        for key in calibration_map.keys():
+            if "options_" in key:
+                options[key.replace("options_", "")] = float(calibration_map[key])
+            if "strategy_" in key:
+                oh_strategy[key.replace("strategy_", "")] = calibration_map[key]
 
-    optimizer, trace = global_best(steps,
-        rounds=(int(calibration_map['min_rounds']), int(calibration_map['max_rounds'])),
-        args=args,
-        n_particles=int(calibration_map['n_particles']),
-        iters=int(calibration_map['iters']),
-        n_threads=int(calibration_map['n_threads']),
-        # ftol=0.00000001,
-        options=options,
-        oh_strategy=oh_strategy,
-        conf={
+        config = {}
+
+        if my_mode == "Sampling":
+            config = {
+                'service_timeout': int(calibration_map['service_timeout']),
+                'http_retry': int(calibration_map['http_retry']),
+                'allow_redirects': True if calibration_map['allow_redirects'] == "True" else False,
+                'async_call': True if calibration_map['async_call'] == "True" else False,
+                'conn_timeout': int(calibration_map['conn_timeout']),
+                'read_timeout': int(calibration_map['read_timeout']),
+                'step_trace': os.path.join(folder, 'pso_step_trace.json')
+            }
+        elif my_mode == "Optimization":
+            config = {
                 'service_timeout': int(calibration_map['service_timeout']),
                 'http_retry': int(calibration_map['http_retry']),
                 'http_allow_redirects': True if calibration_map['allow_redirects'] == "True" else False,
                 'async_call': True if calibration_map['async_call'] == "True" else False,
                 'http_conn_timeout': int(calibration_map['conn_timeout']),
                 'http_read_timeout': int(calibration_map['read_timeout']),
-                'particles_fail': int(calibration_map['particles_fail'])
+                'particles_fail': int(calibration_map['particles_fail']),
+                'step_trace': os.path.join(folder, 'pso_step_trace.json')
+            }
+
+        print("\n")
+        print(steps)
+        print("\n")
+        print(args)
+        print("\n")
+        print(calibration_map)
+        print("\n")
+        print(options)
+        print("\n")
+        print(oh_strategy)
+        print("\n")
+        print(config)
+        print("\n", flush=True)
+
+        if my_mode == "Sampling: Halton":
+            print("Running Halton Sampling..\n", flush=True)
+            trace = run_sampler(steps,
+                args,
+                int(calibration_map['count']),
+                int(calibration_map['num_threads']),
+                "halton",
+                conf=config,
+                trace_file=os.path.join(folder, 'results', 'halton_trace.csv'),
+                offset=int(calibration_map['offset']))
+            results_queue.put(trace)
+            print(trace, flush=True)
+            print("\n", flush=True)
+
+        elif my_mode == "Sampling: Random":
+            print("Running Random Sampling...\n", flush=True)
+            trace = run_sampler(steps,
+                args,
+                int(calibration_map['count']),
+                int(calibration_map['num_threads']),
+                "random",
+                conf=config,
+                trace_file=os.path.join(folder, 'results', 'random_trace.csv'))
+            results_queue.put(trace)
+            print(trace, flush=True)
+            print("\n", flush=True)
+
+        elif my_mode == "Sensitivity Analysis":
+
+            print("Running Sensitivity Analysis", flush=True)
+
+            shutil.copyfile(data["sensitivity_analysis_path"], os.path.join(folder, 'results', 'trace.csv'))
+            trace_path = os.path.join(folder, 'results', 'trace.csv')
+
+            # Get list of parameters from steps
+            parameters = []
+            for param in steps[0]['param']:
+                parameters.append(param['name'])
+
+            request_json = {
+                "metainfo": {
+                    "service_url": None,
+                    "description": "",
+                    "name": "",
+                    "mode": "async"
                 },
-        result_queue = cosu_queue
-    )
-
-
-    stdour_thread.join()
-    stderr_thread.join()
-
-    sys.stdout = old_stdout
-    sys.stderr = old_stderr
-    results_queue.put((optimizer, trace))
-
-def get_results():
-    request: Client = Client()
-    for name, value in parameters.items():
-        # if parameter name has a / in it assume that is a file based parameter and therefore value needs to be an array
-        if "/" in name and type(value) is not list:
-            request.add_data(name, [value])
-        else:
-            request.add_data(name, value)
+                "parameter": [
+                    {
+                        "name": "parameters",
+                        "value": parameters
+                    },
+                    {
+                        "name": "positiveBestMetrics",
+                        "value": ["ns","kge","mns","kge09","nslog2"]
+                    },
+                    {
+                        "name": "zeroBestMetrics",
+                        "value": ["pbias","rmse"]
+                    }
+                ]
+            }
+
+            with open(os.path.join(folder, 'results', 'request.json'), 'w') as json_file:
+                json.dump(request_json, json_file, indent=4)
+
+            request_path = os.path.join(folder, 'results', 'request.json')
 
-    conf = {
-        'service_timeout': 60.0 # (sec)
-    }
-    files: List[str] = [] #optional list of filenames
-
-    #Synchronous Call
-    result: Client = request.execute(CSIP_ENDPOINT, files=files, sync=True, conf=conf)
-
-    #Asynchronous Call
-    tsamp: float = 0
-    def callback(c: Client, progress: str):
-        tsamp2: float = time.time()
-        print('Halton Update {} - {} - {}'.format(halton_id, c.get_status(), tsamp2 - tsamp))
-
-
-    tsamp = time.time()
-    result: Client = request.execute_async(
-        CSIP_ENDPOINT,
-        files=files,
-        callback=callback,
-        first_poll=poll_time,
-        next_poll=poll_time,
-        conf=conf
-    )
-    # After recieving response
-    if result.is_finished():
-        print(result)
-    else:
-        print(result)
+            output_directory = os.path.join(folder, 'results')
+
+            print("Starting ", args['url'], request_path, trace_path, output_directory, flush=True)
 
+            sensitivity_analysis(args['url'], request_path, trace_path, output_directory)
 
+            print("Finished Sensitivity Analysis", flush=True)
+        else:
+            print("Running MG-PSO Optimization...\n", flush=True)
+            optimizer, trace = global_best(steps,
+                rounds=(int(calibration_map['min_rounds']), int(calibration_map['max_rounds'])),
+                args=args,
+                n_particles=int(calibration_map['n_particles']),
+                iters=int(calibration_map['iters']),
+                n_threads=int(calibration_map['n_threads']),
+                options=options,
+                oh_strategy=oh_strategy,
+                conf=config
+            )
+
+            results_queue.put(trace)
+            print(trace, flush=True)
+
+        print("Finishing up...", flush=True)
+        time.sleep(5)
+    except Exception as e:
+        print("An exception occurred: ", flush=True)
+        print(str(e))
+        # Print stack trace
+        import traceback
+        traceback.print_exc()
+
+        # Write all of this information to a crash file
+        with open(os.path.join(folder, 'crash.txt'), 'w') as f:
+            f.write(str(e))
+            f.write("\n")
+            traceback.print_exc(file=f)
+    finally:
+        stdout_thread.join()
+        stderr_thread.join()
+
+        sys.stdout = old_stdout
+        sys.stderr = old_stderr
+"""