omniopt2 7101__py3-none-any.whl → 7107__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. .omniopt.py +260 -152
  2. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt.py +260 -152
  3. {omniopt2-7101.dist-info → omniopt2-7107.dist-info}/METADATA +1 -1
  4. {omniopt2-7101.dist-info → omniopt2-7107.dist-info}/RECORD +35 -35
  5. omniopt2.egg-info/PKG-INFO +1 -1
  6. pyproject.toml +1 -1
  7. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.colorfunctions.sh +0 -0
  8. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.general.sh +0 -0
  9. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.helpers.py +0 -0
  10. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_cpu_ram_usage.py +0 -0
  11. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_general.py +0 -0
  12. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_gpu_usage.py +0 -0
  13. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_kde.py +0 -0
  14. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_scatter.py +0 -0
  15. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_scatter_generation_method.py +0 -0
  16. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_scatter_hex.py +0 -0
  17. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_time_and_exit_code.py +0 -0
  18. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_trial_index_result.py +0 -0
  19. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.omniopt_plot_worker.py +0 -0
  20. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.random_generator.py +0 -0
  21. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.shellscript_functions +0 -0
  22. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/.tpe.py +0 -0
  23. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/LICENSE +0 -0
  24. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/apt-dependencies.txt +0 -0
  25. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/omniopt +0 -0
  26. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/omniopt_docker +0 -0
  27. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/omniopt_evaluate +0 -0
  28. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/omniopt_plot +0 -0
  29. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/omniopt_share +0 -0
  30. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/requirements.txt +0 -0
  31. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/setup.py +0 -0
  32. {omniopt2-7101.data → omniopt2-7107.data}/data/bin/test_requirements.txt +0 -0
  33. {omniopt2-7101.dist-info → omniopt2-7107.dist-info}/WHEEL +0 -0
  34. {omniopt2-7101.dist-info → omniopt2-7107.dist-info}/licenses/LICENSE +0 -0
  35. {omniopt2-7101.dist-info → omniopt2-7107.dist-info}/top_level.txt +0 -0
.omniopt.py CHANGED
@@ -3226,44 +3226,97 @@ def get_return_in_case_of_errors() -> dict:
      return return_in_case_of_error
  
  @beartype
- def write_job_infos_csv(parameters: dict, stdout: Optional[str], program_string_with_params: str, exit_code: Optional[int], _signal: Optional[int], result: Optional[Union[Dict[str, Optional[float]], List[float], int, float]], start_time: Union[int, float], end_time: Union[int, float], run_time: Union[float, int]) -> None:
-     str_parameters_values: List[str] = [str(v) for v in list(parameters.values())]
+ def write_job_infos_csv(parameters: dict, stdout: Optional[str], program_string_with_params: str,
+                         exit_code: Optional[int], _signal: Optional[int],
+                         result: Optional[Union[Dict[str, Optional[float]], List[float], int, float]],
+                         start_time: Union[int, float], end_time: Union[int, float],
+                         run_time: Union[float, int]) -> None:
+     _write_job_infos_csv_main(parameters, stdout, program_string_with_params, exit_code, _signal, result, start_time, end_time, run_time)
+
+ @beartype
+ def _write_job_infos_csv_main(parameters: dict, stdout: Optional[str], program_string_with_params: str,
+                               exit_code: Optional[int], _signal: Optional[int],
+                               result: Optional[Union[Dict[str, Optional[float]], List[float], int, float]],
+                               start_time: Union[int, float], end_time: Union[int, float],
+                               run_time: Union[float, int]) -> None:
+     str_parameters_values = _write_job_infos_csv_parameters_to_str(parameters)
+     extra_vars_names, extra_vars_values = _write_job_infos_csv_extract_extra_vars(stdout)
+     extra_vars_names, extra_vars_values = _write_job_infos_csv_add_slurm_job_id(extra_vars_names, extra_vars_values)
  
-     extra_vars_names, extra_vars_values = extract_info(stdout)
+     parameters_keys = list(parameters.keys())
+
+     headline = _write_job_infos_csv_build_headline(parameters_keys, extra_vars_names)
+     result_values = _write_job_infos_csv_result_to_strlist(result)
+
+     values = _write_job_infos_csv_build_values(start_time, end_time, run_time, program_string_with_params, str_parameters_values, result_values, exit_code, _signal, extra_vars_values)
+
+     headline = _write_job_infos_csv_replace_none_with_str(headline)
+     values = _write_job_infos_csv_replace_none_with_str(values)
+
+     run_folder = get_current_run_folder()
+     if run_folder is not None and os.path.exists(run_folder):
+         try:
+             add_to_csv(f"{run_folder}/job_infos.csv", headline, values)
+         except Exception as e:
+             print_red(f"Error writing job_infos.csv: {e}")
+     else:
+         print_debug(f"evaluate: get_current_run_folder() {run_folder} could not be found")
+
+ @beartype
+ def _write_job_infos_csv_parameters_to_str(parameters: dict) -> List[str]:
+     return [str(v) for v in list(parameters.values())]
+
+ @beartype
+ def _write_job_infos_csv_extract_extra_vars(stdout: Optional[str]) -> Tuple[List[str], List[str]]:
+     # extract_info is an existing function that extracts extra variables from stdout
+     return extract_info(stdout)
  
+ @beartype
+ def _write_job_infos_csv_add_slurm_job_id(extra_vars_names: List[str], extra_vars_values: List[str]) -> Tuple[List[str], List[str]]:
      _SLURM_JOB_ID = os.getenv('SLURM_JOB_ID')
      if _SLURM_JOB_ID:
          extra_vars_names.append("OO_Info_SLURM_JOB_ID")
          extra_vars_values.append(str(_SLURM_JOB_ID))
+     return extra_vars_names, extra_vars_values
  
-     parameters_keys = list(parameters.keys())
-
-     headline: List[str] = [
+ @beartype
+ def _write_job_infos_csv_build_headline(parameters_keys: List[str], extra_vars_names: List[str]) -> List[str]:
+     return [
          "start_time",
          "end_time",
          "run_time",
          "program_string",
          *parameters_keys,
-         *arg_result_names,
+         *arg_result_names,  # arg_result_names must be defined globally
          "exit_code",
          "signal",
          "hostname",
          *extra_vars_names
      ]
  
-     result_values = []
+ @beartype
+ def _write_job_infos_csv_result_to_strlist(result: Optional[Union[Dict[str, Optional[float]], List[float], int, float]]) -> List[str]:
+     result_values: List[str] = []
  
      if isinstance(result, list):
          for rkey in result:
              result_values.append(str(rkey))
      elif isinstance(result, dict):
-         result_keys: list = list(result.keys())
+         result_keys = list(result.keys())
          for rkey in result_keys:
              rval = str(result[str(rkey)])
-
              result_values.append(rval)
+     elif result is not None:  # int or float
+         result_values.append(str(result))
  
-     values: List[str] = [
+     return result_values
+
+ @beartype
+ def _write_job_infos_csv_build_values(start_time: Union[int, float], end_time: Union[int, float], run_time: Union[float, int],
+                                       program_string_with_params: str, str_parameters_values: List[str],
+                                       result_values: List[str], exit_code: Optional[int], _signal: Optional[int],
+                                       extra_vars_values: List[str]) -> List[str]:
+     return [
          str(start_time),
          str(end_time),
          str(run_time),
@@ -3276,16 +3329,9 @@ def write_job_infos_csv(parameters: dict, stdout: Optional[str], program_string_
          *extra_vars_values
      ]
  
-     headline = ['None' if element is None else element for element in headline]
-     values = ['None' if element is None else element for element in values]
-
-     if get_current_run_folder() is not None and os.path.exists(get_current_run_folder()):
-         try:
-             add_to_csv(f"{get_current_run_folder()}/job_infos.csv", headline, values)
-         except Exception as e:
-             print_red(f"Error writing job_infos.csv: {e}")
-     else:
-         print_debug(f"evaluate: get_current_run_folder() {get_current_run_folder()} could not be found")
+ @beartype
+ def _write_job_infos_csv_replace_none_with_str(elements: List[Optional[str]]) -> List[str]:
+     return ['None' if element is None else element for element in elements]
  
  @beartype
  def print_evaluate_times() -> None:
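
Note: the hunk above splits the former monolithic write_job_infos_csv into small, individually @beartype-checked helpers. As a rough standalone sketch of the same row-assembly pattern (hypothetical names; stdlib csv in place of OmniOpt2's add_to_csv):

    import csv
    import os
    from typing import List, Optional

    def replace_none_with_str(elements: List[Optional[str]]) -> List[str]:
        # Normalize missing entries so every CSV cell is a string
        return ['None' if e is None else e for e in elements]

    def append_row(path: str, headline: List[str], values: List[str]) -> None:
        # Write the header row only when the file is first created
        new_file = not os.path.exists(path)
        with open(path, "a", newline="") as f:
            writer = csv.writer(f)
            if new_file:
                writer.writerow(headline)
            writer.writerow(values)

    headline = ["start_time", "end_time", "exit_code"]
    values = replace_none_with_str(["1700000000", "1700000060", None])
    append_row("job_infos.csv", headline, values)  # the last cell becomes 'None'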
@@ -3456,28 +3502,59 @@ def die_for_debug_reasons() -> None:
      except ValueError:
          print_red(f"Invalid value for DIE_AFTER_THIS_NR_OF_DONE_JOBS: '{max_done_str}', cannot be converted to int")
  
+ @beartype
+ def _evaluate_preprocess_parameters(parameters: dict) -> dict:
+     return {
+         k: (int(float(v)) if isinstance(v, (int, float, str)) and re.fullmatch(r'^\d+(\.0+)?$', str(v)) else v)
+         for k, v in parameters.items()
+     }
+
+ @beartype
+ def _evaluate_create_signal_map() -> Dict[str, type[BaseException]]:
+     return {
+         "USR1-signal": SignalUSR,
+         "CONT-signal": SignalCONT,
+         "INT-signal": SignalINT
+     }
+
+ @beartype
+ def _evaluate_handle_result(
+     stdout: str,
+     result: Union[int, float, dict, list],
+     parameters: dict
+ ) -> Dict[str, Optional[Union[float, Tuple]]]:
+     final_result: Dict[str, Optional[Union[float, Tuple]]] = {}
+
+     if isinstance(result, (int, float)):
+         for name in arg_result_names:
+             final_result[name] = attach_sem_to_result(stdout, name, float(result))
+
+     elif isinstance(result, list):
+         float_values = [float(r) for r in result]
+         for name in arg_result_names:
+             final_result[name] = attach_sem_to_result(stdout, name, float_values)
+
+     elif isinstance(result, dict):
+         for name in arg_result_names:
+             final_result[name] = attach_sem_to_result(stdout, name, result.get(name))
+
+     else:
+         write_failed_logs(parameters, "No Result")
+
+     return final_result
+
  @beartype
  def evaluate(parameters: dict) -> Optional[Union[int, float, Dict[str, Optional[Union[int, float, Tuple]]], List[float]]]:
      start_nvidia_smi_thread()
-
      return_in_case_of_error: dict = get_return_in_case_of_errors()
  
      _test_gpu = test_gpu_before_evaluate(return_in_case_of_error)
-     final_result = return_in_case_of_error
+     final_result: Optional[Union[int, float, Dict[str, Optional[Union[int, float, Tuple]]], List[float]]] = return_in_case_of_error
  
      if _test_gpu is None:
-         parameters = {
-             k: (int(float(v)) if isinstance(v, (int, float, str)) and re.fullmatch(r'^\d+(\.0+)?$', str(v)) else v)
-             for k, v in parameters.items()
-         }
-
+         parameters = _evaluate_preprocess_parameters(parameters)
          ignore_signals()
-
-         signal_messages = {
-             "USR1-signal": SignalUSR,
-             "CONT-signal": SignalCONT,
-             "INT-signal": SignalINT
-         }
+         signal_messages = _evaluate_create_signal_map()
  
          try:
              if args.raise_in_eval:
@@ -3498,23 +3575,7 @@ def evaluate(parameters: dict) -> Optional[Union[int, float, Dict[str, Optional[
  
              result = get_results_with_occ(stdout)
  
-             final_result = {}
-
-             if isinstance(result, (int, float)):
-                 for name in arg_result_names:
-                     final_result[name] = attach_sem_to_result(stdout, name, float(result))
-
-             elif isinstance(result, list):
-                 float_values = [float(r) for r in result]
-                 for name in arg_result_names:
-                     final_result[name] = attach_sem_to_result(stdout, name, float_values)
-
-             elif isinstance(result, dict):
-                 for name in arg_result_names:
-                     final_result[name] = attach_sem_to_result(stdout, name, result.get(name))
-
-             else:
-                 write_failed_logs(parameters, "No Result")
+             final_result = _evaluate_handle_result(stdout, result, parameters)
  
              _evaluate_print_stuff(
                  parameters,
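
Note: _evaluate_handle_result now centralizes how a scalar, list, or dict result is fanned out across the configured result names. A minimal sketch of that dispatch, with a stand-in list instead of the global arg_result_names and without the SEM handling done by attach_sem_to_result:

    from typing import Dict, List, Union

    result_names: List[str] = ["loss", "accuracy"]  # stand-in for arg_result_names

    def handle_result(result: Union[int, float, dict, list]) -> Dict[str, object]:
        # Mirrors the dispatch in _evaluate_handle_result, minus SEM handling
        final: Dict[str, object] = {}
        if isinstance(result, (int, float)):
            for name in result_names:          # one scalar fans out to every name
                final[name] = float(result)
        elif isinstance(result, list):
            float_values = [float(r) for r in result]
            for name in result_names:          # every name sees the full list
                final[name] = float_values
        elif isinstance(result, dict):
            for name in result_names:          # missing keys become None
                final[name] = result.get(name)
        return final

    print(handle_result(0.5))            # {'loss': 0.5, 'accuracy': 0.5}
    print(handle_result({"loss": 0.1}))  # {'loss': 0.1, 'accuracy': None}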
@@ -5562,7 +5623,6 @@ def parse_csv(csv_path: str) -> Tuple[List, List]:
  
      return arm_params_list, results_list
  
-
  @beartype
  def get_generation_node_for_index(
      this_csv_file_path: str,
@@ -5585,7 +5645,6 @@ def get_generation_node_for_index(
          print(f"Error while get_generation_node_for_index: {e}")
          return "MANUAL"
  
-
  @beartype
  def _get_generation_node_for_index_index_valid(
      index: int,
@@ -5594,7 +5653,6 @@ def _get_generation_node_for_index_index_valid(
  ) -> bool:
      return 0 <= index < len(arm_params_list) and index < len(results_list)
  
-
  @beartype
  def _get_generation_node_for_index_combine_dicts(
      dict1: Dict[str, Any],
@@ -5605,7 +5663,6 @@ def _get_generation_node_for_index_combine_dicts(
      combined.update(dict2)
      return combined
  
-
  @beartype
  def _get_generation_node_for_index_find_generation_node(
      csv_file_path: str,
@@ -5622,7 +5679,6 @@ def _get_generation_node_for_index_find_generation_node(
  
      return "MANUAL"
  
-
  @beartype
  def _get_generation_node_for_index_row_matches(
      row: Dict[str, str],
@@ -5642,7 +5698,6 @@ def _get_generation_node_for_index_row_matches(
  
      return True
  
-
  @beartype
  def _get_generation_node_for_index_floats_match(
      val: float,
@@ -6352,7 +6407,6 @@ def check_orchestrator(stdout_path: str, trial_index: int) -> Optional[List[str]
  
      return _check_orchestrator_find_behaviors(stdout, orchestrator["errors"])
  
-
  @beartype
  def _check_orchestrator_read_stdout_with_fallback(stdout_path: str, trial_index: int) -> Optional[str]:
      try:
@@ -6369,7 +6423,6 @@ def _check_orchestrator_read_stdout_with_fallback(stdout_path: str, trial_index:
      _check_orchestrator_register_missing_file(stdout_path, trial_index)
      return None
  
-
  @beartype
  def _check_orchestrator_register_missing_file(stdout_path: str, trial_index: int) -> None:
      if stdout_path not in ORCHESTRATE_TODO:
@@ -6378,7 +6431,6 @@ def _check_orchestrator_register_missing_file(stdout_path: str, trial_index: int
      else:
          print_red(f"File not found: {stdout_path}, not trying again")
  
-
  @beartype
  def _check_orchestrator_find_behaviors(stdout: str, errors: List[Dict[str, Any]]) -> List[str]:
      behaviors: List[str] = []
@@ -7163,64 +7215,82 @@ def generate_time_table_rich() -> None:
      save_table_as_text(table, filepath)
  
  @beartype
- def generate_job_submit_table_rich() -> None:
-     if not isinstance(job_submit_durations, list) or not isinstance(job_submit_nrs, list):
-         print_debug("generate_job_submit_table_rich: Error: job_submit_durations or job_submit_nrs is not a list.")
-         return
+ def validate_job_submit_data(durations: List[float], job_counts: List[int]) -> bool:
+     if not durations or not job_counts:
+         print_debug("No durations or job counts to display.")
+         return False
  
-     if len(job_submit_durations) == 0 or len(job_submit_nrs) == 0:
-         print_debug("generate_job_submit_table_rich: No durations or job counts to display.")
-         return
+     if len(durations) != len(job_counts):
+         print_debug("Length mismatch between durations and job counts.")
+         return False
  
-     if len(job_submit_durations) != len(job_submit_nrs):
-         print_debug("generate_job_submit_table_rich: Length mismatch between durations and job counts.")
-         return
+     return True
  
-     for i, val in enumerate(job_submit_durations):
-         try:
-             float(val)
-         except (ValueError, TypeError):
-             print_debug(f"generate_job_submit_table_rich: Error: Element at index {i} in durations is not a valid float.")
-             return
-     for i, val in enumerate(job_submit_nrs):
-         if not isinstance(val, int):
-             print_debug(f"generate_job_submit_table_rich: Error: Element at index {i} in job counts is not an int.")
-             return
+ @beartype
+ def convert_durations_to_float(raw_durations: List) -> List[float] | None:
+     try:
+         return [float(val) for val in raw_durations]
+     except (ValueError, TypeError) as e:
+         print_debug(f"Invalid float in durations: {e}")
+         return None
+
+ @beartype
+ def convert_job_counts_to_int(raw_counts: List) -> List[int] | None:
+     try:
+         return [int(val) for val in raw_counts]
+     except (ValueError, TypeError) as e:
+         print_debug(f"Invalid int in job counts: {e}")
+         return None
  
+ @beartype
+ def build_job_submission_table(durations: List[float], job_counts: List[int]) -> Table:
      table = Table(show_header=True, header_style="bold", title="Job submission durations")
      table.add_column("Batch", justify="right")
      table.add_column("Seconds", justify="right")
      table.add_column("Jobs", justify="right")
      table.add_column("Time per job", justify="right")
  
-     for idx, (time_val, jobs) in enumerate(zip(job_submit_durations, job_submit_nrs), start=1):
-         time_val_float = float(time_val)
-         time_per_job = time_val_float / jobs if jobs > 0 else 0
-         table.add_row(str(idx), f"{time_val_float:.3f}", str(jobs), f"{time_per_job:.3f}")
-
-     times_float = [float(t) for t in job_submit_durations]
-     avg_time = mean(times_float)
-     median_time = median(times_float)
-     total_time = sum(times_float)
-     max_time = max(times_float)
-     min_time = min(times_float)
+     for idx, (duration, jobs) in enumerate(zip(durations, job_counts), start=1):
+         time_per_job = duration / jobs if jobs > 0 else 0
+         table.add_row(str(idx), f"{duration:.3f}", str(jobs), f"{time_per_job:.3f}")
  
      table.add_section()
+     table.add_row("Average", f"{mean(durations):.3f}", "", "")
+     table.add_row("Median", f"{median(durations):.3f}", "", "")
+     table.add_row("Total", f"{sum(durations):.3f}", "", "")
+     table.add_row("Max", f"{max(durations):.3f}", "", "")
+     table.add_row("Min", f"{min(durations):.3f}", "", "")
  
-     table.add_row("Average", f"{avg_time:.3f}", "", "")
-     table.add_row("Median", f"{median_time:.3f}", "", "")
-     table.add_row("Total", f"{total_time:.3f}", "", "")
-     table.add_row("Max", f"{max_time:.3f}", "", "")
-     table.add_row("Min", f"{min_time:.3f}", "", "")
-
-     if args.show_generate_time_table:
-         console.print(table)
+     return table
  
+ @beartype
+ def export_table_to_file(table: Table, filename: str) -> None:
      folder = get_current_run_folder()
-     filename = "job_submit_durations.txt"
      filepath = os.path.join(folder, filename)
      save_table_as_text(table, filepath)
  
+ @beartype
+ def generate_job_submit_table_rich() -> None:
+     if not isinstance(job_submit_durations, list) or not isinstance(job_submit_nrs, list):
+         print_debug("job_submit_durations or job_submit_nrs is not a list.")
+         return
+
+     durations = convert_durations_to_float(job_submit_durations)
+     job_counts = convert_job_counts_to_int(job_submit_nrs)
+
+     if durations is None or job_counts is None:
+         return
+
+     if not validate_job_submit_data(durations, job_counts):
+         return
+
+     table = build_job_submission_table(durations, job_counts)
+
+     if args.show_generate_time_table:
+         console.print(table)
+
+     export_table_to_file(table, "job_submit_durations.txt")
+
  @beartype
  def plot_times_for_creation_and_submission() -> None:
      if not args.show_generation_and_submission_sixel:
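
Note: the decomposed pipeline (convert, validate, build, print, export) boils down to ordinary rich usage. A self-contained sketch with made-up numbers showing the kind of table build_job_submission_table produces:

    from statistics import mean, median
    from rich.console import Console
    from rich.table import Table

    durations = [2.131, 1.874, 2.402]   # example values only
    job_counts = [4, 4, 8]

    table = Table(show_header=True, header_style="bold", title="Job submission durations")
    for col in ("Batch", "Seconds", "Jobs", "Time per job"):
        table.add_column(col, justify="right")

    for idx, (duration, jobs) in enumerate(zip(durations, job_counts), start=1):
        table.add_row(str(idx), f"{duration:.3f}", str(jobs), f"{duration / jobs:.3f}")

    table.add_section()  # separates per-batch rows from the summary rows
    table.add_row("Average", f"{mean(durations):.3f}", "", "")
    table.add_row("Median", f"{median(durations):.3f}", "", "")

    Console().print(table)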
@@ -7851,49 +7921,71 @@ def create_and_execute_next_runs(next_nr_steps: int, phase: Optional[str], _max_
          print_debug(f"Warning: create_and_execute_next_runs(next_nr_steps: {next_nr_steps}, phase: {phase}, _max_eval: {_max_eval}, progress_bar)")
          return 0
  
-     trial_index_to_param = None
-     done_optimizing = False
+     trial_index_to_param: Optional[Dict] = None
+     done_optimizing: bool = False
+     results: List = []
  
      try:
-         nr_of_jobs_to_get = _calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))
-         results = []
+         done_optimizing, trial_index_to_param, results = _create_and_execute_next_runs_run_loop(next_nr_steps, _max_eval, phase, _progress_bar)
+         _create_and_execute_next_runs_finish(done_optimizing)
+     except Exception as e:
+         stacktrace = traceback.format_exc()
+         print_debug(f"Warning: create_and_execute_next_runs encountered an exception: {e}\n{stacktrace}")
+         return handle_exceptions_create_and_execute_next_runs(e)
  
-         new_nr_of_jobs_to_get = min(max_eval - (submitted_jobs() - failed_jobs()), nr_of_jobs_to_get)
+     return _create_and_execute_next_runs_return_value(trial_index_to_param)
  
-         range_nr = new_nr_of_jobs_to_get
-         get_next_trials_nr = 1
+ @beartype
+ def _create_and_execute_next_runs_run_loop(next_nr_steps: int, _max_eval: Optional[int], phase: Optional[str], _progress_bar: Any
+                                            ) -> Tuple[bool, Optional[Dict], List]:
+     done_optimizing = False
+     trial_index_to_param: Optional[Dict] = None
+     results: List = []
  
-         if args.generate_all_jobs_at_once:
-             range_nr = 1
-             get_next_trials_nr = new_nr_of_jobs_to_get
+     nr_of_jobs_to_get = _calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))
  
-         for _ in range(range_nr):
-             trial_index_to_param, optimization_complete = _get_next_trials(get_next_trials_nr)
-             done_optimizing = handle_optimization_completion(optimization_complete)
-             if done_optimizing:
-                 continue
-             if trial_index_to_param:
-                 nr_jobs_before_removing_abandoned = len(list(trial_index_to_param.keys()))
+     # Make sure _max_eval is not None
+     __max_eval = _max_eval if _max_eval is not None else 0
+     new_nr_of_jobs_to_get = min(__max_eval - (submitted_jobs() - failed_jobs()), nr_of_jobs_to_get)
+
+     range_nr = new_nr_of_jobs_to_get
+     get_next_trials_nr = 1
  
-                 trial_index_to_param = {k: v for k, v in trial_index_to_param.items() if k not in abandoned_trial_indices}
+     if getattr(args, "generate_all_jobs_at_once", False):
+         range_nr = 1
+         get_next_trials_nr = new_nr_of_jobs_to_get
  
-                 if len(list(trial_index_to_param.keys())):
-                     results.extend(execute_trials(trial_index_to_param, next_nr_steps, phase, _max_eval, _progress_bar))
+     for _ in range(range_nr):
+         trial_index_to_param, optimization_complete = _get_next_trials(get_next_trials_nr)
+         done_optimizing = handle_optimization_completion(optimization_complete)
+         if done_optimizing:
+             continue
+
+         if trial_index_to_param:
+             nr_jobs_before_removing_abandoned = len(list(trial_index_to_param.keys()))
+
+             filtered_trial_index_to_param = {k: v for k, v in trial_index_to_param.items() if k not in abandoned_trial_indices}
+
+             if len(filtered_trial_index_to_param):
+                 results.extend(execute_trials(filtered_trial_index_to_param, next_nr_steps, phase, _max_eval, _progress_bar))
+             else:
+                 if nr_jobs_before_removing_abandoned > 0:
+                     print_debug(f"Could not get jobs. They've been deleted by abandoned_trial_indices: {abandoned_trial_indices}")
          else:
-             if nr_jobs_before_removing_abandoned > 0:
-                 print_debug(f"Could not get jobs. They've been deleted by abandoned_trial_indices: {abandoned_trial_indices}")
-             else:
-                 print_debug("Could not generate any jobs")
+             print_debug("Could not generate any jobs")
  
-         finish_previous_jobs(["finishing jobs"])
+         trial_index_to_param = filtered_trial_index_to_param
  
-         if done_optimizing:
-             end_program(False, 0)
-     except Exception as e:
-         stacktrace = traceback.format_exc()
-         print_debug(f"Warning: create_and_execute_next_runs encountered an exception: {e}\n{stacktrace}")
-         return handle_exceptions_create_and_execute_next_runs(e)
+     return done_optimizing, trial_index_to_param, results
+
+ @beartype
+ def _create_and_execute_next_runs_finish(done_optimizing: bool) -> None:
+     finish_previous_jobs(["finishing jobs"])
+     if done_optimizing:
+         end_program(False, 0)
  
+ @beartype
+ def _create_and_execute_next_runs_return_value(trial_index_to_param: Optional[Dict]) -> int:
      try:
          if trial_index_to_param:
              res = len(trial_index_to_param.keys())
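
Note: the budget line min(__max_eval - (submitted_jobs() - failed_jobs()), nr_of_jobs_to_get) caps how many trials the run loop requests. A worked example with hypothetical numbers:

    # Hypothetical values illustrating the budget arithmetic in
    # _create_and_execute_next_runs_run_loop:
    max_eval = 100          # __max_eval: total evaluation budget
    submitted = 40          # submitted_jobs()
    failed = 5              # failed_jobs(); failed jobs are refunded to the budget
    nr_of_jobs_to_get = 20  # what _calculate_nr_of_jobs_to_get proposed

    # 100 - (40 - 5) = 65 evaluations still allowed, so the proposal of 20 stands
    new_nr_of_jobs_to_get = min(max_eval - (submitted - failed), nr_of_jobs_to_get)
    assert new_nr_of_jobs_to_get == 20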
@@ -8386,6 +8478,41 @@ def plot_pareto_frontier_sixel(data: Any, x_metric: str, y_metric: str) -> None:
  
      plt.close(fig)
  
+ @beartype
+ def _pareto_front_general_validate_shapes(x: np.ndarray, y: np.ndarray) -> None:
+     if x.shape != y.shape:
+         raise ValueError("Input arrays x and y must have the same shape.")
+
+ @beartype
+ def _pareto_front_general_compare(
+     xi: float, yi: float, xj: float, yj: float,
+     x_minimize: bool, y_minimize: bool
+ ) -> bool:
+     x_better_eq = xj <= xi if x_minimize else xj >= xi
+     y_better_eq = yj <= yi if y_minimize else yj >= yi
+     x_strictly_better = xj < xi if x_minimize else xj > xi
+     y_strictly_better = yj < yi if y_minimize else yj > yi
+
+     return x_better_eq and y_better_eq and (x_strictly_better or y_strictly_better)
+
+ @beartype
+ def _pareto_front_general_find_dominated(
+     x: np.ndarray, y: np.ndarray, x_minimize: bool, y_minimize: bool
+ ) -> np.ndarray:
+     num_points = len(x)
+     is_dominated = np.zeros(num_points, dtype=bool)
+
+     for i in range(num_points):
+         for j in range(num_points):
+             if i == j:
+                 continue
+
+             if _pareto_front_general_compare(x[i], y[i], x[j], y[j], x_minimize, y_minimize):
+                 is_dominated[i] = True
+                 break
+
+     return is_dominated
+
  @beartype
  def pareto_front_general(
      x: np.ndarray,
@@ -8394,28 +8521,9 @@
      y_minimize: bool = True
  ) -> np.ndarray:
      try:
-         if x.shape != y.shape:
-             raise ValueError("Input arrays x and y must have the same shape.")
-
-         num_points = len(x)
-         is_dominated = np.zeros(num_points, dtype=bool)
-
-         for i in range(num_points):
-             for j in range(num_points):
-                 if i == j:
-                     continue
-
-                 x_better_or_equal = x[j] <= x[i] if x_minimize else x[j] >= x[i]
-                 y_better_or_equal = y[j] <= y[i] if y_minimize else y[j] >= y[i]
-                 x_strictly_better = x[j] < x[i] if x_minimize else x[j] > x[i]
-                 y_strictly_better = y[j] < y[i] if y_minimize else y[j] > y[i]
-
-                 if x_better_or_equal and y_better_or_equal and (x_strictly_better or y_strictly_better):
-                     is_dominated[i] = True
-                     break
-
+         _pareto_front_general_validate_shapes(x, y)
+         is_dominated = _pareto_front_general_find_dominated(x, y, x_minimize, y_minimize)
          return np.where(~is_dominated)[0]
-
      except Exception as e:
          print("Error in pareto_front_general:", str(e))
          return np.array([], dtype=int)
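
Note: the extracted helpers implement the standard O(n²) weak-dominance scan. A self-contained sketch (plain NumPy, no beartype) of what pareto_front_general computes with both objectives minimized:

    import numpy as np

    def pareto_front(x: np.ndarray, y: np.ndarray) -> np.ndarray:
        # Indices of points not weakly dominated by any other point
        # (both objectives minimized, matching the defaults above).
        n = len(x)
        dominated = np.zeros(n, dtype=bool)
        for i in range(n):
            for j in range(n):
                if i == j:
                    continue
                better_eq = x[j] <= x[i] and y[j] <= y[i]
                strictly = x[j] < x[i] or y[j] < y[i]
                if better_eq and strictly:
                    dominated[i] = True
                    break
        return np.where(~dominated)[0]

    x = np.array([1.0, 2.0, 3.0, 4.0])
    y = np.array([4.0, 2.0, 1.0, 3.0])
    print(pareto_front(x, y))  # [0 1 2]; point (4, 3) is dominated by (2, 2)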