omniopt2 8754__tar.gz → 8764__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of omniopt2 might be problematic. See the package's registry page for more details.

Files changed (40)
  1. {omniopt2-8754 → omniopt2-8764}/.omniopt.py +61 -48
  2. {omniopt2-8754 → omniopt2-8764}/PKG-INFO +1 -1
  3. {omniopt2-8754 → omniopt2-8764}/omniopt +0 -5
  4. {omniopt2-8754 → omniopt2-8764}/omniopt2.egg-info/PKG-INFO +1 -1
  5. {omniopt2-8754 → omniopt2-8764}/pyproject.toml +1 -1
  6. {omniopt2-8754 → omniopt2-8764}/.colorfunctions.sh +0 -0
  7. {omniopt2-8754 → omniopt2-8764}/.dockerignore +0 -0
  8. {omniopt2-8754 → omniopt2-8764}/.general.sh +0 -0
  9. {omniopt2-8754 → omniopt2-8764}/.gitignore +0 -0
  10. {omniopt2-8754 → omniopt2-8764}/.helpers.py +0 -0
  11. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_cpu_ram_usage.py +0 -0
  12. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_general.py +0 -0
  13. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_gpu_usage.py +0 -0
  14. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_kde.py +0 -0
  15. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_scatter.py +0 -0
  16. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_scatter_generation_method.py +0 -0
  17. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_scatter_hex.py +0 -0
  18. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_time_and_exit_code.py +0 -0
  19. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_trial_index_result.py +0 -0
  20. {omniopt2-8754 → omniopt2-8764}/.omniopt_plot_worker.py +0 -0
  21. {omniopt2-8754 → omniopt2-8764}/.random_generator.py +0 -0
  22. {omniopt2-8754 → omniopt2-8764}/.shellscript_functions +0 -0
  23. {omniopt2-8754 → omniopt2-8764}/.tests/pylint.rc +0 -0
  24. {omniopt2-8754 → omniopt2-8764}/.tpe.py +0 -0
  25. {omniopt2-8754 → omniopt2-8764}/LICENSE +0 -0
  26. {omniopt2-8754 → omniopt2-8764}/MANIFEST.in +0 -0
  27. {omniopt2-8754 → omniopt2-8764}/README.md +0 -0
  28. {omniopt2-8754 → omniopt2-8764}/apt-dependencies.txt +0 -0
  29. {omniopt2-8754 → omniopt2-8764}/omniopt2.egg-info/SOURCES.txt +0 -0
  30. {omniopt2-8754 → omniopt2-8764}/omniopt2.egg-info/dependency_links.txt +0 -0
  31. {omniopt2-8754 → omniopt2-8764}/omniopt2.egg-info/requires.txt +0 -0
  32. {omniopt2-8754 → omniopt2-8764}/omniopt2.egg-info/top_level.txt +0 -0
  33. {omniopt2-8754 → omniopt2-8764}/omniopt_docker +0 -0
  34. {omniopt2-8754 → omniopt2-8764}/omniopt_evaluate +0 -0
  35. {omniopt2-8754 → omniopt2-8764}/omniopt_plot +0 -0
  36. {omniopt2-8754 → omniopt2-8764}/omniopt_share +0 -0
  37. {omniopt2-8754 → omniopt2-8764}/requirements.txt +0 -0
  38. {omniopt2-8754 → omniopt2-8764}/setup.cfg +0 -0
  39. {omniopt2-8754 → omniopt2-8764}/setup.py +0 -0
  40. {omniopt2-8754 → omniopt2-8764}/test_requirements.txt +0 -0
@@ -87,6 +87,7 @@ log_nr_gen_jobs: list[int] = []
87
87
  generation_strategy_human_readable: str = ""
88
88
  oo_call: str = "./omniopt"
89
89
  progress_bar_length: int = 0
90
+ worker_usage_file = 'worker_usage.csv'
90
91
 
91
92
  if os.environ.get("CUSTOM_VIRTUAL_ENV") == "1":
92
93
  oo_call = "omniopt"
@@ -2453,7 +2454,7 @@ def set_nr_inserted_jobs(new_nr_inserted_jobs: int) -> None:
2453
2454
 
2454
2455
  def write_worker_usage() -> None:
2455
2456
  if len(WORKER_PERCENTAGE_USAGE):
2456
- csv_filename = get_current_run_folder('worker_usage.csv')
2457
+ csv_filename = get_current_run_folder(worker_usage_file)
2457
2458
 
2458
2459
  csv_columns = ['time', 'num_parallel_jobs', 'nr_current_workers', 'percentage']
2459
2460
 
@@ -2463,7 +2464,7 @@ def write_worker_usage() -> None:
2463
2464
  csv_writer.writerow(row)
2464
2465
  else:
2465
2466
  if is_slurm_job():
2466
- print_debug("WORKER_PERCENTAGE_USAGE seems to be empty. Not writing worker_usage.csv")
2467
+ print_debug(f"WORKER_PERCENTAGE_USAGE seems to be empty. Not writing {worker_usage_file}")
2467
2468
 
2468
2469
  def log_system_usage() -> None:
2469
2470
  if not get_current_run_folder():
@@ -6384,6 +6385,8 @@ def log_worker_numbers() -> None:
6384
6385
  if len(WORKER_PERCENTAGE_USAGE) == 0 or WORKER_PERCENTAGE_USAGE[len(WORKER_PERCENTAGE_USAGE) - 1] != this_values:
6385
6386
  WORKER_PERCENTAGE_USAGE.append(this_values)
6386
6387
 
6388
+ write_worker_usage()
6389
+
6387
6390
  def get_slurm_in_brackets(in_brackets: list) -> list:
6388
6391
  if is_slurm_job():
6389
6392
  workers_strings = get_workers_string()
@@ -8089,7 +8092,7 @@ def break_run_search(_name: str, _max_eval: Optional[int]) -> bool:
8089
8092
 
8090
8093
  return _ret
8091
8094
 
8092
- def _calculate_nr_of_jobs_to_get(simulated_jobs: int, currently_running_jobs: int) -> int:
8095
+ def calculate_nr_of_jobs_to_get(simulated_jobs: int, currently_running_jobs: int) -> int:
8093
8096
  """Calculates the number of jobs to retrieve."""
8094
8097
  return min(
8095
8098
  max_eval + simulated_jobs - count_done_jobs(),
@@ -8102,7 +8105,7 @@ def remove_extra_spaces(text: str) -> str:
8102
8105
  raise ValueError("Input must be a string")
8103
8106
  return re.sub(r'\s+', ' ', text).strip()
8104
8107
 
8105
- def _get_trials_message(nr_of_jobs_to_get: int, full_nr_of_jobs_to_get: int, trial_durations: List[float]) -> str:
8108
+ def get_trials_message(nr_of_jobs_to_get: int, full_nr_of_jobs_to_get: int, trial_durations: List[float]) -> str:
8106
8109
  """Generates the appropriate message for the number of trials being retrieved."""
8107
8110
  ret = ""
8108
8111
  if full_nr_of_jobs_to_get > 1:
@@ -8270,7 +8273,7 @@ def generate_trials(n: int, recursion: bool) -> Tuple[Dict[int, Any], bool]:
8270
8273
  retries += 1
8271
8274
  continue
8272
8275
 
8273
- progressbar_description(_get_trials_message(cnt + 1, n, trial_durations))
8276
+ progressbar_description(get_trials_message(cnt + 1, n, trial_durations))
8274
8277
 
8275
8278
  try:
8276
8279
  result = create_and_handle_trial(arm)
@@ -8292,7 +8295,7 @@ def generate_trials(n: int, recursion: bool) -> Tuple[Dict[int, Any], bool]:
8292
8295
  return finalized
8293
8296
 
8294
8297
  except Exception as e:
8295
- return _handle_generation_failure(e, n, recursion)
8298
+ return handle_generation_failure(e, n, recursion)
8296
8299
 
8297
8300
  class TrialRejected(Exception):
8298
8301
  pass
@@ -8357,7 +8360,7 @@ def finalize_generation(trials_dict: Dict[int, Any], cnt: int, requested: int, s
8357
8360
 
8358
8361
  return trials_dict, False
8359
8362
 
8360
- def _handle_generation_failure(
8363
+ def handle_generation_failure(
8361
8364
  e: Exception,
8362
8365
  requested: int,
8363
8366
  recursion: bool
@@ -8373,7 +8376,7 @@ def _handle_generation_failure(
8373
8376
  )):
8374
8377
  msg = str(e)
8375
8378
  if msg not in error_8_saved:
8376
- _print_exhaustion_warning(e, recursion)
8379
+ print_exhaustion_warning(e, recursion)
8377
8380
  error_8_saved.append(msg)
8378
8381
 
8379
8382
  if not recursion and args.revert_to_random_when_seemingly_exhausted:
@@ -8381,11 +8384,11 @@ def _handle_generation_failure(
8381
8384
  set_global_gs_to_random()
8382
8385
  return fetch_next_trials(requested, True)
8383
8386
 
8384
- print_red(f"_handle_generation_failure: General Exception: {e}")
8387
+ print_red(f"handle_generation_failure: General Exception: {e}")
8385
8388
 
8386
8389
  return {}, True
8387
8390
 
8388
- def _print_exhaustion_warning(e: Exception, recursion: bool) -> None:
8391
+ def print_exhaustion_warning(e: Exception, recursion: bool) -> None:
8389
8392
  if not recursion and args.revert_to_random_when_seemingly_exhausted:
8390
8393
  print_yellow(f"\n⚠Error 8: {e} From now (done jobs: {count_done_jobs()}) on, random points will be generated.")
8391
8394
  else:
@@ -9235,20 +9238,20 @@ def create_and_execute_next_runs(next_nr_steps: int, phase: Optional[str], _max_
9235
9238
  done_optimizing: bool = False
9236
9239
 
9237
9240
  try:
9238
- done_optimizing, trial_index_to_param = _create_and_execute_next_runs_run_loop(_max_eval, phase)
9239
- _create_and_execute_next_runs_finish(done_optimizing)
9241
+ done_optimizing, trial_index_to_param = create_and_execute_next_runs_run_loop(_max_eval, phase)
9242
+ create_and_execute_next_runs_finish(done_optimizing)
9240
9243
  except Exception as e:
9241
9244
  stacktrace = traceback.format_exc()
9242
9245
  print_debug(f"Warning: create_and_execute_next_runs encountered an exception: {e}\n{stacktrace}")
9243
9246
  return handle_exceptions_create_and_execute_next_runs(e)
9244
9247
 
9245
- return _create_and_execute_next_runs_return_value(trial_index_to_param)
9248
+ return create_and_execute_next_runs_return_value(trial_index_to_param)
9246
9249
 
9247
- def _create_and_execute_next_runs_run_loop(_max_eval: Optional[int], phase: Optional[str]) -> Tuple[bool, Optional[Dict]]:
9250
+ def create_and_execute_next_runs_run_loop(_max_eval: Optional[int], phase: Optional[str]) -> Tuple[bool, Optional[Dict]]:
9248
9251
  done_optimizing = False
9249
9252
  trial_index_to_param: Optional[Dict] = None
9250
9253
 
9251
- nr_of_jobs_to_get = _calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))
9254
+ nr_of_jobs_to_get = calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))
9252
9255
 
9253
9256
  __max_eval = _max_eval if _max_eval is not None else 0
9254
9257
  new_nr_of_jobs_to_get = min(__max_eval - (submitted_jobs() - failed_jobs()), nr_of_jobs_to_get)
@@ -9286,13 +9289,13 @@ def _create_and_execute_next_runs_run_loop(_max_eval: Optional[int], phase: Opti
9286
9289
 
9287
9290
  return done_optimizing, trial_index_to_param
9288
9291
 
9289
- def _create_and_execute_next_runs_finish(done_optimizing: bool) -> None:
9292
+ def create_and_execute_next_runs_finish(done_optimizing: bool) -> None:
9290
9293
  finish_previous_jobs(["finishing jobs"])
9291
9294
 
9292
9295
  if done_optimizing:
9293
9296
  end_program(False, 0)
9294
9297
 
9295
- def _create_and_execute_next_runs_return_value(trial_index_to_param: Optional[Dict]) -> int:
9298
+ def create_and_execute_next_runs_return_value(trial_index_to_param: Optional[Dict]) -> int:
9296
9299
  try:
9297
9300
  if trial_index_to_param:
9298
9301
  res = len(trial_index_to_param.keys())
@@ -9623,28 +9626,28 @@ def parse_parameters() -> Any:
9623
9626
  def create_pareto_front_table(idxs: List[int], metric_x: str, metric_y: str) -> Table:
9624
9627
  table = Table(title=f"Pareto-Front for {metric_y}/{metric_x}:", show_lines=True)
9625
9628
 
9626
- rows = _pareto_front_table_read_csv()
9629
+ rows = pareto_front_table_read_csv()
9627
9630
  if not rows:
9628
9631
  table.add_column("No data found")
9629
9632
  return table
9630
9633
 
9631
- filtered_rows = _pareto_front_table_filter_rows(rows, idxs)
9634
+ filtered_rows = pareto_front_table_filter_rows(rows, idxs)
9632
9635
  if not filtered_rows:
9633
9636
  table.add_column("No matching entries")
9634
9637
  return table
9635
9638
 
9636
- param_cols, result_cols = _pareto_front_table_get_columns(filtered_rows[0])
9639
+ param_cols, result_cols = pareto_front_table_get_columns(filtered_rows[0])
9637
9640
 
9638
- _pareto_front_table_add_headers(table, param_cols, result_cols)
9639
- _pareto_front_table_add_rows(table, filtered_rows, param_cols, result_cols)
9641
+ pareto_front_table_add_headers(table, param_cols, result_cols)
9642
+ pareto_front_table_add_rows(table, filtered_rows, param_cols, result_cols)
9640
9643
 
9641
9644
  return table
9642
9645
 
9643
- def _pareto_front_table_read_csv() -> List[Dict[str, str]]:
9646
+ def pareto_front_table_read_csv() -> List[Dict[str, str]]:
9644
9647
  with open(RESULT_CSV_FILE, mode="r", encoding="utf-8", newline="") as f:
9645
9648
  return list(csv.DictReader(f))
9646
9649
 
9647
- def _pareto_front_table_filter_rows(rows: List[Dict[str, str]], idxs: List[int]) -> List[Dict[str, str]]:
9650
+ def pareto_front_table_filter_rows(rows: List[Dict[str, str]], idxs: List[int]) -> List[Dict[str, str]]:
9648
9651
  result = []
9649
9652
  for row in rows:
9650
9653
  try:
@@ -9656,7 +9659,7 @@ def _pareto_front_table_filter_rows(rows: List[Dict[str, str]], idxs: List[int])
9656
9659
  result.append(row)
9657
9660
  return result
9658
9661
 
9659
- def _pareto_front_table_get_columns(first_row: Dict[str, str]) -> Tuple[List[str], List[str]]:
9662
+ def pareto_front_table_get_columns(first_row: Dict[str, str]) -> Tuple[List[str], List[str]]:
9660
9663
  all_columns = list(first_row.keys())
9661
9664
  ignored_cols = set(special_col_names) - {"trial_index"}
9662
9665
 
@@ -9664,13 +9667,13 @@ def _pareto_front_table_get_columns(first_row: Dict[str, str]) -> Tuple[List[str
9664
9667
  result_cols = [col for col in arg_result_names if col in all_columns]
9665
9668
  return param_cols, result_cols
9666
9669
 
9667
- def _pareto_front_table_add_headers(table: Table, param_cols: List[str], result_cols: List[str]) -> None:
9670
+ def pareto_front_table_add_headers(table: Table, param_cols: List[str], result_cols: List[str]) -> None:
9668
9671
  for col in param_cols:
9669
9672
  table.add_column(col, justify="center")
9670
9673
  for col in result_cols:
9671
9674
  table.add_column(Text(f"{col}", style="cyan"), justify="center")
9672
9675
 
9673
- def _pareto_front_table_add_rows(table: Table, rows: List[Dict[str, str]], param_cols: List[str], result_cols: List[str]) -> None:
9676
+ def pareto_front_table_add_rows(table: Table, rows: List[Dict[str, str]], param_cols: List[str], result_cols: List[str]) -> None:
9674
9677
  for row in rows:
9675
9678
  values = [str(helpers.to_int_when_possible(row[col])) for col in param_cols]
9676
9679
  result_values = [Text(str(helpers.to_int_when_possible(row[col])), style="cyan") for col in result_cols]
@@ -9731,11 +9734,11 @@ def plot_pareto_frontier_sixel(data: Any, x_metric: str, y_metric: str) -> None:
9731
9734
 
9732
9735
  plt.close(fig)
9733
9736
 
9734
- def _pareto_front_general_validate_shapes(x: np.ndarray, y: np.ndarray) -> None:
9737
+ def pareto_front_general_validate_shapes(x: np.ndarray, y: np.ndarray) -> None:
9735
9738
  if x.shape != y.shape:
9736
9739
  raise ValueError("Input arrays x and y must have the same shape.")
9737
9740
 
9738
- def _pareto_front_general_compare(
9741
+ def pareto_front_general_compare(
9739
9742
  xi: float, yi: float, xj: float, yj: float,
9740
9743
  x_minimize: bool, y_minimize: bool
9741
9744
  ) -> bool:
@@ -9746,7 +9749,7 @@ def _pareto_front_general_compare(
9746
9749
 
9747
9750
  return bool(x_better_eq and y_better_eq and (x_strictly_better or y_strictly_better))
9748
9751
 
9749
- def _pareto_front_general_find_dominated(
9752
+ def pareto_front_general_find_dominated(
9750
9753
  x: np.ndarray, y: np.ndarray, x_minimize: bool, y_minimize: bool
9751
9754
  ) -> np.ndarray:
9752
9755
  num_points = len(x)
@@ -9757,7 +9760,7 @@ def _pareto_front_general_find_dominated(
9757
9760
  if i == j:
9758
9761
  continue
9759
9762
 
9760
- if _pareto_front_general_compare(x[i], y[i], x[j], y[j], x_minimize, y_minimize):
9763
+ if pareto_front_general_compare(x[i], y[i], x[j], y[j], x_minimize, y_minimize):
9761
9764
  is_dominated[i] = True
9762
9765
  break
9763
9766
 
@@ -9770,14 +9773,14 @@ def pareto_front_general(
9770
9773
  y_minimize: bool = True
9771
9774
  ) -> np.ndarray:
9772
9775
  try:
9773
- _pareto_front_general_validate_shapes(x, y)
9774
- is_dominated = _pareto_front_general_find_dominated(x, y, x_minimize, y_minimize)
9776
+ pareto_front_general_validate_shapes(x, y)
9777
+ is_dominated = pareto_front_general_find_dominated(x, y, x_minimize, y_minimize)
9775
9778
  return np.where(~is_dominated)[0]
9776
9779
  except Exception as e:
9777
9780
  print("Error in pareto_front_general:", str(e))
9778
9781
  return np.array([], dtype=int)
9779
9782
 
9780
- def _pareto_front_aggregate_data(path_to_calculate: str) -> Optional[Dict[Tuple[int, str], Dict[str, Dict[str, float]]]]:
9783
+ def pareto_front_aggregate_data(path_to_calculate: str) -> Optional[Dict[Tuple[int, str], Dict[str, Dict[str, float]]]]:
9781
9784
  results_csv_file = f"{path_to_calculate}/{RESULTS_CSV_FILENAME}"
9782
9785
  result_names_file = f"{path_to_calculate}/result_names.txt"
9783
9786
 
@@ -9805,7 +9808,7 @@ def _pareto_front_aggregate_data(path_to_calculate: str) -> Optional[Dict[Tuple[
9805
9808
 
9806
9809
  return records
9807
9810
 
9808
- def _pareto_front_filter_complete_points(
9811
+ def pareto_front_filter_complete_points(
9809
9812
  path_to_calculate: str,
9810
9813
  records: Dict[Tuple[int, str], Dict[str, Dict[str, float]]],
9811
9814
  primary_name: str,
@@ -9822,7 +9825,7 @@ def _pareto_front_filter_complete_points(
9822
9825
  raise ValueError(f"No full data points with both objectives found in {path_to_calculate}.")
9823
9826
  return points
9824
9827
 
9825
- def _pareto_front_transform_objectives(
9828
+ def pareto_front_transform_objectives(
9826
9829
  points: List[Tuple[Any, float, float]],
9827
9830
  primary_name: str,
9828
9831
  secondary_name: str
@@ -9845,7 +9848,7 @@ def _pareto_front_transform_objectives(
9845
9848
 
9846
9849
  return x, y
9847
9850
 
9848
- def _pareto_front_select_pareto_points(
9851
+ def pareto_front_select_pareto_points(
9849
9852
  x: np.ndarray,
9850
9853
  y: np.ndarray,
9851
9854
  x_minimize: bool,
@@ -9859,7 +9862,7 @@ def _pareto_front_select_pareto_points(
9859
9862
  selected_points = [points[i] for i in sorted_indices]
9860
9863
  return selected_points
9861
9864
 
9862
- def _pareto_front_build_return_structure(
9865
+ def pareto_front_build_return_structure(
9863
9866
  path_to_calculate: str,
9864
9867
  selected_points: List[Tuple[Any, float, float]],
9865
9868
  records: Dict[Tuple[int, str], Dict[str, Dict[str, float]]],
@@ -9890,7 +9893,7 @@ def _pareto_front_build_return_structure(
9890
9893
  for (trial_index, arm_name), _, _ in selected_points:
9891
9894
  row = csv_rows.get(trial_index, {})
9892
9895
  if row == {} or row is None or row['arm_name'] != arm_name:
9893
- print_debug(f"_pareto_front_build_return_structure: trial_index '{trial_index}' could not be found and row returned as None")
9896
+ print_debug(f"pareto_front_build_return_structure: trial_index '{trial_index}' could not be found and row returned as None")
9894
9897
  continue
9895
9898
 
9896
9899
  idxs.append(int(row["trial_index"]))
@@ -9934,15 +9937,15 @@ def get_pareto_frontier_points(
9934
9937
  absolute_metrics: List[str],
9935
9938
  num_points: int
9936
9939
  ) -> Optional[dict]:
9937
- records = _pareto_front_aggregate_data(path_to_calculate)
9940
+ records = pareto_front_aggregate_data(path_to_calculate)
9938
9941
 
9939
9942
  if records is None:
9940
9943
  return None
9941
9944
 
9942
- points = _pareto_front_filter_complete_points(path_to_calculate, records, primary_objective, secondary_objective)
9943
- x, y = _pareto_front_transform_objectives(points, primary_objective, secondary_objective)
9944
- selected_points = _pareto_front_select_pareto_points(x, y, x_minimize, y_minimize, points, num_points)
9945
- result = _pareto_front_build_return_structure(path_to_calculate, selected_points, records, absolute_metrics, primary_objective, secondary_objective)
9945
+ points = pareto_front_filter_complete_points(path_to_calculate, records, primary_objective, secondary_objective)
9946
+ x, y = pareto_front_transform_objectives(points, primary_objective, secondary_objective)
9947
+ selected_points = pareto_front_select_pareto_points(x, y, x_minimize, y_minimize, points, num_points)
9948
+ result = pareto_front_build_return_structure(path_to_calculate, selected_points, records, absolute_metrics, primary_objective, secondary_objective)
9946
9949
 
9947
9950
  return result
9948
9951
 
@@ -9954,7 +9957,7 @@ def save_experiment_state() -> None:
9954
9957
  state_path = get_current_run_folder("experiment_state.json")
9955
9958
  save_ax_client_to_json_file(state_path)
9956
9959
  except Exception as e:
9957
- print(f"Error saving experiment state: {e}")
9960
+ print_debug(f"Error saving experiment state: {e}")
9958
9961
 
9959
9962
  def wait_for_state_file(state_path: str, min_size: int = 5, max_wait_seconds: int = 60) -> bool:
9960
9963
  try:
@@ -10209,7 +10212,7 @@ def get_pareto_front_data(path_to_calculate: str, res_names: list) -> dict:
10209
10212
  def show_pareto_frontier_data(path_to_calculate: str, res_names: list, disable_sixel_and_table: bool = False) -> None:
10210
10213
  if len(res_names) <= 1:
10211
10214
  print_debug(f"--result_names (has {len(res_names)} entries) must be at least 2.")
10212
- return
10215
+ return None
10213
10216
 
10214
10217
  pareto_front_data: dict = get_pareto_front_data(path_to_calculate, res_names)
10215
10218
 
@@ -10230,8 +10233,16 @@ def show_pareto_frontier_data(path_to_calculate: str, res_names: list, disable_s
10230
10233
  else:
10231
10234
  print(f"Not showing Pareto-front-sixel for {path_to_calculate}")
10232
10235
 
10233
- if len(calculated_frontier[metric_x][metric_y]["idxs"]):
10234
- pareto_points[metric_x][metric_y] = sorted(calculated_frontier[metric_x][metric_y]["idxs"])
10236
+ if calculated_frontier is None:
10237
+ print_debug("ERROR: calculated_frontier is None")
10238
+ return None
10239
+
10240
+ try:
10241
+ if len(calculated_frontier[metric_x][metric_y]["idxs"]):
10242
+ pareto_points[metric_x][metric_y] = sorted(calculated_frontier[metric_x][metric_y]["idxs"])
10243
+ except AttributeError:
10244
+ print_debug(f"ERROR: calculated_frontier structure invalid for ({metric_x}, {metric_y})")
10245
+ return None
10235
10246
 
10236
10247
  rich_table = pareto_front_as_rich_table(
10237
10248
  calculated_frontier[metric_x][metric_y]["idxs"],
@@ -10256,6 +10267,8 @@ def show_pareto_frontier_data(path_to_calculate: str, res_names: list, disable_s
10256
10267
 
10257
10268
  live_share_after_pareto()
10258
10269
 
10270
+ return None
10271
+
10259
10272
  def show_available_hardware_and_generation_strategy_string(gpu_string: str, gpu_color: str) -> None:
10260
10273
  cpu_count = os.cpu_count()
10261
10274
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: omniopt2
3
- Version: 8754
3
+ Version: 8764
4
4
  Summary: Automatic highly parallelized hyperparameter optimizer based on Ax/Botorch
5
5
  Home-page: https://scads.ai/transfer-2/verfuegbare-software-dienste-en/omniopt/
6
6
  Author: Norman Koch
@@ -1924,34 +1924,29 @@ EOF
1924
1924
  exit_code_lines=$(grep -i "exit-code:*" "$LOG_PATH" 2>/dev/null)
1925
1925
  exit_code_lines_lines=$?
1926
1926
  if [ $exit_code_lines_lines -ne 0 ] || [ -z "$exit_code_lines" ]; then
1927
- echo "WARN: grep failed or no exit-code line found."
1928
1927
  exit_code_lines=""
1929
1928
  fi
1930
1929
 
1931
1930
  exit_code_sed=$(echo "$exit_code_lines" | sed -e 's#Exit-Code:*[[:space:]]*##i' -e 's#,.*##')
1932
1931
  exit_code_sed_sed=$?
1933
1932
  if [ $exit_code_sed_sed -ne 0 ] || [ -z "$exit_code_sed" ]; then
1934
- echo "WARN: sed failed or no data after sed."
1935
1933
  exit_code_sed=""
1936
1934
  fi
1937
1935
 
1938
1936
  exit_code_tail=$(echo "$exit_code_sed" | tail -n1)
1939
1937
  exit_code_tail_tail=$?
1940
1938
  if [ $exit_code_tail_tail -ne 0 ] || [ -z "$exit_code_tail" ]; then
1941
- echo "WARN: tail failed or no data after tail."
1942
1939
  exit_code_tail=""
1943
1940
  fi
1944
1941
 
1945
1942
  exit_code_only_digits=$(echo "$exit_code_tail" | grep -o '[0-9]\+')
1946
1943
  if [ -z "$exit_code_only_digits" ]; then
1947
- echo "WARN: No valid exit code found, setting it to 3"
1948
1944
  exit_code_only_digits=3
1949
1945
  fi
1950
1946
 
1951
1947
  exit_code="$exit_code_only_digits"
1952
1948
 
1953
1949
  if ! [[ "$exit_code" =~ ^[0-9]+$ ]]; then
1954
- echo "WARN: exit_code invalid ('$exit_code'), setting to 3"
1955
1950
  exit_code=3
1956
1951
  fi
1957
1952
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: omniopt2
3
- Version: 8754
3
+ Version: 8764
4
4
  Summary: Automatic highly parallelized hyperparameter optimizer based on Ax/Botorch
5
5
  Home-page: https://scads.ai/transfer-2/verfuegbare-software-dienste-en/omniopt/
6
6
  Author: Norman Koch
@@ -5,7 +5,7 @@ authors = [
5
5
  {email = "norman.koch@tu-dresden.de"},
6
6
  {name = "Norman Koch"}
7
7
  ]
8
- version = "8754"
8
+ version = "8764"
9
9
 
10
10
  readme = "README.md"
11
11
  dynamic = ["dependencies"]
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes