omniopt2 8754__tar.gz → 8763__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries, and is provided for informational purposes only; it reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release: this version of omniopt2 might be problematic.

Files changed (40)
  1. {omniopt2-8754 → omniopt2-8763}/.omniopt.py +59 -48
  2. {omniopt2-8754 → omniopt2-8763}/PKG-INFO +1 -1
  3. {omniopt2-8754 → omniopt2-8763}/omniopt +0 -5
  4. {omniopt2-8754 → omniopt2-8763}/omniopt2.egg-info/PKG-INFO +1 -1
  5. {omniopt2-8754 → omniopt2-8763}/pyproject.toml +1 -1
  6. {omniopt2-8754 → omniopt2-8763}/.colorfunctions.sh +0 -0
  7. {omniopt2-8754 → omniopt2-8763}/.dockerignore +0 -0
  8. {omniopt2-8754 → omniopt2-8763}/.general.sh +0 -0
  9. {omniopt2-8754 → omniopt2-8763}/.gitignore +0 -0
  10. {omniopt2-8754 → omniopt2-8763}/.helpers.py +0 -0
  11. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_cpu_ram_usage.py +0 -0
  12. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_general.py +0 -0
  13. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_gpu_usage.py +0 -0
  14. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_kde.py +0 -0
  15. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_scatter.py +0 -0
  16. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_scatter_generation_method.py +0 -0
  17. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_scatter_hex.py +0 -0
  18. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_time_and_exit_code.py +0 -0
  19. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_trial_index_result.py +0 -0
  20. {omniopt2-8754 → omniopt2-8763}/.omniopt_plot_worker.py +0 -0
  21. {omniopt2-8754 → omniopt2-8763}/.random_generator.py +0 -0
  22. {omniopt2-8754 → omniopt2-8763}/.shellscript_functions +0 -0
  23. {omniopt2-8754 → omniopt2-8763}/.tests/pylint.rc +0 -0
  24. {omniopt2-8754 → omniopt2-8763}/.tpe.py +0 -0
  25. {omniopt2-8754 → omniopt2-8763}/LICENSE +0 -0
  26. {omniopt2-8754 → omniopt2-8763}/MANIFEST.in +0 -0
  27. {omniopt2-8754 → omniopt2-8763}/README.md +0 -0
  28. {omniopt2-8754 → omniopt2-8763}/apt-dependencies.txt +0 -0
  29. {omniopt2-8754 → omniopt2-8763}/omniopt2.egg-info/SOURCES.txt +0 -0
  30. {omniopt2-8754 → omniopt2-8763}/omniopt2.egg-info/dependency_links.txt +0 -0
  31. {omniopt2-8754 → omniopt2-8763}/omniopt2.egg-info/requires.txt +0 -0
  32. {omniopt2-8754 → omniopt2-8763}/omniopt2.egg-info/top_level.txt +0 -0
  33. {omniopt2-8754 → omniopt2-8763}/omniopt_docker +0 -0
  34. {omniopt2-8754 → omniopt2-8763}/omniopt_evaluate +0 -0
  35. {omniopt2-8754 → omniopt2-8763}/omniopt_plot +0 -0
  36. {omniopt2-8754 → omniopt2-8763}/omniopt_share +0 -0
  37. {omniopt2-8754 → omniopt2-8763}/requirements.txt +0 -0
  38. {omniopt2-8754 → omniopt2-8763}/setup.cfg +0 -0
  39. {omniopt2-8754 → omniopt2-8763}/setup.py +0 -0
  40. {omniopt2-8754 → omniopt2-8763}/test_requirements.txt +0 -0
{omniopt2-8754 → omniopt2-8763}/.omniopt.py
@@ -87,6 +87,7 @@ log_nr_gen_jobs: list[int] = []
  generation_strategy_human_readable: str = ""
  oo_call: str = "./omniopt"
  progress_bar_length: int = 0
+ worker_usage_file = 'worker_usage.csv'

  if os.environ.get("CUSTOM_VIRTUAL_ENV") == "1":
  oo_call = "omniopt"
@@ -2453,7 +2454,7 @@ def set_nr_inserted_jobs(new_nr_inserted_jobs: int) -> None:

  def write_worker_usage() -> None:
  if len(WORKER_PERCENTAGE_USAGE):
- csv_filename = get_current_run_folder('worker_usage.csv')
+ csv_filename = get_current_run_folder(worker_usage_file)

  csv_columns = ['time', 'num_parallel_jobs', 'nr_current_workers', 'percentage']

@@ -2463,7 +2464,7 @@ def write_worker_usage() -> None:
  csv_writer.writerow(row)
  else:
  if is_slurm_job():
- print_debug("WORKER_PERCENTAGE_USAGE seems to be empty. Not writing worker_usage.csv")
+ print_debug(f"WORKER_PERCENTAGE_USAGE seems to be empty. Not writing {worker_usage_file}")

  def log_system_usage() -> None:
  if not get_current_run_folder():
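
The two hunks above replace the hard-coded 'worker_usage.csv' string with the new module-level worker_usage_file constant. As a reading aid, here is a minimal, self-contained sketch of that pattern; the helper name, the DictWriter-based writing and the sample data are assumptions for illustration, not code taken from .omniopt.py:

import csv
from pathlib import Path

WORKER_USAGE_FILE = "worker_usage.csv"  # analogous to the new worker_usage_file constant

def write_worker_usage_sketch(run_folder: str, samples: list) -> None:
    # Write one row per usage sample; do nothing if there is nothing to write,
    # mirroring the "WORKER_PERCENTAGE_USAGE seems to be empty" branch above.
    if not samples:
        return
    csv_path = Path(run_folder) / WORKER_USAGE_FILE
    csv_columns = ['time', 'num_parallel_jobs', 'nr_current_workers', 'percentage']
    with open(csv_path, "w", encoding="utf-8", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=csv_columns)
        writer.writeheader()
        writer.writerows(samples)

# Fabricated example call:
# write_worker_usage_sketch("runs/0", [{"time": 0, "num_parallel_jobs": 4,
#                                       "nr_current_workers": 2, "percentage": 50.0}])
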
@@ -8089,7 +8090,7 @@ def break_run_search(_name: str, _max_eval: Optional[int]) -> bool:

  return _ret

- def _calculate_nr_of_jobs_to_get(simulated_jobs: int, currently_running_jobs: int) -> int:
+ def calculate_nr_of_jobs_to_get(simulated_jobs: int, currently_running_jobs: int) -> int:
  """Calculates the number of jobs to retrieve."""
  return min(
  max_eval + simulated_jobs - count_done_jobs(),
@@ -8102,7 +8103,7 @@ def remove_extra_spaces(text: str) -> str:
  raise ValueError("Input must be a string")
  return re.sub(r'\s+', ' ', text).strip()

- def _get_trials_message(nr_of_jobs_to_get: int, full_nr_of_jobs_to_get: int, trial_durations: List[float]) -> str:
+ def get_trials_message(nr_of_jobs_to_get: int, full_nr_of_jobs_to_get: int, trial_durations: List[float]) -> str:
  """Generates the appropriate message for the number of trials being retrieved."""
  ret = ""
  if full_nr_of_jobs_to_get > 1:
@@ -8270,7 +8271,7 @@ def generate_trials(n: int, recursion: bool) -> Tuple[Dict[int, Any], bool]:
  retries += 1
  continue

- progressbar_description(_get_trials_message(cnt + 1, n, trial_durations))
+ progressbar_description(get_trials_message(cnt + 1, n, trial_durations))

  try:
  result = create_and_handle_trial(arm)
@@ -8292,7 +8293,7 @@ def generate_trials(n: int, recursion: bool) -> Tuple[Dict[int, Any], bool]:
  return finalized

  except Exception as e:
- return _handle_generation_failure(e, n, recursion)
+ return handle_generation_failure(e, n, recursion)

  class TrialRejected(Exception):
  pass
@@ -8357,7 +8358,7 @@ def finalize_generation(trials_dict: Dict[int, Any], cnt: int, requested: int, s

  return trials_dict, False

- def _handle_generation_failure(
+ def handle_generation_failure(
  e: Exception,
  requested: int,
  recursion: bool
@@ -8373,7 +8374,7 @@ def _handle_generation_failure(
  )):
  msg = str(e)
  if msg not in error_8_saved:
- _print_exhaustion_warning(e, recursion)
+ print_exhaustion_warning(e, recursion)
  error_8_saved.append(msg)

  if not recursion and args.revert_to_random_when_seemingly_exhausted:
@@ -8381,11 +8382,11 @@ def _handle_generation_failure(
  set_global_gs_to_random()
  return fetch_next_trials(requested, True)

- print_red(f"_handle_generation_failure: General Exception: {e}")
+ print_red(f"handle_generation_failure: General Exception: {e}")

  return {}, True

- def _print_exhaustion_warning(e: Exception, recursion: bool) -> None:
+ def print_exhaustion_warning(e: Exception, recursion: bool) -> None:
  if not recursion and args.revert_to_random_when_seemingly_exhausted:
  print_yellow(f"\n⚠Error 8: {e} From now (done jobs: {count_done_jobs()}) on, random points will be generated.")
  else:
@@ -9235,20 +9236,20 @@ def create_and_execute_next_runs(next_nr_steps: int, phase: Optional[str], _max_
  done_optimizing: bool = False

  try:
- done_optimizing, trial_index_to_param = _create_and_execute_next_runs_run_loop(_max_eval, phase)
- _create_and_execute_next_runs_finish(done_optimizing)
+ done_optimizing, trial_index_to_param = create_and_execute_next_runs_run_loop(_max_eval, phase)
+ create_and_execute_next_runs_finish(done_optimizing)
  except Exception as e:
  stacktrace = traceback.format_exc()
  print_debug(f"Warning: create_and_execute_next_runs encountered an exception: {e}\n{stacktrace}")
  return handle_exceptions_create_and_execute_next_runs(e)

- return _create_and_execute_next_runs_return_value(trial_index_to_param)
+ return create_and_execute_next_runs_return_value(trial_index_to_param)

- def _create_and_execute_next_runs_run_loop(_max_eval: Optional[int], phase: Optional[str]) -> Tuple[bool, Optional[Dict]]:
+ def create_and_execute_next_runs_run_loop(_max_eval: Optional[int], phase: Optional[str]) -> Tuple[bool, Optional[Dict]]:
  done_optimizing = False
  trial_index_to_param: Optional[Dict] = None

- nr_of_jobs_to_get = _calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))
+ nr_of_jobs_to_get = calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))

  __max_eval = _max_eval if _max_eval is not None else 0
  new_nr_of_jobs_to_get = min(__max_eval - (submitted_jobs() - failed_jobs()), nr_of_jobs_to_get)
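
calculate_nr_of_jobs_to_get and its caller above cap the next batch of trials twice: once against the remaining evaluation budget (max_eval + simulated_jobs - count_done_jobs()) and once against the budget left after already submitted jobs. The sketch below restates that capping idea; the second in-function bound and all parameter names are assumptions, since the full expression is truncated in this diff:

def calculate_nr_of_jobs_to_get_sketch(max_eval: int, simulated_jobs: int, done_jobs: int,
                                       submitted_jobs: int, failed_jobs: int,
                                       currently_running_jobs: int, num_parallel_jobs: int) -> int:
    # Bound 1: do not exceed the remaining evaluation budget (visible in the hunk above).
    remaining_budget = max_eval + simulated_jobs - done_jobs
    # Bound 2 (assumed, the expression is truncated in the diff): free worker slots.
    free_slots = num_parallel_jobs - currently_running_jobs
    nr_of_jobs_to_get = min(remaining_budget, free_slots)
    # Second cap from the caller: budget left after already submitted (minus failed) jobs.
    nr_of_jobs_to_get = min(max_eval - (submitted_jobs - failed_jobs), nr_of_jobs_to_get)
    return max(0, nr_of_jobs_to_get)

# e.g. 100 evaluations, 3 imported, 40 done, 45 submitted, 5 failed, 5 of 8 workers busy -> 3
print(calculate_nr_of_jobs_to_get_sketch(100, 3, 40, 45, 5, 5, 8))
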
@@ -9286,13 +9287,13 @@ def _create_and_execute_next_runs_run_loop(_max_eval: Optional[int], phase: Opti

  return done_optimizing, trial_index_to_param

- def _create_and_execute_next_runs_finish(done_optimizing: bool) -> None:
+ def create_and_execute_next_runs_finish(done_optimizing: bool) -> None:
  finish_previous_jobs(["finishing jobs"])

  if done_optimizing:
  end_program(False, 0)

- def _create_and_execute_next_runs_return_value(trial_index_to_param: Optional[Dict]) -> int:
+ def create_and_execute_next_runs_return_value(trial_index_to_param: Optional[Dict]) -> int:
  try:
  if trial_index_to_param:
  res = len(trial_index_to_param.keys())
@@ -9623,28 +9624,28 @@ def parse_parameters() -> Any:
  def create_pareto_front_table(idxs: List[int], metric_x: str, metric_y: str) -> Table:
  table = Table(title=f"Pareto-Front for {metric_y}/{metric_x}:", show_lines=True)

- rows = _pareto_front_table_read_csv()
+ rows = pareto_front_table_read_csv()
  if not rows:
  table.add_column("No data found")
  return table

- filtered_rows = _pareto_front_table_filter_rows(rows, idxs)
+ filtered_rows = pareto_front_table_filter_rows(rows, idxs)
  if not filtered_rows:
  table.add_column("No matching entries")
  return table

- param_cols, result_cols = _pareto_front_table_get_columns(filtered_rows[0])
+ param_cols, result_cols = pareto_front_table_get_columns(filtered_rows[0])

- _pareto_front_table_add_headers(table, param_cols, result_cols)
- _pareto_front_table_add_rows(table, filtered_rows, param_cols, result_cols)
+ pareto_front_table_add_headers(table, param_cols, result_cols)
+ pareto_front_table_add_rows(table, filtered_rows, param_cols, result_cols)

  return table

- def _pareto_front_table_read_csv() -> List[Dict[str, str]]:
+ def pareto_front_table_read_csv() -> List[Dict[str, str]]:
  with open(RESULT_CSV_FILE, mode="r", encoding="utf-8", newline="") as f:
  return list(csv.DictReader(f))

- def _pareto_front_table_filter_rows(rows: List[Dict[str, str]], idxs: List[int]) -> List[Dict[str, str]]:
+ def pareto_front_table_filter_rows(rows: List[Dict[str, str]], idxs: List[int]) -> List[Dict[str, str]]:
  result = []
  for row in rows:
  try:
@@ -9656,7 +9657,7 @@ def _pareto_front_table_filter_rows(rows: List[Dict[str, str]], idxs: List[int])
  result.append(row)
  return result

- def _pareto_front_table_get_columns(first_row: Dict[str, str]) -> Tuple[List[str], List[str]]:
+ def pareto_front_table_get_columns(first_row: Dict[str, str]) -> Tuple[List[str], List[str]]:
  all_columns = list(first_row.keys())
  ignored_cols = set(special_col_names) - {"trial_index"}

@@ -9664,13 +9665,13 @@ def _pareto_front_table_get_columns(first_row: Dict[str, str]) -> Tuple[List[str
  result_cols = [col for col in arg_result_names if col in all_columns]
  return param_cols, result_cols

- def _pareto_front_table_add_headers(table: Table, param_cols: List[str], result_cols: List[str]) -> None:
+ def pareto_front_table_add_headers(table: Table, param_cols: List[str], result_cols: List[str]) -> None:
  for col in param_cols:
  table.add_column(col, justify="center")
  for col in result_cols:
  table.add_column(Text(f"{col}", style="cyan"), justify="center")

- def _pareto_front_table_add_rows(table: Table, rows: List[Dict[str, str]], param_cols: List[str], result_cols: List[str]) -> None:
+ def pareto_front_table_add_rows(table: Table, rows: List[Dict[str, str]], param_cols: List[str], result_cols: List[str]) -> None:
  for row in rows:
  values = [str(helpers.to_int_when_possible(row[col])) for col in param_cols]
  result_values = [Text(str(helpers.to_int_when_possible(row[col])), style="cyan") for col in result_cols]
@@ -9731,11 +9732,11 @@ def plot_pareto_frontier_sixel(data: Any, x_metric: str, y_metric: str) -> None:

  plt.close(fig)

- def _pareto_front_general_validate_shapes(x: np.ndarray, y: np.ndarray) -> None:
+ def pareto_front_general_validate_shapes(x: np.ndarray, y: np.ndarray) -> None:
  if x.shape != y.shape:
  raise ValueError("Input arrays x and y must have the same shape.")

- def _pareto_front_general_compare(
+ def pareto_front_general_compare(
  xi: float, yi: float, xj: float, yj: float,
  x_minimize: bool, y_minimize: bool
  ) -> bool:
@@ -9746,7 +9747,7 @@ def _pareto_front_general_compare(

  return bool(x_better_eq and y_better_eq and (x_strictly_better or y_strictly_better))

- def _pareto_front_general_find_dominated(
+ def pareto_front_general_find_dominated(
  x: np.ndarray, y: np.ndarray, x_minimize: bool, y_minimize: bool
  ) -> np.ndarray:
  num_points = len(x)
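
The renamed helpers pareto_front_general_compare and pareto_front_general_find_dominated (together with pareto_front_general further below) implement a pairwise dominance filter. The following compact sketch restates that idea for readability; it is an illustration under assumed comparison semantics, not the omniopt2 code:

import numpy as np

def pareto_front_indices_sketch(x: np.ndarray, y: np.ndarray,
                                x_minimize: bool = True, y_minimize: bool = True) -> np.ndarray:
    # Same shape check as pareto_front_general_validate_shapes.
    if x.shape != y.shape:
        raise ValueError("Input arrays x and y must have the same shape.")

    def dominates(xi, yi, xj, yj):
        # Point j dominates point i if it is no worse in both objectives
        # and strictly better in at least one.
        x_better_eq = xj <= xi if x_minimize else xj >= xi
        y_better_eq = yj <= yi if y_minimize else yj >= yi
        x_strict = xj < xi if x_minimize else xj > xi
        y_strict = yj < yi if y_minimize else yj > yi
        return x_better_eq and y_better_eq and (x_strict or y_strict)

    is_dominated = np.zeros(len(x), dtype=bool)
    for i in range(len(x)):
        for j in range(len(x)):
            if i != j and dominates(x[i], y[i], x[j], y[j]):
                is_dominated[i] = True
                break
    return np.where(~is_dominated)[0]

# Minimizing both objectives: (3, 3) is dominated by (2, 2), so indices 0, 1, 3 remain.
print(pareto_front_indices_sketch(np.array([1, 2, 3, 4]), np.array([4, 2, 3, 1])))  # [0 1 3]
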
@@ -9757,7 +9758,7 @@ def _pareto_front_general_find_dominated(
  if i == j:
  continue

- if _pareto_front_general_compare(x[i], y[i], x[j], y[j], x_minimize, y_minimize):
+ if pareto_front_general_compare(x[i], y[i], x[j], y[j], x_minimize, y_minimize):
  is_dominated[i] = True
  break

@@ -9770,14 +9771,14 @@ def pareto_front_general(
  y_minimize: bool = True
  ) -> np.ndarray:
  try:
- _pareto_front_general_validate_shapes(x, y)
- is_dominated = _pareto_front_general_find_dominated(x, y, x_minimize, y_minimize)
+ pareto_front_general_validate_shapes(x, y)
+ is_dominated = pareto_front_general_find_dominated(x, y, x_minimize, y_minimize)
  return np.where(~is_dominated)[0]
  except Exception as e:
  print("Error in pareto_front_general:", str(e))
  return np.array([], dtype=int)

- def _pareto_front_aggregate_data(path_to_calculate: str) -> Optional[Dict[Tuple[int, str], Dict[str, Dict[str, float]]]]:
+ def pareto_front_aggregate_data(path_to_calculate: str) -> Optional[Dict[Tuple[int, str], Dict[str, Dict[str, float]]]]:
  results_csv_file = f"{path_to_calculate}/{RESULTS_CSV_FILENAME}"
  result_names_file = f"{path_to_calculate}/result_names.txt"

@@ -9805,7 +9806,7 @@ def _pareto_front_aggregate_data(path_to_calculate: str) -> Optional[Dict[Tuple[

  return records

- def _pareto_front_filter_complete_points(
+ def pareto_front_filter_complete_points(
  path_to_calculate: str,
  records: Dict[Tuple[int, str], Dict[str, Dict[str, float]]],
  primary_name: str,
@@ -9822,7 +9823,7 @@ def _pareto_front_filter_complete_points(
  raise ValueError(f"No full data points with both objectives found in {path_to_calculate}.")
  return points

- def _pareto_front_transform_objectives(
+ def pareto_front_transform_objectives(
  points: List[Tuple[Any, float, float]],
  primary_name: str,
  secondary_name: str
@@ -9845,7 +9846,7 @@ def _pareto_front_transform_objectives(

  return x, y

- def _pareto_front_select_pareto_points(
+ def pareto_front_select_pareto_points(
  x: np.ndarray,
  y: np.ndarray,
  x_minimize: bool,
@@ -9859,7 +9860,7 @@ def _pareto_front_select_pareto_points(
  selected_points = [points[i] for i in sorted_indices]
  return selected_points

- def _pareto_front_build_return_structure(
+ def pareto_front_build_return_structure(
  path_to_calculate: str,
  selected_points: List[Tuple[Any, float, float]],
  records: Dict[Tuple[int, str], Dict[str, Dict[str, float]]],
@@ -9890,7 +9891,7 @@ def _pareto_front_build_return_structure(
  for (trial_index, arm_name), _, _ in selected_points:
  row = csv_rows.get(trial_index, {})
  if row == {} or row is None or row['arm_name'] != arm_name:
- print_debug(f"_pareto_front_build_return_structure: trial_index '{trial_index}' could not be found and row returned as None")
+ print_debug(f"pareto_front_build_return_structure: trial_index '{trial_index}' could not be found and row returned as None")
  continue

  idxs.append(int(row["trial_index"]))
@@ -9934,15 +9935,15 @@ def get_pareto_frontier_points(
  absolute_metrics: List[str],
  num_points: int
  ) -> Optional[dict]:
- records = _pareto_front_aggregate_data(path_to_calculate)
+ records = pareto_front_aggregate_data(path_to_calculate)

  if records is None:
  return None

- points = _pareto_front_filter_complete_points(path_to_calculate, records, primary_objective, secondary_objective)
- x, y = _pareto_front_transform_objectives(points, primary_objective, secondary_objective)
- selected_points = _pareto_front_select_pareto_points(x, y, x_minimize, y_minimize, points, num_points)
- result = _pareto_front_build_return_structure(path_to_calculate, selected_points, records, absolute_metrics, primary_objective, secondary_objective)
+ points = pareto_front_filter_complete_points(path_to_calculate, records, primary_objective, secondary_objective)
+ x, y = pareto_front_transform_objectives(points, primary_objective, secondary_objective)
+ selected_points = pareto_front_select_pareto_points(x, y, x_minimize, y_minimize, points, num_points)
+ result = pareto_front_build_return_structure(path_to_calculate, selected_points, records, absolute_metrics, primary_objective, secondary_objective)

  return result

@@ -9954,7 +9955,7 @@ def save_experiment_state() -> None:
  state_path = get_current_run_folder("experiment_state.json")
  save_ax_client_to_json_file(state_path)
  except Exception as e:
- print(f"Error saving experiment state: {e}")
+ print_debug(f"Error saving experiment state: {e}")

  def wait_for_state_file(state_path: str, min_size: int = 5, max_wait_seconds: int = 60) -> bool:
  try:
@@ -10209,7 +10210,7 @@ def get_pareto_front_data(path_to_calculate: str, res_names: list) -> dict:
  def show_pareto_frontier_data(path_to_calculate: str, res_names: list, disable_sixel_and_table: bool = False) -> None:
  if len(res_names) <= 1:
  print_debug(f"--result_names (has {len(res_names)} entries) must be at least 2.")
- return
+ return None

  pareto_front_data: dict = get_pareto_front_data(path_to_calculate, res_names)

@@ -10230,8 +10231,16 @@ def show_pareto_frontier_data(path_to_calculate: str, res_names: list, disable_s
  else:
  print(f"Not showing Pareto-front-sixel for {path_to_calculate}")

- if len(calculated_frontier[metric_x][metric_y]["idxs"]):
- pareto_points[metric_x][metric_y] = sorted(calculated_frontier[metric_x][metric_y]["idxs"])
+ if calculated_frontier is None:
+ print_debug("ERROR: calculated_frontier is None")
+ return None
+
+ try:
+ if len(calculated_frontier[metric_x][metric_y]["idxs"]):
+ pareto_points[metric_x][metric_y] = sorted(calculated_frontier[metric_x][metric_y]["idxs"])
+ except AttributeError:
+ print_debug(f"ERROR: calculated_frontier structure invalid for ({metric_x}, {metric_y})")
+ return None

  rich_table = pareto_front_as_rich_table(
  calculated_frontier[metric_x][metric_y]["idxs"],
@@ -10256,6 +10265,8 @@ def show_pareto_frontier_data(path_to_calculate: str, res_names: list, disable_s

  live_share_after_pareto()

+ return None
+
  def show_available_hardware_and_generation_strategy_string(gpu_string: str, gpu_color: str) -> None:
  cpu_count = os.cpu_count()

{omniopt2-8754 → omniopt2-8763}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: omniopt2
- Version: 8754
+ Version: 8763
  Summary: Automatic highly parallelized hyperparameter optimizer based on Ax/Botorch
  Home-page: https://scads.ai/transfer-2/verfuegbare-software-dienste-en/omniopt/
  Author: Norman Koch
{omniopt2-8754 → omniopt2-8763}/omniopt
@@ -1924,34 +1924,29 @@ EOF
  exit_code_lines=$(grep -i "exit-code:*" "$LOG_PATH" 2>/dev/null)
  exit_code_lines_lines=$?
  if [ $exit_code_lines_lines -ne 0 ] || [ -z "$exit_code_lines" ]; then
- echo "WARN: grep failed or no exit-code line found."
  exit_code_lines=""
  fi

  exit_code_sed=$(echo "$exit_code_lines" | sed -e 's#Exit-Code:*[[:space:]]*##i' -e 's#,.*##')
  exit_code_sed_sed=$?
  if [ $exit_code_sed_sed -ne 0 ] || [ -z "$exit_code_sed" ]; then
- echo "WARN: sed failed or no data after sed."
  exit_code_sed=""
  fi

  exit_code_tail=$(echo "$exit_code_sed" | tail -n1)
  exit_code_tail_tail=$?
  if [ $exit_code_tail_tail -ne 0 ] || [ -z "$exit_code_tail" ]; then
- echo "WARN: tail failed or no data after tail."
  exit_code_tail=""
  fi

  exit_code_only_digits=$(echo "$exit_code_tail" | grep -o '[0-9]\+')
  if [ -z "$exit_code_only_digits" ]; then
- echo "WARN: No valid exit code found, setting it to 3"
  exit_code_only_digits=3
  fi

  exit_code="$exit_code_only_digits"

  if ! [[ "$exit_code" =~ ^[0-9]+$ ]]; then
- echo "WARN: exit_code invalid ('$exit_code'), setting to 3"
  exit_code=3
  fi

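
The omniopt shell hunk above removes its WARN echoes but keeps the extraction logic: grep the log for an Exit-Code line, strip the label, take the last match, keep only the digits, and fall back to 3. A rough Python restatement of that logic, with an assumed log format, for illustration only:

import re

def extract_exit_code_sketch(log_text: str, default: int = 3) -> int:
    # Find all "Exit-Code: <n>" occurrences, keep the last one, fall back to 3,
    # mirroring the grep | sed | tail | grep -o pipeline above.
    matches = re.findall(r"exit-code:?\s*([0-9]+)", log_text, flags=re.IGNORECASE)
    if not matches:
        return default
    return int(matches[-1])

log = "step 1 ok\nExit-Code: 0, runtime 12s\nExit-Code: 2, runtime 9s\n"
print(extract_exit_code_sketch(log))  # 2
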
{omniopt2-8754 → omniopt2-8763}/omniopt2.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: omniopt2
- Version: 8754
+ Version: 8763
  Summary: Automatic highly parallelized hyperparameter optimizer based on Ax/Botorch
  Home-page: https://scads.ai/transfer-2/verfuegbare-software-dienste-en/omniopt/
  Author: Norman Koch
{omniopt2-8754 → omniopt2-8763}/pyproject.toml
@@ -5,7 +5,7 @@ authors = [
  {email = "norman.koch@tu-dresden.de"},
  {name = "Norman Koch"}
  ]
- version = "8754"
+ version = "8763"

  readme = "README.md"
  dynamic = ["dependencies"]