omniopt2 6963__tar.gz → 6965__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {omniopt2-6963 → omniopt2-6965}/.omniopt.py +27 -32
  2. {omniopt2-6963 → omniopt2-6965}/PKG-INFO +1 -1
  3. {omniopt2-6963 → omniopt2-6965}/omniopt2.egg-info/PKG-INFO +1 -1
  4. {omniopt2-6963 → omniopt2-6965}/pyproject.toml +1 -1
  5. {omniopt2-6963 → omniopt2-6965}/.colorfunctions.sh +0 -0
  6. {omniopt2-6963 → omniopt2-6965}/.dockerignore +0 -0
  7. {omniopt2-6963 → omniopt2-6965}/.general.sh +0 -0
  8. {omniopt2-6963 → omniopt2-6965}/.gitignore +0 -0
  9. {omniopt2-6963 → omniopt2-6965}/.helpers.py +0 -0
  10. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_cpu_ram_usage.py +0 -0
  11. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_general.py +0 -0
  12. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_gpu_usage.py +0 -0
  13. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_kde.py +0 -0
  14. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_scatter.py +0 -0
  15. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_scatter_generation_method.py +0 -0
  16. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_scatter_hex.py +0 -0
  17. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_time_and_exit_code.py +0 -0
  18. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_trial_index_result.py +0 -0
  19. {omniopt2-6963 → omniopt2-6965}/.omniopt_plot_worker.py +0 -0
  20. {omniopt2-6963 → omniopt2-6965}/.random_generator.py +0 -0
  21. {omniopt2-6963 → omniopt2-6965}/.shellscript_functions +0 -0
  22. {omniopt2-6963 → omniopt2-6965}/LICENSE +0 -0
  23. {omniopt2-6963 → omniopt2-6965}/MANIFEST.in +0 -0
  24. {omniopt2-6963 → omniopt2-6965}/README.md +0 -0
  25. {omniopt2-6963 → omniopt2-6965}/apt-dependencies.txt +0 -0
  26. {omniopt2-6963 → omniopt2-6965}/omniopt +0 -0
  27. {omniopt2-6963 → omniopt2-6965}/omniopt2.egg-info/SOURCES.txt +0 -0
  28. {omniopt2-6963 → omniopt2-6965}/omniopt2.egg-info/dependency_links.txt +0 -0
  29. {omniopt2-6963 → omniopt2-6965}/omniopt2.egg-info/requires.txt +0 -0
  30. {omniopt2-6963 → omniopt2-6965}/omniopt2.egg-info/top_level.txt +0 -0
  31. {omniopt2-6963 → omniopt2-6965}/omniopt_docker +0 -0
  32. {omniopt2-6963 → omniopt2-6965}/omniopt_evaluate +0 -0
  33. {omniopt2-6963 → omniopt2-6965}/omniopt_plot +0 -0
  34. {omniopt2-6963 → omniopt2-6965}/omniopt_share +0 -0
  35. {omniopt2-6963 → omniopt2-6965}/requirements.txt +0 -0
  36. {omniopt2-6963 → omniopt2-6965}/setup.cfg +0 -0
  37. {omniopt2-6963 → omniopt2-6965}/setup.py +0 -0
  38. {omniopt2-6963 → omniopt2-6965}/test_requirements.txt +0 -0
@@ -240,7 +240,7 @@ with console.status("[bold green]Importing rich_argparse...") as status:
240
240
  try:
241
241
  from rich_argparse import RichHelpFormatter
242
242
  except ModuleNotFoundError:
243
- RichHelpFormatter = argparse.HelpFormatter # type: ignore
243
+ RichHelpFormatter = argparse.HelpFormatter
244
244
 
245
245
  @beartype
246
246
  def makedirs(p: str) -> bool:
@@ -1879,7 +1879,6 @@ def get_line_info() -> Any:
1879
1879
 
1880
1880
  frame_info = stack[1]
1881
1881
 
1882
- # fallbacks bei Problemen mit Encoding oder Zugriffsfehlern
1883
1882
  try:
1884
1883
  filename = str(frame_info.filename)
1885
1884
  except Exception as e:
@@ -1900,7 +1899,6 @@ def get_line_info() -> Any:
1900
1899
  return (filename, ":", lineno, ":", function)
1901
1900
 
1902
1901
  except Exception as e:
1903
- # finaler Fallback, wenn gar nichts geht
1904
1902
  return ("<exception in get_line_info>", ":", -1, ":", str(e))
1905
1903
 
1906
1904
  @beartype
@@ -2413,11 +2411,13 @@ def switch_lower_and_upper_if_needed(name: Union[list, str], lower_bound: Union[
2413
2411
  def round_lower_and_upper_if_type_is_int(value_type: str, lower_bound: Union[int, float], upper_bound: Union[int, float]) -> Tuple[Union[int, float], Union[int, float]]:
2414
2412
  if value_type == "int":
2415
2413
  if not helpers.looks_like_int(lower_bound):
2416
- print_yellow(f"{value_type} can only contain integers. You chose {lower_bound}. Will be rounded down to {math.floor(lower_bound)}.")
2414
+ if not args.tests:
2415
+ print_yellow(f"{value_type} can only contain integers. You chose {lower_bound}. Will be rounded down to {math.floor(lower_bound)}.")
2417
2416
  lower_bound = math.floor(lower_bound)
2418
2417
 
2419
2418
  if not helpers.looks_like_int(upper_bound):
2420
- print_yellow(f"{value_type} can only contain integers. You chose {upper_bound}. Will be rounded up to {math.ceil(upper_bound)}.")
2419
+ if not args.tests:
2420
+ print_yellow(f"{value_type} can only contain integers. You chose {upper_bound}. Will be rounded up to {math.ceil(upper_bound)}.")
2421
2421
  upper_bound = math.ceil(upper_bound)
2422
2422
 
2423
2423
  return lower_bound, upper_bound
@@ -3175,11 +3175,13 @@ def calculate_signed_weighted_euclidean_distance(_args: Union[dict, List[float]]
3175
3175
  weights = [float(w.strip()) for w in weights_string.split(",") if w.strip()]
3176
3176
 
3177
3177
  if len(weights) > len(_args):
3178
- print_yellow(f"calculate_signed_weighted_euclidean_distance: Warning: Trimming {len(weights) - len(_args)} extra weight(s): {weights[len(_args):]}")
3178
+ if not args.tests:
3179
+ print_yellow(f"calculate_signed_weighted_euclidean_distance: Warning: Trimming {len(weights) - len(_args)} extra weight(s): {weights[len(_args):]}")
3179
3180
  weights = weights[:len(_args)]
3180
3181
 
3181
3182
  if len(weights) < len(_args):
3182
- print_yellow("calculate_signed_weighted_euclidean_distance: Warning: Not enough weights, filling with 1s")
3183
+ if not args.tests:
3184
+ print_yellow("calculate_signed_weighted_euclidean_distance: Warning: Not enough weights, filling with 1s")
3183
3185
  weights.extend([1] * (len(_args) - len(weights)))
3184
3186
 
3185
3187
  if len(_args) != len(weights):
@@ -6182,7 +6184,7 @@ def finish_job_core(job: Any, trial_index: int, this_jobs_finished: int) -> int:
6182
6184
  try:
6183
6185
  _finish_job_core_helper_mark_success(_trial, result)
6184
6186
 
6185
- if len(arg_result_names) > 1 and count_done_jobs() > 1 and job_calculate_pareto_front(get_current_run_folder(), True):
6187
+ if len(arg_result_names) > 1 and count_done_jobs() > 1 and not job_calculate_pareto_front(get_current_run_folder(), True):
6186
6188
  print_red("job_calculate_pareto_front post job failed")
6187
6189
  except Exception as e:
6188
6190
  print(f"ERROR in line {get_line_info()}: {e}")
@@ -7159,7 +7161,7 @@ def plot_times_vs_jobs_sixel(
7159
7161
  fig, _ax = plt.subplots()
7160
7162
 
7161
7163
  iterations = list(range(1, len(times) + 1))
7162
- sizes = [max(20, min(200, jc * 10)) for jc in job_counts] # Punktgröße je nach Jobanzahl, skaliert
7164
+ sizes = [max(20, min(200, jc * 10)) for jc in job_counts]
7163
7165
 
7164
7166
  scatter = _ax.scatter(iterations, times, s=sizes, c=job_counts, cmap='viridis', alpha=0.7, edgecolors='black')
7165
7167
 
@@ -8321,13 +8323,11 @@ def _pareto_front_aggregate_data(path_to_calculate: str) -> Optional[Dict[Tuple[
8321
8323
  if not os.path.exists(results_csv_file) or not os.path.exists(result_names_file):
8322
8324
  return None
8323
8325
 
8324
- # Lade die Ergebnisnamen
8325
8326
  with open(result_names_file, mode="r", encoding="utf-8") as f:
8326
8327
  result_names = [line.strip() for line in f if line.strip()]
8327
8328
 
8328
8329
  records: dict = defaultdict(lambda: {'means': {}})
8329
8330
 
8330
- # Lese die CSV-Datei
8331
8331
  with open(results_csv_file, encoding="utf-8", mode="r", newline='') as csvfile:
8332
8332
  reader = csv.DictReader(csvfile)
8333
8333
  for row in reader:
@@ -8340,7 +8340,7 @@ def _pareto_front_aggregate_data(path_to_calculate: str) -> Optional[Dict[Tuple[
8340
8340
  try:
8341
8341
  records[key]['means'][metric] = float(row[metric])
8342
8342
  except ValueError:
8343
- continue # Wenn der Wert nicht konvertierbar ist
8343
+ continue
8344
8344
 
8345
8345
  return records
8346
8346
 
@@ -8412,11 +8412,9 @@ def _pareto_front_build_return_structure(
8412
8412
  results_csv_file = f"{path_to_calculate}/results.csv"
8413
8413
  result_names_file = f"{path_to_calculate}/result_names.txt"
8414
8414
 
8415
- # Lade die Ergebnisnamen
8416
8415
  with open(result_names_file, mode="r", encoding="utf-8") as f:
8417
8416
  result_names = [line.strip() for line in f if line.strip()]
8418
8417
 
8419
- # CSV komplett in dict laden (trial_index als int -> row dict)
8420
8418
  csv_rows = {}
8421
8419
  with open(results_csv_file, mode="r", encoding="utf-8", newline='') as csvfile:
8422
8420
  reader = csv.DictReader(csvfile)
@@ -8424,7 +8422,6 @@ def _pareto_front_build_return_structure(
8424
8422
  trial_index = int(row['trial_index'])
8425
8423
  csv_rows[trial_index] = row
8426
8424
 
8427
- # Statische Spalten, die keine Parameter sind
8428
8425
  ignored_columns = {'trial_index', 'arm_name', 'trial_status', 'generation_node'}
8429
8426
  ignored_columns.update(result_names)
8430
8427
 
@@ -8435,11 +8432,10 @@ def _pareto_front_build_return_structure(
8435
8432
  for (trial_index, arm_name), _, _ in selected_points:
8436
8433
  row = csv_rows.get(trial_index)
8437
8434
  if row is None or row['arm_name'] != arm_name:
8438
- continue # Sicherheitshalber prüfen
8435
+ continue
8439
8436
 
8440
8437
  idxs.append(int(row["trial_index"]))
8441
8438
 
8442
- # Parameter extrahieren
8443
8439
  param_dict = {}
8444
8440
  for key, value in row.items():
8445
8441
  if key not in ignored_columns:
@@ -8449,7 +8445,7 @@ def _pareto_front_build_return_structure(
8449
8445
  try:
8450
8446
  param_dict[key] = float(value)
8451
8447
  except ValueError:
8452
- param_dict[key] = value # z.B. choice_param als String
8448
+ param_dict[key] = value
8453
8449
 
8454
8450
  param_dicts.append(param_dict)
8455
8451
 
@@ -8675,7 +8671,7 @@ def show_pareto_frontier_data(path_to_calculate: str, res_names: list, disable_s
8675
8671
 
8676
8672
  pareto_front_data: dict = get_pareto_front_data(path_to_calculate, res_names)
8677
8673
 
8678
- pareto_points = {}
8674
+ pareto_points: dict = {}
8679
8675
 
8680
8676
  for metric_x in pareto_front_data.keys():
8681
8677
  if metric_x not in pareto_points:
@@ -9025,7 +9021,7 @@ def post_job_calculate_pareto_front() -> None:
9025
9021
 
9026
9022
  for _path_to_calculate in _paths_to_calculate:
9027
9023
  for path_to_calculate in found_paths:
9028
- if job_calculate_pareto_front(path_to_calculate):
9024
+ if not job_calculate_pareto_front(path_to_calculate):
9029
9025
  failure = True
9030
9026
 
9031
9027
  if failure:
@@ -9037,9 +9033,8 @@ def post_job_calculate_pareto_front() -> None:
9037
9033
  def job_calculate_pareto_front(path_to_calculate: str, disable_sixel_and_table: bool = False) -> bool:
9038
9034
  pf_start_time = time.time()
9039
9035
 
9040
- # Returns true if it fails
9041
9036
  if not path_to_calculate:
9042
- return True
9037
+ return False
9043
9038
 
9044
9039
  global CURRENT_RUN_FOLDER
9045
9040
  global RESULT_CSV_FILE
@@ -9047,41 +9042,41 @@ def job_calculate_pareto_front(path_to_calculate: str, disable_sixel_and_table:
9047
9042
 
9048
9043
  if not path_to_calculate:
9049
9044
  print_red("Can only calculate pareto front of previous job when --calculate_pareto_front_of_job is set")
9050
- return True
9045
+ return False
9051
9046
 
9052
9047
  if not os.path.exists(path_to_calculate):
9053
9048
  print_red(f"Path '{path_to_calculate}' does not exist")
9054
- return True
9049
+ return False
9055
9050
 
9056
9051
  ax_client_json = f"{path_to_calculate}/state_files/ax_client.experiment.json"
9057
9052
 
9058
9053
  if not os.path.exists(ax_client_json):
9059
9054
  print_red(f"Path '{ax_client_json}' not found")
9060
- return True
9055
+ return False
9061
9056
 
9062
9057
  checkpoint_file: str = f"{path_to_calculate}/state_files/checkpoint.json"
9063
9058
  if not os.path.exists(checkpoint_file):
9064
9059
  print_red(f"The checkpoint file '{checkpoint_file}' does not exist")
9065
- return True
9060
+ return False
9066
9061
 
9067
9062
  RESULT_CSV_FILE = f"{path_to_calculate}/results.csv"
9068
9063
  if not os.path.exists(RESULT_CSV_FILE):
9069
9064
  print_red(f"{RESULT_CSV_FILE} not found")
9070
- return True
9065
+ return False
9071
9066
 
9072
9067
  res_names = []
9073
9068
 
9074
9069
  res_names_file = f"{path_to_calculate}/result_names.txt"
9075
9070
  if not os.path.exists(res_names_file):
9076
9071
  print_red(f"File '{res_names_file}' does not exist")
9077
- return True
9072
+ return False
9078
9073
 
9079
9074
  try:
9080
9075
  with open(res_names_file, "r", encoding="utf-8") as file:
9081
9076
  lines = file.readlines()
9082
9077
  except Exception as e:
9083
9078
  print_red(f"Error reading file '{res_names_file}': {e}")
9084
- return True
9079
+ return False
9085
9080
 
9086
9081
  for line in lines:
9087
9082
  entry = line.strip()
@@ -9090,7 +9085,7 @@ def job_calculate_pareto_front(path_to_calculate: str, disable_sixel_and_table:
9090
9085
 
9091
9086
  if len(res_names) < 2:
9092
9087
  print_red(f"Error: There are less than 2 result names (is: {len(res_names)}, {', '.join(res_names)}) in {path_to_calculate}. Cannot continue calculating the pareto front.")
9093
- return True
9088
+ return False
9094
9089
 
9095
9090
  load_username_to_args(path_to_calculate)
9096
9091
 
@@ -9101,7 +9096,7 @@ def job_calculate_pareto_front(path_to_calculate: str, disable_sixel_and_table:
9101
9096
  experiment_parameters = load_experiment_parameters_from_checkpoint_file(checkpoint_file, False)
9102
9097
 
9103
9098
  if experiment_parameters is None:
9104
- return True
9099
+ return False
9105
9100
 
9106
9101
  show_pareto_or_error_msg(path_to_calculate, res_names, disable_sixel_and_table)
9107
9102
 
@@ -9109,7 +9104,7 @@ def job_calculate_pareto_front(path_to_calculate: str, disable_sixel_and_table:
9109
9104
 
9110
9105
  print_debug(f"Calculating the pareto-front took {pf_end_time - pf_start_time} seconds")
9111
9106
 
9112
- return False
9107
+ return True
9113
9108
 
9114
9109
  @beartype
9115
9110
  def set_arg_states_from_continue() -> None:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: omniopt2
3
- Version: 6963
3
+ Version: 6965
4
4
  Summary: Automatic highly parallelized hyperparameter optimizer based on Ax/Botorch
5
5
  Home-page: https://scads.ai/transfer-2/verfuegbare-software-dienste-en/omniopt/
6
6
  Author: Norman Koch
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: omniopt2
3
- Version: 6963
3
+ Version: 6965
4
4
  Summary: Automatic highly parallelized hyperparameter optimizer based on Ax/Botorch
5
5
  Home-page: https://scads.ai/transfer-2/verfuegbare-software-dienste-en/omniopt/
6
6
  Author: Norman Koch
@@ -5,7 +5,7 @@ authors = [
5
5
  {email = "norman.koch@tu-dresden.de"},
6
6
  {name = "Norman Koch"}
7
7
  ]
8
- version = "6963"
8
+ version = "6965"
9
9
 
10
10
  readme = "README.md"
11
11
  dynamic = ["dependencies"]
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes