omniopt2: 7705.tar.gz → 7747.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of omniopt2 might be problematic.

Files changed (39):
  1. {omniopt2-7705 → omniopt2-7747}/.omniopt.py +68 -42
  2. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_scatter.py +1 -1
  3. {omniopt2-7705 → omniopt2-7747}/PKG-INFO +1 -1
  4. {omniopt2-7705 → omniopt2-7747}/omniopt2.egg-info/PKG-INFO +1 -1
  5. {omniopt2-7705 → omniopt2-7747}/pyproject.toml +1 -1
  6. {omniopt2-7705 → omniopt2-7747}/.colorfunctions.sh +0 -0
  7. {omniopt2-7705 → omniopt2-7747}/.dockerignore +0 -0
  8. {omniopt2-7705 → omniopt2-7747}/.general.sh +0 -0
  9. {omniopt2-7705 → omniopt2-7747}/.gitignore +0 -0
  10. {omniopt2-7705 → omniopt2-7747}/.helpers.py +0 -0
  11. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_cpu_ram_usage.py +0 -0
  12. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_general.py +0 -0
  13. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_gpu_usage.py +0 -0
  14. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_kde.py +0 -0
  15. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_scatter_generation_method.py +0 -0
  16. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_scatter_hex.py +0 -0
  17. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_time_and_exit_code.py +0 -0
  18. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_trial_index_result.py +0 -0
  19. {omniopt2-7705 → omniopt2-7747}/.omniopt_plot_worker.py +0 -0
  20. {omniopt2-7705 → omniopt2-7747}/.random_generator.py +0 -0
  21. {omniopt2-7705 → omniopt2-7747}/.shellscript_functions +0 -0
  22. {omniopt2-7705 → omniopt2-7747}/.tpe.py +0 -0
  23. {omniopt2-7705 → omniopt2-7747}/LICENSE +0 -0
  24. {omniopt2-7705 → omniopt2-7747}/MANIFEST.in +0 -0
  25. {omniopt2-7705 → omniopt2-7747}/README.md +0 -0
  26. {omniopt2-7705 → omniopt2-7747}/apt-dependencies.txt +0 -0
  27. {omniopt2-7705 → omniopt2-7747}/omniopt +0 -0
  28. {omniopt2-7705 → omniopt2-7747}/omniopt2.egg-info/SOURCES.txt +0 -0
  29. {omniopt2-7705 → omniopt2-7747}/omniopt2.egg-info/dependency_links.txt +0 -0
  30. {omniopt2-7705 → omniopt2-7747}/omniopt2.egg-info/requires.txt +0 -0
  31. {omniopt2-7705 → omniopt2-7747}/omniopt2.egg-info/top_level.txt +0 -0
  32. {omniopt2-7705 → omniopt2-7747}/omniopt_docker +0 -0
  33. {omniopt2-7705 → omniopt2-7747}/omniopt_evaluate +0 -0
  34. {omniopt2-7705 → omniopt2-7747}/omniopt_plot +0 -0
  35. {omniopt2-7705 → omniopt2-7747}/omniopt_share +0 -0
  36. {omniopt2-7705 → omniopt2-7747}/requirements.txt +0 -0
  37. {omniopt2-7705 → omniopt2-7747}/setup.cfg +0 -0
  38. {omniopt2-7705 → omniopt2-7747}/setup.py +0 -0
  39. {omniopt2-7705 → omniopt2-7747}/test_requirements.txt +0 -0
{omniopt2-7705 → omniopt2-7747}/.omniopt.py
@@ -570,6 +570,7 @@ class ConfigLoader:
     no_sleep: bool
     username: Optional[str]
     max_nr_of_zero_results: int
+    run_program_once: str
     mem_gb: int
     flame_graph: bool
     continue_previous_job: Optional[str]
@@ -692,6 +693,7 @@ class ConfigLoader:
     optional.add_argument('--share_password', help='Use this as a password for share. Default is none.', default=None, type=str)
     optional.add_argument('--dryrun', help='Try to do a dry run, i.e. a run for very short running jobs to test the installation of OmniOpt2 and check if environment stuff and paths and so on works properly', action='store_true', default=False)
     optional.add_argument('--db_url', type=str, default=None, help='Database URL (e.g., mysql+pymysql://user:pass@host/db), disables sqlite3 storage')
+    optional.add_argument('--run_program_once', type=str, help='Path to a setup script that will run once before the main program starts. Supports placeholders like %(lr), %(epochs), etc.')

     speed.add_argument('--dont_warm_start_refitting', help='Do not keep Model weights, thus, refit for every generator (may be more accurate, but slower)', action='store_true', default=False)
     speed.add_argument('--refit_on_cv', help='Refit on Cross-Validation (helps in accuracy, but makes generating new points slower)', action='store_true', default=False)
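
The %(name) placeholders mentioned in the new --run_program_once help text are expanded by plain string substitution before the setup command runs. A minimal sketch of that expansion, using hypothetical parameter names (lr, epochs) for illustration only:

# Minimal sketch of %(name) placeholder expansion; the names and
# values here are hypothetical, not taken from a real OmniOpt2 run.
def expand_placeholders(command: str, params: dict) -> str:
    for name, value in params.items():
        command = command.replace(f"%({name})", str(value))
    return command

print(expand_placeholders("bash setup.sh --lr=%(lr) --epochs=%(epochs)",
                          {"lr": 0.01, "epochs": 10}))
# prints: bash setup.sh --lr=0.01 --epochs=10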
@@ -792,7 +794,7 @@ class ConfigLoader:

     validated_config = self.validate_and_convert(config, arg_defaults)

-    for key, value in vars(cli_args).items():
+    for key, _ in vars(cli_args).items():
         if key in validated_config:
             setattr(cli_args, key, validated_config[key])

@@ -1227,7 +1229,7 @@ class RandomForestGenerationNode(ExternalGenerationNode):
 @beartype
 def _build_reverse_choice_map(self: Any, choice_parameters: dict) -> dict:
     choice_value_map = {}
-    for name, param in choice_parameters.items():
+    for _, param in choice_parameters.items():
         for value, idx in param.items():
             choice_value_map[value] = idx
     return {idx: value for value, idx in choice_value_map.items()}
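
For context, this method inverts the per-parameter value-to-index maps into a single index-to-value lookup; the parameter name is never used, hence the underscore. A standalone sketch with made-up data:

# Standalone sketch of the reverse choice map; the input is made up,
# the real maps come from the Ax choice parameters handled above.
choice_parameters = {"optimizer": {"adam": 0, "sgd": 1}}
choice_value_map = {}
for _, param in choice_parameters.items():
    for value, idx in param.items():
        choice_value_map[value] = idx
reverse_map = {idx: value for value, idx in choice_value_map.items()}
print(reverse_map)  # prints: {0: 'adam', 1: 'sgd'}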
@@ -1793,6 +1795,9 @@ def live_share(force: bool = False) -> bool:
         print_green(stderr)

         extract_and_print_qr(stderr)
+
+        if stdout:
+            print_debug(f"live_share stdout: {stdout}")
     else:
         stdout, stderr = run_live_share_command(force)

@@ -3035,7 +3040,7 @@ def _parse_experiment_parameters_parse_this_args(
     return j, params, classic_params, search_space_reduction_warning

 @beartype
-def parse_experiment_parameters() -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
+def parse_experiment_parameters() -> List[Dict[str, Any]]:
     params: List[Dict[str, Any]] = []
     classic_params: List[Dict[str, Any]] = []
     param_names: List[str] = []
@@ -3059,9 +3064,8 @@ def parse_experiment_parameters() -> Tuple[List[Dict[str, Any]], List[Dict[str,

     # Remove duplicates by 'name' key preserving order
     params = list({p['name']: p for p in params}.values())
-    classic_params = list({p['name']: p for p in classic_params}.values())

-    return params, classic_params
+    return params

 @beartype
 def check_factorial_range() -> None:
@@ -3695,7 +3699,7 @@ def write_job_infos_csv(parameters: dict, stdout: Optional[str], program_string_
     values = _write_job_infos_csv_replace_none_with_str(values)

     headline = ["trial_index", "submit_time", "queue_time", *headline]
-    values = [str(trial_index), submit_time, queue_time, *values]
+    values = [str(trial_index), str(submit_time), str(queue_time), *values]

     run_folder = get_current_run_folder()
     if run_folder is not None and os.path.exists(run_folder):
@@ -7295,7 +7299,7 @@ def save_state_files() -> None:
 @beartype
 def execute_evaluation(_params: list) -> Optional[int]:
     print_debug(f"execute_evaluation({_params})")
-    trial_index, parameters, trial_counter, next_nr_steps, phase = _params
+    trial_index, parameters, trial_counter, phase = _params
     if not ax_client:
         _fatal_error("Failed to get ax_client", 9)

@@ -8553,12 +8557,10 @@ def handle_optimization_completion(optimization_complete: bool) -> bool:
 @beartype
 def execute_trials(
     trial_index_to_param: dict,
-    next_nr_steps: int,
     phase: Optional[str],
     _max_eval: Optional[int],
     _progress_bar: Any
-) -> List[Optional[int]]:
-    results: List[Optional[int]] = []
+) -> None:
     index_param_list: List[List[Any]] = []
     i: int = 1

@@ -8569,7 +8571,7 @@ def execute_trials(
             break

         progressbar_description([f"eval #{i}/{len(trial_index_to_param.items())} start"])
-        _args = [trial_index, parameters, i, next_nr_steps, phase]
+        _args = [trial_index, parameters, i, phase]
         index_param_list.append(_args)
         i += 1

@@ -8582,16 +8584,15 @@ def execute_trials(
     nr_workers = max(1, min(len(index_param_list), args.max_num_of_parallel_sruns))

     with ThreadPoolExecutor(max_workers=nr_workers) as tp_executor:
-        future_to_args = {tp_executor.submit(execute_evaluation, args): args for args in index_param_list}
+        future_to_args = {tp_executor.submit(execute_evaluation, _args): _args for _args in index_param_list}

         for future in as_completed(future_to_args):
             cnt = cnt + 1
             try:
                 result = future.result()
-                results.append(result)
+                print_debug(f"result in execute_trials: {result}")
             except Exception as exc:
                 print_red(f"execute_trials: Error at executing a trial: {exc}")
-                results.append(None)

     end_time = time.time()

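The loop variable is renamed from args to _args, avoiding a clash with the global args namespace that the same function reads (args.max_num_of_parallel_sruns). The submit/as_completed pattern itself is unchanged; a self-contained sketch of it, where the worker function and job list are stand-ins rather than OmniOpt2 internals:

# Self-contained sketch of the submit/as_completed pattern above;
# evaluate() and jobs are stand-ins, not OmniOpt2 internals.
from concurrent.futures import ThreadPoolExecutor, as_completed

def evaluate(job: int) -> int:
    return job * 2

jobs = [1, 2, 3]
with ThreadPoolExecutor(max_workers=2) as tp_executor:
    future_to_args = {tp_executor.submit(evaluate, j): j for j in jobs}
    for future in as_completed(future_to_args):
        try:
            # results are now only logged, no longer collected into a list
            print(f"result: {future.result()}")
        except Exception as exc:
            print(f"error: {exc}")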
@@ -8599,8 +8600,6 @@ def execute_trials(
     job_submit_durations.append(duration)
     job_submit_nrs.append(cnt)

-    return results
-
 @beartype
 def handle_exceptions_create_and_execute_next_runs(e: Exception) -> int:
     if isinstance(e, TypeError):
@@ -8630,10 +8629,9 @@ def create_and_execute_next_runs(next_nr_steps: int, phase: Optional[str], _max_

     trial_index_to_param: Optional[Dict] = None
     done_optimizing: bool = False
-    results: List = []

     try:
-        done_optimizing, trial_index_to_param, results = _create_and_execute_next_runs_run_loop(next_nr_steps, _max_eval, phase, _progress_bar)
+        done_optimizing, trial_index_to_param = _create_and_execute_next_runs_run_loop(_max_eval, phase, _progress_bar)
         _create_and_execute_next_runs_finish(done_optimizing)
     except Exception as e:
         stacktrace = traceback.format_exc()
@@ -8643,10 +8641,9 @@ def create_and_execute_next_runs(next_nr_steps: int, phase: Optional[str], _max_
     return _create_and_execute_next_runs_return_value(trial_index_to_param)

 @beartype
-def _create_and_execute_next_runs_run_loop(next_nr_steps: int, _max_eval: Optional[int], phase: Optional[str], _progress_bar: Any) -> Tuple[bool, Optional[Dict], List]:
+def _create_and_execute_next_runs_run_loop(_max_eval: Optional[int], phase: Optional[str], _progress_bar: Any) -> Tuple[bool, Optional[Dict]]:
     done_optimizing = False
     trial_index_to_param: Optional[Dict] = None
-    results: List = []

     nr_of_jobs_to_get = _calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))

@@ -8672,7 +8669,7 @@ def _create_and_execute_next_runs_run_loop(next_nr_steps: int, _max_eval: Option
     filtered_trial_index_to_param = {k: v for k, v in trial_index_to_param.items() if k not in abandoned_trial_indices}

     if len(filtered_trial_index_to_param):
-        results.extend(execute_trials(filtered_trial_index_to_param, next_nr_steps, phase, _max_eval, _progress_bar))
+        execute_trials(filtered_trial_index_to_param, phase, _max_eval, _progress_bar)
     else:
         if nr_jobs_before_removing_abandoned > 0:
             print_debug(f"Could not get jobs. They've been deleted by abandoned_trial_indices: {abandoned_trial_indices}")
@@ -8681,7 +8678,7 @@

     trial_index_to_param = filtered_trial_index_to_param

-    return done_optimizing, trial_index_to_param, results
+    return done_optimizing, trial_index_to_param

 @beartype
 def _create_and_execute_next_runs_finish(done_optimizing: bool) -> None:
@@ -8704,7 +8701,7 @@ def _create_and_execute_next_runs_return_value(trial_index_to_param: Optional[Di
         return 0

 @beartype
-def get_number_of_steps(_max_eval: int) -> Tuple[int, int]:
+def get_number_of_steps(_max_eval: int) -> int:
     with console.status("[bold green]Calculating number of steps..."):
         _random_steps = args.num_random_steps

@@ -8722,20 +8719,7 @@ def get_number_of_steps(_max_eval: int) -> Tuple[int, int]:
     if _random_steps > _max_eval:
         set_max_eval(_random_steps)

-    original_second_steps = _max_eval - _random_steps
-    second_step_steps = max(0, original_second_steps)
-    if second_step_steps != original_second_steps:
-        original_print(f"? original_second_steps: {original_second_steps} = max_eval {_max_eval} - _random_steps {_random_steps}")
-    if second_step_steps == 0:
-        if not args.dryrun:
-            print_yellow("This is basically a random search. Increase --max_eval or reduce --num_random_steps")
-
-    second_step_steps = second_step_steps - already_done_random_steps
-
-    if args.continue_previous_job:
-        second_step_steps = _max_eval
-
-    return _random_steps, second_step_steps
+    return _random_steps

 @beartype
 def _set_global_executor() -> None:
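
Before this change the function also derived the number of non-random (systematic) steps; that second value is gone from the return type. The old derivation worked out as follows (numbers hypothetical, for comparison only):

# Old two-value derivation, shown for comparison (hypothetical numbers):
_max_eval, _random_steps = 100, 20
second_step_steps = max(0, _max_eval - _random_steps)  # 80 systematic steps
# The function now returns only _random_steps.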
@@ -9045,12 +9029,11 @@ def check_max_eval(_max_eval: int) -> None:
 def parse_parameters() -> Any:
     experiment_parameters = None
     cli_params_experiment_parameters = None
-    classic_params = None
     if args.parameter:
-        experiment_parameters, classic_params = parse_experiment_parameters()
+        experiment_parameters = parse_experiment_parameters()
         cli_params_experiment_parameters = experiment_parameters

-    return experiment_parameters, cli_params_experiment_parameters, classic_params
+    return experiment_parameters, cli_params_experiment_parameters

 @beartype
 def create_pareto_front_table(idxs: List[int], metric_x: str, metric_y: str) -> Table:
@@ -10057,6 +10040,47 @@ def write_result_names_file() -> None:
     except Exception as e:
         print_red(f"Error trying to open file '{fn}': {e}")

+@beartype
+def run_program_once(params=None) -> None:
+    if not args.run_program_once:
+        print_debug("[yellow]No setup script specified (run_program_once). Skipping setup.[/yellow]")
+        return
+
+    if params is None:
+        params = {}
+
+    if isinstance(args.run_program_once, str):
+        command_str = args.run_program_once
+        for k, v in params.items():
+            placeholder = f"%({k})"
+            command_str = command_str.replace(placeholder, str(v))
+
+        with console.status("[bold green]Running setup script...[/bold green]", spinner="dots"):
+            console.log(f"Executing command: [cyan]{command_str}[/cyan]")
+            result = subprocess.run(command_str, shell=True, check=True)
+            if result.returncode == 0:
+                console.log("[bold green]Setup script completed successfully ✅[/bold green]")
+            else:
+                console.log(f"[bold red]Setup script failed with exit code {result.returncode} ❌[/bold red]")
+
+                my_exit(57)
+
+    elif isinstance(args.run_program_once, (list, tuple)):
+        with console.status("[bold green]Running setup script (list)...[/bold green]", spinner="dots"):
+            console.log(f"Executing command list: [cyan]{args.run_program_once}[/cyan]")
+            result = subprocess.run(args.run_program_once, check=True)
+            if result.returncode == 0:
+                console.log("[bold green]Setup script completed successfully ✅[/bold green]")
+            else:
+                console.log(f"[bold red]Setup script failed with exit code {result.returncode} ❌[/bold red]")

+                my_exit(57)
+
+    else:
+        console.print(f"[red]Invalid type for run_program_once: {type(args.run_program_once)}[/red]")
+
+        my_exit(57)
+
 @beartype
 def main() -> None:
     global RESULT_CSV_FILE, ax_client, LOGFILE_DEBUG_GET_NEXT_TRIALS
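
One note on the subprocess call above: with check=True, subprocess.run raises CalledProcessError on a nonzero exit instead of returning, so a caller that wants to handle failure explicitly would catch the exception. A minimal standalone sketch of running a one-time setup command this way (the command string is hypothetical):

# Minimal standalone sketch of a one-time setup command; the command
# string is hypothetical. check=True turns failures into exceptions.
import subprocess

try:
    subprocess.run("bash prepare_data.sh", shell=True, check=True)
    print("setup ok")
except subprocess.CalledProcessError as e:
    print(f"setup failed with exit code {e.returncode}")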
@@ -10091,6 +10115,8 @@ def main() -> None:
     if args.dryrun:
         set_max_eval(1)

+    run_program_once()
+
     if os.getenv("CI"):
         data_dict: dict = {
             "param1": "value1",
@@ -10110,7 +10136,7 @@ def main() -> None:
     write_ui_url_if_present()

     LOGFILE_DEBUG_GET_NEXT_TRIALS = f'{get_current_run_folder()}/get_next_trials.csv'
-    experiment_parameters, cli_params_experiment_parameters, classic_params = parse_parameters()
+    experiment_parameters, cli_params_experiment_parameters = parse_parameters()

     write_live_share_file_if_needed()

@@ -10120,7 +10146,7 @@ def main() -> None:

     check_max_eval(max_eval)

-    _random_steps, second_step_steps = get_number_of_steps(max_eval)
+    _random_steps = get_number_of_steps(max_eval)

     set_random_steps(_random_steps)

{omniopt2-7705 → omniopt2-7747}/.omniopt_plot_scatter.py
@@ -140,7 +140,7 @@ def plot_single_graph(_params: list) -> Any:
 @beartype
 def plot_graphs(_params: list) -> None:
     global fig
-    df, fig, axs, df_filtered, non_empty_graphs, num_subplots, parameter_combinations, num_rows, num_cols, result_column_values, csv_file_path = _params
+    df, fig, axs, df_filtered, non_empty_graphs, num_subplots, parameter_combinations, num_rows, num_cols, _, csv_file_path = _params

     cmap, norm, colors = helpers.get_color_list(df, args, plt, csv_file_path)

{omniopt2-7705 → omniopt2-7747}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: omniopt2
-Version: 7705
+Version: 7747
 Summary: Automatic highly parallelized hyperparameter optimizer based on Ax/Botorch
 Home-page: https://scads.ai/transfer-2/verfuegbare-software-dienste-en/omniopt/
 Author: Norman Koch
{omniopt2-7705 → omniopt2-7747}/omniopt2.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: omniopt2
-Version: 7705
+Version: 7747
 Summary: Automatic highly parallelized hyperparameter optimizer based on Ax/Botorch
 Home-page: https://scads.ai/transfer-2/verfuegbare-software-dienste-en/omniopt/
 Author: Norman Koch
{omniopt2-7705 → omniopt2-7747}/pyproject.toml
@@ -5,7 +5,7 @@ authors = [
     {email = "norman.koch@tu-dresden.de"},
     {name = "Norman Koch"}
 ]
-version = "7705"
+version = "7747"

 readme = "README.md"
 dynamic = ["dependencies"]