omniopt2 7705__py3-none-any.whl → 7747__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of omniopt2 has been flagged as possibly problematic by the registry.
- .omniopt.py +68 -42
- .omniopt_plot_scatter.py +1 -1
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt.py +68 -42
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_scatter.py +1 -1
- {omniopt2-7705.dist-info → omniopt2-7747.dist-info}/METADATA +1 -1
- {omniopt2-7705.dist-info → omniopt2-7747.dist-info}/RECORD +36 -36
- omniopt2.egg-info/PKG-INFO +1 -1
- pyproject.toml +1 -1
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.colorfunctions.sh +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.general.sh +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.helpers.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_cpu_ram_usage.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_general.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_gpu_usage.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_kde.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_scatter_generation_method.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_scatter_hex.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_time_and_exit_code.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_trial_index_result.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_worker.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.random_generator.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.shellscript_functions +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/.tpe.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/LICENSE +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/apt-dependencies.txt +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/omniopt +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/omniopt_docker +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/omniopt_evaluate +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/omniopt_plot +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/omniopt_share +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/requirements.txt +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/setup.py +0 -0
- {omniopt2-7705.data → omniopt2-7747.data}/data/bin/test_requirements.txt +0 -0
- {omniopt2-7705.dist-info → omniopt2-7747.dist-info}/WHEEL +0 -0
- {omniopt2-7705.dist-info → omniopt2-7747.dist-info}/licenses/LICENSE +0 -0
- {omniopt2-7705.dist-info → omniopt2-7747.dist-info}/top_level.txt +0 -0
.omniopt.py
CHANGED
@@ -570,6 +570,7 @@ class ConfigLoader:
     no_sleep: bool
     username: Optional[str]
     max_nr_of_zero_results: int
+    run_program_once: str
     mem_gb: int
     flame_graph: bool
     continue_previous_job: Optional[str]
@@ -692,6 +693,7 @@ class ConfigLoader:
         optional.add_argument('--share_password', help='Use this as a password for share. Default is none.', default=None, type=str)
         optional.add_argument('--dryrun', help='Try to do a dry run, i.e. a run for very short running jobs to test the installation of OmniOpt2 and check if environment stuff and paths and so on works properly', action='store_true', default=False)
         optional.add_argument('--db_url', type=str, default=None, help='Database URL (e.g., mysql+pymysql://user:pass@host/db), disables sqlite3 storage')
+        optional.add_argument('--run_program_once', type=str, help='Path to a setup script that will run once before the main program starts. Supports placeholders like %(lr), %(epochs), etc.')

         speed.add_argument('--dont_warm_start_refitting', help='Do not keep Model weights, thus, refit for every generator (may be more accurate, but slower)', action='store_true', default=False)
         speed.add_argument('--refit_on_cv', help='Refit on Cross-Validation (helps in accuracy, but makes generating new points slower)', action='store_true', default=False)
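Note on the new --run_program_once argument: it is registered with type=str and no default, so argparse leaves the attribute at None when the flag is omitted, which is what lets the falsy check in the run_program_once() helper added further below skip the setup step. A minimal, self-contained sketch of that behaviour (the parser here is a stand-in, not OmniOpt2's real ConfigLoader):

    import argparse

    # Stand-in parser that only mirrors the single new flag added above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--run_program_once', type=str,
                        help='Path to a setup script that runs once before the main program starts.')

    args = parser.parse_args([])
    print(args.run_program_once)   # None -> "if not args.run_program_once" skips the setup step

    args = parser.parse_args(['--run_program_once', './setup.sh'])
    print(args.run_program_once)   # './setup.sh'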
@@ -792,7 +794,7 @@ class ConfigLoader:

         validated_config = self.validate_and_convert(config, arg_defaults)

-        for key,
+        for key, _ in vars(cli_args).items():
             if key in validated_config:
                 setattr(cli_args, key, validated_config[key])

@@ -1227,7 +1229,7 @@ class RandomForestGenerationNode(ExternalGenerationNode):
     @beartype
     def _build_reverse_choice_map(self: Any, choice_parameters: dict) -> dict:
         choice_value_map = {}
-        for
+        for _, param in choice_parameters.items():
             for value, idx in param.items():
                 choice_value_map[value] = idx
         return {idx: value for value, idx in choice_value_map.items()}
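For context, the completed loop above builds one value-to-index map across all choice parameters and then inverts it. A standalone sketch of the same pattern (the choice dictionaries are made up for illustration):

    # Hypothetical choice parameters: name -> {value: index}
    choice_parameters = {
        "optimizer": {"adam": 0, "sgd": 1},
        "activation": {"relu": 0, "tanh": 1},
    }

    choice_value_map = {}
    for _, param in choice_parameters.items():   # the parameter names themselves are unused
        for value, idx in param.items():
            choice_value_map[value] = idx

    # Invert to index -> value, as _build_reverse_choice_map returns
    reverse_map = {idx: value for value, idx in choice_value_map.items()}
    print(reverse_map)   # {0: 'relu', 1: 'tanh'} -- later entries win when indices collide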
@@ -1793,6 +1795,9 @@ def live_share(force: bool = False) -> bool:
         print_green(stderr)

         extract_and_print_qr(stderr)
+
+        if stdout:
+            print_debug(f"live_share stdout: {stdout}")
     else:
         stdout, stderr = run_live_share_command(force)

@@ -3035,7 +3040,7 @@ def _parse_experiment_parameters_parse_this_args(
     return j, params, classic_params, search_space_reduction_warning

 @beartype
-def parse_experiment_parameters() ->
+def parse_experiment_parameters() -> List[Dict[str, Any]]:
     params: List[Dict[str, Any]] = []
     classic_params: List[Dict[str, Any]] = []
     param_names: List[str] = []
@@ -3059,9 +3064,8 @@ def parse_experiment_parameters() -> Tuple[List[Dict[str, Any]], List[Dict[str,

     # Remove duplicates by 'name' key preserving order
     params = list({p['name']: p for p in params}.values())
-    classic_params = list({p['name']: p for p in classic_params}.values())

-    return params
+    return params

 @beartype
 def check_factorial_range() -> None:
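The deduplication one-liner kept above relies on dict insertion order (guaranteed since Python 3.7): keys appear in order of first occurrence, while a repeated name overwrites the stored dict, so the last entry for each name survives. A tiny illustration with made-up parameter dicts:

    params = [
        {"name": "lr",     "min": 0.001},
        {"name": "epochs", "min": 1},
        {"name": "lr",     "min": 0.01},   # duplicate name
    ]

    deduped = list({p["name"]: p for p in params}.values())
    print(deduped)
    # [{'name': 'lr', 'min': 0.01}, {'name': 'epochs', 'min': 1}]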
@@ -3695,7 +3699,7 @@ def write_job_infos_csv(parameters: dict, stdout: Optional[str], program_string_
     values = _write_job_infos_csv_replace_none_with_str(values)

     headline = ["trial_index", "submit_time", "queue_time", *headline]
-    values = [str(trial_index), submit_time, queue_time, *values]
+    values = [str(trial_index), str(submit_time), str(queue_time), *values]

     run_folder = get_current_run_folder()
     if run_folder is not None and os.path.exists(run_folder):
@@ -7295,7 +7299,7 @@ def save_state_files() -> None:
 @beartype
 def execute_evaluation(_params: list) -> Optional[int]:
     print_debug(f"execute_evaluation({_params})")
-    trial_index, parameters, trial_counter,
+    trial_index, parameters, trial_counter, phase = _params
     if not ax_client:
         _fatal_error("Failed to get ax_client", 9)

@@ -8553,12 +8557,10 @@ def handle_optimization_completion(optimization_complete: bool) -> bool:
 @beartype
 def execute_trials(
     trial_index_to_param: dict,
-    next_nr_steps: int,
     phase: Optional[str],
     _max_eval: Optional[int],
     _progress_bar: Any
-) ->
-    results: List[Optional[int]] = []
+) -> None:
     index_param_list: List[List[Any]] = []
     i: int = 1

@@ -8569,7 +8571,7 @@ def execute_trials(
             break

         progressbar_description([f"eval #{i}/{len(trial_index_to_param.items())} start"])
-        _args = [trial_index, parameters, i,
+        _args = [trial_index, parameters, i, phase]
         index_param_list.append(_args)
         i += 1

@@ -8582,16 +8584,15 @@ def execute_trials(
     nr_workers = max(1, min(len(index_param_list), args.max_num_of_parallel_sruns))

     with ThreadPoolExecutor(max_workers=nr_workers) as tp_executor:
-        future_to_args = {tp_executor.submit(execute_evaluation,
+        future_to_args = {tp_executor.submit(execute_evaluation, _args): _args for _args in index_param_list}

         for future in as_completed(future_to_args):
             cnt = cnt + 1
             try:
                 result = future.result()
-
+                print_debug(f"result in execute_trials: {result}")
             except Exception as exc:
                 print_red(f"execute_trials: Error at executing a trial: {exc}")
-                results.append(None)

     end_time = time.time()

@@ -8599,8 +8600,6 @@ def execute_trials(
     job_submit_durations.append(duration)
     job_submit_nrs.append(cnt)

-    return results
-
 @beartype
 def handle_exceptions_create_and_execute_next_runs(e: Exception) -> int:
     if isinstance(e, TypeError):
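The rewritten execute_trials follows the usual submit/as_completed pattern: each argument list is submitted to a small thread pool, and results (or exceptions) are handled as the futures finish, without collecting a return value any more. A self-contained sketch of that pattern with a dummy work function (the names below are illustrative, not OmniOpt2's):

    from concurrent.futures import ThreadPoolExecutor, as_completed

    def evaluate(args_list):
        # stand-in for execute_evaluation(_params)
        trial_index, parameters = args_list
        return trial_index

    index_param_list = [[0, {"lr": 0.01}], [1, {"lr": 0.1}], [2, {"lr": 1.0}]]
    nr_workers = max(1, min(len(index_param_list), 2))

    with ThreadPoolExecutor(max_workers=nr_workers) as tp_executor:
        future_to_args = {tp_executor.submit(evaluate, a): a for a in index_param_list}
        for future in as_completed(future_to_args):
            try:
                result = future.result()
                print(f"finished trial {result}")
            except Exception as exc:
                print(f"trial {future_to_args[future]} failed: {exc}")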
@@ -8630,10 +8629,9 @@ def create_and_execute_next_runs(next_nr_steps: int, phase: Optional[str], _max_

     trial_index_to_param: Optional[Dict] = None
     done_optimizing: bool = False
-    results: List = []

     try:
-        done_optimizing, trial_index_to_param
+        done_optimizing, trial_index_to_param = _create_and_execute_next_runs_run_loop(_max_eval, phase, _progress_bar)
         _create_and_execute_next_runs_finish(done_optimizing)
     except Exception as e:
         stacktrace = traceback.format_exc()
@@ -8643,10 +8641,9 @@ def create_and_execute_next_runs(next_nr_steps: int, phase: Optional[str], _max_
     return _create_and_execute_next_runs_return_value(trial_index_to_param)

 @beartype
-def _create_and_execute_next_runs_run_loop(
+def _create_and_execute_next_runs_run_loop(_max_eval: Optional[int], phase: Optional[str], _progress_bar: Any) -> Tuple[bool, Optional[Dict]]:
     done_optimizing = False
     trial_index_to_param: Optional[Dict] = None
-    results: List = []

     nr_of_jobs_to_get = _calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))

@@ -8672,7 +8669,7 @@ def _create_and_execute_next_runs_run_loop(next_nr_steps: int, _max_eval: Option
         filtered_trial_index_to_param = {k: v for k, v in trial_index_to_param.items() if k not in abandoned_trial_indices}

         if len(filtered_trial_index_to_param):
-
+            execute_trials(filtered_trial_index_to_param, phase, _max_eval, _progress_bar)
         else:
             if nr_jobs_before_removing_abandoned > 0:
                 print_debug(f"Could not get jobs. They've been deleted by abandoned_trial_indices: {abandoned_trial_indices}")
@@ -8681,7 +8678,7 @@ def _create_and_execute_next_runs_run_loop(next_nr_steps: int, _max_eval: Option

         trial_index_to_param = filtered_trial_index_to_param

-    return done_optimizing, trial_index_to_param
+    return done_optimizing, trial_index_to_param

 @beartype
 def _create_and_execute_next_runs_finish(done_optimizing: bool) -> None:
@@ -8704,7 +8701,7 @@ def _create_and_execute_next_runs_return_value(trial_index_to_param: Optional[Di
     return 0

 @beartype
-def get_number_of_steps(_max_eval: int) ->
+def get_number_of_steps(_max_eval: int) -> int:
     with console.status("[bold green]Calculating number of steps..."):
         _random_steps = args.num_random_steps

@@ -8722,20 +8719,7 @@ def get_number_of_steps(_max_eval: int) -> Tuple[int, int]:
     if _random_steps > _max_eval:
         set_max_eval(_random_steps)

-
-    second_step_steps = max(0, original_second_steps)
-    if second_step_steps != original_second_steps:
-        original_print(f"? original_second_steps: {original_second_steps} = max_eval {_max_eval} - _random_steps {_random_steps}")
-    if second_step_steps == 0:
-        if not args.dryrun:
-            print_yellow("This is basically a random search. Increase --max_eval or reduce --num_random_steps")
-
-    second_step_steps = second_step_steps - already_done_random_steps
-
-    if args.continue_previous_job:
-        second_step_steps = _max_eval
-
-    return _random_steps, second_step_steps
+    return _random_steps

 @beartype
 def _set_global_executor() -> None:
@@ -9045,12 +9029,11 @@ def check_max_eval(_max_eval: int) -> None:
 def parse_parameters() -> Any:
     experiment_parameters = None
     cli_params_experiment_parameters = None
-    classic_params = None
     if args.parameter:
-        experiment_parameters
+        experiment_parameters = parse_experiment_parameters()
         cli_params_experiment_parameters = experiment_parameters

-    return experiment_parameters, cli_params_experiment_parameters
+    return experiment_parameters, cli_params_experiment_parameters

 @beartype
 def create_pareto_front_table(idxs: List[int], metric_x: str, metric_y: str) -> Table:
@@ -10057,6 +10040,47 @@ def write_result_names_file() -> None:
     except Exception as e:
         print_red(f"Error trying to open file '{fn}': {e}")

+@beartype
+def run_program_once(params=None) -> None:
+    if not args.run_program_once:
+        print_debug("[yellow]No setup script specified (run_program_once). Skipping setup.[/yellow]")
+        return
+
+    if params is None:
+        params = {}
+
+    if isinstance(args.run_program_once, str):
+        command_str = args.run_program_once
+        for k, v in params.items():
+            placeholder = f"%({k})"
+            command_str = command_str.replace(placeholder, str(v))
+
+        with console.status("[bold green]Running setup script...[/bold green]", spinner="dots"):
+            console.log(f"Executing command: [cyan]{command_str}[/cyan]")
+            result = subprocess.run(command_str, shell=True, check=True)
+            if result.returncode == 0:
+                console.log("[bold green]Setup script completed successfully ✅[/bold green]")
+            else:
+                console.log(f"[bold red]Setup script failed with exit code {result.returncode} ❌[/bold red]")
+
+                my_exit(57)
+
+    elif isinstance(args.run_program_once, (list, tuple)):
+        with console.status("[bold green]Running setup script (list)...[/bold green]", spinner="dots"):
+            console.log(f"Executing command list: [cyan]{args.run_program_once}[/cyan]")
+            result = subprocess.run(args.run_program_once, check=True)
+            if result.returncode == 0:
+                console.log("[bold green]Setup script completed successfully ✅[/bold green]")
+            else:
+                console.log(f"[bold red]Setup script failed with exit code {result.returncode} ❌[/bold red]")
+
+                my_exit(57)
+
+    else:
+        console.print(f"[red]Invalid type for run_program_once: {type(args.run_program_once)}[/red]")
+
+        my_exit(57)
+
 @beartype
 def main() -> None:
     global RESULT_CSV_FILE, ax_client, LOGFILE_DEBUG_GET_NEXT_TRIALS
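The placeholder handling in the new run_program_once() above is plain textual substitution: every %(name) token in the command string is replaced by the stringified parameter value before the command is run through the shell. A minimal sketch of just that substitution step (the parameter names lr and epochs are only the examples from the --run_program_once help text, and the script path is made up):

    command_str = "./prepare_data.sh --lr=%(lr) --epochs=%(epochs)"
    params = {"lr": 0.01, "epochs": 10}

    for k, v in params.items():
        command_str = command_str.replace(f"%({k})", str(v))

    print(command_str)   # ./prepare_data.sh --lr=0.01 --epochs=10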
@@ -10091,6 +10115,8 @@ def main() -> None:
     if args.dryrun:
         set_max_eval(1)

+    run_program_once()
+
     if os.getenv("CI"):
         data_dict: dict = {
             "param1": "value1",
@@ -10110,7 +10136,7 @@ def main() -> None:
     write_ui_url_if_present()

     LOGFILE_DEBUG_GET_NEXT_TRIALS = f'{get_current_run_folder()}/get_next_trials.csv'
-    experiment_parameters, cli_params_experiment_parameters
+    experiment_parameters, cli_params_experiment_parameters = parse_parameters()

     write_live_share_file_if_needed()

@@ -10120,7 +10146,7 @@ def main() -> None:

     check_max_eval(max_eval)

-    _random_steps
+    _random_steps = get_number_of_steps(max_eval)

     set_random_steps(_random_steps)

.omniopt_plot_scatter.py
CHANGED
@@ -140,7 +140,7 @@ def plot_single_graph(_params: list) -> Any:
 @beartype
 def plot_graphs(_params: list) -> None:
     global fig
-    df, fig, axs, df_filtered, non_empty_graphs, num_subplots, parameter_combinations, num_rows, num_cols,
+    df, fig, axs, df_filtered, non_empty_graphs, num_subplots, parameter_combinations, num_rows, num_cols, _, csv_file_path = _params

     cmap, norm, colors = helpers.get_color_list(df, args, plt, csv_file_path)

{omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt.py
CHANGED
Same changes as .omniopt.py above (+68 -42).
{omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_scatter.py
CHANGED
Same changes as .omniopt_plot_scatter.py above (+1 -1).
{omniopt2-7705.dist-info → omniopt2-7747.dist-info}/RECORD
CHANGED
@@ -3,12 +3,12 @@
 .general.sh,sha256=uyGMN8xNToQ0v50KoiYxm6jRmgf0orroOaodM_Nuq30,2107
 .gitignore,sha256=1onZvC5-n7f_K7S8-MFFVih88H5PuV8rRldD2Ji7qLA,3749
 .helpers.py,sha256=ONRgQCWhOLzi11IsZJyfuedZdMQO2mh0X0ApsnHmz4I,31446
-.omniopt.py,sha256=
+.omniopt.py,sha256=7isXyHg1KHh-kkd6zvoiYgC5yNSQyLDI3FS-HnJw00E,411607
 .omniopt_plot_cpu_ram_usage.py,sha256=DbOAmdrbcZtsMnHJgHfeRngjtv6zX5J0axyua_dYezc,3932
 .omniopt_plot_general.py,sha256=3iy-bPef8I5rTB3KRz-TuleMdgKDmVZ6c8LuNQhNwu0,6810
 .omniopt_plot_gpu_usage.py,sha256=ojxVicwSoiyl7f3c-6lLuT2EpyPcSJKEcbp75LgDY2k,5107
 .omniopt_plot_kde.py,sha256=uRLWr72TDKvj3AqJ0O0AvkKZ1ok1O1QpXnbfQQdo0nA,6873
-.omniopt_plot_scatter.py,sha256
+.omniopt_plot_scatter.py,sha256=b0_CIqgyi6PztaUVJRL9X9XBTaOonh-yDH2hRxMGkH0,8403
 .omniopt_plot_scatter_generation_method.py,sha256=rgKY_w1E516c9UucVaEvaKd8tCnoUq9xg-RrYSDzYEQ,4289
 .omniopt_plot_scatter_hex.py,sha256=UKjw40c1eumgEcf0xqB-_SakX5PB6HD3u4VwBxbsgQo,10279
 .omniopt_plot_time_and_exit_code.py,sha256=WUyl2uI59wsC1eSX_5uJHOrqcF-s5cUDIEu8u3IFMLU,6462
@@ -26,44 +26,44 @@ omniopt_docker,sha256=LWVUeyvmA5AKqAHiH9jBUkR5uZ6AHMnSy0eET7mK6E4,3602
 omniopt_evaluate,sha256=9oBh0_ikCuIz_aJQZrN0j39NDiIDYoSvEFmSVIoFjJE,23842
 omniopt_plot,sha256=TMQ8a_IZCh2xOR_DW40Sg0gGjYDOqFTNuNlnnWtRkK0,13739
 omniopt_share,sha256=curejmImuMdn5OwTGO4xohLQz0KIrYetP0yaS23zb34,14001
-pyproject.toml,sha256=
+pyproject.toml,sha256=N1m1wDobHsupdF05P0u3wayFtSMGMi_URFwLTNIw1zw,397
 requirements.txt,sha256=qUyzH7JT-Xsh82c0vRQBhArYTSHNzn2OYK7YRh3NUm4,314
 setup.cfg,sha256=HEc8uu6NpfxG5_AVh5SvXOpEFMNKPPPxgMIAH144vT4,38
 test_requirements.txt,sha256=jpyZzAwbWR_qnoRqWvpBB5MUjIX9jVwynX2D-B-r8aA,487
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2.egg-info/PKG-INFO,sha256=
+omniopt2-7747.data/data/bin/.colorfunctions.sh,sha256=xxc08V3Fh_0Je20fkJMRO14u9VCSvMyOiMaDfioEyCY,1098
+omniopt2-7747.data/data/bin/.general.sh,sha256=uyGMN8xNToQ0v50KoiYxm6jRmgf0orroOaodM_Nuq30,2107
+omniopt2-7747.data/data/bin/.helpers.py,sha256=ONRgQCWhOLzi11IsZJyfuedZdMQO2mh0X0ApsnHmz4I,31446
+omniopt2-7747.data/data/bin/.omniopt.py,sha256=7isXyHg1KHh-kkd6zvoiYgC5yNSQyLDI3FS-HnJw00E,411607
+omniopt2-7747.data/data/bin/.omniopt_plot_cpu_ram_usage.py,sha256=DbOAmdrbcZtsMnHJgHfeRngjtv6zX5J0axyua_dYezc,3932
+omniopt2-7747.data/data/bin/.omniopt_plot_general.py,sha256=3iy-bPef8I5rTB3KRz-TuleMdgKDmVZ6c8LuNQhNwu0,6810
+omniopt2-7747.data/data/bin/.omniopt_plot_gpu_usage.py,sha256=ojxVicwSoiyl7f3c-6lLuT2EpyPcSJKEcbp75LgDY2k,5107
+omniopt2-7747.data/data/bin/.omniopt_plot_kde.py,sha256=uRLWr72TDKvj3AqJ0O0AvkKZ1ok1O1QpXnbfQQdo0nA,6873
+omniopt2-7747.data/data/bin/.omniopt_plot_scatter.py,sha256=b0_CIqgyi6PztaUVJRL9X9XBTaOonh-yDH2hRxMGkH0,8403
+omniopt2-7747.data/data/bin/.omniopt_plot_scatter_generation_method.py,sha256=rgKY_w1E516c9UucVaEvaKd8tCnoUq9xg-RrYSDzYEQ,4289
+omniopt2-7747.data/data/bin/.omniopt_plot_scatter_hex.py,sha256=UKjw40c1eumgEcf0xqB-_SakX5PB6HD3u4VwBxbsgQo,10279
+omniopt2-7747.data/data/bin/.omniopt_plot_time_and_exit_code.py,sha256=WUyl2uI59wsC1eSX_5uJHOrqcF-s5cUDIEu8u3IFMLU,6462
+omniopt2-7747.data/data/bin/.omniopt_plot_trial_index_result.py,sha256=9ApSv3-eZXohafsCAY5jr2B0rG4XpaTloCb0TuevYSg,4577
+omniopt2-7747.data/data/bin/.omniopt_plot_worker.py,sha256=VuluQq4W6KRR5RU08dxmDSFk5mbfDRkRJQFwwcLgAGw,4524
+omniopt2-7747.data/data/bin/.random_generator.py,sha256=ezBBUXpez_QaGdpCglMcJ0KZPdQP0XdX5gnLzO1xhwU,2987
+omniopt2-7747.data/data/bin/.shellscript_functions,sha256=anHc5T0Lm_BzTZTzEIp7cIICu5zdXj1FKL3LnODmHeE,13610
+omniopt2-7747.data/data/bin/.tpe.py,sha256=xxQuTZFQHHobvZuqG8cP2y2ev8ifjlLvex3TOaNWq8w,6754
+omniopt2-7747.data/data/bin/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+omniopt2-7747.data/data/bin/apt-dependencies.txt,sha256=X5tBB8ZLW9XaFtRh8B7C2pIkSoxNNawqioDr0QZAtuM,149
+omniopt2-7747.data/data/bin/omniopt,sha256=-Zj530qw1_c_wR4p0qn-SlPxM2u1oRLFERPZzwqxW8E,47906
+omniopt2-7747.data/data/bin/omniopt_docker,sha256=LWVUeyvmA5AKqAHiH9jBUkR5uZ6AHMnSy0eET7mK6E4,3602
+omniopt2-7747.data/data/bin/omniopt_evaluate,sha256=9oBh0_ikCuIz_aJQZrN0j39NDiIDYoSvEFmSVIoFjJE,23842
+omniopt2-7747.data/data/bin/omniopt_plot,sha256=TMQ8a_IZCh2xOR_DW40Sg0gGjYDOqFTNuNlnnWtRkK0,13739
+omniopt2-7747.data/data/bin/omniopt_share,sha256=curejmImuMdn5OwTGO4xohLQz0KIrYetP0yaS23zb34,14001
+omniopt2-7747.data/data/bin/requirements.txt,sha256=qUyzH7JT-Xsh82c0vRQBhArYTSHNzn2OYK7YRh3NUm4,314
+omniopt2-7747.data/data/bin/setup.py,sha256=UKkBlnpbQ3sN-5jaEG7NuZzHK5Qcyi-DZd8jJmXiCyY,4588
+omniopt2-7747.data/data/bin/test_requirements.txt,sha256=jpyZzAwbWR_qnoRqWvpBB5MUjIX9jVwynX2D-B-r8aA,487
+omniopt2-7747.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+omniopt2.egg-info/PKG-INFO,sha256=PgnR8ven-Sl9wcIPhKwGxNWAptNkBD66L-QKTMzbrws,5881
 omniopt2.egg-info/SOURCES.txt,sha256=N-HtSaaqFRsd4XqAfeWVSp__3I-sw0d7cknJgyewRwQ,778
 omniopt2.egg-info/dependency_links.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 omniopt2.egg-info/requires.txt,sha256=ZRZrgp9LV45KU7IW6dBsJvdTcJatzNjF5aVEqnYnimQ,801
 omniopt2.egg-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
+omniopt2-7747.dist-info/METADATA,sha256=PgnR8ven-Sl9wcIPhKwGxNWAptNkBD66L-QKTMzbrws,5881
+omniopt2-7747.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+omniopt2-7747.dist-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+omniopt2-7747.dist-info/RECORD,,
omniopt2.egg-info/PKG-INFO
CHANGED
pyproject.toml
CHANGED
{omniopt2-7705.data → omniopt2-7747.data}/data/bin/.omniopt_plot_scatter_generation_method.py
RENAMED
File without changes
All other renamed files listed above likewise contain no changes.