omniopt2 7102__py3-none-any.whl → 7107__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- .omniopt.py +194 -140
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt.py +194 -140
- {omniopt2-7102.dist-info → omniopt2-7107.dist-info}/METADATA +1 -1
- {omniopt2-7102.dist-info → omniopt2-7107.dist-info}/RECORD +35 -35
- omniopt2.egg-info/PKG-INFO +1 -1
- pyproject.toml +1 -1
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.colorfunctions.sh +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.general.sh +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.helpers.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_cpu_ram_usage.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_general.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_gpu_usage.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_kde.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_scatter.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_scatter_generation_method.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_scatter_hex.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_time_and_exit_code.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_trial_index_result.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt_plot_worker.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.random_generator.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.shellscript_functions +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/.tpe.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/LICENSE +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/apt-dependencies.txt +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/omniopt +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/omniopt_docker +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/omniopt_evaluate +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/omniopt_plot +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/omniopt_share +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/requirements.txt +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/setup.py +0 -0
- {omniopt2-7102.data → omniopt2-7107.data}/data/bin/test_requirements.txt +0 -0
- {omniopt2-7102.dist-info → omniopt2-7107.dist-info}/WHEEL +0 -0
- {omniopt2-7102.dist-info → omniopt2-7107.dist-info}/licenses/LICENSE +0 -0
- {omniopt2-7102.dist-info → omniopt2-7107.dist-info}/top_level.txt +0 -0
.omniopt.py
CHANGED
@@ -3248,9 +3248,7 @@ def _write_job_infos_csv_main(parameters: dict, stdout: Optional[str], program_s
     headline = _write_job_infos_csv_build_headline(parameters_keys, extra_vars_names)
     result_values = _write_job_infos_csv_result_to_strlist(result)

-    values = _write_job_infos_csv_build_values(start_time, end_time, run_time, program_string_with_params,
-                                               str_parameters_values, result_values, exit_code, _signal,
-                                               extra_vars_values)
+    values = _write_job_infos_csv_build_values(start_time, end_time, run_time, program_string_with_params, str_parameters_values, result_values, exit_code, _signal, extra_vars_values)

     headline = _write_job_infos_csv_replace_none_with_str(headline)
     values = _write_job_infos_csv_replace_none_with_str(values)
@@ -3268,13 +3266,11 @@ def _write_job_infos_csv_main(parameters: dict, stdout: Optional[str], program_s
 def _write_job_infos_csv_parameters_to_str(parameters: dict) -> List[str]:
     return [str(v) for v in list(parameters.values())]

-
 @beartype
 def _write_job_infos_csv_extract_extra_vars(stdout: Optional[str]) -> Tuple[List[str], List[str]]:
     # extract_info ist hier eine vorhandene Funktion, die extra Variablen aus stdout extrahiert
     return extract_info(stdout)

-
 @beartype
 def _write_job_infos_csv_add_slurm_job_id(extra_vars_names: List[str], extra_vars_values: List[str]) -> Tuple[List[str], List[str]]:
     _SLURM_JOB_ID = os.getenv('SLURM_JOB_ID')
@@ -3283,7 +3279,6 @@ def _write_job_infos_csv_add_slurm_job_id(extra_vars_names: List[str], extra_var
         extra_vars_values.append(str(_SLURM_JOB_ID))
     return extra_vars_names, extra_vars_values

-
 @beartype
 def _write_job_infos_csv_build_headline(parameters_keys: List[str], extra_vars_names: List[str]) -> List[str]:
     return [
@@ -3299,7 +3294,6 @@ def _write_job_infos_csv_build_headline(parameters_keys: List[str], extra_vars_n
         *extra_vars_names
     ]

-
 @beartype
 def _write_job_infos_csv_result_to_strlist(result: Optional[Union[Dict[str, Optional[float]], List[float], int, float]]) -> List[str]:
     result_values: List[str] = []
@@ -3317,7 +3311,6 @@ def _write_job_infos_csv_result_to_strlist(result: Optional[Union[Dict[str, Opti

     return result_values

-
 @beartype
 def _write_job_infos_csv_build_values(start_time: Union[int, float], end_time: Union[int, float], run_time: Union[float, int],
                                       program_string_with_params: str, str_parameters_values: List[str],
@@ -3336,7 +3329,6 @@ def _write_job_infos_csv_build_values(start_time: Union[int, float], end_time: U
         *extra_vars_values
     ]

-
 @beartype
 def _write_job_infos_csv_replace_none_with_str(elements: List[Optional[str]]) -> List[str]:
     return ['None' if element is None else element for element in elements]
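Note: the single-line call to _write_job_infos_csv_build_values above is only a reflow of the argument list; the behaviour is unchanged. The None-to-string normalisation that follows it is simple enough to show in isolation (the sample lists below are invented for illustration, not taken from the package):

    headline = ["start_time", "exit_code", "signal"]
    values = ["1700000000", None, None]
    normalized = ['None' if element is None else element for element in values]
    # normalized == ['1700000000', 'None', 'None'] - every cell is now a str and safe to write to the CSV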
@@ -3510,28 +3502,59 @@ def die_for_debug_reasons() -> None:
     except ValueError:
         print_red(f"Invalid value for DIE_AFTER_THIS_NR_OF_DONE_JOBS: '{max_done_str}', cannot be converted to int")

+@beartype
+def _evaluate_preprocess_parameters(parameters: dict) -> dict:
+    return {
+        k: (int(float(v)) if isinstance(v, (int, float, str)) and re.fullmatch(r'^\d+(\.0+)?$', str(v)) else v)
+        for k, v in parameters.items()
+    }
+
+@beartype
+def _evaluate_create_signal_map() -> Dict[str, type[BaseException]]:
+    return {
+        "USR1-signal": SignalUSR,
+        "CONT-signal": SignalCONT,
+        "INT-signal": SignalINT
+    }
+
+@beartype
+def _evaluate_handle_result(
+    stdout: str,
+    result: Union[int, float, dict, list],
+    parameters: dict
+) -> Dict[str, Optional[Union[float, Tuple]]]:
+    final_result: Dict[str, Optional[Union[float, Tuple]]] = {}
+
+    if isinstance(result, (int, float)):
+        for name in arg_result_names:
+            final_result[name] = attach_sem_to_result(stdout, name, float(result))
+
+    elif isinstance(result, list):
+        float_values = [float(r) for r in result]
+        for name in arg_result_names:
+            final_result[name] = attach_sem_to_result(stdout, name, float_values)
+
+    elif isinstance(result, dict):
+        for name in arg_result_names:
+            final_result[name] = attach_sem_to_result(stdout, name, result.get(name))
+
+    else:
+        write_failed_logs(parameters, "No Result")
+
+    return final_result
+
 @beartype
 def evaluate(parameters: dict) -> Optional[Union[int, float, Dict[str, Optional[Union[int, float, Tuple]]], List[float]]]:
     start_nvidia_smi_thread()
-
     return_in_case_of_error: dict = get_return_in_case_of_errors()

     _test_gpu = test_gpu_before_evaluate(return_in_case_of_error)
-    final_result = return_in_case_of_error
+    final_result: Optional[Union[int, float, Dict[str, Optional[Union[int, float, Tuple]]], List[float]]] = return_in_case_of_error

     if _test_gpu is None:
-        parameters = {
-            k: (int(float(v)) if isinstance(v, (int, float, str)) and re.fullmatch(r'^\d+(\.0+)?$', str(v)) else v)
-            for k, v in parameters.items()
-        }
-
+        parameters = _evaluate_preprocess_parameters(parameters)
         ignore_signals()
-
-        signal_messages = {
-            "USR1-signal": SignalUSR,
-            "CONT-signal": SignalCONT,
-            "INT-signal": SignalINT
-        }
+        signal_messages = _evaluate_create_signal_map()

     try:
         if args.raise_in_eval:
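The new _evaluate_preprocess_parameters helper coerces values that represent whole numbers (including strings such as "4" or "4.0") to int and leaves everything else untouched. A minimal, self-contained sketch of the same idea (the sample parameter dict is made up for illustration):

    import re

    def preprocess(parameters: dict) -> dict:
        # whole numbers like 10, 32.0 or "4" become int; other values pass through unchanged
        return {
            k: (int(float(v)) if isinstance(v, (int, float, str)) and re.fullmatch(r'^\d+(\.0+)?$', str(v)) else v)
            for k, v in parameters.items()
        }

    print(preprocess({"epochs": "10", "lr": 0.001, "batch_size": 32.0, "optimizer": "adam"}))
    # {'epochs': 10, 'lr': 0.001, 'batch_size': 32, 'optimizer': 'adam'}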
@@ -3552,23 +3575,7 @@ def evaluate(parameters: dict) -> Optional[Union[int, float, Dict[str, Optional[

         result = get_results_with_occ(stdout)

-        final_result = {}
-
-        if isinstance(result, (int, float)):
-            for name in arg_result_names:
-                final_result[name] = attach_sem_to_result(stdout, name, float(result))
-
-        elif isinstance(result, list):
-            float_values = [float(r) for r in result]
-            for name in arg_result_names:
-                final_result[name] = attach_sem_to_result(stdout, name, float_values)
-
-        elif isinstance(result, dict):
-            for name in arg_result_names:
-                final_result[name] = attach_sem_to_result(stdout, name, result.get(name))
-
-        else:
-            write_failed_logs(parameters, "No Result")
+        final_result = _evaluate_handle_result(stdout, result, parameters)

         _evaluate_print_stuff(
             parameters,
@@ -5616,7 +5623,6 @@ def parse_csv(csv_path: str) -> Tuple[List, List]:

     return arm_params_list, results_list

-
 @beartype
 def get_generation_node_for_index(
     this_csv_file_path: str,
@@ -5639,7 +5645,6 @@ def get_generation_node_for_index(
         print(f"Error while get_generation_node_for_index: {e}")
         return "MANUAL"

-
 @beartype
 def _get_generation_node_for_index_index_valid(
     index: int,
@@ -5648,7 +5653,6 @@ def _get_generation_node_for_index_index_valid(
 ) -> bool:
     return 0 <= index < len(arm_params_list) and index < len(results_list)

-
 @beartype
 def _get_generation_node_for_index_combine_dicts(
     dict1: Dict[str, Any],
@@ -5659,7 +5663,6 @@ def _get_generation_node_for_index_combine_dicts(
     combined.update(dict2)
     return combined

-
 @beartype
 def _get_generation_node_for_index_find_generation_node(
     csv_file_path: str,
@@ -5676,7 +5679,6 @@ def _get_generation_node_for_index_find_generation_node(

     return "MANUAL"

-
 @beartype
 def _get_generation_node_for_index_row_matches(
     row: Dict[str, str],
@@ -5696,7 +5698,6 @@ def _get_generation_node_for_index_row_matches(

     return True

-
 @beartype
 def _get_generation_node_for_index_floats_match(
     val: float,
@@ -6406,7 +6407,6 @@ def check_orchestrator(stdout_path: str, trial_index: int) -> Optional[List[str]

     return _check_orchestrator_find_behaviors(stdout, orchestrator["errors"])

-
 @beartype
 def _check_orchestrator_read_stdout_with_fallback(stdout_path: str, trial_index: int) -> Optional[str]:
     try:
@@ -6423,7 +6423,6 @@ def _check_orchestrator_read_stdout_with_fallback(stdout_path: str, trial_index:
         _check_orchestrator_register_missing_file(stdout_path, trial_index)
         return None

-
 @beartype
 def _check_orchestrator_register_missing_file(stdout_path: str, trial_index: int) -> None:
     if stdout_path not in ORCHESTRATE_TODO:
@@ -6432,7 +6431,6 @@ def _check_orchestrator_register_missing_file(stdout_path: str, trial_index: int
     else:
         print_red(f"File not found: {stdout_path}, not trying again")

-
 @beartype
 def _check_orchestrator_find_behaviors(stdout: str, errors: List[Dict[str, Any]]) -> List[str]:
     behaviors: List[str] = []
@@ -7217,64 +7215,82 @@ def generate_time_table_rich() -> None:
     save_table_as_text(table, filepath)

 @beartype
-def 
-    if not 
-        print_debug("
-        return
+def validate_job_submit_data(durations: List[float], job_counts: List[int]) -> bool:
+    if not durations or not job_counts:
+        print_debug("No durations or job counts to display.")
+        return False

-    if len(
-        print_debug("
-        return
+    if len(durations) != len(job_counts):
+        print_debug("Length mismatch between durations and job counts.")
+        return False

-
-        print_debug("generate_job_submit_table_rich: Length mismatch between durations and job counts.")
-        return
+    return True

-
-
-
-
-
-
-
-
-
-
+@beartype
+def convert_durations_to_float(raw_durations: List) -> List[float] | None:
+    try:
+        return [float(val) for val in raw_durations]
+    except (ValueError, TypeError) as e:
+        print_debug(f"Invalid float in durations: {e}")
+        return None
+
+@beartype
+def convert_job_counts_to_int(raw_counts: List) -> List[int] | None:
+    try:
+        return [int(val) for val in raw_counts]
+    except (ValueError, TypeError) as e:
+        print_debug(f"Invalid int in job counts: {e}")
+        return None

+@beartype
+def build_job_submission_table(durations: List[float], job_counts: List[int]) -> Table:
     table = Table(show_header=True, header_style="bold", title="Job submission durations")
     table.add_column("Batch", justify="right")
     table.add_column("Seconds", justify="right")
     table.add_column("Jobs", justify="right")
     table.add_column("Time per job", justify="right")

-    for idx, (
-
-
-        table.add_row(str(idx), f"{time_val_float:.3f}", str(jobs), f"{time_per_job:.3f}")
-
-    times_float = [float(t) for t in job_submit_durations]
-    avg_time = mean(times_float)
-    median_time = median(times_float)
-    total_time = sum(times_float)
-    max_time = max(times_float)
-    min_time = min(times_float)
+    for idx, (duration, jobs) in enumerate(zip(durations, job_counts), start=1):
+        time_per_job = duration / jobs if jobs > 0 else 0
+        table.add_row(str(idx), f"{duration:.3f}", str(jobs), f"{time_per_job:.3f}")

     table.add_section()
+    table.add_row("Average", f"{mean(durations):.3f}", "", "")
+    table.add_row("Median", f"{median(durations):.3f}", "", "")
+    table.add_row("Total", f"{sum(durations):.3f}", "", "")
+    table.add_row("Max", f"{max(durations):.3f}", "", "")
+    table.add_row("Min", f"{min(durations):.3f}", "", "")

-    table
-    table.add_row("Median", f"{median_time:.3f}", "", "")
-    table.add_row("Total", f"{total_time:.3f}", "", "")
-    table.add_row("Max", f"{max_time:.3f}", "", "")
-    table.add_row("Min", f"{min_time:.3f}", "", "")
-
-    if args.show_generate_time_table:
-        console.print(table)
+    return table

+@beartype
+def export_table_to_file(table: Table, filename: str) -> None:
     folder = get_current_run_folder()
-    filename = "job_submit_durations.txt"
     filepath = os.path.join(folder, filename)
     save_table_as_text(table, filepath)

+@beartype
+def generate_job_submit_table_rich() -> None:
+    if not isinstance(job_submit_durations, list) or not isinstance(job_submit_nrs, list):
+        print_debug("job_submit_durations or job_submit_nrs is not a list.")
+        return
+
+    durations = convert_durations_to_float(job_submit_durations)
+    job_counts = convert_job_counts_to_int(job_submit_nrs)
+
+    if durations is None or job_counts is None:
+        return
+
+    if not validate_job_submit_data(durations, job_counts):
+        return
+
+    table = build_job_submission_table(durations, job_counts)
+
+    if args.show_generate_time_table:
+        console.print(table)
+
+    export_table_to_file(table, "job_submit_durations.txt")
+
 @beartype
 def plot_times_for_creation_and_submission() -> None:
     if not args.show_generation_and_submission_sixel:
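generate_job_submit_table_rich is now a thin pipeline over the helpers introduced above: convert the raw duration and job-count lists, validate them, build the Rich table, optionally print it, and export it. A hedged usage sketch with invented numbers (assuming only the helper functions shown in this hunk):

    durations = convert_durations_to_float(["0.52", 0.61, 0.48])   # -> [0.52, 0.61, 0.48]
    job_counts = convert_job_counts_to_int([4, "4", 8])            # -> [4, 4, 8]

    if durations is not None and job_counts is not None and validate_job_submit_data(durations, job_counts):
        table = build_job_submission_table(durations, job_counts)  # per-batch rows plus Average/Median/Total/Max/Min
        export_table_to_file(table, "job_submit_durations.txt")    # written into the current run folder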
@@ -7905,49 +7921,71 @@ def create_and_execute_next_runs(next_nr_steps: int, phase: Optional[str], _max_
         print_debug(f"Warning: create_and_execute_next_runs(next_nr_steps: {next_nr_steps}, phase: {phase}, _max_eval: {_max_eval}, progress_bar)")
         return 0

-    trial_index_to_param = None
-    done_optimizing = False
+    trial_index_to_param: Optional[Dict] = None
+    done_optimizing: bool = False
+    results: List = []

     try:
-
-
+        done_optimizing, trial_index_to_param, results = _create_and_execute_next_runs_run_loop(next_nr_steps, _max_eval, phase, _progress_bar)
+        _create_and_execute_next_runs_finish(done_optimizing)
+    except Exception as e:
+        stacktrace = traceback.format_exc()
+        print_debug(f"Warning: create_and_execute_next_runs encountered an exception: {e}\n{stacktrace}")
+        return handle_exceptions_create_and_execute_next_runs(e)
+
+    return _create_and_execute_next_runs_return_value(trial_index_to_param)

-
+@beartype
+def _create_and_execute_next_runs_run_loop(next_nr_steps: int, _max_eval: Optional[int], phase: Optional[str], _progress_bar: Any
+) -> Tuple[bool, Optional[Dict], List]:
+    done_optimizing = False
+    trial_index_to_param: Optional[Dict] = None
+    results: List = []

-
-        get_next_trials_nr = 1
+    nr_of_jobs_to_get = _calculate_nr_of_jobs_to_get(get_nr_of_imported_jobs(), len(global_vars["jobs"]))

-
-
-
+    # Sicherstellen, dass _max_eval nicht None ist
+    __max_eval = _max_eval if _max_eval is not None else 0
+    new_nr_of_jobs_to_get = min(__max_eval - (submitted_jobs() - failed_jobs()), nr_of_jobs_to_get)

-
-
-
-
-
-
-
+    range_nr = new_nr_of_jobs_to_get
+    get_next_trials_nr = 1
+
+    if getattr(args, "generate_all_jobs_at_once", False):
+        range_nr = 1
+        get_next_trials_nr = new_nr_of_jobs_to_get
+
+    for _ in range(range_nr):
+        trial_index_to_param, optimization_complete = _get_next_trials(get_next_trials_nr)
+        done_optimizing = handle_optimization_completion(optimization_complete)
+        if done_optimizing:
+            continue
+
+        if trial_index_to_param:
+            nr_jobs_before_removing_abandoned = len(list(trial_index_to_param.keys()))

-
+            filtered_trial_index_to_param = {k: v for k, v in trial_index_to_param.items() if k not in abandoned_trial_indices}

-
-
+            if len(filtered_trial_index_to_param):
+                results.extend(execute_trials(filtered_trial_index_to_param, next_nr_steps, phase, _max_eval, _progress_bar))
+            else:
+                if nr_jobs_before_removing_abandoned > 0:
+                    print_debug(f"Could not get jobs. They've been deleted by abandoned_trial_indices: {abandoned_trial_indices}")
         else:
-
-            print_debug(f"Could not get jobs. They've been deleted by abandoned_trial_indices: {abandoned_trial_indices}")
-        else:
-            print_debug("Could not generate any jobs")
+            print_debug("Could not generate any jobs")

-
+        trial_index_to_param = filtered_trial_index_to_param

-
-        end_program(False, 0)
-    except Exception as e:
-        stacktrace = traceback.format_exc()
-        print_debug(f"Warning: create_and_execute_next_runs encountered an exception: {e}\n{stacktrace}")
-        return handle_exceptions_create_and_execute_next_runs(e)
+    return done_optimizing, trial_index_to_param, results

+@beartype
+def _create_and_execute_next_runs_finish(done_optimizing: bool) -> None:
+    finish_previous_jobs(["finishing jobs"])
+    if done_optimizing:
+        end_program(False, 0)
+
+@beartype
+def _create_and_execute_next_runs_return_value(trial_index_to_param: Optional[Dict]) -> int:
     try:
         if trial_index_to_param:
             res = len(trial_index_to_param.keys())
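The rewritten loop in _create_and_execute_next_runs_run_loop chooses between asking for trials one at a time and asking for the whole batch in a single call: by default it iterates new_nr_of_jobs_to_get times requesting one trial per iteration, and with args.generate_all_jobs_at_once set it flips to a single iteration requesting all of them. A small standalone sketch of just that switch (the function name and arguments here are illustrative, not part of the module):

    def plan_requests(new_nr_of_jobs_to_get: int, generate_all_jobs_at_once: bool) -> tuple[int, int]:
        # returns (number of loop iterations, trials requested per iteration)
        if generate_all_jobs_at_once:
            return 1, new_nr_of_jobs_to_get
        return new_nr_of_jobs_to_get, 1

    assert plan_requests(5, False) == (5, 1)   # five calls, one trial each
    assert plan_requests(5, True) == (1, 5)    # one call asking for all five trials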
@@ -8440,6 +8478,41 @@ def plot_pareto_frontier_sixel(data: Any, x_metric: str, y_metric: str) -> None:

     plt.close(fig)

+@beartype
+def _pareto_front_general_validate_shapes(x: np.ndarray, y: np.ndarray) -> None:
+    if x.shape != y.shape:
+        raise ValueError("Input arrays x and y must have the same shape.")
+
+@beartype
+def _pareto_front_general_compare(
+    xi: float, yi: float, xj: float, yj: float,
+    x_minimize: bool, y_minimize: bool
+) -> bool:
+    x_better_eq = xj <= xi if x_minimize else xj >= xi
+    y_better_eq = yj <= yi if y_minimize else yj >= yi
+    x_strictly_better = xj < xi if x_minimize else xj > xi
+    y_strictly_better = yj < yi if y_minimize else yj > yi
+
+    return x_better_eq and y_better_eq and (x_strictly_better or y_strictly_better)
+
+@beartype
+def _pareto_front_general_find_dominated(
+    x: np.ndarray, y: np.ndarray, x_minimize: bool, y_minimize: bool
+) -> np.ndarray:
+    num_points = len(x)
+    is_dominated = np.zeros(num_points, dtype=bool)
+
+    for i in range(num_points):
+        for j in range(num_points):
+            if i == j:
+                continue
+
+            if _pareto_front_general_compare(x[i], y[i], x[j], y[j], x_minimize, y_minimize):
+                is_dominated[i] = True
+                break
+
+    return is_dominated
+
 @beartype
 def pareto_front_general(
     x: np.ndarray,
@@ -8448,28 +8521,9 @@ def pareto_front_general(
     y_minimize: bool = True
 ) -> np.ndarray:
     try:
-
-
-
-        num_points = len(x)
-        is_dominated = np.zeros(num_points, dtype=bool)
-
-        for i in range(num_points):
-            for j in range(num_points):
-                if i == j:
-                    continue
-
-                x_better_or_equal = x[j] <= x[i] if x_minimize else x[j] >= x[i]
-                y_better_or_equal = y[j] <= y[i] if y_minimize else y[j] >= y[i]
-                x_strictly_better = x[j] < x[i] if x_minimize else x[j] > x[i]
-                y_strictly_better = y[j] < y[i] if y_minimize else y[j] > y[i]
-
-                if x_better_or_equal and y_better_or_equal and (x_strictly_better or y_strictly_better):
-                    is_dominated[i] = True
-                    break
-
+        _pareto_front_general_validate_shapes(x, y)
+        is_dominated = _pareto_front_general_find_dominated(x, y, x_minimize, y_minimize)
         return np.where(~is_dominated)[0]
-
     except Exception as e:
         print("Error in pareto_front_general:", str(e))
         return np.array([], dtype=int)
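The extracted _pareto_front_general_find_dominated helper is a plain O(n^2) dominance scan, and pareto_front_general now just keeps the indices of non-dominated points. A self-contained NumPy sketch of the same dominance rule, with invented sample data and both objectives minimised:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0, 1.5])
    y = np.array([4.0, 3.0, 5.0, 3.5])

    n = len(x)
    dominated = np.zeros(n, dtype=bool)
    for i in range(n):
        for j in range(n):
            # j dominates i if it is no worse in both objectives and strictly better in at least one
            if i != j and x[j] <= x[i] and y[j] <= y[i] and (x[j] < x[i] or y[j] < y[i]):
                dominated[i] = True
                break

    print(np.where(~dominated)[0])   # [0 1 3]; point 2 is dominated because it is worse in both objectives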
{omniopt2-7102.data → omniopt2-7107.data}/data/bin/.omniopt.py
CHANGED
(identical diff to .omniopt.py above)
{omniopt2-7102.dist-info → omniopt2-7107.dist-info}/RECORD
CHANGED
@@ -3,7 +3,7 @@
 .general.sh,sha256=uyGMN8xNToQ0v50KoiYxm6jRmgf0orroOaodM_Nuq30,2107
 .gitignore,sha256=OMaFWOR6wxjAlI85rF3euQcjQFFAl1F34abZkltKnaU,3714
 .helpers.py,sha256=srrRn0QBzG8JpNbHAYG2k1rQY6JYrUrNdTj-tDMJSyg,30526
-.omniopt.py,sha256=
+.omniopt.py,sha256=0vurXNI21uVLit0NtcllnYOabBVPNjJDTATB3ZnXKps,381086
 .omniopt_plot_cpu_ram_usage.py,sha256=DbOAmdrbcZtsMnHJgHfeRngjtv6zX5J0axyua_dYezc,3932
 .omniopt_plot_general.py,sha256=ZERZJkvVOoJhi7SszmTF1Iln-_08_0Aki48u3LHUW-k,6809
 .omniopt_plot_gpu_usage.py,sha256=ojxVicwSoiyl7f3c-6lLuT2EpyPcSJKEcbp75LgDY2k,5107
@@ -26,44 +26,44 @@ omniopt_docker,sha256=LWVUeyvmA5AKqAHiH9jBUkR5uZ6AHMnSy0eET7mK6E4,3602
 omniopt_evaluate,sha256=9oBh0_ikCuIz_aJQZrN0j39NDiIDYoSvEFmSVIoFjJE,23842
 omniopt_plot,sha256=Z8ZR10p-ZRSgMeVPO-wVCJ8lk-LQtntjZ9Bk9RifCIs,13360
 omniopt_share,sha256=7g5I7YdoWcA6_GDwWwq0xPf23qiVc_VDrm9ySLH7SH0,14051
-pyproject.toml,sha256=
+pyproject.toml,sha256=jyiW09_RWWu5MkE9xRBdr69Q7uAR5bU8lQGwYHpdW3Y,397
 requirements.txt,sha256=QsRYgd43IXr8rN9m0CxufI9mEneBrDeKh4s8E_W2xwI,287
 setup.cfg,sha256=HEc8uu6NpfxG5_AVh5SvXOpEFMNKPPPxgMIAH144vT4,38
 test_requirements.txt,sha256=dnCbKmKalrVzNZ_-iQWf1xCxcnDsdGuhbDAr9XlGm-U,477
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2.egg-info/PKG-INFO,sha256=
+omniopt2-7107.data/data/bin/.colorfunctions.sh,sha256=CDlgjwrsrHR_E6c-Qak5wZlotArXm-nf9sVvXePzGZA,1083
+omniopt2-7107.data/data/bin/.general.sh,sha256=uyGMN8xNToQ0v50KoiYxm6jRmgf0orroOaodM_Nuq30,2107
+omniopt2-7107.data/data/bin/.helpers.py,sha256=srrRn0QBzG8JpNbHAYG2k1rQY6JYrUrNdTj-tDMJSyg,30526
+omniopt2-7107.data/data/bin/.omniopt.py,sha256=0vurXNI21uVLit0NtcllnYOabBVPNjJDTATB3ZnXKps,381086
+omniopt2-7107.data/data/bin/.omniopt_plot_cpu_ram_usage.py,sha256=DbOAmdrbcZtsMnHJgHfeRngjtv6zX5J0axyua_dYezc,3932
+omniopt2-7107.data/data/bin/.omniopt_plot_general.py,sha256=ZERZJkvVOoJhi7SszmTF1Iln-_08_0Aki48u3LHUW-k,6809
+omniopt2-7107.data/data/bin/.omniopt_plot_gpu_usage.py,sha256=ojxVicwSoiyl7f3c-6lLuT2EpyPcSJKEcbp75LgDY2k,5107
+omniopt2-7107.data/data/bin/.omniopt_plot_kde.py,sha256=uRLWr72TDKvj3AqJ0O0AvkKZ1ok1O1QpXnbfQQdo0nA,6873
+omniopt2-7107.data/data/bin/.omniopt_plot_scatter.py,sha256=-amqmLR_YLCbHImLPh6gwVFv8iiVnXME544XHsRVCuw,8422
+omniopt2-7107.data/data/bin/.omniopt_plot_scatter_generation_method.py,sha256=rgKY_w1E516c9UucVaEvaKd8tCnoUq9xg-RrYSDzYEQ,4289
+omniopt2-7107.data/data/bin/.omniopt_plot_scatter_hex.py,sha256=w1L5gL6Bc_QudoSJi2lxEVvF17Apjjb3l2T-lXGnsUg,10279
+omniopt2-7107.data/data/bin/.omniopt_plot_time_and_exit_code.py,sha256=hC4RFDiJN_UImezFR6M5uVF-QKDqMDpq6R5DIg7dDDc,6463
+omniopt2-7107.data/data/bin/.omniopt_plot_trial_index_result.py,sha256=5DmqZAQO_PFmzdap-TIhSMAshRXpOHQacnHAtjwnzN4,4629
+omniopt2-7107.data/data/bin/.omniopt_plot_worker.py,sha256=VuluQq4W6KRR5RU08dxmDSFk5mbfDRkRJQFwwcLgAGw,4524
+omniopt2-7107.data/data/bin/.random_generator.py,sha256=ezBBUXpez_QaGdpCglMcJ0KZPdQP0XdX5gnLzO1xhwU,2987
+omniopt2-7107.data/data/bin/.shellscript_functions,sha256=C2VT3Eafz9OtCzjWOMXSeZCUGVMRm8y3eGDL43Ay0qM,14656
+omniopt2-7107.data/data/bin/.tpe.py,sha256=Yd9s-ixCbWxTd3x0O1M1sok9QfM2mBEfsDwXhx50-Nc,6464
+omniopt2-7107.data/data/bin/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+omniopt2-7107.data/data/bin/apt-dependencies.txt,sha256=X5tBB8ZLW9XaFtRh8B7C2pIkSoxNNawqioDr0QZAtuM,149
+omniopt2-7107.data/data/bin/omniopt,sha256=Xe8NP6NRoX4j2IPGRTY80eYfo5fYM-fJfj9_aVP-aR8,47963
+omniopt2-7107.data/data/bin/omniopt_docker,sha256=LWVUeyvmA5AKqAHiH9jBUkR5uZ6AHMnSy0eET7mK6E4,3602
+omniopt2-7107.data/data/bin/omniopt_evaluate,sha256=9oBh0_ikCuIz_aJQZrN0j39NDiIDYoSvEFmSVIoFjJE,23842
+omniopt2-7107.data/data/bin/omniopt_plot,sha256=Z8ZR10p-ZRSgMeVPO-wVCJ8lk-LQtntjZ9Bk9RifCIs,13360
+omniopt2-7107.data/data/bin/omniopt_share,sha256=7g5I7YdoWcA6_GDwWwq0xPf23qiVc_VDrm9ySLH7SH0,14051
+omniopt2-7107.data/data/bin/requirements.txt,sha256=QsRYgd43IXr8rN9m0CxufI9mEneBrDeKh4s8E_W2xwI,287
+omniopt2-7107.data/data/bin/setup.py,sha256=g3uEqJHXhggXwgLYoxOjsXg9Z6IV1ubh-Og59AZ264Q,4648
+omniopt2-7107.data/data/bin/test_requirements.txt,sha256=dnCbKmKalrVzNZ_-iQWf1xCxcnDsdGuhbDAr9XlGm-U,477
+omniopt2-7107.dist-info/licenses/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+omniopt2.egg-info/PKG-INFO,sha256=MQF55JMUX9KCYuwCTv-rgkrdcrct5U8EwlRl915dY7Q,5814
 omniopt2.egg-info/SOURCES.txt,sha256=N-HtSaaqFRsd4XqAfeWVSp__3I-sw0d7cknJgyewRwQ,778
 omniopt2.egg-info/dependency_links.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 omniopt2.egg-info/requires.txt,sha256=cZgCKMii2eXHaYbDhCwiLjxtFK6PW0aqFoJZ7xebsqM,764
 omniopt2.egg-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-omniopt2-
-omniopt2-
-omniopt2-
-omniopt2-
+omniopt2-7107.dist-info/METADATA,sha256=MQF55JMUX9KCYuwCTv-rgkrdcrct5U8EwlRl915dY7Q,5814
+omniopt2-7107.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+omniopt2-7107.dist-info/top_level.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+omniopt2-7107.dist-info/RECORD,,
omniopt2.egg-info/PKG-INFO
CHANGED
pyproject.toml
CHANGED