omniopt2 7201__py3-none-any.whl → 7238__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- .omniopt.py +410 -39
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt.py +410 -39
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/omniopt_share +0 -1
- {omniopt2-7201.dist-info → omniopt2-7238.dist-info}/METADATA +1 -1
- {omniopt2-7201.dist-info → omniopt2-7238.dist-info}/RECORD +36 -36
- omniopt2.egg-info/PKG-INFO +1 -1
- omniopt_share +0 -1
- pyproject.toml +1 -1
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.colorfunctions.sh +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.general.sh +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.helpers.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_cpu_ram_usage.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_general.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_gpu_usage.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_kde.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_scatter.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_scatter_generation_method.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_scatter_hex.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_time_and_exit_code.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_trial_index_result.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.omniopt_plot_worker.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.random_generator.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.shellscript_functions +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/.tpe.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/LICENSE +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/apt-dependencies.txt +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/omniopt +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/omniopt_docker +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/omniopt_evaluate +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/omniopt_plot +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/requirements.txt +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/setup.py +0 -0
- {omniopt2-7201.data → omniopt2-7238.data}/data/bin/test_requirements.txt +0 -0
- {omniopt2-7201.dist-info → omniopt2-7238.dist-info}/WHEEL +0 -0
- {omniopt2-7201.dist-info → omniopt2-7238.dist-info}/licenses/LICENSE +0 -0
- {omniopt2-7201.dist-info → omniopt2-7238.dist-info}/top_level.txt +0 -0
.omniopt.py
CHANGED
@@ -18,6 +18,7 @@ import statistics
|
|
18
18
|
import tempfile
|
19
19
|
import threading
|
20
20
|
|
21
|
+
prepared_setting_to_custom: bool = False
|
21
22
|
whole_start_time: float = time.time()
|
22
23
|
last_progress_bar_desc: str = ""
|
23
24
|
job_submit_durations: list[float] = []
|
@@ -54,6 +55,9 @@ figlet_loaded: bool = False
|
|
54
55
|
try:
|
55
56
|
from rich.console import Console
|
56
57
|
|
58
|
+
from rich.panel import Panel
|
59
|
+
from rich.text import Text
|
60
|
+
|
57
61
|
terminal_width = 150
|
58
62
|
|
59
63
|
try:
|
@@ -61,7 +65,7 @@ try:
|
|
61
65
|
except OSError:
|
62
66
|
pass
|
63
67
|
|
64
|
-
console
|
68
|
+
console = Console(
|
65
69
|
force_interactive=True,
|
66
70
|
soft_wrap=True,
|
67
71
|
color_system="256",
|
@@ -127,15 +131,15 @@ try:
|
|
127
131
|
with console.status("[bold green]Importing rich.table..."):
|
128
132
|
from rich.table import Table
|
129
133
|
|
130
|
-
with console.status("[bold green]Importing rich.text..."):
|
131
|
-
from rich.text import Text
|
132
|
-
|
133
134
|
with console.status("[bold green]Importing rich print..."):
|
134
135
|
from rich import print
|
135
136
|
|
136
137
|
with console.status("[bold green]Importing rich.pretty..."):
|
137
138
|
from rich.pretty import pprint
|
138
139
|
|
140
|
+
with console.status("[bold green]Importing rich.prompt..."):
|
141
|
+
from rich.prompt import Prompt, FloatPrompt, IntPrompt
|
142
|
+
|
139
143
|
with console.status("[bold green]Importing fcntl..."):
|
140
144
|
import fcntl
|
141
145
|
|
@@ -485,6 +489,33 @@ def get_min_max_from_file(continue_path: str, n: int, _default_min_max: str) ->
|
|
485
489
|
print_yellow(f"Line {n} did not contain min/max, will be set to {_default_min_max}")
|
486
490
|
return _default_min_max
|
487
491
|
|
492
|
+
@beartype
|
493
|
+
def set_max_eval(new_max_eval: int) -> None:
|
494
|
+
global max_eval
|
495
|
+
|
496
|
+
print_debug(f"set_max_eval({new_max_eval})")
|
497
|
+
|
498
|
+
max_eval = new_max_eval
|
499
|
+
|
500
|
+
@beartype
|
501
|
+
def set_random_steps(new_steps: int) -> None:
|
502
|
+
global random_steps
|
503
|
+
|
504
|
+
print_debug(f"Setting random_steps from {random_steps} to {new_steps}")
|
505
|
+
|
506
|
+
random_steps = new_steps
|
507
|
+
|
508
|
+
_DEFAULT_SPECIALS: Dict[str, Any] = {
|
509
|
+
"epochs": 1,
|
510
|
+
"epoch": 1,
|
511
|
+
"steps": 1,
|
512
|
+
"batchsize": 1,
|
513
|
+
"batchsz": 1,
|
514
|
+
"bs": 1,
|
515
|
+
"lr": "min",
|
516
|
+
"learning_rate": "min",
|
517
|
+
}
|
518
|
+
|
488
519
|
class ConfigLoader:
|
489
520
|
disable_previous_job_constraint: bool
|
490
521
|
run_tests_that_fail_on_taurus: bool
|
@@ -559,6 +590,7 @@ class ConfigLoader:
|
|
559
590
|
load_data_from_existing_jobs: List[str]
|
560
591
|
time: str
|
561
592
|
share_password: Optional[str]
|
593
|
+
prettyprint: bool
|
562
594
|
generate_all_jobs_at_once: bool
|
563
595
|
result_names: Optional[List[str]]
|
564
596
|
verbose_break_run_search_table: bool
|
@@ -576,6 +608,8 @@ class ConfigLoader:
|
|
576
608
|
no_transform_inputs: bool
|
577
609
|
occ: bool
|
578
610
|
force_choice_for_ranges: bool
|
611
|
+
dryrun: bool
|
612
|
+
just_return_defaults: bool
|
579
613
|
run_mode: str
|
580
614
|
|
581
615
|
@beartype
|
@@ -655,6 +689,7 @@ class ConfigLoader:
|
|
655
689
|
optional.add_argument('--force_choice_for_ranges', help='Force float ranges to be converted to choice', action='store_true', default=False)
|
656
690
|
optional.add_argument('--max_abandoned_retrial', help='Maximum number retrials to get when a job is abandoned post-generation', default=20, type=int)
|
657
691
|
optional.add_argument('--share_password', help='Use this as a password for share. Default is none.', default=None, type=str)
|
692
|
+
optional.add_argument('--dryrun', help='Try to do a dry run, i.e. a run for very short running jobs to test the installation of OmniOpt2 and check if environment stuff and paths and so on works properly', action='store_true', default=False)
|
658
693
|
|
659
694
|
speed.add_argument('--dont_warm_start_refitting', help='Do not keep Model weights, thus, refit for every generator (may be more accurate, but slower)', action='store_true', default=False)
|
660
695
|
speed.add_argument('--refit_on_cv', help='Refit on Cross-Validation (helps in accuracy, but makes generating new points slower)', action='store_true', default=False)
|
@@ -696,6 +731,8 @@ class ConfigLoader:
|
|
696
731
|
debug.add_argument('--raise_in_eval', help='Raise a signal in eval (only useful for debugging and testing)', action='store_true', default=False)
|
697
732
|
debug.add_argument('--show_ram_every_n_seconds', help='Show RAM usage every n seconds (0 = disabled)', type=int, default=0)
|
698
733
|
debug.add_argument('--show_generation_and_submission_sixel', help='Show sixel plots for generation and submission times', action='store_true', default=False)
|
734
|
+
debug.add_argument('--just_return_defaults', help='Just return defaults in dryrun', action='store_true', default=False)
|
735
|
+
debug.add_argument('--prettyprint', help='Shows stdout and stderr in a pretty printed format', action='store_true', default=False)
|
699
736
|
|
700
737
|
@beartype
|
701
738
|
def load_config(self: Any, config_path: str, file_format: str) -> dict:
|
@@ -782,8 +819,50 @@ class ConfigLoader:
|
|
782
819
|
|
783
820
|
_args = self.merge_args_with_config(config, _args)
|
784
821
|
|
822
|
+
if _args.dryrun:
|
823
|
+
print_yellow("--dryrun activated. This job will try to run only one job which should be running quickly.")
|
824
|
+
|
825
|
+
print_yellow("Setting max_eval to 1, ignoring your settings")
|
826
|
+
set_max_eval(1)
|
827
|
+
|
828
|
+
print_yellow("Setting random steps to 0")
|
829
|
+
set_random_steps(0)
|
830
|
+
|
831
|
+
print_yellow("Using generation strategy HUMAN_INTERVENTION_MINIMUM")
|
832
|
+
set_global_gs_to_HUMAN_INTERVENTION_MINIMUM()
|
833
|
+
|
834
|
+
print_yellow("Setting --force_local_execution to disable SLURM")
|
835
|
+
_args.force_local_execution = True
|
836
|
+
|
837
|
+
print_yellow("Disabling TQDM")
|
838
|
+
_args.disable_tqdm = True
|
839
|
+
|
840
|
+
print_yellow("Enabling pretty-print")
|
841
|
+
_args.prettyprint = True
|
842
|
+
|
843
|
+
if _args.live_share:
|
844
|
+
print_yellow("Disabling live-share")
|
845
|
+
_args.live_share = False
|
846
|
+
|
785
847
|
return _args
|
786
848
|
|
849
|
+
@beartype
|
850
|
+
def set_global_gs_to_HUMAN_INTERVENTION_MINIMUM() -> None:
|
851
|
+
global prepared_setting_to_custom
|
852
|
+
|
853
|
+
if not prepared_setting_to_custom:
|
854
|
+
prepared_setting_to_custom = True
|
855
|
+
return
|
856
|
+
|
857
|
+
global global_gs
|
858
|
+
|
859
|
+
node = InteractiveCLIGenerationNode()
|
860
|
+
|
861
|
+
global_gs = GenerationStrategy(
|
862
|
+
name="HUMAN_INTERVENTION_MINIMUM",
|
863
|
+
nodes=[node]
|
864
|
+
)
|
865
|
+
|
787
866
|
with console.status("[bold green]Parsing arguments..."):
|
788
867
|
loader = ConfigLoader()
|
789
868
|
args = loader.parse_arguments()
|
@@ -1238,6 +1317,217 @@ def warn_if_param_outside_of_valid_params(param: dict, _res: Any, keyname: str)
|
|
1238
1317
|
if _res != param["value"]:
|
1239
1318
|
print_yellow(f"The result by the external generator for the axis '{keyname}' (FIXED) is not the specified fixed value '{param['value']}' {_res}")
|
1240
1319
|
|
1320
|
+
# ────────────────────────────────────────────────────────────────────────────
|
1321
|
+
@dataclass(init=False)
|
1322
|
+
class InteractiveCLIGenerationNode(ExternalGenerationNode):
|
1323
|
+
"""
|
1324
|
+
A GenerationNode that queries the user on the command line (via *rich*)
|
1325
|
+
for the next candidate hyper‑parameter set instead of spawning an external
|
1326
|
+
program. All prompts come pre‑filled with sensible defaults:
|
1327
|
+
|
1328
|
+
• If the parameter name matches a key in `_DEFAULT_SPECIALS`, the associated
|
1329
|
+
value is used:
|
1330
|
+
– literal ``"min"`` → lower bound of the RangeParameter
|
1331
|
+
– any other literal → taken verbatim
|
1332
|
+
|
1333
|
+
• Otherwise:
|
1334
|
+
– RangeParameter(INT/FLOAT) ⇒ midpoint (cast to int for INT)
|
1335
|
+
– ChoiceParameter ⇒ first element of ``param.values``
|
1336
|
+
– FixedParameter ⇒ its fixed value (prompt is skipped)
|
1337
|
+
|
1338
|
+
The user can simply press *Enter* to accept the default or type a new
|
1339
|
+
value (validated & casted to the correct type automatically).
|
1340
|
+
"""
|
1341
|
+
seed: int
|
1342
|
+
parameters: Optional[Dict[str, Any]]
|
1343
|
+
minimize: Optional[bool]
|
1344
|
+
data: Optional[Any]
|
1345
|
+
constraints: Optional[Any]
|
1346
|
+
fit_time_since_gen: float
|
1347
|
+
|
1348
|
+
# ────────────────────────────────────────────────────────────────────
|
1349
|
+
@beartype
|
1350
|
+
def __init__( # identical signature to the original class
|
1351
|
+
self: Any,
|
1352
|
+
node_name: str = "INTERACTIVE_GENERATOR",
|
1353
|
+
) -> None:
|
1354
|
+
t0 = time.monotonic()
|
1355
|
+
super().__init__(node_name=node_name)
|
1356
|
+
self.parameters = None
|
1357
|
+
self.minimize = None
|
1358
|
+
self.data = None
|
1359
|
+
self.constraints = None
|
1360
|
+
self.seed = int(time.time()) # deterministic seeds are pointless here
|
1361
|
+
self.fit_time_since_gen = time.monotonic() - t0
|
1362
|
+
|
1363
|
+
# ────────────────────────────────────────────────────────────────────
|
1364
|
+
@beartype
|
1365
|
+
def update_generator_state(self: Any, experiment: Any, data: Any) -> None:
|
1366
|
+
self.data = data
|
1367
|
+
search_space = experiment.search_space
|
1368
|
+
self.parameters = search_space.parameters
|
1369
|
+
self.constraints = search_space.parameter_constraints
|
1370
|
+
|
1371
|
+
# ────────────────────────────────────────────────────────────────────
|
1372
|
+
@staticmethod
|
1373
|
+
def _ptype_to_str(param_type: Any) -> str:
|
1374
|
+
return {
|
1375
|
+
ParameterType.INT: "INT",
|
1376
|
+
ParameterType.FLOAT: "FLOAT",
|
1377
|
+
ParameterType.STRING: "STRING",
|
1378
|
+
}.get(param_type, "<UNKNOWN>")
|
1379
|
+
|
1380
|
+
# ────────────────────────────────────────────────────────────────────
|
1381
|
+
@beartype
|
1382
|
+
def _default_for_param(self: Any, name: str, param: Any) -> Any:
|
1383
|
+
# 1. explicit override
|
1384
|
+
if name.lower() in _DEFAULT_SPECIALS:
|
1385
|
+
override = _DEFAULT_SPECIALS[name.lower()]
|
1386
|
+
if override == "min" and isinstance(param, RangeParameter):
|
1387
|
+
return param.lower
|
1388
|
+
return override
|
1389
|
+
|
1390
|
+
# 2. generic rules
|
1391
|
+
if isinstance(param, FixedParameter):
|
1392
|
+
return param.value
|
1393
|
+
if isinstance(param, RangeParameter):
|
1394
|
+
mid = (param.lower + param.upper) / 2
|
1395
|
+
return int(mid) if param.parameter_type == ParameterType.INT else mid
|
1396
|
+
if isinstance(param, ChoiceParameter):
|
1397
|
+
return param.values[0]
|
1398
|
+
|
1399
|
+
# fall back
|
1400
|
+
return None
|
1401
|
+
|
1402
|
+
# ────────────────────────────────────────────────────────────────────
|
1403
|
+
@beartype
|
1404
|
+
def _ask_user(self: Any, name: str, param: Any, default: Any) -> Any:
|
1405
|
+
if args.just_return_defaults:
|
1406
|
+
print_yellow("Just returning defaults for _ask_user in InteractiveCLIGenerationNode")
|
1407
|
+
return default
|
1408
|
+
|
1409
|
+
if not console.is_terminal:
|
1410
|
+
print_red(f"Cannot prompt for {name!r}: no interactive terminal attached.")
|
1411
|
+
return default
|
1412
|
+
|
1413
|
+
prompt_msg = f"{name} ({self._ptype_to_str(param.parameter_type)})"
|
1414
|
+
|
1415
|
+
try:
|
1416
|
+
return self._handle_fixed(param, prompt_msg)
|
1417
|
+
except TypeError:
|
1418
|
+
pass
|
1419
|
+
except Exception as e:
|
1420
|
+
print_red(f"Error #1: {e}")
|
1421
|
+
|
1422
|
+
try:
|
1423
|
+
return self._handle_choice(param, default, prompt_msg)
|
1424
|
+
except TypeError:
|
1425
|
+
pass
|
1426
|
+
except Exception as e:
|
1427
|
+
print_red(f"Error #2: {e}")
|
1428
|
+
|
1429
|
+
try:
|
1430
|
+
return self._handle_range(param, default, prompt_msg)
|
1431
|
+
except TypeError:
|
1432
|
+
pass
|
1433
|
+
except Exception as e:
|
1434
|
+
print_red(f"Error #3: {e}")
|
1435
|
+
|
1436
|
+
return self._handle_fallback(prompt_msg, default)
|
1437
|
+
|
1438
|
+
@beartype
|
1439
|
+
def _handle_fixed(self, param: Any, prompt_msg: str) -> Any:
|
1440
|
+
if isinstance(param, FixedParameter):
|
1441
|
+
console.print(f"[yellow]{prompt_msg} is FIXED at {param.value} → skipping prompt.[/]")
|
1442
|
+
return param.value
|
1443
|
+
raise TypeError("Not a FixedParameter")
|
1444
|
+
|
1445
|
+
@beartype
|
1446
|
+
def _handle_choice(self, param: Any, default: Any, prompt_msg: str) -> Any:
|
1447
|
+
if not isinstance(param, ChoiceParameter):
|
1448
|
+
raise TypeError("Not a ChoiceParameter")
|
1449
|
+
|
1450
|
+
choices_str = ", ".join(f"{v}" for v in param.values)
|
1451
|
+
console.print(f"{prompt_msg} choices → {choices_str}")
|
1452
|
+
user_val = Prompt.ask("Pick choice", default=str(default))
|
1453
|
+
return param.values[int(user_val)] if user_val.isdigit() else user_val
|
1454
|
+
|
1455
|
+
@beartype
|
1456
|
+
def _handle_range(self, param: Any, default: Any, prompt_msg: str) -> Any:
|
1457
|
+
if not isinstance(param, RangeParameter):
|
1458
|
+
raise TypeError("Not a RangeParameter")
|
1459
|
+
|
1460
|
+
low, high = param.lower, param.upper
|
1461
|
+
console.print(f"{prompt_msg} range → [{low}, {high}]")
|
1462
|
+
|
1463
|
+
if param.parameter_type == ParameterType.FLOAT:
|
1464
|
+
user_val = FloatPrompt.ask("Enter float", default=str(default))
|
1465
|
+
try:
|
1466
|
+
val = float(user_val)
|
1467
|
+
except ValueError:
|
1468
|
+
val = default
|
1469
|
+
else:
|
1470
|
+
user_val = IntPrompt.ask("Enter int", default=str(default))
|
1471
|
+
try:
|
1472
|
+
val = int(user_val)
|
1473
|
+
except ValueError:
|
1474
|
+
val = default
|
1475
|
+
|
1476
|
+
return min(max(val, low), high)
|
1477
|
+
|
1478
|
+
@beartype
|
1479
|
+
def _handle_fallback(self, prompt_msg: str, default: Any) -> Any:
|
1480
|
+
return Prompt.ask(prompt_msg, default=str(default))
|
1481
|
+
|
1482
|
+
# ────────────────────────────────────────────────────────────────────
|
1483
|
+
@beartype
|
1484
|
+
def get_next_candidate(
|
1485
|
+
self: Any,
|
1486
|
+
pending_parameters: List[TParameterization],
|
1487
|
+
) -> Dict[str, Any]:
|
1488
|
+
"""
|
1489
|
+
Build the next candidate by querying the user for **each** parameter.
|
1490
|
+
Raises RuntimeError if `update_generator_state` has not been called.
|
1491
|
+
"""
|
1492
|
+
if self.parameters is None:
|
1493
|
+
raise RuntimeError(
|
1494
|
+
"Parameters are not initialized – call update_generator_state() first."
|
1495
|
+
)
|
1496
|
+
|
1497
|
+
console.rule("[bold magenta]Next Candidate[/]")
|
1498
|
+
|
1499
|
+
candidate: Dict[str, Any] = {}
|
1500
|
+
for name, param in self.parameters.items():
|
1501
|
+
default_val = self._default_for_param(name, param)
|
1502
|
+
value = self._ask_user(name, param, default_val)
|
1503
|
+
candidate[name] = value
|
1504
|
+
|
1505
|
+
# ── simple constraint check (optional) ──────────────────────────
|
1506
|
+
|
1507
|
+
if self.constraints:
|
1508
|
+
console.rule("[bold magenta]Checking constraints[/]")
|
1509
|
+
violations = [
|
1510
|
+
c
|
1511
|
+
for c in self.constraints
|
1512
|
+
if not c.check(candidate) # Ax Constraint objects support .check
|
1513
|
+
]
|
1514
|
+
if violations:
|
1515
|
+
console.print(
|
1516
|
+
"[red]WARNING:[/] The candidate violates "
|
1517
|
+
f"{len(violations)} constraint(s): {violations}"
|
1518
|
+
)
|
1519
|
+
|
1520
|
+
# show summary table
|
1521
|
+
tbl = Table(title="Chosen Hyper‑Parameters", show_lines=True)
|
1522
|
+
tbl.add_column("Name", style="cyan", no_wrap=True)
|
1523
|
+
tbl.add_column("Value", style="green")
|
1524
|
+
for k, v in candidate.items():
|
1525
|
+
tbl.add_row(k, str(v))
|
1526
|
+
console.print(tbl)
|
1527
|
+
|
1528
|
+
console.rule()
|
1529
|
+
return candidate
|
1530
|
+
|
1241
1531
|
@dataclass(init=False)
|
1242
1532
|
class ExternalProgramGenerationNode(ExternalGenerationNode):
|
1243
1533
|
@beartype
|
@@ -1757,14 +2047,6 @@ def set_nr_inserted_jobs(new_nr_inserted_jobs: int) -> None:
|
|
1757
2047
|
|
1758
2048
|
NR_INSERTED_JOBS = new_nr_inserted_jobs
|
1759
2049
|
|
1760
|
-
@beartype
|
1761
|
-
def set_max_eval(new_max_eval: int) -> None:
|
1762
|
-
global max_eval
|
1763
|
-
|
1764
|
-
print_debug(f"set_max_eval({new_max_eval})")
|
1765
|
-
|
1766
|
-
max_eval = new_max_eval
|
1767
|
-
|
1768
2050
|
@beartype
|
1769
2051
|
def write_worker_usage() -> None:
|
1770
2052
|
if len(WORKER_PERCENTAGE_USAGE):
|
@@ -3555,6 +3837,54 @@ def _evaluate_handle_result(
|
|
3555
3837
|
|
3556
3838
|
return final_result
|
3557
3839
|
|
3840
|
+
@beartype
|
3841
|
+
def pretty_process_output(stdout_path: str, stderr_path: str, exit_code: Optional[int]) -> None:
|
3842
|
+
global console
|
3843
|
+
|
3844
|
+
console = Console(
|
3845
|
+
force_interactive=True,
|
3846
|
+
soft_wrap=True,
|
3847
|
+
color_system="256",
|
3848
|
+
force_terminal=not ci_env,
|
3849
|
+
width=max(200, terminal_width)
|
3850
|
+
)
|
3851
|
+
|
3852
|
+
def _read(p: str) -> Optional[str]:
|
3853
|
+
try:
|
3854
|
+
return Path(p).read_text(encoding="utf-8", errors="replace")
|
3855
|
+
except FileNotFoundError:
|
3856
|
+
print_debug(f"[file not found: {p}]")
|
3857
|
+
|
3858
|
+
return None
|
3859
|
+
|
3860
|
+
stdout_txt = _read(stdout_path)
|
3861
|
+
stderr_txt = _read(stderr_path)
|
3862
|
+
|
3863
|
+
# -------- header -------- #
|
3864
|
+
outcome = "SUCCESS" if (exit_code is not None and exit_code == 0) else "FAILURE"
|
3865
|
+
header_style = "bold white on green" if exit_code == 0 else "bold white on red"
|
3866
|
+
console.rule(Text(f" {outcome} (exit {exit_code}) ", style=header_style))
|
3867
|
+
|
3868
|
+
def is_nonempty(s: str) -> bool:
|
3869
|
+
return bool(s and s.strip())
|
3870
|
+
|
3871
|
+
# TODO: Shows empty stuff here as well; it shouldn't. Especially stderr.
|
3872
|
+
if is_nonempty(stdout_txt):
|
3873
|
+
print("\n")
|
3874
|
+
console.print(
|
3875
|
+
Panel(stdout_txt, title="STDOUT", border_style="cyan", padding=(0, 1))
|
3876
|
+
)
|
3877
|
+
|
3878
|
+
if is_nonempty(stderr_txt):
|
3879
|
+
print("\n")
|
3880
|
+
console.print(
|
3881
|
+
Panel(stderr_txt, title="STDERR", border_style="magenta", padding=(0, 1))
|
3882
|
+
)
|
3883
|
+
|
3884
|
+
if not (is_nonempty(stdout_txt) or is_nonempty(stderr_txt)):
|
3885
|
+
print("\n")
|
3886
|
+
console.print("[dim]No output captured.[/dim]")
|
3887
|
+
|
3558
3888
|
@beartype
|
3559
3889
|
def evaluate(parameters: dict) -> Optional[Union[int, float, Dict[str, Optional[Union[int, float, Tuple]]], List[float]]]:
|
3560
3890
|
start_nvidia_smi_thread()
|
@@ -4254,9 +4584,12 @@ def show_end_table_and_save_end_files() -> int:
|
|
4254
4584
|
|
4255
4585
|
display_failed_jobs_table()
|
4256
4586
|
|
4587
|
+
best_result_exit = 0
|
4588
|
+
|
4257
4589
|
best_result_exit: int = print_best_result()
|
4258
4590
|
|
4259
|
-
|
4591
|
+
if not args.dryrun:
|
4592
|
+
print_evaluate_times()
|
4260
4593
|
|
4261
4594
|
if best_result_exit > 0:
|
4262
4595
|
_exit = best_result_exit
|
@@ -4303,6 +4636,10 @@ def abandon_all_jobs() -> None:
|
|
4303
4636
|
|
4304
4637
|
@beartype
|
4305
4638
|
def show_pareto_or_error_msg(path_to_calculate: str, res_names: list = arg_result_names, disable_sixel_and_table: bool = False) -> None:
|
4639
|
+
if args.dryrun:
|
4640
|
+
print_debug("Not showing pareto-frontier data with --dryrun")
|
4641
|
+
return None
|
4642
|
+
|
4306
4643
|
if len(res_names) > 1:
|
4307
4644
|
try:
|
4308
4645
|
show_pareto_frontier_data(path_to_calculate, res_names, disable_sixel_and_table)
|
@@ -4310,6 +4647,7 @@ def show_pareto_or_error_msg(path_to_calculate: str, res_names: list = arg_resul
|
|
4310
4647
|
print_red(f"show_pareto_frontier_data() failed with exception '{e}'")
|
4311
4648
|
else:
|
4312
4649
|
print_debug(f"show_pareto_frontier_data will NOT be executed because len(arg_result_names) is {len(arg_result_names)}")
|
4650
|
+
return None
|
4313
4651
|
|
4314
4652
|
@beartype
|
4315
4653
|
def end_program(_force: Optional[bool] = False, exit_code: Optional[int] = None) -> None:
|
@@ -6349,6 +6687,7 @@ def _finish_previous_jobs_helper_handle_failed_job(job: Any, trial_index: int) -
|
|
6349
6687
|
print(f"ERROR in line {get_line_info()}: {e}")
|
6350
6688
|
job.cancel()
|
6351
6689
|
orchestrate_job(job, trial_index)
|
6690
|
+
|
6352
6691
|
failed_jobs(1)
|
6353
6692
|
print_debug(f"finish_previous_jobs: removing job {job}, trial_index: {trial_index}")
|
6354
6693
|
|
@@ -6373,6 +6712,9 @@ def _finish_previous_jobs_helper_handle_exception(job: Any, trial_index: int, er
|
|
6373
6712
|
def _finish_previous_jobs_helper_process_job(job: Any, trial_index: int, this_jobs_finished: int) -> int:
|
6374
6713
|
try:
|
6375
6714
|
this_jobs_finished = finish_job_core(job, trial_index, this_jobs_finished)
|
6715
|
+
|
6716
|
+
if args.prettyprint:
|
6717
|
+
pretty_print_job_output(job)
|
6376
6718
|
except (SignalINT, SignalUSR, SignalCONT) as e:
|
6377
6719
|
print_red(f"Cancelled finish_job_core: {e}")
|
6378
6720
|
except (FileNotFoundError, submitit.core.utils.UncompletedJobError, ax.exceptions.core.UserInputError) as error:
|
@@ -6509,21 +6851,53 @@ def _check_orchestrator_find_behaviors(stdout: str, errors: List[Dict[str, Any]]
|
|
6509
6851
|
return behaviors
|
6510
6852
|
|
6511
6853
|
@beartype
|
6512
|
-
def
|
6513
|
-
|
6514
|
-
|
6854
|
+
def get_exit_code_from_stderr_or_stdout_path(stderr_path: str, stdout_path: str) -> Optional[int]:
|
6855
|
+
def extract_last_exit_code(path: str) -> Optional[int]:
|
6856
|
+
try:
|
6857
|
+
with open(path, "r", encoding="utf-8", errors="ignore") as f:
|
6858
|
+
matches = re.findall(r"EXIT_CODE:\s*(-?\d+)", f.read())
|
6859
|
+
if matches:
|
6860
|
+
return int(matches[-1])
|
6861
|
+
except Exception:
|
6862
|
+
pass
|
6863
|
+
return None
|
6864
|
+
|
6865
|
+
code = extract_last_exit_code(stdout_path)
|
6866
|
+
if code is not None:
|
6867
|
+
return code
|
6515
6868
|
|
6516
|
-
|
6517
|
-
stdout_path = stdout_path.rstrip('\r\n')
|
6518
|
-
stdout_path = stdout_path.rstrip('\n')
|
6519
|
-
stdout_path = stdout_path.rstrip('\r')
|
6520
|
-
stdout_path = stdout_path.rstrip(' ')
|
6869
|
+
return extract_last_exit_code(stderr_path)
|
6521
6870
|
|
6522
|
-
|
6523
|
-
|
6524
|
-
|
6525
|
-
stderr_path =
|
6526
|
-
|
6871
|
+
@beartype
|
6872
|
+
def pretty_print_job_output(job: Job) -> None:
|
6873
|
+
stdout_path = get_stderr_or_stdout_from_job(job, "stdout")
|
6874
|
+
stderr_path = get_stderr_or_stdout_from_job(job, "stderr")
|
6875
|
+
exit_code = get_exit_code_from_stderr_or_stdout_path(stderr_path, stdout_path)
|
6876
|
+
|
6877
|
+
pretty_process_output(stdout_path, stderr_path, exit_code)
|
6878
|
+
|
6879
|
+
@beartype
|
6880
|
+
def get_stderr_or_stdout_from_job(job: Job, path_type: str) -> str:
|
6881
|
+
if path_type == "stderr":
|
6882
|
+
_path = str(job.paths.stderr.resolve())
|
6883
|
+
elif path_type == "stdout":
|
6884
|
+
_path = str(job.paths.stdout.resolve())
|
6885
|
+
else:
|
6886
|
+
print_red(f"ERROR: path_type {path_type} was neither stdout nor stderr. Using stdout")
|
6887
|
+
_path = str(job.paths.stdout.resolve())
|
6888
|
+
|
6889
|
+
_path = _path.replace('\n', ' ').replace('\r', '')
|
6890
|
+
_path = _path.rstrip('\r\n')
|
6891
|
+
_path = _path.rstrip('\n')
|
6892
|
+
_path = _path.rstrip('\r')
|
6893
|
+
_path = _path.rstrip(' ')
|
6894
|
+
|
6895
|
+
return _path
|
6896
|
+
|
6897
|
+
@beartype
|
6898
|
+
def orchestrate_job(job: Job, trial_index: int) -> None:
|
6899
|
+
stdout_path = get_stderr_or_stdout_from_job(job, "stdout")
|
6900
|
+
stderr_path = get_stderr_or_stdout_from_job(job, "stderr")
|
6527
6901
|
|
6528
6902
|
print_outfile_analyzed(stdout_path)
|
6529
6903
|
print_outfile_analyzed(stderr_path)
|
@@ -7208,11 +7582,12 @@ def save_table_as_text(table: Table, filepath: str) -> None:
|
|
7208
7582
|
|
7209
7583
|
@beartype
|
7210
7584
|
def show_time_debugging_table() -> None:
|
7211
|
-
|
7212
|
-
|
7213
|
-
|
7585
|
+
if not args.dryrun:
|
7586
|
+
generate_time_table_rich()
|
7587
|
+
generate_job_submit_table_rich()
|
7588
|
+
plot_times_for_creation_and_submission()
|
7214
7589
|
|
7215
|
-
|
7590
|
+
live_share()
|
7216
7591
|
|
7217
7592
|
@beartype
|
7218
7593
|
def generate_time_table_rich() -> None:
|
@@ -8082,7 +8457,8 @@ def get_number_of_steps(_max_eval: int) -> Tuple[int, int]:
|
|
8082
8457
|
if second_step_steps != original_second_steps:
|
8083
8458
|
original_print(f"? original_second_steps: {original_second_steps} = max_eval {_max_eval} - _random_steps {_random_steps}")
|
8084
8459
|
if second_step_steps == 0:
|
8085
|
-
|
8460
|
+
if not args.dryrun:
|
8461
|
+
print_yellow("This is basically a random search. Increase --max_eval or reduce --num_random_steps")
|
8086
8462
|
|
8087
8463
|
second_step_steps = second_step_steps - already_done_random_steps
|
8088
8464
|
|
@@ -9496,6 +9872,9 @@ def main() -> None:
|
|
9496
9872
|
|
9497
9873
|
set_global_generation_strategy()
|
9498
9874
|
|
9875
|
+
if args.dryrun:
|
9876
|
+
set_global_gs_to_HUMAN_INTERVENTION_MINIMUM()
|
9877
|
+
|
9499
9878
|
initialize_ax_client()
|
9500
9879
|
|
9501
9880
|
with console.status("[bold green]Getting experiment parameters..."):
|
@@ -9580,14 +9959,6 @@ def write_ui_url_if_present() -> None:
|
|
9580
9959
|
with open(f"{get_current_run_folder()}/ui_url.txt", mode="a", encoding="utf-8") as myfile:
|
9581
9960
|
myfile.write(decode_if_base64(args.ui_url))
|
9582
9961
|
|
9583
|
-
@beartype
|
9584
|
-
def set_random_steps(new_steps: int) -> None:
|
9585
|
-
global random_steps
|
9586
|
-
|
9587
|
-
print_debug(f"Setting random_steps from {random_steps} to {new_steps}")
|
9588
|
-
|
9589
|
-
random_steps = new_steps
|
9590
|
-
|
9591
9962
|
@beartype
|
9592
9963
|
def handle_random_steps() -> None:
|
9593
9964
|
with console.status("[bold green]Handling random steps..."):
|