omniopt2 7202__py3-none-any.whl → 7239__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- .omniopt.py +403 -39
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt.py +403 -39
- {omniopt2-7202.dist-info → omniopt2-7239.dist-info}/METADATA +1 -1
- {omniopt2-7202.dist-info → omniopt2-7239.dist-info}/RECORD +35 -35
- omniopt2.egg-info/PKG-INFO +1 -1
- pyproject.toml +1 -1
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.colorfunctions.sh +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.general.sh +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.helpers.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_cpu_ram_usage.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_general.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_gpu_usage.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_kde.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_scatter.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_scatter_generation_method.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_scatter_hex.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_time_and_exit_code.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_trial_index_result.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.omniopt_plot_worker.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.random_generator.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.shellscript_functions +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/.tpe.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/LICENSE +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/apt-dependencies.txt +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/omniopt +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/omniopt_docker +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/omniopt_evaluate +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/omniopt_plot +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/omniopt_share +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/requirements.txt +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/setup.py +0 -0
- {omniopt2-7202.data → omniopt2-7239.data}/data/bin/test_requirements.txt +0 -0
- {omniopt2-7202.dist-info → omniopt2-7239.dist-info}/WHEEL +0 -0
- {omniopt2-7202.dist-info → omniopt2-7239.dist-info}/licenses/LICENSE +0 -0
- {omniopt2-7202.dist-info → omniopt2-7239.dist-info}/top_level.txt +0 -0
.omniopt.py
CHANGED
@@ -18,6 +18,7 @@ import statistics
 import tempfile
 import threading
 
+prepared_setting_to_custom: bool = False
 whole_start_time: float = time.time()
 last_progress_bar_desc: str = ""
 job_submit_durations: list[float] = []
@@ -54,6 +55,9 @@ figlet_loaded: bool = False
 try:
     from rich.console import Console
 
+    from rich.panel import Panel
+    from rich.text import Text
+
     terminal_width = 150
 
     try:
@@ -61,7 +65,7 @@ try:
     except OSError:
         pass
 
-    console
+    console = Console(
         force_interactive=True,
         soft_wrap=True,
         color_system="256",
@@ -127,15 +131,15 @@ try:
     with console.status("[bold green]Importing rich.table..."):
         from rich.table import Table
 
-    with console.status("[bold green]Importing rich.text..."):
-        from rich.text import Text
-
     with console.status("[bold green]Importing rich print..."):
         from rich import print
 
     with console.status("[bold green]Importing rich.pretty..."):
         from rich.pretty import pprint
 
+    with console.status("[bold green]Importing rich.prompt..."):
+        from rich.prompt import Prompt, FloatPrompt, IntPrompt
+
     with console.status("[bold green]Importing fcntl..."):
         import fcntl
 
@@ -485,6 +489,33 @@ def get_min_max_from_file(continue_path: str, n: int, _default_min_max: str) ->
     print_yellow(f"Line {n} did not contain min/max, will be set to {_default_min_max}")
     return _default_min_max
 
+@beartype
+def set_max_eval(new_max_eval: int) -> None:
+    global max_eval
+
+    print_debug(f"set_max_eval({new_max_eval})")
+
+    max_eval = new_max_eval
+
+@beartype
+def set_random_steps(new_steps: int) -> None:
+    global random_steps
+
+    print_debug(f"Setting random_steps from {random_steps} to {new_steps}")
+
+    random_steps = new_steps
+
+_DEFAULT_SPECIALS: Dict[str, Any] = {
+    "epochs": 1,
+    "epoch": 1,
+    "steps": 1,
+    "batchsize": 1,
+    "batchsz": 1,
+    "bs": 1,
+    "lr": "min",
+    "learning_rate": "min",
+}
+
 class ConfigLoader:
     disable_previous_job_constraint: bool
     run_tests_that_fail_on_taurus: bool
@@ -559,6 +590,7 @@ class ConfigLoader:
     load_data_from_existing_jobs: List[str]
     time: str
     share_password: Optional[str]
+    prettyprint: bool
     generate_all_jobs_at_once: bool
     result_names: Optional[List[str]]
     verbose_break_run_search_table: bool
@@ -576,6 +608,8 @@ class ConfigLoader:
     no_transform_inputs: bool
     occ: bool
     force_choice_for_ranges: bool
+    dryrun: bool
+    just_return_defaults: bool
     run_mode: str
 
     @beartype
@@ -655,6 +689,7 @@ class ConfigLoader:
         optional.add_argument('--force_choice_for_ranges', help='Force float ranges to be converted to choice', action='store_true', default=False)
         optional.add_argument('--max_abandoned_retrial', help='Maximum number retrials to get when a job is abandoned post-generation', default=20, type=int)
         optional.add_argument('--share_password', help='Use this as a password for share. Default is none.', default=None, type=str)
+        optional.add_argument('--dryrun', help='Try to do a dry run, i.e. a run for very short running jobs to test the installation of OmniOpt2 and check if environment stuff and paths and so on works properly', action='store_true', default=False)
 
         speed.add_argument('--dont_warm_start_refitting', help='Do not keep Model weights, thus, refit for every generator (may be more accurate, but slower)', action='store_true', default=False)
         speed.add_argument('--refit_on_cv', help='Refit on Cross-Validation (helps in accuracy, but makes generating new points slower)', action='store_true', default=False)
@@ -696,6 +731,8 @@ class ConfigLoader:
         debug.add_argument('--raise_in_eval', help='Raise a signal in eval (only useful for debugging and testing)', action='store_true', default=False)
         debug.add_argument('--show_ram_every_n_seconds', help='Show RAM usage every n seconds (0 = disabled)', type=int, default=0)
         debug.add_argument('--show_generation_and_submission_sixel', help='Show sixel plots for generation and submission times', action='store_true', default=False)
+        debug.add_argument('--just_return_defaults', help='Just return defaults in dryrun', action='store_true', default=False)
+        debug.add_argument('--prettyprint', help='Shows stdout and stderr in a pretty printed format', action='store_true', default=False)
 
     @beartype
     def load_config(self: Any, config_path: str, file_format: str) -> dict:
@@ -782,8 +819,50 @@ class ConfigLoader:
 
         _args = self.merge_args_with_config(config, _args)
 
+        if _args.dryrun:
+            print_yellow("--dryrun activated. This job will try to run only one job which should be running quickly.")
+
+            print_yellow("Setting max_eval to 1, ignoring your settings")
+            set_max_eval(1)
+
+            print_yellow("Setting random steps to 0")
+            set_random_steps(0)
+
+            print_yellow("Using generation strategy HUMAN_INTERVENTION_MINIMUM")
+            set_global_gs_to_HUMAN_INTERVENTION_MINIMUM()
+
+            print_yellow("Setting --force_local_execution to disable SLURM")
+            _args.force_local_execution = True
+
+            print_yellow("Disabling TQDM")
+            _args.disable_tqdm = True
+
+            print_yellow("Enabling pretty-print")
+            _args.prettyprint = True
+
+            if _args.live_share:
+                print_yellow("Disabling live-share")
+                _args.live_share = False
+
         return _args
 
+@beartype
+def set_global_gs_to_HUMAN_INTERVENTION_MINIMUM() -> None:
+    global prepared_setting_to_custom
+
+    if not prepared_setting_to_custom:
+        prepared_setting_to_custom = True
+        return
+
+    global global_gs
+
+    node = InteractiveCLIGenerationNode()
+
+    global_gs = GenerationStrategy(
+        name="HUMAN_INTERVENTION_MINIMUM",
+        nodes=[node]
+    )
+
 with console.status("[bold green]Parsing arguments..."):
     loader = ConfigLoader()
     args = loader.parse_arguments()
@@ -1238,6 +1317,210 @@ def warn_if_param_outside_of_valid_params(param: dict, _res: Any, keyname: str)
     if _res != param["value"]:
         print_yellow(f"The result by the external generator for the axis '{keyname}' (FIXED) is not the specified fixed value '{param['value']}' {_res}")
 
+@dataclass(init=False)
+class InteractiveCLIGenerationNode(ExternalGenerationNode):
+    """
+    A GenerationNode that queries the user on the command line (via *rich*)
+    for the next candidate hyper‑parameter set instead of spawning an external
+    program. All prompts come pre‑filled with sensible defaults:
+
+    • If the parameter name matches a key in `_DEFAULT_SPECIALS`, the associated
+      value is used:
+        – literal ``"min"`` → lower bound of the RangeParameter
+        – any other literal → taken verbatim
+
+    • Otherwise:
+        – RangeParameter(INT/FLOAT) ⇒ midpoint (cast to int for INT)
+        – ChoiceParameter ⇒ first element of ``param.values``
+        – FixedParameter ⇒ its fixed value (prompt is skipped)
+
+    The user can simply press *Enter* to accept the default or type a new
+    value (validated & casted to the correct type automatically).
+    """
+    seed: int
+    parameters: Optional[Dict[str, Any]]
+    minimize: Optional[bool]
+    data: Optional[Any]
+    constraints: Optional[Any]
+    fit_time_since_gen: float
+
+    @beartype
+    def __init__(  # identical signature to the original class
+        self: Any,
+        node_name: str = "INTERACTIVE_GENERATOR",
+    ) -> None:
+        t0 = time.monotonic()
+        super().__init__(node_name=node_name)
+        self.parameters = None
+        self.minimize = None
+        self.data = None
+        self.constraints = None
+        self.seed = int(time.time())  # deterministic seeds are pointless here
+        self.fit_time_since_gen = time.monotonic() - t0
+
+    @beartype
+    def update_generator_state(self: Any, experiment: Any, data: Any) -> None:
+        self.data = data
+        search_space = experiment.search_space
+        self.parameters = search_space.parameters
+        self.constraints = search_space.parameter_constraints
+
+    @staticmethod
+    def _ptype_to_str(param_type: Any) -> str:
+        return {
+            ParameterType.INT: "INT",
+            ParameterType.FLOAT: "FLOAT",
+            ParameterType.STRING: "STRING",
+        }.get(param_type, "<UNKNOWN>")
+
+    @beartype
+    def _default_for_param(self: Any, name: str, param: Any) -> Any:
+        # 1. explicit override
+        if name.lower() in _DEFAULT_SPECIALS:
+            override = _DEFAULT_SPECIALS[name.lower()]
+            if override == "min" and isinstance(param, RangeParameter):
+                return param.lower
+            return override
+
+        # 2. generic rules
+        if isinstance(param, FixedParameter):
+            return param.value
+        if isinstance(param, RangeParameter):
+            mid = (param.lower + param.upper) / 2
+            return int(mid) if param.parameter_type == ParameterType.INT else mid
+        if isinstance(param, ChoiceParameter):
+            return param.values[0]
+
+        # fall back
+        return None
+
+    @beartype
+    def _ask_user(self: Any, name: str, param: Any, default: Any) -> Any:
+        if args.just_return_defaults:
+            print_yellow("Just returning defaults for _ask_user in InteractiveCLIGenerationNode")
+            return default
+
+        if not console.is_terminal:
+            print_red(f"Cannot prompt for {name!r}: no interactive terminal attached.")
+            return default
+
+        prompt_msg = f"{name} ({self._ptype_to_str(param.parameter_type)})"
+
+        try:
+            return self._handle_fixed(param, prompt_msg)
+        except TypeError:
+            pass
+        except Exception as e:
+            print_red(f"Error #1: {e}")
+
+        try:
+            return self._handle_choice(param, default, prompt_msg)
+        except TypeError:
+            pass
+        except Exception as e:
+            print_red(f"Error #2: {e}")
+
+        try:
+            return self._handle_range(param, default, prompt_msg)
+        except TypeError:
+            pass
+        except Exception as e:
+            print_red(f"Error #3: {e}")
+
+        return self._handle_fallback(prompt_msg, default)
+
+    @beartype
+    def _handle_fixed(self, param: Any, prompt_msg: str) -> Any:
+        if isinstance(param, FixedParameter):
+            console.print(f"[yellow]{prompt_msg} is FIXED at {param.value} → skipping prompt.[/]")
+            return param.value
+        raise TypeError("Not a FixedParameter")
+
+    @beartype
+    def _handle_choice(self, param: Any, default: Any, prompt_msg: str) -> Any:
+        if not isinstance(param, ChoiceParameter):
+            raise TypeError("Not a ChoiceParameter")
+
+        choices_str = ", ".join(f"{v}" for v in param.values)
+        console.print(f"{prompt_msg} choices → {choices_str}")
+        user_val = Prompt.ask("Pick choice", default=str(default))
+        return param.values[int(user_val)] if user_val.isdigit() else user_val
+
+    @beartype
+    def _handle_range(self, param: Any, default: Any, prompt_msg: str) -> Any:
+        if not isinstance(param, RangeParameter):
+            raise TypeError("Not a RangeParameter")
+
+        low, high = param.lower, param.upper
+        console.print(f"{prompt_msg} range → [{low}, {high}]")
+
+        if param.parameter_type == ParameterType.FLOAT:
+            user_val = FloatPrompt.ask("Enter float", default=str(default))
+            try:
+                val = float(user_val)
+            except ValueError:
+                val = default
+        else:
+            user_val = IntPrompt.ask("Enter int", default=str(default))
+            try:
+                val = int(user_val)
+            except ValueError:
+                val = default
+
+        return min(max(val, low), high)
+
+    @beartype
+    def _handle_fallback(self, prompt_msg: str, default: Any) -> Any:
+        return Prompt.ask(prompt_msg, default=str(default))
+
+    @beartype
+    def get_next_candidate(
+        self: Any,
+        pending_parameters: List[TParameterization],
+    ) -> Dict[str, Any]:
+        """
+        Build the next candidate by querying the user for **each** parameter.
+        Raises RuntimeError if `update_generator_state` has not been called.
+        """
+        if self.parameters is None:
+            raise RuntimeError(
+                "Parameters are not initialized – call update_generator_state() first."
+            )
+
+        console.rule("[bold magenta]Next Candidate[/]")
+
+        candidate: Dict[str, Any] = {}
+        for name, param in self.parameters.items():
+            default_val = self._default_for_param(name, param)
+            value = self._ask_user(name, param, default_val)
+            candidate[name] = value
+
+        # ── simple constraint check (optional) ──────────────────────────
+
+        if self.constraints:
+            console.rule("[bold magenta]Checking constraints[/]")
+            violations = [
+                c
+                for c in self.constraints
+                if not c.check(candidate)  # Ax Constraint objects support .check
+            ]
+            if violations:
+                console.print(
+                    "[red]WARNING:[/] The candidate violates "
+                    f"{len(violations)} constraint(s): {violations}"
+                )
+
+        # show summary table
+        tbl = Table(title="Chosen Hyper‑Parameters", show_lines=True)
+        tbl.add_column("Name", style="cyan", no_wrap=True)
+        tbl.add_column("Value", style="green")
+        for k, v in candidate.items():
+            tbl.add_row(k, str(v))
+        console.print(tbl)
+
+        console.rule()
+        return candidate
+
 @dataclass(init=False)
 class ExternalProgramGenerationNode(ExternalGenerationNode):
     @beartype
@@ -1757,14 +2040,6 @@ def set_nr_inserted_jobs(new_nr_inserted_jobs: int) -> None:
 
     NR_INSERTED_JOBS = new_nr_inserted_jobs
 
-@beartype
-def set_max_eval(new_max_eval: int) -> None:
-    global max_eval
-
-    print_debug(f"set_max_eval({new_max_eval})")
-
-    max_eval = new_max_eval
-
 @beartype
 def write_worker_usage() -> None:
     if len(WORKER_PERCENTAGE_USAGE):
@@ -3555,6 +3830,54 @@ def _evaluate_handle_result(
 
     return final_result
 
+@beartype
+def pretty_process_output(stdout_path: str, stderr_path: str, exit_code: Optional[int]) -> None:
+    global console
+
+    console = Console(
+        force_interactive=True,
+        soft_wrap=True,
+        color_system="256",
+        force_terminal=not ci_env,
+        width=max(200, terminal_width)
+    )
+
+    def _read(p: str) -> Optional[str]:
+        try:
+            return Path(p).read_text(encoding="utf-8", errors="replace")
+        except FileNotFoundError:
+            print_debug(f"[file not found: {p}]")
+
+        return None
+
+    stdout_txt = _read(stdout_path)
+    stderr_txt = _read(stderr_path)
+
+    # -------- header -------- #
+    outcome = "SUCCESS" if (exit_code is not None and exit_code == 0) else "FAILURE"
+    header_style = "bold white on green" if exit_code == 0 else "bold white on red"
+    console.rule(Text(f" {outcome} (exit {exit_code}) ", style=header_style))
+
+    def is_nonempty(s: str) -> bool:
+        return bool(s and s.strip())
+
+    # TODO: Shows empty stuff here as well, shouldn not. Especially stderr.
+    if is_nonempty(stdout_txt):
+        print("\n")
+        console.print(
+            Panel(stdout_txt, title="STDOUT", border_style="cyan", padding=(0, 1))
+        )
+
+    if is_nonempty(stderr_txt):
+        print("\n")
+        console.print(
+            Panel(stderr_txt, title="STDERR", border_style="magenta", padding=(0, 1))
+        )
+
+    if not (is_nonempty(stdout_txt) or is_nonempty(stderr_txt)):
+        print("\n")
+        console.print("[dim]No output captured.[/dim]")
+
 @beartype
 def evaluate(parameters: dict) -> Optional[Union[int, float, Dict[str, Optional[Union[int, float, Tuple]]], List[float]]]:
     start_nvidia_smi_thread()
@@ -4254,9 +4577,12 @@ def show_end_table_and_save_end_files() -> int:
 
     display_failed_jobs_table()
 
+    best_result_exit = 0
+
     best_result_exit: int = print_best_result()
 
-
+    if not args.dryrun:
+        print_evaluate_times()
 
     if best_result_exit > 0:
         _exit = best_result_exit
@@ -4303,6 +4629,10 @@ def abandon_all_jobs() -> None:
 
 @beartype
 def show_pareto_or_error_msg(path_to_calculate: str, res_names: list = arg_result_names, disable_sixel_and_table: bool = False) -> None:
+    if args.dryrun:
+        print_debug("Not showing pareto-frontier data with --dryrun")
+        return None
+
     if len(res_names) > 1:
         try:
             show_pareto_frontier_data(path_to_calculate, res_names, disable_sixel_and_table)
@@ -4310,6 +4640,7 @@ def show_pareto_or_error_msg(path_to_calculate: str, res_names: list = arg_resul
             print_red(f"show_pareto_frontier_data() failed with exception '{e}'")
     else:
         print_debug(f"show_pareto_frontier_data will NOT be executed because len(arg_result_names) is {len(arg_result_names)}")
+    return None
 
 @beartype
 def end_program(_force: Optional[bool] = False, exit_code: Optional[int] = None) -> None:
@@ -6349,6 +6680,7 @@ def _finish_previous_jobs_helper_handle_failed_job(job: Any, trial_index: int) -
         print(f"ERROR in line {get_line_info()}: {e}")
         job.cancel()
         orchestrate_job(job, trial_index)
+
     failed_jobs(1)
     print_debug(f"finish_previous_jobs: removing job {job}, trial_index: {trial_index}")
 
@@ -6373,6 +6705,9 @@ def _finish_previous_jobs_helper_handle_exception(job: Any, trial_index: int, er
 def _finish_previous_jobs_helper_process_job(job: Any, trial_index: int, this_jobs_finished: int) -> int:
     try:
         this_jobs_finished = finish_job_core(job, trial_index, this_jobs_finished)
+
+        if args.prettyprint:
+            pretty_print_job_output(job)
     except (SignalINT, SignalUSR, SignalCONT) as e:
         print_red(f"Cancelled finish_job_core: {e}")
     except (FileNotFoundError, submitit.core.utils.UncompletedJobError, ax.exceptions.core.UserInputError) as error:
@@ -6509,21 +6844,53 @@ def _check_orchestrator_find_behaviors(stdout: str, errors: List[Dict[str, Any]]
     return behaviors
 
 @beartype
-def
-
-
+def get_exit_code_from_stderr_or_stdout_path(stderr_path: str, stdout_path: str) -> Optional[int]:
+    def extract_last_exit_code(path: str) -> Optional[int]:
+        try:
+            with open(path, "r", encoding="utf-8", errors="ignore") as f:
+                matches = re.findall(r"EXIT_CODE:\s*(-?\d+)", f.read())
+                if matches:
+                    return int(matches[-1])
+        except Exception:
+            pass
+        return None
+
+    code = extract_last_exit_code(stdout_path)
+    if code is not None:
+        return code
 
-
-    stdout_path = stdout_path.rstrip('\r\n')
-    stdout_path = stdout_path.rstrip('\n')
-    stdout_path = stdout_path.rstrip('\r')
-    stdout_path = stdout_path.rstrip(' ')
+    return extract_last_exit_code(stderr_path)
 
-
-
-
-    stderr_path =
-
+@beartype
+def pretty_print_job_output(job: Job) -> None:
+    stdout_path = get_stderr_or_stdout_from_job(job, "stdout")
+    stderr_path = get_stderr_or_stdout_from_job(job, "stderr")
+    exit_code = get_exit_code_from_stderr_or_stdout_path(stderr_path, stdout_path)
+
+    pretty_process_output(stdout_path, stderr_path, exit_code)
+
+@beartype
+def get_stderr_or_stdout_from_job(job: Job, path_type: str) -> str:
+    if path_type == "stderr":
+        _path = str(job.paths.stderr.resolve())
+    elif path_type == "stdout":
+        _path = str(job.paths.stdout.resolve())
+    else:
+        print_red(f"ERROR: path_type {path_type} was neither stdout nor stderr. Using stdout")
+        _path = str(job.paths.stdout.resolve())
+
+    _path = _path.replace('\n', ' ').replace('\r', '')
+    _path = _path.rstrip('\r\n')
+    _path = _path.rstrip('\n')
+    _path = _path.rstrip('\r')
+    _path = _path.rstrip(' ')
+
+    return _path
+
+@beartype
+def orchestrate_job(job: Job, trial_index: int) -> None:
+    stdout_path = get_stderr_or_stdout_from_job(job, "stdout")
+    stderr_path = get_stderr_or_stdout_from_job(job, "stderr")
 
     print_outfile_analyzed(stdout_path)
     print_outfile_analyzed(stderr_path)
@@ -7208,11 +7575,12 @@ def save_table_as_text(table: Table, filepath: str) -> None:
 
 @beartype
 def show_time_debugging_table() -> None:
-
-
-
+    if not args.dryrun:
+        generate_time_table_rich()
+        generate_job_submit_table_rich()
+        plot_times_for_creation_and_submission()
 
-
+    live_share()
 
 @beartype
 def generate_time_table_rich() -> None:
@@ -8082,7 +8450,8 @@ def get_number_of_steps(_max_eval: int) -> Tuple[int, int]:
     if second_step_steps != original_second_steps:
         original_print(f"? original_second_steps: {original_second_steps} = max_eval {_max_eval} - _random_steps {_random_steps}")
     if second_step_steps == 0:
-
+        if not args.dryrun:
+            print_yellow("This is basically a random search. Increase --max_eval or reduce --num_random_steps")
 
     second_step_steps = second_step_steps - already_done_random_steps
 
@@ -9496,6 +9865,9 @@ def main() -> None:
 
     set_global_generation_strategy()
 
+    if args.dryrun:
+        set_global_gs_to_HUMAN_INTERVENTION_MINIMUM()
+
     initialize_ax_client()
 
     with console.status("[bold green]Getting experiment parameters..."):
@@ -9580,14 +9952,6 @@ def write_ui_url_if_present() -> None:
     with open(f"{get_current_run_folder()}/ui_url.txt", mode="a", encoding="utf-8") as myfile:
         myfile.write(decode_if_base64(args.ui_url))
 
-@beartype
-def set_random_steps(new_steps: int) -> None:
-    global random_steps
-
-    print_debug(f"Setting random_steps from {random_steps} to {new_steps}")
-
-    random_steps = new_steps
-
 @beartype
 def handle_random_steps() -> None:
     with console.status("[bold green]Handling random steps..."):
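
Note: the interactive prompting added above is built on rich's Prompt classes. As a quick illustration of that pattern, here is a minimal standalone sketch (not OmniOpt2 code; the parameter names and bounds are invented): pressing Enter accepts the pre-filled default and the answer is clamped to the declared range, mirroring the new `_handle_range()`.

# Standalone sketch (not OmniOpt2 code): the rich-prompt pattern used by the
# new InteractiveCLIGenerationNode. Parameter names and bounds are invented.
from rich.console import Console
from rich.prompt import FloatPrompt, IntPrompt

console = Console()

def ask_range(name: str, low: float, high: float, default: float, is_int: bool) -> float:
    # Show the allowed range and pre-fill the default (Enter accepts it) ...
    console.print(f"{name} range → [{low}, {high}]")
    if is_int:
        val: float = IntPrompt.ask("Enter int", default=int(default))
    else:
        val = FloatPrompt.ask("Enter float", default=float(default))
    # ... then clamp the answer to the declared bounds, as _handle_range() does.
    return min(max(val, low), high)

if __name__ == "__main__":
    lr = ask_range("lr", 1e-5, 1e-1, default=1e-5, is_int=False)
    bs = ask_range("batchsize", 1, 256, default=1, is_int=True)
    console.print({"lr": lr, "batchsize": int(bs)})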
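
Similarly, the exit-code recovery added in `get_exit_code_from_stderr_or_stdout_path()` boils down to scanning the job logs for the last `EXIT_CODE: <n>` marker. A self-contained demo of that behaviour (illustration only; the throwaway temp file stands in for a job log):

# Standalone demo of the EXIT_CODE marker scan that the prettyprint path relies on.
import re
import tempfile
from typing import Optional

def extract_last_exit_code(path: str) -> Optional[int]:
    """Return the last 'EXIT_CODE: <n>' found in the file, or None."""
    try:
        with open(path, "r", encoding="utf-8", errors="ignore") as f:
            matches = re.findall(r"EXIT_CODE:\s*(-?\d+)", f.read())
        return int(matches[-1]) if matches else None
    except OSError:
        return None

if __name__ == "__main__":
    with tempfile.NamedTemporaryFile("w", suffix=".out", delete=False) as tmp:
        tmp.write("training done\nEXIT_CODE: 0\nretried\nEXIT_CODE: 1\n")
        log_path = tmp.name
    print(extract_last_exit_code(log_path))  # -> 1: the last marker wins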
|