siliconcompiler 0.35.3__py3-none-any.whl → 0.35.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- siliconcompiler/_metadata.py +1 -1
- siliconcompiler/apps/sc_issue.py +18 -2
- siliconcompiler/checklist.py +2 -1
- siliconcompiler/constraints/asic_component.py +49 -11
- siliconcompiler/constraints/asic_floorplan.py +23 -21
- siliconcompiler/constraints/asic_pins.py +55 -17
- siliconcompiler/constraints/asic_timing.py +53 -22
- siliconcompiler/constraints/fpga_timing.py +5 -6
- siliconcompiler/data/templates/replay/replay.sh.j2 +27 -14
- siliconcompiler/package/__init__.py +17 -6
- siliconcompiler/project.py +9 -1
- siliconcompiler/scheduler/docker.py +24 -25
- siliconcompiler/scheduler/scheduler.py +82 -68
- siliconcompiler/scheduler/schedulernode.py +133 -20
- siliconcompiler/scheduler/slurm.py +113 -29
- siliconcompiler/scheduler/taskscheduler.py +0 -7
- siliconcompiler/schema/editableschema.py +29 -0
- siliconcompiler/schema/parametervalue.py +14 -2
- siliconcompiler/schema_support/option.py +82 -1
- siliconcompiler/schema_support/pathschema.py +7 -13
- siliconcompiler/tool.py +47 -25
- siliconcompiler/tools/klayout/__init__.py +3 -0
- siliconcompiler/tools/klayout/scripts/klayout_convert_drc_db.py +1 -0
- siliconcompiler/tools/klayout/scripts/klayout_export.py +1 -0
- siliconcompiler/tools/klayout/scripts/klayout_operations.py +1 -0
- siliconcompiler/tools/klayout/scripts/klayout_show.py +1 -0
- siliconcompiler/tools/klayout/scripts/klayout_utils.py +3 -4
- siliconcompiler/tools/openroad/__init__.py +27 -1
- siliconcompiler/tools/openroad/_apr.py +81 -4
- siliconcompiler/tools/openroad/clock_tree_synthesis.py +1 -0
- siliconcompiler/tools/openroad/global_placement.py +1 -0
- siliconcompiler/tools/openroad/init_floorplan.py +116 -7
- siliconcompiler/tools/openroad/power_grid_analysis.py +174 -0
- siliconcompiler/tools/openroad/repair_design.py +1 -0
- siliconcompiler/tools/openroad/repair_timing.py +1 -0
- siliconcompiler/tools/openroad/scripts/apr/preamble.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/apr/sc_init_floorplan.tcl +42 -4
- siliconcompiler/tools/openroad/scripts/apr/sc_irdrop.tcl +146 -0
- siliconcompiler/tools/openroad/scripts/apr/sc_repair_design.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/apr/sc_write_data.tcl +4 -6
- siliconcompiler/tools/openroad/scripts/common/procs.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/common/reports.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/rcx/sc_rcx_bench.tcl +2 -4
- siliconcompiler/tools/opensta/__init__.py +1 -1
- siliconcompiler/tools/opensta/scripts/sc_timing.tcl +17 -12
- siliconcompiler/tools/vivado/scripts/sc_bitstream.tcl +11 -0
- siliconcompiler/tools/vivado/scripts/sc_place.tcl +11 -0
- siliconcompiler/tools/vivado/scripts/sc_route.tcl +11 -0
- siliconcompiler/tools/vivado/scripts/sc_syn_fpga.tcl +10 -0
- siliconcompiler/tools/vpr/__init__.py +28 -0
- siliconcompiler/tools/yosys/scripts/sc_screenshot.tcl +1 -1
- siliconcompiler/tools/yosys/scripts/sc_synth_asic.tcl +40 -4
- siliconcompiler/tools/yosys/scripts/sc_synth_fpga.tcl +15 -5
- siliconcompiler/tools/yosys/syn_asic.py +42 -0
- siliconcompiler/tools/yosys/syn_fpga.py +8 -0
- siliconcompiler/toolscripts/_tools.json +6 -6
- siliconcompiler/utils/__init__.py +243 -51
- siliconcompiler/utils/curation.py +89 -56
- siliconcompiler/utils/issue.py +6 -1
- siliconcompiler/utils/multiprocessing.py +35 -2
- siliconcompiler/utils/paths.py +21 -0
- siliconcompiler/utils/settings.py +141 -0
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.35.4.dist-info}/METADATA +4 -3
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.35.4.dist-info}/RECORD +68 -65
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.35.4.dist-info}/WHEEL +0 -0
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.35.4.dist-info}/entry_points.txt +0 -0
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.35.4.dist-info}/licenses/LICENSE +0 -0
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.35.4.dist-info}/top_level.txt +0 -0
siliconcompiler/scheduler/scheduler.py

```diff
@@ -12,7 +12,7 @@ import os.path
 
 from datetime import datetime
 
-from typing import Union, Dict, Optional, Tuple, List, TYPE_CHECKING
+from typing import Union, Dict, Optional, Tuple, List, Set, TYPE_CHECKING
 
 from siliconcompiler import NodeStatus
 from siliconcompiler.schema import Journal
@@ -29,6 +29,7 @@ from siliconcompiler.utils.logging import SCLoggerFormatter
 from siliconcompiler.utils.multiprocessing import MPManager
 from siliconcompiler.scheduler import send_messages, SCRuntimeError
 from siliconcompiler.utils.paths import collectiondir, jobdir, workdir
+from siliconcompiler.utils.curation import collect
 
 if TYPE_CHECKING:
     from siliconcompiler.project import Project
@@ -101,12 +102,27 @@ class Scheduler:
         self.__metrics: "MetricSchema" = self.__project.get("metric", field="schema")
 
         self.__tasks: Dict[Tuple[str, str], SchedulerNode] = {}
+        self.__skippedtasks: Set[Tuple[str, str]] = set()
 
         # Create dummy handler
         self.__joblog_handler = logging.NullHandler()
         self.__org_job_name = self.__project.get("option", "jobname")
         self.__logfile = None
 
+        # Create tasks
+        for step, index in self.__flow.get_nodes():
+            node_cls = SchedulerNode
+
+            node_scheduler = self.__project.get('option', 'scheduler', 'name',
+                                                step=step, index=index)
+            if node_scheduler == 'slurm':
+                node_cls = SlurmSchedulerNode
+            elif node_scheduler == 'docker':
+                node_cls = DockerSchedulerNode
+            self.__tasks[(step, index)] = node_cls(self.__project, step, index)
+            if self.__flow.get(step, index, "tool") == "builtin":
+                self.__tasks[(step, index)].set_builtin()
+
     @property
     def manifest(self) -> str:
         """
```
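The constructor hunk above moves node creation and scheduler selection up front: every `(step, index)` pair gets a `SchedulerNode` subclass chosen from the per-node `('option', 'scheduler', 'name')` value. A minimal sketch of that dispatch pattern, using stand-in class names rather than the actual SiliconCompiler API:

```python
from typing import Dict, Optional, Tuple, Type


class Node:
    """Stand-in for SchedulerNode (local execution)."""
    def __init__(self, project, step: str, index: str):
        self.project, self.step, self.index = project, step, index


class SlurmNode(Node):
    """Stand-in for SlurmSchedulerNode."""


class DockerNode(Node):
    """Stand-in for DockerSchedulerNode."""


# Map the per-node scheduler name to a node class; unknown/None falls back to Node.
NODE_CLASSES: Dict[Optional[str], Type[Node]] = {
    "slurm": SlurmNode,
    "docker": DockerNode,
}


def make_tasks(project, nodes, scheduler_name) -> Dict[Tuple[str, str], Node]:
    """Build the (step, index) -> node map once, at construction time."""
    tasks = {}
    for step, index in nodes:
        cls = NODE_CLASSES.get(scheduler_name(step, index), Node)
        tasks[(step, index)] = cls(project, step, index)
    return tasks


tasks = make_tasks(None, [("syn", "0"), ("place", "0")],
                   lambda step, index: "slurm" if step == "place" else None)
print(type(tasks[("place", "0")]).__name__)  # SlurmNode
```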
```diff
@@ -270,6 +286,9 @@ class Scheduler:
         if not self.check_manifest():
             raise SCRuntimeError("check_manifest() failed")
 
+        # Initialize schedulers
+        self.__init_schedulers()
+
         self.__run_setup()
         self.configure_nodes()
 
@@ -288,15 +307,23 @@ class Scheduler:
             if not self.__check_flowgraph_io():
                 raise SCRuntimeError("Flowgraph file IO constrains errors")
 
-            self.run_core()
+            # Collect files for remote runs
+            if self.__check_collect_files():
+                collect(self.project)
+
+            try:
+                self.run_core()
+            except SCRuntimeError as e:
+                raise e
 
-            # Store run in history
-            self.__project._record_history()
+            finally:
+                # Store run in history
+                self.__project._record_history()
 
-            # Record final manifest
-            self.__project.write_manifest(self.manifest)
+                # Record final manifest
+                self.__project.write_manifest(self.manifest)
 
-            send_messages.send(self.__project, 'summary', None, None)
+                send_messages.send(self.__project, 'summary', None, None)
         finally:
             if self.__joblog_handler is not None:
                 self.__logger.removeHandler(self.__joblog_handler)
```
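The `run()` hunk above wraps `run_core()` in `try`/`finally`, so the history record, final manifest, and summary message are produced even when the core run raises. A self-contained sketch of that control flow (the logged step names are illustrative):

```python
class RunError(RuntimeError):
    """Stand-in for SCRuntimeError."""


def run(fail: bool = False):
    log = []
    try:
        if fail:
            raise RunError("core run failed")
        log.append("run_core")
    finally:
        # Mirrors the finally block above: bookkeeping happens either way.
        log.append("record_history")
        log.append("write_manifest")
        log.append("send_summary")
        print(log)


run()               # ['run_core', 'record_history', 'write_manifest', 'send_summary']
try:
    run(fail=True)  # bookkeeping is still printed before the error propagates
except RunError:
    pass
```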
```diff
@@ -319,50 +346,10 @@ class Scheduler:
         error = False
 
         for (step, index) in self.__flow_runtime.get_nodes():
-
-            check_file_access = not self.__project.option.get_remote() and scheduler is None
-
-            node = SchedulerNode(self.__project, step, index)
-            requires = []
-            with node.runtime():
-                requires = node.task.get('require')
-
-            for item in sorted(set(requires)):
-                keypath = item.split(',')
-                if not self.__project.valid(*keypath):
-                    self.__logger.error(f'Cannot resolve required keypath [{",".join(keypath)}] '
-                                        f'for {step}/{index}.')
-                    error = True
-                    continue
-
-                param = self.__project.get(*keypath, field=None)
-                check_step, check_index = step, index
-                if param.get(field='pernode').is_never():
-                    check_step, check_index = None, None
-
-                if not param.has_value(step=check_step, index=check_index):
-                    self.__logger.error('No value set for required keypath '
-                                        f'[{",".join(keypath)}] for {step}/{index}.')
-                    error = True
-                    continue
+            node = self.__tasks[(step, index)]
 
-
-
-                abspath = self.__project.find_files(*keypath,
-                                                    missing_ok=True,
-                                                    step=check_step, index=check_index)
-
-                unresolved_paths = param.get(step=check_step, index=check_index)
-                if not isinstance(abspath, list):
-                    abspath = [abspath]
-                    unresolved_paths = [unresolved_paths]
-
-                for path, setpath in zip(abspath, unresolved_paths):
-                    if path is None:
-                        self.__logger.error(f'Cannot resolve path {setpath} in '
-                                            f'required file keypath [{",".join(keypath)}] '
-                                            f'for {step}/{index}.')
-                        error = True
+            error |= not node.check_required_values()
+            error |= not node.check_required_paths()
 
         return not error
 
@@ -414,12 +401,10 @@ class Scheduler:
                 in_task_class = self.__project.get("tool", in_tool, "task", in_task,
                                                    field="schema")
 
-                with in_task_class.runtime(SchedulerNode(self.__project,
-                                                         in_step, in_index)) as task:
+                with in_task_class.runtime(self.__tasks[(in_step, in_index)]) as task:
                     inputs = task.get_output_files()
 
-                with task_class.runtime(SchedulerNode(self.__project,
-                                                      step, index)) as task:
+                with task_class.runtime(self.__tasks[(step, index)]) as task:
                     for inp in inputs:
                         node_inp = task.compute_input_file_node_name(inp, in_step, in_index)
                         if node_inp in requirements:
@@ -455,7 +440,7 @@ class Scheduler:
 
         self.__record.set('status', NodeStatus.PENDING, step=step, index=index)
         for next_step, next_index in self.__flow_runtime.get_nodes_starting_at(step, index):
-            if …
+            if (next_step, next_index) in self.__skippedtasks:
                 continue
 
             # Mark following steps as pending
@@ -475,18 +460,6 @@ class Scheduler:
         copy_from_nodes = set(self.__flow_load_runtime.get_nodes()).difference(
             self.__flow_runtime.get_entry_nodes())
         for step, index in self.__flow.get_nodes():
-            node_cls = SchedulerNode
-
-            node_scheduler = self.__project.get('option', 'scheduler', 'name',
-                                                step=step, index=index)
-            if node_scheduler == 'slurm':
-                node_cls = SlurmSchedulerNode
-            elif node_scheduler == 'docker':
-                node_cls = DockerSchedulerNode
-            self.__tasks[(step, index)] = node_cls(self.__project, step, index)
-            if self.__flow.get(step, index, "tool") == "builtin":
-                self.__tasks[(step, index)].set_builtin()
-
             if self.__org_job_name and (step, index) in copy_from_nodes:
                 self.__tasks[(step, index)].copy_from(self.__org_job_name)
 
@@ -590,6 +563,10 @@ class Scheduler:
             if NodeStatus.is_waiting(self.__record.get('status', step=step, index=index)):
                 with self.__tasks[(step, index)].runtime():
                     self.__tasks[(step, index)].clean_directory()
+                parent_dir = os.path.dirname(self.__tasks[(step, index)].workdir)
+                if os.path.exists(parent_dir) and len(os.listdir(parent_dir)) == 0:
+                    # Step directory is empty so safe to remove
+                    os.rmdir(parent_dir)
 
     def __configure_collect_previous_information(self) -> Dict[Tuple[str, str], "Project"]:
         """Collects information from previous runs for nodes that won't be re-executed.
```
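The cleanup hunk above also prunes the parent step directory once its last index subdirectory has been removed, guarded by an emptiness check so `os.rmdir` can never fail on a non-empty directory. A self-contained sketch of the guard:

```python
import os
import tempfile


def remove_if_empty(path: str) -> bool:
    """Remove a directory only if it exists and has no entries."""
    if os.path.exists(path) and len(os.listdir(path)) == 0:
        os.rmdir(path)  # safe: the guard guarantees the directory is empty
        return True
    return False


with tempfile.TemporaryDirectory() as root:
    step_dir = os.path.join(root, "syn")
    os.makedirs(os.path.join(step_dir, "0"))
    print(remove_if_empty(step_dir))   # False: still holds index '0'
    os.rmdir(os.path.join(step_dir, "0"))
    print(remove_if_empty(step_dir))   # True: now empty, removed
```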
```diff
@@ -662,10 +639,13 @@ class Scheduler:
             for step, index in layer_nodes:
                 with self.__tasks[(step, index)].runtime():
                     node_kept = self.__tasks[(step, index)].setup()
+                    if not node_kept:
+                        self.__skippedtasks.add((step, index))
                     if not node_kept and (step, index) in extra_setup_nodes:
                         # remove from previous node data
                         del extra_setup_nodes[(step, index)]
 
+                # Copy in old status information, this will be overwritten if needed
                 if (step, index) in extra_setup_nodes:
                     schema = extra_setup_nodes[(step, index)]
                     node_status = None
@@ -893,6 +873,8 @@ class Scheduler:
                 if m:
                     jobid = max(jobid, int(m.group(1)))
             self.__project.set('option', 'jobname', f'{stem}{jobid + 1}')
+            for task in self.__tasks.values():
+                task._update_job()
             return True
         return False
 
@@ -937,7 +919,7 @@ class Scheduler:
             if self.__project.option.scheduler.get_name(step=step, index=index) is not None:
                 continue
 
-            node = SchedulerNode(self.__project, step, index)
+            node = self.__tasks[(step, index)]
             with node.runtime():
                 try:
                     exe = node.get_exe_path()
@@ -965,3 +947,35 @@ class Scheduler:
             os.chdir(cwd)
 
         return not error
+
+    def __check_collect_files(self) -> bool:
+        """
+        Iterates through all tasks in the scheduler, and checks if the there
+        are files or directories that need to be collected
+
+        Returns:
+            bool: True if there is something to be collected, False otherwise.
+        """
+        do_collect = False
+        for task in self.__tasks.values():
+            if task.mark_copy():
+                do_collect = True
+
+        return do_collect
+
+    def __init_schedulers(self) -> None:
+        """
+        Collect and invoke unique initialization callbacks from all task schedulers.
+
+        This method gathers init functions from all SchedulerNode instances, deduplicates them
+        (since multiple tasks may share the same scheduler class), and invokes each once to
+        perform early validation (e.g., checking Docker/Slurm availability).
+        """
+        self.__logger.debug("Collecting unique scheduler initialization callbacks")
+        init_funcs = set()
+        for step, index in self.__flow_runtime.get_nodes():
+            init_funcs.add(self.__tasks[(step, index)].init)
+
+        for init in sorted(init_funcs, key=lambda func: func.__qualname__):
+            self.__logger.debug(f"Initializing scheduler: {init.__qualname__}")
+            init(self.__project)
```
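`__init_schedulers()` (added above) leans on set deduplication of the `init` callbacks: if each node class exposes `init` as a plain function (e.g. a `staticmethod`), all nodes of one class contribute the same function object, so each backend is validated once, in deterministic `__qualname__` order. A sketch of why the deduplication works, with illustrative classes; this assumes `init` is not a per-instance bound method, which would defeat the set:

```python
class Node:
    @staticmethod
    def init(project):
        print("local scheduler: nothing to check")


class SlurmNode(Node):
    @staticmethod
    def init(project):
        print("checking that the Slurm queue is reachable")


# Three tasks, two unique init callbacks: a staticmethod accessed through an
# instance is the same function object for every instance of that class.
tasks = [Node(), SlurmNode(), SlurmNode()]
init_funcs = {task.init for task in tasks}

for init in sorted(init_funcs, key=lambda f: f.__qualname__):
    init(project=None)  # each backend's early validation runs exactly once
```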
siliconcompiler/scheduler/schedulernode.py

```diff
@@ -9,7 +9,7 @@ import time
 
 import os.path
 
-from …
+from siliconcompiler.utils.multiprocessing import MPQueueHandler as QueueHandler
 
 from typing import List, Optional, Set, Tuple, TYPE_CHECKING
 
@@ -19,7 +19,7 @@ from siliconcompiler.utils.logging import get_console_formatter, SCInRunLoggerFo
 
 from siliconcompiler.package import Resolver
 from siliconcompiler.schema_support.record import RecordTime, RecordTool
-from siliconcompiler.schema import Journal
+from siliconcompiler.schema import Journal, Parameter
 from siliconcompiler.scheduler import send_messages
 from siliconcompiler.utils.paths import workdir, jobdir, collectiondir, cwdir
 
@@ -67,9 +67,10 @@ class SchedulerNode:
     This class encapsulates the state and logic required to run a specific
     step and index, including setting up directories, handling file I/O,
     executing the associated tool, and recording results.
-
     """
 
+    __MAX_LOG_PRINT = 100  # Maximum number of warnings/error to print to log
+
     def __init__(self, project: "Project", step: str, index: str, replay: bool = False):
         """
         Initializes a SchedulerNode.
@@ -100,7 +101,6 @@ class SchedulerNode:
                                               self.__project.get("option", "fileset")[0],
                                               "topmodule")
 
-        self.__job: str = self.__project.get('option', 'jobname')
         self.__record_user_info: bool = self.__project.get(
             "option", "track", step=self.__step, index=self.__index)
         self.__pipe = None
@@ -116,24 +116,12 @@ class SchedulerNode:
         self.__enforce_inputfiles = True
         self.__enforce_outputfiles = True
 
+        self._update_job()
+
         flow: str = self.__project.get('option', 'flow')
         self.__is_entry_node: bool = (self.__step, self.__index) in \
             self.__project.get("flowgraph", flow, field="schema").get_entry_nodes()
 
-        self.__cwd = cwdir(self.__project)
-        self.__jobworkdir = jobdir(self.__project)
-        self.__workdir = workdir(self.__project, step=self.__step, index=self.__index)
-        self.__manifests = {
-            "input": os.path.join(self.__workdir, "inputs", f"{self.__name}.pkg.json"),
-            "output": os.path.join(self.__workdir, "outputs", f"{self.__name}.pkg.json")
-        }
-        self.__logs = {
-            "sc": os.path.join(self.__workdir, f"sc_{self.__step}_{self.__index}.log"),
-            "exe": os.path.join(self.__workdir, f"{self.__step}.log")
-        }
-        self.__replay_script = os.path.join(self.__workdir, "replay.sh")
-        self.__collection_path = collectiondir(self.__project)
-
         self.set_queue(None, None)
         self.__setup_schema_access()
 
@@ -258,6 +246,22 @@ class SchedulerNode:
         """Task: The task object associated with this node."""
         return self.__task
 
+    def _update_job(self):
+        self.__job: str = self.__project.get('option', 'jobname')
+        self.__cwd = cwdir(self.__project)
+        self.__jobworkdir = jobdir(self.__project)
+        self.__workdir = workdir(self.__project, step=self.__step, index=self.__index)
+        self.__manifests = {
+            "input": os.path.join(self.__workdir, "inputs", f"{self.__name}.pkg.json"),
+            "output": os.path.join(self.__workdir, "outputs", f"{self.__name}.pkg.json")
+        }
+        self.__logs = {
+            "sc": os.path.join(self.__workdir, f"sc_{self.__step}_{self.__index}.log"),
+            "exe": os.path.join(self.__workdir, f"{self.__step}.log")
+        }
+        self.__replay_script = os.path.join(self.__workdir, "replay.sh")
+        self.__collection_path = collectiondir(self.__project)
+
     def get_manifest(self, input: bool = False) -> str:
         """
         Gets the path to the input or output manifest file for this node.
```
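Moving the path computation from `__init__` into `_update_job()` lets the scheduler refresh every node after bumping `('option', 'jobname')` (see the `task._update_job()` loop in the scheduler.py diff above): the work directory, manifests, logs, and replay script are all derived from the job name. A sketch of the derivation, assuming the conventional `build/<design>/<job>/<step>/<index>` layout:

```python
import os


def job_paths(build_dir: str, design: str, job: str, step: str, index: str) -> dict:
    """Recompute all job-dependent paths from the current jobname."""
    workdir = os.path.join(build_dir, design, job, step, index)
    return {
        "workdir": workdir,
        "manifest_in": os.path.join(workdir, "inputs", f"{design}.pkg.json"),
        "manifest_out": os.path.join(workdir, "outputs", f"{design}.pkg.json"),
        "log_sc": os.path.join(workdir, f"sc_{step}_{index}.log"),
        "log_exe": os.path.join(workdir, f"{step}.log"),
        "replay": os.path.join(workdir, "replay.sh"),
    }


print(job_paths("build", "heartbeat", "job0", "syn", "0")["workdir"])
# build/heartbeat/job0/syn/0 -> would become .../job1/... after a jobname bump
```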
```diff
@@ -1156,11 +1160,13 @@ class SchedulerNode:
         if 'errors' in checks:
             ordered_suffixes.append('errors')
 
+        print_paths = {}
         # Looping through patterns for each line
         with sc_open(self.__logs["exe"]) as f:
             line_count = sum(1 for _ in f)
             right_align = len(str(line_count))
             for suffix in ordered_suffixes:
+                print_paths[suffix] = False
                 # Start at the beginning of file again
                 f.seek(0)
                 for num, line in enumerate(f, start=1):
@@ -1169,7 +1175,7 @@ class SchedulerNode:
                         if string is None:
                             break
                         else:
-                            string = utils.grep(self.__project, item, string)
+                            string = utils.grep(self.__project.logger, item, string)
                     if string is not None:
                         matches[suffix] += 1
                         # always print to file
@@ -1177,11 +1183,21 @@ class SchedulerNode:
                         print(line_with_num, file=checks[suffix]['report'])
                         # selectively print to display
                         if checks[suffix]["display"]:
-                            checks[suffix]["display"](suffix, line_with_num)
+                            if matches[suffix] <= SchedulerNode.__MAX_LOG_PRINT:
+                                checks[suffix]["display"](suffix, line_with_num)
+                            else:
+                                if not print_paths[suffix]:
+                                    checks[suffix]["display"](suffix, "print limit reached")
+                                    print_paths[suffix] = True
 
         for check in checks.values():
             check['report'].close()
 
+        for suffix in ordered_suffixes:
+            if print_paths[suffix]:
+                self.logger.info(f"All {suffix} can be viewed at: "
+                                 f"{os.path.abspath(f'{self.__step}.{suffix}')}")
+
         for metric in ("errors", "warnings"):
             if metric in matches:
                 value = self.__metrics.get(metric, step=self.__step, index=self.__index)
```
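The new `__MAX_LOG_PRINT` cap keeps noisy tools from flooding the console: every match still lands in the per-step report file, but only the first 100 are echoed, after which a single "print limit reached" marker and a pointer to the full report are emitted. A self-contained sketch of the pattern (the limit is shrunk for the demo):

```python
import io

MAX_LOG_PRINT = 3  # the real limit above is 100; kept small for the demo


def scan(lines, report, display):
    matches = 0
    limit_hit = False
    for line in lines:
        matches += 1
        print(line, file=report)            # the full record always goes to disk
        if matches <= MAX_LOG_PRINT:
            display(line)                   # echo the first N matches
        elif not limit_hit:
            display("print limit reached")  # announce the cap exactly once
            limit_hit = True
    if limit_hit:
        display("all matches can be viewed in the report file")
    return matches


report = io.StringIO()
scan([f"WARNING {i}" for i in range(6)], report, display=print)
print(report.getvalue().count("WARNING"))   # 6: nothing is lost on disk
```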
```diff
@@ -1370,3 +1386,100 @@ class SchedulerNode:
             for logfile in self.__logs.values():
                 if os.path.isfile(logfile):
                     tar.add(logfile, arcname=arcname(logfile))
+
+    def get_required_keys(self) -> Set[Tuple[str, ...]]:
+        """
+        This function walks through the 'require' keys and returns the
+        keys.
+        """
+        path_keys = set()
+        with self.runtime():
+            task = self.task
+            for key in task.get('require'):
+                path_keys.add(tuple(key.split(",")))
+            if task.has_prescript():
+                path_keys.add((*task._keypath, "prescript"))
+            if task.has_postscript():
+                path_keys.add((*task._keypath, "postscript"))
+            if task.get("refdir"):
+                path_keys.add((*task._keypath, "refdir"))
+            if task.get("script"):
+                path_keys.add((*task._keypath, "script"))
+            if task.get("exe"):
+                path_keys.add((*task._keypath, "exe"))
+
+        return path_keys
+
+    def get_required_path_keys(self) -> Set[Tuple[str, ...]]:
+        """
+        This function walks through the 'require' keys and returns the
+        keys that are of type path (file/dir).
+        """
+        path_keys = set()
+        for key in self.get_required_keys():
+            try:
+                param_type: str = self.__project.get(*key, field="type")
+                if "file" in param_type or "dir" in param_type:
+                    path_keys.add(key)
+            except KeyError:
+                # Key does not exist
+                pass
+
+        return path_keys
+
+    def mark_copy(self) -> bool:
+        """Marks files from the 'require' path keys for copying."""
+        return False
+
+    def check_required_values(self) -> bool:
+        requires = self.get_required_keys()
+
+        error = False
+        for key in sorted(requires):
+            if not self.__project.valid(*key):
+                self.logger.error(f'Cannot resolve required keypath [{",".join(key)}] '
+                                  f'for {self.step}/{self.index}.')
+                error = True
+                continue
+
+            param: Parameter = self.__project.get(*key, field=None)
+            check_step, check_index = self.step, self.index
+            if param.get(field='pernode').is_never():
+                check_step, check_index = None, None
+
+            if not param.has_value(step=check_step, index=check_index):
+                self.logger.error('No value set for required keypath '
+                                  f'[{",".join(key)}] for {self.step}/{self.index}.')
+                error = True
+                continue
+        return not error
+
+    def check_required_paths(self) -> bool:
+        if self.__project.option.get_remote():
+            return True
+
+        requires = self.get_required_path_keys()
+
+        error = False
+        for key in sorted(requires):
+            param: Parameter = self.__project.get(*key, field=None)
+            check_step, check_index = self.step, self.index
+            if param.get(field='pernode').is_never():
+                check_step, check_index = None, None
+
+            abspath = self.__project.find_files(*key,
+                                                missing_ok=True,
+                                                step=check_step, index=check_index)
+
+            unresolved_paths = param.get(step=check_step, index=check_index)
+            if not isinstance(abspath, list):
+                abspath = [abspath]
+                unresolved_paths = [unresolved_paths]
+
+            for path, setpath in zip(abspath, unresolved_paths):
+                if path is None:
+                    self.logger.error(f'Cannot resolve path {setpath} in '
+                                      f'required file keypath [{",".join(key)}] '
+                                      f'for {self.step}/{self.index}.')
+                    error = True
+        return not error
```
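With validation moved onto the node, the scheduler-side check loop (rewritten in the scheduler.py diff above) reduces to calling `check_required_values()` and `check_required_paths()` per node. Both share the same scoping rule: a parameter whose `pernode` policy is never is validated globally, otherwise at this node's step/index. A compact sketch of that rule using stand-in types, not the actual schema API:

```python
from dataclasses import dataclass, field
from typing import Dict, Optional, Tuple


@dataclass
class Param:
    """Stand-in for a schema Parameter with a pernode policy and values."""
    pernode_never: bool
    global_value: Optional[str] = None
    node_values: Dict[Tuple[str, str], str] = field(default_factory=dict)

    def has_value(self, step: Optional[str], index: Optional[str]) -> bool:
        if step is None:                       # global scope
            return self.global_value is not None
        return (step, index) in self.node_values


def check_scope(param: Param, step: str, index: str):
    """Parameters that can never vary per node are checked globally."""
    if param.pernode_never:
        return None, None
    return step, index


p = Param(pernode_never=True, global_value="asap7")
print(p.has_value(*check_scope(p, "syn", "0")))  # True: the global value suffices
```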