siliconcompiler 0.35.3__py3-none-any.whl → 0.36.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- siliconcompiler/_metadata.py +1 -1
- siliconcompiler/apps/sc_issue.py +18 -2
- siliconcompiler/checklist.py +2 -1
- siliconcompiler/constraints/__init__.py +4 -1
- siliconcompiler/constraints/asic_component.py +49 -11
- siliconcompiler/constraints/asic_floorplan.py +23 -21
- siliconcompiler/constraints/asic_pins.py +55 -17
- siliconcompiler/constraints/asic_timing.py +280 -57
- siliconcompiler/constraints/fpga_timing.py +212 -18
- siliconcompiler/constraints/timing_mode.py +82 -0
- siliconcompiler/data/templates/replay/replay.sh.j2 +27 -14
- siliconcompiler/data/templates/tcl/manifest.tcl.j2 +0 -6
- siliconcompiler/flowgraph.py +95 -42
- siliconcompiler/flows/generate_openroad_rcx.py +2 -2
- siliconcompiler/flows/highresscreenshotflow.py +37 -0
- siliconcompiler/library.py +2 -1
- siliconcompiler/package/__init__.py +56 -51
- siliconcompiler/project.py +13 -2
- siliconcompiler/scheduler/docker.py +24 -25
- siliconcompiler/scheduler/scheduler.py +143 -100
- siliconcompiler/scheduler/schedulernode.py +138 -22
- siliconcompiler/scheduler/slurm.py +120 -35
- siliconcompiler/scheduler/taskscheduler.py +19 -23
- siliconcompiler/schema/_metadata.py +1 -1
- siliconcompiler/schema/editableschema.py +29 -0
- siliconcompiler/schema/namedschema.py +2 -4
- siliconcompiler/schema/parametervalue.py +14 -2
- siliconcompiler/schema_support/cmdlineschema.py +0 -3
- siliconcompiler/schema_support/dependencyschema.py +0 -6
- siliconcompiler/schema_support/option.py +82 -1
- siliconcompiler/schema_support/pathschema.py +7 -13
- siliconcompiler/schema_support/record.py +4 -3
- siliconcompiler/tool.py +105 -52
- siliconcompiler/tools/_common/tcl/sc_schema_access.tcl +0 -6
- siliconcompiler/tools/keplerformal/__init__.py +7 -0
- siliconcompiler/tools/keplerformal/lec.py +112 -0
- siliconcompiler/tools/klayout/__init__.py +3 -0
- siliconcompiler/tools/klayout/screenshot.py +66 -1
- siliconcompiler/tools/klayout/scripts/klayout_convert_drc_db.py +1 -0
- siliconcompiler/tools/klayout/scripts/klayout_export.py +11 -40
- siliconcompiler/tools/klayout/scripts/klayout_operations.py +1 -0
- siliconcompiler/tools/klayout/scripts/klayout_show.py +5 -4
- siliconcompiler/tools/klayout/scripts/klayout_utils.py +16 -5
- siliconcompiler/tools/montage/tile.py +26 -12
- siliconcompiler/tools/openroad/__init__.py +27 -1
- siliconcompiler/tools/openroad/_apr.py +107 -14
- siliconcompiler/tools/openroad/clock_tree_synthesis.py +1 -0
- siliconcompiler/tools/openroad/global_placement.py +1 -0
- siliconcompiler/tools/openroad/init_floorplan.py +119 -7
- siliconcompiler/tools/openroad/power_grid_analysis.py +174 -0
- siliconcompiler/tools/openroad/repair_design.py +1 -0
- siliconcompiler/tools/openroad/repair_timing.py +1 -0
- siliconcompiler/tools/openroad/scripts/apr/preamble.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/apr/sc_init_floorplan.tcl +91 -18
- siliconcompiler/tools/openroad/scripts/apr/sc_irdrop.tcl +148 -0
- siliconcompiler/tools/openroad/scripts/apr/sc_repair_design.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/apr/sc_write_data.tcl +8 -10
- siliconcompiler/tools/openroad/scripts/common/procs.tcl +15 -6
- siliconcompiler/tools/openroad/scripts/common/read_liberty.tcl +2 -2
- siliconcompiler/tools/openroad/scripts/common/reports.tcl +7 -4
- siliconcompiler/tools/openroad/scripts/common/screenshot.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/common/write_data_physical.tcl +8 -0
- siliconcompiler/tools/openroad/scripts/common/write_images.tcl +16 -12
- siliconcompiler/tools/openroad/scripts/rcx/sc_rcx_bench.tcl +2 -4
- siliconcompiler/tools/openroad/scripts/sc_rdlroute.tcl +3 -1
- siliconcompiler/tools/openroad/write_data.py +2 -2
- siliconcompiler/tools/opensta/__init__.py +1 -1
- siliconcompiler/tools/opensta/scripts/sc_check_library.tcl +2 -2
- siliconcompiler/tools/opensta/scripts/sc_report_libraries.tcl +2 -2
- siliconcompiler/tools/opensta/scripts/sc_timing.tcl +13 -10
- siliconcompiler/tools/opensta/timing.py +6 -2
- siliconcompiler/tools/vivado/scripts/sc_bitstream.tcl +11 -0
- siliconcompiler/tools/vivado/scripts/sc_place.tcl +11 -0
- siliconcompiler/tools/vivado/scripts/sc_route.tcl +11 -0
- siliconcompiler/tools/vivado/scripts/sc_syn_fpga.tcl +10 -0
- siliconcompiler/tools/vpr/__init__.py +28 -0
- siliconcompiler/tools/yosys/scripts/sc_screenshot.tcl +1 -1
- siliconcompiler/tools/yosys/scripts/sc_synth_asic.tcl +40 -4
- siliconcompiler/tools/yosys/scripts/sc_synth_fpga.tcl +15 -5
- siliconcompiler/tools/yosys/syn_asic.py +42 -0
- siliconcompiler/tools/yosys/syn_fpga.py +8 -0
- siliconcompiler/toolscripts/_tools.json +12 -7
- siliconcompiler/toolscripts/ubuntu22/install-keplerformal.sh +72 -0
- siliconcompiler/toolscripts/ubuntu24/install-keplerformal.sh +72 -0
- siliconcompiler/utils/__init__.py +243 -51
- siliconcompiler/utils/curation.py +89 -56
- siliconcompiler/utils/issue.py +6 -1
- siliconcompiler/utils/multiprocessing.py +46 -2
- siliconcompiler/utils/paths.py +21 -0
- siliconcompiler/utils/settings.py +162 -0
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.36.0.dist-info}/METADATA +5 -4
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.36.0.dist-info}/RECORD +96 -87
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.36.0.dist-info}/WHEEL +0 -0
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.36.0.dist-info}/entry_points.txt +0 -0
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.36.0.dist-info}/licenses/LICENSE +0 -0
- {siliconcompiler-0.35.3.dist-info → siliconcompiler-0.36.0.dist-info}/top_level.txt +0 -0
siliconcompiler/scheduler/scheduler.py (+143 -100):

@@ -12,9 +12,9 @@ import os.path
 
 from datetime import datetime
 
-from typing import Union, Dict, Optional, Tuple, List, TYPE_CHECKING
+from typing import Union, Dict, Optional, Tuple, List, Set, TYPE_CHECKING
 
-from siliconcompiler import NodeStatus
+from siliconcompiler import NodeStatus, Task
 from siliconcompiler.schema import Journal
 from siliconcompiler.flowgraph import RuntimeFlowgraph
 from siliconcompiler.scheduler import SchedulerNode
@@ -29,6 +29,7 @@ from siliconcompiler.utils.logging import SCLoggerFormatter
 from siliconcompiler.utils.multiprocessing import MPManager
 from siliconcompiler.scheduler import send_messages, SCRuntimeError
 from siliconcompiler.utils.paths import collectiondir, jobdir, workdir
+from siliconcompiler.utils.curation import collect
 
 if TYPE_CHECKING:
     from siliconcompiler.project import Project
@@ -101,12 +102,27 @@ class Scheduler:
         self.__metrics: "MetricSchema" = self.__project.get("metric", field="schema")
 
         self.__tasks: Dict[Tuple[str, str], SchedulerNode] = {}
+        self.__skippedtasks: Set[Tuple[str, str]] = set()
 
         # Create dummy handler
         self.__joblog_handler = logging.NullHandler()
         self.__org_job_name = self.__project.get("option", "jobname")
         self.__logfile = None
 
+        # Create tasks
+        for step, index in self.__flow.get_nodes():
+            node_cls = SchedulerNode
+
+            node_scheduler = self.__project.get('option', 'scheduler', 'name',
+                                                step=step, index=index)
+            if node_scheduler == 'slurm':
+                node_cls = SlurmSchedulerNode
+            elif node_scheduler == 'docker':
+                node_cls = DockerSchedulerNode
+            self.__tasks[(step, index)] = node_cls(self.__project, step, index)
+            if self.__flow.get(step, index, "tool") == "builtin":
+                self.__tasks[(step, index)].set_builtin()
+
     @property
     def manifest(self) -> str:
         """
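Node creation now happens once in the constructor, with the node class picked per step/index from the configured scheduler name. A minimal sketch of that dispatch pattern, using stand-in classes rather than SiliconCompiler's real `SchedulerNode` hierarchy:

```python
# Sketch of the per-node class dispatch above: map a scheduler name to a
# node class. These are stand-in classes, not the real hierarchy.
class SchedulerNode:
    pass

class SlurmSchedulerNode(SchedulerNode):
    pass

class DockerSchedulerNode(SchedulerNode):
    pass

def node_class(scheduler_name):
    # None (local execution) falls through to the base class
    return {
        "slurm": SlurmSchedulerNode,
        "docker": DockerSchedulerNode,
    }.get(scheduler_name, SchedulerNode)

assert node_class(None) is SchedulerNode
assert node_class("slurm") is SlurmSchedulerNode
assert node_class("docker") is DockerSchedulerNode
```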
@@ -270,9 +286,16 @@ class Scheduler:
             if not self.check_manifest():
                 raise SCRuntimeError("check_manifest() failed")
 
+            # Initialize schedulers
+            self.__init_schedulers()
+
             self.__run_setup()
             self.configure_nodes()
 
+            # Verify task classes
+            if not self.__check_task_classes():
+                raise SCRuntimeError("Task classes are missing")
+
             # Verify tool setups
             if not self.__check_tool_versions():
                 raise SCRuntimeError("Tools did not meet version requirements")
@@ -288,15 +311,23 @@ class Scheduler:
             if not self.__check_flowgraph_io():
                 raise SCRuntimeError("Flowgraph file IO constrains errors")
 
-
+            # Collect files for remote runs
+            if self.__check_collect_files():
+                collect(self.project)
+
+            try:
+                self.run_core()
+            except SCRuntimeError as e:
+                raise e
 
-
-
+            finally:
+                # Store run in history
+                self.__project._record_history()
 
-
-
+                # Record final manifest
+                self.__project.write_manifest(self.manifest)
 
-
+            send_messages.send(self.__project, 'summary', None, None)
         finally:
             if self.__joblog_handler is not None:
                 self.__logger.removeHandler(self.__joblog_handler)
@@ -319,50 +350,37 @@ class Scheduler:
         error = False
 
         for (step, index) in self.__flow_runtime.get_nodes():
-
-            check_file_access = not self.__project.option.get_remote() and scheduler is None
-
-            node = SchedulerNode(self.__project, step, index)
-            requires = []
-            with node.runtime():
-                requires = node.task.get('require')
-
-            for item in sorted(set(requires)):
-                keypath = item.split(',')
-                if not self.__project.valid(*keypath):
-                    self.__logger.error(f'Cannot resolve required keypath [{",".join(keypath)}] '
-                                        f'for {step}/{index}.')
-                    error = True
-                    continue
+            node = self.__tasks[(step, index)]
 
-
-
-                if param.get(field='pernode').is_never():
-                    check_step, check_index = None, None
+            error |= not node.check_required_values()
+            error |= not node.check_required_paths()
 
-
-                    self.__logger.error('No value set for required keypath '
-                                        f'[{",".join(keypath)}] for {step}/{index}.')
-                    error = True
-                    continue
+        return not error
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    def __check_task_classes(self) -> bool:
+        """
+        Verifies that all runtime nodes have loaded their specific Task implementation classes.
+
+        Iterates through all nodes in the execution flow and checks if the associated
+        task object is a generic instance of the base `Task` class. If so, it indicates
+        that the specific module for that tool/task was not loaded correctly.
+
+        Returns:
+            bool: `True` if all nodes are using specialized Task subclasses, `False` if any
+                node is using the base `Task` class.
+        """
+        nodes = self.__flow_runtime.get_nodes()
+        error = False
+
+        for (step, index) in nodes:
+            tool = self.__flow.get(step, index, "tool")
+            task = self.__flow.get(step, index, "task")
+
+            task_cls = self.project.get("tool", tool, "task", task, field="schema")
+            if type(task_cls) is Task:
+                self.__logger.error(f"Invalid task: {step}/{index} did not load "
+                                    "the correct class module")
+                error = True
 
         return not error
 
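The new `__check_task_classes` tests `type(task_cls) is Task` rather than `isinstance()`, because specialized tasks are also instances of the base class and must pass the check. A small illustration of the distinction (the classes here are stand-ins, not the SiliconCompiler schema objects):

```python
# Why `type(x) is Task` and not isinstance(): subclass instances must
# pass, and only a bare base-class instance should be flagged.
# Illustrative stand-in classes only.
class Task:
    pass

class SynthesisTask(Task):
    pass

for obj in (Task(), SynthesisTask()):
    is_generic = type(obj) is Task       # True only for the bare base class
    is_task = isinstance(obj, Task)      # True for both
    print(f"{type(obj).__name__}: generic={is_generic}, task={is_task}")
# Task: generic=True, task=True
# SynthesisTask: generic=False, task=True
```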
@@ -414,16 +432,16 @@ class Scheduler:
                 in_task_class = self.__project.get("tool", in_tool, "task", in_task,
                                                    field="schema")
 
-                with in_task_class.runtime(
-                        in_step, in_index)) as task:
+                with in_task_class.runtime(self.__tasks[(in_step, in_index)]) as task:
                     inputs = task.get_output_files()
 
-                with task_class.runtime(
-                        step, index)) as task:
+                with task_class.runtime(self.__tasks[(step, index)]) as task:
                     for inp in inputs:
                         node_inp = task.compute_input_file_node_name(inp, in_step, in_index)
                         if node_inp in requirements:
                             inp = node_inp
+                        if inp not in requirements:
+                            continue
                         if inp in all_inputs:
                             self.__logger.error(f'Invalid flow: {step}/{index} '
                                                 f'receives {inp} from multiple input tasks')
@@ -455,7 +473,7 @@ class Scheduler:
 
         self.__record.set('status', NodeStatus.PENDING, step=step, index=index)
         for next_step, next_index in self.__flow_runtime.get_nodes_starting_at(step, index):
-            if
+            if (next_step, next_index) in self.__skippedtasks:
                 continue
 
             # Mark following steps as pending
@@ -475,18 +493,6 @@ class Scheduler:
         copy_from_nodes = set(self.__flow_load_runtime.get_nodes()).difference(
             self.__flow_runtime.get_entry_nodes())
         for step, index in self.__flow.get_nodes():
-            node_cls = SchedulerNode
-
-            node_scheduler = self.__project.get('option', 'scheduler', 'name',
-                                                step=step, index=index)
-            if node_scheduler == 'slurm':
-                node_cls = SlurmSchedulerNode
-            elif node_scheduler == 'docker':
-                node_cls = DockerSchedulerNode
-            self.__tasks[(step, index)] = node_cls(self.__project, step, index)
-            if self.__flow.get(step, index, "tool") == "builtin":
-                self.__tasks[(step, index)].set_builtin()
-
             if self.__org_job_name and (step, index) in copy_from_nodes:
                 self.__tasks[(step, index)].copy_from(self.__org_job_name)
 
@@ -590,6 +596,10 @@ class Scheduler:
             if NodeStatus.is_waiting(self.__record.get('status', step=step, index=index)):
                 with self.__tasks[(step, index)].runtime():
                     self.__tasks[(step, index)].clean_directory()
+                parent_dir = os.path.dirname(self.__tasks[(step, index)].workdir)
+                if os.path.exists(parent_dir) and len(os.listdir(parent_dir)) == 0:
+                    # Step directory is empty so safe to remove
+                    os.rmdir(parent_dir)
 
     def __configure_collect_previous_information(self) -> Dict[Tuple[str, str], "Project"]:
         """Collects information from previous runs for nodes that won't be re-executed.
@@ -662,10 +672,13 @@ class Scheduler:
         for step, index in layer_nodes:
             with self.__tasks[(step, index)].runtime():
                 node_kept = self.__tasks[(step, index)].setup()
+            if not node_kept:
+                self.__skippedtasks.add((step, index))
             if not node_kept and (step, index) in extra_setup_nodes:
                 # remove from previous node data
                 del extra_setup_nodes[(step, index)]
 
+            # Copy in old status information, this will be overwritten if needed
             if (step, index) in extra_setup_nodes:
                 schema = extra_setup_nodes[(step, index)]
                 node_status = None
@@ -893,6 +906,8 @@ class Scheduler:
                 if m:
                     jobid = max(jobid, int(m.group(1)))
             self.__project.set('option', 'jobname', f'{stem}{jobid + 1}')
+            for task in self.__tasks.values():
+                task._update_job()
             return True
         return False
 
@@ -926,42 +941,70 @@ class Scheduler:
 
         error = False
 
-
-
-
-        versions: Dict[str, Optional[str]] = {}
+        with tempfile.TemporaryDirectory(
+                prefix=f"sc_tool_check_{self.project.option.get_jobname()}_") as d:
+            versions: Dict[str, Optional[str]] = {}
 
-
-
-
-
-            continue
+            self.__logger.debug(f"Executing tool checks in: {d}")
+            for (step, index) in self.__flow_runtime.get_nodes():
+                if self.__project.option.scheduler.get_name(step=step, index=index) is not None:
+                    continue
 
-
-
-
-
-
-
-
-
-
-
-
-
+                node = self.__tasks[(step, index)]
+                with node.runtime():
+                    try:
+                        exe = node.get_exe_path()
+                    except TaskExecutableNotReceived:
+                        continue
+                    except TaskExecutableNotFound:
+                        exe = node.task.get("exe")
+                        self.__logger.error(f"Executable for {step}/{index} could not "
+                                            f"be found: {exe}")
+                        error = True
+                        continue
 
-
-
-
-
-
-
-
-
-
-
-
-            finally:
-                os.chdir(cwd)
+                    try:
+                        if exe:
+                            version: Optional[str] = versions.get(exe, None)
+                            version, check = node.check_version(version, workdir=d)
+                            versions[exe] = version
+                            if not check:
+                                self.__logger.error(f"Executable for {step}/{index} did not "
+                                                    "meet version checks")
+                                error = True
+                    except NotImplementedError:
+                        self.__logger.error(f"Unable to process version for {step}/{index}")
 
         return not error
+
+    def __check_collect_files(self) -> bool:
+        """
+        Iterates through all tasks in the scheduler, and checks if the there
+        are files or directories that need to be collected
+
+        Returns:
+            bool: True if there is something to be collected, False otherwise.
+        """
+        do_collect = False
+        for task in self.__tasks.values():
+            if task.mark_copy():
+                do_collect = True
+
+        return do_collect
+
+    def __init_schedulers(self) -> None:
+        """
+        Collect and invoke unique initialization callbacks from all task schedulers.
+
+        This method gathers init functions from all SchedulerNode instances, deduplicates them
+        (since multiple tasks may share the same scheduler class), and invokes each once to
+        perform early validation (e.g., checking Docker/Slurm availability).
+        """
+        self.__logger.debug("Collecting unique scheduler initialization callbacks")
+        init_funcs = set()
+        for step, index in self.__flow_runtime.get_nodes():
+            init_funcs.add(self.__tasks[(step, index)].init)
+
+        for init in sorted(init_funcs, key=lambda func: func.__qualname__):
+            self.__logger.debug(f"Initializing scheduler: {init.__qualname__}")
+            init(self.__project)
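Per its docstring, `__init_schedulers` deduplicates `init` callbacks because many nodes typically share one scheduler class. A standalone sketch of that pattern, assuming `init` is defined at class level (the node classes below are illustrative, not SiliconCompiler's):

```python
# Deduplicate class-level init callbacks shared by many nodes, then run
# each exactly once -- the pattern the __init_schedulers docstring
# describes. Stand-in node classes only.
class LocalNode:
    @classmethod
    def init(cls, project):
        print(f"{cls.__name__}: nothing to validate for {project}")

class SlurmNode(LocalNode):
    @classmethod
    def init(cls, project):
        print(f"{cls.__name__}: verifying slurm is reachable for {project}")

nodes = [LocalNode(), LocalNode(), SlurmNode(), SlurmNode()]

# Equal bound classmethods compare and hash equal, so the set keeps one
# callback per scheduler class even with many nodes.
init_funcs = {type(node).init for node in nodes}
for init in sorted(init_funcs, key=lambda func: func.__qualname__):
    init("demo_project")  # each scheduler's early validation runs once
```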
siliconcompiler/scheduler/schedulernode.py (+138 -22):

@@ -9,7 +9,7 @@ import time
 
 import os.path
 
-from
+from siliconcompiler.utils.multiprocessing import MPQueueHandler as QueueHandler
 
 from typing import List, Optional, Set, Tuple, TYPE_CHECKING
 
@@ -19,7 +19,7 @@ from siliconcompiler.utils.logging import get_console_formatter, SCInRunLoggerFo
 
 from siliconcompiler.package import Resolver
 from siliconcompiler.schema_support.record import RecordTime, RecordTool
-from siliconcompiler.schema import Journal
+from siliconcompiler.schema import Journal, Parameter
 from siliconcompiler.scheduler import send_messages
 from siliconcompiler.utils.paths import workdir, jobdir, collectiondir, cwdir
 
@@ -67,9 +67,10 @@ class SchedulerNode:
     This class encapsulates the state and logic required to run a specific
     step and index, including setting up directories, handling file I/O,
     executing the associated tool, and recording results.
-
     """
 
+    __MAX_LOG_PRINT = 100  # Maximum number of warnings/error to print to log
+
     def __init__(self, project: "Project", step: str, index: str, replay: bool = False):
         """
         Initializes a SchedulerNode.
@@ -100,7 +101,6 @@ class SchedulerNode:
             self.__project.get("option", "fileset")[0],
             "topmodule")
 
-        self.__job: str = self.__project.get('option', 'jobname')
         self.__record_user_info: bool = self.__project.get(
             "option", "track", step=self.__step, index=self.__index)
         self.__pipe = None
@@ -116,24 +116,12 @@ class SchedulerNode:
         self.__enforce_inputfiles = True
         self.__enforce_outputfiles = True
 
+        self._update_job()
+
         flow: str = self.__project.get('option', 'flow')
         self.__is_entry_node: bool = (self.__step, self.__index) in \
             self.__project.get("flowgraph", flow, field="schema").get_entry_nodes()
 
-        self.__cwd = cwdir(self.__project)
-        self.__jobworkdir = jobdir(self.__project)
-        self.__workdir = workdir(self.__project, step=self.__step, index=self.__index)
-        self.__manifests = {
-            "input": os.path.join(self.__workdir, "inputs", f"{self.__name}.pkg.json"),
-            "output": os.path.join(self.__workdir, "outputs", f"{self.__name}.pkg.json")
-        }
-        self.__logs = {
-            "sc": os.path.join(self.__workdir, f"sc_{self.__step}_{self.__index}.log"),
-            "exe": os.path.join(self.__workdir, f"{self.__step}.log")
-        }
-        self.__replay_script = os.path.join(self.__workdir, "replay.sh")
-        self.__collection_path = collectiondir(self.__project)
-
         self.set_queue(None, None)
         self.__setup_schema_access()
 
@@ -258,6 +246,22 @@ class SchedulerNode:
         """Task: The task object associated with this node."""
         return self.__task
 
+    def _update_job(self):
+        self.__job: str = self.__project.get('option', 'jobname')
+        self.__cwd = cwdir(self.__project)
+        self.__jobworkdir = jobdir(self.__project)
+        self.__workdir = workdir(self.__project, step=self.__step, index=self.__index)
+        self.__manifests = {
+            "input": os.path.join(self.__workdir, "inputs", f"{self.__name}.pkg.json"),
+            "output": os.path.join(self.__workdir, "outputs", f"{self.__name}.pkg.json")
+        }
+        self.__logs = {
+            "sc": os.path.join(self.__workdir, f"sc_{self.__step}_{self.__index}.log"),
+            "exe": os.path.join(self.__workdir, f"{self.__step}.log")
+        }
+        self.__replay_script = os.path.join(self.__workdir, "replay.sh")
+        self.__collection_path = collectiondir(self.__project)
+
     def get_manifest(self, input: bool = False) -> str:
         """
         Gets the path to the input or output manifest file for this node.
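`_update_job` centralizes every jobname-derived path, which is what lets the scheduler rename a job and then refresh each node via `task._update_job()` (see the scheduler.py hunk above). A minimal sketch of the same pattern, with a made-up path layout rather than SiliconCompiler's helpers:

```python
import os

# Sketch of the _update_job pattern: derive all per-job paths from the
# current jobname so a later rename can refresh them in one call.
# The build-root layout here is illustrative only.
class NodePaths:
    def __init__(self, build_root, step, index):
        self.build_root = build_root
        self.step, self.index = step, index
        self.update(jobname="job0")

    def update(self, jobname):
        # Everything below depends on jobname, so recompute it together
        self.workdir = os.path.join(self.build_root, jobname, self.step, self.index)
        self.logs = {
            "sc": os.path.join(self.workdir, f"sc_{self.step}_{self.index}.log"),
            "exe": os.path.join(self.workdir, f"{self.step}.log"),
        }

paths = NodePaths("build", "syn", "0")
paths.update(jobname="job1")  # re-derive after a job rename
print(paths.workdir)  # e.g. build/job1/syn/0
```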
@@ -891,7 +895,8 @@ class SchedulerNode:
         with self.__set_env():
             return self.__task.get_exe()
 
-    def check_version(self, version: Optional[str] = None
+    def check_version(self, version: Optional[str] = None,
+                      workdir: Optional[str] = None) -> Tuple[Optional[str], bool]:
         """Checks the version of the tool for this task.
 
         Compares a version string against the tool's requirements. This check
@@ -904,6 +909,8 @@ class SchedulerNode:
         Args:
             version: The version string to check. If None, the task's
                 configured version is fetched and used.
+            workdir: The working directory to use for the version check. If None,
+                the current working directory is used.
 
         Returns:
             A tuple (version_str, check_passed):
@@ -917,7 +924,7 @@ class SchedulerNode:
 
         with self.__set_env():
             if version is None:
-                version = self.__task.get_exe_version()
+                version = self.__task.get_exe_version(workdir=workdir)
 
         check = self.__task.check_exe_version(version)
 
@@ -1156,11 +1163,13 @@ class SchedulerNode:
         if 'errors' in checks:
             ordered_suffixes.append('errors')
 
+        print_paths = {}
         # Looping through patterns for each line
         with sc_open(self.__logs["exe"]) as f:
             line_count = sum(1 for _ in f)
             right_align = len(str(line_count))
             for suffix in ordered_suffixes:
+                print_paths[suffix] = False
                 # Start at the beginning of file again
                 f.seek(0)
                 for num, line in enumerate(f, start=1):
@@ -1169,7 +1178,7 @@ class SchedulerNode:
                         if string is None:
                             break
                         else:
-                            string = utils.grep(self.__project, item, string)
+                            string = utils.grep(self.__project.logger, item, string)
                     if string is not None:
                         matches[suffix] += 1
                         # always print to file
@@ -1177,11 +1186,21 @@ class SchedulerNode:
                         print(line_with_num, file=checks[suffix]['report'])
                         # selectively print to display
                         if checks[suffix]["display"]:
-
+                            if matches[suffix] <= SchedulerNode.__MAX_LOG_PRINT:
+                                checks[suffix]["display"](suffix, line_with_num)
+                            else:
+                                if not print_paths[suffix]:
+                                    checks[suffix]["display"](suffix, "print limit reached")
+                                    print_paths[suffix] = True
 
         for check in checks.values():
             check['report'].close()
 
+        for suffix in ordered_suffixes:
+            if print_paths[suffix]:
+                self.logger.info(f"All {suffix} can be viewed at: "
+                                 f"{os.path.abspath(f'{self.__step}.{suffix}')}")
+
         for metric in ("errors", "warnings"):
             if metric in matches:
                 value = self.__metrics.get(metric, step=self.__step, index=self.__index)
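The cap added here echoes at most `__MAX_LOG_PRINT` matching lines per suffix to the display, emits a single "print limit reached" notice, and afterwards points at the full report file. A reduced sketch of that flow, with fabricated sample lines:

```python
# Reduced sketch of the capped-echo logic: print the first MAX matches,
# then one notice, while every match still lands in the report file.
MAX_LOG_PRINT = 3  # the real class uses 100
lines = [f"ERROR: issue {n}" for n in range(10)]  # made-up log content

matches = 0
limit_hit = False
for line in lines:
    matches += 1
    if matches <= MAX_LOG_PRINT:
        print(line)  # echoed to the display
    elif not limit_hit:
        print("print limit reached")
        limit_hit = True

if limit_hit:
    print(f"All {matches} matches remain available in the report file")
```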
@@ -1370,3 +1389,100 @@ class SchedulerNode:
         for logfile in self.__logs.values():
             if os.path.isfile(logfile):
                 tar.add(logfile, arcname=arcname(logfile))
+
+    def get_required_keys(self) -> Set[Tuple[str, ...]]:
+        """
+        This function walks through the 'require' keys and returns the
+        keys.
+        """
+        path_keys = set()
+        with self.runtime():
+            task = self.task
+            for key in task.get('require'):
+                path_keys.add(tuple(key.split(",")))
+            if task.has_prescript():
+                path_keys.add((*task._keypath, "prescript"))
+            if task.has_postscript():
+                path_keys.add((*task._keypath, "postscript"))
+            if task.get("refdir"):
+                path_keys.add((*task._keypath, "refdir"))
+            if task.get("script"):
+                path_keys.add((*task._keypath, "script"))
+            if task.get("exe"):
+                path_keys.add((*task._keypath, "exe"))
+
+        return path_keys
+
+    def get_required_path_keys(self) -> Set[Tuple[str, ...]]:
+        """
+        This function walks through the 'require' keys and returns the
+        keys that are of type path (file/dir).
+        """
+        path_keys = set()
+        for key in self.get_required_keys():
+            try:
+                param_type: str = self.__project.get(*key, field="type")
+                if "file" in param_type or "dir" in param_type:
+                    path_keys.add(key)
+            except KeyError:
+                # Key does not exist
+                pass
+
+        return path_keys
+
+    def mark_copy(self) -> bool:
+        """Marks files from the 'require' path keys for copying."""
+        return False
+
+    def check_required_values(self) -> bool:
+        requires = self.get_required_keys()
+
+        error = False
+        for key in sorted(requires):
+            if not self.__project.valid(*key):
+                self.logger.error(f'Cannot resolve required keypath [{",".join(key)}] '
+                                  f'for {self.step}/{self.index}.')
+                error = True
+                continue
+
+            param: Parameter = self.__project.get(*key, field=None)
+            check_step, check_index = self.step, self.index
+            if param.get(field='pernode').is_never():
+                check_step, check_index = None, None
+
+            if not param.has_value(step=check_step, index=check_index):
+                self.logger.error('No value set for required keypath '
+                                  f'[{",".join(key)}] for {self.step}/{self.index}.')
+                error = True
+                continue
+        return not error
+
+    def check_required_paths(self) -> bool:
+        if self.__project.option.get_remote():
+            return True
+
+        requires = self.get_required_path_keys()
+
+        error = False
+        for key in sorted(requires):
+            param: Parameter = self.__project.get(*key, field=None)
+            check_step, check_index = self.step, self.index
+            if param.get(field='pernode').is_never():
+                check_step, check_index = None, None
+
+            abspath = self.__project.find_files(*key,
+                                                missing_ok=True,
+                                                step=check_step, index=check_index)
+
+            unresolved_paths = param.get(step=check_step, index=check_index)
+            if not isinstance(abspath, list):
+                abspath = [abspath]
+                unresolved_paths = [unresolved_paths]
+
+            for path, setpath in zip(abspath, unresolved_paths):
+                if path is None:
+                    self.logger.error(f'Cannot resolve path {setpath} in '
+                                      f'required file keypath [{",".join(key)}] '
+                                      f'for {self.step}/{self.index}.')
+                    error = True
+        return not error