siliconcompiler 0.35.2__py3-none-any.whl → 0.35.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- siliconcompiler/_metadata.py +1 -1
- siliconcompiler/apps/smake.py +106 -100
- siliconcompiler/flowgraph.py +418 -129
- siliconcompiler/library.py +5 -4
- siliconcompiler/package/https.py +10 -5
- siliconcompiler/project.py +83 -32
- siliconcompiler/remote/client.py +17 -6
- siliconcompiler/scheduler/scheduler.py +204 -55
- siliconcompiler/scheduler/schedulernode.py +63 -70
- siliconcompiler/schema/__init__.py +3 -2
- siliconcompiler/schema/_metadata.py +1 -1
- siliconcompiler/schema/baseschema.py +205 -93
- siliconcompiler/schema/namedschema.py +21 -13
- siliconcompiler/schema/safeschema.py +18 -7
- siliconcompiler/schema_support/dependencyschema.py +4 -3
- siliconcompiler/schema_support/pathschema.py +7 -2
- siliconcompiler/schema_support/record.py +5 -4
- siliconcompiler/targets/asap7_demo.py +4 -1
- siliconcompiler/tool.py +9 -4
- siliconcompiler/tools/builtin/__init__.py +2 -0
- siliconcompiler/tools/builtin/filter.py +8 -1
- siliconcompiler/tools/builtin/importfiles.py +2 -0
- siliconcompiler/tools/klayout/scripts/klayout_show.py +1 -1
- siliconcompiler/tools/klayout/show.py +17 -5
- siliconcompiler/tools/yosys/prepareLib.py +7 -2
- siliconcompiler/tools/yosys/syn_asic.py +20 -2
- siliconcompiler/toolscripts/_tools.json +4 -4
- {siliconcompiler-0.35.2.dist-info → siliconcompiler-0.35.3.dist-info}/METADATA +2 -2
- {siliconcompiler-0.35.2.dist-info → siliconcompiler-0.35.3.dist-info}/RECORD +33 -33
- {siliconcompiler-0.35.2.dist-info → siliconcompiler-0.35.3.dist-info}/WHEEL +0 -0
- {siliconcompiler-0.35.2.dist-info → siliconcompiler-0.35.3.dist-info}/entry_points.txt +0 -0
- {siliconcompiler-0.35.2.dist-info → siliconcompiler-0.35.3.dist-info}/licenses/LICENSE +0 -0
- {siliconcompiler-0.35.2.dist-info → siliconcompiler-0.35.3.dist-info}/top_level.txt +0 -0
siliconcompiler/library.py
CHANGED

@@ -7,7 +7,7 @@ from siliconcompiler.schema_support.filesetschema import FileSetSchema
 from siliconcompiler.schema_support.pathschema import PathSchema
 from siliconcompiler.schema import NamedSchema, BaseSchema
 
-from siliconcompiler.schema import EditableSchema, Parameter, Scope, PerNode
+from siliconcompiler.schema import EditableSchema, Parameter, Scope, PerNode, LazyLoad
 from siliconcompiler.schema.utils import trim
 
 
@@ -99,7 +99,8 @@ class ToolLibrarySchema(LibrarySchema):
 
     def _from_dict(self, manifest: Dict,
                    keypath: Union[List[str], Tuple[str, ...]],
-                   version: Optional[Tuple[int, ...]] = None) \
+                   version: Optional[Tuple[int, ...]] = None,
+                   lazyload: LazyLoad = LazyLoad.ON) \
            -> Tuple[Set[Tuple[str, ...]], Set[Tuple[str, ...]]]:
         """
         Constructs a schema from a dictionary.
@@ -112,7 +113,7 @@ class ToolLibrarySchema(LibrarySchema):
        Returns:
            dict: The constructed dictionary.
        """
-        if "tool" in manifest:
+        if not lazyload.is_enforced and "tool" in manifest:
            # collect tool keys
            tool_keys = self.allkeys("tool")
 
@@ -136,7 +137,7 @@ class ToolLibrarySchema(LibrarySchema):
            if not manifest["tool"]:
                del manifest["tool"]
 
-        return super()._from_dict(manifest, keypath, version)
+        return super()._from_dict(manifest, keypath, version=version, lazyload=lazyload)
 
    def _generate_doc(self, doc,
                      ref_root: str = "",
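Note: the pattern here threads a lazyload flag through _from_dict() so that expensive sub-tree handling (the "tool" keys) is skipped while lazy loading is enforced. A minimal sketch of the idea, with a hypothetical LazyLoad enum and a toy class standing in for the real BaseSchema machinery:

from enum import Enum

class LazyLoad(Enum):
    # Hypothetical stand-in for siliconcompiler.schema.LazyLoad
    OFF = "off"
    ON = "on"
    ENFORCE = "enforce"

    @property
    def is_enforced(self) -> bool:
        return self is LazyLoad.ENFORCE

class ToySchema:
    def _from_dict(self, manifest: dict, lazyload: LazyLoad = LazyLoad.ON) -> None:
        if not lazyload.is_enforced and "tool" in manifest:
            # Eager path: materialize the tool sub-tree immediately
            self.tools = dict(manifest["tool"])
        # When lazy loading is enforced, the "tool" keys stay as raw
        # dictionary data until something actually asks for them.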
siliconcompiler/package/https.py
CHANGED

@@ -114,11 +114,16 @@ class HTTPResolver(RemoteResolver):
        except tarfile.ReadError:
            fileobj.seek(0)
            try:
-                with zipfile.ZipFile(fileobj) as zip_ref:
-                    zip_ref.extractall(path=self.cache_path)
-            except zipfile.BadZipFile:
-                raise TypeError(f"Could not extract file from {data_url}. "
-                                "File is not a valid tar.gz or zip archive.")
+                with tarfile.open(fileobj=fileobj, mode='r:bz2') as tar_ref:
+                    tar_ref.extractall(path=self.cache_path)
+            except tarfile.ReadError:
+                fileobj.seek(0)
+                try:
+                    with zipfile.ZipFile(fileobj) as zip_ref:
+                        zip_ref.extractall(path=self.cache_path)
+                except zipfile.BadZipFile:
+                    raise TypeError(f"Could not extract file from {data_url}. "
+                                    "File is not a valid tar.gz or zip archive.")
 
        # --- GitHub-specific directory flattening ---
        # GitHub archives often have a single top-level directory like 'repo-v1.0'.
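Note: the change inserts a bzip2 tar attempt between the existing gzip and zip fallbacks, rewinding the buffer before each retry. A self-contained sketch of the same chain; the initial 'r:gz' attempt and the function shape are assumptions, since the visible context starts at the first except clause:

import io
import tarfile
import zipfile

def extract_archive(fileobj: io.BytesIO, cache_path: str, data_url: str) -> None:
    # Try gzip tar, then bzip2 tar, then zip; rewind between attempts.
    try:
        with tarfile.open(fileobj=fileobj, mode='r:gz') as tar_ref:
            tar_ref.extractall(path=cache_path)
    except tarfile.ReadError:
        fileobj.seek(0)
        try:
            with tarfile.open(fileobj=fileobj, mode='r:bz2') as tar_ref:
                tar_ref.extractall(path=cache_path)
        except tarfile.ReadError:
            fileobj.seek(0)
            try:
                with zipfile.ZipFile(fileobj) as zip_ref:
                    zip_ref.extractall(path=cache_path)
            except zipfile.BadZipFile:
                raise TypeError(f"Could not extract file from {data_url}. "
                                "File is not a valid tar.gz or zip archive.")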
siliconcompiler/project.py
CHANGED

@@ -5,10 +5,11 @@ import uuid
 
 import os.path
 
-from typing import Union, List, Tuple, TextIO, Optional
+from typing import Union, List, Tuple, TextIO, Optional, Dict, Set
 
 from siliconcompiler.schema import BaseSchema, NamedSchema, EditableSchema, Parameter, Scope, \
-    __version__ as schema_version
+    __version__ as schema_version, \
+    LazyLoad
 
 from siliconcompiler import Design
 from siliconcompiler import Flowgraph
@@ -107,7 +108,7 @@ class Project(PathSchemaBase, CommandLineSchema, BaseSchema):
            is not intended for external use."""))
 
        schema.insert("checklist", "default", Checklist())
-        schema.insert("library",
+        schema.insert("library", _ProjectLibrary())
        schema.insert("flowgraph", "default", Flowgraph())
        schema.insert("metric", MetricSchema())
        schema.insert("record", RecordSchema())
@@ -289,26 +290,11 @@ class Project(PathSchemaBase, CommandLineSchema, BaseSchema):
        """
        return Project.__name__
 
-    def
-
-
-
-
-
-        Args:
-            obj (DependencySchema, optional): An optional dependency object to
-                reset and populate. If None, all existing library dependencies
-                in the project are processed. Defaults to None.
-        """
-        if obj:
-            obj._reset_deps()
-        dep_map = {name: self.get("library", name, field="schema")
-                   for name in self.getkeys("library")}
-        for obj in dep_map.values():
-            if isinstance(obj, DependencySchema):
-                obj._populate_deps(dep_map)
-
-    def _from_dict(self, manifest, keypath, version=None):
+    def _from_dict(self, manifest: Dict,
+                   keypath: Union[List[str], Tuple[str, ...]],
+                   version: Optional[Tuple[int, ...]] = None,
+                   lazyload: LazyLoad = LazyLoad.ON) \
+            -> Tuple[Set[Tuple[str, ...]], Set[Tuple[str, ...]]]:
        """
        Populates the project's schema from a dictionary representation.
 
@@ -325,15 +311,13 @@ class Project(PathSchemaBase, CommandLineSchema, BaseSchema):
        Returns:
            Any: The result of the superclass's `_from_dict` method.
        """
-        ret = super()._from_dict(manifest, keypath, version)
-
-        # Restore dependencies
-        self.__populate_deps()
+        ret = super()._from_dict(manifest, keypath, version=version, lazyload=lazyload)
 
-
-
-
-
+        if not lazyload.is_enforced:
+            # Preserve logger in history
+            for history in self.getkeys("history"):
+                hist: "Project" = self.get("history", history, field="schema")
+                hist.__logger = self.__logger
 
        return ret
 
@@ -392,7 +376,7 @@ class Project(PathSchemaBase, CommandLineSchema, BaseSchema):
            self.add_dep(dep)
 
        # Rebuild dependencies to ensure instances are correct
-        self.
+        self.get("library", field="schema")._populate_deps(obj)
 
    def __import_flow(self, flow: Flowgraph):
        """
@@ -664,6 +648,9 @@ class Project(PathSchemaBase, CommandLineSchema, BaseSchema):
        # Pass along manager address
        state["__manager__"] = MPManager._get_manager_address()
 
+        # Pass along logger level
+        state["__loglevel__"] = self.logger.level
+
        return state
 
    def __setstate__(self, state):
@@ -676,6 +663,10 @@ class Project(PathSchemaBase, CommandLineSchema, BaseSchema):
        Args:
            state (dict): The deserialized state of the object.
        """
+        # Retrieve log level
+        loglevel = state["__loglevel__"]
+        del state["__loglevel__"]
+
        # Retrieve manager address
        MPManager._set_manager_address(state["__manager__"])
        del state["__manager__"]
@@ -684,6 +675,7 @@ class Project(PathSchemaBase, CommandLineSchema, BaseSchema):
 
        # Reinitialize logger on restore
        self.__init_logger()
+        self.logger.setLevel(loglevel)
 
        # Restore callbacks
        self.__init_option_callbacks()
@@ -1225,6 +1217,7 @@ class Project(PathSchemaBase, CommandLineSchema, BaseSchema):
        # Setup options:
        for option, value in [
                ("track", False),
+                ("remote", False),
                ("hash", False),
                ("nodisplay", False),
                ("continue", True),
@@ -1276,3 +1269,61 @@ class Lint(Project):
    @classmethod
    def _getdict_type(cls) -> str:
        return Lint.__name__
+
+
+class _ProjectLibrary(BaseSchema):
+    def _from_dict(self, manifest: Dict,
+                   keypath: Union[List[str], Tuple[str, ...]],
+                   version: Optional[Tuple[int, ...]] = None,
+                   lazyload: LazyLoad = LazyLoad.ON) \
+            -> Tuple[Set[Tuple[str, ...]], Set[Tuple[str, ...]]]:
+        """
+        Populates the project's schema from a dictionary representation.
+
+        This method is typically used during deserialization or when loading
+        a project state from a manifest. After loading the data, it ensures
+        that internal dependencies are correctly re-established.
+
+        Args:
+            manifest (dict): The dictionary containing the schema data.
+            keypath (list): The current keypath being processed (used internally
+                for recursive loading).
+            version (str, optional): The schema version of the manifest. Defaults to None.
+
+        Returns:
+            Any: The result of the superclass's `_from_dict` method.
+        """
+        ret = super()._from_dict(manifest, keypath, version=version, lazyload=lazyload)
+
+        if not lazyload.is_enforced:
+            # Restore dependencies
+            self._populate_deps(complete=True)
+
+        return ret
+
+    def _populate_deps(self, obj: Optional[DependencySchema] = None, complete: bool = False):
+        """
+        Ensures that all loaded dependencies (like libraries) within the project
+        contain correct internal pointers back to the project's libraries.
+        This is crucial for maintaining a consistent and navigable schema graph.
+
+        Args:
+            obj (DependencySchema, optional): An optional dependency object to
+                reset and populate. If None, all existing library dependencies
+                in the project are processed. Defaults to None.
+            complete (bool, optional): If True, performs a full reset of all
+                DependencySchema objects before populating dependencies. This
+                ensures a clean state during manifest deserialization. Defaults to False.
+        """
+        if obj:
+            obj._reset_deps()
+        dep_map = {name: self.get(name, field="schema") for name in self.getkeys()}
+
+        if complete:
+            for obj in dep_map.values():
+                if isinstance(obj, DependencySchema):
+                    obj._reset_deps()
+
+        for obj in dep_map.values():
+            if isinstance(obj, DependencySchema):
+                obj._populate_deps(dep_map)
siliconcompiler/remote/client.py
CHANGED

@@ -18,7 +18,8 @@ from siliconcompiler import NodeStatus as SCNodeStatus
 from siliconcompiler._metadata import default_server
 from siliconcompiler.flowgraph import RuntimeFlowgraph
 from siliconcompiler.scheduler import Scheduler
-from siliconcompiler.schema import Journal
+from siliconcompiler.schema import Journal, Parameter
+from siliconcompiler.package import PythonPathResolver, FileResolver, KeyPathResolver
 
 from siliconcompiler.utils.logging import get_console_formatter
 from siliconcompiler.utils.curation import collect
@@ -573,19 +574,29 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
 
        # Ensure dataroots with python sources are copied
        for key in self.__project.allkeys():
-
+            if key[0] == "history":
+                continue
+
+            param: Parameter = self.__project.get(*key, field=None)
+            key_type: str = param.get(field="type")
 
            if 'dir' in key_type or 'file' in key_type:
-
-
-
+                schema_obj = self.__project.get(*key[:-1], field="schema")
+                dataroot_objs = schema_obj._find_files_dataroot_resolvers(True)
+
+                for value, step, index in param.getvalues():
+                    if not value:
+                        continue
+                    dataroots = param.get(field='dataroot', step=step, index=index)
                    if not isinstance(dataroots, list):
                        dataroots = [dataroots]
                    force_copy = False
                    for dataroot in dataroots:
                        if not dataroot:
                            continue
-
+                        dataroot_resolver = dataroot_objs.get(dataroot, None)
+                        if isinstance(dataroot_resolver,
+                                      (PythonPathResolver, FileResolver, KeyPathResolver)):
                            force_copy = True
                    if force_copy:
                        self.__project.set(*key, True, field='copy', step=step, index=index)
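Note: before submitting a remote job, the client now walks every file/dir parameter, looks up the resolver behind each dataroot, and forces copy=True only when the data comes from a purely local source that the remote server could not fetch on its own. A rough sketch of that decision; LocalResolver, RemoteGitResolver, and lookup_resolver are hypothetical stand-ins for the real resolver classes and lookup:

# Hypothetical stand-ins for PythonPathResolver/FileResolver/KeyPathResolver
class LocalResolver: ...
class RemoteGitResolver: ...

def needs_copy(dataroots, lookup_resolver) -> bool:
    """Return True if any dataroot is backed by a purely local source."""
    for dataroot in dataroots:
        if not dataroot:
            continue
        resolver = lookup_resolver(dataroot)
        if isinstance(resolver, LocalResolver):
            # The server cannot re-resolve a local path; ship the files along.
            return True
    return False

resolvers = {"mylib": LocalResolver(), "stdcells": RemoteGitResolver()}
assert needs_copy(["mylib", "stdcells"], resolvers.get) is True
assert needs_copy(["stdcells"], resolvers.get) is False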
siliconcompiler/scheduler/scheduler.py
CHANGED

@@ -1,5 +1,6 @@
 import io
 import logging
+import multiprocessing
 import os
 import re
 import shutil
@@ -9,7 +10,9 @@ import traceback
 
 import os.path
 
-from
+from datetime import datetime
+
+from typing import Union, Dict, Optional, Tuple, List, TYPE_CHECKING
 
 from siliconcompiler import NodeStatus
 from siliconcompiler.schema import Journal
@@ -18,7 +21,7 @@ from siliconcompiler.scheduler import SchedulerNode
 from siliconcompiler.scheduler import SlurmSchedulerNode
 from siliconcompiler.scheduler import DockerSchedulerNode
 from siliconcompiler.scheduler import TaskScheduler
-from siliconcompiler.scheduler.schedulernode import SchedulerFlowReset
+from siliconcompiler.scheduler.schedulernode import SchedulerFlowReset, SchedulerNodeReset
 from siliconcompiler.tool import TaskExecutableNotFound, TaskExecutableNotReceived
 
 from siliconcompiler import utils
@@ -58,7 +61,7 @@ class Scheduler:
            SCRuntimeError: If the specified flow is not defined or fails validation.
        """
        self.__project = project
-        self.__logger: logging.Logger = project.logger
+        self.__logger: logging.Logger = project.logger.getChild("scheduler")
        self.__name = project.name
 
        flow = self.__project.get("option", "flow")
@@ -97,13 +100,20 @@ class Scheduler:
        self.__record: "RecordSchema" = self.__project.get("record", field="schema")
        self.__metrics: "MetricSchema" = self.__project.get("metric", field="schema")
 
-        self.__tasks = {}
+        self.__tasks: Dict[Tuple[str, str], SchedulerNode] = {}
 
        # Create dummy handler
        self.__joblog_handler = logging.NullHandler()
        self.__org_job_name = self.__project.get("option", "jobname")
        self.__logfile = None
 
+    @property
+    def manifest(self) -> str:
+        """
+        Returns the path to the job manifest
+        """
+        return os.path.join(jobdir(self.__project), f"{self.__name}.pkg.json")
+
    @property
    def log(self) -> Union[None, str]:
        """
@@ -131,7 +141,7 @@ class Scheduler:
        Args:
            header (str): A header message to print before the status list.
        """
-        self.__logger.debug(f"#### {header}")
+        self.__logger.debug(f"#### {header} : {datetime.now().strftime('%H:%M:%S')}")
        for step, index in self.__flow.get_nodes():
            self.__logger.debug(f"({step}, {index}) -> "
                                f"{self.__record.get('status', step=step, index=index)}")
@@ -284,8 +294,7 @@ class Scheduler:
                self.__project._record_history()
 
                # Record final manifest
-
-                self.__project.write_manifest(filepath)
+                self.__project.write_manifest(self.manifest)
 
                send_messages.send(self.__project, 'summary', None, None)
            finally:
@@ -372,6 +381,8 @@ class Scheduler:
        nodes = self.__flow_runtime.get_nodes()
        error = False
 
+        manifest_name = os.path.basename(self.manifest)
+
        for (step, index) in nodes:
            # Get files we receive from input nodes.
            in_nodes = self.__flow_runtime.get_node_inputs(step, index, record=self.__record)
@@ -396,9 +407,7 @@ class Scheduler:
                    inputs = []
                    continue
 
-
-                manifest = f'{design}.pkg.json'
-                inputs = [inp for inp in os.listdir(in_step_out_dir) if inp != manifest]
+                inputs = [inp for inp in os.listdir(in_step_out_dir) if inp != manifest_name]
            else:
                in_tool = self.__flow.get(in_step, in_index, "tool")
                in_task = self.__flow.get(in_step, in_index, "task")
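Note: several call sites previously rebuilt the <jobdir>/<design>.pkg.json path by hand; the new manifest property above gives them one source of truth. A minimal sketch of the refactor, with jobdir() and MiniScheduler as illustrative stand-ins for the real helpers:

import os
from types import SimpleNamespace

def jobdir(project) -> str:
    # Stand-in for siliconcompiler's jobdir() helper
    return os.path.join("build", project.name, "job0")

class MiniScheduler:
    def __init__(self, project) -> None:
        self.__project = project
        self.__name = project.name

    @property
    def manifest(self) -> str:
        """Single source of truth for the job manifest path."""
        return os.path.join(jobdir(self.__project), f"{self.__name}.pkg.json")

proj = SimpleNamespace(name="heartbeat")
sched = MiniScheduler(proj)
print(sched.manifest)  # build/heartbeat/job0/heartbeat.pkg.json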
@@ -582,32 +591,23 @@ class Scheduler:
            with self.__tasks[(step, index)].runtime():
                self.__tasks[(step, index)].clean_directory()
 
-    def
-        """
-        Prepare and configure all flow nodes before execution, including loading prior run state,
-        running per-node setup, and marking nodes that require rerun.
+    def __configure_collect_previous_information(self) -> Dict[Tuple[str, str], "Project"]:
+        """Collects information from previous runs for nodes that won't be re-executed.
 
-        This method
-
-
-
-
-
-
-
-        as pending.
-        - Persists the resulting manifest for the current job before returning.
+        This method identifies nodes that are marked for loading (not cleaning) and
+        are not part of the current 'from' execution path. For each of these
+        nodes, it attempts to load its manifest from a previous run.
+
+        Returns:
+            Dict[Tuple[str, str], "Project"]: A dictionary mapping (step, index)
+                tuples to their corresponding loaded Project objects from
+                previous runs.
        """
        from siliconcompiler import Project
+        self.__print_status("Start - collect")
 
-        from_nodes = []
        extra_setup_nodes = {}
-
-        journal = Journal.access(self.__project)
-        journal.start()
-
-        self.__print_status("Start")
-
+        from_nodes = []
        if self.__project.get('option', 'clean'):
            if self.__project.get("option", "from"):
                from_nodes = self.__flow_runtime.get_entry_nodes()
@@ -626,16 +626,37 @@ class Scheduler:
                    # Node will be run so no need to load
                    continue
 
-                manifest =
-                                        'outputs',
-                                        f'{self.__name}.pkg.json')
+                manifest = self.__tasks[(step, index)].get_manifest()
                if os.path.exists(manifest):
                    # ensure we setup these nodes again
                    try:
                        extra_setup_nodes[(step, index)] = Project.from_manifest(filepath=manifest)
-                    except Exception:
+                    except Exception as e:
+                        self.__logger.debug(f"Reading {manifest} caused: {e}")
                        pass
 
+        self.__print_status("End - collect")
+
+        return extra_setup_nodes
+
+    def __configure_run_setup(self, extra_setup_nodes: Dict[Tuple[str, str], "Project"]) -> None:
+        """Runs the setup() method for all flow nodes and forwards previous status.
+
+        This method iterates through all nodes in execution order and calls
+        their respective `setup()` methods.
+
+        It also uses the `extra_setup_nodes` to:
+        1. Prune nodes from `extra_setup_nodes` if their `setup()` method
+           returns False (indicating the node is no longer valid).
+        2. Forward the 'status' from a valid, previously-run node (found in
+           `extra_setup_nodes`) into the current job's records.
+
+        Args:
+            extra_setup_nodes (Dict[Tuple[str, str], "Project"]): A dictionary
+                of loaded Project objects from previous runs. This dictionary
+                may be modified in-place (nodes may be removed).
+        """
+        self.__print_status("Start - setup")
        # Setup tools for all nodes to run
        for layer_nodes in self.__flow.get_execution_order():
            for step, index in layer_nodes:
@@ -655,28 +676,155 @@ class Scheduler:
                    if node_status:
                        # Forward old status
                        self.__record.set('status', node_status, step=step, index=index)
+        self.__print_status("End - setup")
+
+    @staticmethod
+    def _configure_run_required(task: SchedulerNode) \
+            -> Optional[Union[SchedulerFlowReset, SchedulerNodeReset]]:
+        """
+        Helper method to run requires_run() with threads.
+        """
+        with task.runtime():
+            try:
+                task.requires_run()
+            except (SchedulerFlowReset, SchedulerNodeReset) as e:
+                return e
+        return None
+
+    def __configure_check_run_required(self) -> List[Tuple[str, str]]:
+        """Checks which nodes require a re-run and which can be replayed.
 
-
+        This method iterates through all nodes that are currently marked as
+        'SUCCESS' (typically from a previous run). It calls `requires_run()`
+        on each to determine if inputs, parameters, or other dependencies
+        have changed.
+
+        - If `requires_run()` is True, the node is marked as 'pending' (and
+          will be re-executed).
+        - If `requires_run()` is False, the node is added to the 'replay' list,
+          indicating its previous results can be reused.
+
+        Returns:
+            List[Tuple[str, str]]: A list of (step, index) tuples for nodes
+                that do *not* require a re-run and whose results can be
+                replayed from the journal.
+        """
+        self.__print_status("Start - check")
+
+        replay: List[Tuple[str, str]] = []
+
+        nodes: List[Tuple[str, str]] = []
+
+        def filter_nodes(nodes: List[Tuple[str, str]]) -> None:
+            for step, index in tuple(nodes):
+                # Only look at successful nodes
+                if self.__record.get("status", step=step, index=index) != NodeStatus.SUCCESS:
+                    nodes.remove((step, index))
+
+        def create_node_group(nodes: List[Tuple[str, str]], size: int) -> List[Tuple[str, str]]:
+            group = []
+            for _ in range(size):
+                if nodes:
+                    group.append(nodes.pop(0))
+            return group
+
+        # Collect initial list of nodes to process
+        for layer_nodes in self.__flow.get_execution_order():
+            nodes.extend(layer_nodes)
+
+        # Determine pool size
+        cores = utils.get_cores()
+        pool_size = self.project.option.scheduler.get_maxthreads() or cores
+        pool_size = max(1, min(cores, pool_size))
+
+        # Limit based on number of nodes if less than number of cores
+        filter_nodes(nodes)
+        if not nodes:
+            # No nodes left so just return
+            return []
+
+        pool_size = min(pool_size, len(nodes))
+
+        self.__logger.debug(f"Check pool size: {pool_size}")
+
+        # Call this in case this was invoked without __main__
+        multiprocessing.freeze_support()
+
+        with multiprocessing.get_context("spawn").Pool(pool_size) as pool:
+            while True:
+                # Filter nodes
+                filter_nodes(nodes)
+
+                # Generate a group of nodes to run
+                group = create_node_group(nodes, pool_size)
+                self.__logger.debug(f"Group to check: {group}")
+                if not group:
+                    # Group is empty
+                    break
+
+                tasks = [self.__tasks[(step, index)] for step, index in group]
+                # Suppress excess info messages during checks
+                cur_level = self.project.logger.level
+                self.project.logger.setLevel(logging.WARNING)
+                try:
+                    runcheck = pool.map(Scheduler._configure_run_required, tasks)
+                finally:
+                    self.project.logger.setLevel(cur_level)
+
+                for node, runrequired in zip(group, runcheck):
+                    if self.__record.get("status", step=node[0], index=node[1]) != \
+                            NodeStatus.SUCCESS:
+                        continue
+
+                    self.__logger.debug(f"  Result: {node} -> {runrequired}")
+
+                    if runrequired is not None:
+                        runrequired.log(self.__logger)
+
+                        if isinstance(runrequired, SchedulerFlowReset):
+                            raise runrequired from None
+
+                        # This node must be run
+                        self.__mark_pending(*node)
+                    else:
+                        # import old information
+                        replay.append(node)
+
+        self.__print_status("End - check")
+
+        return replay
+
+    def configure_nodes(self) -> None:
+        """
+        Prepare and configure all flow nodes before execution, including loading prior run state,
+        running per-node setup, and marking nodes that require rerun.
+
+        This method:
+        - Loads available node manifests from previous jobs and uses them to populate setup data
+          where appropriate.
+        - Runs each node's setup routine to initialize tools and runtime state.
+        - For nodes whose parameters or inputs have changed, marks them and all downstream nodes
+          as pending so they will be re-executed.
+        - Replays preserved journaled results for nodes that remain valid to reuse previous outputs.
+        - On a SchedulerFlowReset, forces a full build-directory recheck and marks every node
+          as pending.
+        - Persists the resulting manifest for the current job before returning.
+        """
+        journal = Journal.access(self.__project)
+        journal.start()
+
+        extra_setup_nodes = self.__configure_collect_previous_information()
+
+        self.__configure_run_setup(extra_setup_nodes)
 
        # Check for modified information
        try:
-            replay =
-            for layer_nodes in self.__flow.get_execution_order():
-                for step, index in layer_nodes:
-                    # Only look at successful nodes
-                    if self.__record.get("status", step=step, index=index) != NodeStatus.SUCCESS:
-                        continue
+            replay = self.__configure_check_run_required()
 
-                    with self.__tasks[(step, index)].runtime():
-                        if self.__tasks[(step, index)].requires_run():
-                            # This node must be run
-                            self.__mark_pending(step, index)
-                        elif (step, index) in extra_setup_nodes:
-                            # import old information
-                            replay.append((step, index))
            # Replay previous information
            for step, index in replay:
-
+                if (step, index) in extra_setup_nodes:
+                    Journal.access(extra_setup_nodes[(step, index)]).replay(self.__project)
        except SchedulerFlowReset:
            # Mark all nodes as pending
            self.__clean_build_dir_full(recheck=True)
@@ -684,7 +832,7 @@ class Scheduler:
        for step, index in self.__flow.get_nodes():
            self.__mark_pending(step, index)
 
-        self.__print_status("
+        self.__print_status("Before ensure")
 
        # Ensure all nodes are marked as pending if needed
        for layer_nodes in self.__flow_runtime.get_execution_order():
@@ -693,11 +841,12 @@ class Scheduler:
                if NodeStatus.is_waiting(status) or NodeStatus.is_error(status):
                    self.__mark_pending(step, index)
 
-        self.__print_status("
+        self.__print_status("FINAL")
+
+        # Write configured manifest
+        os.makedirs(os.path.dirname(self.manifest), exist_ok=True)
+        self.__project.write_manifest(self.manifest)
 
-        os.makedirs(jobdir(self.__project), exist_ok=True)
-        self.__project.write_manifest(os.path.join(jobdir(self.__project),
-                                                   f"{self.__name}.pkg.json"))
        journal.stop()
 
    def __check_display(self) -> None:
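Note: the requires_run() checks are now fanned out over a spawn-based process pool in fixed-size groups instead of running serially, and reset exceptions are returned as values rather than raised so pool.map() can complete; the parent then logs or re-raises them. A simplified, runnable sketch of that pattern; FlowReset and check_node are illustrative stand-ins for SchedulerFlowReset and the per-node check:

import multiprocessing

class FlowReset(Exception):
    """Stand-in for SchedulerFlowReset."""

def check_node(node: str):
    # Return, rather than raise, so pool.map() always completes.
    try:
        if node == "syn":  # pretend this node's inputs changed
            raise FlowReset(node)
        return None        # no re-run required
    except FlowReset as e:
        return e

if __name__ == "__main__":
    multiprocessing.freeze_support()
    nodes = ["import", "syn", "place", "route"]
    with multiprocessing.get_context("spawn").Pool(2) as pool:
        results = pool.map(check_node, nodes)
    for node, result in zip(nodes, results):
        if isinstance(result, FlowReset):
            print(f"{node}: reset requested")  # parent decides what to raise
        else:
            print(f"{node}: up to date")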