siliconcompiler 0.34.0__py3-none-any.whl → 0.34.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- siliconcompiler/_metadata.py +1 -1
- siliconcompiler/apps/_common.py +1 -1
- siliconcompiler/apps/sc.py +1 -1
- siliconcompiler/apps/sc_issue.py +1 -1
- siliconcompiler/apps/sc_remote.py +3 -3
- siliconcompiler/apps/sc_show.py +2 -2
- siliconcompiler/apps/utils/replay.py +4 -4
- siliconcompiler/checklist.py +203 -2
- siliconcompiler/core.py +28 -246
- siliconcompiler/data/templates/email/general.j2 +3 -3
- siliconcompiler/data/templates/email/summary.j2 +1 -1
- siliconcompiler/data/templates/issue/README.txt +1 -1
- siliconcompiler/data/templates/report/sc_report.j2 +7 -7
- siliconcompiler/design.py +148 -54
- siliconcompiler/flowgraph.py +50 -15
- siliconcompiler/optimizer/vizier.py +2 -2
- siliconcompiler/pdk.py +5 -5
- siliconcompiler/remote/client.py +18 -12
- siliconcompiler/remote/server.py +2 -2
- siliconcompiler/report/dashboard/cli/__init__.py +6 -6
- siliconcompiler/report/dashboard/cli/board.py +3 -3
- siliconcompiler/report/dashboard/web/components/__init__.py +5 -5
- siliconcompiler/report/dashboard/web/components/flowgraph.py +4 -4
- siliconcompiler/report/dashboard/web/components/graph.py +2 -2
- siliconcompiler/report/dashboard/web/state.py +1 -1
- siliconcompiler/report/dashboard/web/utils/__init__.py +5 -5
- siliconcompiler/report/html_report.py +1 -1
- siliconcompiler/report/report.py +4 -4
- siliconcompiler/report/summary_table.py +2 -2
- siliconcompiler/report/utils.py +5 -5
- siliconcompiler/scheduler/docker.py +3 -8
- siliconcompiler/scheduler/run_node.py +2 -7
- siliconcompiler/scheduler/scheduler.py +14 -11
- siliconcompiler/scheduler/schedulernode.py +136 -126
- siliconcompiler/scheduler/send_messages.py +3 -3
- siliconcompiler/scheduler/slurm.py +5 -3
- siliconcompiler/scheduler/taskscheduler.py +8 -7
- siliconcompiler/schema/baseschema.py +1 -2
- siliconcompiler/schema/namedschema.py +26 -2
- siliconcompiler/tool.py +398 -175
- siliconcompiler/tools/__init__.py +2 -0
- siliconcompiler/tools/builtin/_common.py +5 -5
- siliconcompiler/tools/builtin/concatenate.py +5 -5
- siliconcompiler/tools/builtin/minimum.py +4 -4
- siliconcompiler/tools/builtin/mux.py +4 -4
- siliconcompiler/tools/builtin/nop.py +4 -4
- siliconcompiler/tools/builtin/verify.py +7 -7
- siliconcompiler/tools/execute/exec_input.py +1 -1
- siliconcompiler/tools/genfasm/genfasm.py +1 -6
- siliconcompiler/tools/openroad/_apr.py +5 -1
- siliconcompiler/tools/openroad/antenna_repair.py +1 -1
- siliconcompiler/tools/openroad/macro_placement.py +1 -1
- siliconcompiler/tools/openroad/power_grid.py +1 -1
- siliconcompiler/tools/openroad/scripts/common/procs.tcl +5 -0
- siliconcompiler/tools/opensta/timing.py +26 -3
- siliconcompiler/tools/slang/__init__.py +2 -2
- siliconcompiler/tools/surfer/__init__.py +0 -0
- siliconcompiler/tools/surfer/show.py +53 -0
- siliconcompiler/tools/surfer/surfer.py +30 -0
- siliconcompiler/tools/vpr/route.py +27 -14
- siliconcompiler/tools/vpr/vpr.py +23 -6
- siliconcompiler/tools/yosys/__init__.py +1 -1
- siliconcompiler/tools/yosys/scripts/procs.tcl +143 -0
- siliconcompiler/tools/yosys/{sc_synth_asic.tcl → scripts/sc_synth_asic.tcl} +4 -0
- siliconcompiler/tools/yosys/{sc_synth_fpga.tcl → scripts/sc_synth_fpga.tcl} +24 -77
- siliconcompiler/tools/yosys/syn_fpga.py +14 -0
- siliconcompiler/toolscripts/_tools.json +8 -12
- siliconcompiler/toolscripts/rhel9/install-vpr.sh +0 -2
- siliconcompiler/toolscripts/ubuntu22/install-surfer.sh +33 -0
- siliconcompiler/toolscripts/ubuntu24/install-surfer.sh +33 -0
- siliconcompiler/utils/__init__.py +2 -1
- siliconcompiler/utils/flowgraph.py +24 -23
- siliconcompiler/utils/issue.py +23 -29
- siliconcompiler/utils/logging.py +35 -6
- siliconcompiler/utils/showtools.py +6 -1
- {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.1.dist-info}/METADATA +15 -25
- {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.1.dist-info}/RECORD +84 -82
- siliconcompiler/tools/yosys/procs.tcl +0 -71
- siliconcompiler/toolscripts/rhel9/install-yosys-parmys.sh +0 -68
- siliconcompiler/toolscripts/ubuntu22/install-yosys-parmys.sh +0 -68
- siliconcompiler/toolscripts/ubuntu24/install-yosys-parmys.sh +0 -68
- /siliconcompiler/tools/yosys/{sc_lec.tcl → scripts/sc_lec.tcl} +0 -0
- /siliconcompiler/tools/yosys/{sc_screenshot.tcl → scripts/sc_screenshot.tcl} +0 -0
- /siliconcompiler/tools/yosys/{syn_strategies.tcl → scripts/syn_strategies.tcl} +0 -0
- {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.1.dist-info}/WHEEL +0 -0
- {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.1.dist-info}/entry_points.txt +0 -0
- {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.1.dist-info}/licenses/LICENSE +0 -0
- {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.1.dist-info}/top_level.txt +0 -0
siliconcompiler/scheduler/schedulernode.py

@@ -1,3 +1,4 @@
+import contextlib
 import logging
 import os
 import shutil
@@ -11,6 +12,8 @@ from logging.handlers import QueueHandler
 from siliconcompiler import utils, sc_open
 from siliconcompiler import Schema
 from siliconcompiler import NodeStatus
+from siliconcompiler.utils.logging import get_console_formatter, SCInRunLoggerFormatter
+from siliconcompiler.schema import utils as schema_utils
 
 from siliconcompiler.tools._common import input_file_node_name, record_metric
 
@@ -53,18 +56,21 @@ class SchedulerNode:
             "output": os.path.join(self.__workdir, "outputs", f"{self.__design}.pkg.json")
         }
         self.__logs = {
-            "sc": os.path.join(self.__workdir, f"sc_{self.__step}{self.__index}.log"),
+            "sc": os.path.join(self.__workdir, f"sc_{self.__step}_{self.__index}.log"),
             "exe": os.path.join(self.__workdir, f"{self.__step}.log")
         }
         self.__replay_script = os.path.join(self.__workdir, "replay.sh")
 
         self.set_queue(None, None)
-        self.init_state()
-
-    def init_state(self, assign_runtime=False):
         self.__setup_schema_access()
-
-
+
+    @contextlib.contextmanager
+    def runtime(self):
+        prev_task = self.__task
+        with self.__task.runtime(self.__chip, step=self.__step, index=self.__index) as runtask:
+            self.__task = runtask
+            yield
+        self.__task = prev_task
 
     @staticmethod
     def init(chip):
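Note: the runtime() context manager added above replaces the old init_state()/set_runtime() pair; it temporarily swaps the node's task for a step/index-bound runtime view and puts the original back on exit. A minimal standalone sketch of the same pattern, with illustrative names (Task, Node) that are not the SiliconCompiler API:

    import contextlib

    class Task:
        @contextlib.contextmanager
        def runtime(self, step, index):
            # Yield a (step, index)-bound stand-in for this task.
            yield f"task@{step}/{index}"

    class Node:
        def __init__(self):
            self.task = Task()

        @contextlib.contextmanager
        def runtime(self, step, index):
            # Swap self.task for the bound view, restore afterwards
            # (as in the diff, restoration is not wrapped in finally).
            prev_task = self.task
            with prev_task.runtime(step, index) as bound:
                self.task = bound
                yield
            self.task = prev_task

    node = Node()
    with node.runtime("syn", "0"):
        print(node.task)   # task@syn/0
    print(node.task)       # original Task restored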
@@ -137,24 +143,40 @@ class SchedulerNode:
 
     @property
     def threads(self):
-        self.__task.
-
-            step=self.__step, index=self.__index)
-        self.__task.set_runtime(None)
+        with self.__task.runtime(self.__chip, step=self.__step, index=self.__index) as task:
+            thread_count = task.get("threads")
         return thread_count
 
     def set_queue(self, pipe, queue):
         self.__pipe = pipe
         self.__queue = queue
 
+        # Reinit
+        self.__setup_schema_access()
+
     def __setup_schema_access(self):
         flow = self.__chip.get('option', 'flow')
         self.__flow = self.__chip.get("flowgraph", flow, field="schema")
 
         tool = self.__flow.get(self.__step, self.__index, 'tool')
-
-        self.
-        self.
+        task = self.__flow.get(self.__step, self.__index, 'task')
+        self.__task = self.__chip.get("tool", tool, "task", task, field="schema")
+        self.__record = self.__chip.get("record", field="schema")
+        self.__metrics = self.__chip.get("metric", field="schema")
+
+    def _init_run_logger(self):
+        self.__chip._logger_console.setFormatter(
+            get_console_formatter(self.__chip, True, self.__step, self.__index))
+        self.logger.setLevel(
+            schema_utils.translate_loglevel(self.__chip.get('option', 'loglevel',
+                                                            step=self.__step, index=self.__index)))
+
+        if self.__queue:
+            formatter = self.__chip._logger_console.formatter
+            self.logger.removeHandler(self.__chip._logger_console)
+            self.__chip._logger_console = QueueHandler(self.__queue)
+            self.__chip._logger_console.setFormatter(formatter)
+            self.logger.addHandler(self.__chip._logger_console)
 
     def halt(self, msg=None):
         if msg:
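Note: the new _init_run_logger() above swaps the chip's console handler for a logging.handlers.QueueHandler when the node runs in a child process; the parent side (see the TaskScheduler hunks below) drains the queue with a QueueListener. A self-contained sketch of this standard stdlib pattern, assuming nothing about SiliconCompiler's own handler or formatter classes:

    import logging
    import multiprocessing
    from logging.handlers import QueueHandler, QueueListener

    def worker(queue):
        # In the child: records go into the shared queue instead of
        # directly to the console.
        logger = logging.getLogger("sc-worker")
        logger.setLevel(logging.INFO)
        logger.addHandler(QueueHandler(queue))
        logger.info("hello from the worker")

    if __name__ == "__main__":
        queue = multiprocessing.Queue()
        console = logging.StreamHandler()
        console.setFormatter(logging.Formatter("%(processName)s | %(message)s"))

        # In the parent: a listener thread pops records off the queue
        # and hands them to the real console handler.
        listener = QueueListener(queue, console)
        listener.start()

        proc = multiprocessing.Process(target=worker, args=(queue,))
        proc.start()
        proc.join()
        listener.stop()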
@@ -164,36 +186,33 @@ class SchedulerNode:
         try:
             self.__chip.schema.write_manifest(self.__manifests["output"])
         except FileNotFoundError:
-            self.logger.error(f"Failed to write manifest for {self.__step}{self.__index}.")
+            self.logger.error(f"Failed to write manifest for {self.__step}/{self.__index}.")
 
-        self.logger.error(f"Halting {self.__step}{self.__index} due to errors.")
+        self.logger.error(f"Halting {self.__step}/{self.__index} due to errors.")
         send_messages.send(self.__chip, "fail", self.__step, self.__index)
         sys.exit(1)
 
     def setup(self):
-        self.__task.
-
-
-
-
-
-
-
-
-
-
-        self.__task.set_runtime(None)
-        raise e
-
-        self.__task.set_runtime(None)
+        with self.__task.runtime(self.__chip, step=self.__step, index=self.__index) as task:
+            # Run node setup.
+            self.logger.info(f'Setting up node {self.__step}/{self.__index} with '
+                             f'{task.tool()}/{task.task()}')
+            setup_ret = None
+            try:
+                setup_ret = task.setup()
+            except Exception as e:
+                self.logger.error(f'Failed to run setup() for {self.__step}/{self.__index} '
+                                  f'with {task.tool()}/{task.task()}')
+                raise e
 
-
-
-
+        if setup_ret is not None:
+            self.logger.warning(f'Removing {self.__step}/{self.__index} due to {setup_ret}')
+            self.__record.set('status', NodeStatus.SKIPPED,
+                              step=self.__step, index=self.__index)
 
-
+            return False
 
-
+        return True
 
     def check_previous_run_status(self, previous_run):
         # Assume modified if flow does not match
@@ -230,7 +249,7 @@
             self.logger.setLevel(log_level)
         if set(previous_run.__chip.get("record", "inputnode",
                                        step=self.__step, index=self.__index)) != set(sel_inputs):
-            self.logger.warning(f'inputs to {self.__step}{self.__index} has been modified from '
+            self.logger.warning(f'inputs to {self.__step}/{self.__index} has been modified from '
                                 'previous run')
             return False
 
@@ -240,7 +259,7 @@
 
     def check_values_changed(self, previous_run, keys):
         def print_warning(key):
-            self.logger.warning(f'[{",".join(key)}] in {self.__step}{self.__index} has been '
+            self.logger.warning(f'[{",".join(key)}] in {self.__step}/{self.__index} has been '
                                 'modified from previous run')
 
         for key in sorted(keys):
@@ -267,7 +286,7 @@
         use_hash = self.__hash and previous_run.__hash
 
         def print_warning(key, reason):
-            self.logger.warning(f'[{",".join(key)}] ({reason}) in {self.__step}{self.__index} has '
+            self.logger.warning(f'[{",".join(key)}] ({reason}) in {self.__step}/{self.__index} has '
                                 'been modified from previous run')
 
         def get_file_time(path):
@@ -314,8 +333,7 @@
     def get_check_changed_keys(self):
         all_keys = set()
 
-        all_keys.update(self.__task.get('
-                                        step=self.__step, index=self.__index))
+        all_keys.update(self.__task.get('require'))
 
         tool_task_prefix = ('tool', self.__task.tool(), 'task', self.__task.task())
         for key in ('option', 'threads', 'prescript', 'postscript', 'refdir', 'script',):
@@ -353,7 +371,6 @@
                 self.logger.debug("Input manifest failed to load")
                 return True
             previous_node = SchedulerNode(chip, self.__step, self.__index)
-            previous_node.init_state(assign_runtime=True)
         else:
             # No manifest found so assume rerun is needed
             self.logger.debug("Previous run did not generate input manifest")
@@ -368,54 +385,53 @@
                 self.logger.debug("Output manifest failed to load")
                 return True
             previous_node_end = SchedulerNode(chip, self.__step, self.__index)
-            previous_node_end.init_state(assign_runtime=True)
         else:
             # No manifest found so assume rerun is needed
             self.logger.debug("Previous run did not generate output manifest")
             return True
 
-        self.
-
-
-
-
-
+        with self.runtime():
+            if previous_node_end:
+                with previous_node_end.runtime():
+                    if not self.check_previous_run_status(previous_node_end):
+                        self.logger.debug("Previous run state failed")
+                        return True
 
-
-
-
-
-
-
-
-
-
+            if previous_node:
+                with previous_node.runtime():
+                    # Generate key paths to check
+                    try:
+                        value_keys, path_keys = self.get_check_changed_keys()
+                        previous_value_keys, previous_path_keys = \
+                            previous_node.get_check_changed_keys()
+                        value_keys.update(previous_value_keys)
+                        path_keys.update(previous_path_keys)
+                    except KeyError:
+                        self.logger.debug("Failed to acquire keys")
+                        return True
 
-
-
-
-        return True
+                if self.check_values_changed(previous_node, value_keys.union(path_keys)):
+                    self.logger.debug("Key values changed")
+                    return True
 
-
-
-
+                if self.check_files_changed(previous_node, previous_node_time, path_keys):
+                    self.logger.debug("Files changed")
+                    return True
 
         return False
 
     def setup_input_directory(self):
-        in_files = set(self.__task.get('
-                                       step=self.__step, index=self.__index))
+        in_files = set(self.__task.get('input'))
 
         for in_step, in_index in self.__record.get('inputnode',
                                                    step=self.__step, index=self.__index):
             if NodeStatus.is_error(self.__record.get('status', step=in_step, index=in_index)):
-                self.halt(f'Halting step due to previous error in {in_step}{in_index}')
+                self.halt(f'Halting step due to previous error in {in_step}/{in_index}')
 
             output_dir = os.path.join(
                 self.__chip.getworkdir(step=in_step, index=in_index), "outputs")
             if not os.path.isdir(output_dir):
-                self.halt(f'Unable to locate outputs directory for {in_step}{in_index}: '
+                self.halt(f'Unable to locate outputs directory for {in_step}/{in_index}: '
                           f'{output_dir}')
 
             for outfile in os.scandir(output_dir):
@@ -451,8 +467,7 @@
         '''
         error = False
 
-        required_inputs = self.__task.get('
-                                          step=self.__step, index=self.__index)
+        required_inputs = self.__task.get('input')
 
         input_dir = os.path.join(self.__workdir, 'inputs')
 
@@ -460,11 +475,10 @@
             path = os.path.join(input_dir, filename)
             if not os.path.exists(path):
                 self.logger.error(f'Required input {filename} not received for '
-                                  f'{self.__step}{self.__index}.')
+                                  f'{self.__step}/{self.__index}.')
                 error = True
 
-        all_required = self.__task.get('
-                                       step=self.__step, index=self.__index)
+        all_required = self.__task.get('require')
         for item in all_required:
             keypath = item.split(',')
             if not self.__chip.valid(*keypath):
@@ -525,15 +539,8 @@
         to the filesystem to communicate updates between processes.
         '''
 
-        # Setup
-        self.
-        self.__chip._init_logger(self.__step, self.__index, in_run=True)
-
-        if self.__queue:
-            self.logger.removeHandler(self.logger._console)
-            self.logger._console = QueueHandler(self.__queue)
-            self.logger.addHandler(self.logger._console)
-        self.__chip._init_logger_formats()
+        # Setup logger
+        self._init_run_logger()
 
         self.__chip.set('arg', 'step', self.__step)
         self.__chip.set('arg', 'index', self.__index)
@@ -543,7 +550,7 @@
         journal.start()
 
         # Must be after journaling to ensure journal is complete
-        self.
+        self.__setup_schema_access()
 
         # Make record of sc version and machine
         self.__record.record_version(self.__step, self.__index)
@@ -555,40 +562,44 @@
         # Start wall timer
         self.__record.record_time(self.__step, self.__index, RecordTime.START)
 
-        # Setup run directory
-        self.__task.setup_work_directory(self.__workdir, remove_exist=not self.__replay)
-
         cwd = os.getcwd()
-
+        with self.runtime():
+            # Setup run directory
+            self.__task.setup_work_directory(self.__workdir, remove_exist=not self.__replay)
 
-
-        self.__chip._add_file_logger(self.__logs["sc"])
+            os.chdir(self.__workdir)
 
-
-
-
-
-
+            # Attach siliconcompiler file log handler
+            file_log = logging.FileHandler(self.__logs["sc"])
+            file_log.setFormatter(
+                SCInRunLoggerFormatter(self.__chip, self.__job, self.__step, self.__index))
+            self.logger.addHandler(file_log)
 
-
-        self.
+            # Select the inputs to this node
+            sel_inputs = self.__task.select_input_nodes()
+            if not self.__is_entry_node and not sel_inputs:
+                self.halt(f'No inputs selected for {self.__step}/{self.__index}')
+            self.__record.set("inputnode", sel_inputs, step=self.__step, index=self.__index)
 
-
-
-        self.setup_input_directory()
+            if self.__hash:
+                self.__hash_files_pre_execute()
 
-
-
+            # Forward data
+            if not self.__replay:
+                self.setup_input_directory()
 
-
-
-        self.halt("Failed to validate node setup. See previous errors")
+            # Write manifest prior to step running into inputs
+            self.__chip.write_manifest(self.__manifests["input"])
 
-
-        self.
-
-
-
+            # Check manifest
+            if not self.validate():
+                self.halt("Failed to validate node setup. See previous errors")
+
+            try:
+                self.execute()
+            except Exception as e:
+                utils.print_traceback(self.logger, e)
+                self.halt()
 
         # return to original directory
         os.chdir(cwd)
@@ -614,12 +625,17 @@
             # copy inputs to outputs and skip execution
             for in_step, in_index in self.__record.get('inputnode',
                                                        step=self.__step, index=self.__index):
+                required_outputs = set(self.__task.get('output'))
                 in_workdir = self.__chip.getworkdir(step=in_step, index=in_index)
                 for outfile in os.scandir(f"{in_workdir}/outputs"):
                     if outfile.name == f'{self.__design}.pkg.json':
                         # Dont forward manifest
                         continue
 
+                    if outfile.name not in required_outputs:
+                        # Dont forward non-required outputs
+                        continue
+
                     if outfile.is_file() or outfile.is_symlink():
                         utils.link_symlink_copy(outfile.path,
                                                 f'outputs/{outfile.name}')
|
|
|
716
732
|
if errors and not self.__chip.get('option', 'continue',
|
|
717
733
|
step=self.__step, index=self.__index):
|
|
718
734
|
self.halt(f'{self.__task.tool()}/{self.__task.task()} reported {errors} '
|
|
719
|
-
f'errors during {self.__step}{self.__index}')
|
|
735
|
+
f'errors during {self.__step}/{self.__index}')
|
|
720
736
|
|
|
721
737
|
if self.__error:
|
|
722
738
|
self.halt()
|
|
@@ -747,9 +763,8 @@ class SchedulerNode:
|
|
|
747
763
|
|
|
748
764
|
checks = {}
|
|
749
765
|
matches = {}
|
|
750
|
-
for suffix in self.__task.getkeys('
|
|
751
|
-
regexes = self.__task.get('
|
|
752
|
-
step=self.__step, index=self.__index)
|
|
766
|
+
for suffix in self.__task.getkeys('regex'):
|
|
767
|
+
regexes = self.__task.get('regex', suffix)
|
|
753
768
|
if not regexes:
|
|
754
769
|
continue
|
|
755
770
|
|
|
@@ -820,8 +835,7 @@ class SchedulerNode:
|
|
|
820
835
|
errors += matches[metric]
|
|
821
836
|
|
|
822
837
|
sources = [os.path.basename(self.__logs["exe"])]
|
|
823
|
-
if self.__task.get('
|
|
824
|
-
step=self.__step, index=self.__index):
|
|
838
|
+
if self.__task.get('regex', metric):
|
|
825
839
|
sources.append(f'{self.__step}.{metric}')
|
|
826
840
|
|
|
827
841
|
record_metric(self.__chip, self.__step, self.__index, metric, errors, sources)
|
|
@@ -833,8 +847,7 @@
                                  allow_cache=True, verbose=False)
 
         # hash all requirements
-        for item in set(self.__task.get('
-                                        step=self.__step, index=self.__index)):
+        for item in set(self.__task.get('require')):
             args = item.split(',')
             sc_type = self.__chip.get(*args, field='type')
             if 'file' in sc_type or 'dir' in sc_type:
@@ -850,8 +863,7 @@
                                  step=self.__step, index=self.__index, check=False, verbose=False)
 
         # hash all requirements
-        for item in set(self.__task.get('
-                                        step=self.__step, index=self.__index)):
+        for item in set(self.__task.get('require')):
             args = item.split(',')
             sc_type = self.__chip.get(*args, field='type')
             if 'file' in sc_type or 'dir' in sc_type:
@@ -883,8 +895,7 @@
 
         outputs = set(outputs)
 
-        output_files = set(self.__task.get('
-                                           step=self.__step, index=self.__index))
+        output_files = set(self.__task.get('output'))
 
         missing = output_files.difference(outputs)
         excess = outputs.difference(output_files)
@@ -906,7 +917,7 @@
         if not os.path.exists(copy_from):
             return
 
-        self.logger.info(f'Importing {self.__step}{self.__index} from {source}')
+        self.logger.info(f'Importing {self.__step}/{self.__index} from {source}')
         shutil.copytree(
             copy_from, self.__workdir,
             dirs_exist_ok=True,
@@ -917,9 +928,8 @@
             # delete file as it might be a hard link
             os.remove(self.__replay_script)
 
-        self.
-
-        self.__task.set_runtime(None)
+        with self.runtime():
+            self.__task.generate_replay_script(self.__replay_script, self.__workdir)
 
         for manifest in self.__manifests.values():
             if os.path.exists(manifest):
siliconcompiler/scheduler/send_messages.py

@@ -66,7 +66,7 @@ def send(chip, msg_type, step, index):
     msg = MIMEMultipart()
 
     if step and index:
-        subject = f'SiliconCompiler : {chip.design} | {jobname} | {step}{index} | {msg_type}'
+        subject = f'SiliconCompiler : {chip.design} | {jobname} | {step} | {index} | {msg_type}'
     else:
         subject = f'SiliconCompiler : {chip.design} | {jobname} | {msg_type}'
 
@@ -92,7 +92,7 @@ def send(chip, msg_type, step, index):
             msg.attach(img_attach)
 
     runtime = RuntimeFlowgraph(
-        chip.
+        chip.get("flowgraph", flow, field='schema'),
         from_steps=chip.get('option', 'from'),
        to_steps=chip.get('option', 'to'),
        prune_nodes=chip.get('option', 'prune'))
@@ -110,7 +110,7 @@ def send(chip, msg_type, step, index):
                          metric_keys=metrics_to_show)
    else:
        # Attach logs
-        for log in (f'sc_{step}{index}.log', f'{step}.log'):
+        for log in (f'sc_{step}_{index}.log', f'{step}.log'):
            log_file = f'{chip.getworkdir(step=step, index=index)}/{log}'
            if os.path.exists(log_file):
                with sc_open(log_file) as f:
siliconcompiler/scheduler/slurm.py

@@ -36,10 +36,10 @@ class SlurmSchedulerNode(SchedulerNode):
 
         collect = False
         flow = chip.get('option', 'flow')
-        entry_nodes = chip.
+        entry_nodes = chip.get("flowgraph", flow, field="schema").get_entry_nodes()
 
         runtime = RuntimeFlowgraph(
-            chip.
+            chip.get("flowgraph", flow, field='schema'),
             from_steps=chip.get('option', 'from'),
             to_steps=chip.get('option', 'to'),
             prune_nodes=chip.get('option', 'prune'))
@@ -65,7 +65,7 @@ class SlurmSchedulerNode(SchedulerNode):
 
     @staticmethod
     def get_job_name(jobhash, step, index):
-        return f'{jobhash}_{step}{index}'
+        return f'{jobhash}_{step}_{index}'
 
     @staticmethod
     def get_runtime_file_name(jobhash, step, index, ext):
@@ -93,6 +93,8 @@ class SlurmSchedulerNode(SchedulerNode):
         finishes processing this step, and it sets the active/error bits.
         '''
 
+        self._init_run_logger()
+
         if shutil.which('sinfo') is None:
             raise RuntimeError('slurm is not available or installed on this machine')
 
siliconcompiler/scheduler/taskscheduler.py

@@ -33,6 +33,7 @@ class TaskScheduler:
     def __init__(self, chip, tasks):
         self.__chip = chip
         self.__logger = self.__chip.logger
+        self.__logger_console_handler = self.__chip._logger_console
         self.__schema = self.__chip.schema
         self.__flow = self.__schema.get("flowgraph", self.__chip.get('option', 'flow'),
                                         field="schema")
@@ -73,13 +74,14 @@ class TaskScheduler:
             if self.__record.get('status', step=step, index=index) != NodeStatus.PENDING:
                 continue
 
-
+            with tasks[(step, index)].runtime():
+                threads = tasks[(step, index)].threads
             if not threads:
                 threads = self.__max_threads
             threads = max(1, min(threads, self.__max_threads))
 
             task = {
-                "name": f"{step}{index}",
+                "name": f"{step}/{index}",
                 "inputs": runtime.get_node_inputs(step, index, record=self.__record),
                 "proc": None,
                 "parent_pipe": None,
@@ -93,7 +95,6 @@ class TaskScheduler:
 
             task["parent_pipe"], pipe = multiprocessing.Pipe()
             task["node"].set_queue(pipe, self.__log_queue)
-            task["node"].init_state()  # reinit access to remove holdover access
 
             task["proc"] = multiprocessing.Process(target=task["node"].run)
             init_funcs.add(task["node"].init)
@@ -108,9 +109,9 @@
         multiprocessing.freeze_support()
 
         # Handle logs across threads
-        log_listener = QueueListener(self.__log_queue, self.
-        console_format = self.
-        self.
+        log_listener = QueueListener(self.__log_queue, self.__logger_console_handler)
+        console_format = self.__logger_console_handler.formatter
+        self.__logger_console_handler.setFormatter(SCBlankLoggerFormatter())
         log_listener.start()
 
         # Update dashboard before run begins
@@ -130,7 +131,7 @@
 
         # Cleanup logger
         log_listener.stop()
-        self.
+        self.__logger_console_handler.setFormatter(console_format)
 
     def __run_loop(self):
         self.__startTimes = {None: time.time()}
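Note: TaskScheduler now caches the console handler once at construction (self.__logger_console_handler), since child nodes replace chip._logger_console with their own QueueHandler; during the run it blanks the console formatter so forwarded records print verbatim, then restores it. A hedged sketch of that save/restore discipline (run_nodes is hypothetical, and a plain logging.Formatter stands in for SCBlankLoggerFormatter):

    import logging
    import multiprocessing
    from logging.handlers import QueueListener

    def run_with_listener(console_handler, run_nodes):
        log_queue = multiprocessing.Queue()
        listener = QueueListener(log_queue, console_handler)
        saved_formatter = console_handler.formatter
        # Child records arrive pre-formatted; print them as-is.
        console_handler.setFormatter(logging.Formatter("%(message)s"))
        listener.start()
        try:
            run_nodes(log_queue)
        finally:  # try/finally added here for safety
            listener.stop()
            console_handler.setFormatter(saved_formatter)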
siliconcompiler/schema/baseschema.py

@@ -32,7 +32,6 @@ class BaseSchema:
     '''
 
     def __init__(self):
-        # Data storage for the schema
         self.__manifest = {}
         self.__default = None
         self.__journal = Journal()
@@ -716,7 +715,7 @@
                 if index is None:
                     node_indicator = f" ({step})"
                 else:
-                    node_indicator = f" ({step}{index})"
+                    node_indicator = f" ({step}/{index})"
 
                 logger.error(f"Parameter [{','.join(keypath)}]{node_indicator} path "
                              f"{check_file} is invalid")