siliconcompiler 0.33.2__py3-none-any.whl → 0.34.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- siliconcompiler/__init__.py +2 -0
- siliconcompiler/_metadata.py +1 -1
- siliconcompiler/apps/_common.py +1 -1
- siliconcompiler/apps/sc.py +1 -1
- siliconcompiler/apps/sc_issue.py +6 -4
- siliconcompiler/apps/sc_remote.py +3 -20
- siliconcompiler/apps/sc_show.py +2 -2
- siliconcompiler/apps/utils/replay.py +4 -4
- siliconcompiler/checklist.py +202 -1
- siliconcompiler/core.py +62 -293
- siliconcompiler/data/templates/email/general.j2 +3 -3
- siliconcompiler/data/templates/email/summary.j2 +1 -1
- siliconcompiler/data/templates/issue/README.txt +1 -1
- siliconcompiler/data/templates/report/sc_report.j2 +7 -7
- siliconcompiler/dependencyschema.py +392 -0
- siliconcompiler/design.py +758 -0
- siliconcompiler/flowgraph.py +79 -13
- siliconcompiler/optimizer/vizier.py +2 -2
- siliconcompiler/package/__init__.py +383 -223
- siliconcompiler/package/git.py +75 -77
- siliconcompiler/package/github.py +70 -97
- siliconcompiler/package/https.py +77 -93
- siliconcompiler/packageschema.py +260 -0
- siliconcompiler/pdk.py +5 -5
- siliconcompiler/remote/client.py +33 -15
- siliconcompiler/remote/server.py +2 -2
- siliconcompiler/report/dashboard/cli/__init__.py +6 -6
- siliconcompiler/report/dashboard/cli/board.py +4 -4
- siliconcompiler/report/dashboard/web/components/__init__.py +5 -5
- siliconcompiler/report/dashboard/web/components/flowgraph.py +4 -4
- siliconcompiler/report/dashboard/web/components/graph.py +2 -2
- siliconcompiler/report/dashboard/web/state.py +1 -1
- siliconcompiler/report/dashboard/web/utils/__init__.py +5 -5
- siliconcompiler/report/html_report.py +1 -1
- siliconcompiler/report/report.py +4 -4
- siliconcompiler/report/summary_table.py +2 -2
- siliconcompiler/report/utils.py +5 -5
- siliconcompiler/scheduler/__init__.py +3 -1382
- siliconcompiler/scheduler/docker.py +263 -0
- siliconcompiler/scheduler/run_node.py +10 -21
- siliconcompiler/scheduler/scheduler.py +311 -0
- siliconcompiler/scheduler/schedulernode.py +944 -0
- siliconcompiler/scheduler/send_messages.py +3 -3
- siliconcompiler/scheduler/slurm.py +149 -163
- siliconcompiler/scheduler/taskscheduler.py +45 -57
- siliconcompiler/schema/__init__.py +3 -3
- siliconcompiler/schema/baseschema.py +234 -11
- siliconcompiler/schema/editableschema.py +4 -0
- siliconcompiler/schema/journal.py +210 -0
- siliconcompiler/schema/namedschema.py +55 -2
- siliconcompiler/schema/parameter.py +14 -1
- siliconcompiler/schema/parametervalue.py +1 -34
- siliconcompiler/schema/schema_cfg.py +210 -349
- siliconcompiler/tool.py +412 -148
- siliconcompiler/tools/__init__.py +2 -0
- siliconcompiler/tools/builtin/_common.py +5 -5
- siliconcompiler/tools/builtin/concatenate.py +7 -7
- siliconcompiler/tools/builtin/minimum.py +4 -4
- siliconcompiler/tools/builtin/mux.py +4 -4
- siliconcompiler/tools/builtin/nop.py +4 -4
- siliconcompiler/tools/builtin/verify.py +8 -9
- siliconcompiler/tools/execute/exec_input.py +1 -1
- siliconcompiler/tools/genfasm/genfasm.py +1 -6
- siliconcompiler/tools/openroad/_apr.py +5 -1
- siliconcompiler/tools/openroad/antenna_repair.py +1 -1
- siliconcompiler/tools/openroad/macro_placement.py +1 -1
- siliconcompiler/tools/openroad/power_grid.py +1 -1
- siliconcompiler/tools/openroad/scripts/common/procs.tcl +32 -25
- siliconcompiler/tools/opensta/timing.py +26 -3
- siliconcompiler/tools/slang/__init__.py +2 -2
- siliconcompiler/tools/surfer/__init__.py +0 -0
- siliconcompiler/tools/surfer/show.py +53 -0
- siliconcompiler/tools/surfer/surfer.py +30 -0
- siliconcompiler/tools/vpr/route.py +82 -0
- siliconcompiler/tools/vpr/vpr.py +23 -6
- siliconcompiler/tools/yosys/__init__.py +1 -1
- siliconcompiler/tools/yosys/scripts/procs.tcl +143 -0
- siliconcompiler/tools/yosys/{sc_synth_asic.tcl → scripts/sc_synth_asic.tcl} +4 -0
- siliconcompiler/tools/yosys/{sc_synth_fpga.tcl → scripts/sc_synth_fpga.tcl} +24 -77
- siliconcompiler/tools/yosys/syn_fpga.py +14 -0
- siliconcompiler/toolscripts/_tools.json +9 -13
- siliconcompiler/toolscripts/rhel9/install-vpr.sh +0 -2
- siliconcompiler/toolscripts/ubuntu22/install-surfer.sh +33 -0
- siliconcompiler/toolscripts/ubuntu24/install-surfer.sh +33 -0
- siliconcompiler/utils/__init__.py +4 -24
- siliconcompiler/utils/flowgraph.py +29 -28
- siliconcompiler/utils/issue.py +23 -29
- siliconcompiler/utils/logging.py +37 -7
- siliconcompiler/utils/showtools.py +6 -1
- {siliconcompiler-0.33.2.dist-info → siliconcompiler-0.34.1.dist-info}/METADATA +16 -25
- {siliconcompiler-0.33.2.dist-info → siliconcompiler-0.34.1.dist-info}/RECORD +98 -91
- siliconcompiler/scheduler/docker_runner.py +0 -254
- siliconcompiler/schema/journalingschema.py +0 -242
- siliconcompiler/tools/yosys/procs.tcl +0 -71
- siliconcompiler/toolscripts/rhel9/install-yosys-parmys.sh +0 -68
- siliconcompiler/toolscripts/ubuntu22/install-yosys-parmys.sh +0 -68
- siliconcompiler/toolscripts/ubuntu24/install-yosys-parmys.sh +0 -68
- /siliconcompiler/tools/yosys/{sc_lec.tcl → scripts/sc_lec.tcl} +0 -0
- /siliconcompiler/tools/yosys/{sc_screenshot.tcl → scripts/sc_screenshot.tcl} +0 -0
- /siliconcompiler/tools/yosys/{syn_strategies.tcl → scripts/syn_strategies.tcl} +0 -0
- {siliconcompiler-0.33.2.dist-info → siliconcompiler-0.34.1.dist-info}/WHEEL +0 -0
- {siliconcompiler-0.33.2.dist-info → siliconcompiler-0.34.1.dist-info}/entry_points.txt +0 -0
- {siliconcompiler-0.33.2.dist-info → siliconcompiler-0.34.1.dist-info}/licenses/LICENSE +0 -0
- {siliconcompiler-0.33.2.dist-info → siliconcompiler-0.34.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,944 @@
|
|
|
1
|
+
import contextlib
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
import shutil
|
|
5
|
+
import sys
|
|
6
|
+
import time
|
|
7
|
+
|
|
8
|
+
import os.path
|
|
9
|
+
|
|
10
|
+
from logging.handlers import QueueHandler
|
|
11
|
+
|
|
12
|
+
from siliconcompiler import utils, sc_open
|
|
13
|
+
from siliconcompiler import Schema
|
|
14
|
+
from siliconcompiler import NodeStatus
|
|
15
|
+
from siliconcompiler.utils.logging import get_console_formatter, SCInRunLoggerFormatter
|
|
16
|
+
from siliconcompiler.schema import utils as schema_utils
|
|
17
|
+
|
|
18
|
+
from siliconcompiler.tools._common import input_file_node_name, record_metric
|
|
19
|
+
|
|
20
|
+
from siliconcompiler.record import RecordTime, RecordTool
|
|
21
|
+
from siliconcompiler.schema import Journal
|
|
22
|
+
from siliconcompiler.scheduler import send_messages
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class SchedulerNode:
|
|
26
|
+
def __init__(self, chip, step, index, replay=False):
    """Bind a scheduler node to one (step, index) of the chip's flowgraph.

    Caches schema lookups, computes the node's work directory and the
    standard manifest/log paths used throughout the node lifecycle.

    Args:
        chip: parent Chip object whose schema drives this node.
        step (str): flowgraph step name.
        index (str): index within the step.
        replay (bool): True when re-running a captured node; disables
            test-case generation and preserves the existing work directory.
    """
    self.__step = step
    self.__index = index
    self.__chip = chip

    self.__design = self.__chip.design

    self.__job = self.__chip.get('option', 'jobname')
    # Whether to record user/machine information for this node.
    self.__record_user_info = self.__chip.get("option", "track",
                                              step=self.__step, index=self.__index)
    self.__pipe = None
    # Number of trailing log lines reported when the task fails.
    self.__failed_log_lines = 20
    self.__error = False
    self.__generate_test_case = not replay
    self.__replay = replay
    self.__hash = self.__chip.get("option", "hash")
    self.__builtin = False

    # In strict mode only declared input/output files are forwarded.
    self.__enforce_inputfiles = self.__chip.get('option', 'strict')
    self.__enforce_outputfiles = self.__chip.get('option', 'strict')

    flow = self.__chip.get('option', 'flow')
    self.__is_entry_node = (self.__step, self.__index) in \
        self.__chip.get("flowgraph", flow, field="schema").get_entry_nodes()

    self.__jobworkdir = self.__chip.getworkdir(jobname=self.__job)
    self.__workdir = self.__chip.getworkdir(jobname=self.__job,
                                            step=self.__step, index=self.__index)
    # Manifests exchanged with neighboring nodes via the filesystem.
    self.__manifests = {
        "input": os.path.join(self.__workdir, "inputs", f"{self.__design}.pkg.json"),
        "output": os.path.join(self.__workdir, "outputs", f"{self.__design}.pkg.json")
    }
    # "sc" is the siliconcompiler framework log, "exe" the tool's own log.
    self.__logs = {
        "sc": os.path.join(self.__workdir, f"sc_{self.__step}_{self.__index}.log"),
        "exe": os.path.join(self.__workdir, f"{self.__step}.log")
    }
    self.__replay_script = os.path.join(self.__workdir, "replay.sh")

    # set_queue() also (re)initializes the cached schema accessors.
    self.set_queue(None, None)
    self.__setup_schema_access()
|
|
66
|
+
|
|
67
|
+
@contextlib.contextmanager
def runtime(self):
    """Context manager binding ``self.task`` to this node's (step, index).

    While the block is active, ``self.__task`` is the runtime-bound task
    object; the previous task object is restored on exit.

    Fix: restoration now happens in a ``finally`` so an exception raised
    inside the block no longer leaves the node pointing at a stale
    runtime task.
    """
    prev_task = self.__task
    try:
        with self.__task.runtime(self.__chip, step=self.__step, index=self.__index) as runtask:
            self.__task = runtask
            yield
    finally:
        self.__task = prev_task
|
|
74
|
+
|
|
75
|
+
@staticmethod
def init(chip):
    """Hook for scheduler-specific one-time initialization; no-op here."""
    return None
|
|
78
|
+
|
|
79
|
+
@property
def is_local(self):
    """Whether this node executes on the local machine (always True)."""
    return True
|
|
82
|
+
|
|
83
|
+
@property
def has_error(self):
    """True once this node has recorded a failure."""
    error_state = self.__error
    return error_state
|
|
86
|
+
|
|
87
|
+
def set_builtin(self):
    """Mark this node as implementing a builtin task."""
    self.__builtin = True
|
|
89
|
+
|
|
90
|
+
@property
def is_builtin(self):
    """Whether this node runs a builtin task (see :meth:`set_builtin`)."""
    builtin_flag = self.__builtin
    return builtin_flag
|
|
93
|
+
|
|
94
|
+
@property
def logger(self):
    """Logger shared with the parent chip object."""
    return self.__chip.logger
|
|
97
|
+
|
|
98
|
+
@property
def chip(self):
    """Parent chip object this node operates on."""
    parent = self.__chip
    return parent
|
|
101
|
+
|
|
102
|
+
@property
def step(self):
    """Flowgraph step name of this node."""
    name = self.__step
    return name
|
|
105
|
+
|
|
106
|
+
@property
def index(self):
    """Flowgraph index of this node within its step."""
    idx = self.__index
    return idx
|
|
109
|
+
|
|
110
|
+
@property
def design(self):
    """Design name associated with the parent chip."""
    design_name = self.__design
    return design_name
|
|
113
|
+
|
|
114
|
+
@property
def workdir(self):
    """Work directory dedicated to this (step, index)."""
    path = self.__workdir
    return path
|
|
117
|
+
|
|
118
|
+
@property
def jobworkdir(self):
    """Top-level work directory for the whole job."""
    path = self.__jobworkdir
    return path
|
|
121
|
+
|
|
122
|
+
@property
def is_replay(self):
    """Whether this node is a replay of a previously captured run."""
    replay_flag = self.__replay
    return replay_flag
|
|
125
|
+
|
|
126
|
+
@property
def task(self):
    """Task schema object assigned to this node."""
    current_task = self.__task
    return current_task
|
|
129
|
+
|
|
130
|
+
def get_manifest(self, input=False):
    """Return the path of this node's manifest.

    Args:
        input (bool): when True, return the input manifest path;
            otherwise the output manifest path.
    """
    which = "input" if input else "output"
    return self.__manifests[which]
|
|
134
|
+
|
|
135
|
+
def get_log(self, type="exe"):
    """Return the path of the requested log file.

    Args:
        type (str): log kind, either "exe" (tool log) or "sc"
            (framework log).

    Raises:
        ValueError: if the requested log kind is unknown.
    """
    if type in self.__logs:
        return self.__logs[type]
    raise ValueError(f"{type} is not a log")
|
|
139
|
+
|
|
140
|
+
@property
def replay_script(self):
    """Path to the generated replay.sh script for this node."""
    script = self.__replay_script
    return script
|
|
143
|
+
|
|
144
|
+
@property
def threads(self):
    """Thread count requested by this node's task."""
    with self.__task.runtime(self.__chip, step=self.__step, index=self.__index) as bound_task:
        return bound_task.get("threads")
|
|
149
|
+
|
|
150
|
+
def set_queue(self, pipe, queue):
    """Attach the multiprocessing pipe and log queue for this node.

    Also re-initializes the cached schema accessors, since they must be
    rebuilt after the node is handed to a child process.
    """
    self.__pipe = pipe
    self.__queue = queue
    # Reinitialize cached schema references.
    self.__setup_schema_access()
|
|
156
|
+
|
|
157
|
+
def __setup_schema_access(self):
    """Cache references to the schema sub-objects this node uses.

    Resolves the flowgraph, the tool/task pair assigned to this node,
    and the record/metric schemas. Must be re-run whenever the chip's
    schema object is replaced (e.g. after journaling starts).
    """
    flow = self.__chip.get('option', 'flow')
    self.__flow = self.__chip.get("flowgraph", flow, field="schema")

    # Tool/task assignment comes from the flowgraph for this (step, index).
    tool = self.__flow.get(self.__step, self.__index, 'tool')
    task = self.__flow.get(self.__step, self.__index, 'task')
    self.__task = self.__chip.get("tool", tool, "task", task, field="schema")
    self.__record = self.__chip.get("record", field="schema")
    self.__metrics = self.__chip.get("metric", field="schema")
|
|
166
|
+
|
|
167
|
+
def _init_run_logger(self):
    """Configure console logging for in-run output of this node.

    Applies the in-run formatter and the per-node log level. When a
    multiprocessing queue is attached, the console handler is replaced
    by a QueueHandler so log records flow back to the parent process.
    """
    self.__chip._logger_console.setFormatter(
        get_console_formatter(self.__chip, True, self.__step, self.__index))
    self.logger.setLevel(
        schema_utils.translate_loglevel(self.__chip.get('option', 'loglevel',
                                                        step=self.__step, index=self.__index)))

    if self.__queue:
        # Child process: swap the console handler for a queue handler,
        # preserving the formatter just installed above.
        formatter = self.__chip._logger_console.formatter
        self.logger.removeHandler(self.__chip._logger_console)
        self.__chip._logger_console = QueueHandler(self.__queue)
        self.__chip._logger_console.setFormatter(formatter)
        self.logger.addHandler(self.__chip._logger_console)
|
|
180
|
+
|
|
181
|
+
def halt(self, msg=None):
    """Mark this node failed, persist its manifest, and exit the process.

    Args:
        msg (str): optional error message logged before halting.

    Note:
        This terminates the current (node) process via ``sys.exit(1)``;
        it never returns.
    """
    if msg:
        self.logger.error(msg)

    self.__record.set("status", NodeStatus.ERROR, step=self.__step, index=self.__index)
    try:
        self.__chip.schema.write_manifest(self.__manifests["output"])
    except FileNotFoundError:
        # The outputs directory may not exist yet; report and keep halting.
        self.logger.error(f"Failed to write manifest for {self.__step}/{self.__index}.")

    self.logger.error(f"Halting {self.__step}/{self.__index} due to errors.")
    send_messages.send(self.__chip, "fail", self.__step, self.__index)
    sys.exit(1)
|
|
194
|
+
|
|
195
|
+
def setup(self):
    """Run the task's setup() and record whether the node should execute.

    Returns:
        bool: True when the node is runnable; False when setup() returned
        a non-None reason and the node was marked SKIPPED.

    Raises:
        Exception: re-raises any error thrown by the task's setup().
    """
    with self.__task.runtime(self.__chip, step=self.__step, index=self.__index) as task:
        # Run node setup.
        self.logger.info(f'Setting up node {self.__step}/{self.__index} with '
                         f'{task.tool()}/{task.task()}')
        setup_ret = None
        try:
            setup_ret = task.setup()
        except Exception:
            self.logger.error(f'Failed to run setup() for {self.__step}/{self.__index} '
                              f'with {task.tool()}/{task.task()}')
            # Bare re-raise keeps the original traceback without adding
            # an extra frame (was `raise e`).
            raise

        if setup_ret is not None:
            # A non-None return is the task's way of declining to run.
            self.logger.warning(f'Removing {self.__step}/{self.__index} due to {setup_ret}')
            self.__record.set('status', NodeStatus.SKIPPED,
                              step=self.__step, index=self.__index)

            return False

        return True
|
|
216
|
+
|
|
217
|
+
def check_previous_run_status(self, previous_run):
    """Compare this node's setup against a previously recorded run.

    Args:
        previous_run (SchedulerNode): node built from the prior run's
            output manifest.

    Returns:
        bool: True when the prior run is complete, successful, and
        matches this node's flow/tool/task and selected input nodes;
        False when any of these differ (a rerun is then needed).
    """
    # Assume modified if flow does not match
    if self.__flow.name() != previous_run.__flow.name():
        self.logger.debug("Flow name changed")
        return False

    # Tool name
    if self.__task.tool() != previous_run.__task.tool():
        self.logger.debug("Tool name changed")
        return False

    # Task name
    if self.__task.task() != previous_run.__task.task():
        self.logger.debug("Task name changed")
        return False

    previous_status = previous_run.__chip.get("record", "status",
                                              step=self.__step, index=self.__index)
    if not NodeStatus.is_done(previous_status):
        self.logger.debug("Previous step did not complete")
        # Not complete
        return False

    if not NodeStatus.is_success(previous_status):
        self.logger.debug("Previous step was not successful")
        # Not a success
        return False

    # Check input nodes; temporarily silence logging while probing
    # input selection, then restore the caller's log level.
    log_level = self.logger.level
    self.logger.setLevel(logging.CRITICAL)
    sel_inputs = self.__task.select_input_nodes()
    self.logger.setLevel(log_level)
    if set(previous_run.__chip.get("record", "inputnode",
                                   step=self.__step, index=self.__index)) != set(sel_inputs):
        self.logger.warning(f'inputs to {self.__step}/{self.__index} has been modified from '
                            'previous run')
        return False

    # Check that all output files are present?

    return True
|
|
259
|
+
|
|
260
|
+
def check_values_changed(self, previous_run, keys):
    """Return True if any of the given keypaths differ from the prior run.

    Args:
        previous_run (SchedulerNode): node built from the prior manifest.
        keys (iterable of tuple): schema keypaths to compare.

    Returns:
        bool: True on the first detected difference (a warning is logged),
        False when all values match.
    """
    def print_warning(key):
        self.logger.warning(f'[{",".join(key)}] in {self.__step}/{self.__index} has been '
                            'modified from previous run')

    for key in sorted(keys):
        if not self.__chip.valid(*key) or not previous_run.__chip.valid(*key):
            # Key is missing in either run
            print_warning(key)
            return True

        param = self.__chip.get(*key, field=None)
        step, index = self.__step, self.__index
        if param.get(field='pernode').is_never():
            # Global parameter: compare without step/index context.
            step, index = None, None

        check_val = param.get(step=step, index=index)
        prev_val = previous_run.__chip.get(*key, step=step, index=index)

        if check_val != prev_val:
            print_warning(key)
            return True

    return False
|
|
284
|
+
|
|
285
|
+
def check_files_changed(self, previous_run, previous_time, keys):
    """Return True if any file/dir keypath changed since the prior run.

    When both runs enabled hashing, file hashes are compared; otherwise
    the check falls back to comparing package fields and modification
    timestamps against the prior input manifest's mtime.

    Args:
        previous_run (SchedulerNode): node built from the prior manifest.
        previous_time (float): reference mtime (prior input manifest).
        keys (iterable of tuple): file/dir schema keypaths to compare.

    Returns:
        bool: True on the first detected change (a warning is logged),
        False when nothing changed.
    """
    use_hash = self.__hash and previous_run.__hash

    def print_warning(key, reason):
        self.logger.warning(f'[{",".join(key)}] ({reason}) in {self.__step}/{self.__index} has '
                            'been modified from previous run')

    def get_file_time(path):
        # Newest mtime within the path (recursing into directories).
        times = [os.path.getmtime(path)]
        if os.path.isdir(path):
            for path_root, _, files in os.walk(path):
                for path_end in files:
                    times.append(os.path.getmtime(os.path.join(path_root, path_end)))

        return max(times)

    for key in sorted(keys):
        param = self.__chip.get(*key, field=None)
        step, index = self.__step, self.__index
        if param.get(field='pernode').is_never():
            # Global parameter: look up without step/index context.
            step, index = None, None

        if use_hash:
            check_hash = self.__chip.hash_files(*key, update=False, check=False,
                                                verbose=False, allow_cache=True,
                                                step=step, index=index)
            prev_hash = previous_run.__chip.get(*key, field='filehash',
                                                step=step, index=index)

            if check_hash != prev_hash:
                print_warning(key, "file hash")
                return True
        else:
            # check package values
            check_val = self.__chip.get(*key, field='package', step=step, index=index)
            prev_val = previous_run.__chip.get(*key, field='package', step=step, index=index)

            if check_val != prev_val:
                print_warning(key, "file package")
                return True

            for check_file in self.__chip.find_files(*key, step=step, index=index):
                if get_file_time(check_file) > previous_time:
                    print_warning(key, "timestamp")
                    return True

    return False
|
|
332
|
+
|
|
333
|
+
def get_check_changed_keys(self):
    """Collect the schema keypaths whose changes should trigger a rerun.

    Combines the task's 'require' list with the standard tool/task
    settings (option, threads, scripts, refdir) and all env keys, then
    splits them by parameter type.

    Returns:
        tuple[set, set]: (value keypaths, file/dir keypaths).

    Raises:
        KeyError: if any collected keypath does not exist in the schema.
    """
    all_keys = set()

    all_keys.update(self.__task.get('require'))

    tool_task_prefix = ('tool', self.__task.tool(), 'task', self.__task.task())
    for key in ('option', 'threads', 'prescript', 'postscript', 'refdir', 'script',):
        all_keys.add(",".join([*tool_task_prefix, key]))

    for env_key in self.__chip.getkeys(*tool_task_prefix, 'env'):
        all_keys.add(",".join([*tool_task_prefix, 'env', env_key]))

    value_keys = set()
    path_keys = set()
    for key in all_keys:
        keypath = tuple(key.split(","))
        if not self.__chip.valid(*keypath, default_valid=True):
            raise KeyError(f"[{','.join(keypath)}] not found")
        keytype = self.__chip.get(*keypath, field="type")
        # File-like parameters need hash/timestamp checks rather than
        # plain value comparison.
        if 'file' in keytype or 'dir' in keytype:
            path_keys.add(keypath)
        else:
            value_keys.add(keypath)

    return value_keys, path_keys
|
|
358
|
+
|
|
359
|
+
def requires_run(self):
    """Determine whether this node must be (re)executed.

    Loads the previous run's input and output manifests and compares run
    status, key values, and file states against the current setup.

    Returns:
        bool: True when a rerun is required; False when the previous
        results can be reused.
    """
    # Imported locally to avoid a circular import at module load time.
    from siliconcompiler import Chip

    # Load previous manifest
    previous_node = None
    previous_node_time = time.time()
    if os.path.exists(self.__manifests["input"]):
        previous_node_time = os.path.getmtime(self.__manifests["input"])
        chip = Chip('')
        try:
            chip.schema = Schema(manifest=self.__manifests["input"], logger=self.logger)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate; any load failure means rerun.
            self.logger.debug("Input manifest failed to load")
            return True
        previous_node = SchedulerNode(chip, self.__step, self.__index)
    else:
        # No manifest found so assume rerun is needed
        self.logger.debug("Previous run did not generate input manifest")
        return True

    previous_node_end = None
    if os.path.exists(self.__manifests["output"]):
        chip = Chip('')
        try:
            chip.schema = Schema(manifest=self.__manifests["output"], logger=self.logger)
        except Exception:
            # Narrowed from a bare `except:` (see above).
            self.logger.debug("Output manifest failed to load")
            return True
        previous_node_end = SchedulerNode(chip, self.__step, self.__index)
    else:
        # No manifest found so assume rerun is needed
        self.logger.debug("Previous run did not generate output manifest")
        return True

    with self.runtime():
        if previous_node_end:
            with previous_node_end.runtime():
                if not self.check_previous_run_status(previous_node_end):
                    self.logger.debug("Previous run state failed")
                    return True

        if previous_node:
            with previous_node.runtime():
                # Generate key paths to check
                try:
                    value_keys, path_keys = self.get_check_changed_keys()
                    previous_value_keys, previous_path_keys = \
                        previous_node.get_check_changed_keys()
                    value_keys.update(previous_value_keys)
                    path_keys.update(previous_path_keys)
                except KeyError:
                    self.logger.debug("Failed to acquire keys")
                    return True

                if self.check_values_changed(previous_node, value_keys.union(path_keys)):
                    self.logger.debug("Key values changed")
                    return True

                if self.check_files_changed(previous_node, previous_node_time, path_keys):
                    self.logger.debug("Files changed")
                    return True

    return False
|
|
422
|
+
|
|
423
|
+
def setup_input_directory(self):
    """Populate this node's inputs/ directory from its input nodes' outputs.

    For every recorded input node, forwards each output file (via
    hardlink/symlink/copy) into this node's inputs directory, renaming
    files to their node-qualified names when the task declared them that
    way. Halts the node when an input node failed or when an input
    node's outputs directory cannot be found.
    """
    in_files = set(self.__task.get('input'))

    for in_step, in_index in self.__record.get('inputnode',
                                               step=self.__step, index=self.__index):
        if NodeStatus.is_error(self.__record.get('status', step=in_step, index=in_index)):
            self.halt(f'Halting step due to previous error in {in_step}/{in_index}')

        output_dir = os.path.join(
            self.__chip.getworkdir(step=in_step, index=in_index), "outputs")
        if not os.path.isdir(output_dir):
            self.halt(f'Unable to locate outputs directory for {in_step}/{in_index}: '
                      f'{output_dir}')

        for outfile in os.scandir(output_dir):
            if outfile.name == f'{self.__design}.pkg.json':
                # Dont forward manifest
                continue

            new_name = input_file_node_name(outfile.name, in_step, in_index)
            if self.__enforce_inputfiles:
                # Strict mode: only forward files the task declared,
                # under either the plain or node-qualified name.
                if outfile.name not in in_files and new_name not in in_files:
                    continue

            if outfile.is_file() or outfile.is_symlink():
                utils.link_symlink_copy(outfile.path,
                                        f'{self.__workdir}/inputs/{outfile.name}')
            elif outfile.is_dir():
                shutil.copytree(outfile.path,
                                f'{self.__workdir}/inputs/{outfile.name}',
                                dirs_exist_ok=True,
                                copy_function=utils.link_symlink_copy)

            if new_name in in_files:
                # perform rename
                os.rename(f'{self.__workdir}/inputs/{outfile.name}',
                          f'{self.__workdir}/inputs/{new_name}')
|
|
460
|
+
|
|
461
|
+
def validate(self):
    '''
    Runtime checks called before executing the task.

    - Make sure expected inputs exist.
    - Make sure all required filepaths resolve correctly.

    Returns:
        bool: True when the node setup is valid, False when any check
        failed (each failure is logged individually).
    '''
    error = False

    required_inputs = self.__task.get('input')

    input_dir = os.path.join(self.__workdir, 'inputs')

    for filename in required_inputs:
        path = os.path.join(input_dir, filename)
        if not os.path.exists(path):
            # Fix: name the missing input file instead of printing the
            # literal placeholder "(unknown)".
            self.logger.error(f'Required input {filename} not received for '
                              f'{self.__step}/{self.__index}.')
            error = True

    all_required = self.__task.get('require')
    for item in all_required:
        keypath = item.split(',')
        if not self.__chip.valid(*keypath):
            self.logger.error(f'Cannot resolve required keypath [{",".join(keypath)}].')
            error = True
            continue

        param = self.__chip.get(*keypath, field=None)
        check_step, check_index = self.__step, self.__index
        if param.get(field='pernode').is_never():
            # Global parameter: look up without step/index context.
            check_step, check_index = None, None

        value = self.__chip.get(*keypath, step=check_step, index=check_index)
        if not value:
            self.logger.error('No value set for required keypath '
                              f'[{",".join(keypath)}].')
            error = True
            continue

        paramtype = param.get(field='type')
        if ('file' in paramtype) or ('dir' in paramtype):
            abspath = self.__chip.find_files(*keypath,
                                             missing_ok=True,
                                             step=check_step, index=check_index)

            # Scalar parameters return a single path; normalize to lists
            # so the resolution check below is uniform.
            unresolved_paths = value
            if not isinstance(abspath, list):
                abspath = [abspath]
                unresolved_paths = [unresolved_paths]

            for path, setpath in zip(abspath, unresolved_paths):
                if path is None:
                    self.logger.error(f'Cannot resolve path {setpath} in '
                                      f'required file keypath [{",".join(keypath)}].')
                    error = True

    return not error
|
|
519
|
+
|
|
520
|
+
def summarize(self):
    """Log error/warning counts and total task time for this node.

    Fix: the tasktime metric may be unset (None) when the node was
    skipped or failed before timing was recorded; formatting None with
    ``:.2f`` raised TypeError, so it is now guarded.
    """
    for metric in ['errors', 'warnings']:
        val = self.__metrics.get(metric, step=self.__step, index=self.__index)
        if val is not None:
            self.logger.info(f'Number of {metric}: {val}')

    walltime = self.__metrics.get("tasktime", step=self.__step, index=self.__index)
    if walltime is not None:
        self.logger.info(f"Finished task in {walltime:.2f}s")
    else:
        self.logger.info("Finished task")
|
|
528
|
+
|
|
529
|
+
def run(self):
    '''
    Execute this node, from setup through task execution.

    Intended to be invoked in the node's own process. Note that since
    this occurs in a separate address space, any changes made to the
    `self` object will not be reflected in the parent. We rely on
    reading/writing the chip manifest to the filesystem to communicate
    updates between processes.
    '''

    # Setup logger
    self._init_run_logger()

    self.__chip.set('arg', 'step', self.__step)
    self.__chip.set('arg', 'index', self.__index)

    # Setup journaling
    journal = Journal.access(self.__chip.schema)
    journal.start()

    # Must be after journaling to ensure journal is complete
    self.__setup_schema_access()

    # Make record of sc version and machine
    self.__record.record_version(self.__step, self.__index)

    # Record user information if enabled
    if self.__record_user_info:
        self.__record.record_userinformation(self.__step, self.__index)

    # Start wall timer
    self.__record.record_time(self.__step, self.__index, RecordTime.START)

    cwd = os.getcwd()
    with self.runtime():
        # Setup run directory; replay keeps the existing directory.
        self.__task.setup_work_directory(self.__workdir, remove_exist=not self.__replay)

        os.chdir(self.__workdir)

        # Attach siliconcompiler file log handler
        file_log = logging.FileHandler(self.__logs["sc"])
        file_log.setFormatter(
            SCInRunLoggerFormatter(self.__chip, self.__job, self.__step, self.__index))
        self.logger.addHandler(file_log)

        # Select the inputs to this node; only entry nodes may run
        # without any selected inputs.
        sel_inputs = self.__task.select_input_nodes()
        if not self.__is_entry_node and not sel_inputs:
            self.halt(f'No inputs selected for {self.__step}/{self.__index}')
        self.__record.set("inputnode", sel_inputs, step=self.__step, index=self.__index)

        if self.__hash:
            self.__hash_files_pre_execute()

        # Forward data
        if not self.__replay:
            self.setup_input_directory()

        # Write manifest prior to step running into inputs
        self.__chip.write_manifest(self.__manifests["input"])

        # Check manifest
        if not self.validate():
            self.halt("Failed to validate node setup. See previous errors")

        try:
            self.execute()
        except Exception as e:
            # halt() logs, records ERROR status, and exits the process.
            utils.print_traceback(self.logger, e)
            self.halt()

    # return to original directory
    os.chdir(cwd)

    # Stop journaling
    journal.stop()

    if self.__pipe:
        # Report resolved package paths back to the parent process.
        self.__pipe.send(self.__chip.get("package", field="schema").get_path_cache())
|
|
612
|
+
|
|
613
|
+
def execute(self):
    """Run this node's task to completion.

    Pre-processes the task, then either forwards inputs to outputs (when the
    node status is SKIPPED) or launches the tool executable.  Afterwards it
    post-processes, scans the log, records timing metrics, writes the output
    manifest, and halts if the tool reported errors (subject to
    [option,continue]).  Sets self.__error on tool or post-process failure.
    """
    self.logger.info(f'Running in {self.__workdir}')

    try:
        self.__task.pre_process()
    except Exception as e:
        self.logger.error(
            f"Pre-processing failed for {self.__task.tool()}/{self.__task.task()}")
        utils.print_traceback(self.logger, e)
        # Bare raise preserves the original traceback (raise e was redundant)
        raise

    if self.__record.get('status', step=self.__step, index=self.__index) == NodeStatus.SKIPPED:
        # copy inputs to outputs and skip execution
        # Required-output set is loop-invariant; hoisted out of the per-input
        # loop (it was previously rebuilt for every input node).
        required_outputs = set(self.__task.get('output'))
        for in_step, in_index in self.__record.get('inputnode',
                                                   step=self.__step, index=self.__index):
            in_workdir = self.__chip.getworkdir(step=in_step, index=in_index)
            for outfile in os.scandir(f"{in_workdir}/outputs"):
                if outfile.name == f'{self.__design}.pkg.json':
                    # Dont forward manifest
                    continue

                if outfile.name not in required_outputs:
                    # Dont forward non-required outputs
                    continue

                if outfile.is_file() or outfile.is_symlink():
                    utils.link_symlink_copy(outfile.path,
                                            f'outputs/{outfile.name}')
                elif outfile.is_dir():
                    shutil.copytree(outfile.path,
                                    f'outputs/{outfile.name}',
                                    dirs_exist_ok=True,
                                    copy_function=utils.link_symlink_copy)

        send_messages.send(self.__chip, "skipped", self.__step, self.__index)
    else:
        org_env = os.environ.copy()
        os.environ.update(self.__task.get_runtime_environmental_variables())

        toolpath = self.__task.get_exe()
        version = self.__task.get_exe_version()

        if not self.__chip.get('option', 'novercheck', step=self.__step, index=self.__index):
            if not self.__task.check_exe_version(version):
                self.halt()

        if version:
            self.__record.record_tool(self.__step, self.__index, version, RecordTool.VERSION)

        if toolpath:
            self.__record.record_tool(self.__step, self.__index, toolpath, RecordTool.PATH)

        send_messages.send(self.__chip, "begin", self.__step, self.__index)

        try:
            if not self.__replay:
                self.__task.generate_replay_script(self.__replay_script, self.__workdir)
            ret_code = self.__task.run_task(
                self.__workdir,
                self.__chip.get('option', 'quiet', step=self.__step, index=self.__index),
                self.__chip.get('option', 'loglevel', step=self.__step, index=self.__index),
                self.__chip.get('option', 'breakpoint', step=self.__step, index=self.__index),
                self.__chip.get('option', 'nice', step=self.__step, index=self.__index),
                self.__chip.get('option', 'timeout', step=self.__step, index=self.__index))
        finally:
            # Restore the caller's environment even when run_task raises.
            # The previous `except Exception as e: raise e` re-raised without
            # restoring, leaking the task's environment into the process.
            os.environ.clear()
            os.environ.update(org_env)

        if ret_code != 0:
            msg = f'Command failed with code {ret_code}.'
            if os.path.exists(self.__logs["exe"]):
                if self.__chip.get('option', 'quiet', step=self.__step, index=self.__index):
                    # Print last N lines of log when in quiet mode
                    with sc_open(self.__logs["exe"]) as logfd:
                        loglines = logfd.read().splitlines()
                        for logline in loglines[-self.__failed_log_lines:]:
                            self.logger.error(logline)
                # No log file for pure-Python tools.
                msg += f' See log file {os.path.abspath(self.__logs["exe"])}'
            self.logger.warning(msg)
            self.__error = True

    try:
        self.__task.post_process()
    except Exception as e:
        self.logger.error(
            f"Post-processing failed for {self.__task.tool()}/{self.__task.task()}")
        utils.print_traceback(self.logger, e)
        self.__error = True

    self.check_logfile()

    if not self.__error and self.__hash:
        self.__hash_files_post_execute()

    # Capture wall runtime
    self.__record.record_time(self.__step, self.__index, RecordTime.END)
    self.__metrics.record_tasktime(self.__step, self.__index, self.__record)
    self.__metrics.record_totaltime(
        self.__step, self.__index,
        self.__flow,
        self.__record)

    # Save a successful manifest
    if self.__record.get('status', step=self.__step, index=self.__index) != NodeStatus.SKIPPED:
        self.__record.set('status', NodeStatus.SUCCESS, step=self.__step, index=self.__index)

    self.__chip.write_manifest(self.__manifests["output"])

    self.summarize()

    if self.__error and self.__generate_test_case:
        self.__generate_testcase()

    # Stop if there are errors
    errors = self.__metrics.get('errors', step=self.__step, index=self.__index)
    if errors and not self.__chip.get('option', 'continue',
                                      step=self.__step, index=self.__index):
        self.halt(f'{self.__task.tool()}/{self.__task.task()} reported {errors} '
                  f'errors during {self.__step}/{self.__index}')

    if self.__error:
        self.halt()

    self.__report_output_files()

    send_messages.send(self.__chip, "end", self.__step, self.__index)
|
|
743
|
+
|
|
744
|
+
def __generate_testcase(self):
    """Archive this failed node as a shareable issue test case."""
    import lambdapdk
    from siliconcompiler.utils.issue import generate_testcase

    testcase_options = {
        "archive_directory": self.__jobworkdir,
        "include_pdks": False,
        "include_specific_pdks": lambdapdk.get_pdks(),
        "include_libraries": False,
        "include_specific_libraries": lambdapdk.get_libs(),
        "hash_files": self.__hash,
        "verbose_collect": False,
    }
    generate_testcase(self.__chip, self.__step, self.__index, **testcase_options)
|
|
759
|
+
|
|
760
|
+
def check_logfile(self):
    """Scan the tool's execution log against the task's regex suffix checks.

    Matching lines for each suffix are written to "<step>.<suffix>" and,
    unless [option,quiet] is set, echoed through the logger (error/warning
    level for the 'errors'/'warnings' suffixes).  Match counts for 'errors'
    and 'warnings' are folded into the corresponding metrics.  No-op for
    SKIPPED nodes.
    """
    if self.__record.get('status', step=self.__step, index=self.__index) == NodeStatus.SKIPPED:
        return

    checks = {}
    matches = {}
    for suffix in self.__task.getkeys('regex'):
        regexes = self.__task.get('regex', suffix)
        if not regexes:
            continue

        checks[suffix] = {
            "report": open(f"{self.__step}.{suffix}", "w"),
            "args": regexes,
            "display": False
        }
        matches[suffix] = 0

    def print_error(suffix, line):
        self.logger.error(line)

    def print_warning(suffix, line):
        self.logger.warning(line)

    def print_info(suffix, line):
        self.logger.warning(f'{suffix}: {line}')

    try:
        if not self.__chip.get('option', 'quiet', step=self.__step, index=self.__index):
            for suffix, info in checks.items():
                if suffix == 'errors':
                    info["display"] = print_error
                elif suffix == "warnings":
                    info["display"] = print_warning
                else:
                    info["display"] = print_info

        # Order suffixes as follows: [..., 'warnings', 'errors']
        ordered_suffixes = list(
            filter(lambda key: key not in ['warnings', 'errors'], checks.keys()))
        if 'warnings' in checks:
            ordered_suffixes.append('warnings')
        if 'errors' in checks:
            ordered_suffixes.append('errors')

        # Looping through patterns for each line
        with sc_open(self.__logs["exe"]) as f:
            line_count = sum(1 for _ in f)
            right_align = len(str(line_count))
            for suffix in ordered_suffixes:
                # Start at the beginning of file again
                f.seek(0)
                for num, line in enumerate(f, start=1):
                    # Pipe the line through each pattern in turn; a None
                    # result means a pattern filtered the line out.
                    string = line
                    for item in checks[suffix]['args']:
                        if string is None:
                            break
                        string = utils.grep(self.__chip, item, string)
                    if string is not None:
                        matches[suffix] += 1
                        # always print to file
                        line_with_num = f'{num: >{right_align}}: {string.strip()}'
                        print(line_with_num, file=checks[suffix]['report'])
                        # selectively print to display
                        if checks[suffix]["display"]:
                            checks[suffix]["display"](suffix, line_with_num)
    finally:
        # Close report handles even if scanning raised; the previous version
        # leaked every open report file on any exception above.
        for check in checks.values():
            check['report'].close()

    for metric in ("errors", "warnings"):
        if metric in matches:
            count = self.__metrics.get(metric, step=self.__step, index=self.__index)
            if count is None:
                count = 0
            count += matches[metric]

            sources = [os.path.basename(self.__logs["exe"])]
            if self.__task.get('regex', metric):
                sources.append(f'{self.__step}.{metric}')

            record_metric(self.__chip, self.__step, self.__index, metric, count, sources)
|
|
842
|
+
|
|
843
|
+
def __hash_files_pre_execute(self):
    """Hash the task's script inputs and all file/dir requirements before
    the tool runs."""
    for key in ('refdir', 'prescript', 'postscript', 'script'):
        self.__chip.hash_files('tool', self.__task.tool(), 'task', self.__task.task(), key,
                               step=self.__step, index=self.__index, check=False,
                               allow_cache=True, verbose=False)

    # hash all requirements
    for requirement in set(self.__task.get('require')):
        keypath = requirement.split(',')
        keytype = self.__chip.get(*keypath, field='type')
        if 'file' not in keytype and 'dir' not in keytype:
            # only file/dir keys carry hashable content
            continue
        access_step, access_index = self.__step, self.__index
        if self.__chip.get(*keypath, field='pernode').is_never():
            access_step, access_index = None, None
        self.__chip.hash_files(*keypath, step=access_step, index=access_index,
                               check=False, allow_cache=True, verbose=False)
|
|
859
|
+
|
|
860
|
+
def __hash_files_post_execute(self):
    """Hash the task outputs, then any still-unhashed file/dir requirements."""
    # hash all outputs
    self.__chip.hash_files('tool', self.__task.tool(), 'task', self.__task.task(), 'output',
                           step=self.__step, index=self.__index, check=False, verbose=False)

    # hash all requirements
    for requirement in set(self.__task.get('require')):
        keypath = requirement.split(',')
        keytype = self.__chip.get(*keypath, field='type')
        if 'file' not in keytype and 'dir' not in keytype:
            # only file/dir keys carry hashable content
            continue
        access_step, access_index = self.__step, self.__index
        if self.__chip.get(*keypath, field='pernode').is_never():
            access_step, access_index = None, None
        if self.__chip.get(*keypath, field='filehash'):
            # already hashed (e.g. during pre-execute)
            continue
        self.__chip.hash_files(*keypath, step=access_step, index=access_index,
                               check=False, allow_cache=True, verbose=False)
|
|
877
|
+
|
|
878
|
+
def __report_output_files(self):
    """Check that the node produced exactly the outputs the task declared,
    logging any missing or unexpected files and halting when enforcement
    is enabled."""
    if self.__task.tool() == 'builtin':
        return

    problem = False

    try:
        produced = os.listdir(os.path.join(self.__workdir, "outputs"))
    except FileNotFoundError:
        self.halt("Output directory is missing")

    manifest_name = os.path.basename(self.__manifests["output"])
    try:
        produced.remove(manifest_name)
    except ValueError:
        self.logger.error(f"Output manifest ({manifest_name}) "
                          "is missing.")
        problem = True

    produced = set(produced)
    expected = set(self.__task.get('output'))

    missing = expected - produced
    excess = produced - expected

    if missing:
        problem = True
        self.logger.error(f"Expected output files are missing: {', '.join(missing)}")

    if excess:
        problem = True
        self.logger.error(f"Unexpected output files found: {', '.join(excess)}")

    if problem and self.__enforce_outputfiles:
        self.halt()
|
|
913
|
+
|
|
914
|
+
def copy_from(self, source):
    """Import this node's results from a prior job named *source*.

    Copies the old working directory into place (hard-linking where
    possible), regenerates the replay script, and rewrites any cached
    manifests so they carry the current jobname.  Does nothing when the
    source job has no working directory for this node.
    """
    src_dir = self.__chip.getworkdir(jobname=source, step=self.__step, index=self.__index)

    if not os.path.exists(src_dir):
        return

    self.logger.info(f'Importing {self.__step}/{self.__index} from {source}')
    shutil.copytree(
        src_dir, self.__workdir,
        dirs_exist_ok=True,
        copy_function=utils.link_copy)

    # rewrite replay files
    if os.path.exists(self.__replay_script):
        # delete file as it might be a hard link
        os.remove(self.__replay_script)

        with self.runtime():
            self.__task.generate_replay_script(self.__replay_script, self.__workdir)

    for manifest_path in self.__manifests.values():
        if not os.path.exists(manifest_path):
            continue
        schema = Schema.from_manifest(manifest_path)
        # delete file as it might be a hard link
        os.remove(manifest_path)
        schema.set('option', 'jobname', self.__chip.get('option', 'jobname'))
        schema.write_manifest(manifest_path)
|
|
941
|
+
|
|
942
|
+
def clean_directory(self):
    """Remove this node's working directory and all of its contents,
    if the directory exists."""
    if os.path.exists(self.__workdir):
        shutil.rmtree(self.__workdir)
|