siliconcompiler 0.33.0__py3-none-any.whl → 0.33.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (170)
  1. siliconcompiler/_common.py +5 -0
  2. siliconcompiler/_metadata.py +1 -1
  3. siliconcompiler/apps/sc_install.py +7 -0
  4. siliconcompiler/apps/sc_remote.py +7 -2
  5. siliconcompiler/core.py +34 -12
  6. siliconcompiler/metric.py +59 -0
  7. siliconcompiler/record.py +6 -2
  8. siliconcompiler/remote/client.py +10 -3
  9. siliconcompiler/remote/server.py +7 -2
  10. siliconcompiler/report/dashboard/cli/__init__.py +2 -0
  11. siliconcompiler/report/dashboard/cli/board.py +34 -30
  12. siliconcompiler/report/report.py +10 -5
  13. siliconcompiler/report/utils.py +12 -6
  14. siliconcompiler/scheduler/__init__.py +137 -974
  15. siliconcompiler/scheduler/send_messages.py +9 -3
  16. siliconcompiler/scheduler/slurm.py +10 -43
  17. siliconcompiler/scheduler/taskscheduler.py +320 -0
  18. siliconcompiler/schema/schema_cfg.py +2 -2
  19. siliconcompiler/tool.py +127 -19
  20. siliconcompiler/tools/_common/asic.py +5 -5
  21. siliconcompiler/tools/bluespec/convert.py +2 -1
  22. siliconcompiler/tools/builtin/_common.py +9 -2
  23. siliconcompiler/tools/builtin/concatenate.py +6 -2
  24. siliconcompiler/tools/builtin/minimum.py +7 -2
  25. siliconcompiler/tools/builtin/mux.py +7 -2
  26. siliconcompiler/tools/builtin/nop.py +7 -2
  27. siliconcompiler/tools/builtin/verify.py +7 -3
  28. siliconcompiler/tools/chisel/convert.py +10 -10
  29. siliconcompiler/tools/klayout/drc.py +2 -2
  30. siliconcompiler/tools/klayout/klayout_show.py +6 -6
  31. siliconcompiler/tools/klayout/klayout_utils.py +12 -12
  32. siliconcompiler/tools/netgen/count_lvs.py +2 -2
  33. siliconcompiler/tools/netgen/lvs.py +1 -1
  34. siliconcompiler/tools/openroad/_apr.py +2 -2
  35. siliconcompiler/tools/openroad/scripts/apr/sc_init_floorplan.tcl +1 -7
  36. siliconcompiler/tools/openroad/scripts/common/procs.tcl +18 -0
  37. siliconcompiler/tools/openroad/scripts/common/read_input_files.tcl +1 -7
  38. siliconcompiler/tools/opensta/scripts/sc_timing.tcl +10 -0
  39. siliconcompiler/tools/opensta/timing.py +11 -0
  40. siliconcompiler/tools/slang/__init__.py +3 -3
  41. siliconcompiler/tools/slang/elaborate.py +6 -6
  42. siliconcompiler/tools/slang/lint.py +1 -3
  43. siliconcompiler/tools/vpr/_xml_constraint.py +8 -8
  44. siliconcompiler/tools/yosys/prepareLib.py +2 -2
  45. siliconcompiler/tools/yosys/sc_synth_asic.tcl +43 -1
  46. siliconcompiler/tools/yosys/screenshot.py +1 -1
  47. siliconcompiler/tools/yosys/syn_asic.py +5 -5
  48. siliconcompiler/toolscripts/_tools.json +8 -6
  49. siliconcompiler/toolscripts/rhel8/install-chisel.sh +9 -2
  50. siliconcompiler/toolscripts/rhel8/install-icarus.sh +10 -3
  51. siliconcompiler/toolscripts/rhel8/install-klayout.sh +8 -1
  52. siliconcompiler/toolscripts/rhel8/install-magic.sh +9 -2
  53. siliconcompiler/toolscripts/rhel8/install-montage.sh +1 -1
  54. siliconcompiler/toolscripts/rhel8/install-netgen.sh +9 -2
  55. siliconcompiler/toolscripts/rhel8/install-slang.sh +11 -4
  56. siliconcompiler/toolscripts/rhel8/install-surelog.sh +9 -2
  57. siliconcompiler/toolscripts/rhel8/install-sv2v.sh +11 -4
  58. siliconcompiler/toolscripts/rhel8/install-verible.sh +11 -3
  59. siliconcompiler/toolscripts/rhel8/install-verilator.sh +10 -3
  60. siliconcompiler/toolscripts/rhel8/install-xyce.sh +15 -10
  61. siliconcompiler/toolscripts/rhel9/install-chisel.sh +9 -2
  62. siliconcompiler/toolscripts/rhel9/install-ghdl.sh +9 -2
  63. siliconcompiler/toolscripts/rhel9/install-gtkwave.sh +10 -3
  64. siliconcompiler/toolscripts/rhel9/install-icarus.sh +10 -3
  65. siliconcompiler/toolscripts/rhel9/install-klayout.sh +8 -1
  66. siliconcompiler/toolscripts/rhel9/install-magic.sh +9 -2
  67. siliconcompiler/toolscripts/rhel9/install-montage.sh +1 -1
  68. siliconcompiler/toolscripts/rhel9/install-netgen.sh +9 -2
  69. siliconcompiler/toolscripts/rhel9/install-openroad.sh +16 -3
  70. siliconcompiler/toolscripts/rhel9/install-opensta.sh +17 -5
  71. siliconcompiler/toolscripts/rhel9/install-slang.sh +11 -4
  72. siliconcompiler/toolscripts/rhel9/install-surelog.sh +9 -2
  73. siliconcompiler/toolscripts/rhel9/install-sv2v.sh +11 -4
  74. siliconcompiler/toolscripts/rhel9/install-verible.sh +11 -3
  75. siliconcompiler/toolscripts/rhel9/install-verilator.sh +10 -3
  76. siliconcompiler/toolscripts/rhel9/install-vpr.sh +9 -2
  77. siliconcompiler/toolscripts/rhel9/install-xdm.sh +10 -2
  78. siliconcompiler/toolscripts/rhel9/install-xyce.sh +15 -10
  79. siliconcompiler/toolscripts/rhel9/install-yosys-moosic.sh +9 -2
  80. siliconcompiler/toolscripts/rhel9/install-yosys-parmys.sh +10 -3
  81. siliconcompiler/toolscripts/rhel9/install-yosys-slang.sh +10 -2
  82. siliconcompiler/toolscripts/rhel9/install-yosys.sh +9 -2
  83. siliconcompiler/toolscripts/ubuntu20/install-bambu.sh +10 -2
  84. siliconcompiler/toolscripts/ubuntu20/install-bluespec.sh +10 -3
  85. siliconcompiler/toolscripts/ubuntu20/install-chisel.sh +9 -2
  86. siliconcompiler/toolscripts/ubuntu20/install-ghdl.sh +9 -2
  87. siliconcompiler/toolscripts/ubuntu20/install-gtkwave.sh +9 -2
  88. siliconcompiler/toolscripts/ubuntu20/install-icarus.sh +9 -2
  89. siliconcompiler/toolscripts/ubuntu20/install-icepack.sh +9 -2
  90. siliconcompiler/toolscripts/ubuntu20/install-klayout.sh +8 -1
  91. siliconcompiler/toolscripts/ubuntu20/install-magic.sh +9 -2
  92. siliconcompiler/toolscripts/ubuntu20/install-montage.sh +1 -1
  93. siliconcompiler/toolscripts/ubuntu20/install-netgen.sh +9 -2
  94. siliconcompiler/toolscripts/ubuntu20/install-nextpnr.sh +9 -2
  95. siliconcompiler/toolscripts/ubuntu20/install-openroad.sh +16 -3
  96. siliconcompiler/toolscripts/ubuntu20/install-opensta.sh +16 -5
  97. siliconcompiler/toolscripts/ubuntu20/install-slang.sh +11 -4
  98. siliconcompiler/toolscripts/ubuntu20/install-slurm.sh +9 -2
  99. siliconcompiler/toolscripts/ubuntu20/install-surelog.sh +10 -2
  100. siliconcompiler/toolscripts/ubuntu20/install-sv2v.sh +11 -4
  101. siliconcompiler/toolscripts/ubuntu20/install-verible.sh +11 -3
  102. siliconcompiler/toolscripts/ubuntu20/install-verilator.sh +9 -2
  103. siliconcompiler/toolscripts/ubuntu20/install-xdm.sh +10 -2
  104. siliconcompiler/toolscripts/ubuntu20/install-xyce.sh +13 -8
  105. siliconcompiler/toolscripts/ubuntu20/install-yosys-moosic.sh +9 -2
  106. siliconcompiler/toolscripts/ubuntu20/install-yosys.sh +9 -2
  107. siliconcompiler/toolscripts/ubuntu22/install-bambu.sh +10 -2
  108. siliconcompiler/toolscripts/ubuntu22/install-bluespec.sh +10 -3
  109. siliconcompiler/toolscripts/ubuntu22/install-chisel.sh +9 -2
  110. siliconcompiler/toolscripts/ubuntu22/install-ghdl.sh +9 -2
  111. siliconcompiler/toolscripts/ubuntu22/install-gtkwave.sh +9 -2
  112. siliconcompiler/toolscripts/ubuntu22/install-icarus.sh +9 -2
  113. siliconcompiler/toolscripts/ubuntu22/install-icepack.sh +9 -2
  114. siliconcompiler/toolscripts/ubuntu22/install-klayout.sh +8 -1
  115. siliconcompiler/toolscripts/ubuntu22/install-magic.sh +9 -2
  116. siliconcompiler/toolscripts/ubuntu22/install-montage.sh +1 -1
  117. siliconcompiler/toolscripts/ubuntu22/install-netgen.sh +9 -2
  118. siliconcompiler/toolscripts/ubuntu22/install-nextpnr.sh +9 -2
  119. siliconcompiler/toolscripts/ubuntu22/install-openroad.sh +16 -3
  120. siliconcompiler/toolscripts/ubuntu22/install-opensta.sh +17 -5
  121. siliconcompiler/toolscripts/ubuntu22/install-slang.sh +11 -4
  122. siliconcompiler/toolscripts/ubuntu22/install-slurm.sh +9 -2
  123. siliconcompiler/toolscripts/ubuntu22/install-surelog.sh +10 -2
  124. siliconcompiler/toolscripts/ubuntu22/install-sv2v.sh +11 -4
  125. siliconcompiler/toolscripts/ubuntu22/install-verible.sh +11 -3
  126. siliconcompiler/toolscripts/ubuntu22/install-verilator.sh +9 -2
  127. siliconcompiler/toolscripts/ubuntu22/install-vpr.sh +9 -2
  128. siliconcompiler/toolscripts/ubuntu22/install-xdm.sh +10 -2
  129. siliconcompiler/toolscripts/ubuntu22/install-xyce.sh +13 -8
  130. siliconcompiler/toolscripts/ubuntu22/install-yosys-moosic.sh +9 -2
  131. siliconcompiler/toolscripts/ubuntu22/install-yosys-parmys.sh +10 -3
  132. siliconcompiler/toolscripts/ubuntu22/install-yosys-slang.sh +10 -2
  133. siliconcompiler/toolscripts/ubuntu22/install-yosys.sh +9 -2
  134. siliconcompiler/toolscripts/ubuntu24/install-bambu.sh +12 -4
  135. siliconcompiler/toolscripts/ubuntu24/install-bluespec.sh +10 -3
  136. siliconcompiler/toolscripts/ubuntu24/install-chisel.sh +9 -2
  137. siliconcompiler/toolscripts/ubuntu24/install-ghdl.sh +9 -2
  138. siliconcompiler/toolscripts/ubuntu24/install-gtkwave.sh +9 -2
  139. siliconcompiler/toolscripts/ubuntu24/install-icarus.sh +9 -2
  140. siliconcompiler/toolscripts/ubuntu24/install-icepack.sh +9 -2
  141. siliconcompiler/toolscripts/ubuntu24/install-klayout.sh +8 -1
  142. siliconcompiler/toolscripts/ubuntu24/install-magic.sh +9 -2
  143. siliconcompiler/toolscripts/ubuntu24/install-montage.sh +1 -1
  144. siliconcompiler/toolscripts/ubuntu24/install-netgen.sh +9 -2
  145. siliconcompiler/toolscripts/ubuntu24/install-nextpnr.sh +9 -2
  146. siliconcompiler/toolscripts/ubuntu24/install-openroad.sh +16 -3
  147. siliconcompiler/toolscripts/ubuntu24/install-opensta.sh +17 -5
  148. siliconcompiler/toolscripts/ubuntu24/install-slang.sh +11 -4
  149. siliconcompiler/toolscripts/ubuntu24/install-slurm.sh +9 -2
  150. siliconcompiler/toolscripts/ubuntu24/install-surelog.sh +10 -2
  151. siliconcompiler/toolscripts/ubuntu24/install-sv2v.sh +11 -4
  152. siliconcompiler/toolscripts/ubuntu24/install-verible.sh +11 -3
  153. siliconcompiler/toolscripts/ubuntu24/install-verilator.sh +9 -2
  154. siliconcompiler/toolscripts/ubuntu24/install-vpr.sh +9 -2
  155. siliconcompiler/toolscripts/ubuntu24/install-xdm.sh +10 -2
  156. siliconcompiler/toolscripts/ubuntu24/install-xyce.sh +13 -8
  157. siliconcompiler/toolscripts/ubuntu24/install-yosys-moosic.sh +9 -2
  158. siliconcompiler/toolscripts/ubuntu24/install-yosys-parmys.sh +10 -3
  159. siliconcompiler/toolscripts/ubuntu24/install-yosys-slang.sh +10 -2
  160. siliconcompiler/toolscripts/ubuntu24/install-yosys.sh +9 -2
  161. siliconcompiler/utils/__init__.py +11 -0
  162. siliconcompiler/utils/flowgraph.py +6 -101
  163. siliconcompiler/utils/issue.py +15 -23
  164. siliconcompiler/utils/logging.py +2 -2
  165. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.1.dist-info}/METADATA +2 -2
  166. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.1.dist-info}/RECORD +170 -169
  167. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.1.dist-info}/WHEEL +1 -1
  168. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.1.dist-info}/entry_points.txt +0 -0
  169. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.1.dist-info}/licenses/LICENSE +0 -0
  170. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.1.dist-info}/top_level.txt +0 -0
@@ -1,70 +1,28 @@
1
- import contextlib
2
- import multiprocessing
3
1
  import logging
4
2
  import os
5
- import psutil
6
3
  import re
7
- import shlex
8
4
  import shutil
9
- import subprocess
10
5
  import sys
11
- import time
12
- import packaging.version
13
- import packaging.specifiers
14
- from io import StringIO
15
- import traceback
16
- from logging.handlers import QueueHandler, QueueListener
6
+ from logging.handlers import QueueHandler
17
7
  from siliconcompiler import sc_open
18
8
  from siliconcompiler import utils
19
9
  from siliconcompiler.remote import Client
20
10
  from siliconcompiler import Schema
21
11
  from siliconcompiler.schema import JournalingSchema
22
12
  from siliconcompiler.record import RecordTime, RecordTool
23
- from siliconcompiler.scheduler import slurm
24
- from siliconcompiler.scheduler import docker_runner
25
13
  from siliconcompiler import NodeStatus, SiliconCompilerError
26
- from siliconcompiler.utils.flowgraph import _get_flowgraph_execution_order, \
27
- _get_pruned_node_inputs, \
28
- get_nodes_from, nodes_to_execute, _check_flowgraph
29
- from siliconcompiler.utils.logging import SCBlankLoggerFormatter
30
14
  from siliconcompiler.tools._common import input_file_node_name
31
15
  import lambdapdk
32
16
  from siliconcompiler.tools._common import get_tool_task, record_metric
33
17
  from siliconcompiler.scheduler import send_messages
34
18
  from siliconcompiler.flowgraph import RuntimeFlowgraph
35
-
36
- try:
37
- import resource
38
- except ModuleNotFoundError:
39
- resource = None
40
-
41
-
42
- # callback hooks to help custom runners track progress
43
- _callback_funcs = {}
44
-
45
-
46
- def register_callback(hook, func):
47
- _callback_funcs[hook] = func
48
-
49
-
50
- def _get_callback(hook):
51
- if hook in _callback_funcs:
52
- return _callback_funcs[hook]
53
- return None
19
+ from siliconcompiler.scheduler.taskscheduler import TaskScheduler
54
20
 
55
21
 
56
22
  # Max lines to print from failed node log
57
23
  _failed_log_lines = 20
58
24
 
59
25
 
60
- #######################################
61
- def _do_record_access():
62
- '''
63
- Determine if Schema should record calls to .get
64
- '''
65
- return False
66
-
67
-
68
26
  ###############################################################################
69
27
  class SiliconCompilerTimeout(Exception):
70
28
  ''' Minimal Exception wrapper used to raise sc timeout errors.
@@ -97,14 +55,30 @@ def run(chip):
97
55
 
98
56
  # Check if flowgraph is complete and valid
99
57
  flow = chip.get('option', 'flow')
100
- if not _check_flowgraph(chip, flow=flow):
58
+ if not chip.schema.get("flowgraph", flow, field="schema").validate(logger=chip.logger):
59
+ raise SiliconCompilerError(
60
+ f"{flow} flowgraph contains errors and cannot be run.",
61
+ chip=chip)
62
+ if not RuntimeFlowgraph.validate(
63
+ chip.schema.get("flowgraph", flow, field="schema"),
64
+ from_steps=chip.get('option', 'from'),
65
+ to_steps=chip.get('option', 'to'),
66
+ prune_nodes=chip.get('option', 'prune'),
67
+ logger=chip.logger):
101
68
  raise SiliconCompilerError(
102
69
  f"{flow} flowgraph contains errors and cannot be run.",
103
70
  chip=chip)
104
71
 
105
72
  copy_old_run_dir(chip, org_jobname)
106
73
  clean_build_dir(chip)
107
- _reset_flow_nodes(chip, flow, nodes_to_execute(chip, flow))
74
+
75
+ runtime = RuntimeFlowgraph(
76
+ chip.schema.get("flowgraph", flow, field='schema'),
77
+ from_steps=chip.get('option', 'from'),
78
+ to_steps=chip.get('option', 'to'),
79
+ prune_nodes=chip.get('option', 'prune'))
80
+
81
+ _reset_flow_nodes(chip, flow, runtime.get_nodes())
108
82
  chip.schema.get("record", field='schema').record_python_packages()
109
83
 
110
84
  if chip.get('option', 'remote'):
@@ -191,7 +165,7 @@ def _local_process(chip, flow):
191
165
  prune_nodes=chip.get('option', 'prune'))
192
166
  load_nodes = list(runtime.get_nodes())
193
167
 
194
- for node_level in _get_flowgraph_execution_order(chip, flow):
168
+ for node_level in chip.schema.get("flowgraph", flow, field="schema").get_execution_order():
195
169
  for step, index in node_level:
196
170
  if (step, index) not in load_nodes:
197
171
  continue
@@ -210,10 +184,16 @@ def _local_process(chip, flow):
210
184
  except Exception:
211
185
  pass
212
186
 
187
+ runtimeflow = RuntimeFlowgraph(
188
+ chip.schema.get("flowgraph", flow, field="schema"),
189
+ from_steps=chip.get('option', 'from'),
190
+ to_steps=chip.get('option', 'to'),
191
+ prune_nodes=chip.get('option', 'prune'))
192
+
213
193
  # Setup tools for all nodes to run.
214
- nodes = list(nodes_to_execute(chip, flow))
194
+ nodes = list(runtimeflow.get_nodes())
215
195
  all_setup_nodes = nodes + load_nodes + list(extra_setup_nodes.keys())
216
- for layer_nodes in _get_flowgraph_execution_order(chip, flow):
196
+ for layer_nodes in chip.schema.get("flowgraph", flow, field="schema").get_execution_order():
217
197
  for step, index in layer_nodes:
218
198
  if (step, index) in all_setup_nodes:
219
199
  node_kept = _setup_node(chip, step, index)
@@ -233,7 +213,7 @@ def _local_process(chip, flow):
233
213
  def mark_pending(step, index):
234
214
  chip.schema.get("record", field='schema').set('status', NodeStatus.PENDING,
235
215
  step=step, index=index)
236
- for next_step, next_index in get_nodes_from(chip, flow, [(step, index)]):
216
+ for next_step, next_index in runtimeflow.get_nodes_starting_at(step, index):
237
217
  if chip.get('record', 'status', step=next_step, index=next_index) == \
238
218
  NodeStatus.SKIPPED:
239
219
  continue
@@ -243,13 +223,7 @@ def _local_process(chip, flow):
243
223
  step=next_step, index=next_index)
244
224
 
245
225
  # Check if nodes have been modified from previous data
246
- runtimeflow = RuntimeFlowgraph(
247
- chip.schema.get("flowgraph", flow, field="schema"),
248
- from_steps=chip.get('option', 'from'),
249
- to_steps=chip.get('option', 'to'),
250
- prune_nodes=chip.get('option', 'prune'))
251
-
252
- for layer_nodes in _get_flowgraph_execution_order(chip, flow):
226
+ for layer_nodes in chip.schema.get("flowgraph", flow, field="schema").get_execution_order():
253
227
  for step, index in layer_nodes:
254
228
  # Only look at successful nodes
255
229
  if chip.get('record', 'status', step=step, index=index) not in \
@@ -289,40 +263,11 @@ def _local_process(chip, flow):
289
263
  'Implementation errors encountered. See previous errors.',
290
264
  chip=chip)
291
265
 
292
- nodes_to_run = {}
293
- processes = {}
294
- local_processes = []
295
- log_queue = _prepare_nodes(chip, nodes_to_run, processes, local_processes, flow)
296
-
297
- # Handle logs across threads
298
- log_listener = QueueListener(log_queue, chip.logger._console)
299
- chip.logger._console.setFormatter(SCBlankLoggerFormatter())
300
- log_listener.start()
301
-
302
- # Update dashboard before run begins
303
- if chip._dash:
304
- chip._dash.update_manifest()
305
-
306
- try:
307
- _launch_nodes(chip, nodes_to_run, processes, local_processes)
308
- except KeyboardInterrupt:
309
- # exit immediately
310
- log_listener.stop()
311
- sys.exit(0)
312
-
313
- if _get_callback('post_run'):
314
- _get_callback('post_run')(chip)
266
+ task_scheduler = TaskScheduler(chip)
267
+ task_scheduler.run()
315
268
 
316
269
  _check_nodes_status(chip, flow)
317
270
 
318
- # Cleanup logger
319
- log_listener.stop()
320
- chip._init_logger_formats()
321
-
322
-
323
- def __is_posix():
324
- return sys.platform != 'win32'
325
-
326
271
 
327
272
  ###########################################################################
328
273
  def _setup_node(chip, step, index, flow=None):
@@ -337,21 +282,19 @@ def _setup_node(chip, step, index, flow=None):
337
282
  chip.set('arg', 'index', index)
338
283
  tool, task = get_tool_task(chip, step, index, flow=flow)
339
284
 
285
+ task_class = chip.get("tool", tool, field="schema")
286
+ task_class.set_runtime(chip)
287
+
340
288
  # Run node setup.
289
+ chip.logger.info(f'Setting up node {step}{index} with {tool}/{task}')
341
290
  setup_ret = None
342
291
  try:
343
- setup_step = getattr(chip._get_task_module(step, index), 'setup', None)
344
- except SiliconCompilerError:
345
- setup_step = None
346
- if setup_step:
347
- try:
348
- chip.logger.info(f'Setting up node {step}{index} with {tool}/{task}')
349
- setup_ret = setup_step(chip)
350
- except Exception as e:
351
- chip.logger.error(f'Failed to run setup() for {tool}/{task}')
352
- raise e
353
- else:
354
- raise SiliconCompilerError(f'setup() not found for tool {tool}, task {task}', chip=chip)
292
+ setup_ret = task_class.setup()
293
+ except Exception as e:
294
+ chip.logger.error(f'Failed to run setup() for {tool}/{task}')
295
+ raise e
296
+
297
+ task_class.set_runtime(None)
355
298
 
356
299
  # Need to restore step/index, otherwise we will skip setting up other indices.
357
300
  chip.set('option', 'flow', preset_flow)
@@ -368,86 +311,6 @@ def _setup_node(chip, step, index, flow=None):
368
311
  return True
369
312
 
370
313
 
371
- def _check_version(chip, reported_version, tool, step, index):
372
- # Based on regex for deprecated "legacy specifier" from PyPA packaging
373
- # library. Use this to parse PEP-440ish specifiers with arbitrary
374
- # versions.
375
- _regex_str = r"""
376
- (?P<operator>(==|!=|<=|>=|<|>|~=))
377
- \s*
378
- (?P<version>
379
- [^,;\s)]* # Since this is a "legacy" specifier, and the version
380
- # string can be just about anything, we match everything
381
- # except for whitespace, a semi-colon for marker support,
382
- # a closing paren since versions can be enclosed in
383
- # them, and a comma since it's a version separator.
384
- )
385
- """
386
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
387
-
388
- normalize_version = getattr(chip._get_tool_module(step, index), 'normalize_version', None)
389
- # Version is good if it matches any of the specifier sets in this list.
390
- spec_sets = chip.get('tool', tool, 'version', step=step, index=index)
391
- if not spec_sets:
392
- return True
393
-
394
- for spec_set in spec_sets:
395
- split_specs = [s.strip() for s in spec_set.split(",") if s.strip()]
396
- specs_list = []
397
- for spec in split_specs:
398
- match = re.match(_regex, spec)
399
- if match is None:
400
- chip.logger.warning(f'Invalid version specifier {spec}. '
401
- f'Defaulting to =={spec}.')
402
- operator = '=='
403
- spec_version = spec
404
- else:
405
- operator = match.group('operator')
406
- spec_version = match.group('version')
407
- specs_list.append((operator, spec_version))
408
-
409
- if normalize_version is None:
410
- normalized_version = reported_version
411
- normalized_specs = ','.join([f'{op}{ver}' for op, ver in specs_list])
412
- else:
413
- try:
414
- normalized_version = normalize_version(reported_version)
415
- except Exception as e:
416
- chip.logger.error(f'Unable to normalize version for {tool}: {reported_version}')
417
- raise e
418
- normalized_spec_list = [f'{op}{normalize_version(ver)}' for op, ver in specs_list]
419
- normalized_specs = ','.join(normalized_spec_list)
420
-
421
- try:
422
- version = packaging.version.Version(normalized_version)
423
- except packaging.version.InvalidVersion:
424
- chip.logger.error(f'Version {reported_version} reported by {tool} does '
425
- 'not match standard.')
426
- if normalize_version is None:
427
- chip.logger.error('Tool driver should implement normalize_version().')
428
- else:
429
- chip.logger.error('normalize_version() returned '
430
- f'invalid version {normalized_version}')
431
-
432
- return False
433
-
434
- try:
435
- spec_set = packaging.specifiers.SpecifierSet(normalized_specs)
436
- except packaging.specifiers.InvalidSpecifier:
437
- chip.logger.error(f'Version specifier set {normalized_specs} '
438
- 'does not match standard.')
439
- return False
440
-
441
- if version in spec_set:
442
- return True
443
-
444
- allowedstr = '; '.join(spec_sets)
445
- chip.logger.error(f"Version check failed for {tool}. Check installation.")
446
- chip.logger.error(f"Found version {reported_version}, "
447
- f"did not satisfy any version specifier set {allowedstr}.")
448
- return False
449
-
450
-
451
314
  ###########################################################################
452
315
  def _runtask(chip, flow, step, index, exec_func, pipe=None, queue=None, replay=False):
453
316
  '''
@@ -497,7 +360,7 @@ def _runtask(chip, flow, step, index, exec_func, pipe=None, queue=None, replay=F
497
360
 
498
361
  exec_func(chip, step, index, replay)
499
362
  except Exception as e:
500
- print_traceback(chip, e)
363
+ utils.print_traceback(chip.logger, e)
501
364
  _haltstep(chip, chip.get('option', 'flow'), step, index)
502
365
 
503
366
  # return to original directory
@@ -539,20 +402,6 @@ def _setupnode(chip, flow, step, index, replay):
539
402
  _haltstep(chip, flow, step, index)
540
403
 
541
404
 
542
- ###########################################################################
543
- def _write_task_manifest(chip, tool, path=None, backup=True):
544
- suffix = chip.get('tool', tool, 'format')
545
- if suffix:
546
- manifest_path = f"sc_manifest.{suffix}"
547
- if path:
548
- manifest_path = os.path.join(path, manifest_path)
549
-
550
- if backup and os.path.exists(manifest_path):
551
- shutil.copyfile(manifest_path, f'{manifest_path}.bak')
552
-
553
- chip.write_manifest(manifest_path, abspath=True)
554
-
555
-
556
405
  ###########################################################################
557
406
  def _setup_workdir(chip, step, index, replay):
558
407
  workdir = chip.getworkdir(step=step, index=index)
@@ -570,20 +419,18 @@ def _select_inputs(chip, step, index, trial=False):
570
419
 
571
420
  flow = chip.get('option', 'flow')
572
421
  tool, _ = get_tool_task(chip, step, index, flow)
573
- sel_inputs = []
574
-
575
- select_inputs = getattr(chip._get_task_module(step, index, flow=flow),
576
- '_select_inputs',
577
- None)
578
- if select_inputs:
579
- log_level = chip.logger.level
580
- if trial:
581
- chip.logger.setLevel(logging.CRITICAL)
582
- sel_inputs = select_inputs(chip, step, index)
583
- if trial:
584
- chip.logger.setLevel(log_level)
585
- else:
586
- sel_inputs = _get_pruned_node_inputs(chip, flow, (step, index))
422
+
423
+ task_class = chip.get("tool", tool, field="schema")
424
+ task_class.set_runtime(chip, step=step, index=index)
425
+
426
+ log_level = chip.logger.level
427
+ if trial:
428
+ chip.logger.setLevel(logging.CRITICAL)
429
+
430
+ sel_inputs = task_class.select_input_nodes()
431
+
432
+ if trial:
433
+ chip.logger.setLevel(log_level)
587
434
 
588
435
  if (step, index) not in chip.schema.get("flowgraph", flow, field="schema").get_entry_nodes() \
589
436
  and not sel_inputs:
@@ -624,10 +471,18 @@ def _copy_previous_steps_output_data(chip, step, index, replay):
624
471
  '''
625
472
 
626
473
  flow = chip.get('option', 'flow')
627
- if not _get_pruned_node_inputs(chip, flow, (step, index)):
474
+
475
+ flow_schema = chip.schema.get("flowgraph", flow, field="schema")
476
+ runtime = RuntimeFlowgraph(
477
+ flow_schema,
478
+ from_steps=set([step for step, _ in flow_schema.get_entry_nodes()]),
479
+ prune_nodes=chip.get('option', 'prune'))
480
+
481
+ if not runtime.get_node_inputs(step, index, record=chip.schema.get("record", field="schema")):
628
482
  all_inputs = []
629
483
  elif not chip.get('record', 'inputnode', step=step, index=index):
630
- all_inputs = _get_pruned_node_inputs(chip, flow, (step, index))
484
+ all_inputs = runtime.get_node_inputs(step, index,
485
+ record=chip.schema.get("record", field="schema"))
631
486
  else:
632
487
  all_inputs = chip.get('record', 'inputnode', step=step, index=index)
633
488
 
@@ -663,414 +518,9 @@ def _copy_previous_steps_output_data(chip, step, index, replay):
663
518
  os.rename(f'inputs/{outfile.name}', f'inputs/{new_name}')
664
519
 
665
520
 
666
- def __read_std_streams(chip, quiet,
667
- is_stdout_log, stdout_reader, stdout_print,
668
- is_stderr_log, stderr_reader, stderr_print):
669
- '''
670
- Handle directing tool outputs to logger
671
- '''
672
- if not quiet:
673
- if is_stdout_log:
674
- for line in stdout_reader.readlines():
675
- stdout_print(line.rstrip())
676
- if is_stderr_log:
677
- for line in stderr_reader.readlines():
678
- stderr_print(line.rstrip())
679
-
680
-
681
521
  ############################################################################
682
522
  # Chip helper Functions
683
523
  ############################################################################
684
- def _getexe(chip, tool, step, index):
685
- exe = chip.get('tool', tool, 'exe')
686
- if exe is None:
687
- return None
688
- path = chip.find_files('tool', tool, 'path', step=step, index=index)
689
-
690
- syspath = os.getenv('PATH', os.defpath)
691
- if path:
692
- # Prepend 'path' schema var to system path
693
- syspath = path + os.pathsep + syspath
694
-
695
- fullexe = shutil.which(exe, path=syspath)
696
-
697
- return fullexe
698
-
699
-
700
- def _get_run_env_vars(chip, tool, task, step, index, include_path):
701
- envvars = utils.get_env_vars(chip, step, index)
702
- for item in chip.getkeys('tool', tool, 'licenseserver'):
703
- license_file = chip.get('tool', tool, 'licenseserver', item, step=step, index=index)
704
- if license_file:
705
- envvars[item] = ':'.join(license_file)
706
-
707
- if include_path:
708
- path = chip.get('tool', tool, 'path', step=step, index=index)
709
- if path:
710
- envvars['PATH'] = path + os.pathsep + os.environ['PATH']
711
- else:
712
- envvars['PATH'] = os.environ['PATH']
713
-
714
- # Forward additional variables
715
- for var in ('LD_LIBRARY_PATH',):
716
- val = os.getenv(var, None)
717
- if val:
718
- envvars[var] = val
719
-
720
- return envvars
721
-
722
-
723
- #######################################
724
- def _makecmd(chip, tool, task, step, index, script_name='replay.sh', include_path=True):
725
- '''
726
- Constructs a subprocess run command based on eda tool setup.
727
- Creates a replay script in current directory.
728
-
729
- Returns:
730
- runnable command (list)
731
- printable command (str)
732
- command name (str)
733
- command arguments (list)
734
- '''
735
-
736
- fullexe = _getexe(chip, tool, step, index)
737
-
738
- def parse_options(options):
739
- if not options:
740
- return []
741
- shlex_opts = []
742
- for option in options:
743
- shlex_opts.append(str(option).strip())
744
- return shlex_opts
745
-
746
- # Add scripts files
747
- scripts = chip.find_files('tool', tool, 'task', task, 'script', step=step, index=index)
748
-
749
- cmdlist = [fullexe]
750
- cmdlist.extend(parse_options(chip.get('tool', tool, 'task', task, 'option',
751
- step=step, index=index)))
752
- cmdlist.extend(scripts)
753
-
754
- runtime_options = getattr(chip._get_task_module(step, index), 'runtime_options', None)
755
- if not runtime_options:
756
- runtime_options = getattr(chip._get_tool_module(step, index), 'runtime_options', None)
757
- if runtime_options:
758
- try:
759
- if _do_record_access():
760
- chip.schema.add_journaling_type("get")
761
- cmdlist.extend(parse_options(runtime_options(chip)))
762
- chip.schema.remove_journaling_type("get")
763
- except Exception as e:
764
- chip.logger.error(f'Failed to get runtime options for {tool}/{task}')
765
- raise e
766
-
767
- # Separate variables to be able to display nice name of executable
768
- cmd = os.path.basename(cmdlist[0])
769
- cmd_args = cmdlist[1:]
770
- print_cmd = shlex.join([cmd, *cmd_args])
771
-
772
- # create replay file
773
- with open(script_name, 'w') as f:
774
- # Ensure execution runs from the same directory
775
- replay_opts = {}
776
- work_dir = chip.getworkdir(step=step, index=index)
777
- if chip._relative_path:
778
- work_dir = os.path.relpath(work_dir, chip._relative_path)
779
- replay_opts["work_dir"] = work_dir
780
- replay_opts["exports"] = _get_run_env_vars(chip,
781
- tool, task,
782
- step, index,
783
- include_path=include_path)
784
- replay_opts["executable"] = chip.get('tool', tool, 'exe')
785
-
786
- vswitch = chip.get('tool', tool, 'vswitch')
787
- if vswitch:
788
- replay_opts["version_flag"] = " ".join(vswitch)
789
-
790
- format_cmd = [replay_opts["executable"]]
791
- arg_test = re.compile(r'^[-+]')
792
- file_test = re.compile(r'^[/]')
793
- for cmdarg in cmd_args:
794
- add_new_line = len(format_cmd) == 1
795
-
796
- if arg_test.match(cmdarg) or file_test.match(cmdarg):
797
- add_new_line = True
798
- else:
799
- if not arg_test.match(format_cmd[-1]):
800
- add_new_line = True
801
-
802
- if add_new_line:
803
- format_cmd.append(shlex.quote(cmdarg))
804
- else:
805
- format_cmd[-1] += f' {shlex.quote(cmdarg)}'
806
-
807
- replay_opts["cmds"] = format_cmd
808
-
809
- f.write(utils.get_file_template("replay/replay.sh.j2").render(replay_opts))
810
- f.write("\n")
811
-
812
- os.chmod(script_name, 0o755)
813
-
814
- return cmdlist, print_cmd, cmd, cmd_args
815
-
816
-
817
- def __get_stdio(chip, tool, task, flow, step, index):
818
- def get_file(io_type):
819
- suffix = chip.get('tool', tool, 'task', task, io_type, 'suffix',
820
- step=step, index=index)
821
- destination = chip.get('tool', tool, 'task', task, io_type, 'destination',
822
- step=step, index=index)
823
-
824
- io_file = None
825
- if destination == 'log':
826
- io_file = step + "." + suffix
827
- elif destination == 'output':
828
- io_file = os.path.join('outputs', chip.top() + "." + suffix)
829
- elif destination == 'none':
830
- io_file = os.devnull
831
- else:
832
- # This should not happen
833
- chip.logger.error(f'{io_type}/destination has no support for {destination}.')
834
- _haltstep(chip, flow, step, index)
835
-
836
- return io_file
837
-
838
- stdout_file = get_file('stdout')
839
- stderr_file = get_file('stderr')
840
-
841
- return stdout_file, stderr_file
842
-
843
-
844
- def _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, run_func=None):
845
- '''
846
- Run executable (or copy inputs to outputs for builtin functions)
847
- '''
848
-
849
- flow = chip.get('option', 'flow')
850
- tool, task = get_tool_task(chip, step, index, flow)
851
-
852
- quiet = (
853
- chip.get('option', 'quiet', step=step, index=index) and
854
- not chip.get('option', 'breakpoint', step=step, index=index)
855
- )
856
-
857
- stdout_print = chip.logger.info
858
- stderr_print = chip.logger.error
859
- if chip.get('option', 'loglevel', step=step, index=index) == "quiet":
860
- stdout_print = chip.logger.error
861
- stderr_print = chip.logger.error
862
-
863
- # TODO: Currently no memory usage tracking in breakpoints, builtins, or unexpected errors.
864
- max_mem_bytes = 0
865
- cpu_start = time.time()
866
-
867
- stdout_file, stderr_file = __get_stdio(chip, tool, task, flow, step, index)
868
- is_stdout_log = chip.get('tool', tool, 'task', task, 'stdout', 'destination',
869
- step=step, index=index) == 'log'
870
- is_stderr_log = chip.get('tool', tool, 'task', task, 'stderr', 'destination',
871
- step=step, index=index) == 'log' and stderr_file != stdout_file
872
-
873
- chip.logger.info(f'Running in {workdir}')
874
-
875
- retcode = 0
876
- cmdlist = []
877
- cmd_args = []
878
- if run_func:
879
- logfile = None
880
- try:
881
- with open(stdout_file, 'w') as stdout_writer, \
882
- open(stderr_file, 'w') as stderr_writer:
883
- if stderr_file == stdout_file:
884
- stderr_writer.close()
885
- stderr_writer = sys.stdout
886
-
887
- # Handle logger stdout suppression if quiet
888
- stdout_handler_level = chip.logger._console.level
889
- if chip.get('option', 'quiet', step=step, index=index):
890
- chip.logger._console.setLevel(logging.CRITICAL)
891
-
892
- with contextlib.redirect_stderr(stderr_writer), \
893
- contextlib.redirect_stdout(stdout_writer):
894
- retcode = run_func(chip)
895
-
896
- chip.logger._console.setLevel(stdout_handler_level)
897
- except Exception as e:
898
- chip.logger.error(f'Failed in run() for {tool}/{task}: {e}')
899
- retcode = 1 # default to non-zero
900
- print_traceback(chip, e)
901
- chip._error = True
902
- finally:
903
- with sc_open(stdout_file) as stdout_reader, \
904
- sc_open(stderr_file) as stderr_reader:
905
- __read_std_streams(chip,
906
- quiet,
907
- is_stdout_log, stdout_reader, stdout_print,
908
- is_stderr_log, stderr_reader, stderr_print)
909
-
910
- try:
911
- if resource:
912
- # Since memory collection is not possible, collect the current process
913
- # peak memory
914
- max_mem_bytes = max(
915
- max_mem_bytes,
916
- 1024 * resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
917
- except (OSError, ValueError, PermissionError):
918
- pass
919
- else:
920
- cmdlist, printable_cmd, _, cmd_args = _makecmd(chip, tool, task, step, index)
921
-
922
- ##################
923
- # Make record of tool options
924
- if cmd_args is not None:
925
- chip.schema.get("record", field='schema').record_tool(
926
- step, index, cmd_args, RecordTool.ARGS)
927
-
928
- chip.logger.info('%s', printable_cmd)
929
- timeout = chip.get('option', 'timeout', step=step, index=index)
930
- logfile = step + '.log'
931
- if sys.platform in ('darwin', 'linux') and \
932
- chip.get('option', 'breakpoint', step=step, index=index):
933
- # When we break on a step, the tool often drops into a shell.
934
- # However, our usual subprocess scheme seems to break terminal
935
- # echo for some tools. On POSIX-compatible systems, we can use
936
- # pty to connect the tool to our terminal instead. This code
937
- # doesn't handle quiet/timeout logic, since we don't want either
938
- # of these features for an interactive session. Logic for
939
- # forwarding to file based on
940
- # https://docs.python.org/3/library/pty.html#example.
941
- with open(logfile, 'wb') as log_writer:
942
- def read(fd):
943
- data = os.read(fd, 1024)
944
- log_writer.write(data)
945
- return data
946
- import pty # Note: this import throws exception on Windows
947
- retcode = pty.spawn(cmdlist, read)
948
- else:
949
- with open(stdout_file, 'w') as stdout_writer, \
950
- open(stdout_file, 'r', errors='replace_with_warning') as stdout_reader, \
951
- open(stderr_file, 'w') as stderr_writer, \
952
- open(stderr_file, 'r', errors='replace_with_warning') as stderr_reader:
953
- # if STDOUT and STDERR are to be redirected to the same file,
954
- # use a single writer
955
- if stderr_file == stdout_file:
956
- stderr_writer.close()
957
- stderr_reader.close()
958
- stderr_writer = subprocess.STDOUT
959
-
960
- preexec_fn = None
961
- nice = None
962
- if __is_posix():
963
- nice = chip.get('option', 'nice', step=step, index=index)
964
-
965
- def set_nice():
966
- os.nice(nice)
967
-
968
- if nice:
969
- preexec_fn = set_nice
970
-
971
- proc = subprocess.Popen(cmdlist,
972
- stdin=subprocess.DEVNULL,
973
- stdout=stdout_writer,
974
- stderr=stderr_writer,
975
- preexec_fn=preexec_fn)
976
- # How long to wait for proc to quit on ctrl-c before force
977
- # terminating.
978
- POLL_INTERVAL = 0.1
979
- MEMORY_WARN_LIMIT = 90
980
- try:
981
- while proc.poll() is None:
982
- # Gather subprocess memory usage.
983
- try:
984
- pproc = psutil.Process(proc.pid)
985
- proc_mem_bytes = pproc.memory_full_info().uss
986
- for child in pproc.children(recursive=True):
987
- proc_mem_bytes += child.memory_full_info().uss
988
- max_mem_bytes = max(max_mem_bytes, proc_mem_bytes)
989
-
990
- memory_usage = psutil.virtual_memory()
991
- if memory_usage.percent > MEMORY_WARN_LIMIT:
992
- chip.logger.warn(
993
- f'Current system memory usage is {memory_usage.percent}%')
994
-
995
- # increase limit warning
996
- MEMORY_WARN_LIMIT = int(memory_usage.percent + 1)
997
- except psutil.Error:
998
- # Process may have already terminated or been killed.
999
- # Retain existing memory usage statistics in this case.
1000
- pass
1001
- except PermissionError:
1002
- # OS is preventing access to this information so it cannot
1003
- # be collected
1004
- pass
1005
-
1006
- # Loop until process terminates
1007
- __read_std_streams(chip,
1008
- quiet,
1009
- is_stdout_log, stdout_reader, stdout_print,
1010
- is_stderr_log, stderr_reader, stderr_print)
1011
-
1012
- if timeout is not None and time.time() - cpu_start > timeout:
1013
- chip.logger.error(f'Step timed out after {timeout} seconds')
1014
- utils.terminate_process(proc.pid)
1015
- raise SiliconCompilerTimeout(f'{step}{index} timeout')
1016
- time.sleep(POLL_INTERVAL)
1017
- except KeyboardInterrupt:
1018
- kill_process(chip, proc, tool, 5 * POLL_INTERVAL, msg="Received ctrl-c. ")
1019
- _haltstep(chip, flow, step, index, log=False)
1020
- except SiliconCompilerTimeout:
1021
- send_messages.send(chip, "timeout", step, index)
1022
- kill_process(chip, proc, tool, 5 * POLL_INTERVAL)
1023
- chip._error = True
1024
-
1025
- # Read the remaining
1026
- __read_std_streams(chip,
1027
- quiet,
1028
- is_stdout_log, stdout_reader, stdout_print,
1029
- is_stderr_log, stderr_reader, stderr_print)
1030
- retcode = proc.returncode
1031
-
1032
- chip.schema.get("record", field='schema').record_tool(step, index, retcode, RecordTool.EXITCODE)
1033
- if retcode != 0:
1034
- msg = f'Command failed with code {retcode}.'
1035
- if logfile:
1036
- if quiet:
1037
- # Print last N lines of log when in quiet mode
1038
- with sc_open(logfile) as logfd:
1039
- loglines = logfd.read().splitlines()
1040
- for logline in loglines[-_failed_log_lines:]:
1041
- chip.logger.error(logline)
1042
- # No log file for pure-Python tools.
1043
- msg += f' See log file {os.path.abspath(logfile)}'
1044
- chip.logger.warning(msg)
1045
- chip._error = True
1046
-
1047
- # Capture cpu runtime
1048
- record_metric(chip, step, index, 'exetime', round((time.time() - cpu_start), 2),
1049
- source=None,
1050
- source_unit='s')
1051
-
1052
- # Capture memory usage
1053
- record_metric(chip, step, index, 'memory', max_mem_bytes,
1054
- source=None,
1055
- source_unit='B')
1056
-
1057
-
1058
- def _post_process(chip, step, index):
1059
- flow = chip.get('option', 'flow')
1060
- tool, task = get_tool_task(chip, step, index, flow)
1061
- func = getattr(chip._get_task_module(step, index, flow=flow), 'post_process', None)
1062
- if func:
1063
- try:
1064
- if _do_record_access():
1065
- chip.schema.add_journaling_type("get")
1066
- func(chip)
1067
- chip.schema.remove_journaling_type("get")
1068
- except Exception as e:
1069
- chip.logger.error(f'Failed to run post-process for {tool}/{task}.')
1070
- print_traceback(chip, e)
1071
- chip._error = True
1072
-
1073
-
1074
524
  def _check_logfile(chip, step, index, quiet=False, run_func=None):
1075
525
  '''
1076
526
  Check log file (must be after post-process)
@@ -1113,9 +563,19 @@ def _check_logfile(chip, step, index, quiet=False, run_func=None):
1113
563
  def _executenode(chip, step, index, replay):
1114
564
  workdir = chip.getworkdir(step=step, index=index)
1115
565
  flow = chip.get('option', 'flow')
1116
- tool, _ = get_tool_task(chip, step, index, flow)
566
+ tool, task = get_tool_task(chip, step, index, flow)
1117
567
 
1118
- _pre_process(chip, step, index)
568
+ task_class = chip.get("tool", tool, field="schema")
569
+ task_class.set_runtime(chip)
570
+
571
+ chip.logger.info(f'Running in {workdir}')
572
+
573
+ try:
574
+ task_class.pre_process()
575
+ except Exception as e:
576
+ chip.logger.error(f"Pre-processing failed for '{tool}/{task}'.")
577
+ utils.print_traceback(chip.logger, e)
578
+ raise e
1119
579
 
1120
580
  if chip.get('record', 'status', step=step, index=index) == NodeStatus.SKIPPED:
1121
581
  # copy inputs to outputs and skip execution
@@ -1123,10 +583,15 @@ def _executenode(chip, step, index, replay):
1123
583
 
1124
584
  send_messages.send(chip, "skipped", step, index)
1125
585
  else:
1126
- org_env = _set_env_vars(chip, step, index)
586
+ org_env = os.environ.copy()
587
+ os.environ.update(task_class.get_runtime_environmental_variables())
1127
588
 
1128
- run_func = getattr(chip._get_task_module(step, index, flow=flow), 'run', None)
1129
- toolpath, version = _check_tool_version(chip, step, index, run_func)
589
+ toolpath = task_class.get_exe()
590
+ version = task_class.get_exe_version()
591
+
592
+ if not chip.get('option', 'novercheck', step=step, index=index):
593
+ if not task_class.check_exe_version(version):
594
+ _haltstep(chip, flow, step, index)
1130
595
 
1131
596
  if version:
1132
597
  chip.schema.get("record", field='schema').record_tool(
@@ -1136,105 +601,50 @@ def _executenode(chip, step, index, replay):
1136
601
  chip.schema.get("record", field='schema').record_tool(
1137
602
  step, index, toolpath, RecordTool.PATH)
1138
603
 
1139
- # Write manifest (tool interface) (Don't move this!)
1140
- _write_task_manifest(chip, tool)
1141
-
1142
604
  send_messages.send(chip, "begin", step, index)
1143
605
 
1144
- _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, run_func)
606
+ try:
607
+ task_class.generate_replay_script(
608
+ os.path.join(workdir, "replay.sh"),
609
+ workdir)
610
+ ret_code = task_class.run_task(
611
+ workdir,
612
+ chip.get('option', 'quiet', step=step, index=index),
613
+ chip.get('option', 'loglevel', step=step, index=index),
614
+ chip.get('option', 'breakpoint', step=step, index=index),
615
+ chip.get('option', 'nice', step=step, index=index),
616
+ chip.get('option', 'timeout', step=step, index=index))
617
+ except Exception as e:
618
+ raise e
1145
619
 
1146
620
  os.environ.clear()
1147
621
  os.environ.update(org_env)
1148
622
 
1149
- _post_process(chip, step, index)
1150
-
1151
- _finalizenode(chip, step, index, replay)
1152
-
1153
- send_messages.send(chip, "end", step, index)
1154
-
623
+ if ret_code != 0:
624
+ msg = f'Command failed with code {ret_code}.'
625
+ logfile = f"{step}.log"
626
+ if os.path.exists(logfile):
627
+ if chip.get('option', 'quiet', step=step, index=index):
628
+ # Print last N lines of log when in quiet mode
629
+ with sc_open(logfile) as logfd:
630
+ loglines = logfd.read().splitlines()
631
+ for logline in loglines[-_failed_log_lines:]:
632
+ chip.logger.error(logline)
633
+ # No log file for pure-Python tools.
634
+ msg += f' See log file {os.path.abspath(logfile)}'
635
+ chip.logger.warning(msg)
636
+ chip._error = True
1155
637
 
1156
- def _pre_process(chip, step, index):
1157
- flow = chip.get('option', 'flow')
1158
- tool, task = get_tool_task(chip, step, index, flow)
1159
- func = getattr(chip._get_task_module(step, index, flow=flow), 'pre_process', None)
1160
- if func:
1161
638
  try:
1162
- if _do_record_access():
1163
- chip.schema.add_journaling_type("get")
1164
- func(chip)
1165
- chip.schema.remove_journaling_type("get")
639
+ task_class.post_process()
1166
640
  except Exception as e:
1167
- chip.logger.error(f"Pre-processing failed for '{tool}/{task}'.")
1168
- raise e
1169
- if chip._error:
1170
- chip.logger.error(f"Pre-processing failed for '{tool}/{task}'")
1171
- _haltstep(chip, flow, step, index)
1172
-
1173
-
1174
- def _set_env_vars(chip, step, index):
1175
- org_env = os.environ.copy()
1176
-
1177
- tool, task = get_tool_task(chip, step, index)
1178
-
1179
- if _do_record_access():
1180
- chip.schema.add_journaling_type("get")
1181
- os.environ.update(_get_run_env_vars(chip, tool, task, step, index, include_path=True))
1182
- chip.schema.remove_journaling_type("get")
1183
-
1184
- return org_env
1185
-
1186
-
1187
- def _check_tool_version(chip, step, index, run_func=None):
1188
- '''
1189
- Check exe version
1190
- '''
641
+ chip.logger.error(f"Post-processing failed for '{tool}/{task}'.")
642
+ utils.print_traceback(chip.logger, e)
643
+ chip._error = True
1191
644
 
1192
- flow = chip.get('option', 'flow')
1193
- tool, task = get_tool_task(chip, step, index, flow)
645
+ _finalizenode(chip, step, index, replay)
1194
646
 
1195
- vercheck = not chip.get('option', 'novercheck', step=step, index=index)
1196
- veropt = chip.get('tool', tool, 'vswitch')
1197
- exe = _getexe(chip, tool, step, index)
1198
- version = None
1199
- if exe is not None:
1200
- exe_path, exe_base = os.path.split(exe)
1201
- if veropt:
1202
- cmdlist = [exe]
1203
- cmdlist.extend(veropt)
1204
- proc = subprocess.run(cmdlist,
1205
- stdin=subprocess.DEVNULL,
1206
- stdout=subprocess.PIPE,
1207
- stderr=subprocess.STDOUT,
1208
- universal_newlines=True)
1209
- if proc.returncode != 0:
1210
- chip.logger.warning(f'Version check on {tool} failed with '
1211
- f'code {proc.returncode}')
1212
-
1213
- parse_version = getattr(chip._get_tool_module(step, index, flow=flow),
1214
- 'parse_version',
1215
- None)
1216
- if parse_version is None:
1217
- chip.logger.error(f'{tool}/{task} does not implement parse_version().')
1218
- _haltstep(chip, flow, step, index)
1219
- try:
1220
- version = parse_version(proc.stdout)
1221
- except Exception as e:
1222
- chip.logger.error(f'{tool} failed to parse version string: {proc.stdout}')
1223
- raise e
1224
-
1225
- chip.logger.info(f"Tool '{exe_base}' found with version '{version}' "
1226
- f"in directory '{exe_path}'")
1227
- if vercheck and not _check_version(chip, version, tool, step, index):
1228
- if proc.returncode != 0:
1229
- chip.logger.error(f"Tool '{exe_base}' responded with: {proc.stdout}")
1230
- _haltstep(chip, flow, step, index)
1231
- else:
1232
- chip.logger.info(f"Tool '{exe_base}' found in directory '{exe_path}'")
1233
- elif run_func is None:
1234
- exe_base = chip.get('tool', tool, 'exe')
1235
- chip.logger.error(f'Executable {exe_base} not found')
1236
- _haltstep(chip, flow, step, index)
1237
- return (exe, version)
647
+ send_messages.send(chip, "end", step, index)
1238
648
 
1239
649
 
1240
650
  def _hash_files(chip, step, index, setup=False):
@@ -1283,12 +693,11 @@ def _finalizenode(chip, step, index, replay):
1283
693
  chip.get('option', 'quiet', step=step, index=index) and not
1284
694
  chip.get('option', 'breakpoint', step=step, index=index)
1285
695
  )
1286
- run_func = getattr(chip._get_task_module(step, index, flow=flow), 'run', None)
1287
696
 
1288
697
  is_skipped = chip.get('record', 'status', step=step, index=index) == NodeStatus.SKIPPED
1289
698
 
1290
699
  if not is_skipped:
1291
- _check_logfile(chip, step, index, quiet, run_func)
700
+ _check_logfile(chip, step, index, quiet, None)
1292
701
 
1293
702
  # Report metrics
1294
703
  for metric in ['errors', 'warnings']:
@@ -1301,23 +710,15 @@ def _finalizenode(chip, step, index, replay):
1301
710
  # Capture wall runtime and cpu cores
1302
711
  end_time = chip.schema.get("record", field='schema').record_time(step, index, RecordTime.END)
1303
712
 
1304
- # calculate total time
1305
- total_times = []
1306
- for check_step, check_index in chip.schema.get("flowgraph", flow, field="schema").get_nodes():
1307
- total_time = chip.get('metric', 'totaltime', step=check_step, index=check_index)
1308
- if total_time is not None:
1309
- total_times.append(total_time)
1310
- if total_times:
1311
- total_time = max(total_times)
1312
- else:
1313
- total_time = 0.0
1314
-
1315
713
  walltime = end_time - chip.schema.get("record", field='schema').get_recorded_time(
1316
714
  step, index, RecordTime.START)
1317
715
  record_metric(chip, step, index, 'tasktime', walltime,
1318
716
  source=None, source_unit='s')
1319
- record_metric(chip, step, index, 'totaltime', total_time + walltime,
1320
- source=None, source_unit='s')
717
+
718
+ chip.schema.get("metric", field='schema').record_totaltime(
719
+ step, index,
720
+ chip.schema.get("flowgraph", flow, field='schema'),
721
+ chip.schema.get("record", field='schema'))
1321
722
  chip.logger.info(f"Finished task in {round(walltime, 2)}s")
1322
723
 
1323
724
  # Save a successful manifest
@@ -1430,12 +831,6 @@ def assert_required_accesses(chip, step, index):
1430
831
  for key in chip.getkeys('tool', tool, 'task', task, 'report'):
1431
832
  exempt.append(('tool', tool, 'task', task, 'report', key))
1432
833
 
1433
- # Get exempted keys from task
1434
- func = getattr(chip._get_task_module(step, index, flow=flow), 'exempt_keys', None)
1435
- if func:
1436
- # No need for try / except since this must work properly
1437
- exempt.extend(func(chip))
1438
-
1439
834
  required = set(
1440
835
  [tuple(key.split(',')) for key in chip.get('tool', tool, 'task', task, 'require',
1441
836
  step=step, index=index)])
@@ -1519,225 +914,6 @@ def _reset_flow_nodes(chip, flow, nodes_to_execute):
1519
914
  clear_node(step, index)
1520
915
 
1521
916
 
1522
- def _prepare_nodes(chip, nodes_to_run, processes, local_processes, flow):
1523
- '''
1524
- For each node to run, prepare a process and store its dependencies
1525
- '''
1526
-
1527
- # Call this in case this was invoked without __main__
1528
- multiprocessing.freeze_support()
1529
-
1530
- # Log queue for logging messages
1531
- log_queue = multiprocessing.Queue(-1)
1532
-
1533
- init_funcs = set()
1534
- for (step, index) in nodes_to_execute(chip, flow):
1535
- node = (step, index)
1536
-
1537
- if chip.get('record', 'status', step=step, index=index) != NodeStatus.PENDING:
1538
- continue
1539
-
1540
- nodes_to_run[node] = _get_pruned_node_inputs(chip, flow, (step, index))
1541
-
1542
- exec_func = _executenode
1543
-
1544
- if chip.get('option', 'scheduler', 'name', step=step, index=index) == 'slurm':
1545
- # Defer job to compute node
1546
- # If the job is configured to run on a cluster, collect the schema
1547
- # and send it to a compute node for deferred execution.
1548
- init_funcs.add(slurm.init)
1549
- exec_func = slurm._defernode
1550
- elif chip.get('option', 'scheduler', 'name', step=step, index=index) == 'docker':
1551
- # Run job in docker
1552
- init_funcs.add(docker_runner.init)
1553
- exec_func = docker_runner.run
1554
- local_processes.append((step, index))
1555
- else:
1556
- local_processes.append((step, index))
1557
-
1558
- process = {
1559
- "child_pipe": None,
1560
- "parent_pipe": None,
1561
- "proc": None
1562
- }
1563
- process["parent_pipe"], process["child_pipe"] = multiprocessing.Pipe()
1564
- process["proc"] = multiprocessing.Process(
1565
- target=_runtask,
1566
- args=(chip, flow, step, index, exec_func),
1567
- kwargs={"pipe": process["child_pipe"],
1568
- "queue": log_queue})
1569
-
1570
- processes[node] = process
1571
-
1572
- for init_func in init_funcs:
1573
- init_func(chip)
1574
-
1575
- return log_queue
1576
-
1577
-
1578
- def _check_node_dependencies(chip, node, deps, deps_was_successful):
1579
- had_deps = len(deps) > 0
1580
- step, index = node
1581
- tool, _ = get_tool_task(chip, step, index)
1582
-
1583
- # Clear any nodes that have finished from dependency list.
1584
- for in_step, in_index in list(deps):
1585
- in_status = chip.get('record', 'status', step=in_step, index=in_index)
1586
- if NodeStatus.is_done(in_status):
1587
- deps.remove((in_step, in_index))
1588
- if in_status == NodeStatus.SUCCESS:
1589
- deps_was_successful[node] = True
1590
- if NodeStatus.is_error(in_status):
1591
- # Fail if any dependency failed for non-builtin task
1592
- if tool != 'builtin':
1593
- deps.clear()
1594
- chip.schema.get("record", field='schema').set('status', NodeStatus.ERROR,
1595
- step=step, index=index)
1596
- return
1597
-
1598
- # Fail if no dependency successfully finished for builtin task
1599
- if had_deps and len(deps) == 0 \
1600
- and tool == 'builtin' and not deps_was_successful.get(node):
1601
- chip.schema.get("record", field='schema').set('status', NodeStatus.ERROR,
1602
- step=step, index=index)
1603
-
1604
-
1605
- def _launch_nodes(chip, nodes_to_run, processes, local_processes):
1606
- running_nodes = {}
1607
- max_parallel_run = chip.get('option', 'scheduler', 'maxnodes')
1608
- max_cores = utils.get_cores(chip)
1609
- max_threads = utils.get_cores(chip)
1610
- if not max_parallel_run:
1611
- max_parallel_run = utils.get_cores(chip)
1612
-
1613
- # clip max parallel jobs to 1 <= jobs <= max_cores
1614
- max_parallel_run = max(1, min(max_parallel_run, max_cores))
1615
-
1616
- def allow_start(node):
1617
- if node not in local_processes:
1618
- # using a different scheduler, so allow
1619
- return True, 0
1620
-
1621
- if len(running_nodes) >= max_parallel_run:
1622
- return False, 0
1623
-
1624
- # Record thread count requested
1625
- step, index = node
1626
- tool, task = get_tool_task(chip, step, index)
1627
- requested_threads = chip.get('tool', tool, 'task', task, 'threads',
1628
- step=step, index=index)
1629
- if not requested_threads:
1630
- # not specified, marking it max to be safe
1631
- requested_threads = max_threads
1632
- # clamp to max_parallel to avoid getting locked up
1633
- requested_threads = max(1, min(requested_threads, max_threads))
1634
-
1635
- if requested_threads + sum(running_nodes.values()) > max_cores:
1636
- # delay until there are enough core available
1637
- return False, 0
1638
-
1639
- # allow and record how many threads to associate
1640
- return True, requested_threads
1641
-
1642
- deps_was_successful = {}
1643
-
1644
- if _get_callback('pre_run'):
1645
- _get_callback('pre_run')(chip)
1646
-
1647
- start_times = {None: time.time()}
1648
-
1649
- while len(nodes_to_run) > 0 or len(running_nodes) > 0:
1650
- changed = _process_completed_nodes(chip, processes, running_nodes)
1651
-
1652
- # Check for new nodes that can be launched.
1653
- for node, deps in list(nodes_to_run.items()):
1654
- # TODO: breakpoint logic:
1655
- # if node is breakpoint, then don't launch while len(running_nodes) > 0
1656
-
1657
- _check_node_dependencies(chip, node, deps, deps_was_successful)
1658
-
1659
- if chip.get('record', 'status', step=node[0], index=node[1]) == NodeStatus.ERROR:
1660
- del nodes_to_run[node]
1661
- continue
1662
-
1663
- # If there are no dependencies left, launch this node and
1664
- # remove from nodes_to_run.
1665
- if len(deps) == 0:
1666
- dostart, requested_threads = allow_start(node)
1667
-
1668
- if dostart:
1669
- if _get_callback('pre_node'):
1670
- _get_callback('pre_node')(chip, *node)
1671
-
1672
- chip.schema.get("record", field='schema').set('status', NodeStatus.RUNNING,
1673
- step=node[0], index=node[1])
1674
- start_times[node] = time.time()
1675
- changed = True
1676
-
1677
- processes[node]["proc"].start()
1678
- del nodes_to_run[node]
1679
- running_nodes[node] = requested_threads
1680
-
1681
- # Check for situation where we have stuff left to run but don't
1682
- # have any nodes running. This shouldn't happen, but we will get
1683
- # stuck in an infinite loop if it does, so we want to break out
1684
- # with an explicit error.
1685
- if len(nodes_to_run) > 0 and len(running_nodes) == 0:
1686
- raise SiliconCompilerError(
1687
- 'Nodes left to run, but no running nodes. From/to may be invalid.', chip=chip)
1688
-
1689
- if chip._dash and changed:
1690
- # Update dashboard if the manifest changed
1691
- chip._dash.update_manifest(payload={"starttimes": start_times})
1692
-
1693
- if len(running_nodes) == 1:
1694
- # if there is only one node running, just join the thread
1695
- running_node = list(running_nodes.keys())[0]
1696
- processes[running_node]["proc"].join()
1697
- elif len(running_nodes) > 1:
1698
- # if there are more than 1, join the first with a timeout
1699
- running_node = list(running_nodes.keys())[0]
1700
- processes[running_node]["proc"].join(timeout=0.1)
1701
-
1702
-
1703
- def _process_completed_nodes(chip, processes, running_nodes):
1704
- changed = False
1705
- for node in list(running_nodes.keys()):
1706
- if not processes[node]["proc"].is_alive():
1707
- step, index = node
1708
- manifest = os.path.join(chip.getworkdir(step=step, index=index),
1709
- 'outputs',
1710
- f'{chip.design}.pkg.json')
1711
- chip.logger.debug(f'{step}{index} is complete merging: {manifest}')
1712
- if os.path.exists(manifest):
1713
- JournalingSchema(chip.schema).read_journal(manifest)
1714
-
1715
- if processes[node]["parent_pipe"] and processes[node]["parent_pipe"].poll(1):
1716
- try:
1717
- packages = processes[node]["parent_pipe"].recv()
1718
- if isinstance(packages, dict):
1719
- chip._packages.update(packages)
1720
- except: # noqa E722
1721
- pass
1722
-
1723
- del running_nodes[node]
1724
- if processes[node]["proc"].exitcode > 0:
1725
- status = NodeStatus.ERROR
1726
- else:
1727
- status = chip.get('record', 'status', step=step, index=index)
1728
- if not status or status == NodeStatus.PENDING:
1729
- status = NodeStatus.ERROR
1730
-
1731
- chip.schema.get("record", field='schema').set('status', status, step=step, index=index)
1732
-
1733
- changed = True
1734
-
1735
- if _get_callback('post_node'):
1736
- _get_callback('post_node')(chip, *node)
1737
-
1738
- return changed
1739
-
1740
-
1741
917
  def _check_nodes_status(chip, flow):
1742
918
  flowgraph = chip.schema.get("flowgraph", flow, field="schema")
1743
919
  runtime = RuntimeFlowgraph(
@@ -1762,28 +938,6 @@ def _check_nodes_status(chip, flow):
1762
938
  f'These final steps could not be reached: {",".join(sorted(unreached))}', chip=chip)
1763
939
 
1764
940
 
1765
- def print_traceback(chip, exception):
1766
- chip.logger.error(f'{exception}')
1767
- trace = StringIO()
1768
- traceback.print_tb(exception.__traceback__, file=trace)
1769
- chip.logger.error("Backtrace:")
1770
- for line in trace.getvalue().splitlines():
1771
- chip.logger.error(line)
1772
-
1773
-
1774
- def kill_process(chip, proc, tool, poll_interval, msg=""):
1775
- TERMINATE_TIMEOUT = 5
1776
- interrupt_time = time.time()
1777
- chip.logger.info(f'{msg}Waiting for {tool} to exit...')
1778
- while proc.poll() is None and \
1779
- (time.time() - interrupt_time) < TERMINATE_TIMEOUT:
1780
- time.sleep(5 * poll_interval)
1781
- if proc.poll() is None:
1782
- chip.logger.warning(f'{tool} did not exit within {TERMINATE_TIMEOUT} '
1783
- 'seconds. Terminating...')
1784
- utils.terminate_process(proc.pid)
1785
-
1786
-
1787
941
  def get_check_node_keys(chip, step, index):
1788
942
  tool, task = get_tool_task(chip, step, index)
1789
943
 
@@ -2065,6 +1219,9 @@ def copy_old_run_dir(chip, org_jobname):
2065
1219
 
2066
1220
  # Modify manifests to correct jobname
2067
1221
  for step, index in copy_nodes:
1222
+ tool, _ = get_tool_task(chip, step, index)
1223
+ task_class = chip.get("tool", tool, field="schema")
1224
+
2068
1225
  # rewrite replay files
2069
1226
  replay_file = f'{chip.getworkdir(step=step, index=index)}/replay.sh'
2070
1227
  if os.path.exists(replay_file):
@@ -2072,8 +1229,9 @@ def copy_old_run_dir(chip, org_jobname):
2072
1229
  os.remove(replay_file)
2073
1230
  chip.set('arg', 'step', step)
2074
1231
  chip.set('arg', 'index', index)
2075
- tool, task = get_tool_task(chip, step, index)
2076
- _makecmd(chip, tool, task, step, index, script_name=replay_file)
1232
+ task_class.set_runtime(chip, step=step, index=index)
1233
+ task_class.generate_replay_script(replay_file, chip.getworkdir(step=step, index=index))
1234
+ task_class.set_runtime(None)
2077
1235
  chip.unset('arg', 'step')
2078
1236
  chip.unset('arg', 'index')
2079
1237
 
@@ -2110,8 +1268,13 @@ def clean_build_dir(chip):
2110
1268
  return
2111
1269
 
2112
1270
  if chip.get('option', 'from'):
1271
+ runtime = RuntimeFlowgraph(
1272
+ chip.schema.get("flowgraph", chip.get('option', 'flow'), field='schema'),
1273
+ from_steps=chip.get('option', 'from'),
1274
+ to_steps=chip.get('option', 'to'),
1275
+ prune_nodes=chip.get('option', 'prune'))
2113
1276
  # Remove stale outputs that will be rerun
2114
- for step, index in nodes_to_execute(chip):
1277
+ for step, index in runtime.get_nodes():
2115
1278
  clean_node_dir(chip, step, index)
2116
1279
 
2117
1280
  all_nodes = set(chip.schema.get("flowgraph", chip.get('option', 'flow'),