siliconcompiler 0.33.0__py3-none-any.whl → 0.33.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (177)
  1. siliconcompiler/_common.py +5 -0
  2. siliconcompiler/_metadata.py +1 -1
  3. siliconcompiler/apps/sc_install.py +7 -0
  4. siliconcompiler/apps/sc_remote.py +7 -2
  5. siliconcompiler/apps/utils/replay.py +5 -5
  6. siliconcompiler/core.py +38 -12
  7. siliconcompiler/data/templates/replay/replay.sh.j2 +18 -1
  8. siliconcompiler/metric.py +78 -0
  9. siliconcompiler/package/git.py +1 -1
  10. siliconcompiler/record.py +63 -7
  11. siliconcompiler/remote/client.py +57 -14
  12. siliconcompiler/remote/server.py +110 -60
  13. siliconcompiler/report/dashboard/cli/__init__.py +2 -0
  14. siliconcompiler/report/dashboard/cli/board.py +34 -31
  15. siliconcompiler/report/report.py +10 -5
  16. siliconcompiler/report/utils.py +12 -6
  17. siliconcompiler/scheduler/__init__.py +146 -976
  18. siliconcompiler/scheduler/run_node.py +12 -5
  19. siliconcompiler/scheduler/send_messages.py +9 -3
  20. siliconcompiler/scheduler/slurm.py +10 -43
  21. siliconcompiler/scheduler/taskscheduler.py +320 -0
  22. siliconcompiler/schema/baseschema.py +25 -4
  23. siliconcompiler/schema/journalingschema.py +4 -0
  24. siliconcompiler/schema/schema_cfg.py +3 -3
  25. siliconcompiler/tool.py +201 -32
  26. siliconcompiler/tools/_common/__init__.py +14 -11
  27. siliconcompiler/tools/_common/asic.py +5 -5
  28. siliconcompiler/tools/bluespec/convert.py +2 -1
  29. siliconcompiler/tools/builtin/_common.py +9 -2
  30. siliconcompiler/tools/builtin/concatenate.py +6 -2
  31. siliconcompiler/tools/builtin/minimum.py +7 -2
  32. siliconcompiler/tools/builtin/mux.py +7 -2
  33. siliconcompiler/tools/builtin/nop.py +7 -2
  34. siliconcompiler/tools/builtin/verify.py +7 -3
  35. siliconcompiler/tools/chisel/convert.py +10 -10
  36. siliconcompiler/tools/klayout/drc.py +2 -2
  37. siliconcompiler/tools/klayout/klayout_show.py +6 -6
  38. siliconcompiler/tools/klayout/klayout_utils.py +12 -12
  39. siliconcompiler/tools/netgen/count_lvs.py +2 -2
  40. siliconcompiler/tools/netgen/lvs.py +1 -1
  41. siliconcompiler/tools/openroad/_apr.py +2 -2
  42. siliconcompiler/tools/openroad/scripts/apr/sc_init_floorplan.tcl +1 -7
  43. siliconcompiler/tools/openroad/scripts/common/procs.tcl +18 -0
  44. siliconcompiler/tools/openroad/scripts/common/read_input_files.tcl +1 -7
  45. siliconcompiler/tools/opensta/scripts/sc_timing.tcl +10 -0
  46. siliconcompiler/tools/opensta/timing.py +11 -0
  47. siliconcompiler/tools/slang/__init__.py +6 -5
  48. siliconcompiler/tools/slang/elaborate.py +6 -6
  49. siliconcompiler/tools/slang/lint.py +1 -3
  50. siliconcompiler/tools/vpr/_xml_constraint.py +8 -8
  51. siliconcompiler/tools/yosys/prepareLib.py +2 -2
  52. siliconcompiler/tools/yosys/sc_synth_asic.tcl +43 -5
  53. siliconcompiler/tools/yosys/screenshot.py +1 -1
  54. siliconcompiler/tools/yosys/syn_asic.py +5 -5
  55. siliconcompiler/toolscripts/_tools.json +17 -10
  56. siliconcompiler/toolscripts/rhel8/install-chisel.sh +9 -2
  57. siliconcompiler/toolscripts/rhel8/install-icarus.sh +10 -3
  58. siliconcompiler/toolscripts/rhel8/install-klayout.sh +8 -1
  59. siliconcompiler/toolscripts/rhel8/install-magic.sh +9 -2
  60. siliconcompiler/toolscripts/rhel8/install-montage.sh +1 -1
  61. siliconcompiler/toolscripts/rhel8/install-netgen.sh +9 -2
  62. siliconcompiler/toolscripts/rhel8/install-slang.sh +11 -4
  63. siliconcompiler/toolscripts/rhel8/install-surelog.sh +9 -2
  64. siliconcompiler/toolscripts/rhel8/install-sv2v.sh +11 -4
  65. siliconcompiler/toolscripts/rhel8/install-verible.sh +11 -3
  66. siliconcompiler/toolscripts/rhel8/install-verilator.sh +10 -3
  67. siliconcompiler/toolscripts/rhel8/install-xyce.sh +15 -10
  68. siliconcompiler/toolscripts/rhel9/install-chisel.sh +9 -2
  69. siliconcompiler/toolscripts/rhel9/install-ghdl.sh +9 -2
  70. siliconcompiler/toolscripts/rhel9/install-gtkwave.sh +10 -3
  71. siliconcompiler/toolscripts/rhel9/install-icarus.sh +10 -3
  72. siliconcompiler/toolscripts/rhel9/install-klayout.sh +8 -1
  73. siliconcompiler/toolscripts/rhel9/install-magic.sh +9 -2
  74. siliconcompiler/toolscripts/rhel9/install-montage.sh +1 -1
  75. siliconcompiler/toolscripts/rhel9/install-netgen.sh +9 -2
  76. siliconcompiler/toolscripts/rhel9/install-openroad.sh +16 -3
  77. siliconcompiler/toolscripts/rhel9/install-opensta.sh +17 -5
  78. siliconcompiler/toolscripts/rhel9/install-slang.sh +11 -4
  79. siliconcompiler/toolscripts/rhel9/install-surelog.sh +9 -2
  80. siliconcompiler/toolscripts/rhel9/install-sv2v.sh +11 -4
  81. siliconcompiler/toolscripts/rhel9/install-verible.sh +11 -3
  82. siliconcompiler/toolscripts/rhel9/install-verilator.sh +10 -3
  83. siliconcompiler/toolscripts/rhel9/install-vpr.sh +9 -2
  84. siliconcompiler/toolscripts/rhel9/install-xdm.sh +10 -2
  85. siliconcompiler/toolscripts/rhel9/install-xyce.sh +15 -10
  86. siliconcompiler/toolscripts/rhel9/install-yosys-moosic.sh +9 -2
  87. siliconcompiler/toolscripts/rhel9/install-yosys-parmys.sh +10 -3
  88. siliconcompiler/toolscripts/rhel9/install-yosys-slang.sh +10 -2
  89. siliconcompiler/toolscripts/rhel9/install-yosys.sh +9 -2
  90. siliconcompiler/toolscripts/ubuntu20/install-bambu.sh +10 -2
  91. siliconcompiler/toolscripts/ubuntu20/install-bluespec.sh +10 -3
  92. siliconcompiler/toolscripts/ubuntu20/install-chisel.sh +9 -2
  93. siliconcompiler/toolscripts/ubuntu20/install-ghdl.sh +9 -2
  94. siliconcompiler/toolscripts/ubuntu20/install-gtkwave.sh +9 -2
  95. siliconcompiler/toolscripts/ubuntu20/install-icarus.sh +9 -2
  96. siliconcompiler/toolscripts/ubuntu20/install-icepack.sh +9 -2
  97. siliconcompiler/toolscripts/ubuntu20/install-klayout.sh +8 -1
  98. siliconcompiler/toolscripts/ubuntu20/install-magic.sh +9 -2
  99. siliconcompiler/toolscripts/ubuntu20/install-montage.sh +1 -1
  100. siliconcompiler/toolscripts/ubuntu20/install-netgen.sh +9 -2
  101. siliconcompiler/toolscripts/ubuntu20/install-nextpnr.sh +9 -2
  102. siliconcompiler/toolscripts/ubuntu20/install-openroad.sh +16 -3
  103. siliconcompiler/toolscripts/ubuntu20/install-opensta.sh +16 -5
  104. siliconcompiler/toolscripts/ubuntu20/install-slang.sh +11 -4
  105. siliconcompiler/toolscripts/ubuntu20/install-slurm.sh +9 -2
  106. siliconcompiler/toolscripts/ubuntu20/install-surelog.sh +10 -2
  107. siliconcompiler/toolscripts/ubuntu20/install-sv2v.sh +11 -4
  108. siliconcompiler/toolscripts/ubuntu20/install-verible.sh +11 -3
  109. siliconcompiler/toolscripts/ubuntu20/install-verilator.sh +9 -2
  110. siliconcompiler/toolscripts/ubuntu20/install-xdm.sh +10 -2
  111. siliconcompiler/toolscripts/ubuntu20/install-xyce.sh +13 -8
  112. siliconcompiler/toolscripts/ubuntu20/install-yosys-moosic.sh +9 -2
  113. siliconcompiler/toolscripts/ubuntu20/install-yosys.sh +9 -2
  114. siliconcompiler/toolscripts/ubuntu22/install-bambu.sh +10 -2
  115. siliconcompiler/toolscripts/ubuntu22/install-bluespec.sh +10 -3
  116. siliconcompiler/toolscripts/ubuntu22/install-chisel.sh +9 -2
  117. siliconcompiler/toolscripts/ubuntu22/install-ghdl.sh +9 -2
  118. siliconcompiler/toolscripts/ubuntu22/install-gtkwave.sh +9 -2
  119. siliconcompiler/toolscripts/ubuntu22/install-icarus.sh +9 -2
  120. siliconcompiler/toolscripts/ubuntu22/install-icepack.sh +9 -2
  121. siliconcompiler/toolscripts/ubuntu22/install-klayout.sh +12 -1
  122. siliconcompiler/toolscripts/ubuntu22/install-magic.sh +9 -2
  123. siliconcompiler/toolscripts/ubuntu22/install-montage.sh +1 -1
  124. siliconcompiler/toolscripts/ubuntu22/install-netgen.sh +9 -2
  125. siliconcompiler/toolscripts/ubuntu22/install-nextpnr.sh +9 -2
  126. siliconcompiler/toolscripts/ubuntu22/install-openroad.sh +16 -3
  127. siliconcompiler/toolscripts/ubuntu22/install-opensta.sh +17 -5
  128. siliconcompiler/toolscripts/ubuntu22/install-slang.sh +11 -4
  129. siliconcompiler/toolscripts/ubuntu22/install-slurm.sh +9 -2
  130. siliconcompiler/toolscripts/ubuntu22/install-surelog.sh +10 -2
  131. siliconcompiler/toolscripts/ubuntu22/install-sv2v.sh +11 -4
  132. siliconcompiler/toolscripts/ubuntu22/install-verible.sh +11 -3
  133. siliconcompiler/toolscripts/ubuntu22/install-verilator.sh +9 -2
  134. siliconcompiler/toolscripts/ubuntu22/install-vpr.sh +9 -2
  135. siliconcompiler/toolscripts/ubuntu22/install-xdm.sh +10 -2
  136. siliconcompiler/toolscripts/ubuntu22/install-xyce.sh +13 -8
  137. siliconcompiler/toolscripts/ubuntu22/install-yosys-moosic.sh +9 -2
  138. siliconcompiler/toolscripts/ubuntu22/install-yosys-parmys.sh +10 -3
  139. siliconcompiler/toolscripts/ubuntu22/install-yosys-slang.sh +10 -2
  140. siliconcompiler/toolscripts/ubuntu22/install-yosys.sh +9 -2
  141. siliconcompiler/toolscripts/ubuntu24/install-bambu.sh +12 -4
  142. siliconcompiler/toolscripts/ubuntu24/install-bluespec.sh +10 -3
  143. siliconcompiler/toolscripts/ubuntu24/install-chisel.sh +9 -2
  144. siliconcompiler/toolscripts/ubuntu24/install-ghdl.sh +9 -2
  145. siliconcompiler/toolscripts/ubuntu24/install-gtkwave.sh +9 -2
  146. siliconcompiler/toolscripts/ubuntu24/install-icarus.sh +9 -2
  147. siliconcompiler/toolscripts/ubuntu24/install-icepack.sh +9 -2
  148. siliconcompiler/toolscripts/ubuntu24/install-klayout.sh +12 -1
  149. siliconcompiler/toolscripts/ubuntu24/install-magic.sh +9 -2
  150. siliconcompiler/toolscripts/ubuntu24/install-montage.sh +1 -1
  151. siliconcompiler/toolscripts/ubuntu24/install-netgen.sh +9 -2
  152. siliconcompiler/toolscripts/ubuntu24/install-nextpnr.sh +9 -2
  153. siliconcompiler/toolscripts/ubuntu24/install-openroad.sh +16 -3
  154. siliconcompiler/toolscripts/ubuntu24/install-opensta.sh +17 -5
  155. siliconcompiler/toolscripts/ubuntu24/install-slang.sh +11 -4
  156. siliconcompiler/toolscripts/ubuntu24/install-slurm.sh +9 -2
  157. siliconcompiler/toolscripts/ubuntu24/install-surelog.sh +10 -2
  158. siliconcompiler/toolscripts/ubuntu24/install-sv2v.sh +11 -4
  159. siliconcompiler/toolscripts/ubuntu24/install-verible.sh +11 -3
  160. siliconcompiler/toolscripts/ubuntu24/install-verilator.sh +9 -2
  161. siliconcompiler/toolscripts/ubuntu24/install-vpr.sh +9 -2
  162. siliconcompiler/toolscripts/ubuntu24/install-xdm.sh +10 -2
  163. siliconcompiler/toolscripts/ubuntu24/install-xyce.sh +13 -8
  164. siliconcompiler/toolscripts/ubuntu24/install-yosys-moosic.sh +9 -2
  165. siliconcompiler/toolscripts/ubuntu24/install-yosys-parmys.sh +10 -3
  166. siliconcompiler/toolscripts/ubuntu24/install-yosys-slang.sh +10 -2
  167. siliconcompiler/toolscripts/ubuntu24/install-yosys.sh +9 -2
  168. siliconcompiler/utils/__init__.py +11 -0
  169. siliconcompiler/utils/flowgraph.py +6 -101
  170. siliconcompiler/utils/issue.py +15 -23
  171. siliconcompiler/utils/logging.py +2 -2
  172. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.2.dist-info}/METADATA +6 -5
  173. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.2.dist-info}/RECORD +177 -176
  174. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.2.dist-info}/WHEEL +1 -1
  175. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.2.dist-info}/entry_points.txt +0 -0
  176. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.2.dist-info}/licenses/LICENSE +0 -0
  177. {siliconcompiler-0.33.0.dist-info → siliconcompiler-0.33.2.dist-info}/top_level.txt +0 -0
@@ -1,70 +1,28 @@
1
- import contextlib
2
- import multiprocessing
3
1
  import logging
4
2
  import os
5
- import psutil
6
3
  import re
7
- import shlex
8
4
  import shutil
9
- import subprocess
10
5
  import sys
11
- import time
12
- import packaging.version
13
- import packaging.specifiers
14
- from io import StringIO
15
- import traceback
16
- from logging.handlers import QueueHandler, QueueListener
6
+ from logging.handlers import QueueHandler
17
7
  from siliconcompiler import sc_open
18
8
  from siliconcompiler import utils
19
9
  from siliconcompiler.remote import Client
20
10
  from siliconcompiler import Schema
21
11
  from siliconcompiler.schema import JournalingSchema
22
12
  from siliconcompiler.record import RecordTime, RecordTool
23
- from siliconcompiler.scheduler import slurm
24
- from siliconcompiler.scheduler import docker_runner
25
13
  from siliconcompiler import NodeStatus, SiliconCompilerError
26
- from siliconcompiler.utils.flowgraph import _get_flowgraph_execution_order, \
27
- _get_pruned_node_inputs, \
28
- get_nodes_from, nodes_to_execute, _check_flowgraph
29
- from siliconcompiler.utils.logging import SCBlankLoggerFormatter
30
14
  from siliconcompiler.tools._common import input_file_node_name
31
15
  import lambdapdk
32
16
  from siliconcompiler.tools._common import get_tool_task, record_metric
33
17
  from siliconcompiler.scheduler import send_messages
34
18
  from siliconcompiler.flowgraph import RuntimeFlowgraph
35
-
36
- try:
37
- import resource
38
- except ModuleNotFoundError:
39
- resource = None
40
-
41
-
42
- # callback hooks to help custom runners track progress
43
- _callback_funcs = {}
44
-
45
-
46
- def register_callback(hook, func):
47
- _callback_funcs[hook] = func
48
-
49
-
50
- def _get_callback(hook):
51
- if hook in _callback_funcs:
52
- return _callback_funcs[hook]
53
- return None
19
+ from siliconcompiler.scheduler.taskscheduler import TaskScheduler
54
20
 
55
21
 
56
22
  # Max lines to print from failed node log
57
23
  _failed_log_lines = 20
58
24
 
59
25
 
60
- #######################################
61
- def _do_record_access():
62
- '''
63
- Determine if Schema should record calls to .get
64
- '''
65
- return False
66
-
67
-
68
26
  ###############################################################################
69
27
  class SiliconCompilerTimeout(Exception):
70
28
  ''' Minimal Exception wrapper used to raise sc timeout errors.
@@ -97,14 +55,30 @@ def run(chip):
97
55
 
98
56
  # Check if flowgraph is complete and valid
99
57
  flow = chip.get('option', 'flow')
100
- if not _check_flowgraph(chip, flow=flow):
58
+ if not chip.schema.get("flowgraph", flow, field="schema").validate(logger=chip.logger):
59
+ raise SiliconCompilerError(
60
+ f"{flow} flowgraph contains errors and cannot be run.",
61
+ chip=chip)
62
+ if not RuntimeFlowgraph.validate(
63
+ chip.schema.get("flowgraph", flow, field="schema"),
64
+ from_steps=chip.get('option', 'from'),
65
+ to_steps=chip.get('option', 'to'),
66
+ prune_nodes=chip.get('option', 'prune'),
67
+ logger=chip.logger):
101
68
  raise SiliconCompilerError(
102
69
  f"{flow} flowgraph contains errors and cannot be run.",
103
70
  chip=chip)
104
71
 
105
72
  copy_old_run_dir(chip, org_jobname)
106
73
  clean_build_dir(chip)
107
- _reset_flow_nodes(chip, flow, nodes_to_execute(chip, flow))
74
+
75
+ runtime = RuntimeFlowgraph(
76
+ chip.schema.get("flowgraph", flow, field='schema'),
77
+ from_steps=chip.get('option', 'from'),
78
+ to_steps=chip.get('option', 'to'),
79
+ prune_nodes=chip.get('option', 'prune'))
80
+
81
+ _reset_flow_nodes(chip, flow, runtime.get_nodes())
108
82
  chip.schema.get("record", field='schema').record_python_packages()
109
83
 
110
84
  if chip.get('option', 'remote'):
@@ -178,6 +152,9 @@ def _local_process(chip, flow):
178
152
  from_nodes = []
179
153
  extra_setup_nodes = {}
180
154
 
155
+ chip.schema = JournalingSchema(chip.schema)
156
+ chip.schema.start_journal()
157
+
181
158
  if chip.get('option', 'clean') or not chip.get('option', 'from'):
182
159
  load_nodes = list(chip.schema.get("flowgraph", flow, field="schema").get_nodes())
183
160
  else:
@@ -191,7 +168,7 @@ def _local_process(chip, flow):
191
168
  prune_nodes=chip.get('option', 'prune'))
192
169
  load_nodes = list(runtime.get_nodes())
193
170
 
194
- for node_level in _get_flowgraph_execution_order(chip, flow):
171
+ for node_level in chip.schema.get("flowgraph", flow, field="schema").get_execution_order():
195
172
  for step, index in node_level:
196
173
  if (step, index) not in load_nodes:
197
174
  continue
@@ -210,10 +187,16 @@ def _local_process(chip, flow):
210
187
  except Exception:
211
188
  pass
212
189
 
190
+ runtimeflow = RuntimeFlowgraph(
191
+ chip.schema.get("flowgraph", flow, field="schema"),
192
+ from_steps=chip.get('option', 'from'),
193
+ to_steps=chip.get('option', 'to'),
194
+ prune_nodes=chip.get('option', 'prune'))
195
+
213
196
  # Setup tools for all nodes to run.
214
- nodes = list(nodes_to_execute(chip, flow))
197
+ nodes = list(runtimeflow.get_nodes())
215
198
  all_setup_nodes = nodes + load_nodes + list(extra_setup_nodes.keys())
216
- for layer_nodes in _get_flowgraph_execution_order(chip, flow):
199
+ for layer_nodes in chip.schema.get("flowgraph", flow, field="schema").get_execution_order():
217
200
  for step, index in layer_nodes:
218
201
  if (step, index) in all_setup_nodes:
219
202
  node_kept = _setup_node(chip, step, index)
@@ -233,7 +216,7 @@ def _local_process(chip, flow):
233
216
  def mark_pending(step, index):
234
217
  chip.schema.get("record", field='schema').set('status', NodeStatus.PENDING,
235
218
  step=step, index=index)
236
- for next_step, next_index in get_nodes_from(chip, flow, [(step, index)]):
219
+ for next_step, next_index in runtimeflow.get_nodes_starting_at(step, index):
237
220
  if chip.get('record', 'status', step=next_step, index=next_index) == \
238
221
  NodeStatus.SKIPPED:
239
222
  continue
@@ -243,13 +226,7 @@ def _local_process(chip, flow):
243
226
  step=next_step, index=next_index)
244
227
 
245
228
  # Check if nodes have been modified from previous data
246
- runtimeflow = RuntimeFlowgraph(
247
- chip.schema.get("flowgraph", flow, field="schema"),
248
- from_steps=chip.get('option', 'from'),
249
- to_steps=chip.get('option', 'to'),
250
- prune_nodes=chip.get('option', 'prune'))
251
-
252
- for layer_nodes in _get_flowgraph_execution_order(chip, flow):
229
+ for layer_nodes in chip.schema.get("flowgraph", flow, field="schema").get_execution_order():
253
230
  for step, index in layer_nodes:
254
231
  # Only look at successful nodes
255
232
  if chip.get('record', 'status', step=step, index=index) not in \
@@ -262,8 +239,7 @@ def _local_process(chip, flow):
262
239
  mark_pending(step, index)
263
240
  elif (step, index) in extra_setup_nodes:
264
241
  # import old information
265
- JournalingSchema(chip.schema).import_journal(
266
- schema=extra_setup_nodes[(step, index)])
242
+ chip.schema.import_journal(schema=extra_setup_nodes[(step, index)])
267
243
 
268
244
  # Ensure pending nodes cause following nodes to be run
269
245
  for step, index in nodes:
@@ -276,6 +252,10 @@ def _local_process(chip, flow):
276
252
  if chip.get('record', 'status', step=step, index=index) == NodeStatus.PENDING:
277
253
  clean_node_dir(chip, step, index)
278
254
 
255
+ chip.write_manifest(os.path.join(chip.getworkdir(), f"{chip.get('design')}.pkg.json"))
256
+ chip.schema.stop_journal()
257
+ chip.schema = chip.schema.get_base_schema()
258
+
279
259
  # Check validity of setup
280
260
  chip.logger.info("Checking manifest before running.")
281
261
  check_ok = chip.check_manifest()
@@ -289,40 +269,11 @@ def _local_process(chip, flow):
289
269
  'Implementation errors encountered. See previous errors.',
290
270
  chip=chip)
291
271
 
292
- nodes_to_run = {}
293
- processes = {}
294
- local_processes = []
295
- log_queue = _prepare_nodes(chip, nodes_to_run, processes, local_processes, flow)
296
-
297
- # Handle logs across threads
298
- log_listener = QueueListener(log_queue, chip.logger._console)
299
- chip.logger._console.setFormatter(SCBlankLoggerFormatter())
300
- log_listener.start()
301
-
302
- # Update dashboard before run begins
303
- if chip._dash:
304
- chip._dash.update_manifest()
305
-
306
- try:
307
- _launch_nodes(chip, nodes_to_run, processes, local_processes)
308
- except KeyboardInterrupt:
309
- # exit immediately
310
- log_listener.stop()
311
- sys.exit(0)
312
-
313
- if _get_callback('post_run'):
314
- _get_callback('post_run')(chip)
272
+ task_scheduler = TaskScheduler(chip)
273
+ task_scheduler.run()
315
274
 
316
275
  _check_nodes_status(chip, flow)
317
276
 
318
- # Cleanup logger
319
- log_listener.stop()
320
- chip._init_logger_formats()
321
-
322
-
323
- def __is_posix():
324
- return sys.platform != 'win32'
325
-
326
277
 
327
278
  ###########################################################################
328
279
  def _setup_node(chip, step, index, flow=None):
@@ -337,21 +288,19 @@ def _setup_node(chip, step, index, flow=None):
337
288
  chip.set('arg', 'index', index)
338
289
  tool, task = get_tool_task(chip, step, index, flow=flow)
339
290
 
291
+ task_class = chip.get("tool", tool, field="schema")
292
+ task_class.set_runtime(chip)
293
+
340
294
  # Run node setup.
295
+ chip.logger.info(f'Setting up node {step}{index} with {tool}/{task}')
341
296
  setup_ret = None
342
297
  try:
343
- setup_step = getattr(chip._get_task_module(step, index), 'setup', None)
344
- except SiliconCompilerError:
345
- setup_step = None
346
- if setup_step:
347
- try:
348
- chip.logger.info(f'Setting up node {step}{index} with {tool}/{task}')
349
- setup_ret = setup_step(chip)
350
- except Exception as e:
351
- chip.logger.error(f'Failed to run setup() for {tool}/{task}')
352
- raise e
353
- else:
354
- raise SiliconCompilerError(f'setup() not found for tool {tool}, task {task}', chip=chip)
298
+ setup_ret = task_class.setup()
299
+ except Exception as e:
300
+ chip.logger.error(f'Failed to run setup() for {tool}/{task}')
301
+ raise e
302
+
303
+ task_class.set_runtime(None)
355
304
 
356
305
  # Need to restore step/index, otherwise we will skip setting up other indices.
357
306
  chip.set('option', 'flow', preset_flow)
@@ -368,86 +317,6 @@ def _setup_node(chip, step, index, flow=None):
368
317
  return True
369
318
 
370
319
 
371
- def _check_version(chip, reported_version, tool, step, index):
372
- # Based on regex for deprecated "legacy specifier" from PyPA packaging
373
- # library. Use this to parse PEP-440ish specifiers with arbitrary
374
- # versions.
375
- _regex_str = r"""
376
- (?P<operator>(==|!=|<=|>=|<|>|~=))
377
- \s*
378
- (?P<version>
379
- [^,;\s)]* # Since this is a "legacy" specifier, and the version
380
- # string can be just about anything, we match everything
381
- # except for whitespace, a semi-colon for marker support,
382
- # a closing paren since versions can be enclosed in
383
- # them, and a comma since it's a version separator.
384
- )
385
- """
386
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
387
-
388
- normalize_version = getattr(chip._get_tool_module(step, index), 'normalize_version', None)
389
- # Version is good if it matches any of the specifier sets in this list.
390
- spec_sets = chip.get('tool', tool, 'version', step=step, index=index)
391
- if not spec_sets:
392
- return True
393
-
394
- for spec_set in spec_sets:
395
- split_specs = [s.strip() for s in spec_set.split(",") if s.strip()]
396
- specs_list = []
397
- for spec in split_specs:
398
- match = re.match(_regex, spec)
399
- if match is None:
400
- chip.logger.warning(f'Invalid version specifier {spec}. '
401
- f'Defaulting to =={spec}.')
402
- operator = '=='
403
- spec_version = spec
404
- else:
405
- operator = match.group('operator')
406
- spec_version = match.group('version')
407
- specs_list.append((operator, spec_version))
408
-
409
- if normalize_version is None:
410
- normalized_version = reported_version
411
- normalized_specs = ','.join([f'{op}{ver}' for op, ver in specs_list])
412
- else:
413
- try:
414
- normalized_version = normalize_version(reported_version)
415
- except Exception as e:
416
- chip.logger.error(f'Unable to normalize version for {tool}: {reported_version}')
417
- raise e
418
- normalized_spec_list = [f'{op}{normalize_version(ver)}' for op, ver in specs_list]
419
- normalized_specs = ','.join(normalized_spec_list)
420
-
421
- try:
422
- version = packaging.version.Version(normalized_version)
423
- except packaging.version.InvalidVersion:
424
- chip.logger.error(f'Version {reported_version} reported by {tool} does '
425
- 'not match standard.')
426
- if normalize_version is None:
427
- chip.logger.error('Tool driver should implement normalize_version().')
428
- else:
429
- chip.logger.error('normalize_version() returned '
430
- f'invalid version {normalized_version}')
431
-
432
- return False
433
-
434
- try:
435
- spec_set = packaging.specifiers.SpecifierSet(normalized_specs)
436
- except packaging.specifiers.InvalidSpecifier:
437
- chip.logger.error(f'Version specifier set {normalized_specs} '
438
- 'does not match standard.')
439
- return False
440
-
441
- if version in spec_set:
442
- return True
443
-
444
- allowedstr = '; '.join(spec_sets)
445
- chip.logger.error(f"Version check failed for {tool}. Check installation.")
446
- chip.logger.error(f"Found version {reported_version}, "
447
- f"did not satisfy any version specifier set {allowedstr}.")
448
- return False
449
-
450
-
451
320
  ###########################################################################
452
321
  def _runtask(chip, flow, step, index, exec_func, pipe=None, queue=None, replay=False):
453
322
  '''
@@ -497,7 +366,7 @@ def _runtask(chip, flow, step, index, exec_func, pipe=None, queue=None, replay=F
497
366
 
498
367
  exec_func(chip, step, index, replay)
499
368
  except Exception as e:
500
- print_traceback(chip, e)
369
+ utils.print_traceback(chip.logger, e)
501
370
  _haltstep(chip, chip.get('option', 'flow'), step, index)
502
371
 
503
372
  # return to original directory
@@ -539,20 +408,6 @@ def _setupnode(chip, flow, step, index, replay):
539
408
  _haltstep(chip, flow, step, index)
540
409
 
541
410
 
542
- ###########################################################################
543
- def _write_task_manifest(chip, tool, path=None, backup=True):
544
- suffix = chip.get('tool', tool, 'format')
545
- if suffix:
546
- manifest_path = f"sc_manifest.{suffix}"
547
- if path:
548
- manifest_path = os.path.join(path, manifest_path)
549
-
550
- if backup and os.path.exists(manifest_path):
551
- shutil.copyfile(manifest_path, f'{manifest_path}.bak')
552
-
553
- chip.write_manifest(manifest_path, abspath=True)
554
-
555
-
556
411
  ###########################################################################
557
412
  def _setup_workdir(chip, step, index, replay):
558
413
  workdir = chip.getworkdir(step=step, index=index)
@@ -570,20 +425,18 @@ def _select_inputs(chip, step, index, trial=False):
570
425
 
571
426
  flow = chip.get('option', 'flow')
572
427
  tool, _ = get_tool_task(chip, step, index, flow)
573
- sel_inputs = []
574
-
575
- select_inputs = getattr(chip._get_task_module(step, index, flow=flow),
576
- '_select_inputs',
577
- None)
578
- if select_inputs:
579
- log_level = chip.logger.level
580
- if trial:
581
- chip.logger.setLevel(logging.CRITICAL)
582
- sel_inputs = select_inputs(chip, step, index)
583
- if trial:
584
- chip.logger.setLevel(log_level)
585
- else:
586
- sel_inputs = _get_pruned_node_inputs(chip, flow, (step, index))
428
+
429
+ task_class = chip.get("tool", tool, field="schema")
430
+ task_class.set_runtime(chip, step=step, index=index)
431
+
432
+ log_level = chip.logger.level
433
+ if trial:
434
+ chip.logger.setLevel(logging.CRITICAL)
435
+
436
+ sel_inputs = task_class.select_input_nodes()
437
+
438
+ if trial:
439
+ chip.logger.setLevel(log_level)
587
440
 
588
441
  if (step, index) not in chip.schema.get("flowgraph", flow, field="schema").get_entry_nodes() \
589
442
  and not sel_inputs:
@@ -624,10 +477,18 @@ def _copy_previous_steps_output_data(chip, step, index, replay):
624
477
  '''
625
478
 
626
479
  flow = chip.get('option', 'flow')
627
- if not _get_pruned_node_inputs(chip, flow, (step, index)):
480
+
481
+ flow_schema = chip.schema.get("flowgraph", flow, field="schema")
482
+ runtime = RuntimeFlowgraph(
483
+ flow_schema,
484
+ from_steps=set([step for step, _ in flow_schema.get_entry_nodes()]),
485
+ prune_nodes=chip.get('option', 'prune'))
486
+
487
+ if not runtime.get_node_inputs(step, index, record=chip.schema.get("record", field="schema")):
628
488
  all_inputs = []
629
489
  elif not chip.get('record', 'inputnode', step=step, index=index):
630
- all_inputs = _get_pruned_node_inputs(chip, flow, (step, index))
490
+ all_inputs = runtime.get_node_inputs(step, index,
491
+ record=chip.schema.get("record", field="schema"))
631
492
  else:
632
493
  all_inputs = chip.get('record', 'inputnode', step=step, index=index)
633
494
 
@@ -663,414 +524,9 @@ def _copy_previous_steps_output_data(chip, step, index, replay):
663
524
  os.rename(f'inputs/{outfile.name}', f'inputs/{new_name}')
664
525
 
665
526
 
666
- def __read_std_streams(chip, quiet,
667
- is_stdout_log, stdout_reader, stdout_print,
668
- is_stderr_log, stderr_reader, stderr_print):
669
- '''
670
- Handle directing tool outputs to logger
671
- '''
672
- if not quiet:
673
- if is_stdout_log:
674
- for line in stdout_reader.readlines():
675
- stdout_print(line.rstrip())
676
- if is_stderr_log:
677
- for line in stderr_reader.readlines():
678
- stderr_print(line.rstrip())
679
-
680
-
681
527
  ############################################################################
682
528
  # Chip helper Functions
683
529
  ############################################################################
684
- def _getexe(chip, tool, step, index):
685
- exe = chip.get('tool', tool, 'exe')
686
- if exe is None:
687
- return None
688
- path = chip.find_files('tool', tool, 'path', step=step, index=index)
689
-
690
- syspath = os.getenv('PATH', os.defpath)
691
- if path:
692
- # Prepend 'path' schema var to system path
693
- syspath = path + os.pathsep + syspath
694
-
695
- fullexe = shutil.which(exe, path=syspath)
696
-
697
- return fullexe
698
-
699
-
700
- def _get_run_env_vars(chip, tool, task, step, index, include_path):
701
- envvars = utils.get_env_vars(chip, step, index)
702
- for item in chip.getkeys('tool', tool, 'licenseserver'):
703
- license_file = chip.get('tool', tool, 'licenseserver', item, step=step, index=index)
704
- if license_file:
705
- envvars[item] = ':'.join(license_file)
706
-
707
- if include_path:
708
- path = chip.get('tool', tool, 'path', step=step, index=index)
709
- if path:
710
- envvars['PATH'] = path + os.pathsep + os.environ['PATH']
711
- else:
712
- envvars['PATH'] = os.environ['PATH']
713
-
714
- # Forward additional variables
715
- for var in ('LD_LIBRARY_PATH',):
716
- val = os.getenv(var, None)
717
- if val:
718
- envvars[var] = val
719
-
720
- return envvars
721
-
722
-
723
- #######################################
724
- def _makecmd(chip, tool, task, step, index, script_name='replay.sh', include_path=True):
725
- '''
726
- Constructs a subprocess run command based on eda tool setup.
727
- Creates a replay script in current directory.
728
-
729
- Returns:
730
- runnable command (list)
731
- printable command (str)
732
- command name (str)
733
- command arguments (list)
734
- '''
735
-
736
- fullexe = _getexe(chip, tool, step, index)
737
-
738
- def parse_options(options):
739
- if not options:
740
- return []
741
- shlex_opts = []
742
- for option in options:
743
- shlex_opts.append(str(option).strip())
744
- return shlex_opts
745
-
746
- # Add scripts files
747
- scripts = chip.find_files('tool', tool, 'task', task, 'script', step=step, index=index)
748
-
749
- cmdlist = [fullexe]
750
- cmdlist.extend(parse_options(chip.get('tool', tool, 'task', task, 'option',
751
- step=step, index=index)))
752
- cmdlist.extend(scripts)
753
-
754
- runtime_options = getattr(chip._get_task_module(step, index), 'runtime_options', None)
755
- if not runtime_options:
756
- runtime_options = getattr(chip._get_tool_module(step, index), 'runtime_options', None)
757
- if runtime_options:
758
- try:
759
- if _do_record_access():
760
- chip.schema.add_journaling_type("get")
761
- cmdlist.extend(parse_options(runtime_options(chip)))
762
- chip.schema.remove_journaling_type("get")
763
- except Exception as e:
764
- chip.logger.error(f'Failed to get runtime options for {tool}/{task}')
765
- raise e
766
-
767
- # Separate variables to be able to display nice name of executable
768
- cmd = os.path.basename(cmdlist[0])
769
- cmd_args = cmdlist[1:]
770
- print_cmd = shlex.join([cmd, *cmd_args])
771
-
772
- # create replay file
773
- with open(script_name, 'w') as f:
774
- # Ensure execution runs from the same directory
775
- replay_opts = {}
776
- work_dir = chip.getworkdir(step=step, index=index)
777
- if chip._relative_path:
778
- work_dir = os.path.relpath(work_dir, chip._relative_path)
779
- replay_opts["work_dir"] = work_dir
780
- replay_opts["exports"] = _get_run_env_vars(chip,
781
- tool, task,
782
- step, index,
783
- include_path=include_path)
784
- replay_opts["executable"] = chip.get('tool', tool, 'exe')
785
-
786
- vswitch = chip.get('tool', tool, 'vswitch')
787
- if vswitch:
788
- replay_opts["version_flag"] = " ".join(vswitch)
789
-
790
- format_cmd = [replay_opts["executable"]]
791
- arg_test = re.compile(r'^[-+]')
792
- file_test = re.compile(r'^[/]')
793
- for cmdarg in cmd_args:
794
- add_new_line = len(format_cmd) == 1
795
-
796
- if arg_test.match(cmdarg) or file_test.match(cmdarg):
797
- add_new_line = True
798
- else:
799
- if not arg_test.match(format_cmd[-1]):
800
- add_new_line = True
801
-
802
- if add_new_line:
803
- format_cmd.append(shlex.quote(cmdarg))
804
- else:
805
- format_cmd[-1] += f' {shlex.quote(cmdarg)}'
806
-
807
- replay_opts["cmds"] = format_cmd
808
-
809
- f.write(utils.get_file_template("replay/replay.sh.j2").render(replay_opts))
810
- f.write("\n")
811
-
812
- os.chmod(script_name, 0o755)
813
-
814
- return cmdlist, print_cmd, cmd, cmd_args
815
-
816
-
817
- def __get_stdio(chip, tool, task, flow, step, index):
818
- def get_file(io_type):
819
- suffix = chip.get('tool', tool, 'task', task, io_type, 'suffix',
820
- step=step, index=index)
821
- destination = chip.get('tool', tool, 'task', task, io_type, 'destination',
822
- step=step, index=index)
823
-
824
- io_file = None
825
- if destination == 'log':
826
- io_file = step + "." + suffix
827
- elif destination == 'output':
828
- io_file = os.path.join('outputs', chip.top() + "." + suffix)
829
- elif destination == 'none':
830
- io_file = os.devnull
831
- else:
832
- # This should not happen
833
- chip.logger.error(f'{io_type}/destination has no support for {destination}.')
834
- _haltstep(chip, flow, step, index)
835
-
836
- return io_file
837
-
838
- stdout_file = get_file('stdout')
839
- stderr_file = get_file('stderr')
840
-
841
- return stdout_file, stderr_file
842
-
843
-
844
- def _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, run_func=None):
845
- '''
846
- Run executable (or copy inputs to outputs for builtin functions)
847
- '''
848
-
849
- flow = chip.get('option', 'flow')
850
- tool, task = get_tool_task(chip, step, index, flow)
851
-
852
- quiet = (
853
- chip.get('option', 'quiet', step=step, index=index) and
854
- not chip.get('option', 'breakpoint', step=step, index=index)
855
- )
856
-
857
- stdout_print = chip.logger.info
858
- stderr_print = chip.logger.error
859
- if chip.get('option', 'loglevel', step=step, index=index) == "quiet":
860
- stdout_print = chip.logger.error
861
- stderr_print = chip.logger.error
862
-
863
- # TODO: Currently no memory usage tracking in breakpoints, builtins, or unexpected errors.
864
- max_mem_bytes = 0
865
- cpu_start = time.time()
866
-
867
- stdout_file, stderr_file = __get_stdio(chip, tool, task, flow, step, index)
868
- is_stdout_log = chip.get('tool', tool, 'task', task, 'stdout', 'destination',
869
- step=step, index=index) == 'log'
870
- is_stderr_log = chip.get('tool', tool, 'task', task, 'stderr', 'destination',
871
- step=step, index=index) == 'log' and stderr_file != stdout_file
872
-
873
- chip.logger.info(f'Running in {workdir}')
874
-
875
- retcode = 0
876
- cmdlist = []
877
- cmd_args = []
878
- if run_func:
879
- logfile = None
880
- try:
881
- with open(stdout_file, 'w') as stdout_writer, \
882
- open(stderr_file, 'w') as stderr_writer:
883
- if stderr_file == stdout_file:
884
- stderr_writer.close()
885
- stderr_writer = sys.stdout
886
-
887
- # Handle logger stdout suppression if quiet
888
- stdout_handler_level = chip.logger._console.level
889
- if chip.get('option', 'quiet', step=step, index=index):
890
- chip.logger._console.setLevel(logging.CRITICAL)
891
-
892
- with contextlib.redirect_stderr(stderr_writer), \
893
- contextlib.redirect_stdout(stdout_writer):
894
- retcode = run_func(chip)
895
-
896
- chip.logger._console.setLevel(stdout_handler_level)
897
- except Exception as e:
898
- chip.logger.error(f'Failed in run() for {tool}/{task}: {e}')
899
- retcode = 1 # default to non-zero
900
- print_traceback(chip, e)
901
- chip._error = True
902
- finally:
903
- with sc_open(stdout_file) as stdout_reader, \
904
- sc_open(stderr_file) as stderr_reader:
905
- __read_std_streams(chip,
906
- quiet,
907
- is_stdout_log, stdout_reader, stdout_print,
908
- is_stderr_log, stderr_reader, stderr_print)
909
-
910
- try:
911
- if resource:
912
- # Since memory collection is not possible, collect the current process
913
- # peak memory
914
- max_mem_bytes = max(
915
- max_mem_bytes,
916
- 1024 * resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
917
- except (OSError, ValueError, PermissionError):
918
- pass
919
- else:
920
- cmdlist, printable_cmd, _, cmd_args = _makecmd(chip, tool, task, step, index)
921
-
922
- ##################
923
- # Make record of tool options
924
- if cmd_args is not None:
925
- chip.schema.get("record", field='schema').record_tool(
926
- step, index, cmd_args, RecordTool.ARGS)
927
-
928
- chip.logger.info('%s', printable_cmd)
929
- timeout = chip.get('option', 'timeout', step=step, index=index)
930
- logfile = step + '.log'
931
- if sys.platform in ('darwin', 'linux') and \
932
- chip.get('option', 'breakpoint', step=step, index=index):
933
- # When we break on a step, the tool often drops into a shell.
934
- # However, our usual subprocess scheme seems to break terminal
935
- # echo for some tools. On POSIX-compatible systems, we can use
936
- # pty to connect the tool to our terminal instead. This code
937
- # doesn't handle quiet/timeout logic, since we don't want either
938
- # of these features for an interactive session. Logic for
939
- # forwarding to file based on
940
- # https://docs.python.org/3/library/pty.html#example.
941
- with open(logfile, 'wb') as log_writer:
942
- def read(fd):
943
- data = os.read(fd, 1024)
944
- log_writer.write(data)
945
- return data
946
- import pty # Note: this import throws exception on Windows
947
- retcode = pty.spawn(cmdlist, read)
948
- else:
949
- with open(stdout_file, 'w') as stdout_writer, \
950
- open(stdout_file, 'r', errors='replace_with_warning') as stdout_reader, \
951
- open(stderr_file, 'w') as stderr_writer, \
952
- open(stderr_file, 'r', errors='replace_with_warning') as stderr_reader:
953
- # if STDOUT and STDERR are to be redirected to the same file,
954
- # use a single writer
955
- if stderr_file == stdout_file:
956
- stderr_writer.close()
957
- stderr_reader.close()
958
- stderr_writer = subprocess.STDOUT
959
-
960
- preexec_fn = None
961
- nice = None
962
- if __is_posix():
963
- nice = chip.get('option', 'nice', step=step, index=index)
964
-
965
- def set_nice():
966
- os.nice(nice)
967
-
968
- if nice:
969
- preexec_fn = set_nice
970
-
971
- proc = subprocess.Popen(cmdlist,
972
- stdin=subprocess.DEVNULL,
973
- stdout=stdout_writer,
974
- stderr=stderr_writer,
975
- preexec_fn=preexec_fn)
976
- # How long to wait for proc to quit on ctrl-c before force
977
- # terminating.
978
- POLL_INTERVAL = 0.1
979
- MEMORY_WARN_LIMIT = 90
980
- try:
981
- while proc.poll() is None:
982
- # Gather subprocess memory usage.
983
- try:
984
- pproc = psutil.Process(proc.pid)
985
- proc_mem_bytes = pproc.memory_full_info().uss
986
- for child in pproc.children(recursive=True):
987
- proc_mem_bytes += child.memory_full_info().uss
988
- max_mem_bytes = max(max_mem_bytes, proc_mem_bytes)
989
-
990
- memory_usage = psutil.virtual_memory()
991
- if memory_usage.percent > MEMORY_WARN_LIMIT:
992
- chip.logger.warn(
993
- f'Current system memory usage is {memory_usage.percent}%')
994
-
995
- # increase limit warning
996
- MEMORY_WARN_LIMIT = int(memory_usage.percent + 1)
997
- except psutil.Error:
998
- # Process may have already terminated or been killed.
999
- # Retain existing memory usage statistics in this case.
1000
- pass
1001
- except PermissionError:
1002
- # OS is preventing access to this information so it cannot
1003
- # be collected
1004
- pass
1005
-
1006
- # Loop until process terminates
1007
- __read_std_streams(chip,
1008
- quiet,
1009
- is_stdout_log, stdout_reader, stdout_print,
1010
- is_stderr_log, stderr_reader, stderr_print)
1011
-
1012
- if timeout is not None and time.time() - cpu_start > timeout:
1013
- chip.logger.error(f'Step timed out after {timeout} seconds')
1014
- utils.terminate_process(proc.pid)
1015
- raise SiliconCompilerTimeout(f'{step}{index} timeout')
1016
- time.sleep(POLL_INTERVAL)
1017
- except KeyboardInterrupt:
1018
- kill_process(chip, proc, tool, 5 * POLL_INTERVAL, msg="Received ctrl-c. ")
1019
- _haltstep(chip, flow, step, index, log=False)
1020
- except SiliconCompilerTimeout:
1021
- send_messages.send(chip, "timeout", step, index)
1022
- kill_process(chip, proc, tool, 5 * POLL_INTERVAL)
1023
- chip._error = True
1024
-
1025
- # Read the remaining
1026
- __read_std_streams(chip,
1027
- quiet,
1028
- is_stdout_log, stdout_reader, stdout_print,
1029
- is_stderr_log, stderr_reader, stderr_print)
1030
- retcode = proc.returncode
1031
-
1032
- chip.schema.get("record", field='schema').record_tool(step, index, retcode, RecordTool.EXITCODE)
1033
- if retcode != 0:
1034
- msg = f'Command failed with code {retcode}.'
1035
- if logfile:
1036
- if quiet:
1037
- # Print last N lines of log when in quiet mode
1038
- with sc_open(logfile) as logfd:
1039
- loglines = logfd.read().splitlines()
1040
- for logline in loglines[-_failed_log_lines:]:
1041
- chip.logger.error(logline)
1042
- # No log file for pure-Python tools.
1043
- msg += f' See log file {os.path.abspath(logfile)}'
1044
- chip.logger.warning(msg)
1045
- chip._error = True
1046
-
1047
- # Capture cpu runtime
1048
- record_metric(chip, step, index, 'exetime', round((time.time() - cpu_start), 2),
1049
- source=None,
1050
- source_unit='s')
1051
-
1052
- # Capture memory usage
1053
- record_metric(chip, step, index, 'memory', max_mem_bytes,
1054
- source=None,
1055
- source_unit='B')
1056
-
1057
-
1058
- def _post_process(chip, step, index):
1059
- flow = chip.get('option', 'flow')
1060
- tool, task = get_tool_task(chip, step, index, flow)
1061
- func = getattr(chip._get_task_module(step, index, flow=flow), 'post_process', None)
1062
- if func:
1063
- try:
1064
- if _do_record_access():
1065
- chip.schema.add_journaling_type("get")
1066
- func(chip)
1067
- chip.schema.remove_journaling_type("get")
1068
- except Exception as e:
1069
- chip.logger.error(f'Failed to run post-process for {tool}/{task}.')
1070
- print_traceback(chip, e)
1071
- chip._error = True
1072
-
1073
-
1074
530
  def _check_logfile(chip, step, index, quiet=False, run_func=None):
1075
531
  '''
1076
532
  Check log file (must be after post-process)
@@ -1113,9 +569,19 @@ def _check_logfile(chip, step, index, quiet=False, run_func=None):
1113
569
  def _executenode(chip, step, index, replay):
1114
570
  workdir = chip.getworkdir(step=step, index=index)
1115
571
  flow = chip.get('option', 'flow')
1116
- tool, _ = get_tool_task(chip, step, index, flow)
572
+ tool, task = get_tool_task(chip, step, index, flow)
1117
573
 
1118
- _pre_process(chip, step, index)
574
+ task_class = chip.get("tool", tool, field="schema")
575
+ task_class.set_runtime(chip)
576
+
577
+ chip.logger.info(f'Running in {workdir}')
578
+
579
+ try:
580
+ task_class.pre_process()
581
+ except Exception as e:
582
+ chip.logger.error(f"Pre-processing failed for '{tool}/{task}'.")
583
+ utils.print_traceback(chip.logger, e)
584
+ raise e
1119
585
 
1120
586
  if chip.get('record', 'status', step=step, index=index) == NodeStatus.SKIPPED:
1121
587
  # copy inputs to outputs and skip execution
@@ -1123,10 +589,15 @@ def _executenode(chip, step, index, replay):
1123
589
 
1124
590
  send_messages.send(chip, "skipped", step, index)
1125
591
  else:
1126
- org_env = _set_env_vars(chip, step, index)
592
+ org_env = os.environ.copy()
593
+ os.environ.update(task_class.get_runtime_environmental_variables())
1127
594
 
1128
- run_func = getattr(chip._get_task_module(step, index, flow=flow), 'run', None)
1129
- toolpath, version = _check_tool_version(chip, step, index, run_func)
595
+ toolpath = task_class.get_exe()
596
+ version = task_class.get_exe_version()
597
+
598
+ if not chip.get('option', 'novercheck', step=step, index=index):
599
+ if not task_class.check_exe_version(version):
600
+ _haltstep(chip, flow, step, index)
1130
601
 
1131
602
  if version:
1132
603
  chip.schema.get("record", field='schema').record_tool(
@@ -1136,105 +607,51 @@ def _executenode(chip, step, index, replay):
1136
607
  chip.schema.get("record", field='schema').record_tool(
1137
608
  step, index, toolpath, RecordTool.PATH)
1138
609
 
1139
- # Write manifest (tool interface) (Don't move this!)
1140
- _write_task_manifest(chip, tool)
1141
-
1142
610
  send_messages.send(chip, "begin", step, index)
1143
611
 
1144
- _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, run_func)
612
+ try:
613
+ if not replay:
614
+ task_class.generate_replay_script(
615
+ os.path.join(workdir, "replay.sh"),
616
+ workdir)
617
+ ret_code = task_class.run_task(
618
+ workdir,
619
+ chip.get('option', 'quiet', step=step, index=index),
620
+ chip.get('option', 'loglevel', step=step, index=index),
621
+ chip.get('option', 'breakpoint', step=step, index=index),
622
+ chip.get('option', 'nice', step=step, index=index),
623
+ chip.get('option', 'timeout', step=step, index=index))
624
+ except Exception as e:
625
+ raise e
1145
626
 
1146
627
  os.environ.clear()
1147
628
  os.environ.update(org_env)
1148
629
 
1149
- _post_process(chip, step, index)
1150
-
1151
- _finalizenode(chip, step, index, replay)
1152
-
1153
- send_messages.send(chip, "end", step, index)
1154
-
630
+ if ret_code != 0:
631
+ msg = f'Command failed with code {ret_code}.'
632
+ logfile = f"{step}.log"
633
+ if os.path.exists(logfile):
634
+ if chip.get('option', 'quiet', step=step, index=index):
635
+ # Print last N lines of log when in quiet mode
636
+ with sc_open(logfile) as logfd:
637
+ loglines = logfd.read().splitlines()
638
+ for logline in loglines[-_failed_log_lines:]:
639
+ chip.logger.error(logline)
640
+ # No log file for pure-Python tools.
641
+ msg += f' See log file {os.path.abspath(logfile)}'
642
+ chip.logger.warning(msg)
643
+ chip._error = True
1155
644
 
1156
- def _pre_process(chip, step, index):
1157
- flow = chip.get('option', 'flow')
1158
- tool, task = get_tool_task(chip, step, index, flow)
1159
- func = getattr(chip._get_task_module(step, index, flow=flow), 'pre_process', None)
1160
- if func:
1161
645
  try:
1162
- if _do_record_access():
1163
- chip.schema.add_journaling_type("get")
1164
- func(chip)
1165
- chip.schema.remove_journaling_type("get")
646
+ task_class.post_process()
1166
647
  except Exception as e:
1167
- chip.logger.error(f"Pre-processing failed for '{tool}/{task}'.")
1168
- raise e
1169
- if chip._error:
1170
- chip.logger.error(f"Pre-processing failed for '{tool}/{task}'")
1171
- _haltstep(chip, flow, step, index)
1172
-
1173
-
1174
- def _set_env_vars(chip, step, index):
1175
- org_env = os.environ.copy()
1176
-
1177
- tool, task = get_tool_task(chip, step, index)
1178
-
1179
- if _do_record_access():
1180
- chip.schema.add_journaling_type("get")
1181
- os.environ.update(_get_run_env_vars(chip, tool, task, step, index, include_path=True))
1182
- chip.schema.remove_journaling_type("get")
1183
-
1184
- return org_env
1185
-
1186
-
1187
- def _check_tool_version(chip, step, index, run_func=None):
1188
- '''
1189
- Check exe version
1190
- '''
648
+ chip.logger.error(f"Post-processing failed for '{tool}/{task}'.")
649
+ utils.print_traceback(chip.logger, e)
650
+ chip._error = True
1191
651
 
1192
- flow = chip.get('option', 'flow')
1193
- tool, task = get_tool_task(chip, step, index, flow)
652
+ _finalizenode(chip, step, index, replay)
1194
653
 
1195
- vercheck = not chip.get('option', 'novercheck', step=step, index=index)
1196
- veropt = chip.get('tool', tool, 'vswitch')
1197
- exe = _getexe(chip, tool, step, index)
1198
- version = None
1199
- if exe is not None:
1200
- exe_path, exe_base = os.path.split(exe)
1201
- if veropt:
1202
- cmdlist = [exe]
1203
- cmdlist.extend(veropt)
1204
- proc = subprocess.run(cmdlist,
1205
- stdin=subprocess.DEVNULL,
1206
- stdout=subprocess.PIPE,
1207
- stderr=subprocess.STDOUT,
1208
- universal_newlines=True)
1209
- if proc.returncode != 0:
1210
- chip.logger.warning(f'Version check on {tool} failed with '
1211
- f'code {proc.returncode}')
1212
-
1213
- parse_version = getattr(chip._get_tool_module(step, index, flow=flow),
1214
- 'parse_version',
1215
- None)
1216
- if parse_version is None:
1217
- chip.logger.error(f'{tool}/{task} does not implement parse_version().')
1218
- _haltstep(chip, flow, step, index)
1219
- try:
1220
- version = parse_version(proc.stdout)
1221
- except Exception as e:
1222
- chip.logger.error(f'{tool} failed to parse version string: {proc.stdout}')
1223
- raise e
1224
-
1225
- chip.logger.info(f"Tool '{exe_base}' found with version '{version}' "
1226
- f"in directory '{exe_path}'")
1227
- if vercheck and not _check_version(chip, version, tool, step, index):
1228
- if proc.returncode != 0:
1229
- chip.logger.error(f"Tool '{exe_base}' responded with: {proc.stdout}")
1230
- _haltstep(chip, flow, step, index)
1231
- else:
1232
- chip.logger.info(f"Tool '{exe_base}' found in directory '{exe_path}'")
1233
- elif run_func is None:
1234
- exe_base = chip.get('tool', tool, 'exe')
1235
- chip.logger.error(f'Executable {exe_base} not found')
1236
- _haltstep(chip, flow, step, index)
1237
- return (exe, version)
654
+ send_messages.send(chip, "end", step, index)
1238
655
 
1239
656
 
1240
657
  def _hash_files(chip, step, index, setup=False):
@@ -1283,12 +700,11 @@ def _finalizenode(chip, step, index, replay):
1283
700
  chip.get('option', 'quiet', step=step, index=index) and not
1284
701
  chip.get('option', 'breakpoint', step=step, index=index)
1285
702
  )
1286
- run_func = getattr(chip._get_task_module(step, index, flow=flow), 'run', None)
1287
703
 
1288
704
  is_skipped = chip.get('record', 'status', step=step, index=index) == NodeStatus.SKIPPED
1289
705
 
1290
706
  if not is_skipped:
1291
- _check_logfile(chip, step, index, quiet, run_func)
707
+ _check_logfile(chip, step, index, quiet, None)
1292
708
 
1293
709
  # Report metrics
1294
710
  for metric in ['errors', 'warnings']:
@@ -1301,23 +717,15 @@ def _finalizenode(chip, step, index, replay):
1301
717
  # Capture wall runtime and cpu cores
1302
718
  end_time = chip.schema.get("record", field='schema').record_time(step, index, RecordTime.END)
1303
719
 
1304
- # calculate total time
1305
- total_times = []
1306
- for check_step, check_index in chip.schema.get("flowgraph", flow, field="schema").get_nodes():
1307
- total_time = chip.get('metric', 'totaltime', step=check_step, index=check_index)
1308
- if total_time is not None:
1309
- total_times.append(total_time)
1310
- if total_times:
1311
- total_time = max(total_times)
1312
- else:
1313
- total_time = 0.0
1314
-
1315
720
  walltime = end_time - chip.schema.get("record", field='schema').get_recorded_time(
1316
721
  step, index, RecordTime.START)
1317
722
  record_metric(chip, step, index, 'tasktime', walltime,
1318
723
  source=None, source_unit='s')
1319
- record_metric(chip, step, index, 'totaltime', total_time + walltime,
1320
- source=None, source_unit='s')
724
+
725
+ chip.schema.get("metric", field='schema').record_totaltime(
726
+ step, index,
727
+ chip.schema.get("flowgraph", flow, field='schema'),
728
+ chip.schema.get("record", field='schema'))
1321
729
  chip.logger.info(f"Finished task in {round(walltime, 2)}s")
1322
730
 
1323
731
  # Save a successful manifest
@@ -1430,12 +838,6 @@ def assert_required_accesses(chip, step, index):
1430
838
  for key in chip.getkeys('tool', tool, 'task', task, 'report'):
1431
839
  exempt.append(('tool', tool, 'task', task, 'report', key))
1432
840
 
1433
- # Get exempted keys from task
1434
- func = getattr(chip._get_task_module(step, index, flow=flow), 'exempt_keys', None)
1435
- if func:
1436
- # No need for try / except since this must work properly
1437
- exempt.extend(func(chip))
1438
-
1439
841
  required = set(
1440
842
  [tuple(key.split(',')) for key in chip.get('tool', tool, 'task', task, 'require',
1441
843
  step=step, index=index)])
@@ -1519,225 +921,6 @@ def _reset_flow_nodes(chip, flow, nodes_to_execute):
1519
921
  clear_node(step, index)
1520
922
 
1521
923
 
1522
- def _prepare_nodes(chip, nodes_to_run, processes, local_processes, flow):
1523
- '''
1524
- For each node to run, prepare a process and store its dependencies
1525
- '''
1526
-
1527
- # Call this in case this was invoked without __main__
1528
- multiprocessing.freeze_support()
1529
-
1530
- # Log queue for logging messages
1531
- log_queue = multiprocessing.Queue(-1)
1532
-
1533
- init_funcs = set()
1534
- for (step, index) in nodes_to_execute(chip, flow):
1535
- node = (step, index)
1536
-
1537
- if chip.get('record', 'status', step=step, index=index) != NodeStatus.PENDING:
1538
- continue
1539
-
1540
- nodes_to_run[node] = _get_pruned_node_inputs(chip, flow, (step, index))
1541
-
1542
- exec_func = _executenode
1543
-
1544
- if chip.get('option', 'scheduler', 'name', step=step, index=index) == 'slurm':
1545
- # Defer job to compute node
1546
- # If the job is configured to run on a cluster, collect the schema
1547
- # and send it to a compute node for deferred execution.
1548
- init_funcs.add(slurm.init)
1549
- exec_func = slurm._defernode
1550
- elif chip.get('option', 'scheduler', 'name', step=step, index=index) == 'docker':
1551
- # Run job in docker
1552
- init_funcs.add(docker_runner.init)
1553
- exec_func = docker_runner.run
1554
- local_processes.append((step, index))
1555
- else:
1556
- local_processes.append((step, index))
1557
-
1558
- process = {
1559
- "child_pipe": None,
1560
- "parent_pipe": None,
1561
- "proc": None
1562
- }
1563
- process["parent_pipe"], process["child_pipe"] = multiprocessing.Pipe()
1564
- process["proc"] = multiprocessing.Process(
1565
- target=_runtask,
1566
- args=(chip, flow, step, index, exec_func),
1567
- kwargs={"pipe": process["child_pipe"],
1568
- "queue": log_queue})
1569
-
1570
- processes[node] = process
1571
-
1572
- for init_func in init_funcs:
1573
- init_func(chip)
1574
-
1575
- return log_queue
1576
-
1577
-
1578
- def _check_node_dependencies(chip, node, deps, deps_was_successful):
1579
- had_deps = len(deps) > 0
1580
- step, index = node
1581
- tool, _ = get_tool_task(chip, step, index)
1582
-
1583
- # Clear any nodes that have finished from dependency list.
1584
- for in_step, in_index in list(deps):
1585
- in_status = chip.get('record', 'status', step=in_step, index=in_index)
1586
- if NodeStatus.is_done(in_status):
1587
- deps.remove((in_step, in_index))
1588
- if in_status == NodeStatus.SUCCESS:
1589
- deps_was_successful[node] = True
1590
- if NodeStatus.is_error(in_status):
1591
- # Fail if any dependency failed for non-builtin task
1592
- if tool != 'builtin':
1593
- deps.clear()
1594
- chip.schema.get("record", field='schema').set('status', NodeStatus.ERROR,
1595
- step=step, index=index)
1596
- return
1597
-
1598
- # Fail if no dependency successfully finished for builtin task
1599
- if had_deps and len(deps) == 0 \
1600
- and tool == 'builtin' and not deps_was_successful.get(node):
1601
- chip.schema.get("record", field='schema').set('status', NodeStatus.ERROR,
1602
- step=step, index=index)
1603
-
1604
-
1605
- def _launch_nodes(chip, nodes_to_run, processes, local_processes):
1606
- running_nodes = {}
1607
- max_parallel_run = chip.get('option', 'scheduler', 'maxnodes')
1608
- max_cores = utils.get_cores(chip)
1609
- max_threads = utils.get_cores(chip)
1610
- if not max_parallel_run:
1611
- max_parallel_run = utils.get_cores(chip)
1612
-
1613
- # clip max parallel jobs to 1 <= jobs <= max_cores
1614
- max_parallel_run = max(1, min(max_parallel_run, max_cores))
1615
-
1616
- def allow_start(node):
1617
- if node not in local_processes:
1618
- # using a different scheduler, so allow
1619
- return True, 0
1620
-
1621
- if len(running_nodes) >= max_parallel_run:
1622
- return False, 0
1623
-
1624
- # Record thread count requested
1625
- step, index = node
1626
- tool, task = get_tool_task(chip, step, index)
1627
- requested_threads = chip.get('tool', tool, 'task', task, 'threads',
1628
- step=step, index=index)
1629
- if not requested_threads:
1630
- # not specified, marking it max to be safe
1631
- requested_threads = max_threads
1632
- # clamp to max_parallel to avoid getting locked up
1633
- requested_threads = max(1, min(requested_threads, max_threads))
1634
-
1635
- if requested_threads + sum(running_nodes.values()) > max_cores:
1636
- # delay until there are enough core available
1637
- return False, 0
1638
-
1639
- # allow and record how many threads to associate
1640
- return True, requested_threads
1641
-
1642
- deps_was_successful = {}
1643
-
1644
- if _get_callback('pre_run'):
1645
- _get_callback('pre_run')(chip)
1646
-
1647
- start_times = {None: time.time()}
1648
-
1649
- while len(nodes_to_run) > 0 or len(running_nodes) > 0:
1650
- changed = _process_completed_nodes(chip, processes, running_nodes)
1651
-
1652
- # Check for new nodes that can be launched.
1653
- for node, deps in list(nodes_to_run.items()):
1654
- # TODO: breakpoint logic:
1655
- # if node is breakpoint, then don't launch while len(running_nodes) > 0
1656
-
1657
- _check_node_dependencies(chip, node, deps, deps_was_successful)
1658
-
1659
- if chip.get('record', 'status', step=node[0], index=node[1]) == NodeStatus.ERROR:
1660
- del nodes_to_run[node]
1661
- continue
1662
-
1663
- # If there are no dependencies left, launch this node and
1664
- # remove from nodes_to_run.
1665
- if len(deps) == 0:
1666
- dostart, requested_threads = allow_start(node)
1667
-
1668
- if dostart:
1669
- if _get_callback('pre_node'):
1670
- _get_callback('pre_node')(chip, *node)
1671
-
1672
- chip.schema.get("record", field='schema').set('status', NodeStatus.RUNNING,
1673
- step=node[0], index=node[1])
1674
- start_times[node] = time.time()
1675
- changed = True
1676
-
1677
- processes[node]["proc"].start()
1678
- del nodes_to_run[node]
1679
- running_nodes[node] = requested_threads
1680
-
1681
- # Check for situation where we have stuff left to run but don't
1682
- # have any nodes running. This shouldn't happen, but we will get
1683
- # stuck in an infinite loop if it does, so we want to break out
1684
- # with an explicit error.
1685
- if len(nodes_to_run) > 0 and len(running_nodes) == 0:
1686
- raise SiliconCompilerError(
1687
- 'Nodes left to run, but no running nodes. From/to may be invalid.', chip=chip)
1688
-
1689
- if chip._dash and changed:
1690
- # Update dashboard if the manifest changed
1691
- chip._dash.update_manifest(payload={"starttimes": start_times})
1692
-
1693
- if len(running_nodes) == 1:
1694
- # if there is only one node running, just join the thread
1695
- running_node = list(running_nodes.keys())[0]
1696
- processes[running_node]["proc"].join()
1697
- elif len(running_nodes) > 1:
1698
- # if there are more than 1, join the first with a timeout
1699
- running_node = list(running_nodes.keys())[0]
1700
- processes[running_node]["proc"].join(timeout=0.1)
1701
-
1702
-
1703
- def _process_completed_nodes(chip, processes, running_nodes):
1704
- changed = False
1705
- for node in list(running_nodes.keys()):
1706
- if not processes[node]["proc"].is_alive():
1707
- step, index = node
1708
- manifest = os.path.join(chip.getworkdir(step=step, index=index),
1709
- 'outputs',
1710
- f'{chip.design}.pkg.json')
1711
- chip.logger.debug(f'{step}{index} is complete merging: {manifest}')
1712
- if os.path.exists(manifest):
1713
- JournalingSchema(chip.schema).read_journal(manifest)
1714
-
1715
- if processes[node]["parent_pipe"] and processes[node]["parent_pipe"].poll(1):
1716
- try:
1717
- packages = processes[node]["parent_pipe"].recv()
1718
- if isinstance(packages, dict):
1719
- chip._packages.update(packages)
1720
- except: # noqa E722
1721
- pass
1722
-
1723
- del running_nodes[node]
1724
- if processes[node]["proc"].exitcode > 0:
1725
- status = NodeStatus.ERROR
1726
- else:
1727
- status = chip.get('record', 'status', step=step, index=index)
1728
- if not status or status == NodeStatus.PENDING:
1729
- status = NodeStatus.ERROR
1730
-
1731
- chip.schema.get("record", field='schema').set('status', status, step=step, index=index)
1732
-
1733
- changed = True
1734
-
1735
- if _get_callback('post_node'):
1736
- _get_callback('post_node')(chip, *node)
1737
-
1738
- return changed
1739
-
1740
-
1741
924
  def _check_nodes_status(chip, flow):
1742
925
  flowgraph = chip.schema.get("flowgraph", flow, field="schema")
1743
926
  runtime = RuntimeFlowgraph(
@@ -1762,28 +945,6 @@ def _check_nodes_status(chip, flow):
1762
945
  f'These final steps could not be reached: {",".join(sorted(unreached))}', chip=chip)
1763
946
 
1764
947
 
1765
- def print_traceback(chip, exception):
1766
- chip.logger.error(f'{exception}')
1767
- trace = StringIO()
1768
- traceback.print_tb(exception.__traceback__, file=trace)
1769
- chip.logger.error("Backtrace:")
1770
- for line in trace.getvalue().splitlines():
1771
- chip.logger.error(line)
1772
-
1773
-
1774
- def kill_process(chip, proc, tool, poll_interval, msg=""):
1775
- TERMINATE_TIMEOUT = 5
1776
- interrupt_time = time.time()
1777
- chip.logger.info(f'{msg}Waiting for {tool} to exit...')
1778
- while proc.poll() is None and \
1779
- (time.time() - interrupt_time) < TERMINATE_TIMEOUT:
1780
- time.sleep(5 * poll_interval)
1781
- if proc.poll() is None:
1782
- chip.logger.warning(f'{tool} did not exit within {TERMINATE_TIMEOUT} '
1783
- 'seconds. Terminating...')
1784
- utils.terminate_process(proc.pid)
1785
-
1786
-
1787
948
  def get_check_node_keys(chip, step, index):
1788
949
  tool, task = get_tool_task(chip, step, index)
1789
950
 
@@ -2065,6 +1226,9 @@ def copy_old_run_dir(chip, org_jobname):
2065
1226
 
2066
1227
  # Modify manifests to correct jobname
2067
1228
  for step, index in copy_nodes:
1229
+ tool, _ = get_tool_task(chip, step, index)
1230
+ task_class = chip.get("tool", tool, field="schema")
1231
+
2068
1232
  # rewrite replay files
2069
1233
  replay_file = f'{chip.getworkdir(step=step, index=index)}/replay.sh'
2070
1234
  if os.path.exists(replay_file):
@@ -2072,8 +1236,9 @@ def copy_old_run_dir(chip, org_jobname):
2072
1236
  os.remove(replay_file)
2073
1237
  chip.set('arg', 'step', step)
2074
1238
  chip.set('arg', 'index', index)
2075
- tool, task = get_tool_task(chip, step, index)
2076
- _makecmd(chip, tool, task, step, index, script_name=replay_file)
1239
+ task_class.set_runtime(chip, step=step, index=index)
1240
+ task_class.generate_replay_script(replay_file, chip.getworkdir(step=step, index=index))
1241
+ task_class.set_runtime(None)
2077
1242
  chip.unset('arg', 'step')
2078
1243
  chip.unset('arg', 'index')
2079
1244
 
@@ -2110,8 +1275,13 @@ def clean_build_dir(chip):
2110
1275
  return
2111
1276
 
2112
1277
  if chip.get('option', 'from'):
1278
+ runtime = RuntimeFlowgraph(
1279
+ chip.schema.get("flowgraph", chip.get('option', 'flow'), field='schema'),
1280
+ from_steps=chip.get('option', 'from'),
1281
+ to_steps=chip.get('option', 'to'),
1282
+ prune_nodes=chip.get('option', 'prune'))
2113
1283
  # Remove stale outputs that will be rerun
2114
- for step, index in nodes_to_execute(chip):
1284
+ for step, index in runtime.get_nodes():
2115
1285
  clean_node_dir(chip, step, index)
2116
1286
 
2117
1287
  all_nodes = set(chip.schema.get("flowgraph", chip.get('option', 'flow'),