siliconcompiler 0.26.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (251) hide show
  1. siliconcompiler/__init__.py +24 -0
  2. siliconcompiler/__main__.py +12 -0
  3. siliconcompiler/_common.py +49 -0
  4. siliconcompiler/_metadata.py +36 -0
  5. siliconcompiler/apps/__init__.py +0 -0
  6. siliconcompiler/apps/_common.py +76 -0
  7. siliconcompiler/apps/sc.py +92 -0
  8. siliconcompiler/apps/sc_dashboard.py +94 -0
  9. siliconcompiler/apps/sc_issue.py +178 -0
  10. siliconcompiler/apps/sc_remote.py +199 -0
  11. siliconcompiler/apps/sc_server.py +39 -0
  12. siliconcompiler/apps/sc_show.py +142 -0
  13. siliconcompiler/apps/smake.py +232 -0
  14. siliconcompiler/checklists/__init__.py +0 -0
  15. siliconcompiler/checklists/oh_tapeout.py +41 -0
  16. siliconcompiler/core.py +3221 -0
  17. siliconcompiler/data/RobotoMono/LICENSE.txt +202 -0
  18. siliconcompiler/data/RobotoMono/RobotoMono-Regular.ttf +0 -0
  19. siliconcompiler/data/heartbeat.v +18 -0
  20. siliconcompiler/data/logo.png +0 -0
  21. siliconcompiler/flowgraph.py +570 -0
  22. siliconcompiler/flows/__init__.py +0 -0
  23. siliconcompiler/flows/_common.py +67 -0
  24. siliconcompiler/flows/asicflow.py +180 -0
  25. siliconcompiler/flows/asictopflow.py +38 -0
  26. siliconcompiler/flows/dvflow.py +86 -0
  27. siliconcompiler/flows/fpgaflow.py +202 -0
  28. siliconcompiler/flows/generate_openroad_rcx.py +66 -0
  29. siliconcompiler/flows/lintflow.py +35 -0
  30. siliconcompiler/flows/screenshotflow.py +51 -0
  31. siliconcompiler/flows/showflow.py +59 -0
  32. siliconcompiler/flows/signoffflow.py +53 -0
  33. siliconcompiler/flows/synflow.py +128 -0
  34. siliconcompiler/fpgas/__init__.py +0 -0
  35. siliconcompiler/fpgas/lattice_ice40.py +42 -0
  36. siliconcompiler/fpgas/vpr_example.py +109 -0
  37. siliconcompiler/issue.py +300 -0
  38. siliconcompiler/libs/__init__.py +0 -0
  39. siliconcompiler/libs/asap7sc7p5t.py +8 -0
  40. siliconcompiler/libs/gf180mcu.py +8 -0
  41. siliconcompiler/libs/nangate45.py +8 -0
  42. siliconcompiler/libs/sky130hd.py +8 -0
  43. siliconcompiler/libs/sky130io.py +8 -0
  44. siliconcompiler/package.py +412 -0
  45. siliconcompiler/pdks/__init__.py +0 -0
  46. siliconcompiler/pdks/asap7.py +8 -0
  47. siliconcompiler/pdks/freepdk45.py +8 -0
  48. siliconcompiler/pdks/gf180.py +8 -0
  49. siliconcompiler/pdks/skywater130.py +8 -0
  50. siliconcompiler/remote/__init__.py +36 -0
  51. siliconcompiler/remote/client.py +891 -0
  52. siliconcompiler/remote/schema.py +106 -0
  53. siliconcompiler/remote/server.py +507 -0
  54. siliconcompiler/remote/server_schema/requests/cancel_job.json +51 -0
  55. siliconcompiler/remote/server_schema/requests/check_progress.json +61 -0
  56. siliconcompiler/remote/server_schema/requests/check_server.json +38 -0
  57. siliconcompiler/remote/server_schema/requests/delete_job.json +51 -0
  58. siliconcompiler/remote/server_schema/requests/get_results.json +48 -0
  59. siliconcompiler/remote/server_schema/requests/remote_run.json +40 -0
  60. siliconcompiler/remote/server_schema/responses/cancel_job.json +18 -0
  61. siliconcompiler/remote/server_schema/responses/check_progress.json +30 -0
  62. siliconcompiler/remote/server_schema/responses/check_server.json +32 -0
  63. siliconcompiler/remote/server_schema/responses/delete_job.json +18 -0
  64. siliconcompiler/remote/server_schema/responses/get_results.json +21 -0
  65. siliconcompiler/remote/server_schema/responses/remote_run.json +25 -0
  66. siliconcompiler/report/__init__.py +13 -0
  67. siliconcompiler/report/html_report.py +74 -0
  68. siliconcompiler/report/report.py +355 -0
  69. siliconcompiler/report/streamlit_report.py +137 -0
  70. siliconcompiler/report/streamlit_viewer.py +944 -0
  71. siliconcompiler/report/summary_image.py +117 -0
  72. siliconcompiler/report/summary_table.py +105 -0
  73. siliconcompiler/report/utils.py +163 -0
  74. siliconcompiler/scheduler/__init__.py +2092 -0
  75. siliconcompiler/scheduler/docker_runner.py +253 -0
  76. siliconcompiler/scheduler/run_node.py +138 -0
  77. siliconcompiler/scheduler/send_messages.py +178 -0
  78. siliconcompiler/scheduler/slurm.py +208 -0
  79. siliconcompiler/scheduler/validation/email_credentials.json +54 -0
  80. siliconcompiler/schema/__init__.py +7 -0
  81. siliconcompiler/schema/schema_cfg.py +4014 -0
  82. siliconcompiler/schema/schema_obj.py +1841 -0
  83. siliconcompiler/schema/utils.py +93 -0
  84. siliconcompiler/sphinx_ext/__init__.py +0 -0
  85. siliconcompiler/sphinx_ext/dynamicgen.py +1006 -0
  86. siliconcompiler/sphinx_ext/schemagen.py +221 -0
  87. siliconcompiler/sphinx_ext/utils.py +166 -0
  88. siliconcompiler/targets/__init__.py +0 -0
  89. siliconcompiler/targets/asap7_demo.py +68 -0
  90. siliconcompiler/targets/asic_demo.py +38 -0
  91. siliconcompiler/targets/fpgaflow_demo.py +47 -0
  92. siliconcompiler/targets/freepdk45_demo.py +59 -0
  93. siliconcompiler/targets/gf180_demo.py +77 -0
  94. siliconcompiler/targets/skywater130_demo.py +70 -0
  95. siliconcompiler/templates/email/general.j2 +66 -0
  96. siliconcompiler/templates/email/summary.j2 +43 -0
  97. siliconcompiler/templates/issue/README.txt +26 -0
  98. siliconcompiler/templates/issue/run.sh +6 -0
  99. siliconcompiler/templates/report/bootstrap.min.css +7 -0
  100. siliconcompiler/templates/report/bootstrap.min.js +7 -0
  101. siliconcompiler/templates/report/bootstrap_LICENSE.md +24 -0
  102. siliconcompiler/templates/report/sc_report.j2 +427 -0
  103. siliconcompiler/templates/slurm/run.sh +9 -0
  104. siliconcompiler/templates/tcl/manifest.tcl.j2 +137 -0
  105. siliconcompiler/tools/__init__.py +0 -0
  106. siliconcompiler/tools/_common/__init__.py +432 -0
  107. siliconcompiler/tools/_common/asic.py +115 -0
  108. siliconcompiler/tools/_common/sdc/sc_constraints.sdc +76 -0
  109. siliconcompiler/tools/_common/tcl/sc_pin_constraints.tcl +63 -0
  110. siliconcompiler/tools/bambu/bambu.py +32 -0
  111. siliconcompiler/tools/bambu/convert.py +77 -0
  112. siliconcompiler/tools/bluespec/bluespec.py +40 -0
  113. siliconcompiler/tools/bluespec/convert.py +103 -0
  114. siliconcompiler/tools/builtin/_common.py +155 -0
  115. siliconcompiler/tools/builtin/builtin.py +26 -0
  116. siliconcompiler/tools/builtin/concatenate.py +85 -0
  117. siliconcompiler/tools/builtin/join.py +27 -0
  118. siliconcompiler/tools/builtin/maximum.py +46 -0
  119. siliconcompiler/tools/builtin/minimum.py +57 -0
  120. siliconcompiler/tools/builtin/mux.py +70 -0
  121. siliconcompiler/tools/builtin/nop.py +38 -0
  122. siliconcompiler/tools/builtin/verify.py +83 -0
  123. siliconcompiler/tools/chisel/SCDriver.scala +10 -0
  124. siliconcompiler/tools/chisel/build.sbt +27 -0
  125. siliconcompiler/tools/chisel/chisel.py +37 -0
  126. siliconcompiler/tools/chisel/convert.py +140 -0
  127. siliconcompiler/tools/execute/exec_input.py +41 -0
  128. siliconcompiler/tools/execute/execute.py +17 -0
  129. siliconcompiler/tools/genfasm/bitstream.py +61 -0
  130. siliconcompiler/tools/genfasm/genfasm.py +40 -0
  131. siliconcompiler/tools/ghdl/convert.py +87 -0
  132. siliconcompiler/tools/ghdl/ghdl.py +41 -0
  133. siliconcompiler/tools/icarus/compile.py +87 -0
  134. siliconcompiler/tools/icarus/icarus.py +36 -0
  135. siliconcompiler/tools/icepack/bitstream.py +20 -0
  136. siliconcompiler/tools/icepack/icepack.py +43 -0
  137. siliconcompiler/tools/klayout/export.py +117 -0
  138. siliconcompiler/tools/klayout/klayout.py +119 -0
  139. siliconcompiler/tools/klayout/klayout_export.py +205 -0
  140. siliconcompiler/tools/klayout/klayout_operations.py +363 -0
  141. siliconcompiler/tools/klayout/klayout_show.py +242 -0
  142. siliconcompiler/tools/klayout/klayout_utils.py +176 -0
  143. siliconcompiler/tools/klayout/operations.py +194 -0
  144. siliconcompiler/tools/klayout/screenshot.py +98 -0
  145. siliconcompiler/tools/klayout/show.py +101 -0
  146. siliconcompiler/tools/magic/drc.py +49 -0
  147. siliconcompiler/tools/magic/extspice.py +19 -0
  148. siliconcompiler/tools/magic/magic.py +85 -0
  149. siliconcompiler/tools/magic/sc_drc.tcl +96 -0
  150. siliconcompiler/tools/magic/sc_extspice.tcl +54 -0
  151. siliconcompiler/tools/magic/sc_magic.tcl +47 -0
  152. siliconcompiler/tools/montage/montage.py +30 -0
  153. siliconcompiler/tools/montage/tile.py +66 -0
  154. siliconcompiler/tools/netgen/count_lvs.py +132 -0
  155. siliconcompiler/tools/netgen/lvs.py +90 -0
  156. siliconcompiler/tools/netgen/netgen.py +36 -0
  157. siliconcompiler/tools/netgen/sc_lvs.tcl +46 -0
  158. siliconcompiler/tools/nextpnr/apr.py +24 -0
  159. siliconcompiler/tools/nextpnr/nextpnr.py +59 -0
  160. siliconcompiler/tools/openfpgaloader/openfpgaloader.py +39 -0
  161. siliconcompiler/tools/openroad/__init__.py +0 -0
  162. siliconcompiler/tools/openroad/cts.py +45 -0
  163. siliconcompiler/tools/openroad/dfm.py +66 -0
  164. siliconcompiler/tools/openroad/export.py +131 -0
  165. siliconcompiler/tools/openroad/floorplan.py +70 -0
  166. siliconcompiler/tools/openroad/openroad.py +977 -0
  167. siliconcompiler/tools/openroad/physyn.py +27 -0
  168. siliconcompiler/tools/openroad/place.py +41 -0
  169. siliconcompiler/tools/openroad/rcx_bench.py +95 -0
  170. siliconcompiler/tools/openroad/rcx_extract.py +34 -0
  171. siliconcompiler/tools/openroad/route.py +45 -0
  172. siliconcompiler/tools/openroad/screenshot.py +60 -0
  173. siliconcompiler/tools/openroad/scripts/sc_apr.tcl +499 -0
  174. siliconcompiler/tools/openroad/scripts/sc_cts.tcl +64 -0
  175. siliconcompiler/tools/openroad/scripts/sc_dfm.tcl +20 -0
  176. siliconcompiler/tools/openroad/scripts/sc_export.tcl +98 -0
  177. siliconcompiler/tools/openroad/scripts/sc_floorplan.tcl +413 -0
  178. siliconcompiler/tools/openroad/scripts/sc_metrics.tcl +158 -0
  179. siliconcompiler/tools/openroad/scripts/sc_physyn.tcl +7 -0
  180. siliconcompiler/tools/openroad/scripts/sc_place.tcl +84 -0
  181. siliconcompiler/tools/openroad/scripts/sc_procs.tcl +423 -0
  182. siliconcompiler/tools/openroad/scripts/sc_rcx.tcl +63 -0
  183. siliconcompiler/tools/openroad/scripts/sc_rcx_bench.tcl +20 -0
  184. siliconcompiler/tools/openroad/scripts/sc_rcx_extract.tcl +12 -0
  185. siliconcompiler/tools/openroad/scripts/sc_route.tcl +133 -0
  186. siliconcompiler/tools/openroad/scripts/sc_screenshot.tcl +21 -0
  187. siliconcompiler/tools/openroad/scripts/sc_write.tcl +5 -0
  188. siliconcompiler/tools/openroad/scripts/sc_write_images.tcl +361 -0
  189. siliconcompiler/tools/openroad/show.py +94 -0
  190. siliconcompiler/tools/openroad/templates/pex.tcl +8 -0
  191. siliconcompiler/tools/opensta/__init__.py +101 -0
  192. siliconcompiler/tools/opensta/report_libraries.py +28 -0
  193. siliconcompiler/tools/opensta/scripts/sc_procs.tcl +47 -0
  194. siliconcompiler/tools/opensta/scripts/sc_report_libraries.tcl +74 -0
  195. siliconcompiler/tools/opensta/scripts/sc_timing.tcl +268 -0
  196. siliconcompiler/tools/opensta/timing.py +214 -0
  197. siliconcompiler/tools/slang/__init__.py +49 -0
  198. siliconcompiler/tools/slang/lint.py +101 -0
  199. siliconcompiler/tools/surelog/__init__.py +123 -0
  200. siliconcompiler/tools/surelog/parse.py +183 -0
  201. siliconcompiler/tools/surelog/templates/output.v +7 -0
  202. siliconcompiler/tools/sv2v/convert.py +46 -0
  203. siliconcompiler/tools/sv2v/sv2v.py +37 -0
  204. siliconcompiler/tools/template/template.py +125 -0
  205. siliconcompiler/tools/verilator/compile.py +139 -0
  206. siliconcompiler/tools/verilator/lint.py +19 -0
  207. siliconcompiler/tools/verilator/parse.py +27 -0
  208. siliconcompiler/tools/verilator/verilator.py +172 -0
  209. siliconcompiler/tools/vivado/__init__.py +7 -0
  210. siliconcompiler/tools/vivado/bitstream.py +21 -0
  211. siliconcompiler/tools/vivado/place.py +21 -0
  212. siliconcompiler/tools/vivado/route.py +21 -0
  213. siliconcompiler/tools/vivado/scripts/sc_bitstream.tcl +6 -0
  214. siliconcompiler/tools/vivado/scripts/sc_place.tcl +2 -0
  215. siliconcompiler/tools/vivado/scripts/sc_route.tcl +4 -0
  216. siliconcompiler/tools/vivado/scripts/sc_run.tcl +45 -0
  217. siliconcompiler/tools/vivado/scripts/sc_syn_fpga.tcl +25 -0
  218. siliconcompiler/tools/vivado/syn_fpga.py +20 -0
  219. siliconcompiler/tools/vivado/vivado.py +147 -0
  220. siliconcompiler/tools/vpr/_json_constraint.py +63 -0
  221. siliconcompiler/tools/vpr/_xml_constraint.py +109 -0
  222. siliconcompiler/tools/vpr/place.py +137 -0
  223. siliconcompiler/tools/vpr/route.py +124 -0
  224. siliconcompiler/tools/vpr/screenshot.py +54 -0
  225. siliconcompiler/tools/vpr/show.py +88 -0
  226. siliconcompiler/tools/vpr/vpr.py +357 -0
  227. siliconcompiler/tools/xyce/xyce.py +36 -0
  228. siliconcompiler/tools/yosys/lec.py +56 -0
  229. siliconcompiler/tools/yosys/prepareLib.py +59 -0
  230. siliconcompiler/tools/yosys/sc_lec.tcl +84 -0
  231. siliconcompiler/tools/yosys/sc_syn.tcl +79 -0
  232. siliconcompiler/tools/yosys/syn_asic.py +565 -0
  233. siliconcompiler/tools/yosys/syn_asic.tcl +377 -0
  234. siliconcompiler/tools/yosys/syn_asic_fpga_shared.tcl +31 -0
  235. siliconcompiler/tools/yosys/syn_fpga.py +146 -0
  236. siliconcompiler/tools/yosys/syn_fpga.tcl +233 -0
  237. siliconcompiler/tools/yosys/syn_strategies.tcl +81 -0
  238. siliconcompiler/tools/yosys/techmaps/lcu_kogge_stone.v +39 -0
  239. siliconcompiler/tools/yosys/templates/abc.const +2 -0
  240. siliconcompiler/tools/yosys/yosys.py +147 -0
  241. siliconcompiler/units.py +259 -0
  242. siliconcompiler/use.py +177 -0
  243. siliconcompiler/utils/__init__.py +423 -0
  244. siliconcompiler/utils/asic.py +158 -0
  245. siliconcompiler/utils/showtools.py +25 -0
  246. siliconcompiler-0.26.5.dist-info/LICENSE +190 -0
  247. siliconcompiler-0.26.5.dist-info/METADATA +195 -0
  248. siliconcompiler-0.26.5.dist-info/RECORD +251 -0
  249. siliconcompiler-0.26.5.dist-info/WHEEL +5 -0
  250. siliconcompiler-0.26.5.dist-info/entry_points.txt +12 -0
  251. siliconcompiler-0.26.5.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2092 @@
1
+ import copy
2
+ import distro
3
+ import getpass
4
+ import multiprocessing
5
+ import os
6
+ import platform
7
+ import psutil
8
+ import socket
9
+ import re
10
+ import shlex
11
+ import shutil
12
+ import subprocess
13
+ import sys
14
+ import time
15
+ import packaging.version
16
+ import packaging.specifiers
17
+ from io import StringIO
18
+ import traceback
19
+ from datetime import datetime
20
+ from siliconcompiler import sc_open
21
+ from siliconcompiler import utils
22
+ from siliconcompiler import _metadata
23
+ from siliconcompiler.remote import client
24
+ from siliconcompiler.schema import Schema
25
+ from siliconcompiler.scheduler import slurm
26
+ from siliconcompiler.scheduler import docker_runner
27
+ from siliconcompiler import NodeStatus, SiliconCompilerError
28
+ from siliconcompiler.flowgraph import _get_flowgraph_nodes, _get_flowgraph_execution_order, \
29
+ _get_pruned_node_inputs, _get_flowgraph_node_inputs, _get_flowgraph_entry_nodes, \
30
+ _unreachable_steps_to_execute, _nodes_to_execute, \
31
+ get_nodes_from, nodes_to_execute, _check_flowgraph
32
+ from siliconcompiler.tools._common import input_file_node_name
33
+ import lambdapdk
34
+ from siliconcompiler.tools._common import get_tool_task, record_metric
35
+ from siliconcompiler.scheduler import send_messages
36
+
37
+ try:
38
+ import resource
39
+ except ModuleNotFoundError:
40
+ resource = None
41
+
42
+
43
# Registry of callback hooks so that custom runners can observe run progress.
_callback_funcs = {}


def register_callback(hook, func):
    """Register *func* to be invoked for the named *hook*."""
    _callback_funcs[hook] = func


def _get_callback(hook):
    """Return the callback registered for *hook*, or None when unset."""
    return _callback_funcs.get(hook)
55
+
56
+
57
+ ###############################################################################
58
class SiliconCompilerTimeout(Exception):
    ''' Minimal Exception wrapper used to raise sc timeout errors.
    '''
    def __init__(self, message):
        # Zero-argument super() resolves through the normal MRO (Exception);
        # the previous super(Exception, self) deliberately skipped Exception
        # itself, which is a misuse of the two-argument form.
        super().__init__(message)
63
+
64
+
65
def run(chip):
    '''
    See :meth:`~siliconcompiler.core.Chip.run` for detailed documentation.
    '''

    # Disable GUI display on headless Linux hosts before anything else runs.
    _check_display(chip)

    # Check required settings before attempting run()
    for key in (['option', 'flow'], ):
        if chip.get(*key) is None:
            raise SiliconCompilerError(
                f"{key} must be set before calling run()",
                chip=chip)

    # Remember the pre-increment jobname so old run data can be copied over.
    org_jobname = chip.get('option', 'jobname')
    _increment_job_name(chip)

    # Re-init logger to include run info after setting up flowgraph.
    chip._init_logger(in_run=True)

    # Check if flowgraph is complete and valid
    flow = chip.get('option', 'flow')
    if not _check_flowgraph(chip, flow=flow):
        raise SiliconCompilerError(
            f"{flow} flowgraph contains errors and cannot be run.",
            chip=chip)

    copy_old_run_dir(chip, org_jobname)
    clean_build_dir(chip)
    _reset_flow_nodes(chip, flow, nodes_to_execute(chip, flow))

    # Save current environment so _finalize_run() can restore it afterwards.
    environment = copy.deepcopy(os.environ)
    # Apply user-requested env variables from ['option', 'env'] for the run.
    for envvar in chip.getkeys('option', 'env'):
        val = chip.get('option', 'env', envvar)
        os.environ[envvar] = val

    # Dispatch to the remote client or run the flow in this process tree.
    if chip.get('option', 'remote'):
        client.remote_process(chip)
    else:
        _local_process(chip, flow)

    # Merge cfgs from last executed tasks, and write out a final manifest.
    _finalize_run(chip, environment)
110
+
111
+
112
+ ###########################################################################
113
def _finalize_run(chip, environment):
    '''
    Wrap up a job run after it completes.

    Restores the pre-run environment variables, clears the scratchpad
    step/index arguments, records the run in the schema history, writes
    the final JSON manifest into the job root directory, and emits the
    summary message.
    '''

    # Put the process environment back exactly as it was before run().
    os.environ.clear()
    os.environ.update(environment)

    # run() checks these scratchpad args on entry, so reset them here.
    for arg in ('step', 'index'):
        chip.set('arg', arg, None, clobber=True)

    # Archive this run into the schema's 'history' field.
    chip.schema.record_history()

    # The final manifest lives in the job root directory.
    manifest_path = os.path.join(chip.getworkdir(), f"{chip.design}.pkg.json")
    chip.write_manifest(manifest_path)

    send_messages.send(chip, 'summary', None, None)
138
+
139
+
140
def _increment_job_name(chip):
    '''
    Bump the jobname when ['option', 'jobincr'] is enabled.

    Runs before logger initialization so that the logger picks up the
    updated jobname. Only applies on clean runs whose workdir already
    exists.
    '''
    if not chip.get('option', 'clean'):
        return
    if not chip.get('option', 'jobincr'):
        return

    workdir = chip.getworkdir()
    if not os.path.isdir(workdir):
        return

    # Drop any trailing digits from the configured jobname to get the stem.
    stem = chip.get('option', 'jobname').rstrip('0123456789')

    # Scan sibling job directories for the highest existing numeric suffix.
    design_dir = os.path.dirname(workdir)
    pattern = re.compile(stem + r'(\d+)')
    highest = 0
    for entry in os.listdir(design_dir):
        match = pattern.match(entry)
        if match:
            highest = max(highest, int(match.group(1)))

    chip.set('option', 'jobname', f'{stem}{highest + 1}')
160
+
161
+
162
+ ###########################################################################
163
def _check_display(chip):
    '''
    Disable the display option on headless Linux hosts.

    When no desktop environment is detected (neither $DISPLAY nor
    $WAYLAND_DISPLAY is set), ['option', 'nodisplay'] is forced to True
    and a warning is logged.
    '''
    if chip.get('option', 'nodisplay'):
        return
    if sys.platform != 'linux':
        return
    if 'DISPLAY' in os.environ or 'WAYLAND_DISPLAY' in os.environ:
        return

    chip.logger.warning('Environment variable $DISPLAY or $WAYLAND_DISPLAY not set')
    chip.logger.warning("Setting ['option', 'nodisplay'] to True")
    chip.set('option', 'nodisplay', True)
172
+
173
+
174
def _local_process(chip, flow):
    '''
    Run the flow locally for the current job.

    Loads manifests from previous runs where available, re-runs node
    setup, invalidates nodes whose inputs changed since the last run,
    validates the manifest, then prepares and launches all runnable
    nodes, and finally checks the resulting node statuses.
    '''
    from_nodes = []
    extra_setup_nodes = {}

    if chip.get('option', 'clean') or not chip.get('option', 'from'):
        # Clean run (or no -from): consider every node in the flowgraph.
        load_nodes = _get_flowgraph_nodes(chip, flow)
    else:
        # Expand each -from step into its (step, index) nodes.
        for step in chip.get('option', 'from'):
            from_nodes.extend(
                [(step, index) for index in chip.getkeys('flowgraph', flow, step)])

        load_nodes = _nodes_to_execute(
            chip,
            flow,
            _get_flowgraph_entry_nodes(chip, flow),
            from_nodes,
            chip.get('option', 'prune'))

    # Collect saved manifests from prior runs for nodes we are not re-running
    # from scratch, so their recorded state can be reused below.
    for node_level in _get_flowgraph_execution_order(chip, flow):
        for step, index in node_level:
            if (step, index) not in load_nodes:
                continue
            if (step, index) in from_nodes:
                continue

            manifest = os.path.join(chip.getworkdir(step=step, index=index),
                                    'outputs',
                                    f'{chip.design}.pkg.json')
            if os.path.exists(manifest):
                # ensure we setup these nodes again
                try:
                    extra_setup_nodes[(step, index)] = Schema(manifest=manifest, logger=chip.logger)
                except Exception:
                    # Unreadable/corrupt manifest: fall back to a fresh setup.
                    pass

    # Setup tools for all nodes to run.
    nodes = nodes_to_execute(chip, flow)
    all_setup_nodes = nodes + load_nodes + list(extra_setup_nodes.keys())
    for layer_nodes in _get_flowgraph_execution_order(chip, flow):
        for step, index in layer_nodes:
            if (step, index) in all_setup_nodes:
                node_kept = _setup_node(chip, step, index)
                if not node_kept and (step, index) in extra_setup_nodes:
                    # Node was skipped during setup; drop its stale manifest.
                    del extra_setup_nodes[(step, index)]
                if (step, index) in extra_setup_nodes:
                    # Carry the recorded status forward from the prior run.
                    schema = extra_setup_nodes[(step, index)]
                    node_status = None
                    try:
                        node_status = schema.get('record', 'status', step=step, index=index)
                    except:  # noqa E722
                        pass
                    if node_status:
                        chip.set('record', 'status', node_status, step=step, index=index)

    def mark_pending(step, index):
        '''Mark (step, index) and all downstream non-skipped nodes PENDING.'''
        chip.set('record', 'status', NodeStatus.PENDING, step=step, index=index)
        for next_step, next_index in get_nodes_from(chip, flow, [(step, index)]):
            if chip.get('record', 'status', step=next_step, index=next_index) == \
                    NodeStatus.SKIPPED:
                continue

            # Mark following steps as pending
            chip.set('record', 'status', NodeStatus.PENDING, step=next_step, index=next_index)

    # Check if nodes have been modified from previous data
    for layer_nodes in _get_flowgraph_execution_order(chip, flow):
        for step, index in layer_nodes:
            # Only look at successful nodes
            if chip.get('record', 'status', step=step, index=index) not in \
                    (NodeStatus.SUCCESS, NodeStatus.SKIPPED):
                continue

            if not check_node_inputs(chip, step, index):
                # change failing nodes to pending
                mark_pending(step, index)
            elif (step, index) in extra_setup_nodes:
                # import old information
                chip.schema._import_journal(extra_setup_nodes[(step, index)])

    # Ensure pending nodes cause following nodes to be run
    for step, index in nodes:
        if chip.get('record', 'status', step=step, index=index) in \
                (NodeStatus.PENDING, NodeStatus.ERROR):
            mark_pending(step, index)

    # Clean nodes marked pending
    for step, index in nodes:
        if chip.get('record', 'status', step=step, index=index) == NodeStatus.PENDING:
            clean_node_dir(chip, step, index)

    # Check validity of setup
    chip.logger.info("Checking manifest before running.")
    check_ok = chip.check_manifest()

    # Check if there were errors before proceeding with run
    if not check_ok:
        raise SiliconCompilerError('Manifest check failed. See previous errors.', chip=chip)

    if chip._error:
        raise SiliconCompilerError(
            'Implementation errors encountered. See previous errors.',
            chip=chip)

    nodes_to_run = {}
    processes = {}
    local_processes = []
    _prepare_nodes(chip, nodes_to_run, processes, local_processes, flow)
    try:
        _launch_nodes(chip, nodes_to_run, processes, local_processes)
    except KeyboardInterrupt:
        # exit immediately
        sys.exit(0)

    if _get_callback('post_run'):
        _get_callback('post_run')(chip)

    _check_nodes_status(chip, flow)
291
+
292
+
293
def __is_posix():
    """Return True on any platform other than native Windows."""
    if sys.platform == 'win32':
        return False
    return True
295
+
296
+
297
+ ###########################################################################
298
def _setup_node(chip, step, index, flow=None):
    '''
    Run the tool/task setup() for a single node.

    Temporarily points the scratchpad ['arg', 'step'/'index'] (and
    optionally ['option', 'flow']) at this node, invokes the task
    module's setup(), then restores the previous values.

    Returns True when the node is kept, False when setup() returned a
    reason to skip the node (its status is then set to SKIPPED).
    Raises SiliconCompilerError if the task has no setup() function.
    '''
    # Save scratchpad state so it can be restored after setup() runs.
    preset_step = chip.get('arg', 'step')
    preset_index = chip.get('arg', 'index')
    preset_flow = chip.get('option', 'flow')

    if flow:
        chip.set('option', 'flow', flow)

    chip.set('arg', 'step', step)
    chip.set('arg', 'index', index)
    tool, task = get_tool_task(chip, step, index, flow=flow)

    # Run node setup.
    setup_ret = None
    try:
        setup_step = getattr(chip._get_task_module(step, index), 'setup', None)
    except SiliconCompilerError:
        # Task module could not be resolved; treated the same as no setup().
        setup_step = None
    if setup_step:
        try:
            chip.logger.info(f'Setting up node {step}{index} with {tool}/{task}')
            setup_ret = setup_step(chip)
        except Exception as e:
            chip.logger.error(f'Failed to run setup() for {tool}/{task}')
            raise e
    else:
        raise SiliconCompilerError(f'setup() not found for tool {tool}, task {task}', chip=chip)

    # Need to restore step/index, otherwise we will skip setting up other indices.
    chip.set('option', 'flow', preset_flow)
    chip.set('arg', 'step', preset_step)
    chip.set('arg', 'index', preset_index)

    # A non-None return from setup() is the task's reason to skip this node.
    if setup_ret is not None:
        chip.logger.warning(f'Removing {step}{index} due to {setup_ret}')
        chip.set('record', 'status', NodeStatus.SKIPPED, step=step, index=index)

        return False

    return True
338
+
339
+
340
def _check_version(chip, reported_version, tool, step, index):
    '''
    Validate the tool's reported version against configured specifiers.

    The version passes when it satisfies at least one of the specifier
    sets in ['tool', <tool>, 'version'] (or when none are configured).
    Tool modules may provide normalize_version() to map tool-specific
    version strings onto PEP 440 form before comparison.

    Returns True on success, False on any mismatch or parse failure.
    '''
    # Based on regex for deprecated "legacy specifier" from PyPA packaging
    # library. Use this to parse PEP-440ish specifiers with arbitrary
    # versions.
    _regex_str = r"""
        (?P<operator>(==|!=|<=|>=|<|>|~=))
        \s*
        (?P<version>
            [^,;\s)]* # Since this is a "legacy" specifier, and the version
                      # string can be just about anything, we match everything
                      # except for whitespace, a semi-colon for marker support,
                      # a closing paren since versions can be enclosed in
                      # them, and a comma since it's a version separator.
        )
        """
    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)

    normalize_version = getattr(chip._get_tool_module(step, index), 'normalize_version', None)
    # Version is good if it matches any of the specifier sets in this list.
    spec_sets = chip.get('tool', tool, 'version', step=step, index=index)
    if not spec_sets:
        return True

    for spec_set in spec_sets:
        # A specifier set is a comma-separated list, e.g. ">=1.0,<2.0".
        split_specs = [s.strip() for s in spec_set.split(",") if s.strip()]
        specs_list = []
        for spec in split_specs:
            match = re.match(_regex, spec)
            if match is None:
                # No operator found; treat the whole token as "==<spec>".
                chip.logger.warning(f'Invalid version specifier {spec}. '
                                    f'Defaulting to =={spec}.')
                operator = '=='
                spec_version = spec
            else:
                operator = match.group('operator')
                spec_version = match.group('version')
            specs_list.append((operator, spec_version))

        if normalize_version is None:
            normalized_version = reported_version
            normalized_specs = ','.join([f'{op}{ver}' for op, ver in specs_list])
        else:
            # Normalize both the reported version and the spec versions so
            # they are compared in the same form.
            try:
                normalized_version = normalize_version(reported_version)
            except Exception as e:
                chip.logger.error(f'Unable to normalize version for {tool}: {reported_version}')
                raise e
            normalized_spec_list = [f'{op}{normalize_version(ver)}' for op, ver in specs_list]
            normalized_specs = ','.join(normalized_spec_list)

        try:
            version = packaging.version.Version(normalized_version)
        except packaging.version.InvalidVersion:
            chip.logger.error(f'Version {reported_version} reported by {tool} does '
                              'not match standard.')
            if normalize_version is None:
                chip.logger.error('Tool driver should implement normalize_version().')
            else:
                chip.logger.error('normalize_version() returned '
                                  f'invalid version {normalized_version}')

            return False

        try:
            spec_set = packaging.specifiers.SpecifierSet(normalized_specs)
        except packaging.specifiers.InvalidSpecifier:
            chip.logger.error(f'Version specifier set {normalized_specs} '
                              'does not match standard.')
            return False

        if version in spec_set:
            return True

    # No specifier set matched; report the failure with all allowed sets.
    allowedstr = '; '.join(spec_sets)
    chip.logger.error(f"Version check failed for {tool}. Check installation.")
    chip.logger.error(f"Found version {reported_version}, "
                      f"did not satisfy any version specifier set {allowedstr}.")
    return False
418
+
419
+
420
+ ###########################################################################
421
def _runtask(chip, flow, step, index, exec_func, replay=False):
    '''
    Private per node run method called by run().

    The method takes in a step string and index string to indicate what
    to run.

    Note that since _runtask occurs in its own process with a separate
    address space, any changes made to the `self` object will not
    be reflected in the parent. We rely on reading/writing the chip manifest
    to the filesystem to communicate updates between processes.
    '''

    chip._init_codecs()

    chip._init_logger(step, index, in_run=True)

    # Point the scratchpad args at this node for the lifetime of the process.
    chip.set('arg', 'step', step, clobber=True)
    chip.set('arg', 'index', index, clobber=True)

    # Journal schema changes so the parent can import them later.
    chip.schema._start_journal()

    # Make record of sc version and machine
    __record_version(chip, step, index)
    # Record user information if enabled
    if chip.get('option', 'track', step=step, index=index):
        __record_usermachine(chip, step, index)

    # Start wall timer
    wall_start = time.time()
    __record_time(chip, step, index, wall_start, 'start')

    # Build (or reuse, when replaying) the node working directory and run
    # from inside it.
    workdir = _setup_workdir(chip, step, index, replay)
    cwd = os.getcwd()
    os.chdir(workdir)

    chip._add_file_logger(os.path.join(workdir, f'sc_{step}{index}.log'))

    try:
        _setupnode(chip, flow, step, index, replay)

        exec_func(chip, step, index, replay)
    except Exception as e:
        # _haltstep() terminates this process with a non-zero exit code.
        print_traceback(chip, e)
        _haltstep(chip, chip.get('option', 'flow'), step, index)

    # return to original directory
    os.chdir(cwd)
    chip.schema._stop_journal()
470
+
471
+
472
+ ###########################################################################
473
def _haltstep(chip, flow, step, index, log=True):
    '''
    Mark (step, index) as errored, persist the manifest, notify, and exit.

    Always terminates the current process with exit code 1.
    '''
    # Record the failure and flush the manifest into this node's outputs.
    chip.set('record', 'status', NodeStatus.ERROR, step=step, index=index)
    manifest = os.path.join("outputs", f"{chip.get('design')}.pkg.json")
    chip.write_manifest(manifest)

    if log:
        chip.logger.error(f"Halting step '{step}' index '{index}' due to errors.")

    send_messages.send(chip, "fail", step, index)
    sys.exit(1)
481
+
482
+
483
def _setupnode(chip, flow, step, index, replay):
    '''
    Prepare a node immediately before execution.

    Hashes setup files, snapshots the manifest into inputs/, selects and
    copies input data from upstream nodes, then runs the dynamic
    manifest check. Halts the step (process exit) on check failure.
    '''
    _hash_files(chip, step, index, setup=True)

    # Write manifest prior to step running into inputs
    chip.write_manifest(f'inputs/{chip.get("design")}.pkg.json')

    _select_inputs(chip, step, index)
    _copy_previous_steps_output_data(chip, step, index, replay)

    # Check manifest
    if not _check_manifest_dynamic(chip, step, index):
        chip.logger.error("Fatal error in check_manifest()! See previous errors.")
        _haltstep(chip, flow, step, index)
496
+
497
+
498
###########################################################################
def _write_task_manifest(chip, tool, path=None, backup=True):
    '''
    Write the tool-facing manifest (sc_manifest.<suffix>) for a task.

    Nothing is written when the tool declares no manifest format. When
    *backup* is set, an existing manifest is first preserved as
    "<name>.bak".
    '''
    suffix = chip.get('tool', tool, 'format')
    if not suffix:
        # Tool does not consume a manifest; nothing to do.
        return

    manifest_path = f"sc_manifest.{suffix}"
    if path:
        manifest_path = os.path.join(path, manifest_path)

    if backup and os.path.exists(manifest_path):
        shutil.copyfile(manifest_path, f'{manifest_path}.bak')

    chip.write_manifest(manifest_path, abspath=True)
+
511
+
512
###########################################################################
def _setup_workdir(chip, step, index, replay):
    '''
    Create (or recreate) the node's working directory tree and return it.

    Outside of replay mode any stale directory is removed first so the
    node starts from a clean slate.
    '''
    workdir = chip.getworkdir(step=step, index=index)

    # Wipe leftovers from a previous run unless replaying in place.
    if not replay and os.path.isdir(workdir):
        shutil.rmtree(workdir)

    os.makedirs(workdir, exist_ok=True)
    for subdir in ('inputs', 'outputs', 'reports'):
        os.makedirs(os.path.join(workdir, subdir), exist_ok=True)
    return workdir
+
524
+
525
def _select_inputs(chip, step, index):
    '''
    Determine which input nodes feed this node and record the selection
    under ('record', 'inputnode').

    A task module may provide its own `_select_inputs` hook (used by
    builtins such as min/max selectors); otherwise all flowgraph inputs
    of the node are taken. A non-entry node that ends up with no selected
    inputs is halted as an error.
    '''
    flow = chip.get('option', 'flow')
    tool, _ = get_tool_task(chip, step, index, flow)
    sel_inputs = []

    # Prefer a task-specific selection hook when the module defines one.
    select_inputs = getattr(chip._get_task_module(step, index, flow=flow),
                            '_select_inputs',
                            None)
    if select_inputs:
        sel_inputs = select_inputs(chip, step, index)
    else:
        # Default: every flowgraph input edge feeds this node.
        sel_inputs = _get_flowgraph_node_inputs(chip, flow, (step, index))

    # Entry nodes legitimately have no inputs; anything else must.
    if (step, index) not in _get_flowgraph_entry_nodes(chip, flow) and not sel_inputs:
        chip.logger.error(f'No inputs selected after running {tool}')
        _haltstep(chip, flow, step, index)

    chip.set('record', 'inputnode', sel_inputs, step=step, index=index)
+
545
+
546
def copy_output_file(chip, outfile, folder='inputs'):
    '''
    Forward one output directory entry from a producer node into *folder*.

    Files and symlinks are transferred via utils.link_symlink_copy;
    directories are mirrored recursively with the same copy strategy.
    The design manifest (<design>.pkg.json) is never forwarded.
    '''
    dest = f'{folder}/{outfile.name}'

    # Check file/symlink first so a symlink to a directory is linked
    # rather than deep-copied.
    if outfile.is_file() or outfile.is_symlink():
        # The manifest is regenerated per node; skip it.
        if outfile.name == f"{chip.get('design')}.pkg.json":
            return
        utils.link_symlink_copy(outfile.path, dest)
    elif outfile.is_dir():
        shutil.copytree(outfile.path, dest,
                        dirs_exist_ok=True,
                        copy_function=utils.link_symlink_copy)
+
559
+
560
def forward_output_files(chip, step, index):
    '''
    Pass the outputs of every selected input node straight through into
    this node's outputs/ directory (used when the node is skipped).
    '''
    input_nodes = chip.get('record', 'inputnode', step=step, index=index)
    for src_step, src_index in input_nodes:
        src_outputs = os.path.join(chip.getworkdir(step=src_step, index=src_index),
                                   'outputs')
        for entry in os.scandir(src_outputs):
            copy_output_file(chip, entry, folder='outputs')
+
566
+
567
def _copy_previous_steps_output_data(chip, step, index, replay):
    '''
    Copy (link) output data from previous steps into this node's inputs/.

    Input nodes are taken from the recorded 'inputnode' selection when
    present, otherwise from the pruned flowgraph inputs. A failed input
    node halts this node. In strict mode only files declared in the
    task's 'input' list (under either their plain or node-qualified name)
    are copied; node-qualified names are renamed after the copy.
    '''

    flow = chip.get('option', 'flow')
    # Resolve the effective input node list: pruned flowgraph inputs are
    # the fallback when no explicit 'inputnode' selection was recorded.
    if not _get_pruned_node_inputs(chip, flow, (step, index)):
        all_inputs = []
    elif not chip.get('record', 'inputnode', step=step, index=index):
        all_inputs = _get_pruned_node_inputs(chip, flow, (step, index))
    else:
        all_inputs = chip.get('record', 'inputnode', step=step, index=index)

    strict = chip.get('option', 'strict')
    tool, task = get_tool_task(chip, step, index)
    in_files = chip.get('tool', tool, 'task', task, 'input', step=step, index=index)
    for in_step, in_index in all_inputs:
        # A failed producer poisons this node; stop immediately.
        if chip.get('record', 'status', step=in_step, index=in_index) == NodeStatus.ERROR:
            chip.logger.error(f'Halting step due to previous error in {in_step}{in_index}')
            _haltstep(chip, flow, step, index)

        # Skip copying pkg.json files here, since we write the current chip
        # configuration into inputs/{design}.pkg.json earlier in _runstep.
        if not replay:
            in_workdir = chip.getworkdir(step=in_step, index=in_index)

            for outfile in os.scandir(f"{in_workdir}/outputs"):
                # Node-qualified name, e.g. "top.syn0.v", used when a task
                # wants inputs disambiguated per producing node.
                new_name = input_file_node_name(outfile.name, in_step, in_index)
                if strict:
                    # Strict mode: only declared inputs are forwarded.
                    if outfile.name not in in_files and new_name not in in_files:
                        continue

                copy_output_file(chip, outfile)

                if new_name in in_files:
                    # perform rename to the node-qualified form
                    os.rename(f'inputs/{outfile.name}', f'inputs/{new_name}')
+
605
+
606
def __read_std_streams(chip, quiet, is_stdout_log, stdout_reader, is_stderr_log, stderr_reader):
    '''
    Drain any newly written tool stdout/stderr lines into the logger.

    Does nothing in quiet mode. stdout lines are logged at info level,
    stderr lines at error level.
    '''
    if quiet:
        return
    if is_stdout_log:
        for out_line in stdout_reader.readlines():
            chip.logger.info(out_line.rstrip())
    if is_stderr_log:
        for err_line in stderr_reader.readlines():
            chip.logger.error(err_line.rstrip())
+
618
+
619
############################################################################
# Chip helper Functions
############################################################################
def _getexe(chip, tool, step, index):
    '''
    Resolve the absolute path of a tool's executable.

    Returns None when the tool declares no 'exe' or when the executable
    cannot be located on the search path.
    '''
    exe = chip.get('tool', tool, 'exe')
    if exe is None:
        return None

    # Build the search path: the tool's schema 'path' (with environment
    # variables expanded) takes precedence over the system PATH.
    search_path = os.getenv('PATH', os.defpath)
    tool_path = chip.get('tool', tool, 'path', step=step, index=index)
    if tool_path:
        expanded = utils._resolve_env_vars(chip, tool_path)
        search_path = expanded + os.pathsep + search_path

    return shutil.which(exe, path=search_path)
+
637
+
638
#######################################
def _makecmd(chip, tool, task, step, index, script_name='replay.sh', include_path=True):
    '''
    Constructs a subprocess run command based on eda tool setup.
    Creates a replay script in current directory.

    The command is assembled from the tool executable, schema-defined
    options, task scripts and (optional) module-provided runtime options.
    A bash replay script reproducing the environment and command is also
    written and made executable.

    Returns:
        runnable command (list)
        printable command (str)
        command name (str)
        command arguments (list)
    '''

    fullexe = _getexe(chip, tool, step, index)

    is_posix = __is_posix()

    def parse_options(options):
        # Split raw option strings shell-style, but keep fully quoted
        # strings intact as a single double-quoted argument.
        if not options:
            return []
        shlex_opts = []
        for option in options:
            option = option.strip()
            if (option.startswith("\"") and option.endswith("\"")) or \
                    (option.startswith("'") and option.endswith("'")):
                # Make sure strings are quoted in double quotes
                shlex_opts.append(f'"{option[1:-1]}"')
            else:
                shlex_opts.extend(shlex.split(option, posix=is_posix))
        return shlex_opts

    # Add scripts files
    scripts = chip.find_files('tool', tool, 'task', task, 'script', step=step, index=index)

    cmdlist = [fullexe]
    cmdlist.extend(parse_options(chip.get('tool', tool, 'task', task, 'option',
                                          step=step, index=index)))
    cmdlist.extend(scripts)

    # Task module runtime_options takes precedence over the tool module's.
    runtime_options = getattr(chip._get_task_module(step, index), 'runtime_options', None)
    if not runtime_options:
        runtime_options = getattr(chip._get_tool_module(step, index), 'runtime_options', None)
    if runtime_options:
        try:
            cmdlist.extend(parse_options(runtime_options(chip)))
        except Exception as e:
            chip.logger.error(f'Failed to get runtime options for {tool}/{task}')
            raise e

    # Collect environment variables for the replay script: global env,
    # license servers, (optionally) PATH/LD_LIBRARY_PATH, then task env.
    envvars = {}
    for key in chip.getkeys('option', 'env'):
        envvars[key] = chip.get('option', 'env', key)
    for item in chip.getkeys('tool', tool, 'licenseserver'):
        license_file = chip.get('tool', tool, 'licenseserver', item, step=step, index=index)
        if license_file:
            envvars[item] = ':'.join(license_file)

    if include_path:
        path = chip.get('tool', tool, 'path', step=step, index=index)
        if path:
            envvars['PATH'] = path + os.pathsep + os.environ['PATH']
        else:
            envvars['PATH'] = os.environ['PATH']

        # Forward additional variables
        for var in ('LD_LIBRARY_PATH',):
            val = os.getenv(var, None)
            if val:
                envvars[var] = val

    for key in chip.getkeys('tool', tool, 'task', task, 'env'):
        val = chip.get('tool', tool, 'task', task, 'env', key, step=step, index=index)
        if val:
            envvars[key] = val

    # Separate variables to be able to display nice name of executable
    cmd = os.path.basename(cmdlist[0])
    cmd_args = cmdlist[1:]
    print_cmd = " ".join([cmd, *cmd_args])
    cmdlist = [cmdlist[0]]
    for arg in cmd_args:
        if arg.startswith("\"") and arg.endswith("\""):
            # Remove quoting since subprocess will handle that for us
            cmdlist.append(arg[1:-1])
        else:
            cmdlist.append(arg)

    # create replay file
    with open(script_name, 'w') as f:
        print('#!/usr/bin/env bash', file=f)

        envvar_cmd = 'export'
        for key, val in envvars.items():
            print(f'{envvar_cmd} {key}="{val}"', file=f)

        # Ensure execution runs from the same directory
        work_dir = chip.getworkdir(step=step, index=index)
        if chip._relative_path:
            work_dir = os.path.relpath(work_dir, chip._relative_path)
        print(f'cd {work_dir}', file=f)

        # Pretty-print the command: start a new line at each flag or
        # absolute path, folding bare values onto the preceding flag.
        format_cmd = [chip.get('tool', tool, 'exe')]
        arg_test = re.compile(r'^[-+]')
        file_test = re.compile(r'^[/]')
        for cmdarg in cmd_args:
            add_new_line = len(format_cmd) == 1

            if arg_test.match(cmdarg) or file_test.match(cmdarg):
                add_new_line = True
            else:
                if not arg_test.match(format_cmd[-1]):
                    add_new_line = True

            if add_new_line:
                format_cmd.append(cmdarg)
            else:
                format_cmd[-1] += f' {cmdarg}'
        print(" \\\n ".join(format_cmd), file=f)

    # Make the replay script executable for the user.
    os.chmod(script_name, 0o755)

    return cmdlist, print_cmd, cmd, cmd_args
+
761
+
762
def _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, run_func=None):
    '''
    Run executable (or copy inputs to outputs for builtin functions).

    When *run_func* is provided (pure-Python task) it is invoked directly;
    otherwise the tool command line is built and launched as a subprocess,
    optionally under a pty for breakpoints on POSIX platforms. Tool exit
    code, peak memory and any failure state are recorded on the chip.

    Fix: use chip.logger.warning() — Logger.warn() is a deprecated alias
    in the Python logging module.
    '''

    flow = chip.get('option', 'flow')
    top = chip.top()
    tool, task = get_tool_task(chip, step, index, flow)

    # Breakpoints force interactive (non-quiet) operation.
    quiet = (
        chip.get('option', 'quiet', step=step, index=index) and not
        chip.get('option', 'breakpoint', step=step, index=index)
    )

    # TODO: Currently no memory usage tracking in breakpoints, builtins, or unexpected errors.
    max_mem_bytes = 0

    retcode = 0
    cmdlist = []
    cmd_args = []
    if run_func:
        # Pure-Python task: no log file, run inside this process.
        logfile = None
        try:
            retcode = run_func(chip)
        except Exception as e:
            chip.logger.error(f'Failed in run() for {tool}/{task}: {e}')
            retcode = 1  # default to non-zero
            print_traceback(chip, e)
            chip._error = True
        finally:
            try:
                if resource:
                    # Since memory collection is not possible, collect the current process
                    # peak memory
                    max_mem_bytes = max(
                        max_mem_bytes,
                        1024 * resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
            except (OSError, ValueError, PermissionError):
                pass
    else:
        cmdlist, printable_cmd, _, cmd_args = _makecmd(chip, tool, task, step, index)

        ##################
        # Make record of tool options
        __record_tool(chip, step, index, version, toolpath, cmd_args)

        chip.logger.info('Running in %s', workdir)
        chip.logger.info('%s', printable_cmd)
        timeout = chip.get('option', 'timeout', step=step, index=index)
        logfile = step + '.log'
        if sys.platform in ('darwin', 'linux') and \
                chip.get('option', 'breakpoint', step=step, index=index):
            # When we break on a step, the tool often drops into a shell.
            # However, our usual subprocess scheme seems to break terminal
            # echo for some tools. On POSIX-compatible systems, we can use
            # pty to connect the tool to our terminal instead. This code
            # doesn't handle quiet/timeout logic, since we don't want either
            # of these features for an interactive session. Logic for
            # forwarding to file based on
            # https://docs.python.org/3/library/pty.html#example.
            with open(logfile, 'wb') as log_writer:
                def read(fd):
                    data = os.read(fd, 1024)
                    log_writer.write(data)
                    return data
                import pty  # Note: this import throws exception on Windows
                retcode = pty.spawn(cmdlist, read)
        else:
            # Resolve where stdout goes: a log file, an output file, or
            # discarded entirely.
            stdout_file = ''
            stdout_suffix = chip.get('tool', tool, 'task', task, 'stdout', 'suffix',
                                     step=step, index=index)
            stdout_destination = chip.get('tool', tool, 'task', task, 'stdout', 'destination',
                                          step=step, index=index)
            if stdout_destination == 'log':
                stdout_file = step + "." + stdout_suffix
            elif stdout_destination == 'output':
                stdout_file = os.path.join('outputs', top + "." + stdout_suffix)
            elif stdout_destination == 'none':
                stdout_file = os.devnull
            else:
                chip.logger.error(f'stdout/destination has no support for {stdout_destination}. '
                                  'Use [log|output|none].')
                _haltstep(chip, flow, step, index)

            stderr_file = ''
            stderr_suffix = chip.get('tool', tool, 'task', task, 'stderr', 'suffix',
                                     step=step, index=index)
            stderr_destination = chip.get('tool', tool, 'task', task, 'stderr', 'destination',
                                          step=step, index=index)
            if stderr_destination == 'log':
                stderr_file = step + "." + stderr_suffix
            elif stderr_destination == 'output':
                stderr_file = os.path.join('outputs', top + "." + stderr_suffix)
            elif stderr_destination == 'none':
                stderr_file = os.devnull
            else:
                chip.logger.error(f'stderr/destination has no support for {stderr_destination}. '
                                  'Use [log|output|none].')
                _haltstep(chip, flow, step, index)

            with open(stdout_file, 'w') as stdout_writer, \
                    open(stdout_file, 'r', errors='replace_with_warning') as stdout_reader, \
                    open(stderr_file, 'w') as stderr_writer, \
                    open(stderr_file, 'r', errors='replace_with_warning') as stderr_reader:
                # Use separate reader/writer file objects as hack to display
                # live output in non-blocking way, so we can monitor the
                # timeout. Based on https://stackoverflow.com/a/18422264.
                is_stdout_log = chip.get('tool', tool, 'task', task, 'stdout', 'destination',
                                         step=step, index=index) == 'log'
                is_stderr_log = stderr_destination == 'log' and stderr_file != stdout_file
                # if STDOUT and STDERR are to be redirected to the same file,
                # use a single writer
                if stderr_file == stdout_file:
                    stderr_writer.close()
                    stderr_reader.close()
                    stderr_writer = subprocess.STDOUT

                preexec_fn = None
                nice = None
                if __is_posix():
                    nice = chip.get('option', 'nice', step=step, index=index)

                    def set_nice():
                        os.nice(nice)

                    if nice:
                        preexec_fn = set_nice

                cmd_start_time = time.time()
                proc = subprocess.Popen(cmdlist,
                                        stdout=stdout_writer,
                                        stderr=stderr_writer,
                                        preexec_fn=preexec_fn)
                # How long to wait for proc to quit on ctrl-c before force
                # terminating.
                POLL_INTERVAL = 0.1
                MEMORY_WARN_LIMIT = 90
                try:
                    while proc.poll() is None:
                        # Gather subprocess memory usage.
                        try:
                            pproc = psutil.Process(proc.pid)
                            proc_mem_bytes = pproc.memory_full_info().uss
                            for child in pproc.children(recursive=True):
                                proc_mem_bytes += child.memory_full_info().uss
                            max_mem_bytes = max(max_mem_bytes, proc_mem_bytes)

                            memory_usage = psutil.virtual_memory()
                            if memory_usage.percent > MEMORY_WARN_LIMIT:
                                # Fix: Logger.warn() is deprecated; use warning().
                                chip.logger.warning(
                                    f'Current system memory usage is {memory_usage.percent}%')

                                # increase limit warning
                                MEMORY_WARN_LIMIT = int(memory_usage.percent + 1)
                        except psutil.Error:
                            # Process may have already terminated or been killed.
                            # Retain existing memory usage statistics in this case.
                            pass
                        except PermissionError:
                            # OS is preventing access to this information so it cannot
                            # be collected
                            pass

                        # Loop until process terminates
                        __read_std_streams(chip,
                                           quiet,
                                           is_stdout_log, stdout_reader,
                                           is_stderr_log, stderr_reader)

                        if timeout is not None and time.time() - cmd_start_time > timeout:
                            chip.logger.error(f'Step timed out after {timeout} seconds')
                            utils.terminate_process(proc.pid)
                            raise SiliconCompilerTimeout(f'{step}{index} timeout')
                        time.sleep(POLL_INTERVAL)
                except KeyboardInterrupt:
                    kill_process(chip, proc, tool, 5 * POLL_INTERVAL, msg="Received ctrl-c. ")
                    _haltstep(chip, flow, step, index, log=False)
                except SiliconCompilerTimeout:
                    send_messages.send(chip, "timeout", step, index)
                    kill_process(chip, proc, tool, 5 * POLL_INTERVAL)
                    chip._error = True

                # Read the remaining
                __read_std_streams(chip,
                                   quiet,
                                   is_stdout_log, stdout_reader,
                                   is_stderr_log, stderr_reader)
                retcode = proc.returncode

    chip.set('record', 'toolexitcode', retcode, step=step, index=index)
    if retcode != 0:
        msg = f'Command failed with code {retcode}.'
        if logfile:
            if quiet:
                # Print last 10 lines of log when in quiet mode
                with sc_open(logfile) as logfd:
                    loglines = logfd.read().splitlines()
                    for logline in loglines[-10:]:
                        chip.logger.error(logline)
                # No log file for pure-Python tools.
            msg += f' See log file {os.path.abspath(logfile)}'
        chip.logger.warning(msg)
        chip._error = True

    # Capture memory usage
    record_metric(chip, step, index, 'memory', max_mem_bytes, source=None, source_unit='B')
+
969
+
970
def _post_process(chip, step, index):
    '''
    Invoke the task module's optional post_process() hook.

    Hook failures are logged and flagged on the chip rather than raised,
    so the node can still be finalized afterwards.
    '''
    flow = chip.get('option', 'flow')
    tool, task = get_tool_task(chip, step, index, flow)
    task_module = chip._get_task_module(step, index, flow=flow)
    post_process = getattr(task_module, 'post_process', None)
    if post_process is None:
        return
    try:
        post_process(chip)
    except Exception as e:
        chip.logger.error(f'Failed to run post-process for {tool}/{task}.')
        print_traceback(chip, e)
        chip._error = True
+
982
+
983
def _check_logfile(chip, step, index, quiet=False, run_func=None):
    '''
    Check log file (must be after post-process).

    Scans the tool's <step>.log for the task's regex patterns and folds
    any 'errors'/'warnings' match counts into the corresponding metrics,
    accumulating on top of values a post_process() may already have set.
    Skipped entirely for pure-Python tasks (run_func set), which produce
    no log file.
    '''
    if run_func is None:
        log_file = os.path.join(chip.getworkdir(step=step, index=index), f'{step}.log')
        matches = check_logfile(chip, step=step, index=index,
                                display=not quiet,
                                logfile=log_file)
        if 'errors' in matches:
            # Accumulate onto any errors already recorded for this node.
            errors = chip.get('metric', 'errors', step=step, index=index)
            if errors is None:
                errors = 0
            errors += matches['errors']
            record_metric(chip, step, index, 'errors', errors, f'{step}.log')
        if 'warnings' in matches:
            # Same accumulation logic for warnings.
            warnings = chip.get('metric', 'warnings', step=step, index=index)
            if warnings is None:
                warnings = 0
            warnings += matches['warnings']
            record_metric(chip, step, index, 'warnings', warnings, f'{step}.log')
+
1005
+
1006
def _executenode(chip, step, index, replay):
    '''
    Execute a single flowgraph node.

    Runs the task's pre-process hook, then either forwards inputs to
    outputs (for a SKIPPED node) or launches the tool/builtin, records
    the execution time metric, runs post-processing and finalizes the
    node. Progress notifications are emitted at each phase.
    '''
    workdir = chip.getworkdir(step=step, index=index)
    flow = chip.get('option', 'flow')
    tool, _ = get_tool_task(chip, step, index, flow)

    # pre_process() may mark the node SKIPPED; check status afterwards.
    _pre_process(chip, step, index)

    if chip.get('record', 'status', step=step, index=index) == NodeStatus.SKIPPED:
        # copy inputs to outputs and skip execution
        forward_output_files(chip, step, index)

        send_messages.send(chip, "skipped", step, index)
    else:
        _set_env_vars(chip, step, index)

        run_func = getattr(chip._get_task_module(step, index, flow=flow), 'run', None)
        (toolpath, version) = _check_tool_version(chip, step, index, run_func)

        # Write manifest (tool interface) (Don't move this!)
        _write_task_manifest(chip, tool)

        send_messages.send(chip, "begin", step, index)

        # Start CPU Timer
        chip.logger.debug("Starting executable")
        cpu_start = time.time()

        _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, run_func)

        # Capture cpu runtime
        cpu_end = time.time()
        cputime = round((cpu_end - cpu_start), 2)
        record_metric(chip, step, index, 'exetime', cputime, source=None, source_unit='s')

        _post_process(chip, step, index)

    _finalizenode(chip, step, index, replay)

    send_messages.send(chip, "end", step, index)
+
1046
+
1047
def _pre_process(chip, step, index):
    '''
    Invoke the task module's optional pre_process() hook and halt the
    node when pre-processing flags an error on the chip.

    Exceptions raised by the hook are logged and re-raised to the caller.
    '''
    flow = chip.get('option', 'flow')
    tool, task = get_tool_task(chip, step, index, flow)
    pre_process = getattr(chip._get_task_module(step, index, flow=flow),
                          'pre_process', None)
    if pre_process is not None:
        try:
            pre_process(chip)
        except Exception as e:
            # Surface the failing task before propagating.
            chip.logger.error(f"Pre-processing failed for '{tool}/{task}'.")
            raise e
    if chip._error:
        chip.logger.error(f"Pre-processing failed for '{tool}/{task}'")
        _haltstep(chip, flow, step, index)
+
1061
+
1062
def _set_env_vars(chip, step, index):
    '''
    Export license-server and task-specific environment variables into
    the current process environment before launching the tool.
    '''
    flow = chip.get('option', 'flow')
    tool, task = get_tool_task(chip, step, index, flow)

    # License file configuration: multiple servers joined with ':'.
    for server in chip.getkeys('tool', tool, 'licenseserver'):
        servers = chip.get('tool', tool, 'licenseserver', server,
                           step=step, index=index)
        if servers:
            os.environ[server] = ':'.join(servers)

    # Tool-specific environment variables for this task.
    for name in chip.getkeys('tool', tool, 'task', task, 'env'):
        value = chip.get('tool', tool, 'task', task, 'env', name,
                         step=step, index=index)
        if value:
            os.environ[name] = value
+
1077
+
1078
def _check_tool_version(chip, step, index, run_func=None):
    '''
    Check exe version.

    Locates the tool executable and, when the tool defines a version
    switch ('vswitch'), runs it, parses the reported version with the
    tool module's parse_version() and validates it against the schema
    (unless 'novercheck' is set). Halts the node on a missing executable
    (for non-Python tasks), missing parse_version() or failed version
    check.

    Returns:
        (exe path or None, parsed version string or None)
    '''

    flow = chip.get('option', 'flow')
    tool, task = get_tool_task(chip, step, index, flow)

    vercheck = not chip.get('option', 'novercheck', step=step, index=index)
    veropt = chip.get('tool', tool, 'vswitch')
    exe = _getexe(chip, tool, step, index)
    version = None
    if exe is not None:
        exe_path, exe_base = os.path.split(exe)
        if veropt:
            # Run "<exe> <vswitch...>" and capture combined stdout/stderr.
            cmdlist = [exe]
            cmdlist.extend(veropt)
            proc = subprocess.run(cmdlist,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  universal_newlines=True)
            if proc.returncode != 0:
                # Non-fatal here; parse_version may still succeed.
                chip.logger.warning(f'Version check on {tool} failed with '
                                    f'code {proc.returncode}')

            parse_version = getattr(chip._get_tool_module(step, index, flow=flow),
                                    'parse_version',
                                    None)
            if parse_version is None:
                chip.logger.error(f'{tool}/{task} does not implement parse_version().')
                _haltstep(chip, flow, step, index)
            try:
                version = parse_version(proc.stdout)
            except Exception as e:
                chip.logger.error(f'{tool} failed to parse version string: {proc.stdout}')
                raise e

            chip.logger.info(f"Tool '{exe_base}' found with version '{version}' "
                             f"in directory '{exe_path}'")
            if vercheck and not _check_version(chip, version, tool, step, index):
                # Include the raw tool output when the version query itself
                # returned a failure code.
                if proc.returncode != 0:
                    chip.logger.error(f"Tool '{exe_base}' responded with: {proc.stdout}")
                _haltstep(chip, flow, step, index)
        else:
            chip.logger.info(f"Tool '{exe_base}' found in directory '{exe_path}'")
    elif run_func is None:
        # Only a hard error for tasks that actually launch an executable.
        exe_base = chip.get('tool', tool, 'exe')
        chip.logger.error(f'Executable {exe_base} not found')
        _haltstep(chip, flow, step, index)
    return (exe, version)
+
1129
+
1130
def _hash_files(chip, step, index, setup=False):
    '''
    Hash the task's files when the 'hash' option is enabled.

    In setup mode (before the run) the task's scripts/refdirs and all
    file/dir requirements are hashed; after the run the task outputs are
    hashed instead. No-op when a prior error has been flagged.
    '''
    if chip._error:
        return

    flow = chip.get('option', 'flow')
    tool, task = get_tool_task(chip, step, index, flow)
    if chip.get('option', 'hash'):
        if not setup:
            # hash all outputs
            chip.hash_files('tool', tool, 'task', task, 'output',
                            step=step, index=index, check=False, verbose=False)
        else:
            # Hash the static task collateral before the run starts.
            for task_key in ('refdir', 'prescript', 'postscript', 'script'):
                chip.hash_files('tool', tool, 'task', task, task_key,
                                step=step, index=index, check=False,
                                allow_cache=True, verbose=False)

        # hash all requirements
        for item in set(chip.get('tool', tool, 'task', task, 'require', step=step, index=index)):
            args = item.split(',')
            sc_type = chip.get(*args, field='type')
            if 'file' in sc_type or 'dir' in sc_type:
                pernode = chip.get(*args, field='pernode')
                if pernode == 'never':
                    if not setup:
                        # Skip keys already hashed during setup.
                        if chip.get(*args, field='filehash'):
                            continue
                    chip.hash_files(*args, check=False, allow_cache=True, verbose=False)
                else:
                    if not setup:
                        # Skip per-node keys already hashed during setup.
                        if chip.get(*args, field='filehash', step=step, index=index):
                            continue
                    chip.hash_files(*args, step=step, index=index,
                                    check=False, allow_cache=True, verbose=False)
+
1165
+
1166
def _finalizenode(chip, step, index, replay):
    '''
    Finalize a node after execution: scan logs, hash outputs, record
    timing metrics, persist the manifest and enforce error policy.

    Halts the node when the tool reported errors (without 'continue')
    or when a chip-level error was flagged; in strict mode the produced
    outputs are checked against the task's declared output list.
    '''
    flow = chip.get('option', 'flow')
    tool, task = get_tool_task(chip, step, index, flow)
    quiet = (
        chip.get('option', 'quiet', step=step, index=index) and not
        chip.get('option', 'breakpoint', step=step, index=index)
    )
    run_func = getattr(chip._get_task_module(step, index, flow=flow), 'run', None)

    is_skipped = chip.get('record', 'status', step=step, index=index) == NodeStatus.SKIPPED

    # Skipped nodes produced no tool log or new outputs to process.
    if not is_skipped:
        _check_logfile(chip, step, index, quiet, run_func)
        _hash_files(chip, step, index)

    # Capture wall runtime and cpu cores
    wall_end = time.time()
    __record_time(chip, step, index, wall_end, 'end')

    # calculate total time: running max of 'totaltime' over all nodes.
    total_times = []
    for check_step, check_index in _get_flowgraph_nodes(chip, flow):
        total_time = chip.get('metric', 'totaltime', step=check_step, index=check_index)
        if total_time is not None:
            total_times.append(total_time)
    if total_times:
        total_time = max(total_times)
    else:
        total_time = 0.0

    walltime = wall_end - get_record_time(chip, step, index, 'starttime')
    record_metric(chip, step, index, 'tasktime', walltime,
                  source=None, source_unit='s')
    record_metric(chip, step, index, 'totaltime', total_time + walltime,
                  source=None, source_unit='s')
    chip.logger.info(f"Finished task in {round(walltime, 2)}s")

    # Save a successful manifest
    if not is_skipped:
        chip.set('record', 'status', NodeStatus.SUCCESS, step=step, index=index)

    chip.write_manifest(os.path.join("outputs", f"{chip.get('design')}.pkg.json"))

    # Archive a reproducible testcase for failures (not when replaying).
    if chip._error and not replay:
        _make_testcase(chip, step, index)

    # Stop if there are errors
    errors = chip.get('metric', 'errors', step=step, index=index)
    if errors and not chip.get('option', 'continue', step=step, index=index):
        # TODO: should we warn if errors is not set?
        chip.logger.error(f'{tool} reported {errors} errors during {step}{index}')
        _haltstep(chip, flow, step, index)

    if chip._error:
        _haltstep(chip, flow, step, index)

    if chip.get('option', 'strict'):
        assert_output_files(chip, step, index)
+
1224
+
1225
def _make_testcase(chip, step, index):
    '''
    Archive a reproducible testcase for a failed node into the job
    working directory, bundling only the lambdapdk PDKs/libraries.
    '''
    # Imported lazily to avoid a circular import at module load time.
    from siliconcompiler.issue import generate_testcase

    testcase_args = dict(
        archive_directory=chip.getworkdir(),
        include_pdks=False,
        include_specific_pdks=lambdapdk.get_pdks(),
        include_libraries=False,
        include_specific_libraries=lambdapdk.get_libs(),
        hash_files=chip.get('option', 'hash'),
        verbose_collect=False)
    generate_testcase(chip, step, index, **testcase_args)
+
1241
+
1242
def assert_output_files(chip, step, index):
    '''
    Verify (strict mode) that the files generated in the node's outputs/
    directory exactly match the task's declared 'output' file list.

    Raises:
        SiliconCompilerError: when the generated and declared output
        file sets differ.
    '''
    flow = chip.get('option', 'flow')
    tool, task = get_tool_task(chip, step, index, flow)

    # Builtin tasks declare no tool outputs to check.
    if tool == 'builtin':
        return

    outputs = os.listdir(f'{chip.getworkdir(step=step, index=index)}/outputs')
    # The manifest is always written but never declared as a tool output.
    # Consistency fix: use chip.get('design') like the rest of this module.
    outputs.remove(f"{chip.get('design')}.pkg.json")

    output_files = chip.get('tool', tool, 'task', task, 'output',
                            step=step, index=index)

    if set(outputs) != set(output_files):
        raise SiliconCompilerError(
            f'Output files set {output_files} for {step}{index} does not match generated '
            f'outputs: {outputs}',
            chip=chip)
+
1261
+
1262
def _reset_flow_nodes(chip, flow, nodes_to_execute):
    # Reset flowgraph/records/metrics by probing build directory. We need
    # to set values to None for steps we may re-run so that merging
    # manifests from _runtask() actually updates values.

    def clear_node(step, index):
        # Reset metrics and records, keeping the remote id and status.
        for metric in chip.getkeys('metric'):
            _clear_metric(chip, step, index, metric)
        for record in chip.getkeys('record'):
            _clear_record(chip, step, index, record, preserve=['remoteid', 'status'])

    # Mark all nodes as pending
    for step, index in _get_flowgraph_nodes(chip, flow):
        chip.set('record', 'status', NodeStatus.PENDING, step=step, index=index)

    should_resume = not chip.get('option', 'clean')
    for step, index in _get_flowgraph_nodes(chip, flow):
        stepdir = chip.getworkdir(step=step, index=index)
        cfg = f"{stepdir}/outputs/{chip.get('design')}.pkg.json"

        if not os.path.isdir(stepdir) or (
                (step, index) in nodes_to_execute and not should_resume):
            # If stepdir doesn't exist, we need to re-run this task. If
            # we're not running with -resume, we also re-run anything
            # in the nodes to execute.
            clear_node(step, index)
        elif os.path.isfile(cfg):
            # Restore the previous run's status from its saved manifest.
            try:
                old_status = Schema(manifest=cfg).get('record', 'status', step=step, index=index)
                if old_status:
                    chip.set('record', 'status', old_status, step=step, index=index)
            except Exception:
                # unable to load so leave it default
                pass
        else:
            # Directory exists but no manifest: the prior run failed.
            chip.set('record', 'status', NodeStatus.ERROR, step=step, index=index)

    for step in chip.getkeys('flowgraph', flow):
        all_indices_failed = True
        for index in chip.getkeys('flowgraph', flow, step):
            if chip.get('record', 'status', step=step, index=index) == NodeStatus.SUCCESS:
                all_indices_failed = False

        if should_resume and all_indices_failed:
            # When running with -resume, we re-run any step in flowgraph that
            # had all indices fail.
            for index in chip.getkeys('flowgraph', flow, step):
                if (step, index) in nodes_to_execute:
                    clear_node(step, index)
+
1313
+
1314
def _prepare_nodes(chip, nodes_to_run, processes, local_processes, flow):
    '''
    For each node to run, prepare a process and store its dependencies.

    Populates *nodes_to_run* with each pending node's pruned input
    dependencies and *processes* with a spawned multiprocessing.Process
    per node. Nodes executed on this machine (local and docker) are also
    appended to *local_processes*; slurm nodes are deferred to the
    cluster. Scheduler init hooks are invoked once each at the end.
    '''
    # Ensure we use spawn for multiprocessing so loggers initialized correctly
    multiprocessor = multiprocessing.get_context('spawn')
    init_funcs = set()
    for (step, index) in nodes_to_execute(chip, flow):
        node = (step, index)

        # Only pending nodes need to be scheduled.
        if chip.get('record', 'status', step=step, index=index) != NodeStatus.PENDING:
            continue

        nodes_to_run[node] = _get_pruned_node_inputs(chip, flow, (step, index))

        exec_func = _executenode

        if chip.get('option', 'scheduler', 'name', step=step, index=index) == 'slurm':
            # Defer job to compute node
            # If the job is configured to run on a cluster, collect the schema
            # and send it to a compute node for deferred execution.
            init_funcs.add(slurm.init)
            exec_func = slurm._defernode
        elif chip.get('option', 'scheduler', 'name', step=step, index=index) == 'docker':
            # Run job in docker
            init_funcs.add(docker_runner.init)
            exec_func = docker_runner.run
            local_processes.append((step, index))
        else:
            local_processes.append((step, index))

        processes[node] = multiprocessor.Process(target=_runtask,
                                                 args=(chip, flow, step, index, exec_func))

    # Run each scheduler backend's init hook exactly once.
    for init_func in init_funcs:
        init_func(chip)
+
1351
+
1352
def _check_node_dependencies(chip, node, deps, deps_was_successful):
    '''
    Drop completed dependencies from ``deps`` (in place) and mark ``node``
    as errored when its dependencies make it unrunnable.
    '''
    step, index = node
    tool, task = get_tool_task(chip, step, index)
    had_deps = bool(deps)

    # Remove every dependency that is no longer pending.
    for in_node in list(deps):
        in_step, in_index = in_node
        in_status = chip.get('record', 'status', step=in_step, index=in_index)
        if in_status == NodeStatus.PENDING:
            continue

        deps.remove(in_node)
        if in_status == NodeStatus.SUCCESS:
            deps_was_successful[node] = True
        elif in_status == NodeStatus.ERROR and tool != 'builtin':
            # Non-builtin tasks fail as soon as any dependency fails.
            deps.clear()
            chip.set('record', 'status', NodeStatus.ERROR, step=step, index=index)
            return

    # Builtin tasks fail only when every dependency finished and none succeeded.
    if had_deps and not deps \
            and tool == 'builtin' and not deps_was_successful.get(node):
        chip.set('record', 'status', NodeStatus.ERROR, step=step, index=index)
1375
+
1376
+
1377
def _launch_nodes(chip, nodes_to_run, processes, local_processes):
    '''
    Main scheduling loop: start node processes as their dependencies
    complete and block until every node has run or failed.

    Args:
        chip (Chip): Chip object for the run.
        nodes_to_run (dict): Maps (step, index) to its remaining input
            dependencies; entries are consumed as nodes launch or fail.
        processes (dict): Maps (step, index) to its prepared
            multiprocessing.Process.
        local_processes (list): Nodes that execute on this machine and are
            therefore subject to the local thread/parallelism limits.

    Raises:
        SiliconCompilerError: if nodes remain but none can run (deadlock).
    '''
    # Maps currently-running node -> thread count reserved for it locally.
    running_nodes = {}
    max_parallel_run = chip.get('option', 'scheduler', 'maxnodes')
    max_threads = os.cpu_count()
    if not max_parallel_run:
        max_parallel_run = max_threads

    # clip max parallel jobs to 1 <= jobs <= max_threads
    max_parallel_run = max(1, min(max_parallel_run, max_threads))

    def allow_start(node):
        # Decide whether `node` may launch now; returns (allowed, threads)
        # where `threads` is the local thread budget to reserve for it.
        if node not in local_processes:
            # using a different scheduler, so allow
            return True, 0

        if len(running_nodes) >= max_parallel_run:
            return False, 0

        # Record thread count requested
        step, index = node
        tool, task = get_tool_task(chip, step, index)
        requested_threads = chip.get('tool', tool, 'task', task, 'threads',
                                     step=step, index=index)
        if not requested_threads:
            # not specified, marking it max to be safe
            requested_threads = max_threads
        # clamp to max_parallel to avoid getting locked up
        requested_threads = max(1, min(requested_threads, max_threads))

        if requested_threads + sum(running_nodes.values()) > max_threads:
            # delay until there are enough core available
            return False, 0

        # allow and record how many threads to associate
        return True, requested_threads

    deps_was_successful = {}

    if _get_callback('pre_run'):
        _get_callback('pre_run')(chip)

    while len(nodes_to_run) > 0 or len(running_nodes) > 0:
        # Reap finished processes and merge their results first.
        _process_completed_nodes(chip, processes, running_nodes)

        # Check for new nodes that can be launched.
        for node, deps in list(nodes_to_run.items()):
            # TODO: breakpoint logic:
            # if node is breakpoint, then don't launch while len(running_nodes) > 0

            _check_node_dependencies(chip, node, deps, deps_was_successful)

            if chip.get('record', 'status', step=node[0], index=node[1]) == NodeStatus.ERROR:
                # Node was marked failed by the dependency check; drop it.
                del nodes_to_run[node]
                continue

            # If there are no dependencies left, launch this node and
            # remove from nodes_to_run.
            if len(deps) == 0:
                dostart, requested_threads = allow_start(node)

                if dostart:
                    if _get_callback('pre_node'):
                        _get_callback('pre_node')(chip, *node)

                    processes[node].start()
                    del nodes_to_run[node]
                    running_nodes[node] = requested_threads

        # Check for situation where we have stuff left to run but don't
        # have any nodes running. This shouldn't happen, but we will get
        # stuck in an infinite loop if it does, so we want to break out
        # with an explicit error.
        if len(nodes_to_run) > 0 and len(running_nodes) == 0:
            raise SiliconCompilerError(
                'Nodes left to run, but no running nodes. From/to may be invalid.', chip=chip)

        # TODO: exponential back-off with max?
        time.sleep(0.1)
1455
+
1456
+
1457
def _process_completed_nodes(chip, processes, running_nodes):
    '''
    Reap finished node processes: merge each finished node's output manifest
    into the parent schema, determine its final status, and record it.

    Args:
        chip (Chip): Chip object for the run.
        processes (dict): Maps (step, index) to its multiprocessing.Process.
        running_nodes (dict): Maps currently-running (step, index) to its
            reserved thread count; finished entries are removed in place.
    '''
    for node in list(running_nodes.keys()):
        if processes[node].is_alive():
            continue

        step, index = node
        manifest = os.path.join(chip.getworkdir(step=step, index=index),
                                'outputs',
                                f'{chip.design}.pkg.json')
        chip.logger.debug(f'{step}{index} is complete merging: {manifest}')
        if os.path.exists(manifest):
            # Pull the node's recorded schema changes into the parent manifest.
            chip.schema.read_journal(manifest)

        del running_nodes[node]
        # Bug fix: was `exitcode > 0`, which missed children killed by a
        # signal (negative exitcode). Any nonzero exit is a failure.
        if processes[node].exitcode != 0:
            status = NodeStatus.ERROR
        else:
            status = chip.get('record', 'status', step=step, index=index)
            if not status or status == NodeStatus.PENDING:
                # Exited cleanly but never reported a status: treat as error.
                status = NodeStatus.ERROR

        chip.set('record', 'status', status, step=step, index=index)

        if _get_callback('post_node'):
            _get_callback('post_node')(chip, *node)
1480
+
1481
+
1482
def _check_nodes_status(chip, flow):
    '''
    Raise SiliconCompilerError if any final step of the flow could not be
    reached by a successful (or skipped) node.
    '''
    def node_ok(node):
        step, index = node
        status = chip.get('record', 'status', step=step, index=index)
        return status in (NodeStatus.SUCCESS, NodeStatus.SKIPPED)

    unreachable = _unreachable_steps_to_execute(chip, flow, cond=node_ok)
    if unreachable:
        raise SiliconCompilerError(
            f'These final steps could not be reached: {list(unreachable)}', chip=chip)
1491
+
1492
+
1493
+ #######################################
1494
def __record_version(chip, step, index):
    '''Record the SiliconCompiler version that ran this node.'''
    chip.set('record', 'scversion', _metadata.version, step=step, index=index)
1496
+
1497
+
1498
+ #######################################
1499
def __record_time(chip, step, index, record_time, timetype):
    '''
    Store a node start/end timestamp in the record schema.

    Args:
        record_time (float): POSIX timestamp to record.
        timetype (str): Either 'start' or 'end'.

    Raises:
        ValueError: if ``timetype`` is not 'start' or 'end'.
    '''
    stamp = datetime.fromtimestamp(record_time).strftime('%Y-%m-%d %H:%M:%S')

    record_keys = {'start': 'starttime', 'end': 'endtime'}
    if timetype not in record_keys:
        raise ValueError(f'{timetype} is not a valid time record')

    chip.set('record', record_keys[timetype], stamp, step=step, index=index)
1510
+
1511
+
1512
def get_record_time(chip, step, index, timetype):
    '''
    Return the POSIX timestamp of a node's recorded start/end time.

    Args:
        timetype (str): Record key to read, e.g. 'starttime' or 'endtime'.
    '''
    recorded = chip.get('record', timetype, step=step, index=index)
    parsed = datetime.strptime(recorded, '%Y-%m-%d %H:%M:%S')
    return parsed.timestamp()
1516
+
1517
+
1518
+ #######################################
1519
def __record_tool(chip, step, index, toolversion=None, toolpath=None, cli_args=None):
    '''
    Record tool provenance (version, executable path, command-line args)
    for a node. Each field is only written when a value is supplied.
    '''
    if toolversion:
        chip.set('record', 'toolversion', toolversion, step=step, index=index)

    if toolpath:
        chip.set('record', 'toolpath', toolpath, step=step, index=index)

    if cli_args is not None:
        # Quote arguments containing spaces so the recorded command line
        # remains unambiguous.
        quoted = [f'"{arg}"' if ' ' in arg else arg for arg in cli_args]
        chip.set('record', 'toolargs', ' '.join(quoted), step=step, index=index)
1529
+
1530
+
1531
+ #######################################
1532
def _get_cloud_region():
    '''Return the cloud region identifier for this run (currently always 'local').'''
    # TODO: add logic to figure out if we're running on a remote cluster and
    # extract the region in a provider-specific way.
    return 'local'
1536
+
1537
+
1538
+ #######################################
1539
def __record_usermachine(chip, step, index):
    '''
    Record user/machine provenance for a node: platform, OS and kernel
    versions, architecture, user id, hostname, cloud region, and (best
    effort) the primary network interface's IP and MAC addresses.
    '''
    machine_info = _get_machine_info()
    chip.set('record', 'platform', machine_info['system'], step=step, index=index)

    if machine_info['distro']:
        chip.set('record', 'distro', machine_info['distro'], step=step, index=index)

    chip.set('record', 'osversion', machine_info['osversion'], step=step, index=index)

    if machine_info['kernelversion']:
        chip.set('record', 'kernelversion', machine_info['kernelversion'], step=step, index=index)

    chip.set('record', 'arch', machine_info['arch'], step=step, index=index)

    chip.set('record', 'userid', getpass.getuser(), step=step, index=index)

    chip.set('record', 'machine', platform.node(), step=step, index=index)

    chip.set('record', 'region', _get_cloud_region(), step=step, index=index)

    try:
        # Find the first non-loopback interface with a routable IPv4 address
        # and record its IP (IPv4 preferred, IPv6 fallback) and MAC.
        for interface, addrs in psutil.net_if_addrs().items():
            if interface == 'lo':
                # don't consider loopback device
                continue

            if not addrs:
                # skip missing addrs
                continue

            use_addr = False
            for addr in addrs:
                if addr.family == socket.AF_INET:
                    if not addr.address.startswith('127.'):
                        use_addr = True
                        break

            if use_addr:
                ipaddr = None
                macaddr = None
                for addr in addrs:
                    if not ipaddr and addr.family == socket.AF_INET:
                        ipaddr = addr.address
                    if not ipaddr and addr.family == socket.AF_INET6:
                        ipaddr = addr.address
                    if not macaddr and addr.family == psutil.AF_LINK:
                        macaddr = addr.address

                chip.set('record', 'ipaddr', ipaddr, step=step, index=index)
                chip.set('record', 'macaddr', macaddr, step=step, index=index)
                break
    except Exception:
        # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate; network info is best-effort only.
        chip.logger.warning('Could not find default network interface info')
1592
+
1593
+
1594
+ #######################################
1595
def _get_machine_info():
    '''
    Collect host platform details.

    Returns:
        dict with keys 'system' (lowercased OS name, 'macos' for Darwin),
        'distro' (Linux distribution id or None), 'osversion',
        'kernelversion' (None on unknown systems), and 'arch'.
    '''
    system = platform.system()

    if system == 'Darwin':
        sys_name = 'macos'
        distro_name = None
        osversion, _, _ = platform.mac_ver()
        kernelversion = platform.release()
    elif system == 'Linux':
        sys_name = system.lower()
        distro_name = distro.id()
        osversion = distro.version()
        kernelversion = platform.release()
    elif system == 'Windows':
        sys_name = system.lower()
        distro_name = None
        osversion = platform.release()
        kernelversion = platform.version()
    else:
        sys_name = system.lower()
        distro_name = None
        osversion = platform.release()
        kernelversion = None

    return {'system': sys_name,
            'distro': distro_name,
            'osversion': osversion,
            'kernelversion': kernelversion,
            'arch': platform.machine()}
1630
+
1631
+
1632
def print_traceback(chip, exception):
    '''
    Log an exception's message followed by its formatted backtrace, one
    logger.error() call per backtrace line.
    '''
    chip.logger.error(f'{exception}')

    buffer = StringIO()
    traceback.print_tb(exception.__traceback__, file=buffer)

    chip.logger.error("Backtrace:")
    for backtrace_line in buffer.getvalue().splitlines():
        chip.logger.error(backtrace_line)
1639
+
1640
+
1641
def kill_process(chip, proc, tool, poll_interval, msg=""):
    '''
    Give `proc` up to 5 seconds to exit on its own, then force-terminate
    it (and its children) if it is still running.
    '''
    TERMINATE_TIMEOUT = 5
    deadline = time.time() + TERMINATE_TIMEOUT
    chip.logger.info(f'{msg}Waiting for {tool} to exit...')

    while proc.poll() is None and time.time() < deadline:
        time.sleep(5 * poll_interval)

    if proc.poll() is None:
        chip.logger.warning(f'{tool} did not exit within {TERMINATE_TIMEOUT} '
                            'seconds. Terminating...')
        utils.terminate_process(proc.pid)
1652
+
1653
+
1654
def check_node_inputs(chip, step, index):
    '''
    Compare a node's inputs against the manifest captured on its previous
    run to decide whether the cached result can be reused.

    Returns:
        bool: True when the node's inputs are unchanged (or no previous
        manifest exists / 'clean' is set), False when a change is detected.
    '''
    from siliconcompiler import Chip  # import here to avoid circular import

    if chip.get('option', 'clean'):
        # Clean run requested: no reuse check needed.
        return True

    def get_file_time(path):
        # Most recent mtime of `path`, recursing into directories.
        times = [os.path.getmtime(path)]
        if os.path.isdir(path):
            for path_root, _, files in os.walk(path):
                for path_end in files:
                    times.append(os.path.getmtime(os.path.join(path_root, path_end)))

        return max(times)

    # Load previous manifest
    input_manifest = None
    in_cfg = f"{chip.getworkdir(step=step, index=index)}/inputs/{chip.design}.pkg.json"
    if os.path.exists(in_cfg):
        input_manifest_time = get_file_time(in_cfg)
        input_manifest = Schema(manifest=in_cfg, logger=chip.logger)

    if not input_manifest:
        # No manifest found so assume okay
        return True

    flow = chip.get('option', 'flow')
    input_flow = input_manifest.get('option', 'flow')

    # Assume modified if flow does not match
    if flow != input_flow:
        return False

    input_chip = Chip('<>')
    input_chip.schema = input_manifest
    # Copy over useful information from chip
    input_chip.logger = chip.logger
    input_chip._packages = chip._packages

    tool, task = get_tool_task(chip, step, index)
    input_tool, input_task = get_tool_task(input_chip, step, index)

    # Assume modified if tool or task does not match
    if tool != input_tool or task != input_task:
        return False

    # Collect keys to check for changes
    required = chip.get('tool', tool, 'task', task, 'require', step=step, index=index)
    required.extend(input_chip.get('tool', tool, 'task', task, 'require', step=step, index=index))

    tool_task_key = ('tool', tool, 'task', task)
    for key in ('option', 'threads', 'prescript', 'postscript', 'refdir', 'script',):
        required.append(",".join([*tool_task_key, key]))
    # Bug fix: this loop previously queried `chip.getkeys` for both
    # iterations, so env keys present only in the previous run's manifest
    # were never checked. Query each chip in turn so removed env vars are
    # detected as a modification.
    for check_chip in (chip, input_chip):
        for env_key in check_chip.getkeys(*tool_task_key, 'env'):
            required.append(",".join([*tool_task_key, 'env', env_key]))

    def print_warning(key, extra=None):
        if extra:
            chip.logger.warning(f'[{",".join(key)}] ({extra}) in {step}{index} has been modified '
                                'from previous run')
        else:
            chip.logger.warning(f'[{",".join(key)}] in {step}{index} has been modified '
                                'from previous run')

    # Check if keys have been modified
    for check_key in sorted(set(required)):
        key = check_key.split(',')

        if not chip.valid(*key) or not input_chip.valid(*key):
            # Key exists on only one side: treat as modified.
            print_warning(key)
            return False

        pernode = chip.get(*key, field='pernode')

        check_step = step
        check_index = index
        if pernode == 'never':
            check_step = None
            check_index = None

        sc_type = chip.get(*key, field='type')
        if 'file' in sc_type or 'dir' in sc_type:
            if chip.get('option', 'hash') and input_chip.get('option', 'hash'):
                # Compare content hashes when hashing is enabled on both runs.
                check_hash = chip.hash_files(*key, update=False, check=False,
                                             verbose=False, allow_cache=True,
                                             step=check_step, index=check_index)
                prev_hash = input_chip.get(*key, field='filehash',
                                           step=check_step, index=check_index)

                if check_hash != prev_hash:
                    print_warning(key)
                    return False
            else:
                # check timestamps on current files
                for check_file in chip.find_files(*key, step=check_step, index=check_index):
                    if get_file_time(check_file) > input_manifest_time:
                        print_warning(key, "timestamp")
                        return False

            # check values
            for field in ('value', 'package'):
                check_val = chip.get(*key, field=field, step=check_step, index=check_index)
                prev_val = input_chip.get(*key, field=field, step=check_step, index=check_index)

                if check_val != prev_val:
                    print_warning(key)
                    return False
        else:
            check_val = chip.get(*key, step=check_step, index=check_index)
            prev_val = input_chip.get(*key, step=check_step, index=check_index)

            if check_val != prev_val:
                print_warning(key)
                return False

    return True
1771
+
1772
+
1773
+ ###########################################################################
1774
def check_logfile(chip, jobname=None, step=None, index='0',
                  logfile=None, display=True):
    '''
    Checks logfile for patterns found in the 'regex' parameter.

    Reads the content of the task's log file and compares the content found
    with the task's 'regex' parameter. The matches are stored in the file
    '<step>.<suffix>' in the current directory. The matches are logged
    if display is set to True.

    Args:
        chip (Chip): Chip object whose manifest supplies defaults and the
            regex configuration.
        jobname (str): Job directory name. If None, :keypath:`option, jobname` is used.
        step (str): Task step name ('syn', 'place', etc). If None, :keypath:`arg, step` is used.
        index (str): Task index. Default value is 0. If None, :keypath:`arg, index` is used.
        logfile (str): Path to logfile. If None, the default task logfile is used.
        display (bool): If True, logs matches.

    Raises:
        ValueError: if step/index is None and not set in ['arg', ...].

    Returns:
        Dictionary mapping suffixes to number of matches for that suffix's
        regex.

    Examples:
        >>> chip.check_logfile(step='place')
        Searches for regex matches in the place logfile.
    '''

    # Using manifest to get defaults

    flow = chip.get('option', 'flow')

    if jobname is None:
        jobname = chip.get('option', 'jobname')
    if step is None:
        step = chip.get('arg', 'step')
        if step is None:
            raise ValueError("Must provide 'step' or set ['arg', 'step']")
    if index is None:
        index = chip.get('arg', 'index')
        if index is None:
            raise ValueError("Must provide 'index' or set ['arg', 'index']")
    if logfile is None:
        logfile = os.path.join(chip.getworkdir(jobname=jobname, step=step, index=index),
                               f'{step}.log')

    tool, task = get_tool_task(chip, step, index, flow=flow)

    # Creating local dictionary (for speed)
    # chip.get is slow
    checks = {}
    matches = {}
    for suffix in chip.getkeys('tool', tool, 'task', task, 'regex'):
        regexes = chip.get('tool', tool, 'task', task, 'regex', suffix, step=step, index=index)
        if not regexes:
            continue

        # One report file per suffix, written in the current directory.
        checks[suffix] = {}
        checks[suffix]['report'] = open(f"{step}.{suffix}", "w")
        checks[suffix]['args'] = regexes
        matches[suffix] = 0

    # Order suffixes as follows: [..., 'warnings', 'errors']
    ordered_suffixes = list(filter(lambda key:
                                   key not in ['warnings', 'errors'], checks.keys()))
    if 'warnings' in checks:
        ordered_suffixes.append('warnings')
    if 'errors' in checks:
        ordered_suffixes.append('errors')

    # Looping through patterns for each line
    with sc_open(logfile) as f:
        # First pass counts lines so line numbers can be right-aligned.
        line_count = sum(1 for _ in f)
        right_align = len(str(line_count))
        for suffix in ordered_suffixes:
            # Start at the beginning of file again
            f.seek(0)
            for num, line in enumerate(f, start=1):
                string = line
                # A suffix's regexes are chained like piped greps: each one
                # filters the previous match; None means no match.
                for item in checks[suffix]['args']:
                    if string is None:
                        break
                    else:
                        string = utils.grep(chip, item, string)
                if string is not None:
                    matches[suffix] += 1
                    # always print to file
                    line_with_num = f'{num: >{right_align}}: {string.strip()}'
                    print(line_with_num, file=checks[suffix]['report'])
                    # selectively print to display
                    if display:
                        if suffix == 'errors':
                            chip.logger.error(line_with_num)
                        elif suffix == 'warnings':
                            chip.logger.warning(line_with_num)
                        else:
                            chip.logger.info(f'{suffix}: {line_with_num}')

    for suffix in ordered_suffixes:
        chip.logger.info(f'Number of {suffix}: {matches[suffix]}')
        checks[suffix]['report'].close()

    return matches
1875
+
1876
+
1877
def copy_old_run_dir(chip, org_jobname):
    '''
    Import results from a previous job into the current job directory.

    When running with -from and a different jobname, each node upstream of
    the -from entry points is copied (hard-linked where possible) from
    `org_jobname`, along with the collect directory, and the copied replay
    scripts and manifests are rewritten to reference the new jobname.

    Args:
        chip (Chip): Chip object for the run. Note: ['arg', 'step'] and
            ['arg', 'index'] are temporarily set while regenerating replay
            scripts and unset afterwards.
        org_jobname (str): Name of the previous job to copy from.
    '''
    from_nodes = []
    flow = chip.get('option', 'flow')
    for step in chip.get('option', 'from'):
        from_nodes.extend(
            [(step, index) for index in chip.getkeys('flowgraph', flow, step)])
    from_nodes = set(from_nodes)
    if not from_nodes:
        # Nothing to do
        return

    if org_jobname == chip.get('option', 'jobname'):
        # Same job directory; nothing to import.
        return

    # Copy nodes forward
    org_nodes = set(_nodes_to_execute(
        chip,
        flow,
        _get_flowgraph_entry_nodes(chip, flow),
        from_nodes,
        chip.get('option', 'prune')))

    # Only nodes strictly upstream of the -from entry points are imported.
    copy_nodes = org_nodes.difference(from_nodes)

    def copy_files(from_path, to_path):
        # Hard-link files where possible to avoid duplicating large results.
        shutil.copytree(from_path, to_path,
                        dirs_exist_ok=True,
                        copy_function=utils.link_copy)

    for step, index in copy_nodes:
        copy_from = chip.getworkdir(jobname=org_jobname, step=step, index=index)
        copy_to = chip.getworkdir(step=step, index=index)

        if not os.path.exists(copy_from):
            continue

        chip.logger.info(f'Importing {step}{index} from {org_jobname}')
        copy_files(copy_from, copy_to)

    # Copy collect directory
    copy_from = chip._getcollectdir(jobname=org_jobname)
    copy_to = chip._getcollectdir()
    if os.path.exists(copy_from):
        copy_files(copy_from, copy_to)

    # Modify manifests to correct jobname
    for step, index in copy_nodes:
        # rewrite replay files
        replay_file = f'{chip.getworkdir(step=step, index=index)}/replay.sh'
        if os.path.exists(replay_file):
            # delete file as it might be a hard link
            os.remove(replay_file)
            chip.set('arg', 'step', step)
            chip.set('arg', 'index', index)
            tool, task = get_tool_task(chip, step, index)
            _makecmd(chip, tool, task, step, index, script_name=replay_file)
            chip.unset('arg', 'step')
            chip.unset('arg', 'index')

        for io in ('inputs', 'outputs'):
            manifest = f'{chip.getworkdir(step=step, index=index)}/{io}/{chip.design}.pkg.json'
            if os.path.exists(manifest):
                schema = Schema(manifest=manifest)
                # delete file as it might be a hard link
                os.remove(manifest)
                schema.set('option', 'jobname', chip.get('option', 'jobname'))
                with open(manifest, 'w') as f:
                    schema.write_json(f)
1945
+
1946
+
1947
def clean_node_dir(chip, step, index):
    '''Delete a single node's working directory, if it exists.'''
    workdir = chip.getworkdir(step=step, index=index)
    if os.path.isdir(workdir):
        shutil.rmtree(workdir)
1951
+
1952
+
1953
def clean_build_dir(chip):
    '''
    Remove stale build results before a run.

    Skipped entirely for remote jobs and single-step runs. A clean run
    without -from wipes the whole job directory; otherwise only the nodes
    about to be re-run are cleared, and the job directory is wiped when
    its on-disk layout no longer matches the current flowgraph.
    '''
    # Remote runs and single-step runs manage their own directories.
    if chip.get('record', 'remoteid') or chip.get('arg', 'step'):
        return

    if chip.get('option', 'clean') and not chip.get('option', 'from'):
        # If no step or nodes to start from were specified, the whole flow is
        # being run start-to-finish. Delete the build dir to clear stale results.
        job_dir = chip.getworkdir()
        if os.path.isdir(job_dir):
            shutil.rmtree(job_dir)
        return

    if chip.get('option', 'from'):
        # Remove stale outputs of the nodes that are about to be re-run.
        for step, index in nodes_to_execute(chip):
            clean_node_dir(chip, step, index)

    # If the workdir contains node directories unknown to the current
    # flowgraph, the flow structure changed: wipe the whole job directory.
    flow_nodes = set(_get_flowgraph_nodes(chip, flow=chip.get('option', 'flow')))
    stale_nodes = __collect_nodes_in_workdir(chip).difference(flow_nodes)
    if stale_nodes:
        shutil.rmtree(chip.getworkdir())
1981
+
1982
+
1983
def __collect_nodes_in_workdir(chip):
    '''
    Return the set of (step, index) node directories present in the job
    working directory, excluding the collect directory.
    '''
    workdir = chip.getworkdir()
    if not os.path.isdir(workdir):
        return set()

    collect_dir = chip._getcollectdir()

    found = set()
    for step in os.listdir(workdir):
        step_dir = os.path.join(workdir, step)

        # Skip the collect directory and any stray files.
        if step_dir == collect_dir or not os.path.isdir(step_dir):
            continue

        for index in os.listdir(step_dir):
            if os.path.isdir(os.path.join(step_dir, index)):
                found.add((step, index))

    return found
2005
+
2006
+
2007
+ ###########################################################################
2008
def _check_manifest_dynamic(chip, step, index):
    '''Runtime checks called from _runtask().

    - Make sure expected inputs exist.
    - Make sure all required filepaths resolve correctly.

    Returns:
        bool: True when all checks pass, False otherwise.
    '''
    error = False

    flow = chip.get('option', 'flow')
    tool, task = get_tool_task(chip, step, index, flow=flow)

    # Verify every file the task declared as a required input was received.
    required_inputs = chip.get('tool', tool, 'task', task, 'input', step=step, index=index)
    input_dir = os.path.join(chip.getworkdir(step=step, index=index), 'inputs')
    for filename in required_inputs:
        path = os.path.join(input_dir, filename)
        if not os.path.exists(path):
            # Bug fix: the message previously printed the literal '(unknown)'
            # instead of naming the missing file.
            chip.logger.error(f'Required input {filename} not received for {step}{index}.')
            error = True

    # Verify every required keypath exists and its file/dir values resolve.
    all_required = chip.get('tool', tool, 'task', task, 'require', step=step, index=index)
    for item in all_required:
        keypath = item.split(',')
        if not chip.valid(*keypath):
            chip.logger.error(f'Cannot resolve required keypath {keypath}.')
            error = True
        else:
            paramtype = chip.get(*keypath, field='type')
            is_perstep = chip.get(*keypath, field='pernode') != 'never'
            if ('file' in paramtype) or ('dir' in paramtype):
                for val, check_step, check_index in chip.schema._getvals(*keypath):
                    if is_perstep:
                        # Global values are stored with None step/index;
                        # query them via the schema's global key.
                        if check_step is None:
                            check_step = Schema.GLOBAL_KEY
                        if check_index is None:
                            check_index = Schema.GLOBAL_KEY
                    abspath = chip.find_files(*keypath,
                                              missing_ok=True,
                                              step=check_step, index=check_index)
                    unresolved_paths = val
                    if not isinstance(abspath, list):
                        # Scalar file/dir parameters: normalize to lists so
                        # the resolution check below is uniform.
                        abspath = [abspath]
                        unresolved_paths = [unresolved_paths]
                    for i, path in enumerate(abspath):
                        if path is None:
                            unresolved_path = unresolved_paths[i]
                            chip.logger.error(f'Cannot resolve path {unresolved_path} in '
                                              f'required file keypath {keypath}.')
                            error = True

    return not error
2058
+
2059
+
2060
+ #######################################
2061
def _clear_metric(chip, step, index, metric, preserve=None):
    '''
    Clear a node's metric value along with its associated tool report entry.

    Metrics named in ``preserve`` are left untouched (useful when calling
    this in a loop over all metrics).
    '''
    if preserve and metric in preserve:
        # Caller asked to keep this metric.
        return

    chip.unset('metric', metric, step=step, index=index)

    tool, task = get_tool_task(chip, step, index, flow=chip.get('option', 'flow'))
    chip.unset('tool', tool, 'task', task, 'report', metric, step=step, index=index)
2076
+
2077
+
2078
+ #######################################
2079
def _clear_record(chip, step, index, record, preserve=None):
    '''
    Clear a record parameter for a node.

    Records whose pernode field is 'never' are global and cleared without a
    step/index. Records named in ``preserve`` are left untouched.
    '''
    if preserve and record in preserve:
        # Caller asked to keep this record.
        return

    is_global = chip.get('record', record, field='pernode') == 'never'
    if is_global:
        chip.unset('record', record)
    else:
        chip.unset('record', record, step=step, index=index)