siliconcompiler 0.29.0__py3-none-any.whl → 0.29.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- siliconcompiler/_metadata.py +1 -1
- siliconcompiler/apps/__init__.py +26 -0
- siliconcompiler/apps/sc_install.py +1 -1
- siliconcompiler/apps/utils/replay.py +96 -38
- siliconcompiler/checklists/__init__.py +12 -0
- siliconcompiler/core.py +85 -15
- siliconcompiler/flows/__init__.py +34 -0
- siliconcompiler/flows/showflow.py +1 -1
- siliconcompiler/libs/__init__.py +5 -0
- siliconcompiler/optimizer/__init__.py +199 -0
- siliconcompiler/optimizer/vizier.py +259 -0
- siliconcompiler/pdks/__init__.py +5 -0
- siliconcompiler/scheduler/__init__.py +67 -49
- siliconcompiler/scheduler/send_messages.py +1 -1
- siliconcompiler/schema/schema_cfg.py +2 -2
- siliconcompiler/schema/schema_obj.py +13 -10
- siliconcompiler/schema/utils.py +2 -0
- siliconcompiler/sphinx_ext/__init__.py +85 -0
- siliconcompiler/sphinx_ext/dynamicgen.py +17 -33
- siliconcompiler/sphinx_ext/schemagen.py +3 -2
- siliconcompiler/targets/__init__.py +26 -0
- siliconcompiler/templates/replay/replay.py.j2 +62 -0
- siliconcompiler/templates/replay/requirements.txt +2 -1
- siliconcompiler/templates/replay/setup.sh +119 -6
- siliconcompiler/tools/__init__.py +62 -0
- siliconcompiler/tools/_common/asic.py +77 -6
- siliconcompiler/tools/_common/tcl/sc_pin_constraints.tcl +2 -2
- siliconcompiler/tools/ghdl/ghdl.py +1 -2
- siliconcompiler/tools/gtkwave/__init__.py +39 -0
- siliconcompiler/tools/gtkwave/scripts/sc_show.tcl +34 -0
- siliconcompiler/tools/gtkwave/show.py +70 -0
- siliconcompiler/tools/icarus/compile.py +4 -0
- siliconcompiler/tools/klayout/convert_drc_db.py +1 -1
- siliconcompiler/tools/klayout/drc.py +1 -1
- siliconcompiler/tools/klayout/export.py +8 -1
- siliconcompiler/tools/klayout/klayout.py +2 -2
- siliconcompiler/tools/klayout/klayout_convert_drc_db.py +2 -2
- siliconcompiler/tools/klayout/klayout_export.py +7 -5
- siliconcompiler/tools/klayout/klayout_operations.py +4 -3
- siliconcompiler/tools/klayout/klayout_show.py +3 -2
- siliconcompiler/tools/klayout/klayout_utils.py +1 -1
- siliconcompiler/tools/klayout/operations.py +8 -0
- siliconcompiler/tools/klayout/screenshot.py +6 -1
- siliconcompiler/tools/klayout/show.py +8 -1
- siliconcompiler/tools/magic/magic.py +1 -1
- siliconcompiler/tools/openroad/__init__.py +1 -1
- siliconcompiler/tools/openroad/_apr.py +11 -2
- siliconcompiler/tools/openroad/global_placement.py +23 -2
- siliconcompiler/tools/openroad/init_floorplan.py +1 -1
- siliconcompiler/tools/openroad/scripts/apr/preamble.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/apr/sc_clock_tree_synthesis.tcl +4 -0
- siliconcompiler/tools/openroad/scripts/apr/sc_global_placement.tcl +64 -1
- siliconcompiler/tools/openroad/scripts/apr/sc_repair_design.tcl +4 -0
- siliconcompiler/tools/openroad/scripts/apr/sc_repair_timing.tcl +8 -2
- siliconcompiler/tools/openroad/scripts/common/procs.tcl +88 -0
- siliconcompiler/tools/openroad/scripts/common/reports.tcl +1 -1
- siliconcompiler/tools/openroad/scripts/common/write_images.tcl +10 -1
- siliconcompiler/tools/openroad/scripts/sc_show.tcl +5 -0
- siliconcompiler/tools/opensta/__init__.py +1 -1
- siliconcompiler/tools/opensta/check_library.py +27 -0
- siliconcompiler/tools/opensta/scripts/sc_check_library.tcl +255 -0
- siliconcompiler/tools/opensta/scripts/sc_timing.tcl +1 -1
- siliconcompiler/tools/sv2v/sv2v.py +1 -2
- siliconcompiler/tools/verilator/compile.py +11 -0
- siliconcompiler/tools/verilator/verilator.py +7 -8
- siliconcompiler/tools/vivado/vivado.py +1 -1
- siliconcompiler/tools/yosys/__init__.py +149 -0
- siliconcompiler/tools/yosys/lec.py +22 -9
- siliconcompiler/tools/yosys/sc_lec.tcl +94 -49
- siliconcompiler/tools/yosys/sc_syn.tcl +1 -0
- siliconcompiler/tools/yosys/screenshot.py +2 -2
- siliconcompiler/tools/yosys/syn_asic.py +98 -74
- siliconcompiler/tools/yosys/syn_asic.tcl +31 -6
- siliconcompiler/tools/yosys/syn_fpga.py +2 -3
- siliconcompiler/tools/yosys/syn_fpga.tcl +0 -1
- siliconcompiler/toolscripts/_tools.json +8 -3
- siliconcompiler/toolscripts/rhel9/install-gtkwave.sh +40 -0
- siliconcompiler/toolscripts/ubuntu20/install-gtkwave.sh +28 -0
- siliconcompiler/toolscripts/ubuntu22/install-gtkwave.sh +28 -0
- siliconcompiler/toolscripts/ubuntu22/install-slang.sh +0 -0
- siliconcompiler/toolscripts/ubuntu24/install-gtkwave.sh +29 -0
- siliconcompiler/utils/__init__.py +7 -3
- siliconcompiler/utils/showtools.py +3 -0
- {siliconcompiler-0.29.0.dist-info → siliconcompiler-0.29.2.dist-info}/METADATA +14 -11
- {siliconcompiler-0.29.0.dist-info → siliconcompiler-0.29.2.dist-info}/RECORD +88 -91
- {siliconcompiler-0.29.0.dist-info → siliconcompiler-0.29.2.dist-info}/WHEEL +1 -1
- {siliconcompiler-0.29.0.dist-info → siliconcompiler-0.29.2.dist-info}/entry_points.txt +13 -0
- siliconcompiler/libs/asap7sc7p5t.py +0 -8
- siliconcompiler/libs/gf180mcu.py +0 -8
- siliconcompiler/libs/interposer.py +0 -8
- siliconcompiler/libs/nangate45.py +0 -8
- siliconcompiler/libs/sg13g2_stdcell.py +0 -8
- siliconcompiler/libs/sky130hd.py +0 -8
- siliconcompiler/libs/sky130io.py +0 -8
- siliconcompiler/pdks/asap7.py +0 -8
- siliconcompiler/pdks/freepdk45.py +0 -8
- siliconcompiler/pdks/gf180.py +0 -8
- siliconcompiler/pdks/ihp130.py +0 -8
- siliconcompiler/pdks/interposer.py +0 -8
- siliconcompiler/pdks/skywater130.py +0 -8
- siliconcompiler/templates/replay/run.py.j2 +0 -22
- siliconcompiler/tools/yosys/yosys.py +0 -148
- {siliconcompiler-0.29.0.dist-info → siliconcompiler-0.29.2.dist-info}/LICENSE +0 -0
- {siliconcompiler-0.29.0.dist-info → siliconcompiler-0.29.2.dist-info}/top_level.txt +0 -0
siliconcompiler/optimizer/vizier.py
ADDED

@@ -0,0 +1,259 @@
+import logging
+import uuid
+import math
+from siliconcompiler import Chip
+from siliconcompiler.optimizer import Optimizer
+from siliconcompiler.flowgraph import _get_flowgraph_nodes
+
+try:
+    from vizier.service import clients as vz_clients
+    from vizier.service import pyvizier as vz
+
+    from jax import config
+    config.update("jax_enable_x64", True)
+    _has_vizier = True
+
+    logging.getLogger('absl').setLevel(logging.CRITICAL)
+    logging.getLogger('jax').setLevel(logging.CRITICAL)
+except ModuleNotFoundError:
+    _has_vizier = False
+
+
+class VizierOptimizier(Optimizer):
+    def __init__(self, chip):
+        if not _has_vizier:
+            raise RuntimeError("vizier is not available")
+
+        super().__init__(chip)
+
+        self.__problem = None
+        self.__study = None
+
+        self.__owner = chip.design
+        self.__experiment_rounds = None
+        self.__parallel_experiment = None
+
+    def __init_parameters(self):
+        search_space = self.__problem.search_space.root
+        for name, info in self._parameters.items():
+            values = info["values"]
+            if info["type"] == 'float':
+                search_space.add_float_param(name, values[0], values[1])
+            elif info["type"] == 'int':
+                search_space.add_int_param(name, values[0], values[1])
+            elif info["type"] == 'bool':
+                search_space.add_discrete_param(name, values)
+            elif info["type"] == 'enum':
+                if any([isinstance(v, str) for v in values]):
+                    search_space.add_categorical_param(name, values)
+                else:
+                    search_space.add_discrete_param(name, values)
+
+    def __init_goals(self):
+        metric_information = self.__problem.metric_information
+
+        for name, info in self._goals.items():
+
+            vz_goal = None
+            if info["goal"] == 'max':
+                vz_goal = vz.ObjectiveMetricGoal.MAXIMIZE
+            elif info["goal"] == 'min':
+                vz_goal = vz.ObjectiveMetricGoal.MINIMIZE
+            else:
+                raise ValueError(f'{info["goal"]} is not a supported goal')
+
+            metric_information.append(vz.MetricInformation(name, goal=vz_goal))
+
+    def __init_study(self):
+        # Setup client and begin optimization
+        # Vizier Service will be implicitly created
+        study_config = vz.StudyConfig.from_problem(self.__problem)
+        study_config.algorithm = 'DEFAULT'
+        if len(self._goals) > 1:
+            if self.__parallel_experiment == 1:
+                study_config.algorithm = 'GAUSSIAN_PROCESS_BANDIT'
+            else:
+                study_config.algorithm = 'QUASI_RANDOM_SEARCH'
+
+        self.__study = vz_clients.Study.from_study_config(
+            study_config,
+            owner=self.__owner,
+            study_id=uuid.uuid4().hex)
+
+    def run(self, experiments=None, parallel=None):
+        if not experiments:
+            experiments = 10 * len(self._parameters)
+            self._chip.logger.debug(f'Setting number of optimizer experiments to {experiments}')
+
+        if not parallel:
+            parallel = 1
+
+        self.__parallel_experiment = parallel
+
+        # Algorithm, search space, and metrics.
+        self.__problem = vz.ProblemStatement()
+
+        self._clear_results()
+
+        self.__init_parameters()
+        self.__init_goals()
+
+        self.__init_study()
+
+        self.__experiment_rounds = int(math.ceil(float(experiments) / parallel))
+        accept = True
+        try:
+            for n in range(self.__experiment_rounds):
+                if self.__run_round(n):
+                    break
+        except KeyboardInterrupt:
+            pass
+        except Exception as e:
+            self._chip.logger.error(f"{e}")
+            accept = False
+        finally:
+            if accept:
+                self.__record_optimal()
+
+            self.__study.delete()
+            self.__study = None
+
+    def __run_round(self, experiment_round):
+        # create a new chip with a copy of its schema
+        chip = Chip(self._chip.design)
+        chip.schema = self._chip.schema.copy()
+
+        suggestions = self.__setup_round(experiment_round, chip)
+
+        # Start run
+        try:
+            chip.logger.info(
+                f"Starting optimizer run ({experiment_round+1} / {self.__experiment_rounds})")
+            chip.run()
+        except KeyboardInterrupt:
+            raise
+        except Exception as e:
+            chip.logger.error(f"{e}")
+
+        return self.__record_round(chip, suggestions)
+
+    def __setup_round(self, experiment_round, chip):
+        org_flow = self._chip.get("option", "flow")
+        org_jobname = self._chip.get("option", "jobname")
+
+        jobname = f"{org_jobname}-{org_flow}-{experiment_round+1}"
+
+        flow_map = {}
+
+        if self.__parallel_experiment > 1:
+            flow = f'optimize_{org_flow}'
+            # Create new graph
+            for m in range(self.__parallel_experiment):
+                graph_name = f'opt{m+1}'
+                flow_map[m] = {
+                    "name": f'{jobname}/{graph_name}',
+                    "prefix": f"{graph_name}.",
+                    "suggestion": None
+                }
+                chip.graph(flow, org_flow, name=graph_name)
+
+            # Complete nodes
+            nodes = _get_flowgraph_nodes(chip, org_flow)
+            for step, _ in list(nodes):
+                nodes.append((step, None))
+            nodes = set(nodes)
+
+            # Forward node specific values
+            for key in chip.schema.allkeys():
+                if key[0] == 'history':
+                    continue
+
+                for value, step, index in chip.schema._getvals(*key):
+                    node = (step, index)
+
+                    if node in nodes:
+                        for info in flow_map.values():
+                            chip.set(
+                                *key,
+                                value,
+                                step=f'{info["prefix"]}{step}',
+                                index=index)
+        else:
+            flow = org_flow
+            flow_map[0] = {
+                "name": jobname,
+                "prefix": "",
+                "suggestion": None
+            }
+
+        # Setup each experiment
+        for m, suggestion in enumerate(self.__study.suggest(count=self.__parallel_experiment)):
+            self._chip.logger.info(f'Setting parameters for {flow_map[m]["name"]}')
+            flow_map[m]["suggestion"] = suggestion
+
+            for param_name, param_value in suggestion.parameters.items():
+                self._set_parameter(
+                    param_name,
+                    param_value,
+                    chip,
+                    flow_prefix=flow_map[m]["prefix"])
+
+        chip.set('option', 'jobname', jobname)
+        chip.set('option', 'flow', flow)
+        chip.set('option', 'quiet', True)
+
+        steps = set()
+        for info in list(self._goals.values()) + list(self._assertions.values()):
+            for flow in flow_map.values():
+                steps.add(f'{flow["prefix"]}{info["step"]}')
+        chip.set('option', 'to', steps)
+
+        return flow_map
+
+    def __record_round(self, chip, suggestions):
+        jobname = chip.get('option', 'jobname')
+
+        # Record history
+        self._chip.schema.cfg['history'][jobname] = chip.schema.history(jobname).cfg
+
+        stop = False
+
+        for trial_entry in suggestions.values():
+            trial_suggestion = trial_entry['suggestion']
+
+            measurement = {}
+            self._chip.logger.info(f'Measuring {trial_entry["name"]}')
+            for meas_name, meas_entry in self._goals.items():
+                measurement[meas_name] = chip.get(
+                    *meas_entry["key"],
+                    step=f'{trial_entry["prefix"]}{meas_entry["step"]}',
+                    index=meas_entry["index"])
+
+                self._chip.logger.info(f'  Measured {meas_entry["print"]} = '
+                                       f'{measurement[meas_name]}')
+
+            failed = None
+            if any([value is None for value in measurement.values()]):
+                failed = "Did not record measurement goal"
+            elif not self._check_assertions(chip, trial_entry["prefix"]):
+                failed = "Failed to meet assertions"
+
+            if failed:
+                self._chip.logger.error(f'{trial_entry["name"]} failed: {failed}')
+                trial_suggestion.complete(vz.Measurement(),
+                                          infeasible_reason=failed)
+            else:
+                trial_suggestion.complete(vz.Measurement(measurement))
+                stop |= self._check_stop_goal(measurement)
+
+        return stop
+
+    def __record_optimal(self):
+        optimal_trials = list(self.__study.optimal_trials())
+        for n, optimal_trial in enumerate(optimal_trials):
+            optimal_trial = optimal_trial.materialize()
+
+            self._add_result(
+                optimal_trial.parameters,
+                optimal_trial.final_measurement
+            )
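The new VizierOptimizier follows the standard OSS Vizier suggest/complete loop: declare a search space and metrics on a ProblemStatement, open a Study, request suggestions, run the flow, and report measurements back before reading optimal_trials(). Below is a minimal standalone sketch of that loop; it assumes the optional google-vizier package is installed, and the parameter name 'place_density', metric name 'setupwns', and toy objective are illustrative stand-ins for a real chip.run() measurement.

import uuid

from vizier.service import clients as vz_clients
from vizier.service import pyvizier as vz

# Declare one parameter and one metric to maximize (names are illustrative).
problem = vz.ProblemStatement()
problem.search_space.root.add_float_param('place_density', 0.4, 0.9)
problem.metric_information.append(
    vz.MetricInformation('setupwns', goal=vz.ObjectiveMetricGoal.MAXIMIZE))

study_config = vz.StudyConfig.from_problem(problem)
study_config.algorithm = 'GAUSSIAN_PROCESS_BANDIT'
study = vz_clients.Study.from_study_config(
    study_config, owner='demo', study_id=uuid.uuid4().hex)

for _ in range(5):
    for suggestion in study.suggest(count=1):
        density = suggestion.parameters['place_density']
        wns = -(density - 0.7) ** 2  # toy objective standing in for chip.run()
        suggestion.complete(vz.Measurement({'setupwns': wns}))

# Report the best trial(s), mirroring __record_optimal() above.
for trial in study.optimal_trials():
    trial = trial.materialize()
    print(trial.parameters, trial.final_measurement)

study.delete()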
siliconcompiler/scheduler/__init__.py
CHANGED

@@ -1,3 +1,4 @@
+import contextlib
 import copy
 import distro
 import getpass

@@ -617,17 +618,19 @@ def _copy_previous_steps_output_data(chip, step, index, replay):
             os.rename(f'inputs/{outfile.name}', f'inputs/{new_name}')


-def __read_std_streams(chip, quiet,
+def __read_std_streams(chip, quiet,
+                       is_stdout_log, stdout_reader, stdout_print,
+                       is_stderr_log, stderr_reader, stderr_print):
     '''
     Handle directing tool outputs to logger
     '''
     if not quiet:
         if is_stdout_log:
             for line in stdout_reader.readlines():
-
+                stdout_print(line.rstrip())
         if is_stderr_log:
             for line in stderr_reader.readlines():
-
+                stderr_print(line.rstrip())


 ############################################################################

@@ -775,36 +778,89 @@ def _makecmd(chip, tool, task, step, index, script_name='replay.sh', include_pat
     return cmdlist, print_cmd, cmd, cmd_args


+def __get_stdio(chip, tool, task, flow, step, index):
+    def get_file(io_type):
+        suffix = chip.get('tool', tool, 'task', task, io_type, 'suffix',
+                          step=step, index=index)
+        destination = chip.get('tool', tool, 'task', task, io_type, 'destination',
+                               step=step, index=index)
+
+        io_file = None
+        if destination == 'log':
+            io_file = step + "." + suffix
+        elif destination == 'output':
+            io_file = os.path.join('outputs', chip.top() + "." + suffix)
+        elif destination == 'none':
+            io_file = os.devnull
+        else:
+            # This should not happen
+            chip.logger.error(f'{io_type}/destination has no support for {destination}.')
+            _haltstep(chip, flow, step, index)
+
+        return io_file
+
+    stdout_file = get_file('stdout')
+    stderr_file = get_file('stderr')
+
+    return stdout_file, stderr_file
+
+
 def _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, run_func=None):
     '''
     Run executable (or copy inputs to outputs for builtin functions)
     '''

     flow = chip.get('option', 'flow')
-    top = chip.top()
     tool, task = get_tool_task(chip, step, index, flow)

     quiet = (
-        chip.get('option', 'quiet', step=step, index=index) and
-        chip.get('option', 'breakpoint', step=step, index=index)
+        chip.get('option', 'quiet', step=step, index=index) and
+        not chip.get('option', 'breakpoint', step=step, index=index)
     )

+    stdout_print = chip.logger.info
+    stderr_print = chip.logger.error
+    if chip.get('option', 'loglevel', step=step, index=index) == "quiet":
+        stdout_print = chip.logger.error
+        stderr_print = chip.logger.error
+
     # TODO: Currently no memory usage tracking in breakpoints, builtins, or unexpected errors.
     max_mem_bytes = 0

+    stdout_file, stderr_file = __get_stdio(chip, tool, task, flow, step, index)
+    is_stdout_log = chip.get('tool', tool, 'task', task, 'stdout', 'destination',
+                             step=step, index=index) == 'log'
+    is_stderr_log = chip.get('tool', tool, 'task', task, 'stderr', 'destination',
+                             step=step, index=index) == 'log' and stderr_file != stdout_file
+
     retcode = 0
     cmdlist = []
     cmd_args = []
     if run_func:
         logfile = None
         try:
-
+            with open(stdout_file, 'w') as stdout_writer, \
+                    open(stderr_file, 'w') as stderr_writer:
+                if stderr_file == stdout_file:
+                    stderr_writer.close()
+                    stderr_writer = sys.stdout
+
+                with contextlib.redirect_stderr(stderr_writer), \
+                        contextlib.redirect_stdout(stdout_writer):
+                    retcode = run_func(chip)
         except Exception as e:
             chip.logger.error(f'Failed in run() for {tool}/{task}: {e}')
             retcode = 1  # default to non-zero
             print_traceback(chip, e)
             chip._error = True
         finally:
+            with sc_open(stdout_file) as stdout_reader, \
+                    sc_open(stderr_file) as stderr_reader:
+                __read_std_streams(chip,
+                                   quiet,
+                                   is_stdout_log, stdout_reader, stdout_print,
+                                   is_stderr_log, stderr_reader, stderr_print)
+
             try:
                 if resource:
                     # Since memory collection is not possible, collect the current process

@@ -843,48 +899,10 @@ def _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, ru
             import pty  # Note: this import throws exception on Windows
             retcode = pty.spawn(cmdlist, read)
         else:
-            stdout_file = ''
-            stdout_suffix = chip.get('tool', tool, 'task', task, 'stdout', 'suffix',
-                                     step=step, index=index)
-            stdout_destination = chip.get('tool', tool, 'task', task, 'stdout', 'destination',
-                                          step=step, index=index)
-            if stdout_destination == 'log':
-                stdout_file = step + "." + stdout_suffix
-            elif stdout_destination == 'output':
-                stdout_file = os.path.join('outputs', top + "." + stdout_suffix)
-            elif stdout_destination == 'none':
-                stdout_file = os.devnull
-            else:
-                chip.logger.error(f'stdout/destination has no support for {stdout_destination}. '
-                                  'Use [log|output|none].')
-                _haltstep(chip, flow, step, index)
-
-            stderr_file = ''
-            stderr_suffix = chip.get('tool', tool, 'task', task, 'stderr', 'suffix',
-                                     step=step, index=index)
-            stderr_destination = chip.get('tool', tool, 'task', task, 'stderr', 'destination',
-                                          step=step, index=index)
-            if stderr_destination == 'log':
-                stderr_file = step + "." + stderr_suffix
-            elif stderr_destination == 'output':
-                stderr_file = os.path.join('outputs', top + "." + stderr_suffix)
-            elif stderr_destination == 'none':
-                stderr_file = os.devnull
-            else:
-                chip.logger.error(f'stderr/destination has no support for {stderr_destination}. '
-                                  'Use [log|output|none].')
-                _haltstep(chip, flow, step, index)
-
             with open(stdout_file, 'w') as stdout_writer, \
                     open(stdout_file, 'r', errors='replace_with_warning') as stdout_reader, \
                     open(stderr_file, 'w') as stderr_writer, \
                     open(stderr_file, 'r', errors='replace_with_warning') as stderr_reader:
-                # Use separate reader/writer file objects as hack to display
-                # live output in non-blocking way, so we can monitor the
-                # timeout. Based on https://stackoverflow.com/a/18422264.
-                is_stdout_log = chip.get('tool', tool, 'task', task, 'stdout', 'destination',
-                                         step=step, index=index) == 'log'
-                is_stderr_log = stderr_destination == 'log' and stderr_file != stdout_file
                 # if STDOUT and STDERR are to be redirected to the same file,
                 # use a single writer
                 if stderr_file == stdout_file:

@@ -942,8 +960,8 @@ def _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, ru
                     # Loop until process terminates
                     __read_std_streams(chip,
                                        quiet,
-                                       is_stdout_log, stdout_reader,
-                                       is_stderr_log, stderr_reader)
+                                       is_stdout_log, stdout_reader, stdout_print,
+                                       is_stderr_log, stderr_reader, stderr_print)

                     if timeout is not None and time.time() - cmd_start_time > timeout:
                         chip.logger.error(f'Step timed out after {timeout} seconds')

@@ -961,8 +979,8 @@ def _run_executable_or_builtin(chip, step, index, version, toolpath, workdir, ru
                 # Read the remaining
                 __read_std_streams(chip,
                                    quiet,
-                                   is_stdout_log, stdout_reader,
-                                   is_stderr_log, stderr_reader)
+                                   is_stdout_log, stdout_reader, stdout_print,
+                                   is_stderr_log, stderr_reader, stderr_print)
                 retcode = proc.returncode

                 chip.set('record', 'toolexitcode', retcode, step=step, index=index)

siliconcompiler/scheduler/send_messages.py
CHANGED

@@ -12,7 +12,6 @@ import fastjsonschema
 from pathlib import Path
 from siliconcompiler.flowgraph import get_executed_nodes
 import uuid
-from siliconcompiler.targets import freepdk45_demo


 # Compile validation code for API request bodies.

@@ -176,6 +175,7 @@ def send(chip, msg_type, step, index):

 if __name__ == "__main__":
     from siliconcompiler import Chip
+    from siliconcompiler.targets import freepdk45_demo
     chip = Chip('test')
     chip.use(freepdk45_demo)
     chip.set('option', 'scheduler', 'msgevent', 'ALL')

siliconcompiler/schema/schema_cfg.py
CHANGED

@@ -10,7 +10,7 @@ try:
 except ImportError:
     from siliconcompiler.schema.utils import trim

-SCHEMA_VERSION = '0.48.
+SCHEMA_VERSION = '0.48.6'

 #############################################################################
 # PARAM DEFINITION

@@ -2772,7 +2772,7 @@ def schema_option(cfg):

     scparam(cfg, ['option', 'loglevel'],
             sctype='enum',
-            enum=["info", "warning", "error", "critical", "debug"],
+            enum=["info", "warning", "error", "critical", "debug", "quiet"],
             pernode='optional',
             scope='job',
             defvalue='info',

siliconcompiler/schema/schema_obj.py
CHANGED

@@ -1511,7 +1511,7 @@ class Schema:
             switchstrs, metavar = self.__get_switches(schema, *keypath)

             # Three switch types (bool, list, scalar)
-            if
+            if switchlist is None or any(switch in switchlist for switch in switchstrs):
                 used_switches.update(switchstrs)
                 if typestr == 'bool':
                     # Boolean type arguments

@@ -1606,7 +1606,18 @@ class Schema:
         # Grab argument from pre-process sysargs
         cmdargs = vars(parser.parse_args(scargs))

-        if
+        # Set loglevel if set at command line
+        do_print_banner = True
+        if 'option_loglevel' in cmdargs.keys():
+            log_level = cmdargs['option_loglevel']
+            if isinstance(log_level, list):
+                # if multiple found, pick the first one
+                log_level = log_level[0]
+            if log_level == 'quiet':
+                do_print_banner = False
+            logger.setLevel(translate_loglevel(log_level).split()[-1])
+
+        if print_banner and do_print_banner:
             print_banner()

         extra_params = None

@@ -1623,14 +1634,6 @@ class Schema:
             # Remove from cmdargs
             del cmdargs[arg]

-        # Set loglevel if set at command line
-        if 'option_loglevel' in cmdargs.keys():
-            log_level = cmdargs['option_loglevel']
-            if isinstance(log_level, list):
-                # if multiple found, pick the first one
-                log_level = log_level[0]
-            logger.setLevel(translate_loglevel(log_level).split()[-1])
-
         # Read in all cfg files
         if 'option_cfg' in cmdargs.keys():
             for item in cmdargs['option_cfg']:
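Taken together, the schema_cfg.py and schema_obj.py changes add a 'quiet' loglevel that is applied before the banner decision, so a quiet run suppresses the banner and info-level output while still surfacing errors. A hedged sketch of that ordering using only the standard logging module follows; translate_loglevel() here is an illustrative stand-in for SiliconCompiler's helper, not its actual implementation.

import logging

logging.basicConfig()
logger = logging.getLogger('sc-demo')


def translate_loglevel(level):
    # Illustrative stand-in: 'quiet' still surfaces errors, it only drops
    # the banner and info output.
    return 'ERROR' if level == 'quiet' else level.upper()


def print_banner():
    print('SiliconCompiler banner')


def apply_cli_loglevel(cmdargs):
    do_print_banner = True
    log_level = cmdargs.get('option_loglevel')
    if log_level:
        if isinstance(log_level, list):
            # if multiple values were given, pick the first one
            log_level = log_level[0]
        if log_level == 'quiet':
            do_print_banner = False
        logger.setLevel(translate_loglevel(log_level))
    if do_print_banner:
        print_banner()


apply_cli_loglevel({'option_loglevel': 'quiet'})   # no banner; logger at ERROR
logger.info('suppressed under quiet')              # not emitted
apply_cli_loglevel({'option_loglevel': 'debug'})   # banner printed; logger at DEBUG
logger.info('visible under debug')                 # emitted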
siliconcompiler/sphinx_ext/__init__.py
CHANGED

@@ -0,0 +1,85 @@
+import os.path
+
+import siliconcompiler
+from siliconcompiler import __version__ as sc_version
+
+from siliconcompiler.targets import get_targets
+from siliconcompiler.flows import get_flows
+from siliconcompiler.checklists import get_checklists
+from siliconcompiler.libs import get_libs
+from siliconcompiler.pdks import get_pdks
+from siliconcompiler.apps import get_apps
+from siliconcompiler.tools import get_tools
+
+
+sc_root = os.path.dirname(os.path.dirname(os.path.abspath(siliconcompiler.__file__)))
+
+
+def relpath(file):
+    file = os.path.abspath(file)
+    if file.startswith(sc_root):
+        return os.path.relpath(file, sc_root)
+    return None
+
+
+def get_codeurl(file=None):
+    base_url = f"https://github.com/siliconcompiler/siliconcompiler/blob/v{sc_version}"
+
+    if not file:
+        return base_url
+
+    if os.path.isabs(file):
+        file = relpath(file)
+    if not file:
+        return None
+
+    return f"{base_url}/{file}"
+
+
+def targets():
+    modules = []
+    for name, mod in get_targets().items():
+        modules.append((mod, name))
+    return modules
+
+
+def flows():
+    modules = []
+    for name, mod in get_flows().items():
+        modules.append((mod, name))
+    return modules
+
+
+def libraries():
+    modules = []
+    for name, mod in get_libs().items():
+        modules.append((mod, name))
+    return modules
+
+
+def pdks():
+    modules = []
+    for name, mod in get_pdks().items():
+        modules.append((mod, name))
+    return modules
+
+
+def tools():
+    modules = []
+    for name, mod in get_tools().items():
+        modules.append((mod, name))
+    return modules
+
+
+def apps():
+    modules = []
+    for name, mod in get_apps().items():
+        modules.append((mod, name))
+    return modules
+
+
+def checklists():
+    modules = []
+    for name, mod in get_checklists().items():
+        modules.append((mod, name))
+    return modules
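The new sphinx_ext helpers above resolve installed file paths back to repository source links pinned to the release tag. A short usage sketch, assuming the 0.29.2 wheel is installed; the module chosen below is just an example of a file shipped inside the package.

from siliconcompiler import flows
from siliconcompiler.sphinx_ext import get_codeurl, relpath

module_file = flows.__file__           # absolute path inside the installed package
print(relpath(module_file))            # e.g. 'siliconcompiler/flows/__init__.py'
print(get_codeurl(module_file))        # GitHub blob URL pinned to the v0.29.2 tag
print(get_codeurl('/tmp/outside.py'))  # None: path is not under the package root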