siliconcompiler 0.34.0__py3-none-any.whl → 0.34.2__py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (114)
  1. siliconcompiler/__init__.py +14 -2
  2. siliconcompiler/_metadata.py +1 -1
  3. siliconcompiler/apps/_common.py +1 -1
  4. siliconcompiler/apps/sc.py +1 -1
  5. siliconcompiler/apps/sc_issue.py +1 -1
  6. siliconcompiler/apps/sc_remote.py +3 -3
  7. siliconcompiler/apps/sc_show.py +3 -3
  8. siliconcompiler/apps/utils/replay.py +4 -4
  9. siliconcompiler/checklist.py +203 -2
  10. siliconcompiler/constraints/__init__.py +17 -0
  11. siliconcompiler/constraints/asic_component.py +378 -0
  12. siliconcompiler/constraints/asic_floorplan.py +449 -0
  13. siliconcompiler/constraints/asic_pins.py +489 -0
  14. siliconcompiler/constraints/asic_timing.py +517 -0
  15. siliconcompiler/core.py +31 -249
  16. siliconcompiler/data/templates/email/general.j2 +3 -3
  17. siliconcompiler/data/templates/email/summary.j2 +1 -1
  18. siliconcompiler/data/templates/issue/README.txt +1 -1
  19. siliconcompiler/data/templates/report/sc_report.j2 +7 -7
  20. siliconcompiler/dependencyschema.py +10 -174
  21. siliconcompiler/design.py +325 -114
  22. siliconcompiler/flowgraph.py +63 -15
  23. siliconcompiler/library.py +133 -0
  24. siliconcompiler/metric.py +94 -72
  25. siliconcompiler/metrics/__init__.py +7 -0
  26. siliconcompiler/metrics/asic.py +245 -0
  27. siliconcompiler/metrics/fpga.py +220 -0
  28. siliconcompiler/optimizer/vizier.py +2 -2
  29. siliconcompiler/package/__init__.py +138 -35
  30. siliconcompiler/package/github.py +6 -10
  31. siliconcompiler/packageschema.py +256 -12
  32. siliconcompiler/pathschema.py +226 -0
  33. siliconcompiler/pdk.py +5 -5
  34. siliconcompiler/project.py +459 -0
  35. siliconcompiler/remote/client.py +18 -12
  36. siliconcompiler/remote/server.py +2 -2
  37. siliconcompiler/report/dashboard/cli/__init__.py +6 -6
  38. siliconcompiler/report/dashboard/cli/board.py +3 -3
  39. siliconcompiler/report/dashboard/web/components/__init__.py +5 -5
  40. siliconcompiler/report/dashboard/web/components/flowgraph.py +4 -4
  41. siliconcompiler/report/dashboard/web/components/graph.py +2 -2
  42. siliconcompiler/report/dashboard/web/state.py +1 -1
  43. siliconcompiler/report/dashboard/web/utils/__init__.py +5 -5
  44. siliconcompiler/report/html_report.py +1 -1
  45. siliconcompiler/report/report.py +4 -4
  46. siliconcompiler/report/summary_table.py +2 -2
  47. siliconcompiler/report/utils.py +5 -5
  48. siliconcompiler/scheduler/docker.py +4 -10
  49. siliconcompiler/scheduler/run_node.py +4 -8
  50. siliconcompiler/scheduler/scheduler.py +18 -24
  51. siliconcompiler/scheduler/schedulernode.py +161 -143
  52. siliconcompiler/scheduler/send_messages.py +3 -3
  53. siliconcompiler/scheduler/slurm.py +5 -3
  54. siliconcompiler/scheduler/taskscheduler.py +10 -8
  55. siliconcompiler/schema/__init__.py +0 -2
  56. siliconcompiler/schema/baseschema.py +148 -26
  57. siliconcompiler/schema/editableschema.py +14 -6
  58. siliconcompiler/schema/journal.py +23 -15
  59. siliconcompiler/schema/namedschema.py +30 -4
  60. siliconcompiler/schema/parameter.py +34 -19
  61. siliconcompiler/schema/parametertype.py +2 -0
  62. siliconcompiler/schema/parametervalue.py +198 -15
  63. siliconcompiler/schema/schema_cfg.py +18 -14
  64. siliconcompiler/schema_obj.py +5 -3
  65. siliconcompiler/tool.py +591 -179
  66. siliconcompiler/tools/__init__.py +2 -0
  67. siliconcompiler/tools/builtin/_common.py +5 -5
  68. siliconcompiler/tools/builtin/concatenate.py +5 -5
  69. siliconcompiler/tools/builtin/minimum.py +4 -4
  70. siliconcompiler/tools/builtin/mux.py +4 -4
  71. siliconcompiler/tools/builtin/nop.py +4 -4
  72. siliconcompiler/tools/builtin/verify.py +7 -7
  73. siliconcompiler/tools/execute/exec_input.py +1 -1
  74. siliconcompiler/tools/genfasm/genfasm.py +1 -6
  75. siliconcompiler/tools/openroad/_apr.py +5 -1
  76. siliconcompiler/tools/openroad/antenna_repair.py +1 -1
  77. siliconcompiler/tools/openroad/macro_placement.py +1 -1
  78. siliconcompiler/tools/openroad/power_grid.py +1 -1
  79. siliconcompiler/tools/openroad/scripts/common/procs.tcl +5 -0
  80. siliconcompiler/tools/opensta/timing.py +26 -3
  81. siliconcompiler/tools/slang/__init__.py +2 -2
  82. siliconcompiler/tools/surfer/__init__.py +0 -0
  83. siliconcompiler/tools/surfer/show.py +53 -0
  84. siliconcompiler/tools/surfer/surfer.py +30 -0
  85. siliconcompiler/tools/vpr/route.py +27 -14
  86. siliconcompiler/tools/vpr/vpr.py +23 -6
  87. siliconcompiler/tools/yosys/__init__.py +1 -1
  88. siliconcompiler/tools/yosys/scripts/procs.tcl +143 -0
  89. siliconcompiler/tools/yosys/{sc_synth_asic.tcl → scripts/sc_synth_asic.tcl} +4 -0
  90. siliconcompiler/tools/yosys/{sc_synth_fpga.tcl → scripts/sc_synth_fpga.tcl} +24 -77
  91. siliconcompiler/tools/yosys/syn_fpga.py +14 -0
  92. siliconcompiler/toolscripts/_tools.json +9 -13
  93. siliconcompiler/toolscripts/rhel9/install-vpr.sh +0 -2
  94. siliconcompiler/toolscripts/ubuntu22/install-surfer.sh +33 -0
  95. siliconcompiler/toolscripts/ubuntu24/install-surfer.sh +33 -0
  96. siliconcompiler/utils/__init__.py +2 -1
  97. siliconcompiler/utils/flowgraph.py +24 -23
  98. siliconcompiler/utils/issue.py +23 -29
  99. siliconcompiler/utils/logging.py +35 -6
  100. siliconcompiler/utils/showtools.py +6 -1
  101. {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.2.dist-info}/METADATA +15 -25
  102. {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.2.dist-info}/RECORD +109 -97
  103. siliconcompiler/schema/packageschema.py +0 -101
  104. siliconcompiler/tools/yosys/procs.tcl +0 -71
  105. siliconcompiler/toolscripts/rhel9/install-yosys-parmys.sh +0 -68
  106. siliconcompiler/toolscripts/ubuntu22/install-yosys-parmys.sh +0 -68
  107. siliconcompiler/toolscripts/ubuntu24/install-yosys-parmys.sh +0 -68
  108. /siliconcompiler/tools/yosys/{sc_lec.tcl → scripts/sc_lec.tcl} +0 -0
  109. /siliconcompiler/tools/yosys/{sc_screenshot.tcl → scripts/sc_screenshot.tcl} +0 -0
  110. /siliconcompiler/tools/yosys/{syn_strategies.tcl → scripts/syn_strategies.tcl} +0 -0
  111. {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.2.dist-info}/WHEEL +0 -0
  112. {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.2.dist-info}/entry_points.txt +0 -0
  113. {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.2.dist-info}/licenses/LICENSE +0 -0
  114. {siliconcompiler-0.34.0.dist-info → siliconcompiler-0.34.2.dist-info}/top_level.txt +0 -0
@@ -20,6 +20,7 @@ from siliconcompiler.report.dashboard import DashboardType
  from siliconcompiler.flowgraph import RuntimeFlowgraph
  from siliconcompiler.scheduler.scheduler import Scheduler
  from siliconcompiler.schema import Journal
+ from siliconcompiler.utils.logging import get_console_formatter

  # Step name to use while logging
  remote_step_name = 'remote'
@@ -298,14 +299,15 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
  nodes_log = f' {status.title()} ({num_nodes}): '
  log_nodes = []
  for node, _ in nodes:
- node_len = len(node)
+ node_name = self.__node_information[node]['print']
+ node_len = len(node_name)

  if node_len + line_len + 2 < self.__maxlinelength:
- log_nodes.append(node)
+ log_nodes.append(node_name)
  line_len += node_len + 2
  else:
  if len(log_nodes) == num_nodes - 1:
- log_nodes.append(node)
+ log_nodes.append(node_name)
  else:
  log_nodes.append('...')
  break
@@ -375,7 +377,7 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
  if SCNodeStatus.is_running(stat):
  self.__logger.info(f' {stat.title()} ({len(nodes)}):')
  for node, node_info in nodes:
- running_log = f" {node}"
+ running_log = f" {self.__node_information[node]['print']}"
  if 'elapsed_time' in node_info:
  running_log += f" ({node_info['elapsed_time']})"
  self.__logger.info(running_log)
@@ -483,7 +485,8 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.

  # Run the job on the remote server, and wait for it to finish.
  # Set logger to indicate remote run
- self.__chip._init_logger(step=self.STEP_NAME, index=None, in_run=True)
+ self.__chip._logger_console.setFormatter(
+ get_console_formatter(self.__chip, True, self.STEP_NAME, None))

  # Ask the remote server to start processing the requested step.
  self.__request_run()
@@ -494,7 +497,8 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
  finally:
  # Restore logger
  self.__chip._dash.end_of_run()
- self.__chip._init_logger(in_run=True)
+ self.__chip._logger_console.setFormatter(
+ get_console_formatter(self.__chip, False, None, None))
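For orientation, the two hunks above replace a full logger re-initialization with a formatter swap on the chip's persistent console handler. Below is a minimal sketch of that pattern (plain Python, not part of the diff), assuming `_logger_console` is a standard logging.Handler attached to the chip's logger and that `get_console_formatter` takes the arguments exactly as used above; the helper name and callback are hypothetical.

    from siliconcompiler.utils.logging import get_console_formatter

    def with_remote_log_format(chip, step_name, run_remote_job):
        # Switch console output to the in-run format for the duration of the
        # remote run, then restore the idle format afterwards.
        console = chip._logger_console
        console.setFormatter(get_console_formatter(chip, True, step_name, None))
        try:
            run_remote_job()
        finally:
            console.setFormatter(get_console_formatter(chip, False, None, None))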
@@ -529,7 +533,7 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
  # Redirected POST requests are translated to GETs. This is actually
  # part of the HTTP spec, so we need to manually follow the trail.
  post_params = {
- 'chip_cfg': self.__chip.schema.getdict(),
+ 'chip_cfg': self.__chip.getdict(),
  'params': self.__get_post_params(include_job_id=True)
  }

@@ -571,7 +575,7 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
  key_type = self.__chip.get(*key, field='type')

  if 'dir' in key_type or 'file' in key_type:
- for _, step, index in self.__chip.schema.get(*key, field=None).getvalues(
+ for _, step, index in self.__chip.get(*key, field=None).getvalues(
  return_defvalue=False):
  packages = self.__chip.get(*key, field='package', step=step, index=index)
  if not isinstance(packages, list):
@@ -650,7 +654,8 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
  return changed

  def __ensure_run_loop_information(self):
- self.__chip._init_logger(step=self.STEP_NAME, index='0', in_run=True)
+ self.__chip._logger_console.setFormatter(
+ get_console_formatter(self.__chip, True, self.STEP_NAME, None))
  if not self.__download_pool:
  self.__download_pool = multiprocessing.Pool()

@@ -663,7 +668,7 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.

  self.__node_information = {}
  runtime = RuntimeFlowgraph(
- self.__chip.schema.get("flowgraph", self.__chip.get('option', 'flow'), field='schema'),
+ self.__chip.get("flowgraph", self.__chip.get('option', 'flow'), field='schema'),
  from_steps=self.__chip.get('option', 'from'),
  to_steps=self.__chip.get('option', 'to'),
  prune_nodes=self.__chip.get('option', 'prune'))
@@ -674,7 +679,8 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
  "step": step,
  "index": index,
  "imported": done,
- "fetched": done
+ "fetched": done,
+ "print": f"{step}/{index}"
  }
  self.__node_information[f'{step}{index}'] = node_info

@@ -749,7 +755,7 @@ service, provided by SiliconCompiler, is not intended to process proprietary IP.
  def __schedule_fetch_result(self, node):
  if node:
  self.__node_information[node]["fetched"] = True
- self.__logger.info(f' {node}')
+ self.__logger.info(f' {self.__node_information[node]["print"]}')
  else:
  self.__setup_information_fetched = True
  self.__download_pool.apply_async(Client._fetch_result, (self, node))
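Most of the remaining hunks in this release follow a single migration: call sites stop reaching through `chip.schema` and use the same accessors on the chip object directly. A before/after sketch of the pattern, inferred from these hunks rather than from the library documentation:

    # 0.34.0: reach through the schema object explicitly.
    flowgraph = chip.schema.get("flowgraph", chip.get('option', 'flow'), field='schema')
    manifest = chip.schema.getdict()

    # 0.34.2: the chip object exposes the same accessors itself.
    flowgraph = chip.get("flowgraph", chip.get('option', 'flow'), field='schema')
    manifest = chip.getdict()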
@@ -96,7 +96,7 @@ class Server:

  def __run_start(self, chip):
  flow = chip.get("option", "flow")
- nodes = chip.schema.get("flowgraph", flow, field="schema").get_nodes()
+ nodes = chip.get("flowgraph", flow, field="schema").get_nodes()

  with self.sc_jobs_lock:
  job_hash = self.sc_chip_lookup[chip]["jobhash"]
@@ -442,7 +442,7 @@ class Server:
  job_hash = chip.get('record', 'remoteid')

  runtime = RuntimeFlowgraph(
- chip.schema.get("flowgraph", chip.get('option', 'flow'), field='schema'),
+ chip.get("flowgraph", chip.get('option', 'flow'), field='schema'),
  from_steps=chip.get('option', 'from'),
  to_steps=chip.get('option', 'to'),
  prune_nodes=chip.get('option', 'prune'))
@@ -23,11 +23,11 @@ class CliDashboard(AbstractDashboard):
  self._logger = logger
  if self._logger and self._dashboard._active:
  # Hijack the console
- self._logger.removeHandler(self._chip.logger._console)
- self.__logger_console = self._chip.logger._console
- self._chip.logger._console = self._dashboard._log_handler
+ self._logger.removeHandler(self._chip._logger_console)
+ self.__logger_console = self._chip._logger_console
+ self._chip._logger_console = self._dashboard._log_handler
  self._logger.addHandler(self._dashboard._log_handler)
- self._chip._init_logger_formats()
+ self._dashboard._log_handler.setFormatter(self.__logger_console.formatter)

  def open_dashboard(self):
  """Starts the dashboard rendering thread if it is not already running."""
@@ -71,9 +71,9 @@ class CliDashboard(AbstractDashboard):
  # Restore logger
  if self.__logger_console:
  self._logger.removeHandler(self._dashboard._log_handler)
- self._chip.logger._console = self.__logger_console
+ self._chip._logger_console = self.__logger_console
  self._logger.addHandler(self.__logger_console)
- self._chip._init_logger_formats()
+ self.__logger_console.setFormatter(self._dashboard._log_handler.formatter)
  self.__logger_console = None

  def wait(self):
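A condensed sketch of the hijack/restore sequence after this change, with the attribute names taken from the hunks above (`_logger_console` is the chip's console handler, `_log_handler` the dashboard's); the standalone variable names here are illustrative only:

    # Hijack: route log records to the dashboard handler while preserving
    # the console handler's formatting.
    saved_console = chip._logger_console
    logger.removeHandler(saved_console)
    chip._logger_console = dashboard_handler
    logger.addHandler(dashboard_handler)
    dashboard_handler.setFormatter(saved_console.formatter)

    # Restore: reattach the original console handler and carry over the
    # formatter the dashboard handler was using.
    logger.removeHandler(dashboard_handler)
    chip._logger_console = saved_console
    logger.addHandler(saved_console)
    saved_console.setFormatter(dashboard_handler.formatter)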
@@ -750,10 +750,10 @@ class Board(metaclass=BoardSingleton):
  raise SiliconCompilerError("dummy error")

  runtime_flow = RuntimeFlowgraph(
- chip.schema.get("flowgraph", flow, field='schema'),
+ chip.get("flowgraph", flow, field='schema'),
  to_steps=chip.get('option', 'to'),
  prune_nodes=chip.get('option', 'prune'))
- record = chip.schema.get("record", field='schema')
+ record = chip.get("record", field='schema')

  execnodes = runtime_flow.get_nodes()
  lowest_priority = 3 * len(execnodes) # 2x + 1 is lowest computed, so 3x will be lower
@@ -776,7 +776,7 @@
  node_outputs.setdefault(in_node, set()).add(node)

  flow_entry_nodes = set(
- chip.schema.get("flowgraph", flow, field="schema").get_entry_nodes())
+ chip.get("flowgraph", flow, field="schema").get_entry_nodes())

  running_nodes = set([node for node in nodes if NodeStatus.is_running(nodestatus[node])])
  done_nodes = set([node for node in nodes if NodeStatus.is_done(nodestatus[node])])
@@ -301,7 +301,7 @@ def manifest_viewer(
  if streamlit.checkbox(
  'Raw manifest',
  help='Click here to see the manifest before it was made more readable'):
- manifest_to_show = chip.schema.getdict()
+ manifest_to_show = chip.getdict()
  else:
  manifest_to_show = report.make_manifest(chip)

@@ -326,7 +326,7 @@
  streamlit.download_button(
  label='Download',
  file_name='manifest.json',
- data=json.dumps(chip.schema.getdict(), indent=2),
+ data=json.dumps(chip.getdict(), indent=2),
  mime="application/json",
  use_container_width=True)

@@ -494,7 +494,7 @@ def node_viewer(chip, step, index, metric_dataframe, height=None):

  metrics_col, records_col, logs_and_reports_col = streamlit.columns(3, gap='small')

- node_name = f'{step}{index}'
+ node_name = f'{step}/{index}'

  with metrics_col:
  streamlit.subheader(f'{node_name} metrics')
@@ -504,7 +504,7 @@
  use_container_width=True,
  height=height)
  with records_col:
- streamlit.subheader(f'{step}{index} details')
+ streamlit.subheader(f'{step}/{index} details')
  nodes = {}
  nodes[step + index] = report.get_flowgraph_nodes(chip, step, index)
  streamlit.dataframe(
@@ -512,7 +512,7 @@
  use_container_width=True,
  height=height)
  with logs_and_reports_col:
- streamlit.subheader(f'{step}{index} files')
+ streamlit.subheader(f'{step}/{index} files')
  node_file_tree_viewer(chip, step, index)


@@ -45,8 +45,8 @@ def get_nodes_and_edges(chip):
  successful_path = report.get_flowgraph_path(chip)

  flow = chip.get('option', 'flow')
- entry_exit_nodes = chip.schema.get("flowgraph", flow, field="schema").get_entry_nodes() + \
- chip.schema.get("flowgraph", flow, field="schema").get_exit_nodes()
+ entry_exit_nodes = chip.get("flowgraph", flow, field="schema").get_entry_nodes() + \
+ chip.get("flowgraph", flow, field="schema").get_exit_nodes()

  for step, index in node_dependencies:
  # Build node
@@ -60,7 +60,7 @@
  node_color = NODE_COLORS[node_status]

  tool, task = get_tool_task(chip, step, index)
- node_name = f'{step}{index}'
+ node_name = f'{step}/{index}'
  label = node_name + "\n" + tool + "/" + task
  if tool == 'builtin':
  label = node_name + "\n" + tool
@@ -95,7 +95,7 @@
  dashes = True

  edges.append(Edge(
- source=f'{source_step}{source_index}',
+ source=f'{source_step}/{source_index}',
  target=node_name,
  dir='up',
  width=edge_width,
@@ -67,7 +67,7 @@ def settings(metrics, nodes, graph_number):

  Args:
  metrics (list) : A list of metrics that are set for all chips given in chips.
- nodes (list) : A list of nodes given in the form f'{step}{index}'
+ nodes (list) : A list of nodes given in the form f'{step}/{index}'
  graph_number (int) : The number of graphs there are. Used to create
  keys to distinguish selectboxes from each other.
  """
@@ -147,7 +147,7 @@ def graph(metrics, nodes, node_to_step_index_map, graph_number):

  labels = {
  "runs": state.get_key(state.GRAPH_JOBS),
- "nodes": [f'{step}{index}' for step, index in data]
+ "nodes": [f'{step}/{index}' for step, index in data]
  }

  if nodes:
@@ -125,7 +125,7 @@ def init():
  chip_index = chip.get('arg', 'index')

  if chip_step and chip_index:
- set_key(SELECTED_NODE, f'{chip_step}{chip_index}')
+ set_key(SELECTED_NODE, f'{chip_step}/{chip_index}')

  chip = get_chip("default")
  chip.unset('arg', 'step')
@@ -24,9 +24,9 @@ def make_node_to_step_index_map(chip, metric_dataframe):
  '''
  node_to_step_index_map = {}
  if chip.get('option', 'flow'):
- for step, index in chip.schema.get("flowgraph", chip.get('option', 'flow'),
- field="schema").get_nodes():
- node_to_step_index_map[f'{step}{index}'] = (step, index)
+ for step, index in chip.get("flowgraph", chip.get('option', 'flow'),
+ field="schema").get_nodes():
+ node_to_step_index_map[f'{step}/{index}'] = (step, index)

  # concatenate step and index
  metric_dataframe.columns = metric_dataframe.columns.map(lambda x: f'{x[0]}{x[1]}')
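Note the split this hunk (and the dashboard hunks above) establishes: the label shown to users now puts a slash between step and index, while internal keys such as the metric dataframe columns still concatenate them. A small sketch with hypothetical values:

    step, index = 'syn', '0'

    display_name = f'{step}/{index}'   # user-facing label: 'syn/0'
    internal_key = f'{step}{index}'    # lookup/column key:  'syn0'

    node_to_step_index_map = {display_name: (step, index)}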
@@ -57,8 +57,8 @@ def make_metric_to_metric_unit_map(metric_dataframe):
  def is_running(chip):
  if not chip.get('option', 'flow'):
  return False
- for step, index in chip.schema.get("flowgraph", chip.get('option', 'flow'),
- field="schema").get_nodes():
+ for step, index in chip.get("flowgraph", chip.get('option', 'flow'),
+ field="schema").get_nodes():
  state = chip.get('record', 'status', step=step, index=index)
  if not NodeStatus.is_done(state):
  return True
@@ -50,7 +50,7 @@ def _generate_html_report(chip, flow, flowgraph_nodes, results_html):
  metrics=metrics,
  metrics_unit=metrics_unit,
  reports=reports,
- manifest=chip.schema.getdict(),
+ manifest=chip.getdict(),
  pruned_cfg=pruned_cfg,
  metric_keys=metrics_to_show,
  img_data=img_data,
@@ -61,7 +61,7 @@ def get_flowgraph_nodes(chip, step, index):
  value = chip.get('record', key, step=step, index=index)
  if value is not None:
  if key == 'inputnode':
- value = ", ".join([f'{step}{index}' for step, index in value])
+ value = ", ".join([f'{step}/{index}' for step, index in value])
  if key == 'pythonpackage':
  value = ", ".join(value)
  nodes[key] = str(value)
@@ -181,7 +181,7 @@ def make_manifest(chip):
  >>> make_manifest(chip)
  Returns tree/json of manifest.
  '''
- manifest = chip.schema.getdict()
+ manifest = chip.getdict()
  modified_manifest = {}
  make_manifest_helper(manifest, modified_manifest)
  return modified_manifest
@@ -200,7 +200,7 @@ def get_flowgraph_path(chip):
  '''
  flow = chip.get('option', 'flow')
  runtime = RuntimeFlowgraph(
- chip.schema.get("flowgraph", flow, field='schema'),
+ chip.get("flowgraph", flow, field='schema'),
  from_steps=chip.get('option', 'from'),
  to_steps=chip.get('option', 'to'),
  prune_nodes=chip.get('option', 'prune'))
@@ -363,7 +363,7 @@ def get_chart_selection_options(chips):
  chip = chip_and_chip_name['chip_object']
  nodes_list, _, _, _, chip_metrics, _ = \
  utils._collect_data(chip, format_as_string=False)
- nodes.update(set([f'{step}{index}' for step, index in nodes_list]))
+ nodes.update(set([f'{step}/{index}' for step, index in nodes_list]))
  metrics.update(set(chip_metrics))
  return nodes, metrics

@@ -44,7 +44,7 @@ def _show_summary_table(chip, flow, flowgraph_nodes, show_all_indices):

  # trim labels to column width
  column_labels = []
- labels = [f'{step}{index}' for step, index in nodes_to_show]
+ labels = [f'{step}/{index}' for step, index in nodes_to_show]
  if labels:
  column_width = min([column_width, max([len(label) for label in labels])])

@@ -85,7 +85,7 @@
  info_list.append(f"partname : {fpga_partname}")

  libraries = set()
- for val, step, index in chip.schema.get('asic', 'logiclib', field=None).getvalues():
+ for val, step, index in chip.get('asic', 'logiclib', field=None).getvalues():
  if not step or (step, index) in flowgraph_nodes:
  libraries.update(val)
  if libraries:
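The `getvalues()` call above is the recurring shape for reading per-node parameter values in this release: `field=None` returns the parameter object, and iterating `getvalues()` yields `(value, step, index)` tuples, with `step`/`index` unset for global values (as the `if not step` check implies). A commented restatement of that loop, with indentation added for readability:

    libraries = set()
    for val, step, index in chip.get('asic', 'logiclib', field=None).getvalues():
        # Keep global values and values scoped to nodes in the current flowgraph.
        if not step or (step, index) in flowgraph_nodes:
            libraries.update(val)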
@@ -6,7 +6,7 @@ from siliconcompiler.flowgraph import RuntimeFlowgraph


  def _find_summary_image(chip, ext='png'):
- for nodes in reversed(chip.schema.get(
+ for nodes in reversed(chip.get(
  "flowgraph", chip.get('option', 'flow'), field="schema").get_execution_order()):
  for step, index in nodes:
  layout_img = chip.find_result(ext, step=step, index=index)
@@ -17,7 +17,7 @@ def _find_summary_image(chip, ext='png'):

  def _find_summary_metrics(chip, metrics_map):
  metrics = {}
- for nodes in reversed(chip.schema.get(
+ for nodes in reversed(chip.get(
  "flowgraph", chip.get('option', 'flow'), field="schema").get_execution_order()):
  for step, index in nodes:
  for name, metric_info in metrics_map.items():
@@ -45,7 +45,7 @@ def _collect_data(chip, flow=None, flowgraph_nodes=None, format_as_string=True):

  if not flowgraph_nodes:
  runtime = RuntimeFlowgraph(
- chip.schema.get("flowgraph", flow, field='schema'),
+ chip.get("flowgraph", flow, field='schema'),
  from_steps=chip.get('option', 'from'),
  to_steps=chip.get('option', 'to'),
  prune_nodes=chip.get('option', 'prune'))
@@ -70,7 +70,7 @@ def _collect_data(chip, flow=None, flowgraph_nodes=None, format_as_string=True):
  reports = {}

  # Build ordered list of nodes in flowgraph
- for level_nodes in chip.schema.get("flowgraph", flow, field="schema").get_execution_order():
+ for level_nodes in chip.get("flowgraph", flow, field="schema").get_execution_order():
  nodes.extend(sorted(level_nodes))
  nodes = [node for node in nodes if node in flowgraph_nodes]
  for (step, index) in nodes:
@@ -147,7 +147,7 @@ def _get_flowgraph_path(chip, flow, nodes_to_execute, only_include_successful=Fa
  to_search = []
  # Start search with any successful leaf nodes.
  flowgraph_steps = list(map(lambda node: node[0], nodes_to_execute))
- runtime = RuntimeFlowgraph(chip.schema.get("flowgraph", flow, field='schema'),
+ runtime = RuntimeFlowgraph(chip.get("flowgraph", flow, field='schema'),
  from_steps=flowgraph_steps,
  to_steps=flowgraph_steps)
  end_nodes = runtime.get_exit_nodes()
@@ -112,6 +112,8 @@ class DockerSchedulerNode(SchedulerNode):
  chip.collect()

  def run(self):
+ self._init_run_logger()
+
  try:
  client = docker.from_env()
  client.version()
@@ -124,13 +126,6 @@
  workdir = self.chip.getworkdir()
  start_cwd = os.getcwd()

- # Remove handlers from logger
- for handler in self.logger.handlers.copy():
- self.logger.removeHandler(handler)
-
- # Reinit logger
- self.chip._init_logger(step=self.step, index=self.index, in_run=True)
-
  # Change working directory since the run may delete this folder
  os.makedirs(workdir, exist_ok=True)
  os.chdir(workdir)
@@ -162,8 +157,7 @@
  builddir = f'{cwd}/build'

  local_cfg = os.path.join(start_cwd, 'sc_docker.json')
- job = self.chip.get('option', 'jobname')
- cfg = f'{builddir}/{self.chip.design}/{job}/{self.step}/{self.index}/sc_docker.json'
+ cfg = f'{builddir}/{self.name}/{self.jobname}/{self.step}/{self.index}/sc_docker.json'

  user = None

@@ -215,7 +209,7 @@
  volumes=volumes,
  labels=[
  "siliconcompiler",
- f"sc_node:{self.chip.design}:{self.step}{self.index}"
+ f"sc_node:{self.name}:{self.step}:{self.index}"
  ],
  user=user,
  detach=True,
@@ -7,6 +7,7 @@ import tarfile
  import os.path

  from siliconcompiler import Chip, Schema
+ from siliconcompiler.package import Resolver
  from siliconcompiler.scheduler.schedulernode import SchedulerNode
  from siliconcompiler import __version__

@@ -95,19 +96,14 @@ def main():
  chip.set('record', 'remoteid', args.remoteid)

  if args.unset_scheduler:
- for vals, step, index in chip.schema.get('option', 'scheduler', 'name',
- field=None).getvalues():
+ for _, step, index in chip.get('option', 'scheduler', 'name',
+ field=None).getvalues():
  chip.unset('option', 'scheduler', 'name', step=step, index=index)

- # Init logger to ensure consistent view
- chip._init_logger(step=args.step,
- index=args.index,
- in_run=True)
-
  if args.cachemap:
  for cachepair in args.cachemap:
  package, path = cachepair.split(':')
- chip.get("package", field="schema")._set_cache(package, path)
+ Resolver.set_cache(chip, package, path)

  # Populate cache
  for resolver in chip.get('package', field='schema').get_resolvers().values():
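The replay entry point now routes cache overrides through the package `Resolver` instead of a private schema method. A minimal sketch of the cache-map handling after this change, keeping the `package:path` argument format from the hunk (the helper name is hypothetical):

    from siliconcompiler.package import Resolver

    def apply_cachemap(chip, cachemap):
        # Each entry maps a package name to a local cache path, 'package:path'.
        for cachepair in cachemap:
            package, path = cachepair.split(':')
            Resolver.set_cache(chip, package, path)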
@@ -21,7 +21,8 @@ from siliconcompiler.scheduler import send_messages
  class Scheduler:
  def __init__(self, chip):
  self.__chip = chip
- self.__logger = self.__chip.logger
+ self.__logger = chip.logger
+ self.__name = chip.design

  flow = self.__chip.get("option", "flow")
  if not flow:
@@ -30,7 +31,7 @@ class Scheduler:
  if flow not in self.__chip.getkeys("flowgraph"):
  raise ValueError("flow is not defined")

- self.__flow = self.__chip.schema.get("flowgraph", flow, field="schema")
+ self.__flow = self.__chip.get("flowgraph", flow, field="schema")
  from_steps = self.__chip.get('option', 'from')
  to_steps = self.__chip.get('option', 'to')
  prune_nodes = self.__chip.get('option', 'prune')
@@ -51,23 +52,13 @@
  to_steps=to_steps,
  prune_nodes=self.__chip.get('option', 'prune'))

- self.__flow_runtime_no_prune = RuntimeFlowgraph(
- self.__flow,
- from_steps=from_steps,
- to_steps=to_steps)
-
  self.__flow_load_runtime = RuntimeFlowgraph(
  self.__flow,
  to_steps=from_steps,
  prune_nodes=prune_nodes)

- self.__flow_something = RuntimeFlowgraph(
- self.__flow,
- from_steps=set([step for step, _ in self.__flow.get_entry_nodes()]),
- prune_nodes=prune_nodes)
-
- self.__record = self.__chip.schema.get("record", field="schema")
- self.__metrics = self.__chip.schema.get("metric", field="schema")
+ self.__record = self.__chip.get("record", field="schema")
+ self.__metrics = self.__chip.get("metric", field="schema")

  self.__tasks = {}

@@ -103,7 +94,7 @@
  self.__chip.schema.record_history()

  # Record final manifest
- filepath = os.path.join(self.__chip.getworkdir(), f"{self.__chip.design}.pkg.json")
+ filepath = os.path.join(self.__chip.getworkdir(), f"{self.__name}.pkg.json")
  self.__chip.write_manifest(filepath)

  send_messages.send(self.__chip, 'summary', None, None)
@@ -206,7 +197,7 @@

  manifest = os.path.join(self.__chip.getworkdir(step=step, index=index),
  'outputs',
- f'{self.__chip.design}.pkg.json')
+ f'{self.__name}.pkg.json')
  if os.path.exists(manifest):
  # ensure we setup these nodes again
  try:
@@ -217,7 +208,8 @@
  # Setup tools for all nodes to run
  for layer_nodes in self.__flow.get_execution_order():
  for step, index in layer_nodes:
- node_kept = self.__tasks[(step, index)].setup()
+ with self.__tasks[(step, index)].runtime():
+ node_kept = self.__tasks[(step, index)].setup()
  if not node_kept and (step, index) in extra_setup_nodes:
  # remove from previous node data
  del extra_setup_nodes[(step, index)]
@@ -242,12 +234,13 @@
  if self.__record.get("status", step=step, index=index) != NodeStatus.SUCCESS:
  continue

- if self.__tasks[(step, index)].requires_run():
- # This node must be run
- self.__mark_pending(step, index)
- elif (step, index) in extra_setup_nodes:
- # import old information
- Journal.access(extra_setup_nodes[(step, index)]).replay(self.__chip.schema)
+ with self.__tasks[(step, index)].runtime():
+ if self.__tasks[(step, index)].requires_run():
+ # This node must be run
+ self.__mark_pending(step, index)
+ elif (step, index) in extra_setup_nodes:
+ # import old information
+ Journal.access(extra_setup_nodes[(step, index)]).replay(self.__chip.schema)

  self.__print_status("After requires run")

@@ -267,7 +260,8 @@
  # Clean nodes marked pending
  for step, index in self.__flow_runtime.get_nodes():
  if NodeStatus.is_waiting(self.__record.get('status', step=step, index=index)):
- self.__tasks[(step, index)].clean_directory()
+ with self.__tasks[(step, index)].runtime():
+ self.__tasks[(step, index)].clean_directory()

  def __check_display(self):
  '''
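The last three scheduler hunks share one change: per-node operations (`setup()`, `requires_run()`, `clean_directory()`) are now wrapped in `with task.runtime():`. A sketch of how the setup loop reads after the change, assuming `runtime()` is a context manager that scopes the task to its step/index while the block executes (inferred from these hunks, not from the library documentation):

    for layer_nodes in flow.get_execution_order():
        for step, index in layer_nodes:
            task = tasks[(step, index)]
            with task.runtime():
                # setup() runs with the node's runtime context active.
                node_kept = task.setup()
            if not node_kept and (step, index) in extra_setup_nodes:
                # Drop stale data imported from a previous run.
                del extra_setup_nodes[(step, index)]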