siliconcompiler 0.34.2__py3-none-any.whl → 0.34.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. siliconcompiler/__init__.py +12 -5
  2. siliconcompiler/__main__.py +1 -7
  3. siliconcompiler/_metadata.py +1 -1
  4. siliconcompiler/apps/_common.py +104 -23
  5. siliconcompiler/apps/sc.py +4 -8
  6. siliconcompiler/apps/sc_dashboard.py +6 -4
  7. siliconcompiler/apps/sc_install.py +10 -6
  8. siliconcompiler/apps/sc_issue.py +7 -5
  9. siliconcompiler/apps/sc_remote.py +1 -1
  10. siliconcompiler/apps/sc_server.py +9 -14
  11. siliconcompiler/apps/sc_show.py +6 -5
  12. siliconcompiler/apps/smake.py +130 -94
  13. siliconcompiler/apps/utils/replay.py +4 -7
  14. siliconcompiler/apps/utils/summarize.py +3 -5
  15. siliconcompiler/asic.py +420 -0
  16. siliconcompiler/checklist.py +25 -2
  17. siliconcompiler/cmdlineschema.py +534 -0
  18. siliconcompiler/constraints/asic_component.py +2 -2
  19. siliconcompiler/constraints/asic_pins.py +2 -2
  20. siliconcompiler/constraints/asic_timing.py +3 -3
  21. siliconcompiler/core.py +7 -32
  22. siliconcompiler/data/templates/tcl/manifest.tcl.j2 +8 -0
  23. siliconcompiler/dependencyschema.py +89 -31
  24. siliconcompiler/design.py +176 -207
  25. siliconcompiler/filesetschema.py +250 -0
  26. siliconcompiler/flowgraph.py +274 -95
  27. siliconcompiler/fpga.py +124 -1
  28. siliconcompiler/library.py +218 -20
  29. siliconcompiler/metric.py +233 -20
  30. siliconcompiler/package/__init__.py +271 -50
  31. siliconcompiler/package/git.py +92 -16
  32. siliconcompiler/package/github.py +108 -12
  33. siliconcompiler/package/https.py +79 -16
  34. siliconcompiler/packageschema.py +88 -7
  35. siliconcompiler/pathschema.py +31 -2
  36. siliconcompiler/pdk.py +566 -1
  37. siliconcompiler/project.py +1095 -94
  38. siliconcompiler/record.py +38 -1
  39. siliconcompiler/remote/__init__.py +5 -2
  40. siliconcompiler/remote/client.py +11 -6
  41. siliconcompiler/remote/schema.py +5 -23
  42. siliconcompiler/remote/server.py +41 -54
  43. siliconcompiler/report/__init__.py +3 -3
  44. siliconcompiler/report/dashboard/__init__.py +48 -14
  45. siliconcompiler/report/dashboard/cli/__init__.py +99 -21
  46. siliconcompiler/report/dashboard/cli/board.py +364 -179
  47. siliconcompiler/report/dashboard/web/__init__.py +90 -12
  48. siliconcompiler/report/dashboard/web/components/__init__.py +219 -240
  49. siliconcompiler/report/dashboard/web/components/flowgraph.py +49 -26
  50. siliconcompiler/report/dashboard/web/components/graph.py +139 -100
  51. siliconcompiler/report/dashboard/web/layouts/__init__.py +29 -1
  52. siliconcompiler/report/dashboard/web/layouts/_common.py +38 -2
  53. siliconcompiler/report/dashboard/web/layouts/vertical_flowgraph.py +39 -26
  54. siliconcompiler/report/dashboard/web/layouts/vertical_flowgraph_node_tab.py +50 -50
  55. siliconcompiler/report/dashboard/web/layouts/vertical_flowgraph_sac_tabs.py +49 -46
  56. siliconcompiler/report/dashboard/web/state.py +141 -14
  57. siliconcompiler/report/dashboard/web/utils/__init__.py +79 -16
  58. siliconcompiler/report/dashboard/web/utils/file_utils.py +74 -11
  59. siliconcompiler/report/dashboard/web/viewer.py +25 -1
  60. siliconcompiler/report/report.py +5 -2
  61. siliconcompiler/report/summary_image.py +29 -11
  62. siliconcompiler/scheduler/__init__.py +9 -1
  63. siliconcompiler/scheduler/docker.py +79 -1
  64. siliconcompiler/scheduler/run_node.py +35 -19
  65. siliconcompiler/scheduler/scheduler.py +208 -24
  66. siliconcompiler/scheduler/schedulernode.py +372 -46
  67. siliconcompiler/scheduler/send_messages.py +77 -29
  68. siliconcompiler/scheduler/slurm.py +76 -12
  69. siliconcompiler/scheduler/taskscheduler.py +140 -20
  70. siliconcompiler/schema/__init__.py +0 -2
  71. siliconcompiler/schema/baseschema.py +194 -38
  72. siliconcompiler/schema/journal.py +7 -4
  73. siliconcompiler/schema/namedschema.py +16 -10
  74. siliconcompiler/schema/parameter.py +55 -9
  75. siliconcompiler/schema/parametervalue.py +60 -0
  76. siliconcompiler/schema/safeschema.py +25 -2
  77. siliconcompiler/schema/schema_cfg.py +5 -5
  78. siliconcompiler/schema/utils.py +2 -2
  79. siliconcompiler/schema_obj.py +20 -3
  80. siliconcompiler/tool.py +979 -302
  81. siliconcompiler/tools/bambu/__init__.py +41 -0
  82. siliconcompiler/tools/builtin/concatenate.py +2 -2
  83. siliconcompiler/tools/builtin/minimum.py +2 -1
  84. siliconcompiler/tools/builtin/mux.py +2 -1
  85. siliconcompiler/tools/builtin/nop.py +2 -1
  86. siliconcompiler/tools/builtin/verify.py +2 -1
  87. siliconcompiler/tools/klayout/__init__.py +95 -0
  88. siliconcompiler/tools/openroad/__init__.py +289 -0
  89. siliconcompiler/tools/openroad/scripts/apr/preamble.tcl +3 -0
  90. siliconcompiler/tools/openroad/scripts/apr/sc_detailed_route.tcl +7 -2
  91. siliconcompiler/tools/openroad/scripts/apr/sc_global_route.tcl +8 -4
  92. siliconcompiler/tools/openroad/scripts/apr/sc_init_floorplan.tcl +9 -5
  93. siliconcompiler/tools/openroad/scripts/common/write_images.tcl +5 -1
  94. siliconcompiler/tools/slang/__init__.py +1 -1
  95. siliconcompiler/tools/slang/elaborate.py +2 -1
  96. siliconcompiler/tools/vivado/scripts/sc_run.tcl +1 -1
  97. siliconcompiler/tools/vivado/scripts/sc_syn_fpga.tcl +8 -1
  98. siliconcompiler/tools/vivado/syn_fpga.py +6 -0
  99. siliconcompiler/tools/vivado/vivado.py +35 -2
  100. siliconcompiler/tools/vpr/__init__.py +150 -0
  101. siliconcompiler/tools/yosys/__init__.py +369 -1
  102. siliconcompiler/tools/yosys/scripts/procs.tcl +0 -1
  103. siliconcompiler/toolscripts/_tools.json +5 -10
  104. siliconcompiler/utils/__init__.py +66 -0
  105. siliconcompiler/utils/flowgraph.py +2 -2
  106. siliconcompiler/utils/issue.py +2 -1
  107. siliconcompiler/utils/logging.py +14 -0
  108. siliconcompiler/utils/multiprocessing.py +256 -0
  109. siliconcompiler/utils/showtools.py +10 -0
  110. {siliconcompiler-0.34.2.dist-info → siliconcompiler-0.34.3.dist-info}/METADATA +5 -5
  111. {siliconcompiler-0.34.2.dist-info → siliconcompiler-0.34.3.dist-info}/RECORD +115 -118
  112. {siliconcompiler-0.34.2.dist-info → siliconcompiler-0.34.3.dist-info}/entry_points.txt +3 -0
  113. siliconcompiler/schema/cmdlineschema.py +0 -250
  114. siliconcompiler/toolscripts/rhel8/install-slang.sh +0 -40
  115. siliconcompiler/toolscripts/rhel9/install-slang.sh +0 -40
  116. siliconcompiler/toolscripts/ubuntu20/install-slang.sh +0 -47
  117. siliconcompiler/toolscripts/ubuntu22/install-slang.sh +0 -37
  118. siliconcompiler/toolscripts/ubuntu24/install-slang.sh +0 -37
  119. {siliconcompiler-0.34.2.dist-info → siliconcompiler-0.34.3.dist-info}/WHEEL +0 -0
  120. {siliconcompiler-0.34.2.dist-info → siliconcompiler-0.34.3.dist-info}/licenses/LICENSE +0 -0
  121. {siliconcompiler-0.34.2.dist-info → siliconcompiler-0.34.3.dist-info}/top_level.txt +0 -0
siliconcompiler/scheduler/send_messages.py

@@ -1,3 +1,12 @@
+"""
+A module for sending email notifications about SiliconCompiler job events.
+
+This module provides functionality to send detailed email updates at various
+stages of a compilation flow (e.g., on begin, failure, or a final summary).
+It loads SMTP server credentials from a configuration file, constructs
+HTML-formatted emails with relevant job data and attachments (logs, images),
+and sends them to specified recipients.
+"""
 from siliconcompiler.utils import default_email_credentials_file, get_file_template
 import smtplib
 from email.mime.multipart import MIMEMultipart
@@ -23,6 +32,19 @@ with open(api_dir / 'email_credentials.json') as schema:
 
 
 def __load_config(chip):
+    """
+    Loads and validates email credentials from the default configuration file.
+
+    This function locates the email credentials JSON file, loads its content,
+    and validates it against a predefined JSON schema.
+
+    Args:
+        chip (Chip): The Chip object, used for logging.
+
+    Returns:
+        dict: A dictionary containing the validated email credentials. Returns
+            an empty dictionary if the file is not found or is invalid.
+    """
     path = default_email_credentials_file()
     if not os.path.exists(path):
         chip.logger.warning(f'Email credentials are not available: {path}')
@@ -39,6 +61,23 @@ def __load_config(chip):
 
 
 def send(chip, msg_type, step, index):
+    """
+    Constructs and sends an email notification for a specific job event.
+
+    This function checks if a notification is required for the given event type
+    based on the chip's configuration. If so, it assembles an email with a
+    subject, HTML body, and relevant attachments (logs or images) and sends
+    it via the configured SMTP server.
+
+    Args:
+        chip (Chip): The Chip object containing all run data and configuration.
+        msg_type (str): The type of event triggering the message (e.g., 'begin',
+            'fail', 'summary').
+        step (str): The step name associated with the event. Can be None for
+            global events.
+        index (str): The index associated with the event. Can be None for
+            global events.
+    """
     chip_step, chip_index = step, index
     if step is None:
         chip_step = Schema.GLOBAL_KEY
@@ -82,6 +121,7 @@ def send(chip, msg_type, step, index):
 
     if cred["max_file_size"] > 0:
         if msg_type == "summary":
+            # Handle summary message: attach layout image and metrics summary
            layout_img = report_utils._find_summary_image(chip)
            if layout_img and os.path.isfile(layout_img):
                with open(layout_img, 'rb') as img_file:
@@ -109,6 +149,7 @@ def send(chip, msg_type, step, index):
                 metrics_unit=metrics_unit,
                 metric_keys=metrics_to_show)
         else:
+            # Handle general node message: attach log files and node-specific data
             # Attach logs
             for log in (f'sc_{step}_{index}.log', f'{step}.log'):
                 log_file = f'{chip.getworkdir(step=step, index=index)}/{log}'
@@ -125,43 +166,48 @@ def send(chip, msg_type, step, index):
                                           filename=f'{log_name}.txt')
                 msg.attach(log_attach)
 
-        records = {}
-        for record in chip.getkeys('record'):
-            value = None
-            if chip.get('record', record, field='pernode').is_never():
-                value = chip.get('record', record)
-            else:
-                value = chip.get('record', record, step=step, index=index)
-
-            if value is not None:
-                records[record] = value
-
-        nodes, errors, metrics, metrics_unit, metrics_to_show, _ = \
-            report_utils._collect_data(chip, flow=flow, flowgraph_nodes=[(step, index)])
-
-        status = chip.get('record', 'status', step=step, index=index)
-
-        text_msg = get_file_template('email/general.j2').render(
-            design=chip.design,
-            job=jobname,
-            step=step,
-            index=index,
-            status=status,
-            records=records,
-            nodes=nodes,
-            errors=errors,
-            metrics=metrics,
-            metrics_unit=metrics_unit,
-            metric_keys=metrics_to_show)
+            # Collect records for the specific node
+            records = {}
+            for record in chip.getkeys('record'):
+                value = None
+                if chip.get('record', record, field='pernode').is_never():
+                    value = chip.get('record', record)
+                else:
+                    value = chip.get('record', record, step=step, index=index)
+
+                if value is not None:
+                    records[record] = value
+
+            # Collect metrics for the specific node
+            nodes, errors, metrics, metrics_unit, metrics_to_show, _ = \
+                report_utils._collect_data(chip, flow=flow, flowgraph_nodes=[(step, index)])
+
+            status = chip.get('record', 'status', step=step, index=index)
+
+            # Render the general email template
+            text_msg = get_file_template('email/general.j2').render(
+                design=chip.design,
+                job=jobname,
+                step=step,
+                index=index,
+                status=status,
+                records=records,
+                nodes=nodes,
+                errors=errors,
+                metrics=metrics,
+                metrics_unit=metrics_unit,
+                metric_keys=metrics_to_show)
 
     body = MIMEText(text_msg, 'html')
     msg.attach(body)
 
+    # Determine whether to use SSL for the SMTP connection
    if cred['ssl']:
        smtp_use = smtplib.SMTP_SSL
    else:
        smtp_use = smtplib.SMTP
 
+    # Connect to the SMTP server and send the email
    with smtp_use(cred["server"], cred["port"]) as smtp_server:
        do_send = False
        try:
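
Note: the `cred['ssl']` switch above picks between `smtplib.SMTP_SSL` and `smtplib.SMTP` before the connection is opened. A minimal sketch of that pattern, assuming hypothetical `username`/`password` keys in the credentials dictionary (the actual key names in email_credentials.json are not shown in this diff):

```python
import smtplib
from email.mime.multipart import MIMEMultipart


def send_notification(cred: dict, msg: MIMEMultipart, recipients: list):
    # Pick the SSL or plaintext client based on the credentials flag,
    # mirroring the cred['ssl'] branch in send() above
    smtp_cls = smtplib.SMTP_SSL if cred.get("ssl") else smtplib.SMTP
    with smtp_cls(cred["server"], cred["port"]) as server:
        # Hypothetical auth fields; log in only when both are present
        if cred.get("username") and cred.get("password"):
            server.login(cred["username"], cred["password"])
        server.sendmail(cred["username"], recipients, msg.as_string())
```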
@@ -180,10 +226,12 @@
 
 
 if __name__ == "__main__":
+    # Example usage for testing the send function
     from siliconcompiler import Chip
     from siliconcompiler.targets import freepdk45_demo
     chip = Chip('test')
     chip.use(freepdk45_demo)
     chip.set('option', 'scheduler', 'msgevent', 'ALL')
-    # chip.set('option', 'scheduler', 'msgcontact', 'fillin')
+    # To test, uncomment the following line and fill in a valid email address
+    # chip.set('option', 'scheduler', 'msgcontact', 'your.email@example.com')
     send(chip, "BEGIN", "import", "0")
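
The `__main__` block above doubles as usage documentation. A user-side sketch of enabling these notifications for a real run (the option keys match the diff; the design name and recipient address are placeholders):

```python
from siliconcompiler import Chip
from siliconcompiler.targets import freepdk45_demo

chip = Chip('heartbeat')                            # placeholder design name
chip.use(freepdk45_demo)
chip.set('option', 'scheduler', 'msgevent', 'ALL')  # notify on every event type
chip.set('option', 'scheduler', 'msgcontact', 'you@example.com')
chip.run()  # emails are sent as nodes begin, fail, and at the final summary
```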
siliconcompiler/scheduler/slurm.py

@@ -1,21 +1,37 @@
+import json
 import os
 import shlex
-import subprocess
+import shutil
 import stat
+import subprocess
 import uuid
-import json
-import shutil
 
 import os.path
 
 from siliconcompiler import utils
 from siliconcompiler.package import RemoteResolver
 from siliconcompiler.flowgraph import RuntimeFlowgraph
-from siliconcompiler.scheduler.schedulernode import SchedulerNode
+from siliconcompiler.scheduler import SchedulerNode
 
 
 class SlurmSchedulerNode(SchedulerNode):
+    """A SchedulerNode implementation for running tasks on a Slurm cluster.
+
+    This class extends the base SchedulerNode to handle the specifics of
+    submitting a compilation step as a job to a Slurm workload manager.
+    It prepares a run script, a manifest, and uses the 'srun' command
+    to execute the step on a compute node.
+    """
+
     def __init__(self, chip, step, index, replay=False):
+        """Initializes a SlurmSchedulerNode.
+
+        Args:
+            chip (Chip): The parent Chip object.
+            step (str): The step name in the flowgraph.
+            index (str): The index for the step.
+            replay (bool): If True, sets up the node to replay a previous run.
+        """
         super().__init__(chip, step, index, replay=replay)
 
         # Get the temporary UID associated with this job run.
@@ -26,10 +42,22 @@ class SlurmSchedulerNode(SchedulerNode):
 
     @property
     def jobhash(self):
+        """str: A unique hash identifying the entire job run."""
         return self.__job_hash
 
     @staticmethod
     def init(chip):
+        """
+        A static pre-processing hook for the Slurm scheduler.
+
+        This method checks if the compilation flow starts from an entry node.
+        If so, it calls `chip.collect()` to gather all necessary source files
+        into a central location before any remote jobs are submitted. This
+        ensures that compute nodes have access to all required source files.
+
+        Args:
+            chip (Chip): The Chip object to perform pre-processing on.
+        """
         if os.path.exists(chip._getcollectdir()):
             # nothing to do
             return
@@ -53,26 +81,61 @@ class SlurmSchedulerNode(SchedulerNode):
 
     @property
     def is_local(self):
+        """bool: Returns False, as this node executes on a remote cluster."""
         return False
 
     @staticmethod
     def get_configuration_directory(chip):
-        '''
-        Helper function to get the configuration directory for the scheduler
-        '''
+        """Gets the directory for storing Slurm-related configuration files.
+
+        Args:
+            chip (Chip): The Chip object.
+
+        Returns:
+            str: The path to the configuration directory.
+        """
 
         return os.path.join(chip.getworkdir(), 'sc_configs')
 
     @staticmethod
     def get_job_name(jobhash, step, index):
+        """Generates a unique job name for a Slurm job.
+
+        Args:
+            jobhash (str): The unique hash for the entire run.
+            step (str): The step name of the node.
+            index (str): The index of the node.
+
+        Returns:
+            str: A unique job name string.
+        """
         return f'{jobhash}_{step}_{index}'
 
     @staticmethod
     def get_runtime_file_name(jobhash, step, index, ext):
+        """Generates a standardized filename for runtime files.
+
+        Args:
+            jobhash (str): The unique hash for the entire run.
+            step (str): The step name of the node.
+            index (str): The index of the node.
+            ext (str): The file extension.
+
+        Returns:
+            str: A standardized filename.
+        """
         return f"{SlurmSchedulerNode.get_job_name(jobhash, step, index)}.{ext}"
 
     @staticmethod
     def get_slurm_partition():
+        """Determines a default Slurm partition by querying the cluster.
+
+        Returns:
+            str: The name of the first available Slurm partition.
+
+        Raises:
+            RuntimeError: If the 'sinfo' command fails.
+        """
         partitions = subprocess.run(['sinfo', '--json'],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
@@ -86,12 +149,13 @@
         return sinfo['nodes'][0]['partitions'][0]
 
     def run(self):
-        '''
-        Helper method to run an individual step on a slurm cluster.
+        """
+        Runs the node's task as a job on a Slurm cluster.
 
-        Blocks until the compute node
-        finishes processing this step, and it sets the active/error bits.
-        '''
+        This method prepares all necessary files (manifest, run script),
+        constructs an 'srun' command, and submits the job. It then blocks
+        until the job completes on the compute node.
+        """
 
         self._init_run_logger()
 
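The partition lookup documented above boils down to one `sinfo --json` call. A sketch under the assumptions visible in this diff (the JSON layout `nodes[0].partitions[0]` and the RuntimeError on failure come from the shown code; the exact error message is illustrative):

```python
import json
import subprocess


def get_default_partition() -> str:
    # Query the cluster for its node/partition layout, as in get_slurm_partition()
    result = subprocess.run(['sinfo', '--json'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    if result.returncode != 0:
        # Illustrative message; the real method raises RuntimeError on sinfo failure
        raise RuntimeError('unable to determine slurm partitions')
    sinfo = json.loads(result.stdout.decode())
    return sinfo['nodes'][0]['partitions'][0]


# Naming follows the f-strings in get_job_name()/get_runtime_file_name():
# jobhash 'a1b2c3', step 'syn', index '0' -> 'a1b2c3_syn_0' / 'a1b2c3_syn_0.sh'
```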
siliconcompiler/scheduler/taskscheduler.py

@@ -14,10 +14,18 @@ from siliconcompiler.flowgraph import RuntimeFlowgraph
 from siliconcompiler.package import Resolver
 from siliconcompiler.schema import Journal
 
-from siliconcompiler.utils.logging import SCBlankLoggerFormatter
+from siliconcompiler.utils.logging import SCBlankLoggerFormatter, SCBlankColorlessLoggerFormatter
+from siliconcompiler.utils.multiprocessing import MPManager
 
 
 class TaskScheduler:
+    """A class for managing the execution of individual tasks in a flowgraph.
+
+    This class is responsible for the fine-grained scheduling of tasks,
+    handling multiprocessing, resource allocation (cores/threads), and
+    dependency checking. It operates on a set of pending tasks defined by the
+    main Scheduler and executes them in a loop until the flow is complete.
+    """
     __callbacks = {
         "pre_run": lambda chip: None,
         "pre_node": lambda chip, step, index: None,
@@ -27,11 +35,30 @@ class TaskScheduler:
 
     @staticmethod
     def register_callback(hook, func):
+        """Registers a callback function to be executed at a specific hook point.
+
+        Valid hooks are 'pre_run', 'pre_node', 'post_node', and 'post_run'.
+
+        Args:
+            hook (str): The name of the hook to register the callback for.
+            func (function): The function to be called. It should accept the
+                chip object and, for node hooks, the step and index as arguments.
+
+        Raises:
+            ValueError: If the specified hook is not valid.
+        """
         if hook not in TaskScheduler.__callbacks:
             raise ValueError(f"{hook} is not a valid callback")
         TaskScheduler.__callbacks[hook] = func
 
     def __init__(self, chip, tasks):
+        """Initializes the TaskScheduler.
+
+        Args:
+            chip (Chip): The Chip object containing the configuration.
+            tasks (dict): A dictionary of SchedulerNode objects keyed by
+                (step, index) tuples.
+        """
         self.__chip = chip
         self.__logger = self.__chip.logger
         self.__logger_console_handler = self.__chip._logger_console
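
The `register_callback` docstring spells out the hook contract. A small usage sketch (module path taken from this file's location; the callback body is illustrative):

```python
from siliconcompiler.scheduler.taskscheduler import TaskScheduler


def announce_node(chip, step, index):
    # pre_node/post_node hooks receive (chip, step, index); pre_run/post_run only chip
    chip.logger.info(f"starting {step}/{index}")


TaskScheduler.register_callback("pre_node", announce_node)
```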
@@ -55,7 +82,7 @@
             to_steps=self.__chip.get('option', 'to'),
             prune_nodes=self.__chip.get('option', 'prune'))
 
-        self.__log_queue = multiprocessing.Queue(-1)
+        self.__log_queue = MPManager.get_manager().Queue()
 
         self.__nodes = {}
         self.__startTimes = {}
@@ -64,6 +91,16 @@
         self.__create_nodes(tasks)
 
     def __create_nodes(self, tasks):
+        """
+        Private helper to prepare all pending tasks for execution.
+
+        This method iterates through the tasks identified by the main Scheduler,
+        creates a multiprocessing.Process for each one, and sets up pipes for
+        inter-process communication (primarily for logging and package resolution).
+
+        Args:
+            tasks (dict): A dictionary of SchedulerNode objects.
+        """
         runtime = RuntimeFlowgraph(
             self.__flow,
             from_steps=set([step for step, _ in self.__flow.get_entry_nodes()]),
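
`__create_nodes` wires one `multiprocessing.Process` plus a `Pipe` per pending task. A generic, self-contained sketch of that pattern (not SiliconCompiler's actual classes):

```python
import multiprocessing


def _worker(pipe):
    # A child process reports results back through its end of the pipe
    pipe.send({"status": "success"})


if __name__ == "__main__":
    parent_pipe, child_pipe = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=_worker, args=(child_pipe,))
    proc.start()
    print(parent_pipe.recv())  # {'status': 'success'}
    proc.join()
```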
@@ -75,25 +112,24 @@
             if self.__record.get('status', step=step, index=index) != NodeStatus.PENDING:
                 continue
 
-            with tasks[(step, index)].runtime():
-                threads = tasks[(step, index)].threads
-                if not threads:
-                    threads = self.__max_threads
-                threads = max(1, min(threads, self.__max_threads))
-
             task = {
                 "name": f"{step}/{index}",
                 "inputs": runtime.get_node_inputs(step, index, record=self.__record),
                 "proc": None,
                 "parent_pipe": None,
-                "threads": threads,
+                "threads": None,
                 "running": False,
-                "manifest": os.path.join(self.__chip.getworkdir(step=step, index=index),
-                                         'outputs',
-                                         f'{self.__chip.design}.pkg.json'),
+                "manifest": None,
                 "node": tasks[(step, index)]
             }
 
+            with tasks[(step, index)].runtime():
+                threads = tasks[(step, index)].threads
+                task["manifest"] = tasks[(step, index)].get_manifest()
+                if not threads:
+                    threads = self.__max_threads
+                task["threads"] = max(1, min(threads, self.__max_threads))
+
             task["parent_pipe"], pipe = multiprocessing.Pipe()
             task["node"].set_queue(pipe, self.__log_queue)
@@ -105,14 +141,29 @@
         for init_func in init_funcs:
             init_func(self.__chip)
 
-    def run(self):
+    def run(self, job_log_handler):
+        """
+        The main entry point for the task scheduling loop.
+
+        This method sets up a listener to handle logs from child processes,
+        calls the 'pre_run' callback, enters the main execution loop, and
+        handles cleanup and the 'post_run' callback.
+
+        Args:
+            job_log_handler (logging.FileHandler): The handler for the main job log file.
+        """
         # Call this in case this was invoked without __main__
         multiprocessing.freeze_support()
 
         # Handle logs across threads
-        log_listener = QueueListener(self.__log_queue, self.__logger_console_handler)
+        log_listener = QueueListener(self.__log_queue, self.__logger_console_handler,
+                                     job_log_handler)
         console_format = self.__logger_console_handler.formatter
+        file_formatter = job_log_handler.formatter
         self.__logger_console_handler.setFormatter(SCBlankLoggerFormatter())
+        job_log_handler.setFormatter(SCBlankColorlessLoggerFormatter())
+        self.__logger.removeHandler(job_log_handler)
+
         log_listener.start()
 
         # Update dashboard before run begins
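
`run()` funnels child-process log records through a shared queue into a `QueueListener` that now feeds both the console handler and the job log handler. A self-contained sketch of the same pattern; the Manager-backed queue mirrors the `MPManager.get_manager().Queue()` change above, since a proxy queue can be shared safely with spawned children:

```python
import logging
import multiprocessing
from logging.handlers import QueueHandler, QueueListener


def _child(log_queue):
    # Each child pushes records onto the shared queue instead of real handlers
    logger = logging.getLogger("sc-child")
    logger.addHandler(QueueHandler(log_queue))
    logger.setLevel(logging.INFO)
    logger.info("hello from a node process")


if __name__ == "__main__":
    log_queue = multiprocessing.Manager().Queue()
    # The parent-side listener forwards queued records to its handlers
    listener = QueueListener(log_queue, logging.StreamHandler())
    listener.start()
    proc = multiprocessing.Process(target=_child, args=(log_queue,))
    proc.start()
    proc.join()
    listener.stop()
```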
@@ -123,18 +174,30 @@
         try:
             self.__run_loop()
+            TaskScheduler.__callbacks["post_run"](self.__chip)
         except KeyboardInterrupt:
             # exit immediately
             log_listener.stop()
             sys.exit(0)
-
-        TaskScheduler.__callbacks["post_run"](self.__chip)
-
-        # Cleanup logger
-        log_listener.stop()
-        self.__logger_console_handler.setFormatter(console_format)
+        finally:
+            # Cleanup logger
+            try:
+                log_listener.stop()
+            except AttributeError:
+                # Logger already stopped
+                pass
+            self.__logger_console_handler.setFormatter(console_format)
+            job_log_handler.setFormatter(file_formatter)
+            self.__logger.addHandler(job_log_handler)
 
     def __run_loop(self):
+        """
+        The core execution loop of the scheduler.
+
+        This loop continues as long as there are nodes running or waiting to
+        run. In each iteration, it processes completed nodes and launches new
+        ones whose dependencies have been met.
+        """
         self.__startTimes = {None: time.time()}
 
         while len(self.get_nodes_waiting_to_run()) > 0 or len(self.get_running_nodes()) > 0:
@@ -164,9 +227,19 @@
             self.__nodes[running_nodes[0]]["proc"].join(timeout=self.__dwellTime)
 
     def get_nodes(self):
+        """Gets a sorted list of all nodes managed by this scheduler.
+
+        Returns:
+            list: A list of (step, index) tuples for all nodes.
+        """
         return sorted(self.__nodes.keys())
 
     def get_running_nodes(self):
+        """Gets a sorted list of all nodes that are currently running.
+
+        Returns:
+            list: A list of (step, index) tuples for running nodes.
+        """
         nodes = []
         for node, info in self.__nodes.items():
             if info["running"]:
@@ -174,6 +247,11 @@
         return sorted(nodes)
 
     def get_nodes_waiting_to_run(self):
+        """Gets a sorted list of all nodes that are pending execution.
+
+        Returns:
+            list: A list of (step, index) tuples for pending nodes.
+        """
         nodes = []
         for node, info in self.__nodes.items():
             if not info["running"] and info["proc"]:
@@ -181,6 +259,17 @@
         return sorted(nodes)
 
     def __process_completed_nodes(self):
+        """
+        Private helper to check for and process completed nodes.
+
+        This method iterates through running nodes, checks if their process has
+        terminated, and if so, merges their results (manifest and package cache)
+        back into the main chip object. It updates the node's status based on
+        the process exit code.
+
+        Returns:
+            bool: True if any node's status changed, False otherwise.
+        """
         changed = False
         for node in self.get_running_nodes():
             info = self.__nodes[node]
@@ -225,6 +314,18 @@
         return changed
 
     def __allow_start(self, node):
+        """
+        Private helper to check if a node is allowed to start based on resources.
+
+        This method checks if launching a new node would exceed the configured
+        maximum number of parallel jobs or the total available CPU cores.
+
+        Args:
+            node (tuple): The (step, index) of the node to check.
+
+        Returns:
+            bool: True if the node can be launched, False otherwise.
+        """
         info = self.__nodes[node]
 
         if not info["node"].is_local:
@@ -247,6 +348,16 @@
         return True
 
     def __lanuch_nodes(self):
+        """
+        Private helper to launch new nodes whose dependencies are met.
+
+        This method iterates through pending nodes, checks if all their input
+        nodes have completed successfully, and if system resources are available.
+        If all conditions are met, it starts the node's process.
+
+        Returns:
+            bool: True if any new node was launched, False otherwise.
+        """
         changed = False
         for node in self.get_nodes_waiting_to_run():
             # TODO: breakpoint logic:
@@ -298,6 +409,15 @@
         return changed
 
     def check(self):
+        """
+        Checks if the flow completed successfully.
+
+        This method verifies that all nodes designated as exit points in the
+        flowgraph have been successfully completed.
+
+        Raises:
+            RuntimeError: If any final steps in the flow were not reached.
+        """
         exit_steps = set([step for step, _ in self.__runtime_flow.get_exit_nodes()])
         completed_steps = set([step for step, _ in
                                self.__runtime_flow.get_completed_nodes(record=self.__record)])
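
The `check()` logic reduces to a set difference: every exit step of the runtime flowgraph must appear among the completed steps. A hedged sketch (the real method derives both sets from the flowgraph and record; the message text is illustrative):

```python
def check_flow(exit_steps: set, completed_steps: set) -> None:
    missing = exit_steps - completed_steps
    if missing:
        raise RuntimeError(f"These final steps could not be reached: {sorted(missing)}")
```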
siliconcompiler/schema/__init__.py

@@ -3,7 +3,6 @@ from .journal import Journal
 from .safeschema import SafeSchema
 from .editableschema import EditableSchema
 from .baseschema import BaseSchema
-from .cmdlineschema import CommandLineSchema
 from .namedschema import NamedSchema
 
 from .schema_cfg import SCHEMA_VERSION
@@ -13,7 +12,6 @@ __all__ = [
     "BaseSchema",
     "SafeSchema",
     "EditableSchema",
-    "CommandLineSchema",
     "NamedSchema",
     "Parameter",
     "Scope",