siliconcompiler 0.33.1__py3-none-any.whl → 0.34.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (59)
  1. siliconcompiler/__init__.py +2 -0
  2. siliconcompiler/_metadata.py +1 -1
  3. siliconcompiler/apps/sc_issue.py +5 -3
  4. siliconcompiler/apps/sc_remote.py +0 -17
  5. siliconcompiler/apps/utils/replay.py +5 -5
  6. siliconcompiler/checklist.py +1 -1
  7. siliconcompiler/core.py +39 -48
  8. siliconcompiler/data/templates/replay/replay.sh.j2 +18 -1
  9. siliconcompiler/dependencyschema.py +392 -0
  10. siliconcompiler/design.py +664 -0
  11. siliconcompiler/flowgraph.py +32 -1
  12. siliconcompiler/metric.py +19 -0
  13. siliconcompiler/package/__init__.py +383 -223
  14. siliconcompiler/package/git.py +75 -77
  15. siliconcompiler/package/github.py +70 -97
  16. siliconcompiler/package/https.py +77 -93
  17. siliconcompiler/packageschema.py +260 -0
  18. siliconcompiler/pdk.py +2 -2
  19. siliconcompiler/record.py +57 -5
  20. siliconcompiler/remote/client.py +61 -13
  21. siliconcompiler/remote/server.py +109 -64
  22. siliconcompiler/report/dashboard/cli/board.py +1 -2
  23. siliconcompiler/scheduler/__init__.py +3 -1375
  24. siliconcompiler/scheduler/docker.py +268 -0
  25. siliconcompiler/scheduler/run_node.py +20 -19
  26. siliconcompiler/scheduler/scheduler.py +308 -0
  27. siliconcompiler/scheduler/schedulernode.py +934 -0
  28. siliconcompiler/scheduler/slurm.py +147 -163
  29. siliconcompiler/scheduler/taskscheduler.py +39 -52
  30. siliconcompiler/schema/__init__.py +3 -3
  31. siliconcompiler/schema/baseschema.py +256 -11
  32. siliconcompiler/schema/editableschema.py +4 -0
  33. siliconcompiler/schema/journal.py +210 -0
  34. siliconcompiler/schema/namedschema.py +31 -2
  35. siliconcompiler/schema/parameter.py +14 -1
  36. siliconcompiler/schema/parametervalue.py +1 -34
  37. siliconcompiler/schema/schema_cfg.py +211 -350
  38. siliconcompiler/tool.py +139 -37
  39. siliconcompiler/tools/_common/__init__.py +14 -11
  40. siliconcompiler/tools/builtin/concatenate.py +2 -2
  41. siliconcompiler/tools/builtin/verify.py +1 -2
  42. siliconcompiler/tools/openroad/scripts/common/procs.tcl +27 -25
  43. siliconcompiler/tools/slang/__init__.py +3 -2
  44. siliconcompiler/tools/vpr/route.py +69 -0
  45. siliconcompiler/tools/yosys/sc_synth_asic.tcl +0 -4
  46. siliconcompiler/toolscripts/_tools.json +13 -8
  47. siliconcompiler/toolscripts/ubuntu22/install-klayout.sh +4 -0
  48. siliconcompiler/toolscripts/ubuntu24/install-klayout.sh +4 -0
  49. siliconcompiler/utils/__init__.py +2 -23
  50. siliconcompiler/utils/flowgraph.py +5 -5
  51. siliconcompiler/utils/logging.py +2 -1
  52. {siliconcompiler-0.33.1.dist-info → siliconcompiler-0.34.0.dist-info}/METADATA +8 -6
  53. {siliconcompiler-0.33.1.dist-info → siliconcompiler-0.34.0.dist-info}/RECORD +57 -52
  54. {siliconcompiler-0.33.1.dist-info → siliconcompiler-0.34.0.dist-info}/WHEEL +1 -1
  55. siliconcompiler/scheduler/docker_runner.py +0 -254
  56. siliconcompiler/schema/journalingschema.py +0 -238
  57. {siliconcompiler-0.33.1.dist-info → siliconcompiler-0.34.0.dist-info}/entry_points.txt +0 -0
  58. {siliconcompiler-0.33.1.dist-info → siliconcompiler-0.34.0.dist-info}/licenses/LICENSE +0 -0
  59. {siliconcompiler-0.33.1.dist-info → siliconcompiler-0.34.0.dist-info}/top_level.txt +0 -0
siliconcompiler/remote/server.py
@@ -1,6 +1,7 @@
 # Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
 
 from aiohttp import web
+import copy
 import threading
 import json
 import logging as log
@@ -12,7 +13,8 @@ import sys
 import fastjsonschema
 from pathlib import Path
 from fastjsonschema import JsonSchemaException
-import io
+
+import os.path
 
 from siliconcompiler import Chip, Schema
 from siliconcompiler.schema import utils as schema_utils
@@ -20,8 +22,10 @@ from siliconcompiler._metadata import version as sc_version
 from siliconcompiler.schema import SCHEMA_VERSION as sc_schema_version
 from siliconcompiler.remote.schema import ServerSchema
 from siliconcompiler.remote import banner, JobStatus
-from siliconcompiler.scheduler.slurm import get_configuration_directory
+from siliconcompiler import NodeStatus as SCNodeStatus
+from siliconcompiler.remote import NodeStatus
 from siliconcompiler.flowgraph import RuntimeFlowgraph
+from siliconcompiler.scheduler.taskscheduler import TaskScheduler
 
 
 # Compile validation code for API request bodies.
@@ -81,7 +85,61 @@ class Server:
         self.schema = ServerSchema()
 
         # Set up a dictionary to track running jobs.
+        self.sc_jobs_lock = threading.Lock()
         self.sc_jobs = {}
+        self.sc_chip_lookup = {}
+
+        # Register callbacks
+        TaskScheduler.register_callback("pre_run", self.__run_start)
+        TaskScheduler.register_callback("pre_node", self.__node_start)
+        TaskScheduler.register_callback("post_node", self.__node_end)
+
+    def __run_start(self, chip):
+        flow = chip.get("option", "flow")
+        nodes = chip.schema.get("flowgraph", flow, field="schema").get_nodes()
+
+        with self.sc_jobs_lock:
+            job_hash = self.sc_chip_lookup[chip]["jobhash"]
+
+        start_tar = os.path.join(self.nfs_mount, job_hash, f'{job_hash}_None.tar.gz')
+        start_status = NodeStatus.SUCCESS
+        with tarfile.open(start_tar, "w:gz") as tf:
+            start_manifest = os.path.join(chip.getworkdir(), f"{chip.design}.pkg.json")
+            tf.add(start_manifest, arcname=os.path.relpath(start_manifest, self.nfs_mount))
+
+        with self.sc_jobs_lock:
+            job_name = self.sc_chip_lookup[chip]["name"]
+
+            self.sc_jobs[job_name][None]["status"] = start_status
+
+            for step, index in nodes:
+                name = f"{step}{index}"
+                if name not in self.sc_jobs[job_name]:
+                    continue
+                self.sc_jobs[job_name][name]["status"] = \
+                    chip.get('record', 'status', step=step, index=index)
+
+    def __node_start(self, chip, step, index):
+        with self.sc_jobs_lock:
+            job_name = self.sc_chip_lookup[chip]["name"]
+            self.sc_jobs[job_name][f"{step}{index}"]["status"] = NodeStatus.RUNNING
+
+    def __node_end(self, chip, step, index):
+        with self.sc_jobs_lock:
+            job_hash = self.sc_chip_lookup[chip]["jobhash"]
+            job_name = self.sc_chip_lookup[chip]["name"]
+
+        chip = copy.deepcopy(chip)
+        chip.cwd = os.path.join(chip.get('option', 'builddir'), '..')
+        with tarfile.open(os.path.join(self.nfs_mount,
+                                       job_hash,
+                                       f'{job_hash}_{step}{index}.tar.gz'),
+                          mode='w:gz') as tf:
+            chip._archive_node(tf, step=step, index=index, include="*")
+
+        with self.sc_jobs_lock:
+            self.sc_jobs[job_name][f"{step}{index}"]["status"] = \
+                chip.get('record', 'status', step=step, index=index)
 
     def run(self):
         if not os.path.exists(self.nfs_mount):
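
Note: with this hunk the server observes job progress through TaskScheduler's class-level callback hooks rather than by post-processing results after chip.run() returns. A minimal sketch of that hook pattern, with simplified names and a stubbed node body (the real TaskScheduler API may differ in detail):

    # Sketch of the pre_run/pre_node/post_node hook pattern used above.
    # MiniScheduler and execute-the-node are illustrative stand-ins, not
    # the actual siliconcompiler implementation.
    class MiniScheduler:
        _callbacks = {"pre_run": [], "pre_node": [], "post_node": []}

        @classmethod
        def register_callback(cls, event, hook):
            # Hooks accumulate per event and fire in registration order.
            cls._callbacks[event].append(hook)

        @classmethod
        def _fire(cls, event, *args):
            for hook in cls._callbacks[event]:
                hook(*args)

        def run(self, chip, nodes):
            self._fire("pre_run", chip)
            for step, index in nodes:
                self._fire("pre_node", chip, step, index)
                # ... execute the node here ...
                self._fire("post_node", chip, step, index)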
@@ -226,9 +284,6 @@
         # Remove 'remote' JSON config value to run locally on compute node.
         chip.set('option', 'remote', False)
 
-        # Write JSON config to shared compute storage.
-        os.makedirs(os.path.join(job_root, 'configs'), exist_ok=True)
-
         # Run the job with the configured clustering option. (Non-blocking)
         job_proc = threading.Thread(target=self.remote_sc,
                                     args=[
@@ -258,31 +313,13 @@
         job_hash = job_params['job_hash']
         node = job_params['node'] if 'node' in job_params else None
 
-        resp = web.StreamResponse(
-            status=200,
-            reason='OK',
-            headers={
-                'Content-Type': 'application/x-tar',
-                'Content-Disposition': f'attachment; filename="{job_hash}_{node}.tar.gz"'
-            },
-        )
-        await resp.prepare(request)
-
         zipfn = os.path.join(self.nfs_mount, job_hash, f'{job_hash}_{node}.tar.gz')
-        if not node:
-            with tarfile.open(zipfn, 'w:gz') as tar:
-                text = "Done"
-                metadata_file = io.BytesIO(text.encode('ascii'))
-                tarinfo = tarfile.TarInfo(f'{job_hash}/done')
-                tarinfo.size = metadata_file.getbuffer().nbytes
-                tar.addfile(tarinfo=tarinfo, fileobj=metadata_file)
+        if not os.path.exists(zipfn):
+            return web.json_response(
+                {'message': 'Could not find results for the requested job/node.'},
+                status=404)
 
-        with open(zipfn, 'rb') as zipf:
-            await resp.write(zipf.read())
-
-        await resp.write_eof()
-
-        return resp
+        return web.FileResponse(zipfn)
 
     ####################
     async def handle_delete_job(self, request):
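
Serving the archive with web.FileResponse delegates chunked streaming, Content-Length, and range handling to aiohttp instead of buffering the whole tarball in memory via StreamResponse and resp.write(). A stripped-down handler in the same shape (the URL layout and mount path here are illustrative; the real server takes job_hash/node from a validated JSON request body):

    import os.path
    from aiohttp import web

    NFS_MOUNT = "/nfs/sc_compute"  # assumed mount point for illustration

    async def handle_get_results(request):
        job_hash = request.match_info["job_hash"]
        node = request.match_info.get("node")  # None for the full job
        zipfn = os.path.join(NFS_MOUNT, job_hash, f"{job_hash}_{node}.tar.gz")
        if not os.path.exists(zipfn):
            return web.json_response(
                {"message": "Could not find results for the requested job/node."},
                status=404)
        # FileResponse streams the file from disk in chunks.
        return web.FileResponse(zipfn)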
@@ -300,9 +337,10 @@
         job_hash = job_params['job_hash']
 
         # Determine if the job is running.
-        for job in self.sc_jobs:
-            if job_hash in job:
-                return self.__response("Error: job is still running.", status=400)
+        with self.sc_jobs_lock:
+            for job in self.sc_jobs:
+                if job_hash in job:
+                    return self.__response("Error: job is still running.", status=400)
 
         # Delete job hash directory, only if it exists.
         # TODO: This assumes no malicious input.
@@ -342,16 +380,17 @@
 
         # Determine if the job is running.
         # TODO: Return information about individual flowgraph nodes.
-        if jobname in self.sc_jobs:
-            resp = {
-                'status': JobStatus.RUNNING,
-                'message': 'Job is currently running on the server.',
-            }
-        else:
-            resp = {
-                'status': JobStatus.COMPLETED,
-                'message': 'Job has no running steps.',
-            }
+        with self.sc_jobs_lock:
+            if jobname in self.sc_jobs:
+                resp = {
+                    'status': JobStatus.RUNNING,
+                    'message': self.sc_jobs[jobname],
+                }
+            else:
+                resp = {
+                    'status': JobStatus.COMPLETED,
+                    'message': 'Job has no running steps.',
+                }
         return web.json_response(resp)
 
     ####################
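
Because 'message' now carries the per-node status dictionary while a job runs, clients can display node-level progress instead of a single busy string. A hypothetical polling loop (the endpoint name, payload keys, and the 'running' literal are assumptions for illustration, not the sc client's actual API):

    import time
    import requests  # any HTTP client works; requests is used for brevity

    def poll_job(server, params):
        while True:
            resp = requests.post(f"{server}/check_progress/", json=params).json()
            if resp["status"] != "running":
                return resp
            # While running, 'message' maps each flowgraph node name to its
            # current status, e.g. {"syn0": {"status": "running"}, ...}.
            for node, info in resp["message"].items():
                print(f"{node}: {info['status']}")
            time.sleep(10)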
@@ -402,18 +441,39 @@
         # Assemble core job parameters.
         job_hash = chip.get('record', 'remoteid')
 
+        runtime = RuntimeFlowgraph(
+            chip.schema.get("flowgraph", chip.get('option', 'flow'), field='schema'),
+            from_steps=chip.get('option', 'from'),
+            to_steps=chip.get('option', 'to'),
+            prune_nodes=chip.get('option', 'prune'))
+
+        nodes = {}
+        nodes[None] = {
+            "status": SCNodeStatus.PENDING
+        }
+        for step, index in runtime.get_nodes():
+            status = chip.get('record', 'status', step=step, index=index)
+            if not status:
+                status = SCNodeStatus.PENDING
+            if SCNodeStatus.is_done(status):
+                status = NodeStatus.UPLOADED
+            nodes[f"{step}{index}"] = {
+                "status": status
+            }
+
         # Mark the job run as busy.
         sc_job_name = self.job_name(username, job_hash)
-        self.sc_jobs[sc_job_name] = 'busy'
+        with self.sc_jobs_lock:
+            self.sc_chip_lookup[chip] = {
+                "name": sc_job_name,
+                "jobhash": job_hash
+            }
+            self.sc_jobs[sc_job_name] = nodes
 
         build_dir = os.path.join(self.nfs_mount, job_hash)
         chip.set('option', 'builddir', build_dir)
         chip.set('option', 'remote', False)
 
-        job_cfg_dir = get_configuration_directory(chip)
-        os.makedirs(job_cfg_dir, exist_ok=True)
-        chip.write_manifest(f"{job_cfg_dir}/chip{chip.get('option', 'jobname')}.json")
-
         if self.get('option', 'cluster') == 'slurm':
             # Run the job with slurm clustering.
             chip.set('option', 'scheduler', 'name', 'slurm')
@@ -421,25 +481,10 @@
         # Run the job.
         chip.run()
 
-        # Archive each task.
-        runtime = RuntimeFlowgraph(
-            chip.schema.get("flowgraph", chip.get('option', 'flow'), field='schema'),
-            from_steps=chip.get('option', 'from'),
-            to_steps=chip.get('option', 'to'),
-            prune_nodes=chip.get('option', 'prune'))
-        for (step, index) in runtime.get_nodes():
-            chip.cwd = os.path.join(chip.get('option', 'builddir'), '..')
-            tf = tarfile.open(os.path.join(self.nfs_mount,
-                                           job_hash,
-                                           f'{job_hash}_{step}{index}.tar.gz'),
-                              mode='w:gz')
-            chip._archive_node(tf, step=step, index=index)
-            tf.close()
-
-        # (Email notifications can be sent here using your preferred API)
-
         # Mark the job hash as being done.
-        self.sc_jobs.pop(sc_job_name)
+        with self.sc_jobs_lock:
+            self.sc_jobs.pop(sc_job_name)
+            self.sc_chip_lookup.pop(chip)
 
     ####################
     def __auth_password(self, username, password):
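
Across these server.py hunks, every access to sc_jobs/sc_chip_lookup is wrapped in sc_jobs_lock, since the scheduler callbacks fire from worker threads while the aiohttp handlers read the same dictionaries. Reduced to its essentials, the bookkeeping follows this pattern (a simplification for illustration, not the server's actual class):

    import threading

    class JobRegistry:
        """Thread-safe job table shared by scheduler threads and handlers."""
        def __init__(self):
            self._lock = threading.Lock()
            self._jobs = {}

        def start(self, name, nodes):
            with self._lock:
                self._jobs[name] = nodes

        def set_status(self, name, node, status):
            with self._lock:
                self._jobs[name][node]["status"] = status

        def finish(self, name):
            with self._lock:
                self._jobs.pop(name, None)

        def is_running(self, name):
            with self._lock:
                return name in self._jobs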
siliconcompiler/report/dashboard/cli/board.py
@@ -414,7 +414,7 @@ class Board(metaclass=BoardSingleton):
         table.show_footer = False
         table.show_header = False
         for line in self._log_handler.get_lines(layout.log_height):
-            table.add_row(f"[bright_black]{line}[/]")
+            table.add_row(f"[white]{line}[/]")
         while table.row_count < layout.log_height:
             table.add_row("")
 
@@ -751,7 +751,6 @@ class Board(metaclass=BoardSingleton):
 
         runtime_flow = RuntimeFlowgraph(
             chip.schema.get("flowgraph", flow, field='schema'),
-            args=(chip.get('arg', 'step'), chip.get('arg', 'index')),
             to_steps=chip.get('option', 'to'),
             prune_nodes=chip.get('option', 'prune'))
         record = chip.schema.get("record", field='schema')