siliconcompiler 0.35.1__py3-none-any.whl → 0.35.3__py3-none-any.whl

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (57)
  1. siliconcompiler/_metadata.py +1 -1
  2. siliconcompiler/apps/sc_install.py +1 -1
  3. siliconcompiler/apps/sc_issue.py +8 -16
  4. siliconcompiler/apps/smake.py +106 -100
  5. siliconcompiler/checklist.py +349 -91
  6. siliconcompiler/design.py +8 -1
  7. siliconcompiler/flowgraph.py +419 -130
  8. siliconcompiler/flows/showflow.py +1 -2
  9. siliconcompiler/library.py +6 -5
  10. siliconcompiler/package/https.py +10 -5
  11. siliconcompiler/project.py +87 -37
  12. siliconcompiler/remote/client.py +17 -6
  13. siliconcompiler/scheduler/scheduler.py +284 -59
  14. siliconcompiler/scheduler/schedulernode.py +154 -102
  15. siliconcompiler/schema/__init__.py +3 -2
  16. siliconcompiler/schema/_metadata.py +1 -1
  17. siliconcompiler/schema/baseschema.py +210 -93
  18. siliconcompiler/schema/namedschema.py +21 -13
  19. siliconcompiler/schema/parameter.py +8 -1
  20. siliconcompiler/schema/safeschema.py +18 -7
  21. siliconcompiler/schema_support/dependencyschema.py +23 -3
  22. siliconcompiler/schema_support/filesetschema.py +10 -4
  23. siliconcompiler/schema_support/option.py +37 -34
  24. siliconcompiler/schema_support/pathschema.py +7 -2
  25. siliconcompiler/schema_support/record.py +5 -4
  26. siliconcompiler/targets/asap7_demo.py +4 -1
  27. siliconcompiler/tool.py +100 -8
  28. siliconcompiler/tools/__init__.py +10 -7
  29. siliconcompiler/tools/bambu/convert.py +19 -0
  30. siliconcompiler/tools/builtin/__init__.py +3 -2
  31. siliconcompiler/tools/builtin/filter.py +108 -0
  32. siliconcompiler/tools/builtin/importfiles.py +154 -0
  33. siliconcompiler/tools/execute/exec_input.py +4 -3
  34. siliconcompiler/tools/gtkwave/show.py +6 -2
  35. siliconcompiler/tools/icarus/compile.py +1 -0
  36. siliconcompiler/tools/klayout/scripts/klayout_show.py +1 -1
  37. siliconcompiler/tools/klayout/show.py +17 -5
  38. siliconcompiler/tools/openroad/screenshot.py +0 -1
  39. siliconcompiler/tools/openroad/scripts/common/screenshot.tcl +1 -1
  40. siliconcompiler/tools/openroad/scripts/common/write_images.tcl +2 -0
  41. siliconcompiler/tools/openroad/show.py +10 -0
  42. siliconcompiler/tools/surfer/show.py +7 -2
  43. siliconcompiler/tools/verilator/compile.py +2 -2
  44. siliconcompiler/tools/yosys/prepareLib.py +7 -2
  45. siliconcompiler/tools/yosys/syn_asic.py +20 -2
  46. siliconcompiler/toolscripts/_tools.json +5 -5
  47. siliconcompiler/toolscripts/rhel9/{install-yosys-wildebeest.sh → install-wildebeest.sh} +5 -5
  48. siliconcompiler/toolscripts/ubuntu22/{install-yosys-wildebeest.sh → install-wildebeest.sh} +5 -5
  49. siliconcompiler/toolscripts/ubuntu24/{install-yosys-wildebeest.sh → install-wildebeest.sh} +5 -5
  50. siliconcompiler/utils/__init__.py +1 -2
  51. siliconcompiler/utils/issue.py +38 -45
  52. {siliconcompiler-0.35.1.dist-info → siliconcompiler-0.35.3.dist-info}/METADATA +4 -4
  53. {siliconcompiler-0.35.1.dist-info → siliconcompiler-0.35.3.dist-info}/RECORD +57 -55
  54. {siliconcompiler-0.35.1.dist-info → siliconcompiler-0.35.3.dist-info}/WHEEL +0 -0
  55. {siliconcompiler-0.35.1.dist-info → siliconcompiler-0.35.3.dist-info}/entry_points.txt +0 -0
  56. {siliconcompiler-0.35.1.dist-info → siliconcompiler-0.35.3.dist-info}/licenses/LICENSE +0 -0
  57. {siliconcompiler-0.35.1.dist-info → siliconcompiler-0.35.3.dist-info}/top_level.txt +0 -0
@@ -1,14 +1,18 @@
 import io
 import logging
+import multiprocessing
 import os
 import re
 import shutil
 import sys
+import tempfile
 import traceback
 
 import os.path
 
-from typing import Union, TYPE_CHECKING
+from datetime import datetime
+
+from typing import Union, Dict, Optional, Tuple, List, TYPE_CHECKING
 
 from siliconcompiler import NodeStatus
 from siliconcompiler.schema import Journal
@@ -17,7 +21,8 @@ from siliconcompiler.scheduler import SchedulerNode
 from siliconcompiler.scheduler import SlurmSchedulerNode
 from siliconcompiler.scheduler import DockerSchedulerNode
 from siliconcompiler.scheduler import TaskScheduler
-from siliconcompiler.scheduler.schedulernode import SchedulerFlowReset
+from siliconcompiler.scheduler.schedulernode import SchedulerFlowReset, SchedulerNodeReset
+from siliconcompiler.tool import TaskExecutableNotFound, TaskExecutableNotReceived
 
 from siliconcompiler import utils
 from siliconcompiler.utils.logging import SCLoggerFormatter
@@ -56,7 +61,7 @@ class Scheduler:
             SCRuntimeError: If the specified flow is not defined or fails validation.
         """
         self.__project = project
-        self.__logger: logging.Logger = project.logger
+        self.__logger: logging.Logger = project.logger.getChild("scheduler")
         self.__name = project.name
 
         flow = self.__project.get("option", "flow")
@@ -95,13 +100,20 @@ class Scheduler:
         self.__record: "RecordSchema" = self.__project.get("record", field="schema")
         self.__metrics: "MetricSchema" = self.__project.get("metric", field="schema")
 
-        self.__tasks = {}
+        self.__tasks: Dict[Tuple[str, str], SchedulerNode] = {}
 
         # Create dummy handler
         self.__joblog_handler = logging.NullHandler()
         self.__org_job_name = self.__project.get("option", "jobname")
         self.__logfile = None
 
+    @property
+    def manifest(self) -> str:
+        """
+        Returns the path to the job manifest
+        """
+        return os.path.join(jobdir(self.__project), f"{self.__name}.pkg.json")
+
     @property
     def log(self) -> Union[None, str]:
         """
@@ -129,7 +141,7 @@ class Scheduler:
         Args:
             header (str): A header message to print before the status list.
         """
-        self.__logger.debug(f"#### {header}")
+        self.__logger.debug(f"#### {header} : {datetime.now().strftime('%H:%M:%S')}")
         for step, index in self.__flow.get_nodes():
             self.__logger.debug(f"({step}, {index}) -> "
                                 f"{self.__record.get('status', step=step, index=index)}")
@@ -261,6 +273,10 @@ class Scheduler:
             self.__run_setup()
             self.configure_nodes()
 
+            # Verify tool setups
+            if not self.__check_tool_versions():
+                raise SCRuntimeError("Tools did not meet version requirements")
+
             # Verify tool setups
             if not self.__check_tool_requirements():
                 raise SCRuntimeError("Tools requirements not met")
@@ -278,8 +294,7 @@ class Scheduler:
             self.__project._record_history()
 
             # Record final manifest
-            filepath = os.path.join(jobdir(self.__project), f"{self.__name}.pkg.json")
-            self.__project.write_manifest(filepath)
+            self.__project.write_manifest(self.manifest)
 
             send_messages.send(self.__project, 'summary', None, None)
         finally:
@@ -366,6 +381,8 @@ class Scheduler:
         nodes = self.__flow_runtime.get_nodes()
         error = False
 
+        manifest_name = os.path.basename(self.manifest)
+
         for (step, index) in nodes:
             # Get files we receive from input nodes.
             in_nodes = self.__flow_runtime.get_node_inputs(step, index, record=self.__record)
@@ -390,9 +407,7 @@ class Scheduler:
                     inputs = []
                     continue
 
-                design = self.__project.get("option", 'design')
-                manifest = f'{design}.pkg.json'
-                inputs = [inp for inp in os.listdir(in_step_out_dir) if inp != manifest]
+                inputs = [inp for inp in os.listdir(in_step_out_dir) if inp != manifest_name]
             else:
                 in_tool = self.__flow.get(in_step, in_index, "tool")
                 in_task = self.__flow.get(in_step, in_index, "task")
@@ -534,6 +549,8 @@ class Scheduler:
         for delfile in os.listdir(cur_job_dir):
             if delfile == "job.log" and recheck:
                 continue
+            if delfile.startswith("sc_") and recheck:
+                continue
             if os.path.isfile(os.path.join(cur_job_dir, delfile)):
                 os.remove(os.path.join(cur_job_dir, delfile))
             else:
@@ -548,19 +565,17 @@ class Scheduler:
         node's runtime context and invokes its clean_directory method to perform
         node-specific cleanup.
         """
-        protected_dirs = {os.path.basename(collectiondir(self.__project))}
-
         keep_steps = set([step for step, _ in self.__flow.get_nodes()])
         cur_job_dir = jobdir(self.__project)
         for step in os.listdir(cur_job_dir):
-            if step in protected_dirs:
+            if step.startswith("sc_"):
                 continue
             if not os.path.isdir(os.path.join(cur_job_dir, step)):
                 continue
             if step not in keep_steps:
                 shutil.rmtree(os.path.join(cur_job_dir, step))
         for step in os.listdir(cur_job_dir):
-            if step in protected_dirs:
+            if step.startswith("sc_"):
                 continue
             if not os.path.isdir(os.path.join(cur_job_dir, step)):
                 continue
@@ -576,32 +591,23 @@ class Scheduler:
         with self.__tasks[(step, index)].runtime():
             self.__tasks[(step, index)].clean_directory()
 
-    def configure_nodes(self) -> None:
-        """
-        Prepare and configure all flow nodes before execution, including loading prior run state,
-        running per-node setup, and marking nodes that require rerun.
+    def __configure_collect_previous_information(self) -> Dict[Tuple[str, str], "Project"]:
+        """Collects information from previous runs for nodes that won't be re-executed.
 
-        This method:
-        - Loads available node manifests from previous jobs and uses them to populate setup data
-          where appropriate.
-        - Runs each node's setup routine to initialize tools and runtime state.
-        - For nodes whose parameters or inputs have changed, marks them and all downstream nodes
-          as pending so they will be re-executed.
-        - Replays preserved journaled results for nodes that remain valid to reuse previous outputs.
-        - On a SchedulerFlowReset, forces a full build-directory recheck and marks every node
-          as pending.
-        - Persists the resulting manifest for the current job before returning.
+        This method identifies nodes that are marked for loading (not cleaning) and
+        are not part of the current 'from' execution path. For each of these
+        nodes, it attempts to load its manifest from a previous run.
+
+        Returns:
+            Dict[Tuple[str, str], "Project"]: A dictionary mapping (step, index)
+                tuples to their corresponding loaded Project objects from
+                previous runs.
         """
         from siliconcompiler import Project
+        self.__print_status("Start - collect")
 
-        from_nodes = []
         extra_setup_nodes = {}
-
-        journal = Journal.access(self.__project)
-        journal.start()
-
-        self.__print_status("Start")
-
+        from_nodes = []
         if self.__project.get('option', 'clean'):
             if self.__project.get("option", "from"):
                 from_nodes = self.__flow_runtime.get_entry_nodes()
@@ -620,16 +626,37 @@ class Scheduler:
                 # Node will be run so no need to load
                 continue
 
-            manifest = os.path.join(workdir(self.__project, step=step, index=index),
-                                    'outputs',
-                                    f'{self.__name}.pkg.json')
+            manifest = self.__tasks[(step, index)].get_manifest()
             if os.path.exists(manifest):
                 # ensure we setup these nodes again
                 try:
                     extra_setup_nodes[(step, index)] = Project.from_manifest(filepath=manifest)
-                except Exception:
+                except Exception as e:
+                    self.__logger.debug(f"Reading {manifest} caused: {e}")
                     pass
 
+        self.__print_status("End - collect")
+
+        return extra_setup_nodes
+
+    def __configure_run_setup(self, extra_setup_nodes: Dict[Tuple[str, str], "Project"]) -> None:
+        """Runs the setup() method for all flow nodes and forwards previous status.
+
+        This method iterates through all nodes in execution order and calls
+        their respective `setup()` methods.
+
+        It also uses the `extra_setup_nodes` to:
+        1. Prune nodes from `extra_setup_nodes` if their `setup()` method
+           returns False (indicating the node is no longer valid).
+        2. Forward the 'status' from a valid, previously-run node (found in
+           `extra_setup_nodes`) into the current job's records.
+
+        Args:
+            extra_setup_nodes (Dict[Tuple[str, str], "Project"]): A dictionary
+                of loaded Project objects from previous runs. This dictionary
+                may be modified in-place (nodes may be removed).
+        """
+        self.__print_status("Start - setup")
         # Setup tools for all nodes to run
         for layer_nodes in self.__flow.get_execution_order():
             for step, index in layer_nodes:
@@ -649,28 +676,155 @@ class Scheduler:
                 if node_status:
                     # Forward old status
                     self.__record.set('status', node_status, step=step, index=index)
+        self.__print_status("End - setup")
+
+    @staticmethod
+    def _configure_run_required(task: SchedulerNode) \
+            -> Optional[Union[SchedulerFlowReset, SchedulerNodeReset]]:
+        """
+        Helper method to run requires_run() with threads.
+        """
+        with task.runtime():
+            try:
+                task.requires_run()
+            except (SchedulerFlowReset, SchedulerNodeReset) as e:
+                return e
+        return None
+
+    def __configure_check_run_required(self) -> List[Tuple[str, str]]:
+        """Checks which nodes require a re-run and which can be replayed.
+
+        This method iterates through all nodes that are currently marked as
+        'SUCCESS' (typically from a previous run). It calls `requires_run()`
+        on each to determine if inputs, parameters, or other dependencies
+        have changed.
+
+        - If `requires_run()` is True, the node is marked as 'pending' (and
+          will be re-executed).
+        - If `requires_run()` is False, the node is added to the 'replay' list,
+          indicating its previous results can be reused.
+
+        Returns:
+            List[Tuple[str, str]]: A list of (step, index) tuples for nodes
+                that do *not* require a re-run and whose results can be
+                replayed from the journal.
+        """
+        self.__print_status("Start - check")
+
+        replay: List[Tuple[str, str]] = []
+
+        nodes: List[Tuple[str, str]] = []
+
+        def filter_nodes(nodes: List[Tuple[str, str]]) -> None:
+            for step, index in tuple(nodes):
+                # Only look at successful nodes
+                if self.__record.get("status", step=step, index=index) != NodeStatus.SUCCESS:
+                    nodes.remove((step, index))
+
+        def create_node_group(nodes: List[Tuple[str, str]], size: int) -> List[Tuple[str, str]]:
+            group = []
+            for _ in range(size):
+                if nodes:
+                    group.append(nodes.pop(0))
+            return group
 
-        self.__print_status("After setup")
+        # Collect initial list of nodes to process
+        for layer_nodes in self.__flow.get_execution_order():
+            nodes.extend(layer_nodes)
+
+        # Determine pool size
+        cores = utils.get_cores()
+        pool_size = self.project.option.scheduler.get_maxthreads() or cores
+        pool_size = max(1, min(cores, pool_size))
+
+        # Limit based on number of nodes if less than number of cores
+        filter_nodes(nodes)
+        if not nodes:
+            # No nodes left so just return
+            return []
+
+        pool_size = min(pool_size, len(nodes))
+
+        self.__logger.debug(f"Check pool size: {pool_size}")
+
+        # Call this in case this was invoked without __main__
+        multiprocessing.freeze_support()
+
+        with multiprocessing.get_context("spawn").Pool(pool_size) as pool:
+            while True:
+                # Filter nodes
+                filter_nodes(nodes)
+
+                # Generate a group of nodes to run
+                group = create_node_group(nodes, pool_size)
+                self.__logger.debug(f"Group to check: {group}")
+                if not group:
+                    # Group is empty
+                    break
+
+                tasks = [self.__tasks[(step, index)] for step, index in group]
+                # Suppress excess info messages during checks
+                cur_level = self.project.logger.level
+                self.project.logger.setLevel(logging.WARNING)
+                try:
+                    runcheck = pool.map(Scheduler._configure_run_required, tasks)
+                finally:
+                    self.project.logger.setLevel(cur_level)
+
+                for node, runrequired in zip(group, runcheck):
+                    if self.__record.get("status", step=node[0], index=node[1]) != \
+                            NodeStatus.SUCCESS:
+                        continue
+
+                    self.__logger.debug(f" Result: {node} -> {runrequired}")
+
+                    if runrequired is not None:
+                        runrequired.log(self.__logger)
+
+                        if isinstance(runrequired, SchedulerFlowReset):
+                            raise runrequired from None
+
+                        # This node must be run
+                        self.__mark_pending(*node)
+                    else:
+                        # import old information
+                        replay.append(node)
+
+        self.__print_status("End - check")
+
+        return replay
+
+    def configure_nodes(self) -> None:
+        """
+        Prepare and configure all flow nodes before execution, including loading prior run state,
+        running per-node setup, and marking nodes that require rerun.
+
+        This method:
+        - Loads available node manifests from previous jobs and uses them to populate setup data
+          where appropriate.
+        - Runs each node's setup routine to initialize tools and runtime state.
+        - For nodes whose parameters or inputs have changed, marks them and all downstream nodes
+          as pending so they will be re-executed.
+        - Replays preserved journaled results for nodes that remain valid to reuse previous outputs.
+        - On a SchedulerFlowReset, forces a full build-directory recheck and marks every node
+          as pending.
+        - Persists the resulting manifest for the current job before returning.
+        """
+        journal = Journal.access(self.__project)
+        journal.start()
+
+        extra_setup_nodes = self.__configure_collect_previous_information()
+
+        self.__configure_run_setup(extra_setup_nodes)
 
         # Check for modified information
         try:
-            replay = []
-            for layer_nodes in self.__flow.get_execution_order():
-                for step, index in layer_nodes:
-                    # Only look at successful nodes
-                    if self.__record.get("status", step=step, index=index) != NodeStatus.SUCCESS:
-                        continue
+            replay = self.__configure_check_run_required()
 
-                    with self.__tasks[(step, index)].runtime():
-                        if self.__tasks[(step, index)].requires_run():
-                            # This node must be run
-                            self.__mark_pending(step, index)
-                        elif (step, index) in extra_setup_nodes:
-                            # import old information
-                            replay.append((step, index))
 
             # Replay previous information
-            Journal.access(extra_setup_nodes[(step, index)]).replay(self.__project)
+            if (step, index) in extra_setup_nodes:
+                Journal.access(extra_setup_nodes[(step, index)]).replay(self.__project)
         except SchedulerFlowReset:
             # Mark all nodes as pending
             self.__clean_build_dir_full(recheck=True)
@@ -678,7 +832,7 @@ class Scheduler:
             for step, index in self.__flow.get_nodes():
                 self.__mark_pending(step, index)
 
-        self.__print_status("After requires run")
+        self.__print_status("Before ensure")
 
         # Ensure all nodes are marked as pending if needed
         for layer_nodes in self.__flow_runtime.get_execution_order():
@@ -687,11 +841,12 @@ class Scheduler:
                 if NodeStatus.is_waiting(status) or NodeStatus.is_error(status):
                     self.__mark_pending(step, index)
 
-        self.__print_status("After ensure")
+        self.__print_status("FINAL")
+
+        # Write configured manifest
+        os.makedirs(os.path.dirname(self.manifest), exist_ok=True)
+        self.__project.write_manifest(self.manifest)
 
-        os.makedirs(jobdir(self.__project), exist_ok=True)
-        self.__project.write_manifest(os.path.join(jobdir(self.__project),
-                                                   f"{self.__name}.pkg.json"))
         journal.stop()
 
     def __check_display(self) -> None:
@@ -740,3 +895,73 @@ class Scheduler:
                 self.__project.set('option', 'jobname', f'{stem}{jobid + 1}')
                 return True
         return False
+
+    def __check_tool_versions(self) -> bool:
+        """
+        Validates tool executables and versions for all local nodes.
+
+        This method iterates through all nodes defined in the flow runtime.
+        It performs checks only for nodes scheduled to run locally. Nodes
+        configured to run on a remote scheduler (e.g., LSF, Slurm) are
+        skipped. The entire check is also skipped if the project is
+        configured for remote execution.
+
+        For each local node, it:
+        1. Runs from within a temporary directory to avoid conflicts.
+        2. Enters the node's specific runtime context (e.g., sets env vars).
+        3. Tries to resolve the executable path. Logs an error if not found.
+        4. Calls `node.check_version()` to validate the tool version.
+        5. Caches version results to avoid re-checking the same executable
+           for different nodes.
+
+        It logs an error for any node that fails validation (missing executable
+        or failed version check) and returns an overall status.
+
+        Returns:
+            bool: True if all local nodes pass validation or if checks
+                are skipped. False if any local node fails.
+        """
+        if self.__project.option.get_remote():
+            return True
+
+        error = False
+
+        cwd = os.getcwd()
+        with tempfile.TemporaryDirectory(prefix="sc_tool_check") as d:
+            try:
+                versions: Dict[str, Optional[str]] = {}
+
+                self.__logger.debug(f"Executing tool checks in: {d}")
+                os.chdir(d)
+                for (step, index) in self.__flow_runtime.get_nodes():
+                    if self.__project.option.scheduler.get_name(step=step, index=index) is not None:
+                        continue
+
+                    node = SchedulerNode(self.__project, step, index)
+                    with node.runtime():
+                        try:
+                            exe = node.get_exe_path()
+                        except TaskExecutableNotReceived:
+                            continue
+                        except TaskExecutableNotFound:
+                            exe = node.task.get("exe")
+                            self.__logger.error(f"Executable for {step}/{index} could not "
+                                                f"be found: {exe}")
+                            error = True
+                            continue
+
+                        try:
+                            if exe:
+                                version: Optional[str] = versions.get(exe, None)
+                                version, check = node.check_version(version)
+                                versions[exe] = version
+                                if not check:
+                                    self.__logger.error(f"Executable for {step}/{index} did not "
+                                                        "meet version checks")
+                                    error = True
+                        except NotImplementedError:
+                            self.__logger.error(f"Unable to process version for {step}/{index}")
+            finally:
+                os.chdir(cwd)
+
+        return not error
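Note on the pattern introduced by __configure_check_run_required() above: the serial sweep of requires_run() is replaced by batched checks in a "spawn" multiprocessing pool. A minimal standalone sketch of that pattern follows; check_task(), the node names, and the last-digit decision rule are illustrative stand-ins, not part of siliconcompiler's API:

import multiprocessing

def check_task(name: str):
    # Stand-in for SchedulerNode.requires_run(): return a reason when the
    # node must be re-run, or None when its previous results can be reused.
    # Must be a top-level function so "spawn" workers can import it.
    return None if int(name[-1]) % 2 == 0 else f"{name}: inputs changed"

def main():
    names = [f"node{i}" for i in range(8)]
    # Cap the pool at the core count and at the amount of work available.
    pool_size = max(1, min(multiprocessing.cpu_count(), len(names)))
    # "spawn" starts clean worker interpreters, avoiding fork-related
    # trouble with inherited locks, threads, and open log handlers.
    with multiprocessing.get_context("spawn").Pool(pool_size) as pool:
        while names:
            # Take one batch per iteration, mirroring create_node_group().
            group, names = names[:pool_size], names[pool_size:]
            for name, result in zip(group, pool.map(check_task, group)):
                print(name, "->", "re-run" if result else "replay")

if __name__ == "__main__":
    multiprocessing.freeze_support()
    main()

The spawn context is the conservative choice here because the scheduler holds live logger handlers and may run inside frozen executables; the freeze_support() call mirrors the one the scheduler makes before creating its pool.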