nemo-evaluator-launcher 0.1.19__py3-none-any.whl → 0.1.56__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. nemo_evaluator_launcher/api/functional.py +159 -5
  2. nemo_evaluator_launcher/cli/logs.py +102 -0
  3. nemo_evaluator_launcher/cli/ls_task.py +280 -0
  4. nemo_evaluator_launcher/cli/ls_tasks.py +208 -55
  5. nemo_evaluator_launcher/cli/main.py +29 -2
  6. nemo_evaluator_launcher/cli/run.py +114 -16
  7. nemo_evaluator_launcher/cli/version.py +26 -23
  8. nemo_evaluator_launcher/common/container_metadata/__init__.py +61 -0
  9. nemo_evaluator_launcher/common/container_metadata/intermediate_repr.py +530 -0
  10. nemo_evaluator_launcher/common/container_metadata/loading.py +1126 -0
  11. nemo_evaluator_launcher/common/container_metadata/registries.py +824 -0
  12. nemo_evaluator_launcher/common/container_metadata/utils.py +63 -0
  13. nemo_evaluator_launcher/common/helpers.py +200 -51
  14. nemo_evaluator_launcher/common/logging_utils.py +16 -5
  15. nemo_evaluator_launcher/common/mapping.py +341 -155
  16. nemo_evaluator_launcher/common/printing_utils.py +25 -12
  17. nemo_evaluator_launcher/configs/deployment/sglang.yaml +4 -2
  18. nemo_evaluator_launcher/configs/deployment/trtllm.yaml +2 -3
  19. nemo_evaluator_launcher/configs/deployment/vllm.yaml +0 -1
  20. nemo_evaluator_launcher/configs/execution/slurm/default.yaml +14 -0
  21. nemo_evaluator_launcher/executors/base.py +31 -1
  22. nemo_evaluator_launcher/executors/lepton/deployment_helpers.py +36 -1
  23. nemo_evaluator_launcher/executors/lepton/executor.py +107 -9
  24. nemo_evaluator_launcher/executors/local/executor.py +383 -24
  25. nemo_evaluator_launcher/executors/local/run.template.sh +54 -2
  26. nemo_evaluator_launcher/executors/slurm/executor.py +559 -64
  27. nemo_evaluator_launcher/executors/slurm/proxy.cfg.template +26 -0
  28. nemo_evaluator_launcher/exporters/utils.py +32 -46
  29. nemo_evaluator_launcher/package_info.py +1 -1
  30. nemo_evaluator_launcher/resources/all_tasks_irs.yaml +17016 -0
  31. nemo_evaluator_launcher/resources/mapping.toml +64 -315
  32. {nemo_evaluator_launcher-0.1.19.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/METADATA +4 -3
  33. nemo_evaluator_launcher-0.1.56.dist-info/RECORD +69 -0
  34. {nemo_evaluator_launcher-0.1.19.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/entry_points.txt +1 -0
  35. nemo_evaluator_launcher-0.1.19.dist-info/RECORD +0 -60
  36. {nemo_evaluator_launcher-0.1.19.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/WHEEL +0 -0
  37. {nemo_evaluator_launcher-0.1.19.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/licenses/LICENSE +0 -0
  38. {nemo_evaluator_launcher-0.1.19.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/top_level.txt +0 -0
nemo_evaluator_launcher/api/functional.py

@@ -18,8 +18,9 @@
 This module provides the main functional entry points for running evaluations, querying job status, and listing available tasks. These functions are intended to be used by CLI commands and external integrations.
 """
 
+import copy
 from pathlib import Path
-from typing import Any, List, Optional, Union
+from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
 
 import yaml
 from omegaconf import DictConfig, OmegaConf
@@ -35,7 +36,7 @@ def get_tasks_list() -> list[list[Any]]:
     """Get a list of available tasks from the mapping.
 
     Returns:
-        list[list[Any]]: Each sublist contains task name, endpoint type, harness, and container.
+        list[list[Any]]: Each sublist contains task name, endpoint type, harness, container, arch, description, and type.
     """
     mapping = load_tasks_mapping()
     data = [
@@ -44,6 +45,9 @@ def get_tasks_list() -> list[list[Any]]:
             task_data.get("endpoint_type"),
             task_data.get("harness"),
             task_data.get("container"),
+            task_data.get("arch", ""),
+            task_data.get("description", ""),
+            task_data.get("type", ""),
         ]
         for task_data in mapping.values()
     ]
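With the three extra columns, each row from get_tasks_list() now carries seven entries. A minimal consumer sketch (illustrative only, not part of the package) showing the unpacking order implied by the updated docstring:

    from nemo_evaluator_launcher.api.functional import get_tasks_list

    # Column order follows the docstring above; arch/description/type fall back
    # to empty strings when absent from the mapping.
    for name, endpoint_type, harness, container, arch, description, task_type in get_tasks_list():
        print(f"{harness}.{name} -> {container} ({task_type or 'n/a'})")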
@@ -75,12 +79,54 @@ def _validate_no_missing_values(cfg: Any, path: str = "") -> None:
             _validate_no_missing_values(value, current_path)
 
 
-def run_eval(cfg: RunConfig, dry_run: bool = False) -> Optional[str]:
+def filter_tasks(cfg: RunConfig, task_names: list[str]) -> RunConfig:
+    """Filter evaluation tasks to only include specified task names.
+
+    Args:
+        cfg: The configuration object for the evaluation run.
+        task_names: List of task names to include (e.g., ["ifeval", "gsm8k"]).
+
+    Returns:
+        RunConfig: A new configuration with filtered tasks (input is not mutated).
+
+    Raises:
+        ValueError: If any requested task is not found in config or no tasks defined.
+    """
+    if not task_names:
+        return cfg
+
+    if not hasattr(cfg.evaluation, "tasks") or not cfg.evaluation.tasks:
+        raise ValueError("No tasks defined in config. Cannot filter tasks.")
+
+    requested_tasks = set(task_names)
+    original_tasks = cfg.evaluation.tasks
+    filtered_tasks = [task for task in original_tasks if task.name in requested_tasks]
+
+    # Fail if ANY requested tasks are not found
+    found_names = {task.name for task in filtered_tasks}
+    not_found = requested_tasks - found_names
+    if not_found:
+        available = [task.name for task in original_tasks]
+        raise ValueError(
+            f"Requested task(s) not found in config: {sorted(not_found)}. "
+            f"Available tasks: {available}"
+        )
+
+    # Create a deep copy to preserve input immutability
+    result = copy.deepcopy(cfg)
+    result.evaluation.tasks = filtered_tasks
+    return result
+
+
+def run_eval(
+    cfg: RunConfig, dry_run: bool = False, tasks: Optional[list[str]] = None
+) -> Optional[str]:
     """Run evaluation with specified config and overrides.
 
     Args:
         cfg: The configuration object for the evaluation run.
         dry_run: If True, do not run the evaluation, just prepare scripts and save them.
+        tasks: Optional list of task names to run. If provided, only these tasks will be executed.
 
     Returns:
         Optional[str]: The invocation ID for the evaluation run.
@@ -89,6 +135,10 @@ def run_eval(cfg: RunConfig, dry_run: bool = False) -> Optional[str]:
         ValueError: If configuration validation fails or MISSING values are found.
         RuntimeError: If the executor fails to start the evaluation.
     """
+    # Filter tasks if specified
+    if tasks:
+        cfg = filter_tasks(cfg, tasks)
+
     # Validate that no MISSING values exist in the configuration
     _validate_no_missing_values(cfg)
 
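A minimal usage sketch of the new filtering path (illustrative only; a throwaway OmegaConf object stands in for a real RunConfig, so this shows the call shape rather than a full launcher run):

    from omegaconf import OmegaConf

    from nemo_evaluator_launcher.api.functional import filter_tasks

    # Stand-in config containing only the fields filter_tasks touches.
    cfg = OmegaConf.create(
        {"evaluation": {"tasks": [{"name": "ifeval"}, {"name": "gsm8k"}, {"name": "mmlu"}]}}
    )

    subset = filter_tasks(cfg, ["ifeval", "gsm8k"])   # returns a deep copy
    print([t.name for t in subset.evaluation.tasks])  # ['ifeval', 'gsm8k']
    print(len(cfg.evaluation.tasks))                  # 3 -- the input is not mutated

    # run_eval(cfg, tasks=["ifeval"]) applies the same filtering before launching.

Unknown names raise ValueError listing the available tasks, so a typo fails fast instead of silently launching an empty run.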
@@ -116,6 +166,7 @@ def get_status(ids_or_prefixes: list[str]) -> list[dict[str, Any]]:
     db = ExecutionDB()
     results: List[dict[str, Any]] = []
 
+    # TODO(agronskiy): refactor the `.`-checking job in all the functions.
     for id_or_prefix in ids_or_prefixes:
         # If id looks like an invocation_id (no dot), get all jobs for it
         if "." not in id_or_prefix:
@@ -259,6 +310,108 @@ def get_status(ids_or_prefixes: list[str]) -> list[dict[str, Any]]:
     return results
 
 
+def stream_logs(
+    ids_or_prefixes: Union[str, list[str]],
+) -> Iterator[Tuple[str, str, str]]:
+    """Stream logs from jobs or invocations by their IDs or invocation IDs.
+
+    Args:
+        ids_or_prefixes: Single ID/prefix or list of job IDs or invocation IDs to stream logs from.
+            Short prefixes are allowed, we would try to match the full ones from
+            prefixes if no collisions are present.
+
+    Yields:
+        Tuple[str, str, str]: Tuples of (job_id, task_name, log_line) for each log line.
+            Empty lines are yielded as empty strings.
+
+    Raises:
+        ValueError: If the executor doesn't support log streaming.
+    """
+    db = ExecutionDB()
+
+    # Normalize to list for consistent processing
+    if isinstance(ids_or_prefixes, str):
+        ids_or_prefixes = [ids_or_prefixes]
+
+    # Collect all jobs from all IDs, grouped by executor
+    executor_to_jobs: Dict[str, Dict[str, JobData]] = {}
+    executor_to_invocations: Dict[str, list[str]] = {}
+
+    # TODO(agronskiy): refactor the `.`-checking job in all the functions.
+    for id_or_prefix in ids_or_prefixes:
+        # Determine if this is a job ID or invocation ID
+        if "." in id_or_prefix:
+            # This is a job ID
+            job_data = db.get_job(id_or_prefix)
+            if job_data is None:
+                continue
+
+            executor = job_data.executor
+            if executor not in executor_to_jobs:
+                executor_to_jobs[executor] = {}
+            executor_to_jobs[executor][id_or_prefix] = job_data
+        else:
+            # This is an invocation ID
+            jobs = db.get_jobs(id_or_prefix)
+            if not jobs:
+                continue
+
+            # Get the executor class from the first job
+            first_job_data = next(iter(jobs.values()))
+            executor = first_job_data.executor
+            if executor not in executor_to_invocations:
+                executor_to_invocations[executor] = []
+            executor_to_invocations[executor].append(id_or_prefix)
+
+    # Stream logs from each executor simultaneously
+    # For each executor, collect all job IDs and stream them together
+    for executor, jobs_dict in executor_to_jobs.items():
+        try:
+            executor_cls = get_executor(executor)
+        except ValueError:
+            continue
+
+        # For local executor with multiple jobs, pass list to stream simultaneously
+        # For other executors or single jobs, pass individual job IDs
+        if executor == "local" and len(jobs_dict) > 1:
+            # Pass all job IDs as a list to stream simultaneously
+            try:
+                yield from executor_cls.stream_logs(
+                    list(jobs_dict.keys()), executor_name=executor
+                )
+            except NotImplementedError:
+                raise ValueError(
+                    f"Log streaming is not yet implemented for executor '{executor}'"
+                )
+        else:
+            # Single job or non-local executor
+            for job_id in jobs_dict.keys():
+                try:
+                    yield from executor_cls.stream_logs(job_id, executor_name=executor)
+                except NotImplementedError:
+                    raise ValueError(
+                        f"Log streaming is not yet implemented for executor '{executor}'"
+                    )
+
+    # Stream logs from invocation IDs
+    for executor, invocation_ids in executor_to_invocations.items():
+        try:
+            executor_cls = get_executor(executor)
+        except ValueError:
+            continue
+
+        # Stream each invocation (each invocation already handles multiple jobs internally)
+        for invocation_id in invocation_ids:
+            try:
+                yield from executor_cls.stream_logs(
+                    invocation_id, executor_name=executor
+                )
+            except NotImplementedError:
+                raise ValueError(
+                    f"Log streaming is not yet implemented for executor '{executor}'"
+                )
+
+
 def list_all_invocations_summary() -> list[dict[str, Any]]:
     """Return a concise per-invocation summary from the exec DB.
 
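As the docstring above states, stream_logs accepts a single ID/prefix or a list and yields (job_id, task_name, log_line) tuples. A minimal consumer sketch (the ID prefix is a placeholder for a real invocation):

    from nemo_evaluator_launcher.api.functional import stream_logs

    # "15b9f667" is a placeholder invocation-ID prefix; a job ID like "15b9f667.0" also works.
    for job_id, task_name, line in stream_logs("15b9f667"):
        # Empty log lines arrive as empty strings, so guard before prefixing.
        print(f"[{job_id} {task_name}] {line}" if line else "")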
@@ -378,6 +531,7 @@ def kill_job_or_invocation(id: str) -> list[dict[str, Any]]:
                 "data": {"error": f"Unexpected error: {str(e)}"},
             }
 
+    # TODO(agronskiy): refactor the `.`-checking job in all the functions.
     # Determine if this is a job ID or invocation ID
     if "." in id:
         # This is a job ID - kill single job
@@ -413,7 +567,7 @@ def kill_job_or_invocation(id: str) -> list[dict[str, Any]]:
 
 
 def export_results(
-    invocation_ids: Union[str, List[str]],
+    invocation_ids: Union[str, list[str]],
     dest: str = "local",
     config: dict[Any, Any] | None = None,
 ) -> dict:
@@ -442,7 +596,7 @@ def export_results(
         if "." in single_id:  # job_id
             # Try reading config from artifacts working dir (auto-export on remote node)
             cfg_file = None
-            for name in ("run_config.yml", "config.yml"):
+            for name in ("config.yml", "run_config.yml"):
                 p = Path(name)
                 if p.exists():
                     cfg_file = p
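For context, export_results takes a single invocation/job ID or a list of them; the hunk above only reorders which per-job config file is preferred (config.yml first). A minimal call sketch (the ID and destination are placeholders):

    from nemo_evaluator_launcher.api.functional import export_results

    # Export artifacts for one invocation to the default local destination.
    summary = export_results("15b9f667", dest="local", config=None)
    print(summary)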
nemo_evaluator_launcher/cli/logs.py (new file)

@@ -0,0 +1,102 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Logs command for streaming logs from evaluation jobs."""
+
+import sys
+from dataclasses import dataclass
+from typing import Callable, Dict
+
+from simple_parsing import field
+
+import nemo_evaluator_launcher.common.printing_utils as pu
+from nemo_evaluator_launcher.api.functional import stream_logs
+from nemo_evaluator_launcher.common.execdb import ExecutionDB
+from nemo_evaluator_launcher.common.logging_utils import logger
+
+
+@dataclass
+class Cmd:
+    """Logs command configuration."""
+
+    ids: list[str] = field(
+        default_factory=list,
+        positional=True,
+        help="Invocation IDs or job IDs (e.g., '15b9f667' or '15b9f667.0'). Multiple IDs can be provided.",
+    )
+
+    def execute(self) -> None:
+        """Execute the logs command to stream logs from jobs."""
+        if not self.ids:
+            logger.error("At least one ID is required")
+            sys.exit(1)
+
+        db = ExecutionDB()
+
+        # Validate all IDs exist
+        all_job_ids = []
+        for id_or_prefix in self.ids:
+            if "." in id_or_prefix:
+                # This is a job ID - get single job
+                job_data = db.get_job(id_or_prefix)
+                if job_data is None:
+                    logger.error(f"Job {id_or_prefix} not found")
+                    sys.exit(1)
+                all_job_ids.append(id_or_prefix)
+            else:
+                # This is an invocation ID - get all jobs
+                jobs = db.get_jobs(id_or_prefix)
+                if not jobs:
+                    logger.error(f"Invocation {id_or_prefix} not found")
+                    sys.exit(1)
+                all_job_ids.extend(jobs.keys())
+
+        # Build color mapping for job IDs
+        colors = [pu.red, pu.green, pu.yellow, pu.magenta, pu.cyan]
+        job_colors: Dict[str, Callable[[str], str]] = {}
+        color_index = 0
+
+        for job_id in all_job_ids:
+            job_colors[job_id] = colors[color_index % len(colors)]
+            color_index += 1
+
+        # Stream logs from executor
+        try:
+            log_stream = stream_logs(self.ids)
+            for job_id, task_name, log_line in log_stream:
+                # Extract short prefix: first 6 chars of invocation ID + job number
+                if "." in job_id:
+                    inv_id, job_num = job_id.split(".", 1)
+                    short_prefix = f"{inv_id[:6]}.{job_num}"
+                else:
+                    short_prefix = job_id[:6]
+                prefix = f"{short_prefix}:"
+                color_func = job_colors.get(job_id, pu.grey)
+                if log_line:
+                    print(f"{color_func(prefix)} {log_line}")
+                else:
+                    # Print empty lines without prefix
+                    print()
+
+        except ValueError:
+            # Handle case where executor doesn't support streaming
+            # Warning already logged by BaseExecutor.stream_logs
+            pass
+        except KeyboardInterrupt:
+            # Clean exit on Ctrl+C
+            pass
+        except Exception as e:
+            logger.error(f"Error streaming logs: {e}")
+            sys.exit(1)
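The new command wires the stream_logs API into the CLI with a colored short prefix per job. A programmatic-equivalent sketch (IDs are placeholders and must exist in the local ExecutionDB; this is illustrative, not part of the package):

    from nemo_evaluator_launcher.cli.logs import Cmd

    # Equivalent to passing the IDs on the command line; exits via sys.exit on unknown IDs.
    Cmd(ids=["15b9f667", "3fa2bc11.0"]).execute()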
nemo_evaluator_launcher/cli/ls_task.py (new file)

@@ -0,0 +1,280 @@
+# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""CLI command for listing task details."""
+
+import json
+from dataclasses import dataclass
+
+import yaml
+from simple_parsing import field
+
+from nemo_evaluator_launcher.common.container_metadata import (
+    TaskIntermediateRepresentation,
+    load_tasks_from_tasks_file,
+)
+from nemo_evaluator_launcher.common.logging_utils import logger
+from nemo_evaluator_launcher.common.mapping import load_tasks_mapping
+from nemo_evaluator_launcher.common.printing_utils import (
+    bold,
+    cyan,
+    magenta,
+    yellow,
+)
+
+
+@dataclass
+class Cmd:
+    """List task command configuration."""
+
+    task_identifier: str = field(
+        default="",
+        positional=True,
+        help="Task identifier in format '[harness.]task_name'. If empty, shows all tasks.",
+    )
+    json: bool = field(
+        default=False,
+        action="store_true",
+        help="Print output as JSON instead of formatted text",
+    )
+    tasks_file: str = field(
+        default="",
+        help="Path to all_tasks_irs.yaml file (default: auto-detect)",
+    )
+    from_container: str = field(
+        default="",
+        help="Load tasks from container image (e.g., nvcr.io/nvidia/eval-factory/simple-evals:25.10). "
+        "If provided, extracts framework.yml from container and loads tasks on-the-fly instead of using all_tasks_irs.yaml",
+    )
+
+    def execute(self) -> None:
+        """Execute the ls task command."""
+        import pathlib
+
+        # Initialize tasks_path to None - it will be set when loading from file
+        tasks_path = None
+
+        # If --from is provided, load tasks from container
+        if self.from_container:
+            from nemo_evaluator_launcher.common.container_metadata import (
+                load_tasks_from_container,
+            )
+
+            try:
+                tasks = load_tasks_from_container(self.from_container)
+            except ValueError as e:
+                print(f"Error: {e}")
+                return
+            except Exception as e:
+                logger.error(
+                    "Failed to load tasks from container",
+                    container=self.from_container,
+                    error=str(e),
+                    exc_info=True,
+                )
+                return
+
+            if not tasks:
+                logger.error(
+                    "No tasks found in container",
+                    container=self.from_container,
+                )
+                return
+
+            logger.debug(
+                "Loaded tasks from container",
+                container=self.from_container,
+                num_tasks=len(tasks),
+                containers=set(task.container for task in tasks),
+            )
+            mapping_verified = True  # Tasks from container are always verified
+        else:
+            # Default behavior: load from all_tasks_irs.yaml
+            if self.tasks_file:
+                tasks_path = pathlib.Path(self.tasks_file)
+                if not tasks_path.exists():
+                    logger.error("Tasks file not found", path=str(tasks_path))
+                    return
+
+            # Load tasks
+            try:
+                tasks, mapping_verified = load_tasks_from_tasks_file(tasks_path)
+            except Exception as e:
+                print(f"Error loading tasks: {e}")
+                import traceback
+
+                traceback.print_exc()
+                logger.error("Failed to load tasks", error=str(e), exc_info=True)
+                return
+
+            # Display warning if mapping is not verified
+            if not mapping_verified:
+                print(
+                    yellow(
+                        "⚠ Warning: Tasks are from unverified mapping (mapping.toml checksum mismatch)"
+                    )
+                )
+                print(
+                    yellow(
+                        " Consider regenerating all_tasks_irs.yaml if mapping.toml has changed"
+                    )
+                )
+                print()
+
+            # Override containers from mapping.toml (which has the latest containers)
+            # This ensures ls task shows the same containers as ls tasks
+            # Only do this when NOT using --from (when loading from all_tasks_irs.yaml)
+            try:
+                mapping = load_tasks_mapping()
+                # Create a lookup: (normalized_harness, normalized_task_name) -> container
+                # Use case-insensitive keys for matching
+                container_lookup = {}
+                for (harness, task_name), task_data in mapping.items():
+                    container = task_data.get("container")
+                    if container:
+                        # Normalize harness name for lookup (frameworks.yaml uses hyphens)
+                        normalized_harness = harness.replace("_", "-").lower()
+                        normalized_task = task_name.lower()
+                        container_lookup[(normalized_harness, normalized_task)] = (
+                            container
+                        )
+
+                # Update task containers from mapping.toml
+                for task in tasks:
+                    # Defensive checks: ensure task has required attributes
+                    if not hasattr(task, "harness") or not task.harness:
+                        logger.warning(
+                            "Task missing harness attribute, skipping container override",
+                            task_name=getattr(task, "name", "unknown"),
+                        )
+                        continue
+                    if not hasattr(task, "name") or not task.name:
+                        logger.warning(
+                            "Task missing name attribute, skipping container override",
+                            harness=getattr(task, "harness", "unknown"),
+                        )
+                        continue
+
+                    # Normalize both harness and task name for case-insensitive lookup
+                    normalized_harness = task.harness.lower()
+                    normalized_task = task.name.lower()
+                    lookup_key = (normalized_harness, normalized_task)
+                    if lookup_key in container_lookup:
+                        task.container = container_lookup[lookup_key]
+            except Exception as e:
+                logger.debug(
+                    "Failed to override containers from mapping.toml",
+                    error=str(e),
+                )
+                # Continue with containers from all_tasks_irs.yaml if mapping load fails
+
+        if not tasks:
+            print("No tasks found.")
+            if tasks_path:
+                print(f" Tasks file: {tasks_path}")
+            else:
+                print(
+                    " Note: Make sure all_tasks_irs.yaml exists and contains valid task definitions."
+                )
+            return
+
+        # Parse task identifier
+        harness_filter = None
+        task_filter = None
+        if self.task_identifier:
+            if "." in self.task_identifier:
+                parts = self.task_identifier.split(".", 1)
+                harness_filter = parts[0]
+                task_filter = parts[1]
+            else:
+                task_filter = self.task_identifier
+
+        # Filter tasks
+        filtered_tasks = []
+        for task in tasks:
+            if harness_filter and task.harness.lower() != harness_filter.lower():
+                continue
+            if task_filter and task.name.lower() != task_filter.lower():
+                continue
+            filtered_tasks.append(task)
+
+        if not filtered_tasks:
+            print(f"No tasks found matching: {self.task_identifier}")
+            if self.task_identifier:
+                # Show available tasks for debugging
+                print("\nAvailable tasks (showing first 10):")
+                for i, task in enumerate(tasks[:10]):
+                    print(f" - {task.harness}.{task.name}")
+                if len(tasks) > 10:
+                    print(f" ... and {len(tasks) - 10} more")
+            return
+
+        # Display tasks
+        if self.json:
+            self._print_json(filtered_tasks)
+        else:
+            self._print_formatted(filtered_tasks, mapping_verified)
+
+    def _print_json(self, tasks: list[TaskIntermediateRepresentation]) -> None:
+        """Print tasks as JSON."""
+        tasks_dict = [task.to_dict() for task in tasks]
+        print(json.dumps({"tasks": tasks_dict}, indent=2))
+
+    def _print_formatted(
+        self, tasks: list[TaskIntermediateRepresentation], mapping_verified: bool = True
+    ) -> None:
+        """Print tasks in formatted text with colorized output."""
+        for i, task in enumerate(tasks):
+            if i > 0:
+                print()  # Spacing between tasks
+            print(bold("=" * 80))
+
+            # Task name - bold and magenta key, cyan value (matching logging utils)
+            print(f"{bold(magenta('Task:'))} {bold(cyan(str(task.name)))}")
+
+            # Description - magenta key, cyan value
+            if task.description:
+                print(f"{magenta('Description:')} {cyan(str(task.description))}")
+
+            # Harness - magenta key, cyan value
+            print(f"{magenta('Harness:')} {cyan(str(task.harness))}")
+
+            # Container - magenta key, cyan value
+            print(f"{magenta('Container:')} {cyan(str(task.container))}")
+
+            # Container Digest - magenta key, cyan value
+            if task.container_digest:
+                print(
+                    f"{magenta('Container Digest:')} {cyan(str(task.container_digest))}"
+                )
+
+            # Print defaults as YAML
+            if task.defaults:
+                print(f"\n{bold(magenta('Defaults:'))}")
+                defaults_yaml = yaml.dump(
+                    task.defaults, default_flow_style=False, sort_keys=False
+                )
+                # Indent defaults - use cyan for YAML content (FDF values)
+                for line in defaults_yaml.split("\n"):
+                    if line.strip():
+                        print(f" {cyan(line)}")
+                    else:
+                        print()
+
+            print(bold("-" * 80))
+
+        # Total count - bold
+        task_word = "task" if len(tasks) == 1 else "tasks"
+        print(f"\n{bold(f'Total: {len(tasks)} {task_word}')}")
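Likewise, the new ls_task command can be driven programmatically. A short sketch (the harness.task identifier is a placeholder; the container reference mirrors the example in the field help above):

    from nemo_evaluator_launcher.cli.ls_task import Cmd

    # Show a single task as JSON; identifier format is "[harness.]task_name".
    Cmd(task_identifier="simple-evals.mmlu", json=True).execute()

    # Or load task definitions directly from a container image instead of all_tasks_irs.yaml.
    Cmd(from_container="nvcr.io/nvidia/eval-factory/simple-evals:25.10").execute()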