nemo-evaluator-launcher 0.1.41__py3-none-any.whl → 0.1.56__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nemo_evaluator_launcher/api/functional.py +55 -5
- nemo_evaluator_launcher/cli/ls_task.py +280 -0
- nemo_evaluator_launcher/cli/ls_tasks.py +208 -55
- nemo_evaluator_launcher/cli/main.py +17 -2
- nemo_evaluator_launcher/cli/run.py +41 -1
- nemo_evaluator_launcher/common/container_metadata/__init__.py +61 -0
- nemo_evaluator_launcher/common/container_metadata/intermediate_repr.py +530 -0
- nemo_evaluator_launcher/common/container_metadata/loading.py +1126 -0
- nemo_evaluator_launcher/common/container_metadata/registries.py +824 -0
- nemo_evaluator_launcher/common/container_metadata/utils.py +63 -0
- nemo_evaluator_launcher/common/helpers.py +44 -28
- nemo_evaluator_launcher/common/mapping.py +341 -155
- nemo_evaluator_launcher/common/printing_utils.py +18 -12
- nemo_evaluator_launcher/executors/lepton/executor.py +26 -8
- nemo_evaluator_launcher/executors/local/executor.py +6 -2
- nemo_evaluator_launcher/executors/slurm/executor.py +141 -9
- nemo_evaluator_launcher/package_info.py +1 -1
- nemo_evaluator_launcher/resources/all_tasks_irs.yaml +17016 -0
- nemo_evaluator_launcher/resources/mapping.toml +62 -354
- {nemo_evaluator_launcher-0.1.41.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/METADATA +2 -1
- {nemo_evaluator_launcher-0.1.41.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/RECORD +25 -18
- {nemo_evaluator_launcher-0.1.41.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/WHEEL +0 -0
- {nemo_evaluator_launcher-0.1.41.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/entry_points.txt +0 -0
- {nemo_evaluator_launcher-0.1.41.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/licenses/LICENSE +0 -0
- {nemo_evaluator_launcher-0.1.41.dist-info → nemo_evaluator_launcher-0.1.56.dist-info}/top_level.txt +0 -0
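The headline change in this release is the removal of `mapping.toml` in favor of packaged task intermediate representations (IRs), shipped as `resources/all_tasks_irs.yaml` and parsed by the new `common/container_metadata` package. For orientation, here is a minimal sketch of the per-task fields the new `mapping.py` code reads off an IR; the class and field names below are illustrative stand-ins inferred from the diff, not the real `TaskIntermediateRepresentation` definition (which lives in `container_metadata/intermediate_repr.py`):

# Illustrative sketch only: fields inferred from how
# _convert_irs_to_mapping_format() consumes a task IR; the real class is
# nemo_evaluator_launcher.common.container_metadata.TaskIntermediateRepresentation.
from dataclasses import dataclass, field
from typing import Any, Optional

@dataclass
class TaskIRSketch:
    harness: str                             # evaluation harness name
    name: str                                # task name, unique per harness
    container: str                           # container image the task runs in
    container_arch: Optional[str] = None     # surfaced as "arch" in the mapping
    container_digest: Optional[str] = None   # pinned image digest, if known
    description: str = ""
    # defaults.config.supported_endpoint_types -> "endpoint_type" (first entry)
    # defaults.config.type                     -> "type"
    # defaults.command                         -> "command"
    defaults: dict[str, Any] = field(default_factory=dict)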
nemo_evaluator_launcher/common/mapping.py

@@ -13,222 +13,360 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-import importlib
 import pathlib
-import sys
-from importlib import resources
-from typing import Any, Optional
+from typing import Any
 
-import …
-
-if sys.version_info >= (3, 11):
-    import tomllib
-else:
-    import tomli as tomllib
+import yaml
 
+from nemo_evaluator_launcher.common.container_metadata import (
+    TaskIntermediateRepresentation,
+    load_tasks_from_tasks_file,
+)
 from nemo_evaluator_launcher.common.logging_utils import logger
 
-# Configuration constants
-# For below, see docs: https://docs.github.com/en/rest/repos/contents
-MAPPING_URL = "https://raw.githubusercontent.com/NVIDIA-NeMo/Eval/main/packages/nemo-evaluator-launcher/src/nemo_evaluator_launcher/resources/mapping.toml"
-CACHE_DIR = pathlib.Path.home() / ".nemo-evaluator" / "cache"
-CACHE_FILENAME = "mapping.toml"
-INTERNAL_RESOURCES_PKG = "nemo_evaluator_launcher_internal.resources"
-
 
-def …
-    """…
-
+def _load_packaged_resource(*_args: Any, **_kwargs: Any) -> dict[str, Any]:
+    """Deprecated: mapping.toml support was removed in favor of packaged IRs."""
+    raise RuntimeError(
+        "mapping.toml is no longer supported. Use packaged IRs (all_tasks_irs.yaml) instead."
+    )
 
 
-def …
-    """…
+def _process_mapping(mapping_toml: dict) -> dict:
+    """Process the raw mapping TOML into the expected format.
 
+    Args:
+        mapping_toml: Raw mapping TOML data.
     Returns:
-        [line truncated in source diff]
+        dict: Processed mapping in the expected format.
     """
-    [line truncated in source diff]
+    mapping = {}
+    for harness_name, harness_data in mapping_toml.items():
+        # Skip entries that don't have the expected structure
+        if not isinstance(harness_data, dict):
+            logger.warning(
+                "Skipping invalid harness entry",
+                harness_name=harness_name,
+                reason="harness_data is not a dict",
+            )
+            continue
 
+        # Check if tasks field exists
+        if "tasks" not in harness_data:
+            logger.warning(
+                "Skipping harness entry without tasks",
+                harness_name=harness_name,
+            )
+            continue
 
-[2 lines truncated in source diff]
+        if not isinstance(harness_data["tasks"], dict):
+            logger.warning(
+                "Skipping invalid harness entry",
+                harness_name=harness_name,
+                reason="tasks is not a dict",
+            )
+            continue
+
+        # Get container, which may be optional
+        container = harness_data.get("container")
+        if not container:
+            logger.debug(
+                "Harness entry without container",
+                harness_name=harness_name,
+            )
 
-[6 lines truncated in source diff]
+        for endpoint_type, harness_tasks in harness_data["tasks"].items():
+            if not isinstance(harness_tasks, dict):
+                logger.warning(
+                    "Skipping invalid endpoint type",
+                    harness_name=harness_name,
+                    endpoint_type=endpoint_type,
+                    reason="harness_tasks is not a dict",
+                )
+                continue
 
-[3 lines truncated in source diff]
+            for task_name, task_data in harness_tasks.items():
+                if not isinstance(task_data, dict):
+                    logger.warning(
+                        "Skipping invalid task entry",
+                        harness_name=harness_name,
+                        task_name=task_name,
+                        reason="task_data is not a dict",
+                    )
+                    continue
 
-[5 lines truncated in source diff]
+                key = (harness_name, task_name)
+                if key in mapping:
+                    raise KeyError(
+                        f"(harness,task)-tuple key {repr(key)} already exists in the mapping"
+                    )
 
+                # Validate required fields exist in task_data
+                # task_name and harness_name are already validated above
+                # endpoint_type is validated as a key in harness_tasks
+                # task_data must be a dict (validated above)
 
-[2 lines truncated in source diff]
+                mapping[key] = {
+                    "task": task_name,
+                    "harness": harness_name,
+                    "endpoint_type": endpoint_type,
+                }
+                # Only add container if it exists
+                if container:
+                    mapping[key]["container"] = container
 
-[6 lines truncated in source diff]
+                # Validate task_data keys before updating
+                for task_data_key in task_data.keys():
+                    if task_data_key in mapping[key]:
+                        raise KeyError(
+                            f"{repr(task_data_key)} is not allowed as key under {repr(key)} in the mapping"
+                        )
+                    # Validate that task_data values are valid types (basic check)
+                    if task_data_key not in ("description", "type") and not isinstance(
+                        task_data[task_data_key],
+                        (str, int, float, bool, dict, list, type(None)),
+                    ):
+                        logger.warning(
+                            "Unexpected value type in task_data",
+                            harness_name=harness_name,
+                            task_name=task_name,
+                            key=task_data_key,
+                            value_type=type(task_data[task_data_key]).__name__,
+                        )
 
-[2 lines truncated in source diff]
-        mapping = tomllib.load(f)
-        logger.debug("Loaded mapping from cache")
-        return mapping  # type: ignore[no-any-return]
-    except (OSError, tomllib.TOMLDecodeError) as e:
-        logger.warning("Failed to load mapping from cache", error=str(e))
-        return None
+                mapping[key].update(task_data)
+    return mapping
 
 
-def …
-    [line truncated in source diff]
+def _extract_tasks_from_framework_yml(
+    framework_yml_content: str, harness_name: str, container: str
+) -> dict[tuple[str, str], dict]:
+    """Extract tasks from framework.yml content and return as mapping entries.
 
     Args:
-        [line truncated in source diff]
+        framework_yml_content: YAML content from framework.yml file
+        harness_name: Name of the harness
+        container: Container image string
+
+    Returns:
+        Dictionary mapping (harness_name, task_name) to task configuration
     """
+    tasks = {}
     try:
-[2 lines truncated in source diff]
+        framework_data = yaml.safe_load(framework_yml_content)
+        if not framework_data or "evaluations" not in framework_data:
+            logger.warning(
+                "No evaluations found in framework.yml",
+                harness=harness_name,
+                container=container,
+            )
+            return tasks
 
-[3 lines truncated in source diff]
+        evaluations = framework_data.get("evaluations", [])
+        for eval_config in evaluations:
+            task_name = eval_config.get("name")
+            description = eval_config.get("description", "")
 
-[2 lines truncated in source diff]
+            if not task_name:
+                continue
 
+            # Extract endpoint types from the evaluation config
+            defaults = eval_config.get("defaults", {})
+            config = defaults.get("config", {})
+            supported_endpoint_types = config.get("supported_endpoint_types", ["chat"])
+            task_type = config.get("type", "")  # Extract type from defaults.config.type
 
-[4 lines truncated in source diff]
+            # Use first endpoint type (mapping key is (harness, task), so one entry per task)
+            endpoint_type = (
+                supported_endpoint_types[0] if supported_endpoint_types else "chat"
+            )
+
+            key = (harness_name, task_name)
+            # Only add if not already in mapping (don't override existing entries)
+            if key not in tasks:
+                tasks[key] = {
+                    "task": task_name,
+                    "harness": harness_name,
+                    "container": container,
+                    "endpoint_type": endpoint_type,
+                    "description": description,
+                    "type": task_type,  # Store type from defaults.config.type
+                }
+                # Merge any additional config from defaults
+                if defaults:
+                    tasks[key].update(defaults)
 
-    Args:
-        resource_name: The name of the resource to load.
-    """
-    try:
-        resource_toml: dict[str, Any] = {}
-        with resources.files(pkg_name).joinpath(resource_name).open("rb") as f:
-            resource_toml = tomllib.load(f)
         logger.info(
-            "…
+            "Extracted tasks from framework.yml",
+            harness=harness_name,
+            container=container,
+            num_tasks=len(tasks),
         )
-[6 lines truncated in source diff]
+    except yaml.YAMLError as e:
+        logger.warning(
+            "Failed to parse framework.yml",
+            harness=harness_name,
+            container=container,
+            error=str(e),
+        )
+    except Exception as e:
+        logger.warning(
+            "Error extracting tasks from framework.yml",
+            harness=harness_name,
+            container=container,
             error=str(e),
         )
-        raise RuntimeError(f"Failed to load {resource_name} from packaged file") from e
 
+    return tasks
 
-[2 lines truncated in source diff]
+
+def _convert_irs_to_mapping_format(
+    tasks: list[TaskIntermediateRepresentation],
+) -> dict[tuple[str, str], dict]:
+    """Convert list of TaskIntermediateRepresentation objects to mapping dict format.
 
     Args:
-        [line truncated in source diff]
+        tasks: List of TaskIntermediateRepresentation objects.
+        harnesses_by_name: Optional mapping of harness name -> Harness IR. If provided,
+            adds harness-level metadata (e.g., arch) to each task mapping entry.
+
     Returns:
-        dict: …
+        dict: Mapping of (harness_name, task_name) to dict holding their configuration.
     """
-    mapping = {}
-[23 lines truncated in source diff]
+    mapping: dict[tuple[str, str], dict] = {}
+
+    for task_ir in tasks:
+        harness_name = task_ir.harness
+        task_name = task_ir.name
+        key = (harness_name, task_name)
+
+        if key in mapping:
+            logger.warning(
+                "Duplicate task key found in IRs, keeping first occurrence",
+                harness=harness_name,
+                task=task_name,
+            )
+            continue
+
+        # Extract endpoint_type from defaults.config.supported_endpoint_types
+        defaults = task_ir.defaults or {}
+        config = defaults.get("config", {})
+        supported_endpoint_types = config.get("supported_endpoint_types", ["chat"])
+        endpoint_type = (
+            supported_endpoint_types[0] if supported_endpoint_types else "chat"
+        )
+
+        # Extract type from defaults.config.type
+        task_type = config.get("type", "")
+
+        # Build mapping entry
+        mapping[key] = {
+            "task": task_name,
+            "harness": harness_name,
+            "endpoint_type": endpoint_type,
+            "container": task_ir.container,
+        }
+
+        if task_ir.container_arch:
+            mapping[key]["arch"] = task_ir.container_arch
+
+        # Backwards-compatible enhancement: keep full IR defaults available.
+        # Existing code uses flattened defaults (excluding `config`) below; this adds a
+        # new field without changing any existing keys.
+        mapping[key]["defaults"] = defaults
+
+        # Backwards-compatible enhancement: surface command explicitly if present.
+        # Note: `command` is already included via flattened defaults merge, but
+        # keeping it explicit makes downstream usage simpler.
+        if "command" in defaults and "command" not in mapping[key]:
+            mapping[key]["command"] = defaults["command"]
+
+        # Add description if available
+        if task_ir.description:
+            mapping[key]["description"] = task_ir.description
+
+        # Add type if available
+        if task_type:
+            mapping[key]["type"] = task_type
+
+        # Add container_digest if available
+        if task_ir.container_digest:
+            mapping[key]["container_digest"] = task_ir.container_digest
+
+        # Merge defaults (flattened, excluding config which is already processed)
+        defaults_copy = {k: v for k, v in defaults.items() if k != "config"}
+        mapping[key].update(defaults_copy)
+
     return mapping
 
 
 def load_tasks_mapping(
-    latest: bool = False,
     mapping_toml: pathlib.Path | str | None = None,
+    *,
+    from_container: str | None = None,
 ) -> dict[tuple[str, str], dict]:
     """Load tasks mapping.
 
     The function obeys the following priority rules:
-    1. …
-    2. …
-[line truncated in source diff]
+    1. If from_container is not None -> extract framework.yml from that container and build mapping from the resulting IRs.
+    2. Otherwise -> load packaged IRs (all_tasks_irs.yaml) and build mapping from those IRs.
+
+    Args:
+        mapping_toml: Deprecated. mapping.toml is no longer supported (IR-only mode).
+        from_container: Optional container image identifier. If provided, tasks are loaded on-the-fly from that container.
 
     Returns:
         dict: Mapping of (harness_name, task_name) to dict holding their configuration.
 
     """
-[7 lines truncated in source diff]
+    if mapping_toml is not None:
+        raise ValueError(
+            "mapping_toml is no longer supported. This project has switched to packaged IRs (all_tasks_irs.yaml)."
+        )
+
+    # Explicit container path: extract tasks from container and return mapping built from IRs.
+    # This bypasses packaged IRs.
+    if from_container is not None:
+        try:
+            # Optional dependency path; importing may fail in "IR-only" environments.
+            from nemo_evaluator_launcher.common.container_metadata import (
+                load_tasks_from_container,
+            )
+        except ModuleNotFoundError as e:
+            raise RuntimeError(
+                "Loading tasks from a container requires optional dependencies. "
+                "Install nemo-evaluator-launcher with the full runtime dependencies."
+            ) from e
+
+        tasks = load_tasks_from_container(from_container)
+        if not tasks:
+            logger.warning(
+                "No tasks loaded from container via container-metadata",
+                container=from_container,
             )
         else:
-[5 lines truncated in source diff]
-            raise RuntimeError("could not download latest mapping")
-
-    elif mapping_toml is not None:
-        with open(mapping_toml, "rb") as f:
-            local_mapping = _process_mapping(tomllib.load(f))
-    else:
-        local_mapping = _process_mapping(_load_packaged_resource(CACHE_FILENAME))
+            logger.info(
+                "Loaded tasks from container via container-metadata",
+                container=from_container,
+                num_tasks=len(tasks),
+            )
 
-
-    # Check if nemo_evaluator_launcher_internal is available and load its mapping.toml
-    # CAVEAT: lazy-loading here, not somewhere top level, is important, to ensure
-    # order of package initialization.
-    try:
-        importlib.import_module("nemo_evaluator_launcher_internal")
-        logger.debug("Internal package available, loading internal mapping")
-        internal_mapping = _process_mapping(
-            _load_packaged_resource(CACHE_FILENAME, INTERNAL_RESOURCES_PKG)
-        )
+        return _convert_irs_to_mapping_format(tasks)
 
-[2 lines truncated in source diff]
-        logger.info(
-            "Successfully merged internal mapping", internal_tasks=len(internal_mapping)
-        )
-    except ImportError:
-        logger.debug("Internal package not available, using external mapping only")
+    try:
+        tasks, mapping_verified = load_tasks_from_tasks_file()
     except Exception as e:
-[line truncated in source diff]
+        raise RuntimeError("Failed to load tasks from packaged IRs") from e
+
+    if not tasks:
+        raise RuntimeError("No tasks available in packaged IRs (all_tasks_irs.yaml)")
 
-[line truncated in source diff]
+    logger.info(
+        "Loaded tasks from packaged IRs",
+        num_tasks=len(tasks),
+        mapping_verified=mapping_verified,
+    )
+    return _convert_irs_to_mapping_format(tasks)
 
 
 def get_task_from_mapping(query: str, mapping: dict[Any, Any]) -> dict[Any, Any]:
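Whether the IRs come from the packaged YAML or from a container, the loader returns the same mapping shape consumers already expect, keyed by `(harness, task)` tuples. A short usage sketch of the new surface; the image reference is made up for illustration:

from nemo_evaluator_launcher.common.mapping import load_tasks_mapping

# Default path: mapping built from the packaged all_tasks_irs.yaml.
mapping = load_tasks_mapping()
for (harness, task), cfg in list(mapping.items())[:3]:
    print(harness, task, cfg["endpoint_type"], cfg.get("container"))

# Container path: extract framework.yml from an image and build the mapping
# from the resulting IRs (requires the optional container-metadata deps).
# "nvcr.io/example/harness:latest" is a hypothetical image reference.
container_mapping = load_tasks_mapping(from_container="nvcr.io/example/harness:latest")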
@@ -293,3 +431,51 @@ def get_task_from_mapping(query: str, mapping: dict[Any, Any]) -> dict[Any, Any]
         f"invalid query={repr(query)} for task mapping,"
         " it must contain exactly zero or one occurrence of '.' character"
     )
+
+
+def _minimal_task_definition(task_query: str, *, container: str) -> dict[str, Any]:
+    """Create a minimal task definition when task is not known in any mapping."""
+    if task_query.count(".") == 1:
+        harness, task = task_query.split(".")
+    else:
+        harness, task = "", task_query
+
+    # Default to chat; most configs and endpoints use chat unless explicitly known.
+    return {
+        "task": task,
+        "harness": harness,
+        "endpoint_type": "chat",
+        "container": container,
+    }
+
+
+def get_task_definition_for_job(
+    *,
+    task_query: str,
+    base_mapping: dict[Any, Any],
+    container: str | None = None,
+) -> dict[str, Any]:
+    """Resolve task definition for a job.
+
+    If a container is provided, tasks are loaded from that container (using
+    container-metadata) and we attempt to resolve the task from that mapping.
+    If the task isn't found in the container, we warn and return a minimal
+    task definition so submission can proceed.
+    """
+    if not container:
+        return get_task_from_mapping(task_query, base_mapping)
+
+    # `load_tasks_mapping(from_container=...)` uses container-metadata extraction,
+    # which already has its own caching (e.g., caching extracted framework.yml).
+    container_mapping = load_tasks_mapping(from_container=container)
+
+    try:
+        return get_task_from_mapping(task_query, container_mapping)
+    except ValueError as e:
+        logger.warning(
+            "Task not found in provided container; proceeding with minimal task definition",
+            task=task_query,
+            container=container,
+            error=str(e),
+        )
+        return _minimal_task_definition(task_query, container=container)
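The new `get_task_definition_for_job` helper wraps this resolution order for callers such as executors. A hedged sketch of the intended call pattern; the task query and image reference are invented for illustration:

from nemo_evaluator_launcher.common.mapping import (
    get_task_definition_for_job,
    load_tasks_mapping,
)

base = load_tasks_mapping()

# Without a container override: resolves strictly against the base mapping
# (get_task_from_mapping raises ValueError on a miss).
defn = get_task_definition_for_job(task_query="some_harness.some_task", base_mapping=base)

# With a container override: tries the container's own tasks first, then falls
# back to a minimal {task, harness, endpoint_type="chat", container} definition
# so submission can still proceed.
defn = get_task_definition_for_job(
    task_query="some_harness.some_task",          # hypothetical query
    base_mapping=base,
    container="nvcr.io/example/harness:latest",   # hypothetical image
)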
nemo_evaluator_launcher/common/printing_utils.py

@@ -62,39 +62,45 @@ _CODES: dict[str, str] = dict(
     reset="\033[0m",
 )
 
-[4 lines truncated in source diff]
+
+def _apply(code_key: str, s: str) -> str:
+    """Apply an ANSI code if colors are enabled.
+
+    Note: Color enablement is evaluated at call time so tests that redirect
+    stdout (e.g. to StringIO) correctly disable ANSI sequences.
+    """
+    if _is_color_disabled():
+        return s
+    return _CODES[code_key] + s + _CODES["reset"]
 
 
 def green(s: str) -> str:
-    return …
+    return _apply("green", s)
 
 
 def red(s: str) -> str:
-    return …
+    return _apply("red", s)
 
 
 def red_bg(s: str) -> str:
-    return …
+    return _apply("red_bg", s)
 
 
 def cyan(s: str) -> str:
-    return …
+    return _apply("cyan", s)
 
 
 def yellow(s: str) -> str:
-    return …
+    return _apply("yellow", s)
 
 
 def magenta(s: str) -> str:
-    return …
+    return _apply("magenta", s)
 
 
 def grey(s: str) -> str:
-    return …
+    return _apply("grey", s)
 
 
 def bold(s: str) -> str:
-    return …
+    return _apply("bold", s)