nemo-evaluator-launcher 0.1.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of nemo-evaluator-launcher might be problematic.
- nemo_evaluator_launcher/__init__.py +79 -0
- nemo_evaluator_launcher/api/__init__.py +24 -0
- nemo_evaluator_launcher/api/functional.py +698 -0
- nemo_evaluator_launcher/api/types.py +98 -0
- nemo_evaluator_launcher/api/utils.py +19 -0
- nemo_evaluator_launcher/cli/__init__.py +15 -0
- nemo_evaluator_launcher/cli/export.py +267 -0
- nemo_evaluator_launcher/cli/info.py +512 -0
- nemo_evaluator_launcher/cli/kill.py +41 -0
- nemo_evaluator_launcher/cli/ls_runs.py +134 -0
- nemo_evaluator_launcher/cli/ls_tasks.py +136 -0
- nemo_evaluator_launcher/cli/main.py +226 -0
- nemo_evaluator_launcher/cli/run.py +200 -0
- nemo_evaluator_launcher/cli/status.py +164 -0
- nemo_evaluator_launcher/cli/version.py +55 -0
- nemo_evaluator_launcher/common/__init__.py +16 -0
- nemo_evaluator_launcher/common/execdb.py +283 -0
- nemo_evaluator_launcher/common/helpers.py +366 -0
- nemo_evaluator_launcher/common/logging_utils.py +357 -0
- nemo_evaluator_launcher/common/mapping.py +295 -0
- nemo_evaluator_launcher/common/printing_utils.py +93 -0
- nemo_evaluator_launcher/configs/__init__.py +15 -0
- nemo_evaluator_launcher/configs/default.yaml +28 -0
- nemo_evaluator_launcher/configs/deployment/generic.yaml +33 -0
- nemo_evaluator_launcher/configs/deployment/nim.yaml +32 -0
- nemo_evaluator_launcher/configs/deployment/none.yaml +16 -0
- nemo_evaluator_launcher/configs/deployment/sglang.yaml +38 -0
- nemo_evaluator_launcher/configs/deployment/trtllm.yaml +24 -0
- nemo_evaluator_launcher/configs/deployment/vllm.yaml +42 -0
- nemo_evaluator_launcher/configs/execution/lepton/default.yaml +92 -0
- nemo_evaluator_launcher/configs/execution/local.yaml +19 -0
- nemo_evaluator_launcher/configs/execution/slurm/default.yaml +34 -0
- nemo_evaluator_launcher/executors/__init__.py +22 -0
- nemo_evaluator_launcher/executors/base.py +120 -0
- nemo_evaluator_launcher/executors/lepton/__init__.py +16 -0
- nemo_evaluator_launcher/executors/lepton/deployment_helpers.py +609 -0
- nemo_evaluator_launcher/executors/lepton/executor.py +1004 -0
- nemo_evaluator_launcher/executors/lepton/job_helpers.py +398 -0
- nemo_evaluator_launcher/executors/local/__init__.py +15 -0
- nemo_evaluator_launcher/executors/local/executor.py +605 -0
- nemo_evaluator_launcher/executors/local/run.template.sh +103 -0
- nemo_evaluator_launcher/executors/registry.py +38 -0
- nemo_evaluator_launcher/executors/slurm/__init__.py +15 -0
- nemo_evaluator_launcher/executors/slurm/executor.py +1147 -0
- nemo_evaluator_launcher/exporters/__init__.py +36 -0
- nemo_evaluator_launcher/exporters/base.py +121 -0
- nemo_evaluator_launcher/exporters/gsheets.py +409 -0
- nemo_evaluator_launcher/exporters/local.py +502 -0
- nemo_evaluator_launcher/exporters/mlflow.py +619 -0
- nemo_evaluator_launcher/exporters/registry.py +40 -0
- nemo_evaluator_launcher/exporters/utils.py +624 -0
- nemo_evaluator_launcher/exporters/wandb.py +490 -0
- nemo_evaluator_launcher/package_info.py +38 -0
- nemo_evaluator_launcher/resources/mapping.toml +380 -0
- nemo_evaluator_launcher-0.1.28.dist-info/METADATA +494 -0
- nemo_evaluator_launcher-0.1.28.dist-info/RECORD +60 -0
- nemo_evaluator_launcher-0.1.28.dist-info/WHEEL +5 -0
- nemo_evaluator_launcher-0.1.28.dist-info/entry_points.txt +3 -0
- nemo_evaluator_launcher-0.1.28.dist-info/licenses/LICENSE +451 -0
- nemo_evaluator_launcher-0.1.28.dist-info/top_level.txt +1 -0
nemo_evaluator_launcher/api/functional.py
@@ -0,0 +1,698 @@
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Public API functions for nemo-evaluator-launcher.

This module provides the main functional entry points for running evaluations,
querying job status, and listing available tasks. These functions are intended
to be used by CLI commands and external integrations.
"""

from pathlib import Path
from typing import Any, List, Optional, Union

import yaml
from omegaconf import DictConfig, OmegaConf

from nemo_evaluator_launcher.api.types import RunConfig
from nemo_evaluator_launcher.common.execdb import ExecutionDB, JobData
from nemo_evaluator_launcher.common.mapping import load_tasks_mapping
from nemo_evaluator_launcher.executors.registry import get_executor
from nemo_evaluator_launcher.exporters import create_exporter


def get_tasks_list() -> list[list[Any]]:
    """Get a list of available tasks from the mapping.

    Returns:
        list[list[Any]]: Each sublist contains task name, endpoint type, harness, and container.
    """
    mapping = load_tasks_mapping()
    data = [
        [
            task_data.get("task"),
            task_data.get("endpoint_type"),
            task_data.get("harness"),
            task_data.get("container"),
        ]
        for task_data in mapping.values()
    ]
    return data
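For orientation, a minimal usage sketch (not part of the packaged file; it assumes the wheel is installed and uses the module path shown in the listing above):

```python
from nemo_evaluator_launcher.api.functional import get_tasks_list

# Each row is [task, endpoint_type, harness, container], per the docstring.
for task, endpoint_type, harness, container in get_tasks_list():
    print(f"{task!s:<40} {endpoint_type!s:<16} {harness!s:<24} {container}")
```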
def _validate_no_missing_values(cfg: Any, path: str = "") -> None:
    """Recursively validate that no MISSING values exist in the configuration.

    Args:
        cfg: The configuration object to validate.
        path: Current path in the configuration for error reporting.

    Raises:
        ValueError: If any MISSING values are found in the configuration.
    """
    if OmegaConf.is_dict(cfg):
        for key, value in cfg.items():
            current_path = f"{path}.{key!s}" if path else str(key)
            # Check if this specific key has a MISSING value
            if OmegaConf.is_missing(cfg, key):
                raise ValueError(
                    f"Configuration has MISSING value at path: {current_path!s}"
                )
            _validate_no_missing_values(value, current_path)
    elif OmegaConf.is_list(cfg):
        for i, value in enumerate(cfg):
            current_path = f"{path}[{i}]"
            _validate_no_missing_values(value, current_path)
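The validator builds on `OmegaConf.is_missing`; a quick illustration of the `"???"` marker it looks for:

```python
from omegaconf import OmegaConf

# "???" is OmegaConf's literal for a mandatory value that was never provided;
# it is exactly what OmegaConf.is_missing() (and hence the validator) detects.
cfg = OmegaConf.create({"execution": {"type": "local", "output_dir": "???"}})
print(OmegaConf.is_missing(cfg.execution, "output_dir"))  # True
print(OmegaConf.is_missing(cfg.execution, "type"))        # False
```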
def run_eval(cfg: RunConfig, dry_run: bool = False) -> Optional[str]:
    """Run an evaluation with the specified config and overrides.

    Args:
        cfg: The configuration object for the evaluation run.
        dry_run: If True, do not run the evaluation; just prepare the scripts and save them.

    Returns:
        Optional[str]: The invocation ID for the evaluation run.

    Raises:
        ValueError: If configuration validation fails or MISSING values are found.
        RuntimeError: If the executor fails to start the evaluation.
    """
    # Validate that no MISSING values exist in the configuration
    _validate_no_missing_values(cfg)

    if dry_run:
        print(OmegaConf.to_yaml(cfg))

    _check_api_endpoint_when_deployment_is_configured(cfg)
    return get_executor(cfg.execution.type).execute_eval(cfg, dry_run)
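A dry-run sketch of calling `run_eval` programmatically. The dict below is a hypothetical stand-in: the real schema comes from `configs/default.yaml` and the deployment/execution groups listed above, not from this example.

```python
from omegaconf import OmegaConf

from nemo_evaluator_launcher.api.functional import run_eval

# Hypothetical config, for illustration only.
cfg = OmegaConf.create(
    {
        "deployment": {"type": "none"},  # no deployment: endpoint fields are allowed
        "execution": {"type": "local", "output_dir": "results"},
        "target": {
            "api_endpoint": {
                "url": "http://localhost:8000/v1/chat/completions",  # placeholder
                "model_id": "my-model",  # placeholder
            }
        },
        "evaluation": {"tasks": [{"name": "mmlu"}]},
    }
)

# dry_run=True prints the resolved YAML and prepares scripts without launching.
invocation_id = run_eval(cfg, dry_run=True)
```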
def get_status(ids_or_prefixes: list[str]) -> list[dict[str, Any]]:
    """Get status of jobs by their job IDs or invocation IDs.

    Args:
        ids_or_prefixes: List of job IDs or invocation IDs to check status for.
            Short prefixes are also accepted; they are matched against the full
            IDs as long as no collisions are present.

    Returns:
        list[dict[str, Any]]: List of status dictionaries for each job or invocation.
            Each dictionary contains keys: 'invocation', 'job_id', 'status', and 'data'.
            If a job or invocation is not found, status is 'not_found'.
            If an error occurs, status is 'error' and 'data' contains error details.
    """
    db = ExecutionDB()
    results: List[dict[str, Any]] = []

    for id_or_prefix in ids_or_prefixes:
        # If the id looks like an invocation_id (no dot), get all jobs for it
        if "." not in id_or_prefix:
            jobs = db.get_jobs(id_or_prefix)
            if not jobs:
                results.append(
                    {
                        "invocation": id_or_prefix,
                        "job_id": None,
                        "status": "not_found",
                        "data": {},
                    }
                )
                continue

            # Get the executor class from the first job
            first_job_data = next(iter(jobs.values()))
            try:
                executor_cls = get_executor(first_job_data.executor)
            except ValueError as e:
                results.append(
                    {
                        "invocation": id_or_prefix,
                        "job_id": None,
                        "status": "error",
                        "data": {"error": str(e)},
                    }
                )
                continue

            # Get status from the executor for all jobs in the invocation
            try:
                status_list = executor_cls.get_status(id_or_prefix)

                # Create a result for each job in the invocation
                for job_id_in_invocation, job_data in jobs.items():
                    # Find the status for this specific job
                    job_status: str | None = None
                    job_progress: Optional[dict[str, Any]] = None
                    for status in status_list:
                        if status.id == job_id_in_invocation:
                            job_status = status.state.value
                            job_progress = status.progress
                            break

                    results.append(
                        {
                            "invocation": job_data.invocation_id,
                            "job_id": job_id_in_invocation,
                            "status": (
                                job_status if job_status is not None else "unknown"
                            ),
                            "progress": (
                                job_progress if job_progress is not None else "unknown"
                            ),
                            "data": job_data.data,
                        }
                    )

            except Exception as e:
                results.append(
                    {
                        "invocation": id_or_prefix,
                        "job_id": None,
                        "status": "error",
                        "data": {"error": str(e)},
                    }
                )
        else:
            # Otherwise, treat it as a job_id
            single_job_data: Optional[JobData] = db.get_job(id_or_prefix)

            if single_job_data is None:
                results.append(
                    {
                        "invocation": None,
                        "job_id": id_or_prefix,
                        "status": "not_found",
                        "data": {},
                    }
                )
                continue

            # Get the executor class
            try:
                executor_cls = get_executor(single_job_data.executor)
            except ValueError as e:
                results.append(
                    {
                        "invocation": None,
                        "job_id": id_or_prefix,
                        "status": "error",
                        "data": {"error": str(e)},
                    }
                )
                continue

            # Get status from the executor
            try:
                status_list = executor_cls.get_status(id_or_prefix)

                if not status_list:
                    results.append(
                        {
                            "invocation": single_job_data.invocation_id,
                            "job_id": single_job_data.job_id,
                            "status": "unknown",
                            "data": single_job_data.data,
                        }
                    )
                else:
                    # For individual job queries, return the first status
                    results.append(
                        {
                            "invocation": single_job_data.invocation_id,
                            "job_id": single_job_data.job_id,
                            "status": (
                                status_list[0].state.value if status_list else "unknown"
                            ),
                            "progress": (
                                status_list[0].progress if status_list else "unknown"
                            ),
                            "data": single_job_data.data,
                        }
                    )

            except Exception as e:
                results.append(
                    {
                        "invocation": (
                            single_job_data.invocation_id if single_job_data else None
                        ),
                        "job_id": (
                            single_job_data.job_id if single_job_data else id_or_prefix
                        ),
                        "status": "error",
                        "data": {"error": str(e)},
                    }
                )

    return results
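Since a bare ID addresses a whole invocation and `<invocation>.<n>` a single job, a caller can mix both. A sketch with placeholder IDs; note that `not_found` and `error` rows carry no `progress` key, hence the `.get`:

```python
from nemo_evaluator_launcher.api.functional import get_status

# "aefc4819" (placeholder) addresses a whole invocation; "aefc4819.0" one job in it.
for row in get_status(["aefc4819", "aefc4819.0"]):
    print(row["invocation"], row["job_id"], row["status"], row.get("progress"))
```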
def list_all_invocations_summary() -> list[dict[str, Any]]:
    """Return a concise per-invocation summary from the exec DB.

    Columns: invocation_id, earliest_job_ts, num_jobs, executor (or 'mixed').
    Sorted by earliest_job_ts (newest first).
    """
    db = ExecutionDB()
    jobs = db.get_all_jobs()

    inv_to_earliest: dict[str, float] = {}
    inv_to_count: dict[str, int] = {}
    inv_to_execs: dict[str, set[str]] = {}

    for jd in jobs.values():
        inv = jd.invocation_id
        ts = jd.timestamp or 0.0
        if inv not in inv_to_earliest or ts < inv_to_earliest[inv]:
            inv_to_earliest[inv] = ts
        inv_to_count[inv] = inv_to_count.get(inv, 0) + 1
        if inv not in inv_to_execs:
            inv_to_execs[inv] = set()
        inv_to_execs[inv].add(jd.executor)

    rows: list[dict[str, Any]] = []
    for inv, earliest_ts in inv_to_earliest.items():
        execs = inv_to_execs.get(inv, set())
        executor = (
            next(iter(execs)) if len(execs) == 1 else ("mixed" if execs else None)
        )
        rows.append(
            {
                "invocation_id": inv,
                "earliest_job_ts": earliest_ts,
                "num_jobs": inv_to_count.get(inv, 0),
                "executor": executor,
            }
        )

    rows.sort(key=lambda r: r.get("earliest_job_ts") or 0, reverse=True)
    return rows
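A rendering sketch for the summary rows, assuming `earliest_job_ts` is Unix epoch seconds (the `0.0` fallback above suggests as much):

```python
from datetime import datetime, timezone

from nemo_evaluator_launcher.api.functional import list_all_invocations_summary

for row in list_all_invocations_summary():
    # Assumes epoch seconds; 0.0 stands in for "unknown".
    when = datetime.fromtimestamp(row["earliest_job_ts"], tz=timezone.utc)
    print(
        f"{row['invocation_id']}  {when:%Y-%m-%d %H:%M}  "
        f"jobs={row['num_jobs']}  executor={row['executor']}"
    )
```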
def get_invocation_benchmarks(invocation_id: str) -> list[str]:
    """Return a sorted list of benchmark/task names for a given invocation.

    Extracted from stored configs in the execution DB. If anything goes wrong,
    returns an empty list; callers can display 'unknown' if desired.
    """
    db = ExecutionDB()
    jobs = db.get_jobs(invocation_id)
    names: set[str] = set()
    for jd in jobs.values():
        try:
            cfg = jd.config or {}
            tasks = (cfg.get("evaluation", {}) or {}).get("tasks", []) or []
            for t in tasks:
                n = t.get("name") if isinstance(t, dict) else None
                if n:
                    names.add(str(n))
        except Exception:
            # Ignore malformed entries; continue collecting from others
            continue
    return sorted(names)
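A small sketch pairing the summary with per-invocation benchmark names, using the `'unknown'` fallback the docstring suggests:

```python
from nemo_evaluator_launcher.api.functional import (
    get_invocation_benchmarks,
    list_all_invocations_summary,
)

for row in list_all_invocations_summary():
    # Fall back to "unknown" when no task names could be extracted.
    benchmarks = get_invocation_benchmarks(row["invocation_id"]) or ["unknown"]
    print(row["invocation_id"], ", ".join(benchmarks))
```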
def kill_job_or_invocation(id: str) -> list[dict[str, Any]]:
    """Kill a job or an entire invocation by its ID.

    Args:
        id: The job ID (e.g., aefc4819.0) or invocation ID (e.g., aefc4819) to kill.

    Returns:
        list[dict[str, Any]]: List of kill operation results.
            Each dictionary contains keys: 'invocation', 'job_id', 'status', and 'data'.
            If a job is not found, status is 'not_found'.
            If an error occurs, status is 'error' and 'data' contains error details.
    """
    db = ExecutionDB()
    results = []

    def kill_single_job(job_id: str, job_data: JobData) -> dict[str, Any]:
        """Helper function to kill a single job."""
        try:
            executor_cls = get_executor(job_data.executor)
            if hasattr(executor_cls, "kill_job"):
                executor_cls.kill_job(job_id)
                # Success - job was killed
                return {
                    "invocation": job_data.invocation_id,
                    "job_id": job_id,
                    "status": "killed",
                    "data": {"result": "Successfully killed job"},
                }
            else:
                return {
                    "invocation": job_data.invocation_id,
                    "job_id": job_id,
                    "status": "error",
                    "data": {
                        "error": f"Executor {job_data.executor} does not support killing jobs"
                    },
                }
        except (ValueError, RuntimeError) as e:
            # Expected errors from kill_job
            return {
                "invocation": job_data.invocation_id,
                "job_id": job_id,
                "status": "error",
                "data": {"error": str(e)},
            }
        except Exception as e:
            # Unexpected errors
            return {
                "invocation": job_data.invocation_id,
                "job_id": job_id,
                "status": "error",
                "data": {"error": f"Unexpected error: {str(e)}"},
            }

    # Determine if this is a job ID or an invocation ID
    if "." in id:
        # This is a job ID - kill a single job
        job_data = db.get_job(id)
        if job_data is None:
            return [
                {
                    "invocation": None,
                    "job_id": id,
                    "status": "not_found",
                    "data": {},
                }
            ]
        results.append(kill_single_job(id, job_data))
    else:
        # This is an invocation ID - kill all jobs in the invocation
        jobs = db.get_jobs(id)
        if not jobs:
            return [
                {
                    "invocation": id,
                    "job_id": None,
                    "status": "not_found",
                    "data": {},
                }
            ]

        # Kill each job in the invocation
        for job_id, job_data in jobs.items():
            results.append(kill_single_job(job_id, job_data))

    return results
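Because one result row is returned per job, reporting kill failures for a whole invocation is a short loop; a sketch with a placeholder ID:

```python
from nemo_evaluator_launcher.api.functional import kill_job_or_invocation

# A bare invocation ID ("aefc4819" is a placeholder) fans out to every job in it.
for result in kill_job_or_invocation("aefc4819"):
    if result["status"] != "killed":
        label = result["job_id"] or result["invocation"]
        print(f"{label}: {result['status']}", result["data"])
```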
def export_results(
    invocation_ids: Union[str, List[str]],
    dest: str = "local",
    config: dict[Any, Any] | None = None,
) -> dict:
    """Export results for one or more IDs (job, invocation, or pipeline IDs) to a destination.

    Args:
        invocation_ids: A single invocation ID or a list of job/invocation/pipeline IDs.
        dest: Export destination (local, wandb, jet, mlflow, gsheets).
        config: Optional exporter configuration.

    Returns:
        dict: The export results dictionary.
    """

    try:
        # Normalize to list
        if isinstance(invocation_ids, str):
            invocation_ids = [invocation_ids]

        exporter = create_exporter(dest, config or {})

        if len(invocation_ids) == 1:
            # Single id (job or invocation)
            single_id = invocation_ids[0]

            if "." in single_id:  # job_id
                # Try reading config from artifacts working dir (auto-export on remote node)
                cfg_file = None
                for name in ("run_config.yml", "config.yml"):
                    p = Path(name)
                    if p.exists():
                        cfg_file = p
                        break

                md_job_data = None
                if cfg_file:
                    try:
                        cfg_yaml = (
                            yaml.safe_load(cfg_file.read_text(encoding="utf-8")) or {}
                        )

                        # Merge exporter override file if present
                        ypath_export = Path("export_config.yml")
                        if ypath_export.exists():
                            exp_yaml = (
                                yaml.safe_load(ypath_export.read_text(encoding="utf-8"))
                                or {}
                            )
                            exec_cfg = cfg_yaml.get("execution") or {}
                            auto_exp = (exp_yaml.get("execution") or {}).get(
                                "auto_export"
                            )
                            if auto_exp is not None:
                                exec_cfg["auto_export"] = auto_exp
                                cfg_yaml["execution"] = exec_cfg
                            if "export" in exp_yaml:
                                cfg_yaml["export"] = exp_yaml["export"]
                            if "evaluation" in exp_yaml and exp_yaml["evaluation"]:
                                eval_cfg = cfg_yaml.get("evaluation") or {}
                                eval_cfg.update(exp_yaml["evaluation"])
                                cfg_yaml["evaluation"] = eval_cfg

                        executor_name = (cfg_yaml.get("execution") or {}).get(
                            "type", "local"
                        )
                        md_job_data = JobData(
                            invocation_id=single_id.split(".")[0],
                            job_id=single_id,
                            timestamp=0.0,
                            executor=executor_name,  # ensures slurm tag is preserved
                            data={
                                "output_dir": str(Path.cwd().parent),
                                "storage_type": "remote_local",  # no SSH in auto-export path
                            },
                            config=cfg_yaml,
                        )
                    except Exception:
                        md_job_data = None

                job_data = md_job_data or ExecutionDB().get_job(single_id)
                if job_data is None:
                    return {
                        "success": False,
                        "error": f"Job {single_id} not found in ExecutionDB",
                    }

                job_result = exporter.export_job(job_data)
                return {
                    "success": job_result.success,
                    "invocation_id": job_data.invocation_id,
                    "jobs": {
                        job_data.job_id: {
                            "success": job_result.success,
                            "message": job_result.message,
                            "metadata": job_result.metadata or {},
                            "dest": getattr(job_result, "dest", None),
                        }
                    },
                    "metadata": job_result.metadata or {},
                }

            elif single_id.isdigit():  # pipeline_id
                db = ExecutionDB()
                for job_id, job_data in db._jobs.items():
                    if job_data.data.get("pipeline_id") == int(single_id):
                        job_result = exporter.export_job(job_data)
                        return {
                            "success": job_result.success,
                            "invocation_id": job_data.invocation_id,
                            "jobs": {
                                job_data.job_id: {
                                    "success": job_result.success,
                                    "message": job_result.message,
                                    "metadata": job_result.metadata or {},
                                }
                            },
                            "metadata": job_result.metadata or {},
                        }
                return {"success": False, "error": f"Pipeline {single_id} not found"}

            else:  # invocation_id
                result = exporter.export_invocation(single_id)
                if "jobs" in result:
                    for job_id, job_result in result["jobs"].items():
                        job_result.setdefault("metadata", {})
                return result
        else:
            # Multiple IDs - parse and group
            db = ExecutionDB()
            grouped_jobs: dict[
                str, dict[str, Any]
            ] = {}  # invocation_id -> {job_id: job_data}
            invocation_only = set()  # invocation_ids with no specific jobs
            all_jobs_for_consolidated = {}  # job_id -> job_data (for consolidated export)

            # Parse and group IDs
            for id_str in invocation_ids:
                if "." in id_str:  # job_id
                    job_data = db.get_job(id_str)
                    if job_data:
                        inv_id = job_data.invocation_id
                        if inv_id not in grouped_jobs:
                            grouped_jobs[inv_id] = {}
                        grouped_jobs[inv_id][id_str] = job_data
                        all_jobs_for_consolidated[id_str] = job_data
                elif id_str.isdigit():  # pipeline_id
                    # Find job by pipeline_id and add to group
                    for job_id, job_data in db._jobs.items():
                        if job_data.data.get("pipeline_id") == int(id_str):
                            inv_id = job_data.invocation_id
                            if inv_id not in grouped_jobs:
                                grouped_jobs[inv_id] = {}
                            grouped_jobs[inv_id][job_id] = job_data
                            all_jobs_for_consolidated[job_id] = job_data
                            break
                else:  # invocation_id
                    invocation_only.add(id_str)
                    # Add all jobs from this invocation for consolidated export
                    invocation_jobs = db.get_jobs(id_str)
                    all_jobs_for_consolidated.update(invocation_jobs)

            # Check if we should use consolidated export (local + json/csv format)
            should_consolidate = (
                dest == "local"
                and config
                and config.get("format") in ["json", "csv"]
                and (
                    len(invocation_only) > 1
                    or (len(invocation_only) == 1 and len(grouped_jobs) > 0)
                )
            )

            if should_consolidate and hasattr(exporter, "export_multiple_invocations"):
                # Use consolidated export for local exporter with JSON/CSV format
                all_invocation_ids = list(invocation_only)
                # Add invocations from grouped jobs
                all_invocation_ids.extend(
                    set(
                        job_data.invocation_id
                        for jobs in grouped_jobs.values()
                        for job_data in jobs.values()
                    )
                )
                all_invocation_ids = list(set(all_invocation_ids))  # remove duplicates

                try:
                    consolidated_result = exporter.export_multiple_invocations(
                        all_invocation_ids
                    )
                    return consolidated_result  # type: ignore[no-any-return]
                except Exception as e:
                    return {
                        "success": False,
                        "error": f"Consolidated export failed: {str(e)}",
                    }

            # Regular multi-invocation export
            all_results = {}
            overall_success = True

            # Export grouped jobs (partial invocations)
            for inv_id, jobs in grouped_jobs.items():
                try:
                    # Create a custom partial invocation export
                    results = {}
                    for job_id, job_data in jobs.items():
                        job_result = exporter.export_job(job_data)
                        results[job_id] = {
                            "success": job_result.success,
                            "message": job_result.message,
                            "metadata": job_result.metadata or {},
                        }
                        if not job_result.success:
                            overall_success = False

                    all_results[inv_id] = {
                        "success": all(r["success"] for r in results.values()),
                        "invocation_id": inv_id,
                        "jobs": results,
                        "partial": True,  # indicate this was a partial invocation
                    }
                except Exception as e:
                    all_results[inv_id] = {
                        "success": False,
                        "error": f"Partial invocation export failed: {str(e)}",
                    }
                    overall_success = False

            # Export full invocations
            for inv_id in invocation_only:
                result = exporter.export_invocation(inv_id)
                # Ensure metadata is present in job results to prevent KeyError
                if "jobs" in result:
                    for job_id, job_result in result["jobs"].items():
                        if "metadata" not in job_result:
                            job_result["metadata"] = {}
                all_results[inv_id] = result
                if not result.get("success", False):
                    overall_success = False

            return {
                "success": overall_success,
                "invocations": all_results,
                "metadata": {
                    "total_invocations": len(all_results),
                    "successful_invocations": sum(
                        1 for r in all_results.values() if r.get("success")
                    ),
                    "mixed_export": len(grouped_jobs)
                    > 0,  # indicates mixed job/invocation export
                },
            }

    except Exception as e:
        return {"success": False, "error": f"Export failed: {str(e)}"}
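A sketch of the consolidated path described in the code: `dest="local"` plus a `json`/`csv` format and more than one invocation routes through `export_multiple_invocations` when the exporter provides it (the IDs below are placeholders):

```python
from nemo_evaluator_launcher.api.functional import export_results

# Two placeholder invocation IDs; {"format": "json"} selects the consolidated branch.
result = export_results(
    ["aefc4819", "bfd01234"],
    dest="local",
    config={"format": "json"},
)
if not result.get("success"):
    print("export failed:", result.get("error"))
```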
def _check_api_endpoint_when_deployment_is_configured(cfg: RunConfig) -> None:
    """Check API endpoint configuration when deployment is configured.

    Args:
        cfg: Configuration object.

    Raises:
        ValueError: If an invalid configuration is detected.
    """
    if cfg.deployment.type == "none":
        return
    if "target" not in cfg or not isinstance(cfg.target, DictConfig):
        return
    if "api_endpoint" not in cfg.target or not isinstance(
        cfg.target.api_endpoint, DictConfig
    ):
        return
    if "url" in cfg.target.api_endpoint:
        raise ValueError(
            "when deployment is configured, url field should not exist in target.api_endpoint"
        )
    if "model_id" in cfg.target.api_endpoint:
        raise ValueError(
            "when deployment is configured, model_id field should not exist in target.api_endpoint"
        )
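Finally, a sketch of the guard rejecting a hand-set endpoint; the underscore-prefixed helper is private, imported here purely for illustration:

```python
from omegaconf import OmegaConf

from nemo_evaluator_launcher.api.functional import (
    _check_api_endpoint_when_deployment_is_configured,
)

cfg = OmegaConf.create(
    {
        "deployment": {"type": "vllm"},  # any type other than "none"
        "target": {"api_endpoint": {"url": "http://localhost:8000/v1"}},
    }
)
# Raises ValueError: url is preset in target.api_endpoint while deployment.type != "none".
_check_api_endpoint_when_deployment_is_configured(cfg)
```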