llama-stack-api 0.4.4__py3-none-any.whl → 0.5.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack_api/__init__.py +175 -20
- llama_stack_api/agents/__init__.py +38 -0
- llama_stack_api/agents/api.py +52 -0
- llama_stack_api/agents/fastapi_routes.py +268 -0
- llama_stack_api/agents/models.py +181 -0
- llama_stack_api/common/errors.py +15 -0
- llama_stack_api/connectors/__init__.py +38 -0
- llama_stack_api/connectors/api.py +50 -0
- llama_stack_api/connectors/fastapi_routes.py +103 -0
- llama_stack_api/connectors/models.py +103 -0
- llama_stack_api/conversations/__init__.py +61 -0
- llama_stack_api/conversations/api.py +44 -0
- llama_stack_api/conversations/fastapi_routes.py +177 -0
- llama_stack_api/conversations/models.py +245 -0
- llama_stack_api/datasetio/__init__.py +34 -0
- llama_stack_api/datasetio/api.py +42 -0
- llama_stack_api/datasetio/fastapi_routes.py +94 -0
- llama_stack_api/datasetio/models.py +48 -0
- llama_stack_api/eval/__init__.py +55 -0
- llama_stack_api/eval/api.py +51 -0
- llama_stack_api/eval/compat.py +300 -0
- llama_stack_api/eval/fastapi_routes.py +126 -0
- llama_stack_api/eval/models.py +141 -0
- llama_stack_api/inference/__init__.py +207 -0
- llama_stack_api/inference/api.py +93 -0
- llama_stack_api/inference/fastapi_routes.py +243 -0
- llama_stack_api/inference/models.py +1035 -0
- llama_stack_api/models/__init__.py +47 -0
- llama_stack_api/models/api.py +38 -0
- llama_stack_api/models/fastapi_routes.py +104 -0
- llama_stack_api/{models.py → models/models.py} +65 -79
- llama_stack_api/openai_responses.py +32 -6
- llama_stack_api/post_training/__init__.py +73 -0
- llama_stack_api/post_training/api.py +36 -0
- llama_stack_api/post_training/fastapi_routes.py +116 -0
- llama_stack_api/{post_training.py → post_training/models.py} +55 -86
- llama_stack_api/prompts/__init__.py +47 -0
- llama_stack_api/prompts/api.py +44 -0
- llama_stack_api/prompts/fastapi_routes.py +163 -0
- llama_stack_api/prompts/models.py +177 -0
- llama_stack_api/resource.py +0 -1
- llama_stack_api/safety/__init__.py +37 -0
- llama_stack_api/safety/api.py +29 -0
- llama_stack_api/safety/datatypes.py +83 -0
- llama_stack_api/safety/fastapi_routes.py +55 -0
- llama_stack_api/safety/models.py +38 -0
- llama_stack_api/schema_utils.py +47 -4
- llama_stack_api/scoring/__init__.py +66 -0
- llama_stack_api/scoring/api.py +35 -0
- llama_stack_api/scoring/fastapi_routes.py +67 -0
- llama_stack_api/scoring/models.py +81 -0
- llama_stack_api/scoring_functions/__init__.py +50 -0
- llama_stack_api/scoring_functions/api.py +39 -0
- llama_stack_api/scoring_functions/fastapi_routes.py +108 -0
- llama_stack_api/{scoring_functions.py → scoring_functions/models.py} +67 -64
- llama_stack_api/shields/__init__.py +41 -0
- llama_stack_api/shields/api.py +39 -0
- llama_stack_api/shields/fastapi_routes.py +104 -0
- llama_stack_api/shields/models.py +74 -0
- llama_stack_api/validators.py +46 -0
- llama_stack_api/vector_io/__init__.py +88 -0
- llama_stack_api/vector_io/api.py +234 -0
- llama_stack_api/vector_io/fastapi_routes.py +447 -0
- llama_stack_api/{vector_io.py → vector_io/models.py} +99 -377
- {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/METADATA +1 -1
- llama_stack_api-0.5.0rc1.dist-info/RECORD +115 -0
- llama_stack_api/agents.py +0 -173
- llama_stack_api/connectors.py +0 -146
- llama_stack_api/conversations.py +0 -270
- llama_stack_api/datasetio.py +0 -55
- llama_stack_api/eval.py +0 -137
- llama_stack_api/inference.py +0 -1169
- llama_stack_api/prompts.py +0 -203
- llama_stack_api/safety.py +0 -132
- llama_stack_api/scoring.py +0 -93
- llama_stack_api/shields.py +0 -93
- llama_stack_api-0.4.4.dist-info/RECORD +0 -70
- {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/WHEEL +0 -0
- {llama_stack_api-0.4.4.dist-info → llama_stack_api-0.5.0rc1.dist-info}/top_level.txt +0 -0

llama_stack_api/datasetio/api.py
@@ -0,0 +1,42 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+"""DatasetIO API protocol definition.
+
+This module contains the DatasetIO protocol definition.
+Pydantic models are defined in llama_stack_api.datasetio.models.
+The FastAPI router is defined in llama_stack_api.datasetio.fastapi_routes.
+"""
+
+from typing import Protocol, runtime_checkable
+
+from llama_stack_api.datasets import Dataset
+
+from .models import (
+    AppendRowsRequest,
+    IterRowsRequest,
+    PaginatedResponse,
+)
+
+
+class DatasetStore(Protocol):
+    def get_dataset(self, dataset_id: str) -> Dataset: ...
+
+
+@runtime_checkable
+class DatasetIO(Protocol):
+    """Protocol for dataset I/O operations.
+
+    The DatasetIO API provides operations for reading and writing data to datasets.
+    This includes iterating over rows and appending new rows to existing datasets.
+    """
+
+    # keeping for aligning with inference/safety, but this is not used
+    dataset_store: DatasetStore
+
+    async def iterrows(self, request: IterRowsRequest) -> PaginatedResponse: ...
+
+    async def append_rows(self, request: AppendRowsRequest) -> None: ...
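
The new protocol takes a single request model per method instead of loose parameters. Below is a minimal sketch of a provider satisfying it, assuming an in-memory dict as storage and assuming PaginatedResponse accepts the data/has_more fields described in the router documentation; only the method signatures and module paths come from the diff (the unused dataset_store attribute is omitted).

from llama_stack_api.datasetio.models import (
    AppendRowsRequest,
    IterRowsRequest,
    PaginatedResponse,
)


class InMemoryDatasetIO:
    """Hypothetical provider: stores rows per dataset_id in a plain dict."""

    def __init__(self) -> None:
        self._rows: dict[str, list[dict]] = {}

    async def iterrows(self, request: IterRowsRequest) -> PaginatedResponse:
        rows = self._rows.get(request.dataset_id, [])
        start = request.start_index or 0
        # None or -1 means "return all remaining rows", per the route description
        end = len(rows) if request.limit in (None, -1) else start + request.limit
        return PaginatedResponse(data=rows[start:end], has_more=end < len(rows))

    async def append_rows(self, request: AppendRowsRequest) -> None:
        self._rows.setdefault(request.dataset_id, []).extend(request.rows)
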

llama_stack_api/datasetio/fastapi_routes.py
@@ -0,0 +1,94 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+"""FastAPI router for the DatasetIO API.
+
+This module defines the FastAPI router for the DatasetIO API using standard
+FastAPI route decorators.
+"""
+
+from typing import Annotated
+
+from fastapi import APIRouter, Body, Path, Query
+
+from llama_stack_api.common.responses import PaginatedResponse
+from llama_stack_api.router_utils import standard_responses
+from llama_stack_api.version import LLAMA_STACK_API_V1BETA
+
+from .api import DatasetIO
+from .models import (
+    AppendRowsRequest,
+    IterRowsRequest,
+)
+
+
+def create_router(impl: DatasetIO) -> APIRouter:
+    """Create a FastAPI router for the DatasetIO API.
+
+    Args:
+        impl: The DatasetIO implementation instance
+
+    Returns:
+        APIRouter configured for the DatasetIO API
+    """
+    router = APIRouter(
+        prefix=f"/{LLAMA_STACK_API_V1BETA}",
+        tags=["DatasetIO"],
+        responses=standard_responses,
+    )
+
+    @router.get(
+        "/datasetio/iterrows/{dataset_id:path}",
+        response_model=PaginatedResponse,
+        summary="Get a paginated list of rows from a dataset.",
+        description="""Get a paginated list of rows from a dataset.
+
+Uses offset-based pagination where:
+- start_index: The starting index (0-based). If None, starts from beginning.
+- limit: Number of items to return. If None or -1, returns all items.
+
+The response includes:
+- data: List of items for the current page.
+- has_more: Whether there are more items available after this set.""",
+        responses={
+            200: {"description": "A PaginatedResponse containing the rows."},
+        },
+    )
+    async def iterrows(
+        dataset_id: Annotated[str, Path(description="The ID of the dataset to get the rows from.")],
+        start_index: Annotated[
+            int | None, Query(description="Index into dataset for the first row to get. Get all rows if None.")
+        ] = None,
+        limit: Annotated[int | None, Query(description="The number of rows to get.")] = None,
+    ) -> PaginatedResponse:
+        request = IterRowsRequest(
+            dataset_id=dataset_id,
+            start_index=start_index,
+            limit=limit,
+        )
+        return await impl.iterrows(request)
+
+    @router.post(
+        "/datasetio/append-rows/{dataset_id:path}",
+        status_code=204,
+        summary="Append rows to a dataset.",
+        description="Append rows to a dataset.",
+        responses={
+            204: {"description": "Rows were successfully appended."},
+        },
+    )
+    async def append_rows(
+        dataset_id: Annotated[str, Path(description="The ID of the dataset to append the rows to.")],
+        request: Annotated[AppendRowsRequest, Body(...)],
+    ) -> None:
+        # Override the dataset_id from the path
+        request_with_id = AppendRowsRequest(
+            dataset_id=dataset_id,
+            rows=request.rows,
+        )
+        return await impl.append_rows(request_with_id)
+
+    return router
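
Because create_router takes the implementation instance directly, wiring the API into an application is a single include_router call. A sketch, reusing the hypothetical InMemoryDatasetIO from the earlier example; the route paths in the comments follow the prefix and decorators defined in this file.

from fastapi import FastAPI

from llama_stack_api.datasetio.fastapi_routes import create_router

app = FastAPI()
app.include_router(create_router(InMemoryDatasetIO()))

# The router mounts, for example:
#   GET  /{LLAMA_STACK_API_V1BETA}/datasetio/iterrows/{dataset_id}
#   POST /{LLAMA_STACK_API_V1BETA}/datasetio/append-rows/{dataset_id}
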

llama_stack_api/datasetio/models.py
@@ -0,0 +1,48 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+"""Pydantic models for DatasetIO API requests and responses.
+
+This module defines the request and response models for the DatasetIO API
+using Pydantic with Field descriptions for OpenAPI schema generation.
+"""
+
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+from llama_stack_api.common.responses import PaginatedResponse
+from llama_stack_api.schema_utils import json_schema_type
+
+
+@json_schema_type
+class IterRowsRequest(BaseModel):
+    """Request model for iterating over rows in a dataset."""
+
+    dataset_id: str = Field(..., description="The ID of the dataset to get the rows from.")
+    start_index: int | None = Field(
+        default=None,
+        description="Index into dataset for the first row to get. Get all rows if None.",
+    )
+    limit: int | None = Field(
+        default=None,
+        description="The number of rows to get.",
+    )
+
+
+@json_schema_type
+class AppendRowsRequest(BaseModel):
+    """Request model for appending rows to a dataset."""
+
+    dataset_id: str = Field(..., description="The ID of the dataset to append the rows to.")
+    rows: list[dict[str, Any]] = Field(..., description="The rows to append to the dataset.")
+
+
+__all__ = [
+    "AppendRowsRequest",
+    "IterRowsRequest",
+    "PaginatedResponse",
+]
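
The request models are ordinary Pydantic models, so they validate and serialize like any other, and the Field descriptions above are what the router surfaces in the OpenAPI schema. A small sketch, assuming Pydantic v2 (model_dump/model_json_schema); the dataset ID and rows are placeholders.

from llama_stack_api.datasetio.models import AppendRowsRequest, IterRowsRequest

page = IterRowsRequest(dataset_id="my-dataset", start_index=0, limit=10)
append = AppendRowsRequest(
    dataset_id="my-dataset",
    rows=[{"prompt": "hello", "completion": "world"}],
)

print(page.model_dump())                     # {'dataset_id': 'my-dataset', 'start_index': 0, 'limit': 10}
print(IterRowsRequest.model_json_schema())   # includes the Field descriptions defined above
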

llama_stack_api/eval/__init__.py
@@ -0,0 +1,55 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from llama_stack_api.common.job_types import Job
+
+from . import fastapi_routes
+from .api import Eval
+from .compat import (
+    resolve_evaluate_rows_request,
+    resolve_job_cancel_request,
+    resolve_job_result_request,
+    resolve_job_status_request,
+    resolve_run_eval_request,
+)
+from .models import (
+    BenchmarkConfig,
+    BenchmarkIdRequest,
+    EvalCandidate,
+    EvaluateResponse,
+    EvaluateRowsBodyRequest,
+    EvaluateRowsRequest,
+    JobCancelRequest,
+    JobResultRequest,
+    JobStatusRequest,
+    ModelCandidate,
+    RunEvalBodyRequest,
+    RunEvalRequest,
+)
+
+__all__ = [
+    "Eval",
+    "BenchmarkConfig",
+    "BenchmarkIdRequest",
+    "EvalCandidate",
+    "EvaluateResponse",
+    "EvaluateRowsBodyRequest",
+    "EvaluateRowsRequest",
+    "Job",
+    "JobCancelRequest",
+    "JobResultRequest",
+    "JobStatusRequest",
+    "ModelCandidate",
+    "RunEvalBodyRequest",
+    "RunEvalRequest",
+    "fastapi_routes",
+    # Backward compatibility helpers
+    "resolve_run_eval_request",
+    "resolve_evaluate_rows_request",
+    "resolve_job_status_request",
+    "resolve_job_cancel_request",
+    "resolve_job_result_request",
+]

llama_stack_api/eval/api.py
@@ -0,0 +1,51 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Protocol, runtime_checkable
+
+from llama_stack_api.common.job_types import Job
+
+from .models import (
+    EvaluateResponse,
+    EvaluateRowsRequest,
+    JobCancelRequest,
+    JobResultRequest,
+    JobStatusRequest,
+    RunEvalRequest,
+)
+
+
+@runtime_checkable
+class Eval(Protocol):
+    """Evaluations
+
+    Llama Stack Evaluation API for running evaluations on model and agent candidates."""
+
+    async def run_eval(
+        self,
+        request: RunEvalRequest,
+    ) -> Job:
+        """Run an evaluation on a benchmark."""
+        ...
+
+    async def evaluate_rows(
+        self,
+        request: EvaluateRowsRequest,
+    ) -> EvaluateResponse:
+        """Evaluate a list of rows on a benchmark."""
+        ...
+
+    async def job_status(self, request: JobStatusRequest) -> Job:
+        """Get the status of a job."""
+        ...
+
+    async def job_cancel(self, request: JobCancelRequest) -> None:
+        """Cancel a job."""
+        ...
+
+    async def job_result(self, request: JobResultRequest) -> EvaluateResponse:
+        """Get the result of a job."""
+        ...
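
Since Eval is decorated with @runtime_checkable, providers satisfy it structurally: no inheritance is needed, and isinstance only checks that the methods exist. A minimal sketch with stubbed methods; the provider class is hypothetical, the signatures mirror the protocol above, and the imported names are those re-exported by the new eval package.

from llama_stack_api.common.job_types import Job
from llama_stack_api.eval import (
    Eval,
    EvaluateResponse,
    EvaluateRowsRequest,
    JobCancelRequest,
    JobResultRequest,
    JobStatusRequest,
    RunEvalRequest,
)


class StubEvalProvider:
    """Hypothetical provider; bodies elided, only the shape matters here."""

    async def run_eval(self, request: RunEvalRequest) -> Job: ...
    async def evaluate_rows(self, request: EvaluateRowsRequest) -> EvaluateResponse: ...
    async def job_status(self, request: JobStatusRequest) -> Job: ...
    async def job_cancel(self, request: JobCancelRequest) -> None: ...
    async def job_result(self, request: JobResultRequest) -> EvaluateResponse: ...


assert isinstance(StubEvalProvider(), Eval)  # structural check enabled by @runtime_checkable
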

llama_stack_api/eval/compat.py
@@ -0,0 +1,300 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+"""
+Backward compatibility helpers for the Eval API.
+
+This module provides utilities to support both the old-style (individual parameters)
+and new-style (request objects) calling conventions for Eval API methods.
+
+The old-style parameters are deprecated and will be removed in a future release.
+
+Note: When both a request object AND individual parameters are provided, the request
+object takes precedence and individual parameters are ignored.
+"""
+
+import warnings
+from typing import Any
+
+from .models import (
+    BenchmarkConfig,
+    EvaluateRowsRequest,
+    JobCancelRequest,
+    JobResultRequest,
+    JobStatusRequest,
+    RunEvalRequest,
+)
+
+_DEPRECATION_TARGET = "0.6.0"
+
+_DEPRECATION_MESSAGE = (
+    "Passing individual parameters to {method_name}() is deprecated. "
+    "Please use {request_class}(benchmark_id=..., ...) instead. "
+    "This will be removed in version {target}."
+)
+
+
+def _emit_deprecation_warning(method_name: str, request_class: str) -> None:
+    """Emit a deprecation warning for old-style parameter usage."""
+    warnings.warn(
+        _DEPRECATION_MESSAGE.format(method_name=method_name, request_class=request_class, target=_DEPRECATION_TARGET),
+        DeprecationWarning,
+        stacklevel=4,
+    )
+
+
+def _format_missing_params(required: list[str], provided: dict[str, Any]) -> str:
+    """Format error message showing which parameters are missing."""
+    missing = [p for p in required if provided.get(p) is None]
+    provided_names = [p for p in required if provided.get(p) is not None]
+
+    parts = []
+    if missing:
+        parts.append(f"missing: {', '.join(missing)}")
+    if provided_names:
+        parts.append(f"provided: {', '.join(provided_names)}")
+
+    return "; ".join(parts)
+
+
+def _validate_not_empty(value: Any, name: str) -> None:
+    """Validate that a value is not None, empty string, or empty list."""
+    if not value:
+        raise ValueError(f"'{name}' cannot be None or empty. Provided: {value}")
+
+
+def resolve_run_eval_request(
+    request: RunEvalRequest | None = None,
+    *,
+    benchmark_id: str | None = None,
+    benchmark_config: BenchmarkConfig | None = None,
+) -> RunEvalRequest:
+    """
+    Resolve run_eval parameters to a RunEvalRequest object.
+
+    Supports both new-style (request object) and old-style (individual parameters).
+    Old-style usage emits a DeprecationWarning.
+
+    Note: If both request object and individual parameters are provided, the request
+    object takes precedence and individual parameters are ignored.
+
+    Args:
+        request: The new-style request object (preferred)
+        benchmark_id: (Deprecated) The benchmark ID
+        benchmark_config: (Deprecated) The benchmark configuration
+
+    Returns:
+        RunEvalRequest object
+    """
+    if request is not None:
+        _validate_not_empty(request.benchmark_id, "benchmark_id")
+        _validate_not_empty(request.benchmark_config, "benchmark_config")
+        return request
+
+    # Old-style parameters
+    if benchmark_id and benchmark_config:
+        _emit_deprecation_warning("run_eval", "RunEvalRequest")
+        return RunEvalRequest(
+            benchmark_id=benchmark_id,
+            benchmark_config=benchmark_config,
+        )
+
+    required = ["benchmark_id", "benchmark_config"]
+    provided = {"benchmark_id": benchmark_id, "benchmark_config": benchmark_config}
+    raise ValueError(
+        f"Either 'request' (RunEvalRequest) or both 'benchmark_id' and 'benchmark_config' "
+        f"must be provided. {_format_missing_params(required, provided)}"
+    )
+
+
+def resolve_evaluate_rows_request(
+    request: EvaluateRowsRequest | None = None,
+    *,
+    benchmark_id: str | None = None,
+    input_rows: list[dict[str, Any]] | None = None,
+    scoring_functions: list[str] | None = None,
+    benchmark_config: BenchmarkConfig | None = None,
+) -> EvaluateRowsRequest:
+    """
+    Resolve evaluate_rows parameters to an EvaluateRowsRequest object.
+
+    Supports both new-style (request object) and old-style (individual parameters).
+    Old-style usage emits a DeprecationWarning.
+
+    Note: If both request object and individual parameters are provided, the request
+    object takes precedence and individual parameters are ignored.
+
+    Args:
+        request: The new-style request object (preferred)
+        benchmark_id: (Deprecated) The benchmark ID
+        input_rows: (Deprecated) The rows to evaluate
+        scoring_functions: (Deprecated) The scoring functions to use
+        benchmark_config: (Deprecated) The benchmark configuration
+
+    Returns:
+        EvaluateRowsRequest object
+    """
+    if request is not None:
+        _validate_not_empty(request.benchmark_id, "benchmark_id")
+        _validate_not_empty(request.input_rows, "input_rows")
+        _validate_not_empty(request.scoring_functions, "scoring_functions")
+        _validate_not_empty(request.benchmark_config, "benchmark_config")
+        return request
+
+    # Old-style parameters
+    if benchmark_id and input_rows and scoring_functions and benchmark_config:
+        _emit_deprecation_warning("evaluate_rows", "EvaluateRowsRequest")
+        return EvaluateRowsRequest(
+            benchmark_id=benchmark_id,
+            input_rows=input_rows,
+            scoring_functions=scoring_functions,
+            benchmark_config=benchmark_config,
+        )
+
+    required = ["benchmark_id", "input_rows", "scoring_functions", "benchmark_config"]
+    provided = {
+        "benchmark_id": benchmark_id,
+        "input_rows": input_rows,
+        "scoring_functions": scoring_functions,
+        "benchmark_config": benchmark_config,
+    }
+    raise ValueError(
+        f"Either 'request' (EvaluateRowsRequest) or all of 'benchmark_id', 'input_rows', "
+        f"'scoring_functions', and 'benchmark_config' must be provided. "
+        f"{_format_missing_params(required, provided)}"
+    )
+
+
+def resolve_job_status_request(
+    request: JobStatusRequest | None = None,
+    *,
+    benchmark_id: str | None = None,
+    job_id: str | None = None,
+) -> JobStatusRequest:
+    """
+    Resolve job_status parameters to a JobStatusRequest object.
+
+    Supports both new-style (request object) and old-style (individual parameters).
+    Old-style usage emits a DeprecationWarning.
+
+    Note: If both request object and individual parameters are provided, the request
+    object takes precedence and individual parameters are ignored.
+
+    Args:
+        request: The new-style request object (preferred)
+        benchmark_id: (Deprecated) The benchmark ID
+        job_id: (Deprecated) The job ID
+
+    Returns:
+        JobStatusRequest object
+    """
+    if request is not None:
+        _validate_not_empty(request.benchmark_id, "benchmark_id")
+        _validate_not_empty(request.job_id, "job_id")
+        return request
+
+    # Old-style parameters
+    if benchmark_id and job_id:
+        _emit_deprecation_warning("job_status", "JobStatusRequest")
+        return JobStatusRequest(
+            benchmark_id=benchmark_id,
+            job_id=job_id,
+        )
+
+    required = ["benchmark_id", "job_id"]
+    provided = {"benchmark_id": benchmark_id, "job_id": job_id}
+    raise ValueError(
+        f"Either 'request' (JobStatusRequest) or both 'benchmark_id' and 'job_id' "
+        f"must be provided. {_format_missing_params(required, provided)}"
+    )
+
+
+def resolve_job_cancel_request(
+    request: JobCancelRequest | None = None,
+    *,
+    benchmark_id: str | None = None,
+    job_id: str | None = None,
+) -> JobCancelRequest:
+    """
+    Resolve job_cancel parameters to a JobCancelRequest object.
+
+    Supports both new-style (request object) and old-style (individual parameters).
+    Old-style usage emits a DeprecationWarning.
+
+    Note: If both request object and individual parameters are provided, the request
+    object takes precedence and individual parameters are ignored.
+
+    Args:
+        request: The new-style request object (preferred)
+        benchmark_id: (Deprecated) The benchmark ID
+        job_id: (Deprecated) The job ID
+
+    Returns:
+        JobCancelRequest object
+    """
+    if request is not None:
+        _validate_not_empty(request.benchmark_id, "benchmark_id")
+        _validate_not_empty(request.job_id, "job_id")
+        return request
+
+    # Old-style parameters
+    if benchmark_id and job_id:
+        _emit_deprecation_warning("job_cancel", "JobCancelRequest")
+        return JobCancelRequest(
+            benchmark_id=benchmark_id,
+            job_id=job_id,
+        )
+
+    required = ["benchmark_id", "job_id"]
+    provided = {"benchmark_id": benchmark_id, "job_id": job_id}
+    raise ValueError(
+        f"Either 'request' (JobCancelRequest) or both 'benchmark_id' and 'job_id' "
+        f"must be provided. {_format_missing_params(required, provided)}"
+    )
+
+
+def resolve_job_result_request(
+    request: JobResultRequest | None = None,
+    *,
+    benchmark_id: str | None = None,
+    job_id: str | None = None,
+) -> JobResultRequest:
+    """
+    Resolve job_result parameters to a JobResultRequest object.
+
+    Supports both new-style (request object) and old-style (individual parameters).
+    Old-style usage emits a DeprecationWarning.
+
+    Note: If both request object and individual parameters are provided, the request
+    object takes precedence and individual parameters are ignored.
+
+    Args:
+        request: The new-style request object (preferred)
+        benchmark_id: (Deprecated) The benchmark ID
+        job_id: (Deprecated) The job ID
+
+    Returns:
+        JobResultRequest object
+    """
+    if request is not None:
+        _validate_not_empty(request.benchmark_id, "benchmark_id")
+        _validate_not_empty(request.job_id, "job_id")
+        return request
+
+    # Old-style parameters
+    if benchmark_id and job_id:
+        _emit_deprecation_warning("job_result", "JobResultRequest")
+        return JobResultRequest(
+            benchmark_id=benchmark_id,
+            job_id=job_id,
+        )
+
+    required = ["benchmark_id", "job_id"]
+    provided = {"benchmark_id": benchmark_id, "job_id": job_id}
+    raise ValueError(
+        f"Either 'request' (JobResultRequest) or both 'benchmark_id' and 'job_id' "
+        f"must be provided. {_format_missing_params(required, provided)}"
+    )
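
A short sketch of how the resolvers behave in the two calling styles: a request object passes through after validation, old-style keywords still work until 0.6.0 but emit a DeprecationWarning, and providing neither raises a ValueError listing the missing parameters. BenchmarkConfig() and the benchmark ID are placeholders here; the real fields live in eval/models.py, which is not part of this hunk.

import warnings

from llama_stack_api.eval import BenchmarkConfig, RunEvalRequest, resolve_run_eval_request

config = BenchmarkConfig()  # placeholder; actual required fields come from eval/models.py

# New style: the request object is validated and returned as-is.
req = resolve_run_eval_request(RunEvalRequest(benchmark_id="my-benchmark", benchmark_config=config))

# Old style: still accepted, but emits a DeprecationWarning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    req = resolve_run_eval_request(benchmark_id="my-benchmark", benchmark_config=config)
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# Calling resolve_run_eval_request() with neither form raises a ValueError
# whose message lists the missing parameters.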